max_stars_count (int64, 301–224k) | text (stringlengths 6–1.05M) | token_count (int64, 3–727k)
---|---|---|
831 | <gh_stars>100-1000
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.uibuilder.handlers.linear;
import com.android.tools.idea.common.SyncNlModel;
import com.android.tools.idea.common.model.NlComponent;
import com.android.tools.idea.common.scene.draw.DisplayList;
import com.android.tools.idea.common.surface.DesignSurface;
import com.android.tools.idea.common.surface.InteractionManager;
import com.android.tools.idea.uibuilder.LayoutTestCase;
import com.android.tools.idea.uibuilder.LayoutTestUtilities;
import com.google.common.collect.ImmutableList;
import java.awt.datatransfer.Transferable;
import java.awt.dnd.DnDConstants;
import static org.mockito.Mockito.mock;
public class LinearDragHandlerTest extends LayoutTestCase {
/**
* Simulates dragging a component of an outer LinearLayout from the
* component tree to an inner linear layout.
*
* The dragged component should be deleted from the outer
* linear layout and added to the inner one.
*/
public void testDragFromTree() {
SyncNlModel model = model("model.xml",
component("LinearLayout")
.id("@+id/outer")
.withBounds(0, 0, 100, 100)
.children(
component("Button")
.id("@+id/button")
.withBounds(0, 0, 10, 10),
component("LinearLayout")
.viewObject(linearLayout())
.id("@+id/inner")
.withBounds(10, 0, 90, 100)
.children(component("TextView")
.withBounds(10, 0, 10, 10),
component("TextView")
.withBounds(20, 0, 10, 10)))).build();
NlComponent button = model.find("button");
DesignSurface surface = LayoutTestUtilities.createScreen(model).getSurface();
surface.getScene().buildDisplayList(new DisplayList(), 0);
surface.getSelectionModel().setSelection(ImmutableList.of(button));
surface.setModel(model);
Transferable transferable = surface.getSelectionAsTransferable();
InteractionManager manager = surface.getInteractionManager();
manager.startListening();
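// The drop at (13, 0) lies inside the inner LinearLayout (which starts at
// x = 10), so the button should be re-parented there; the assertions below
// expect it at index 1, between the two TextViews.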
LayoutTestUtilities.dragDrop(manager, 0, 0, 13, 0, transferable, DnDConstants.ACTION_MOVE);
assertEquals(3, model.find("inner").getChildCount());
assertEquals("button", model.find("inner").getChild(1).getId());
assertEquals(1, model.find("outer").getChildCount());
}
private static android.widget.LinearLayout linearLayout() {
return mock(android.widget.LinearLayout.class);
}
} | 1,460 |
5,079 | <gh_stars>1000+
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.encoding import smart_str
from desktop.lib import export_csvxls
LOG = logging.getLogger(__name__)
DL_FORMATS = ['csv', 'xls']
def download(api, session, cell, format, user_agent=None):
if format not in DL_FORMATS:
LOG.error('Unknown download format "%s"' % format)
return
content_generator = SparkDataAdapter(api, session, cell)
generator = export_csvxls.create_generator(content_generator, format)
return export_csvxls.make_response(generator, format, 'script_result', user_agent=user_agent)
def SparkDataAdapter(api, session, cell):
response = api.fetch_data(session, cell)
content = response['output']
data = content['data']
table = data['application/vnd.livy.table.v1+json']
rows = table['data']
headers = table['headers']
yield headers, rows
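# Illustrative usage (the api/session/cell objects come from the calling
# notebook code and are hypothetical here):
#
#   response = download(api, session, cell, 'csv', user_agent=user_agent)
#   if response is not None:
#       return response  # streamed CSV/XLS attachment named 'script_result'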
| 479 |
357 | <gh_stars>100-1000
#include <exception>
#include <algorithm>
#include <numeric>
#include <random>
#include <cmath>
#include <mipp.h>
#include <catch.hpp>
template <typename T1, typename T2>
void test_reg_cvt()
{
constexpr int N1 = mipp::N<T1>();
constexpr int N2 = mipp::N<T2>();
T1 inputs1[N1];
std::iota(inputs1, inputs1 + N1, (T1)-N1/2);
for (auto i = 0; i < N1; i++)
inputs1[i] += i % 2 ? (T1)0.4 : (T1)0.6;
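// The 0.4/0.6 offsets keep every element away from a .5 tie, so std::round()
// in the checks below has a single unambiguous expected value per element.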
mipp::reg r1 = mipp::load<T1>(inputs1);
mipp::reg r2 = N1 != N2 ? mipp::cvt<T1,T2>(mipp::low<T1>(r1)) : mipp::cvt<T1,T2>(r1);
for (auto i = 0; i < N2; i++)
{
auto res = static_cast<T2>(std::round(inputs1[i]));
REQUIRE(mipp::get<T2>(r2, i) == res);
}
}
#ifndef MIPP_NO
TEST_CASE("Convert - mipp::reg", "[mipp::cvt]")
{
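// The guards below mirror mipp's cvt support matrix: int64 <-> double needs a
// 64-bit target and, on AVX-512, the DQ extension; it is skipped on plain
// SSE/AVX. Narrow-to-wide integer conversions need SSE4.1/AVX2 or newer.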
#if defined(MIPP_64BIT)
#if !defined(MIPP_AVX512) || (defined(MIPP_AVX512) && defined(__AVX512DQ__))
#if !defined(MIPP_SSE) && !defined(MIPP_AVX)
SECTION("datatype = int64_t -> double") { test_reg_cvt<int64_t,double>(); }
SECTION("datatype = double -> int64_t") { test_reg_cvt<double,int64_t>(); }
#endif
#endif
#endif
SECTION("datatype = int32_t -> float") { test_reg_cvt<int32_t,float>(); }
SECTION("datatype = float -> int32_t") { test_reg_cvt<float,int32_t>(); }
#if !defined(MIPP_AVX) || (defined(MIPP_AVX) && MIPP_INSTR_VERSION >= 2)
#if !defined(MIPP_SSE) || (defined(MIPP_SSE) && MIPP_INSTR_VERSION >= 41)
#if defined(MIPP_64BIT)
SECTION("datatype = int32_t -> int64_t") { test_reg_cvt<int32_t,int64_t>(); }
#endif
#if defined(MIPP_BW)
SECTION("datatype = int16_t -> int32_t") { test_reg_cvt<int16_t,int32_t>(); }
SECTION("datatype = int8_t -> int16_t") { test_reg_cvt<int8_t,int16_t>(); }
#endif
#endif
#endif
}
#endif
template <typename T1, typename T2>
void test_Reg_cvt()
{
constexpr int N1 = mipp::N<T1>();
constexpr int N2 = mipp::N<T2>();
T1 inputs1[N1];
std::iota(inputs1, inputs1 + N1, (T1)-N1/2);
for (auto i = 0; i < N1; i++)
inputs1[i] += i % 2 ? (T1)0.4 : (T1)0.6;
mipp::Reg<T1> r1 = inputs1;
mipp::Reg<T2> r2 = N1 != N2 ? mipp::cvt<T1,T2>(r1.low()) : mipp::cvt<T1,T2>(r1);
for (auto i = 0; i < N2; i++)
{
auto res = static_cast<T2>(std::round(inputs1[i]));
REQUIRE(r2[i] == res);
}
}
TEST_CASE("Convert - mipp::Reg", "[mipp::cvt]")
{
#if defined(MIPP_64BIT)
#if !defined(MIPP_AVX512) || (defined(MIPP_AVX512) && defined(__AVX512DQ__))
#if !defined(MIPP_SSE) && !defined(MIPP_AVX)
SECTION("datatype = int64_t -> double") { test_Reg_cvt<int64_t,double>(); }
SECTION("datatype = double -> int64_t") { test_Reg_cvt<double,int64_t>(); }
#endif
#endif
#endif
SECTION("datatype = int32_t -> float") { test_Reg_cvt<int32_t,float>(); }
SECTION("datatype = float -> int32_t") { test_Reg_cvt<float,int32_t>(); }
#if !defined(MIPP_AVX) || (defined(MIPP_AVX) && MIPP_INSTR_VERSION >= 2)
#if !defined(MIPP_SSE) || (defined(MIPP_SSE) && MIPP_INSTR_VERSION >= 41)
#if defined(MIPP_64BIT)
SECTION("datatype = int32_t -> int64_t") { test_Reg_cvt<int32_t,int64_t>(); }
#endif
#if defined(MIPP_BW)
SECTION("datatype = int16_t -> int32_t") { test_Reg_cvt<int16_t,int32_t>(); }
SECTION("datatype = int8_t -> int16_t") { test_Reg_cvt<int8_t,int16_t>(); }
#endif
#endif
#endif
}
| 1,540 |
2,111 | # -*- coding: utf-8 -*-
import pytest
import env # noqa: F401
from pybind11_tests import ConstructorStats
from pybind11_tests import multiple_inheritance as m
def test_multiple_inheritance_cpp():
mt = m.MIType(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_mix1():
class Base1:
def __init__(self, i):
self.i = i
def foo(self):
return self.i
class MITypePy(Base1, m.Base2):
def __init__(self, i, j):
Base1.__init__(self, i)
m.Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
def test_multiple_inheritance_mix2():
class Base2:
def __init__(self, i):
self.i = i
def bar(self):
return self.i
class MITypePy(m.Base1, Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
Base2.__init__(self, j)
mt = MITypePy(3, 4)
assert mt.foo() == 3
assert mt.bar() == 4
@pytest.mark.skipif("env.PYPY and env.PY2")
@pytest.mark.xfail("env.PYPY and not env.PY2")
def test_multiple_inheritance_python():
class MI1(m.Base1, m.Base2):
def __init__(self, i, j):
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class B1(object):
def v(self):
return 1
class MI2(B1, m.Base1, m.Base2):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI3(MI2):
def __init__(self, i, j):
MI2.__init__(self, i, j)
class MI4(MI3, m.Base2):
def __init__(self, i, j):
MI3.__init__(self, i, j)
# This should be ignored (Base2 is already initialized via MI2):
m.Base2.__init__(self, i + 100)
class MI5(m.Base2, B1, m.Base1):
def __init__(self, i, j):
B1.__init__(self)
m.Base1.__init__(self, i)
m.Base2.__init__(self, j)
class MI6(m.Base2, B1):
def __init__(self, i):
m.Base2.__init__(self, i)
B1.__init__(self)
class B2(B1):
def v(self):
return 2
class B3(object):
def v(self):
return 3
class B4(B3, B2):
def v(self):
return 4
class MI7(B4, MI6):
def __init__(self, i):
B4.__init__(self)
MI6.__init__(self, i)
class MI8(MI6, B3):
def __init__(self, i):
MI6.__init__(self, i)
B3.__init__(self)
class MI8b(B3, MI6):
def __init__(self, i):
B3.__init__(self)
MI6.__init__(self, i)
mi1 = MI1(1, 2)
assert mi1.foo() == 1
assert mi1.bar() == 2
mi2 = MI2(3, 4)
assert mi2.v() == 1
assert mi2.foo() == 3
assert mi2.bar() == 4
mi3 = MI3(5, 6)
assert mi3.v() == 1
assert mi3.foo() == 5
assert mi3.bar() == 6
mi4 = MI4(7, 8)
assert mi4.v() == 1
assert mi4.foo() == 7
assert mi4.bar() == 8
mi5 = MI5(10, 11)
assert mi5.v() == 1
assert mi5.foo() == 10
assert mi5.bar() == 11
mi6 = MI6(12)
assert mi6.v() == 1
assert mi6.bar() == 12
mi7 = MI7(13)
assert mi7.v() == 4
assert mi7.bar() == 13
mi8 = MI8(14)
assert mi8.v() == 1
assert mi8.bar() == 14
mi8b = MI8b(15)
assert mi8b.v() == 3
assert mi8b.bar() == 15
def test_multiple_inheritance_python_many_bases():
class MIMany14(m.BaseN1, m.BaseN2, m.BaseN3, m.BaseN4):
def __init__(self):
m.BaseN1.__init__(self, 1)
m.BaseN2.__init__(self, 2)
m.BaseN3.__init__(self, 3)
m.BaseN4.__init__(self, 4)
class MIMany58(m.BaseN5, m.BaseN6, m.BaseN7, m.BaseN8):
def __init__(self):
m.BaseN5.__init__(self, 5)
m.BaseN6.__init__(self, 6)
m.BaseN7.__init__(self, 7)
m.BaseN8.__init__(self, 8)
class MIMany916(
m.BaseN9,
m.BaseN10,
m.BaseN11,
m.BaseN12,
m.BaseN13,
m.BaseN14,
m.BaseN15,
m.BaseN16,
):
def __init__(self):
m.BaseN9.__init__(self, 9)
m.BaseN10.__init__(self, 10)
m.BaseN11.__init__(self, 11)
m.BaseN12.__init__(self, 12)
m.BaseN13.__init__(self, 13)
m.BaseN14.__init__(self, 14)
m.BaseN15.__init__(self, 15)
m.BaseN16.__init__(self, 16)
class MIMany19(MIMany14, MIMany58, m.BaseN9):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
m.BaseN9.__init__(self, 9)
class MIMany117(MIMany14, MIMany58, MIMany916, m.BaseN17):
def __init__(self):
MIMany14.__init__(self)
MIMany58.__init__(self)
MIMany916.__init__(self)
m.BaseN17.__init__(self, 17)
# Inherits from 4 registered C++ classes: can fit in one pointer on any modern arch:
a = MIMany14()
for i in range(1, 4):
assert getattr(a, "f" + str(i))() == 2 * i
# Inherits from 8: requires 1/2 pointers worth of holder flags on 32/64-bit arch:
b = MIMany916()
for i in range(9, 16):
assert getattr(b, "f" + str(i))() == 2 * i
# Inherits from 9: requires >= 2 pointers worth of holder flags
c = MIMany19()
for i in range(1, 9):
assert getattr(c, "f" + str(i))() == 2 * i
# Inherits from 17: requires >= 3 pointers worth of holder flags
d = MIMany117()
for i in range(1, 17):
assert getattr(d, "f" + str(i))() == 2 * i
def test_multiple_inheritance_virtbase():
class MITypePy(m.Base12a):
def __init__(self, i, j):
m.Base12a.__init__(self, i, j)
mt = MITypePy(3, 4)
assert mt.bar() == 4
assert m.bar_base2a(mt) == 4
assert m.bar_base2a_sharedptr(mt) == 4
def test_mi_static_properties():
"""Mixing bases with and without static properties should be possible
and the result should be independent of base definition order"""
for d in (m.VanillaStaticMix1(), m.VanillaStaticMix2()):
assert d.vanilla() == "Vanilla"
assert d.static_func1() == "WithStatic1"
assert d.static_func2() == "WithStatic2"
assert d.static_func() == d.__class__.__name__
m.WithStatic1.static_value1 = 1
m.WithStatic2.static_value2 = 2
assert d.static_value1 == 1
assert d.static_value2 == 2
assert d.static_value == 12
d.static_value1 = 0
assert d.static_value1 == 0
d.static_value2 = 0
assert d.static_value2 == 0
d.static_value = 0
assert d.static_value == 0
# Requires PyPy 6+
def test_mi_dynamic_attributes():
"""Mixing bases with and without dynamic attribute support"""
for d in (m.VanillaDictMix1(), m.VanillaDictMix2()):
d.dynamic = 1
assert d.dynamic == 1
def test_mi_unaligned_base():
"""Returning an offset (non-first MI) base class pointer should recognize the instance"""
n_inst = ConstructorStats.detail_reg_inst()
c = m.I801C()
d = m.I801D()
# + 4 below because we have the two instances, and each instance has offset base I801B2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
b1c = m.i801b1_c(c)
assert b1c is c
b2c = m.i801b2_c(c)
assert b2c is c
b1d = m.i801b1_d(d)
assert b1d is d
b2d = m.i801b2_d(d)
assert b2d is d
assert ConstructorStats.detail_reg_inst() == n_inst + 4 # no extra instances
del c, b1c, b2c
assert ConstructorStats.detail_reg_inst() == n_inst + 2
del d, b1d, b2d
assert ConstructorStats.detail_reg_inst() == n_inst
def test_mi_base_return():
"""Tests returning an offset (non-first MI) base class pointer to a derived instance"""
n_inst = ConstructorStats.detail_reg_inst()
c1 = m.i801c_b1()
assert type(c1) is m.I801C
assert c1.a == 1
assert c1.b == 2
d1 = m.i801d_b1()
assert type(d1) is m.I801D
assert d1.a == 1
assert d1.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 4
c2 = m.i801c_b2()
assert type(c2) is m.I801C
assert c2.a == 1
assert c2.b == 2
d2 = m.i801d_b2()
assert type(d2) is m.I801D
assert d2.a == 1
assert d2.b == 2
assert ConstructorStats.detail_reg_inst() == n_inst + 8
del c2
assert ConstructorStats.detail_reg_inst() == n_inst + 6
del c1, d1, d2
assert ConstructorStats.detail_reg_inst() == n_inst
# Returning an unregistered derived type with a registered base; we won't
# pick up the derived type, obviously, but should still work (as an object
# of whatever type was returned).
e1 = m.i801e_c()
assert type(e1) is m.I801C
assert e1.a == 1
assert e1.b == 2
e2 = m.i801e_b2()
assert type(e2) is m.I801B2
assert e2.b == 2
def test_diamond_inheritance():
"""Tests that diamond inheritance works as expected (issue #959)"""
# Issue #959: this shouldn't segfault:
d = m.D()
# Make sure all the various distinct pointers are all recognized as registered instances:
assert d is d.c0()
assert d is d.c1()
assert d is d.b()
assert d is d.c0().b()
assert d is d.c1().b()
assert d is d.c0().c1().b().c0().b()
| 4,747 |
328 | package com.ctg.test.service;
import com.ctg.test.model.User;
import java.util.List;
/**
* @Description: TODO
* @Author: yanhonghai
* @Date: 2018/9/19 10:03
*/
public interface UserService {
User findByUserName(String userName);
User findUserInfo(String userName);
List<User> findAll();
}
| 115 |
14,668 | <filename>third_party/private_membership/src/internal/aes_ctr_256_with_fixed_iv_test.cc
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "third_party/private_membership/src/internal/aes_ctr_256_with_fixed_iv.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include <openssl/rand.h>
#include "third_party/shell-encryption/src/testing/status_matchers.h"
#include "third_party/shell-encryption/src/testing/status_testing.h"
namespace private_membership {
namespace {
using ::rlwe::testing::StatusIs;
using testing::HasSubstr;
std::string GetRandomBytes(size_t length) {
std::unique_ptr<uint8_t[]> buf(new uint8_t[length]);
// BoringSSL documentation says that RAND_bytes always returns 1, while
// OpenSSL documentation says that it returns 1 on success and 0 otherwise.
// We use BoringSSL, so we don't check the return value.
RAND_bytes(buf.get(), length);
return std::string(reinterpret_cast<const char *>(buf.get()), length);
}
TEST(AesCtr256WithFixedIVTest, TestEncryptDecrypt) {
std::string key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
ASSERT_OK_AND_ASSIGN(auto cipher, AesCtr256WithFixedIV::Create(key));
for (int i = 0; i < 256; ++i) {
std::string message(GetRandomBytes(i));
ASSERT_OK_AND_ASSIGN(auto ciphertext, cipher->Encrypt(message));
EXPECT_EQ(ciphertext.size(), i);
ASSERT_OK_AND_ASSIGN(auto plaintext, cipher->Decrypt(ciphertext));
EXPECT_EQ(plaintext, message);
}
}
TEST(AesCtr256WithFixedIVTest, TestEncryptDifferentFromMessage) {
std::string key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
ASSERT_OK_AND_ASSIGN(auto cipher, AesCtr256WithFixedIV::Create(key));
// Check for non-empty messages.
for (int i = 1; i < 256; ++i) {
std::string message(GetRandomBytes(i));
ASSERT_OK_AND_ASSIGN(auto ciphertext, cipher->Encrypt(message));
EXPECT_NE(message, ciphertext);
}
}
TEST(AesCtr256WithFixedIVTest, InvalidKeySize) {
std::string short_key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize - 1));
EXPECT_THAT(AesCtr256WithFixedIV::Create(short_key),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Key size is invalid")));
std::string long_key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize + 1));
EXPECT_THAT(AesCtr256WithFixedIV::Create(long_key),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Key size is invalid")));
}
TEST(AesCtr256WithFixedIVTest, TestMultipleEncryptionsSameKeyAndMessage) {
std::string key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
ASSERT_OK_AND_ASSIGN(auto cipher, AesCtr256WithFixedIV::Create(key));
for (int i = 0; i < 256; ++i) {
std::string message(GetRandomBytes(i));
ASSERT_OK_AND_ASSIGN(auto ciphertext1, cipher->Encrypt(message));
ASSERT_OK_AND_ASSIGN(auto ciphertext2, cipher->Encrypt(message));
EXPECT_EQ(ciphertext1, ciphertext2);
}
}
TEST(AesCtr256WithFixedIVTest, TestMultipleEncryptionsDifferentKey) {
std::string key1(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
std::string key2(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
ASSERT_OK_AND_ASSIGN(auto cipher1, AesCtr256WithFixedIV::Create(key1));
ASSERT_OK_AND_ASSIGN(auto cipher2, AesCtr256WithFixedIV::Create(key2));
// Check non-empty messages.
for (int i = 1; i < 256; ++i) {
std::string message(GetRandomBytes(i));
ASSERT_OK_AND_ASSIGN(auto ciphertext1, cipher1->Encrypt(message));
ASSERT_OK_AND_ASSIGN(auto ciphertext2, cipher2->Encrypt(message));
EXPECT_NE(ciphertext1, ciphertext2);
}
}
TEST(AesCtr256WithFixedIVTest, TestMultipleEncryptionsDifferentMessages) {
std::string key(GetRandomBytes(AesCtr256WithFixedIV::kKeySize));
ASSERT_OK_AND_ASSIGN(auto cipher, AesCtr256WithFixedIV::Create(key));
// Check non-empty messages.
for (int i = 1; i < 256; ++i) {
std::string message1(GetRandomBytes(i));
std::string message2(GetRandomBytes(i));
while (message2 == message1) { // Ensure the messages are different.
message2 = GetRandomBytes(i);
}
ASSERT_OK_AND_ASSIGN(auto ciphertext1, cipher->Encrypt(message1));
ASSERT_OK_AND_ASSIGN(auto ciphertext2, cipher->Encrypt(message2));
EXPECT_NE(ciphertext1, ciphertext2);
}
}
} // namespace
} // namespace private_membership
| 1,784 |
941 | <reponame>3cL1p5e7/ic
#!/usr/bin/env python3
"""Helper package for parsing dependency inventories."""
import abc
import collections
import dataclasses
import fnmatch
import logging
import os
import pathlib
import subprocess
import typing
import git
import gitlab
import json5
import jsonschema
import toml
import git_changes
PROJECT_ROOT = pathlib.Path(os.environ.get("CI_PROJECT_DIR", pathlib.Path(__file__).parent.parent.parent.parent))
GITLAB_TOKEN = os.environ.get("GITLAB_API_TOKEN", "")
if not GITLAB_TOKEN:
logging.error("GITLAB_API_TOKEN is not set, can not send comments to GitLab")
GITLAB_PROJECT_NAME = os.environ.get("CI_PROJECT_PATH", "dfinity-lab/core/ic")
DELTA_HEADER = "*Dependency Delta*"
with open(pathlib.Path(__file__).parent / "inventory.schema") as f:
SCHEMA = json5.load(f)
def is_owned_path(path):
if path.startswith("https://gitlab.com/dfinity-lab/"):
return True
if path.startswith("https://github.com/dfinity/"):
return True
if path.startswith("https://github.com/dfinity-lab/"):
return True
if path.startswith("/"):
return True
return False
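# Examples (illustrative inputs):
#   is_owned_path("https://github.com/dfinity/ic")     -> True  (owned org)
#   is_owned_path("https://github.com/serde-rs/serde") -> False (external)
#   is_owned_path("/local/workspace/crate")            -> True  (local path)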
@dataclasses.dataclass
class Dependency:
"""Data class for dependency information."""
name: str
version: str
is_external: bool = True
def _key(self):
return self.name + self.version
def __hash__(self):
"""Hash to be used for deduplication in sets and dicts."""
return hash(self._key())
def __eq__(self, other):
"""Compare two dependencies."""
return self._key() == other._key()
@classmethod
def from_cargo_tree(cls, line: str) -> "Dependency":
"""
Convert single line from cargo tree to a dependency.
Expects that the depth prefix was already removed.
"""
is_external = True
name, line = line.split(" ", 1)
if " " in line:
version, line = line.split(" ", 1)
path = line.split(" ")[0]
if path != "(*)" and is_owned_path(path[1:-1]):
is_external = False
else:
version = line
if version[0] != "v":
raise ValueError("Version should start with v")
version = version[1:]
return Dependency(name=name, version=version, is_external=is_external)
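# Example (illustrative lines from `cargo tree --prefix=depth`, depth already
# stripped): "serde v1.0.136" parses to an external Dependency, while
# "my-crate v0.1.0 (/ws/rs/my-crate)" parses as internal (owned local path).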
@dataclasses.dataclass
class PackageDiff:
"""Data class for package dependency diff."""
name: str
added_deps: set = dataclasses.field(default_factory=set)
removed_deps: set = dataclasses.field(default_factory=set)
added_direct_deps: set = dataclasses.field(default_factory=set)
def is_empty(self) -> bool:
"""Return True if package diff is empty."""
return not len(self.added_deps) and not len(self.removed_deps) and not len(self.added_direct_deps)
@dataclasses.dataclass
class PackageDiffDelta:
"""Data class for diff deltas."""
total_added: int = 0
total_removed: int = 0
internal_added: int = 0
internal_removed: int = 0
def get_total_delta(self):
"""Return total dependency delta."""
return self.total_added - self.total_removed
def get_internal_delta(self):
"""Return internal dependency delta."""
return self.internal_added - self.internal_removed
class PackageManager(abc.ABC):
"""Base class for helper classes for different package managers."""
@abc.abstractmethod
def get_inventory_file(self) -> pathlib.Path:
"""Return path to inventory file."""
@abc.abstractmethod
def get_external_direct_deps(self) -> typing.Dict[str, typing.Set[Dependency]]:
"""Key is a dependency name, value is a set of all versions in use."""
@abc.abstractmethod
def get_package_diff(self) -> typing.Dict[str, PackageDiff]:
"""Return list of package diffs for modified packages."""
@abc.abstractmethod
def inventory_changed(self) -> bool:
"""Return true if inventory was modified."""
class Cargo(PackageManager):
"""Helper for cargo-related functions."""
def __init__(self, root=PROJECT_ROOT):
"""Construct default object."""
self.package_files = {}
self.direct_deps = None
self.root = root
@classmethod
def is_duplicate_line(cls, dep_line: str) -> bool:
"""Return True if this line already occurred in the output before."""
return dep_line.endswith("(*)")
@classmethod
def parse_depth(cls, dep_line: str) -> typing.Tuple[int, str]:
"""Return depth as int and the remaining dependency string."""
i = 0
while dep_line[i].isdigit():
i += 1
return (int(dep_line[:i]), dep_line[i:])
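# Example (illustrative): parse_depth("2rand v0.8.5") -> (2, "rand v0.8.5"),
# where the leading digits are the depth prefix emitted by
# `cargo tree --prefix=depth`.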
def get_inventory_file(self) -> pathlib.Path:
"""Return path to inventory file."""
return self.root / ".dependencies" / "cargo.json"
def get_external_direct_deps(self) -> typing.Dict[str, typing.Set[Dependency]]:
"""Return dict of external direct dependencies."""
# Running cargo tree for the whole codebase is expensive.
if self.direct_deps:
return self.direct_deps
self.direct_deps = collections.defaultdict(set)
logging.info("Running cargo tree...")
tree = self._get_cargo_tree_output()
current_path = []
logging.info("Parsing cargo tree...")
# nix-shell outputs affinity, etc, so skip to the first package
while tree[0] != "0":
_, tree = tree.split("\n", 1)
for dep_line in tree.split("\n"):
if not dep_line:
continue
depth, dep_line = Cargo.parse_depth(dep_line)
dependency = Dependency.from_cargo_tree(dep_line)
if depth == len(current_path):
current_path.append(dependency)
else:
current_path[depth] = dependency
del current_path[depth + 1 :]
if dependency.is_external:
parent = current_path[depth - 1]
if not parent.is_external:
self.direct_deps[dependency.name].add(dependency)
return self.direct_deps
def get_package_diff(self) -> typing.Dict[str, PackageDiff]:
"""Return list of dependency diffs for modified packages."""
changed = []
for f in git_changes.get_changed_files(self.root, ["rs/"]):
if fnmatch.fnmatch(f, "*/Cargo.toml"):
changed.append(pathlib.Path(f))
if not changed:
logging.info("No Cargo.toml modified.")
return {}
logging.info(
"Modified Cargo.toml files: %s",
",".join(map(lambda x: x.as_posix(), changed)),
)
diffs = {}
for mfile in changed:
if not mfile.exists():
# Manifest was completely removed.
continue
logging.info("Collecting current dependencies for %s", mfile.as_posix())
with open(mfile) as f:
manifest = toml.load(f)
if "package" not in manifest:
logging.info("No package entry, probably a workspace, skipping")
continue
diff = PackageDiff(name=manifest["package"]["name"])
for dep_line in self._enumerate_package_deps(manifest):
depth, dep_line = Cargo.parse_depth(dep_line)
dependency = Dependency.from_cargo_tree(dep_line)
if dependency.name == diff.name:
# Skip the package itself
continue
if depth == 1 and dependency.is_external:
# This should be done before duplicate check, because
# the same dependency may be direct and indirect.
diff.added_direct_deps.add(dependency)
if Cargo.is_duplicate_line(dep_line):
continue
diff.added_deps.add(dependency)
diffs[diff.name] = diff
# Running cargo tree modifies local Cargo.lock
logging.info("Resetting local git changes")
repo = git.Repo(self.root)
repo.git.reset("--hard")
merge_top = repo.head.commit
logging.info("Checking out merge base")
repo.git.checkout(git_changes.get_merge_base(repo)[0].hexsha)
for mfile in changed:
if not mfile.exists():
# Manifest was freshly added.
continue
logging.info("Collecting previous dependencies for %s", mfile.as_posix())
with open(mfile) as f:
manifest = toml.load(f)
if "package" not in manifest:
logging.info("No package entry, probably a workspace, skipping")
continue
name = manifest["package"]["name"]
if name not in diffs:
diffs[name] = PackageDiff(name=name)
# (*) only marks a truncated duplicate subtree, duplicated
# dependencies without further dependencies will not have it.
already_seen = set()
diff = diffs[name]
for dep_line in self._enumerate_package_deps(manifest):
depth, dep_line = Cargo.parse_depth(dep_line)
dependency = Dependency.from_cargo_tree(dep_line)
if dependency.name == diff.name:
# Skip the package itself
continue
if depth == 1 and dependency.is_external and dependency in diff.added_direct_deps:
diff.added_direct_deps.remove(dependency)
if Cargo.is_duplicate_line(dep_line) or dependency in already_seen:
continue
if dependency in diff.added_deps:
already_seen.add(dependency)
diff.added_deps.remove(dependency)
else:
diff.removed_deps.add(dependency)
logging.info("Resetting local git changes")
repo.git.reset("--hard")
logging.info("Checking out merge top")
repo.git.checkout(merge_top)
return {name: diffs[name] for name in diffs if not diffs[name].is_empty()}
def _enumerate_package_deps(self, manifest):
logging.info("Running cargo tree...")
try:
tree = self._get_cargo_tree_output(
package="{}:{}".format(manifest["package"]["name"], manifest["package"]["version"])
)
except subprocess.CalledProcessError:
return
logging.info("Parsing cargo tree...")
# nix-shell outputs affinity, etc, so skip to the first package
while not tree[0] == "0":
_, tree = tree.split("\n", 1)
for dep_line in tree.split("\n"):
if not dep_line:
continue
yield dep_line
def _get_cargo_tree_output(self, package=None, offline="", cargo_home=""):
logging.info("Running cargo tree...")
command = f"cd {self.root}/rs && {cargo_home} cargo tree --edges=no-dev --prefix=depth {offline}"
if package:
command += f" -p {package}"
return subprocess.check_output(
[
"nix-shell",
"--run",
command,
],
cwd=PROJECT_ROOT / "rs",
encoding="utf-8",
).strip()
def inventory_changed(self) -> bool:
"""Return True if inventory was modified."""
if git_changes.get_changed_files(
self.root,
[self.get_inventory_file().relative_to(self.root).as_posix()],
):
logging.info("Inventory changed.")
return True
logging.info("Inventory did not change.")
return False
class Inventory:
"""Dependencty inventory helper."""
@dataclasses.dataclass
class Error:
"""Data class for inventory error reporting."""
message: str
is_fatal: bool = False
def __init__(self, package_manager: PackageManager, use_gitlab=True):
"""Load dependency inventory from a file."""
with open(package_manager.get_inventory_file()) as f:
self.dependencies = json5.load(f)
self.package_manager = package_manager
self.use_gitlab = use_gitlab
def _comment_on_gitlab(self, deltas):
"""Add a gitlab comment with dependency delta info."""
if not deltas or not GITLAB_TOKEN:
return
comment_body = self._generate_comment_markdown(deltas)
glab = gitlab.Gitlab("https://gitlab.com", private_token=os.environ["GITLAB_API_TOKEN"])
glab.auth() # needed for setting glab.user.username (current user)
glab_repo = glab.projects.get(GITLAB_PROJECT_NAME)
for merge_req in glab_repo.mergerequests.list(
state="opened",
order_by="updated_at",
source_branch=os.environ["CI_COMMIT_REF_NAME"],
):
comment = None
for note in merge_req.notes.list():
if note.author["username"] == glab.user.username and note.body.startswith(DELTA_HEADER):
comment = note
break
if comment:
comment.body = comment_body
comment.save()
else:
merge_req.notes.create({"body": comment_body})
def _generate_comment_markdown(self, deltas):
"""Generate dependency delta comment using markdown."""
delta_body = DELTA_HEADER + "\n"
for name, delta in deltas.items():
delta_body += (
"* {}\n" " * Internal: {:+} ({} added, {} removed)\n" " * Total: {:+} ({} added, {} removed)\n"
).format(
name,
delta.get_internal_delta(),
delta.internal_added,
delta.internal_removed,
delta.get_total_delta(),
delta.total_added,
delta.total_removed,
)
return delta_body
def validate(self):
"""Validate the inventory."""
errors = []
logging.info("Enumerate changed packages")
diffs = self.package_manager.get_package_diff()
if not diffs:
if not self.package_manager.inventory_changed():
logging.info("Inventory and dependencies were not modified, skipping")
return []
try:
jsonschema.validate(instance=self.dependencies, schema=SCHEMA)
except jsonschema.ValidationError as e:
return [
self.Error(
message="JSON Schema validation failed with the following error:\n{}\n"
"The schema can be found here: gitlab-ci/src/dependencies/inventory.schema.".format(e.message),
is_fatal=True,
)
]
external_direct_deps = self.package_manager.get_external_direct_deps()
for d in self.dependencies:
logging.info("Validating %s", d)
if d not in external_direct_deps:
errors.append(
self.Error(
message="{} is not referenced in any manifest file.".format(d),
is_fatal=True,
)
)
elif len(external_direct_deps[d]) > 1:
version_missing = []
owners_count = collections.defaultdict(int)
for owner in self.dependencies[d]["owners"]:
if "versions" not in owner:
version_missing.append(owner["email"])
continue
for version in owner["versions"]:
owners_count[version] += 1
if Dependency(name=d, version=version) not in external_direct_deps[d]:
errors.append(
self.Error(
message="Version {} of package {} is not used, but is owned by {}".format(
version, d, owner["email"]
),
is_fatal=True,
)
)
continue
insufficient_owners = [v for v, c in owners_count.items() if c < 2]
if insufficient_owners:
errors.append(
self.Error(
message="The following versions of {} have less than 2 owners: {}".format(
d, ",".join(sorted(insufficient_owners))
),
is_fatal=True,
)
)
if version_missing:
errors.append(
self.Error(
message="Multiple versions of {} are in use: {}, but "
"the following owners do not specify a version "
"explicitly: {}".format(
d,
sorted([x.version for x in external_direct_deps[d]]),
version_missing,
),
is_fatal=True,
)
)
else:
has_version = []
for owner in self.dependencies[d]["owners"]:
if "versions" in owner:
has_version.append("{} ({})".format(owner["email"], owner["versions"]))
if has_version:
errors.append(
self.Error(
message="Only one version of {} is in use, but the "
"following owners explicitly specify versions: {}".format(d, sorted(has_version)),
is_fatal=True,
)
)
deltas = {}
for package, diff in diffs.items():
delta = PackageDiffDelta(total_added=len(diff.added_deps), total_removed=len(diff.removed_deps))
for dep in diff.added_deps:
if not dep.is_external:
delta.internal_added += 1
for dep in diff.removed_deps:
if not dep.is_external:
delta.internal_removed += 1
if delta.total_added > 0 or delta.total_removed > 0:
# Diff may be non-empty if direct dependencies were added. This may not
# affect total dependencies if these were already present indirectly.
deltas[package] = delta
for dep in diff.added_direct_deps:
if dep.name not in self.dependencies:
errors.append(
self.Error(
message="New dependency {} is added, but it is not in the inventory".format(dep.name)
)
)
continue
if len(external_direct_deps[dep.name]) > 1:
# Several versions of this dependency are in use, ensure the
# added one is in the inventory.
found = False
for owner in self.dependencies[dep.name]["owners"]:
if "versions" not in owner:
continue
for version in owner["versions"]:
if version == dep.version:
found = True
break
if found:
break
if not found:
errors.append(
self.Error(
message="Version {} of {} is added, but is not owned by anyone.".format(
dep.version, dep.name
)
)
)
if self.use_gitlab:
self._comment_on_gitlab(deltas)
return errors
| 10,016 |
668 | <gh_stars>100-1000
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are
* permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.oracle.truffle.llvm.nodes.intrinsics.llvm;
import com.oracle.truffle.api.dsl.Cached;
import com.oracle.truffle.api.dsl.NodeChild;
import com.oracle.truffle.api.dsl.NodeChildren;
import com.oracle.truffle.api.dsl.Specialization;
import com.oracle.truffle.llvm.runtime.LLVMVirtualAllocationAddress;
import com.oracle.truffle.llvm.runtime.memory.LLVMMemMoveNode;
import com.oracle.truffle.llvm.runtime.memory.LLVMMemory;
import com.oracle.truffle.llvm.runtime.memory.UnsafeArrayAccess;
import com.oracle.truffle.llvm.runtime.nodes.api.LLVMExpressionNode;
import com.oracle.truffle.llvm.runtime.pointer.LLVMNativePointer;
import com.oracle.truffle.llvm.runtime.pointer.LLVMPointer;
@NodeChildren({@NodeChild(type = LLVMExpressionNode.class, value = "destination"), @NodeChild(type = LLVMExpressionNode.class, value = "source"),
@NodeChild(type = LLVMExpressionNode.class, value = "length"),
@NodeChild(type = LLVMExpressionNode.class, value = "align"), @NodeChild(type = LLVMExpressionNode.class, value = "isVolatile")})
public abstract class LLVMMemCopy extends LLVMBuiltin {
@Child private LLVMMemMoveNode memMove;
public LLVMMemCopy(LLVMMemMoveNode memMove) {
this.memMove = memMove;
}
// TODO: remove duplication for length argument with a cast node
@Specialization
protected Object doVoid(LLVMVirtualAllocationAddress target, LLVMVirtualAllocationAddress source, int length, int align, boolean isVolatile,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
return doVoid(target, source, (long) length, align, isVolatile, arrayAccess);
}
@Specialization
protected Object doVoid(LLVMVirtualAllocationAddress target, LLVMNativePointer source, int length, int align, boolean isVolatile,
@Cached("getLLVMMemory()") LLVMMemory memory,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
return doVoid(target, source, (long) length, align, isVolatile, memory, arrayAccess);
}
@Specialization
protected Object doVoid(LLVMNativePointer target, LLVMVirtualAllocationAddress source, int length, int align, boolean isVolatile,
@Cached("getLLVMMemory()") LLVMMemory memory,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
return doVoid(target, source, (long) length, align, isVolatile, memory, arrayAccess);
}
@Specialization
protected Object doVoid(LLVMPointer target, LLVMPointer source, int length, int align, boolean isVolatile) {
return doVoid(target, source, (long) length, align, isVolatile);
}
@SuppressWarnings("unused")
@Specialization
protected Object doVoid(LLVMVirtualAllocationAddress target, LLVMVirtualAllocationAddress source, long length, int align, boolean isVolatile,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
copy(arrayAccess, target, source, length);
return null;
}
@SuppressWarnings("unused")
@Specialization
protected Object doVoid(LLVMVirtualAllocationAddress target, LLVMNativePointer source, long length, int align, boolean isVolatile,
@Cached("getLLVMMemory()") LLVMMemory memory,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
copy(arrayAccess, memory, target, source.asNative(), length);
return null;
}
@SuppressWarnings("unused")
@Specialization
protected Object doVoid(LLVMNativePointer target, LLVMVirtualAllocationAddress source, long length, int align, boolean isVolatile,
@Cached("getLLVMMemory()") LLVMMemory memory,
@Cached("getUnsafeArrayAccess()") UnsafeArrayAccess arrayAccess) {
copy(arrayAccess, memory, target.asNative(), source, length);
return null;
}
@SuppressWarnings("unused")
@Specialization
protected Object doVoid(LLVMPointer target, LLVMPointer source, long length, int align, boolean isVolatile) {
memMove.executeWithTarget(target, source, length);
return null;
}
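// The byte-wise helpers below handle LLVMVirtualAllocationAddress operands,
// for which the bulk memMove node used in the pointer/pointer case above is
// not applicable.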
private static void copy(UnsafeArrayAccess arrayAccess, LLVMMemory memory, LLVMVirtualAllocationAddress target, long source, long length) {
long sourcePointer = source;
LLVMVirtualAllocationAddress targetAddress = target;
for (long i = 0; i < length; i++) {
byte value = memory.getI8(sourcePointer);
targetAddress.writeI8(arrayAccess, value);
targetAddress = targetAddress.increment(1);
sourcePointer++;
}
}
private static void copy(UnsafeArrayAccess arrayAccess, LLVMMemory memory, long target, LLVMVirtualAllocationAddress source, long length) {
LLVMVirtualAllocationAddress sourcePointer = source;
long targetAddress = target;
for (long i = 0; i < length; i++) {
byte value = sourcePointer.getI8(arrayAccess);
sourcePointer = sourcePointer.increment(1);
memory.putI8(targetAddress, value);
targetAddress++;
}
}
private static void copy(UnsafeArrayAccess memory, LLVMVirtualAllocationAddress target, LLVMVirtualAllocationAddress source, long length) {
LLVMVirtualAllocationAddress sourcePointer = source;
LLVMVirtualAllocationAddress targetAddress = target;
for (long i = 0; i < length; i++) {
byte value = sourcePointer.getI8(memory);
sourcePointer = sourcePointer.increment(1);
targetAddress.writeI8(memory, value);
targetAddress = targetAddress.increment(1);
}
}
}
| 2,626 |
3,705 | import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import type_check
class Upsampling2D(pooling_2d.Pooling2D):
"""Upsampling over a set of 2d planes w/ indices used for max pooling."""
def __init__(self, indexes, ksize, stride=None, pad=0, outsize=None,
cover_all=True):
super(Upsampling2D, self).__init__(ksize, stride, pad, cover_all)
self.indexes = indexes
self.outh, self.outw = (None, None) if outsize is None else outsize
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.kind == 'f',
x_type.ndim == 4,
x_type.shape == self.indexes.shape,
)
if self.outh is not None:
expected_h = conv.get_conv_outsize(
self.outh, self.kh, self.sy, self.ph, cover_all=self.cover_all)
type_check.expect(x_type.shape[2] == expected_h)
if self.outw is not None:
expected_w = conv.get_conv_outsize(
self.outw, self.kw, self.sx, self.pw, cover_all=self.cover_all)
type_check.expect(x_type.shape[3] == expected_w)
def forward_cpu(self, x):
self._in_dtype = x[0].dtype
n, c, h, w = x[0].shape
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
up_y = numpy.zeros((n, c, self.outh, self.outw), dtype=self._in_dtype)
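# Scatter trick: im2col the zero-filled output so every pooling window becomes
# a row of kh*kw slots, write each pooled value into the slot recorded in
# self.indexes, then col2im back into the upsampled image.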
up_y = conv.im2col_cpu(
up_y, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all).transpose(0, 1, 4, 5, 2, 3)
colh, colw = up_y.shape[2:4]
up_y = up_y.reshape(-1, self.kh * self.kw)
indexes = self.indexes.ravel()
up_y[numpy.arange(len(indexes)), indexes] = x[0].ravel()
up_y = up_y.reshape(n, c, colh, colw, self.kh, self.kw)
up_y = conv.col2im_cpu(
up_y.transpose(0, 1, 4, 5, 2, 3), self.sy, self.sx, self.ph,
self.pw, self.outh, self.outw)
return up_y,
def forward_gpu(self, x):
self._in_dtype = x[0].dtype
xp = cuda.cupy
n, c, h, w = x[0].shape
if self.outh is None:
self.outh = conv.get_deconv_outsize(
h, self.kh, self.sy, self.ph, cover_all=self.cover_all)
if self.outw is None:
self.outw = conv.get_deconv_outsize(
w, self.kw, self.sx, self.pw, cover_all=self.cover_all)
up_y = xp.zeros((n, c, self.outh, self.outw), dtype=self._in_dtype)
up_y = conv.im2col_gpu(
up_y, self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
up_y = up_y.transpose(0, 1, 4, 5, 2, 3)
n, c, oy, ox, ky, kx = up_y.shape
indexes = xp.asarray(self.indexes, dtype=numpy.int32)
cuda.elementwise(
'int32 index, T x, int32 n, int32 c, int32 oy, int32 ox,'
'int32 ky, int32 kx', 'raw T up_y',
'''
int yn = i / c / oy / ox;
int yc = (i / oy / ox) % c;
int yoy = (i / ox) % oy;
int yox = i % ox;
up_y[yn * c * oy * ox * ky * kx +
yc * oy * ox * ky * kx +
yoy * ox * ky * kx +
yox * ky * kx +
index] = x;
''',
'upsampling_2d_fwd')(indexes, x[0], n, c, oy, ox, ky, kx, up_y)
up_y = up_y.transpose(0, 1, 4, 5, 2, 3)
up_y = conv.col2im_gpu(up_y, self.sy, self.sx, self.ph, self.pw,
self.outh, self.outw)
return up_y,
def backward(self, indexes, grad_outputs):
return Upsampling2DGrad(self).apply(grad_outputs)
class Upsampling2DGrad(function_node.FunctionNode):
def __init__(self, upsampling2d):
self.kh = upsampling2d.kh
self.kw = upsampling2d.kw
self.sy = upsampling2d.sy
self.sx = upsampling2d.sx
self.ph = upsampling2d.ph
self.pw = upsampling2d.pw
self.outh = upsampling2d.outh
self.outw = upsampling2d.outw
self.cover_all = upsampling2d.cover_all
self.indexes = upsampling2d.indexes
self._in_dtype = upsampling2d._in_dtype
def forward_cpu(self, gy):
gcol = conv.im2col_cpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
n, c, kh, kw, out_h, out_w = gcol.shape
gcol = gcol.transpose(0, 1, 4, 5, 2, 3).reshape(-1, kh * kw)
indexes = self.indexes.ravel()
gx = gcol[numpy.arange(len(indexes)), indexes]
return gx.reshape(n, c, out_h, out_w),
def forward_gpu(self, gy):
xp = cuda.cupy
gcol = conv.im2col_gpu(
gy[0], self.kh, self.kw, self.sy, self.sx, self.ph, self.pw,
cover_all=self.cover_all)
gcol = gcol.transpose(0, 1, 4, 5, 2, 3)
n, c, oy, ox, ky, kx = gcol.shape
gcol = gcol.reshape((n, c, oy, ox, ky * kx))
indexes = xp.asarray(self.indexes, dtype=numpy.int32)
gx = xp.empty((n, c, oy, ox), dtype=self._in_dtype)
cuda.elementwise(
'int32 indexes, raw T gcol, int32 n, int32 c, int32 oy,'
'int32 ox, int32 ky, int32 kx',
'raw T gx',
'''
int ind_n = i / c / oy / ox;
int ind_c = (i / oy / ox) % c;
int ind_oy = (i / ox) % oy;
int ind_ox = i % ox;
int gcol_ky = indexes / kx;
int gcol_kx = indexes % kx;
float top_gx = gcol[ind_n * c * oy * ox * ky * kx +
ind_c * oy * ox * ky * kx +
ind_oy * ox * ky * kx +
ind_ox * ky * kx +
gcol_ky * kx +
gcol_kx];
gx[ind_n * c * oy * ox +
ind_c * oy * ox +
ind_oy * ox +
ind_ox] = top_gx;
''',
'upsampling_2d_bwd')(indexes, gcol, n, c, oy, ox, ky, kx, gx)
return gx,
def backward(self, indexes, ggx):
return Upsampling2D(
self.indexes, (self.kh, self.kw), (self.sy, self.sx),
(self.ph, self.pw), (self.outh, self.outw),
self.cover_all).apply(ggx)
def upsampling_2d(
x, indexes, ksize, stride=None, pad=0, outsize=None, cover_all=True):
"""Upsampling using pooling indices.
This function produces an upsampled image using pooling indices.
.. admonition:: Example
>>> x = np.arange(1, 37).reshape(1, 1, 6, 6).astype(np.float32)
>>> x = chainer.Variable(x)
>>> x.array
array([[[[ 1., 2., 3., 4., 5., 6.],
[ 7., 8., 9., 10., 11., 12.],
[13., 14., 15., 16., 17., 18.],
[19., 20., 21., 22., 23., 24.],
[25., 26., 27., 28., 29., 30.],
[31., 32., 33., 34., 35., 36.]]]], dtype=float32)
This is the original ``x`` before max pooling.
>>> pooled_x, indexes = F.max_pooling_2d(
... x, ksize=2, stride=2, return_indices=True)
>>> pooled_x.array
array([[[[ 8., 10., 12.],
[20., 22., 24.],
[32., 34., 36.]]]], dtype=float32)
>>> indexes
array([[[[3, 3, 3],
[3, 3, 3],
[3, 3, 3]]]])
These are the outputs from the max pooling operation including the
resulting indices that will be used to upsample ``pooled_x``. Note
that the indices all point to the largest, in the case the last,
elements in each window.
>>> upsampled_x = F.upsampling_2d(
... pooled_x, indexes, ksize=2, stride=2, outsize=x.shape[2:])
>>> upsampled_x.shape
(1, 1, 6, 6)
>>> upsampled_x.array
array([[[[ 0., 0., 0., 0., 0., 0.],
[ 0., 8., 0., 10., 0., 12.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 20., 0., 22., 0., 24.],
[ 0., 0., 0., 0., 0., 0.],
[ 0., 32., 0., 34., 0., 36.]]]], dtype=float32)
Args:
x (~chainer.Variable): Input variable.
indexes (:ref:`ndarray`): Index array returned from
preceding call to :meth:`~chainer.functions.max_pooling_2d`.
ksize (int or pair of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k)`` are equivalent.
stride (int or pair of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s)`` are equivalent. If ``None`` is
specified, then it uses same stride as the pooling window size.
pad (int or pair of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p)`` are equivalent.
outsize ((int, int)): Expected output size (height, width).
cover_all (bool): Should be set to ``True`` if all spatial locations
were pooled into some output pixels during the preceding pooling
operation. ``False`` otherwise. See
:meth:`~chainer.functions.max_pooling_2d`.
Returns:
~chainer.Variable: Output variable.
"""
return Upsampling2D(
indexes, ksize, stride, pad, outsize, cover_all).apply((x,))[0]
| 5,242 |
2,425 | // Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: <EMAIL> (<NAME>)
//
// This is the implementation of the public Problem API. The pointer to
// implementation (PIMPL) idiom makes it possible for Ceres internal code to
// refer to the private data members without needing to expose them to the
// world. An alternative to PIMPL is to have a factory which returns instances
// of a virtual base class; while that approach would work, it requires clients
// to always put a Problem object into a scoped pointer; this needlessly muddies
// client code for little benefit. Therefore, the PIMPL compromise was chosen.
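//
// A minimal sketch of the idiom (illustrative only; names are hypothetical,
// not the exact Ceres declarations):
//
//   class Problem {                                  // public, ABI-stable
//    public:
//     void AddParameterBlock(double* values, int size);
//    private:
//     std::unique_ptr<internal::ProblemImpl> impl_;  // all state hides here
//   };
//
//   void Problem::AddParameterBlock(double* values, int size) {
//     impl_->AddParameterBlock(values, size);        // thin forwarding call
//   }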
#ifndef CERES_PUBLIC_PROBLEM_IMPL_H_
#define CERES_PUBLIC_PROBLEM_IMPL_H_
#include <array>
#include <map>
#include <memory>
#include <unordered_set>
#include <vector>
#include "ceres/context_impl.h"
#include "ceres/internal/port.h"
#include "ceres/problem.h"
#include "ceres/types.h"
namespace ceres {
class CostFunction;
class EvaluationCallback;
class LossFunction;
class LocalParameterization;
struct CRSMatrix;
namespace internal {
class Program;
class ResidualBlock;
class CERES_EXPORT_INTERNAL ProblemImpl {
public:
typedef std::map<double*, ParameterBlock*> ParameterMap;
typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
typedef std::map<CostFunction*, int> CostFunctionRefCount;
typedef std::map<LossFunction*, int> LossFunctionRefCount;
ProblemImpl();
explicit ProblemImpl(const Problem::Options& options);
ProblemImpl(const ProblemImpl&) = delete;
void operator=(const ProblemImpl&) = delete;
~ProblemImpl();
// See the public problem.h file for description of these methods.
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* const* const parameter_blocks,
int num_parameter_blocks);
template <typename... Ts>
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
double* x0,
Ts*... xs) {
const std::array<double*, sizeof...(Ts) + 1> parameter_blocks{{x0, xs...}};
return AddResidualBlock(cost_function,
loss_function,
parameter_blocks.data(),
static_cast<int>(parameter_blocks.size()));
}
void AddParameterBlock(double* values, int size);
void AddParameterBlock(double* values,
int size,
LocalParameterization* local_parameterization);
void RemoveResidualBlock(ResidualBlock* residual_block);
void RemoveParameterBlock(const double* values);
void SetParameterBlockConstant(const double* values);
void SetParameterBlockVariable(double* values);
bool IsParameterBlockConstant(const double* values) const;
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
const LocalParameterization* GetParameterization(const double* values) const;
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
double GetParameterLowerBound(const double* values, int index) const;
double GetParameterUpperBound(const double* values, int index) const;
bool Evaluate(const Problem::EvaluateOptions& options,
double* cost,
std::vector<double>* residuals,
std::vector<double>* gradient,
CRSMatrix* jacobian);
bool EvaluateResidualBlock(ResidualBlock* residual_block,
bool apply_loss_function,
bool new_point,
double* cost,
double* residuals,
double** jacobians) const;
int NumParameterBlocks() const;
int NumParameters() const;
int NumResidualBlocks() const;
int NumResiduals() const;
int ParameterBlockSize(const double* parameter_block) const;
int ParameterBlockLocalSize(const double* parameter_block) const;
bool HasParameterBlock(const double* parameter_block) const;
void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
void GetParameterBlocksForResidualBlock(
const ResidualBlockId residual_block,
std::vector<double*>* parameter_blocks) const;
const CostFunction* GetCostFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
const LossFunction* GetLossFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
void GetResidualBlocksForParameterBlock(
const double* values,
std::vector<ResidualBlockId>* residual_blocks) const;
const Program& program() const { return *program_; }
Program* mutable_program() { return program_.get(); }
const ParameterMap& parameter_map() const { return parameter_block_map_; }
const ResidualBlockSet& residual_block_set() const {
CHECK(options_.enable_fast_removal)
<< "Fast removal not enabled, residual_block_set is not maintained.";
return residual_block_set_;
}
ContextImpl* context() { return context_impl_; }
private:
ParameterBlock* InternalAddParameterBlock(double* values, int size);
void InternalRemoveResidualBlock(ResidualBlock* residual_block);
// Delete the arguments in question. These differ from the Remove* functions
// in that they do not clean up references to the block to delete; they
// merely delete them.
template <typename Block>
void DeleteBlockInVector(std::vector<Block*>* mutable_blocks,
Block* block_to_remove);
void DeleteBlock(ResidualBlock* residual_block);
void DeleteBlock(ParameterBlock* parameter_block);
const Problem::Options options_;
bool context_impl_owned_;
ContextImpl* context_impl_;
// The mapping from user pointers to parameter blocks.
ParameterMap parameter_block_map_;
// Iff enable_fast_removal is enabled, contains the current residual blocks.
ResidualBlockSet residual_block_set_;
// The actual parameter and residual blocks.
std::unique_ptr<internal::Program> program_;
// When removing parameter blocks, parameterizations have ambiguous
// ownership. Instead of scanning the entire problem to see if the
// parameterization is shared with other parameter blocks, buffer
// them until destruction.
//
// TODO(keir): See if it makes sense to use sets instead.
std::vector<LocalParameterization*> local_parameterizations_to_delete_;
// For each cost function and loss function in the problem, a count
// of the number of residual blocks that refer to them. When the
// count goes to zero and the problem owns these objects, they are
// destroyed.
CostFunctionRefCount cost_function_ref_count_;
LossFunctionRefCount loss_function_ref_count_;
};
} // namespace internal
} // namespace ceres
#endif // CERES_PUBLIC_PROBLEM_IMPL_H_
| 2,930 |
565 | import pytest
import torch
from d3rlpy.models.builders import create_continuous_q_function
from d3rlpy.models.encoders import DefaultEncoderFactory
from d3rlpy.models.q_functions import MeanQFunctionFactory, QRQFunctionFactory
from d3rlpy.models.torch.q_functions import compute_max_with_n_actions
@pytest.mark.parametrize("observation_shape", [(4, 84, 84), (100,)])
@pytest.mark.parametrize("action_size", [3])
@pytest.mark.parametrize("encoder_factory", [DefaultEncoderFactory()])
@pytest.mark.parametrize(
"q_func_factory", [MeanQFunctionFactory(), QRQFunctionFactory()]
)
@pytest.mark.parametrize("n_ensembles", [2])
@pytest.mark.parametrize("batch_size", [100])
@pytest.mark.parametrize("n_quantiles", [32])
@pytest.mark.parametrize("n_actions", [10])
@pytest.mark.parametrize("lam", [0.75])
def test_compute_max_with_n_actions(
observation_shape,
action_size,
encoder_factory,
q_func_factory,
n_ensembles,
batch_size,
n_quantiles,
n_actions,
lam,
):
q_func = create_continuous_q_function(
observation_shape,
action_size,
encoder_factory,
q_func_factory,
n_ensembles=n_ensembles,
)
x = torch.rand(batch_size, *observation_shape)
actions = torch.rand(batch_size, n_actions, action_size)
y = compute_max_with_n_actions(x, actions, q_func, lam)
if isinstance(q_func_factory, MeanQFunctionFactory):
assert y.shape == (batch_size, 1)
else:
assert y.shape == (batch_size, q_func_factory.n_quantiles)
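

# A rough, self-contained sketch (assumed semantics, not the library's actual
# implementation) of the lambda-weighted ensemble reduction that
# compute_max_with_n_actions is commonly described as performing for BCQ-style
# targets: per sampled action, mix the pessimistic (min) and optimistic (max)
# ensemble estimates with `lam`, then take the max over the candidate actions.
def _sketch_max_with_n_actions(values, lam):
    # values: (n_ensembles, batch_size, n_actions) tensor of Q-value estimates
    mixed = lam * values.min(dim=0).values + (1 - lam) * values.max(dim=0).values
    return mixed.max(dim=1, keepdim=True).values  # -> (batch_size, 1)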
| 639 |
1,694 | <reponame>dengliming/stagemonitor
package org.stagemonitor.tracing.freemarker;
import freemarker.core.Environment;
import freemarker.core.Expression;
import net.bytebuddy.asm.Advice;
import net.bytebuddy.description.method.MethodDescription;
import net.bytebuddy.description.type.TypeDescription;
import net.bytebuddy.matcher.ElementMatcher;
import net.bytebuddy.matcher.ElementMatchers;
import org.stagemonitor.core.instrument.StagemonitorByteBuddyTransformer;
import org.stagemonitor.tracing.profiler.CallStackElement;
import org.stagemonitor.tracing.profiler.Profiler;
import static net.bytebuddy.matcher.ElementMatchers.named;
import static net.bytebuddy.matcher.ElementMatchers.takesArguments;
/**
* This class "profiles FTL files"
* <p>
 * This class adds the expressions and methods that Freemarker evaluates to the call tree, so that when a
 * model class of our application is called, we know which FTL file originated the call.
 * <p>
 * You may argue that this is not of particular interest because model objects are mostly POJOs and calling a getter
 * is not interesting performance-wise. This is true for POJOs, but some applications may choose not to fully initialize
 * the model objects and instead lazy-load the values on demand, i.e. only if they are actually needed for the template
 * (see the sketch after this class).
* <p>
* Example:
*
* <pre>{@code
* test.ftl:1#templateModel.allTheThings
* `-- String org.example.TemplateModel.getAllTheThings()
* `-- String org.example.ExampleDao.getAllTheThingsFromDB()
* `-- SELECT * from THINGS
* }</pre>
*
* Note that this will only be active when working with Freemarker versions starting at 2.3.23.
*/
public class FreemarkerProfilingTransformer extends StagemonitorByteBuddyTransformer {
/**
* Application code can be called by freemarker via the {@code freemarker.core.Dot} or the
* {@code freemarker.core.MethodCall} classes.
* <p>
* For example, when the expression ${templateModel.foo} is evaluated, {@code freemarker.core.Dot#_eval(Environment)}
* evaluates {@code foo} by calling {@code TemplateModel#getFoo()}.
* <p>
* The expression ${templateModel.getFoo()} will be evaluated a bit differently as
* {@code freemarker.core.Dot#_eval(Environment)} only returns a reference to the method
* {@code TemplateModel#getFoo()} instead of calling it directly. {@code freemarker.core.MethodCall#_eval(Environment)}
* then performs the actual call to {@code TemplateModel#getFoo()}.
*/
@Override
protected ElementMatcher.Junction<TypeDescription> getIncludeTypeMatcher() {
return named("freemarker.core.Dot")
.or(named("freemarker.core.MethodCall"));
}
@Override
protected ElementMatcher.Junction<MethodDescription> getExtraMethodElementMatcher() {
return named("_eval").and(takesArguments(Environment.class));
}
@Advice.OnMethodEnter(inline = false)
public static void onBeforeEvaluate(@Advice.Argument(0) Environment env, @Advice.This Expression dot) {
Profiler.start(env.getCurrentTemplate().getName() + ':' + dot.getBeginLine() + '#' + dot.toString());
}
@Advice.OnMethodExit(inline = false, onThrowable = Throwable.class)
public static void onAfterEvaluate() {
final CallStackElement currentFreemarkerCall = Profiler.getMethodCallParent();
Profiler.stop();
removeCurrentNodeIfItHasNoChildren(currentFreemarkerCall);
}
/**
* <pre>{@code
* test.ftl:1#templateModel.getFoo() <- added by {@code freemarker.core.MethodCall#_eval(Environment)}
* |-- test.ftl:1#templateModel.getFoo <- added by {@code freemarker.core.Dot#_eval(Environment)}
* `-- String org.stagemonitor.tracing.freemarker.FreemarkerProfilingTest$TemplateModel.getFoo()
* }</pre>
*
* We want to remove <code>templateModel.getFoo</code> as getFoo only returns the method reference, which is then
* invoked by {@code freemarker.core.MethodCall#_eval(Environment)}.
	 * Therefore, <code>getFoo</code> does not invoke the model and thus is not relevant for the call tree.
*/
private static void removeCurrentNodeIfItHasNoChildren(CallStackElement currentFreemarkerCall) {
if (currentFreemarkerCall != null && currentFreemarkerCall.getChildren().isEmpty()) {
currentFreemarkerCall.remove();
}
}
/**
* Makes sure that this transformer is only applied on Freemarker versions {@code >= 2.3.23} where the
* {@link Environment#getCurrentTemplate()} method was made public. This prevents nasty
* {@link IllegalAccessError}s and {@link NoSuchMethodError}s.
*/
@Override
public boolean isActive() {
try {
return hasMethodThat(named("getCurrentTemplate")
.and(ElementMatchers.<MethodDescription.InDefinedShape>isPublic())
.and(takesArguments(0)))
.matches(new TypeDescription.ForLoadedType(Class.forName("freemarker.core.Environment")));
} catch (ClassNotFoundException e) {
return false;
}
}
private ElementMatcher.Junction<TypeDescription> hasMethodThat(final ElementMatcher<MethodDescription.InDefinedShape> methodElementMatcher) {
return new ElementMatcher.Junction.AbstractBase<TypeDescription>() {
@Override
public boolean matches(TypeDescription target) {
return !target.getDeclaredMethods().filter(methodElementMatcher).isEmpty();
}
};
}
}
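// A minimal sketch of the lazy-loading scenario from the class comment above,
// using the hypothetical names from that example (TemplateModel, ExampleDao):
// the getter only touches the DAO when the template actually evaluates the
// expression, which is exactly what then shows up as a child of the FTL node
// in the call tree.
//
//   public class TemplateModel {
//       private final ExampleDao exampleDao;
//       public TemplateModel(ExampleDao exampleDao) { this.exampleDao = exampleDao; }
//       public String getAllTheThings() {
//           // Runs only if ${templateModel.allTheThings} is reached in the template
//           return exampleDao.getAllTheThingsFromDB();
//       }
//   }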
| 1,635 |
1,676 | <filename>chat-sdk-core/src/main/java/sdk/chat/core/image/ImageUploadResult.java
package sdk.chat.core.image;
public class ImageUploadResult {
public String url;
public String uri;
public ImageUploadResult(String url, String uri) {
this.url = url;
this.uri = uri;
}
}
| 144 |
389 | <reponame>tcmoore32/sheer-madness
/*
* Copyright 2014 Guidewire Software, Inc.
*/
package gw.internal.gosu.ir.compiler.bytecode;
import gw.lang.ir.IRSymbol;
import java.util.Collection;
import java.util.HashMap;
public class IRCompilerScope {
private IRCompilerScope _parent;
private HashMap<String, IRCompilerLocalVar> _localVars;
private int _size;
private boolean _active = true;
public IRCompilerScope(IRCompilerScope parent) {
_parent = parent;
_localVars = new HashMap<String, IRCompilerLocalVar>();
}
public IRCompilerLocalVar findLocalVar(IRSymbol symbol) {
if (_localVars.containsKey(symbol.getName())) {
return _localVars.get(symbol.getName());
} else if (_parent != null) {
return _parent.findLocalVar(symbol);
} else {
return null;
}
}
public IRCompilerLocalVar createLocalVar(IRSymbol symbol) {
IRCompilerLocalVar existingVar = findLocalVar( symbol );
if (existingVar == null) {
existingVar = new IRCompilerLocalVar(symbol, totalWidth(), this);
_localVars.put(symbol.getName(), existingVar);
_size += existingVar.getWidth();
}
return existingVar;
}
public Collection<IRCompilerLocalVar> getLocalVars() {
return _localVars.values();
}
public int totalWidth() {
return (_parent == null ? 0 : _parent.totalWidth()) + _size;
}
public boolean isActive() {
return _active;
}
public void scopeRemoved() {
_active = false;
}
}
| 531 |
10,608 | """TODO(blended_skill_talk): Add a description here."""
import json
import datasets
# TODO(blended_skill_talk): BibTeX citation
_CITATION = """\
@misc{smith2020evaluating,
title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2020},
eprint={2004.08449},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
# TODO(blended_skill_talk):
_DESCRIPTION = """\
A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.
"""
_URL = "http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz"
_TASK = ["convai2", "empathetic_dialogues", "wizard_of_wikipedia"]
class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
"""TODO(blended_skill_talk): Short description of my dataset."""
# TODO(blended_skill_talk): Set up version.
VERSION = datasets.Version("1.0.0")
def _info(self):
# TODO(blended_skill_talk): Specifies the datasets.DatasetInfo object
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"personas": datasets.features.Sequence(datasets.Value("string")),
"additional_context": datasets.Value("string"),
"previous_utterance": datasets.features.Sequence(datasets.Value("string")),
"context": datasets.Value("string"),
"free_messages": datasets.features.Sequence(datasets.Value("string")),
"guided_messages": datasets.features.Sequence(datasets.Value("string")),
"suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK})
# These are the features of your dataset like images, labels ...
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://parl.ai/projects/bst/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO(blended_skill_talk): Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
archive = dl_manager.download(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": "train.json",
"files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": "valid.json",
"files": dl_manager.iter_archive(archive),
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": "test.json",
"files": dl_manager.iter_archive(archive),
},
),
]
def _generate_examples(self, filepath, files):
"""Yields examples."""
# TODO(blended_skill_talk): Yields (key, example) tuples from the dataset
for path, f in files:
if path == filepath:
data = json.load(f)
for id_, row in enumerate(data):
personas = [row["personas"][1][0], row["personas"][1][1]]
dialogs = [dialog[1] for dialog in row["dialog"]]
free_messages = []
guided_messages = []
for i in range(len(dialogs) // 2):
free_messages.append(dialogs[2 * i])
guided_messages.append(dialogs[2 * i + 1])
context = row["context_dataset"]
add_context = row["additional_context"] if context == "wizard_of_wikipedia" else ""
previous_utterance = [row["free_turker_utterance"], row["guided_turker_utterance"]]
suggestions = row["suggestions"]
convai_suggestions = []
empathetic_suggestions = []
wow_suggestions = []
for i in range(len(suggestions) // 2):
convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
yield id_, {
"personas": personas,
"additional_context": add_context,
"previous_utterance": previous_utterance,
"context": context,
"free_messages": free_messages,
"guided_messages": guided_messages,
"suggestions": {
"convai2": convai_suggestions,
"empathetic_dialogues": empathetic_suggestions,
"wizard_of_wikipedia": wow_suggestions,
},
}
break
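
# Illustrative note (assumed data layout, matching the indexing above):
# row["dialog"] alternates free-turker and guided-turker turns, e.g.
#   dialog = [[0, "hi"], [1, "hello"], [0, "how are you?"], [1, "fine, thanks"]]
# so dialogs[0::2] become free_messages and dialogs[1::2] guided_messages, and
# suggestions are likewise read from the guided (odd-indexed) turns only.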
| 2,960 |
342 | package sentinelgroup.io.sentinel.ui.custom;
public interface VpnListSearchListener {
void onSearchTriggered(String iSearchQuery);
}
| 43 |
1,100 | package com.xncoding.echarts.api.model.jmh;
/**
* XAxis
*
* @author XiongNeng
* @version 1.0
* @since 2018/2/9
*/
public class XAxis {
private String type;
private String name;
public XAxis() {
}
public XAxis(String type, String name) {
this.type = type;
this.name = name;
}
public String getType() {
return type;
}
public void setType(String type) {
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
| 262 |
357 | /*
* Copyright © 2012-2015 VMware, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not
* use this file except in compliance with the License. You may obtain a copy
* of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an “AS IS” BASIS, without
* warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
#ifndef _VMDNS_BUFFER_H__
#define _VMDNS_BUFFER_H__
#ifdef __cplusplus
extern "C" {
#endif
#define VMDNS_MAX_SIZE_BUFFER 2048
typedef struct _VMDNS_MESSAGE_BUFFER
{
size_t szMaxSize;
size_t szCurrentSize;
size_t szLength; //write cursor
size_t szCursor; //read cursor
BOOL bCanWrite;
BOOL bTokenizeDomainName;
PBYTE pMessage;
}VMDNS_MESSAGE_BUFFER, *PVMDNS_MESSAGE_BUFFER;
DWORD
VmDnsAllocateBufferStream(
size_t dwMaxSize,
PVMDNS_MESSAGE_BUFFER *ppVmDnsBuffer
);
VOID
VmDnsFreeBufferStream(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsLockBufferForWrite(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsAllocateBufferStreamWithBuffer(
PBYTE pBuffer,
size_t szBufSize,
size_t szMaxSize,
BOOL bCanWrite,
PVMDNS_MESSAGE_BUFFER *ppVmDnsBuffer
);
DWORD
VmDnsAllocateBufferStreamFromBufferStream(
PVMDNS_MESSAGE_BUFFER pVmDnsBufferSource,
BOOL bCanWrite,
PVMDNS_MESSAGE_BUFFER *ppVmDnsBufferDest
);
DWORD
VmDnsCopyBufferFromBufferStream(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PBYTE pBuffer,
PDWORD pdwBufferSize
);
DWORD
VmDnsWriteBoolToBuffer(
BOOL bData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteBooleanToBuffer(
BOOLEAN bData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteCharToBuffer(
CHAR cData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteUCharToBuffer(
UCHAR ucData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteUINT16ToBuffer(
UINT16 uData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteUINT32ToBuffer(
UINT32 uData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteUINT64ToBuffer(
UINT64 uData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteINT16ToBuffer(
INT16 iData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteINT32ToBuffer(
INT32 iData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteINT64ToBuffer(
INT64 iData,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteStringToBuffer(
PSTR pszString,
UINT8 uStringLength,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsWriteBlobToBuffer(
PVMDNS_BLOB pBlob,
BOOL bWriteSize,
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer
);
DWORD
VmDnsReadBoolFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PBOOL pbData
);
DWORD
VmDnsReadBooleanFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PBOOLEAN pbData
);
DWORD
VmDnsReadCharFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PCHAR pcData
);
DWORD
VmDnsReadUCharFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PUCHAR pucData
);
DWORD
VmDnsReadUINT16FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PUINT16 puData
);
DWORD
VmDnsReadUINT32FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PUINT32 puData
);
DWORD
VmDnsReadUINT64FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PUINT64 puData
);
DWORD
VmDnsReadINT16FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PINT16 piData
);
DWORD
VmDnsReadINT32FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PINT32 piData
);
DWORD
VmDnsReadINT64FromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PINT64 piData
);
DWORD
VmDnsReadStringFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PSTR *ppszString,
PDWORD pdwStringLength,
PBOOL pbEndOfString
);
DWORD
VmDnsReadOffsetStringFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
UINT8 unOffset,
PSTR *ppszString,
PDWORD pdwStringLength,
PBOOL pbEndOfString
);
DWORD
VmDnsReadBlobFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PVMDNS_BLOB *ppBlob
);
DWORD
VmDnsReadRecordFromBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
PVMDNS_RECORD *ppDnsRecord
);
DWORD
VmDnsIsTokenizedBuffer(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
BOOL bTokenized
);
DWORD
VmDnsSetBufferTokenizedFlag(
PVMDNS_MESSAGE_BUFFER pVmDnsBuffer,
BOOL bTokenized
);
#ifdef __cplusplus
}
#endif
#endif /* _VMDNS_BUFFER_H__ */
| 2,275 |
1,604 | package org.bouncycastle.oer.its;
import org.bouncycastle.asn1.ASN1Object;
import org.bouncycastle.asn1.ASN1Primitive;
/**
* <pre>
* SymmRecipientInfo ::= SEQUENCE {
* recipientId HashedId8,
* encKey SymmetricCiphertext
* }
* </pre>
*/
public class SymmRecipientInfo
extends ASN1Object
{
private final HashedId recipientId;
private final SymmetricCiphertext encKey;
public SymmRecipientInfo(HashedId recipientId, SymmetricCiphertext encKey)
{
this.recipientId = recipientId;
this.encKey = encKey;
}
public HashedId getRecipientId()
{
return recipientId;
}
public SymmetricCiphertext getEncKey()
{
return encKey;
}
public ASN1Primitive toASN1Primitive()
{
return Utils.toSequence(recipientId, encKey);
}
}
| 365 |
307 | <gh_stars>100-1000
{"name":"Seaside Surfin'","toppings":[{"id":6,"name":"mushroom"},{"id":7,"name":"olive"},{"id":3,"name":"basil"},{"id":1,"name":"anchovy"},{"id":8,"name":"onion"},{"id":11,"name":"sweetcorn"},{"id":9,"name":"pepper"},{"id":5,"name":"mozzarella"}],"id":2} | 100 |
852 | #ifndef VFillMap_H
#ifndef __CINT__
#define VFillMap_H
#include <map>
#include <vector>
#include "DataFormats/EcalRecHit/interface/EcalRecHitCollections.h"
#include "DataFormats/DetId/interface/DetId.h"
#include "CondFormats/DataRecord/interface/EcalIntercalibConstantsRcd.h"
#include "CondFormats/EcalObjects/interface/EcalIntercalibConstants.h"
class VFillMap {
public:
//!ctor
VFillMap(int,
int,
const std::map<int, int> &,
double,
double,
const std::map<int, int> &,
EcalIntercalibConstantMap *,
EcalIntercalibConstantMap *);
//!dtor
virtual ~VFillMap(){};
//! The Map filler
virtual void fillMap(const std::vector<std::pair<DetId, float> > &,
const DetId,
const EcalRecHitCollection *,
const EcalRecHitCollection *,
std::map<int, double> &xtlMap,
double &) = 0;
int m_recoWindowSidex;
int m_recoWindowSidey;
std::map<int, int> m_xtalRegionId;
double m_minEnergyPerCrystal;
double m_maxEnergyPerCrystal;
std::map<int, int> m_IndexInRegion;
EcalIntercalibConstantMap *m_barrelMap;
EcalIntercalibConstantMap *m_endcapMap;
};
#endif
#endif
| 643 |
335 | <reponame>KitsuneAlex/ProjectE
package moze_intel.projecte.network.packets.to_client;
import javax.annotation.Nullable;
import moze_intel.projecte.api.ItemInfo;
import moze_intel.projecte.gameObjs.container.CondenserContainer;
import moze_intel.projecte.network.packets.IPEPacket;
import net.minecraft.client.Minecraft;
import net.minecraft.client.entity.player.ClientPlayerEntity;
import net.minecraft.network.PacketBuffer;
import net.minecraftforge.fml.network.NetworkEvent.Context;
public class UpdateCondenserLockPKT implements IPEPacket {
@Nullable
private final ItemInfo lockInfo;
private final short windowId;
public UpdateCondenserLockPKT(short windowId, @Nullable ItemInfo lockInfo) {
this.windowId = windowId;
this.lockInfo = lockInfo;
}
@Override
public void handle(Context context) {
ClientPlayerEntity player = Minecraft.getInstance().player;
if (player != null && player.openContainer instanceof CondenserContainer && player.openContainer.windowId == windowId) {
((CondenserContainer) player.openContainer).updateLockInfo(lockInfo);
}
}
@Override
public void encode(PacketBuffer buffer) {
buffer.writeShort(windowId);
if (lockInfo == null) {
buffer.writeBoolean(false);
} else {
buffer.writeBoolean(true);
buffer.writeRegistryId(lockInfo.getItem());
buffer.writeCompoundTag(lockInfo.getNBT());
}
}
public static UpdateCondenserLockPKT decode(PacketBuffer buffer) {
short windowId = buffer.readShort();
ItemInfo lockInfo = null;
if (buffer.readBoolean()) {
lockInfo = ItemInfo.fromItem(buffer.readRegistryId(), buffer.readCompoundTag());
}
return new UpdateCondenserLockPKT(windowId, lockInfo);
}
} | 541 |
512 | <reponame>Serchinastico/CoolSwitch
package com.serchinastico.sample;
/*
* Copyright (C) 2015 <NAME>.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import android.os.Bundle;
import android.support.v7.app.ActionBarActivity;
import com.serchinastico.coolswitch.CoolSwitch;
/**
* @author <NAME>.
*/
public class SampleActivity extends ActionBarActivity implements CoolSwitch.AnimationListener {
private CoolSwitch coolSwitch;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_sample);
mapViews();
coolSwitch.addAnimationListener(this);
}
@Override
public void onCheckedAnimationFinished() {
// Empty
}
@Override
public void onUncheckedAnimationFinished() {
// Empty
}
private void mapViews() {
coolSwitch = (CoolSwitch) findViewById(R.id.cool_switch_foo);
}
@Override
protected void onDestroy() {
coolSwitch.removeAnimationListener(this);
super.onDestroy();
}
}
| 442 |
813 | from torchvision.transforms import *
from PIL import Image
class ToGray(object):
"""
Convert image from RGB to gray level.
"""
def __call__(self, img):
return img.convert('L') | 73 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef ASH_METRICS_TIME_TO_FIRST_PRESENT_RECORDER_H_
#define ASH_METRICS_TIME_TO_FIRST_PRESENT_RECORDER_H_
#include <stdint.h>
#include "ash/public/interfaces/process_creation_time_recorder.mojom.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/time/time.h"
#include "mojo/public/cpp/bindings/binding.h"
namespace aura {
class Window;
}
namespace ash {
class TimeToFirstPresentRecorderTestApi;
// Used for tracking the time from when main started to when the first bits make
// it to the screen, and logging a histogram of that time. Chrome is responsible for
// providing the start time by way of ProcessCreationTimeRecorder.
//
// This only logs the time to present the primary root window.
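//
// A hedged sketch (plumbing names assumed, not actual Chrome code) of the
// browser-side call that feeds this recorder over the mojo interface:
//
//   mojom::ProcessCreationTimeRecorderPtr recorder;
//   ConnectToAshInterface(&recorder);  // hypothetical connection helper
//   recorder->SetMainProcessCreationTime(main_entry_point_ticks);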
class TimeToFirstPresentRecorder : public mojom::ProcessCreationTimeRecorder {
public:
explicit TimeToFirstPresentRecorder(aura::Window* window);
~TimeToFirstPresentRecorder() override;
void Bind(mojom::ProcessCreationTimeRecorderRequest request);
private:
friend class TimeToFirstPresentRecorderTestApi;
// If both times are available the time to present is logged.
void LogTime();
// Callback from the compositor when it presented a valid frame.
void DidPresentCompositorFrame(base::TimeTicks time,
base::TimeDelta refresh,
uint32_t flags);
base::TimeDelta time_to_first_present() const {
return present_time_ - process_creation_time_;
}
// mojom::ProcessCreationTimeRecorder:
void SetMainProcessCreationTime(base::TimeTicks start_time) override;
base::TimeTicks process_creation_time_;
base::TimeTicks present_time_;
// Only used by tests. If valid it's Run() when both times are determined.
base::OnceClosure log_callback_;
mojo::Binding<mojom::ProcessCreationTimeRecorder> binding_{this};
DISALLOW_COPY_AND_ASSIGN(TimeToFirstPresentRecorder);
};
} // namespace ash
#endif // ASH_METRICS_TIME_TO_FIRST_PRESENT_RECORDER_H_
| 712 |
956 | /*-
* BSD LICENSE
*
* Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 <NAME>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <nt.h>
#include "rte_eth_ntacc.h"
#include "filter_ntacc.h"
#define USE_KEY_MATCH
#define NON_ZERO2(a) (*a != 0 || *(a + 1) != 0)
#define NON_ZERO4(a) (*a != 0 || *(a + 1) != 0 || *(a + 2) != 0 || *(a + 3) != 0)
#define NON_ZERO6(a) (a[0] != 0 || a[1] != 0 || a[2] != 0 || a[3] != 0 || a[4] != 0 || a[5] != 0)
#define NON_ZERO16(a) (a[0] != 0 || a[1] != 0 || a[2] != 0 || a[3] != 0 || \
a[4] != 0 || a[5] != 0 || a[6] != 0 || a[7] != 0 || \
a[8] != 0 || a[9] != 0 || a[10] != 0 || a[11] != 0 || \
a[12] != 0 || a[13] != 0 || a[14] != 0 || a[15] != 0)
#define IPV4_ADDRESS(a) ((const char *)&a)[3] & 0xFF, ((const char *)&a)[2] & 0xFF, \
((const char *)&a)[1] & 0xFF, ((const char *)&a)[0] & 0xFF
#define IPV6_ADDRESS(a) a[0] & 0xFF, a[1] & 0xFF, a[2] & 0xFF, a[3] & 0xFF, \
a[4] & 0xFF, a[5] & 0xFF, a[6] & 0xFF, a[7] & 0xFF, \
a[8] & 0xFF, a[9] & 0xFF, a[10] & 0xFF, a[11] & 0xFF, \
a[12] & 0xFF, a[13] & 0xFF, a[14] & 0xFF, a[15] & 0xFF
#define MAC_ADDRESS2(a) a[5] & 0xFF, a[4] & 0xFF, a[3] & 0xFF, a[2] & 0xFF, a[1] & 0xFF, a[0] & 0xFF, \
a[11] & 0xFF, a[10] & 0xFF, a[9] & 0xFF, a[8] & 0xFF, a[7] & 0xFF, a[6] & 0xFF, \
a[12] & 0xFF, a[13] & 0xFF, a[14] & 0xFF, a[15] & 0xFF
#define MAC_ADDRESS(a) a[0] & 0xFF, a[1] & 0xFF, a[2] & 0xFF, a[3] & 0xFF, a[4] & 0xFF, a[5] & 0xFF
#define MAC_ADDRESS_SWAP(a,b) {b[5]=a[0];b[4]=a[1];b[3]=a[2];b[2]=a[3];b[1]=a[4];b[0]=a[5];}
#if 0
#define PRINT_IPV4(a, b) { uint32_t c = b; printf("%s: %d.%d.%d.%d\n", a, IPV4_ADDRESS(c)); }
#else
#define PRINT_IPV4(a, b)
#endif
#define CHECK8(a, b) (a != NULL && (a->b != 0 && a->b != 0xFF))
#define CHECK16(a, b) (a != NULL && (a->b != 0 && a->b != 0xFFFF))
#define CHECK32(a, b) (a != NULL && (a->b != 0 && a->b != 0xFFFFFFFF))
#define CHECK64(a, b) (a != NULL && (a->b != 0 && a->b != 0xFFFFFFFFFFFFFFFF))
#define CHECKIPV6(a) _CheckArray(a, 16)
#define CHECKETHER(a) _CheckArray(a, 6)
static inline int _CheckArray(const uint8_t *addr, uint8_t len)
{
int i;
int zeros = 0;
int ffs = 0;
for (i = 0; i < len; i++) {
if (addr[i] == 0)
zeros++;
if (addr[i] == 0xFF)
ffs++;
}
if (zeros == len ||
ffs == len) {
return 0;
}
return 1;
}
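/*
 * Derived note: _CheckArray() returns 1 only for a "partial" mask, i.e. one
 * that is neither all-zero nor all-0xFF, mirroring the CHECK16/CHECK32 macros
 * above. For example, an IPv6 /64 prefix mask (8 bytes of 0xFF, 8 of 0x00)
 * yields 1, while an exact-match all-0xFF mask yields 0.
 */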
void pushNtplID(struct rte_flow *flow, uint32_t ntplId)
{
struct filter_flow *filter = rte_zmalloc("filter", sizeof(struct filter_flow), 0);
if (!filter) {
    PMD_NTACC_LOG(ERR, "Memory allocation failed. Filter cleanup is not possible\n");
}
else {
memset(filter, 0, sizeof(struct filter_flow));
filter->ntpl_id = ntplId;
LIST_INSERT_HEAD(&flow->ntpl_id, filter, next);
}
}
enum layer_e {
LAYER2,
LAYER3,
LAYER4,
VLAN,
IP,
MPLS,
PROTO,
};
/**
* Get the layer NTPL keyword. Either an outer or an inner
* version.
*/
static const char *GetLayer(enum layer_e layer, bool tunnel)
{
switch (layer) {
case LAYER2:
if (tunnel)
return "InnerLayer2Header";
else
return "Layer2Header";
case LAYER3:
case IP:
if (tunnel)
return "InnerLayer3Header";
else
return "Layer3Header";
case LAYER4:
if (tunnel)
return "InnerLayer4Header";
else
return "Layer4Header";
case VLAN:
if (tunnel)
return "InnerFirstVLAN";
else
return "FirstVLAN";
case MPLS:
if (tunnel)
return "InnerFirstMPLS";
else
return "FirstMPLS";
case PROTO:
if (tunnel)
return "InnerIpProtocol";
else
return "IpProtocol";
}
return "UNKNOWN";
}
void FlushHash(struct pmd_internals *internals)
{
char ntpl_buf[21];
struct filter_hash_s *pHash;
loop:
LIST_FOREACH(pHash, &internals->filter_hash, next) {
if (pHash->port == internals->port) {
LIST_REMOVE(pHash, next);
snprintf(ntpl_buf, 20, "delete=%d", pHash->ntpl_id);
NTACC_LOCK(&internals->configlock);
DoNtpl(ntpl_buf, NULL, internals, NULL);
NTACC_UNLOCK(&internals->configlock);
PMD_NTACC_LOG(DEBUG, "Deleting Hash filter: %s\n", ntpl_buf);
rte_free(pHash);
goto loop;
}
}
}
//void DeleteHash(uint64_t rss_hf, uint8_t port, int priority, struct pmd_internals *internals) {
// char ntpl_buf[21];
// struct filter_hash_s *pHash;
//
// LIST_FOREACH(pHash, &internals->filter_hash, next) {
// if (pHash->rss_hf == rss_hf && pHash->port == port && pHash->priority == priority) {
// LIST_REMOVE(pHash, next);
// snprintf(ntpl_buf, 20, "delete=%d", pHash->ntpl_id);
// DoNtpl(ntpl_buf, NULL, internals);
// PMD_NTACC_LOG(DEBUG, "Deleting Hash filter: %s\n", ntpl_buf);
// rte_free(pHash);
// }
// }
//}
static void pushHash(uint32_t ntpl_id, uint64_t rss_hf, struct pmd_internals *internals, int priority)
{
struct filter_hash_s *pHash = rte_zmalloc(internals->name, sizeof(struct filter_hash_s), 0);
if (!pHash) {
    PMD_NTACC_LOG(ERR, "Memory allocation failed. Filter cleanup is not possible\n");
}
else {
pHash->ntpl_id = ntpl_id;
pHash->port = internals->port;
pHash->priority = priority;
pHash->rss_hf = rss_hf;
LIST_INSERT_HEAD(&internals->filter_hash, pHash, next);
}
}
static int FindHash(uint64_t rss_hf, struct pmd_internals *internals, int priority) {
struct filter_hash_s *pHash;
LIST_FOREACH(pHash, &internals->filter_hash, next) {
if (pHash->rss_hf == rss_hf && pHash->port == internals->port && pHash->priority == priority) {
return 1;
}
}
return 0;
}
#define TMP_BSIZE 200
#define PRINT_HASH(a,b) { if (PrintHash(a, priority, internals, rss_hf, b) != 0) return -1; }
static int PrintHash(const char *str, int priority, struct pmd_internals *internals, uint64_t rss_hf, uint8_t tuple)
{
uint32_t ntplID;
char tmpBuf[TMP_BSIZE + 1];
const char *ptrTuple = "hashroundrobin";
switch (internals->symHashMode) {
case SYM_HASH_DIS_PER_PORT:
switch (tuple) {
case 0x02:
ptrTuple = "hash2Tuple";
break;
case 0x05:
ptrTuple = "hash5Tuple";
break;
case 0x06:
ptrTuple = "hash5TupleSCTP";
break;
case 0x12:
ptrTuple = "hashInner2Tuple";
break;
case 0x15:
ptrTuple = "hashInner5Tuple";
break;
}
break;
default:
case SYM_HASH_ENA_PER_PORT:
switch (tuple) {
case 0x02:
ptrTuple = "hash2TupleSorted";
break;
case 0x05:
ptrTuple = "hash5TupleSorted";
break;
case 0x06:
ptrTuple = "hash5TupleSCTPSorted";
break;
case 0x12:
ptrTuple = "hashInner2TupleSorted";
break;
case 0x15:
ptrTuple = "hashInner5TupleSorted";
break;
}
break;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wformat-nonliteral"
snprintf(tmpBuf, TMP_BSIZE, str, priority, internals->port, internals->tagName, ptrTuple);
#pragma GCC diagnostic pop
NTACC_LOCK(&internals->configlock);
if (DoNtpl(tmpBuf, &ntplID, internals, NULL) != 0) {
NTACC_UNLOCK(&internals->configlock);
return -1;
}
NTACC_UNLOCK(&internals->configlock);
NTACC_LOCK(&internals->lock);
pushHash(ntplID, rss_hf, internals, priority);
NTACC_UNLOCK(&internals->lock);
return 0;
}
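/*
 * Illustrative expansion (tag name assumed): with priority 0, port 1,
 * tag "port1" and SYM_HASH_ENA_PER_PORT, the 5-tuple UDP case below submits
 * an NTPL line of the form:
 *   Hashmode[priority=0;port=1;Layer3Type=IP;Layer4Type=UDP;tag=port1]=hash5TupleSorted
 */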
/**
* Create the hash filter from the DPDK hash function.
*/
int CreateHashModeHash(uint64_t rss_hf, struct pmd_internals *internals, struct rte_flow *flow, int priority)
{
if (rss_hf == 0) {
PMD_NTACC_LOG(ERR, "No HASH function is selected. Ignoring hash.\n");
return 0;
}
  // These hash functions are not supported and will cause an error
if ((rss_hf & ETH_RSS_L2_PAYLOAD) ||
(rss_hf & ETH_RSS_PORT) ||
(rss_hf & ETH_RSS_VXLAN) ||
(rss_hf & ETH_RSS_GENEVE) ||
(rss_hf & ETH_RSS_NVGRE)) {
PMD_NTACC_LOG(ERR, "One of the selected HASH functions is not supported\n");
return -1;
}
if (flow) {
flow->port = internals->port;
flow->rss_hf = rss_hf;
flow->priority = priority;
}
NTACC_LOCK(&internals->lock);
if (FindHash(rss_hf, internals, priority)) {
// Hash is already programmed
NTACC_UNLOCK(&internals->lock);
return 0;
}
NTACC_UNLOCK(&internals->lock);
/*****************************/
/* Outer UDP hash mode setup */
/*****************************/
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) || (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)) {
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) && (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IP;Layer4Type=UDP;tag=%s]=%s", 0x05);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV4;Layer4Type=UDP;tag=%s]=%s", 0x05);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV6;Layer4Type=UDP;tag=%s]=%s", 0x05);
}
}
/*****************************/
/* Outer TCP hash mode setup */
/*****************************/
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) || (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)) {
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) && (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IP;Layer4Type=TCP;tag=%s]=%s", 0x05);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV4;Layer4Type=TCP;tag=%s]=%s", 0x05);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV6;Layer4Type=TCP;tag=%s]=%s", 0x05);
}
}
/******************************/
/* Outer SCTP hash mode setup */
/******************************/
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) || (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP)) {
if ((rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) && (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP)) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IP;Layer4Type=SCTP;tag=%s]=%s", 0x06);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV4;Layer4Type=SCTP;tag=%s]=%s", 0x06);
}
else if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV6;Layer4Type=SCTP;tag=%s]=%s", 0x06);
}
}
/****************************/
/* Outer IP hash mode setup */
/****************************/
if ((rss_hf & ETH_RSS_IPV4) || (rss_hf & ETH_RSS_IPV6) || (rss_hf & ETH_RSS_NONFRAG_IPV4_OTHER) || (rss_hf & ETH_RSS_NONFRAG_IPV6_OTHER)) {
if (((rss_hf & ETH_RSS_IPV4) && (rss_hf & ETH_RSS_IPV6)) ||
((rss_hf & ETH_RSS_NONFRAG_IPV4_OTHER) && (rss_hf & ETH_RSS_NONFRAG_IPV6_OTHER)) ||
((rss_hf & ETH_RSS_NONFRAG_IPV4_OTHER) && (rss_hf & ETH_RSS_IPV6)) ||
((rss_hf & ETH_RSS_IPV6) && (rss_hf & ETH_RSS_NONFRAG_IPV6_OTHER))) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IP;tag=%s]=%s", 0x02);
}
else if ((rss_hf & ETH_RSS_IPV4) || (rss_hf & ETH_RSS_NONFRAG_IPV4_OTHER)) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV4;tag=%s]=%s", 0x02);
}
else if ((rss_hf & ETH_RSS_IPV6) || (rss_hf & ETH_RSS_NONFRAG_IPV6_OTHER)) {
PRINT_HASH("Hashmode[priority=%u;port=%u;Layer3Type=IPV6;tag=%s]=%s", 0x02);
}
}
return 0;
}
static const char *GetSorted(enum rte_eth_hash_function func)
{
if (func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
return "XOR=true";
}
else {
return "XOR=false";
}
}
void CreateHash(char *ntpl_buf, const struct rte_flow_action_rss *rss, struct pmd_internals *internals)
{
enum rte_eth_hash_function func;
bool tunnel = false;
// Select either sorted or non-sorted hash
switch (rss->func)
{
case RTE_ETH_HASH_FUNCTION_DEFAULT:
internals->symHashMode = SYM_HASH_DIS_PER_PORT;
func = RTE_ETH_HASH_FUNCTION_DEFAULT;
break;
case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
internals->symHashMode = SYM_HASH_ENA_PER_PORT;
func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
break;
default:
return;
}
// Select either inner tunnel or outer tunnel hash
switch (rss->level)
{
case 0:
case 1:
tunnel = false;
break;
case 2:
tunnel = true;
break;
}
if (rss->types & ETH_RSS_NONFRAG_IPV4_OTHER) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[12]/32,HashWord4_7=%s[16]/32,HashWordP=%s,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel),
GetLayer(PROTO, tunnel), GetSorted(func));
return;
}
if ((rss->types & ETH_RSS_NONFRAG_IPV4_TCP) || (rss->types & ETH_RSS_NONFRAG_IPV4_UDP) || (rss->types & ETH_RSS_NONFRAG_IPV4_SCTP)) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[12]/32,HashWord4_7=%s[16]/32,HashWord8=%s[0]/32,HashWordP=%s,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel), GetLayer(LAYER4, tunnel),
GetLayer(PROTO, tunnel), GetSorted(func));
return;
}
if ((rss->types & ETH_RSS_IPV4) || (rss->types & ETH_RSS_FRAG_IPV4)) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[12]/32,HashWord4_7=%s[16]/32,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel), GetSorted(func));
return;
}
if (rss->types & ETH_RSS_NONFRAG_IPV6_OTHER) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[8]/128,HashWord4_7=%s[24]/128,HashWordP=%s,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel),
GetLayer(PROTO, tunnel), GetSorted(func));
return;
}
if ((rss->types & ETH_RSS_NONFRAG_IPV6_TCP) ||
(rss->types & ETH_RSS_IPV6_TCP_EX) ||
(rss->types & ETH_RSS_NONFRAG_IPV6_UDP) ||
(rss->types & ETH_RSS_IPV6_UDP_EX) ||
(rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[8]/128,HashWord4_7=%s[24]/128,HashWord8=%s[0]/32,HashWordP=%s,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel), GetLayer(LAYER4, tunnel),
GetLayer(PROTO, tunnel), GetSorted(func));
return;
}
if ((rss->types & ETH_RSS_IPV6) || (rss->types & ETH_RSS_FRAG_IPV6) || (rss->types & ETH_RSS_IPV6_EX)) {
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1,
";Hash=HashWord0_3=%s[8]/128,HashWord4_7=%s[24]/128,%s",
GetLayer(LAYER3, tunnel), GetLayer(LAYER3, tunnel), GetSorted(func));
return;
}
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, ";Hash=roundrobin");
}
/**
* Get a keyset value from the keyset pool.
 * Used by the keymatcher command.
*/
int GetKeysetValue(struct pmd_internals *internals)
{
int i;
if (internals->adapterNo >= 8) {
return -1;
}
pthread_mutex_lock(&internals->shm->mutex);
for (i = 0; i < 12; i++) {
if (internals->shm->keyset[0][i] == 0) {
internals->shm->keyset[0][i] = 1;
pthread_mutex_unlock(&internals->shm->mutex);
return (i + 3);
}
}
pthread_mutex_unlock(&internals->shm->mutex);
return -1;
}
/**
* Return a keyset value to the keyset pool.
*/
int ReturnKeysetValue(struct pmd_internals *internals, int value)
{
if (internals->adapterNo >= 8 || value < 3 || value > 15) {
return -1;
}
pthread_mutex_lock(&internals->shm->mutex);
internals->shm->keyset[0][value - 3] = 0;
pthread_mutex_unlock(&internals->shm->mutex);
return 0;
}
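/*
 * Note derived from the code above: GetKeysetValue() hands out key set values
 * 3..14 (12 slots per adapter) and returns -1 when the pool is exhausted;
 * ReturnKeysetValue() puts a value back so a later filter can reuse the slot.
 */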
/**
* Create the stream ID part of the NTPL assign command.
*
 * The stream ID is created from the number of queues, either as a range
 * or, if there is only one queue, as a single value.
*/
void CreateStreamid(char *ntpl_buf, struct pmd_internals *internals, uint32_t nb_queues, uint8_t *list_queues)
{
bool range = true;
char buf[21];
uint32_t i;
if (nb_queues > 1) {
// We need to check whether we can set it up as a range or list
for (i = 0; i < nb_queues - 1; i++) {
if (list_queues[i] != (list_queues[i + 1] - 1)) {
        // The queues are not contiguous, so we need to use a list
range = false;
break;
}
}
}
else {
range = false;
}
strcat(ntpl_buf, "streamid=");
if (range) {
snprintf(buf, 20, "(%u..%u)", internals->rxq[list_queues[0]].stream_id, internals->rxq[list_queues[nb_queues - 1]].stream_id);
strcat(ntpl_buf, buf);
for (i = 0; i < nb_queues; i++) {
internals->rxq[list_queues[i]].stream_assigned = 1;
}
}
else {
for (i = 0; i < nb_queues; i++) {
snprintf(buf, 20, "%u", internals->rxq[list_queues[i]].stream_id);
internals->rxq[list_queues[i]].stream_assigned = 1;
strcat(ntpl_buf, buf);
if (i < nb_queues - 1) {
strcat(ntpl_buf, ",");
}
}
}
}
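/*
 * Illustrative output: four contiguous stream ids 4..7 produce the range form
 * "streamid=(4..7)", while non-contiguous ids, e.g. 4 and 6, fall back to the
 * list form "streamid=4,6".
 */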
static void InsertFilterValues(struct filter_values_s *pInsertFilterValues, struct pmd_internals *internals)
{
struct filter_values_s *pFilter_values;
struct filter_values_s *pLast_value;
uint32_t insertVal;
uint32_t sortVal;
if (LIST_EMPTY(&internals->filter_values)) {
LIST_INSERT_HEAD(&internals->filter_values, pInsertFilterValues, next);
return;
}
insertVal = pInsertFilterValues->layer +
(((uint32_t)pInsertFilterValues->offset << 16) & 0x00FF0000) +
(((uint32_t)pInsertFilterValues->size << 8) & 0x0000FF00);
LIST_FOREACH(pFilter_values, &internals->filter_values, next) {
sortVal = pFilter_values->layer +
(((uint32_t)pFilter_values->offset << 16) & 0x00FF0000) +
(((uint32_t)pFilter_values->size << 8) & 0x0000FF00);
if (sortVal > insertVal) {
LIST_INSERT_BEFORE(pFilter_values, pInsertFilterValues, next);
return;
}
pLast_value = pFilter_values;
}
LIST_INSERT_AFTER(pLast_value, pInsertFilterValues, next);
}
/**
* Create the NTPL filter expression part.
*/
static int SetFilter(int size,
uint64_t mask,
int offset,
bool tunnel,
enum layer_e layer,
const void *specVal,
const void *maskVal,
const void *lastVal,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
struct filter_values_s *pFilter_values = rte_zmalloc(internals->name, sizeof(struct filter_values_s), 0);
if (!pFilter_values) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Failed to allocate memory while setting up filter");
return -1;
}
memset(pFilter_values, 0, sizeof(struct filter_values_s));
// Store the filter values;
pFilter_values->offset = offset;
pFilter_values->size = size;
pFilter_values->layerString = GetLayer(layer, tunnel);
pFilter_values->layer = layer;
pFilter_values->mask = mask;
// Create the keylist depending on the size of the search value
switch (size)
{
case 16:
if (lastVal) {
pFilter_values->value.v16.lastVal = *((const uint16_t *)lastVal);
pFilter_values->value.v16.specVal = *((const uint16_t *)specVal);
}
else if (maskVal) {
pFilter_values->value.v16.maskVal = *((const uint16_t *)maskVal);
pFilter_values->value.v16.specVal = *((const uint16_t *)specVal);
}
else {
pFilter_values->value.v16.specVal = *((const uint16_t *)specVal);
}
break;
case 32:
if (lastVal) {
pFilter_values->value.v32.lastVal = *((const uint32_t *)lastVal);
pFilter_values->value.v32.specVal = *((const uint32_t *)specVal);
}
else if (maskVal) {
pFilter_values->value.v32.maskVal = *((const uint32_t *)maskVal);
pFilter_values->value.v32.specVal = *((const uint32_t *)specVal);
}
else {
pFilter_values->value.v32.specVal = *((const uint32_t *)specVal);
}
break;
case 64:
if (lastVal) {
pFilter_values->value.v64.lastVal = *((const uint64_t *)lastVal);
pFilter_values->value.v64.specVal = *((const uint64_t *)specVal);
}
else if (maskVal) {
pFilter_values->value.v64.maskVal = *((const uint64_t *)maskVal);
pFilter_values->value.v64.specVal = *((const uint64_t *)specVal);
}
else {
pFilter_values->value.v64.specVal = *((const uint64_t *)specVal);
}
break;
case 128:
if (lastVal) {
const char *vLast = (const char *)lastVal;
const char *vSpec = (const char *)specVal;
memcpy(pFilter_values->value.v128.lastVal, vLast, 16);
memcpy(pFilter_values->value.v128.specVal, vSpec, 16);
}
else if (maskVal) {
const char *vMask = (const char *)maskVal;
const char *vSpec = (const char *)specVal;
memcpy(pFilter_values->value.v128.maskVal, vMask, 16);
memcpy(pFilter_values->value.v128.specVal, vSpec, 16);
}
else {
const char *vSpec = (const char *)specVal;
memcpy(pFilter_values->value.v128.specVal, vSpec, 16);
}
break;
}
NTACC_LOCK(&internals->lock);
InsertFilterValues(pFilter_values, internals);
NTACC_UNLOCK(&internals->lock);
return 0;
}
void DeleteKeyset(int key, struct pmd_internals *internals, struct rte_flow_error *error) {
char ntpl_buf[21];
struct filter_keyset_s *key_set;
LIST_FOREACH(key_set, &internals->filter_keyset, next) {
if (key_set->key == key) {
LIST_REMOVE(key_set, next);
snprintf(ntpl_buf, 20, "delete=%d", key_set->ntpl_id2);
NTACC_LOCK(&internals->configlock);
DoNtpl(ntpl_buf, NULL, internals, error);
NTACC_UNLOCK(&internals->configlock);
snprintf(ntpl_buf, 20, "delete=%d", key_set->ntpl_id1);
NTACC_LOCK(&internals->configlock);
DoNtpl(ntpl_buf, NULL, internals, error);
NTACC_UNLOCK(&internals->configlock);
rte_free(key_set);
return;
}
}
}
/******************************************************
Find the keyset for multiple shared filter commands.
A filter command is shared when:
typeMask = commands and fields in the different commands
plist_queues = Queues used by the commands
port = Adapter port
are the same. This means that the filter can be
optimized to take less space in the FPGA.
If no match is found then it is the first
command or optimization cannot be done.
*******************************************************/
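/*
 * Illustrative example: two flows on port 0 that both match outer IPv4
 * src+dst (same typeMask) and steer to queues {0,1} share one KeyType/KeyDef
 * pair; only a new KeyList entry is written for the second flow (see the
 * reuse path in CreateOptimizedFilter below).
 */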
static int FindKeyset(uint64_t typeMask, uint8_t *plist_queues, uint8_t nb_queues, struct pmd_internals *internals)
{
struct filter_keyset_s *key_set;
int match = 0;
int i;
LIST_FOREACH(key_set, &internals->filter_keyset, next) {
if (key_set->typeMask == typeMask && nb_queues == key_set->nb_queues && key_set->port == internals->port) {
match = 0;
for (i = 0; i < nb_queues; i++) {
if (plist_queues[i] == key_set->list_queues[i]) {
match++;
}
}
if (match == nb_queues) {
return key_set->key;
}
}
}
return 0;
}
/******************************************************
  Find the NTPL ID of the assign command, as the same
  assign command is reused when multiple shared
  filter commands are used.
A filter command is shared when:
typeMask = commands and fields in the different commands
plist_queues = Queues used by the commands
port = Adapter port
are the same. This means that the filter can be
optimized to take less space in the FPGA.
If no assign NTPL ID is found then it is the first
command or optimization cannot be done.
*******************************************************/
static inline uint32_t FindAssignNtplID(uint64_t typeMask, uint8_t *plist_queues, uint8_t nb_queues, struct pmd_internals *internals)
{
struct rte_flow *pFlow;
int match = 0;
int i;
LIST_FOREACH(pFlow, &internals->flows, next) {
if (pFlow->typeMask == typeMask && nb_queues == pFlow->nb_queues && pFlow->port == internals->port) {
match = 0;
for (i = 0; i < nb_queues; i++) {
if (plist_queues[i] == pFlow->list_queues[i]) {
match++;
}
}
if (match == nb_queues) {
return pFlow->assign_ntpl_id;
}
}
}
return 0;
}
//#define DUMP_FLOWS
#ifdef DUMP_FLOWS
static void DumpFlows(struct pmd_internals *internals)
{
struct rte_flow *pFlow;
struct filter_flow *pNtlpid;
unsigned i = 0;
printf("Dump flows\n");
printf("----------\n");
LIST_FOREACH(pFlow, &internals->flows, next) {
unsigned j = 0;
printf("Flow no %u\n", i++);
printf("P %u, K %u, M %016llX. Queues:", pFlow->port, pFlow->key, (long long unsigned int)pFlow->typeMask);
for (j = 0; j < pFlow->nb_queues; j++) {
printf(" %u", pFlow->list_queues[j]);
}
printf("\nNTPL ID: Assign = %u", pFlow->assign_ntpl_id);
LIST_FOREACH(pNtlpid, &pFlow->ntpl_id, next) {
printf(" %u", pNtlpid->ntpl_id);
}
printf("\n---------------------------------------------------\n");
}
}
#endif
bool IsFilterReuse(struct pmd_internals *internals,
uint64_t typeMask,
uint8_t *plist_queues,
uint8_t nb_queues,
int *key)
{
if (LIST_EMPTY(&internals->filter_keyset)) {
*key = 0;
return false;
}
*key = FindKeyset(typeMask, plist_queues, nb_queues, internals);
if (*key == 0)
return false;
else
return true;
}
int CreateOptimizedFilter(char *ntpl_buf,
struct pmd_internals *internals,
struct rte_flow *flow,
bool *fc,
uint64_t typeMask,
uint8_t *plist_queues,
uint8_t nb_queues,
int key,
struct color_s *pColor,
struct rte_flow_error *error)
{
struct filter_values_s *pFilter_values;
int iRet = 0;
bool first = true;
char *filter_buffer1 = NULL;
char *filter_buffer2 = NULL;
char *filter_buffer3 = NULL;
int i;
uint32_t ntplID;
bool reuse = true;
#ifdef DUMP_FLOWS
DumpFlows(internals);
#endif
NTACC_LOCK(&internals->lock);
if (LIST_EMPTY(&internals->filter_values)) {
NTACC_UNLOCK(&internals->lock);
return 0;
}
first = true;
/*************************************************************/
/* Make the keytype and keydef commands */
/*************************************************************/
if (key == 0) {
struct filter_keyset_s *key_set = rte_zmalloc(internals->name, sizeof(struct filter_keyset_s), 0);
if (!key_set) {
iRet = -1;
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Allocating memory failed");
goto Errors;
}
key = GetKeysetValue(internals);
if (key < 0) {
rte_free(key_set);
iRet = -1;
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Internal error: Illegal key set value returned");
goto Errors;
}
key_set->key = key;
key_set->typeMask = typeMask;
filter_buffer2 = rte_malloc(internals->name, NTPL_BSIZE + 1, 0);
if (!filter_buffer2) {
iRet = -1;
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Allocating memory failed");
goto Errors;
}
filter_buffer3 = rte_malloc(internals->name, NTPL_BSIZE + 1, 0);
if (!filter_buffer3) {
iRet = -1;
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Allocating memory failed");
goto Errors;
}
LIST_FOREACH(pFilter_values, &internals->filter_values, next) {
if (first) {
if (pColor->type == ONE_COLOR) {
snprintf(filter_buffer3, NTPL_BSIZE,
"KeyType[name=KT%u;Access=partial;Bank=0;colorinfo=true;tag=%s]={", key, internals->tagName);
}
else {
snprintf(filter_buffer3, NTPL_BSIZE,
"KeyType[name=KT%u;Access=partial;Bank=0;tag=%s]={", key, internals->tagName);
}
snprintf(filter_buffer2, NTPL_BSIZE,
"KeyDef[name=KDEF%u;KeyType=KT%u;tag=%s]=(", key, key, internals->tagName);
first=false;
}
else {
snprintf(&filter_buffer3[strlen(filter_buffer3)], NTPL_BSIZE - strlen(filter_buffer3) - 1, ",");
snprintf(&filter_buffer2[strlen(filter_buffer2)], NTPL_BSIZE - strlen(filter_buffer2) - 1, ",");
}
snprintf(&filter_buffer3[strlen(filter_buffer3)], NTPL_BSIZE - strlen(filter_buffer3) - 1, "%u", pFilter_values->size);
if (pFilter_values->size == 128 && pFilter_values->layer == LAYER2) {
// This is an ethernet address
snprintf(&filter_buffer2[strlen(filter_buffer2)], NTPL_BSIZE - strlen(filter_buffer2) - 1,
"{0xFFFFFFFFFFFFFFFFFFFFFFFF00000000:%s[%u]/%u}", pFilter_values->layerString, pFilter_values->offset, pFilter_values->size);
}
else {
if (pFilter_values->mask != 0) {
snprintf(&filter_buffer2[strlen(filter_buffer2)], NTPL_BSIZE - strlen(filter_buffer2) - 1,
"{0x%llX:%s[%u]/%u}", (const long long unsigned int)pFilter_values->mask, pFilter_values->layerString, pFilter_values->offset, pFilter_values->size);
}
else {
snprintf(&filter_buffer2[strlen(filter_buffer2)], NTPL_BSIZE - strlen(filter_buffer2) - 1,
"%s[%u]/%u", pFilter_values->layerString, pFilter_values->offset, pFilter_values->size);
}
}
}
snprintf(&filter_buffer3[strlen(filter_buffer3)], NTPL_BSIZE - strlen(filter_buffer3) - 1, "}");
snprintf(&filter_buffer2[strlen(filter_buffer2)], NTPL_BSIZE - strlen(filter_buffer2) - 1, ")");
if (DoNtpl(filter_buffer3, &ntplID, internals, error)) {
rte_free(key_set);
iRet = -1;
goto Errors;
}
key_set->ntpl_id1 = ntplID;
if (DoNtpl(filter_buffer2, &ntplID, internals, error)) {
rte_free(key_set);
iRet = -1;
goto Errors;
}
key_set->ntpl_id2 = ntplID;
for (i = 0; i < nb_queues; i++) {
key_set->list_queues[i] = plist_queues[i];
}
key_set->nb_queues = nb_queues;
key_set->port = internals->port;
LIST_INSERT_HEAD(&internals->filter_keyset, key_set, next);
flow->assign_ntpl_id = 0;
reuse = false;
}
else {
flow->assign_ntpl_id = FindAssignNtplID(typeMask, plist_queues, nb_queues, internals);
}
for (i = 0; i < nb_queues; i++) {
flow->list_queues[i] = plist_queues[i];
}
flow->nb_queues = nb_queues;
flow->port = internals->port;
flow->key = key;
flow->typeMask = typeMask;
first = true;
/*************************************************************/
/* Make the keylist command */
/*************************************************************/
filter_buffer1 = rte_malloc(internals->name, NTPL_BSIZE + 1, 0);
if (!filter_buffer1) {
iRet = -1;
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "Allocating memory failed");
goto Errors;
}
while (!LIST_EMPTY(&internals->filter_values)) {
pFilter_values = LIST_FIRST(&internals->filter_values);
LIST_REMOVE(pFilter_values, next);
if (first) {
if (pColor->type == ONE_COLOR) {
snprintf(filter_buffer1, NTPL_BSIZE,
"KeyList[KeySet=%u;KeyType=KT%u;color=%u;tag=%s]=(", key, key, pColor->color, internals->tagName);
}
else {
snprintf(filter_buffer1, NTPL_BSIZE,
"KeyList[KeySet=%u;KeyType=KT%u;tag=%s]=(", key, key, internals->tagName);
}
first=false;
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1, ",");
}
switch (pFilter_values->size)
{
case 16:
if (pFilter_values->value.v16.lastVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"(0x%04X..0x%04X)", pFilter_values->value.v16.specVal, pFilter_values->value.v16.lastVal);
}
else if (pFilter_values->value.v16.maskVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"{0x%04X:0x%04X}", pFilter_values->value.v16.maskVal, pFilter_values->value.v16.specVal);
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"0x%04X", pFilter_values->value.v16.specVal);
}
break;
case 32:
if (pFilter_values->value.v32.lastVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"(0x%08X..0x%08X)", pFilter_values->value.v32.specVal, pFilter_values->value.v32.lastVal);
}
else if (pFilter_values->value.v32.maskVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"{0x%08X:0x%08X}", pFilter_values->value.v32.maskVal, pFilter_values->value.v32.specVal);
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"0x%08X", pFilter_values->value.v32.specVal);
}
break;
case 64:
if (pFilter_values->value.v64.lastVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"(0x%016llX..0x%016llX)", (long long unsigned int)pFilter_values->value.v64.specVal,
(long long unsigned int)pFilter_values->value.v64.lastVal);
}
else if (pFilter_values->value.v64.maskVal) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"{0x%016llX:0x%016llX}", (long long unsigned int)pFilter_values->value.v64.maskVal,
(long long unsigned int)pFilter_values->value.v64.specVal);
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"0x%016llX", (long long unsigned int)pFilter_values->value.v64.specVal);
}
break;
case 128:
if (pFilter_values->layer == LAYER2) {
if (NON_ZERO16(pFilter_values->value.v128.lastVal)) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"(0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X.."
"0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X)",
MAC_ADDRESS2(pFilter_values->value.v128.specVal), MAC_ADDRESS2(pFilter_values->value.v128.lastVal));
}
else if (NON_ZERO16(pFilter_values->value.v128.maskVal)) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"{0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X:"
"0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X}",
MAC_ADDRESS2(pFilter_values->value.v128.maskVal), MAC_ADDRESS2(pFilter_values->value.v128.specVal));
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"0x%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X",
MAC_ADDRESS2(pFilter_values->value.v128.specVal));
}
}
else {
if (NON_ZERO16(pFilter_values->value.v128.lastVal)) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"([%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X].."
"[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X])",
IPV6_ADDRESS(pFilter_values->value.v128.specVal), IPV6_ADDRESS(pFilter_values->value.v128.lastVal));
}
else if (NON_ZERO16(pFilter_values->value.v128.maskVal)) {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"{[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]:"
"[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]}",
IPV6_ADDRESS(pFilter_values->value.v128.maskVal), IPV6_ADDRESS(pFilter_values->value.v128.specVal));
}
else {
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1,
"[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]",
IPV6_ADDRESS(pFilter_values->value.v128.specVal));
}
}
break;
}
    /* Each entry was unlinked above; free it here so only the last one
     * does not leak. */
    rte_free(pFilter_values);
    pFilter_values = NULL;
  }
snprintf(&filter_buffer1[strlen(filter_buffer1)], NTPL_BSIZE - strlen(filter_buffer1) - 1, ")");
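  /*
   * Illustrative NTPL output (hypothetical values): after the loop above, a
   * two-entry key list for key set 3 with tag "port0" would read
   *   KeyList[KeySet=3;KeyType=KT3;tag=port0]=(0x0050,(0x1000..0x2000))
   * where the parenthesised second entry is a range match and a {mask:value}
   * entry would denote a masked match.
   */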
// Set keylist filter
if (DoNtpl(filter_buffer1, &ntplID, internals, error)) {
iRet = -1;
goto Errors;
}
pushNtplID(flow, ntplID);
if (pFilter_values) {
rte_free(pFilter_values);
}
if (!(reuse)) {
if (*fc)
strcat(ntpl_buf, " and ");
*fc = true;
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "Key(KDEF%u)==%u", key, key);
}
Errors:
NTACC_UNLOCK(&internals->lock);
if (filter_buffer1) {
rte_free(filter_buffer1);
}
if (filter_buffer2) {
rte_free(filter_buffer2);
}
if (filter_buffer3) {
rte_free(filter_buffer3);
}
return iRet;
}
/**
* Setup an ethernet filter.
*/
int SetEthernetFilter(const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_eth *spec = (const struct rte_flow_item_eth *)item->spec;
const struct rte_flow_item_eth *mask = (const struct rte_flow_item_eth *)item->mask;
const struct rte_flow_item_eth *last = (const struct rte_flow_item_eth *)item->last;
if ((spec || mask || last) && internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Ethernet filter not supported for this adapter");
return -1;
}
bool singleSetup = true;
if (spec && NON_ZERO6(spec->src.addr_bytes) && NON_ZERO6(spec->dst.addr_bytes)) {
if (last && (NON_ZERO6(last->src.addr_bytes) || NON_ZERO6(last->dst.addr_bytes))) {
singleSetup = true;
}
else if (mask && CHECKETHER(mask->src.addr_bytes) && CHECKETHER(mask->dst.addr_bytes)) {
uint8_t addr0[16];
uint8_t addr1[16];
memset(addr0, 0, 16 * sizeof(uint8_t));
memset(addr1, 0, 16 * sizeof(uint8_t));
MAC_ADDRESS_SWAP(spec->dst.addr_bytes,addr0);
MAC_ADDRESS_SWAP(spec->src.addr_bytes,(&addr0[6]));
MAC_ADDRESS_SWAP(mask->dst.addr_bytes,addr1);
MAC_ADDRESS_SWAP(mask->src.addr_bytes,(&addr1[6]));
if (SetFilter(128, 0, 0, tnl, LAYER2, (const void *)addr0, (const void *)addr1, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_DST | ETHER_ADDR_SRC);
singleSetup = false;
}
else if (!mask || (mask && !CHECKETHER(mask->src.addr_bytes) && !CHECKETHER(mask->dst.addr_bytes))) {
      /* Setup source and destination simple filter */
uint8_t addr[16];
memset(addr, 0, 16 * sizeof(uint8_t));
MAC_ADDRESS_SWAP(spec->dst.addr_bytes,addr);
MAC_ADDRESS_SWAP(spec->src.addr_bytes,(&addr[6]));
if (SetFilter(128, 0, 0, tnl, LAYER2, (const void *)addr, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_DST | ETHER_ADDR_SRC);
singleSetup = false;
}
}
if (singleSetup) {
if (spec && NON_ZERO6(spec->src.addr_bytes)) {
if (last && NON_ZERO6(last->src.addr_bytes)) {
/* Setup source range filter */
      uint8_t addr0[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
      uint8_t addr1[8] = { 0 };
MAC_ADDRESS_SWAP(spec->src.addr_bytes,addr0);
MAC_ADDRESS_SWAP(last->src.addr_bytes,addr1);
const uint64_t *tmp0 = (const uint64_t *)addr0;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
const uint64_t *tmp1 = (const uint64_t *)addr1;
uint64_t vLast = ((*tmp1) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 6, tnl, LAYER2, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_SRC);
}
else if (mask && CHECKETHER(mask->src.addr_bytes)) {
/* Setup source mask filter */
      uint8_t addr0[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
      uint8_t addr1[8] = { 0 };
MAC_ADDRESS_SWAP(spec->src.addr_bytes,addr0);
MAC_ADDRESS_SWAP(mask->src.addr_bytes,addr1);
const uint64_t *tmp0 = (const uint64_t *)addr0;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
const uint64_t *tmp1 = (const uint64_t *)addr1;
uint64_t vMask = ((*tmp1) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 6, tnl, LAYER2, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_SRC);
}
else {
      /* Setup source simple filter */
      uint8_t addr[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
MAC_ADDRESS_SWAP(spec->src.addr_bytes,addr);
const uint64_t *tmp0 = (const uint64_t *)addr;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 6, tnl, LAYER2, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_SRC);
}
}
if (spec && NON_ZERO6(spec->dst.addr_bytes)) {
if (last && NON_ZERO6(last->dst.addr_bytes)) {
/* Setup destination range filter */
      uint8_t addr0[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
      uint8_t addr1[8] = { 0 };
MAC_ADDRESS_SWAP(spec->dst.addr_bytes,addr0);
MAC_ADDRESS_SWAP(last->dst.addr_bytes,addr1);
const uint64_t *tmp0 = (const uint64_t *)addr0;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
const uint64_t *tmp1 = (const uint64_t *)addr1;
uint64_t vLast = ((*tmp1) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 0, tnl, LAYER2, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_DST);
}
else if (mask && CHECKETHER(mask->dst.addr_bytes)) {
/* Setup destination mask filter */
      uint8_t addr0[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
      uint8_t addr1[8] = { 0 };
MAC_ADDRESS_SWAP(spec->dst.addr_bytes,addr0);
MAC_ADDRESS_SWAP(mask->dst.addr_bytes,addr1);
const uint64_t *tmp0 = (const uint64_t *)addr0;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
const uint64_t *tmp1 = (const uint64_t *)addr1;
uint64_t vMask = ((*tmp1) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 0, tnl, LAYER2, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_DST);
}
else {
      /* Setup destination simple filter */
      uint8_t addr[8] = { 0 };  /* padded to 8 bytes so the 64-bit read below stays in bounds */
MAC_ADDRESS_SWAP(spec->dst.addr_bytes,addr);
const uint64_t *tmp0 = (const uint64_t *)addr;
uint64_t vSpec = ((*tmp0) << 16) & 0xFFFFFFFFFFFF0000;
if (SetFilter(64, 0xFFFFFFFFFFFF0000, 0, tnl, LAYER2, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_ADDR_DST);
}
}
}
if (spec && spec->type) {
if (last && last->type) {
/* Setup type range filter */
uint16_t vSpec = rte_bswap16(spec->type);
uint16_t vLast = rte_bswap16(last->type);
if (SetFilter(16, 0, 12, tnl, LAYER2, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_TYPE);
}
else if (CHECK16(mask, type)) {
/* Setup type mask filter */
uint16_t vSpec = rte_bswap16(spec->type);
uint16_t vMask = rte_bswap16(mask->type);
if (SetFilter(16, 0, 12, tnl, LAYER2, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_TYPE);
}
else {
      /* Setup type simple filter */
uint16_t vSpec = rte_bswap16(spec->type);
if (SetFilter(16, 0, 12, tnl, LAYER2, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= (ETHER_TYPE);
}
}
return 0;
}
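/*
 * Usage sketch (illustrative, not part of the driver): how a flow-create
 * path might hand a masked destination-MAC ETH item to SetEthernetFilter().
 * The pmd_internals and rte_flow_error objects are assumed to come from the
 * caller; the MAC values are placeholders.
 */
#ifdef NTACC_FILTER_USAGE_EXAMPLE
static int example_eth_filter(struct pmd_internals *internals,
                              struct rte_flow_error *error)
{
  static const struct rte_flow_item_eth spec = {
    .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
  };
  static const struct rte_flow_item_eth mask = {
    .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
  };
  const struct rte_flow_item item = {
    .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &spec, .mask = &mask,
  };
  uint64_t typeMask = 0;
  /* tnl=false: match on the outer layer 2 header */
  return SetEthernetFilter(&item, false, &typeMask, internals, error);
}
#endif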
/**
* Setup an IPv4 filter.
*/
int SetIPV4Filter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *spec = (const struct rte_flow_item_ipv4 *)item->spec;
const struct rte_flow_item_ipv4 *mask = (const struct rte_flow_item_ipv4 *)item->mask;
const struct rte_flow_item_ipv4 *last = (const struct rte_flow_item_ipv4 *)item->last;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer3Protocol==IPV4)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer3Protocol==IPV4)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only IPv4 flow without values supported for this adapter");
return -1;
}
if (spec && (spec->hdr.version_ihl && spec->hdr.type_of_service)) {
    if (last && (last->hdr.version_ihl || last->hdr.type_of_service)) {
      rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, NULL, "last not supported for version_ihl and type_of_service");
      return -1;
    }
if (CHECK8(mask, hdr.version_ihl) || CHECK8(mask, hdr.type_of_service)) {
uint16_t vSpec;
uint16_t vMask;
vSpec = ((spec->hdr.version_ihl << 8) & 0xFF00) | spec->hdr.type_of_service;
vMask = ((mask->hdr.version_ihl << 8) & 0xFF00) | mask->hdr.type_of_service;
if (SetFilter(16, 0, 0, tnl, LAYER3, (void *)&vSpec, (void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_VERSION_IHL | IPV4_TYPE_OF_SERVICE;
}
else {
uint16_t vSpec;
vSpec = ((spec->hdr.version_ihl << 8) & 0xFF00) | spec->hdr.type_of_service;
if (SetFilter(16, 0, 0, tnl, LAYER3, (void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_VERSION_IHL | IPV4_TYPE_OF_SERVICE;
}
}
else {
if (spec && spec->hdr.type_of_service) {
if (last && last->hdr.type_of_service) {
uint16_t vSpec;
uint16_t vLast;
vSpec = spec->hdr.type_of_service;
vLast = last->hdr.type_of_service;
if (SetFilter(16, 0xFF, 0, tnl, LAYER3, (void *)&vSpec, NULL, (void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TYPE_OF_SERVICE;
} else if (CHECK8(mask, hdr.type_of_service)) {
uint16_t vSpec;
uint16_t vMask;
vSpec = spec->hdr.type_of_service;
vMask = mask->hdr.type_of_service;
if (SetFilter(16, 0xFF, 0, tnl, LAYER3, (void *)&vSpec, (void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TYPE_OF_SERVICE;
}
else {
uint16_t vSpec;
vSpec = spec->hdr.type_of_service;
if (SetFilter(16, 0xFF, 0, tnl, LAYER3, (void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TYPE_OF_SERVICE;
}
}
if (spec && spec->hdr.version_ihl) {
      if (last && last->hdr.version_ihl) {
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, NULL, "last not supported for version_ihl");
        return -1;
      }
if (CHECK8(mask, hdr.version_ihl)) {
uint16_t vSpec;
uint16_t vMask;
vSpec = (spec->hdr.version_ihl << 8) & 0xFF00;
vMask = (mask->hdr.version_ihl << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 0, tnl, LAYER3, (void *)&vSpec, (void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_VERSION_IHL;
}
else {
uint16_t vSpec;
vSpec = (spec->hdr.version_ihl << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 0, tnl, LAYER3, (void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_VERSION_IHL;
}
}
}
if (spec && spec->hdr.total_length) {
if (last && last->hdr.total_length) {
uint16_t vSpec = rte_bswap16(spec->hdr.total_length);
uint16_t vLast = rte_bswap16(last->hdr.total_length);
if (SetFilter(16, 0, 2, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TOTAL_LENGTH;
}
else if (CHECK16(mask, hdr.total_length)) {
uint16_t vSpec = rte_bswap16(spec->hdr.total_length);
uint16_t vMask = rte_bswap16(mask->hdr.total_length);
if (SetFilter(16, 0, 2, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TOTAL_LENGTH;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.total_length);
if (SetFilter(16, 0, 2, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TOTAL_LENGTH;
}
}
if (spec && spec->hdr.packet_id) {
if (last && last->hdr.packet_id) {
uint16_t vSpec = rte_bswap16(spec->hdr.packet_id);
uint16_t vLast = rte_bswap16(last->hdr.packet_id);
if (SetFilter(16, 0, 4, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_PACKET_ID;
}
else if (CHECK16(mask, hdr.packet_id)) {
uint16_t vSpec = rte_bswap16(spec->hdr.packet_id);
uint16_t vMask = rte_bswap16(mask->hdr.packet_id);
if (SetFilter(16, 0, 4, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_PACKET_ID;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.packet_id);
if (SetFilter(16, 0, 4, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_PACKET_ID;
}
}
if (spec && spec->hdr.fragment_offset) {
if (last && last->hdr.fragment_offset) {
uint16_t vSpec = rte_bswap16(spec->hdr.fragment_offset);
uint16_t vLast = rte_bswap16(last->hdr.fragment_offset);
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_FRAGMENT_OFFSET;
}
else if (CHECK16(mask, hdr.fragment_offset)) {
uint16_t vSpec = rte_bswap16(spec->hdr.fragment_offset);
uint16_t vMask = rte_bswap16(mask->hdr.fragment_offset);
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_FRAGMENT_OFFSET;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.fragment_offset);
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_FRAGMENT_OFFSET;
}
}
if (spec && (spec->hdr.time_to_live && spec->hdr.next_proto_id)) {
    if (last && (last->hdr.time_to_live || last->hdr.next_proto_id)) {
      rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, NULL, "last not supported for time_to_live and next_proto_id");
      return -1;
    }
if (CHECK8(mask, hdr.time_to_live) || CHECK8(mask, hdr.next_proto_id)) {
uint16_t vSpec;
uint16_t vMask;
vSpec = ((spec->hdr.time_to_live << 8) & 0xFF00) | spec->hdr.next_proto_id;
vMask = ((mask->hdr.time_to_live << 8) & 0xFF00) | mask->hdr.next_proto_id;
if (SetFilter(16, 0, 8, tnl, LAYER3, (void *)&vSpec, (void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TIME_TO_LIVE | IPV4_NEXT_PROTO_ID;
}
else {
uint16_t vSpec;
vSpec = ((spec->hdr.time_to_live << 8) & 0xFF00) | spec->hdr.next_proto_id;
if (SetFilter(16, 0, 8, tnl, LAYER3, (void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TIME_TO_LIVE | IPV4_NEXT_PROTO_ID;
}
}
else {
if (spec && spec->hdr.time_to_live) {
uint16_t vSpec;
uint16_t vMask;
uint16_t vLast;
if (last && last->hdr.time_to_live) {
vSpec = spec->hdr.time_to_live << 8;
vLast = last->hdr.time_to_live << 8;
if (SetFilter(16, 0xFF00, 8, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_TIME_TO_LIVE;
}
      else if (CHECK8(mask, hdr.time_to_live)) {
        vSpec = spec->hdr.time_to_live << 8;
        vMask = mask->hdr.time_to_live << 8;
        if (SetFilter(16, 0xFF00, 8, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
          return -1;
        }
        *typeMask |= IPV4_TIME_TO_LIVE;
      }
      else {
        vSpec = spec->hdr.time_to_live << 8;
        if (SetFilter(16, 0xFF00, 8, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
          return -1;
        }
        *typeMask |= IPV4_TIME_TO_LIVE;
      }
}
if (spec && spec->hdr.next_proto_id) {
uint16_t vSpec;
uint16_t vMask;
uint16_t vLast;
if (last && last->hdr.next_proto_id) {
vSpec = spec->hdr.next_proto_id;
vLast = last->hdr.next_proto_id;
if (SetFilter(16, 0x00FF, 8, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_NEXT_PROTO_ID;
}
else if (CHECK8(mask, hdr.next_proto_id)) {
vSpec = spec->hdr.next_proto_id;
vMask = mask->hdr.next_proto_id;
if (SetFilter(16, 0x00FF, 8, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_NEXT_PROTO_ID;
}
else {
vSpec = spec->hdr.next_proto_id;
if (SetFilter(16, 0x00FF, 8, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_NEXT_PROTO_ID;
}
}
}
bool singleSetup = true;
if (spec && spec->hdr.src_addr && spec->hdr.dst_addr) {
if (last && (last->hdr.src_addr || last->hdr.dst_addr)) {
singleSetup = true;
}
else if (CHECK32(mask, hdr.src_addr) || CHECK32(mask, hdr.dst_addr)) {
if (mask->hdr.src_addr && mask->hdr.dst_addr) {
uint64_t vSpec = (((uint64_t)rte_bswap32(spec->hdr.src_addr) << 32ULL) & 0xFFFFFFFF00000000ULL) | (uint64_t)rte_bswap32(spec->hdr.dst_addr);
uint64_t vMask = (((uint64_t)rte_bswap32(mask->hdr.src_addr) << 32ULL) & 0xFFFFFFFF00000000ULL) | (uint64_t)rte_bswap32(mask->hdr.dst_addr);
if (SetFilter(64, 0, 12, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_SRC_ADDR | IPV4_DST_ADDR;
singleSetup = false;
PRINT_IPV4("SRC", rte_bswap32(spec->hdr.src_addr));
PRINT_IPV4("DST", rte_bswap32(spec->hdr.dst_addr));
}
}
else {
uint64_t vSpec = (((uint64_t)rte_bswap32(spec->hdr.src_addr) << 32ULL) & 0xFFFFFFFF00000000ULL) | (uint64_t)rte_bswap32(spec->hdr.dst_addr);
if (SetFilter(64, 0, 12, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_SRC_ADDR | IPV4_DST_ADDR;
singleSetup = false;
PRINT_IPV4("SRC", rte_bswap32(spec->hdr.src_addr));
PRINT_IPV4("DST", rte_bswap32(spec->hdr.dst_addr));
}
}
if (singleSetup) {
if (spec && spec->hdr.src_addr) {
if (last && last->hdr.src_addr) {
uint32_t vSpec = rte_bswap32(spec->hdr.src_addr);
uint32_t vLast = rte_bswap32(last->hdr.src_addr);
if (SetFilter(32, 0, 12, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_SRC_ADDR;
PRINT_IPV4("SRC", rte_bswap32(spec->hdr.src_addr));
}
else if (CHECK32(mask, hdr.src_addr)) {
uint32_t vSpec = rte_bswap32(spec->hdr.src_addr);
uint32_t vMask = rte_bswap32(mask->hdr.src_addr);
if (SetFilter(32, 0, 12, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_SRC_ADDR;
PRINT_IPV4("SRC", rte_bswap32(spec->hdr.src_addr));
}
else {
uint32_t vSpec = rte_bswap32(spec->hdr.src_addr);
if (SetFilter(32, 0, 12, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_SRC_ADDR;
PRINT_IPV4("SRC", rte_bswap32(spec->hdr.src_addr));
}
}
if (spec && spec->hdr.dst_addr) {
if (last && last->hdr.dst_addr) {
uint32_t vSpec = rte_bswap32(spec->hdr.dst_addr);
uint32_t vLast = rte_bswap32(last->hdr.dst_addr);
if (SetFilter(32, 0, 16, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_DST_ADDR;
PRINT_IPV4("DST", rte_bswap32(spec->hdr.dst_addr));
}
else if (CHECK32(mask, hdr.dst_addr)) {
uint32_t vSpec = rte_bswap32(spec->hdr.dst_addr);
uint32_t vMask = rte_bswap32(mask->hdr.dst_addr);
if (SetFilter(32, 0, 16, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_DST_ADDR;
PRINT_IPV4("DST", rte_bswap32(spec->hdr.dst_addr));
}
else {
uint32_t vSpec = rte_bswap32(spec->hdr.dst_addr);
if (SetFilter(32, 0, 16, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV4_DST_ADDR;
PRINT_IPV4("DST", rte_bswap32(spec->hdr.dst_addr));
}
}
}
return 0;
}
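/*
 * Worked example (illustrative): matching src 10.0.0.1 and dst 10.0.0.2
 * takes the combined branch above and packs both addresses into a single
 * 64-bit key at layer-3 offset 12:
 *   vSpec = (0x0A000001ULL << 32) | 0x0A000002ULL = 0x0A0000010A000002ULL
 */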
int SetIPV6Filter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv6 *spec = (const struct rte_flow_item_ipv6 *)item->spec;
const struct rte_flow_item_ipv6 *mask = (const struct rte_flow_item_ipv6 *)item->mask;
const struct rte_flow_item_ipv6 *last = (const struct rte_flow_item_ipv6 *)item->last;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer3Protocol==IPV6)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer3Protocol==IPV6)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only IPv6 flow without values supported for this adapter");
return -1;
}
if (spec && (spec->hdr.vtc_flow)) {
if (last && (last->hdr.vtc_flow)) {
uint32_t vSpec = rte_bswap32(spec->hdr.vtc_flow);
uint32_t vLast = rte_bswap32(last->hdr.vtc_flow);
if (SetFilter(32, 0, 0, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_VTC_FLOW;
}
else if (CHECK32(mask, hdr.vtc_flow)) {
uint32_t vSpec = rte_bswap32(spec->hdr.vtc_flow);
uint32_t vMask = rte_bswap32(mask->hdr.vtc_flow);
if (SetFilter(32, 0, 0, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_VTC_FLOW;
}
else {
uint32_t vSpec = rte_bswap32(spec->hdr.vtc_flow);
if (SetFilter(32, 0, 0, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_VTC_FLOW;
}
}
if (spec && spec->hdr.proto && spec->hdr.hop_limits) {
if (last && last->hdr.proto && last->hdr.hop_limits) {
uint16_t vSpec = ((spec->hdr.proto << 8) & 0xFF00) + spec->hdr.hop_limits;
uint16_t vLast = ((last->hdr.proto << 8) & 0xFF00) + last->hdr.hop_limits;
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO | IPV6_HOP_LIMITS;
}
else if (CHECK8(mask, hdr.proto) && CHECK8(mask, hdr.hop_limits)) {
uint16_t vSpec = ((spec->hdr.proto << 8) & 0xFF00) + spec->hdr.hop_limits;
uint16_t vMask = ((mask->hdr.proto << 8) & 0xFF00) + mask->hdr.hop_limits;
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO | IPV6_HOP_LIMITS;
}
else {
uint16_t vSpec = ((spec->hdr.proto << 8) & 0xFF00) + spec->hdr.hop_limits;
if (SetFilter(16, 0, 6, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO | IPV6_HOP_LIMITS;
}
}
else {
if (spec && (spec->hdr.proto)) {
if (last && (last->hdr.proto)) {
uint16_t vSpec = (spec->hdr.proto << 8) & 0xFF00;
uint16_t vLast = (last->hdr.proto << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 6, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO;
}
else if (CHECK8(mask, hdr.proto)) {
uint16_t vSpec = (spec->hdr.proto << 8) & 0xFF00;
uint16_t vMask = (mask->hdr.proto << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 6, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO;
}
else {
uint16_t vSpec = (spec->hdr.proto << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 6, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_PROTO;
}
}
if (spec && (spec->hdr.hop_limits)) {
if (last && (last->hdr.hop_limits)) {
uint16_t vSpec = (spec->hdr.hop_limits);
uint16_t vLast = (last->hdr.hop_limits);
if (SetFilter(16, 0xFF, 6, tnl, LAYER3, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_HOP_LIMITS;
}
else if (CHECK8(mask, hdr.hop_limits)) {
uint16_t vSpec = (spec->hdr.hop_limits);
uint16_t vMask = (mask->hdr.hop_limits);
if (SetFilter(16, 0xFF, 6, tnl, LAYER3, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_HOP_LIMITS;
}
else {
uint16_t vSpec = (spec->hdr.hop_limits);
if (SetFilter(16, 0xFF, 6, tnl, LAYER3, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_HOP_LIMITS;
}
}
}
if (spec && NON_ZERO16(spec->hdr.src_addr)) {
if (last && NON_ZERO16(last->hdr.src_addr)) {
if (SetFilter(128, 0, 8, tnl, LAYER3, (const void *)&spec->hdr.src_addr, NULL, (const void *)&last->hdr.src_addr, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_SRC_ADDR;
}
else if (mask && CHECKIPV6(mask->hdr.src_addr)) {
if (SetFilter(128, 0, 8, tnl, LAYER3, (const void *)&spec->hdr.src_addr, (const void *)&mask->hdr.src_addr, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_SRC_ADDR;
}
else {
if (SetFilter(128, 0, 8, tnl, LAYER3, (const void *)&spec->hdr.src_addr, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_SRC_ADDR;
}
}
if (spec && NON_ZERO16(spec->hdr.dst_addr)) {
if (last && NON_ZERO16(last->hdr.dst_addr)) {
if (SetFilter(128, 0, 24, tnl, LAYER3, (const void *)&spec->hdr.dst_addr, NULL, (const void *)&last->hdr.dst_addr, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_DST_ADDR;
}
else if (mask && CHECKIPV6(mask->hdr.dst_addr)) {
if (SetFilter(128, 0, 24, tnl, LAYER3, (const void *)&spec->hdr.dst_addr, (const void *)&mask->hdr.dst_addr, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_DST_ADDR;
}
else {
if (SetFilter(128, 0, 24, tnl, LAYER3, (const void *)&spec->hdr.dst_addr, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= IPV6_DST_ADDR;
}
}
return 0;
}
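/*
 * Worked example (illustrative): proto=6 (TCP) with hop_limits=64 packs
 * into one 16-bit key matched at layer-3 offset 6:
 *   vSpec = ((6 << 8) & 0xFF00) + 64 = 0x0640
 */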
int SetTCPFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_tcp *spec = (const struct rte_flow_item_tcp *)item->spec;
const struct rte_flow_item_tcp *mask = (const struct rte_flow_item_tcp *)item->mask;
const struct rte_flow_item_tcp *last = (const struct rte_flow_item_tcp *)item->last;
bool singleSetup = true;
if (*fc)
strcat(ntpl_buf, " and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer4Protocol==TCP)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer4Protocol==TCP)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only TCP flow without values supported for this adapter");
return -1;
}
  if (spec && spec->hdr.src_port && spec->hdr.dst_port) {
if (last && (last->hdr.src_port || last->hdr.dst_port)) {
singleSetup = true;
}
else if (CHECK16(mask, hdr.src_port) || CHECK16(mask, hdr.dst_port)) {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
uint32_t vMask = ((rte_bswap16(mask->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(mask->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_SRC_PORT | TCP_DST_PORT;
singleSetup = false;
}
else {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_SRC_PORT | TCP_DST_PORT;
singleSetup = false;
}
}
if (singleSetup) {
if (spec && spec->hdr.src_port) {
if (last && last->hdr.src_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vLast = rte_bswap16(last->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_SRC_PORT;
}
else if (CHECK16(mask, hdr.src_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vMask = rte_bswap16(mask->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_SRC_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_SRC_PORT;
}
}
if (spec && spec->hdr.dst_port) {
if (last && last->hdr.dst_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vLast = rte_bswap16(last->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DST_PORT;
}
else if (CHECK16(mask, hdr.dst_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vMask = rte_bswap16(mask->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DST_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DST_PORT;
}
}
}
if (spec && spec->hdr.data_off && spec->hdr.tcp_flags) {
if (last && (last->hdr.data_off || last->hdr.tcp_flags)) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM_LAST, NULL, "last not supported for data_off and tcp_flags");
return -1;
}
if (CHECK8(mask, hdr.data_off) || CHECK8(mask, hdr.tcp_flags)) {
uint16_t vSpec = ((spec->hdr.data_off << 8) & 0xFF00) | spec->hdr.tcp_flags;
uint16_t vMask = ((mask->hdr.data_off << 8) & 0xFF00) | mask->hdr.tcp_flags;
if (SetFilter(16, 0, 12, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DATA_OFF | TCP_FLAGS;
}
else {
uint16_t vSpec = ((spec->hdr.data_off << 8) & 0xFF00) | spec->hdr.tcp_flags;
if (SetFilter(16, 0, 12, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DATA_OFF | TCP_FLAGS;
}
}
else {
if (spec && spec->hdr.tcp_flags) {
if (last && last->hdr.tcp_flags) {
uint16_t vSpec = spec->hdr.tcp_flags;
uint16_t vLast = last->hdr.tcp_flags;
if (SetFilter(16, 0xFF, 12, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_FLAGS;
}
else if (CHECK8(mask, hdr.tcp_flags)) {
uint16_t vSpec = spec->hdr.tcp_flags;
uint16_t vMask = mask->hdr.tcp_flags;
if (SetFilter(16, 0xFF, 12, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_FLAGS;
}
else {
uint16_t vSpec = spec->hdr.tcp_flags;
if (SetFilter(16, 0xFF, 12, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_FLAGS;
}
}
if (spec && spec->hdr.data_off) {
if (last && last->hdr.data_off) {
uint16_t vSpec = ((spec->hdr.data_off << 8) & 0xFF00);
uint16_t vLast = ((last->hdr.data_off << 8) & 0xFF00);
if (SetFilter(16, 0xFF00, 12, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DATA_OFF;
}
else if (CHECK8(mask, hdr.data_off)) {
uint16_t vSpec = ((spec->hdr.data_off << 8) & 0xFF00);
uint16_t vMask = ((mask->hdr.data_off << 8) & 0xFF00);
if (SetFilter(16, 0xFF00, 12, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DATA_OFF;
}
else {
uint16_t vSpec = (spec->hdr.data_off << 8) & 0xFF00;
if (SetFilter(16, 0xFF00, 12, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= TCP_DATA_OFF;
}
}
}
return 0;
}
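/*
 * Worked example (illustrative): src_port 80 and dst_port 443 take the
 * combined branch above and pack into one 32-bit key at layer-4 offset 0:
 *   vSpec = (80 << 16) | 443 = 0x005001BB
 */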
int SetUDPFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_udp *spec = (const struct rte_flow_item_udp *)item->spec;
const struct rte_flow_item_udp *mask = (const struct rte_flow_item_udp *)item->mask;
const struct rte_flow_item_udp *last = (const struct rte_flow_item_udp *)item->last;
bool singleSetup = true;
if (*fc)
strcat(ntpl_buf, " and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer4Protocol==UDP)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer4Protocol==UDP)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only UDP flow without values supported for this adapter");
return -1;
}
if (spec && spec->hdr.src_port && spec->hdr.dst_port) {
if (last && (last->hdr.src_port || last->hdr.dst_port)) {
singleSetup = true;
}
else if (CHECK16(mask, hdr.src_port) || CHECK16(mask, hdr.dst_port)) {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
uint32_t vMask = ((rte_bswap16(mask->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(mask->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_SRC_PORT | UDP_DST_PORT;
singleSetup = false;
}
else {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_SRC_PORT | UDP_DST_PORT;
singleSetup = false;
}
}
if (singleSetup) {
if (spec && spec->hdr.src_port) {
if (last && last->hdr.src_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vLast = rte_bswap16(last->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_SRC_PORT;
}
else if (CHECK16(mask, hdr.src_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vMask = rte_bswap16(mask->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_SRC_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_SRC_PORT;
}
}
if (spec && spec->hdr.dst_port) {
if (last && last->hdr.dst_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vLast = rte_bswap16(last->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_DST_PORT;
}
else if (CHECK16(mask, hdr.dst_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vMask = rte_bswap16(mask->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_DST_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= UDP_DST_PORT;
}
}
}
return 0;
}
int SetSCTPFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_sctp *spec = (const struct rte_flow_item_sctp *)item->spec;
const struct rte_flow_item_sctp *mask = (const struct rte_flow_item_sctp *)item->mask;
const struct rte_flow_item_sctp *last = (const struct rte_flow_item_sctp *)item->last;
bool singleSetup = true;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer4Protocol==SCTP)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer4Protocol==SCTP)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only SCTP flow without values supported for this adapter");
return -1;
}
if (spec && spec->hdr.src_port && spec->hdr.dst_port) {
if (last && (last->hdr.src_port || last->hdr.dst_port)) {
singleSetup = true;
}
else if (CHECK16(mask, hdr.src_port) || CHECK16(mask, hdr.dst_port)) {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
uint32_t vMask = ((rte_bswap16(mask->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(mask->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_SRC_PORT | SCTP_DST_PORT;
singleSetup = false;
}
else {
uint32_t vSpec = ((rte_bswap16(spec->hdr.src_port) << 16) & 0xFFFF0000) | rte_bswap16(spec->hdr.dst_port);
if (SetFilter(32, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_SRC_PORT | SCTP_DST_PORT;
singleSetup = false;
}
}
if (singleSetup) {
if (spec && spec->hdr.src_port) {
if (last && last->hdr.src_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vLast = rte_bswap16(last->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_SRC_PORT;
}
else if (CHECK16(mask, hdr.src_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
uint16_t vMask = rte_bswap16(mask->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_SRC_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.src_port);
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_SRC_PORT;
}
}
if (spec && spec->hdr.dst_port) {
if (last && last->hdr.dst_port) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vLast = rte_bswap16(last->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_DST_PORT;
}
else if (CHECK16(mask, hdr.dst_port)) {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
uint16_t vMask = rte_bswap16(mask->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_DST_PORT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.dst_port);
if (SetFilter(16, 0, 2, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= SCTP_DST_PORT;
}
}
}
return 0;
}
int SetICMPFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_icmp *spec = (const struct rte_flow_item_icmp *)item->spec;
const struct rte_flow_item_icmp *mask = (const struct rte_flow_item_icmp *)item->mask;
const struct rte_flow_item_icmp *last = (const struct rte_flow_item_icmp *)item->last;
bool singleSetup = true;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerLayer4Protocol==ICMP)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Layer4Protocol==ICMP)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only ICMP flow without values supported for this adapter");
return -1;
}
if (spec && spec->hdr.icmp_type && spec->hdr.icmp_code) {
if (last && (last->hdr.icmp_type || last->hdr.icmp_code)) {
singleSetup = true;
}
else if (mask && CHECK8(mask, hdr.icmp_type) && CHECK8(mask, hdr.icmp_code)) {
uint16_t vSpec = ((spec->hdr.icmp_type << 8) & 0xFF00) | spec->hdr.icmp_code;
uint16_t vMask = ((mask->hdr.icmp_type << 8) & 0xFF00) | mask->hdr.icmp_code;
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_TYPE | ICMP_CODE;
singleSetup = false;
}
else if (!mask || (mask && !CHECK8(mask, hdr.icmp_type) && !CHECK8(mask, hdr.icmp_code))) {
uint16_t vSpec = ((spec->hdr.icmp_type << 8) & 0xFF00) | spec->hdr.icmp_code;
if (SetFilter(16, 0, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_TYPE | ICMP_CODE;
singleSetup = false;
}
}
if (singleSetup) {
if (spec && spec->hdr.icmp_type) {
if (last && last->hdr.icmp_type) {
uint16_t vSpec = ((spec->hdr.icmp_type << 8) & 0xFF00);
uint16_t vLast = ((last->hdr.icmp_type << 8) & 0xFF00);
if (SetFilter(16, 0xFF00, 0, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_TYPE;
}
else if (mask && CHECK8(mask, hdr.icmp_type)) {
uint16_t vSpec = ((spec->hdr.icmp_type << 8) & 0xFF00);
uint16_t vMask = ((mask->hdr.icmp_type << 8) & 0xFF00);
if (SetFilter(16, 0xFF00, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_TYPE;
}
else {
uint16_t vSpec = ((spec->hdr.icmp_type << 8) & 0xFF00);
if (SetFilter(16, 0xFF00, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_TYPE;
}
}
if (spec && spec->hdr.icmp_code) {
if (last && last->hdr.icmp_code) {
uint16_t vSpec = spec->hdr.icmp_code;
uint16_t vLast = last->hdr.icmp_code;
if (SetFilter(16, 0xFF, 0, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_CODE;
}
else if (mask && CHECK8(mask, hdr.icmp_code)) {
uint16_t vSpec = spec->hdr.icmp_code;
uint16_t vMask = mask->hdr.icmp_code;
if (SetFilter(16, 0xFF, 0, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_CODE;
}
else {
uint16_t vSpec = spec->hdr.icmp_code;
if (SetFilter(16, 0xFF, 0, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_CODE;
}
}
}
if (spec && spec->hdr.icmp_ident) {
if (last && last->hdr.icmp_ident) {
uint16_t vSpec = rte_bswap16(spec->hdr.icmp_ident);
uint16_t vLast = rte_bswap16(last->hdr.icmp_ident);
if (SetFilter(16, 0, 4, tnl, LAYER4, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_IDENT;
}
else if (mask && CHECK16(mask, hdr.icmp_ident)) {
uint16_t vSpec = rte_bswap16(spec->hdr.icmp_ident);
uint16_t vMask = rte_bswap16(mask->hdr.icmp_ident);
if (SetFilter(16, 0, 4, tnl, LAYER4, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_IDENT;
}
else {
uint16_t vSpec = rte_bswap16(spec->hdr.icmp_ident);
if (SetFilter(16, 0, 4, tnl, LAYER4, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= ICMP_IDENT;
}
}
return 0;
}
int SetVlanFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_vlan *spec = (const struct rte_flow_item_vlan *)item->spec;
const struct rte_flow_item_vlan *mask = (const struct rte_flow_item_vlan *)item->mask;
const struct rte_flow_item_vlan *last = (const struct rte_flow_item_vlan *)item->last;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerEncapsulation==VLAN)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Encapsulation==VLAN)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only VLAN flow without values supported for this adapter");
return -1;
}
if (spec && spec->tci) {
if (last && last->tci) {
uint16_t vSpec = rte_bswap16(spec->tci);
uint16_t vLast = rte_bswap16(last->tci);
if (SetFilter(16, 0, 2, tnl, VLAN, (const void *)&vSpec, NULL, &vLast, internals, error) != 0) {
return -1;
}
*typeMask |= VLAN_TCI;
}
else if (mask && CHECK16(mask, tci)) {
uint16_t vSpec = rte_bswap16(spec->tci);
uint16_t vMask = rte_bswap16(mask->tci);
if (SetFilter(16, 0, 2, tnl, VLAN, (const void *)&vSpec, &vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= VLAN_TCI;
}
else {
uint16_t vSpec = rte_bswap16(spec->tci);
if (SetFilter(16, 0, 2, tnl, VLAN, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= VLAN_TCI;
}
}
return 0;
}
int SetMplsFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
bool tnl,
uint64_t *typeMask,
struct pmd_internals *internals,
struct rte_flow_error *error)
{
const struct rte_flow_item_mpls *spec = (const struct rte_flow_item_mpls *)item->spec;
const struct rte_flow_item_mpls *mask = (const struct rte_flow_item_mpls *)item->mask;
const struct rte_flow_item_mpls *last = (const struct rte_flow_item_mpls *)item->last;
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
if (tnl) {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(InnerEncapsulation==MPLS)");
}
else {
strcat(&ntpl_buf[strlen(ntpl_buf)], "(Encapsulation==MPLS)");
}
if (!last && !mask && !spec) {
return 0;
}
if (internals->keyMatcher == 0) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "Only MPLS flow without values supported for this adapter");
return -1;
}
if (spec) {
if (last) {
uint32_t vSpec = ((spec->label_tc_s[0] << 24) & 0xFF000000) | ((spec->label_tc_s[1] << 16) & 0xFF0000) | ((spec->label_tc_s[2] << 8) & 0xF000);
uint32_t vLast = ((last->label_tc_s[0] << 24) & 0xFF000000) | ((last->label_tc_s[1] << 16) & 0xFF0000) | ((last->label_tc_s[2] << 8) & 0xF000);
if (SetFilter(32, 0xFFFFF000, 0, tnl, MPLS, (const void *)&vSpec, NULL, (const void *)&vLast, internals, error) != 0) {
return -1;
}
*typeMask |= MPLS_LABEL;
}
else if (mask) {
uint32_t vSpec = ((spec->label_tc_s[0] << 24) & 0xFF000000) | ((spec->label_tc_s[1] << 16) & 0xFF0000) | ((spec->label_tc_s[2] << 8) & 0xF000);
uint32_t vMask = ((mask->label_tc_s[0] << 24) & 0xFF000000) | ((mask->label_tc_s[1] << 16) & 0xFF0000) | ((mask->label_tc_s[2] << 8) & 0xF000);
if (SetFilter(32, 0xFFFFF000, 0, tnl, MPLS, (const void *)&vSpec, (const void *)&vMask, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= MPLS_LABEL;
}
else {
uint32_t vSpec = ((spec->label_tc_s[0] << 24) & 0xFF000000) | ((spec->label_tc_s[1] << 16) & 0xFF0000) | ((spec->label_tc_s[2] << 8) & 0xF000);
if (SetFilter(32, 0xFFFFF000, 0, tnl, MPLS, (const void *)&vSpec, NULL, NULL, internals, error) != 0) {
return -1;
}
*typeMask |= MPLS_LABEL;
}
}
return 0;
}
int SetTunnelFilter(char *ntpl_buf,
bool *fc,
int type,
uint64_t *typeMask)
{
if (*fc) strcat(ntpl_buf," and ");
*fc = true;
switch (type) {
case GTPU0_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GTPv0-U)");
*typeMask |= GTPU0_TUNNEL;
break;
case GTPU1_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GTPv1-U)");
*typeMask |= GTPU1_TUNNEL;
break;
case GTPC2_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GTPv2-C)");
*typeMask |= GTPC2_TUNNEL;
break;
case GTPC1_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GTPv1-C)");
*typeMask |= GTPC1_TUNNEL;
break;
case GTPC1_2_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GTPv1v2-C)");
*typeMask |= GTPC1_2_TUNNEL;
break;
case GREV0_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GREv0)");
*typeMask |= GREV0_TUNNEL;
break;
case GREV1_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==GREv1)");
*typeMask |= GREV1_TUNNEL;
break;
case VXLAN_TUNNEL_TYPE:
snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==VXLAN)");
*typeMask |= VXLAN_TUNNEL;
break;
case NVGRE_TUNNEL_TYPE:
    snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==NVGRE)");
*typeMask |= NVGRE_TUNNEL;
break;
case IP_IN_IP_TUNNEL_TYPE:
    snprintf(&ntpl_buf[strlen(ntpl_buf)], NTPL_BSIZE - strlen(ntpl_buf) - 1, "(TunnelType==IPinIP)");
*typeMask |= IP_IN_IP_TUNNEL;
break;
}
return 0;
}
int SetGreFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
uint64_t *typeMask)
{
const struct rte_flow_item_gre *spec = (const struct rte_flow_item_gre *)item->spec;
int type;
if (spec) {
    /* c_rsvd0_ver is carried in network byte order; the version sits in the
     * low 3 bits of the host-order value. */
    switch (rte_bswap16(spec->c_rsvd0_ver) & 0x7) {
default:
case 0:
type = GREV0_TUNNEL_TYPE;
break;
case 1:
type = GREV1_TUNNEL_TYPE;
break;
}
}
else
type = GREV0_TUNNEL_TYPE;
return SetTunnelFilter(ntpl_buf, fc, type, typeMask);
}
int SetGtpFilter(char *ntpl_buf,
bool *fc,
const struct rte_flow_item *item,
uint64_t *typeMask,
int protocol)
{
const struct rte_flow_item_gtp *spec = (const struct rte_flow_item_gtp *)item->spec;
int type;
if (spec) {
switch ((spec->v_pt_rsv_flags >> 5) & 0x7) {
case 0:
if (protocol == 'U')
type = GTPU0_TUNNEL_TYPE;
else
return -1;
break;
default:
case 1:
if (protocol == 'U')
type = GTPU1_TUNNEL_TYPE;
else
type = GTPC1_TUNNEL_TYPE;
break;
case 2:
if (protocol == 'U')
return -1;
else
type = GTPC2_TUNNEL_TYPE;
break;
}
}
else {
if (protocol == 'U')
type = GTPU1_TUNNEL_TYPE;
else
type = GTPC1_2_TUNNEL_TYPE;
}
return SetTunnelFilter(ntpl_buf, fc, type, typeMask);
}
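/*
 * Worked example (illustrative): a GTPv1-U header starts with 0x30
 * (version=1, PT=1), so ((0x30 >> 5) & 0x7) == 1 and SetGtpFilter()
 * selects GTPU1_TUNNEL_TYPE when protocol == 'U'.
 */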
| 47,395 |
724 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Default Constructors."""
import importlib
import copy
from functools import partial
from collections import OrderedDict
from modnas.registry.arch_space import build as build_module
from modnas.registry.construct import register, build
from modnas.arch_space.slot import Slot
from modnas.utils.logging import get_logger
from modnas.utils import import_file
logger = get_logger('construct')
def get_convert_fn(convert_fn, **kwargs):
"""Return a new convert function."""
if isinstance(convert_fn, str):
return build(convert_fn, **kwargs)
elif callable(convert_fn):
return convert_fn
else:
raise ValueError('unsupported convert_fn type: {}'.format(type(convert_fn)))
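# Usage sketch (illustrative): the constructor name below is a placeholder
# for any entry in the construct registry; callables pass through unchanged.
#
#     fn = get_convert_fn('MyConvertConstructor', arg=1)  # built via registry
#     fn = get_convert_fn(lambda slot: None)              # returned as-is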
@register
class DefaultModelConstructor():
"""Constructor that builds model from registered architectures."""
def __init__(self, model_type, args=None):
self.model_type = model_type
self.args = args or {}
def __call__(self, model):
"""Run constructor."""
model = build_module(self.model_type, **copy.deepcopy(self.args))
return model
@register
class ExternalModelConstructor():
"""Constructor that builds model from external sources or libraries."""
def __init__(self, model_type, src_path=None, import_path=None, args=None):
self.model_type = model_type
self.import_path = import_path
self.src_path = src_path
self.args = args or {}
def __call__(self, model):
"""Run constructor."""
if self.src_path is not None:
logger.info('Importing model from path: {}'.format(self.src_path))
mod = import_file(self.src_path)
elif self.import_path is not None:
logger.info('Importing model from lib: {}'.format(self.import_path))
mod = importlib.import_module(self.import_path)
model = mod.__dict__[self.model_type](**self.args)
return model
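# Usage sketch (illustrative, hypothetical arguments): loading a model class
# from an external library by import path.
#
#     ctor = ExternalModelConstructor('resnet18',
#                                     import_path='torchvision.models',
#                                     args={'num_classes': 10})
#     model = ctor(None)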
@register
class DefaultTraversalConstructor():
"""Constructor that traverses and converts modules."""
def __init__(self, by_class=None, by_classname=None):
self.by_class = by_class
self.by_classname = by_classname
def convert(self, module):
"""Return converted module."""
raise NotImplementedError
def __call__(self, model):
"""Run constructor."""
for m in model.modules():
for k, sm in m._modules.items():
if self.by_class and not isinstance(sm, self.by_class):
continue
if self.by_classname and type(sm).__qualname__ != self.by_classname:
continue
new_sm = self.convert(sm)
if new_sm is not None:
m._modules[k] = new_sm
return model
@register
class DefaultSlotTraversalConstructor():
"""Constructor that traverses and converts Slots."""
def __init__(self, gen=None, convert_fn=None, args=None, skip_exist=True):
self.gen = gen
self.skip_exist = skip_exist
if convert_fn:
self.convert = get_convert_fn(convert_fn, **(args or {}))
self.visited = set()
def convert(self, slot):
"""Return converted module from slot."""
raise NotImplementedError
def __call__(self, model):
"""Run constructor."""
Slot.set_convert_fn(self.convert)
gen = self.gen or Slot.gen_slots_model(model)
all_slots = list(gen())
for m in all_slots:
if self.skip_exist and m.get_entity() is not None:
continue
ent = self.convert(m)
if ent is not None:
m.set_entity(ent)
self.visited.clear()
return model
@register
class DefaultMixedOpConstructor(DefaultSlotTraversalConstructor):
"""Default Mixed Operator Constructor."""
def __init__(self, candidates, mixed_op, candidate_args=None):
DefaultSlotTraversalConstructor.__init__(self)
self.candidates = candidates
self.mixed_op_conf = mixed_op
self.candidate_args = candidate_args or {}
def convert(self, slot):
"""Return converted MixedOp from slot."""
cand_args = self.candidate_args.copy()
candidates = self.candidates
if isinstance(candidates, (list, tuple)):
candidates = {k: k for k in candidates}
cands = OrderedDict([(k, build_module(v, slot=slot, **cand_args)) for k, v in candidates.items()])
return build_module(self.mixed_op_conf, candidates=cands)
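# Example configuration (hypothetical names): the candidate operators and the
# mixed-op type must both exist in the arch_space registry.
#
#     DefaultMixedOpConstructor(
#         candidates=['AvgPool', 'MaxPool', 'SkipConnect'],
#         mixed_op='BinGateMixedOp')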
@register
class DefaultOpConstructor(DefaultSlotTraversalConstructor):
"""Default Network Operator Constructor."""
def __init__(self, op):
DefaultSlotTraversalConstructor.__init__(self)
self.op_conf = op
def convert(self, slot):
"""Return converted operator from slot."""
return build_module(copy.deepcopy(self.op_conf), slot)
def make_traversal_constructor(convert_fn):
"""Return default slot traversal constructor with given function as converter."""
return partial(DefaultSlotTraversalConstructor, convert_fn=convert_fn)
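# Usage sketch (illustrative): derive a traversal constructor from a plain
# convert function; 'Identity' stands in for any registered module builder.
#
#     ctor_cls = make_traversal_constructor(lambda slot: build_module('Identity'))
#     model = ctor_cls()(model)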
| 2,173 |
8,323 | <gh_stars>1000+
from sympy.assumptions import Predicate
from sympy.multipledispatch import Dispatcher
class PrimePredicate(Predicate):
"""
Prime number predicate.
Explanation
===========
``ask(Q.prime(x))`` is true iff ``x`` is a natural number greater
than 1 that has no positive divisors other than ``1`` and the
number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.prime(0))
False
>>> ask(Q.prime(1))
False
>>> ask(Q.prime(2))
True
>>> ask(Q.prime(20))
False
>>> ask(Q.prime(-3))
False
"""
name = 'prime'
handler = Dispatcher(
"PrimeHandler",
doc=("Handler for key 'prime'. Test that an expression represents a prime"
" number. When the expression is an exact number, the result (when True)"
" is subject to the limitations of isprime() which is used to return the "
"result.")
)
class CompositePredicate(Predicate):
"""
Composite number predicate.
Explanation
===========
``ask(Q.composite(x))`` is true iff ``x`` is a positive integer and has
at least one positive divisor other than ``1`` and the number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.composite(0))
False
>>> ask(Q.composite(1))
False
>>> ask(Q.composite(2))
False
>>> ask(Q.composite(20))
True
"""
name = 'composite'
handler = Dispatcher("CompositeHandler", doc="Handler for key 'composite'.")
class EvenPredicate(Predicate):
"""
Even number predicate.
Explanation
===========
``ask(Q.even(x))`` is true iff ``x`` belongs to the set of even
integers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.even(0))
True
>>> ask(Q.even(2))
True
>>> ask(Q.even(3))
False
>>> ask(Q.even(pi))
False
"""
name = 'even'
handler = Dispatcher("EvenHandler", doc="Handler for key 'even'.")
class OddPredicate(Predicate):
"""
Odd number predicate.
Explanation
===========
``ask(Q.odd(x))`` is true iff ``x`` belongs to the set of odd numbers.
Examples
========
>>> from sympy import Q, ask, pi
>>> ask(Q.odd(0))
False
>>> ask(Q.odd(2))
False
>>> ask(Q.odd(3))
True
>>> ask(Q.odd(pi))
False
"""
name = 'odd'
handler = Dispatcher(
"OddHandler",
doc=("Handler for key 'odd'. Test that an expression represents an odd"
" number.")
)
| 1,034 |
2,542 | <reponame>gridgentoo/ServiceFabricAzure
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Reliability
{
class NodeUpgradeReplyMessageBody : public Serialization::FabricSerializable
{
public:
NodeUpgradeReplyMessageBody()
{
}
NodeUpgradeReplyMessageBody(
ServiceModel::ApplicationUpgradeSpecification const & upgradeSpecification,
std::vector<Reliability::FailoverUnitInfo> && replicaList,
std::vector<Reliability::FailoverUnitInfo> && droppedReplicaList)
: applicationId_(upgradeSpecification.ApplicationId),
instanceId_(upgradeSpecification.InstanceId),
replicas_(std::move(replicaList), std::move(droppedReplicaList), false)
{
}
__declspec(property(get=get_ApplicationId)) ServiceModel::ApplicationIdentifier const & ApplicationId;
ServiceModel::ApplicationIdentifier const & get_ApplicationId() const { return applicationId_; }
__declspec(property(get=get_InstanceId)) uint64 InstanceId;
uint64 get_InstanceId() const { return instanceId_; }
__declspec(property(get=get_Replicas)) ReplicaUpMessageBody & Replicas;
ReplicaUpMessageBody & get_Replicas() { return replicas_; }
void WriteTo(Common::TextWriter & w, Common::FormatOptions const &) const
{
w.Write("ApplicationId: {0} InstanceId: {1}\r\n{2}", applicationId_, instanceId_, replicas_);
}
void WriteToEtw(uint16 contextSequenceId) const;
FABRIC_FIELDS_03(applicationId_, instanceId_, replicas_);
private:
ServiceModel::ApplicationIdentifier applicationId_;
uint64 instanceId_;
ReplicaUpMessageBody replicas_;
};
}
| 720 |
2,574 | {
"recurse": true,
"skip": [
"http://localhost:8080*",
"\/node_modules\/"
],
"silent": true,
"concurrency": 10,
"directoryListing": true
}
| 69 |
658 | <filename>stage23/sys/lapic.c
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <sys/lapic.h>
#include <sys/cpu.h>
#include <lib/blib.h>
#include <lib/acpi.h>
#include <mm/pmm.h>
struct madt {
struct sdt header;
uint32_t local_controller_addr;
uint32_t flags;
char madt_entries_begin[];
} __attribute__((packed));
struct madt_io_apic {
uint8_t type;
uint8_t length;
uint8_t apic_id;
uint8_t reserved;
uint32_t address;
uint32_t gsib;
} __attribute__((packed));
struct dmar {
struct sdt header;
uint8_t host_address_width;
uint8_t flags;
uint8_t reserved[10];
symbol remapping_structures;
} __attribute__((packed));
bool lapic_check(void) {
uint32_t eax, ebx, ecx, edx;
if (!cpuid(1, 0, &eax, &ebx, &ecx, &edx))
return false;
if (!(edx & (1 << 9)))
return false;
return true;
}
uint32_t lapic_read(uint32_t reg) {
size_t lapic_mmio_base = (size_t)(rdmsr(0x1b) & 0xfffff000);
return mmind(lapic_mmio_base + reg);
}
void lapic_write(uint32_t reg, uint32_t data) {
size_t lapic_mmio_base = (size_t)(rdmsr(0x1b) & 0xfffff000);
mmoutd(lapic_mmio_base + reg, data);
}
bool x2apic_check(void) {
uint32_t eax, ebx, ecx, edx;
if (!cpuid(1, 0, &eax, &ebx, &ecx, &edx))
return false;
if (!(ecx & (1 << 21)))
return false;
// According to the Intel VT-d spec, we're required
// to check whether bits 0 and 1 of the flags field of the
// DMAR ACPI table are set, and if they are, we should
// not report x2APIC capabilities.
struct dmar *dmar = acpi_get_table("DMAR", 0);
if (!dmar)
return true;
if ((dmar->flags & (1 << 0)) && (dmar->flags & (1 << 1)))
return false;
return true;
}
static bool x2apic_mode = false;
bool x2apic_enable(void) {
if (!x2apic_check())
return false;
uint64_t ia32_apic_base = rdmsr(0x1b);
ia32_apic_base |= (1 << 10);
wrmsr(0x1b, ia32_apic_base);
x2apic_mode = true;
return true;
}
void lapic_eoi(void) {
if (!x2apic_mode) {
lapic_write(0xb0, 0);
} else {
x2apic_write(0xb0, 0);
}
}
uint64_t x2apic_read(uint32_t reg) {
return rdmsr(0x800 + (reg >> 4));
}
void x2apic_write(uint32_t reg, uint64_t data) {
wrmsr(0x800 + (reg >> 4), data);
}
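// Worked example (illustrative note, not part of the original file): xAPIC
// MMIO registers are 16 bytes apart, so the (reg >> 4) mapping above turns
// an MMIO offset into its x2APIC MSR index. For the EOI register at MMIO
// offset 0xb0, the MSR becomes 0x800 + (0xb0 >> 4) = 0x80b, matching the
// x2APIC EOI MSR documented by Intel.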
static struct madt_io_apic **io_apics = NULL;
static size_t max_io_apics = 0;
void init_io_apics(void) {
static bool already_inited = false;
if (already_inited) {
return;
}
struct madt *madt = acpi_get_table("APIC", 0);
if (madt == NULL) {
goto out;
}
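// Two-pass scan (descriptive note): the first MADT walk below only counts
// I/O APIC entries (type 1) so the exact array size can be allocated; the
// second walk then records a pointer to each entry.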
for (uint8_t *madt_ptr = (uint8_t *)madt->madt_entries_begin;
(uintptr_t)madt_ptr < (uintptr_t)madt + madt->header.length;
madt_ptr += *(madt_ptr + 1)) {
switch (*madt_ptr) {
case 1: {
max_io_apics++;
continue;
}
}
}
io_apics = ext_mem_alloc(max_io_apics * sizeof(struct madt_io_apic *));
max_io_apics = 0;
for (uint8_t *madt_ptr = (uint8_t *)madt->madt_entries_begin;
(uintptr_t)madt_ptr < (uintptr_t)madt + madt->header.length;
madt_ptr += *(madt_ptr + 1)) {
switch (*madt_ptr) {
case 1: {
io_apics[max_io_apics++] = (void *)madt_ptr;
continue;
}
}
}
out:
already_inited = true;
}
uint32_t io_apic_read(size_t io_apic, uint32_t reg) {
uintptr_t base = (uintptr_t)io_apics[io_apic]->address;
mmoutd(base, reg);
return mmind(base + 16);
}
void io_apic_write(size_t io_apic, uint32_t reg, uint32_t value) {
uintptr_t base = (uintptr_t)io_apics[io_apic]->address;
mmoutd(base, reg);
mmoutd(base + 16, value);
}
uint32_t io_apic_gsi_count(size_t io_apic) {
return ((io_apic_read(io_apic, 1) & 0xff0000) >> 16) + 1;
}
void io_apic_mask_all(void) {
for (size_t i = 0; i < max_io_apics; i++) {
uint32_t gsi_count = io_apic_gsi_count(i);
for (uint32_t j = 0; j < gsi_count; j++) {
uintptr_t ioredtbl = j * 2 + 16;
io_apic_write(i, ioredtbl, (1 << 16)); // mask
io_apic_write(i, ioredtbl + 1, 0);
}
}
}
| 2,158 |
335 | {
"word": "Patent",
"definitions": [
"Easily recognizable; obvious.",
"(of a vessel, duct, or aperture) open and unobstructed; failing to close.",
"(of a parasitic infection) showing detectable parasites in the tissues or faeces.",
"Made and marketed under a patent; proprietary."
],
"parts-of-speech": "Adjective"
} | 130 |
480 | /*
* Copyright [2013-2021], Alibaba Group Holding Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.alibaba.polardbx.optimizer.core.rel;
import com.alibaba.polardbx.optimizer.core.planner.rule.util.CBOUtil;
import com.alibaba.polardbx.optimizer.memory.MemoryEstimator;
import com.alibaba.polardbx.optimizer.config.meta.CostModelWeight;
import com.alibaba.polardbx.optimizer.config.table.IndexMeta;
import com.alibaba.polardbx.optimizer.config.table.IndexType;
import com.alibaba.polardbx.optimizer.config.table.Relationship;
import com.alibaba.polardbx.optimizer.index.Index;
import com.alibaba.polardbx.optimizer.index.IndexUtil;
import org.apache.calcite.plan.Convention;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptCost;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelInput;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelShuttle;
import org.apache.calcite.rel.RelWriter;
import org.apache.calcite.rel.core.Aggregate;
import org.apache.calcite.rel.core.AggregateCall;
import org.apache.calcite.rel.core.Join;
import org.apache.calcite.rel.core.JoinRelType;
import org.apache.calcite.rel.externalize.RelDrdsWriter;
import org.apache.calcite.rel.externalize.RexExplainVisitor;
import org.apache.calcite.rel.logical.LogicalFilter;
import org.apache.calcite.rel.logical.LogicalProject;
import org.apache.calcite.rel.metadata.RelColumnOrigin;
import org.apache.calcite.rel.metadata.RelMetadataQuery;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.util.ImmutableBitSet;
import org.apache.calcite.util.Util;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
* @author dylan
*/
public class MysqlAgg extends Aggregate implements MysqlRel {
public static Index OPTIMIZED_AWAY_INDEX = new Index(
new IndexMeta("optimized_away_table", new ArrayList<>(), new ArrayList<>(),
IndexType.NONE, Relationship.NONE, false, false, false,
"optimized_away_index"), Arrays.asList(Index.PredicateType.EQUAL), 1, 1);
public MysqlAgg(
RelOptCluster cluster,
RelTraitSet traitSet,
RelNode child,
boolean indicator,
ImmutableBitSet groupSet,
List<ImmutableBitSet> groupSets,
List<AggregateCall> aggCalls) {
super(cluster, traitSet, child, indicator, groupSet, groupSets, aggCalls);
}
public MysqlAgg(RelInput input) {
super(input);
}
public static MysqlAgg create(final RelNode input,
ImmutableBitSet groupSet, List<ImmutableBitSet> groupSets,
List<AggregateCall> aggCalls) {
return create_(input, false, groupSet, groupSets, aggCalls);
}
private static MysqlAgg create_(final RelNode input,
boolean indicator,
ImmutableBitSet groupSet,
List<ImmutableBitSet> groupSets,
List<AggregateCall> aggCalls) {
final RelOptCluster cluster = input.getCluster();
final RelTraitSet traitSet = cluster.traitSetOf(Convention.NONE);
return new MysqlAgg(cluster, traitSet, input, indicator, groupSet,
groupSets, aggCalls);
}
@Override
public MysqlAgg copy(RelTraitSet traitSet, RelNode input, boolean indicator, ImmutableBitSet groupSet,
List<ImmutableBitSet> groupSets, List<AggregateCall> aggCalls) {
assert traitSet.containsIfApplicable(Convention.NONE);
MysqlAgg mysqlAgg = new MysqlAgg(getCluster(),
traitSet,
input,
indicator,
groupSet,
groupSets,
aggCalls);
return mysqlAgg;
}
public MysqlAgg copy(RelNode input, ImmutableBitSet groupSet, List<AggregateCall> aggCalls) {
return copy(traitSet, input, indicator, groupSet, null, aggCalls);
}
@Override
public RelNode accept(RelShuttle shuttle) {
return shuttle.visit(this);
}
@Override
public RelWriter explainTermsForDisplay(RelWriter pw) {
pw.item(RelDrdsWriter.REL_NAME, "MysqlAgg");
List<String> groupList = new ArrayList<String>(groupSet.length());
for (int groupIndex : groupSet.asList()) {
RexExplainVisitor visitor = new RexExplainVisitor(this);
groupList.add(visitor.getField(groupIndex).getKey());
}
pw.itemIf("group", StringUtils.join(groupList, ","), !groupList.isEmpty());
int groupCount = groupSet.cardinality();
for (int i = groupCount; i < getRowType().getFieldCount(); i++) {
RexExplainVisitor visitor = new RexExplainVisitor(this);
aggCalls.get(i - groupCount).accept(visitor);
pw.item(rowType.getFieldList().get(i).getKey(), visitor.toSqlString());
}
Index index = canUseIndex(getCluster().getMetadataQuery());
if (index != null) {
pw.item("index", index.getIndexMeta().getPhysicalIndexName());
}
return pw;
}
@Override
public RelOptCost computeSelfCost(RelOptPlanner planner,
RelMetadataQuery mq) {
double rowCount = mq.getRowCount(this.input);
if (Double.isInfinite(rowCount)) {
return planner.getCostFactory().makeHugeCost();
}
final double sortCpu;
Index aggIndex = canUseIndex(mq);
if (aggIndex == OPTIMIZED_AWAY_INDEX) {
return planner.getCostFactory().makeTinyCost();
}
if (aggIndex != null) {
MysqlTableScan mysqlTableScan = getAccessMysqlTableScan(this.getInput());
Index tableScanIndex = mysqlTableScan.canUseIndex(mq);
if (tableScanIndex == null) {
if (aggIndex.getIndexMeta().isPrimaryKeyIndex()) {
// clustering index
sortCpu = 0;
} else {
// non-clustering index
// choose min cost index of aggIndex or tableScanIndex
if (overJoin(this)) {
sortCpu = Math.min(
Util.nLogN(rowCount) * CostModelWeight.INSTANCE.getSortWeight() * groupSet.cardinality(),
mq.getCumulativeCost(mysqlTableScan).getIo() * CostModelWeight.INSTANCE.getIoWeight());
} else {
sortCpu = 0;
}
}
} else {
// aggIndex and tableScanIndex must be the same index
sortCpu = 0;
}
} else {
sortCpu = Util.nLogN(rowCount) * CostModelWeight.INSTANCE.getSortWeight()
* groupSet.cardinality();
}
final double sortAggWeight = CostModelWeight.INSTANCE.getSortAggWeight();
final double useAggSize =
aggCalls.stream().filter(x -> x.getAggregation().kind != SqlKind.__FIRST_VALUE
&& x.getAggregation().getKind() != SqlKind.FIRST_VALUE).count();
// 1 for grouping
final double aggCpu = rowCount * sortAggWeight * Math.max(useAggSize, 1);
final double memory = MemoryEstimator.estimateRowSizeInArrayList(getRowType()) * mq.getRowCount(this);
return planner.getCostFactory().makeCost(rowCount, sortCpu + aggCpu, memory, 0, 0);
}
private boolean overJoin(RelNode rel) {
if (rel instanceof Join) {
return true;
}
for (RelNode input : rel.getInputs()) {
if (overJoin(input)) {
return true;
}
}
return false;
}
@Override
public Index canUseIndex(RelMetadataQuery mq) {
MysqlTableScan mysqlTableScan = getAccessMysqlTableScan(this.getInput());
if (mysqlTableScan != null) {
Index accessIndex = MysqlRel.canUseIndex(this.getInput(), mq);
Set<Integer> groupKeyColumnSet = new HashSet<>();
List<Integer> groupByColumn = new ArrayList<>();
for (Integer i : this.getGroupSet()) {
RelColumnOrigin relColumnOrigin = mq.getColumnOrigin(this.getInput(), i);
if (relColumnOrigin == null) {
return null;
}
// FIXME: Is that condition strict enough?
if (relColumnOrigin.getOriginTable() != mysqlTableScan.getTable()) {
return null;
}
if (groupKeyColumnSet.add(relColumnOrigin.getOriginColumnOrdinal())) {
groupByColumn.add(relColumnOrigin.getOriginColumnOrdinal());
}
}
Index groupByIndex;
if (accessIndex == null) {
// mysql tablescan without access index, we can use an index suitable for group by
if (!groupByColumn.isEmpty()) {
groupByIndex = IndexUtil.selectIndexForGroupBy(CBOUtil.getTableMeta(mysqlTableScan.getTable()),
groupByColumn, IndexUtil.getCanUseIndexSet(mysqlTableScan));
} else {
// select min(a), max(b) from t;
Set<Integer> minMaxAggCallKeyColumnSet = getMinMaxAggCallKeyColumnSet();
if (minMaxAggCallKeyColumnSet == null) {
return null;
}
// if there is a filter but no usable index, it is impossible to optimize away
if (!mysqlTableScan.getFilters().isEmpty()) {
return null;
}
groupByIndex =
IndexUtil.selectIndexForMinMaxAggCall(CBOUtil.getTableMeta(mysqlTableScan.getTable()),
minMaxAggCallKeyColumnSet, IndexUtil.getCanUseIndexSet(mysqlTableScan));
}
} else {
// mysql tablescan with access index, check whether group by can use it
if (!groupByColumn.isEmpty()) {
groupByIndex = IndexUtil.canIndexUsedByGroupBy(CBOUtil.getTableMeta(mysqlTableScan.getTable()),
accessIndex.getIndexMeta(), accessIndex.getPrefixTypeList(), groupByColumn);
} else {
// select min(a), max(b) from t;
Set<Integer> minMaxAggCallKeyColumnSet = getMinMaxAggCallKeyColumnSet();
if (minMaxAggCallKeyColumnSet == null) {
return null;
}
groupByIndex =
IndexUtil.canIndexUsedByMinMaxAggCall(CBOUtil.getTableMeta(mysqlTableScan.getTable()),
accessIndex.getIndexMeta(), minMaxAggCallKeyColumnSet);
}
}
return groupByIndex;
} else {
return null;
}
}
private Set<Integer> getMinMaxAggCallKeyColumnSet() {
if (getAggCallList().stream().allMatch(x -> x.getAggregation().getKind().belongsTo(SqlKind.MIN_MAX_AGG))) {
if (overJoin(this)) {
return null;
}
Set<Integer> aggCallKeyColumnSet = new HashSet<>();
for (AggregateCall aggregateCall : getAggCallList()) {
if (aggregateCall.getAggregation().getKind().belongsTo(SqlKind.MIN_MAX_AGG)) {
RelColumnOrigin columnOrigin = this.getCluster().getMetadataQuery().getColumnOrigin(this.getInput(),
aggregateCall.getArgList().get(0));
if (columnOrigin != null) {
aggCallKeyColumnSet.add(columnOrigin.getOriginColumnOrdinal());
} else {
return null;
}
} else {
return null;
}
}
return aggCallKeyColumnSet;
} else {
return null;
}
}
private static MysqlTableScan getAccessMysqlTableScan(RelNode input) {
// for now only support single table
if (input instanceof MysqlTableScan) {
return (MysqlTableScan) input;
} else if (input instanceof LogicalProject) {
return getAccessMysqlTableScan(((LogicalProject) input).getInput());
} else if (input instanceof LogicalFilter) {
return getAccessMysqlTableScan(((LogicalFilter) input).getInput());
} else if (input instanceof MysqlIndexNLJoin
|| input instanceof MysqlNLJoin
|| input instanceof MysqlHashJoin) {
if (((Join) input).getJoinType() == JoinRelType.RIGHT) {
return getAccessMysqlTableScan(((Join) input).getRight());
} else {
return getAccessMysqlTableScan(((Join) input).getLeft());
}
} else if (input instanceof MysqlSemiIndexNLJoin
|| input instanceof MysqlSemiNLJoin
|| input instanceof MysqlSemiHashJoin) {
return getAccessMysqlTableScan(((Join) input).getLeft());
} else {
return null;
}
}
}
| 6,426 |
5,169 | <gh_stars>1000+
{
"name": "TTEmojiFlagString",
"version": "0.1.1",
"summary": "ObjectiveC category to convert 2 letter ISO 3166-1 alpha-2 country codes to a Unicode Flag Emoji",
"description": "This little ObjectiveC category turns any given 2 letter ISO 3166-1 alpha-2 country code in an NSString to a Unicode Flag Emoji that can be directly used wherever you want.",
"homepage": "https://github.com/dhiraj/TTEmojiFlagString",
"license": {
"type": "Apache",
"file": "LICENSE"
},
"authors": {
"<NAME>": "<EMAIL>"
},
"source": {
"git": "https://github.com/dhiraj/TTEmojiFlagString.git",
"tag": "0.1.1"
},
"social_media_url": "https://twitter.com/dhiraj",
"platforms": {
"ios": "8.3"
},
"source_files": "TTEmojiFlagString/Classes/**/*",
"public_header_files": "TTEmojiFlagString/Classes/**/*.h"
}
| 326 |
16,461 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#import <UIKit/UIKit.h>
#import <ABI42_0_0React/ABI42_0_0RCTSurfaceStage.h>
#import <ABI42_0_0React/ABI42_0_0RCTSurfaceView.h>
@class ABI42_0_0RCTSurfaceRootView;
NS_ASSUME_NONNULL_BEGIN
@interface ABI42_0_0RCTSurfaceView (Internal)
@property (nonatomic, strong) ABI42_0_0RCTSurfaceRootView *rootView;
@property (nonatomic, assign) ABI42_0_0RCTSurfaceStage stage;
@end
NS_ASSUME_NONNULL_END
| 236 |
2,777 | /* Copyright 2013-2021 MultiMC Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "VersionSelectDialog.h"
#include <QtWidgets/QButtonGroup>
#include <QtWidgets/QDialogButtonBox>
#include <QtWidgets/QHBoxLayout>
#include <QtWidgets/QPushButton>
#include <QtWidgets/QVBoxLayout>
#include <QDebug>
#include "ui/dialogs/ProgressDialog.h"
#include "ui/widgets/VersionSelectWidget.h"
#include "ui/dialogs/CustomMessageBox.h"
#include "BaseVersion.h"
#include "BaseVersionList.h"
#include "tasks/Task.h"
#include "Application.h"
#include "VersionProxyModel.h"
VersionSelectDialog::VersionSelectDialog(BaseVersionList *vlist, QString title, QWidget *parent, bool cancelable)
: QDialog(parent)
{
setObjectName(QStringLiteral("VersionSelectDialog"));
resize(400, 347);
m_verticalLayout = new QVBoxLayout(this);
m_verticalLayout->setObjectName(QStringLiteral("verticalLayout"));
m_versionWidget = new VersionSelectWidget(parent);
m_verticalLayout->addWidget(m_versionWidget);
m_horizontalLayout = new QHBoxLayout();
m_horizontalLayout->setObjectName(QStringLiteral("horizontalLayout"));
m_refreshButton = new QPushButton(this);
m_refreshButton->setObjectName(QStringLiteral("refreshButton"));
m_horizontalLayout->addWidget(m_refreshButton);
m_buttonBox = new QDialogButtonBox(this);
m_buttonBox->setObjectName(QStringLiteral("buttonBox"));
m_buttonBox->setOrientation(Qt::Horizontal);
m_buttonBox->setStandardButtons(QDialogButtonBox::Cancel|QDialogButtonBox::Ok);
m_horizontalLayout->addWidget(m_buttonBox);
m_verticalLayout->addLayout(m_horizontalLayout);
retranslate();
QObject::connect(m_buttonBox, SIGNAL(accepted()), this, SLOT(accept()));
QObject::connect(m_buttonBox, SIGNAL(rejected()), this, SLOT(reject()));
QMetaObject::connectSlotsByName(this);
setWindowModality(Qt::WindowModal);
setWindowTitle(title);
m_vlist = vlist;
if (!cancelable)
{
m_buttonBox->button(QDialogButtonBox::Cancel)->setEnabled(false);
}
}
void VersionSelectDialog::retranslate()
{
// FIXME: overrides custom title given in constructor!
setWindowTitle(tr("Choose Version"));
m_refreshButton->setToolTip(tr("Reloads the version list."));
m_refreshButton->setText(tr("&Refresh"));
}
void VersionSelectDialog::setCurrentVersion(const QString& version)
{
m_currentVersion = version;
m_versionWidget->setCurrentVersion(version);
}
void VersionSelectDialog::setEmptyString(QString emptyString)
{
m_versionWidget->setEmptyString(emptyString);
}
void VersionSelectDialog::setEmptyErrorString(QString emptyErrorString)
{
m_versionWidget->setEmptyErrorString(emptyErrorString);
}
void VersionSelectDialog::setResizeOn(int column)
{
resizeOnColumn = column;
}
int VersionSelectDialog::exec()
{
QDialog::open();
m_versionWidget->initialize(m_vlist);
if(resizeOnColumn != -1)
{
m_versionWidget->setResizeOn(resizeOnColumn);
}
return QDialog::exec();
}
void VersionSelectDialog::selectRecommended()
{
m_versionWidget->selectRecommended();
}
BaseVersionPtr VersionSelectDialog::selectedVersion() const
{
return m_versionWidget->selectedVersion();
}
void VersionSelectDialog::on_refreshButton_clicked()
{
m_versionWidget->loadList();
}
void VersionSelectDialog::setExactFilter(BaseVersionList::ModelRoles role, QString filter)
{
m_versionWidget->setExactFilter(role, filter);
}
void VersionSelectDialog::setFuzzyFilter(BaseVersionList::ModelRoles role, QString filter)
{
m_versionWidget->setFuzzyFilter(role, filter);
}
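// Illustrative usage sketch (an assumption, not part of the original file):
// a caller would typically construct the dialog with a version list, run it
// modally, and read the selection back, e.g.
//
//   VersionSelectDialog dlg(vlist, QObject::tr("Choose Version"), parent);
//   dlg.setCurrentVersion(current);
//   if (dlg.exec() == QDialog::Accepted) {
//       BaseVersionPtr picked = dlg.selectedVersion();
//       // use picked ...
//   }
//
// Here vlist, current and parent are hypothetical caller-side variables.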
| 1,410 |
363 | <gh_stars>100-1000
// Copyright (c) Improbable Worlds Ltd, All Rights Reserved
#pragma once
#include "SpatialView/OpList/OpList.h"
#include "Templates/UniquePtr.h"
#include <improbable/c_worker.h>
namespace SpatialGDK
{
struct WorkerConnectionOpListData : OpListData
{
struct Deleter
{
void operator()(Worker_OpList* OpListToDelete) const noexcept
{
if (OpListToDelete != nullptr)
{
Worker_OpList_Destroy(OpListToDelete);
}
}
};
TUniquePtr<Worker_OpList, Deleter> OpList;
explicit WorkerConnectionOpListData(Worker_OpList* OpList) : OpList(OpList)
{
}
};
inline OpList GetOpListFromConnection(Worker_Connection* Connection)
{
Worker_OpList* Ops = Worker_Connection_GetOpList(Connection, 0);
return {Ops->ops, Ops->op_count, MakeUnique<WorkerConnectionOpListData>(Ops)};
}
} // namespace SpatialGDK
| 311 |
521 | /*
rdesktop: A Remote Desktop Protocol client.
Miscellaneous protocol constants
Copyright (C) <NAME> 1999-2008
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* Oracle GPL Disclaimer: For the avoidance of doubt, except that if any license choice
* other than GPL or LGPL is available it will apply instead, Oracle elects to use only
* the General Public License version 2 (GPLv2) at this time for any software where
* a choice of GPL license versions is made available with the language indicating
* that GPLv2 or any later version may be used, or where a choice of which version
* of the GPL is applied is otherwise unspecified.
*/
/* TCP port for Remote Desktop Protocol */
#define TCP_PORT_RDP 3389
#define DEFAULT_CODEPAGE "UTF-8"
#define WINDOWS_CODEPAGE "UTF-16LE"
/* ISO PDU codes */
enum ISO_PDU_CODE
{
ISO_PDU_CR = 0xE0, /* Connection Request */
ISO_PDU_CC = 0xD0, /* Connection Confirm */
ISO_PDU_DR = 0x80, /* Disconnect Request */
ISO_PDU_DT = 0xF0, /* Data */
ISO_PDU_ER = 0x70 /* Error */
};
/* RDP protocol negotiating constants */
enum RDP_NEG_TYPE_CODE
{
RDP_NEG_REQ = 1,
RDP_NEG_RSP = 2,
RDP_NEG_FAILURE = 3
};
enum RDP_NEG_REQ_CODE
{
PROTOCOL_RDP = 0,
PROTOCOL_SSL = 1,
PROTOCOL_HYBRID = 2
};
enum RDP_NEG_FAILURE_CODE
{
SSL_REQUIRED_BY_SERVER = 1,
SSL_NOT_ALLOWED_BY_SERVER = 2,
SSL_CERT_NOT_ON_SERVER = 3,
INCONSISTENT_FLAGS = 4,
HYBRID_REQUIRED_BY_SERVER = 5,
SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER = 6
};
/* MCS PDU codes */
enum MCS_PDU_TYPE
{
MCS_EDRQ = 1, /* Erect Domain Request */
MCS_DPUM = 8, /* Disconnect Provider Ultimatum */
MCS_AURQ = 10, /* Attach User Request */
MCS_AUCF = 11, /* Attach User Confirm */
MCS_CJRQ = 14, /* Channel Join Request */
MCS_CJCF = 15, /* Channel Join Confirm */
MCS_SDRQ = 25, /* Send Data Request */
MCS_SDIN = 26 /* Send Data Indication */
};
#define MCS_CONNECT_INITIAL 0x7f65
#define MCS_CONNECT_RESPONSE 0x7f66
#define BER_TAG_BOOLEAN 1
#define BER_TAG_INTEGER 2
#define BER_TAG_OCTET_STRING 4
#define BER_TAG_RESULT 10
#define BER_TAG_SEQUENCE 16
#define BER_TAG_CONSTRUCTED 0x20
#define BER_TAG_CTXT_SPECIFIC 0x80
#define MCS_TAG_DOMAIN_PARAMS 0x30
#define MCS_GLOBAL_CHANNEL 1003
#define MCS_USERCHANNEL_BASE 1001
/* RDP secure transport constants */
#define SEC_RANDOM_SIZE 32
#define SEC_MODULUS_SIZE 64
#define SEC_MAX_MODULUS_SIZE 256
#define SEC_PADDING_SIZE 8
#define SEC_EXPONENT_SIZE 4
#define SEC_CLIENT_RANDOM 0x0001
#define SEC_ENCRYPT 0x0008
#define SEC_LOGON_INFO 0x0040
#define SEC_LICENCE_NEG 0x0080
#define SEC_REDIRECT_ENCRYPT 0x0C00
#define SEC_TAG_SRV_INFO 0x0c01
#define SEC_TAG_SRV_CRYPT 0x0c02
#define SEC_TAG_SRV_CHANNELS 0x0c03
#define SEC_TAG_CLI_INFO 0xc001
#define SEC_TAG_CLI_CRYPT 0xc002
#define SEC_TAG_CLI_CHANNELS 0xc003
#define SEC_TAG_CLI_CLUSTER 0xc004
#define SEC_TAG_PUBKEY 0x0006
#define SEC_TAG_KEYSIG 0x0008
#define SEC_RSA_MAGIC 0x31415352 /* RSA1 */
/* Client cluster constants */
#define SEC_CC_REDIRECTION_SUPPORTED 0x00000001
#define SEC_CC_REDIRECT_SESSIONID_FIELD_VALID 0x00000002
#define SEC_CC_REDIRECTED_SMARTCARD 0x00000040
#define SEC_CC_REDIRECT_VERSION_MASK 0x0000003c
#define SEC_CC_REDIRECT_VERSION_3 0x02
#define SEC_CC_REDIRECT_VERSION_4 0x03
#define SEC_CC_REDIRECT_VERSION_5 0x04
#define SEC_CC_REDIRECT_VERSION_6 0x05
/* RDP licensing constants */
#define LICENCE_TOKEN_SIZE 10
#define LICENCE_HWID_SIZE 20
#define LICENCE_SIGNATURE_SIZE 16
#define LICENCE_TAG_REQUEST 0x01
#define LICENCE_TAG_PLATFORM_CHALLANGE 0x02
#define LICENCE_TAG_NEW_LICENCE 0x03
#define LICENCE_TAG_UPGRADE_LICENCE 0x04
#define LICENCE_TAG_LICENCE_INFO 0x12
#define LICENCE_TAG_NEW_LICENCE_REQUEST 0x13
#define LICENCE_TAG_PLATFORM_CHALLANGE_RESPONSE 0x15
#define LICENCE_TAG_ERROR_ALERT 0xff
#define BB_CLIENT_USER_NAME_BLOB 0x000f
#define BB_CLIENT_MACHINE_NAME_BLOB 0x0010
/* RDP PDU codes */
enum RDP_PDU_TYPE
{
RDP_PDU_DEMAND_ACTIVE = 1,
RDP_PDU_CONFIRM_ACTIVE = 3,
RDP_PDU_REDIRECT = 4, /* Standard Server Redirect */
RDP_PDU_DEACTIVATE = 6,
RDP_PDU_DATA = 7,
RDP_PDU_ENHANCED_REDIRECT = 10 /* Enhanced Server Redirect */
};
enum RDP_DATA_PDU_TYPE
{
RDP_DATA_PDU_UPDATE = 2,
RDP_DATA_PDU_CONTROL = 20,
RDP_DATA_PDU_POINTER = 27,
RDP_DATA_PDU_INPUT = 28,
RDP_DATA_PDU_SYNCHRONISE = 31,
RDP_DATA_PDU_BELL = 34,
RDP_DATA_PDU_CLIENT_WINDOW_STATUS = 35,
RDP_DATA_PDU_LOGON = 38, /* PDUTYPE2_SAVE_SESSION_INFO */
RDP_DATA_PDU_FONT2 = 39,
RDP_DATA_PDU_KEYBOARD_INDICATORS = 41,
RDP_DATA_PDU_DISCONNECT = 47,
RDP_DATA_PDU_AUTORECONNECT_STATUS = 50
};
enum RDP_SAVE_SESSION_PDU_TYPE
{
INFOTYPE_LOGON = 0,
INFOTYPE_LOGON_LONG = 1,
INFOTYPE_LOGON_PLAINNOTIFY = 2,
INFOTYPE_LOGON_EXTENDED_INF = 3
};
enum RDP_LOGON_INFO_EXTENDED_TYPE
{
LOGON_EX_AUTORECONNECTCOOKIE = 1,
LOGON_EX_LOGONERRORS = 2
};
enum RDP_CONTROL_PDU_TYPE
{
RDP_CTL_REQUEST_CONTROL = 1,
RDP_CTL_GRANT_CONTROL = 2,
RDP_CTL_DETACH = 3,
RDP_CTL_COOPERATE = 4
};
enum RDP_UPDATE_PDU_TYPE
{
RDP_UPDATE_ORDERS = 0,
RDP_UPDATE_BITMAP = 1,
RDP_UPDATE_PALETTE = 2,
RDP_UPDATE_SYNCHRONIZE = 3
};
enum RDP_POINTER_PDU_TYPE
{
RDP_POINTER_SYSTEM = 1,
RDP_POINTER_MOVE = 3,
RDP_POINTER_COLOR = 6,
RDP_POINTER_CACHED = 7,
RDP_POINTER_NEW = 8
};
enum RDP_SYSTEM_POINTER_TYPE
{
RDP_NULL_POINTER = 0,
RDP_DEFAULT_POINTER = 0x7F00
};
enum RDP_INPUT_DEVICE
{
RDP_INPUT_SYNCHRONIZE = 0,
RDP_INPUT_CODEPOINT = 1,
RDP_INPUT_VIRTKEY = 2,
RDP_INPUT_SCANCODE = 4,
RDP_INPUT_MOUSE = 0x8001
};
/* Device flags */
#define KBD_FLAG_RIGHT 0x0001
#define KBD_FLAG_EXT 0x0100
#define KBD_FLAG_EXT2 0x0200
#define KBD_FLAG_QUIET 0x1000
#define KBD_FLAG_DOWN 0x4000
#define KBD_FLAG_UP 0x8000
/* These are for synchronization; not for keystrokes */
#define KBD_FLAG_SCROLL 0x0001
#define KBD_FLAG_NUMLOCK 0x0002
#define KBD_FLAG_CAPITAL 0x0004
/* See T.128 */
#define RDP_KEYPRESS 0
#define RDP_KEYRELEASE (KBD_FLAG_DOWN | KBD_FLAG_UP)
#define MOUSE_FLAG_MOVE 0x0800
#define MOUSE_FLAG_BUTTON1 0x1000
#define MOUSE_FLAG_BUTTON2 0x2000
#define MOUSE_FLAG_BUTTON3 0x4000
#define MOUSE_FLAG_BUTTON4 0x0280
#define MOUSE_FLAG_BUTTON5 0x0380
#define MOUSE_FLAG_DOWN 0x8000
/* Raster operation masks */
#define ROP2_S(rop3) (rop3 & 0xf)
#define ROP2_P(rop3) ((rop3 & 0x3) | ((rop3 & 0x30) >> 2))
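/* Worked example (illustrative, not from the original source): the ROP3
 * byte code 0xCC is SRCCOPY, so ROP2_S(0xCC) = 0xCC & 0xf = 0xC = ROP2_COPY.
 * Likewise, for the pattern code 0xF0 (PATCOPY), ROP2_P(0xF0) =
 * (0xF0 & 0x3) | ((0xF0 & 0x30) >> 2) = 0x0 | 0xC = 0xC = ROP2_COPY. */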
#define ROP2_COPY 0xc
#define ROP2_XOR 0x6
#define ROP2_AND 0x8
#define ROP2_NXOR 0x9
#define ROP2_OR 0xe
#define MIX_TRANSPARENT 0
#define MIX_OPAQUE 1
#define TEXT2_VERTICAL 0x04
#define TEXT2_IMPLICIT_X 0x20
#define ALTERNATE 1
#define WINDING 2
/* RDP bitmap cache (version 2) constants */
#define BMPCACHE2_C0_CELLS 0x78
#define BMPCACHE2_C1_CELLS 0x78
#define BMPCACHE2_C2_CELLS 0x150
#define BMPCACHE2_NUM_PSTCELLS 0x9f6
#define PDU_FLAG_FIRST 0x01
#define PDU_FLAG_LAST 0x02
/* RDP capabilities */
#define RDP_CAPSET_GENERAL 1 /* Maps to generalCapabilitySet in T.128 page 138 */
#define RDP_CAPLEN_GENERAL 0x18
#define OS_MAJOR_TYPE_UNIX 4
#define OS_MINOR_TYPE_XSERVER 7
#define RDP_CAPSET_BITMAP 2
#define RDP_CAPLEN_BITMAP 0x1C
#define RDP_CAPSET_ORDER 3
#define RDP_CAPLEN_ORDER 0x58
#define ORDER_CAP_NEGOTIATE 2
#define ORDER_CAP_NOSUPPORT 4
#define RDP_CAPSET_BMPCACHE 4
#define RDP_CAPLEN_BMPCACHE 0x28
#define RDP_CAPSET_CONTROL 5
#define RDP_CAPLEN_CONTROL 0x0C
#define RDP_CAPSET_ACTIVATE 7
#define RDP_CAPLEN_ACTIVATE 0x0C
#define RDP_CAPSET_POINTER 8
#define RDP_CAPLEN_POINTER 0x08
#define RDP_CAPLEN_NEWPOINTER 0x0a
#define RDP_CAPSET_SHARE 9
#define RDP_CAPLEN_SHARE 0x08
#define RDP_CAPSET_COLCACHE 10
#define RDP_CAPLEN_COLCACHE 0x08
#define RDP_CAPSET_BRUSHCACHE 15
#define RDP_CAPLEN_BRUSHCACHE 0x08
#define RDP_CAPSET_BMPCACHE2 19
#define RDP_CAPLEN_BMPCACHE2 0x28
#define BMPCACHE2_FLAG_PERSIST ((uint32)1<<31)
#define RDP_SOURCE "MSTSC"
/* Logon flags */
#define RDP_INFO_MOUSE 0x00000001
#define RDP_INFO_DISABLECTRLALTDEL 0x00000002
#define RDP_INFO_AUTOLOGON 0x00000008
#define RDP_INFO_UNICODE 0x00000010
#define RDP_INFO_MAXIMIZESHELL 0x00000020
#define RDP_INFO_COMPRESSION 0x00000080 /* mppc compression with 8kB history buffer */
#define RDP_INFO_ENABLEWINDOWSKEY 0x00000100
#define RDP_INFO_COMPRESSION2 0x00000200 /* rdp5 mppc compression with 64kB history buffer */
#define RDP_INFO_REMOTE_CONSOLE_AUDIO 0x00002000
#define RDP_INFO_PASSWORD_IS_SC_PIN 0x00040000
#define RDP5_DISABLE_NOTHING 0x00
#define RDP5_NO_WALLPAPER 0x01
#define RDP5_NO_FULLWINDOWDRAG 0x02
#define RDP5_NO_MENUANIMATIONS 0x04
#define RDP5_NO_THEMING 0x08
#define RDP5_NO_CURSOR_SHADOW 0x20
#define RDP5_NO_CURSORSETTINGS 0x40 /* disables cursor blinking */
/* compression types */
#define RDP_MPPC_BIG 0x01
#define RDP_MPPC_COMPRESSED 0x20
#define RDP_MPPC_RESET 0x40
#define RDP_MPPC_FLUSH 0x80
#define RDP_MPPC_DICT_SIZE 65536
#define RDP5_COMPRESSED 0x80
/* Keymap flags */
#define MapRightShiftMask (1<<0)
#define MapLeftShiftMask (1<<1)
#define MapShiftMask (MapRightShiftMask | MapLeftShiftMask)
#define MapRightAltMask (1<<2)
#define MapLeftAltMask (1<<3)
#define MapAltGrMask MapRightAltMask
#define MapRightCtrlMask (1<<4)
#define MapLeftCtrlMask (1<<5)
#define MapCtrlMask (MapRightCtrlMask | MapLeftCtrlMask)
#define MapRightWinMask (1<<6)
#define MapLeftWinMask (1<<7)
#define MapWinMask (MapRightWinMask | MapLeftWinMask)
#define MapNumLockMask (1<<8)
#define MapCapsLockMask (1<<9)
#define MapLocalStateMask (1<<10)
#define MapInhibitMask (1<<11)
#define MASK_ADD_BITS(var, mask) (var |= mask)
#define MASK_REMOVE_BITS(var, mask) (var &= ~mask)
#define MASK_HAS_BITS(var, mask) ((var & mask)>0)
#define MASK_CHANGE_BIT(var, mask, active) (var = ((var & ~mask) | (active ? mask : 0)))
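/* Usage sketch (illustrative, not from the original source): given a
 * modifier state variable, these macros compose as
 *   uint32 state = 0;
 *   MASK_ADD_BITS(state, MapCapsLockMask);              // set caps lock
 *   MASK_CHANGE_BIT(state, MapNumLockMask, num_active); // follow num_active
 *   if (MASK_HAS_BITS(state, MapShiftMask)) { ... }     // either shift held
 * where state and num_active are hypothetical caller-side variables. */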
/* Clipboard constants, "borrowed" from GCC system headers in
the w32 cross compiler
this is the CF_ set when WINVER is 0x0400 */
#ifndef CF_TEXT
#define CF_TEXT 1
#define CF_BITMAP 2
#define CF_METAFILEPICT 3
#define CF_SYLK 4
#define CF_DIF 5
#define CF_TIFF 6
#define CF_OEMTEXT 7
#define CF_DIB 8
#define CF_PALETTE 9
#define CF_PENDATA 10
#define CF_RIFF 11
#define CF_WAVE 12
#define CF_UNICODETEXT 13
#define CF_ENHMETAFILE 14
#define CF_HDROP 15
#define CF_LOCALE 16
#define CF_MAX 17
#define CF_OWNERDISPLAY 128
#define CF_DSPTEXT 129
#define CF_DSPBITMAP 130
#define CF_DSPMETAFILEPICT 131
#define CF_DSPENHMETAFILE 142
#define CF_PRIVATEFIRST 512
#define CF_PRIVATELAST 767
#define CF_GDIOBJFIRST 768
#define CF_GDIOBJLAST 1023
#endif
/* Sound format constants */
#define WAVE_FORMAT_PCM 1
#define WAVE_FORMAT_ADPCM 2
#define WAVE_FORMAT_ALAW 6
#define WAVE_FORMAT_MULAW 7
/* Virtual channel options */
#define CHANNEL_OPTION_INITIALIZED 0x80000000
#define CHANNEL_OPTION_ENCRYPT_RDP 0x40000000
#define CHANNEL_OPTION_COMPRESS_RDP 0x00800000
#define CHANNEL_OPTION_SHOW_PROTOCOL 0x00200000
/* NT status codes for RDPDR */
#define RD_STATUS_SUCCESS 0x00000000
#define RD_STATUS_NOT_IMPLEMENTED 0x00000001
#define RD_STATUS_PENDING 0x00000103
#define RD_STATUS_NO_MORE_FILES 0x80000006
#define RD_STATUS_DEVICE_PAPER_EMPTY 0x8000000e
#define RD_STATUS_DEVICE_POWERED_OFF 0x8000000f
#define RD_STATUS_DEVICE_OFF_LINE 0x80000010
#define RD_STATUS_DEVICE_BUSY 0x80000011
#define RD_STATUS_INVALID_HANDLE 0xc0000008
#define RD_STATUS_INVALID_PARAMETER 0xc000000d
#define RD_STATUS_NO_SUCH_FILE 0xc000000f
#define RD_STATUS_INVALID_DEVICE_REQUEST 0xc0000010
#define RD_STATUS_ACCESS_DENIED 0xc0000022
#define RD_STATUS_OBJECT_NAME_COLLISION 0xc0000035
#define RD_STATUS_DISK_FULL 0xc000007f
#define RD_STATUS_FILE_IS_A_DIRECTORY 0xc00000ba
#define RD_STATUS_NOT_SUPPORTED 0xc00000bb
#define RD_STATUS_TIMEOUT 0xc0000102
#define RD_STATUS_NOTIFY_ENUM_DIR 0xc000010c
#define RD_STATUS_CANCELLED 0xc0000120
#define RD_STATUS_DIRECTORY_NOT_EMPTY 0xc0000101
/* RDPSND constants */
#define TSSNDCAPS_ALIVE 0x00000001
#define TSSNDCAPS_VOLUME 0x00000002
/* RDPDR constants */
#define RDPDR_CTYP_CORE 0x4472
#define RDPDR_CTYP_PRN 0x5052
#define PAKID_CORE_SERVER_ANNOUNCE 0x496e
#define PAKID_CORE_CLIENTID_CONFIRM 0x4343
#define PAKID_CORE_CLIENT_NAME 0x434e
#define PAKID_CORE_DEVICE_LIST_ANNOUNCE 0x4441
#define PAKID_CORE_DEVICE_REPLY 0x6472
#define PAKID_CORE_DEVICE_IOREQUEST 0x4952
#define PAKID_CORE_DEVICE_IOCOMPLETION 0x4943
#define PAKID_CORE_SERVER_CAPABILITY 0x5350
#define PAKID_CORE_CLIENT_CAPABILITY 0x4350
#define PAKID_CORE_DEVICELIST_REMOVE 0x444d
#define PAKID_PRN_CACHE_DATA 0x5043
#define PAKID_CORE_USER_LOGGEDON 0x554c
#define PAKID_PRN_USING_XPS 0x5543
#define RDPDR_MAX_DEVICES 0x10
#define DEVICE_TYPE_SERIAL 0x01
#define DEVICE_TYPE_PARALLEL 0x02
#define DEVICE_TYPE_PRINTER 0x04
#define DEVICE_TYPE_DISK 0x08
#define DEVICE_TYPE_SCARD 0x20
#define FILE_DIRECTORY_FILE 0x00000001
#define FILE_NON_DIRECTORY_FILE 0x00000040
#define FILE_COMPLETE_IF_OPLOCKED 0x00000100
#define FILE_DELETE_ON_CLOSE 0x00001000
#define FILE_OPEN_FOR_FREE_SPACE_QUERY 0x00800000
/* [MS-RDPBCGR], TS_BITMAP_DATA, flags */
#define BITMAP_COMPRESSION 0x0001
#define NO_BITMAP_COMPRESSION_HDR 0x0400
/* RDP5 disconnect PDU */
#define exDiscReasonNoInfo 0x0000
#define exDiscReasonAPIInitiatedDisconnect 0x0001
#define exDiscReasonAPIInitiatedLogoff 0x0002
#define exDiscReasonServerIdleTimeout 0x0003
#define exDiscReasonServerLogonTimeout 0x0004
#define exDiscReasonReplacedByOtherConnection 0x0005
#define exDiscReasonOutOfMemory 0x0006
#define exDiscReasonServerDeniedConnection 0x0007
#define exDiscReasonServerDeniedConnectionFips 0x0008
#define exDiscReasonServerInsufficientPrivileges 0x0009
#define exDiscReasonServerFreshCredentialsRequired 0x000a
#define exDiscReasonRPCInitiatedDisconnectByUser 0x000b
#define exDiscReasonByUser 0x000c
#define exDiscReasonLicenseInternal 0x0100
#define exDiscReasonLicenseNoLicenseServer 0x0101
#define exDiscReasonLicenseNoLicense 0x0102
#define exDiscReasonLicenseErrClientMsg 0x0103
#define exDiscReasonLicenseHwidDoesntMatchLicense 0x0104
#define exDiscReasonLicenseErrClientLicense 0x0105
#define exDiscReasonLicenseCantFinishProtocol 0x0106
#define exDiscReasonLicenseClientEndedProtocol 0x0107
#define exDiscReasonLicenseErrClientEncryption 0x0108
#define exDiscReasonLicenseCantUpgradeLicense 0x0109
#define exDiscReasonLicenseNoRemoteConnections 0x010a
/* SeamlessRDP constants */
#define SEAMLESSRDP_NOTYETMAPPED -1
#define SEAMLESSRDP_NORMAL 0
#define SEAMLESSRDP_MINIMIZED 1
#define SEAMLESSRDP_MAXIMIZED 2
#define SEAMLESSRDP_POSITION_TIMER 200000
#define SEAMLESSRDP_CREATE_MODAL 0x0001
#define SEAMLESSRDP_CREATE_TOPMOST 0x0002
#define SEAMLESSRDP_HELLO_RECONNECT 0x0001
#define SEAMLESSRDP_HELLO_HIDDEN 0x0002
/* Smartcard constants */
#define SCARD_LOCK_TCP 0
#define SCARD_LOCK_SEC 1
#define SCARD_LOCK_CHANNEL 2
#define SCARD_LOCK_RDPDR 3
#define SCARD_LOCK_LAST 4
/* redirect flags, from [MS-RDPBCGR] 2.2.13.1 */
enum RDP_PDU_REDIRECT_FLAGS
{
PDU_REDIRECT_HAS_IP = 0x1,
PDU_REDIRECT_HAS_LOAD_BALANCE_INFO = 0x2,
PDU_REDIRECT_HAS_USERNAME = 0x4,
PDU_REDIRECT_HAS_DOMAIN = 0x8,
PDU_REDIRECT_HAS_PASSWORD = 0x10,
PDU_REDIRECT_DONT_STORE_USERNAME = 0x20,
PDU_REDIRECT_USE_SMARTCARD = 0x40,
PDU_REDIRECT_INFORMATIONAL = 0x80,
PDU_REDIRECT_HAS_TARGET_FQDN = 0x100,
PDU_REDIRECT_HAS_TARGET_NETBIOS = 0x200,
PDU_REDIRECT_HAS_TARGET_IP_ARRAY = 0x800
};
| 7,927 |
12,278 | // Boost Lambda Library -- member_ptr.hpp ---------------------
// Copyright (C) 1999, 2000 <NAME> (<EMAIL>)
// Copyright (C) 2000 <NAME> (<EMAIL>)
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// For more information, see www.boost.org
// --------------------------------------------------------------------------
#if !defined(BOOST_LAMBDA_MEMBER_PTR_HPP)
#define BOOST_LAMBDA_MEMBER_PTR_HPP
namespace boost {
namespace lambda {
class member_pointer_action {};
namespace detail {
// the boost type_traits member_pointer traits are not enough,
// need to know more details.
template<class T>
struct member_pointer {
typedef typename boost::add_reference<T>::type type;
typedef detail::unspecified class_type;
typedef detail::unspecified qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = false);
};
template<class T, class U>
struct member_pointer<T U::*> {
typedef typename boost::add_reference<T>::type type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = true);
BOOST_STATIC_CONSTANT(bool, is_function_member = false);
};
template<class T, class U>
struct member_pointer<const T U::*> {
typedef typename boost::add_reference<const T>::type type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = true);
BOOST_STATIC_CONSTANT(bool, is_function_member = false);
};
template<class T, class U>
struct member_pointer<volatile T U::*> {
typedef typename boost::add_reference<volatile T>::type type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = true);
BOOST_STATIC_CONSTANT(bool, is_function_member = false);
};
template<class T, class U>
struct member_pointer<const volatile T U::*> {
typedef typename boost::add_reference<const volatile T>::type type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = true);
BOOST_STATIC_CONSTANT(bool, is_function_member = false);
};
// -- nonconst member functions --
template<class T, class U>
struct member_pointer<T (U::*)()> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1>
struct member_pointer<T (U::*)(A1)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2>
struct member_pointer<T (U::*)(A1, A2)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3>
struct member_pointer<T (U::*)(A1, A2, A3)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4>
struct member_pointer<T (U::*)(A1, A2, A3, A4)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8, class A9>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8, A9)> {
typedef T type;
typedef U class_type;
typedef U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
// -- const member functions --
template<class T, class U>
struct member_pointer<T (U::*)() const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1>
struct member_pointer<T (U::*)(A1) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2>
struct member_pointer<T (U::*)(A1, A2) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3>
struct member_pointer<T (U::*)(A1, A2, A3) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4>
struct member_pointer<T (U::*)(A1, A2, A3, A4) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8, class A9>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8, A9) const> {
typedef T type;
typedef U class_type;
typedef const U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
// -- volatile --
template<class T, class U>
struct member_pointer<T (U::*)() volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1>
struct member_pointer<T (U::*)(A1) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2>
struct member_pointer<T (U::*)(A1, A2) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3>
struct member_pointer<T (U::*)(A1, A2, A3) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4>
struct member_pointer<T (U::*)(A1, A2, A3, A4) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8, class A9>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8, A9) volatile> {
typedef T type;
typedef U class_type;
typedef volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
// -- const volatile
template<class T, class U>
struct member_pointer<T (U::*)() const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1>
struct member_pointer<T (U::*)(A1) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2>
struct member_pointer<T (U::*)(A1, A2) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3>
struct member_pointer<T (U::*)(A1, A2, A3) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4>
struct member_pointer<T (U::*)(A1, A2, A3, A4) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
template<class T, class U, class A1, class A2, class A3, class A4, class A5,
class A6, class A7, class A8, class A9>
struct member_pointer<T (U::*)(A1, A2, A3, A4, A5, A6, A7, A8, A9) const volatile> {
typedef T type;
typedef U class_type;
typedef const volatile U qualified_class_type;
BOOST_STATIC_CONSTANT(bool, is_data_member = false);
BOOST_STATIC_CONSTANT(bool, is_function_member = true);
};
} // detail
namespace detail {
// this class holds a pointer to a member function and the object.
// when called, it just calls the member function with the parameters
// provided
// It would have been possible to use existing lambda_functors to represent
// a bound member function like this, but to have a separate template is
// safer, since now this functor doesn't mix and match with lambda_functors
// the only thing you can do with this is to call it
// note that the previously instantiated classes
// (other_action<member_pointer_action> and member_pointer_action_helper)
// guarantee that A and B are such types that, for objects a and b of the
// corresponding types, a->*b leads to the built-in ->* being called. So
// types that would end up in a call to a user-defined ->* do not create a
// member_pointer_caller object.
template<class RET, class A, class B>
class member_pointer_caller {
A a; B b;
public:
member_pointer_caller(const A& aa, const B& bb) : a(aa), b(bb) {}
RET operator()() const { return (a->*b)(); }
template<class A1>
RET operator()(const A1& a1) const { return (a->*b)(a1); }
template<class A1, class A2>
RET operator()(const A1& a1, const A2& a2) const { return (a->*b)(a1, a2); }
template<class A1, class A2, class A3>
RET operator()(const A1& a1, const A2& a2, const A3& a3) const {
return (a->*b)(a1, a2, a3);
}
template<class A1, class A2, class A3, class A4>
RET operator()(const A1& a1, const A2& a2, const A3& a3,
const A4& a4) const {
return (a->*b)(a1, a2, a3, a4);
}
template<class A1, class A2, class A3, class A4, class A5>
RET operator()(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
const A5& a5) const {
return (a->*b)(a1, a2, a3, a4, a5);
}
template<class A1, class A2, class A3, class A4, class A5, class A6>
RET operator()(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
const A5& a5, const A6& a6) const {
return (a->*b)(a1, a2, a3, a4, a5, a6);
}
template<class A1, class A2, class A3, class A4, class A5, class A6,
class A7>
RET operator()(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
const A5& a5, const A6& a6, const A7& a7) const {
return (a->*b)(a1, a2, a3, a4, a5, a6, a7);
}
template<class A1, class A2, class A3, class A4, class A5, class A6,
class A7, class A8>
RET operator()(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
const A5& a5, const A6& a6, const A7& a7,
const A8& a8) const {
return (a->*b)(a1, a2, a3, a4, a5, a6, a7, a8);
}
template<class A1, class A2, class A3, class A4, class A5, class A6,
class A7, class A8, class A9>
RET operator()(const A1& a1, const A2& a2, const A3& a3, const A4& a4,
const A5& a5, const A6& a6, const A7& a7,
const A8& a8, const A9& a9) const {
return (a->*b)(a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
};
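// Illustrative sketch (an assumption, not part of the original header): for
//   struct S { int add(int x, int y) { return x + y; } };
//   S s; S* p = &s;
// evaluating p->*(&S::add) through the machinery below yields a
// member_pointer_caller<int, S*, int (S::*)(int, int)> holding p and the
// member pointer; calling it with (1, 2) performs (p->*&S::add)(1, 2) == 3.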
// helper templates for return type deduction and action classes
// different cases for data member, function member, neither
// true-true case
template <bool Is_data_member, bool Is_function_member>
struct member_pointer_action_helper;
// cannot be both, no body provided
// data member case
// this means that B is a data member and A is a pointer type,
// so either built-in ->* should be called, or there is an error
template <>
struct member_pointer_action_helper<true, false> {
public:
template<class RET, class A, class B>
static RET apply(A& a, B& b) {
return a->*b;
}
template<class A, class B>
struct return_type {
private:
typedef typename detail::remove_reference_and_cv<B>::type plainB;
typedef typename detail::member_pointer<plainB>::type type0;
// we remove the reference now, as we may have to add cv:s
typedef typename boost::remove_reference<type0>::type type1;
// A is a reference to pointer
// remove the top level cv qualifiers and reference
typedef typename
detail::remove_reference_and_cv<A>::type non_ref_A;
// A is a pointer type, so take the type pointed to
typedef typename ::boost::remove_pointer<non_ref_A>::type non_pointer_A;
public:
// For non-reference types, we must add const and/or volatile if
// the pointer type has these qualifiers
// If the member is a reference, these do not have any effect
// (cv T == T if T is a reference type)
typedef typename detail::IF<
::boost::is_const<non_pointer_A>::value,
typename ::boost::add_const<type1>::type,
type1
>::RET type2;
typedef typename detail::IF<
::boost::is_volatile<non_pointer_A>::value,
typename ::boost::add_volatile<type2>::type,
type2
>::RET type3;
// add reference back
typedef typename ::boost::add_reference<type3>::type type;
};
};
// neither case
template <>
struct member_pointer_action_helper<false, false> {
public:
template<class RET, class A, class B>
static RET apply(A& a, B& b) {
// not a built in member pointer operator, just call ->*
return a->*b;
}
  // an overloaded member pointer operator; the user should have specified
  // the return type
// At this point we know that there is no matching specialization for
// return_type_2, so try return_type_2_plain
template<class A, class B>
struct return_type {
typedef typename plain_return_type_2<
other_action<member_pointer_action>, A, B
>::type type;
};
};
// member pointer function case
// This is a built in ->* call for a member function,
// the only thing that you can do with that, is to give it some arguments
// note, it is guaranteed that A is a pointer type, and thus it cannot
// be a call to overloaded ->*
template <>
struct member_pointer_action_helper<false, true> {
public:
template<class RET, class A, class B>
static RET apply(A& a, B& b) {
typedef typename ::boost::remove_cv<B>::type plainB;
typedef typename detail::member_pointer<plainB>::type ret_t;
typedef typename ::boost::remove_cv<A>::type plainA;
    // we always strip cv qualifiers so that
    // the two routes (calling and type deduction)
    // give the same results (the const does not make any functional
    // difference)
return detail::member_pointer_caller<ret_t, plainA, plainB>(a, b);
}
template<class A, class B>
struct return_type {
typedef typename detail::remove_reference_and_cv<B>::type plainB;
typedef typename detail::member_pointer<plainB>::type ret_t;
typedef typename detail::remove_reference_and_cv<A>::type plainA;
typedef detail::member_pointer_caller<ret_t, plainA, plainB> type;
};
};
} // detail
template<> class other_action<member_pointer_action> {
public:
template<class RET, class A, class B>
static RET apply(A& a, B& b) {
typedef typename
::boost::remove_cv<B>::type plainB;
return detail::member_pointer_action_helper<
boost::is_pointer<A>::value &&
detail::member_pointer<plainB>::is_data_member,
boost::is_pointer<A>::value &&
detail::member_pointer<plainB>::is_function_member
>::template apply<RET>(a, b);
}
};
// return type deduction --
// If the right argument is a pointer to data member,
// and the left argument is of compatible pointer to class type
// return type is a reference to the data member type
// if right argument is a pointer to a member function, and the left
// argument is of a compatible type, the return type is a
// member_pointer_caller (see above)
// Otherwise, return type deduction fails. There is either an error,
// or the user is trying to call an overloaded ->*
// In such a case either ret<> must be used, or a return_type_2 user
// defined specialization must be provided
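// Illustrative sketch (hypothetical types; assumes the usual boost::lambda
// placeholder _1): the two deduction branches behave as follows.
//
//   struct S { int x; int f(int) const; };
//   S* p = /* ... */;
//   (_1 ->* &S::x)(p) = 7;    // data member: deduced return type is int&
//   ((_1 ->* &S::f)(p))(42);  // member function: yields a member_pointer_caller,
//                             // which is then invoked with the arguments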
template<class A, class B>
struct return_type_2<other_action<member_pointer_action>, A, B> {
private:
typedef typename
detail::remove_reference_and_cv<B>::type plainB;
public:
typedef typename
detail::member_pointer_action_helper<
detail::member_pointer<plainB>::is_data_member,
detail::member_pointer<plainB>::is_function_member
>::template return_type<A, B>::type type;
};
// this is the way the generic lambda_functor_base functions instantiate
// return type deduction. We turn it into return_type_2, so that the
// user can provide specializations on that level.
template<class Args>
struct return_type_N<other_action<member_pointer_action>, Args> {
typedef typename boost::tuples::element<0, Args>::type A;
typedef typename boost::tuples::element<1, Args>::type B;
typedef typename
return_type_2<other_action<member_pointer_action>,
typename boost::remove_reference<A>::type,
typename boost::remove_reference<B>::type
>::type type;
};
template<class Arg1, class Arg2>
inline const
lambda_functor<
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<lambda_functor<Arg1>, typename const_copy_argument<Arg2>::type>
>
>
operator->*(const lambda_functor<Arg1>& a1, const Arg2& a2)
{
return
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<lambda_functor<Arg1>, typename const_copy_argument<Arg2>::type>
>
(tuple<lambda_functor<Arg1>,
typename const_copy_argument<Arg2>::type>(a1, a2));
}
template<class Arg1, class Arg2>
inline const
lambda_functor<
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<lambda_functor<Arg1>, lambda_functor<Arg2> >
>
>
operator->*(const lambda_functor<Arg1>& a1, const lambda_functor<Arg2>& a2)
{
return
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<lambda_functor<Arg1>, lambda_functor<Arg2> >
>
(tuple<lambda_functor<Arg1>, lambda_functor<Arg2> >(a1, a2));
}
template<class Arg1, class Arg2>
inline const
lambda_functor<
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<typename const_copy_argument<Arg1>::type, lambda_functor<Arg2> >
>
>
operator->*(const Arg1& a1, const lambda_functor<Arg2>& a2)
{
return
lambda_functor_base<
action<2, other_action<member_pointer_action> >,
tuple<typename const_copy_argument<Arg1>::type, lambda_functor<Arg2> >
>
(tuple<typename const_copy_argument<Arg1>::type,
lambda_functor<Arg2> >(a1, a2));
}
} // namespace lambda
} // namespace boost
#endif
| 10,281 |
575 | <reponame>Ron423c/chromium
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/shelf/drag_handle.h"
#include "ash/accessibility/accessibility_controller_impl.h"
#include "ash/shelf/shelf_layout_manager.h"
#include "ash/shelf/shelf_widget.h"
#include "ash/shell.h"
#include "ash/test/ash_test_base.h"
#include "ash/test/test_widget_builder.h"
#include "ash/wm/tablet_mode/tablet_mode_controller.h"
namespace ash {
namespace {
enum class TestAccessibilityFeature {
kTabletModeShelfNavigationButtons,
kSpokenFeedback,
kAutoclick,
kSwitchAccess
};
// Tests drag handle functionality with a number of accessibility settings
// enabled.
class DragHandleTest
: public AshTestBase,
public ::testing::WithParamInterface<TestAccessibilityFeature> {
public:
DragHandleTest() = default;
~DragHandleTest() override = default;
const DragHandle* drag_handle() const {
return GetPrimaryShelf()->shelf_widget()->GetDragHandle();
}
void ClickDragHandle() {
gfx::Point center = drag_handle()->GetBoundsInScreen().CenterPoint();
GetEventGenerator()->MoveMouseTo(center);
GetEventGenerator()->ClickLeftButton();
}
void SetTestA11yFeatureEnabled(bool enabled) {
switch (GetParam()) {
case TestAccessibilityFeature::kTabletModeShelfNavigationButtons:
Shell::Get()
->accessibility_controller()
->SetTabletModeShelfNavigationButtonsEnabled(enabled);
break;
case TestAccessibilityFeature::kSpokenFeedback:
Shell::Get()->accessibility_controller()->SetSpokenFeedbackEnabled(
enabled, A11Y_NOTIFICATION_NONE);
break;
case TestAccessibilityFeature::kAutoclick:
Shell::Get()->accessibility_controller()->autoclick().SetEnabled(
enabled);
break;
case TestAccessibilityFeature::kSwitchAccess:
Shell::Get()->accessibility_controller()->switch_access().SetEnabled(
enabled);
Shell::Get()
->accessibility_controller()
->DisableSwitchAccessDisableConfirmationDialogTesting();
break;
}
}
};
} // namespace
INSTANTIATE_TEST_SUITE_P(
All,
DragHandleTest,
::testing::Values(
TestAccessibilityFeature::kTabletModeShelfNavigationButtons,
TestAccessibilityFeature::kSpokenFeedback,
TestAccessibilityFeature::kAutoclick,
TestAccessibilityFeature::kSwitchAccess));
TEST_P(DragHandleTest, AccessibilityFeaturesEnabled) {
Shell::Get()->tablet_mode_controller()->SetEnabledForTest(true);
UpdateDisplay("800x800");
// Create a widget to transition to the in-app shelf.
TestWidgetBuilder()
.SetTestWidgetDelegate()
.SetBounds(gfx::Rect(0, 0, 800, 800))
.BuildOwnedByNativeWidget();
EXPECT_TRUE(drag_handle()->GetVisible());
// By default, drag handle should not function as a button.
EXPECT_FALSE(drag_handle()->GetEnabled());
  // If an a11y feature is enabled, the drag handle should behave like a
  // button.
SetTestA11yFeatureEnabled(true /*enabled*/);
EXPECT_TRUE(drag_handle()->GetEnabled());
EXPECT_EQ(HotseatState::kHidden,
GetPrimaryShelf()->shelf_layout_manager()->hotseat_state());
  // Clicking on the drag handle should extend the hotseat.
ClickDragHandle();
EXPECT_EQ(HotseatState::kExtended,
GetPrimaryShelf()->shelf_layout_manager()->hotseat_state());
  // Clicking again should hide the hotseat.
ClickDragHandle();
EXPECT_EQ(HotseatState::kHidden,
GetPrimaryShelf()->shelf_layout_manager()->hotseat_state());
  // Exiting the a11y feature should disable the drag handle.
SetTestA11yFeatureEnabled(false /*enabled*/);
EXPECT_FALSE(drag_handle()->GetEnabled());
}
} // namespace ash
| 1,385 |
633 | <gh_stars>100-1000
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_guides:
#
# Colorbars and legends
# =====================
#
# Proplot includes some useful improvements to the matplotlib API that make
# working with colorbars and legends :ref:`easier <why_colorbars_legends>`.
# Notable features include "inset" colorbars, "outer" legends,
# on-the-fly colorbars and legends, colorbars built from artists,
# and row-major and centered-row legends.
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_guides_loc:
#
# Outer and inset locations
# -------------------------
#
# Matplotlib supports drawing "inset" legends and "outer" colorbars using the `loc`
# and `location` keyword arguments. However, "outer" legends are only posssible using
# the somewhat counterintuitive `bbox_to_anchor` keyword (see `here
# <https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#legend-location>`__)
# and "inset" colorbars are not possible without manually creating and positioning
# the associated axes. Proplot tries to improve this behavior:
#
# * `proplot.axes.Axes.legend` can draw both "inset" legends when you request an inset
# location (e.g., ``loc='upper right'`` or the shorthand ``loc='ur'``) and "outer"
# legends along a subplot edge when you request a :ref:`side location <legend_table>`
# (e.g., ``loc='right'`` or the shorthand ``loc='r'``). If you draw multiple legends
# or colorbars on one side, they are "stacked" on top of each other. Unlike using
# `bbox_to_anchor`, the "outer" legend position is adjusted automatically when the
# :ref:`tight layout algorithm <ug_tight>` is active.
# * Proplot adds the axes command `proplot.axes.Axes.colorbar`,
# analogous to `proplot.axes.Axes.legend` and equivalent to
# calling `proplot.figure.Figure.colorbar` with an `ax` keyword.
# `~proplot.axes.Axes.colorbar` can draw both "outer" colorbars when you request
# a side location (e.g., ``loc='right'`` or the shorthand ``loc='r'``) and "inset"
# colorbars when you request an :ref:`inset location <colorbar_table>`
# (e.g., ``loc='upper right'`` or the shorthand ``loc='ur'``). Inset
# colorbars have optional background "frames" that can be configured
# with various `~proplot.axes.Axes.colorbar` keywords.
# * Outer colorbars and legends can be aligned using the `align` keyword.
# `~proplot.axes.Axes.colorbar` and `~proplot.axes.Axes.legend` also both accept
# `space` and `pad` keywords. `space` controls the absolute separation of the
# "outer" colorbar or legend from the parent subplot edge and `pad` controls the
# :ref:`tight layout <ug_tight>` padding relative to the subplot's tick and axis labels
# (or, for "inset" locations, the padding between the subplot edge and the inset frame).
# The below example shows a variety of arrangements of "outer" and "inset"
# colorbars and legends.
#
# .. important::
#
# Unlike matplotlib, proplot adds "outer" colorbars and legends by allocating
# new rows and columns in the `~proplot.gridspec.GridSpec` rather than
# "stealing" space from the parent subplot (note that subsequently indexing
# the `~proplot.gridspec.GridSpec` will ignore the slots allocated for
# colorbars and legends). This approach means that "outer" colorbars and
# legends :ref:`do not affect subplot aspect ratios <ug_autosize>`
# and :ref:`do not affect subplot spacing <ug_tight>`, which lets
# proplot avoid relying on complicated `"constrained layout" algorithms
# <https://matplotlib.org/stable/tutorials/intermediate/constrainedlayout_guide.html>`__
# and tends to improve the appearance of figures with even the most
# complex arrangements of subplots, colorbars, and legends.
# %%
import proplot as pplt
import numpy as np
state = np.random.RandomState(51423)
fig = pplt.figure(share=False, refwidth=2.3)
# Colorbars
ax = fig.subplot(121, title='Axes colorbars')
data = state.rand(10, 10)
m = ax.heatmap(data, cmap='dusk')
ax.colorbar(m, loc='r')
ax.colorbar(m, loc='t') # title is automatically adjusted
ax.colorbar(m, loc='ll', label='colorbar label') # inset colorbar demonstration
# Legends
ax = fig.subplot(122, title='Axes legends', titlepad='0em')
data = (state.rand(10, 5) - 0.5).cumsum(axis=0)
hs = ax.plot(data, lw=3, cycle='ggplot', labels=list('abcde'))
ax.legend(loc='ll', label='legend label') # automatically infer handles and labels
ax.legend(hs, loc='t', ncols=5, frame=False) # automatically infer labels from handles
ax.legend(hs, list('jklmn'), loc='r', ncols=1, frame=False) # manually override labels
fig.format(
abc=True, xlabel='xlabel', ylabel='ylabel',
suptitle='Colorbar and legend location demo'
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_guides_plot:
#
# On-the-fly colorbars and legends
# --------------------------------
#
# In proplot, you can add colorbars and legends on-the-fly by supplying keyword
# arguments to various `~proplot.axes.PlotAxes` commands. To plot data and
# draw a colorbar or legend in one go, pass a location (e.g., ``colorbar='r'``
# or ``legend='b'``) to the plotting command (e.g., `~proplot.axes.PlotAxes.plot`
# or `~proplot.axes.PlotAxes.contour`). To pass keyword arguments to the colorbar
# and legend commands, use the `legend_kw` and `colorbar_kw` arguments (e.g.,
# ``legend_kw={'ncol': 3}``). Note that `~proplot.axes.Axes.colorbar` can also
# build colorbars from lists of arbitrary matplotlib artists, for example the lines
# generated by `~proplot.axes.PlotAxes.plot` (see :ref:`below <ug_colorbars>`).
#
# .. note::
#
# Specifying the same `colorbar` location with multiple plotting calls will have
# a different effect depending on the plotting command. For :ref:`1D commands
# <ug_1dplots>`, this will add each item to a "queue" used to build colorbars
# from a list of artists. For :ref:`2D commands <ug_2dplots>`, this will "stack"
# colorbars in outer locations, or replace existing colorbars in inset locations.
# By contrast, specifying the same `legend` location will always add items to
# the same legend rather than "stacking" them.
# %%
import proplot as pplt
labels = list('xyzpq')
state = np.random.RandomState(51423)
fig = pplt.figure(share=0, refwidth=2.3, suptitle='On-the-fly colorbar and legend demo')
# Legends
data = (state.rand(30, 10) - 0.5).cumsum(axis=0)
ax = fig.subplot(121, title='On-the-fly legend')
ax.plot( # add all at once
data[:, :5], lw=2, cycle='Reds1', cycle_kw={'ls': ('-', '--'), 'left': 0.1},
labels=labels, legend='b', legend_kw={'title': 'legend title'}
)
for i in range(5):
ax.plot( # add one-by-one
data[:, 5 + i], label=labels[i], linewidth=2,
cycle='Blues1', cycle_kw={'N': 5, 'ls': ('-', '--'), 'left': 0.1},
colorbar='ul', colorbar_kw={'label': 'colorbar from lines'}
)
# Colorbars
ax = fig.subplot(122, title='On-the-fly colorbar')
data = state.rand(8, 8)
ax.contourf(
data, cmap='Reds1', extend='both', colorbar='b',
colorbar_kw={'length': 0.8, 'label': 'colorbar label'},
)
ax.contour(
data, color='gray7', lw=1.5,
label='contour', legend='ul', legend_kw={'label': 'legend from contours'},
)
# %%
import proplot as pplt
import numpy as np
N = 10
state = np.random.RandomState(51423)
fig, axs = pplt.subplots(
nrows=2, share=False,
refwidth='55mm', panelpad='1em',
suptitle='Stacked colorbars demo'
)
# Repeat for both axes
args1 = (0, 0.5, 1, 1, 'grays', 0.5)
args2 = (0, 0, 0.5, 0.5, 'reds', 1)
args3 = (0.5, 0, 1, 0.5, 'blues', 2)
for j, ax in enumerate(axs):
ax.format(xlabel='data', xlocator=np.linspace(0, 0.8, 5), title=f'Subplot #{j+1}')
for i, (x0, y0, x1, y1, cmap, scale) in enumerate((args1, args2, args3)):
if j == 1 and i == 0:
continue
data = state.rand(N, N) * scale
x, y = np.linspace(x0, x1, N + 1), np.linspace(y0, y1, N + 1)
m = ax.pcolormesh(x, y, data, cmap=cmap, levels=np.linspace(0, scale, 11))
ax.colorbar(m, loc='l', label=f'dataset #{i + 1}')
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_guides_multi:
#
# Figure-wide colorbars and legends
# ---------------------------------
#
# In proplot, colorbars and legends can be added to the edge of figures using the
# figure methods `proplot.figure.Figure.colorbar` and `proplot.figure.Figure.legend`.
# These methods align colorbars and legends between the edges
# of the `~proplot.figure.Figure.gridspec` rather than the figure.
# As with :ref:`axes colorbars and legends <ug_guides_loc>`, if you
# draw multiple colorbars or legends on the same side, they are stacked on
# top of each other. To draw a colorbar or legend alongside particular row(s) or
# column(s) of the subplot grid, use the `row`, `rows`, `col`, or `cols` keyword
# arguments. You can pass an integer to draw the colorbar or legend beside a
# single row or column (e.g., ``fig.colorbar(m, row=1)``), or pass a tuple to
# draw the colorbar or legend along a range of rows or columns
# (e.g., ``fig.colorbar(m, rows=(1, 2))``). The space separation between the subplot
# grid edge and the colorbars or legends can be controlled with the `space` keyword,
# and the tight layout padding can be controlled with the `pad` keyword.
# %%
import proplot as pplt
import numpy as np
state = np.random.RandomState(51423)
fig, axs = pplt.subplots(ncols=3, nrows=3, refwidth=1.4)
for ax in axs:
m = ax.pcolormesh(
state.rand(20, 20), cmap='grays',
levels=np.linspace(0, 1, 11), extend='both'
)
fig.format(
suptitle='Figure colorbars and legends demo',
abc='a.', abcloc='l', xlabel='xlabel', ylabel='ylabel'
)
fig.colorbar(m, label='column 1', ticks=0.5, loc='b', col=1)
fig.colorbar(m, label='columns 2 and 3', ticks=0.2, loc='b', cols=(2, 3))
fig.colorbar(m, label='stacked colorbar', ticks=0.1, loc='b', minorticks=0.05)
fig.colorbar(m, label='colorbar with length <1', ticks=0.1, loc='r', length=0.7)
# %%
import proplot as pplt
import numpy as np
state = np.random.RandomState(51423)
fig, axs = pplt.subplots(
ncols=2, nrows=2, order='F', refwidth=1.7, wspace=2.5, share=False
)
# Plot data
data = (state.rand(50, 50) - 0.1).cumsum(axis=0)
for ax in axs[:2]:
m = ax.contourf(data, cmap='grays', extend='both')
hs = []
colors = pplt.get_colors('grays', 5)
for abc, color in zip('ABCDEF', colors):
data = state.rand(10)
for ax in axs[2:]:
h, = ax.plot(data, color=color, lw=3, label=f'line {abc}')
hs.append(h)
# Add colorbars and legends
fig.colorbar(m, length=0.8, label='colorbar label', loc='b', col=1, locator=5)
fig.colorbar(m, label='colorbar label', loc='l')
fig.legend(hs, ncols=2, center=True, frame=False, loc='b', col=2)
fig.legend(hs, ncols=1, label='legend label', frame=False, loc='r')
fig.format(abc='A', abcloc='ul', suptitle='Figure colorbars and legends demo')
for ax, title in zip(axs, ('2D {} #1', '2D {} #2', 'Line {} #1', 'Line {} #2')):
ax.format(xlabel='xlabel', title=title.format('dataset'))
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_colorbars:
#
# Colorbar improvements
# ---------------------
#
# The basic usage of `proplot.axes.Axes.colorbar` and `proplot.figure.Figure.colorbar`
# includes a few useful improvements compared to the matplotlib commands. The following
# features are useful for labeling discrete plot elements that bear some numeric
# relationship to one another:
# * Calling ``colorbar`` with a list of `~matplotlib.artist.Artist`\ s,
# a `~matplotlib.colors.Colormap` name or object, or a list of colors
# will build the required `~matplotlib.cm.ScalarMappable` on-the-fly. Lists
#   of `~matplotlib.artist.Artist`\ s are used when you use the `colorbar`
# keyword with :ref:`1D commands <ug_1dplots>` like `~proplot.axes.PlotAxes.plot`.
# * The associated :ref:`colormap normalizer <ug_norm>` can be specified with the
# `vmin`, `vmax`, `norm`, and `norm_kw` keywords. The `~proplot.colors.DiscreteNorm`
# levels can be specified with `values`, or proplot will infer them from the
# `~matplotlib.artist.Artist` labels (non-numeric labels will be applied to
# the colorbar as tick labels).
#
# Proplot also includes improvements for adding ticks and tick labels to colorbars.
# Similar to `proplot.axes.CartesianAxes.format`, you can flexibly specify
# major tick locations, minor tick locations, and major tick labels using the
# `locator`, `minorlocator`, `formatter`, `ticks`, `minorticks`, and `ticklabels`
# keywords. These arguments are passed through the `~proplot.constructor.Locator` and
# `~proplot.constructor.Formatter` :ref:`constructor functions <why_constructor>`.
# Unlike matplotlib, the default ticks for :ref:`discrete colormaps <ug_discrete>`
# are restricted based on the axis length using `~proplot.ticker.DiscreteLocator`.
# You can easily toggle minor ticks using ``tickminor=True``.
#
# The geometry of colorbars is also better constrained in proplot. You can set
# the colorbar width in :ref:`physical units <ug_units>` using the `width` keyword
# (physical units help avoid the common issue where colorbars appear "too skinny"
# or "too fat" and preserves their appearance when the figure size changes).
# The default widths for outer and inset colorbars are controlled with
# :rcraw:`colorbar.width` and :rcraw:`colorbar.insetwidth`. Similarly, you can
# control the colorbar length with the `length` keyword. The default length
# for inset colorbars is controlled by :rcraw:`colorbar.insetlength`, and the
# outer colorbar length is always relative to the subplot grid with a default of ``1``.
# You can also specify the size of the colorbar "extensions" in physical units rather
# than relative units using the `extendsize` keyword rather than matplotlib's
# `extendfrac`. The default sizes for outer and inset colorbars are
# controlled by :rcraw:`colorbar.extend` and :rcraw:`colorbar.insetextend`.
# See `~proplot.axes.Axes.colorbar` for details.
# %%
import proplot as pplt
import numpy as np
fig = pplt.figure(share=False, refwidth=2)
# Colorbars from lines
ax = fig.subplot(121)
state = np.random.RandomState(51423)
data = 1 + (state.rand(12, 10) - 0.45).cumsum(axis=0)
cycle = pplt.Cycle('algae')
hs = ax.line(
data, lw=4, cycle=cycle, colorbar='lr',
colorbar_kw={'length': '8em', 'label': 'line colorbar'}
)
ax.colorbar(
hs, loc='t', values=np.arange(0, 10),
label='line colorbar', ticks=2
)
# Colorbars from a mappable
ax = fig.subplot(122)
m = ax.contourf(
data.T, extend='both', cmap='algae',
levels=pplt.arange(0, 3, 0.5)
)
fig.colorbar(
m, loc='r', length=1, # length is relative
label='interior ticks', tickloc='left'
)
ax.colorbar(
m, loc='ul', length=6, # length is em widths
label='inset colorbar', tickminor=True, alpha=0.5,
)
fig.format(
suptitle='Colorbar formatting demo',
xlabel='xlabel', ylabel='ylabel', titleabove=False
)
# %% [raw] raw_mimetype="text/restructuredtext"
# .. _ug_legends:
#
# Legend improvements
# -------------------
#
# The basic usage of `proplot.axes.Axes.legend` and `proplot.figure.Figure.legend`
# includes a few useful improvements compared to the matplotlib commands.
# The following core features are the same as matplotlib:
# * Calling ``legend`` without positional arguments will
#   automatically fill the legend with the labeled artists in
#   the parent axes (when using `proplot.axes.Axes.legend`)
#   or the parent figure (when using `proplot.figure.Figure.legend`).
# * Legend labels can be assigned early by calling plotting commands with
# the `label` keyword (e.g., ``ax.plot(..., label='label')``) or on-the-fly by
# passing two positional arguments to ``legend`` (where the first argument is the
# "handle" list and the second is the "label" list).
# The following core features are unique to proplot:
# * A "handle" list can be passed to ``legend`` as the sole
# positional argument and the labels will be automatically inferred
# using `~matplotlib.artist.Artist.get_label`. Valid "handles" include
# `~matplotlib.lines.Line2D`\ s returned by `~proplot.axes.PlotAxes.plot`,
# `~matplotlib.container.BarContainer`\ s returned by `~proplot.axes.PlotAxes.bar`,
# and `~matplotlib.collections.PolyCollection`\ s
# returned by `~proplot.axes.PlotAxes.fill_between`.
# * A composite handle can be created by grouping the "handle"
# list objects into tuples (see this `matplotlib guide
# <https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html#legend-handlers>`__
# for more on tuple groups). The associated label will be automatically
# inferred from the objects in the group. If multiple distinct
# labels are found then the group is automatically expanded.
# * Legend labels can be assigned for each column of a
# :ref:`2D array passed to a 1D plotting command <ug_1dstd>`
# using the `labels` keyword (e.g., ``labels=['label1', 'label2', ...]``).
# * Legend labels can be assigned to `~matplotlib.contour.ContourSet`\ s by passing
# the `label` keyword to a contouring command (e.g., `~proplot.axes.PlotAxes.contour`
# or `~proplot.axes.PlotAxes.contourf`).
#
# `proplot.axes.Axes.legend` and `proplot.figure.Figure.legend` also include a few
# additional features. To draw legends with centered rows, pass ``center=True`` or
# a list of lists of "handles" to ``legend`` (this stacks several single-row,
# horizontally centered legends and adds an encompassing frame behind them).
# To switch between row-major and column-major order for legend entries,
# use the `order` keyword (the default ``order='C'`` is row-major,
# unlike matplotlib's column-major ``order='F'``). To alphabetize the legend
# entries, pass ``alphabetize=True`` to ``legend``. To modify the legend handles
# (e.g., `~proplot.axes.PlotAxes.plot` or `~proplot.axes.PlotAxes.scatter` handles)
# pass the relevant properties like `color`, `linewidth`, or `markersize` to ``legend``
# (or use the `handle_kw` keyword). See `proplot.axes.Axes.legend` for details.
# %%
import proplot as pplt
import numpy as np
pplt.rc.cycle = '538'
fig, axs = pplt.subplots(ncols=2, span=False, share='labels', refwidth=2.3)
labels = ['a', 'bb', 'ccc', 'dddd', 'eeeee']
hs1, hs2 = [], []
# On-the-fly legends
state = np.random.RandomState(51423)
for i, label in enumerate(labels):
data = (state.rand(20) - 0.45).cumsum(axis=0)
h1 = axs[0].plot(
data, lw=4, label=label, legend='ul',
legend_kw={'order': 'F', 'title': 'column major'}
)
hs1.extend(h1)
h2 = axs[1].plot(
data, lw=4, cycle='Set3', label=label, legend='r',
legend_kw={'lw': 8, 'ncols': 1, 'frame': False, 'title': 'modified\n handles'}
)
hs2.extend(h2)
# Outer legends
ax = axs[0]
ax.legend(hs1, loc='b', ncols=3, title='row major', order='C', facecolor='gray2')
ax = axs[1]
ax.legend(hs2, loc='b', ncols=3, center=True, title='centered rows')
axs.format(xlabel='xlabel', ylabel='ylabel', suptitle='Legend formatting demo')
| 6,851 |
2,494 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef vm_SharedArrayObject_h
#define vm_SharedArrayObject_h
#include "mozilla/Atomics.h"
#include "jsapi.h"
#include "jsobj.h"
#include "jstypes.h"
#include "gc/Barrier.h"
#include "vm/ArrayBufferObject.h"
typedef struct JSProperty JSProperty;
namespace js {
/*
* SharedArrayRawBuffer
*
* A bookkeeping object always stored immediately before the raw buffer.
* The buffer itself is mmap()'d and refcounted.
* SharedArrayBufferObjects and AsmJS code may hold references.
*
* |<------ sizeof ------>|<- length ->|
*
* | waste | SharedArrayRawBuffer | data array | waste |
 */
// Allocation sketch (hypothetical; the real New() lives in the .cpp file):
// for a requested length N, reserve sizeof(SharedArrayRawBuffer) + N bytes,
// placement-new the header at the start, and hand out
// dataPointer() == base + sizeof(SharedArrayRawBuffer), matching the diagram above.
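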
class SharedArrayRawBuffer
{
private:
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> refcount;
uint32_t length;
protected:
SharedArrayRawBuffer(uint8_t *buffer, uint32_t length)
: refcount(1), length(length)
{
JS_ASSERT(buffer == dataPointer());
}
public:
static SharedArrayRawBuffer *New(uint32_t length);
inline uint8_t *dataPointer() const {
return ((uint8_t *)this) + sizeof(SharedArrayRawBuffer);
}
inline uint32_t byteLength() const {
return length;
}
void addReference();
void dropReference();
};
/*
* SharedArrayBufferObject
*
* When transferred to a WebWorker, the buffer is not neutered on the parent side,
* and both child and parent reference the same buffer.
*/
class SharedArrayBufferObject : public ArrayBufferObject
{
static bool byteLengthGetterImpl(JSContext *cx, CallArgs args);
public:
static const Class class_;
static const Class protoClass;
// Slot used for storing a pointer to the SharedArrayRawBuffer.
static const uint8_t RAWBUF_SLOT = ArrayBufferObject::RESERVED_SLOTS;
static const uint8_t RESERVED_SLOTS = ArrayBufferObject::RESERVED_SLOTS + 1;
static bool class_constructor(JSContext *cx, unsigned argc, Value *vp);
// Create a SharedArrayBufferObject with a new SharedArrayRawBuffer.
static JSObject *New(JSContext *cx, uint32_t length);
// Create a SharedArrayBufferObject using an existing SharedArrayRawBuffer.
static JSObject *New(JSContext *cx, SharedArrayRawBuffer *buffer);
static bool byteLengthGetter(JSContext *cx, unsigned argc, Value *vp);
static void Finalize(FreeOp *fop, JSObject *obj);
void acceptRawBuffer(SharedArrayRawBuffer *buffer);
void dropRawBuffer();
SharedArrayRawBuffer *rawBufferObject() const;
uint8_t *dataPointer() const;
uint32_t byteLength() const;
};
bool
IsSharedArrayBuffer(HandleValue v);
} // namespace js
#endif // vm_SharedArrayObject_h
| 1,003 |
22,481 | """Block I/O being done in asyncio."""
from http.client import HTTPConnection
from .util.async_ import protect_loop
def enable() -> None:
"""Enable the detection of I/O in the event loop."""
# Prevent urllib3 and requests doing I/O in event loop
HTTPConnection.putrequest = protect_loop(HTTPConnection.putrequest) # type: ignore
    # Currently disabled. pytz does I/O when getting the timezone.
# Prevent files being opened inside the event loop
# builtins.open = protect_loop(builtins.open)
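    # A minimal sketch of what such a wrapper could look like (hypothetical;
    # the real protect_loop lives in .util.async_):
    #
    #     def protect_loop(func):
    #         @functools.wraps(func)
    #         def protected(*args, **kwargs):
    #             try:
    #                 asyncio.get_running_loop()
    #             except RuntimeError:
    #                 pass  # no running loop -> blocking I/O is fine here
    #             else:
    #                 _LOGGER.warning("Detected I/O inside the event loop: %s", func)
    #             return func(*args, **kwargs)
    #         return protected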
| 155 |
589 | <reponame>ClaudioWaldvogel/inspectIT
package rocks.inspectit.shared.all.instrumentation.classcache.util;
import java.util.Set;
/**
 * Set that has a {@link #addOrUpdate(Object)} method. This is a special variant of the add method:
 * if an element e2 already exists in the set and element e is equal to it, e2 is replaced by e
 * whenever the two are not the same object in terms of reference identity. This can be useful
 * when objects are equal in terms of equals(), but not in terms of ==.
*
* @author <NAME>
*
* @param <E>
* Type of element in the set.
*/
public interface UpdateableSet<E> extends Set<E> {
/**
	 * Adds the element to the set if it does not exist yet. If an element e2 already exists in the
	 * set and element e is equal to it, e2 is replaced by e whenever the two are not the same
	 * object in terms of reference identity. This can be useful when objects are equal in terms of
	 * equals(), but not in terms of ==.
*
* @param e
* element
*/
void addOrUpdate(E e);
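	// Illustrative semantics (hypothetical element type whose equals() compares
	// only a name field):
	//   set.add(e1);         // e1: name "foo", flags PUBLIC
	//   set.addOrUpdate(e2); // e2: name "foo", flags PRIVATE -> e2 replaces e1,
	//                        // because e1.equals(e2) holds while e1 != e2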
}
| 316 |
403 | <gh_stars>100-1000
#ifndef VKHR_EMBREE_HAIR_STYLE_HH
#define VKHR_EMBREE_HAIR_STYLE_HH
#include <vkhr/scene_graph/hair_style.hh>
#include <vkhr/ray_tracer/shadable.hh>
#include <glm/glm.hpp>
#include <embree3/rtcore.h>
#include <vector>
#include <vkhr/rasterizer/hair_style.hh>
namespace vkhr {
class Raytracer;
namespace embree {
class HairStyle final : public Shadable {
public:
HairStyle() = default;
HairStyle(const vkhr::HairStyle& hair_style, const vkhr::Raytracer& raytracer);
void load(const vkhr::HairStyle& hair_style, const vkhr::Raytracer& raytracer);
glm::vec3 shade(const Ray& surface_intersection,
const LightSource& light_source,
const Camera& projection_camera) override;
glm::vec4 get_tangent(const Ray& position) const;
unsigned get_geometry() const;
void update_parameters(const vkhr::vulkan::HairStyle& hair_style);
const vkhr::HairStyle* get_pointer() const;
private:
glm::vec3 kajiya_kay(const glm::vec3& diffuse,
const glm::vec3& specular,
float p,
const glm::vec3& tangent,
const glm::vec3& light,
const glm::vec3& eye);
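            // Classic Kajiya-Kay sketch (a hedged outline; the actual
            // weighting lives in the implementation file): with t = tangent,
            // l = light and e = eye directions,
            //   diffuse  ~ Kd * sqrt(1 - dot(t, l)^2)
            //   specular ~ Ks * pow(dot(t, l) * dot(t, e) +
            //                       sqrt(1 - dot(t, l)^2) * sqrt(1 - dot(t, e)^2), p)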
unsigned geometry { RTC_INVALID_GEOMETRY_ID };
const vkhr::HairStyle* pointer { nullptr };
RTCScene scene { nullptr };
glm::vec3 hair_diffuse;
float hair_exponent;
std::vector<glm::vec4> position_thickness;
};
}
}
#endif | 986 |
933 | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// This is a script to upgrade old solver prototxts to the new format.
// Usage:
// upgrade_solver_proto_text old_solver_proto_file_in solver_proto_file_out
#include <cstring>
#include <fstream> // NOLINT(readability/streams)
#include <iostream> // NOLINT(readability/streams)
#include <string>
#include "caffe/caffe.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/upgrade_proto.hpp"
using std::ofstream;
using namespace caffe; // NOLINT(build/namespaces)
int main(int argc, char** argv) {
FLAGS_alsologtostderr = 1; // Print output to stderr (while still logging)
::google::InitGoogleLogging(argv[0]);
if (argc != 3) {
LOG(ERROR) << "Usage: upgrade_solver_proto_text "
<< "old_solver_proto_file_in solver_proto_file_out";
return 1;
}
SolverParameter solver_param;
string input_filename(argv[1]);
if (!ReadProtoFromTextFile(input_filename, &solver_param)) {
LOG(ERROR) << "Failed to parse input text file as SolverParameter: "
<< input_filename;
return 2;
}
bool need_upgrade = SolverNeedsTypeUpgrade(solver_param);
bool success = true;
if (need_upgrade) {
success = UpgradeSolverAsNeeded(input_filename, &solver_param);
if (!success) {
LOG(ERROR) << "Encountered error(s) while upgrading prototxt; "
<< "see details above.";
}
} else {
LOG(ERROR) << "File already in latest proto format: " << input_filename;
}
// Save new format prototxt.
WriteProtoToTextFile(solver_param, argv[2]);
LOG(INFO) << "Wrote upgraded SolverParameter text proto to " << argv[2];
return !success;
}
| 1,141 |
1,334 | package org.mockserver.serialization.serializers.schema;
import io.swagger.v3.oas.models.media.Schema;
/**
* @author jamesdbloom
*/
@SuppressWarnings("rawtypes")
public class SchemaSerializer extends AbstractSchemaSerializer<Schema> {
public SchemaSerializer() {
super(Schema.class);
}
}
| 319 |
2,073 | <filename>Hadoop/apache-mahout-0.10.2-compile/mr/src/test/java/org/apache/mahout/cf/taste/impl/recommender/ReversingRescorer.java
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mahout.cf.taste.impl.recommender;
import org.apache.mahout.cf.taste.recommender.IDRescorer;
import org.apache.mahout.cf.taste.recommender.Rescorer;
/** <p>Simple {@link Rescorer} which negates the given score, thus reversing order of rankings.</p> */
public final class ReversingRescorer<T> implements Rescorer<T>, IDRescorer {
@Override
public double rescore(T thing, double originalScore) {
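    // e.g. original scores {A: 0.9, B: 0.4} become {A: -0.9, B: -0.4}, so B now outranks A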
return -originalScore;
}
@Override
public boolean isFiltered(T thing) {
return false;
}
@Override
public double rescore(long ID, double originalScore) {
return -originalScore;
}
@Override
public boolean isFiltered(long ID) {
return false;
}
}
| 488 |
348 | {"nom":"Rahon","circ":"3ème circonscription","dpt":"Doubs","inscrits":93,"abs":19,"votants":74,"blancs":2,"nuls":2,"exp":70,"res":[{"nuance":"LR","nom":"<NAME>","voix":55},{"nuance":"REM","nom":"M. <NAME>","voix":15}]} | 89 |
1,816 | """New Rule Enclosure Command."""
import inflection
import os
from ...utils.filesystem import get_module_dir, make_directory, render_stub_file
from ...utils.location import base_path
from ...utils.str import as_filepath
from ...commands.Command import Command
class MakeRuleEnclosureCommand(Command):
"""
Creates a new rule enclosure.
rule:enclosure
{name : Name of the rule enclosure}
{--f|force=? : Force overriding file if already exists}
"""
def __init__(self, application):
super().__init__()
self.app = application
def handle(self):
name = inflection.camelize(self.argument("name"))
content = render_stub_file(self.get_stub_rule_enclosure_path(), name)
relative_filename = os.path.join(
as_filepath(self.app.make("validation.location")), name + ".py"
)
if os.path.exists(relative_filename) and not self.option("force"):
self.warning(
f"{relative_filename} already exists! Run the command with -f (force) to override."
)
return -1
filepath = base_path(relative_filename)
make_directory(filepath)
with open(filepath, "w") as f:
f.write(content)
        self.info(f"Rule Enclosure Created ({relative_filename})")
def get_stub_rule_enclosure_path(self):
return os.path.join(
get_module_dir(__file__), "../../stubs/validation/RuleEnclosure.py"
)
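# Example invocation (hypothetical project layout; the output folder comes
# from the "validation.location" binding):
#
#   python craft rule:enclosure AgeRules
#
# This renders the RuleEnclosure stub to something like
# app/validation/AgeRules.py and prints a confirmation with the relative path.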
| 603 |
521 | <filename>include/retdec/bin2llvmir/optimizations/decoder/decoder_ranges.h
/**
* @file include/retdec/bin2llvmir/optimizations/decoder/decoder_ranges.h
* @brief Representation of ranges to decode.
* @copyright (c) 2017 Avast Software, licensed under the MIT license
*/
#ifndef RETDEC_BIN2LLVMIR_OPTIMIZATIONS_DECODER_DECODER_RANGES_H
#define RETDEC_BIN2LLVMIR_OPTIMIZATIONS_DECODER_DECODER_RANGES_H
#include <iostream>
#include "retdec/utils/address.h"
#include "retdec/bin2llvmir/providers/fileimage.h"
namespace retdec {
namespace bin2llvmir {
class RangesToDecode
{
public:
void addPrimary(utils::Address s, utils::Address e);
void addPrimary(const utils::AddressRange& r);
void addAlternative(utils::Address s, utils::Address e);
void addAlternative(const utils::AddressRange& r);
void promoteAlternativeToPrimary();
void remove(utils::Address s, utils::Address e);
void remove(const utils::AddressRange& r);
void removeZeroSequences(FileImage* image);
bool isStrict() const;
bool primaryEmpty() const;
bool alternativeEmpty() const;
const utils::AddressRange& primaryFront() const;
const utils::AddressRange& alternativeFront() const;
const utils::AddressRange* getPrimary(utils::Address a) const;
const utils::AddressRange* getAlternative(utils::Address a) const;
const utils::AddressRange* get(utils::Address a) const;
void setArchitectureInstructionAlignment(unsigned a);
friend std::ostream& operator<<(std::ostream &os, const RangesToDecode& rs);
private:
void removeZeroSequences(
FileImage* image,
utils::AddressRangeContainer& rs);
private:
utils::AddressRangeContainer _primaryRanges;
utils::AddressRangeContainer _alternativeRanges;
unsigned archInsnAlign = 0;
bool _strict = false;
};
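// Illustrative usage (hypothetical addresses):
//   RangesToDecode rs;
//   rs.addPrimary(0x1000, 0x2000);      // range known to contain code
//   rs.addAlternative(0x3000, 0x3100);  // speculative range
//   rs.promoteAlternativeToPrimary();   // start trusting the speculative set
//   if (rs.get(0x1234)) { /* address falls inside a decodable range */ }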
} // namespace bin2llvmir
} // namespace retdec
#endif
| 625 |
479 | import windows
import windows.generated_def as gdef
def test_ipv4_connection():
windows.system.network.ipv4 # Better idea ?
def test_ipv6_connection():
windows.system.network.ipv6 # Better idea ?
def test_firewall():
firewall = windows.system.network.firewall
assert firewall.enabled # Its a dict that should not be empty
assert firewall.rules # Its a list that should not be empty
    # Just check that the fields exist and do not crash for now
rule = firewall.rules[0]
rule.name
rule.description
rule.protocol
rule.remote_port
rule.local_port
rule.local_address
rule.remote_address
rule.application_name
rule.direction
rule.enabled
| 234 |
695 | /*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package controller.battlefield;
import java.util.ArrayList;
import java.util.List;
import model.CommandManager;
import model.battlefield.army.Unity;
import model.battlefield.army.components.Unit;
import controller.Controller;
import controller.GUIController;
import controller.Reporter;
import de.lessvoid.nifty.Nifty;
import de.lessvoid.nifty.elements.render.TextRenderer;
import de.lessvoid.nifty.screen.Screen;
/**
*
* @author Benoît
*/
public class BattlefieldGUIController extends GUIController {
public BattlefieldGUIController(Nifty nifty, Controller controller) {
super(controller, nifty);
}
@Override
public void activate(){
nifty.gotoScreen("hud");
}
public void selectAll(){
CommandManager.selectAll();
}
@Override
public void update() {
if(!nifty.isActive("interface/screen.xml", "hud")) {
return;
}
// update info
if (CommandManager.selection.size() == 1) {
Unit u = CommandManager.selection.get(0);
getElement("unitName").getRenderer(TextRenderer.class).setText(Reporter.getName(u));
getElement("unitHealth").getRenderer(TextRenderer.class).setText(Reporter.getHealth(u));
getElement("unitState").getRenderer(TextRenderer.class).setText(Reporter.getState(u));
getElement("unitOrder").getRenderer(TextRenderer.class).setText(Reporter.getOrder(u));
getElement("unitHolding").getRenderer(TextRenderer.class).setText(Reporter.getHolding(u));
getElement("info").show();
} else {
getElement("info").hide();
}
}
@Override
public void bind(Nifty nifty, Screen screen) {
}
@Override
public void onStartScreen() {
}
@Override
public void onEndScreen() {
}
}
| 694 |
302 | #ifndef ENGINE_H
#define ENGINE_H
class Engine
{
public:
Engine();
};
#endif // ENGINE_H
| 43 |
826 | <gh_stars>100-1000
// SPDX-License-Identifier: BSD-3-Clause
// Copyright Contributors to the OpenColorIO Project.
#ifndef INCLUDED_OCIO_FILEFORMAT_UTILS_H
#define INCLUDED_OCIO_FILEFORMAT_UTILS_H
#include <OpenColorIO/OpenColorIO.h>
#include "ops/lut1d/Lut1DOpData.h"
#include "ops/lut3d/Lut3DOpData.h"
namespace OCIO_NAMESPACE
{
Lut1DOpDataRcPtr HandleLUT1D(const Lut1DOpDataRcPtr & fileLut1D,
Interpolation fileInterp,
bool & fileInterpUsed);
Lut3DOpDataRcPtr HandleLUT3D(const Lut3DOpDataRcPtr & fileLut3D,
Interpolation fileInterp,
bool & fileInterpUsed);
void LogWarningInterpolationNotUsed(Interpolation interp, const FileTransform & fileTransform);
} // OCIO_NAMESPACE
#endif // INCLUDED_OCIO_FILEFORMAT_UTILS_H | 407 |
355 | <filename>mmselfsup/models/necks/nonlinear_neck.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class NonLinearNeck(BaseModule):
"""The non-linear neck.
Structure: fc-bn-[relu-fc-bn] where the substructure in [] can be repeated.
    For the default setting, the substructure is repeated once.
The neck can be used in many algorithms, e.g., SimCLR, BYOL, SimSiam.
Args:
in_channels (int): Number of input channels.
hid_channels (int): Number of hidden channels.
out_channels (int): Number of output channels.
num_layers (int): Number of fc layers. Defaults to 2.
with_bias (bool): Whether to use bias in fc layers (except for the
last). Defaults to False.
with_last_bn (bool): Whether to add the last BN layer.
Defaults to True.
with_last_bn_affine (bool): Whether to have learnable affine parameters
in the last BN layer (set False for SimSiam). Defaults to True.
with_last_bias (bool): Whether to use bias in the last fc layer.
Defaults to False.
with_avg_pool (bool): Whether to apply the global average pooling
after backbone. Defaults to True.
norm_cfg (dict): Dictionary to construct and config norm layer.
Defaults to dict(type='SyncBN').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
hid_channels,
out_channels,
num_layers=2,
with_bias=False,
with_last_bn=True,
with_last_bn_affine=True,
with_last_bias=False,
with_avg_pool=True,
vit_backbone=False,
norm_cfg=dict(type='SyncBN'),
init_cfg=[
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(NonLinearNeck, self).__init__(init_cfg)
self.with_avg_pool = with_avg_pool
self.vit_backbone = vit_backbone
if with_avg_pool:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc0 = nn.Linear(in_channels, hid_channels, bias=with_bias)
self.bn0 = build_norm_layer(norm_cfg, hid_channels)[1]
self.fc_names = []
self.bn_names = []
for i in range(1, num_layers):
this_channels = out_channels if i == num_layers - 1 \
else hid_channels
if i != num_layers - 1:
self.add_module(
f'fc{i}',
nn.Linear(hid_channels, this_channels, bias=with_bias))
self.add_module(f'bn{i}',
build_norm_layer(norm_cfg, this_channels)[1])
self.bn_names.append(f'bn{i}')
else:
self.add_module(
f'fc{i}',
nn.Linear(
hid_channels, this_channels, bias=with_last_bias))
if with_last_bn:
self.add_module(
f'bn{i}',
build_norm_layer(
dict(**norm_cfg, affine=with_last_bn_affine),
this_channels)[1])
self.bn_names.append(f'bn{i}')
else:
self.bn_names.append(None)
self.fc_names.append(f'fc{i}')
def forward(self, x):
assert len(x) == 1
x = x[0]
if self.vit_backbone:
x = x[-1]
if self.with_avg_pool:
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc0(x)
x = self.bn0(x)
for fc_name, bn_name in zip(self.fc_names, self.bn_names):
fc = getattr(self, fc_name)
x = self.relu(x)
x = fc(x)
if bn_name is not None:
bn = getattr(self, bn_name)
x = bn(x)
return [x]
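# Minimal usage sketch (hypothetical shapes; assumes a ResNet-style backbone
# emitting 2048-channel maps and a single-GPU norm_cfg):
#
#   neck = NonLinearNeck(in_channels=2048, hid_channels=4096,
#                        out_channels=256, norm_cfg=dict(type='BN'))
#   out = neck([torch.randn(8, 2048, 7, 7)])  # -> [tensor of shape (8, 256)]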
| 2,344 |
4,857 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.assignment.AssignmentTestingUtil;
import org.apache.hadoop.hbase.master.assignment.SplitTableRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
@Category({MediumTests.class, ClientTests.class})
public class TestSplitOrMergeStatus {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSplitOrMergeStatus.class);
private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
private static byte [] FAMILY = Bytes.toBytes("testFamily");
@Rule
public TestName name = new TestName();
@Before
public void setUp() throws Exception {
TEST_UTIL.startMiniCluster(2);
}
@After
public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@Test
public void testSplitSwitch() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
Table t = TEST_UTIL.createTable(tableName, FAMILY);
TEST_UTIL.loadTable(t, FAMILY, false);
RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(t.getName());
int originalCount = locator.getAllRegionLocations().size();
Admin admin = TEST_UTIL.getAdmin();
initSwitchStatus(admin);
assertTrue(admin.splitSwitch(false, false));
try {
admin.split(t.getName());
fail("Shouldn't get here");
} catch (DoNotRetryIOException dnioe) {
// Expected
}
int count = admin.getRegions(tableName).size();
assertTrue(originalCount == count);
assertFalse(admin.splitSwitch(true, false));
admin.split(t.getName());
while ((count = admin.getRegions(tableName).size()) == originalCount) {
Threads.sleep(1);
}
count = admin.getRegions(tableName).size();
assertTrue(originalCount < count);
admin.close();
}
@Ignore @Test
public void testMergeSwitch() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
Table t = TEST_UTIL.createTable(tableName, FAMILY);
TEST_UTIL.loadTable(t, FAMILY, false);
Admin admin = TEST_UTIL.getAdmin();
int originalCount = admin.getRegions(tableName).size();
initSwitchStatus(admin);
admin.split(t.getName());
int postSplitCount = -1;
while ((postSplitCount = admin.getRegions(tableName).size()) == originalCount) {
Threads.sleep(1);
}
assertTrue("originalCount=" + originalCount + ", newCount=" + postSplitCount,
originalCount != postSplitCount);
// Merge switch is off so merge should NOT succeed.
boolean result = admin.mergeSwitch(false, false);
assertTrue(result);
List<RegionInfo> regions = admin.getRegions(t.getName());
assertTrue(regions.size() > 1);
Future<?> f = admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), true);
try {
f.get(10, TimeUnit.SECONDS);
fail("Should not get here.");
} catch (ExecutionException ee) {
// Expected.
}
int count = admin.getRegions(tableName).size();
assertTrue("newCount=" + postSplitCount + ", count=" + count, postSplitCount == count);
result = admin.mergeSwitch(true, false);
regions = admin.getRegions(t.getName());
assertFalse(result);
f = admin.mergeRegionsAsync(regions.get(0).getEncodedNameAsBytes(),
regions.get(1).getEncodedNameAsBytes(), true);
f.get(10, TimeUnit.SECONDS);
count = admin.getRegions(tableName).size();
assertTrue((postSplitCount / 2 /*Merge*/) == count);
admin.close();
}
@Test
public void testMultiSwitches() throws IOException {
Admin admin = TEST_UTIL.getAdmin();
assertTrue(admin.splitSwitch(false, false));
assertTrue(admin.mergeSwitch(false, false));
assertFalse(admin.isSplitEnabled());
assertFalse(admin.isMergeEnabled());
admin.close();
}
@Test
public void testSplitRegionReplicaRitRecovery() throws Exception {
int startRowNum = 11;
int rowCount = 60;
final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
TEST_UTIL.getAdmin().createTable(TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY)).setRegionReplication(2).build());
TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
ServerName serverName =
RegionReplicaTestHelper.getRSCarryingReplica(TEST_UTIL, tableName, 1).get();
List<RegionInfo> regions = TEST_UTIL.getAdmin().getRegions(tableName);
insertData(tableName, startRowNum, rowCount);
int splitRowNum = startRowNum + rowCount / 2;
byte[] splitKey = Bytes.toBytes("" + splitRowNum);
// Split region of the table
long procId = procExec.submitProcedure(
new SplitTableRegionProcedure(procExec.getEnvironment(), regions.get(0), splitKey));
// Wait the completion
ProcedureTestingUtility.waitProcedure(procExec, procId);
// Disable the table
long procId1 = procExec
.submitProcedure(new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
// Wait the completion
ProcedureTestingUtility.waitProcedure(procExec, procId1);
// Delete Table
long procId2 =
procExec.submitProcedure(new DeleteTableProcedure(procExec.getEnvironment(), tableName));
// Wait the completion
ProcedureTestingUtility.waitProcedure(procExec, procId2);
AssignmentTestingUtil.killRs(TEST_UTIL, serverName);
Threads.sleepWithoutInterrupt(5000);
boolean hasRegionsInTransition = TEST_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates().hasRegionsInTransition();
assertEquals(false, hasRegionsInTransition);
}
private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
return TEST_UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
}
private void insertData(final TableName tableName, int startRow, int rowCount)
throws IOException {
Table t = TEST_UTIL.getConnection().getTable(tableName);
Put p;
for (int i = 0; i < rowCount; i++) {
p = new Put(Bytes.toBytes("" + (startRow + i)));
p.addColumn(FAMILY, Bytes.toBytes("q1"), Bytes.toBytes(i));
t.put(p);
}
}
private void initSwitchStatus(Admin admin) throws IOException {
if (!admin.isSplitEnabled()) {
admin.splitSwitch(true, false);
}
if (!admin.isMergeEnabled()) {
admin.mergeSwitch(true, false);
}
assertTrue(admin.isSplitEnabled());
assertTrue(admin.isMergeEnabled());
}
}
| 3,015 |
8,747 | <reponame>lovyan03/esp-idf
#include "esp_log.h"
/* Function used to tell the linker to include this file
* with all its symbols.
*/
void bootloader_hooks_include(void){
}
void bootloader_before_init(void) {
    /* Keep in mind that a lot of functions cannot be called from here
* as system initialization has not been performed yet, including
* BSS, SPI flash, or memory protection. */
ESP_LOGI("HOOK", "This hook is called BEFORE bootloader initialization");
}
void bootloader_after_init(void) {
ESP_LOGI("HOOK", "This hook is called AFTER bootloader initialization");
}
| 187 |
4,879 | #include "kinetic_scroller.hpp"
#include "visual_params.hpp"
#include "indexer/scales.hpp"
#include "base/logging.hpp"
#include <algorithm>
namespace df
{
double const kKineticDuration = 1.5;
double const kKineticFadeoff = 4.0;
double const kKineticThreshold = 50.0;
double const kKineticAcceleration = 0.4;
double const kKineticMaxSpeedStart = 1000.0; // pixels per second
double const kKineticMaxSpeedEnd = 10000.0; // pixels per second
double const kInstantVelocityThresholdUnscaled = 200.0; // pixels per second
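// Note: the speed cap below is a linear interpolation. It equals
// kKineticMaxSpeedStart at the lowest (most zoomed-out) normalized zoom level
// and kKineticMaxSpeedEnd at the highest, scaled by the visual scale.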
double CalculateKineticMaxSpeed(ScreenBase const & modelView)
{
double const lerpCoef = 1.0 - GetNormalizedZoomLevel(modelView.GetScale());
return (kKineticMaxSpeedStart * lerpCoef + kKineticMaxSpeedEnd * (1.0 - lerpCoef)) *
VisualParams::Instance().GetVisualScale();
}
class KineticScrollAnimation : public Animation
{
public:
// startPos - position in mercator space at the moment the user releases their fingers.
// direction - direction of movement in mercator space; length(direction) is the mercator distance by which the map will be offset.
KineticScrollAnimation(m2::PointD const & startPos, m2::PointD const & direction, double duration)
: Animation(true /* couldBeInterrupted */, true /* couldBeBlended */)
, m_endPos(startPos + direction)
, m_direction(direction)
, m_duration(duration)
, m_elapsedTime(0.0)
{
SetInterruptedOnCombine(true);
m_objects.insert(Animation::Object::MapPlane);
m_properties.insert(Animation::ObjectProperty::Position);
}
Animation::Type GetType() const override { return Animation::Type::KineticScroll; }
TAnimObjects const & GetObjects() const override
{
return m_objects;
}
bool HasObject(Object object) const override
{
return m_objects.find(object) != m_objects.end();
}
TObjectProperties const & GetProperties(Object object) const override
{
ASSERT(HasObject(object), ());
return m_properties;
}
bool HasProperty(Object object, ObjectProperty property) const override
{
return HasObject(object) && m_properties.find(property) != m_properties.end();
}
void SetMaxDuration(double maxDuration) override
{
if (m_duration > maxDuration)
m_duration = maxDuration;
}
void SetMinDuration(double minDuration) override {}
double GetMaxDuration() const override { return Animation::kInvalidAnimationDuration; }
double GetMinDuration() const override { return Animation::kInvalidAnimationDuration; }
double GetDuration() const override { return m_duration; }
bool IsFinished() const override { return m_elapsedTime >= m_duration; }
void Advance(double elapsedSeconds) override
{
m_elapsedTime += elapsedSeconds;
}
void Finish() override
{
m_elapsedTime = m_duration;
Animation::Finish();
}
bool GetProperty(Object object, ObjectProperty property, PropertyValue & value) const override
{
ASSERT(HasProperty(object, property), ());
// Current position = target position - amplitude * e ^ (-kKineticFadeoff * t),
// where t = elapsed / duration.
// We calculate the current position based on the target position rather than
// the start position.
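// Boundary check: at t = 0 this gives endPos - direction, i.e. the start
// position; at t = 1 the remaining offset is direction * e^-kKineticFadeoff
// (about 1.8% of the amplitude for a fadeoff of 4.0), so the animation lands
// almost exactly on endPos.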
value = PropertyValue(m_endPos - m_direction * exp(-kKineticFadeoff * GetT()));
return true;
}
bool GetTargetProperty(Object object, ObjectProperty property, PropertyValue & value) const override
{
ASSERT(HasProperty(object, property), ());
value = PropertyValue(m_endPos);
return true;
}
private:
double GetT() const
{
return IsFinished() ? 1.0 : m_elapsedTime / m_duration;
}
m2::PointD m_endPos;
m2::PointD m_direction;
double m_duration;
double m_elapsedTime;
TAnimObjects m_objects;
TObjectProperties m_properties;
};
void KineticScroller::Init(ScreenBase const & modelView)
{
ASSERT(!m_isActive, ());
m_isActive = true;
m_lastRect = modelView.GlobalRect();
m_lastTimestamp = std::chrono::steady_clock::now();
m_updatePosition = modelView.GlobalRect().GlobalCenter();
m_updateTimestamp = m_lastTimestamp;
}
void KineticScroller::Update(ScreenBase const & modelView)
{
ASSERT(m_isActive, ());
using namespace std::chrono;
auto const nowTimestamp = std::chrono::steady_clock::now();
auto const curPos = modelView.GlobalRect().GlobalCenter();
double const instantPixelLen = (modelView.GtoP(curPos) - modelView.GtoP(m_updatePosition)).Length();
auto const updateElapsed = duration_cast<duration<double>>(nowTimestamp - m_updateTimestamp).count();
m_instantVelocity = (updateElapsed >= 1e-5) ? instantPixelLen / updateElapsed : 0.0;
m_updateTimestamp = nowTimestamp;
m_updatePosition = curPos;
}
bool KineticScroller::IsActive() const
{
return m_isActive;
}
m2::PointD KineticScroller::GetDirection(ScreenBase const & modelView) const
{
// In KineticScroller, m_direction is stored in a mixed state:
// the direction itself is in mercator space, while length(m_direction) is in pixel space.
// We need the same reaction at different zoom levels, so velocity should be
// calculated in pixel space.
ASSERT(m_isActive, ());
using namespace std::chrono;
auto const nowTimestamp = steady_clock::now();
auto const elapsed = duration_cast<duration<double>>(nowTimestamp - m_lastTimestamp).count();
m2::PointD const currentCenter = modelView.GlobalRect().GlobalCenter();
m2::PointD const lastCenter = m_lastRect.GlobalCenter();
double const pxDeltaLength = (modelView.GtoP(currentCenter) - modelView.GtoP(lastCenter)).Length();
m2::PointD delta = currentCenter - lastCenter;
if (!delta.IsAlmostZero())
{
delta = delta.Normalize();
// Velocity in pixels per second.
double const v = std::min(pxDeltaLength / elapsed, CalculateKineticMaxSpeed(modelView));
// At this point length(m_direction) is already in pixel space, and delta is normalized.
return delta * v;
}
return m2::PointD::Zero();
}
void KineticScroller::Cancel()
{
m_isActive = false;
}
drape_ptr<Animation> KineticScroller::CreateKineticAnimation(ScreenBase const & modelView)
{
static double vs = VisualParams::Instance().GetVisualScale();
static double kVelocityThreshold = kKineticThreshold * vs;
static double kInstantVelocityThreshold = kInstantVelocityThresholdUnscaled * vs;
if (m_instantVelocity < kInstantVelocityThreshold)
{
Cancel();
return drape_ptr<Animation>();
}
auto const direction = GetDirection(modelView);
Cancel();
if (direction.Length() < kVelocityThreshold)
return drape_ptr<Animation>();
// Before we start animation we have to convert length(m_direction) from pixel space to mercator space.
m2::PointD const center = modelView.GlobalRect().GlobalCenter();
double const offset = (modelView.PtoG(modelView.GtoP(center) + direction) - center).Length();
double const glbLength = kKineticAcceleration * offset;
m2::PointD const glbDirection = direction.Normalize() * glbLength;
m2::PointD const targetCenter = center + glbDirection;
if (!df::GetWorldRect().IsPointInside(targetCenter))
return drape_ptr<Animation>();
return make_unique_dp<KineticScrollAnimation>(center, glbDirection, kKineticDuration);
}
} // namespace df
| 2,223 |
575 | // Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/wm/full_restore/full_restore_controller.h"
#include <cstdint>
#include "ash/public/cpp/app_types.h"
#include "ash/public/cpp/shell_window_ids.h"
#include "ash/shell.h"
#include "ash/wm/mru_window_tracker.h"
#include "ash/wm/window_state.h"
#include "ash/wm/wm_event.h"
#include "base/check_op.h"
#include "base/threading/thread_task_runner_handle.h"
#include "components/full_restore/full_restore_utils.h"
#include "components/prefs/pref_service.h"
#include "ui/aura/client/aura_constants.h"
#include "ui/aura/window.h"
#include "ui/display/display.h"
#include "ui/display/screen.h"
#include "ui/views/widget/widget.h"
#include "ui/views/widget/widget_delegate.h"
namespace ash {
namespace {
FullRestoreController* g_instance = nullptr;
// Callback for testing which is run when `OnWidgetInitialized()` triggers a
// read from file.
FullRestoreController::ReadWindowCallback g_read_window_callback_for_testing;
// Callback for testing which is run when `SaveWindowImpl()` triggers a write to
// file.
FullRestoreController::SaveWindowCallback g_save_window_callback_for_testing;
// The list of possible app window parents.
// TODO(crbug.com/1164472): Support the rest of the desk containers, which
// are currently not always created, depending on whether the bento feature
// is enabled.
constexpr ShellWindowId kAppParentContainers[5] = {
kShellWindowId_DefaultContainerDeprecated,
kShellWindowId_DeskContainerB,
kShellWindowId_DeskContainerC,
kShellWindowId_DeskContainerD,
kShellWindowId_AlwaysOnTopContainer,
};
// The types of apps currently supported by full restore.
// TODO(crbug.com/1164472): Checking the app type is a temporary solution
// until we can get the windows which are allowed to full restore from the
// FullRestoreService.
constexpr AppType kSupportedAppTypes[3] = {
AppType::BROWSER, AppType::CHROME_APP, AppType::ARC_APP};
// Returns the sibling of `window` that `window` should be stacked below based
// on restored activation indices. Returns nullptr if `window` does not need
// to be moved in the z-ordering. Should be called after `window` is added as
// a child of its parent.
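// For example, with restored siblings whose activation keys are [7, 4, 2]
// followed by a non-restored sibling (null key), a window with key 5 is
// stacked below the sibling with key 4 - the first sibling whose key is
// smaller than its own (or null).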
aura::Window* GetSiblingToStackBelow(aura::Window* window) {
DCHECK(window->parent());
auto siblings = window->parent()->children();
#if DCHECK_IS_ON()
// Verify that the activation keys are descending and that non-restored
// windows are all at the end.
for (int i = 0; i < int{siblings.size()} - 2; ++i) {
int32_t* current_activation_key =
siblings[i]->GetProperty(full_restore::kActivationIndexKey);
size_t next_index = i + 1;
int32_t* next_activation_key =
siblings[next_index]->GetProperty(full_restore::kActivationIndexKey);
const bool descending_order =
current_activation_key &&
(!next_activation_key ||
*current_activation_key > *next_activation_key);
const bool both_null = !current_activation_key && !next_activation_key;
DCHECK(descending_order || both_null);
}
DCHECK_EQ(siblings.back(), window);
#endif
int32_t* restore_activation_key =
window->GetProperty(full_restore::kActivationIndexKey);
DCHECK(restore_activation_key);
for (int i = 0; i < int{siblings.size()} - 1; ++i) {
int32_t* sibling_restore_activation_key =
siblings[i]->GetProperty(full_restore::kActivationIndexKey);
if (!sibling_restore_activation_key ||
*restore_activation_key > *sibling_restore_activation_key) {
// Activation index is saved to match MRU order so lower means more
// recent/higher in stacking order. Also restored windows should be
// stacked below non-restored windows.
return siblings[i];
}
}
return nullptr;
}
} // namespace
FullRestoreController::FullRestoreController() {
DCHECK_EQ(nullptr, g_instance);
g_instance = this;
tablet_mode_observation_.Observe(Shell::Get()->tablet_mode_controller());
full_restore_info_observation_.Observe(
full_restore::FullRestoreInfo::GetInstance());
}
FullRestoreController::~FullRestoreController() {
DCHECK_EQ(this, g_instance);
g_instance = nullptr;
}
// static
FullRestoreController* FullRestoreController::Get() {
return g_instance;
}
void FullRestoreController::SaveWindow(WindowState* window_state) {
SaveWindowImpl(window_state, /*activation_index=*/base::nullopt);
}
void FullRestoreController::OnWindowActivated(aura::Window* gained_active) {
DCHECK(gained_active);
// Once a window gains activation, it can be cleared of its activation index
// key since it is no longer used in the stacking algorithm.
gained_active->ClearProperty(full_restore::kActivationIndexKey);
SaveAllWindows();
}
void FullRestoreController::OnActiveUserPrefServiceChanged(
PrefService* pref_service) {
// TODO(crbug.com/1164472): Register and then check the pref service.
}
void FullRestoreController::OnTabletModeStarted() {
SaveAllWindows();
}
void FullRestoreController::OnTabletModeEnded() {
SaveAllWindows();
}
void FullRestoreController::OnTabletControllerDestroyed() {
tablet_mode_observation_.Reset();
}
void FullRestoreController::OnAppLaunched(aura::Window* window) {}
void FullRestoreController::OnWidgetInitialized(views::Widget* widget) {
DCHECK(widget);
aura::Window* window = widget->GetNativeWindow();
DCHECK(window->parent());
std::unique_ptr<full_restore::WindowInfo> window_info =
g_read_window_callback_for_testing
? g_read_window_callback_for_testing.Run(window)
: full_restore::GetWindowInfo(window);
if (window_info) {
// Snap the window if necessary.
auto state_type = window_info->window_state_type;
if (state_type) {
if (*state_type == chromeos::WindowStateType::kLeftSnapped ||
*state_type == chromeos::WindowStateType::kRightSnapped) {
const WMEvent snap_event(*state_type ==
chromeos::WindowStateType::kLeftSnapped
? WM_EVENT_SNAP_LEFT
: WM_EVENT_SNAP_RIGHT);
WindowState::Get(window)->OnWMEvent(&snap_event);
}
}
}
int32_t* activation_index =
window->GetProperty(full_restore::kActivationIndexKey);
if (!activation_index)
return;
// Windows that are launched from full restore are not activatable initially
// to prevent them from taking activation when Widget::Show() is called. Make
// these windows activatable once they are launched. Use a post task since it
// is quite common for some widgets to explicitly call Show() after
// initialization.
// TODO(sammiequon): Instead of disabling activation when creating the widget
// and enabling it here, use ShowInactive() instead of Show() when the widget
// is created.
base::ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, base::BindOnce(
[](aura::Window* window) {
views::Widget* widget =
views::Widget::GetWidgetForNativeView(window);
DCHECK(widget);
widget->widget_delegate()->SetCanActivate(true);
},
window));
// Stack the window.
auto* target_sibling = GetSiblingToStackBelow(window);
if (target_sibling)
window->parent()->StackChildBelow(window, target_sibling);
}
void FullRestoreController::SaveAllWindows() {
auto mru_windows =
Shell::Get()->mru_window_tracker()->BuildMruWindowList(kAllDesks);
for (int i = 0; i < int{mru_windows.size()}; ++i) {
// Provide the activation index here since we need to loop through
// |mru_windows| anyhow. Otherwise we would need to loop again to get the
// same value in SaveWindowImpl().
WindowState* window_state = WindowState::Get(mru_windows[i]);
SaveWindowImpl(window_state, /*activation_index=*/i);
}
}
void FullRestoreController::SaveWindowImpl(
WindowState* window_state,
base::Optional<int> activation_index) {
DCHECK(window_state);
aura::Window* window = window_state->window();
// Only apps whose parent is a certain container can be saved.
if (!window->parent() ||
!base::Contains(kAppParentContainers, window->parent()->id())) {
return;
}
// Only some app types can be saved.
if (!base::Contains(
kSupportedAppTypes,
static_cast<AppType>(window->GetProperty(aura::client::kAppType)))) {
return;
}
int window_activation_index;
if (activation_index) {
window_activation_index = *activation_index;
} else {
auto mru_windows =
Shell::Get()->mru_window_tracker()->BuildMruWindowList(kAllDesks);
auto it = std::find(mru_windows.begin(), mru_windows.end(), window);
if (it != mru_windows.end())
window_activation_index = it - mru_windows.begin();
}
full_restore::WindowInfo window_info;
window_info.activation_index = window_activation_index;
window_info.window = window;
window_info.desk_id = window->GetProperty(aura::client::kWindowWorkspaceKey);
if (window->GetProperty(aura::client::kVisibleOnAllWorkspacesKey)) {
// Only save |visible_on_all_workspaces| field if it's true to reduce file
// storage size.
window_info.visible_on_all_workspaces = true;
}
// If there are restore bounds, use those as current bounds. On restore, for
// states with restore bounds (maximized, minimized, snapped, etc), they will
// take the current bounds as their restore bounds and have the current bounds
// determined by the system.
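// For example, for a maximized window we save its pre-maximize restore
// bounds; after relaunch the window is maximized by the system and keeps
// those saved bounds as the size to return to when unmaximized.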
window_info.current_bounds = window_state->HasRestoreBounds()
? window_state->GetRestoreBoundsInScreen()
: window->GetBoundsInScreen();
window_info.window_state_type = window_state->GetStateType();
window_info.display_id =
display::Screen::GetScreen()->GetDisplayNearestWindow(window).id();
full_restore::SaveWindowInfo(window_info);
if (g_save_window_callback_for_testing)
g_save_window_callback_for_testing.Run(window_info);
}
void FullRestoreController::SetReadWindowCallbackForTesting(
ReadWindowCallback callback) {
g_read_window_callback_for_testing = std::move(callback);
}
void FullRestoreController::SetSaveWindowCallbackForTesting(
SaveWindowCallback callback) {
g_save_window_callback_for_testing = std::move(callback);
}
} // namespace ash
| 3,706 |
2,032 | /*
Tencent is pleased to support the open source community by making PhxQueue available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
<https://opensource.org/licenses/BSD-3-Clause>
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "phxqueue/store/storesm.h"
#include <cinttypes>
#include <unistd.h>
#include "phxqueue/comm.h"
#include "phxpaxos/node.h"
#include "phxqueue/store/store.h"
#include "phxqueue/store/basemgr.h"
#include "phxqueue/store/storemeta.h"
#include "phxqueue/store/syncctrl.h"
namespace phxqueue {
namespace store {
using namespace std;
class StoreSM::StoreSMImpl {
public:
StoreSMImpl() {}
virtual ~StoreSMImpl() {}
Store *store{nullptr};
};
StoreSM::StoreSM(Store *const store) : impl_(new StoreSMImpl()) {
impl_->store = store;
}
StoreSM::~StoreSM() {}
bool StoreSM::Execute(const int paxos_group_id, const uint64_t instance_id,
const string &paxos_value, phxpaxos::SMCtx *ctx) {
QLVerb("StoreSM::Execute begin");
QLVerb("paxos_group_id %d instance_id %" PRIu64 " ctx %p paxos_value.length %zu",
paxos_group_id, instance_id, ctx, paxos_value.length());
comm::RetCode ret;
StoreContext *sc{nullptr};
if (ctx && ctx->m_pCtx) {
sc = static_cast<StoreContext *>(ctx->m_pCtx);
}
proto::StorePaxosArgs args;
if (!args.ParseFromString(paxos_value)) {
QLErr("StorePaxosArgs ParseFromString err. pv.length %zu", paxos_value.length());
if (sc) sc->result = comm::RetCode::RET_ERR_ARG;
return false;
}
const uint64_t cursor_id{instance_id};
if (args.has_add_req()) {
QLVerb("value.length() %zu", paxos_value.length());
if (0 > as_integer(ret = impl_->store->GetBaseMgr()->Add(cursor_id, args.add_req()))) {
QLErr("BaseMgr::Add err %d", as_integer(ret));
if (sc) sc->result = ret;
return false;
}
}
if (args.has_sync_ctrl_info()) {
if (0 > as_integer(ret = impl_->store->GetSyncCtrl()->SyncCursorID(args.sync_ctrl_info()))) {
QLErr("SyncCtrl::SyncCursorID ret %d", as_integer(ret));
if (sc) sc->result = ret;
return false;
}
}
if (!args.master_addr().ip().empty()) {
auto opt(impl_->store->GetStoreOption());
comm::proto::Addr addr;
addr.set_ip(opt->ip);
addr.set_port(opt->port);
addr.set_paxos_port(opt->paxos_port);
if (!(addr.ip() == args.master_addr().ip() &&
addr.port() == args.master_addr().port() &&
addr.paxos_port() == args.master_addr().paxos_port())) {
// 1. drop master
QLInfo("drop master. addr.ip %s master_addr.ip %s", addr.ip().c_str(), args.master_addr().ip().c_str());
if (nullptr != impl_->store->GetNode()) { // GetNode() still returns nullptr while RunNode() is being called in Store::PaxosInit()
impl_->store->GetNode()->DropMaster(paxos_group_id);
}
}
}
if (sc) sc->result = comm::RetCode::RET_OK;
QLVerb("StoreSM::Execute end");
return true;
}
const uint64_t StoreSM::GetCheckpointInstanceID(const int paxos_group_id) const {
QLVerb("StoreSM::GetCheckpointInstanceID begin");
uint64_t cp(-1);
comm::RetCode ret;
auto stat(impl_->store->GetCheckPointStatMgr()->GetCheckPointStat(paxos_group_id));
if (!stat) {
QLErr("CheckPointStatMgr::GetCheckPointStat fail paxos_group_id %d", paxos_group_id);
return -1;
}
if (comm::RetCode::RET_OK != (ret = stat->GetCheckPoint(cp))) {
QLErr("GetCheckPoint fail ret %d", as_integer(ret));
return -1;
}
QLVerb("paxos_group_id %d cp %" PRIu64, paxos_group_id, cp);
comm::StoreSMBP::GetThreadInstance()->OnGetCheckpointInstanceID(paxos_group_id, cp);
return cp;
}
int StoreSM::GetCheckpointState(const int paxos_group_id, string &dir_path,
vector<string> &file_list) {
QLVerb("StoreSM::GetCheckpointState begin");
auto stat(impl_->store->GetCheckPointStatMgr()->GetCheckPointStat(paxos_group_id));
if (!stat) {
QLErr("GetCheckPointStat fail paxos_group_id %d", paxos_group_id);
return -1;
}
dir_path = stat->GetDir();
file_list.clear();
file_list.push_back(stat->GetFile());
return 0;
}
int StoreSM::LoadCheckpointState(const int paxos_group_id, const string &tmp_dir_path,
const vector<string> &vecFileList, const uint64_t cp) {
QLVerb("StoreSM::LoadCheckpointState begin");
comm::RetCode ret;
QLErr("paxos_group_id %d cp %" PRIu64, paxos_group_id, cp);
if (cp == -1) return 0;
auto stat = impl_->store->GetCheckPointStatMgr()->GetCheckPointStat(paxos_group_id);
if (!stat) {
QLErr("GetCheckPointStat fail paxos_group_id %d", paxos_group_id);
return -1;
}
if (comm::RetCode::RET_OK != (ret = stat->UpdateCheckPointAndFlush(cp))) {
QLErr("UpdateCheckPointAndFlush ret %d paxos_group_id %d cp %" PRIu64,
ret, paxos_group_id, cp);
return -2;
}
sleep(10); // wait for other groups to finish LoadCheckpointState
return 0;
}
} // namespace store
} // namespace phxqueue
| 2,464 |
4,812 | //===-- PPCInstrBuilder.h - Aides for building PPC insts --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file exposes functions that may be used with BuildMI from the
// MachineInstrBuilder.h file to simplify generating frame and constant pool
// references.
//
// For reference, the order of operands for memory references is:
// (Operand), Dest Reg, Base Reg, and either Reg Index or Immediate
// Displacement.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRBUILDER_H
#define LLVM_LIB_TARGET_POWERPC_PPCINSTRBUILDER_H
#include "llvm/CodeGen/MachineInstrBuilder.h"
namespace llvm {
/// addFrameReference - This function is used to add a reference to the base of
/// an abstract object on the stack frame of the current function. This
/// reference has base register as the FrameIndex offset until it is resolved.
/// This allows a constant offset to be specified as well...
///
static inline const MachineInstrBuilder&
addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0,
bool mem = true) {
if (mem)
return MIB.addImm(Offset).addFrameIndex(FI);
else
return MIB.addFrameIndex(FI).addImm(Offset);
}
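/// A hypothetical use, loading a stack slot into a register (the opcode and
/// surrounding variables are illustrative only, not part of this header):
///   addFrameReference(BuildMI(MBB, MI, DL, TII.get(PPC::LWZ), DestReg), FI);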
} // End llvm namespace
#endif
| 451 |
4,640 | <gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""USMP Transform Python API for passes"""
# pylint: disable=invalid-name
from typing import Dict
import tvm
from tvm.tir import Stmt
from tvm.tir.usmp.utils import PoolAllocation
from . import _ffi_api
def convert_pool_allocations_to_offsets(
pool_allocations: Dict[Stmt, PoolAllocation], emit_tvmscript_printable: bool = False
) -> tvm.transform.Pass:
"""Convert pool allocations to Load nodes with offsets from pools.
Parameters
----------
pool_allocations : Dict[Stmt, PoolAllocation]
Allocate or AllocateConst node to pool allocation mapping
emit_tvmscript_printable : bool
A toggle to emit TVMScript printable IRModule for unit tests
removing all attributes that should be attached for integration
Returns
-------
ret: tvm.transform.Pass
The registered pass that converts the allocations to offsets.
"""
return _ffi_api.ConvertPoolAllocationsToOffsets(pool_allocations, emit_tvmscript_printable)
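# A minimal usage sketch (the pool_allocations mapping itself comes from the
# USMP analysis passes; the names below are illustrative):
#
#   offsets_pass = convert_pool_allocations_to_offsets(pool_allocations)
#   mod = offsets_pass(mod)  # tvm.transform.Pass objects are applied by call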
| 523 |
2,151 | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/media/router/route_message_observer.h"
#include "chrome/browser/media/router/media_router.h"
namespace media_router {
RouteMessageObserver::RouteMessageObserver(MediaRouter* router,
const MediaRoute::Id& route_id)
: router_(router), route_id_(route_id) {
DCHECK(router_);
DCHECK(!route_id_.empty());
router_->RegisterRouteMessageObserver(this);
}
RouteMessageObserver::~RouteMessageObserver() {
router_->UnregisterRouteMessageObserver(this);
}
} // namespace media_router
| 260 |
1,288 | <reponame>s3bubble/shaka-packager<filename>packager/media/crypto/subsample_generator_unittest.cc
// Copyright 2018 Google LLC. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/crypto/subsample_generator.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "packager/media/base/audio_stream_info.h"
#include "packager/media/base/video_stream_info.h"
#include "packager/media/codecs/av1_parser.h"
#include "packager/media/codecs/video_slice_header_parser.h"
#include "packager/media/codecs/vpx_parser.h"
#include "packager/status_test_util.h"
namespace shaka {
namespace media {
namespace {
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::testing::Test;
using ::testing::Values;
using ::testing::WithParamInterface;
const bool kVP9SubsampleEncryption = true;
const uint8_t kH264CodecConfig[] = {
// clang-format off
// Header
0x01, 0x64, 0x00, 0x1e, 0xff,
// SPS count (ignore top three bits)
0xe1,
// SPS
0x00, 0x19, // Size
0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x40, 0xa0, 0x2f, 0xf9, 0x70, 0x11,
0x00, 0x00, 0x03, 0x03, 0xe9, 0x00, 0x00, 0xea, 0x60, 0x0f, 0x16, 0x2d,
0x96,
// PPS count
0x01,
// PPS
0x00, 0x06, // Size
0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
// clang-format on
};
const uint8_t kAV1CodecConfig[] = {0x00, 0x01, 0x02, 0x03};
const int kTrackId = 1;
const int32_t kTimeScale = 1000;
const int64_t kDuration = 10000;
const char kCodecString[] = "codec string";
const char kLanguage[] = "eng";
const bool kEncrypted = true;
VideoStreamInfo GetVideoStreamInfo(Codec codec) {
const uint16_t kWidth = 10u;
const uint16_t kHeight = 20u;
const uint32_t kPixelWidth = 2u;
const uint32_t kPixelHeight = 3u;
const uint8_t kTransferCharacteristics = 0;
const int16_t kTrickPlayFactor = 0;
const uint8_t kNaluLengthSize = 1u;
const uint8_t* codec_config = nullptr;
size_t codec_config_size = 0;
switch (codec) {
case kCodecH264:
codec_config = kH264CodecConfig;
codec_config_size = sizeof(kH264CodecConfig);
break;
case kCodecAV1:
codec_config = kAV1CodecConfig;
codec_config_size = sizeof(kAV1CodecConfig);
break;
default:
// We do not care about the codec configs for other codecs in this file.
break;
}
return VideoStreamInfo(
kTrackId, kTimeScale, kDuration, codec, H26xStreamFormat::kUnSpecified,
kCodecString, codec_config, codec_config_size, kWidth, kHeight,
kPixelWidth, kPixelHeight, kTransferCharacteristics, kTrickPlayFactor,
kNaluLengthSize, kLanguage, !kEncrypted);
}
AudioStreamInfo GetAudioStreamInfo(Codec codec) {
const uint8_t kSampleBits = 1;
const uint8_t kNumChannels = 2;
const uint32_t kSamplingFrequency = 48000;
const uint64_t kSeekPrerollNs = 12345;
const uint64_t kCodecDelayNs = 56789;
const uint32_t kMaxBitrate = 13579;
const uint32_t kAvgBitrate = 13000;
const uint8_t kCodecConfig[] = {0x00};
return AudioStreamInfo(kTrackId, kTimeScale, kDuration, codec, kCodecString,
kCodecConfig, sizeof(kCodecConfig), kSampleBits,
kNumChannels, kSamplingFrequency, kSeekPrerollNs,
kCodecDelayNs, kMaxBitrate, kAvgBitrate, kLanguage,
!kEncrypted);
}
} // namespace
inline bool operator==(const SubsampleEntry& lhs, const SubsampleEntry& rhs) {
return lhs.clear_bytes == rhs.clear_bytes &&
lhs.cipher_bytes == rhs.cipher_bytes;
}
class MockVPxParser : public VPxParser {
public:
MOCK_METHOD3(Parse,
bool(const uint8_t* data,
size_t data_size,
std::vector<VPxFrameInfo>* vpx_frames));
};
class MockVideoSliceHeaderParser : public VideoSliceHeaderParser {
public:
MOCK_METHOD1(Initialize,
bool(const std::vector<uint8_t>& decoder_configuration));
MOCK_METHOD1(ProcessNalu, bool(const Nalu& nalu));
MOCK_METHOD1(GetHeaderSize, int64_t(const Nalu& nalu));
};
class MockAV1Parser : public AV1Parser {
public:
MOCK_METHOD3(Parse,
bool(const uint8_t* data,
size_t data_size,
std::vector<Tile>* tiles));
};
class SubsampleGeneratorTest : public Test, public WithParamInterface<FourCC> {
public:
SubsampleGeneratorTest() : protection_scheme_(GetParam()) {}
protected:
FourCC protection_scheme_;
};
TEST_P(SubsampleGeneratorTest, VP9FullSampleEncryption) {
SubsampleGenerator generator(!kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecVP9)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAre());
}
TEST_P(SubsampleGeneratorTest, VP9ParseFailed) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecVP9)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
std::unique_ptr<MockVPxParser> mock_vpx_parser(new MockVPxParser);
EXPECT_CALL(*mock_vpx_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(Return(false));
generator.InjectVpxParserForTesting(std::move(mock_vpx_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_NOT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
}
TEST_P(SubsampleGeneratorTest, VP9SubsampleEncryption) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecVP9)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
constexpr size_t kUncompressedHeaderSize = 20;
// VP9 block align protected data for all protection schemes.
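// (Alignment here means cipher_bytes is rounded down to a multiple of the
// 16-byte AES block and the remainder moves into clear_bytes:
// {20,30} -> cipher 30 -> 16, clear 20 + 14 = 34, hence {34,16}.)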
const SubsampleEntry kExpectedSubsamples[] = {
// {20,30} block aligned.
{34, 16},
};
std::vector<VPxFrameInfo> vpx_frame_info(1);
vpx_frame_info[0].frame_size = kFrameSize;
vpx_frame_info[0].uncompressed_header_size = kUncompressedHeaderSize;
std::unique_ptr<MockVPxParser> mock_vpx_parser(new MockVPxParser);
EXPECT_CALL(*mock_vpx_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(DoAll(SetArgPointee<2>(vpx_frame_info), Return(true)));
generator.InjectVpxParserForTesting(std::move(mock_vpx_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamples));
}
TEST_P(SubsampleGeneratorTest, VP9SubsampleEncryptionWithSuperFrame) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecVP9)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
// Super frame with two subframes.
constexpr size_t kSubFrameSizes[] = {10, 34};
constexpr size_t kUncompressedHeaderSizes[] = {4, 1};
// VP9 block align protected data for all protection schemes.
const SubsampleEntry kExpectedSubsamples[] = {
// {4,6},{1,33} block aligned => {10,0},{2,32}
// Then merge consecutive clear-only subsamples.
{12, 32},
// Superframe index (50 - 10 - 34).
{6, 0},
};
std::vector<VPxFrameInfo> vpx_frame_info(2);
for (int i = 0; i < 2; i++) {
vpx_frame_info[i].frame_size = kSubFrameSizes[i];
vpx_frame_info[i].uncompressed_header_size = kUncompressedHeaderSizes[i];
}
std::unique_ptr<MockVPxParser> mock_vpx_parser(new MockVPxParser);
EXPECT_CALL(*mock_vpx_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(DoAll(SetArgPointee<2>(vpx_frame_info), Return(true)));
generator.InjectVpxParserForTesting(std::move(mock_vpx_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamples));
}
TEST_P(SubsampleGeneratorTest, VP9SubsampleEncryptionWithLargeSuperFrame) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecVP9)));
constexpr size_t kFrameSize = 0x23456;
constexpr uint8_t kFrame[kFrameSize] = {};
// Super frame with two subframes.
constexpr size_t kSubFrameSizes[] = {0x10, 0x23000, 0x440};
constexpr size_t kUncompressedHeaderSizes[] = {4, 0x21000, 2};
// VP9 block align protected data for all protection schemes.
const SubsampleEntry kExpectedSubsamples[] = {
// {4,12},{0x21000,0x2000} block aligned => {16,0},{0x21000,0x2000}
// Then split big clear_bytes, merge consecutive clear-only subsamples.
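// Arithmetic: merged clear bytes are 16 + 0x21000 = 0x21010, which splits
// into 0xffff + 0xffff + 0x1012; the 0x2000 cipher bytes stay attached to
// the last piece.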
{0xffff, 0},
{0xffff, 0},
{0x1012, 0x2000},
// {2,0x440-2} block aligned.
{0x10, 0x430},
// Superframe index.
{6, 0},
};
std::vector<VPxFrameInfo> vpx_frame_info(3);
for (int i = 0; i < 3; i++) {
vpx_frame_info[i].frame_size = kSubFrameSizes[i];
vpx_frame_info[i].uncompressed_header_size = kUncompressedHeaderSizes[i];
}
std::unique_ptr<MockVPxParser> mock_vpx_parser(new MockVPxParser);
EXPECT_CALL(*mock_vpx_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(DoAll(SetArgPointee<2>(vpx_frame_info), Return(true)));
generator.InjectVpxParserForTesting(std::move(mock_vpx_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamples));
}
TEST_P(SubsampleGeneratorTest, H264ParseFailed) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecH264)));
constexpr uint8_t kFrame[] = {
// First NALU (nalu_size = 9).
0x09, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09};
constexpr size_t kFrameSize = sizeof(kFrame);
std::unique_ptr<MockVideoSliceHeaderParser> mock_video_slice_header_parser(
new MockVideoSliceHeaderParser);
EXPECT_CALL(*mock_video_slice_header_parser, ProcessNalu(_))
.WillOnce(Return(true));
EXPECT_CALL(*mock_video_slice_header_parser, GetHeaderSize(_))
.WillOnce(Return(-1));
generator.InjectVideoSliceHeaderParserForTesting(
std::move(mock_video_slice_header_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_NOT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
}
TEST_P(SubsampleGeneratorTest, H264SubsampleEncryption) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecH264)));
constexpr uint8_t kFrame[] = {
// First NALU (nalu_size = 9).
0x09, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
// Second NALU (nalu_size = 0x25).
0x27, 0x25, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27,
// Third non-video-slice NALU (nalu_size = 0x32).
0x32, 0x67, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32};
constexpr size_t kFrameSize = sizeof(kFrame);
// There are two video slices.
const size_t kSliceHeaderSize[] = {4, 5};
const SubsampleEntry kExpectedUnalignedSubsamples[] = {
// clear_bytes = nalu_length_size (1) + type_size (1) + header_size (4).
// encrypted_bytes = nalu_size (9) - type_size (1) - header_size (4).
{6, 4},
// clear_bytes = nalu_length_size (1) + type_size (1) + header_size (5).
// encrypted_bytes = nalu_size (0x27) - type_size (1) - header_size (5).
{7, 0x21},
// Non-video slice, clear_bytes = nalu_length_size (1) + nalu_size (0x32).
// encrypted_bytes = 0.
{0x33, 0},
};
const SubsampleEntry kExpectedAlignedSubsamples[] = {
// {6,4},{7,0x21} block aligned => {10,0},{8,0x20}
// Then merge consecutive clear-only subsamples.
{18, 0x20},
{0x33, 0},
};
std::unique_ptr<MockVideoSliceHeaderParser> mock_video_slice_header_parser(
new MockVideoSliceHeaderParser);
EXPECT_CALL(*mock_video_slice_header_parser, ProcessNalu(_))
.Times(AtLeast(2))
.WillRepeatedly(Return(true));
EXPECT_CALL(*mock_video_slice_header_parser, GetHeaderSize(_))
.WillOnce(Return(kSliceHeaderSize[0]))
.WillOnce(Return(kSliceHeaderSize[1]));
generator.InjectVideoSliceHeaderParserForTesting(
std::move(mock_video_slice_header_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
// Align subsamples for all CENC protection schemes except for cbcs.
if (protection_scheme_ == FOURCC_cbcs)
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedUnalignedSubsamples));
else
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedAlignedSubsamples));
}
TEST_P(SubsampleGeneratorTest, AV1ParserFailed) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecAV1)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
std::unique_ptr<MockAV1Parser> mock_av1_parser(new MockAV1Parser);
EXPECT_CALL(*mock_av1_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(Return(false));
generator.InjectAV1ParserForTesting(std::move(mock_av1_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_NOT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
}
TEST_P(SubsampleGeneratorTest, AV1SubsampleEncryption) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetVideoStreamInfo(kCodecAV1)));
constexpr size_t kFrameSize = 70;
constexpr uint8_t kFrame[kFrameSize] = {};
constexpr size_t kTileOffsets[] = {4, 11, 44};
constexpr size_t kTileSizes[] = {6, 33, 20};
constexpr int kNumTiles = 3;
// AV1 block align protected data for all protection schemes.
const SubsampleEntry kExpectedSubsamplesCbcs[] = {
// {4,6},{11-4-6,33},{44-11-33,20},{70-44-20,0}
// Always starts on the first byte and ends on the last byte of the tiles.
{4, 6},
{1, 33},
{0, 20},
{6, 0},
};
const SubsampleEntry kExpectedSubsamplesNonCbcs[] = {
// {4,6},{11-4-6,33},{44-11-33,20},{70-44-20,0} block aligned =>
// {10,0},{2,32},{4,16},{6,0}.
// Then merge consecutive clear-only subsamples.
{12, 32},
{4, 16},
{6, 0},
};
std::vector<AV1Parser::Tile> tiles(kNumTiles);
for (int i = 0; i < kNumTiles; i++) {
tiles[i].start_offset_in_bytes = kTileOffsets[i];
tiles[i].size_in_bytes = kTileSizes[i];
}
std::unique_ptr<MockAV1Parser> mock_av1_parser(new MockAV1Parser);
EXPECT_CALL(*mock_av1_parser, Parse(kFrame, kFrameSize, _))
.WillOnce(DoAll(SetArgPointee<2>(tiles), Return(true)));
generator.InjectAV1ParserForTesting(std::move(mock_av1_parser));
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
if (protection_scheme_ == FOURCC_cbcs)
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamplesCbcs));
else
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamplesNonCbcs));
}
TEST_P(SubsampleGeneratorTest, AACIsFullSampleEncrypted) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(
generator.Initialize(protection_scheme_, GetAudioStreamInfo(kCodecAAC)));
constexpr size_t kFrameSize = 50;
constexpr uint8_t kFrame[kFrameSize] = {};
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAre());
}
INSTANTIATE_TEST_CASE_P(
CencProtectionSchemes,
SubsampleGeneratorTest,
Values(FOURCC_cenc, FOURCC_cens, FOURCC_cbc1, FOURCC_cbcs));
TEST(SampleAesSubsampleGeneratorTest, AAC) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(generator.Initialize(kAppleSampleAesProtectionScheme,
GetAudioStreamInfo(kCodecAAC)));
constexpr size_t kNumFrames = 4;
constexpr size_t kMaxFrameSize = 100;
constexpr size_t kFrameSizes[] = {6, 16, 17, 50};
constexpr uint8_t kFrames[kNumFrames][kMaxFrameSize] = {};
// 16 bytes clear lead.
const SubsampleEntry kExpectedSubsamples[] = {
{6, 0},
{16, 0},
{16, 1},
{16, 34},
};
for (int i = 0; i < 4; i++) {
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(
generator.GenerateSubsamples(kFrames[i], kFrameSizes[i], &subsamples));
EXPECT_THAT(subsamples, ElementsAre(kExpectedSubsamples[i]));
}
}
TEST(SampleAesSubsampleGeneratorTest, H264) {
SubsampleGenerator generator(kVP9SubsampleEncryption);
ASSERT_OK(generator.Initialize(kAppleSampleAesProtectionScheme,
GetVideoStreamInfo(kCodecH264)));
constexpr uint8_t kFrame[] = {
// First NALU (nalu_size = 9).
0x09, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
// Second NALU (nalu_size = 0x30).
0x30, 0x25, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30,
// Third NALU (nalu_size = 0x31).
0x31, 0x25, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31,
// Fourth non-video-slice NALU (nalu_size = 6).
0x32, 0x67, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23,
0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32};
constexpr size_t kFrameSize = sizeof(kFrame);
const SubsampleEntry kExpectedSubsamples[] = {
// NAL units with nalu_size <= 32+16 are not encrypted, so
// the first two NALUs are left in the clear: {1+9,0},{1+48,0}.
// The third NALU has a fixed 32-byte clear lead, plus 1 byte of NALU length
// size, so it is {1+32, 17}.
// Then merge consecutive clear-only subsamples.
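// Total: (1+9) + (1+48) + (1+32) = 92 clear bytes, and 0x31 - 0x20 = 17
// cipher bytes, matching {92, 17} below.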
{1 + 9 + 1 + 48 + 1 + 32, 17},
// Non video slice is not encrypted.
{0x33, 0},
};
std::vector<SubsampleEntry> subsamples;
ASSERT_OK(generator.GenerateSubsamples(kFrame, kFrameSize, &subsamples));
EXPECT_THAT(subsamples, ElementsAreArray(kExpectedSubsamples));
}
} // namespace media
} // namespace shaka
| 8,572 |
937 | import logging
import uuid
from datetime import timedelta
from typing import List, Optional
from django.apps import apps
from django.db import IntegrityError, models, transaction
from django.utils import dateformat, timezone
from django.utils.encoding import smart_str
from stripe.api_resources.abstract.api_resource import APIResource
from stripe.error import InvalidRequestError
from ..fields import (
JSONField,
StripeDateTimeField,
StripeForeignKey,
StripeIdField,
StripePercentField,
)
from ..managers import StripeModelManager
from ..settings import djstripe_settings
from ..utils import get_friendly_currency_amount, get_id_from_stripe_data
logger = logging.getLogger(__name__)
class StripeBaseModel(models.Model):
stripe_class: Optional[APIResource] = None
djstripe_created = models.DateTimeField(auto_now_add=True, editable=False)
djstripe_updated = models.DateTimeField(auto_now=True, editable=False)
class Meta:
abstract = True
@classmethod
def api_list(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
"""
Call the stripe API's list operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
See Stripe documentation for accepted kwargs for each object.
:returns: an iterator over all items in the query
"""
return cls.stripe_class.list(api_key=api_key, **kwargs).auto_paging_iter()
class StripeModel(StripeBaseModel):
# This must be defined in descendants of this model/mixin
# e.g. Event, Charge, Customer, etc.
expand_fields: List[str] = []
stripe_dashboard_item_name = ""
objects = models.Manager()
stripe_objects = StripeModelManager()
djstripe_id = models.BigAutoField(
verbose_name="ID", serialize=False, primary_key=True
)
id = StripeIdField(unique=True)
djstripe_owner_account: Optional[StripeForeignKey] = StripeForeignKey(
"djstripe.Account",
on_delete=models.CASCADE,
to_field="id",
null=True,
blank=True,
help_text="The Stripe Account this object belongs to.",
)
livemode = models.BooleanField(
null=True,
default=None,
blank=True,
help_text="Null here indicates that the livemode status is unknown or was "
"previously unrecorded. Otherwise, this field indicates whether this record "
"comes from Stripe test mode or live mode operation.",
)
created = StripeDateTimeField(
null=True,
blank=True,
help_text="The datetime this object was created in stripe.",
)
metadata = JSONField(
null=True,
blank=True,
help_text="A set of key/value pairs that you can attach to an object. "
"It can be useful for storing additional information about an object in "
"a structured format.",
)
description = models.TextField(
null=True, blank=True, help_text="A description of this object."
)
class Meta:
abstract = True
get_latest_by = "created"
def _get_base_stripe_dashboard_url(self):
owner_path_prefix = (
(self.djstripe_owner_account.id + "/")
if self.djstripe_owner_account
else ""
)
return "https://dashboard.stripe.com/{}{}".format(
owner_path_prefix, "test/" if not self.livemode else ""
)
def get_stripe_dashboard_url(self) -> str:
"""Get the stripe dashboard url for this object."""
if not self.stripe_dashboard_item_name or not self.id:
return ""
else:
return "{base_url}{item}/{id}".format(
base_url=self._get_base_stripe_dashboard_url(),
item=self.stripe_dashboard_item_name,
id=self.id,
)
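# e.g. "https://dashboard.stripe.com/test/customers/cus_123" for a
# test-mode Customer (illustrative item name and id).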
@property
def human_readable_amount(self) -> str:
return get_friendly_currency_amount(self.amount, self.currency)
@property
def default_api_key(self) -> str:
# If the class is abstract (StripeModel), fall back to default key.
if not self._meta.abstract:
if self.djstripe_owner_account:
return self.djstripe_owner_account.get_default_api_key()
return djstripe_settings.get_default_api_key(self.livemode)
def _get_stripe_account_id(self, api_key=None) -> Optional[str]:
"""
Call the stripe API's retrieve operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
api_key = api_key or self.default_api_key
try:
djstripe_owner_account = self.djstripe_owner_account
if djstripe_owner_account is not None:
return djstripe_owner_account.id
except (AttributeError, KeyError, ValueError):
pass
# Get reverse foreign key relations to Account in case we need to
# retrieve ourselves using that Account ID.
reverse_account_relations = (
field
for field in self._meta.get_fields(include_parents=True)
if field.is_relation and field.one_to_many
# Avoid circular import problems by using the app registry to
# get the model class rather than a direct import.
and field.related_model
is apps.get_model(app_label="djstripe", model_name="account")
)
# Handle case where we have a reverse relation to Account and should pass
# that account ID to the retrieve call.
for field in reverse_account_relations:
# Grab the related object, using the first one we find.
reverse_lookup_attr = field.get_accessor_name()
account = getattr(self, reverse_lookup_attr).first()
if account is not None:
return account.id
return None
def api_retrieve(self, api_key=None, stripe_account=None):
"""
Call the stripe API's retrieve operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
return self.stripe_class.retrieve(
id=self.id,
api_key=api_key or self.default_api_key,
expand=self.expand_fields,
stripe_account=stripe_account,
)
@classmethod
def _api_create(cls, api_key=djstripe_settings.STRIPE_SECRET_KEY, **kwargs):
"""
Call the stripe API's create operation for this model.
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
"""
return cls.stripe_class.create(api_key=api_key, **kwargs)
def _api_delete(self, api_key=None, stripe_account=None, **kwargs):
"""
Call the stripe API's delete operation for this model
:param api_key: The api key to use for this request. \
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
api_key = api_key or self.default_api_key
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
return self.stripe_class.delete(
self.id, api_key=api_key, stripe_account=stripe_account, **kwargs
)
def _api_update(self, api_key=None, stripe_account=None, **kwargs):
"""
Call the stripe API's modify operation for this model
:param api_key: The api key to use for this request.
Defaults to djstripe_settings.STRIPE_SECRET_KEY.
:type api_key: string
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
"""
api_key = api_key or self.default_api_key
# Prefer passed in stripe_account if set.
if not stripe_account:
stripe_account = self._get_stripe_account_id(api_key)
return self.stripe_class.modify(
self.id, api_key=api_key, stripe_account=stripe_account, **kwargs
)
def str_parts(self) -> List[str]:
"""
Extend this to add information to the string representation of the object
"""
return ["id={id}".format(id=self.id)]
@classmethod
def _manipulate_stripe_object_hook(cls, data):
"""
Gets called by this object's stripe object conversion method just before
conversion.
Use this to populate custom fields in a StripeModel from stripe data.
"""
return data
@classmethod
def _find_owner_account(cls, data, api_key=djstripe_settings.STRIPE_SECRET_KEY):
"""
Fetches the Stripe Account (djstripe_owner_account model field)
linked to the class, cls.
Tries to retrieve it using the stripe_account if given.
Otherwise uses the api_key.
"""
from .account import Account
# try to fetch by stripe_account. Also takes care of Stripe Connected Accounts
if data:
# case of Webhook Event Trigger
if data.get("object") == "event":
# if account key exists and has a not null value
if data.get("account"):
stripe_account_id = get_id_from_stripe_data(data.get("account"))
if stripe_account_id:
return Account._get_or_retrieve(id=stripe_account_id)
else:
stripe_account = getattr(data, "stripe_account", None)
if stripe_account:
stripe_account_id = get_id_from_stripe_data(stripe_account)
if stripe_account_id:
return Account._get_or_retrieve(id=stripe_account_id)
# try to fetch by the given api_key.
return Account.get_or_retrieve_for_api_key(api_key)
@classmethod
def _stripe_object_to_record(
cls,
data: dict,
current_ids=None,
pending_relations: list = None,
stripe_account: str = None,
) -> dict:
"""
This takes an object, as it is formatted in Stripe's current API for our object
type. In return, it provides a dict. The dict can be used to create a record or
to update a record.
This function takes care of mapping from one field name to another, converting
from cents to dollars, converting timestamps, and eliminating unused fields
(so that an objects.create() call would not fail).
:param data: the object, as sent by Stripe. Parsed from JSON, into a dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
:param pending_relations: list of tuples of relations to be attached post-save
:param stripe_account: The optional connected account \
for which this request is being made.
:return: All the members from the input, translated, mutated, etc
"""
manipulated_data = cls._manipulate_stripe_object_hook(data)
if not cls.is_valid_object(data):
raise ValueError(
"Trying to fit a %r into %r. Aborting."
% (data.get("object", ""), cls.__name__)
)
result = {}
if current_ids is None:
current_ids = set()
# Iterate over all the fields that we know are related to Stripe,
# let each field work its own magic
ignore_fields = ["date_purged", "subscriber"] # XXX: Customer hack
for field in cls._meta.fields:
if field.name.startswith("djstripe_") or field.name in ignore_fields:
continue
if isinstance(field, models.ForeignKey):
field_data, skip, is_nulled = cls._stripe_object_field_to_foreign_key(
field=field,
manipulated_data=manipulated_data,
current_ids=current_ids,
pending_relations=pending_relations,
stripe_account=stripe_account,
)
if skip and not is_nulled:
continue
else:
if hasattr(field, "stripe_to_db"):
field_data = field.stripe_to_db(manipulated_data)
else:
field_data = manipulated_data.get(field.name)
if (
isinstance(field, (models.CharField, models.TextField))
and field_data is None
):
# TODO - this applies to StripeEnumField as well, since it
# sub-classes CharField, is that intentional?
field_data = ""
result[field.name] = field_data
# For all objects other than the account object itself, get the API key
# attached to the request, and get the matching Account for that key.
owner_account = cls._find_owner_account(data)
if owner_account:
result["djstripe_owner_account"] = owner_account
return result
@classmethod
def _stripe_object_field_to_foreign_key(
cls,
field,
manipulated_data,
current_ids=None,
pending_relations=None,
stripe_account=None,
):
"""
This converts a stripe API field to the dj stripe object it references,
so that foreign keys can be connected up automatically.
:param field:
:type field: models.ForeignKey
:param manipulated_data:
:type manipulated_data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
:param pending_relations: list of tuples of relations to be attached post-save
:type pending_relations: list
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
:return:
"""
from djstripe.models import DjstripePaymentMethod
field_data = None
field_name = field.name
refetch = False
skip = False
# a flag to indicate if the given field is null upstream on Stripe
is_nulled = False
if issubclass(field.related_model, StripeModel) or issubclass(
field.related_model, DjstripePaymentMethod
):
if field_name in manipulated_data:
raw_field_data = manipulated_data.get(field_name)
# The field's value is None. Skip syncing, but record the value as None;
# otherwise the sync of nulled FKs would be skipped entirely.
if not raw_field_data:
is_nulled = True
skip = True
else:
# field does not exist in manipulated_data dict. Skip Syncing
skip = True
raw_field_data = None
id_ = get_id_from_stripe_data(raw_field_data)
if id_ == raw_field_data:
# A field like {"subscription": "sub_6lsC8pt7IcFpjA", ...}
refetch = True
else:
# A field like {"subscription": {"id": sub_6lsC8pt7IcFpjA", ...}}
pass
if id_ in current_ids:
# this object is currently being fetched, don't try to fetch again,
# to avoid recursion instead, record the relation that should be
# created once "object_id" object exists
if pending_relations is not None:
object_id = manipulated_data["id"]
pending_relations.append((object_id, field, id_))
skip = True
# sync only if field exists and is not null
if not skip and not is_nulled:
# Add the id of the current object to the list of ids being processed.
# This avoids infinite recursive syncs in case a related model
# requests the same object.
current_ids.add(id_)
field_data, _ = field.related_model._get_or_create_from_stripe_object(
manipulated_data,
field_name,
refetch=refetch,
current_ids=current_ids,
pending_relations=pending_relations,
stripe_account=stripe_account,
)
# Remove the id of the current object from the list
# after it has been created or retrieved
current_ids.remove(id_)
else:
# eg PaymentMethod, handled in hooks
skip = True
return field_data, skip, is_nulled
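    # Illustrative payload shapes handled above (ids are hypothetical):
    #   {"subscription": "sub_6lsC8pt7IcFpjA", ...}          -> bare id, refetched by id
    #   {"subscription": {"id": "sub_6lsC8pt7IcFpjA", ...}}  -> nested object, used as-is
    #   {"subscription": None, ...}                          -> is_nulled=True, FK cleared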
@classmethod
def is_valid_object(cls, data):
"""
Returns whether the data is a valid object for the class
"""
return "object" in data and data["object"] == cls.stripe_class.OBJECT_NAME
def _attach_objects_hook(self, cls, data, current_ids=None):
"""
Gets called by this object's create and sync methods just before save.
Use this to populate fields before the model is saved.
:param cls: The target class for the instantiated object.
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
"""
pass
def _attach_objects_post_save_hook(self, cls, data, pending_relations=None):
"""
Gets called by this object's create and sync methods just after save.
Use this to populate fields after the model is saved.
:param cls: The target class for the instantiated object.
:param data: The data dictionary received from the Stripe API.
:type data: dict
"""
unprocessed_pending_relations = []
if pending_relations is not None:
for post_save_relation in pending_relations:
object_id, field, id_ = post_save_relation
if self.id == id_:
# the target instance now exists
target = field.model.objects.get(id=object_id)
setattr(target, field.name, self)
target.save()
# reload so that indirect relations back to this object
# eg self.charge.invoice = self are set
# TODO - reverse the field reference here to avoid hitting the DB?
self.refresh_from_db()
else:
unprocessed_pending_relations.append(post_save_relation)
if len(pending_relations) != len(unprocessed_pending_relations):
# replace in place so passed in list is updated in calling method
pending_relations[:] = unprocessed_pending_relations
@classmethod
def _create_from_stripe_object(
cls,
data,
current_ids=None,
pending_relations=None,
save=True,
stripe_account=None,
):
"""
Instantiates a model instance using the provided data object received
from Stripe, and saves it to the database if specified.
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
:param pending_relations: list of tuples of relations to be attached post-save
:type pending_relations: list
:param save: If True, the object is saved after instantiation.
:type save: bool
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
:returns: The instantiated object.
"""
instance = cls(
**cls._stripe_object_to_record(
data,
current_ids=current_ids,
pending_relations=pending_relations,
stripe_account=stripe_account,
)
)
instance._attach_objects_hook(cls, data, current_ids=current_ids)
if save:
instance.save(force_insert=True)
instance._attach_objects_post_save_hook(
cls, data, pending_relations=pending_relations
)
return instance
# flake8: noqa (C901)
@classmethod
def _get_or_create_from_stripe_object(
cls,
data,
field_name="id",
refetch=True,
current_ids=None,
pending_relations=None,
save=True,
stripe_account=None,
):
"""
:param data:
:param field_name:
:param refetch:
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
:param pending_relations: list of tuples of relations to be attached post-save
:type pending_relations: list
:param save:
:param stripe_account: The optional connected account \
for which this request is being made.
:type stripe_account: string
:return:
:rtype: cls, bool
"""
field = data.get(field_name)
is_nested_data = field_name != "id"
should_expand = False
if pending_relations is None:
pending_relations = []
id_ = get_id_from_stripe_data(field)
if not field:
# An empty field - We need to return nothing here because there is
# no way of knowing what needs to be fetched!
logger.warning(
"empty field %s.%s = %r - this is a bug, "
"please report it to dj-stripe!",
cls.__name__,
field_name,
field,
)
return None, False
elif id_ == field:
# A field like {"subscription": "sub_6lsC8pt7IcFpjA", ...}
# We'll have to expand if the field is not "id" (= is nested)
should_expand = is_nested_data
else:
# A field like {"subscription": {"id": sub_6lsC8pt7IcFpjA", ...}}
data = field
try:
return cls.stripe_objects.get(id=id_), False
except cls.DoesNotExist:
if is_nested_data and refetch:
# This is what `data` usually looks like:
# {"id": "cus_XXXX", "default_source": "card_XXXX"}
# Leaving the default field_name ("id") will get_or_create the customer.
# If field_name="default_source", we get_or_create the card instead.
cls_instance = cls(id=id_)
try:
data = cls_instance.api_retrieve(stripe_account=stripe_account)
except InvalidRequestError as e:
if "a similar object exists in" in str(e):
# HACK around a Stripe bug.
# When a File is retrieved from the Account object,
# a mismatch between live and test mode is possible depending
# on whether the file (usually the logo) was uploaded in live
# or test. Reported to Stripe in August 2020.
# Context: https://github.com/dj-stripe/dj-stripe/issues/830
pass
elif "No such PaymentMethod:" in str(e):
# payment methods (card_… etc) can be irretrievably deleted,
# but still present during sync. For example, if a refund is
# issued on a charge whose payment method has been deleted.
return None, False
else:
raise
should_expand = False
# The next thing to happen will be the "create from stripe object" call.
# At this point, if we don't have data to start with (field is a str),
# *and* we didn't refetch by id, then `should_expand` is True and we
# don't have the data to actually create the object.
# If this happens when syncing Stripe data, it's a djstripe bug. Report it!
assert not should_expand, "No data to create {} from {}".format(
cls.__name__, field_name
)
try:
# We wrap the `_create_from_stripe_object` in a transaction to
# avoid TransactionManagementError on subsequent queries in case
# of the IntegrityError catch below. See PR #903
with transaction.atomic():
return (
cls._create_from_stripe_object(
data,
current_ids=current_ids,
pending_relations=pending_relations,
save=save,
stripe_account=stripe_account,
),
True,
)
except IntegrityError:
# Handle the race condition that something else created the object
# after the `get` and before `_create_from_stripe_object`.
# This is common during webhook handling, since Stripe sends
# multiple webhook events simultaneously,
# each of which will cause recursive syncs. See issue #429
return cls.stripe_objects.get(id=id_), False
@classmethod
def _stripe_object_to_customer(cls, target_cls, data, current_ids=None):
"""
Search the given manager for the Customer matching this object's
``customer`` field.
:param target_cls: The target class
:type target_cls: Customer
:param data: stripe object
:type data: dict
:param current_ids: stripe ids of objects that are currently being processed
:type current_ids: set
"""
if "customer" in data and data["customer"]:
return target_cls._get_or_create_from_stripe_object(
data, "customer", current_ids=current_ids
)[0]
@classmethod
def _stripe_object_to_default_tax_rates(cls, target_cls, data):
"""
Retrieves TaxRates for a Subscription or Invoice
:param target_cls:
:param data:
:param instance:
:type instance: Union[djstripe.models.Invoice, djstripe.models.Subscription]
:return:
"""
tax_rates = []
for tax_rate_data in data.get("default_tax_rates", []):
tax_rate, _ = target_cls._get_or_create_from_stripe_object(
tax_rate_data, refetch=False
)
tax_rates.append(tax_rate)
return tax_rates
@classmethod
def _stripe_object_to_tax_rates(cls, target_cls, data):
"""
Retrieves TaxRates for a SubscriptionItem or InvoiceItem
:param target_cls:
:param data:
:return:
"""
tax_rates = []
for tax_rate_data in data.get("tax_rates", []):
tax_rate, _ = target_cls._get_or_create_from_stripe_object(
tax_rate_data, refetch=False
)
tax_rates.append(tax_rate)
return tax_rates
@classmethod
def _stripe_object_set_total_tax_amounts(cls, target_cls, data, instance):
"""
Set total tax amounts on Invoice instance
:param target_cls:
:param data:
:param instance:
:type instance: djstripe.models.Invoice
:return:
"""
from .billing import TaxRate
pks = []
for tax_amount_data in data.get("total_tax_amounts", []):
tax_rate_data = tax_amount_data["tax_rate"]
if isinstance(tax_rate_data, str):
tax_rate_data = {"tax_rate": tax_rate_data}
tax_rate, _ = TaxRate._get_or_create_from_stripe_object(
tax_rate_data, field_name="tax_rate", refetch=True
)
tax_amount, _ = target_cls.objects.update_or_create(
invoice=instance,
tax_rate=tax_rate,
defaults={
"amount": tax_amount_data["amount"],
"inclusive": tax_amount_data["inclusive"],
},
)
pks.append(tax_amount.pk)
instance.total_tax_amounts.exclude(pk__in=pks).delete()
@classmethod
def _stripe_object_to_invoice_items(cls, target_cls, data, invoice):
"""
Retrieves InvoiceItems for an invoice.
If the invoice item doesn't exist already then it is created.
If the invoice is an upcoming invoice that doesn't persist to the
database (i.e. ephemeral) then the invoice items are also not saved.
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: Type[djstripe.models.InvoiceItem]
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param invoice: The invoice object that should hold the invoice items.
:type invoice: ``djstripe.models.Invoice``
"""
lines = data.get("lines")
if not lines:
return []
invoiceitems = []
for line in lines.auto_paging_iter():
if invoice.id:
save = True
line.setdefault("invoice", invoice.id)
if line.get("type") == "subscription":
# Lines for subscriptions need to be keyed based on invoice and
# subscription, because their id is *just* the subscription
# when received from Stripe. This means that future updates to
# a subscription will change previously saved invoices - Doing
# the composite key avoids this.
if not line["id"].startswith(invoice.id):
line["id"] = "{invoice_id}-{subscription_id}".format(
invoice_id=invoice.id, subscription_id=line["id"]
)
else:
# Don't save invoice items for ephemeral invoices
save = False
line.setdefault("customer", invoice.customer.id)
line.setdefault("date", int(dateformat.format(invoice.created, "U")))
item, _ = target_cls._get_or_create_from_stripe_object(
line, refetch=False, save=save
)
invoiceitems.append(item)
return invoiceitems
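    # Example of the composite key described above (hypothetical ids): a
    # subscription line with id "sub_123" on invoice "in_456" is stored as
    # "in_456-sub_123", so later updates to the subscription cannot clobber
    # previously saved invoices.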
@classmethod
def _stripe_object_to_subscription_items(cls, target_cls, data, subscription):
"""
Retrieves SubscriptionItems for a subscription.
If the subscription item doesn't exist already then it is created.
:param target_cls: The target class to instantiate per invoice item.
:type target_cls: Type[djstripe.models.SubscriptionItem]
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param subscription: The subscription object that should hold the items.
:type subscription: djstripe.models.Subscription
"""
items = data.get("items")
if not items:
subscription.items.delete()
return []
pks = []
subscriptionitems = []
for item_data in items.auto_paging_iter():
item, _ = target_cls._get_or_create_from_stripe_object(
item_data, refetch=False
)
# sync the SubscriptionItem
target_cls.sync_from_stripe_data(item_data)
pks.append(item.pk)
subscriptionitems.append(item)
subscription.items.exclude(pk__in=pks).delete()
return subscriptionitems
@classmethod
def _stripe_object_to_refunds(cls, target_cls, data, charge):
"""
Retrieves Refunds for a charge
:param target_cls: The target class to instantiate per refund
:type target_cls: Type[djstripe.models.Refund]
:param data: The data dictionary received from the Stripe API.
:type data: dict
:param charge: The charge object that refunds are for.
:type charge: djstripe.models.Refund
:return:
"""
refunds = data.get("refunds")
if not refunds:
return []
refund_objs = []
for refund_data in refunds.auto_paging_iter():
item, _ = target_cls._get_or_create_from_stripe_object(
refund_data, refetch=False
)
refund_objs.append(item)
return refund_objs
@classmethod
def sync_from_stripe_data(cls, data):
"""
Syncs this object from the stripe data provided.
Foreign keys will also be retrieved and synced recursively.
:param data: stripe object
:type data: dict
:rtype: cls
"""
current_ids = set()
data_id = data.get("id")
stripe_account = getattr(data, "stripe_account", None)
if data_id:
# stop nested objects from trying to retrieve this object before
# initial sync is complete
current_ids.add(data_id)
instance, created = cls._get_or_create_from_stripe_object(
data,
current_ids=current_ids,
stripe_account=stripe_account,
)
if not created:
record_data = cls._stripe_object_to_record(data)
for attr, value in record_data.items():
setattr(instance, attr, value)
instance._attach_objects_hook(cls, data, current_ids=current_ids)
instance.save()
instance._attach_objects_post_save_hook(cls, data)
for field in instance._meta.concrete_fields:
if isinstance(field, StripePercentField):
# get rid of cached values
delattr(instance, field.name)
return instance
@classmethod
def _get_or_retrieve(cls, id, stripe_account=None, **kwargs):
"""
Retrieve object from the db, if it exists. If it doesn't, query Stripe to fetch
the object and sync with the db.
"""
try:
return cls.objects.get(id=id)
except cls.DoesNotExist:
pass
if stripe_account:
kwargs["stripe_account"] = str(stripe_account)
# If no API key is specified, use the default one for the specified livemode
# (or if no livemode is specified, the default one altogether)
kwargs.setdefault(
"api_key",
djstripe_settings.get_default_api_key(livemode=kwargs.get("livemode")),
)
data = cls.stripe_class.retrieve(id=id, **kwargs)
instance = cls.sync_from_stripe_data(data)
return instance
def __str__(self):
return smart_str("<{list}>".format(list=", ".join(self.str_parts())))
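# A minimal usage sketch (payload values are hypothetical; Customer stands in
# for any concrete StripeModel subclass):
#   customer = Customer.sync_from_stripe_data(stripe.Customer.retrieve("cus_123"))
# Foreign keys referenced by the payload are fetched and synced recursively.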
class IdempotencyKey(models.Model):
uuid = models.UUIDField(
max_length=36, primary_key=True, editable=False, default=uuid.uuid4
)
action = models.CharField(max_length=100)
livemode = models.BooleanField(
help_text="Whether the key was used in live or test mode."
)
created = models.DateTimeField(auto_now_add=True)
class Meta:
unique_together = ("action", "livemode")
def __str__(self):
return str(self.uuid)
@property
def is_expired(self) -> bool:
return timezone.now() > self.created + timedelta(hours=24)
| 16,438 |
2,151 | <gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef IOS_CHROME_BROWSER_UI_CONTENT_SUGGESTIONS_NTP_HOME_MEDIATOR_H_
#define IOS_CHROME_BROWSER_UI_CONTENT_SUGGESTIONS_NTP_HOME_MEDIATOR_H_
#import <UIKit/UIKit.h>
#import "ios/chrome/browser/ui/content_suggestions/cells/content_suggestions_gesture_commands.h"
#import "ios/chrome/browser/ui/content_suggestions/content_suggestions_commands.h"
#import "ios/chrome/browser/ui/content_suggestions/content_suggestions_header_view_controller_delegate.h"
namespace ntp_snippets {
class ContentSuggestionsService;
}
@protocol ApplicationCommands;
@protocol BrowserCommands;
@class ContentSuggestionsHeaderViewController;
@class ContentSuggestionsMediator;
@class ContentSuggestionsMetricsRecorder;
@class ContentSuggestionsViewController;
@protocol LogoVendor;
@protocol NTPHomeConsumer;
@class NTPHomeMetrics;
class TemplateURLService;
@protocol SnackbarCommands;
@protocol UrlLoader;
class WebStateList;
// Mediator for the NTP Home panel, handling the interactions with the
// suggestions.
@interface NTPHomeMediator
: NSObject<ContentSuggestionsCommands,
ContentSuggestionsGestureCommands,
ContentSuggestionsHeaderViewControllerDelegate>
- (nullable instancetype)
initWithWebStateList:(nonnull WebStateList*)webStateList
templateURLService:(nonnull TemplateURLService*)templateURLService
logoVendor:(nonnull id<LogoVendor>)logoVendor
NS_DESIGNATED_INITIALIZER;
- (nullable instancetype)init NS_UNAVAILABLE;
// Dispatcher.
@property(nonatomic, weak, nullable)
id<ApplicationCommands, BrowserCommands, SnackbarCommands, UrlLoader>
dispatcher;
// Suggestions service used to get the suggestions.
@property(nonatomic, assign, nonnull)
ntp_snippets::ContentSuggestionsService* suggestionsService;
// Recorder for the metrics related to ContentSuggestions.
@property(nonatomic, strong, nullable)
ContentSuggestionsMetricsRecorder* metricsRecorder;
// Recorder for the metrics related to the NTP.
@property(nonatomic, strong, nullable) NTPHomeMetrics* NTPMetrics;
// View Controller displaying the suggestions.
@property(nonatomic, weak, nullable)
ContentSuggestionsViewController* suggestionsViewController;
// Mediator for the ContentSuggestions.
@property(nonatomic, strong, nonnull)
ContentSuggestionsMediator* suggestionsMediator;
// Consumer for this mediator.
@property(nonatomic, weak, nullable) id<NTPHomeConsumer> consumer;
// Inits the mediator.
- (void)setUp;
// Cleans the mediator.
- (void)shutdown;
@end
#endif // IOS_CHROME_BROWSER_UI_CONTENT_SUGGESTIONS_NTP_HOME_MEDIATOR_H_
| 911 |
488 | /*
* Copyright (c) Wipro Technologies Ltd, 2002. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
*/
/**********************************************************
*
* TEST IDENTIFIER : clone03
*
* EXECUTED BY : anyone
*
* TEST TITLE : test for clone(2)
*
* TEST CASE TOTAL : 1
*
* AUTHOR : <NAME> <<EMAIL>>
*
* SIGNALS
* Uses SIGUSR1 to pause before test if option set.
* (See the parse_opts(3) man page).
*
* DESCRIPTION
* Check for equality of pid of child & return value of clone(2)
*
* Setup:
* Setup signal handling.
* Pause for SIGUSR1 if option specified.
*
* Test:
* Open a pipe.
* Loop if the proper options are given.
 *    Call clone(2) without SIGCHLD
*
* CHILD:
* writes the pid to pipe
* PARENT:
* reads child'd pid from pipe
* if child's pid == return value from clone(2)
* Test passed
* else
* test failed
* Cleanup:
* Print errno log and/or timing stats if options given
*
* USAGE: <for command-line>
* clone03 [-c n] [-e] [-i n] [-I x] [-P x] [-t] [-h] [-f] [-p]
* where, -c n : Run n copies concurrently.
* -e : Turn on errno logging.
* -h : Show help screen
* -f : Turn off functional testing
* -i n : Execute test n times.
* -I x : Execute test for x seconds.
* -p : Pause for SIGUSR1 before starting
* -P x : Pause for x seconds between iterations.
* -t : Turn on syscall timing.
*
****************************************************************/
#if defined UCLINUX && !__THROW
/* workaround for libc bug */
#define __THROW
#endif
#include <errno.h>
#include <sched.h>
#include "test.h"
#include "usctest.h"
#include "clone_platform.h"
static void setup();
static void cleanup();
static int child_fn();
static int pfd[2];
char *TCID = "clone03"; /* Test program identifier. */
int TST_TOTAL = 1; /* Total number of test cases. */
extern int Tst_count; /* Test Case counter for tst_* routines */
int main(int ac, char **av)
{
int lc; /* loop counter */
char *msg; /* message returned from parse_opts */
void *child_stack; /* stack for child */
char buff[10];
int child_pid;
/* parse standard options */
if ((msg = parse_opts(ac, av, (option_t *) NULL, NULL)) != (char *)NULL) {
tst_brkm(TBROK, tst_exit, "OPTION PARSING ERROR - %s", msg);
}
/* perform global setup for test */
setup();
/* Allocate stack for child */
if ((child_stack = (void *)malloc(CHILD_STACK_SIZE)) == NULL) {
tst_brkm(TBROK, cleanup, "Cannot allocate stack for child");
}
/* check looping state if -i option given */
for (lc = 0; TEST_LOOPING(lc); lc++) {
/* reset Tst_count in case we are looping. */
Tst_count = 0;
/* Open a pipe */
if ((pipe(pfd)) == -1) {
tst_brkm(TBROK|TERRNO, cleanup, "pipe() failed");
}
/*
* Call clone(2)
*/
TEST(ltp_clone(0, child_fn, NULL, CHILD_STACK_SIZE,
child_stack));
/* check return code */
if (TEST_RETURN == -1) {
tst_resm(TFAIL|TTERRNO, "clone() failed");
cleanup();
}
/* close write end from parent */
if ((close(pfd[1])) == -1) {
tst_brkm(TBROK|TERRNO, cleanup, "close(pfd[1]) failed");
}
/* Read pid from read end */
if ((read(pfd[0], buff, sizeof(buff))) == -1) {
tst_brkm(TBROK|TERRNO, cleanup, "read from pipe failed");
}
/* Close read end from parent */
if ((close(pfd[0])) == -1) {
tst_resm(TWARN|TERRNO, "close(pfd[0]) failed");
}
/* Get child's pid from pid string */
child_pid = atoi(buff);
if (TEST_RETURN == child_pid) {
tst_resm(TPASS, "Test passed");
} else {
tst_resm(TFAIL, "Test failed");
}
} /* End for TEST_LOOPING */
free(child_stack);
/* cleanup and exit */
cleanup();
/*NOTREACHED*/ return 0;
} /* End main */
/* setup() - performs all ONE TIME setup for this test */
void setup()
{
/* capture signals */
tst_sig(NOFORK, DEF_HANDLER, cleanup);
/* Pause if that option was specified */
TEST_PAUSE;
} /* End setup() */
/*
*cleanup() - performs all ONE TIME cleanup for this test at
* completion or premature exit.
*/
void cleanup()
{
/*
* print timing stats if that option was specified.
* print errno log if that option was specified.
*/
TEST_CLEANUP;
/* exit with return code appropriate for results */
tst_exit();
} /* End cleanup() */
/*
* child_fn() - function executed by child
*/
int child_fn(void)
{
char pid[10];
/* Close read end from child */
if ((close(pfd[0])) == -1) {
tst_brkm(TBROK|TERRNO, cleanup, "close(pfd[0]) failed");
}
/* Construct pid string */
sprintf(pid, "%d", getpid());
/* Write pid string to pipe */
if ((write(pfd[1], pid, sizeof(pid))) == -1) {
tst_brkm(TBROK|TERRNO, cleanup, "write to pipe failed");
}
/* Close write end of pipe from child */
if ((close(pfd[1])) == -1) {
tst_resm(TWARN|TERRNO, "close(pfd[1]) failed");
}
exit(1);
}
| 2,055 |
341 | from instapy_cli import client
username = 'USERNAME'
password = 'PASSWORD'
# There are two ways to use instapy-cli programmatically:
# vvv << see below >> vvv
# Create cli and use it in functions/classes/parameters
cli = client(username, password)
ig = cli.api()
me = ig.current_user()
print(me)
# Create cli using 'with ... as ..' - a more pythonic way
with client(username, password) as cli:
# do stuffs with cli
ig = cli.api()
me = ig.current_user()
print(me)
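# A further sketch (assumes your installed instapy-cli version exposes an
# ``upload`` helper; verify before relying on it):
# with client(username, password) as cli:
#     cli.upload('photo.jpg', 'my caption text')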
| 171 |
427 | from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteThreadsInStopReply(
gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [
"read packet: $QListThreadsInStopReply#21",
"send packet: $OK#00",
]
def gather_stop_reply_threads(self, post_startup_log_lines, thread_count):
# Set up the inferior args.
inferior_args = []
for i in range(thread_count - 1):
inferior_args.append("thread:new")
inferior_args.append("sleep:10")
procs = self.prep_debug_monitor_and_inferior(
inferior_args=inferior_args)
        # Assumes test_sequence already contains anything needed to set up the
        # initial state. (Like optionally enabling QThreadsInStopReply.)
# (Like optionally enabling QThreadsInStopReply.)
if post_startup_log_lines:
self.test_sequence.add_log_lines(post_startup_log_lines, True)
self.test_sequence.add_log_lines([
"read packet: $c#63"
], True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Give threads time to start up, then break.
time.sleep(1)
self.reset_test_sequence()
self.test_sequence.add_log_lines(
[
"read packet: {}".format(
chr(3)),
{
"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {
1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Wait until all threads have started.
threads = self.wait_for_thread_count(thread_count, timeout_seconds=3)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Run, then stop the process, grab the stop reply content.
self.reset_test_sequence()
self.test_sequence.add_log_lines(["read packet: $c#63",
"read packet: {}".format(chr(3)),
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {1: "stop_result",
2: "key_vals_text"}},
],
True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Parse the stop reply contents.
key_vals_text = context.get("key_vals_text")
self.assertIsNotNone(key_vals_text)
kv_dict = self.parse_key_val_dict(key_vals_text)
self.assertIsNotNone(kv_dict)
# Pull out threads from stop response.
stop_reply_threads_text = kv_dict.get("threads")
if stop_reply_threads_text:
return [int(thread_id, 16)
for thread_id in stop_reply_threads_text.split(",")]
else:
return []
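    # Example stop-reply payload this parses (thread ids are hypothetical):
    #   $T05thread:1a2b;threads:1a2b,1a2c;#a1
    # yields kv_dict["threads"] == "1a2b,1a2c" and a result of [0x1a2b, 0x1a2c].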
def QListThreadsInStopReply_supported(self):
procs = self.prep_debug_monitor_and_inferior()
self.test_sequence.add_log_lines(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True)
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
@debugserver_test
def test_QListThreadsInStopReply_supported_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
@llgs_test
def test_QListThreadsInStopReply_supported_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.QListThreadsInStopReply_supported()
def stop_reply_reports_multiple_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
@debugserver_test
def test_stop_reply_reports_multiple_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
@llgs_test
def test_stop_reply_reports_multiple_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_multiple_threads(5)
def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is not
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(None, thread_count)
self.assertEqual(len(stop_reply_threads), 0)
@debugserver_test
def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
@llgs_test
def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.no_QListThreadsInStopReply_supplies_no_threads(5)
def stop_reply_reports_correct_threads(self, thread_count):
# Gather threads from stop notification when QThreadsInStopReply is
# enabled.
stop_reply_threads = self.gather_stop_reply_threads(
self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
self.assertEqual(len(stop_reply_threads), thread_count)
# Gather threads from q{f,s}ThreadInfo.
self.reset_test_sequence()
self.add_threadinfo_collection_packets()
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
threads = self.parse_threadinfo_packets(context)
self.assertIsNotNone(threads)
self.assertEqual(len(threads), thread_count)
# Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads
for tid in threads:
self.assertTrue(tid in stop_reply_threads)
@debugserver_test
def test_stop_reply_reports_correct_threads_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
@llgs_test
def test_stop_reply_reports_correct_threads_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_reply_reports_correct_threads(5)
| 3,413 |
4,842 | package com.gpmall.user.dal.entitys;
import lombok.Data;
/**
 * Search for [Gupao College] on Tencent Classroom
 * Official site: www.gupaoedu.com
 * Instructor: Mic
* create-date: 2019/8/6-14:35
*/
@Data
public class ImageResult {
String img;
String code;
} | 133 |
4,145 | <gh_stars>1000+
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from hydra.utils import instantiate
from transformers import AutoConfig, AutoModel
from nemo.collections.nlp.modules.common.encoder_module import EncoderModule
from nemo.collections.nlp.modules.common.huggingface.huggingface_utils import get_huggingface_pretrained_lm_models_list
from nemo.core.classes.common import typecheck
from nemo.utils import logging
class HuggingFaceEncoderModule(EncoderModule):
""" Class for using HuggingFace encoders in NeMo NLP."""
def __init__(
self,
model_name: Optional[str] = None,
pretrained: bool = False,
config_dict: Optional[dict] = None,
checkpoint_file: Optional[str] = None,
):
"""Gets HuggingFace based model to be used as an Encoder in NeMo NLP.
Use the model_name arg to get a named model architecture.
Available model names can be found with get_huggingface_pretrained_lm_models_list() or
by going to https://huggingface.co/models.
Use the pretrained arg to get the named model architecture with or without pretrained weights.
If model_name is None, then we can pass in a custom configuration via the config_dict.
For example, to instantiate a HuggingFace BERT model with custom configuration we would do:
config_dict={
'_target_': 'transformers.BertConfig',
'hidden_size': 1536
}
Args:
model_name (Optional[str]): Named model architecture from HuggingFace. Defaults to None.
pretrained (bool): Use True to get pretrained weights.
False will use the same architecture but with randomly initialized weights.
Defaults to False.
config_dict (Optional[dict], optional): Use for custom configuration of the HuggingFace model. Defaults to None.
checkpoint_file (Optional[str], optional): Provide weights for the transformer from a local checkpoint. Defaults to None.
"""
super().__init__()
if checkpoint_file:
raise NotImplementedError('Restoring from checkpoint file not implemented yet.')
model = None
if model_name is not None:
if model_name in get_huggingface_pretrained_lm_models_list(include_external=True):
if pretrained:
                if config_dict:
                    # config_dict may be None here; pop defensively so a
                    # missing 'vocab_size' key does not raise
                    config_dict.pop('vocab_size', None)
if config_dict:
raise ValueError(
f'When using pretrained model, config_dict should be None or empty. Got: {config_dict}'
)
model = AutoModel.from_pretrained(model_name)
else:
cfg = AutoConfig.from_pretrained(model_name)
model = AutoModel.from_config(cfg)
else:
logging.error(f'{model_name} not found in list of HuggingFace pretrained models')
else:
if pretrained:
raise ValueError(f'If not using model_name, then pretrained should be False. Got: {pretrained}.')
cfg = instantiate(config_dict)
model = AutoModel.from_config(cfg)
self._hidden_size = model.config.hidden_size
self._vocab_size = model.config.vocab_size
self._encoder = model
@typecheck()
def forward(self, input_ids, encoder_mask):
encoder_hidden_states = self._encoder.forward(input_ids=input_ids, attention_mask=encoder_mask)[0]
return encoder_hidden_states
@property
def hidden_size(self) -> Optional[int]:
return self._hidden_size
@property
def vocab_size(self) -> Optional[int]:
return self._vocab_size
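# A minimal usage sketch (assumes the HuggingFace 'bert-base-uncased' weights
# are available locally or downloadable; tensors are illustrative):
#   encoder = HuggingFaceEncoderModule(model_name='bert-base-uncased', pretrained=True)
#   hidden_states = encoder(input_ids=input_ids, encoder_mask=encoder_mask)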
| 1,741 |
848 | # Copyright (C) 2020 Intel Corporation. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
import re
from collections import namedtuple
import common
import board_cfg_lib
import scenario_cfg_lib
VBAR_INFO_DEFINE="""#ifndef VBAR_BASE_H_
#define VBAR_BASE_H_
"""
VBAR_INFO_ENDIF="""#endif /* VBAR_BASE_H_ */"""
# Constants for ivshmem
BAR0_SHEMEM_SIZE = 4*common.SIZE_K
BAR1_SHEMEM_SIZE = 4*common.SIZE_K
BAR0_SHEMEM_ALIGNMENT = 4*common.SIZE_K
BAR1_SHEMEM_ALIGNMENT = 4*common.SIZE_K
BAR2_SHEMEM_ALIGNMENT = 2*common.SIZE_M
# Constants for pci vuart
PCI_VUART_VBAR0_SIZE = 4*common.SIZE_K
PCI_VUART_VBAR1_SIZE = 4*common.SIZE_K
PCI_VUART_VBAR0_ALIGNMENT = 4*common.SIZE_K
PCI_VUART_VBAR1_ALIGNMENT = 4*common.SIZE_K
# Constants for vmsix bar
VMSIX_VBAR_SIZE = 4*common.SIZE_K
VMSIX_VBAR_ALIGNMENT = VMSIX_VBAR_SIZE
class MmioWindow(namedtuple(
"MmioWindow", [
"start",
"end"])):
PATTERN = re.compile(r"\s*(?P<start>[0-9a-f]+)-(?P<end>[0-9a-f]+) ")
@classmethod
def from_str(cls, value):
if not isinstance(value, str):
raise ValueError("value must be a str: {}".format(type(value)))
match = cls.PATTERN.fullmatch(value)
if match:
return MmioWindow(
start=int(match.group("start"), 16),
end=int(match.group("end"), 16))
else:
raise ValueError("not an mmio window: {!r}".format(value))
def overlaps(self, other):
if not isinstance(other, MmioWindow):
raise TypeError('overlaps() other must be an MmioWindow: {}'.format(type(other)))
if other.end < self.start:
return False
if self.end < other.start:
return False
return True
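    # Examples (hypothetical ranges; from_str mirrors /proc/iomem formatting,
    # including the trailing space the regex requires):
    #   MmioWindow.from_str("80000000-8fffffff ") -> MmioWindow(0x80000000, 0x8fffffff)
    #   MmioWindow(0x1000, 0x1fff).overlaps(MmioWindow(0x1800, 0x2fff)) -> True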
def get_devs_per_vm_with_key(pci_devs, keywords):
devicelist = {}
for vm_i, pci_devs_list in pci_devs.items():
devicelist[vm_i] = [
d for d in pci_devs_list if d in keywords
]
return devicelist
def write_vbar(i_cnt, bdf, pci_bar_dic, bar_attr, \
        pci_devs_per_vm, mmiolist_per_vm, sos_mmio_range, config):
"""
    Parse and generate vbar.
    :param i_cnt: the number of PCI devices that share the same PCI sub class name
    :param bdf: a string containing the BDF
    :param pci_bar_dic: a dictionary of pci vbar entries for those BDFs
    :param bar_attr: a class instance holding the PCI bar attributes
    :param config: a file pointer the pci information is written to
"""
align = ' ' * 54
ptdev_mmio_str = ''
tmp_sub_name = board_cfg_lib.get_sub_pci_name(i_cnt, bar_attr)
if bdf in pci_bar_dic.keys():
bar_list = list(pci_bar_dic[bdf].keys())
bar_len = len(bar_list)
bar_num = 0
bar_val = ""
free = MmioWindow(0, 0)
is_vmsix = False
        # If the device is a vmsix device, find a free mmio window up to 4k in size
if board_cfg_lib.is_matched_board(('ehl-crb-b')):
for vm_i in pci_devs_per_vm:
if bdf in pci_devs_per_vm[vm_i]:
if scenario_cfg_lib.VM_DB[common.VM_TYPES[vm_i]]['load_type'] == "PRE_LAUNCHED_VM":
is_vmsix = True
bar_len += 1
                        # For a pre-launched VM, the window range is from 2G to 4G
free = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
mmiolist_per_vm[vm_i], VMSIX_VBAR_ALIGNMENT + VMSIX_VBAR_SIZE)
free_vbar_start_addr = common.round_up(free.start, VMSIX_VBAR_ALIGNMENT)
free_vbar_end_addr = free_vbar_start_addr + VMSIX_VBAR_SIZE - 1
free = MmioWindow(free_vbar_start_addr, free_vbar_end_addr)
mmiolist_per_vm[vm_i].append(free)
mmiolist_per_vm[vm_i].sort()
break
for bar_i in bar_list:
if not bar_attr.remappable:
print("/* TODO: add {} 64bit BAR support */".format(tmp_sub_name), file=config)
bar_num += 1
bar_val = pci_bar_dic[bdf][bar_i].addr
if pci_bar_dic[bdf][bar_i].remapped:
ptdev_mmio_str = 'HI_MMIO_START + '
if bar_num == bar_len:
if bar_len == 1:
print("#define %-38s" % (tmp_sub_name+"_VBAR"), " .vbar_base[{}] = {}{}UL" \
.format(bar_i, ptdev_mmio_str, bar_val), file=config)
else:
print("{}.vbar_base[{}] = {}{}UL" \
.format(align, bar_i, ptdev_mmio_str, bar_val), file=config)
elif bar_num == 1:
print("#define %-38s" % (tmp_sub_name+"_VBAR"), " .vbar_base[{}] = {}{}UL, \\".format(bar_i, ptdev_mmio_str, bar_val), file=config)
else:
print("{}.vbar_base[{}] = {}{}UL, \\".format(align, bar_i, ptdev_mmio_str, bar_val), file=config)
if is_vmsix:
next_bar_idx = find_next_bar(bar_val, bar_list)
print("{}.vbar_base[{}] = {}{}UL".format(align, next_bar_idx, ptdev_mmio_str, hex(free.start)), file=config)
print("", file=config)
def find_next_bar(bar_val, bar_list):
pci_lines = board_cfg_lib.get_info(common.BOARD_INFO_FILE, "<PCI_DEVICE>", "</PCI_DEVICE>")
idx = bar_list[-1]
for line in pci_lines:
if bar_val.split('x')[1] in line:
if "32-bit" in line:
idx += 1
break
elif "64-bit" in line:
idx += 2
break
if int(idx) > 5:
raise ValueError("Not enough bar region, last bar region is {}".format(idx))
return idx
def write_vuart_vbar(mmiolist_per_vm, sos_mmio_range, config):
# get legacy vuart information
vuart0_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 0)
vuart1_setting = common.get_vuart_info_id(common.SCENARIO_INFO_FILE, 1)
# get pci vuart information
vuarts = common.get_vuart_info(common.SCENARIO_INFO_FILE)
for vm_id in vuarts.keys():
vm_type = common.VM_TYPES[vm_id]
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "POST_LAUNCHED_VM":
continue
for vuart_id in vuarts[vm_id].keys():
if vuarts[vm_id][vuart_id]['base'] == "INVALID_PCI_BASE":
continue
# Skip pci vuart 0 if the legacy vuart 0 is enabled
if vuart_id == 0 and vm_id in vuart0_setting \
and vuart0_setting[vm_id]['base'] != "INVALID_COM_BASE":
continue
# Skip pci vuart 1 if the legacy vuart 1 is enabled
if vuart_id == 1 and vm_id in vuart1_setting \
and vuart1_setting[vm_id]['base'] != "INVALID_COM_BASE":
continue
free_bar0 = []
free_bar1 = []
            # a vuart device requires 2 bars
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], \
PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
free_bar0_start_addr = common.round_up(free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
mmiolist_per_vm[vm_id].append(free_bar0)
mmiolist_per_vm[vm_id].sort()
free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], \
PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
free_bar1_start_addr = common.round_up(free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
mmiolist_per_vm[vm_id].append(free_bar1)
mmiolist_per_vm[vm_id].sort()
elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
free_bar0 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
mmiolist_per_vm[vm_id], PCI_VUART_VBAR0_SIZE + PCI_VUART_VBAR0_ALIGNMENT)
free_bar0_start_addr = common.round_up(free_bar0.start, PCI_VUART_VBAR0_ALIGNMENT)
free_bar0_end_addr = free_bar0_start_addr + PCI_VUART_VBAR0_SIZE - 1
free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
mmiolist_per_vm[vm_id].append(free_bar0)
mmiolist_per_vm[vm_id].sort()
free_bar1 = get_free_mmio([MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)], \
mmiolist_per_vm[vm_id], PCI_VUART_VBAR1_SIZE + PCI_VUART_VBAR1_ALIGNMENT)
free_bar1_start_addr = common.round_up(free_bar1.start, PCI_VUART_VBAR1_ALIGNMENT)
free_bar1_end_addr = free_bar1_start_addr + PCI_VUART_VBAR1_SIZE - 1
free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
mmiolist_per_vm[vm_id].append(free_bar1)
mmiolist_per_vm[vm_id].sort()
print("#define VM%s" %(str(vm_id) + "_VUART_%-28s") % (str(vuart_id) + "_VBAR"),
" .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
print("{}.vbar_base[1] = {:#x}UL".format(' ' * 54, free_bar1.start), file=config)
print("", file=config)
def write_ivshmem_vbar(mmiolist_per_vm, sos_mmio_range, config):
for vm_id,vm_type in common.VM_TYPES.items():
ivshmem_region = common.get_hv_item_tag(common.SCENARIO_INFO_FILE,
"FEATURES", "IVSHMEM", "IVSHMEM_REGION")
shmem_regions = scenario_cfg_lib.get_shmem_regions(ivshmem_region)
if vm_id not in shmem_regions:
continue
shmems = shmem_regions.get(vm_id)
idx = 0
for shm in shmems:
if shm is None or shm.strip() == '':
continue
shm_splited = shm.split(',')
size = shm_splited[1].strip()
try:
int_size = int(size) * 0x100000
except:
int_size = 0
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
# vbar[0] for shared memory is 4k
free_bar0 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
mmiolist_per_vm[vm_id].append(free_bar0)
mmiolist_per_vm[vm_id].sort()
# vbar[1] for shared memory is 4K
free_bar1 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
mmiolist_per_vm[vm_id].append(free_bar1)
mmiolist_per_vm[vm_id].sort()
# vbar[2] for shared memory is specified size in MB
free_bar2 = get_free_mmio(sos_mmio_range, mmiolist_per_vm[vm_id], BAR2_SHEMEM_ALIGNMENT + int_size)
free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
free_bar2_end_addr = free_bar2_start_addr + int_size - 1
free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
mmiolist_per_vm[vm_id].append(free_bar2)
mmiolist_per_vm[vm_id].sort()
print("#define SOS_IVSHMEM_DEVICE_%-19s" % (str(idx) + "_VBAR"),
" .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
print("", file=config)
idx += 1
elif scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
mmioRange = [MmioWindow(start=common.SIZE_2G, end=common.SIZE_4G-1)]
# vbar[0] for shared memory is 4k
free_bar0 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id], BAR0_SHEMEM_ALIGNMENT + BAR0_SHEMEM_SIZE)
free_bar0_start_addr = common.round_up(free_bar0.start, BAR0_SHEMEM_ALIGNMENT)
free_bar0_end_addr = free_bar0_start_addr + BAR0_SHEMEM_SIZE - 1
free_bar0 = MmioWindow(free_bar0_start_addr, free_bar0_end_addr)
mmiolist_per_vm[vm_id].append(free_bar0)
mmiolist_per_vm[vm_id].sort()
# vbar[1] for shared memory is 4K
free_bar1 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id], BAR1_SHEMEM_ALIGNMENT + BAR1_SHEMEM_SIZE)
free_bar1_start_addr = common.round_up(free_bar1.start, BAR1_SHEMEM_ALIGNMENT)
free_bar1_end_addr = free_bar1_start_addr + BAR1_SHEMEM_SIZE - 1
free_bar1 = MmioWindow(free_bar1_start_addr, free_bar1_end_addr)
mmiolist_per_vm[vm_id].append(free_bar1)
mmiolist_per_vm[vm_id].sort()
# vbar[2] for shared memory is specified size in MB
free_bar2 = get_free_mmio(mmioRange, mmiolist_per_vm[vm_id], BAR2_SHEMEM_ALIGNMENT + int_size)
free_bar2_start_addr = common.round_up(free_bar2.start, BAR2_SHEMEM_ALIGNMENT) + 0xC
free_bar2_end_addr = free_bar2_start_addr + int_size - 1
free_bar2 = MmioWindow(free_bar2_start_addr, free_bar2_end_addr)
mmiolist_per_vm[vm_id].append(free_bar2)
mmiolist_per_vm[vm_id].sort()
print("#define IVSHMEM_DEVICE_%-23s" % (str(idx) + "_VBAR"),
" .vbar_base[0] = {:#x}UL, \\".format(free_bar0.start), file=config)
print("{}.vbar_base[1] = {:#x}UL, \\".format(' ' * 54, free_bar1.start), file=config)
print("{}.vbar_base[2] = {:#x}UL".format(' ' * 54, free_bar2.start), file=config)
print("", file=config)
idx += 1
def is_mmio_window_used(devinfo, keywords):
for k in keywords:
if k in devinfo:
return True
return False
def get_mmio_windows_with_key(keywords):
keyword_mmiolist = []
exclusive_mmiolist = []
iomem_lines = board_cfg_lib.get_info(common.BOARD_INFO_FILE, "<IOMEM_INFO>", "</IOMEM_INFO>")
for line in iomem_lines:
mmio_range = line.split(':')[0]
devmmio_tuple = MmioWindow.from_str(mmio_range)
if is_mmio_window_used(line, keywords):
keyword_mmiolist.append(devmmio_tuple)
else:
exclusive_mmiolist.append(devmmio_tuple)
return sorted(keyword_mmiolist), sorted(exclusive_mmiolist)
def removed_nested(list1, list2):
if not list1 or not list2:
return list1
resolvedList = list1[:]
for w1 in resolvedList:
for w2 in list2:
if w2.start <= w1.start <= w2.end and w2.start <= w1.end <= w2.end:
if w1 not in resolvedList:
continue
resolvedList.remove(w1)
return sorted(resolvedList)
def merged_windows(windowslist):
if not windowslist:
return None
sortedlist = sorted(windowslist)
resolvedList = []
last = sortedlist[0]
for cur in sortedlist:
if cur.start <= last.end + 1:
last = MmioWindow(start=last.start, end=max(last.end, cur.end))
else:
resolvedList.append(last)
last = cur
resolvedList.append(last)
return sorted(resolvedList)
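# Example (hypothetical windows): adjacent or overlapping ranges are coalesced,
# since a window starting at last.end + 1 is treated as contiguous:
#   merged_windows([MmioWindow(0x1000, 0x1fff), MmioWindow(0x2000, 0x2fff)])
#   -> [MmioWindow(start=0x1000, end=0x2fff)]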
def get_free_mmio(windowslist, used, size):
if not size:
raise ValueError("allocate size cannot be {}".format(size))
if not windowslist:
raise ValueError("No mmio range is specified:{}".format(windowslist))
for w in windowslist:
window = MmioWindow(start=w.start, end=w.start+size-1)
for u in used:
if window.overlaps(u):
window = MmioWindow(start=u.end+1, end=u.end+size)
continue
if window.overlaps(w):
return window
raise ValueError("Not enough mmio window for a device size {}: {}".format(size, window))
def generate_file(config):
matching_mmios, non_matching_mmios = get_mmio_windows_with_key(['PCI Bus 0000:00'])
matching_mmios = removed_nested(matching_mmios, non_matching_mmios)
non_matching_mmios = [
w for w in non_matching_mmios
if any((w.overlaps(w2) for w2 in matching_mmios))
]
non_matching_mmios = merged_windows(non_matching_mmios)
# list of all vmsix supported device list in bdf format
bdf_list = board_cfg_lib.get_known_caps_pci_devs().get('VMSIX', [])
# list of all PRE_LAUNCHED_VMs' vmsix supported passthrough devices in bdf format
pci_items = common.get_leaf_tag_map(common.SCENARIO_INFO_FILE, "pci_devs", "pci_dev")
pci_devs = scenario_cfg_lib.get_pt_pci_devs(pci_items)
pci_devs_per_vm = get_devs_per_vm_with_key(pci_devs, bdf_list)
# list Service VM vmsix supported devices without other PRE_LAUNCHED_VMs' in bdf format
sos_bdf_list = [
d for d in bdf_list
if all((d not in pci_devs_per_vm[i] for i in pci_devs_per_vm))
]
for vm_i in pci_devs_per_vm:
vm_type = common.VM_TYPES[vm_i]
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
pci_devs_per_vm[vm_i] = sos_bdf_list
mmiolist_per_vm = {}
for vm_i,vm_type in common.VM_TYPES.items():
if vm_i not in mmiolist_per_vm.keys():
mmiolist_per_vm[vm_i] = []
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
mmiolist_per_vm[vm_i] = non_matching_mmios
else:
if vm_i in pci_devs.keys():
match, _ = get_mmio_windows_with_key(pci_devs[vm_i])
mmiolist_per_vm[vm_i] = match
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
if vm_i not in mmiolist_per_vm.keys():
mmiolist_per_vm[vm_i] = []
# TSN reserved region
mmiolist_per_vm[vm_i].append(MmioWindow(start = 0xffff0000 , end = 0xffffffff))
            # For the pre-launched vm, if the TPM is passthrough, this address is used
if vm_i == 0 and board_cfg_lib.is_tpm_passthru():
mmiolist_per_vm[vm_i].append(MmioWindow(start = 0xfed40000, end = 0xfed40000 + 0x5000 - 1))
            # For the pre-launched vm on ehl-crb-b, if the p2sb is passthrough, this address is used
if board_cfg_lib.is_matched_board(('ehl-crb-b')):
p2sb_start = board_cfg_lib.find_p2sb_bar_addr()
mmiolist_per_vm[vm_i].append(MmioWindow(start = p2sb_start, end = p2sb_start + 0x1000000 - 1))
mmiolist_per_vm[vm_i].sort()
    # start to generate vbar_base.h
print("{0}".format(board_cfg_lib.HEADER_LICENSE), file=config)
print(VBAR_INFO_DEFINE, file=config)
common.get_vm_types()
pre_vm = False
sos_vm = False
for vm_type in common.VM_TYPES.values():
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "PRE_LAUNCHED_VM":
pre_vm = True
if scenario_cfg_lib.VM_DB[vm_type]['load_type'] == "SERVICE_VM":
sos_vm = True
if not pre_vm and not sos_vm:
print(VBAR_INFO_ENDIF, file=config)
return
ivshmem_enabled = common.get_hv_item_tag(common.SCENARIO_INFO_FILE, "FEATURES", "IVSHMEM", "IVSHMEM_ENABLED")
if ivshmem_enabled == 'y':
write_ivshmem_vbar(mmiolist_per_vm, matching_mmios, config)
# Get passthrough devices vbar bases
compared_bdf = []
for cnt_sub_name in board_cfg_lib.SUB_NAME_COUNT.keys():
i_cnt = 0
for bdf, bar_attr in board_cfg_lib.PCI_DEV_BAR_DESC.pci_dev_dic.items():
if cnt_sub_name == bar_attr.name and bdf not in compared_bdf:
compared_bdf.append(bdf)
else:
continue
write_vbar(i_cnt, bdf, board_cfg_lib.PCI_DEV_BAR_DESC.pci_bar_dic, bar_attr, \
pci_devs_per_vm, mmiolist_per_vm, matching_mmios, config)
i_cnt += 1
write_vuart_vbar(mmiolist_per_vm, matching_mmios, config)
print(VBAR_INFO_ENDIF, file=config)
| 11,048 |
1,444 |
package mage.cards.t;
import java.util.UUID;
import mage.abilities.effects.common.continuous.BoostOpponentsEffect;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.Duration;
/**
*
* @author Loki
*/
public final class TurnTheTide extends CardImpl {
public TurnTheTide (UUID ownerId, CardSetInfo setInfo) {
super(ownerId,setInfo,new CardType[]{CardType.INSTANT},"{1}{U}");
this.getSpellAbility().addEffect(new BoostOpponentsEffect(-2, 0, Duration.EndOfTurn));
}
public TurnTheTide (final TurnTheTide card) {
super(card);
}
@Override
public TurnTheTide copy() {
return new TurnTheTide(this);
}
}
| 271 |
352 | {
"description": "Counterfactual regret related experiments.",
"order": [
"JuliaRL_TabularCFR_OpenSpiel.jl",
"JuliaRL_DeepCFR_OpenSpiel.jl"
]
} | 64 |
4,036 | <reponame>vadi2/codeql
/* Foo */
#include "h.h"
typedef int foo;
/* Bar */
#include "i.h"
typedef int bar;
| 56 |
348 | <reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t1/062/06209195.json
{"nom":"Calonne-sur-la-Lys","circ":"9ème circonscription","dpt":"Pas-de-Calais","inscrits":1227,"abs":627,"votants":600,"blancs":13,"nuls":6,"exp":581,"res":[{"nuance":"MDM","nom":"Mme <NAME>","voix":190},{"nuance":"FN","nom":"M. <NAME>","voix":148},{"nuance":"LR","nom":"<NAME>","voix":88},{"nuance":"FI","nom":"<NAME>","voix":51},{"nuance":"RDG","nom":"<NAME>","voix":47},{"nuance":"DLF","nom":"M. <NAME>","voix":19},{"nuance":"COM","nom":"M. <NAME>","voix":11},{"nuance":"ECO","nom":"Mme <NAME>","voix":11},{"nuance":"DVG","nom":"M. <NAME>","voix":6},{"nuance":"EXG","nom":"Mme <NAME>","voix":6},{"nuance":"DIV","nom":"M. <NAME>","voix":2},{"nuance":"DIV","nom":"M. <NAME>","voix":2},{"nuance":"ECO","nom":"M. <NAME>","voix":0}]} | 343 |
723 | <filename>mac/vendor/EDSemver/Project/semverTests/EDSemverGreaterThan.h
//
// EDSemverGreaterThan.h
// semver
//
// Created by <NAME> on 7/7/13.
// Copyright (c) 2013 <NAME>. All rights reserved.
//
#import "EDSemverHarness.h"
@interface EDSemverGreaterThan : SenTestCase
@end
| 116 |
732 | //
// OpenShareHeader.h
// openshare
//
// Created by LiuLogan on 15/5/15.
// Copyright (c) 2015 OpenShare <http://openshare.gfzj.us/>. All rights reserved.
//
#ifndef openshare_OpenShareHeader_h
#define openshare_OpenShareHeader_h
#import "OpenShare+QQ.h"
#import "OpenShare+Weibo.h"
#import "OpenShare+Weixin.h"
#import "OpenShare+Renren.h"
#import "OpenShare+Alipay.h"
#endif
| 154 |
2,338 | <reponame>mkinsner/llvm<gh_stars>1000+
/// Test that llvm-cov supports a fake gcov 4.2 format used before clang 11.
// RUN: rm -rf %t && mkdir %t && cd %t
// RUN: echo -e '\n\n\n\n\n\n\n\n\n' > test.cpp && echo > test.h
// RUN: llvm-cov gcov test. --gcno=%S/Inputs/gcov-fake-4.2.gcno --gcda=%S/Inputs/gcov-fake-4.2.gcda | FileCheck %s
// RUN: FileCheck %s --check-prefix=C < test.cpp.gcov
// RUN: FileCheck %s --check-prefix=H < test.h.gcov
// CHECK: File 'test.cpp'
// CHECK-NEXT: Lines executed:84.21% of 38
// CHECK-NEXT: Creating 'test.cpp.gcov'
// CHECK-EMPTY:
// CHECK-NEXT: File './test.h'
// CHECK-NEXT: Lines executed:100.00% of 1
// CHECK-NEXT: Creating 'test.h.gcov'
// CHECK-EMPTY:
// C: -: 0:Source:test.cpp
// C-NEXT: -: 0:Graph:{{.*}}gcov-fake-4.2.gcno
// C-NEXT: -: 0:Data:{{.*}}gcov-fake-4.2.gcda
/// `Runs` is stored in GCOV_TAG_OBJECT_SUMMARY with a length of 9.
// C-NEXT: -: 0:Runs:2
// C-NEXT: -: 0:Programs:1
// C-NEXT: -: 1:
// C-NEXT: -: 2:
// C-NEXT: -: 3:
// C-NEXT: -: 4:
// C-NEXT: -: 5:
// C-NEXT: -: 6:
// C-NEXT: -: 7:
// C-NEXT: -: 8:
// C-NEXT: -: 9:
// C-NEXT:8589934592: 10:
// H: -: 0:Source:./test.h
// H-NEXT: -: 0:Graph:{{.*}}gcov-fake-4.2.gcno
// H-NEXT: -: 0:Data:{{.*}}gcov-fake-4.2.gcda
// H-NEXT: -: 0:Runs:2
// H-NEXT: -: 0:Programs:1
// H-NEXT: 4: 1:
| 887 |
572 | from json import loads
from behave import when, then
from requests import get, post
@when(u'faço uma requisição na url "{url}"')
def request_url(context, url):
context.response = get(url='{}{}'.format(context.base_url, url))
@then(u'a api deve responder')
def check_response_json(context):
assert context.response.json() == loads(context.text)
@when(u'faço uma requisição POST na url "{url}"')
def post_request_json(context, url):
headers = {'Content-Type': 'application/json'}
context.response = post(url='{}{}'.format(context.base_url, url),
json=loads(context.text), headers=headers)
| 250 |
348 | {"nom":"Viellenave-de-Navarrenx","circ":"4ème circonscription","dpt":"Pyrénées-Atlantiques","inscrits":115,"abs":35,"votants":80,"blancs":5,"nuls":2,"exp":73,"res":[{"nuance":"DVD","nom":"<NAME>","voix":46},{"nuance":"REM","nom":"<NAME>","voix":27}]} | 104 |
402 | /**
* Copyright © 2018 spring-data-dynamodb (https://github.com/derjust/spring-data-dynamodb)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.socialsignin.spring.data.dynamodb.repository.support;
import org.socialsignin.spring.data.dynamodb.core.DynamoDBOperations;
import org.socialsignin.spring.data.dynamodb.mapping.DynamoDBMappingContext;
import org.socialsignin.spring.data.dynamodb.repository.util.DynamoDBMappingContextProcessor;
import org.socialsignin.spring.data.dynamodb.repository.util.Entity2DynamoDBTableSynchronizer;
import org.springframework.beans.factory.annotation.Required;
import org.springframework.data.repository.Repository;
import org.springframework.data.repository.core.support.RepositoryFactoryBeanSupport;
import org.springframework.data.repository.core.support.RepositoryFactorySupport;
import java.io.Serializable;
/**
* Special adapter for Springs
* {@link org.springframework.beans.factory.FactoryBean} interface to allow easy
* setup of repository factories via Spring configuration.
*
* @author <NAME>
* @author <NAME>
* @param <T>
* the type of the repository
*/
public class DynamoDBRepositoryFactoryBean<T extends Repository<S, ID>, S, ID extends Serializable>
extends
RepositoryFactoryBeanSupport<T, S, ID> {
private DynamoDBOperations dynamoDBOperations;
private Entity2DynamoDBTableSynchronizer<S, ID> tableSynchronizer;
private DynamoDBMappingContextProcessor<S, ID> dynamoDBMappingContextProcessor;
public DynamoDBRepositoryFactoryBean(Class<? extends T> repositoryInterface) {
super(repositoryInterface);
}
@Override
protected RepositoryFactorySupport createRepositoryFactory() {
assert dynamoDBOperations != null;
assert tableSynchronizer != null;
assert dynamoDBMappingContextProcessor != null;
DynamoDBRepositoryFactory dynamoDBRepositoryFactory = new DynamoDBRepositoryFactory(dynamoDBOperations);
dynamoDBRepositoryFactory.addRepositoryProxyPostProcessor(tableSynchronizer);
dynamoDBRepositoryFactory.addRepositoryProxyPostProcessor(dynamoDBMappingContextProcessor);
return dynamoDBRepositoryFactory;
}
@Required
public void setDynamoDBMappingContextProcessor(
DynamoDBMappingContextProcessor<S, ID> dynamoDBMappingContextProcessor) {
this.dynamoDBMappingContextProcessor = dynamoDBMappingContextProcessor;
}
@Required
public void setEntity2DynamoDBTableSynchronizer(Entity2DynamoDBTableSynchronizer<S, ID> tableSynchronizer) {
this.tableSynchronizer = tableSynchronizer;
}
@Required
public void setDynamoDBOperations(DynamoDBOperations dynamoDBOperations) {
this.dynamoDBOperations = dynamoDBOperations;
}
@Required
public void setDynamoDBMappingContext(DynamoDBMappingContext dynamoDBMappingContext) {
setMappingContext(dynamoDBMappingContext);
}
}
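// Typically this factory bean is registered indirectly, e.g. via
// @EnableDynamoDBRepositories(basePackages = "com.example.repositories")
// (the package name is illustrative, not part of this class).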
| 1,045 |
335 | {
"word": "Windproof",
"definitions": [
"(of a garment or fabric) giving protection from the wind."
],
"parts-of-speech": "Adjective"
} | 65 |
316 | <reponame>burgerdev/vigra
#include "g++_relops_workaround.hxx"
#include "vector_2_image_policy.hxx"
#include "basic_image_test.hxx"
Vector2BasicImageTestSuite::Vector2BasicImageTestSuite()
: vigra::test_suite(" Vector2BasicImageTestSuite")
{
add ( new BasicImageTestSuite<Vector2ImagePolicy<vigra::FVector2Image> > ("vigra::FVector2Image"));
add ( new BasicImageTestSuite<Vector2ImagePolicy<vigra::DVector2Image> > ("vigra::DVector2Image"));
}
// int main()
// {
// Vector2BasicImageTestSuite suite;
// int failed = suite.run();
// std::cout << suite.report() << std::endl;
// return (failed != 0);
// }
| 266 |
1,830 | <filename>test-util/src/main/java/io/camunda/zeebe/test/util/bpmn/random/steps/StepTriggerTimerBoundaryEvent.java<gh_stars>1000+
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH under
* one or more contributor license agreements. See the NOTICE file distributed
* with this work for additional information regarding copyright ownership.
* Licensed under the Zeebe Community License 1.1. You may not use this file
* except in compliance with the Zeebe Community License 1.1.
*/
package io.camunda.zeebe.test.util.bpmn.random.steps;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
public final class StepTriggerTimerBoundaryEvent extends AbstractExecutionStep {
private final String boundaryTimerEventId;
public StepTriggerTimerBoundaryEvent(final String boundaryTimerEventId) {
this.boundaryTimerEventId = boundaryTimerEventId;
}
@Override
protected Map<String, Object> updateVariables(
final Map<String, Object> variables, final Duration activationDuration) {
final var result = new HashMap<>(variables);
result.put(boundaryTimerEventId, activationDuration.toString());
return result;
}
@Override
public boolean isAutomatic() {
return false;
}
@Override
public Duration getDeltaTime() {
return DEFAULT_DELTA;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), boundaryTimerEventId);
}
@Override
public boolean equals(final Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
final StepTriggerTimerBoundaryEvent that = (StepTriggerTimerBoundaryEvent) o;
return boundaryTimerEventId.equals(that.boundaryTimerEventId);
}
public String getBoundaryTimerEventId() {
return boundaryTimerEventId;
}
}
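// Hypothetical example (ids and durations assumed, not from this file): with
// boundaryTimerEventId = "timer_1" and an activation duration of 5 seconds,
// updateVariables returns a copy of the variable map with {"timer_1": "PT5S"}
// added, since Duration.ofSeconds(5).toString() is "PT5S".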
| 603 |
333 | /*
* Copyright 2018 NXP.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of the NXP Semiconductor nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "gitversion.h"
#include "libuuu.h"
#include <string>
using namespace std;
static constexpr auto g_version = GIT_VERSION;
const char *uuu_get_version_string()
{
return g_version;
}
int uuu_get_version()
{
string version_str{g_version};
// Find first dot because major version number must be before it
auto pos = version_str.find(".");
// Find the position of the character right before the start of the number
auto vs = version_str.find_last_not_of("0123456789", pos - 1);
// Let "vs" point exactly to the first character of the major version number
++vs;
string temp_num_str = version_str.substr(vs, pos - vs);
const auto maj = static_cast<int>(stoll(temp_num_str, nullptr, 10));
version_str = version_str.substr(pos + 1);
pos = version_str.find(".");
temp_num_str = version_str.substr(0, pos);
const auto min = static_cast<int>(stoll(temp_num_str, nullptr, 10));
version_str = version_str.substr(pos + 1);
temp_num_str = version_str.substr(0, pos = version_str.find("-"));
const auto build = static_cast<int>(stoll(temp_num_str, nullptr, 10));
return (maj << 24) | (min << 12) | build;
}
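// Hypothetical worked example (version string assumed): for a GIT_VERSION of
// "uuu_1.4.72-0-g1a2b3c", maj = 1, min = 4, build = 72, so uuu_get_version()
// returns (1 << 24) | (4 << 12) | 72 = 16793672.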
| 814 |
882 | <filename>py2/jenkins_h2o_port_allocate.py<gh_stars>100-1000
#!/usr/bin/python
# "Avoid locker or centralized resource by hard-wiring the port mapping within range"
# "implied by max # of ports used per job, max # of executors per machine, and # of machines."
# "Map of source determines port. in/out using env variables"
print "\njenkins_h2o_port_allocate...."
import socket, os, subprocess
USED_HOSTNAMES = [
'mr-0xb1',
'mr-0xb4',
'mr-0x2',
'mr-0x3',
'mr-0x4',
'mr-0x5',
'mr-0x6',
'mr-0x7',
'mr-0x8',
'mr-0x9',
'mr-0x10',
'mr-0xd4',
'mr-0xd5',
'mr-0xd6',
'mr-0xd7',
'mr-0xd8',
'mr-0xd9',
'mr-0xd10',
'Kevin-Ubuntu3',
]
# maximum number of ports a job uses: 10 = 5 jvms * 2 ports per h2o jvm (current max known)
PORTS_PER_SLOT = 10
DEFAULT_BASE_PORT = 54340
EXECUTOR_NUM = 8
def jenkins_h2o_port_allocate():
"""
input: jenkins environment variable EXECUTOR_NUMBER
output: creates ./BASE_PORT.sh, that you should 'source ./PORT.sh'
(can't see the env. variables directly from python?)
which will create os environment variables H2O_PORT and H2O_PORT_OFFSET (legacy)
internal state for this script that can be updated:
USED_HOSTNAMES (list of machine names),
PORTS_PER_SLOT (max per any job),
DEFAULT_BASE_PORT
If you modify any of the internal state, you may introduce contention between
new jenkins jobs and running jenkins jobs. (might not!)
You should stop/start all jobs (or ignore failures) if you modify internal state here.
Hence, no parameters to avoid living dangerously!
"""
if os.environ.has_key("EXECUTOR_NUMBER"):
# this will fail if it's not an integer
executor = int(os.environ["EXECUTOR_NUMBER"])
else:
        executor = 1 # default when EXECUTOR_NUMBER is unset (jenkins numbers executors from 0)
print "jenkins EXECUTOR_NUMBER:", executor
if executor<0 or executor>=EXECUTOR_NUM:
raise Exception("executor: %s wrong? Expecting 1-8 jenkins executors on a machine (0-7 exp.)" % executor)
h2oPort = DEFAULT_BASE_PORT
h2oPortOffset = 0
hostname = socket.gethostname()
if hostname not in USED_HOSTNAMES:
print "WARNING: this hostname: %s isn't in my list. You should add it?" % hostname
print "Will use default base port"
else:
hostnameIndex = USED_HOSTNAMES.index(hostname)
h2oPortOffset = PORTS_PER_SLOT * (executor + hostnameIndex)
h2oPort += h2oPortOffset
print "Possible h2o base_port range is %s to %s" % \
(DEFAULT_BASE_PORT, DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 2)
print "Possible h2o ports used ranged is %s to %s" % \
(DEFAULT_BASE_PORT, DEFAULT_BASE_PORT + (PORTS_PER_SLOT * EXECUTOR_NUM * len(USED_HOSTNAMES)) - 1)
print "want to 'export H2O_PORT=%s'" % h2oPort
print "want to 'export H2O_PORT_OFFSET=%s # legacy'" % h2oPortOffset
f = open('H2O_BASE_PORT.sh','w')
f.write('export H2O_PORT=%s\n' % h2oPort)
f.write('export H2O_PORT_OFFSET=%s # legacy\n' % h2oPortOffset)
f.close()
print "\nNow please:\nsource ./H2O_BASE_PORT.sh"
if __name__ == "__main__":
jenkins_h2o_port_allocate()
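# Hypothetical worked example (values assumed, not from a real jenkins run):
# on host 'mr-0x2' (index 2 in USED_HOSTNAMES) with EXECUTOR_NUMBER=1, the
# offset is PORTS_PER_SLOT * (1 + 2) = 30, so H2O_PORT = 54340 + 30 = 54370
# and H2O_BASE_PORT.sh will contain "export H2O_PORT=54370".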
"""
This auto-magics the manual allocation I did when I parallelized the current 8-way jenkins jobs,
2 per machine, on the jenkins mr-0xd4 that dispatches to mr-0xd5 thru mr-0xd9
The rationale for a global allocate requires understanding what machines a jenkins master/slave can be on,
and what machines they send h2o jars to.
at 0xdata:
A jenkins master is a member of a group of machines. Jenkins can send the python or other test to another slave machine, and then the test can dispatch h2o either locally, or to other machines in the group.
it can target h2o.jars anywhere in that group, or dispatch a job to a slave in that group that might do the same.
We currently have two such groups, with one jenkins master in each group (mr-0xb4 and mr-0xd4)
(update: let's just say it's all one big group. Not worth optimizing for subgroup knowlege)
So using
PORTS_PER_SLOT * (EXECUTOR_NUMBER + hostname offset in the list of total hostnames)
Will give a unique offset from the default 54340 base, for the job, regardless of which jenkins (master or slave) starts it in the group and where the h2o targest are (which is controlled by the config.json used in the job)
all cloud builds done in a job (one or more) use the same offset.
Dispatching tests from your laptop..will they collide with jenkins?
If the host machine is not in the list, like a laptop, then the offset is 0. (54340 will be used). I suppose jenkins could shift its base_port to be at least 10 above 54340, so existing scripts that users have, that use 54340, won't be stepped on by jenkins. 54340 could be the jenkins base port.
EC2:
I suppose if the tests are used in ec2, we only do one h2o jar per machine (or multijvm), so no conflict if 54340 is used. We typically want fast EC2 results, so don't overload target machines. I suppose an EC2 machine list could be created in this script if we started overloading EC2 machines also
PORTS_PER_SLOT is 10 right now, since the most a job will do is 5 h2o jvms.
I guess to ease the transition, I could leave the H2O_PORT_OFFSET as the api to build_cloud(), and have another python script look at the current h2o IP and EXECUTOR_NUMBER env variable from jenkins
Notes:
Right now, assuming the subnet octet range from a group is 160-180 or 181-190 works. 164 is an oddball case (out of the ten range for its group)
I guess I could just put a list of IPs for the jenkins groups that exist, and find the group your in, and then get a "group index" from that list. That's robust and easily maintainable.
This algorithm keeps the total port range in use = (max # of executors per jenkins master or slave) * PORTS_PER_SLOT * (# of machines in a group)
Using 2 executors per machine is nice. 4 is about the max that works well with h2o. so 4 * 10 * 10 = 400 ports
that would be 54340 thru 54721
NICE POSSIBILITES: If we know that ubuntu or other services need to reserve ports that are in our range, we can put in mappings to other ports for those values, or shift the port range or whatever...i.e. we can adjust the algorithm in one place. If the 54340 base is not good, that's set in h2o.py..currently tests don't modify base_port (except for some cloud tests we don't run in jenkins, that do more than 5 jvms on a single machine)
I suppose the tool could output the exact port to use, rather than an offset to h2o.py's default. Maybe initially will output both, so h2o.py can migrate
i.e. environment variables H2O_PORT_OFFSET and H2O_PORT (= 54340 + H2O_PORT_OFFSET)
UPDATE: To allow for dispatching h2o to any machine in any jenkins group, we can have just one group list that has all possible machines. Makes the used port range twice as big (800) but that's okay. It's like going to a 255.255.0.0 network!
Detail:
Jenkins has global environment variables
This one is useful
EXECUTOR_NUMBER The unique number that identifies the current executor (among executors of the same machine) that's carrying out this build. This is the number you see in the "build executor status", except that the number starts from 0, not 1.
Now each slave machine can have multiple executors, in addition to the master.
So since in a grand scheme, we don't know who's creating h2o.jars on target machines, from which machine, (jenkins master or slave)...
it means we want a global h2o port allocation (assuming that scraping an h2o port from OS allocation is ugly)
I have cases on 164 jenkins that send the python job to jenkins slave 174, which dispatches h2o jars to 175-180, Or dispatch to YARN on hadoop clusters, but we don't care about ports there, we get told the ip/port by the h2odriver.
Since the pool of machines in a group is fixed, we have the EXECUTOR_NUMBER, which is the parallelism per machine (jenkins master or slave), and we
will give a unique offset from 54340.
We can call it a "PORT_SLOT" and pass it as an environment variable like the current "export H2O_PORT_OFFSET=40"
that the build_cloud() uses to offset the default base_port. I suppose PORTS_PER_SLOT can be fixed in build_cloud() so it's the same for all jobs (so jobs don't step over each other).
"""
| 2,859 |
716 | <filename>runtime/flang/dbesy03f.c
/*
* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
* See https://llvm.org/LICENSE.txt for license information.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*
*/
/* clang-format off */
/* dbesy03f.c - Implements LIB3F dbesy0 subprogram. */
#include "ent3f.h"
#ifdef _WIN64
#define y0 _y0
#endif
extern double y0(double);
double ENT3F(DBESY0, dbesy0)(double *x) { return y0(*x); }
| 186 |
3,066 | /*
* Licensed to Crate.io GmbH ("Crate") under one or more contributor
* license agreements. See the NOTICE file distributed with this work for
* additional information regarding copyright ownership. Crate licenses
* this file to you under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* However, if you have executed another commercial license agreement
* with Crate these terms will supersede the license and you may use the
* software solely pursuant to the terms of the relevant commercial agreement.
*/
package io.crate.rest.action;
import io.crate.action.sql.BaseResultReceiver;
import io.crate.data.Row;
import io.crate.exceptions.SQLExceptions;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
class RestBulkRowCountReceiver extends BaseResultReceiver {
private final Result[] results;
private final int resultIdx;
private long rowCount;
RestBulkRowCountReceiver(Result[] results, int resultIdx) {
this.results = results;
this.resultIdx = resultIdx;
}
@Override
public void setNextRow(Row row) {
rowCount = ((long) row.get(0));
}
@Override
public void allFinished(boolean interrupted) {
results[resultIdx] = new Result(null, rowCount);
super.allFinished(interrupted);
}
@Override
public void fail(@Nonnull Throwable t) {
results[resultIdx] = new Result(SQLExceptions.messageOf(t), rowCount);
super.fail(t);
}
static class Result {
private String errorMessage;
private long rowCount;
Result(@Nullable String errorMessage, long rowCount) {
this.errorMessage = errorMessage;
this.rowCount = rowCount;
}
@Nullable
String errorMessage() {
return errorMessage;
}
long rowCount() {
return rowCount;
}
}
}
| 804 |
3,269 | <filename>Algo and DSA/LeetCode-Solutions-master/Python/minimum-operations-to-reduce-x-to-zero.py
# Time: O(n)
# Space: O(1)
class Solution(object):
def minOperations(self, nums, x):
"""
:type nums: List[int]
:type x: int
:rtype: int
"""
target = sum(nums)-x
result = -1
curr = left = 0
for right in xrange(len(nums)):
curr += nums[right]
while left < len(nums) and curr > target:
curr -= nums[left]
left += 1
if curr == target:
result = max(result, right-left+1)
return len(nums)-result if result != -1 else -1
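# Worked example (a standard sanity check, not from the original file): for
# nums = [1, 1, 4, 2, 3] and x = 5, target = 11 - 5 = 6; the longest subarray
# summing to 6 is [1, 1, 4] (length 3), so the answer is 5 - 3 = 2 operations.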
| 359 |
1,909 | <filename>spring-batch-core/src/main/java/org/springframework/batch/core/repository/dao/JobExecutionDao.java
/*
* Copyright 2006-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.batch.core.repository.dao;
import java.util.List;
import java.util.Set;
import org.springframework.batch.core.JobExecution;
import org.springframework.batch.core.JobInstance;
import org.springframework.lang.Nullable;
/**
* Data Access Object for job executions.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
public interface JobExecutionDao {
/**
* Save a new JobExecution.
*
* Preconditions: jobInstance the jobExecution belongs to must have a
* jobInstanceId.
*
* @param jobExecution {@link JobExecution} instance to be saved.
*/
void saveJobExecution(JobExecution jobExecution);
/**
* Update and existing JobExecution.
*
* Preconditions: jobExecution must have an Id (which can be obtained by the
* save method) and a jobInstanceId.
*
* @param jobExecution {@link JobExecution} instance to be updated.
*/
void updateJobExecution(JobExecution jobExecution);
/**
* Return all {@link JobExecution}s for given {@link JobInstance}, sorted
* backwards by creation order (so the first element is the most recent).
*
* @param jobInstance parent {@link JobInstance} of the {@link JobExecution}s to find.
* @return {@link List} containing JobExecutions for the jobInstance.
*/
List<JobExecution> findJobExecutions(JobInstance jobInstance);
/**
* Find the last {@link JobExecution} to have been created for a given
* {@link JobInstance}.
* @param jobInstance the {@link JobInstance}
* @return the last {@link JobExecution} to execute for this instance or
* {@code null} if no job execution is found for the given job instance.
*/
@Nullable
JobExecution getLastJobExecution(JobInstance jobInstance);
/**
* @param jobName {@link String} containing the name of the job.
* @return all {@link JobExecution} that are still running (or indeterminate
* state), i.e. having null end date, for the specified job name.
*/
Set<JobExecution> findRunningJobExecutions(String jobName);
/**
* @param executionId {@link Long} containing the id of the execution.
* @return the {@link JobExecution} for given identifier.
*/
@Nullable
JobExecution getJobExecution(Long executionId);
/**
* Because it may be possible that the status of a JobExecution is updated
* while running, the following method will synchronize only the status and
* version fields.
*
* @param jobExecution to be updated.
*/
void synchronizeStatus(JobExecution jobExecution);
}
| 946 |
988 | <gh_stars>100-1000
//------------------------------------------------------------------------------
// GB_ek_slice.h: slice the entries and vectors of a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, <NAME>, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#ifndef GB_EK_SLICE_H
#define GB_EK_SLICE_H
#include "GB.h"
//------------------------------------------------------------------------------
// GB_ek_slice_ntasks: determine # of threads and tasks to use for GB_ek_slice
//------------------------------------------------------------------------------
static inline void GB_ek_slice_ntasks
(
// output
int *nthreads, // # of threads to use for GB_ek_slice
int *ntasks, // # of tasks to create for GB_ek_slice
// input
GrB_Matrix A, // matrix to slice
int ntasks_per_thread, // # of tasks per thread
double work, // total work to do
double chunk, // give each thread at least this much work
int nthreads_max // max # of threads to use
)
{
int64_t anz = GB_nnz_held (A) ;
if (anz == 0)
{
(*nthreads) = 1 ;
(*ntasks) = 1 ;
}
else
{
(*nthreads) = GB_nthreads (work, chunk, nthreads_max) ;
(*ntasks) = (*nthreads == 1) ? 1 : ((ntasks_per_thread) * (*nthreads)) ;
(*ntasks) = GB_IMIN (*ntasks, anz) ;
(*ntasks) = GB_IMAX (*ntasks, 1) ;
}
}
//------------------------------------------------------------------------------
// GB_SLICE_MATRIX: slice a single matrix using GB_ek_slice
//------------------------------------------------------------------------------
#define GB_SLICE_MATRIX_WORK(X,NTASKS_PER_THREAD,chunk,work) \
GB_ek_slice_ntasks (&(X ## _nthreads), &(X ## _ntasks), X, \
NTASKS_PER_THREAD, work, chunk, nthreads_max) ; \
GB_WERK_PUSH (X ## _ek_slicing, 3*(X ## _ntasks)+1, int64_t) ; \
if (X ## _ek_slicing == NULL) \
{ \
/* out of memory */ \
GB_FREE_ALL ; \
return (GrB_OUT_OF_MEMORY) ; \
} \
GB_ek_slice (X ## _ek_slicing, X, X ## _ntasks) ; \
const int64_t *kfirst_ ## X ## slice = X ## _ek_slicing ; \
const int64_t *klast_ ## X ## slice = X ## _ek_slicing + X ## _ntasks ; \
const int64_t *pstart_ ## X ## slice = X ## _ek_slicing + X ## _ntasks*2 ;
#define GB_SLICE_MATRIX(X,NTASKS_PER_THREAD,chunk) \
GB_ek_slice_ntasks (&(X ## _nthreads), &(X ## _ntasks), X, \
NTASKS_PER_THREAD, GB_nnz_held (X) + X->nvec, chunk, nthreads_max) ; \
GB_WERK_PUSH (X ## _ek_slicing, 3*(X ## _ntasks)+1, int64_t) ; \
if (X ## _ek_slicing == NULL) \
{ \
/* out of memory */ \
GB_FREE_ALL ; \
return (GrB_OUT_OF_MEMORY) ; \
} \
GB_ek_slice (X ## _ek_slicing, X, X ## _ntasks) ; \
const int64_t *kfirst_ ## X ## slice = X ## _ek_slicing ; \
const int64_t *klast_ ## X ## slice = X ## _ek_slicing + X ## _ntasks ; \
const int64_t *pstart_ ## X ## slice = X ## _ek_slicing + X ## _ntasks*2 ;
//------------------------------------------------------------------------------
// GB_ek_slice prototypes
//------------------------------------------------------------------------------
// Slice the entries of a matrix or vector into ntasks slices.
// Task t does entries pstart_slice [t] to pstart_slice [t+1]-1 and
// vectors kfirst_slice [t] to klast_slice [t]. The first and last vectors
// may be shared with prior slices and subsequent slices.
// On input, ntasks must be <= nnz (A), unless nnz (A) is zero. In that
// case, ntasks must be 1.
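// A hypothetical illustration (numbers assumed): slicing a matrix with
// nnz(A) = 100 into ntasks = 4 gives each task roughly 25 entries; task t
// then handles entries pstart_slice [t] to pstart_slice [t+1]-1 and vectors
// kfirst_slice [t] to klast_slice [t], where the first and last vectors of a
// task may be split across neighboring tasks.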
void GB_ek_slice // slice a matrix
(
// output:
int64_t *restrict A_ek_slicing, // size 3*ntasks+1
// input:
GrB_Matrix A, // matrix to slice
int ntasks // # of tasks
) ;
void GB_ek_slice_merge1 // merge column counts for the matrix C
(
// input/output:
int64_t *restrict Cp, // column counts
// input:
const int64_t *restrict Wfirst, // size ntasks
const int64_t *restrict Wlast, // size ntasks
const int64_t *ek_slicing, // size 3*ntasks+1
const int ntasks // # of tasks
) ;
void GB_ek_slice_merge2 // merge final results for matrix C
(
// output
int64_t *C_nvec_nonempty, // # of non-empty vectors in C
int64_t *restrict Cp_kfirst, // size ntasks
// input/output
int64_t *restrict Cp, // size cnvec+1
// input
const int64_t cnvec,
const int64_t *restrict Wfirst, // size ntasks
const int64_t *restrict Wlast, // size ntasks
const int64_t *ek_slicing, // size 3*ntasks+1
const int ntasks, // # of tasks used to construct C
const int nthreads, // # of threads to use
GB_Context Context
) ;
//------------------------------------------------------------------------------
// GB_get_pA_and_pC: find the part of A(:,k) and C(:,k) for this task
//------------------------------------------------------------------------------
// The tasks were generated by GB_ek_slice.
static inline void GB_get_pA_and_pC
(
// output
int64_t *pA_start,
int64_t *pA_end,
int64_t *pC,
// input
int tid, // task id
int64_t k, // current vector
int64_t kfirst, // first vector for this slice
int64_t klast, // last vector for this slice
const int64_t *restrict pstart_slice, // start of each slice in A
const int64_t *restrict Cp_kfirst, // start of each slice in C
const int64_t *restrict Cp, // vector pointers for C
int64_t cvlen, // C->vlen
const int64_t *restrict Ap, // vector pointers for A
int64_t avlen // A->vlen
)
{
int64_t p0 = GBP (Ap, k, avlen) ;
int64_t p1 = GBP (Ap, k+1, avlen) ;
if (k == kfirst)
{
// First vector for task tid; may only be partially owned.
(*pA_start) = pstart_slice [tid] ;
(*pA_end ) = GB_IMIN (p1, pstart_slice [tid+1]) ;
(*pC) = Cp_kfirst [tid] ;
}
else if (k == klast)
{
// Last vector for task tid; may only be partially owned.
(*pA_start) = p0 ;
(*pA_end ) = pstart_slice [tid+1] ;
(*pC) = GBP (Cp, k, cvlen) ;
}
else
{
// task tid entirely owns this vector A(:,k).
(*pA_start) = p0 ;
(*pA_end ) = p1 ;
(*pC) = GBP (Cp, k, cvlen) ;
}
}
//------------------------------------------------------------------------------
// GB_get_pA: find the part of A(:,k) to be operated on by this task
//------------------------------------------------------------------------------
// The tasks were generated by GB_ek_slice.
static inline void GB_get_pA
(
// output
int64_t *pA_start,
int64_t *pA_end,
// input
int tid, // task id
int64_t k, // current vector
int64_t kfirst, // first vector for this slice
int64_t klast, // last vector for this slice
const int64_t *restrict pstart_slice, // start of each slice in A
const int64_t *restrict Ap, // vector pointers for A
int64_t avlen // A->vlen
)
{
int64_t p0 = GBP (Ap, k, avlen) ;
int64_t p1 = GBP (Ap, k+1, avlen) ;
if (k == kfirst)
{
// First vector for task tid; may only be partially owned.
(*pA_start) = pstart_slice [tid] ;
(*pA_end ) = GB_IMIN (p1, pstart_slice [tid+1]) ;
}
else if (k == klast)
{
// Last vector for task tid; may only be partially owned.
(*pA_start) = p0 ;
(*pA_end ) = pstart_slice [tid+1] ;
}
else
{
// task tid entirely owns this vector A(:,k).
(*pA_start) = p0 ;
(*pA_end ) = p1 ;
}
}
#endif
| 4,438 |
1,350 | <filename>sdk/eventhubs/azure-messaging-eventhubs-track1-perf/src/main/java/com/azure/messaging/eventhubs/perf/SendEventDataTest.java
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.messaging.eventhubs.perf;
import com.microsoft.azure.eventhubs.EventData;
import com.microsoft.azure.eventhubs.EventHubException;
import reactor.core.publisher.Mono;
import java.util.concurrent.CompletableFuture;
/**
* Sends a number of {@link EventData} to Event Hub.
*/
public class SendEventDataTest extends ServiceTest<EventHubsOptions> {
/**
* Creates an instance of performance test.
*
* @param options the options configured for the test.
*/
public SendEventDataTest(EventHubsOptions options) {
super(options);
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> setupAsync() {
if (options.isSync() && client == null) {
client = createEventHubClient();
} else if (!options.isSync() && clientFuture == null) {
clientFuture = createEventHubClientAsync();
}
return super.setupAsync();
}
/**
* {@inheritDoc}
*/
@Override
public void run() {
for (int i = 0; i < events.size(); i++) {
final EventData event = events.get(i);
try {
client.sendSync(event);
} catch (EventHubException e) {
throw new RuntimeException("Unable to send event at index: " + i, e);
}
}
}
/**
* {@inheritDoc}
*/
@Override
public Mono<Void> runAsync() {
return Mono.fromCompletionStage(clientFuture.thenComposeAsync(client -> {
final CompletableFuture<?>[] completableFutures = events.stream()
.map(client::send)
.toArray(CompletableFuture<?>[]::new);
return CompletableFuture.allOf(completableFutures);
}));
}
}
| 836 |
3,631 | <filename>kie-dmn/kie-dmn-backend/src/test/java/org/kie/dmn/backend/marshalling/v1_3/extensions/TrisoExtensionRegister.java
/*
* Copyright 2019 Red Hat, Inc. and/or its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kie.dmn.backend.marshalling.v1_3.extensions;
import javax.xml.namespace.QName;
import com.thoughtworks.xstream.XStream;
import com.thoughtworks.xstream.io.xml.QNameMap;
import org.kie.dmn.api.marshalling.DMNExtensionRegister;
public class TrisoExtensionRegister implements DMNExtensionRegister {
@Override
public void registerExtensionConverters(XStream xStream) {
xStream.processAnnotations(ProjectCharter.class);
}
@Override
public void beforeMarshal(Object o, QNameMap qmap) {
qmap.registerMapping(new QName("http://www.trisotech.com/2015/triso/modeling", "ProjectCharter", "triso"), "ProjectCharter");
qmap.registerMapping(new QName("http://www.trisotech.com/2015/triso/modeling", "projectGoals", "triso"), "projectGoals");
qmap.registerMapping(new QName("http://www.trisotech.com/2015/triso/modeling", "projectChallenges", "triso"), "projectChallenges");
qmap.registerMapping(new QName("http://www.trisotech.com/2015/triso/modeling", "projectStakeholders", "triso"), "projectStakeholders");
}
}
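// Hypothetical XML shape produced by these mappings (element content assumed):
// the registered QNames let XStream (un)marshal fragments like
//   <triso:ProjectCharter xmlns:triso="http://www.trisotech.com/2015/triso/modeling">
//     <triso:projectGoals>...</triso:projectGoals>
//   </triso:ProjectCharter>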
| 618 |
620 | <gh_stars>100-1000
import os
import random
import numpy as np
from scipy.sparse import identity, csr_matrix
from utils import (build_knns, knns2ordered_nbrs, fast_knns2spmat,
build_symmetric_adj, clusters2labels, Timer)
def chinese_whispers(feats, prefix, name, knn_method, knn, th_sim, iters,
**kwargs):
""" Chinese Whispers Clustering Algorithm
Paper: Chinese whispers: an efficient graph clustering algorithm
and its application to natural language processing problems.
Reference code:
- http://alexloveless.co.uk/data/chinese-whispers-graph-clustering-in-python/
- https://github.com/zhly0/facenet-face-cluster-chinese-whispers-
"""
import networkx as nx
assert len(feats) > 1
with Timer('create graph'):
knn_prefix = os.path.join(prefix, 'knns', name)
knns = build_knns(knn_prefix, feats, knn_method, knn)
spmat = fast_knns2spmat(knns, knn, th_sim, use_sim=True)
size = len(feats)
nodes = [(n_i, {'cluster': n_i}) for n_i in range(size)]
c = spmat.tocoo()
edges = [(n_i, n_j, {
'weight': s
}) for n_i, n_j, s in zip(c.row, c.col, c.data)]
G = nx.Graph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
node_num = G.number_of_nodes()
edge_num = G.number_of_edges()
assert size == node_num
print('#nodes: {}, #edges: {}'.format(node_num, edge_num))
with Timer('whisper iteratively (iters={})'.format(iters)):
cluster_nodes = list(G.nodes())
for _ in range(iters):
idxs = [i for i in range(node_num)]
random.shuffle(idxs)
for idx in idxs:
node = cluster_nodes[idx]
nbrs = G[node]
if len(nbrs) == 0:
continue
cluster2weight = {}
for nbr in nbrs:
assigned_cluster = G.nodes[nbr]['cluster']
edge_weight = G[node][nbr]['weight']
if assigned_cluster not in cluster2weight:
cluster2weight[assigned_cluster] = 0
cluster2weight[assigned_cluster] += edge_weight
# set the class of node to its neighbor with largest weight
cluster2weight = sorted(cluster2weight.items(),
key=lambda kv: kv[1],
reverse=True)
G.nodes[node]['cluster'] = cluster2weight[0][0]
clusters = {}
for (node, data) in G.nodes.items():
assigned_cluster = data['cluster']
if assigned_cluster not in clusters:
clusters[assigned_cluster] = []
clusters[assigned_cluster].append(node)
print('#cluster: {}'.format(len(clusters)))
labels = clusters2labels(clusters.values())
labels = list(labels.values())
return labels
def _matrix2array(m):
return np.asarray(m).reshape(-1)
def _maxrow(D, n):
row = np.arange(n)
col = _matrix2array(D.argmax(axis=1))
data = _matrix2array(D[row, col])
D = csr_matrix((data, (row, col)), shape=(n, n))
return D
def chinese_whispers_fast(feats, prefix, name, knn_method, knn, th_sim, iters,
**kwargs):
""" Chinese Whispers Clustering Algorithm
Paper: Chinese whispers: an efficient graph clustering algorithm
and its application to natural language processing problems.
    This implementation follows the matrix operation as described in Figure 4
    in the paper. We switch the `maxrow` and `D^{t-1} * A_G` to make it
    easier for post-processing.
    The current result is inferior to `chinese_whispers` as it lacks the
    random mechanism of the iterative algorithm. The paper introduces two
    operations to tackle this issue, namely `random mutation` and `keep class`.
    However, it is not very clear how to set these two hyper-parameters.
"""
assert len(feats) > 1
with Timer('create graph'):
knn_prefix = os.path.join(prefix, 'knns', name)
knns = build_knns(knn_prefix, feats, knn_method, knn)
spmat = fast_knns2spmat(knns, knn, th_sim, use_sim=True)
A = build_symmetric_adj(spmat, self_loop=False)
node_num = len(feats)
edge_num = A.nnz
print('#nodes: {}, #edges: {}'.format(node_num, edge_num))
with Timer('whisper iteratively (iters={})'.format(iters)):
D = identity(node_num)
for _ in range(iters):
D = D * A # it is equal to D.dot(A)
D = _maxrow(D, node_num)
assert D.nnz == node_num
clusters = {}
assigned_clusters = D.tocoo().col
for (node, assigned_cluster) in enumerate(assigned_clusters):
if assigned_cluster not in clusters:
clusters[assigned_cluster] = []
clusters[assigned_cluster].append(node)
print('#cluster: {}'.format(len(clusters)))
labels = clusters2labels(clusters.values())
labels = list(labels.values())
return labels
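# Hypothetical usage sketch (argument values assumed; the valid knn_method
# strings depend on utils.build_knns and are not shown in this file):
#   labels = chinese_whispers(feats, prefix='./data', name='part0',
#                             knn_method='hnsw', knn=80, th_sim=0.0, iters=20)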
| 2,348 |
347 | package org.ovirt.engine.api.restapi.test.mappers.impl;
import org.ovirt.engine.api.restapi.test.mappers.api.IFoo;
public class FooImpl implements IFoo {
private String s;
private String other;
public FooImpl() {
}
public FooImpl(String s) {
this.s = s;
}
public FooImpl(String s, String other) {
this(s);
this.other = other;
}
@Override
public String get() {
return s;
}
@Override
public void set(String s) {
this.s = s;
}
public String other() {
return other;
}
}
| 258 |
335 | {
"word": "Exude",
"definitions": [
"(with reference to moisture or a smell) discharge or be discharged slowly and steadily.",
"(of a person) display (an emotion or quality) strongly and openly.",
"(of a place) have a strong atmosphere of."
],
"parts-of-speech": "Verb"
} | 111 |
621 | package net.neevek.android.lib.paginize;
import android.content.Intent;
import android.content.res.Configuration;
import android.content.res.Resources;
import android.os.Bundle;
import android.view.KeyEvent;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import net.neevek.android.lib.paginize.annotation.InsertPageLayout;
import net.neevek.android.lib.paginize.annotation.PageLayout;
import net.neevek.android.lib.paginize.exception.InjectFailedException;
import net.neevek.android.lib.paginize.util.AnnotationUtils;
import net.neevek.android.lib.paginize.util.ViewFinder;
import java.util.ArrayList;
import java.util.List;
/**
* Copyright (c) 2015 neevek <<EMAIL>>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* This class encapsulates a {@link android.view.View}, and declares a set of
* lifecycle methods.
*
* @see net.neevek.android.lib.paginize.Page
* @see net.neevek.android.lib.paginize.InnerPage
*/
public abstract class ViewWrapper {
/**
* This field will be made private in the future to make the API consistent
*
* @deprecated use {@link #getContext()} instead.
*/
protected PageActivity mContext;
private View mView;
View mViewCurrentFocus;
public ViewWrapper(PageActivity pageActivity) {
mContext = pageActivity;
init();
}
private void init() {
Class clazz = getClass();
try {
List<Class> list = new ArrayList<Class>(4);
do {
list.add(clazz);
if (mView == null && clazz.isAnnotationPresent(PageLayout.class)) {
mView = mContext.getLayoutInflater().inflate(((PageLayout) clazz.getAnnotation(PageLayout.class)).value(), null);
}
} while ((clazz = clazz.getSuperclass()) != ViewWrapper.class);
if (mView == null) {
throw new IllegalArgumentException("Must specify a layout resource with the @PageLayout annotation on " + clazz.getName());
}
if (list.size() > 1) {
// -2 because a Page with @PageLayout should not have @InsertPageLayout, which will be silently ignored.
for (int i = list.size() - 2; i >= 0; --i) {
clazz = list.get(i);
if (clazz.isAnnotationPresent(InsertPageLayout.class)) {
InsertPageLayout insertPageLayoutAnno = (InsertPageLayout) clazz.getAnnotation(InsertPageLayout.class);
if (insertPageLayoutAnno.parent() != -1) {
ViewGroup root = (ViewGroup) mView.findViewById(insertPageLayoutAnno.parent());
if (root == null) {
throw new IllegalArgumentException("The parent specified in @InsertPageLayout is not found.");
}
mContext.getLayoutInflater().inflate(insertPageLayoutAnno.value(), root, true);
} else {
mContext.getLayoutInflater().inflate(insertPageLayoutAnno.value(), (ViewGroup) mView, true);
}
}
}
}
ViewFinder viewFinder = new ViewFinder() {
public View findViewById(int id) {
return ViewWrapper.this.findViewById(id);
}
};
for (int i = list.size() - 1; i >= 0; --i) {
AnnotationUtils.initAnnotatedFields(list.get(i), this, viewFinder, false);
AnnotationUtils.handleAnnotatedConstructors(list.get(i), this, viewFinder, false);
}
} catch (Exception e) {
e.printStackTrace();
throw new InjectFailedException(e);
}
}
/**
* inject views after the ViewWrapper is constructed
*/
protected View lazyInitializeLayout(int layoutResId) {
final View view = mContext.getLayoutInflater().inflate(layoutResId, null, false);
ViewFinder viewFinder = new ViewFinder() {
public View findViewById(int id) {
return view.findViewById(id);
}
};
try {
StackTraceElement[] traces = Thread.currentThread().getStackTrace();
Class<?> clazz = null;
String className;
String viewWrapperClassName = ViewWrapper.class.getName();
for (StackTraceElement trace : traces) {
className = trace.getClassName();
if (viewWrapperClassName.equals(className)) {
continue;
}
Class<?> cls = Class.forName(className);
if (ViewWrapper.class.isAssignableFrom(cls)) {
clazz = cls;
break;
}
}
if (clazz != null) {
AnnotationUtils.initAnnotatedFields(clazz, this, viewFinder, true);
AnnotationUtils.handleAnnotatedConstructors(clazz, this, viewFinder, true);
} else {
        //usually we cannot reach here; fall back to the runtime class
AnnotationUtils.initAnnotatedFields(getClass(), this, viewFinder, true);
AnnotationUtils.handleAnnotatedConstructors(getClass(), this, viewFinder, true);
}
} catch (Exception e) {
e.printStackTrace();
throw new InjectFailedException(e);
}
return view;
}
public PageActivity getContext() {
return mContext;
}
public View getView() {
return mView;
}
protected View findViewById(int id) {
return mView.findViewById(id);
}
protected String getString(int resId) {
return mContext.getString(resId);
}
protected String getString(int resId, Object... args) {
return mContext.getString(resId, args);
}
protected Resources getResources() {
return mContext.getResources();
}
protected void hideTopPage() {
mContext.hideTopPage();
}
protected boolean isAttached() {
return mView.getParent() != null;
}
public boolean post(Runnable action) {
if (mView != null) {
return mView.post(action);
}
return false;
}
public boolean postDelayed(Runnable action, long delayMillis) {
if (mView != null) {
return mView.postDelayed(action, delayMillis);
}
return false;
}
/**
* onShow is called when the page is pushed on the page stack,
* at this point the Page is still not be visible
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onShow(Object arg) {
}
/**
* onShown is called after the page is pushed on the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onShown(Object arg) {
}
/**
* onHide is called before the page is popped out of the page stack,
* at this point the Page is still visible
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onHide() {
}
/**
* onHidden is called after the page is popped out of the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onHidden() {
}
/**
* onCover is called for the current ViewWrapper before a new
* ViewWrapper is pushed on the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onCover() {
mViewCurrentFocus = getContext().getCurrentFocus();
}
/**
* onCovered is called for the current ViewWrapper when a new
* ViewWrapper is pushed on the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onCovered() {
}
/**
   * onUncover is called for the previous page before the current page
* is popped out of the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onUncover(Object arg) {
if (mViewCurrentFocus != null) {
mViewCurrentFocus.requestFocus();
}
}
/**
* onUncovered is called for the previous page when the current page
* is popped out of the page stack
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onUncovered(Object arg) {
}
/**
* onBackPressed mirrors Activity.onBackPressed, only the current
* page(on top of the stack) receives this call
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public boolean onBackPressed() {
return false;
}
/**
* onMenuPressed is called when KeyEvent for onKeyDown() is KEYCODE_MENU, only the current
* page(on top of the stack) receives this call
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public boolean onMenuPressed() {
return false;
}
/**
* onActivityResult mirrors Activity.onActivityResult, only the current
* page(on top of the stack) receives this call
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onActivityResult(int requestCode, int resultCode, Intent data) {
}
/**
* onPause mirrors Activity.onPause, only the current page
* (on top of the stack) receives this call
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onPause() {
}
/**
* onResume mirrors Activity.onResume, only the current page
* (on top of the stack) receives this call
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onResume() {
}
/**
   * onKeyDown mirrors Activity.onKeyDown
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public boolean onKeyDown(int keyCode, KeyEvent event) {
return false;
}
/**
   * onKeyUp mirrors Activity.onKeyUp
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public boolean onKeyUp(int keyCode, KeyEvent event) {
return false;
}
/**
   * onTouchEvent mirrors Activity.onTouchEvent
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public boolean onTouchEvent(MotionEvent event) {
return false;
}
/**
   * onConfigurationChanged mirrors Activity.onConfigurationChanged
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onConfigurationChanged(Configuration newConfig) {
}
/**
   * onSaveInstanceState mirrors Activity.onSaveInstanceState
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onSaveInstanceState(Bundle outState) {
}
/**
   * onRestoreInstanceState mirrors Activity.onRestoreInstanceState
*
* @see net.neevek.android.lib.paginize.PageManager
*/
public void onRestoreInstanceState(Bundle savedInstanceState) {
}
public boolean shouldSaveInstanceState() {
return true;
}
}
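// Hypothetical usage sketch (class and layout names assumed, not from this
// library): a concrete page declares its layout with @PageLayout so that
// init() can inflate it and inject the annotated fields, e.g.
//   @PageLayout(R.layout.page_main)
//   public class MainPage extends Page { ... }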
| 4,000 |
521 | <reponame>wk8/elle
#ifndef VERSION_HPP
# define VERSION_HPP
# define BOOST_VERSION 105300
#endif
| 44 |