| patch (string, lengths 17 to 31.2k) | y (int64, 1 to 1) | oldf (string, lengths 0 to 2.21M) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, lengths 8 to 843) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
@@ -311,6 +311,10 @@ bool rai::uint256_union::decode_dec (std::string const & text)
{
stream >> number_l;
*this = number_l;
+ if (!stream.eof ())
+ {
+ result = true;
+ }
}
catch (std::runtime_error &)
{
| 1 |
#include <rai/lib/numbers.hpp>
#include <ed25519-donna/ed25519.h>
#include <blake2/blake2.h>
#include <cryptopp/aes.h>
#include <cryptopp/modes.h>
thread_local CryptoPP::AutoSeededRandomPool rai::random_pool;
namespace
{
char const * base58_reverse ("~012345678~~~~~~~9:;<=>?@~ABCDE~FGHIJKLMNOP~~~~~~QRSTUVWXYZ[~\\]^_`abcdefghi");
uint8_t base58_decode (char value)
{
assert (value >= '0');
assert (value <= '~');
auto result (base58_reverse [value - 0x30] - 0x30);
return result;
}
char const * account_lookup ("13456789abcdefghijkmnopqrstuwxyz");
char const * account_reverse ("~0~1234567~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~89:;<=>?@AB~CDEFGHIJK~LMNO~~~~~");
char account_encode (uint8_t value)
{
assert (value < 32);
auto result (account_lookup [value]);
return result;
}
uint8_t account_decode (char value)
{
assert (value >= '0');
assert (value <= '~');
auto result (account_reverse [value - 0x30] - 0x30);
return result;
}
}
void rai::uint256_union::encode_account (std::string & destination_a) const
{
assert (destination_a.empty ());
destination_a.reserve (64);
uint64_t check (0);
blake2b_state hash;
blake2b_init (&hash, 5);
blake2b_update (&hash, bytes.data (), bytes.size ());
blake2b_final (&hash, reinterpret_cast <uint8_t *> (&check), 5);
rai::uint512_t number_l (number ());
number_l <<= 40;
number_l |= rai::uint512_t (check);
for (auto i (0); i < 60; ++i)
{
auto r (number_l.convert_to <uint8_t> () & 0x1f);
number_l >>= 5;
destination_a.push_back (account_encode (r));
}
destination_a.append ("_brx"); // xrb_
std::reverse (destination_a.begin (), destination_a.end ());
}
std::string rai::uint256_union::to_account_split () const
{
auto result (to_account ());
assert (result.size () == 64);
result.insert (32, "\n");
return result;
}
std::string rai::uint256_union::to_account () const
{
std::string result;
encode_account (result);
return result;
}
bool rai::uint256_union::decode_account_v1 (std::string const & source_a)
{
auto result (source_a.size () != 50);
if (!result)
{
rai::uint512_t number_l;
for (auto i (source_a.begin ()), j (source_a.end ()); !result && i != j; ++i)
{
uint8_t character (*i);
result = character < 0x30 || character >= 0x80;
if (!result)
{
uint8_t byte (base58_decode (character));
result = byte == '~';
if (!result)
{
number_l *= 58;
number_l += byte;
}
}
}
if (!result)
{
*this = number_l.convert_to <rai::uint256_t> ();
uint32_t check ((number_l >> 256).convert_to <uint32_t> ());
result = (number_l >> (256 + 32)) != 13;
if (!result)
{
uint32_t validation;
blake2b_state hash;
blake2b_init (&hash, sizeof (validation));
blake2b_update (&hash, bytes.data (), sizeof (bytes));
blake2b_final (&hash, reinterpret_cast <uint8_t *> (&validation), sizeof (validation));
result = check != validation;
}
}
}
return result;
}
bool rai::uint256_union::decode_account (std::string const & source_a)
{
auto result (source_a.size () != 64);
if (!result)
{
if (source_a [0] == 'x' && source_a [1] == 'r' && source_a [2] == 'b' && (source_a [3] == '_' || source_a [3] == '-'))
{
rai::uint512_t number_l;
for (auto i (source_a.begin () + 4), j (source_a.end ()); !result && i != j; ++i)
{
uint8_t character (*i);
result = character < 0x30 || character >= 0x80;
if (!result)
{
uint8_t byte (account_decode (character));
result = byte == '~';
if (!result)
{
number_l <<= 5;
number_l += byte;
}
}
}
if (!result)
{
*this = (number_l >> 40).convert_to <rai::uint256_t> ();
uint64_t check (number_l.convert_to <uint64_t> ());
check &= 0xffffffffff;
uint64_t validation (0);
blake2b_state hash;
blake2b_init (&hash, 5);
blake2b_update (&hash, bytes.data (), bytes.size ());
blake2b_final (&hash, reinterpret_cast <uint8_t *> (&validation), 5);
result = check != validation;
}
}
else
{
result = true;
}
}
else
{
result = decode_account_v1 (source_a);
}
return result;
}
rai::uint256_union::uint256_union (rai::uint256_t const & number_a)
{
rai::uint256_t number_l (number_a);
for (auto i (bytes.rbegin ()), n (bytes.rend ()); i != n; ++i)
{
*i = ((number_l) & 0xff).convert_to <uint8_t> ();
number_l >>= 8;
}
}
bool rai::uint256_union::operator == (rai::uint256_union const & other_a) const
{
return bytes == other_a.bytes;
}
// Construct a uint256_union = AES_ENC_CTR (cleartext, key, iv)
void rai::uint256_union::encrypt (rai::raw_key const & cleartext, rai::raw_key const & key, uint128_union const & iv)
{
CryptoPP::AES::Encryption alg (key.data.bytes.data (), sizeof (key.data.bytes));
CryptoPP::CTR_Mode_ExternalCipher::Encryption enc (alg, iv.bytes.data ());
enc.ProcessData (bytes.data (), cleartext.data.bytes.data (), sizeof (cleartext.data.bytes));
}
bool rai::uint256_union::is_zero () const
{
return qwords [0] == 0 && qwords [1] == 0 && qwords [2] == 0 && qwords [3] == 0;
}
std::string rai::uint256_union::to_string () const
{
std::string result;
encode_hex (result);
return result;
}
bool rai::uint256_union::operator < (rai::uint256_union const & other_a) const
{
return number () < other_a.number ();
}
rai::uint256_union & rai::uint256_union::operator ^= (rai::uint256_union const & other_a)
{
auto j (other_a.qwords.begin ());
for (auto i (qwords.begin ()), n (qwords.end ()); i != n; ++i, ++j)
{
*i ^= *j;
}
return *this;
}
rai::uint256_union rai::uint256_union::operator ^ (rai::uint256_union const & other_a) const
{
rai::uint256_union result;
auto k (result.qwords.begin ());
for (auto i (qwords.begin ()), j (other_a.qwords.begin ()), n (qwords.end ()); i != n; ++i, ++j, ++k)
{
*k = *i ^ *j;
}
return result;
}
rai::uint256_union::uint256_union (std::string const & hex_a)
{
decode_hex (hex_a);
}
void rai::uint256_union::clear ()
{
qwords.fill (0);
}
rai::uint256_t rai::uint256_union::number () const
{
rai::uint256_t result;
auto shift (0);
for (auto i (bytes.begin ()), n (bytes.end ()); i != n; ++i)
{
result <<= shift;
result |= *i;
shift = 8;
}
return result;
}
void rai::uint256_union::encode_hex (std::string & text) const
{
assert (text.empty ());
std::stringstream stream;
stream << std::hex << std::noshowbase << std::setw (64) << std::setfill ('0');
stream << number ();
text = stream.str ();
}
bool rai::uint256_union::decode_hex (std::string const & text)
{
auto result (false);
if (!text.empty ())
{
if (text.size () <= 64)
{
std::stringstream stream (text);
stream << std::hex << std::noshowbase;
rai::uint256_t number_l;
try
{
stream >> number_l;
*this = number_l;
if (!stream.eof ())
{
result = true;
}
}
catch (std::runtime_error &)
{
result = true;
}
}
else
{
result = true;
}
}
else
{
result = true;
}
return result;
}
void rai::uint256_union::encode_dec (std::string & text) const
{
assert (text.empty ());
std::stringstream stream;
stream << std::dec << std::noshowbase;
stream << number ();
text = stream.str ();
}
bool rai::uint256_union::decode_dec (std::string const & text)
{
auto result (text.size () > 78);
if (!result)
{
std::stringstream stream (text);
stream << std::dec << std::noshowbase;
rai::uint256_t number_l;
try
{
stream >> number_l;
*this = number_l;
}
catch (std::runtime_error &)
{
result = true;
}
}
return result;
}
rai::uint256_union::uint256_union (uint64_t value0)
{
*this = rai::uint256_t (value0);
}
bool rai::uint256_union::operator != (rai::uint256_union const & other_a) const
{
return ! (*this == other_a);
}
bool rai::uint512_union::operator == (rai::uint512_union const & other_a) const
{
return bytes == other_a.bytes;
}
rai::uint512_union::uint512_union (rai::uint512_t const & number_a)
{
rai::uint512_t number_l (number_a);
for (auto i (bytes.rbegin ()), n (bytes.rend ()); i != n; ++i)
{
*i = ((number_l) & 0xff).convert_to <uint8_t> ();
number_l >>= 8;
}
}
void rai::uint512_union::clear ()
{
bytes.fill (0);
}
rai::uint512_t rai::uint512_union::number () const
{
rai::uint512_t result;
auto shift (0);
for (auto i (bytes.begin ()), n (bytes.end ()); i != n; ++i)
{
result <<= shift;
result |= *i;
shift = 8;
}
return result;
}
void rai::uint512_union::encode_hex (std::string & text) const
{
assert (text.empty ());
std::stringstream stream;
stream << std::hex << std::noshowbase << std::setw (128) << std::setfill ('0');
stream << number ();
text = stream.str ();
}
bool rai::uint512_union::decode_hex (std::string const & text)
{
auto result (text.size () > 128);
if (!result)
{
std::stringstream stream (text);
stream << std::hex << std::noshowbase;
rai::uint512_t number_l;
try
{
stream >> number_l;
*this = number_l;
if (!stream.eof ())
{
result = true;
}
}
catch (std::runtime_error &)
{
result = true;
}
}
return result;
}
bool rai::uint512_union::operator != (rai::uint512_union const & other_a) const
{
return ! (*this == other_a);
}
rai::uint512_union & rai::uint512_union::operator ^= (rai::uint512_union const & other_a)
{
uint256s [0] ^= other_a.uint256s [0];
uint256s [1] ^= other_a.uint256s [1];
return *this;
}
std::string rai::uint512_union::to_string () const
{
std::string result;
encode_hex (result);
return result;
}
rai::raw_key::~raw_key ()
{
data.clear ();
}
bool rai::raw_key::operator == (rai::raw_key const & other_a) const
{
return data == other_a.data;
}
bool rai::raw_key::operator != (rai::raw_key const & other_a) const
{
return !(*this == other_a);
}
// Set this = AES_DEC_CTR (ciphertext, key, iv)
void rai::raw_key::decrypt (rai::uint256_union const & ciphertext, rai::raw_key const & key_a, uint128_union const & iv)
{
CryptoPP::AES::Encryption alg (key_a.data.bytes.data (), sizeof (key_a.data.bytes));
CryptoPP::CTR_Mode_ExternalCipher::Decryption dec (alg, iv.bytes.data ());
dec.ProcessData (data.bytes.data (), ciphertext.bytes.data (), sizeof (ciphertext.bytes));
}
rai::uint512_union rai::sign_message (rai::raw_key const & private_key, rai::public_key const & public_key, rai::uint256_union const & message)
{
rai::uint512_union result;
ed25519_sign (message.bytes.data (), sizeof (message.bytes), private_key.data.bytes.data (), public_key.bytes.data (), result.bytes.data ());
return result;
}
void rai::deterministic_key (rai::uint256_union const & seed_a, uint32_t index_a, rai::uint256_union & prv_a)
{
blake2b_state hash;
blake2b_init (&hash, prv_a.bytes.size ());
blake2b_update (&hash, seed_a.bytes.data (), seed_a.bytes.size ());
rai::uint256_union index (index_a);
blake2b_update (&hash, reinterpret_cast <uint8_t *> (&index.dwords [7]), sizeof (uint32_t));
blake2b_final (&hash, prv_a.bytes.data (), prv_a.bytes.size ());
}
bool rai::validate_message (rai::public_key const & public_key, rai::uint256_union const & message, rai::uint512_union const & signature)
{
auto result (0 != ed25519_sign_open (message.bytes.data (), sizeof (message.bytes), public_key.bytes.data (), signature.bytes.data ()));
return result;
}
rai::uint128_union::uint128_union (std::string const & string_a)
{
decode_hex (string_a);
}
rai::uint128_union::uint128_union (uint64_t value_a)
{
*this = rai::uint128_t (value_a);
}
rai::uint128_union::uint128_union (rai::uint128_t const & value_a)
{
rai::uint128_t number_l (value_a);
for (auto i (bytes.rbegin ()), n (bytes.rend ()); i != n; ++i)
{
*i = ((number_l) & 0xff).convert_to <uint8_t> ();
number_l >>= 8;
}
}
bool rai::uint128_union::operator == (rai::uint128_union const & other_a) const
{
return qwords [0] == other_a.qwords [0] && qwords [1] == other_a.qwords [1];
}
bool rai::uint128_union::operator != (rai::uint128_union const & other_a) const
{
return !(*this == other_a);
}
bool rai::uint128_union::operator < (rai::uint128_union const & other_a) const
{
return number () < other_a.number ();
}
bool rai::uint128_union::operator > (rai::uint128_union const & other_a) const
{
return number () > other_a.number ();
}
rai::uint128_t rai::uint128_union::number () const
{
rai::uint128_t result;
auto shift (0);
for (auto i (bytes.begin ()), n (bytes.end ()); i != n; ++i)
{
result <<= shift;
result |= *i;
shift = 8;
}
return result;
}
void rai::uint128_union::encode_hex (std::string & text) const
{
assert (text.empty ());
std::stringstream stream;
stream << std::hex << std::noshowbase << std::setw (32) << std::setfill ('0');
stream << number ();
text = stream.str ();
}
bool rai::uint128_union::decode_hex (std::string const & text)
{
auto result (text.size () > 32);
if (!result)
{
std::stringstream stream (text);
stream << std::hex << std::noshowbase;
rai::uint128_t number_l;
try
{
stream >> number_l;
*this = number_l;
if (!stream.eof ())
{
result = true;
}
}
catch (std::runtime_error &)
{
result = true;
}
}
return result;
}
void rai::uint128_union::encode_dec (std::string & text) const
{
assert (text.empty ());
std::stringstream stream;
stream << std::dec << std::noshowbase;
stream << number ();
text = stream.str ();
}
bool rai::uint128_union::decode_dec (std::string const & text)
{
auto result (text.size () > 39);
if (!result)
{
std::stringstream stream (text);
stream << std::dec << std::noshowbase;
rai::uint128_t number_l;
try
{
stream >> number_l;
*this = number_l;
}
catch (std::runtime_error &)
{
result = true;
}
}
return result;
}
void rai::uint128_union::clear ()
{
qwords.fill (0);
}
bool rai::uint128_union::is_zero () const
{
return qwords [0] == 0 && qwords [1] == 0;
}
std::string rai::uint128_union::to_string () const
{
std::string result;
encode_hex (result);
return result;
}
std::string rai::uint128_union::to_string_dec () const
{
std::string result;
encode_dec (result);
return result;
}
| 1 | 12,901 |
This is to prevent parsing things like "1.1" as "1" until we implement it properly.
|
nanocurrency-nano-node
|
cpp
|
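The commit message above explains the intent of the patch: after extracting a number, the new `!stream.eof ()` check rejects input with unconsumed trailing characters, so text like "1.1" is reported as an error instead of being silently parsed as "1". Below is a minimal standalone sketch of that idea (not code from the nano codebase), using a plain `unsigned long long` in place of `rai::uint256_t`:

```cpp
#include <iostream>
#include <sstream>
#include <string>

// Returns true on error, mirroring the decode_dec convention used in the patch.
bool decode_dec_strict (std::string const & text, unsigned long long & out)
{
	std::istringstream stream (text);
	stream >> std::dec >> out;
	if (stream.fail ())
	{
		return true; // nothing numeric could be extracted
	}
	// The check added by the patch: if the stream is not at EOF, characters such
	// as the ".1" in "1.1" were left unparsed, so the whole input is rejected.
	return !stream.eof ();
}

int main ()
{
	unsigned long long value (0);
	std::cout << decode_dec_strict ("1", value) << '\n';   // 0: parses cleanly as 1
	std::cout << decode_dec_strict ("1.1", value) << '\n'; // 1: trailing ".1" rejected
	std::cout << decode_dec_strict ("abc", value) << '\n'; // 1: not a number
}
```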
@@ -25,7 +25,7 @@ import os
import shutil
from nupic.data.file_record_stream import FileRecordStream
-from nupic.frameworks.opf.experiment_runner import runExperiment
+from nupic.frameworks.opf.experiment_runner import runExperiment, _getCheckpointParentDir
from nupic.support import initLogging
from nupic.support.unittesthelpers.testcasebase import (
unittest, TestCaseBase as HelperTestCaseBase)
| 1 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import shutil
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.opf.experiment_runner import runExperiment
from nupic.support import initLogging
from nupic.support.unittesthelpers.testcasebase import (
unittest, TestCaseBase as HelperTestCaseBase)
_EXPERIMENT_BASE = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "experiments")
class MyTestCaseBase(HelperTestCaseBase):
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
@staticmethod
def getOpfNonTemporalPredictionFilepath(experimentDir, taskLabel):
path = os.path.join(experimentDir,
"inference",
"%s.nontemporal.predictionLog.csv" % taskLabel)
return os.path.abspath(path)
@staticmethod
def getOpfTemporalPredictionFilepath(experimentDir, taskLabel):
path = os.path.join(experimentDir,
"inference",
"%s.temporal.predictionLog.csv" % taskLabel)
return os.path.abspath(path)
def compareOPFPredictionFiles(self, path1, path2, temporal,
maxMismatches=None):
""" Compare temporal or non-temporal predictions for the given experiment
that just finished executing
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiments.
maxMismatches: Maximum number of row mismatches to report before
terminating the comparison; None means: report all
mismatches
Returns: True if equal; False if different
"""
experimentLabel = "%s prediction comparison" % \
("Temporal" if temporal else "Non-Temporal")
print "%s: Performing comparison of OPF prediction CSV files %r and %r" % (
experimentLabel, path1, path2)
# Open CSV readers
#
self.assertTrue(
os.path.isfile(path1),
msg="OPF prediction file path1 %s doesn't exist or is not a file" % (
path1))
(opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)
self.assertTrue(
os.path.isfile(path2),
msg="OPF prediction file path2 %s doesn't exist or is not a file" % (
path2))
(opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)
self.assertEqual(len(opf1FieldNames), len(opf2FieldNames),
("%s: Mismatch in number of prediction columns: "
"opf1: %s, opf2: %s") % (
experimentLabel, len(opf1FieldNames),
len(opf2FieldNames)))
self.assertEqual(opf1FieldNames, opf2FieldNames)
# Each data row is assumed to be arranged as follows:
#
# reset, actual-field1, prediction-field1, actual-field2,
# prediction-field2, etc.
#
# Presently, we only compare the predicted values that need to match.
opf1EOF = False
opf2EOF = False
opf1CurrentDataRowIndex = -1
opf2CurrentDataRowIndex = -1
if temporal:
# Skip the first data rows for temporal tests, since they don't contain
# prediction values.
_skipOpf1Row = opf1CsvReader.next()
opf1CurrentDataRowIndex += 1
_skipOpf2Row = opf2CsvReader.next()
opf2CurrentDataRowIndex += 1
fieldsIndexesToCompare = tuple(xrange(2, len(opf1FieldNames), 2))
self.assertGreater(len(fieldsIndexesToCompare), 0)
print ("%s: Comparing fields at indexes: %s; "
"opf1Labels: %s; opf2Labels: %s") % (
experimentLabel,
fieldsIndexesToCompare,
[opf1FieldNames[i] for i in fieldsIndexesToCompare],
[opf2FieldNames[i] for i in fieldsIndexesToCompare])
for i in fieldsIndexesToCompare:
self.assertTrue(opf1FieldNames[i].endswith("predicted"),
msg="%r doesn't end with 'predicted'" % opf1FieldNames[i])
self.assertTrue(opf2FieldNames[i].endswith("predicted"),
msg="%r doesn't end with 'predicted'" % opf2FieldNames[i])
mismatchCount = 0
while True:
try:
opf1Row = opf1CsvReader.next()
except StopIteration:
opf1EOF = True
else:
opf1CurrentDataRowIndex += 1
try:
opf2Row = opf2CsvReader.next()
except StopIteration:
opf2EOF = True
else:
opf2CurrentDataRowIndex += 1
if opf1EOF != opf2EOF:
print ("%s: ERROR: Data row counts mismatch: "
"opf1EOF: %s, opf1CurrentDataRowIndex: %s; "
"opf2EOF: %s, opf2CurrentDataRowIndex: %s") % (
experimentLabel,
opf1EOF, opf1CurrentDataRowIndex,
opf2EOF, opf2CurrentDataRowIndex)
return False
if opf1EOF and opf2EOF:
# Done with both prediction datasets
break
# Compare the rows
self.assertEqual(len(opf1Row), len(opf2Row))
for i in fieldsIndexesToCompare:
opf1FloatValue = float(opf1Row[i])
opf2FloatValue = float(opf2Row[i])
if opf1FloatValue != opf2FloatValue:
mismatchCount += 1
print ("%s: ERROR: mismatch in "
"prediction values: dataRowIndex: %s, fieldIndex: %s (%r); "
"opf1FieldValue: <%s>, opf2FieldValue: <%s>; "
"opf1FieldValueAsFloat: %s, opf2FieldValueAsFloat: %s; "
"opf1Row: %s, opf2Row: %s") % (
experimentLabel,
opf1CurrentDataRowIndex,
i,
opf1FieldNames[i],
opf1Row[i],
opf2Row[i],
opf1FloatValue,
opf2FloatValue,
opf1Row,
opf2Row)
# Stop comparison if we exceeded the allowed number of mismatches
if maxMismatches is not None and mismatchCount >= maxMismatches:
break
if mismatchCount != 0:
print "%s: ERROR: there were %s mismatches between %r and %r" % (
experimentLabel, mismatchCount, path1, path2)
return False
# A difference here would indicate a logic error in this method
self.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)
print ("%s: Comparison of predictions "
"completed: OK; number of prediction rows examined: %s; "
"path1: %r; path2: %r") % \
(experimentLabel,
opf1CurrentDataRowIndex + 1,
path1,
path2)
return True
def _openOpfPredictionCsvFile(self, filepath):
""" Open an OPF prediction CSV file and advance it to the first data row
Returns: the tuple (csvReader, fieldNames), where 'csvReader' is the
csv reader object, and 'fieldNames' is a sequence of field
names.
"""
# Open the OPF prediction file
csvReader = self._openCsvFile(filepath)
# Advance it past the three NUPIC header lines
names = csvReader.next()
_types = csvReader.next()
_specials = csvReader.next()
return (csvReader, names)
@staticmethod
def _openCsvFile(filepath):
# We'll be operating on csvs with arbitrarily long fields
size = 2**27
csv.field_size_limit(size)
rawFileObj = open(filepath, 'r')
csvReader = csv.reader(rawFileObj, dialect='excel')
return csvReader
def _testSamePredictions(self, experiment, predSteps, checkpointAt,
predictionsFilename, additionalFields=None):
""" Test that we get the same predictions out from the following two
scenarios:
a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
a, followed by b: Run the network for 'a' iterations, save it, load it
back in, then run for 'b' iterations.
Parameters:
-----------------------------------------------------------------------
experiment: base directory of the experiment. This directory should
contain the following:
base.py
a_plus_b/description.py
a/description.py
b/description.py
The sub-directory description files should import the
base.py and only change the first and last record used
from the data file.
predSteps: Number of steps ahead predictions are for
checkpointAt: Number of iterations that 'a' runs for.
IMPORTANT: This must match the number of records that
a/description.py runs for - it is NOT dynamically stuffed into
the a/description.py.
predictionsFilename: The name of the predictions file that the OPF
generates for this experiment (for example
'DefaultTask.NontemporalMultiStep.predictionLog.csv')
"""
# Get the 3 sub-experiment directories
aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")
# Run a+b
_aPlusBExp = runExperiment(args=[aPlusBExpDir])
# Run a, then copy the saved checkpoint into the b directory
_aExp = runExperiment(args=[aExpDir])
if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
dst=os.path.join(bExpDir, 'savedmodels'))
_bExp = runExperiment(args=[bExpDir, '--load=DefaultTask'])
# Now, compare the predictions at the end of a+b to those in b.
aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
predictionsFilename))
bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
predictionsFilename))
colNames = [x[0] for x in aPlusBPred.getFields()]
actValueColIdx = colNames.index('multiStepPredictions.actual')
predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))
# Skip past the 'a' records in aPlusB
for i in range(checkpointAt):
aPlusBPred.next()
# Now, read through the records that don't have predictions yet
for i in range(predSteps):
aPlusBPred.next()
bPred.next()
# Now, compare predictions in the two files
rowIdx = checkpointAt + predSteps + 4 - 1
epsilon = 0.0001
while True:
rowIdx += 1
try:
rowAPB = aPlusBPred.next()
rowB = bPred.next()
# Compare actuals
self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
"Mismatch in actual values: row %d of a+b has %s and row %d of "
"b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
rowB[actValueColIdx]))
# Compare predictions, within nearest epsilon
predAPB = eval(rowAPB[predValueColIdx])
predB = eval(rowB[predValueColIdx])
# Sort with highest probabilities first
predAPB = [(a, b) for b, a in predAPB.items()]
predB = [(a, b) for b, a in predB.items()]
predAPB.sort(reverse=True)
predB.sort(reverse=True)
if additionalFields is not None:
for additionalField in additionalFields:
fieldIdx = colNames.index(additionalField)
self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
"Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
" and row %d of b has value: %s" % \
(additionalField, rowIdx, rowAPB[fieldIdx],
rowIdx-checkpointAt, rowB[fieldIdx]))
self.assertEqual(len(predAPB), len(predB),
"Mismatch in predicted values: row %d of a+b has %d predictions: "
"\n (%s) and row %d of b has %d predictions:\n (%s)" % \
(rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
predB))
for i in range(len(predAPB)):
(aProb, aValue) = predAPB[i]
(bProb, bValue) = predB[i]
self.assertLess(abs(aValue-bValue), epsilon,
"Mismatch in predicted values: row %d of a+b predicts value %s "
"and row %d of b predicts %s" % (rowIdx, aValue,
rowIdx-checkpointAt, bValue))
self.assertLess(abs(aProb-bProb), epsilon,
"Mismatch in probabilities: row %d of a+b predicts %s with "
"probability %s and row %d of b predicts %s with probability %s" \
% (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))
except StopIteration:
break
print "Predictions match!"
@staticmethod
def _testBackwardsCompatibility(experiment, checkpointName):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
Parameters:
-----------------------------------------------------------------------
experiment: Directory of the experiment.
checkpointName: which checkpoint to verify
"""
# Get the experiment directories
expDir = os.path.join(_EXPERIMENT_BASE, experiment)
# Copy the pertinent checkpoint
if os.path.exists(os.path.join(expDir, 'savedmodels')):
shutil.rmtree(os.path.join(expDir, 'savedmodels'))
shutil.copytree(src=os.path.join(expDir, checkpointName),
dst=os.path.join(expDir, 'savedmodels'))
# Run it from the checkpoint
_aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask',
'--noCheckpoint'])
class PositiveTests(MyTestCaseBase):
def test_NonTemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(
experiment="non_temporal_multi_step", predSteps=24, checkpointAt=250,
predictionsFilename=
"DefaultTask.NontemporalMultiStep.predictionLog.csv")
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_multi_step", predSteps=24,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalMultiStep.predictionLog.csv')
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalAnomaly(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_anomaly", predSteps=1,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
additionalFields=['anomalyScore'])
def test_BackwardsCompatibility(self):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
"""
self._testBackwardsCompatibility(
os.path.join('backwards_compatibility', 'a'),
'savedmodels_2012-10-05')
if __name__ == "__main__":
initLogging(verbose=True)
unittest.main()
| 1 | 20,154 |
Can we change `_getCheckpointParentDir` to be a public function explicitly? It seems like something that clients of experiment runner may wish to use.
|
numenta-nupic
|
py
|
@@ -2050,6 +2050,13 @@ class AdvancedPanelControls(wx.Panel):
self.UIAInMSWordCheckBox.SetValue(config.conf["UIA"]["useInMSWordWhenAvailable"])
self.UIAInMSWordCheckBox.defaultValue = self._getDefaultValue(["UIA", "useInMSWordWhenAvailable"])
+ # Translators: This is the label for a checkbox in the
+ # Advanced settings panel.
+ label = _("Use UI Automation to access the Windows Console (restart NVDA for changes to take effect)")
+ self.ConsoleUIACheckBox=UIAGroup.addItem(wx.CheckBox(self, label=label))
+ self.ConsoleUIACheckBox.SetValue(config.conf["UIA"]["consoleUIA"])
+ self.ConsoleUIACheckBox.defaultValue = self._getDefaultValue(["UIA", "consoleUIA"])
+
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Browse mode")
| 1 |
# -*- coding: UTF-8 -*-
#settingsDialogs.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2019 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Rui Batista, Joseph Lee, Heiko Folkerts, Zahari Yurukov, Leonard de Ruijter, Derek Riemer, Babbage B.V., Davy Kager, Ethan Holliger
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
from abc import abstractmethod
from six import with_metaclass
import os
import copy
import re
import wx
from wx.lib import scrolledpanel
from wx.lib.expando import ExpandoTextCtrl
import wx.lib.newevent
import winUser
import logHandler
import installer
from synthDriverHandler import *
import config
import languageHandler
import speech
import gui
import globalVars
from logHandler import log
import nvwave
import audioDucking
import speechDictHandler
import queueHandler
import braille
import brailleTables
import brailleInput
import core
import keyboardHandler
import characterProcessing
from . import guiHelper
try:
import updateCheck
except RuntimeError:
updateCheck = None
import inputCore
from . import nvdaControls
from driverHandler import *
import touchHandler
import winVersion
import weakref
import time
import keyLabels
from dpiScalingHelper import DpiScalingHelperMixin
class SettingsDialog(with_metaclass(guiHelper.SIPABCMeta, wx.Dialog, DpiScalingHelperMixin)):
"""A settings dialog.
A settings dialog consists of one or more settings controls and OK and Cancel buttons and an optional Apply button.
Action may be taken in response to the OK, Cancel or Apply buttons.
To use this dialog:
* Set L{title} to the title of the dialog.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, override L{postInit} to perform actions after the dialog is created, such as setting the focus. Be
aware that L{postInit} is also called by L{onApply}.
* Optionally, extend one or more of L{onOk}, L{onCancel} or L{onApply} to perform actions in response to the
OK, Cancel or Apply buttons, respectively.
@ivar title: The title of the dialog.
@type title: str
"""
class MultiInstanceError(RuntimeError): pass
_DIALOG_CREATED_STATE = 0
_DIALOG_DESTROYED_STATE = 1
# holds instances of SettingsDialogs as keys, and state as the value
_instances=weakref.WeakKeyDictionary()
title = ""
shouldSuspendConfigProfileTriggers = True
def __new__(cls, *args, **kwargs):
instanceItems = SettingsDialog._instances.items()
instancesOfSameClass = (
(dlg, state) for dlg, state in instanceItems if isinstance(dlg, cls)
)
firstMatchingInstance, state = next(instancesOfSameClass, (None, None))
multiInstanceAllowed = kwargs.get('multiInstanceAllowed', False)
if log.isEnabledFor(log.DEBUG):
instancesState = dict(SettingsDialog._instances)
log.debug(
"Creating new settings dialog (multiInstanceAllowed:{}). "
"State of _instances {!r}".format(multiInstanceAllowed, instancesState)
)
if state is cls._DIALOG_CREATED_STATE and not multiInstanceAllowed:
raise SettingsDialog.MultiInstanceError("Only one instance of SettingsDialog can exist at a time")
if state is cls._DIALOG_DESTROYED_STATE and not multiInstanceAllowed:
# the dialog has been destroyed by wx, but the instance is still available. This indicates there is something
# keeping it alive.
log.error("Opening new settings dialog while instance still exists: {!r}".format(firstMatchingInstance))
obj = super(SettingsDialog, cls).__new__(cls, *args, **kwargs)
SettingsDialog._instances[obj] = cls._DIALOG_CREATED_STATE
return obj
def _setInstanceDestroyedState(self):
if log.isEnabledFor(log.DEBUG):
instancesState = dict(SettingsDialog._instances)
log.debug(
"Setting state to destroyed for instance: {!r}\n"
"Current _instances {!r}".format(self, instancesState)
)
if self in SettingsDialog._instances:
SettingsDialog._instances[self] = self._DIALOG_DESTROYED_STATE
def __init__(
self, parent,
resizeable=False,
hasApplyButton=False,
settingsSizerOrientation=wx.VERTICAL,
multiInstanceAllowed=False
):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param resizeable: True if the settings dialog should be resizable by the user, only set this if
you have tested that the components resize correctly.
@type resizeable: bool
@param hasApplyButton: C{True} to add an apply button to the dialog; defaults to C{False} for backwards compatibility.
@type hasApplyButton: bool
@param settingsSizerOrientation: Either wx.VERTICAL or wx.HORIZONTAL. This controls the orientation of the
sizer that is passed into L{makeSettings}. The default is wx.VERTICAL.
@type settingsSizerOrientation: wx.Orientation
@param multiInstanceAllowed: Whether multiple instances of SettingsDialog may exist.
Note that still only one instance of a particular SettingsDialog subclass may exist at one time.
@type multiInstanceAllowed: bool
"""
if gui._isDebug():
startTime = time.time()
windowStyle = wx.DEFAULT_DIALOG_STYLE | (wx.RESIZE_BORDER if resizeable else 0)
wx.Dialog.__init__(self, parent, title=self.title, style=windowStyle)
DpiScalingHelperMixin.__init__(self, self.GetHandle())
self.hasApply = hasApplyButton
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(settingsSizerOrientation)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL | wx.EXPAND, proportion=1)
self.mainSizer.Add(wx.StaticLine(self), flag=wx.EXPAND)
buttonSizer = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The Ok button on a NVDA dialog. This button will accept any changes and dismiss the dialog.
buttonSizer.addButton(self, label=_("OK"), id=wx.ID_OK)
# Translators: The cancel button on a NVDA dialog. This button will discard any changes and dismiss the dialog.
buttonSizer.addButton(self, label=_("Cancel"), id=wx.ID_CANCEL)
if hasApplyButton:
# Translators: The Apply button on a NVDA dialog. This button will accept any changes but will not dismiss the dialog.
buttonSizer.addButton(self, label=_("Apply"), id=wx.ID_APPLY)
self.mainSizer.Add(
buttonSizer.sizer,
border=guiHelper.BORDER_FOR_DIALOGS,
flag=wx.ALL | wx.ALIGN_RIGHT
)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
self.Bind(wx.EVT_BUTTON, self.onOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
self.Bind(wx.EVT_BUTTON, self.onApply, id=wx.ID_APPLY)
self.Bind(wx.EVT_CHAR_HOOK, self._enterActivatesOk_ctrlSActivatesApply)
# Garbage collection normally handles removing the settings instance, however this may not happen immediately
# after a window is closed, or may be blocked by a circular reference. So instead, remove when the window is
# destroyed.
self.Bind(wx.EVT_WINDOW_DESTROY, self._onWindowDestroy)
self.postInit()
self.CentreOnScreen()
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
def _enterActivatesOk_ctrlSActivatesApply(self, evt):
"""Listens for keyboard input and triggers ok button on enter and triggers apply button when control + S is
pressed. Cancel behavior is built into wx.
Pressing enter will also close the dialog when a list has focus
(e.g. the list of symbols in the symbol pronunciation dialog).
Without this custom handler, enter would propagate to the list control (wx ticket #3725).
"""
if evt.KeyCode in (wx.WXK_RETURN, wx.WXK_NUMPAD_ENTER):
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_OK))
elif self.hasApply and evt.UnicodeKey == ord(u'S') and evt.controlDown:
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_APPLY))
else:
evt.Skip()
@abstractmethod
def makeSettings(self, sizer):
"""Populate the dialog with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def postInit(self):
"""Called after the dialog has been created.
For example, this might be used to set focus to the desired control.
Sub-classes may override this method.
"""
def onOk(self, evt):
"""Take action in response to the OK button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_OK)
def onCancel(self, evt):
"""Take action in response to the Cancel button being pressed.
Sub-classes may extend this method.
This base method should always be called to clean up the dialog.
"""
self.DestroyChildren()
self.Destroy()
self.SetReturnCode(wx.ID_CANCEL)
def onApply(self, evt):
"""Take action in response to the Apply button being pressed.
Sub-classes may extend or override this method.
This base method should be called to run the postInit method.
"""
self.postInit()
self.SetReturnCode(wx.ID_APPLY)
def _onWindowDestroy(self, evt):
evt.Skip()
self._setInstanceDestroyedState()
# An event and event binder that will notify the containers that they should
# redo the layout in whatever way makes sense for their particular content.
_RWLayoutNeededEvent, EVT_RW_LAYOUT_NEEDED = wx.lib.newevent.NewCommandEvent()
class SettingsPanel(with_metaclass(guiHelper.SIPABCMeta, wx.Panel, DpiScalingHelperMixin)):
"""A settings panel, to be used in a multi category settings dialog.
A settings panel consists of one or more settings controls.
Action may be taken in response to the parent dialog's OK or Cancel buttons.
To use this panel:
* Set L{title} to the title of the category.
* Override L{makeSettings} to populate a given sizer with the settings controls.
* Optionally, extend L{onPanelActivated} to perform actions after the category has been selected in the list of categories, such as synthesizer or braille display list population.
* Optionally, extend L{onPanelDeactivated} to perform actions after the category has been deselected (i.e. another category is selected) in the list of categories.
* Optionally, extend one or both of L{onSave} or L{onDiscard} to perform actions in response to the parent dialog's OK or Cancel buttons, respectively.
* Optionally, extend one or both of L{isValid} or L{postSave} to perform validation before or steps after saving, respectively.
@ivar title: The title of the settings panel, also listed in the list of settings categories.
@type title: str
"""
title=""
panelDescription=u""
def __init__(self, parent):
"""
@param parent: The parent for this panel; C{None} for no parent.
@type parent: wx.Window
"""
if gui._isDebug():
startTime = time.time()
wx.Panel.__init__(self, parent, wx.ID_ANY)
DpiScalingHelperMixin.__init__(self, self.GetHandle())
self.mainSizer=wx.BoxSizer(wx.VERTICAL)
self.settingsSizer=wx.BoxSizer(wx.VERTICAL)
self.makeSettings(self.settingsSizer)
self.mainSizer.Add(self.settingsSizer, flag=wx.ALL)
self.mainSizer.Fit(self)
self.SetSizer(self.mainSizer)
if gui._isDebug():
log.debug("Loading %s took %.2f seconds"%(self.__class__.__name__, time.time() - startTime))
@abstractmethod
def makeSettings(self, sizer):
"""Populate the panel with settings controls.
Subclasses must override this method.
@param sizer: The sizer to which to add the settings controls.
@type sizer: wx.Sizer
"""
raise NotImplementedError
def onPanelActivated(self):
"""Called after the panel has been activated (i.e. de corresponding category is selected in the list of categories).
For example, this might be used for resource intensive tasks.
Sub-classes should extendthis method.
"""
self.Show()
def onPanelDeactivated(self):
"""Called after the panel has been deactivated (i.e. another category has been selected in the list of categories).
Sub-classes should extend this method.
"""
self.Hide()
@abstractmethod
def onSave(self):
"""Take action in response to the parent's dialog OK or apply button being pressed.
Sub-classes should override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when OK is pressed.
"""
raise NotImplementedError
def isValid(self):
"""Evaluate whether the current circumstances of this panel are valid
and allow saving all the settings in a L{MultiCategorySettingsDialog}.
Sub-classes may extend this method.
@returns: C{True} if validation should continue,
C{False} otherwise.
@rtype: bool
"""
return True
def postSave(self):
"""Take action whenever saving settings for all panels in a L{MultiCategorySettingsDialog} succeeded.
Sub-classes may extend this method.
"""
def onDiscard(self):
"""Take action in response to the parent's dialog Cancel button being pressed.
Sub-classes may override this method.
MultiCategorySettingsDialog is responsible for cleaning up the panel when Cancel is pressed.
"""
def _sendLayoutUpdatedEvent(self):
"""Notify any wx parents that may be listening that they should redo their layout in whatever way
makes sense for them. It is expected that sub-classes call this method in response to changes in
the number of GUI items in their panel.
"""
event = _RWLayoutNeededEvent(self.GetId())
event.SetEventObject(self)
self.GetEventHandler().ProcessEvent(event)
class MultiCategorySettingsDialog(SettingsDialog):
"""A settings dialog with multiple settings categories.
A multi category settings dialog consists of a list view with settings categories on the left side,
and a settings panel on the right side of the dialog.
Furthermore, in addition to Ok and Cancel buttons, it has an Apply button by default,
which is different from the default behavior of L{SettingsDialog}.
To use this dialog: set title and populate L{categoryClasses} with subclasses of SettingsPanel.
Make sure that L{categoryClasses} only contains panels that are available on a particular system.
For example, if a certain category of settings is only supported on Windows 10 and higher,
that category should be left out of L{categoryClasses}
"""
title=""
categoryClasses=[]
class CategoryUnavailableError(RuntimeError): pass
def __init__(self, parent, initialCategory=None):
"""
@param parent: The parent for this dialog; C{None} for no parent.
@type parent: wx.Window
@param initialCategory: The initial category to select when opening this dialog
@type parent: SettingsPanel
"""
if initialCategory and not issubclass(initialCategory,SettingsPanel):
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise TypeError("initialCategory should be an instance of SettingsPanel")
if initialCategory and initialCategory not in self.categoryClasses:
if gui._isDebug():
log.debug("Unable to open category: {}".format(initialCategory), stack_info=True)
raise MultiCategorySettingsDialog.CategoryUnavailableError(
"The provided initial category is not a part of this dialog"
)
self.initialCategory = initialCategory
self.currentCategory = None
self.setPostInitFocus = None
# dictionary key is index of category in self.catList, value is the instance. Partially filled, check for KeyError
self.catIdToInstanceMap = {}
super(MultiCategorySettingsDialog, self).__init__(
parent,
resizeable=True,
hasApplyButton=True,
settingsSizerOrientation=wx.HORIZONTAL
)
# setting the size must be done after the parent is constructed.
self.SetMinSize(self.scaleSize(self.MIN_SIZE))
self.SetSize(self.scaleSize(self.INITIAL_SIZE))
# the size has changed, so recenter on the screen
self.CentreOnScreen()
# Initial / min size for the dialog. This size was chosen as a medium fit, so the
# smaller settings panels are not surrounded by too much space but most of
# the panels fit. Vertical scrolling is acceptable. Horizontal scrolling less
# so, the width was chosen to eliminate horizontal scroll bars. If a panel
# exceeds the initial width a debugWarning will be added to the log.
INITIAL_SIZE = (800, 480)
MIN_SIZE = (470, 240) # Min height required to show the OK, Cancel, Apply buttons
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the list of categories in a multi category settings dialog.
categoriesLabelText=_("&Categories:")
categoriesLabel = wx.StaticText(self, label=categoriesLabelText)
# since the categories list and the container both expand in height, the y
# portion is essentially a "min" height.
# These sizes are set manually so that the initial proportions within the dialog look correct. If these sizes are
# not given, then I believe the proportion arguments (as given to the gridBagSizer.AddGrowableColumn) are used
# to set their relative sizes. We want the proportion argument to be used for resizing, but not the initial size.
catListDim = (150, 10)
catListDim = self.scaleSize(catListDim)
initialScaledWidth = self.scaleSize(self.INITIAL_SIZE[0])
spaceForBorderWidth = self.scaleSize(20)
catListWidth = catListDim[0]
containerDim = (initialScaledWidth - catListWidth - spaceForBorderWidth, self.scaleSize(10))
self.catListCtrl = nvdaControls.AutoWidthColumnListCtrl(
self,
autoSizeColumn=1,
size=catListDim,
style=wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.LC_NO_HEADER
)
# This list consists of only one column.
# The provided column header is just a placeholder, as it is hidden due to the wx.LC_NO_HEADER style flag.
self.catListCtrl.InsertColumn(0,categoriesLabelText)
self.container = scrolledpanel.ScrolledPanel(
parent = self,
style = wx.TAB_TRAVERSAL | wx.BORDER_THEME,
size=containerDim
)
# The min size is reset so that they can be reduced to below their "size" constraint.
self.container.SetMinSize((1,1))
self.catListCtrl.SetMinSize((1,1))
self.containerSizer = wx.BoxSizer(wx.VERTICAL)
self.container.SetSizer(self.containerSizer)
for cls in self.categoryClasses:
if not issubclass(cls,SettingsPanel):
raise RuntimeError("Invalid category class %s provided in %s.categoryClasses"%(cls.__name__,self.__class__.__name__))
# It's important here that the listItems are added to catListCtrl in the same order that they exist in categoryClasses.
# the ListItem index / Id is used to index categoryClasses, and used as the key in catIdToInstanceMap
self.catListCtrl.Append((cls.title,))
# populate the GUI with the initial category
initialCatIndex = 0 if not self.initialCategory else self.categoryClasses.index(self.initialCategory)
self._doCategoryChange(initialCatIndex)
self.catListCtrl.Select(initialCatIndex)
# we must focus the initial category in the category list.
self.catListCtrl.Focus(initialCatIndex)
self.setPostInitFocus = self.container.SetFocus if self.initialCategory else self.catListCtrl.SetFocus
self.gridBagSizer=gridBagSizer=wx.GridBagSizer(
hgap=guiHelper.SPACE_BETWEEN_BUTTONS_HORIZONTAL,
vgap=guiHelper.SPACE_BETWEEN_BUTTONS_VERTICAL
)
# add the label, the categories list, and the settings panel to a 2 by 2 grid.
# The label should span two columns, so that the start of the categories list
# and the start of the settings panel are at the same vertical position.
gridBagSizer.Add(categoriesLabel, pos=(0,0), span=(1,2))
gridBagSizer.Add(self.catListCtrl, pos=(1,0), flag=wx.EXPAND)
gridBagSizer.Add(self.container, pos=(1,1), flag=wx.EXPAND)
# Make the row with the listCtrl and settings panel grow vertically.
gridBagSizer.AddGrowableRow(1)
# Make the columns with the listCtrl and settings panel grow horizontally if the dialog is resized.
# They should grow 1:3, since the settings panel is much more important, and already wider
# than the listCtrl.
gridBagSizer.AddGrowableCol(0, proportion=1)
gridBagSizer.AddGrowableCol(1, proportion=3)
sHelper.sizer.Add(gridBagSizer, flag=wx.EXPAND, proportion=1)
self.container.Layout()
self.catListCtrl.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onCategoryChange)
self.Bind(wx.EVT_CHAR_HOOK, self.onCharHook)
self.Bind(EVT_RW_LAYOUT_NEEDED, self._onPanelLayoutChanged)
def _getCategoryPanel(self, catId):
panel = self.catIdToInstanceMap.get(catId, None)
if not panel:
try:
cls = self.categoryClasses[catId]
except IndexError:
raise ValueError("Unable to create panel for unknown category ID: {}".format(catId))
panel = cls(parent=self.container)
panel.Hide()
self.containerSizer.Add(panel, flag=wx.ALL, border=guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
self.catIdToInstanceMap[catId] = panel
panelWidth = panel.Size[0]
availableWidth = self.containerSizer.GetSize()[0]
if panelWidth > availableWidth and gui._isDebug():
log.debugWarning(
("Panel width ({1}) too large for: {0} Try to reduce the width of this panel, or increase width of " +
"MultiCategorySettingsDialog.MIN_SIZE"
).format(cls, panel.Size[0])
)
panel.SetLabel(panel.title)
import oleacc
panel.server = nvdaControls.AccPropertyOverride(
panel,
propertyAnnotations={
oleacc.PROPID_ACC_ROLE: oleacc.ROLE_SYSTEM_PROPERTYPAGE, # change the role from pane to property page
oleacc.PROPID_ACC_DESCRIPTION: panel.panelDescription, # set a description
}
)
return panel
def postInit(self):
# By default after the dialog is created, focus lands on the button group for wx.Dialogs. However this is not where
# we want focus. We only want to modify focus after creation (makeSettings), but postInit is also called after
# onApply, so we reset the setPostInitFocus function.
if self.setPostInitFocus:
self.setPostInitFocus()
self.setPostInitFocus = None
else:
# when postInit is called without a setPostInitFocus ie because onApply was called
# then set the focus to the listCtrl. This is a good starting point for a "fresh state"
self.catListCtrl.SetFocus()
def onCharHook(self,evt):
"""Listens for keyboard input and switches panels for control+tab"""
if not self.catListCtrl:
# Dialog has not yet been constructed.
# Allow another handler to take the event, and return early.
evt.Skip()
return
key = evt.GetKeyCode()
listHadFocus = self.catListCtrl.HasFocus()
if evt.ControlDown() and key==wx.WXK_TAB:
# Focus the categories list. If we don't, the panel won't hide correctly
if not listHadFocus:
self.catListCtrl.SetFocus()
index = self.catListCtrl.GetFirstSelected()
newIndex=index-1 if evt.ShiftDown() else index+1
# Less than first wraps to the last index, greater than last wraps to first index.
newIndex=newIndex % self.catListCtrl.ItemCount
self.catListCtrl.Select(newIndex)
# we must focus the new selection in the category list to trigger the change of category.
self.catListCtrl.Focus(newIndex)
if not listHadFocus and self.currentCategory:
self.currentCategory.SetFocus()
else:
evt.Skip()
def _onPanelLayoutChanged(self,evt):
# call Layout and SetupScrolling on the container so that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
# when child elements get smaller the scrolledPanel does not
# erase the old contents and must be redrawn
self.container.Refresh()
def _doCategoryChange(self, newCatId):
oldCat = self.currentCategory
# Freeze and Thaw are called to stop visual artifacts while the GUI
# is being rebuilt. Without this, the controls can sometimes be seen being
# added.
self.container.Freeze()
try:
newCat = self._getCategoryPanel(newCatId)
except ValueError as e:
newCatTitle = self.catListCtrl.GetItemText(newCatId)
log.error("Unable to change to category: {}".format(newCatTitle), exc_info=e)
return
if oldCat:
oldCat.onPanelDeactivated()
self.currentCategory = newCat
newCat.onPanelActivated()
# call Layout and SetupScrolling on the container to make sure that the controls appear in their expected locations.
self.container.Layout()
self.container.SetupScrolling()
self.container.Thaw()
def onCategoryChange(self, evt):
currentCat = self.currentCategory
newIndex = evt.GetIndex()
if not currentCat or newIndex != self.categoryClasses.index(currentCat.__class__):
self._doCategoryChange(newIndex)
else:
evt.Skip()
def _doSave(self):
for panel in self.catIdToInstanceMap.itervalues():
if panel.isValid() is False:
raise ValueError("Validation for %s blocked saving settings" % panel.__class__.__name__)
for panel in self.catIdToInstanceMap.itervalues():
panel.onSave()
for panel in self.catIdToInstanceMap.itervalues():
panel.postSave()
def onOk(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
for panel in self.catIdToInstanceMap.itervalues():
panel.Destroy()
super(MultiCategorySettingsDialog,self).onOk(evt)
def onCancel(self,evt):
for panel in self.catIdToInstanceMap.itervalues():
panel.onDiscard()
panel.Destroy()
super(MultiCategorySettingsDialog,self).onCancel(evt)
def onApply(self,evt):
try:
self._doSave()
except ValueError:
log.debugWarning("", exc_info=True)
return
super(MultiCategorySettingsDialog,self).onApply(evt)
class GeneralSettingsPanel(SettingsPanel):
# Translators: This is the label for the general settings panel.
title = _("General")
LOG_LEVELS = (
# Translators: One of the log levels of NVDA (the disabled mode turns off logging completely).
(log.OFF, _("disabled")),
# Translators: One of the log levels of NVDA (the info mode shows info as NVDA runs).
(log.INFO, _("info")),
# Translators: One of the log levels of NVDA (the debug warning shows debugging messages and warnings as NVDA runs).
(log.DEBUGWARNING, _("debug warning")),
# Translators: One of the log levels of NVDA (the input/output shows keyboard commands and/or braille commands as well as speech and/or braille output of NVDA).
(log.IO, _("input/output")),
# Translators: One of the log levels of NVDA (the debug mode shows debug messages as NVDA runs).
(log.DEBUG, _("debug"))
)
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.languageNames = languageHandler.getAvailableLanguages(presentational=True)
languageChoices = [x[1] for x in self.languageNames]
# Translators: The label for a setting in general settings to select NVDA's interface language (once selected, NVDA must be restarted; the option user default means the user's Windows language will be used).
languageLabelText = _("&Language (requires restart to fully take effect):")
self.languageList=settingsSizerHelper.addLabeledControl(languageLabelText, wx.Choice, choices=languageChoices)
self.languageList.SetToolTip(wx.ToolTip("Choose the language NVDA's messages and user interface should be presented in."))
try:
self.oldLanguage=config.conf["general"]["language"]
index=[x[0] for x in self.languageNames].index(self.oldLanguage)
self.languageList.SetSelection(index)
except:
pass
if globalVars.appArgs.secure:
self.languageList.Disable()
# Translators: The label for a setting in general settings to save current configuration when NVDA exits (if it is not checked, user needs to save configuration before quitting NVDA).
self.saveOnExitCheckBox=wx.CheckBox(self,label=_("&Save configuration on exit"))
self.saveOnExitCheckBox.SetValue(config.conf["general"]["saveConfigurationOnExit"])
if globalVars.appArgs.secure:
self.saveOnExitCheckBox.Disable()
settingsSizerHelper.addItem(self.saveOnExitCheckBox)
# Translators: The label for a setting in general settings to ask before quitting NVDA (if not checked, NVDA will exit without asking the user for action).
self.askToExitCheckBox=wx.CheckBox(self,label=_("Sho&w exit options when exiting NVDA"))
self.askToExitCheckBox.SetValue(config.conf["general"]["askToExit"])
settingsSizerHelper.addItem(self.askToExitCheckBox)
# Translators: The label for a setting in general settings to play sounds when NVDA starts or exits.
self.playStartAndExitSoundsCheckBox=wx.CheckBox(self,label=_("&Play sounds when starting or exiting NVDA"))
self.playStartAndExitSoundsCheckBox.SetValue(config.conf["general"]["playStartAndExitSounds"])
settingsSizerHelper.addItem(self.playStartAndExitSoundsCheckBox)
# Translators: The label for a setting in general settings to select logging level of NVDA as it runs (available options and what they are logged are found under comments for the logging level messages themselves).
logLevelLabelText=_("L&ogging level:")
logLevelChoices = [name for level, name in self.LOG_LEVELS]
self.logLevelList = settingsSizerHelper.addLabeledControl(logLevelLabelText, wx.Choice, choices=logLevelChoices)
curLevel = log.getEffectiveLevel()
for index, (level, name) in enumerate(self.LOG_LEVELS):
if level == curLevel:
self.logLevelList.SetSelection(index)
break
else:
log.debugWarning("Could not set log level list to current log level")
# Translators: The label for a setting in general settings to allow NVDA to start after logging onto Windows (if checked, NVDA will start automatically after logging into Windows; if not, user must start NVDA by pressing the shortcut key (CTRL+Alt+N by default)).
self.startAfterLogonCheckBox = wx.CheckBox(self, label=_("&Automatically start NVDA after I log on to Windows"))
self.startAfterLogonCheckBox.SetValue(config.getStartAfterLogon())
if globalVars.appArgs.secure or not config.isInstalledCopy():
self.startAfterLogonCheckBox.Disable()
settingsSizerHelper.addItem(self.startAfterLogonCheckBox)
		# Translators: The label for a setting in general settings to allow NVDA to come up on the Windows logon screen (useful if the user needs to enter a password to log on, or if multiple user accounts are present and the user must choose the correct one).
self.startOnLogonScreenCheckBox = wx.CheckBox(self, label=_("Use NVDA on the Windows logon screen (requires administrator privileges)"))
self.startOnLogonScreenCheckBox.SetValue(config.getStartOnLogonScreen())
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.startOnLogonScreenCheckBox.Disable()
settingsSizerHelper.addItem(self.startOnLogonScreenCheckBox)
# Translators: The label for a button in general settings to copy current user settings to system settings (to allow current settings to be used in secure screens such as User Account Control (UAC) dialog).
self.copySettingsButton= wx.Button(self, label=_("Use currently saved settings on the logon and other secure screens (requires administrator privileges)"))
self.copySettingsButton.Bind(wx.EVT_BUTTON,self.onCopySettings)
if globalVars.appArgs.secure or not config.canStartOnSecureScreens():
self.copySettingsButton.Disable()
settingsSizerHelper.addItem(self.copySettingsButton)
if updateCheck:
# Translators: The label of a checkbox in general settings to toggle automatic checking for updated versions of NVDA (if not checked, user must check for updates manually).
item=self.autoCheckForUpdatesCheckBox=wx.CheckBox(self,label=_("Automatically check for &updates to NVDA"))
item.Value=config.conf["update"]["autoCheck"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
# Translators: The label of a checkbox in general settings to toggle startup notifications
# for a pending NVDA update.
item=self.notifyForPendingUpdateCheckBox=wx.CheckBox(self,label=_("Notify for &pending update on startup"))
item.Value=config.conf["update"]["startupNotification"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
			# Translators: The label of a checkbox in general settings to toggle gathering of NVDA usage statistics
item=self.allowUsageStatsCheckBox=wx.CheckBox(self,label=_("Allow the NVDA project to gather NVDA usage statistics"))
item.Value=config.conf["update"]["allowUsageStats"]
if globalVars.appArgs.secure:
item.Disable()
settingsSizerHelper.addItem(item)
def onCopySettings(self,evt):
addonsDirPath = os.path.join(globalVars.appArgs.configPath, 'addons')
if os.path.isdir(addonsDirPath) and 0 < len(os.listdir(addonsDirPath)):
# Translators: A message to warn the user when attempting to copy current
# settings to system settings.
message = _(
"Add-ons were detected in your user settings directory. "
"Copying these to the system profile could be a security risk. "
"Do you still wish to copy your settings?"
)
# Translators: The title of the warning dialog displayed when trying to
# copy settings for use in secure screens.
title = _("Warning")
style = wx.YES | wx.NO | wx.ICON_WARNING
if wx.NO == gui.messageBox(message, title, style, self):
return
progressDialog = gui.IndeterminateProgressDialog(
gui.mainFrame,
# Translators: The title of the dialog presented while settings are being copied
_("Copying Settings"),
# Translators: The message displayed while settings are being copied
# to the system configuration (for use on Windows logon etc)
_("Please wait while settings are copied to the system configuration.")
)
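		# Copy the settings in a loop so that transient failures (e.g. a file locked by
		# another process) can be retried at the user's request.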
while True:
try:
gui.ExecAndPump(config.setSystemConfigToCurrentConfig)
res=True
break
except installer.RetriableFailure:
log.debugWarning("Error when copying settings to system config",exc_info=True)
# Translators: a message dialog asking to retry or cancel when copying settings fails
message=_("Unable to copy a file. Perhaps it is currently being used by another process or you have run out of disc space on the drive you are copying to.")
# Translators: the title of a retry cancel dialog when copying settings fails
title=_("Error Copying")
if winUser.MessageBox(None,message,title,winUser.MB_RETRYCANCEL)==winUser.IDRETRY:
continue
res=False
break
except:
log.debugWarning("Error when copying settings to system config",exc_info=True)
res=False
break
progressDialog.done()
del progressDialog
if not res:
# Translators: The message displayed when errors were found while trying to copy current configuration to system settings.
gui.messageBox(_("Error copying NVDA user settings"),_("Error"),wx.OK|wx.ICON_ERROR,self)
else:
# Translators: The message displayed when copying configuration to system settings was successful.
gui.messageBox(_("Successfully copied NVDA user settings"),_("Success"),wx.OK|wx.ICON_INFORMATION,self)
def onSave(self):
newLanguage=[x[0] for x in self.languageNames][self.languageList.GetSelection()]
config.conf["general"]["language"]=newLanguage
config.conf["general"]["saveConfigurationOnExit"]=self.saveOnExitCheckBox.IsChecked()
config.conf["general"]["askToExit"]=self.askToExitCheckBox.IsChecked()
config.conf["general"]["playStartAndExitSounds"]=self.playStartAndExitSoundsCheckBox.IsChecked()
logLevel=self.LOG_LEVELS[self.logLevelList.GetSelection()][0]
config.conf["general"]["loggingLevel"]=logHandler.levelNames[logLevel]
logHandler.setLogLevelFromConfig()
if self.startAfterLogonCheckBox.IsEnabled():
config.setStartAfterLogon(self.startAfterLogonCheckBox.GetValue())
if self.startOnLogonScreenCheckBox.IsEnabled():
try:
config.setStartOnLogonScreen(self.startOnLogonScreenCheckBox.GetValue())
except (WindowsError, RuntimeError):
gui.messageBox(_("This change requires administrator privileges."), _("Insufficient Privileges"), style=wx.OK | wx.ICON_ERROR, parent=self)
if updateCheck:
config.conf["update"]["autoCheck"]=self.autoCheckForUpdatesCheckBox.IsChecked()
config.conf["update"]["allowUsageStats"]=self.allowUsageStatsCheckBox.IsChecked()
config.conf["update"]["startupNotification"]=self.notifyForPendingUpdateCheckBox.IsChecked()
updateCheck.terminate()
updateCheck.initialize()
def postSave(self):
if self.oldLanguage != config.conf["general"]["language"]:
LanguageRestartDialog(self).ShowModal()
class LanguageRestartDialog(wx.Dialog):
def __init__(self, parent):
# Translators: The title of the dialog which appears when the user changed NVDA's interface language.
super(LanguageRestartDialog, self).__init__(parent, title=_("Language Configuration Change"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: The message displayed after NVDA interface language has been changed.
sHelper.addItem(wx.StaticText(self, label=_("NVDA must be restarted for the new language to take effect.")))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartNowButton = bHelper.addButton(self, label=_("Restart &now"))
restartNowButton.Bind(wx.EVT_BUTTON, self.onRestartNowButton)
restartNowButton.SetFocus()
# Translators: The label for a button in the dialog which appears when the user changed NVDA's interface language.
restartLaterButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("Restart &later"))
restartLaterButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy())
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.CentreOnScreen()
def onRestartNowButton(self, evt):
self.Destroy()
config.conf.save()
queueHandler.queueFunction(queueHandler.eventQueue,core.restart)
class SpeechSettingsPanel(SettingsPanel):
# Translators: This is the label for the speech panel
title = _("Speech")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the synthesizer on the speech panel.
synthLabel = _("&Synthesizer")
synthBox = wx.StaticBox(self, label=synthLabel)
synthGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(synthBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(synthGroup)
		# Use an ExpandoTextCtrl because even when read-only it accepts focus from the keyboard, which
		# a standard read-only TextCtrl does not. ExpandoTextCtrl is a TE_MULTILINE control, however
		# by default it renders as a single line. A standard TextCtrl with TE_MULTILINE has two lines
		# and a vertical scroll bar. This is not necessary for the single line of text we wish to
		# display here.
synthDesc = getSynth().description
self.synthNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), value=synthDesc, style=wx.TE_READONLY)
self.synthNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeSynth)
# Translators: This is the label for the button used to change synthesizer,
# it appears in the context of a synthesizer group on the speech settings panel.
changeSynthBtn = wx.Button(self, label=_("C&hange..."))
synthGroup.addItem(
guiHelper.associateElements(
self.synthNameCtrl,
changeSynthBtn
)
)
changeSynthBtn.Bind(wx.EVT_BUTTON,self.onChangeSynth)
self.voicePanel = VoiceSettingsPanel(self)
settingsSizerHelper.addItem(self.voicePanel)
def _enterTriggersOnChangeSynth(self, evt):
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeSynth(evt)
else:
evt.Skip()
def onChangeSynth(self, evt):
changeSynth = SynthesizerSelectionDialog(self, multiInstanceAllowed=True)
ret = changeSynth.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentSynth(self):
synthDesc = getSynth().description
self.synthNameCtrl.SetValue(synthDesc)
def onPanelActivated(self):
		# Call super after all panel updates have been completed; we don't want the panel to show until this is complete.
self.voicePanel.onPanelActivated()
super(SpeechSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.voicePanel.onPanelDeactivated()
super(SpeechSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.voicePanel.onDiscard()
def onSave(self):
self.voicePanel.onSave()
class SynthesizerSelectionDialog(SettingsDialog):
# Translators: This is the label for the synthesizer selection dialog
title = _("Select Synthesizer")
synthNames = []
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is a label for the select
# synthesizer combobox in the synthesizer dialog.
synthListLabelText=_("&Synthesizer:")
self.synthList = settingsSizerHelper.addLabeledControl(synthListLabelText, wx.Choice, choices=[])
self.updateSynthesizerList()
		# Translators: This is the label for the select output
		# device combo in the synthesizer dialog. Examples of
		# an output device are the default soundcard, USB
		# headphones, etc.
deviceListLabelText = _("Output &device:")
deviceNames=nvwave.getOutputDeviceNames()
self.deviceList = settingsSizerHelper.addLabeledControl(deviceListLabelText, wx.Choice, choices=deviceNames)
try:
selection = deviceNames.index(config.conf["speech"]["outputDevice"])
except ValueError:
selection = 0
self.deviceList.SetSelection(selection)
# Translators: This is a label for the audio ducking combo box in the Synthesizer Settings dialog.
duckingListLabelText=_("Audio &ducking mode:")
self.duckingList=settingsSizerHelper.addLabeledControl(duckingListLabelText, wx.Choice, choices=audioDucking.audioDuckingModes)
index=config.conf['audio']['audioDuckingMode']
self.duckingList.SetSelection(index)
if not audioDucking.isAudioDuckingSupported():
self.duckingList.Disable()
def postInit(self):
# Finally, ensure that focus is on the synthlist
self.synthList.SetFocus()
def updateSynthesizerList(self):
driverList=getSynthList()
self.synthNames=[x[0] for x in driverList]
options=[x[1] for x in driverList]
self.synthList.Clear()
self.synthList.AppendItems(options)
try:
index=self.synthNames.index(getSynth().name)
self.synthList.SetSelection(index)
except:
pass
def onOk(self, evt):
if not self.synthNames:
# The list of synths has not been populated yet, so we didn't change anything in this panel
return
config.conf["speech"]["outputDevice"]=self.deviceList.GetStringSelection()
newSynth=self.synthNames[self.synthList.GetSelection()]
if not setSynth(newSynth):
# Translators: This message is presented when
# NVDA is unable to load the selected
# synthesizer.
gui.messageBox(_("Could not load the %s synthesizer.")%newSynth,_("Synthesizer Error"),wx.OK|wx.ICON_WARNING,self)
return
if audioDucking.isAudioDuckingSupported():
index=self.duckingList.GetSelection()
config.conf['audio']['audioDuckingMode']=index
audioDucking.setAudioDuckingMode(index)
if self.IsModal():
# Hack: we need to update the synth in our parent window before closing.
# Otherwise, NVDA will report the old synth even though the new synth is reflected visually.
self.Parent.updateCurrentSynth()
super(SynthesizerSelectionDialog, self).onOk(evt)
class DriverSettingChanger(object):
	"""Functor which acts as a callback for GUI events."""
def __init__(self,driver,setting):
self._driverRef=weakref.ref(driver)
self.setting=setting
@property
def driver(self):
return self._driverRef()
def __call__(self,evt):
val=evt.GetSelection()
setattr(self.driver,self.setting.id,val)
class StringDriverSettingChanger(DriverSettingChanger):
"""Same as L{DriverSettingChanger} but handles combobox events."""
def __init__(self,driver,setting,container):
self.container=container
super(StringDriverSettingChanger,self).__init__(driver,setting)
def __call__(self,evt):
# Quick workaround to deal with voice changes.
if self.setting.id == "voice":
# Cancel speech first so that the voice will change immediately instead of the change being queued.
speech.cancelSpeech()
changeVoice(
self.driver,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
self.container.updateDriverSettings(changedSetting=self.setting.id)
else:
setattr(
self.driver,
self.setting.id,
getattr(self.container,"_%ss"%self.setting.id)[evt.GetSelection()].id
)
class DriverSettingsMixin(object):
"""
	Mixin class that provides support for driver-specific GUI settings.
Derived classes should implement L{driver}.
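
	A minimal usage sketch (hypothetical subclass, shown only for illustration):

		class ExampleDriverPanel(DriverSettingsMixin, SettingsPanel):
			@property
			def driver(self):
				return getSynth()

	L{VoiceSettingsPanel} below follows this pattern for the current synthesizer.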
"""
def __init__(self, *args, **kwargs):
self.sizerDict={}
self.lastControl=None
super(DriverSettingsMixin,self).__init__(*args,**kwargs)
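		# Keep a weak reference to the driver so that onPanelActivated can detect when
		# the underlying driver instance has been replaced and rebuild the controls.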
self._curDriverRef = weakref.ref(self.driver)
@property
def driver(self):
raise NotImplementedError
@classmethod
def _setSliderStepSizes(cls, slider, setting):
slider.SetLineSize(setting.minStep)
slider.SetPageSize(setting.largeStep)
def makeSliderSettingControl(self,setting):
"""Constructs appropriate GUI controls for given L{DriverSetting} such as label and slider.
@param setting: Setting to construct controls for
@type setting: L{DriverSetting}
@returns: WXSizer containing newly created controls.
@rtype: L{wx.BoxSizer}
"""
labeledControl = guiHelper.LabeledControlHelper(
self,
"%s:"%setting.displayNameWithAccelerator,
nvdaControls.EnhancedInputSlider,
minValue=setting.minVal,
maxValue=setting.maxVal
)
lSlider=labeledControl.control
setattr(self,"%sSlider"%setting.id,lSlider)
lSlider.Bind(wx.EVT_SLIDER,DriverSettingChanger(self.driver,setting))
self._setSliderStepSizes(lSlider,setting)
lSlider.SetValue(getattr(self.driver,setting.id))
if self.lastControl:
lSlider.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lSlider
return labeledControl.sizer
def makeStringSettingControl(self,setting):
"""Same as L{makeSliderSettingControl} but for string settings. Returns sizer with label and combobox."""
labelText="%s:"%setting.displayNameWithAccelerator
setattr(
self,
"_%ss"%setting.id,
getattr(self.driver,"available%ss"%setting.id.capitalize()).values()
)
l=getattr(self,"_%ss"%setting.id)
labeledControl=guiHelper.LabeledControlHelper(
self,
labelText,
wx.Choice,
choices=[x.displayName for x in l]
)
lCombo = labeledControl.control
setattr(self,"%sList"%setting.id,lCombo)
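		# Pre-select the driver's current value for this setting, if it is present in
		# the list of available values.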
try:
cur=getattr(self.driver,setting.id)
i=[x.id for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
lCombo.Bind(wx.EVT_CHOICE,StringDriverSettingChanger(self.driver,setting,self))
if self.lastControl:
lCombo.MoveAfterInTabOrder(self.lastControl)
self.lastControl=lCombo
return labeledControl.sizer
def makeBooleanSettingControl(self,setting):
"""Same as L{makeSliderSettingControl} but for boolean settings. Returns checkbox."""
checkbox=wx.CheckBox(self,wx.ID_ANY,label=setting.displayNameWithAccelerator)
setattr(self,"%sCheckbox"%setting.id,checkbox)
checkbox.Bind(wx.EVT_CHECKBOX,
lambda evt: setattr(self.driver,setting.id,evt.IsChecked()))
checkbox.SetValue(getattr(self.driver,setting.id))
if self.lastControl:
checkbox.MoveAfterInTabOrder(self.lastControl)
self.lastControl=checkbox
return checkbox
	def updateDriverSettings(self, changedSetting=None):
		"""Creates, hides or updates existing GUI controls for all supported settings."""
		# First, check options that have already been created
for name,sizer in self.sizerDict.iteritems():
if name == changedSetting:
# Changing a setting shouldn't cause that setting itself to disappear.
continue
if not self.driver.isSupported(name):
self.settingsSizer.Hide(sizer)
		# Create new controls and update already existing ones
for setting in self.driver.supportedSettings:
if setting.id == changedSetting:
# Changing a setting shouldn't cause that setting's own values to change.
continue
if setting.id in self.sizerDict: #update a value
self.settingsSizer.Show(self.sizerDict[setting.id])
if isinstance(setting,NumericDriverSetting):
getattr(self,"%sSlider"%setting.id).SetValue(getattr(self.driver,setting.id))
elif isinstance(setting,BooleanDriverSetting):
getattr(self,"%sCheckbox"%setting.id).SetValue(getattr(self.driver,setting.id))
else:
l=getattr(self,"_%ss"%setting.id)
lCombo=getattr(self,"%sList"%setting.id)
try:
cur=getattr(self.driver,setting.id)
i=[x.id for x in l].index(cur)
lCombo.SetSelection(i)
except ValueError:
pass
else: #create a new control
if isinstance(setting,NumericDriverSetting):
settingMaker=self.makeSliderSettingControl
elif isinstance(setting,BooleanDriverSetting):
settingMaker=self.makeBooleanSettingControl
else:
settingMaker=self.makeStringSettingControl
try:
s=settingMaker(setting)
except UnsupportedConfigParameterError:
log.debugWarning("Unsupported setting %s; ignoring"%setting.id, exc_info=True)
continue
self.sizerDict[setting.id]=s
self.settingsSizer.Insert(len(self.sizerDict)-1,s,border=10,flag=wx.BOTTOM)
#Update graphical layout of the dialog
self.settingsSizer.Layout()
def onDiscard(self):
#unbind change events for string settings as wx closes combo boxes on cancel
for setting in self.driver.supportedSettings:
if isinstance(setting,(NumericDriverSetting,BooleanDriverSetting)): continue
getattr(self,"%sList"%setting.id).Unbind(wx.EVT_CHOICE)
#restore settings
self.driver.loadSettings()
def onSave(self):
self.driver.saveSettings()
def onPanelActivated(self):
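		# If the weak reference to the driver is dead, the driver has been replaced
		# (for example, a different synthesizer was selected), so rebuild all setting controls.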
if not self._curDriverRef():
if gui._isDebug():
log.debug("refreshing panel")
self.sizerDict.clear()
self.settingsSizer.Clear(delete_windows=True)
self._curDriverRef = weakref.ref(self.driver)
self.makeSettings(self.settingsSizer)
super(DriverSettingsMixin,self).onPanelActivated()
class VoiceSettingsPanel(DriverSettingsMixin, SettingsPanel):
# Translators: This is the label for the voice settings panel.
title = _("Voice")
@property
def driver(self):
return getSynth()
def makeSettings(self, settingsSizer):
# Construct synthesizer settings
self.updateDriverSettings()
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, text will be read using the voice for the language of the text).
autoLanguageSwitchingText = _("Automatic language switching (when supported)")
self.autoLanguageSwitchingCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=autoLanguageSwitchingText))
self.autoLanguageSwitchingCheckbox.SetValue(config.conf["speech"]["autoLanguageSwitching"])
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, different voices for dialects will be used to read text in that dialect).
autoDialectSwitchingText =_("Automatic dialect switching (when supported)")
self.autoDialectSwitchingCheckbox=settingsSizerHelper.addItem(wx.CheckBox(self,label=autoDialectSwitchingText))
self.autoDialectSwitchingCheckbox.SetValue(config.conf["speech"]["autoDialectSwitching"])
# Translators: This is the label for a combobox in the
# voice settings panel (possible choices are none, some, most and all).
punctuationLabelText = _("Punctuation/symbol &level:")
symbolLevelLabels=characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
symbolLevelChoices =[symbolLevelLabels[level] for level in characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS]
self.symbolLevelList = settingsSizerHelper.addLabeledControl(punctuationLabelText, wx.Choice, choices=symbolLevelChoices)
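		# The configuration stores the numeric symbol level; map it to its position in
		# the list of configurable levels to select the matching combo box entry.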
curLevel = config.conf["speech"]["symbolLevel"]
self.symbolLevelList.SetSelection(characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS.index(curLevel))
# Translators: This is the label for a checkbox in the
		# voice settings panel (if checked, the voice's language will be trusted when processing characters and symbols).
trustVoiceLanguageText = _("Trust voice's language when processing characters and symbols")
self.trustVoiceLanguageCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=trustVoiceLanguageText))
self.trustVoiceLanguageCheckbox.SetValue(config.conf["speech"]["trustVoiceLanguage"])
# Translators: This is the label for a checkbox in the
# voice settings panel (if checked, data from the unicode CLDR will be used
# to speak emoji descriptions).
includeCLDRText = _("Include Unicode Consortium data (including emoji) when processing characters and symbols")
self.includeCLDRCheckbox = settingsSizerHelper.addItem(wx.CheckBox(self,label=includeCLDRText))
self.includeCLDRCheckbox.SetValue(config.conf["speech"]["includeCLDR"])
		# Translators: This is a label for a setting in voice settings (an edit box to change the voice pitch for capital letters; the higher the value, the higher the pitch).
capPitchChangeLabelText=_("Capital pitch change percentage")
self.capPitchChangeEdit=settingsSizerHelper.addLabeledControl(capPitchChangeLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["speech", self.driver.name, "capPitchChange"], "min")),
max=int(config.conf.getConfigValidationParameter(["speech", self.driver.name, "capPitchChange"], "max")),
initial=config.conf["speech"][self.driver.name]["capPitchChange"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
sayCapForCapsText = _("Say &cap before capitals")
self.sayCapForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self,label=sayCapForCapsText))
self.sayCapForCapsCheckBox.SetValue(config.conf["speech"][self.driver.name]["sayCapForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
beepForCapsText =_("&Beep for capitals")
self.beepForCapsCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = beepForCapsText))
self.beepForCapsCheckBox.SetValue(config.conf["speech"][self.driver.name]["beepForCapitals"])
# Translators: This is the label for a checkbox in the
# voice settings panel.
useSpellingFunctionalityText = _("Use &spelling functionality if supported")
self.useSpellingFunctionalityCheckBox = settingsSizerHelper.addItem(wx.CheckBox(self, label = useSpellingFunctionalityText))
self.useSpellingFunctionalityCheckBox.SetValue(config.conf["speech"][self.driver.name]["useSpellingFunctionality"])
def onSave(self):
DriverSettingsMixin.onSave(self)
config.conf["speech"]["autoLanguageSwitching"]=self.autoLanguageSwitchingCheckbox.IsChecked()
config.conf["speech"]["autoDialectSwitching"]=self.autoDialectSwitchingCheckbox.IsChecked()
config.conf["speech"]["symbolLevel"]=characterProcessing.CONFIGURABLE_SPEECH_SYMBOL_LEVELS[self.symbolLevelList.GetSelection()]
config.conf["speech"]["trustVoiceLanguage"]=self.trustVoiceLanguageCheckbox.IsChecked()
currentIncludeCLDR = config.conf["speech"]["includeCLDR"]
config.conf["speech"]["includeCLDR"] = newIncludeCldr = self.includeCLDRCheckbox.IsChecked()
if currentIncludeCLDR is not newIncludeCldr:
# Either included or excluded CLDR data, so clear the cache.
characterProcessing.clearSpeechSymbols()
config.conf["speech"][self.driver.name]["capPitchChange"]=self.capPitchChangeEdit.Value
config.conf["speech"][self.driver.name]["sayCapForCapitals"]=self.sayCapForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["beepForCapitals"]=self.beepForCapsCheckBox.IsChecked()
config.conf["speech"][self.driver.name]["useSpellingFunctionality"]=self.useSpellingFunctionalityCheckBox.IsChecked()
class KeyboardSettingsPanel(SettingsPanel):
# Translators: This is the label for the keyboard settings panel.
title = _("Keyboard")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a combobox in the
# keyboard settings panel.
kbdLabelText = _("&Keyboard layout:")
layouts=keyboardHandler.KeyboardInputGesture.LAYOUTS
self.kbdNames=sorted(layouts)
kbdChoices = [layouts[layout] for layout in self.kbdNames]
self.kbdList=sHelper.addLabeledControl(kbdLabelText, wx.Choice, choices=kbdChoices)
try:
index=self.kbdNames.index(config.conf['keyboard']['keyboardLayout'])
self.kbdList.SetSelection(index)
except:
log.debugWarning("Could not set Keyboard layout list to current layout",exc_info=True)
#Translators: This is the label for a list of checkboxes
# controlling which keys are NVDA modifier keys.
modifierBoxLabel = _("&Select NVDA Modifier Keys")
self.modifierChoices = [keyLabels.localizedKeyLabels[key] for key in keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS]
self.modifierList=sHelper.addLabeledControl(modifierBoxLabel, nvdaControls.CustomCheckListBox, choices=self.modifierChoices)
checkedItems = []
if config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
if config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
if config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"]:
checkedItems.append(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
self.modifierList.CheckedItems = checkedItems
self.modifierList.Select(0)
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
charsText = _("Speak typed &characters")
self.charsCheckBox=sHelper.addItem(wx.CheckBox(self,label=charsText))
self.charsCheckBox.SetValue(config.conf["keyboard"]["speakTypedCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speakTypedWordsText = _("Speak typed &words")
self.wordsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speakTypedWordsText))
self.wordsCheckBox.SetValue(config.conf["keyboard"]["speakTypedWords"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForCharText = _("Speech interrupt for typed characters")
self.speechInterruptForCharsCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForCharText))
self.speechInterruptForCharsCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForCharacters"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
speechInterruptForEnterText = _("Speech interrupt for Enter key")
self.speechInterruptForEnterCheckBox=sHelper.addItem(wx.CheckBox(self,label=speechInterruptForEnterText))
self.speechInterruptForEnterCheckBox.SetValue(config.conf["keyboard"]["speechInterruptForEnter"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
allowSkimReadingInSayAllText = _("Allow skim &reading in Say All")
self.skimReadingInSayAllCheckBox=sHelper.addItem(wx.CheckBox(self,label=allowSkimReadingInSayAllText))
self.skimReadingInSayAllCheckBox.SetValue(config.conf["keyboard"]["allowSkimReadingInSayAll"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
beepForLowercaseWithCapsLockText = _("Beep if typing lowercase letters when caps lock is on")
self.beepLowercaseCheckBox=sHelper.addItem(wx.CheckBox(self,label=beepForLowercaseWithCapsLockText))
self.beepLowercaseCheckBox.SetValue(config.conf["keyboard"]["beepForLowercaseWithCapslock"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
commandKeysText = _("Speak command &keys")
self.commandKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=commandKeysText))
self.commandKeysCheckBox.SetValue(config.conf["keyboard"]["speakCommandKeys"])
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
alertForSpellingErrorsText = _("Play sound for &spelling errors while typing")
self.alertForSpellingErrorsCheckBox=sHelper.addItem(wx.CheckBox(self,label=alertForSpellingErrorsText))
self.alertForSpellingErrorsCheckBox.SetValue(config.conf["keyboard"]["alertForSpellingErrors"])
if not config.conf["documentFormatting"]["reportSpellingErrors"]:
self.alertForSpellingErrorsCheckBox.Disable()
# Translators: This is the label for a checkbox in the
# keyboard settings panel.
handleInjectedKeysText = _("Handle keys from other &applications")
self.handleInjectedKeysCheckBox=sHelper.addItem(wx.CheckBox(self,label=handleInjectedKeysText))
self.handleInjectedKeysCheckBox.SetValue(config.conf["keyboard"]["handleInjectedKeys"])
def isValid(self):
		# #2871: check whether at least one key is set as the NVDA key.
if not self.modifierList.CheckedItems:
log.debugWarning("No NVDA key set")
gui.messageBox(
# Translators: Message to report wrong configuration of the NVDA key
_("At least one key must be used as the NVDA key."),
# Translators: The title of the message box
_("Error"), wx.OK|wx.ICON_ERROR,self)
return False
return super(KeyboardSettingsPanel, self).isValid()
def onSave(self):
layout=self.kbdNames[self.kbdList.GetSelection()]
config.conf['keyboard']['keyboardLayout']=layout
config.conf["keyboard"]["useNumpadInsertAsNVDAModifierKey"]= self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("numpadinsert"))
config.conf["keyboard"]["useExtendedInsertAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("insert"))
config.conf["keyboard"]["useCapsLockAsNVDAModifierKey"] = self.modifierList.IsChecked(keyboardHandler.SUPPORTED_NVDA_MODIFIER_KEYS.index("capslock"))
config.conf["keyboard"]["speakTypedCharacters"]=self.charsCheckBox.IsChecked()
config.conf["keyboard"]["speakTypedWords"]=self.wordsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForCharacters"]=self.speechInterruptForCharsCheckBox.IsChecked()
config.conf["keyboard"]["speechInterruptForEnter"]=self.speechInterruptForEnterCheckBox.IsChecked()
config.conf["keyboard"]["allowSkimReadingInSayAll"]=self.skimReadingInSayAllCheckBox.IsChecked()
config.conf["keyboard"]["beepForLowercaseWithCapslock"]=self.beepLowercaseCheckBox.IsChecked()
config.conf["keyboard"]["speakCommandKeys"]=self.commandKeysCheckBox.IsChecked()
config.conf["keyboard"]["alertForSpellingErrors"]=self.alertForSpellingErrorsCheckBox.IsChecked()
config.conf["keyboard"]["handleInjectedKeys"]=self.handleInjectedKeysCheckBox.IsChecked()
class MouseSettingsPanel(SettingsPanel):
# Translators: This is the label for the mouse settings panel.
title = _("Mouse")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
shapeChangesText = _("Report mouse &shape changes")
self.shapeCheckBox=sHelper.addItem(wx.CheckBox(self,label=shapeChangesText))
self.shapeCheckBox.SetValue(config.conf["mouse"]["reportMouseShapeChanges"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
mouseTrackingText=_("Enable mouse &tracking")
self.mouseTrackingCheckBox=sHelper.addItem(wx.CheckBox(self,label=mouseTrackingText))
self.mouseTrackingCheckBox.SetValue(config.conf["mouse"]["enableMouseTracking"])
# Translators: This is the label for a combobox in the
# mouse settings panel.
textUnitLabelText=_("Text &unit resolution:")
import textInfos
self.textUnits=textInfos.MOUSE_TEXT_RESOLUTION_UNITS
textUnitsChoices = [textInfos.unitLabels[x] for x in self.textUnits]
self.textUnitComboBox=sHelper.addLabeledControl(textUnitLabelText, wx.Choice, choices=textUnitsChoices)
try:
index=self.textUnits.index(config.conf["mouse"]["mouseTextUnit"])
except:
index=0
self.textUnitComboBox.SetSelection(index)
# Translators: This is the label for a checkbox in the
# mouse settings panel.
reportObjectRoleText = _("Report &role when mouse enters object")
self.reportObjectRoleCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportObjectRoleText))
self.reportObjectRoleCheckBox.SetValue(config.conf["mouse"]["reportObjectRoleOnMouseEnter"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioText = _("&Play audio coordinates when mouse moves")
self.audioCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioText))
self.audioCheckBox.SetValue(config.conf["mouse"]["audioCoordinatesOnMouseMove"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
audioDetectBrightnessText = _("&Brightness controls audio coordinates volume")
self.audioDetectBrightnessCheckBox=sHelper.addItem(wx.CheckBox(self,label=audioDetectBrightnessText))
self.audioDetectBrightnessCheckBox.SetValue(config.conf["mouse"]["audioCoordinates_detectBrightness"])
# Translators: This is the label for a checkbox in the
# mouse settings panel.
ignoreInjectedMouseInputText = _("Ignore mouse input from other &applications")
self.ignoreInjectedMouseInputCheckBox=sHelper.addItem(wx.CheckBox(self,label=ignoreInjectedMouseInputText))
self.ignoreInjectedMouseInputCheckBox.SetValue(config.conf["mouse"]["ignoreInjectedMouseInput"])
def onSave(self):
config.conf["mouse"]["reportMouseShapeChanges"]=self.shapeCheckBox.IsChecked()
config.conf["mouse"]["enableMouseTracking"]=self.mouseTrackingCheckBox.IsChecked()
config.conf["mouse"]["mouseTextUnit"]=self.textUnits[self.textUnitComboBox.GetSelection()]
config.conf["mouse"]["reportObjectRoleOnMouseEnter"]=self.reportObjectRoleCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinatesOnMouseMove"]=self.audioCheckBox.IsChecked()
config.conf["mouse"]["audioCoordinates_detectBrightness"]=self.audioDetectBrightnessCheckBox.IsChecked()
config.conf["mouse"]["ignoreInjectedMouseInput"]=self.ignoreInjectedMouseInputCheckBox.IsChecked()
class ReviewCursorPanel(SettingsPanel):
# Translators: This is the label for the review cursor settings panel.
title = _("Review Cursor")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followFocusCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Follow system &focus"))
self.followFocusCheckBox.SetValue(config.conf["reviewCursor"]["followFocus"])
settingsSizer.Add(self.followFocusCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followCaretCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Follow System &Caret"))
self.followCaretCheckBox.SetValue(config.conf["reviewCursor"]["followCaret"])
settingsSizer.Add(self.followCaretCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.followMouseCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Follow &mouse cursor"))
self.followMouseCheckBox.SetValue(config.conf["reviewCursor"]["followMouse"])
settingsSizer.Add(self.followMouseCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# review cursor settings panel.
self.simpleReviewModeCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Simple review mode"))
self.simpleReviewModeCheckBox.SetValue(config.conf["reviewCursor"]["simpleReviewMode"])
settingsSizer.Add(self.simpleReviewModeCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["reviewCursor"]["followFocus"]=self.followFocusCheckBox.IsChecked()
config.conf["reviewCursor"]["followCaret"]=self.followCaretCheckBox.IsChecked()
config.conf["reviewCursor"]["followMouse"]=self.followMouseCheckBox.IsChecked()
config.conf["reviewCursor"]["simpleReviewMode"]=self.simpleReviewModeCheckBox.IsChecked()
class InputCompositionPanel(SettingsPanel):
# Translators: This is the label for the Input Composition settings panel.
title = _("Input Composition")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.autoReportAllCandidatesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Automatically report all available &candidates"))
self.autoReportAllCandidatesCheckBox.SetValue(config.conf["inputComposition"]["autoReportAllCandidates"])
settingsSizer.Add(self.autoReportAllCandidatesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.announceSelectedCandidateCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Announce &selected candidate"))
self.announceSelectedCandidateCheckBox.SetValue(config.conf["inputComposition"]["announceSelectedCandidate"])
settingsSizer.Add(self.announceSelectedCandidateCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.candidateIncludesShortCharacterDescriptionCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Always include short character &description when announcing candidates"))
self.candidateIncludesShortCharacterDescriptionCheckBox.SetValue(config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"])
settingsSizer.Add(self.candidateIncludesShortCharacterDescriptionCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportReadingStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &reading string"))
self.reportReadingStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportReadingStringChanges"])
settingsSizer.Add(self.reportReadingStringChangesCheckBox,border=10,flag=wx.BOTTOM)
# Translators: This is the label for a checkbox in the
# Input composition settings panel.
self.reportCompositionStringChangesCheckBox=wx.CheckBox(self,wx.ID_ANY,label=_("Report changes to the &composition string"))
self.reportCompositionStringChangesCheckBox.SetValue(config.conf["inputComposition"]["reportCompositionStringChanges"])
settingsSizer.Add(self.reportCompositionStringChangesCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["inputComposition"]["autoReportAllCandidates"]=self.autoReportAllCandidatesCheckBox.IsChecked()
config.conf["inputComposition"]["announceSelectedCandidate"]=self.announceSelectedCandidateCheckBox.IsChecked()
config.conf["inputComposition"]["alwaysIncludeShortCharacterDescriptionInCandidateName"]=self.candidateIncludesShortCharacterDescriptionCheckBox.IsChecked()
config.conf["inputComposition"]["reportReadingStringChanges"]=self.reportReadingStringChangesCheckBox.IsChecked()
config.conf["inputComposition"]["reportCompositionStringChanges"]=self.reportCompositionStringChangesCheckBox.IsChecked()
class ObjectPresentationPanel(SettingsPanel):
# Translators: This is the label for the object presentation panel.
title = _("Object Presentation")
progressLabels = (
# Translators: An option for progress bar output in the Object Presentation dialog
# which disables reporting of progress bars.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("off", _("off")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by speaking.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("speak", _("Speak")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("beep", _("Beep")),
# Translators: An option for progress bar output in the Object Presentation dialog
# which reports progress bar updates by both speaking and beeping.
# See Progress bar output in the Object Presentation Settings section of the User Guide.
("both", _("Speak and beep")),
)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportToolTipsText = _("Report &tooltips")
self.tooltipCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportToolTipsText))
self.tooltipCheckBox.SetValue(config.conf["presentation"]["reportTooltips"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
		balloonText = _("Report &notifications")
self.balloonCheckBox=sHelper.addItem(wx.CheckBox(self,label=balloonText))
self.balloonCheckBox.SetValue(config.conf["presentation"]["reportHelpBalloons"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
shortcutText = _("Report object shortcut &keys")
self.shortcutCheckBox=sHelper.addItem(wx.CheckBox(self,label=shortcutText))
self.shortcutCheckBox.SetValue(config.conf["presentation"]["reportKeyboardShortcuts"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
positionInfoText = _("Report object &position information")
self.positionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=positionInfoText))
self.positionInfoCheckBox.SetValue(config.conf["presentation"]["reportObjectPositionInformation"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
guessPositionInfoText=_("Guess object &position information when unavailable")
self.guessPositionInfoCheckBox=sHelper.addItem(wx.CheckBox(self,label=guessPositionInfoText))
self.guessPositionInfoCheckBox.SetValue(config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
descriptionText = _("Report object &descriptions")
self.descriptionCheckBox=sHelper.addItem(wx.CheckBox(self,label=descriptionText))
self.descriptionCheckBox.SetValue(config.conf["presentation"]["reportObjectDescriptions"])
# Translators: This is the label for a combobox in the
# object presentation settings panel.
progressLabelText = _("Progress &bar output:")
progressChoices = [name for setting, name in self.progressLabels]
self.progressList=sHelper.addLabeledControl(progressLabelText, wx.Choice, choices=progressChoices)
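		# Select the entry matching the configured progress bar output mode; the else
		# clause only runs if no entry matched.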
for index, (setting, name) in enumerate(self.progressLabels):
if setting == config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]:
self.progressList.SetSelection(index)
break
else:
log.debugWarning("Could not set progress list to current report progress bar updates setting")
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
reportBackgroundProgressBarsText = _("Report background progress bars")
self.reportBackgroundProgressBarsCheckBox=sHelper.addItem(wx.CheckBox(self,label=reportBackgroundProgressBarsText))
self.reportBackgroundProgressBarsCheckBox.SetValue(config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"])
# Translators: This is the label for a checkbox in the
# object presentation settings panel.
dynamicContentText = _("Report dynamic &content changes")
self.dynamicContentCheckBox=sHelper.addItem(wx.CheckBox(self,label=dynamicContentText))
self.dynamicContentCheckBox.SetValue(config.conf["presentation"]["reportDynamicContentChanges"])
		# Translators: This is the label for a checkbox in the
		# object presentation settings panel.
autoSuggestionsLabelText = _("Play a sound when &auto-suggestions appear")
self.autoSuggestionSoundsCheckBox=sHelper.addItem(wx.CheckBox(self,label=autoSuggestionsLabelText))
self.autoSuggestionSoundsCheckBox.SetValue(config.conf["presentation"]["reportAutoSuggestionsWithSound"])
def onSave(self):
config.conf["presentation"]["reportTooltips"]=self.tooltipCheckBox.IsChecked()
config.conf["presentation"]["reportHelpBalloons"]=self.balloonCheckBox.IsChecked()
config.conf["presentation"]["reportKeyboardShortcuts"]=self.shortcutCheckBox.IsChecked()
config.conf["presentation"]["reportObjectPositionInformation"]=self.positionInfoCheckBox.IsChecked()
config.conf["presentation"]["guessObjectPositionInformationWhenUnavailable"]=self.guessPositionInfoCheckBox.IsChecked()
config.conf["presentation"]["reportObjectDescriptions"]=self.descriptionCheckBox.IsChecked()
config.conf["presentation"]["progressBarUpdates"]["progressBarOutputMode"]=self.progressLabels[self.progressList.GetSelection()][0]
config.conf["presentation"]["progressBarUpdates"]["reportBackgroundProgressBars"]=self.reportBackgroundProgressBarsCheckBox.IsChecked()
config.conf["presentation"]["reportDynamicContentChanges"]=self.dynamicContentCheckBox.IsChecked()
config.conf["presentation"]["reportAutoSuggestionsWithSound"]=self.autoSuggestionSoundsCheckBox.IsChecked()
class BrowseModePanel(SettingsPanel):
# Translators: This is the label for the browse mode settings panel.
title = _("Browse Mode")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: This is the label for a textfield in the
# browse mode settings panel.
maxLengthLabelText = _("&Maximum number of characters on one line")
self.maxLengthEdit = sHelper.addLabeledControl(maxLengthLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.maxLineLength
min=10, max=250,
initial=config.conf["virtualBuffers"]["maxLineLength"])
# Translators: This is the label for a textfield in the
# browse mode settings panel.
pageLinesLabelText = _("&Number of lines per page")
self.pageLinesEdit = sHelper.addLabeledControl(pageLinesLabelText, nvdaControls.SelectOnFocusSpinCtrl,
# min and max are not enforced in the config for virtualBuffers.linesPerPage
min=5, max=150,
initial=config.conf["virtualBuffers"]["linesPerPage"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
useScreenLayoutText = _("Use &screen layout (when supported)")
self.useScreenLayoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=useScreenLayoutText))
self.useScreenLayoutCheckBox.SetValue(config.conf["virtualBuffers"]["useScreenLayout"])
# Translators: The label for a checkbox in browse mode settings to
# enable browse mode on page load.
enableOnPageLoadText = _("&Enable browse mode on page load")
self.enableOnPageLoadCheckBox = sHelper.addItem(wx.CheckBox(self, label=enableOnPageLoadText))
self.enableOnPageLoadCheckBox.SetValue(config.conf["virtualBuffers"]["enableOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoSayAllText = _("Automatic &Say All on page load")
self.autoSayAllCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoSayAllText))
self.autoSayAllCheckBox.SetValue(config.conf["virtualBuffers"]["autoSayAllOnPageLoad"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
layoutTablesText = _("Include l&ayout tables")
self.layoutTablesCheckBox = sHelper.addItem(wx.CheckBox(self, label =layoutTablesText))
self.layoutTablesCheckBox.SetValue(config.conf["documentFormatting"]["includeLayoutTables"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnFocusChangeText = _("Automatic focus mode for focus changes")
self.autoPassThroughOnFocusChangeCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnFocusChangeText))
self.autoPassThroughOnFocusChangeCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
autoPassThroughOnCaretMoveText = _("Automatic focus mode for caret movement")
self.autoPassThroughOnCaretMoveCheckBox = sHelper.addItem(wx.CheckBox(self, label=autoPassThroughOnCaretMoveText))
self.autoPassThroughOnCaretMoveCheckBox.SetValue(config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
passThroughAudioIndicationText = _("Audio indication of focus and browse modes")
self.passThroughAudioIndicationCheckBox = sHelper.addItem(wx.CheckBox(self, label=passThroughAudioIndicationText))
self.passThroughAudioIndicationCheckBox.SetValue(config.conf["virtualBuffers"]["passThroughAudioIndication"])
# Translators: This is the label for a checkbox in the
# browse mode settings panel.
trapNonCommandGesturesText = _("&Trap all non-command gestures from reaching the document")
self.trapNonCommandGesturesCheckBox = sHelper.addItem(wx.CheckBox(self, label=trapNonCommandGesturesText))
self.trapNonCommandGesturesCheckBox.SetValue(config.conf["virtualBuffers"]["trapNonCommandGestures"])
def onSave(self):
config.conf["virtualBuffers"]["maxLineLength"]=self.maxLengthEdit.GetValue()
config.conf["virtualBuffers"]["linesPerPage"]=self.pageLinesEdit.GetValue()
config.conf["virtualBuffers"]["useScreenLayout"]=self.useScreenLayoutCheckBox.IsChecked()
config.conf["virtualBuffers"]["enableOnPageLoad"] = self.enableOnPageLoadCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoSayAllOnPageLoad"]=self.autoSayAllCheckBox.IsChecked()
config.conf["documentFormatting"]["includeLayoutTables"]=self.layoutTablesCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"]=self.autoPassThroughOnFocusChangeCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]=self.autoPassThroughOnCaretMoveCheckBox.IsChecked()
config.conf["virtualBuffers"]["passThroughAudioIndication"]=self.passThroughAudioIndicationCheckBox.IsChecked()
config.conf["virtualBuffers"]["trapNonCommandGestures"]=self.trapNonCommandGesturesCheckBox.IsChecked()
class DocumentFormattingPanel(SettingsPanel):
# Translators: This is the label for the document formatting panel.
title = _("Document Formatting")
# Translators: This is a label appearing on the document formatting settings panel.
panelDescription = _("The following options control the types of document formatting reported by NVDA.")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
sHelper.addItem(wx.StaticText(self, label=self.panelDescription))
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
fontGroupText = _("Font")
fontGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=fontGroupText), wx.VERTICAL))
sHelper.addItem(fontGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontNameText = _("&Font name")
self.fontNameCheckBox=fontGroup.addItem(wx.CheckBox(self, label=fontNameText))
self.fontNameCheckBox.SetValue(config.conf["documentFormatting"]["reportFontName"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontSizeText = _("Font &size")
self.fontSizeCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontSizeText))
self.fontSizeCheckBox.SetValue(config.conf["documentFormatting"]["reportFontSize"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
fontAttributesText = _("Font attri&butes")
self.fontAttrsCheckBox=fontGroup.addItem(wx.CheckBox(self,label=fontAttributesText))
self.fontAttrsCheckBox.SetValue(config.conf["documentFormatting"]["reportFontAttributes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
emphasisText=_("E&mphasis")
self.emphasisCheckBox=fontGroup.addItem(wx.CheckBox(self,label=emphasisText))
self.emphasisCheckBox.SetValue(config.conf["documentFormatting"]["reportEmphasis"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
styleText =_("St&yle")
self.styleCheckBox=fontGroup.addItem(wx.CheckBox(self,label=styleText))
self.styleCheckBox.SetValue(config.conf["documentFormatting"]["reportStyle"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
colorsText = _("&Colors")
self.colorCheckBox=fontGroup.addItem(wx.CheckBox(self,label=colorsText))
self.colorCheckBox.SetValue(config.conf["documentFormatting"]["reportColor"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
documentInfoGroupText = _("Document information")
docInfoGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=documentInfoGroupText), wx.VERTICAL))
sHelper.addItem(docInfoGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
commentsText = _("Co&mments")
self.commentsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=commentsText))
self.commentsCheckBox.SetValue(config.conf["documentFormatting"]["reportComments"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
revisionsText = _("&Editor revisions")
self.revisionsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=revisionsText))
self.revisionsCheckBox.SetValue(config.conf["documentFormatting"]["reportRevisions"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
spellingErrorText = _("Spelling e&rrors")
self.spellingErrorsCheckBox=docInfoGroup.addItem(wx.CheckBox(self,label=spellingErrorText))
self.spellingErrorsCheckBox.SetValue(config.conf["documentFormatting"]["reportSpellingErrors"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
pageAndSpaceGroupText = _("Pages and spacing")
pageAndSpaceGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=pageAndSpaceGroupText), wx.VERTICAL))
sHelper.addItem(pageAndSpaceGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
pageText = _("&Pages")
self.pageCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=pageText))
self.pageCheckBox.SetValue(config.conf["documentFormatting"]["reportPage"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
lineText = _("Line &numbers")
self.lineNumberCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineText))
self.lineNumberCheckBox.SetValue(config.conf["documentFormatting"]["reportLineNumber"])
# Translators: This is the label for a combobox controlling the reporting of line indentation in the
		# Document Formatting dialog (possible choices are Off, Speech, Tones, or Both).
lineIndentationText = _("Line &indentation reporting:")
indentChoices=[
			#Translators: A choice in a combo box in the document formatting dialog to report no line indentation.
_("Off"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with Speech.
pgettext('line indentation setting', "Speech"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with tones.
_("Tones"),
#Translators: A choice in a combo box in the document formatting dialog to report indentation with both Speech and tones.
_("Both Speech and Tones")
]
self.lineIndentationCombo = pageAndSpaceGroup.addLabeledControl(lineIndentationText, wx.Choice, choices=indentChoices)
		# We use bitwise operations because they save us a four-way if statement.
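		# The resulting combo box index is: 0 = off, 1 = speech only,
		# 2 = tones only, 3 = both speech and tones.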
curChoice = config.conf["documentFormatting"]["reportLineIndentationWithTones"] << 1 | config.conf["documentFormatting"]["reportLineIndentation"]
self.lineIndentationCombo.SetSelection(curChoice)
		# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report paragraph indentation if available.
paragraphIndentationText = _("&Paragraph indentation")
self.paragraphIndentationCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=paragraphIndentationText))
self.paragraphIndentationCheckBox.SetValue(config.conf["documentFormatting"]["reportParagraphIndentation"])
		# Translators: This message is presented in the document formatting settings panel.
# If this option is selected, NVDA will report line spacing if available.
lineSpacingText=_("&Line spacing")
self.lineSpacingCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=lineSpacingText))
self.lineSpacingCheckBox.SetValue(config.conf["documentFormatting"]["reportLineSpacing"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
alignmentText = _("&Alignment")
self.alignmentCheckBox=pageAndSpaceGroup.addItem(wx.CheckBox(self,label=alignmentText))
self.alignmentCheckBox.SetValue(config.conf["documentFormatting"]["reportAlignment"])
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
tablesGroupText = _("Table information")
tablesGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=tablesGroupText), wx.VERTICAL))
sHelper.addItem(tablesGroup)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tablesCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("&Tables")))
self.tablesCheckBox.SetValue(config.conf["documentFormatting"]["reportTables"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableHeadersCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Row/column h&eaders")))
self.tableHeadersCheckBox.SetValue(config.conf["documentFormatting"]["reportTableHeaders"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.tableCellCoordsCheckBox=tablesGroup.addItem(wx.CheckBox(self,label=_("Cell c&oordinates")))
self.tableCellCoordsCheckBox.SetValue(config.conf["documentFormatting"]["reportTableCellCoords"])
borderChoices=[
# Translators: A choice in the cell borders combobox in the
# document formatting settings panel.
_("Off"),
# Translators: A choice in the cell borders combobox in the
# document formatting settings panel.
_("Styles"),
# Translators: A choice in the cell borders combobox in the
# document formatting settings panel.
_("Both Colors and Styles"),
]
# Translators: This is the label for a combobox in the
# document formatting settings panel.
self.borderComboBox=tablesGroup.addLabeledControl(_("Cell borders:"), wx.Choice, choices=borderChoices)
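# Map the two border config flags onto a single combo selection: 0 = off, 1 = styles only, 2 = both colors and styles.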
curChoice = 0
if config.conf["documentFormatting"]["reportBorderStyle"]:
if config.conf["documentFormatting"]["reportBorderColor"]:
curChoice = 2
else:
curChoice = 1
self.borderComboBox.SetSelection(curChoice)
# Translators: This is the label for a group of document formatting options in the
# document formatting settings panel
elementsGroupText = _("Elements")
elementsGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(wx.StaticBox(self, label=elementsGroupText), wx.VERTICAL))
sHelper.addItem(elementsGroup, flag=wx.EXPAND, proportion=1)
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.headingsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Headings")))
self.headingsCheckBox.SetValue(config.conf["documentFormatting"]["reportHeadings"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.linksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lin&ks")))
self.linksCheckBox.SetValue(config.conf["documentFormatting"]["reportLinks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.listsCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Lists")))
self.listsCheckBox.SetValue(config.conf["documentFormatting"]["reportLists"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.blockQuotesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Block &quotes")))
self.blockQuotesCheckBox.SetValue(config.conf["documentFormatting"]["reportBlockQuotes"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.landmarksCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Lan&dmarks")))
self.landmarksCheckBox.SetValue(config.conf["documentFormatting"]["reportLandmarks"])
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.framesCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("Fra&mes")))
self.framesCheckBox.Value=config.conf["documentFormatting"]["reportFrames"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
self.clickableCheckBox=elementsGroup.addItem(wx.CheckBox(self,label=_("&Clickable")))
self.clickableCheckBox.Value=config.conf["documentFormatting"]["reportClickable"]
# Translators: This is the label for a checkbox in the
# document formatting settings panel.
detectFormatAfterCursorText =_("Report formatting changes after the cursor (can cause a lag)")
self.detectFormatAfterCursorCheckBox=wx.CheckBox(self, label=detectFormatAfterCursorText)
self.detectFormatAfterCursorCheckBox.SetValue(config.conf["documentFormatting"]["detectFormatAfterCursor"])
sHelper.addItem(self.detectFormatAfterCursorCheckBox)
def onSave(self):
config.conf["documentFormatting"]["detectFormatAfterCursor"]=self.detectFormatAfterCursorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontName"]=self.fontNameCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontSize"]=self.fontSizeCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFontAttributes"]=self.fontAttrsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportColor"]=self.colorCheckBox.IsChecked()
config.conf["documentFormatting"]["reportComments"]=self.commentsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportRevisions"]=self.revisionsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportEmphasis"]=self.emphasisCheckBox.IsChecked()
config.conf["documentFormatting"]["reportAlignment"]=self.alignmentCheckBox.IsChecked()
config.conf["documentFormatting"]["reportStyle"]=self.styleCheckBox.IsChecked()
config.conf["documentFormatting"]["reportSpellingErrors"]=self.spellingErrorsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportPage"]=self.pageCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineNumber"]=self.lineNumberCheckBox.IsChecked()
choice = self.lineIndentationCombo.GetSelection()
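# Decode the combo selection back into the two config flags: indices 1 and 3 enable speech reporting, indices 2 and 3 enable tones.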
config.conf["documentFormatting"]["reportLineIndentation"] = choice in (1, 3)
config.conf["documentFormatting"]["reportLineIndentationWithTones"] = choice in (2, 3)
config.conf["documentFormatting"]["reportParagraphIndentation"]=self.paragraphIndentationCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLineSpacing"]=self.lineSpacingCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTables"]=self.tablesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableHeaders"]=self.tableHeadersCheckBox.IsChecked()
config.conf["documentFormatting"]["reportTableCellCoords"]=self.tableCellCoordsCheckBox.IsChecked()
choice = self.borderComboBox.GetSelection()
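# Border styles are reported for either non-off choice; border colors only for the combined choice.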
config.conf["documentFormatting"]["reportBorderStyle"] = choice in (1,2)
config.conf["documentFormatting"]["reportBorderColor"] = (choice == 2)
config.conf["documentFormatting"]["reportLinks"]=self.linksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportHeadings"]=self.headingsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLists"]=self.listsCheckBox.IsChecked()
config.conf["documentFormatting"]["reportBlockQuotes"]=self.blockQuotesCheckBox.IsChecked()
config.conf["documentFormatting"]["reportLandmarks"]=self.landmarksCheckBox.IsChecked()
config.conf["documentFormatting"]["reportFrames"]=self.framesCheckBox.Value
config.conf["documentFormatting"]["reportClickable"]=self.clickableCheckBox.Value
class TouchInteractionPanel(SettingsPanel):
# Translators: This is the label for the touch interaction settings panel.
title = _("Touch Interaction")
def makeSettings(self, settingsSizer):
# Translators: This is the label for a checkbox in the
# touch interaction settings panel.
self.touchTypingCheckBox=wx.CheckBox(self,label=_("&Touch typing mode"))
self.touchTypingCheckBox.SetValue(config.conf["touch"]["touchTyping"])
settingsSizer.Add(self.touchTypingCheckBox,border=10,flag=wx.BOTTOM)
def onSave(self):
config.conf["touch"]["touchTyping"]=self.touchTypingCheckBox.IsChecked()
class UwpOcrPanel(SettingsPanel):
# Translators: The title of the Windows 10 OCR panel.
title = _("Windows 10 OCR")
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Lazily import this.
from contentRecog import uwpOcr
self.languageCodes = uwpOcr.getLanguages()
languageChoices = [
languageHandler.getLanguageDescription(languageHandler.normalizeLanguage(lang))
for lang in self.languageCodes]
# Translators: Label for an option in the Windows 10 OCR dialog.
languageLabel = _("Recognition &language:")
self.languageChoice = sHelper.addLabeledControl(languageLabel, wx.Choice, choices=languageChoices)
try:
langIndex = self.languageCodes.index(config.conf["uwpOcr"]["language"])
self.languageChoice.Selection = langIndex
except ValueError:
self.languageChoice.Selection = 0
def onSave(self):
lang = self.languageCodes[self.languageChoice.Selection]
config.conf["uwpOcr"]["language"] = lang
class AdvancedPanelControls(wx.Panel):
"""Holds the actual controls for the Advanced Settings panel, this allows the state of the controls to
be more easily managed.
"""
def __init__(self, parent):
super(AdvancedPanelControls, self).__init__(parent)
self._defaultsRestored = False
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
self.SetSizer(sHelper.sizer)
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
groupText = _("NVDA Development")
devGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=groupText, orient=wx.VERTICAL)
)
sHelper.addItem(devGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Enable loading custom code from Developer Scratchpad directory")
self.scratchpadCheckBox=devGroup.addItem(wx.CheckBox(self, label=label))
self.scratchpadCheckBox.SetValue(config.conf["development"]["enableScratchpadDir"])
self.scratchpadCheckBox.defaultValue = self._getDefaultValue(["development", "enableScratchpadDir"])
self.scratchpadCheckBox.Bind(
wx.EVT_CHECKBOX,
lambda evt: self.openScratchpadButton.Enable(evt.IsChecked())
)
if config.isAppX:
self.scratchpadCheckBox.Disable()
# Translators: the label for a button in the Advanced settings category
label=_("Open developer scratchpad directory")
self.openScratchpadButton=devGroup.addItem(wx.Button(self, label=label))
self.openScratchpadButton.Enable(config.conf["development"]["enableScratchpadDir"])
self.openScratchpadButton.Bind(wx.EVT_BUTTON,self.onOpenScratchpadDir)
if config.isAppX:
self.openScratchpadButton.Disable()
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Microsoft UI Automation")
UIAGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(UIAGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
label = _("Use UI Automation to access Microsoft &Word document controls when available")
self.UIAInMSWordCheckBox=UIAGroup.addItem(wx.CheckBox(self, label=label))
self.UIAInMSWordCheckBox.SetValue(config.conf["UIA"]["useInMSWordWhenAvailable"])
self.UIAInMSWordCheckBox.defaultValue = self._getDefaultValue(["UIA", "useInMSWordWhenAvailable"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Browse mode")
browseModeGroup = guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(browseModeGroup)
# Translators: This is the label for a checkbox in the
# Advanced settings panel.
autoFocusFocusableElementsText = _("Automatically set system &focus to focusable elements")
self.autoFocusFocusableElementsCheckBox=browseModeGroup.addItem(wx.CheckBox(self,wx.ID_ANY,label=autoFocusFocusableElementsText))
self.autoFocusFocusableElementsCheckBox.SetValue(config.conf["virtualBuffers"]["autoFocusFocusableElements"])
self.autoFocusFocusableElementsCheckBox.defaultValue=self._getDefaultValue(["virtualBuffers","autoFocusFocusableElements"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Editable Text")
editableTextGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(editableTextGroup)
# Translators: This is the label for a numeric control in the
# Advanced settings panel.
label = _("Caret movement timeout (in ms)")
self.caretMoveTimeoutSpinControl=editableTextGroup.addLabeledControl(
label,
nvdaControls.SelectOnFocusSpinCtrl,
min=0,
max=2000,
initial=config.conf["editableText"]["caretMoveTimeoutMs"]
)
self.caretMoveTimeoutSpinControl.defaultValue = self._getDefaultValue(["editableText", "caretMoveTimeoutMs"])
# Translators: This is the label for a group of advanced options in the
# Advanced settings panel
label = _("Debug logging")
debugLogGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(parent=self, label=label, orient=wx.VERTICAL)
)
sHelper.addItem(debugLogGroup)
self.logCategories=[
"hwIo",
"audioDucking",
"gui",
"louis",
"timeSinceInput",
]
# Translators: This is the label for a list in the
# Advanced settings panel
logCategoriesLabel=_("Enabled logging categories")
self.logCategoriesList=debugLogGroup.addLabeledControl(
logCategoriesLabel,
nvdaControls.CustomCheckListBox,
choices=self.logCategories
)
self.logCategoriesList.CheckedItems = [
index for index, x in enumerate(self.logCategories) if config.conf['debugLog'][x]
]
self.logCategoriesList.Select(0)
self.logCategoriesList.defaultCheckedItems = [
index for index, x in enumerate(self.logCategories) if bool(
self._getDefaultValue(['debugLog', x])
)
]
self.Layout()
def onOpenScratchpadDir(self,evt):
path=config.getScratchpadDir(ensureExists=True)
os.startfile(path)
def _getDefaultValue(self, configPath):
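# Look up the default value for a config key from its configobj validation spec.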
return config.conf.getConfigValidation(configPath).default
def haveConfigDefaultsBeenRestored(self):
return (
self._defaultsRestored and
self.scratchpadCheckBox.IsChecked() == self.scratchpadCheckBox.defaultValue and
self.UIAInMSWordCheckBox.IsChecked() == self.UIAInMSWordCheckBox.defaultValue and
self.autoFocusFocusableElementsCheckBox.IsChecked() == self.autoFocusFocusableElementsCheckBox.defaultValue and
self.caretMoveTimeoutSpinControl.GetValue() == self.caretMoveTimeoutSpinControl.defaultValue and
set(self.logCategoriesList.CheckedItems) == set(self.logCategoriesList.defaultCheckedItems) and
True # reduce noise in diff when the list is extended.
)
def restoreToDefaults(self):
self.scratchpadCheckBox.SetValue(self.scratchpadCheckBox.defaultValue)
self.UIAInMSWordCheckBox.SetValue(self.UIAInMSWordCheckBox.defaultValue)
self.autoFocusFocusableElementsCheckBox.SetValue(self.autoFocusFocusableElementsCheckBox.defaultValue)
self.caretMoveTimeoutSpinControl.SetValue(self.caretMoveTimeoutSpinControl.defaultValue)
self.logCategoriesList.CheckedItems = self.logCategoriesList.defaultCheckedItems
self._defaultsRestored = True
def onSave(self):
log.debug("Saving advanced config")
config.conf["development"]["enableScratchpadDir"]=self.scratchpadCheckBox.IsChecked()
config.conf["UIA"]["useInMSWordWhenAvailable"]=self.UIAInMSWordCheckBox.IsChecked()
config.conf["virtualBuffers"]["autoFocusFocusableElements"] = self.autoFocusFocusableElementsCheckBox.IsChecked()
config.conf["editableText"]["caretMoveTimeoutMs"]=self.caretMoveTimeoutSpinControl.GetValue()
for index,key in enumerate(self.logCategories):
config.conf['debugLog'][key]=self.logCategoriesList.IsChecked(index)
class AdvancedPanel(SettingsPanel):
enableControlsCheckBox = None # type: wx.CheckBox
# Translators: This is the label for the Advanced settings panel.
title = _("Advanced")
# Translators: This is the label to warn users about the Advanced options in the
# Advanced settings panel
warningHeader = _("Warning!")
# Translators: This is a label appearing on the Advanced settings panel.
warningExplanation = _(
"The following settings are for advanced users. "
"Changing them may cause NVDA to function incorrectly. "
"Please only change these if you know what you are doing or "
"have been specifically instructed by NVDA developers."
)
panelDescription = u"{}\n{}".format(warningHeader, warningExplanation)
def makeSettings(self, settingsSizer):
"""
:type settingsSizer: wx.BoxSizer
"""
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
warningGroup = guiHelper.BoxSizerHelper(
self,
sizer=wx.StaticBoxSizer(wx.StaticBox(self), wx.VERTICAL)
)
sHelper.addItem(warningGroup)
warningBox = warningGroup.sizer.GetStaticBox() # type: wx.StaticBox
warningText = wx.StaticText(warningBox, label=self.warningHeader)
warningText.SetFont(wx.Font(18, wx.FONTFAMILY_DEFAULT, wx.NORMAL, wx.BOLD))
warningGroup.addItem(warningText)
self.windowText = warningGroup.addItem(wx.StaticText(warningBox, label=self.warningExplanation))
self.windowText.Wrap(self.scaleSize(544))
# Translators: This is the label for a checkbox in the Advanced settings panel.
enableAdvancedControlslabel = _(
"I understand that changing these settings may cause NVDA to function incorrectly."
)
self.enableControlsCheckBox = warningGroup.addItem(
wx.CheckBox(parent=warningBox, label=enableAdvancedControlslabel, id=wx.NewIdRef())
)
boldedFont = self.enableControlsCheckBox.GetFont().Bold()
self.enableControlsCheckBox.SetFont(boldedFont)
restoreDefaultsButton = warningGroup.addItem(
# Translators: This is the label for a button in the Advanced settings panel
wx.Button(self, label=_("Restore defaults"))
)
restoreDefaultsButton.Bind(wx.EVT_BUTTON, lambda evt: self.advancedControls.restoreToDefaults())
self.advancedControls = AdvancedPanelControls(self)
sHelper.sizer.Add(self.advancedControls, flag=wx.EXPAND)
self.enableControlsCheckBox.Bind(
wx.EVT_CHECKBOX,
self.onEnableControlsCheckBox
)
self.advancedControls.Enable(self.enableControlsCheckBox.IsChecked())
def onSave(self):
if (
self.enableControlsCheckBox.IsChecked() or
self.advancedControls.haveConfigDefaultsBeenRestored()
):
self.advancedControls.onSave()
def onEnableControlsCheckBox(self, evt):
# Due to a not very well understood mis-ordering of event processing, we force NVDA to
# process pending events. This fixes an issue where the checkbox state was being reported
# incorrectly. This checkbox is slightly different from most, in that it enables more
# controls than is typical. That might cause enough of a delay that the state of the
# checkbox and the events processed by NVDA get out of sync.
from api import processPendingEvents
processPendingEvents()
self.advancedControls.Enable(evt.IsChecked())
class DictionaryEntryDialog(wx.Dialog):
TYPE_LABELS = {
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_ANYWHERE: _("&Anywhere"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_WORD: _("Whole &word"),
# Translators: This is a label for an Entry Type radio button in add dictionary entry dialog.
speechDictHandler.ENTRY_TYPE_REGEXP: _("Regular &expression")
}
TYPE_LABELS_ORDERING = (speechDictHandler.ENTRY_TYPE_ANYWHERE, speechDictHandler.ENTRY_TYPE_WORD, speechDictHandler.ENTRY_TYPE_REGEXP)
# Translators: This is the label for the edit dictionary entry dialog.
def __init__(self, parent, title=_("Edit Dictionary Entry")):
super(DictionaryEntryDialog,self).__init__(parent,title=title)
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is a label for an edit field in add dictionary entry dialog.
patternLabelText = _("&Pattern")
self.patternTextCtrl=sHelper.addLabeledControl(patternLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog and in punctuation/symbol pronunciation dialog.
replacementLabelText = _("&Replacement")
self.replacementTextCtrl=sHelper.addLabeledControl(replacementLabelText, wx.TextCtrl)
# Translators: This is a label for an edit field in add dictionary entry dialog.
commentLabelText = _("&Comment")
self.commentTextCtrl=sHelper.addLabeledControl(commentLabelText, wx.TextCtrl)
# Translators: This is a label for a checkbox in add dictionary entry dialog.
caseSensitiveText = _("Case &sensitive")
self.caseSensitiveCheckBox=sHelper.addItem(wx.CheckBox(self,label=caseSensitiveText))
# Translators: This is a label for a set of radio buttons in add dictionary entry dialog.
typeText = _("&Type")
typeChoices = [DictionaryEntryDialog.TYPE_LABELS[i] for i in DictionaryEntryDialog.TYPE_LABELS_ORDERING]
self.typeRadioBox=sHelper.addItem(wx.RadioBox(self,label=typeText, choices=typeChoices))
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK|wx.CANCEL))
mainSizer.Add(sHelper.sizer,border=20,flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.setType(speechDictHandler.ENTRY_TYPE_ANYWHERE)
self.patternTextCtrl.SetFocus()
self.Bind(wx.EVT_BUTTON,self.onOk,id=wx.ID_OK)
def getType(self):
typeRadioValue = self.typeRadioBox.GetSelection()
if typeRadioValue == wx.NOT_FOUND:
return speechDictHandler.ENTRY_TYPE_ANYWHERE
return DictionaryEntryDialog.TYPE_LABELS_ORDERING[typeRadioValue]
def onOk(self,evt):
if not self.patternTextCtrl.GetValue():
# Translators: This is an error message to let the user know that the pattern field in the dictionary entry is not valid.
gui.messageBox(_("A pattern is required."), _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
self.patternTextCtrl.SetFocus()
return
try:
self.dictEntry=speechDictHandler.SpeechDictEntry(self.patternTextCtrl.GetValue(),self.replacementTextCtrl.GetValue(),self.commentTextCtrl.GetValue(),bool(self.caseSensitiveCheckBox.GetValue()),self.getType())
except Exception as e:
log.debugWarning("Could not add dictionary entry due to (regex error) : %s" % e)
# Translators: This is an error message to let the user know that the dictionary entry is not valid.
gui.messageBox(_("Regular Expression error: \"%s\".")%e, _("Dictionary Entry Error"), wx.OK|wx.ICON_WARNING, self)
return
evt.Skip()
def setType(self, type):
self.typeRadioBox.SetSelection(DictionaryEntryDialog.TYPE_LABELS_ORDERING.index(type))
class DictionaryDialog(SettingsDialog):
TYPE_LABELS = {t: l.replace("&", "") for t, l in DictionaryEntryDialog.TYPE_LABELS.iteritems()}
def __init__(self,parent,title,speechDict):
self.title = title
self.speechDict = speechDict
self.tempSpeechDict=speechDictHandler.SpeechDict()
self.tempSpeechDict.extend(self.speechDict)
globalVars.speechDictionaryProcessing=False
super(DictionaryDialog, self).__init__(parent)
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for the combo box of dictionary entries in speech dictionary dialog.
entriesLabelText=_("&Dictionary entries")
self.dictList=sHelper.addLabeledControl(entriesLabelText, wx.ListCtrl, style=wx.LC_REPORT|wx.LC_SINGLE_SEL,size=(550,350))
# Translators: The label for a column in dictionary entries list used to identify comments for the entry.
self.dictList.InsertColumn(0,_("Comment"),width=150)
# Translators: The label for a column in dictionary entries list used to identify pattern (original word or a pattern).
self.dictList.InsertColumn(1,_("Pattern"),width=150)
# Translators: The label for a column in dictionary entries list and in a list of symbols from symbol pronunciation dialog used to identify replacement for a pattern or a symbol
self.dictList.InsertColumn(2,_("Replacement"),width=150)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is case sensitive or not.
self.dictList.InsertColumn(3,_("case"),width=50)
# Translators: The label for a column in dictionary entries list used to identify whether the entry is a regular expression, matches whole words, or matches anywhere.
self.dictList.InsertColumn(4,_("Type"),width=50)
self.offOn = (_("off"),_("on"))
for entry in self.tempSpeechDict:
self.dictList.Append((entry.comment,entry.pattern,entry.replacement,self.offOn[int(entry.caseSensitive)],DictionaryDialog.TYPE_LABELS[entry.type]))
self.editingIndex=-1
bHelper = guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to add new entries.
label=_("&Add")
).Bind(wx.EVT_BUTTON, self.OnAddClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to edit existing entries.
label=_("&Edit")
).Bind(wx.EVT_BUTTON, self.OnEditClick)
bHelper.addButton(
parent=self,
# Translators: The label for a button in speech dictionaries dialog to remove existing entries.
label=_("&Remove")
).Bind(wx.EVT_BUTTON, self.OnRemoveClick)
sHelper.addItem(bHelper)
def postInit(self):
self.dictList.SetFocus()
def onCancel(self,evt):
globalVars.speechDictionaryProcessing=True
super(DictionaryDialog, self).onCancel(evt)
def onOk(self,evt):
globalVars.speechDictionaryProcessing=True
if self.tempSpeechDict!=self.speechDict:
del self.speechDict[:]
self.speechDict.extend(self.tempSpeechDict)
self.speechDict.save()
super(DictionaryDialog, self).onOk(evt)
def OnAddClick(self,evt):
# Translators: This is the label for the add dictionary entry dialog.
entryDialog=DictionaryEntryDialog(self,title=_("Add Dictionary Entry"))
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict.append(entryDialog.dictEntry)
self.dictList.Append((entryDialog.commentTextCtrl.GetValue(),entryDialog.patternTextCtrl.GetValue(),entryDialog.replacementTextCtrl.GetValue(),self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())],DictionaryDialog.TYPE_LABELS[entryDialog.getType()]))
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.Select(index,on=0)
index=self.dictList.GetNextSelected(index)
addedIndex=self.dictList.GetItemCount()-1
self.dictList.Select(addedIndex)
self.dictList.Focus(addedIndex)
self.dictList.SetFocus()
entryDialog.Destroy()
def OnEditClick(self,evt):
if self.dictList.GetSelectedItemCount()!=1:
return
editIndex=self.dictList.GetFirstSelected()
if editIndex<0:
return
entryDialog=DictionaryEntryDialog(self)
entryDialog.patternTextCtrl.SetValue(self.tempSpeechDict[editIndex].pattern)
entryDialog.replacementTextCtrl.SetValue(self.tempSpeechDict[editIndex].replacement)
entryDialog.commentTextCtrl.SetValue(self.tempSpeechDict[editIndex].comment)
entryDialog.caseSensitiveCheckBox.SetValue(self.tempSpeechDict[editIndex].caseSensitive)
entryDialog.setType(self.tempSpeechDict[editIndex].type)
if entryDialog.ShowModal()==wx.ID_OK:
self.tempSpeechDict[editIndex]=entryDialog.dictEntry
self.dictList.SetItem(editIndex,0,entryDialog.commentTextCtrl.GetValue())
self.dictList.SetItem(editIndex,1,entryDialog.patternTextCtrl.GetValue())
self.dictList.SetItem(editIndex,2,entryDialog.replacementTextCtrl.GetValue())
self.dictList.SetItem(editIndex,3,self.offOn[int(entryDialog.caseSensitiveCheckBox.GetValue())])
self.dictList.SetItem(editIndex,4,DictionaryDialog.TYPE_LABELS[entryDialog.getType()])
self.dictList.SetFocus()
entryDialog.Destroy()
def OnRemoveClick(self,evt):
index=self.dictList.GetFirstSelected()
while index>=0:
self.dictList.DeleteItem(index)
del self.tempSpeechDict[index]
index=self.dictList.GetNextSelected(index)
self.dictList.SetFocus()
class BrailleSettingsPanel(SettingsPanel):
# Translators: This is the label for the braille panel
title = _("Braille")
def makeSettings(self, settingsSizer):
settingsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: A label for the braille display on the braille panel.
displayLabel = _("Braille &display")
displayBox = wx.StaticBox(self, label=displayLabel)
displayGroup = guiHelper.BoxSizerHelper(self, sizer=wx.StaticBoxSizer(displayBox, wx.HORIZONTAL))
settingsSizerHelper.addItem(displayGroup)
self.displayNameCtrl = ExpandoTextCtrl(self, size=(self.scaleSize(250), -1), style=wx.TE_READONLY)
self.updateCurrentDisplay()
# Translators: This is the label for the button used to change braille display,
# it appears in the context of a braille display group on the braille settings panel.
changeDisplayBtn = wx.Button(self, label=_("C&hange..."))
displayGroup.addItem(
guiHelper.associateElements(
self.displayNameCtrl,
changeDisplayBtn
)
)
self.displayNameCtrl.Bind(wx.EVT_CHAR_HOOK, self._enterTriggersOnChangeDisplay)
changeDisplayBtn.Bind(wx.EVT_BUTTON,self.onChangeDisplay)
self.brailleSubPanel = BrailleSettingsSubPanel(self)
settingsSizerHelper.addItem(self.brailleSubPanel)
def _enterTriggersOnChangeDisplay(self, evt):
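# Pressing Enter in the (read-only) display name field activates the display selection dialog, mirroring the Change... button.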
if evt.KeyCode == wx.WXK_RETURN:
self.onChangeDisplay(evt)
else:
evt.Skip()
def onChangeDisplay(self, evt):
changeDisplay = BrailleDisplaySelectionDialog(self, multiInstanceAllowed=True)
ret = changeDisplay.ShowModal()
if ret == wx.ID_OK:
self.Freeze()
# trigger a refresh of the settings
self.onPanelActivated()
self._sendLayoutUpdatedEvent()
self.Thaw()
def updateCurrentDisplay(self):
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
displayDesc = BrailleDisplaySelectionDialog.getCurrentAutoDisplayDescription()
else:
displayDesc = braille.handler.display.description
self.displayNameCtrl.SetValue(displayDesc)
def onPanelActivated(self):
self.brailleSubPanel.onPanelActivated()
super(BrailleSettingsPanel,self).onPanelActivated()
def onPanelDeactivated(self):
self.brailleSubPanel.onPanelDeactivated()
super(BrailleSettingsPanel,self).onPanelDeactivated()
def onDiscard(self):
self.brailleSubPanel.onDiscard()
def onSave(self):
self.brailleSubPanel.onSave()
class BrailleDisplaySelectionDialog(SettingsDialog):
# Translators: This is the label for the braille display selection dialog.
title = _("Select Braille Display")
displayNames = []
possiblePorts = []
def makeSettings(self, settingsSizer):
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label for a setting in braille settings to choose a braille display.
displayLabelText = _("Braille &display:")
self.displayList = sHelper.addLabeledControl(displayLabelText, wx.Choice, choices=[])
self.Bind(wx.EVT_CHOICE, self.onDisplayNameChanged, self.displayList)
# Translators: The label for a setting in braille settings to choose the connection port (if the selected braille display supports port selection).
portsLabelText = _("&Port:")
self.portsList = sHelper.addLabeledControl(portsLabelText, wx.Choice, choices=[])
self.updateBrailleDisplayLists()
def postInit(self):
# Finally, ensure that focus is on the list of displays.
self.displayList.SetFocus()
@staticmethod
def getCurrentAutoDisplayDescription():
description = braille.AUTOMATIC_PORT[1]
if (
config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME
and braille.handler.display.name != "noBraille"
):
description = "%s (%s)" % (description, braille.handler.display.description)
return description
def updateBrailleDisplayLists(self):
driverList = [(braille.AUTO_DISPLAY_NAME, self.getCurrentAutoDisplayDescription())]
driverList.extend(braille.getDisplayList())
self.displayNames = [driver[0] for driver in driverList]
displayChoices = [driver[1] for driver in driverList]
self.displayList.Clear()
self.displayList.AppendItems(displayChoices)
try:
if config.conf["braille"]["display"] == braille.AUTO_DISPLAY_NAME:
selection = 0
else:
selection = self.displayNames.index(braille.handler.display.name)
self.displayList.SetSelection(selection)
except:
pass
self.updatePossiblePorts()
def updatePossiblePorts(self):
displayName = self.displayNames[self.displayList.GetSelection()]
self.possiblePorts = []
if displayName != "auto":
displayCls = braille._getDisplayDriver(displayName)
try:
self.possiblePorts.extend(displayCls.getPossiblePorts().iteritems())
except NotImplementedError:
pass
if self.possiblePorts:
self.portsList.SetItems([p[1] for p in self.possiblePorts])
try:
selectedPort = config.conf["braille"][displayName].get("port")
portNames = [p[0] for p in self.possiblePorts]
selection = portNames.index(selectedPort)
except (KeyError, ValueError):
# Display name not in config or port not valid
selection = 0
self.portsList.SetSelection(selection)
# If no port selection is possible or only automatic selection is available, disable the port selection control
enable = len(self.possiblePorts) > 0 and not (len(self.possiblePorts) == 1 and self.possiblePorts[0][0] == "auto")
self.portsList.Enable(enable)
def onDisplayNameChanged(self, evt):
self.updatePossiblePorts()
def onOk(self, evt):
if not self.displayNames:
# The list of displays has not been populated yet, so we didn't change anything in this panel
return
display = self.displayNames[self.displayList.GetSelection()]
if display not in config.conf["braille"]:
config.conf["braille"][display] = {}
if self.possiblePorts:
port = self.possiblePorts[self.portsList.GetSelection()][0]
config.conf["braille"][display]["port"] = port
if not braille.handler.setDisplayByName(display):
gui.messageBox(_("Could not load the %s display.")%display, _("Braille Display Error"), wx.OK|wx.ICON_WARNING, self)
return
if self.IsModal():
# Hack: we need to update the display in our parent window before closing.
# Otherwise, NVDA will report the old display even though the new display is reflected visually.
self.Parent.updateCurrentDisplay()
super(BrailleDisplaySelectionDialog, self).onOk(evt)
class BrailleSettingsSubPanel(DriverSettingsMixin, SettingsPanel):
@property
def driver(self):
return braille.handler.display
def makeSettings(self, settingsSizer):
if gui._isDebug():
startTime = time.time()
# Construct braille display specific settings
self.updateDriverSettings()
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
tables = brailleTables.listTables()
# Translators: The label for a setting in braille settings to select the output table (the braille table used to read braille text on the braille display).
outputsLabelText = _("&Output table:")
outTables = [table for table in tables if table.output]
self.outTableNames = [table.fileName for table in outTables]
outTableChoices = [table.displayName for table in outTables]
self.outTableList = sHelper.addLabeledControl(outputsLabelText, wx.Choice, choices=outTableChoices)
try:
selection = self.outTableNames.index(config.conf["braille"]["translationTable"])
self.outTableList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading output tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to select the input table (the braille table used to type braille characters on a braille keyboard).
inputLabelText = _("&Input table:")
self.inTables = [table for table in tables if table.input]
inTableChoices = [table.displayName for table in self.inTables]
self.inTableList = sHelper.addLabeledControl(inputLabelText, wx.Choice, choices=inTableChoices)
try:
selection = self.inTables.index(brailleInput.handler.table)
self.inTableList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading input tables completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to expand the current word under cursor to computer braille.
expandAtCursorText = _("E&xpand to computer braille for the word at the cursor")
self.expandAtCursorCheckBox = sHelper.addItem(wx.CheckBox(self, wx.ID_ANY, label=expandAtCursorText))
self.expandAtCursorCheckBox.SetValue(config.conf["braille"]["expandAtCursor"])
# Translators: The label for a setting in braille settings to show the cursor.
showCursorLabelText = _("&Show cursor")
self.showCursorCheckBox = sHelper.addItem(wx.CheckBox(self, label=showCursorLabelText))
self.showCursorCheckBox.Bind(wx.EVT_CHECKBOX, self.onShowCursorChange)
self.showCursorCheckBox.SetValue(config.conf["braille"]["showCursor"])
# Translators: The label for a setting in braille settings to enable cursor blinking.
cursorBlinkLabelText = _("Blink cursor")
self.cursorBlinkCheckBox = sHelper.addItem(wx.CheckBox(self, label=cursorBlinkLabelText))
self.cursorBlinkCheckBox.Bind(wx.EVT_CHECKBOX, self.onBlinkCursorChange)
self.cursorBlinkCheckBox.SetValue(config.conf["braille"]["cursorBlink"])
if not self.showCursorCheckBox.GetValue():
self.cursorBlinkCheckBox.Disable()
# Translators: The label for a setting in braille settings to change cursor blink rate in milliseconds (1 second is 1000 milliseconds).
cursorBlinkRateLabelText = _("Cursor blink rate (ms)")
minBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "min"))
maxBlinkRate = int(config.conf.getConfigValidationParameter(["braille", "cursorBlinkRate"], "max"))
self.cursorBlinkRateEdit = sHelper.addLabeledControl(cursorBlinkRateLabelText, nvdaControls.SelectOnFocusSpinCtrl,
min=minBlinkRate, max=maxBlinkRate, initial=config.conf["braille"]["cursorBlinkRate"])
if not self.showCursorCheckBox.GetValue() or not self.cursorBlinkCheckBox.GetValue():
self.cursorBlinkRateEdit.Disable()
self.cursorShapes = [s[0] for s in braille.CURSOR_SHAPES]
cursorShapeChoices = [s[1] for s in braille.CURSOR_SHAPES]
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to focus.
cursorShapeFocusLabelText = _("Cursor shape for &focus:")
self.cursorShapeFocusList = sHelper.addLabeledControl(cursorShapeFocusLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeFocus"])
self.cursorShapeFocusList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeFocusList.Disable()
# Translators: The label for a setting in braille settings to select the cursor shape when tethered to review.
cursorShapeReviewLabelText = _("Cursor shape for &review:")
self.cursorShapeReviewList = sHelper.addLabeledControl(cursorShapeReviewLabelText, wx.Choice, choices=cursorShapeChoices)
try:
selection = self.cursorShapes.index(config.conf["braille"]["cursorShapeReview"])
self.cursorShapeReviewList.SetSelection(selection)
except:
pass
if not self.showCursorCheckBox.GetValue():
self.cursorShapeReviewList.Disable()
if gui._isDebug():
log.debug("Loading cursor settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to change how long a message stays on the braille display (in seconds).
messageTimeoutText = _("Message &timeout (sec)")
self.messageTimeoutEdit = sHelper.addLabeledControl(messageTimeoutText, nvdaControls.SelectOnFocusSpinCtrl,
min=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "min")),
max=int(config.conf.getConfigValidationParameter(["braille", "messageTimeout"], "max")),
initial=config.conf["braille"]["messageTimeout"])
# Translators: The label for a setting in braille settings to display a message on the braille display indefinitely.
noMessageTimeoutLabelText = _("Show &messages indefinitely")
self.noMessageTimeoutCheckBox = sHelper.addItem(wx.CheckBox(self, label=noMessageTimeoutLabelText))
self.noMessageTimeoutCheckBox.Bind(wx.EVT_CHECKBOX, self.onNoMessageTimeoutChange)
self.noMessageTimeoutCheckBox.SetValue(config.conf["braille"]["noMessageTimeout"])
if self.noMessageTimeoutCheckBox.GetValue():
self.messageTimeoutEdit.Disable()
if gui._isDebug():
log.debug("Loading timeout settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to set whether braille should be tethered to focus or review cursor.
tetherListText = _("Tether B&raille:")
# Translators: The value for a setting in the braille settings, to set whether braille should be tethered to focus or review cursor.
tetherChoices = [x[1] for x in braille.handler.tetherValues]
self.tetherList = sHelper.addLabeledControl(tetherListText, wx.Choice, choices=tetherChoices)
tetherChoice=braille.handler.TETHER_AUTO if config.conf["braille"]["autoTether"] else config.conf["braille"]["tetherTo"]
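# Find the index of the currently configured tether choice in braille.handler.tetherValues.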
selection = (x for x,y in enumerate(braille.handler.tetherValues) if y[0]==tetherChoice).next()
try:
self.tetherList.SetSelection(selection)
except:
pass
if gui._isDebug():
log.debug("Loading tether settings completed, now at %.2f seconds from start"%(time.time() - startTime))
# Translators: The label for a setting in braille settings to read by paragraph (if it is checked, the commands to move the display by lines move the display by paragraphs instead).
readByParagraphText = _("Read by &paragraph")
self.readByParagraphCheckBox = sHelper.addItem(wx.CheckBox(self, label=readByParagraphText))
self.readByParagraphCheckBox.Value = config.conf["braille"]["readByParagraph"]
# Translators: The label for a setting in braille settings to enable word wrap (try to avoid splitting words at the end of the braille display).
wordWrapText = _("Avoid splitting &words when possible")
self.wordWrapCheckBox = sHelper.addItem(wx.CheckBox(self, label=wordWrapText))
self.wordWrapCheckBox.Value = config.conf["braille"]["wordWrap"]
# Translators: The label for a setting in braille settings to select how the context for the focus object should be presented on a braille display.
focusContextPresentationLabelText = _("Focus context presentation:")
self.focusContextPresentationValues = [x[0] for x in braille.focusContextPresentations]
focusContextPresentationChoices = [x[1] for x in braille.focusContextPresentations]
self.focusContextPresentationList = sHelper.addLabeledControl(focusContextPresentationLabelText, wx.Choice, choices=focusContextPresentationChoices)
try:
index=self.focusContextPresentationValues.index(config.conf["braille"]["focusContextPresentation"])
except:
index=0
self.focusContextPresentationList.SetSelection(index)
if gui._isDebug():
log.debug("Finished making settings, now at %.2f seconds from start"%(time.time() - startTime))
def onSave(self):
DriverSettingsMixin.onSave(self)
config.conf["braille"]["translationTable"] = self.outTableNames[self.outTableList.GetSelection()]
brailleInput.handler.table = self.inTables[self.inTableList.GetSelection()]
config.conf["braille"]["expandAtCursor"] = self.expandAtCursorCheckBox.GetValue()
config.conf["braille"]["showCursor"] = self.showCursorCheckBox.GetValue()
config.conf["braille"]["cursorBlink"] = self.cursorBlinkCheckBox.GetValue()
config.conf["braille"]["cursorBlinkRate"] = self.cursorBlinkRateEdit.GetValue()
config.conf["braille"]["cursorShapeFocus"] = self.cursorShapes[self.cursorShapeFocusList.GetSelection()]
config.conf["braille"]["cursorShapeReview"] = self.cursorShapes[self.cursorShapeReviewList.GetSelection()]
config.conf["braille"]["noMessageTimeout"] = self.noMessageTimeoutCheckBox.GetValue()
config.conf["braille"]["messageTimeout"] = self.messageTimeoutEdit.GetValue()
tetherChoice = braille.handler.tetherValues[self.tetherList.GetSelection()][0]
if tetherChoice==braille.handler.TETHER_AUTO:
config.conf["braille"]["autoTether"] = True
config.conf["braille"]["tetherTo"] = braille.handler.TETHER_FOCUS
else:
config.conf["braille"]["autoTether"] = False
braille.handler.setTether(tetherChoice, auto=False)
config.conf["braille"]["readByParagraph"] = self.readByParagraphCheckBox.Value
config.conf["braille"]["wordWrap"] = self.wordWrapCheckBox.Value
config.conf["braille"]["focusContextPresentation"] = self.focusContextPresentationValues[self.focusContextPresentationList.GetSelection()]
def onShowCursorChange(self, evt):
self.cursorBlinkCheckBox.Enable(evt.IsChecked())
self.cursorBlinkRateEdit.Enable(evt.IsChecked() and self.cursorBlinkCheckBox.GetValue())
self.cursorShapeFocusList.Enable(evt.IsChecked())
self.cursorShapeReviewList.Enable(evt.IsChecked())
def onBlinkCursorChange(self, evt):
self.cursorBlinkRateEdit.Enable(evt.IsChecked())
def onNoMessageTimeoutChange(self, evt):
self.messageTimeoutEdit.Enable(not evt.IsChecked())
""" The name of the config profile currently being edited, if any.
This is set when the currently edited configuration profile is determined and returned to None when the dialog is destroyed.
This can be used by an AppModule for NVDA to identify and announce
changes in the name of the edited configuration profile when categories are changed"""
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
class NVDASettingsDialog(MultiCategorySettingsDialog):
# Translators: This is the label for the NVDA settings dialog.
title = _("NVDA Settings")
categoryClasses=[
GeneralSettingsPanel,
SpeechSettingsPanel,
BrailleSettingsPanel,
KeyboardSettingsPanel,
MouseSettingsPanel,
ReviewCursorPanel,
InputCompositionPanel,
ObjectPresentationPanel,
BrowseModePanel,
DocumentFormattingPanel,
]
if touchHandler.touchSupported():
categoryClasses.append(TouchInteractionPanel)
if winVersion.isUwpOcrAvailable():
categoryClasses.append(UwpOcrPanel)
# And finally the Advanced panel which should always be last.
if not globalVars.appArgs.secure:
categoryClasses.append(AdvancedPanel)
def makeSettings(self, settingsSizer):
# Ensure that after the settings dialog is created the name is set correctly
super(NVDASettingsDialog, self).makeSettings(settingsSizer)
self._doOnCategoryChange()
global NvdaSettingsDialogWindowHandle
NvdaSettingsDialogWindowHandle = self.GetHandle()
def _doOnCategoryChange(self):
global NvdaSettingsDialogActiveConfigProfile
NvdaSettingsDialogActiveConfigProfile = config.conf.profiles[-1].name
if not NvdaSettingsDialogActiveConfigProfile or isinstance(self.currentCategory, GeneralSettingsPanel):
# Translators: The profile name for normal configuration
NvdaSettingsDialogActiveConfigProfile = _("normal configuration")
self.SetTitle(self._getDialogTitle())
def _getDialogTitle(self):
return u"{dialogTitle}: {panelTitle} ({configProfile})".format(
dialogTitle=self.title,
panelTitle=self.currentCategory.title,
configProfile=NvdaSettingsDialogActiveConfigProfile
)
def onCategoryChange(self,evt):
super(NVDASettingsDialog,self).onCategoryChange(evt)
if evt.Skipped:
return
self._doOnCategoryChange()
def Destroy(self):
global NvdaSettingsDialogActiveConfigProfile, NvdaSettingsDialogWindowHandle
NvdaSettingsDialogActiveConfigProfile = None
NvdaSettingsDialogWindowHandle = None
super(NVDASettingsDialog, self).Destroy()
class AddSymbolDialog(wx.Dialog):
def __init__(self, parent):
# Translators: This is the label for the add symbol dialog.
super(AddSymbolDialog,self).__init__(parent, title=_("Add Symbol"))
mainSizer=wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: This is the label for the edit field in the add symbol dialog.
symbolText = _("Symbol:")
self.identifierTextCtrl = sHelper.addLabeledControl(symbolText, wx.TextCtrl)
sHelper.addDialogDismissButtons(self.CreateButtonSizer(wx.OK | wx.CANCEL))
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.identifierTextCtrl.SetFocus()
self.CentreOnScreen()
class SpeechSymbolsDialog(SettingsDialog):
def __init__(self,parent):
try:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData(speech.getCurrentLanguage())
except LookupError:
symbolProcessor = characterProcessing._localeSpeechSymbolProcessors.fetchLocaleData("en")
self.symbolProcessor = symbolProcessor
# Translators: This is the label for the symbol pronunciation dialog.
# %s is replaced by the language for which symbol pronunciation is being edited.
self.title = _("Symbol Pronunciation (%s)")%languageHandler.getLanguageDescription(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).__init__(
parent,
resizeable=True,
)
def makeSettings(self, settingsSizer):
self.filteredSymbols = self.symbols = [
copy.copy(symbol) for symbol in self.symbolProcessor.computedSymbols.itervalues()
]
self.pendingRemovals = {}
sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
# Translators: The label of a text field to search for symbols in the speech symbols dialog.
filterText = pgettext("speechSymbols", "&Filter by:")
self.filterEdit = sHelper.addLabeledControl(
labelText = filterText,
wxCtrlClass=wx.TextCtrl,
size=self.scaleSize((310, -1)),
)
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
# Translators: The label for symbols list in symbol pronunciation dialog.
symbolsText = _("&Symbols")
self.symbolsList = sHelper.addLabeledControl(
symbolsText,
nvdaControls.AutoWidthColumnListCtrl,
autoSizeColumn=2, # The replacement column is likely to need the most space
itemTextCallable=self.getItemTextForList,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.LC_VIRTUAL
)
# Translators: The label for a column in symbols list used to identify a symbol.
self.symbolsList.InsertColumn(0, _("Symbol"), width=self.scaleSize(150))
self.symbolsList.InsertColumn(1, _("Replacement"))
# Translators: The label for a column in symbols list used to identify a symbol's speech level (either none, some, most, all or character).
self.symbolsList.InsertColumn(2, _("Level"))
# Translators: The label for a column in symbols list which specifies when the actual symbol will be sent to the synthesizer (preserved).
# See the "Punctuation/Symbol Pronunciation" section of the User Guide for details.
self.symbolsList.InsertColumn(3, _("Preserve"))
self.symbolsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemFocused)
# Translators: The label for the group of controls in symbol pronunciation dialog to change the pronunciation of a symbol.
changeSymbolText = _("Change selected symbol")
changeSymbolHelper = sHelper.addItem(guiHelper.BoxSizerHelper(
parent=self,
sizer=wx.StaticBoxSizer(
parent=self,
label=changeSymbolText,
orient=wx.VERTICAL,
)
))
# Used to ensure that event handlers call Skip(). Not calling Skip() can cause focus problems for controls.
# More generally, the advice in the wx documentation is: "In general, it is recommended to skip all non-command events
# to allow the default handling to take place. The command events are, however, normally not skipped as usually
# a single command such as a button click or menu item selection must only be processed by one handler."
def skipEventAndCall(handler):
def wrapWithEventSkip(event):
if event:
event.Skip()
return handler()
return wrapWithEventSkip
# Translators: The label for the edit field in symbol pronunciation dialog to change the replacement text of a symbol.
replacementText = _("&Replacement")
self.replacementEdit = sHelper.addLabeledControl(
labelText=replacementText,
wxCtrlClass=wx.TextCtrl,
size=self.scaleSize((300, -1)),
)
self.replacementEdit.Bind(wx.EVT_TEXT, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change the speech level of a symbol.
levelText = _("&Level")
symbolLevelLabels = characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS
levelChoices = [symbolLevelLabels[level] for level in characterProcessing.SPEECH_SYMBOL_LEVELS]
self.levelList = changeSymbolHelper.addLabeledControl(levelText, wx.Choice, choices=levelChoices)
self.levelList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
# Translators: The label for the combo box in symbol pronunciation dialog to change when a symbol is sent to the synthesizer.
preserveText = _("&Send actual symbol to synthesizer")
symbolPreserveLabels = characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS
preserveChoices = [symbolPreserveLabels[mode] for mode in characterProcessing.SPEECH_SYMBOL_PRESERVES]
self.preserveList = changeSymbolHelper.addLabeledControl(preserveText, wx.Choice, choices=preserveChoices)
self.preserveList.Bind(wx.EVT_CHOICE, skipEventAndCall(self.onSymbolEdited))
bHelper = sHelper.addItem(guiHelper.ButtonHelper(orientation=wx.HORIZONTAL))
# Translators: The label for a button in the Symbol Pronunciation dialog to add a new symbol.
addButton = bHelper.addButton(self, label=_("&Add"))
# Translators: The label for a button in the Symbol Pronunciation dialog to remove a symbol.
self.removeButton = bHelper.addButton(self, label=_("Re&move"))
self.removeButton.Disable()
addButton.Bind(wx.EVT_BUTTON, self.OnAddClick)
self.removeButton.Bind(wx.EVT_BUTTON, self.OnRemoveClick)
# Populate the unfiltered list with symbols.
self.filter()
def postInit(self):
size = self.GetBestSize()
self.SetSizeHints(
minW=size.GetWidth(),
minH=size.GetHeight(),
maxH=size.GetHeight(),
)
self.symbolsList.SetFocus()
def filter(self, filterText=''):
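# Filter the symbols list by filterText (matching symbol names and replacements case-insensitively), trying to preserve the previous selection.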
NONE_SELECTED = -1
previousSelectionValue = None
previousIndex = self.symbolsList.GetFirstSelected() # may return NONE_SELECTED
if previousIndex != NONE_SELECTED:
previousSelectionValue = self.filteredSymbols[previousIndex]
if not filterText:
self.filteredSymbols = self.symbols
else:
# Do case-insensitive matching by lowering both filterText and each symbol's text.
filterText = filterText.lower()
self.filteredSymbols = [
symbol for symbol in self.symbols
if filterText in symbol.displayName.lower()
or filterText in symbol.replacement.lower()
]
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes filtering may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
return # exit early, no need to select an item.
# If there was a selection before filtering, try to preserve it
newIndex = 0 # select first item by default.
if previousSelectionValue:
try:
newIndex = self.filteredSymbols.index(previousSelectionValue)
except ValueError:
pass
# Change the selection
self.symbolsList.Select(newIndex)
self.symbolsList.Focus(newIndex)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(newIndex)
def getItemTextForList(self, item, column):
symbol = self.filteredSymbols[item]
if column == 0:
return symbol.displayName
elif column == 1:
return symbol.replacement
elif column == 2:
return characterProcessing.SPEECH_SYMBOL_LEVEL_LABELS[symbol.level]
elif column == 3:
return characterProcessing.SPEECH_SYMBOL_PRESERVE_LABELS[symbol.preserve]
else:
raise ValueError("Unknown column: %d" % column)
def onSymbolEdited(self):
if self.editingItem is not None:
# Update the symbol the user was just editing.
item = self.editingItem
symbol = self.filteredSymbols[item]
symbol.replacement = self.replacementEdit.Value
symbol.level = characterProcessing.SPEECH_SYMBOL_LEVELS[self.levelList.Selection]
symbol.preserve = characterProcessing.SPEECH_SYMBOL_PRESERVES[self.preserveList.Selection]
def onListItemFocused(self, evt):
# Update the editing controls to reflect the newly selected symbol.
item = evt.GetIndex()
symbol = self.filteredSymbols[item]
self.editingItem = item
# ChangeValue and the Selection property are used because they do not fire change events (EVT_TEXT / EVT_CHOICE).
self.replacementEdit.ChangeValue(symbol.replacement)
self.levelList.Selection = characterProcessing.SPEECH_SYMBOL_LEVELS.index(symbol.level)
self.preserveList.Selection = characterProcessing.SPEECH_SYMBOL_PRESERVES.index(symbol.preserve)
self.removeButton.Enabled = not self.symbolProcessor.isBuiltin(symbol.identifier)
self.replacementEdit.Enable()
self.levelList.Enable()
self.preserveList.Enable()
evt.Skip()
def OnAddClick(self, evt):
with AddSymbolDialog(self) as entryDialog:
if entryDialog.ShowModal() != wx.ID_OK:
return
identifier = entryDialog.identifierTextCtrl.GetValue()
if not identifier:
return
# Clean the filter, so we can select the new entry.
self.filterEdit.Value=""
self.filter()
for index, symbol in enumerate(self.symbols):
if identifier == symbol.identifier:
# Translators: An error reported in the Symbol Pronunciation dialog when adding a symbol that is already present.
gui.messageBox(_('Symbol "%s" is already present.') % identifier,
_("Error"), wx.OK | wx.ICON_ERROR)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
self.symbolsList.SetFocus()
return
addedSymbol = characterProcessing.SpeechSymbol(identifier)
try:
del self.pendingRemovals[identifier]
except KeyError:
pass
addedSymbol.displayName = identifier
addedSymbol.replacement = ""
addedSymbol.level = characterProcessing.SYMLVL_ALL
addedSymbol.preserve = characterProcessing.SYMPRES_NEVER
self.symbols.append(addedSymbol)
self.symbolsList.ItemCount = len(self.symbols)
index = self.symbolsList.ItemCount - 1
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def OnRemoveClick(self, evt):
index = self.symbolsList.GetFirstSelected()
symbol = self.filteredSymbols[index]
self.pendingRemovals[symbol.identifier] = symbol
del self.filteredSymbols[index]
if self.filteredSymbols is not self.symbols:
self.symbols.remove(symbol)
self.symbolsList.ItemCount = len(self.filteredSymbols)
# sometimes removing may result in an empty list.
if not self.symbolsList.ItemCount:
self.editingItem = None
# disable the "change symbol" controls, since there are no items in the list.
self.replacementEdit.Disable()
self.levelList.Disable()
self.preserveList.Disable()
self.removeButton.Disable()
else:
index = min(index, self.symbolsList.ItemCount - 1)
self.symbolsList.Select(index)
self.symbolsList.Focus(index)
# We don't get a new focus event with the new index.
self.symbolsList.sendListItemFocusedEvent(index)
self.symbolsList.SetFocus()
def onOk(self, evt):
self.onSymbolEdited()
self.editingItem = None
for symbol in self.pendingRemovals.itervalues():
self.symbolProcessor.deleteSymbol(symbol)
for symbol in self.symbols:
if not symbol.replacement:
continue
self.symbolProcessor.updateSymbol(symbol)
try:
self.symbolProcessor.userSymbols.save()
except IOError as e:
log.error("Error saving user symbols info: %s" % e)
characterProcessing._localeSpeechSymbolProcessors.invalidateLocaleData(self.symbolProcessor.locale)
super(SpeechSymbolsDialog, self).onOk(evt)
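	# Redraw only the rows of the virtual list that are currently visible on screen.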
def _refreshVisibleItems(self):
count = self.symbolsList.GetCountPerPage()
first = self.symbolsList.GetTopItem()
self.symbolsList.RefreshItems(first, first+count)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.Value)
self._refreshVisibleItems()
evt.Skip()
class InputGesturesDialog(SettingsDialog):
# Translators: The title of the Input Gestures dialog where the user can remap input gestures for commands.
title = _("Input Gestures")
def makeSettings(self, settingsSizer):
filterSizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of a text field to search for gestures in the Input Gestures dialog.
filterLabel = wx.StaticText(self, label=pgettext("inputGestures", "&Filter by:"))
filter = wx.TextCtrl(self)
filterSizer.Add(filterLabel, flag=wx.ALIGN_CENTER_VERTICAL)
filterSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_HORIZONTAL)
filterSizer.Add(filter, proportion=1)
settingsSizer.Add(filterSizer, flag=wx.EXPAND)
settingsSizer.AddSpacer(5)
filter.Bind(wx.EVT_TEXT, self.onFilterChange, filter)
tree = self.tree = wx.TreeCtrl(self, size=wx.Size(600, 400), style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE )
self.treeRoot = tree.AddRoot("root")
tree.Bind(wx.EVT_TREE_SEL_CHANGED, self.onTreeSelect)
settingsSizer.Add(tree, proportion=1, flag=wx.EXPAND)
self.gestures = inputCore.manager.getAllGestureMappings(obj=gui.mainFrame.prevFocus, ancestors=gui.mainFrame.prevFocusAncestors)
self.populateTree()
settingsSizer.AddSpacer(guiHelper.SPACE_BETWEEN_ASSOCIATED_CONTROL_VERTICAL)
bHelper = guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to add a gesture in the Input Gestures dialog.
self.addButton = bHelper.addButton(self, label=_("&Add"))
self.addButton.Bind(wx.EVT_BUTTON, self.onAdd)
self.addButton.Disable()
# Translators: The label of a button to remove a gesture in the Input Gestures dialog.
self.removeButton = bHelper.addButton(self, label=_("&Remove"))
self.removeButton.Bind(wx.EVT_BUTTON, self.onRemove)
self.removeButton.Disable()
self.pendingAdds = set()
self.pendingRemoves = set()
settingsSizer.Add(bHelper.sizer)
def postInit(self):
self.tree.SetFocus()
def populateTree(self, filter=''):
if filter:
#This regexp uses a positive lookahead (?=...) for every word in the filter, which just makes sure the word is present in the string to be tested without matching position or order.
# #5060: Escape the filter text to prevent unexpected matches and regexp errors.
# Because we're escaping, words must then be split on "\ ".
filter = re.escape(filter)
filterReg = re.compile(r'(?=.*?' + r')(?=.*?'.join(filter.split('\ ')) + r')', re.U|re.IGNORECASE)
for category in sorted(self.gestures):
treeCat = self.tree.AppendItem(self.treeRoot, category)
commands = self.gestures[category]
for command in sorted(commands):
if filter and not filterReg.match(command):
continue
treeCom = self.tree.AppendItem(treeCat, command)
commandInfo = commands[command]
self.tree.SetItemData(treeCom, commandInfo)
for gesture in commandInfo.gestures:
treeGes = self.tree.AppendItem(treeCom, self._formatGesture(gesture))
self.tree.SetItemData(treeGes, gesture)
if not self.tree.ItemHasChildren(treeCat):
self.tree.Delete(treeCat)
elif filter:
self.tree.Expand(treeCat)
def onFilterChange(self, evt):
filter=evt.GetEventObject().GetValue()
self.tree.DeleteChildren(self.treeRoot)
self.populateTree(filter)
def _formatGesture(self, identifier):
try:
source, main = inputCore.getDisplayTextForGestureIdentifier(identifier)
# Translators: Describes a gesture in the Input Gestures dialog.
# {main} is replaced with the main part of the gesture; e.g. alt+tab.
# {source} is replaced with the gesture's source; e.g. laptop keyboard.
return _("{main} ({source})").format(main=main, source=source)
except LookupError:
return identifier
def onTreeSelect(self, evt):
# #7077: Check if the treeview is still alive.
try:
item = self.tree.Selection
except RuntimeError:
return
data = self.tree.GetItemData(item)
isCommand = isinstance(data, inputCore.AllGesturesScriptInfo)
isGesture = isinstance(data, basestring)
self.addButton.Enabled = isCommand or isGesture
self.removeButton.Enabled = isGesture
def onAdd(self, evt):
if inputCore.manager._captureFunc:
return
treeCom = self.tree.Selection
scriptInfo = self.tree.GetItemData(treeCom)
if not isinstance(scriptInfo, inputCore.AllGesturesScriptInfo):
treeCom = self.tree.GetItemParent(treeCom)
scriptInfo = self.tree.GetItemData(treeCom)
# Translators: The prompt to enter a gesture in the Input Gestures dialog.
treeGes = self.tree.AppendItem(treeCom, _("Enter input gesture:"))
self.tree.SelectItem(treeGes)
self.tree.SetFocus()
def addGestureCaptor(gesture):
if gesture.isModifier:
return False
inputCore.manager._captureFunc = None
wx.CallAfter(self._addCaptured, treeGes, scriptInfo, gesture)
return False
inputCore.manager._captureFunc = addGestureCaptor
def _addCaptured(self, treeGes, scriptInfo, gesture):
gids = gesture.normalizedIdentifiers
if len(gids) > 1:
# Multiple choices. Present them in a pop-up menu.
menu = wx.Menu()
for gid in gids:
disp = self._formatGesture(gid)
item = menu.Append(wx.ID_ANY, disp)
self.Bind(wx.EVT_MENU,
lambda evt, gid=gid, disp=disp: self._addChoice(treeGes, scriptInfo, gid, disp),
item)
self.PopupMenu(menu)
if not self.tree.GetItemData(treeGes):
# No item was selected, so use the first.
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
menu.Destroy()
else:
self._addChoice(treeGes, scriptInfo, gids[0],
self._formatGesture(gids[0]))
def _addChoice(self, treeGes, scriptInfo, gid, disp):
entry = (gid, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just removed, just undo it.
self.pendingRemoves.remove(entry)
except KeyError:
self.pendingAdds.add(entry)
self.tree.SetItemText(treeGes, disp)
self.tree.SetItemData(treeGes, gid)
scriptInfo.gestures.append(gid)
self.onTreeSelect(None)
def onRemove(self, evt):
treeGes = self.tree.Selection
gesture = self.tree.GetItemData(treeGes)
treeCom = self.tree.GetItemParent(treeGes)
scriptInfo = self.tree.GetItemData(treeCom)
entry = (gesture, scriptInfo.moduleName, scriptInfo.className, scriptInfo.scriptName)
try:
# If this was just added, just undo it.
self.pendingAdds.remove(entry)
except KeyError:
self.pendingRemoves.add(entry)
self.tree.Delete(treeGes)
scriptInfo.gestures.remove(gesture)
self.tree.SetFocus()
def onOk(self, evt):
for gesture, module, className, scriptName in self.pendingRemoves:
try:
inputCore.manager.userGestureMap.remove(gesture, module, className, scriptName)
except ValueError:
# The user wants to unbind a gesture they didn't define.
inputCore.manager.userGestureMap.add(gesture, module, className, None)
for gesture, module, className, scriptName in self.pendingAdds:
try:
# The user might have unbound this gesture,
# so remove this override first.
inputCore.manager.userGestureMap.remove(gesture, module, className, None)
except ValueError:
pass
inputCore.manager.userGestureMap.add(gesture, module, className, scriptName)
if self.pendingAdds or self.pendingRemoves:
# Only save if there is something to save.
try:
inputCore.manager.userGestureMap.save()
except:
log.debugWarning("", exc_info=True)
# Translators: An error displayed when saving user defined input gestures fails.
gui.messageBox(_("Error saving user defined gestures - probably read only file system."),
_("Error"), wx.OK | wx.ICON_ERROR)
super(InputGesturesDialog, self).onOk(evt)
| 1 | 25,231 |
Why is a restart necessary?
|
nvaccess-nvda
|
py
|
@@ -458,7 +458,11 @@ func (task *Task) addGPUResource(cfg *config.Config) error {
container.GPUIDs = append(container.GPUIDs, association.Name)
}
}
- task.populateGPUEnvironmentVariables()
+ // For external instances, GPU IDs are handled by resources struct
+ // For internal instances, GPU IDs are handled by env var
+ if !cfg.External.Enabled() {
+ task.populateGPUEnvironmentVariables()
+ }
task.NvidiaRuntime = cfg.NvidiaRuntime
}
return nil
| 1 |
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package task
import (
"context"
"encoding/json"
"fmt"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/logger"
"github.com/aws/amazon-ecs-agent/agent/logger/field"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/arn"
"github.com/docker/docker/api/types"
"github.com/docker/go-connections/nat"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
apiappmesh "github.com/aws/amazon-ecs-agent/agent/api/appmesh"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apieni "github.com/aws/amazon-ecs-agent/agent/api/eni"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/taskresource"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmauth"
"github.com/aws/amazon-ecs-agent/agent/taskresource/asmsecret"
"github.com/aws/amazon-ecs-agent/agent/taskresource/envFiles"
"github.com/aws/amazon-ecs-agent/agent/taskresource/firelens"
"github.com/aws/amazon-ecs-agent/agent/taskresource/ssmsecret"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
resourcetype "github.com/aws/amazon-ecs-agent/agent/taskresource/types"
taskresourcevolume "github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/pkg/errors"
)
const (
// NetworkPauseContainerName is the internal name for the pause container
NetworkPauseContainerName = "~internal~ecs~pause"
// NamespacePauseContainerName is the internal name for the IPC resource namespace and/or
// PID namespace sharing pause container
NamespacePauseContainerName = "~internal~ecs~pause~namespace"
emptyHostVolumeName = "~internal~ecs-emptyvolume-source"
// awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment
// variable in containers' config, which will be used by the AWS SDK to fetch
// credentials.
awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
NvidiaVisibleDevicesEnvVar = "NVIDIA_VISIBLE_DEVICES"
GPUAssociationType = "gpu"
// neuronRuntime is the name of the neuron docker runtime.
neuronRuntime = "neuron"
ContainerOrderingCreateCondition = "CREATE"
ContainerOrderingStartCondition = "START"
arnResourceDelimiter = "/"
// networkModeNone specifies the string used to define the `none` docker networking mode
networkModeNone = "none"
// dockerMappingContainerPrefix specifies the prefix string used for setting the
// container's option (network, ipc, or pid) to that of another existing container
dockerMappingContainerPrefix = "container:"
// awslogsCredsEndpointOpt is the awslogs option that is used to pass in an
// http endpoint for authentication
awslogsCredsEndpointOpt = "awslogs-credentials-endpoint"
	// These constants identify the docker flag options
pidModeHost = "host"
pidModeTask = "task"
ipcModeHost = "host"
ipcModeTask = "task"
ipcModeSharable = "shareable"
ipcModeNone = "none"
// firelensConfigBindFormatFluentd and firelensConfigBindFormatFluentbit specify the format of the firelens
// config file bind mount for fluentd and fluentbit firelens container respectively.
// First placeholder is host data dir, second placeholder is taskID.
firelensConfigBindFormatFluentd = "%s/data/firelens/%s/config/fluent.conf:/fluentd/etc/fluent.conf"
firelensConfigBindFormatFluentbit = "%s/data/firelens/%s/config/fluent.conf:/fluent-bit/etc/fluent-bit.conf"
// firelensS3ConfigBindFormat specifies the format of the bind mount for the firelens config file downloaded from S3.
// First placeholder is host data dir, second placeholder is taskID, third placeholder is the s3 config path inside
// the firelens container.
firelensS3ConfigBindFormat = "%s/data/firelens/%s/config/external.conf:%s"
// firelensSocketBindFormat specifies the format for firelens container's socket directory bind mount.
// First placeholder is host data dir, second placeholder is taskID.
firelensSocketBindFormat = "%s/data/firelens/%s/socket/:/var/run/"
// firelensDriverName is the log driver name for containers that want to use the firelens container to send logs.
firelensDriverName = "awsfirelens"
// FirelensLogDriverBufferLimitOption is the option for customers who want to specify the buffer limit size in FireLens.
FirelensLogDriverBufferLimitOption = "log-driver-buffer-limit"
// firelensConfigVarFmt specifies the format for firelens config variable name. The first placeholder
// is option name. The second placeholder is the index of the container in the task's container list, appended
// for the purpose of avoiding config vars from different containers within a task collide (note: can't append
// container name here because it may contain hyphen which will break the config var resolution (see PR 2164 for
// details), and can't append container ID either because we need the config var in PostUnmarshalTask, which is
// before all the containers being created).
firelensConfigVarFmt = "%s_%d"
// firelensConfigVarPlaceholderFmtFluentd and firelensConfigVarPlaceholderFmtFluentbit specify the config var
// placeholder format expected by fluentd and fluentbit respectively.
firelensConfigVarPlaceholderFmtFluentd = "\"#{ENV['%s']}\""
firelensConfigVarPlaceholderFmtFluentbit = "${%s}"
// awsExecutionEnvKey is the key of the env specifying the execution environment.
awsExecutionEnvKey = "AWS_EXECUTION_ENV"
// ec2ExecutionEnv specifies the ec2 execution environment.
ec2ExecutionEnv = "AWS_ECS_EC2"
// specifies bridge type mode for a task
BridgeNetworkMode = "bridge"
// specifies awsvpc type mode for a task
AWSVPCNetworkMode = "awsvpc"
// disableIPv6SysctlKey specifies the setting that controls whether ipv6 is disabled.
disableIPv6SysctlKey = "net.ipv6.conf.all.disable_ipv6"
// sysctlValueOff specifies the value to use to turn off a sysctl setting.
sysctlValueOff = "0"
)
// TaskOverrides are the overrides applied to a task
type TaskOverrides struct{}
// Task is the internal representation of a task in the ECS agent
type Task struct {
// Arn is the unique identifier for the task
Arn string
// Overrides are the overrides applied to a task
Overrides TaskOverrides `json:"-"`
// Family is the name of the task definition family
Family string
// Version is the version of the task definition
Version string
// Containers are the containers for the task
Containers []*apicontainer.Container
// Associations are the available associations for the task.
Associations []Association `json:"associations"`
// ResourcesMapUnsafe is the map of resource type to corresponding resources
ResourcesMapUnsafe resourcetype.ResourcesMap `json:"resources"`
// Volumes are the volumes for the task
Volumes []TaskVolume `json:"volumes"`
// CPU is a task-level limit for compute resources. A value of 1 means that
// the task may access 100% of 1 vCPU on the instance
CPU float64 `json:"Cpu,omitempty"`
// Memory is a task-level limit for memory resources in bytes
Memory int64 `json:"Memory,omitempty"`
// DesiredStatusUnsafe represents the state where the task should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler.
// The DesiredStatusUnsafe is almost always either apitaskstatus.TaskRunning or apitaskstatus.TaskStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`,
	// `UpdateDesiredStatus`, `GetDesiredStatus`, and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe apitaskstatus.TaskStatus `json:"DesiredStatus"`
// KnownStatusUnsafe represents the state where the task is. This is generally
// the minimum of equivalent status types for the containers in the task;
// if one container is at ContainerRunning and another is at ContainerPulled,
// the task KnownStatusUnsafe would be TaskPulled.
// NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`,
// and `GetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe apitaskstatus.TaskStatus `json:"KnownStatus"`
// KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated.
// NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`.
KnownStatusTimeUnsafe time.Time `json:"KnownTime"`
	// PullStartedAtUnsafe is the timestamp when the task starts pulling the first container,
// it won't be set if the pull never happens
PullStartedAtUnsafe time.Time `json:"PullStartedAt"`
// PullStoppedAtUnsafe is the timestamp when the task finished pulling the last container,
// it won't be set if the pull never happens
PullStoppedAtUnsafe time.Time `json:"PullStoppedAt"`
// ExecutionStoppedAtUnsafe is the timestamp when the task desired status moved to stopped,
	// which is when any of the essential containers stopped
ExecutionStoppedAtUnsafe time.Time `json:"ExecutionStoppedAt"`
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API.
// TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
SentStatusUnsafe apitaskstatus.TaskStatus `json:"SentStatus"`
StartSequenceNumber int64
StopSequenceNumber int64
// ExecutionCredentialsID is the ID of credentials that are used by agent to
// perform some action at the task level, such as pulling image from ECR
ExecutionCredentialsID string `json:"executionCredentialsID"`
// credentialsID is used to set the CredentialsId field for the
// IAMRoleCredentials object associated with the task. This id can be
// used to look up the credentials for task in the credentials manager
credentialsID string
credentialsRelativeURIUnsafe string
// ENIs is the list of Elastic Network Interfaces assigned to this task. The
// TaskENIs type is helpful when decoding state files which might have stored
// ENIs as a single ENI object instead of a list.
ENIs TaskENIs `json:"ENI"`
// AppMesh is the service mesh specified by the task
AppMesh *apiappmesh.AppMesh
// MemoryCPULimitsEnabled to determine if task supports CPU, memory limits
MemoryCPULimitsEnabled bool `json:"MemoryCPULimitsEnabled,omitempty"`
// PlatformFields consists of fields specific to linux/windows for a task
PlatformFields PlatformFields `json:"PlatformFields,omitempty"`
// terminalReason should be used when we explicitly move a task to stopped.
// This ensures the task object carries some context for why it was explicitly
	// stopped.
terminalReason string
terminalReasonOnce sync.Once
// PIDMode is used to determine how PID namespaces are organized between
// containers of the Task
PIDMode string `json:"PidMode,omitempty"`
// IPCMode is used to determine how IPC resources should be shared among
// containers of the Task
IPCMode string `json:"IpcMode,omitempty"`
// NvidiaRuntime is the runtime to pass Nvidia GPU devices to containers
NvidiaRuntime string `json:"NvidiaRuntime,omitempty"`
// LocalIPAddressUnsafe stores the local IP address allocated to the bridge that connects the task network
// namespace and the host network namespace, for tasks in awsvpc network mode (tasks in other network mode won't
// have a value for this). This field should be accessed via GetLocalIPAddress and SetLocalIPAddress.
LocalIPAddressUnsafe string `json:"LocalIPAddress,omitempty"`
// LaunchType is the launch type of this task.
LaunchType string `json:"LaunchType,omitempty"`
// lock is for protecting all fields in the task struct
lock sync.RWMutex
}
// TaskFromACS translates ecsacs.Task to apitask.Task by first marshaling the received
// ecsacs.Task to json and unmarshaling it as apitask.Task
func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) {
data, err := jsonutil.BuildJSON(acsTask)
if err != nil {
return nil, err
}
task := &Task{}
if err := json.Unmarshal(data, task); err != nil {
return nil, err
}
if task.GetDesiredStatus() == apitaskstatus.TaskRunning && envelope.SeqNum != nil {
task.StartSequenceNumber = *envelope.SeqNum
} else if task.GetDesiredStatus() == apitaskstatus.TaskStopped && envelope.SeqNum != nil {
task.StopSequenceNumber = *envelope.SeqNum
}
// Overrides the container command if it's set
for _, container := range task.Containers {
if (container.Overrides != apicontainer.ContainerOverrides{}) && container.Overrides.Command != nil {
container.Command = *container.Overrides.Command
}
container.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
}
	// initialize resources map for task
task.ResourcesMapUnsafe = make(map[string][]taskresource.TaskResource)
return task, nil
}
func (task *Task) initializeVolumes(cfg *config.Config, dockerClient dockerapi.DockerClient, ctx context.Context) error {
err := task.initializeDockerLocalVolumes(dockerClient, ctx)
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
err = task.initializeDockerVolumes(cfg.SharedVolumeMatchFullConfig.Enabled(), dockerClient, ctx)
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
err = task.initializeEFSVolumes(cfg, dockerClient, ctx)
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
return nil
}
// PostUnmarshalTask is run after a task has been unmarshalled, but before it has been
// run. It is possible it will be subsequently called after that and should be
// able to handle such an occurrence appropriately (e.g. behave idempotently).
func (task *Task) PostUnmarshalTask(cfg *config.Config,
credentialsManager credentials.Manager, resourceFields *taskresource.ResourceFields,
dockerClient dockerapi.DockerClient, ctx context.Context, options ...Option) error {
// TODO, add rudimentary plugin support and call any plugins that want to
// hook into this
task.adjustForPlatform(cfg)
if task.MemoryCPULimitsEnabled {
if err := task.initializeCgroupResourceSpec(cfg.CgroupPath, cfg.CgroupCPUPeriod, resourceFields); err != nil {
seelog.Errorf("Task [%s]: could not intialize resource: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
}
if err := task.initializeContainerOrderingForVolumes(); err != nil {
seelog.Errorf("Task [%s]: could not initialize volumes dependency for container: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
if err := task.initializeContainerOrderingForLinks(); err != nil {
seelog.Errorf("Task [%s]: could not initialize links dependency for container: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
task.initSecretResources(credentialsManager, resourceFields)
task.initializeCredentialsEndpoint(credentialsManager)
// NOTE: initializeVolumes needs to be after initializeCredentialsEndpoint, because EFS volume might
// need the credentials endpoint constructed by it.
if err := task.initializeVolumes(cfg, dockerClient, ctx); err != nil {
return err
}
if err := task.addGPUResource(cfg); err != nil {
seelog.Errorf("Task [%s]: could not initialize GPU associations: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
task.initializeContainersV3MetadataEndpoint(utils.NewDynamicUUIDProvider())
task.initializeContainersV4MetadataEndpoint(utils.NewDynamicUUIDProvider())
if err := task.addNetworkResourceProvisioningDependency(cfg); err != nil {
seelog.Errorf("Task [%s]: could not provision network resource: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
// Adds necessary Pause containers for sharing PID or IPC namespaces
task.addNamespaceSharingProvisioningDependency(cfg)
if err := task.applyFirelensSetup(cfg, resourceFields, credentialsManager); err != nil {
return err
}
if task.requiresCredentialSpecResource() {
if err := task.initializeCredentialSpecResource(cfg, credentialsManager, resourceFields); err != nil {
seelog.Errorf("Task [%s]: could not initialize credentialspec resource: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
}
if err := task.initializeEnvfilesResource(cfg, credentialsManager); err != nil {
seelog.Errorf("Task [%s]: could not initialize environment files resource: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
task.populateTaskARN()
	// fsxWindowsFileserver is the product type -- it is technically "agnostic", i.e. it should apply to both Windows and Linux tasks
if task.requiresFSxWindowsFileServerResource() {
if err := task.initializeFSxWindowsFileServerResource(cfg, credentialsManager, resourceFields); err != nil {
seelog.Errorf("Task [%s]: could not initialize FSx for Windows File Server resource: %v", task.Arn, err)
return apierrors.NewResourceInitError(task.Arn, err)
}
}
for _, opt := range options {
if err := opt(task); err != nil {
seelog.Errorf("Task [%s]: could not apply task option: %v", task.Arn, err)
return err
}
}
return nil
}
// populateTaskARN populates the arn of the task to the containers.
func (task *Task) populateTaskARN() {
for _, c := range task.Containers {
c.SetTaskARN(task.Arn)
}
}
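// initSecretResources initializes the ASM auth, SSM secret and ASM secret resources for the task when any of its containers require them.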
func (task *Task) initSecretResources(credentialsManager credentials.Manager,
resourceFields *taskresource.ResourceFields) {
if task.requiresASMDockerAuthData() {
task.initializeASMAuthResource(credentialsManager, resourceFields)
}
if task.requiresSSMSecret() {
task.initializeSSMSecretResource(credentialsManager, resourceFields)
}
if task.requiresASMSecret() {
task.initializeASMSecretResource(credentialsManager, resourceFields)
}
}
func (task *Task) applyFirelensSetup(cfg *config.Config, resourceFields *taskresource.ResourceFields,
credentialsManager credentials.Manager) error {
firelensContainer := task.GetFirelensContainer()
if firelensContainer != nil {
if err := task.initializeFirelensResource(cfg, resourceFields, firelensContainer, credentialsManager); err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
if err := task.addFirelensContainerDependency(); err != nil {
return errors.New("unable to add firelens container dependency")
}
}
return nil
}
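// addGPUResource assigns each GPU association to its container and records the Nvidia runtime from the agent config when GPU support is enabled.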
func (task *Task) addGPUResource(cfg *config.Config) error {
if cfg.GPUSupportEnabled {
for _, association := range task.Associations {
			// One GPU can be associated with only one container,
			// which is why we validate that association.Containers has exactly one entry
if association.Type == GPUAssociationType {
if len(association.Containers) != 1 {
return fmt.Errorf("could not associate multiple containers to GPU %s", association.Name)
}
container, ok := task.ContainerByName(association.Containers[0])
if !ok {
return fmt.Errorf("could not find container with name %s for associating GPU %s",
association.Containers[0], association.Name)
}
container.GPUIDs = append(container.GPUIDs, association.Name)
}
}
task.populateGPUEnvironmentVariables()
task.NvidiaRuntime = cfg.NvidiaRuntime
}
return nil
}
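// isGPUEnabled returns true if the task has at least one GPU association.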
func (task *Task) isGPUEnabled() bool {
for _, association := range task.Associations {
if association.Type == GPUAssociationType {
return true
}
}
return false
}
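// populateGPUEnvironmentVariables sets the NVIDIA_VISIBLE_DEVICES environment variable on every container that has GPU IDs assigned.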
func (task *Task) populateGPUEnvironmentVariables() {
for _, container := range task.Containers {
if len(container.GPUIDs) > 0 {
gpuList := strings.Join(container.GPUIDs, ",")
envVars := make(map[string]string)
envVars[NvidiaVisibleDevicesEnvVar] = gpuList
container.MergeEnvironmentVariables(envVars)
}
}
}
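// shouldRequireNvidiaRuntime returns true if the container has the NVIDIA_VISIBLE_DEVICES environment variable set and therefore needs the Nvidia runtime.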
func (task *Task) shouldRequireNvidiaRuntime(container *apicontainer.Container) bool {
_, ok := container.Environment[NvidiaVisibleDevicesEnvVar]
return ok
}
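// initializeDockerLocalVolumes creates docker 'local' driver volume resources for mount points that reference local volumes and adds the corresponding container dependencies.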
func (task *Task) initializeDockerLocalVolumes(dockerClient dockerapi.DockerClient, ctx context.Context) error {
var requiredLocalVolumes []string
for _, container := range task.Containers {
for _, mountPoint := range container.MountPoints {
vol, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
continue
}
if localVolume, ok := vol.(*taskresourcevolume.LocalDockerVolume); ok {
localVolume.HostPath = task.volumeName(mountPoint.SourceVolume)
container.BuildResourceDependency(mountPoint.SourceVolume,
resourcestatus.ResourceStatus(taskresourcevolume.VolumeCreated),
apicontainerstatus.ContainerPulled)
requiredLocalVolumes = append(requiredLocalVolumes, mountPoint.SourceVolume)
}
}
}
if len(requiredLocalVolumes) == 0 {
// No need to create the auxiliary local driver volumes
return nil
}
	// if we have required local volumes, create them with the default local docker volume driver
for _, volumeName := range requiredLocalVolumes {
vol, _ := task.HostVolumeByName(volumeName)
// BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide
scope := taskresourcevolume.TaskScope
localVolume, err := taskresourcevolume.NewVolumeResource(ctx, volumeName, HostVolumeType,
vol.Source(), scope, false,
taskresourcevolume.DockerLocalVolumeDriver,
make(map[string]string), make(map[string]string), dockerClient)
if err != nil {
return err
}
task.AddResource(resourcetype.DockerVolumeKey, localVolume)
}
return nil
}
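// volumeName generates a per-task unique volume name from the family, version, volume name and a random hex suffix.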
func (task *Task) volumeName(name string) string {
return "ecs-" + task.Family + "-" + task.Version + "-" + name + "-" + utils.RandHex()
}
// initializeDockerVolumes checks the volume resource in the task to determine if the agent
// should create the volume before creating the container
func (task *Task) initializeDockerVolumes(sharedVolumeMatchFullConfig bool, dockerClient dockerapi.DockerClient, ctx context.Context) error {
for i, vol := range task.Volumes {
// No need to do this for non-docker volume, eg: host bind/empty volume
if vol.Type != DockerVolumeType {
continue
}
dockerVolume, ok := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
if !ok {
return errors.New("task volume: volume configuration does not match the type 'docker'")
}
// Agent needs to create task-scoped volume
if dockerVolume.Scope == taskresourcevolume.TaskScope {
if err := task.addTaskScopedVolumes(ctx, dockerClient, &task.Volumes[i]); err != nil {
return err
}
} else {
// Agent needs to create shared volume if that's auto provisioned
if err := task.addSharedVolumes(sharedVolumeMatchFullConfig, ctx, dockerClient, &task.Volumes[i]); err != nil {
return err
}
}
}
return nil
}
// initializeEFSVolumes inspects the volume definitions in the task definition.
// If it finds EFS volumes in the task definition, it converts them to docker
// volume definitions.
func (task *Task) initializeEFSVolumes(cfg *config.Config, dockerClient dockerapi.DockerClient, ctx context.Context) error {
for i, vol := range task.Volumes {
// No need to do this for non-efs volume, eg: host bind/empty volume
if vol.Type != EFSVolumeType {
continue
}
efsvol, ok := vol.Volume.(*taskresourcevolume.EFSVolumeConfig)
if !ok {
return errors.New("task volume: volume configuration does not match the type 'efs'")
}
err := task.addEFSVolumes(ctx, cfg, dockerClient, &task.Volumes[i], efsvol)
if err != nil {
return err
}
}
return nil
}
// addEFSVolumes converts the EFS task definition into an internal docker 'local' volume
// mounted with NFS struct and updates container dependency
func (task *Task) addEFSVolumes(
ctx context.Context,
cfg *config.Config,
dockerClient dockerapi.DockerClient,
vol *TaskVolume,
efsvol *taskresourcevolume.EFSVolumeConfig,
) error {
driverOpts := taskresourcevolume.GetDriverOptions(cfg, efsvol, task.GetCredentialsRelativeURI())
driverName := getEFSVolumeDriverName(cfg)
volumeResource, err := taskresourcevolume.NewVolumeResource(
ctx,
vol.Name,
EFSVolumeType,
task.volumeName(vol.Name),
"task",
false,
driverName,
driverOpts,
map[string]string{},
dockerClient,
)
if err != nil {
return err
}
vol.Volume = &volumeResource.VolumeConfig
task.AddResource(resourcetype.DockerVolumeKey, volumeResource)
task.updateContainerVolumeDependency(vol.Name)
return nil
}
// addTaskScopedVolumes adds the task scoped volume into task resources and updates container dependency
func (task *Task) addTaskScopedVolumes(ctx context.Context, dockerClient dockerapi.DockerClient,
vol *TaskVolume) error {
volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
volumeResource, err := taskresourcevolume.NewVolumeResource(
ctx,
vol.Name,
DockerVolumeType,
task.volumeName(vol.Name),
volumeConfig.Scope, volumeConfig.Autoprovision,
volumeConfig.Driver, volumeConfig.DriverOpts,
volumeConfig.Labels, dockerClient)
if err != nil {
return err
}
vol.Volume = &volumeResource.VolumeConfig
task.AddResource(resourcetype.DockerVolumeKey, volumeResource)
task.updateContainerVolumeDependency(vol.Name)
return nil
}
// addSharedVolumes adds shared volume into task resources and updates container dependency
func (task *Task) addSharedVolumes(SharedVolumeMatchFullConfig bool, ctx context.Context, dockerClient dockerapi.DockerClient,
vol *TaskVolume) error {
volumeConfig := vol.Volume.(*taskresourcevolume.DockerVolumeConfig)
volumeConfig.DockerVolumeName = vol.Name
// if autoprovision == true, we will auto-provision the volume if it does not exist already
// else the named volume must exist
if !volumeConfig.Autoprovision {
volumeMetadata := dockerClient.InspectVolume(ctx, vol.Name, dockerclient.InspectVolumeTimeout)
if volumeMetadata.Error != nil {
return errors.Wrapf(volumeMetadata.Error, "initialize volume: volume detection failed, volume '%s' does not exist and autoprovision is set to false", vol.Name)
}
return nil
}
// at this point we know autoprovision = true
	// check if the volume configuration matches the one that exists on the instance
volumeMetadata := dockerClient.InspectVolume(ctx, volumeConfig.DockerVolumeName, dockerclient.InspectVolumeTimeout)
if volumeMetadata.Error != nil {
		// Inspecting the volume timed out; fail the task
if _, ok := volumeMetadata.Error.(*dockerapi.DockerTimeoutError); ok {
return volumeMetadata.Error
}
seelog.Infof("initialize volume: Task [%s]: non-autoprovisioned volume not found, adding to task resource %q", task.Arn, vol.Name)
// this resource should be created by agent
volumeResource, err := taskresourcevolume.NewVolumeResource(
ctx,
vol.Name,
DockerVolumeType,
vol.Name,
volumeConfig.Scope, volumeConfig.Autoprovision,
volumeConfig.Driver, volumeConfig.DriverOpts,
volumeConfig.Labels, dockerClient)
if err != nil {
return err
}
task.AddResource(resourcetype.DockerVolumeKey, volumeResource)
task.updateContainerVolumeDependency(vol.Name)
return nil
}
seelog.Infof("initialize volume: Task [%s]: volume [%s] already exists", task.Arn, volumeConfig.DockerVolumeName)
if !SharedVolumeMatchFullConfig {
seelog.Infof("initialize volume: Task [%s]: ECS_SHARED_VOLUME_MATCH_FULL_CONFIG is set to false and volume with name [%s] is found", task.Arn, volumeConfig.DockerVolumeName)
return nil
}
// validate all the volume metadata fields match to the configuration
if len(volumeMetadata.DockerVolume.Labels) == 0 && len(volumeMetadata.DockerVolume.Labels) == len(volumeConfig.Labels) {
seelog.Infof("labels are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName)
} else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Labels, volumeConfig.Labels) {
return errors.Errorf("intialize volume: non-autoprovisioned volume does not match existing volume labels: existing: %v, expected: %v",
volumeMetadata.DockerVolume.Labels, volumeConfig.Labels)
}
if len(volumeMetadata.DockerVolume.Options) == 0 && len(volumeMetadata.DockerVolume.Options) == len(volumeConfig.DriverOpts) {
seelog.Infof("driver options are both empty or null: Task [%s]: volume [%s]", task.Arn, volumeConfig.DockerVolumeName)
} else if !reflect.DeepEqual(volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts) {
return errors.Errorf("initialize volume: non-autoprovisioned volume does not match existing volume options: existing: %v, expected: %v",
volumeMetadata.DockerVolume.Options, volumeConfig.DriverOpts)
}
// Right now we are not adding shared, autoprovision = true volume to task as resource if it already exists (i.e. when this task didn't create the volume).
// if we need to change that, make a call to task.AddResource here.
return nil
}
// updateContainerVolumeDependency adds the volume resource to container dependency
func (task *Task) updateContainerVolumeDependency(name string) {
	// Find all the containers that depend on the volume
for _, container := range task.Containers {
for _, mountpoint := range container.MountPoints {
if mountpoint.SourceVolume == name {
container.BuildResourceDependency(name,
resourcestatus.ResourceCreated,
apicontainerstatus.ContainerPulled)
}
}
}
}
// initializeCredentialsEndpoint sets the credentials endpoint for all containers in a task if needed.
func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) {
id := task.GetCredentialsID()
if id == "" {
// No credentials set for the task. Do not inject the endpoint environment variable.
return
}
taskCredentials, ok := credentialsManager.GetTaskCredentials(id)
if !ok {
// Task has credentials id set, but credentials manager is unaware of
// the id. This should never happen as the payload handler sets
// credentialsId for the task after adding credentials to the
// credentials manager
seelog.Errorf("Unable to get credentials for task: %s", task.Arn)
return
}
credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
for _, container := range task.Containers {
// container.Environment map would not be initialized if there are
// no environment variables to be set or overridden in the container
// config. Check if that's the case and initialize if needed
if container.Environment == nil {
container.Environment = make(map[string]string)
}
container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI
}
task.SetCredentialsRelativeURI(credentialsEndpointRelativeURI)
}
// initializeContainersV3MetadataEndpoint generates a v3 endpoint ID for each container, constructs the
// v3 metadata endpoint, and injects it as an environment variable
func (task *Task) initializeContainersV3MetadataEndpoint(uuidProvider utils.UUIDProvider) {
for _, container := range task.Containers {
v3EndpointID := container.GetV3EndpointID()
if v3EndpointID == "" { // if container's v3 endpoint has not been set
container.SetV3EndpointID(uuidProvider.New())
}
container.InjectV3MetadataEndpoint()
}
}
// initializeContainersV4MetadataEndpoint generates a v4 endpoint ID for each container (reusing the v3 endpoint ID,
// since they are the same), constructs the v4 metadata endpoint,
// and injects it as an environment variable
func (task *Task) initializeContainersV4MetadataEndpoint(uuidProvider utils.UUIDProvider) {
for _, container := range task.Containers {
v3EndpointID := container.GetV3EndpointID()
if v3EndpointID == "" { // if container's v3 endpoint has not been set
container.SetV3EndpointID(uuidProvider.New())
}
container.InjectV4MetadataEndpoint()
}
}
// requiresASMDockerAuthData returns true if at least one container in the task
// needs to retrieve private registry authentication data from ASM
func (task *Task) requiresASMDockerAuthData() bool {
for _, container := range task.Containers {
if container.ShouldPullWithASMAuth() {
return true
}
}
return false
}
// initializeASMAuthResource builds the resource dependency map for the ASM auth resource
func (task *Task) initializeASMAuthResource(credentialsManager credentials.Manager,
resourceFields *taskresource.ResourceFields) {
asmAuthResource := asmauth.NewASMAuthResource(task.Arn, task.getAllASMAuthDataRequirements(),
task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator)
task.AddResource(asmauth.ResourceName, asmAuthResource)
for _, container := range task.Containers {
if container.ShouldPullWithASMAuth() {
container.BuildResourceDependency(asmAuthResource.GetName(),
resourcestatus.ResourceStatus(asmauth.ASMAuthStatusCreated),
apicontainerstatus.ContainerPulled)
}
}
}
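// getAllASMAuthDataRequirements collects the ASM auth data from every container that pulls its image with ASM auth.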
func (task *Task) getAllASMAuthDataRequirements() []*apicontainer.ASMAuthData {
var reqs []*apicontainer.ASMAuthData
for _, container := range task.Containers {
if container.ShouldPullWithASMAuth() {
reqs = append(reqs, container.RegistryAuthentication.ASMAuthData)
}
}
return reqs
}
// requiresSSMSecret returns true if at least one container in the task
// needs to retrieve secret from SSM parameter
func (task *Task) requiresSSMSecret() bool {
for _, container := range task.Containers {
if container.ShouldCreateWithSSMSecret() {
return true
}
}
return false
}
// initializeSSMSecretResource builds the resource dependency map for the SSM ssmsecret resource
func (task *Task) initializeSSMSecretResource(credentialsManager credentials.Manager,
resourceFields *taskresource.ResourceFields) {
ssmSecretResource := ssmsecret.NewSSMSecretResource(task.Arn, task.getAllSSMSecretRequirements(),
task.ExecutionCredentialsID, credentialsManager, resourceFields.SSMClientCreator)
task.AddResource(ssmsecret.ResourceName, ssmSecretResource)
	// for every container that needs ssm secret vending as env vars, it needs to wait until all secrets have been retrieved
for _, container := range task.Containers {
if container.ShouldCreateWithSSMSecret() {
container.BuildResourceDependency(ssmSecretResource.GetName(),
resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated),
apicontainerstatus.ContainerCreated)
}
		// The firelens container needs to depend on the secret resource if other containers use secret log options.
if container.GetFirelensConfig() != nil && task.firelensDependsOnSecretResource(apicontainer.SecretProviderSSM) {
container.BuildResourceDependency(ssmSecretResource.GetName(),
resourcestatus.ResourceStatus(ssmsecret.SSMSecretCreated),
apicontainerstatus.ContainerCreated)
}
}
}
// firelensDependsOnSecretResource checks whether the firelens container needs to depend on a secret resource of
// a certain provider type.
func (task *Task) firelensDependsOnSecretResource(secretProvider string) bool {
isLogDriverSecretWithGivenProvider := func(s apicontainer.Secret) bool {
return s.Provider == secretProvider && s.Target == apicontainer.SecretTargetLogDriver
}
for _, container := range task.Containers {
if container.GetLogDriver() == firelensDriverName && container.HasSecret(isLogDriverSecretWithGivenProvider) {
return true
}
}
return false
}
// getAllSSMSecretRequirements stores all secrets in a map whose key is region and value is all
// secrets in that region
func (task *Task) getAllSSMSecretRequirements() map[string][]apicontainer.Secret {
reqs := make(map[string][]apicontainer.Secret)
for _, container := range task.Containers {
for _, secret := range container.Secrets {
if secret.Provider == apicontainer.SecretProviderSSM {
if _, ok := reqs[secret.Region]; !ok {
reqs[secret.Region] = []apicontainer.Secret{}
}
reqs[secret.Region] = append(reqs[secret.Region], secret)
}
}
}
return reqs
}
// requiresASMSecret returns true if at least one container in the task
// needs to retrieve secret from AWS Secrets Manager
func (task *Task) requiresASMSecret() bool {
for _, container := range task.Containers {
if container.ShouldCreateWithASMSecret() {
return true
}
}
return false
}
// initializeASMSecretResource builds the resource dependency map for the asmsecret resource
func (task *Task) initializeASMSecretResource(credentialsManager credentials.Manager,
resourceFields *taskresource.ResourceFields) {
asmSecretResource := asmsecret.NewASMSecretResource(task.Arn, task.getAllASMSecretRequirements(),
task.ExecutionCredentialsID, credentialsManager, resourceFields.ASMClientCreator)
task.AddResource(asmsecret.ResourceName, asmSecretResource)
	// for every container that needs asm secret vending as env vars, it needs to wait until all secrets have been retrieved
for _, container := range task.Containers {
if container.ShouldCreateWithASMSecret() {
container.BuildResourceDependency(asmSecretResource.GetName(),
resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated),
apicontainerstatus.ContainerCreated)
}
		// The firelens container needs to depend on the secret resource if other containers use secret log options.
if container.GetFirelensConfig() != nil && task.firelensDependsOnSecretResource(apicontainer.SecretProviderASM) {
container.BuildResourceDependency(asmSecretResource.GetName(),
resourcestatus.ResourceStatus(asmsecret.ASMSecretCreated),
apicontainerstatus.ContainerCreated)
}
}
}
// getAllASMSecretRequirements stores secrets in a task in a map
func (task *Task) getAllASMSecretRequirements() map[string]apicontainer.Secret {
reqs := make(map[string]apicontainer.Secret)
for _, container := range task.Containers {
for _, secret := range container.Secrets {
if secret.Provider == apicontainer.SecretProviderASM {
secretKey := secret.GetSecretResourceCacheKey()
if _, ok := reqs[secretKey]; !ok {
reqs[secretKey] = secret
}
}
}
}
return reqs
}
// GetFirelensContainer returns the firelens container in the task, if there is one.
func (task *Task) GetFirelensContainer() *apicontainer.Container {
for _, container := range task.Containers {
if container.GetFirelensConfig() != nil { // This is a firelens container.
return container
}
}
return nil
}
// initializeFirelensResource initializes the firelens task resource and adds it as a dependency of the
// firelens container.
func (task *Task) initializeFirelensResource(config *config.Config, resourceFields *taskresource.ResourceFields,
firelensContainer *apicontainer.Container, credentialsManager credentials.Manager) error {
if firelensContainer.GetFirelensConfig() == nil {
return errors.New("firelens container config doesn't exist")
}
containerToLogOptions := make(map[string]map[string]string)
// Collect plain text log options.
if err := task.collectFirelensLogOptions(containerToLogOptions); err != nil {
return errors.Wrap(err, "unable to initialize firelens resource")
}
// Collect secret log options.
if err := task.collectFirelensLogEnvOptions(containerToLogOptions, firelensContainer.FirelensConfig.Type); err != nil {
return errors.Wrap(err, "unable to initialize firelens resource")
}
for _, container := range task.Containers {
firelensConfig := container.GetFirelensConfig()
if firelensConfig != nil {
var ec2InstanceID string
if container.Environment != nil && container.Environment[awsExecutionEnvKey] == ec2ExecutionEnv {
ec2InstanceID = resourceFields.EC2InstanceID
}
var networkMode string
if task.IsNetworkModeAWSVPC() {
networkMode = AWSVPCNetworkMode
} else if container.GetNetworkModeFromHostConfig() == "" || container.GetNetworkModeFromHostConfig() == BridgeNetworkMode {
networkMode = BridgeNetworkMode
} else {
networkMode = container.GetNetworkModeFromHostConfig()
}
firelensResource, err := firelens.NewFirelensResource(config.Cluster, task.Arn, task.Family+":"+task.Version,
ec2InstanceID, config.DataDir, firelensConfig.Type, config.AWSRegion, networkMode, firelensConfig.Options, containerToLogOptions,
credentialsManager, task.ExecutionCredentialsID)
if err != nil {
return errors.Wrap(err, "unable to initialize firelens resource")
}
task.AddResource(firelens.ResourceName, firelensResource)
container.BuildResourceDependency(firelensResource.GetName(), resourcestatus.ResourceCreated,
apicontainerstatus.ContainerCreated)
return nil
}
}
return errors.New("unable to initialize firelens resource because there's no firelens container")
}
// addFirelensContainerDependency adds a START dependency between each container using awsfirelens log driver
// and the firelens container.
func (task *Task) addFirelensContainerDependency() error {
var firelensContainer *apicontainer.Container
for _, container := range task.Containers {
if container.GetFirelensConfig() != nil {
firelensContainer = container
}
}
if firelensContainer == nil {
return errors.New("unable to add firelens container dependency because there's no firelens container")
}
if firelensContainer.HasContainerDependencies() {
// If firelens container has any container dependency, we don't add internal container dependency that depends
// on it in order to be safe (otherwise we need to deal with circular dependency).
seelog.Warnf("Not adding container dependency to let firelens container %s start first, because it has dependency on other containers.", firelensContainer.Name)
return nil
}
for _, container := range task.Containers {
containerHostConfig := container.GetHostConfig()
if containerHostConfig == nil {
continue
}
// Firelens container itself could be using awsfirelens log driver. Don't add container dependency in this case.
if container.Name == firelensContainer.Name {
continue
}
hostConfig := &dockercontainer.HostConfig{}
if err := json.Unmarshal([]byte(*containerHostConfig), hostConfig); err != nil {
return errors.Wrapf(err, "unable to decode host config of container %s", container.Name)
}
if hostConfig.LogConfig.Type == firelensDriverName {
// If there's no dependency between the app container and the firelens container, make firelens container
			// start first to be the default behavior by adding a START container dependency.
if !container.DependsOnContainer(firelensContainer.Name) {
seelog.Infof("Adding a START container dependency on firelens container %s for container %s",
firelensContainer.Name, container.Name)
container.AddContainerDependency(firelensContainer.Name, ContainerOrderingStartCondition)
}
}
}
return nil
}
// collectFirelensLogOptions collects the log options for all the containers that use the firelens container
// as the log driver.
// containerToLogOptions is a nested map. Top level key is the container name. Second level is a map storing
// the log option key and value of the container.
func (task *Task) collectFirelensLogOptions(containerToLogOptions map[string]map[string]string) error {
for _, container := range task.Containers {
if container.DockerConfig.HostConfig == nil {
continue
}
hostConfig := &dockercontainer.HostConfig{}
if err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig); err != nil {
return errors.Wrapf(err, "unable to decode host config of container %s", container.Name)
}
if hostConfig.LogConfig.Type == firelensDriverName {
if containerToLogOptions[container.Name] == nil {
containerToLogOptions[container.Name] = make(map[string]string)
}
for k, v := range hostConfig.LogConfig.Config {
if k == FirelensLogDriverBufferLimitOption {
continue
}
containerToLogOptions[container.Name][k] = v
}
}
}
return nil
}
// collectFirelensLogEnvOptions collects all the log secret options. Each secret log option will have a value
// of a config file variable (e.g. "${config_var_name}") and we will pass the secret value as env to the firelens
// container and it will resolve the config file variable from the env.
// Each config variable name has a format of log-option-key_container-name. We need the container name because options
// from different containers using awsfirelens log driver in a task will be presented in the same firelens config file.
func (task *Task) collectFirelensLogEnvOptions(containerToLogOptions map[string]map[string]string, firelensConfigType string) error {
placeholderFmt := ""
switch firelensConfigType {
case firelens.FirelensConfigTypeFluentd:
placeholderFmt = firelensConfigVarPlaceholderFmtFluentd
case firelens.FirelensConfigTypeFluentbit:
placeholderFmt = firelensConfigVarPlaceholderFmtFluentbit
default:
return errors.Errorf("unsupported firelens config type %s", firelensConfigType)
}
for _, container := range task.Containers {
for _, secret := range container.Secrets {
if secret.Target == apicontainer.SecretTargetLogDriver {
if containerToLogOptions[container.Name] == nil {
containerToLogOptions[container.Name] = make(map[string]string)
}
idx := task.GetContainerIndex(container.Name)
if idx < 0 {
return errors.Errorf("can't find container %s in task %s", container.Name, task.Arn)
}
containerToLogOptions[container.Name][secret.Name] = fmt.Sprintf(placeholderFmt,
fmt.Sprintf(firelensConfigVarFmt, secret.Name, idx))
}
}
}
return nil
}
// AddFirelensContainerBindMounts adds config file bind mount and socket directory bind mount to the firelens
// container's host config.
func (task *Task) AddFirelensContainerBindMounts(firelensConfig *apicontainer.FirelensConfig, hostConfig *dockercontainer.HostConfig,
config *config.Config) *apierrors.HostConfigError {
taskID, err := task.GetID()
if err != nil {
return &apierrors.HostConfigError{Msg: err.Error()}
}
var configBind, s3ConfigBind, socketBind string
switch firelensConfig.Type {
case firelens.FirelensConfigTypeFluentd:
configBind = fmt.Sprintf(firelensConfigBindFormatFluentd, config.DataDirOnHost, taskID)
s3ConfigBind = fmt.Sprintf(firelensS3ConfigBindFormat, config.DataDirOnHost, taskID, firelens.S3ConfigPathFluentd)
case firelens.FirelensConfigTypeFluentbit:
configBind = fmt.Sprintf(firelensConfigBindFormatFluentbit, config.DataDirOnHost, taskID)
s3ConfigBind = fmt.Sprintf(firelensS3ConfigBindFormat, config.DataDirOnHost, taskID, firelens.S3ConfigPathFluentbit)
default:
return &apierrors.HostConfigError{Msg: fmt.Sprintf("encounter invalid firelens configuration type %s",
firelensConfig.Type)}
}
socketBind = fmt.Sprintf(firelensSocketBindFormat, config.DataDirOnHost, taskID)
hostConfig.Binds = append(hostConfig.Binds, configBind, socketBind)
// Add the s3 config bind mount if firelens container is using a config file from S3.
if firelensConfig.Options != nil && firelensConfig.Options[firelens.ExternalConfigTypeOption] == firelens.ExternalConfigTypeS3 {
hostConfig.Binds = append(hostConfig.Binds, s3ConfigBind)
}
return nil
}
// IsNetworkModeAWSVPC checks if the task is configured to use the AWSVPC task networking feature.
func (task *Task) IsNetworkModeAWSVPC() bool {
return len(task.ENIs) > 0
}
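// addNetworkResourceProvisioningDependency adds the network pause container for awsvpc tasks and wires up the dependencies between it, the other containers, and any resources that depend on task networking.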
func (task *Task) addNetworkResourceProvisioningDependency(cfg *config.Config) error {
if !task.IsNetworkModeAWSVPC() {
return nil
}
pauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerResourcesProvisioned)
pauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
pauseContainer.Name = NetworkPauseContainerName
pauseContainer.Image = fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag)
pauseContainer.Essential = true
pauseContainer.Type = apicontainer.ContainerCNIPause
	// Set the pause container's user to the same value as the proxy container's user when the image name is not DefaultPauseContainerImageName
if task.GetAppMesh() != nil && cfg.PauseContainerImageName != config.DefaultPauseContainerImageName {
appMeshConfig := task.GetAppMesh()
// Validation is done when registering task to make sure there is one container name matching
for _, container := range task.Containers {
if container.Name != appMeshConfig.ContainerName {
continue
}
if container.DockerConfig.Config == nil {
return errors.Errorf("user needs to be specified for proxy container")
}
containerConfig := &dockercontainer.Config{}
if err := json.Unmarshal([]byte(aws.StringValue(container.DockerConfig.Config)), &containerConfig); err != nil {
return errors.Errorf("unable to decode given docker config: %s", err.Error())
}
if containerConfig.User == "" {
return errors.Errorf("user needs to be specified for proxy container")
}
pauseConfig := dockercontainer.Config{
User: containerConfig.User,
Image: fmt.Sprintf("%s:%s", cfg.PauseContainerImageName, cfg.PauseContainerTag),
}
bytes, err := json.Marshal(pauseConfig)
if err != nil {
return errors.Errorf("Error json marshaling pause config: %s", err)
}
serializedConfig := string(bytes)
pauseContainer.DockerConfig = apicontainer.DockerConfig{
Config: &serializedConfig,
}
break
}
}
task.Containers = append(task.Containers, pauseContainer)
for _, container := range task.Containers {
if container.IsInternal() {
continue
}
container.BuildContainerDependency(NetworkPauseContainerName, apicontainerstatus.ContainerResourcesProvisioned, apicontainerstatus.ContainerPulled)
pauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped)
}
for _, resource := range task.GetResources() {
if resource.DependOnTaskNetwork() {
seelog.Debugf("Task [%s]: adding network pause container dependency to resource [%s]", task.Arn, resource.GetName())
resource.BuildContainerDependency(NetworkPauseContainerName, apicontainerstatus.ContainerResourcesProvisioned, resourcestatus.ResourceStatus(taskresourcevolume.VolumeCreated))
}
}
return nil
}
func (task *Task) addNamespaceSharingProvisioningDependency(cfg *config.Config) {
// Pause container does not need to be created if no namespace sharing will be done at task level
if task.getIPCMode() != ipcModeTask && task.getPIDMode() != pidModeTask {
return
}
namespacePauseContainer := apicontainer.NewContainerWithSteadyState(apicontainerstatus.ContainerRunning)
namespacePauseContainer.TransitionDependenciesMap = make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet)
namespacePauseContainer.Name = NamespacePauseContainerName
namespacePauseContainer.Image = fmt.Sprintf("%s:%s", config.DefaultPauseContainerImageName, config.DefaultPauseContainerTag)
namespacePauseContainer.Essential = true
namespacePauseContainer.Type = apicontainer.ContainerNamespacePause
task.Containers = append(task.Containers, namespacePauseContainer)
for _, container := range task.Containers {
if container.IsInternal() {
continue
}
container.BuildContainerDependency(NamespacePauseContainerName, apicontainerstatus.ContainerRunning, apicontainerstatus.ContainerPulled)
namespacePauseContainer.BuildContainerDependency(container.Name, apicontainerstatus.ContainerStopped, apicontainerstatus.ContainerStopped)
}
}
// ContainerByName returns the *Container for the given name
func (task *Task) ContainerByName(name string) (*apicontainer.Container, bool) {
for _, container := range task.Containers {
if container.Name == name {
return container, true
}
}
return nil, false
}
// HostVolumeByName returns the task Volume for a given volume name in that
// task. The second return value indicates the presence of that volume
func (task *Task) HostVolumeByName(name string) (taskresourcevolume.Volume, bool) {
for _, v := range task.Volumes {
if v.Name == name {
return v.Volume, true
}
}
return nil, false
}
// UpdateMountPoints updates the mount points of volumes that were created
// without specifying a host path. This is used as part of the empty host
// volume feature.
func (task *Task) UpdateMountPoints(cont *apicontainer.Container, vols []types.MountPoint) {
for _, mountPoint := range cont.MountPoints {
containerPath := utils.GetCanonicalPath(mountPoint.ContainerPath)
for _, vol := range vols {
if strings.Compare(vol.Destination, containerPath) == 0 ||
// /path/ -> /path or \path\ -> \path
strings.Compare(vol.Destination, strings.TrimRight(containerPath, string(filepath.Separator))) == 0 {
if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists {
if empty, ok := hostVolume.(*taskresourcevolume.LocalDockerVolume); ok {
empty.HostPath = vol.Source
}
}
}
}
}
}
// updateTaskKnownStatus updates the given task's status based on its containers' statuses.
// It always updates the task status to the minimum of all container statuses
// It returns a TaskStatus indicating what change occurred or TaskStatusNone if
// there was no change
// Invariant: task known status is the minimum of container known status
func (task *Task) updateTaskKnownStatus() (newStatus apitaskstatus.TaskStatus) {
seelog.Debugf("api/task: Updating task's known status, task: %s", task.String())
// Set to a large 'impossible' status that can't be the min
containerEarliestKnownStatus := apicontainerstatus.ContainerZombie
var earliestKnownStatusContainer *apicontainer.Container
essentialContainerStopped := false
for _, container := range task.Containers {
containerKnownStatus := container.GetKnownStatus()
if containerKnownStatus == apicontainerstatus.ContainerStopped && container.Essential {
essentialContainerStopped = true
}
if containerKnownStatus < containerEarliestKnownStatus {
containerEarliestKnownStatus = containerKnownStatus
earliestKnownStatusContainer = container
}
}
if earliestKnownStatusContainer == nil {
seelog.Criticalf(
"Impossible state found while updating tasks's known status, earliest state recorded as %s for task [%v]",
containerEarliestKnownStatus.String(), task)
return apitaskstatus.TaskStatusNone
}
seelog.Debugf("api/task: Container with earliest known container is [%s] for task: %s",
earliestKnownStatusContainer.String(), task.String())
// If the essential container is stopped while other containers may be running
// don't update the task status until the other containers are stopped.
if earliestKnownStatusContainer.IsKnownSteadyState() && essentialContainerStopped {
seelog.Debugf(
"Essential container is stopped while other containers are running, not updating task status for task: %s",
task.String())
return apitaskstatus.TaskStatusNone
}
// We can't rely on earliest container known status alone for determining if the
// task state needs to be updated as containers can have different steady states
// defined. Instead we should get the task status for all containers' known
// statuses and compute the min of this
earliestKnownTaskStatus := task.getEarliestKnownTaskStatusForContainers()
if task.GetKnownStatus() < earliestKnownTaskStatus {
task.SetKnownStatus(earliestKnownTaskStatus)
logger.Info("Container change also resulted in task change", logger.Fields{
field.TaskARN: task.Arn,
field.Container: earliestKnownStatusContainer.Name,
field.RuntimeID: earliestKnownStatusContainer.RuntimeID,
field.DesiredStatus: task.GetDesiredStatus().String(),
field.KnownStatus: earliestKnownTaskStatus.String(),
})
return earliestKnownTaskStatus
}
return apitaskstatus.TaskStatusNone
}
// getEarliestKnownTaskStatusForContainers gets the lowest (earliest) task status
// based on the known statuses of all containers in the task
func (task *Task) getEarliestKnownTaskStatusForContainers() apitaskstatus.TaskStatus {
if len(task.Containers) == 0 {
seelog.Criticalf("No containers in the task: %s", task.String())
return apitaskstatus.TaskStatusNone
}
// Set earliest task status to an impossible to reach 'high' task status
earliest := apitaskstatus.TaskZombie
for _, container := range task.Containers {
containerTaskStatus := apitaskstatus.MapContainerToTaskStatus(container.GetKnownStatus(), container.GetSteadyStateStatus())
if containerTaskStatus < earliest {
earliest = containerTaskStatus
}
}
return earliest
}
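// Worked example (hypothetical statuses): if one container's known status maps to
// TaskRunning and another's maps to TaskCreated, the minimum, TaskCreated, is returned;
// TaskZombie only serves as an unreachable upper bound for the comparison.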
// DockerConfig converts the given container in this task to the format of
// the Docker SDK 'Config' struct
func (task *Task) DockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) {
return task.dockerConfig(container, apiVersion)
}
func (task *Task) dockerConfig(container *apicontainer.Container, apiVersion dockerclient.DockerVersion) (*dockercontainer.Config, *apierrors.DockerClientConfigError) {
dockerEnv := make([]string, 0, len(container.Environment))
for envKey, envVal := range container.Environment {
dockerEnv = append(dockerEnv, envKey+"="+envVal)
}
var entryPoint []string
if container.EntryPoint != nil {
entryPoint = *container.EntryPoint
}
containerConfig := &dockercontainer.Config{
Image: container.Image,
Cmd: container.Command,
Entrypoint: entryPoint,
ExposedPorts: task.dockerExposedPorts(container),
Env: dockerEnv,
}
if container.DockerConfig.Config != nil {
if err := json.Unmarshal([]byte(aws.StringValue(container.DockerConfig.Config)), &containerConfig); err != nil {
return nil, &apierrors.DockerClientConfigError{Msg: "Unable decode given docker config: " + err.Error()}
}
}
if container.HealthCheckType == apicontainer.DockerHealthCheckType && containerConfig.Healthcheck == nil {
return nil, &apierrors.DockerClientConfigError{
Msg: "docker health check is nil while container health check type is DOCKER"}
}
if containerConfig.Labels == nil {
containerConfig.Labels = make(map[string]string)
}
if container.Type == apicontainer.ContainerCNIPause {
// apply hostname to pause container's docker config
return task.applyENIHostname(containerConfig), nil
}
return containerConfig, nil
}
func (task *Task) dockerExposedPorts(container *apicontainer.Container) nat.PortSet {
dockerExposedPorts := make(map[nat.Port]struct{})
for _, portBinding := range container.Ports {
dockerPort := nat.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
dockerExposedPorts[dockerPort] = struct{}{}
}
return dockerExposedPorts
}
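// Example of the key format produced above (illustrative values): a container port 8080
// over TCP yields the nat.Port "8080/tcp", so the returned set would be {"8080/tcp": {}}.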
// DockerHostConfig constructs the configuration recognized by docker
func (task *Task) DockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion, cfg *config.Config) (*dockercontainer.HostConfig, *apierrors.HostConfigError) {
return task.dockerHostConfig(container, dockerContainerMap, apiVersion, cfg)
}
// ApplyExecutionRoleLogsAuth will check whether the task has execution role
// credentials, and add the generated credentials endpoint to the associated HostConfig
func (task *Task) ApplyExecutionRoleLogsAuth(hostConfig *dockercontainer.HostConfig, credentialsManager credentials.Manager) *apierrors.HostConfigError {
id := task.GetExecutionCredentialsID()
if id == "" {
// No execution credentials set for the task. Do not inject the endpoint environment variable.
return &apierrors.HostConfigError{Msg: "No execution credentials set for the task"}
}
executionRoleCredentials, ok := credentialsManager.GetTaskCredentials(id)
if !ok {
// Task has credentials id set, but credentials manager is unaware of
// the id. This should never happen as the payload handler sets
// credentialsId for the task after adding credentials to the
// credentials manager
return &apierrors.HostConfigError{Msg: "Unable to get execution role credentials for task"}
}
credentialsEndpointRelativeURI := executionRoleCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
if hostConfig.LogConfig.Config == nil {
hostConfig.LogConfig.Config = map[string]string{}
}
hostConfig.LogConfig.Config[awslogsCredsEndpointOpt] = credentialsEndpointRelativeURI
return nil
}
func (task *Task) dockerHostConfig(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer, apiVersion dockerclient.DockerVersion, cfg *config.Config) (*dockercontainer.HostConfig, *apierrors.HostConfigError) {
dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap)
if err != nil {
return nil, &apierrors.HostConfigError{Msg: err.Error()}
}
dockerPortMap := task.dockerPortMap(container)
volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap)
if err != nil {
return nil, &apierrors.HostConfigError{Msg: err.Error()}
}
binds, err := task.dockerHostBinds(container)
if err != nil {
return nil, &apierrors.HostConfigError{Msg: err.Error()}
}
resources := task.getDockerResources(container)
// Populate hostConfig
hostConfig := &dockercontainer.HostConfig{
Links: dockerLinkArr,
Binds: binds,
PortBindings: dockerPortMap,
VolumesFrom: volumesFrom,
Resources: resources,
}
if err := task.overrideContainerRuntime(container, hostConfig, cfg); err != nil {
return nil, err
}
if container.DockerConfig.HostConfig != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig)
if err != nil {
return nil, &apierrors.HostConfigError{Msg: "Unable to decode given host config: " + err.Error()}
}
}
if err := task.platformHostConfigOverride(hostConfig); err != nil {
return nil, &apierrors.HostConfigError{Msg: err.Error()}
}
// Determine if network mode should be overridden and override it if needed
ok, networkMode := task.shouldOverrideNetworkMode(container, dockerContainerMap)
if ok {
hostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode)
// Override 'awsvpc' parameters if needed
if container.Type == apicontainer.ContainerCNIPause {
// apply ExtraHosts to HostConfig for pause container
if hosts := task.generateENIExtraHosts(); hosts != nil {
hostConfig.ExtraHosts = append(hostConfig.ExtraHosts, hosts...)
}
if task.shouldEnableIPv6() {
// By default, the disable ipv6 setting is turned on, so need to turn it off to enable it.
enableIPv6SysctlSetting(hostConfig)
}
// Override the DNS settings for the pause container if ENI has custom
// DNS settings
return task.overrideDNS(hostConfig), nil
}
}
ok, pidMode := task.shouldOverridePIDMode(container, dockerContainerMap)
if ok {
hostConfig.PidMode = dockercontainer.PidMode(pidMode)
}
ok, ipcMode := task.shouldOverrideIPCMode(container, dockerContainerMap)
if ok {
hostConfig.IpcMode = dockercontainer.IpcMode(ipcMode)
}
return hostConfig, nil
}
// overrideContainerRuntime overrides the runtime for the container in host config if needed.
func (task *Task) overrideContainerRuntime(container *apicontainer.Container, hostCfg *dockercontainer.HostConfig,
cfg *config.Config) *apierrors.HostConfigError {
if task.isGPUEnabled() && task.shouldRequireNvidiaRuntime(container) {
if task.NvidiaRuntime == "" {
return &apierrors.HostConfigError{Msg: "Runtime is not set for GPU containers"}
}
seelog.Debugf("Setting runtime as %s for container %s", task.NvidiaRuntime, container.Name)
hostCfg.Runtime = task.NvidiaRuntime
}
if cfg.InferentiaSupportEnabled && container.RequireNeuronRuntime() {
seelog.Debugf("Setting runtime as %s for container %s", neuronRuntime, container.Name)
hostCfg.Runtime = neuronRuntime
}
return nil
}
// getDockerResources takes an *apicontainer.Container and returns the Resources for the HostConfig struct
func (task *Task) getDockerResources(container *apicontainer.Container) dockercontainer.Resources {
// Convert MB to B and set Memory
dockerMem := int64(container.Memory * 1024 * 1024)
if dockerMem != 0 && dockerMem < apicontainer.DockerContainerMinimumMemoryInBytes {
seelog.Warnf("Task %s container %s memory setting is too low, increasing to %d bytes",
task.Arn, container.Name, apicontainer.DockerContainerMinimumMemoryInBytes)
dockerMem = apicontainer.DockerContainerMinimumMemoryInBytes
}
// Set CPUShares
cpuShare := task.dockerCPUShares(container.CPU)
resources := dockercontainer.Resources{
Memory: dockerMem,
CPUShares: cpuShare,
}
return resources
}
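// Rough numeric example for the conversion above (illustrative value): a container memory
// setting of 128 (MB) becomes 128*1024*1024 = 134217728 bytes; values that fall below
// apicontainer.DockerContainerMinimumMemoryInBytes are raised to that minimum.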
// shouldOverrideNetworkMode returns true if the network mode of the container needs
// to be overridden. It also returns the override string in this case. It returns
// false otherwise
func (task *Task) shouldOverrideNetworkMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
// TODO. We can do an early return here by determining which kind of task it is
// Example: Does this task have ENIs in its payload, what is its networking mode etc
if container.IsInternal() {
// If it's an internal container, set the network mode to none.
// Currently, internal containers are either for creating empty host
// volumes or for creating the 'pause' container. Both of these
// only need the network mode to be set to "none"
return true, networkModeNone
}
// For other types of containers, determine if the container map contains
// a pause container. Since a pause container is only added to the task
// when using non docker daemon supported network modes, its existence
// indicates the need to configure the network mode outside of supported
// network drivers
if !task.IsNetworkModeAWSVPC() {
return false, ""
}
pauseContName := ""
for _, cont := range task.Containers {
if cont.Type == apicontainer.ContainerCNIPause {
pauseContName = cont.Name
break
}
}
if pauseContName == "" {
seelog.Critical("Pause container required, but not found in the task: %s", task.String())
return false, ""
}
pauseContainer, ok := dockerContainerMap[pauseContName]
if !ok || pauseContainer == nil {
// This should never be the case and implies a code-bug.
seelog.Criticalf("Pause container required, but not found in container map for container: [%s] in task: %s",
container.String(), task.String())
return false, ""
}
return true, dockerMappingContainerPrefix + pauseContainer.DockerID
}
// overrideDNS overrides a container's host config if the following conditions are
// true:
// 1. Task has an ENI associated with it
// 2. ENI has custom DNS IPs and search list associated with it
// This should only be done for the pause container as other containers inherit
// /etc/resolv.conf of this container (they share the network namespace)
func (task *Task) overrideDNS(hostConfig *dockercontainer.HostConfig) *dockercontainer.HostConfig {
eni := task.GetPrimaryENI()
if eni == nil {
return hostConfig
}
hostConfig.DNS = eni.DomainNameServers
hostConfig.DNSSearch = eni.DomainNameSearchList
return hostConfig
}
// applyENIHostname adds the hostname provided by the ENI message to the
// container's docker config. At the time of implementation, we are only using it
// to configure the pause container for awsvpc tasks
func (task *Task) applyENIHostname(dockerConfig *dockercontainer.Config) *dockercontainer.Config {
eni := task.GetPrimaryENI()
if eni == nil {
return dockerConfig
}
hostname := eni.GetHostname()
if hostname == "" {
return dockerConfig
}
dockerConfig.Hostname = hostname
return dockerConfig
}
// generateENIExtraHosts returns a slice of strings of the form "hostname:ip"
// that is generated using the hostname and ip addresses allocated to the ENI
func (task *Task) generateENIExtraHosts() []string {
eni := task.GetPrimaryENI()
if eni == nil {
return nil
}
hostname := eni.GetHostname()
if hostname == "" {
return nil
}
extraHosts := []string{}
for _, ip := range eni.GetIPV4Addresses() {
host := fmt.Sprintf("%s:%s", hostname, ip)
extraHosts = append(extraHosts, host)
}
return extraHosts
}
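// Sketch of the output (hostname and address are hypothetical): with hostname
// "ip-10-0-0-1.ec2.internal" and a single IPv4 address "10.0.0.1", the returned slice is
// []string{"ip-10-0-0-1.ec2.internal:10.0.0.1"}.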
func (task *Task) shouldEnableIPv6() bool {
eni := task.GetPrimaryENI()
if eni == nil {
return false
}
return len(eni.GetIPV6Addresses()) > 0
}
// shouldOverridePIDMode returns true if the PIDMode of the container needs
// to be overridden. It also returns the override string in this case. It returns
// false otherwise
func (task *Task) shouldOverridePIDMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
// If the container is an internal container (ContainerEmptyHostVolume,
// ContainerCNIPause, or ContainerNamespacePause), then PID namespace for
// the container itself should be private (default Docker option)
if container.IsInternal() {
return false, ""
}
switch task.getPIDMode() {
case pidModeHost:
return true, pidModeHost
case pidModeTask:
pauseCont, ok := task.ContainerByName(NamespacePauseContainerName)
if !ok {
seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
return false, ""
}
pauseDockerID, ok := dockerContainerMap[pauseCont.Name]
if !ok || pauseDockerID == nil {
// Docker container shouldn't be nil or not exist if the Container definition within task exists; implies code-bug
seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
return false, ""
}
return true, dockerMappingContainerPrefix + pauseDockerID.DockerID
// If PIDMode is not Host or Task, then no need to override
default:
return false, ""
}
}
// shouldOverrideIPCMode returns true if the IPCMode of the container needs
// to be overridden. It also returns the override string in this case. It returns
// false otherwise
func (task *Task) shouldOverrideIPCMode(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) (bool, string) {
// Not all internal containers need the same IPCMode. The NamespaceContainerPause
// needs to be "shareable" if ipcMode is "task". All other internal containers should
// defer to the Docker daemon default option (either shareable or private depending on
// version and configuration)
if container.IsInternal() {
if container.Type == apicontainer.ContainerNamespacePause {
// Setting NamespaceContainerPause to be shareable with other containers
if task.getIPCMode() == ipcModeTask {
return true, ipcModeSharable
}
}
// Defaulting to Docker daemon default option
return false, ""
}
switch task.getIPCMode() {
// No IPCMode provided in Task Definition, no need to override
case "":
return false, ""
// IPCMode is none - container will have own private namespace with /dev/shm not mounted
case ipcModeNone:
return true, ipcModeNone
case ipcModeHost:
return true, ipcModeHost
case ipcModeTask:
pauseCont, ok := task.ContainerByName(NamespacePauseContainerName)
if !ok {
seelog.Criticalf("Namespace Pause container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
return false, ""
}
pauseDockerID, ok := dockerContainerMap[pauseCont.Name]
if !ok || pauseDockerID == nil {
// Docker container shouldn't be nil or not exist if the Container definition within task exists; implies code-bug
seelog.Criticalf("Namespace Pause docker container not found in the task: %s; Setting Task's Desired Status to Stopped", task.Arn)
task.SetDesiredStatus(apitaskstatus.TaskStopped)
return false, ""
}
return true, dockerMappingContainerPrefix + pauseDockerID.DockerID
default:
return false, ""
}
}
func (task *Task) initializeContainerOrderingForVolumes() error {
for _, container := range task.Containers {
if len(container.VolumesFrom) > 0 {
for _, volume := range container.VolumesFrom {
if _, ok := task.ContainerByName(volume.SourceContainer); !ok {
return fmt.Errorf("could not find container with name %s", volume.SourceContainer)
}
dependOn := apicontainer.DependsOn{ContainerName: volume.SourceContainer, Condition: ContainerOrderingCreateCondition}
container.SetDependsOn(append(container.GetDependsOn(), dependOn))
}
}
}
return nil
}
func (task *Task) initializeContainerOrderingForLinks() error {
for _, container := range task.Containers {
if len(container.Links) > 0 {
for _, link := range container.Links {
linkParts := strings.Split(link, ":")
if len(linkParts) > 2 {
return fmt.Errorf("Invalid link format")
}
linkName := linkParts[0]
if _, ok := task.ContainerByName(linkName); !ok {
return fmt.Errorf("could not find container with name %s", linkName)
}
dependOn := apicontainer.DependsOn{ContainerName: linkName, Condition: ContainerOrderingStartCondition}
container.SetDependsOn(append(container.GetDependsOn(), dependOn))
}
}
}
return nil
}
func (task *Task) dockerLinks(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) {
dockerLinkArr := make([]string, len(container.Links))
for i, link := range container.Links {
linkParts := strings.Split(link, ":")
if len(linkParts) > 2 {
return []string{}, errors.New("Invalid link format")
}
linkName := linkParts[0]
var linkAlias string
if len(linkParts) == 2 {
linkAlias = linkParts[1]
} else {
seelog.Warnf("Link name [%s] found with no linkalias for container: [%s] in task: [%s]",
linkName, container.String(), task.String())
linkAlias = linkName
}
targetContainer, ok := dockerContainerMap[linkName]
if !ok {
return []string{}, errors.New("Link target not available: " + linkName)
}
dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias
}
return dockerLinkArr, nil
}
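// Illustrative output (names are hypothetical): a link "db:database" resolving to a docker
// container named "ecs-db-1" yields "ecs-db-1:database"; a bare link "db" logs a warning
// and reuses the link name as the alias, yielding "ecs-db-1:db".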
func (task *Task) dockerPortMap(container *apicontainer.Container) nat.PortMap {
dockerPortMap := nat.PortMap{}
for _, portBinding := range container.Ports {
dockerPort := nat.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
currentMappings, existing := dockerPortMap[dockerPort]
if existing {
dockerPortMap[dockerPort] = append(currentMappings, nat.PortBinding{HostPort: strconv.Itoa(int(portBinding.HostPort))})
} else {
dockerPortMap[dockerPort] = []nat.PortBinding{{HostPort: strconv.Itoa(int(portBinding.HostPort))}}
}
}
return dockerPortMap
}
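// Illustrative result (ports are hypothetical): a single binding of container port 80/tcp
// to host port 8080 produces {"80/tcp": [{HostPort: "8080"}]}; further bindings for the
// same container port append to that slice.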
func (task *Task) dockerVolumesFrom(container *apicontainer.Container, dockerContainerMap map[string]*apicontainer.DockerContainer) ([]string, error) {
volumesFrom := make([]string, len(container.VolumesFrom))
for i, volume := range container.VolumesFrom {
targetContainer, ok := dockerContainerMap[volume.SourceContainer]
if !ok {
return []string{}, errors.New("Volume target not available: " + volume.SourceContainer)
}
if volume.ReadOnly {
volumesFrom[i] = targetContainer.DockerName + ":ro"
} else {
volumesFrom[i] = targetContainer.DockerName
}
}
return volumesFrom, nil
}
func (task *Task) dockerHostBinds(container *apicontainer.Container) ([]string, error) {
if container.Name == emptyHostVolumeName {
// emptyHostVolumes are handled as a special case in config, not
// hostConfig
return []string{}, nil
}
binds := make([]string, len(container.MountPoints))
for i, mountPoint := range container.MountPoints {
hv, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume)
}
if hv.Source() == "" || mountPoint.ContainerPath == "" {
seelog.Errorf(
"Unable to resolve volume mounts for container [%s]; invalid path: [%s]; [%s] -> [%s] in task: [%s]",
container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath, task.String())
return []string{}, errors.Errorf("Unable to resolve volume mounts; invalid path: %s %s; %s -> %s",
container.Name, mountPoint.SourceVolume, hv.Source(), mountPoint.ContainerPath)
}
bind := hv.Source() + ":" + mountPoint.ContainerPath
if mountPoint.ReadOnly {
bind += ":ro"
}
binds[i] = bind
}
return binds, nil
}
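// Example bind string produced above (paths are hypothetical): a volume with source
// "/var/lib/docker/volumes/vol1" mounted read-only at "/data" becomes
// "/var/lib/docker/volumes/vol1:/data:ro".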
// UpdateStatus updates a task's known and desired statuses to be compatible
// with all of its containers
// It will return a bool indicating if there was a change
func (task *Task) UpdateStatus() bool {
change := task.updateTaskKnownStatus()
// DesiredStatus can change based on a new known status
task.UpdateDesiredStatus()
return change != apitaskstatus.TaskStatusNone
}
// UpdateDesiredStatus updates the desired status of the task, its containers and its resources
func (task *Task) UpdateDesiredStatus() {
task.lock.Lock()
defer task.lock.Unlock()
task.updateTaskDesiredStatusUnsafe()
task.updateContainerDesiredStatusUnsafe(task.DesiredStatusUnsafe)
task.updateResourceDesiredStatusUnsafe(task.DesiredStatusUnsafe)
}
// updateTaskDesiredStatusUnsafe determines what status the task should properly be at based on the containers' statuses
// Invariant: task desired status must be stopped if any essential container is stopped
func (task *Task) updateTaskDesiredStatusUnsafe() {
seelog.Debugf("Updating task: [%s]", task.stringUnsafe())
// A task's desired status is stopped if any essential container is stopped
// Otherwise, the task's desired status is unchanged (typically running, but no need to change)
for _, cont := range task.Containers {
if task.DesiredStatusUnsafe == apitaskstatus.TaskStopped {
break
}
if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) {
seelog.Infof("api/task: Updating task desired status to stopped because of container: [%s]; task: [%s]",
cont.Name, task.stringUnsafe())
task.DesiredStatusUnsafe = apitaskstatus.TaskStopped
}
}
}
// updateContainerDesiredStatusUnsafe sets all containers' desired statuses to the
// task's desired status
// Invariant: container desired status is <= task desired status converted to container status
// Note: task desired status and container desired status is typically only RUNNING or STOPPED
func (task *Task) updateContainerDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) {
for _, container := range task.Containers {
taskDesiredStatusToContainerStatus := apitaskstatus.MapTaskToContainerStatus(taskDesiredStatus, container.GetSteadyStateStatus())
if container.GetDesiredStatus() < taskDesiredStatusToContainerStatus {
container.SetDesiredStatus(taskDesiredStatusToContainerStatus)
}
}
}
// updateResourceDesiredStatusUnsafe sets all resources' desired status depending on the
// task's desired status
// TODO: Create a mapping of resource status to the corresponding task status and use it here
func (task *Task) updateResourceDesiredStatusUnsafe(taskDesiredStatus apitaskstatus.TaskStatus) {
resources := task.getResourcesUnsafe()
for _, r := range resources {
if taskDesiredStatus == apitaskstatus.TaskRunning {
if r.GetDesiredStatus() < r.SteadyState() {
r.SetDesiredStatus(r.SteadyState())
}
} else {
if r.GetDesiredStatus() < r.TerminalStatus() {
r.SetDesiredStatus(r.TerminalStatus())
}
}
}
}
// SetKnownStatus sets the known status of the task
func (task *Task) SetKnownStatus(status apitaskstatus.TaskStatus) {
task.setKnownStatus(status)
task.updateKnownStatusTime()
}
func (task *Task) setKnownStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.KnownStatusUnsafe = status
}
func (task *Task) updateKnownStatusTime() {
task.lock.Lock()
defer task.lock.Unlock()
task.KnownStatusTimeUnsafe = ttime.Now()
}
// GetKnownStatus gets the KnownStatus of the task
func (task *Task) GetKnownStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.KnownStatusUnsafe
}
// GetKnownStatusTime gets the KnownStatusTime of the task
func (task *Task) GetKnownStatusTime() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.KnownStatusTimeUnsafe
}
// SetCredentialsID sets the credentials ID for the task
func (task *Task) SetCredentialsID(id string) {
task.lock.Lock()
defer task.lock.Unlock()
task.credentialsID = id
}
// GetCredentialsID gets the credentials ID for the task
func (task *Task) GetCredentialsID() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.credentialsID
}
// SetCredentialsRelativeURI sets the credentials relative uri for the task
func (task *Task) SetCredentialsRelativeURI(uri string) {
task.lock.Lock()
defer task.lock.Unlock()
task.credentialsRelativeURIUnsafe = uri
}
// GetCredentialsRelativeURI returns the credentials relative uri for the task
func (task *Task) GetCredentialsRelativeURI() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.credentialsRelativeURIUnsafe
}
// SetExecutionRoleCredentialsID sets the ID for the task execution role credentials
func (task *Task) SetExecutionRoleCredentialsID(id string) {
task.lock.Lock()
defer task.lock.Unlock()
task.ExecutionCredentialsID = id
}
// GetExecutionCredentialsID gets the credentials ID for the task
func (task *Task) GetExecutionCredentialsID() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.ExecutionCredentialsID
}
// GetDesiredStatus gets the desired status of the task
func (task *Task) GetDesiredStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the task
func (task *Task) SetDesiredStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatus of the task
func (task *Task) GetSentStatus() apitaskstatus.TaskStatus {
task.lock.RLock()
defer task.lock.RUnlock()
return task.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatus of the task
func (task *Task) SetSentStatus(status apitaskstatus.TaskStatus) {
task.lock.Lock()
defer task.lock.Unlock()
task.SentStatusUnsafe = status
}
// AddTaskENI adds ENI information to the task.
func (task *Task) AddTaskENI(eni *apieni.ENI) {
task.lock.Lock()
defer task.lock.Unlock()
if task.ENIs == nil {
task.ENIs = make([]*apieni.ENI, 0)
}
task.ENIs = append(task.ENIs, eni)
}
// GetTaskENIs returns the list of ENIs for the task.
func (task *Task) GetTaskENIs() []*apieni.ENI {
// TODO: what's the point of locking if we are returning a pointer?
task.lock.RLock()
defer task.lock.RUnlock()
return task.ENIs
}
// GetPrimaryENI returns the primary ENI of the task. Since ACS can potentially send
// multiple ENIs to the agent, the first ENI in the list is considered as the primary ENI.
func (task *Task) GetPrimaryENI() *apieni.ENI {
task.lock.RLock()
defer task.lock.RUnlock()
if len(task.ENIs) == 0 {
return nil
}
return task.ENIs[0]
}
// SetAppMesh sets the app mesh config of the task
func (task *Task) SetAppMesh(appMesh *apiappmesh.AppMesh) {
task.lock.Lock()
defer task.lock.Unlock()
task.AppMesh = appMesh
}
// GetAppMesh returns the app mesh config of the task
func (task *Task) GetAppMesh() *apiappmesh.AppMesh {
task.lock.RLock()
defer task.lock.RUnlock()
return task.AppMesh
}
// GetStopSequenceNumber returns the stop sequence number of a task
func (task *Task) GetStopSequenceNumber() int64 {
task.lock.RLock()
defer task.lock.RUnlock()
return task.StopSequenceNumber
}
// SetStopSequenceNumber sets the stop sequence number of a task
func (task *Task) SetStopSequenceNumber(seqnum int64) {
task.lock.Lock()
defer task.lock.Unlock()
task.StopSequenceNumber = seqnum
}
// SetPullStartedAt sets the task pullstartedat timestamp and returns whether
// this field was updated or not
func (task *Task) SetPullStartedAt(timestamp time.Time) bool {
task.lock.Lock()
defer task.lock.Unlock()
// Only set this field if it is not set
if task.PullStartedAtUnsafe.IsZero() {
task.PullStartedAtUnsafe = timestamp
return true
}
return false
}
// GetPullStartedAt returns the PullStartedAt timestamp
func (task *Task) GetPullStartedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PullStartedAtUnsafe
}
// SetPullStoppedAt sets the task pullstoppedat timestamp
func (task *Task) SetPullStoppedAt(timestamp time.Time) {
task.lock.Lock()
defer task.lock.Unlock()
task.PullStoppedAtUnsafe = timestamp
}
// GetPullStoppedAt returns the PullStoppedAt timestamp
func (task *Task) GetPullStoppedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PullStoppedAtUnsafe
}
// SetExecutionStoppedAt sets the ExecutionStoppedAt timestamp of the task
func (task *Task) SetExecutionStoppedAt(timestamp time.Time) bool {
task.lock.Lock()
defer task.lock.Unlock()
if task.ExecutionStoppedAtUnsafe.IsZero() {
task.ExecutionStoppedAtUnsafe = timestamp
return true
}
return false
}
// GetExecutionStoppedAt returns the task executionStoppedAt timestamp
func (task *Task) GetExecutionStoppedAt() time.Time {
task.lock.RLock()
defer task.lock.RUnlock()
return task.ExecutionStoppedAtUnsafe
}
// String returns a human readable string representation of this object
func (task *Task) String() string {
return task.stringUnsafe()
}
// stringUnsafe returns a human readable string representation of this object
func (task *Task) stringUnsafe() string {
return fmt.Sprintf("%s:%s %s, TaskStatus: (%s->%s) N Containers: %d, N ENIs %d",
task.Family, task.Version, task.Arn,
task.KnownStatusUnsafe.String(), task.DesiredStatusUnsafe.String(),
len(task.Containers), len(task.ENIs))
}
// GetID is used to retrieve the taskID from taskARN
// Reference: http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-ecs
func (task *Task) GetID() (string, error) {
// Parse taskARN
parsedARN, err := arn.Parse(task.Arn)
if err != nil {
return "", errors.Wrapf(err, "task get-id: malformed taskARN: %s", task.Arn)
}
// Get task resource section
resource := parsedARN.Resource
if !strings.Contains(resource, arnResourceDelimiter) {
return "", errors.Errorf("task get-id: malformed task resource: %s", resource)
}
resourceSplit := strings.Split(resource, arnResourceDelimiter)
return resourceSplit[len(resourceSplit)-1], nil
}
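// Example (hypothetical ARN, for illustration only): for
// "arn:aws:ecs:us-west-2:123456789012:task/8f3e1a2b-0c4d-4e5f-9a6b-7c8d9e0f1a2b",
// the parsed resource is "task/8f3e1a2b-..." and the last "/"-delimited segment,
// "8f3e1a2b-0c4d-4e5f-9a6b-7c8d9e0f1a2b", is returned as the task ID.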
// RecordExecutionStoppedAt checks whether an essential container has stopped
// and sets the task's executionStoppedAt timestamp
func (task *Task) RecordExecutionStoppedAt(container *apicontainer.Container) {
if !container.Essential {
return
}
if container.GetKnownStatus() != apicontainerstatus.ContainerStopped {
return
}
// If the essential container is stopped, set the ExecutionStoppedAt timestamp
now := time.Now()
ok := task.SetExecutionStoppedAt(now)
if !ok {
// ExecutionStoppedAt was already recorded. Nothing left to do here
return
}
seelog.Infof("Task [%s]: recording execution stopped time. Essential container [%s] stopped at: %s",
task.Arn, container.Name, now.String())
}
// GetResources returns the list of task resources from ResourcesMap
func (task *Task) GetResources() []taskresource.TaskResource {
task.lock.RLock()
defer task.lock.RUnlock()
return task.getResourcesUnsafe()
}
// getResourcesUnsafe returns the list of task resources from ResourcesMap
func (task *Task) getResourcesUnsafe() []taskresource.TaskResource {
var resourceList []taskresource.TaskResource
for _, resources := range task.ResourcesMapUnsafe {
resourceList = append(resourceList, resources...)
}
return resourceList
}
// AddResource adds a resource to ResourcesMap
func (task *Task) AddResource(resourceType string, resource taskresource.TaskResource) {
task.lock.Lock()
defer task.lock.Unlock()
task.ResourcesMapUnsafe[resourceType] = append(task.ResourcesMapUnsafe[resourceType], resource)
}
// SetTerminalReason sets the terminalReason string and this can only be set
// once per the task's lifecycle. This field does not accept updates.
func (task *Task) SetTerminalReason(reason string) {
seelog.Infof("Task [%s]: attempting to set terminal reason for task [%s]", task.Arn, reason)
task.terminalReasonOnce.Do(func() {
seelog.Infof("Task [%s]: setting terminal reason for task [%s]", task.Arn, reason)
// Converts the first letter of terminal reason into capital letter
words := strings.Fields(reason)
words[0] = strings.Title(words[0])
task.terminalReason = strings.Join(words, " ")
})
}
// GetTerminalReason retrieves the terminalReason string
func (task *Task) GetTerminalReason() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.terminalReason
}
// PopulateASMAuthData sets docker auth credentials for a container
func (task *Task) PopulateASMAuthData(container *apicontainer.Container) error {
secretID := container.RegistryAuthentication.ASMAuthData.CredentialsParameter
resource, ok := task.getASMAuthResource()
if !ok {
return errors.New("task auth data: unable to fetch ASM resource")
}
// This will cause a panic if the resource is not of ASMAuthResource type.
// But, it's better to panic as we should never have reached this condition
// unless we released an agent without any testing around that code path
asmResource := resource[0].(*asmauth.ASMAuthResource)
dac, ok := asmResource.GetASMDockerAuthConfig(secretID)
if !ok {
return errors.Errorf("task auth data: unable to fetch docker auth config [%s]", secretID)
}
container.SetASMDockerAuthConfig(dac)
return nil
}
func (task *Task) getASMAuthResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
res, ok := task.ResourcesMapUnsafe[asmauth.ResourceName]
return res, ok
}
// getSSMSecretsResource retrieves ssmsecret resource from resource map
func (task *Task) getSSMSecretsResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
res, ok := task.ResourcesMapUnsafe[ssmsecret.ResourceName]
return res, ok
}
// PopulateSecrets appends secrets to container's env var map and hostconfig section
func (task *Task) PopulateSecrets(hostConfig *dockercontainer.HostConfig, container *apicontainer.Container) *apierrors.DockerClientConfigError {
var ssmRes *ssmsecret.SSMSecretResource
var asmRes *asmsecret.ASMSecretResource
if container.ShouldCreateWithSSMSecret() {
resource, ok := task.getSSMSecretsResource()
if !ok {
return &apierrors.DockerClientConfigError{Msg: "task secret data: unable to fetch SSM Secrets resource"}
}
ssmRes = resource[0].(*ssmsecret.SSMSecretResource)
}
if container.ShouldCreateWithASMSecret() {
resource, ok := task.getASMSecretsResource()
if !ok {
return &apierrors.DockerClientConfigError{Msg: "task secret data: unable to fetch ASM Secrets resource"}
}
asmRes = resource[0].(*asmsecret.ASMSecretResource)
}
populateContainerSecrets(hostConfig, container, ssmRes, asmRes)
return nil
}
func populateContainerSecrets(hostConfig *dockercontainer.HostConfig, container *apicontainer.Container,
ssmRes *ssmsecret.SSMSecretResource, asmRes *asmsecret.ASMSecretResource) {
envVars := make(map[string]string)
logDriverTokenName := ""
logDriverTokenSecretValue := ""
for _, secret := range container.Secrets {
secretVal := ""
if secret.Provider == apicontainer.SecretProviderSSM {
k := secret.GetSecretResourceCacheKey()
if secretValue, ok := ssmRes.GetCachedSecretValue(k); ok {
secretVal = secretValue
}
}
if secret.Provider == apicontainer.SecretProviderASM {
k := secret.GetSecretResourceCacheKey()
if secretValue, ok := asmRes.GetCachedSecretValue(k); ok {
secretVal = secretValue
}
}
if secret.Type == apicontainer.SecretTypeEnv {
envVars[secret.Name] = secretVal
continue
}
if secret.Target == apicontainer.SecretTargetLogDriver {
// Log driver secrets for container using awsfirelens log driver won't be saved in log config and passed to
// Docker here. They will only be used to configure the firelens container.
if container.GetLogDriver() == firelensDriverName {
continue
}
logDriverTokenName = secret.Name
logDriverTokenSecretValue = secretVal
// Check that both the name and the secret value for the log driver exist,
// and add the secret value for this log driver into the container's HostConfig
if hostConfig.LogConfig.Type != "" && logDriverTokenName != "" && logDriverTokenSecretValue != "" {
if hostConfig.LogConfig.Config == nil {
hostConfig.LogConfig.Config = map[string]string{}
}
hostConfig.LogConfig.Config[logDriverTokenName] = logDriverTokenSecretValue
}
}
}
container.MergeEnvironmentVariables(envVars)
}
// PopulateSecretLogOptionsToFirelensContainer collects secret log option values for awsfirelens log driver from task
// resource and sets them as envs of the firelens container. The firelens container will use the envs to resolve config
// file variables constructed for secret log options when loading the config file.
func (task *Task) PopulateSecretLogOptionsToFirelensContainer(firelensContainer *apicontainer.Container) *apierrors.DockerClientConfigError {
firelensENVs := make(map[string]string)
var ssmRes *ssmsecret.SSMSecretResource
var asmRes *asmsecret.ASMSecretResource
resource, ok := task.getSSMSecretsResource()
if ok {
ssmRes = resource[0].(*ssmsecret.SSMSecretResource)
}
resource, ok = task.getASMSecretsResource()
if ok {
asmRes = resource[0].(*asmsecret.ASMSecretResource)
}
for _, container := range task.Containers {
if container.GetLogDriver() != firelensDriverName {
continue
}
logDriverSecretData, err := collectLogDriverSecretData(container.Secrets, ssmRes, asmRes)
if err != nil {
return &apierrors.DockerClientConfigError{
Msg: fmt.Sprintf("unable to generate config to create firelens container: %v", err),
}
}
idx := task.GetContainerIndex(container.Name)
if idx < 0 {
return &apierrors.DockerClientConfigError{
Msg: fmt.Sprintf("unable to generate config to create firelens container because container %s is not found in task", container.Name),
}
}
for key, value := range logDriverSecretData {
envKey := fmt.Sprintf(firelensConfigVarFmt, key, idx)
firelensENVs[envKey] = value
}
}
firelensContainer.MergeEnvironmentVariables(firelensENVs)
return nil
}
// collectLogDriverSecretData collects all the secret values for log driver secrets.
func collectLogDriverSecretData(secrets []apicontainer.Secret, ssmRes *ssmsecret.SSMSecretResource,
asmRes *asmsecret.ASMSecretResource) (map[string]string, error) {
secretData := make(map[string]string)
for _, secret := range secrets {
if secret.Target != apicontainer.SecretTargetLogDriver {
continue
}
secretVal := ""
cacheKey := secret.GetSecretResourceCacheKey()
if secret.Provider == apicontainer.SecretProviderSSM {
if ssmRes == nil {
return nil, errors.Errorf("missing secret value for secret %s", secret.Name)
}
if secretValue, ok := ssmRes.GetCachedSecretValue(cacheKey); ok {
secretVal = secretValue
}
} else if secret.Provider == apicontainer.SecretProviderASM {
if asmRes == nil {
return nil, errors.Errorf("missing secret value for secret %s", secret.Name)
}
if secretValue, ok := asmRes.GetCachedSecretValue(cacheKey); ok {
secretVal = secretValue
}
}
secretData[secret.Name] = secretVal
}
return secretData, nil
}
// getASMSecretsResource retrieves asmsecret resource from resource map
func (task *Task) getASMSecretsResource() ([]taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
res, ok := task.ResourcesMapUnsafe[asmsecret.ResourceName]
return res, ok
}
// InitializeResources initializes the required field in the task on agent restart
// Some of the fields in the task aren't saved in the agent state file, so the agent needs
// to initialize these fields before processing the task, e.g. the docker client in a resource
func (task *Task) InitializeResources(resourceFields *taskresource.ResourceFields) {
task.lock.Lock()
defer task.lock.Unlock()
for _, resources := range task.ResourcesMapUnsafe {
for _, resource := range resources {
resource.Initialize(resourceFields, task.KnownStatusUnsafe, task.DesiredStatusUnsafe)
}
}
}
// Retrieves a Task's PIDMode
func (task *Task) getPIDMode() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.PIDMode
}
// Retrieves a Task's IPCMode
func (task *Task) getIPCMode() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.IPCMode
}
// AssociationsByTypeAndContainer gets a list of names of all the associations associated with a container and of a
// certain type
func (task *Task) AssociationsByTypeAndContainer(associationType, containerName string) []string {
task.lock.RLock()
defer task.lock.RUnlock()
var associationNames []string
for _, association := range task.Associations {
if association.Type == associationType {
for _, associatedContainerName := range association.Containers {
if associatedContainerName == containerName {
associationNames = append(associationNames, association.Name)
}
}
}
}
return associationNames
}
// AssociationByTypeAndName gets an association of a certain type and name
func (task *Task) AssociationByTypeAndName(associationType, associationName string) (*Association, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
for _, association := range task.Associations {
if association.Type == associationType && association.Name == associationName {
return &association, true
}
}
return nil, false
}
// GetContainerIndex returns the index of the container in the container list. This doesn't count internal containers.
func (task *Task) GetContainerIndex(containerName string) int {
task.lock.RLock()
defer task.lock.RUnlock()
idx := 0
for _, container := range task.Containers {
if container.IsInternal() {
continue
}
if container.Name == containerName {
return idx
}
idx++
}
return -1
}
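// Worked example (hypothetical task): with a container list of [pause (internal), "app",
// "sidecar"], GetContainerIndex("sidecar") returns 1 because internal containers are
// skipped while counting.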
func (task *Task) initializeEnvfilesResource(config *config.Config, credentialsManager credentials.Manager) error {
for _, container := range task.Containers {
if container.ShouldCreateWithEnvFiles() {
envfileResource, err := envFiles.NewEnvironmentFileResource(config.Cluster, task.Arn, config.AWSRegion, config.DataDir,
container.Name, container.EnvironmentFiles, credentialsManager, task.ExecutionCredentialsID)
if err != nil {
return errors.Wrapf(err, "unable to initialize envfiles resource for container %s", container.Name)
}
task.AddResource(envFiles.ResourceName, envfileResource)
container.BuildResourceDependency(envfileResource.GetName(), resourcestatus.ResourceCreated, apicontainerstatus.ContainerCreated)
}
}
return nil
}
func (task *Task) getEnvfilesResource(containerName string) (taskresource.TaskResource, bool) {
task.lock.RLock()
defer task.lock.RUnlock()
resources, ok := task.ResourcesMapUnsafe[envFiles.ResourceName]
if !ok {
return nil, false
}
for _, resource := range resources {
envfileResource := resource.(*envFiles.EnvironmentFileResource)
if envfileResource.GetContainerName() == containerName {
return envfileResource, true
}
}
// was not able to retrieve envfile resource for specified container name
return nil, false
}
// MergeEnvVarsFromEnvfiles should be called when creating a container -
// this method reads the environment variables specified in the environment files
// that were downloaded to disk and merges them with existing environment variables
func (task *Task) MergeEnvVarsFromEnvfiles(container *apicontainer.Container) *apierrors.ResourceInitError {
var envfileResource *envFiles.EnvironmentFileResource
resource, ok := task.getEnvfilesResource(container.Name)
if !ok {
err := errors.New(fmt.Sprintf("task environment files: unable to retrieve environment files resource for container %s", container.Name))
return apierrors.NewResourceInitError(task.Arn, err)
}
envfileResource = resource.(*envFiles.EnvironmentFileResource)
envVarsList, err := envfileResource.ReadEnvVarsFromEnvfiles()
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
err = container.MergeEnvironmentVariablesFromEnvfiles(envVarsList)
if err != nil {
return apierrors.NewResourceInitError(task.Arn, err)
}
return nil
}
// GetLocalIPAddress returns the local IP address of the task.
func (task *Task) GetLocalIPAddress() string {
task.lock.RLock()
defer task.lock.RUnlock()
return task.LocalIPAddressUnsafe
}
// SetLocalIPAddress sets the local IP address of the task.
func (task *Task) SetLocalIPAddress(addr string) {
task.lock.Lock()
defer task.lock.Unlock()
task.LocalIPAddressUnsafe = addr
}
// UpdateTaskENIsLinkName updates the link name of all the enis associated with the task.
func (task *Task) UpdateTaskENIsLinkName() {
task.lock.Lock()
defer task.lock.Unlock()
// Update the link name of the task eni.
for _, eni := range task.ENIs {
eni.GetLinkName()
}
}
| 1 | 26,486 |
Should setting the task runtime also go under the above if condition?
|
aws-amazon-ecs-agent
|
go
|
@@ -32,6 +32,8 @@ import (
var isDNSLabel = regexp.MustCompile(`^[a-zA-Z0-9]([-]*[a-zA-Z0-9])+$`).MatchString
+const _defaultListEntriesPageSize = 50
+
//Handler service is used to register SPIFFE IDs, and the attestation logic that should
//be performed on a workload before those IDs can be issued.
type Handler struct {
| 1 |
package registration
import (
"crypto/x509"
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"time"
"github.com/gofrs/uuid"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/sirupsen/logrus"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/peertracker"
"github.com/spiffe/spire/pkg/common/telemetry"
telemetry_common "github.com/spiffe/spire/pkg/common/telemetry/common"
telemetry_registrationapi "github.com/spiffe/spire/pkg/common/telemetry/server/registrationapi"
"github.com/spiffe/spire/pkg/server/ca"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/pkg/server/plugin/datastore"
"github.com/spiffe/spire/proto/spire/api/registration"
"github.com/spiffe/spire/proto/spire/common"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
)
var isDNSLabel = regexp.MustCompile(`^[a-zA-Z0-9]([-]*[a-zA-Z0-9])+$`).MatchString
//Handler service is used to register SPIFFE IDs, and the attestation logic that should
//be performed on a workload before those IDs can be issued.
type Handler struct {
Log logrus.FieldLogger
Metrics telemetry.Metrics
Catalog catalog.Catalog
TrustDomain url.URL
ServerCA ca.ServerCA
}
//CreateEntry creates an entry in the Registration table,
//used to assign SPIFFE IDs to nodes and workloads.
func (h *Handler) CreateEntry(ctx context.Context, request *common.RegistrationEntry) (_ *registration.RegistrationEntryID, err error) {
counter := telemetry_registrationapi.StartCreateEntryCall(h.Metrics)
defer counter.Done(&err)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
log := h.Log.WithField(telemetry.Method, telemetry.CreateRegistrationEntry)
request, err = h.prepareRegistrationEntry(request, false)
if err != nil {
err = status.Error(codes.InvalidArgument, err.Error())
log.WithError(err).Error("Request parameter validation error")
return nil, err
}
ds := h.getDataStore()
unique, err := h.isEntryUnique(ctx, ds, request)
if err != nil {
log.WithError(err).Error("Error trying to create entry")
return nil, status.Errorf(codes.Internal, "error trying to create entry: %v", err)
}
if !unique {
log.Error("Entry already exists")
return nil, status.Error(codes.AlreadyExists, "entry already exists")
}
createResponse, err := ds.CreateRegistrationEntry(ctx,
&datastore.CreateRegistrationEntryRequest{Entry: request},
)
if err != nil {
log.WithError(err).Error("Error trying to create entry")
return nil, status.Errorf(codes.Internal, "error trying to create entry: %v", err)
}
return ®istration.RegistrationEntryID{Id: createResponse.Entry.EntryId}, nil
}
//DeleteEntry deletes an entry in the Registration table
func (h *Handler) DeleteEntry(ctx context.Context, request *registration.RegistrationEntryID) (_ *common.RegistrationEntry, err error) {
counter := telemetry_registrationapi.StartDeleteEntryCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.DeleteRegistrationEntry)
ds := h.getDataStore()
req := &datastore.DeleteRegistrationEntryRequest{
EntryId: request.Id,
}
resp, err := ds.DeleteRegistrationEntry(ctx, req)
if err != nil {
log.WithError(err).Error("Error deleting registration entry")
return &common.RegistrationEntry{}, status.Error(codes.Internal, err.Error())
}
return resp.Entry, nil
}
//FetchEntry retrieves a specific registered entry
func (h *Handler) FetchEntry(ctx context.Context, request *registration.RegistrationEntryID) (_ *common.RegistrationEntry, err error) {
counter := telemetry_registrationapi.StartFetchEntryCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.FetchRegistrationEntry)
ds := h.getDataStore()
fetchResponse, err := ds.FetchRegistrationEntry(ctx,
&datastore.FetchRegistrationEntryRequest{EntryId: request.Id},
)
if err != nil {
log.WithError(err).Error("Error trying to fetch entry")
return nil, status.Errorf(codes.Internal, "error trying to fetch entry: %v", err)
}
if fetchResponse.Entry == nil {
log.Error("No such registration entry")
return nil, status.Error(codes.NotFound, "no such registration entry")
}
return fetchResponse.Entry, nil
}
//FetchEntries retrieves all registered entries
func (h *Handler) FetchEntries(ctx context.Context, request *common.Empty) (_ *common.RegistrationEntries, err error) {
counter := telemetry_registrationapi.StartListEntriesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.FetchRegistrationEntries)
ds := h.getDataStore()
fetchResponse, err := ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{})
if err != nil {
log.WithError(err).Error("Error trying to fetch entries")
return nil, status.Errorf(codes.Internal, "error trying to fetch entries: %v", err)
}
return &common.RegistrationEntries{
Entries: fetchResponse.Entries,
}, nil
}
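//Note: this listing is currently unpaginated; the accompanying patch introduces a
//_defaultListEntriesPageSize constant (50), presumably so that entry listings can be
//fetched in pages rather than all at once.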
//UpdateEntry updates a specific registered entry
func (h *Handler) UpdateEntry(ctx context.Context, request *registration.UpdateEntryRequest) (_ *common.RegistrationEntry, err error) {
counter := telemetry_registrationapi.StartUpdateEntryCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.UpdateRegistrationEntry)
if request.Entry == nil {
log.Error("Request is missing entry to update")
return nil, status.Error(codes.InvalidArgument, "request is missing entry to update")
}
request.Entry, err = h.prepareRegistrationEntry(request.Entry, true)
if err != nil {
log.WithError(err).Error("Error validating request parameters")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ds := h.getDataStore()
resp, err := ds.UpdateRegistrationEntry(ctx, &datastore.UpdateRegistrationEntryRequest{
Entry: request.Entry,
})
if err != nil {
log.WithError(err).Error("Failed to update registration entry")
return nil, status.Errorf(codes.Internal, "failed to update registration entry: %v", err)
}
telemetry_registrationapi.IncrRegistrationAPIUpdatedEntryCounter(h.Metrics)
log.WithFields(logrus.Fields{
telemetry.ParentID: resp.Entry.ParentId,
telemetry.SPIFFEID: resp.Entry.SpiffeId,
}).Debug("Workload registration successfully updated")
return resp.Entry, nil
}
// ListByParentID returns all the Entries associated with the ParentID value
func (h *Handler) ListByParentID(ctx context.Context, request *registration.ParentID) (_ *common.RegistrationEntries, err error) {
counter := telemetry_registrationapi.StartListEntriesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.ListRegistrationsByParentID)
request.Id, err = idutil.NormalizeSpiffeID(request.Id, idutil.AllowAny())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ds := h.getDataStore()
listResponse, err := ds.ListRegistrationEntries(ctx,
&datastore.ListRegistrationEntriesRequest{
ByParentId: &wrappers.StringValue{
Value: request.Id,
},
})
if err != nil {
log.WithError(err).Error("Failed to list entries by parent ID")
return nil, status.Errorf(codes.Internal, "error trying to list entries by parent ID: %v", err)
}
return &common.RegistrationEntries{
Entries: listResponse.Entries,
}, nil
}
// ListBySelector returns all the Entries associated with the Selector
func (h *Handler) ListBySelector(ctx context.Context, request *common.Selector) (_ *common.RegistrationEntries, err error) {
counter := telemetry_registrationapi.StartListEntriesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.ListRegistrationsBySelector)
ds := h.getDataStore()
req := &datastore.ListRegistrationEntriesRequest{
BySelectors: &datastore.BySelectors{
Selectors: []*common.Selector{request},
},
}
resp, err := ds.ListRegistrationEntries(ctx, req)
if err != nil {
log.WithError(err).Error("Failed to list entries by selector")
return nil, status.Errorf(codes.Internal, "error trying to list entries by selector: %v", err)
}
return &common.RegistrationEntries{
Entries: resp.Entries,
}, nil
}
// ListBySelectors returns all the Entries associated with the Selectors
func (h *Handler) ListBySelectors(ctx context.Context, request *common.Selectors) (_ *common.RegistrationEntries, err error) {
counter := telemetry_registrationapi.StartListEntriesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.ListRegistrationsBySelectors)
ds := h.getDataStore()
req := &datastore.ListRegistrationEntriesRequest{
BySelectors: &datastore.BySelectors{
Selectors: request.Entries,
},
}
resp, err := ds.ListRegistrationEntries(ctx, req)
if err != nil {
log.WithError(err).Error("Failed to list entries by selectors")
return nil, status.Errorf(codes.Internal, "error trying to list entries by selectors: %v", err)
}
return &common.RegistrationEntries{
Entries: resp.Entries,
}, nil
}
// ListBySpiffeID returns all the Entries associated with the SPIFFE ID
func (h *Handler) ListBySpiffeID(ctx context.Context, request *registration.SpiffeID) (_ *common.RegistrationEntries, err error) {
counter := telemetry_registrationapi.StartListEntriesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.ListRegistrationsBySPIFFEID)
request.Id, err = idutil.NormalizeSpiffeID(request.Id, idutil.AllowAny())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ds := h.getDataStore()
req := &datastore.ListRegistrationEntriesRequest{
BySpiffeId: &wrappers.StringValue{
Value: request.Id,
},
}
resp, err := ds.ListRegistrationEntries(ctx, req)
if err != nil {
log.WithError(err).Error("Failed to list entries by SPIFFE ID")
return nil, status.Errorf(codes.Internal, "error trying to list entries by SPIFFE ID: %v", err)
}
return &common.RegistrationEntries{
Entries: resp.Entries,
}, nil
}
func (h *Handler) CreateFederatedBundle(ctx context.Context, request *registration.FederatedBundle) (_ *common.Empty, err error) {
counter := telemetry_registrationapi.StartCreateFedBundleCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.CreateFederatedBundle)
bundle := request.Bundle
if bundle == nil {
log.Error("Bundle field is required")
return nil, status.Error(codes.InvalidArgument, "bundle field is required")
}
bundle.TrustDomainId, err = idutil.NormalizeSpiffeID(bundle.TrustDomainId, idutil.AllowAnyTrustDomain())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if bundle.TrustDomainId == h.TrustDomain.String() {
log.Error("Federated bundle id cannot match server trust domain")
return nil, status.Error(codes.InvalidArgument, "federated bundle id cannot match server trust domain")
}
ds := h.getDataStore()
if _, err := ds.CreateBundle(ctx, &datastore.CreateBundleRequest{
Bundle: bundle,
}); err != nil {
log.WithError(err).Error("Failed to create bundle")
return nil, status.Error(codes.Internal, err.Error())
}
return &common.Empty{}, nil
}
func (h *Handler) FetchFederatedBundle(ctx context.Context, request *registration.FederatedBundleID) (_ *registration.FederatedBundle, err error) {
counter := telemetry_registrationapi.StartFetchFedBundleCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.FetchFederatedBundle)
request.Id, err = idutil.NormalizeSpiffeID(request.Id, idutil.AllowAnyTrustDomain())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if request.Id == h.TrustDomain.String() {
log.Error("Federated bundle id cannot match server trust domain")
return nil, status.Error(codes.InvalidArgument, "federated bundle id cannot match server trust domain")
}
ds := h.getDataStore()
resp, err := ds.FetchBundle(ctx, &datastore.FetchBundleRequest{
TrustDomainId: request.Id,
})
if err != nil {
log.WithError(err).Error("Failed to fetch bundle")
return nil, status.Error(codes.Internal, err.Error())
}
if resp.Bundle == nil {
log.Error("Bundle not found")
return nil, status.Error(codes.NotFound, "bundle not found")
}
return &registration.FederatedBundle{
Bundle: resp.Bundle,
}, nil
}
func (h *Handler) ListFederatedBundles(request *common.Empty, stream registration.Registration_ListFederatedBundlesServer) (err error) {
counter := telemetry_registrationapi.StartListFedBundlesCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(stream.Context()))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.ListFederatedBundles)
ds := h.getDataStore()
resp, err := ds.ListBundles(stream.Context(), &datastore.ListBundlesRequest{})
if err != nil {
log.WithError(err).Error("Failed to list bundles")
return status.Error(codes.Internal, err.Error())
}
for _, bundle := range resp.Bundles {
if bundle.TrustDomainId == h.TrustDomain.String() {
continue
}
if err := stream.Send(&registration.FederatedBundle{
Bundle: bundle,
}); err != nil {
log.WithError(err).Error("Failed to send response over stream")
return status.Error(codes.Internal, err.Error())
}
}
return nil
}
func (h *Handler) UpdateFederatedBundle(ctx context.Context, request *registration.FederatedBundle) (_ *common.Empty, err error) {
counter := telemetry_registrationapi.StartUpdateFedBundleCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.UpdateFederatedBundle)
bundle := request.Bundle
if bundle == nil {
log.Error("Bundle field is required")
return nil, status.Error(codes.InvalidArgument, "bundle field is required")
}
bundle.TrustDomainId, err = idutil.NormalizeSpiffeID(bundle.TrustDomainId, idutil.AllowAnyTrustDomain())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if bundle.TrustDomainId == h.TrustDomain.String() {
log.Error("Federated bundle ID cannot match server trust domain")
return nil, status.Error(codes.InvalidArgument, "federated bundle id cannot match server trust domain")
}
ds := h.getDataStore()
if _, err := ds.UpdateBundle(ctx, &datastore.UpdateBundleRequest{
Bundle: bundle,
}); err != nil {
log.WithError(err).Error("Failed to update federated bundle")
return nil, status.Error(codes.Internal, err.Error())
}
return &common.Empty{}, err
}
func (h *Handler) DeleteFederatedBundle(ctx context.Context, request *registration.DeleteFederatedBundleRequest) (_ *common.Empty, err error) {
counter := telemetry_registrationapi.StartDeleteFedBundleCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.DeleteFederatedBundle)
request.Id, err = idutil.NormalizeSpiffeID(request.Id, idutil.AllowAnyTrustDomain())
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
if request.Id == h.TrustDomain.String() {
log.Error("Federated bundle ID cannot match server trust domain")
return nil, status.Error(codes.InvalidArgument, "federated bundle id cannot match server trust domain")
}
mode, err := convertDeleteBundleMode(request.Mode)
if err != nil {
log.WithError(err).Error("Unknown delete bundle mode in request")
return nil, status.Error(codes.InvalidArgument, err.Error())
}
ds := h.getDataStore()
if _, err := ds.DeleteBundle(ctx, &datastore.DeleteBundleRequest{
TrustDomainId: request.Id,
Mode: mode,
}); err != nil {
log.WithError(err).Error("Failed to delete federated bundle")
return nil, status.Error(codes.Internal, err.Error())
}
return &common.Empty{}, nil
}
func (h *Handler) CreateJoinToken(ctx context.Context, request *registration.JoinToken) (_ *registration.JoinToken, err error) {
counter := telemetry_registrationapi.StartCreateJoinTokenCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.CreateJoinToken)
if request.Ttl < 1 {
log.Error("TTL is required")
return nil, status.Error(codes.InvalidArgument, "ttl is required, you must provide one")
}
// Generate a token if one wasn't specified
if request.Token == "" {
u, err := uuid.NewV4()
if err != nil {
log.WithError(err).Error("Failed to generate UUID token")
return nil, status.Errorf(codes.Internal, "error generating uuid token: %v", err)
}
request.Token = u.String()
}
ds := h.getDataStore()
expiry := time.Now().Unix() + int64(request.Ttl)
_, err = ds.CreateJoinToken(ctx, &datastore.CreateJoinTokenRequest{
JoinToken: &datastore.JoinToken{
Token: request.Token,
Expiry: expiry,
},
})
if err != nil {
log.WithError(err).Error("Failed to register token")
return nil, status.Errorf(codes.Internal, "Failed to register token: %v", err)
}
return request, nil
}
// FetchBundle retrieves the CA bundle.
func (h *Handler) FetchBundle(ctx context.Context, request *common.Empty) (_ *registration.Bundle, err error) {
counter := telemetry_registrationapi.StartFetchBundleCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.FetchBundle)
ds := h.getDataStore()
resp, err := ds.FetchBundle(ctx, &datastore.FetchBundleRequest{
TrustDomainId: h.TrustDomain.String(),
})
if err != nil {
log.WithError(err).Error("Failed to get bundle from datastore")
return nil, status.Errorf(codes.Internal, "get bundle from datastore: %v", err)
}
if resp.Bundle == nil {
log.Error("Bundle not found")
return nil, status.Error(codes.NotFound, "bundle not found")
}
return &registration.Bundle{
Bundle: resp.Bundle,
}, nil
}
// EvictAgent removes a node from the attested nodes store
func (h *Handler) EvictAgent(ctx context.Context, evictRequest *registration.EvictAgentRequest) (*registration.EvictAgentResponse, error) {
spiffeID := evictRequest.GetSpiffeID()
log := h.Log.WithFields(logrus.Fields{
telemetry.Method: telemetry.EvictAgent,
telemetry.SPIFFEID: spiffeID,
})
deletedNode, err := h.deleteAttestedNode(ctx, spiffeID)
if err != nil {
log.WithError(err).Warn("Failed to evict agent")
return nil, err
}
log.Debug("Successfully evicted agent")
return &registration.EvictAgentResponse{
Node: deletedNode,
}, nil
}
// ListAgents returns the list of attested nodes
func (h *Handler) ListAgents(ctx context.Context, listReq *registration.ListAgentsRequest) (*registration.ListAgentsResponse, error) {
log := h.Log.WithField(telemetry.Method, telemetry.ListAgents)
ds := h.Catalog.GetDataStore()
req := &datastore.ListAttestedNodesRequest{}
resp, err := ds.ListAttestedNodes(ctx, req)
if err != nil {
log.WithError(err).Error("Failed to list attested nodes")
return nil, err
}
return &registration.ListAgentsResponse{Nodes: resp.Nodes}, nil
}
func (h *Handler) MintX509SVID(ctx context.Context, req *registration.MintX509SVIDRequest) (_ *registration.MintX509SVIDResponse, err error) {
counter := telemetry_registrationapi.StartMintX509SVIDCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.MintX509SVID)
spiffeID, err := h.normalizeSPIFFEIDForMinting(req.SpiffeId)
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID for minting")
return nil, err
}
if len(req.Csr) == 0 {
log.Error("Request missing CSR")
return nil, status.Error(codes.InvalidArgument, "request missing CSR")
}
for _, dnsName := range req.DnsNames {
if err := validateDNS(dnsName); err != nil {
log.WithField(telemetry.DNSName, dnsName).Error("Invalid DNS name")
return nil, status.Errorf(codes.InvalidArgument, "invalid DNS name: %v", err)
}
}
csr, err := x509.ParseCertificateRequest(req.Csr)
if err != nil {
log.WithError(err).Error("Invalid CSR")
return nil, status.Errorf(codes.InvalidArgument, "invalid CSR: %v", err)
}
if err := csr.CheckSignature(); err != nil {
log.WithError(err).Error("Invalid CSR: signature verification failed")
return nil, status.Errorf(codes.InvalidArgument, "invalid CSR: signature verify failed")
}
svid, err := h.ServerCA.SignX509SVID(ctx, ca.X509SVIDParams{
SpiffeID: spiffeID,
PublicKey: csr.PublicKey,
TTL: time.Duration(req.Ttl) * time.Second,
DNSList: req.DnsNames,
})
if err != nil {
log.WithError(err).Error("Failed to sign X.509 SVID")
return nil, status.Error(codes.Internal, err.Error())
}
resp, err := h.getDataStore().FetchBundle(ctx, &datastore.FetchBundleRequest{
TrustDomainId: h.TrustDomain.String(),
})
if err != nil {
log.WithError(err).Error("Failed to fetch bundle from datastore")
return nil, status.Error(codes.Internal, err.Error())
}
if resp.Bundle == nil {
log.Error("Bundle not found")
return nil, status.Error(codes.FailedPrecondition, "bundle not found")
}
svidChain := make([][]byte, 0, len(svid))
for _, cert := range svid {
svidChain = append(svidChain, cert.Raw)
}
var rootCAs [][]byte
for _, rootCA := range resp.Bundle.RootCas {
rootCAs = append(rootCAs, rootCA.DerBytes)
}
return &registration.MintX509SVIDResponse{
SvidChain: svidChain,
RootCas: rootCAs,
}, nil
}
func (h *Handler) MintJWTSVID(ctx context.Context, req *registration.MintJWTSVIDRequest) (_ *registration.MintJWTSVIDResponse, err error) {
counter := telemetry_registrationapi.StartMintJWTSVIDCall(h.Metrics)
telemetry_common.AddCallerID(counter, getCallerID(ctx))
defer counter.Done(&err)
log := h.Log.WithField(telemetry.Method, telemetry.MintJWTSVID)
spiffeID, err := h.normalizeSPIFFEIDForMinting(req.SpiffeId)
if err != nil {
log.WithError(err).Error("Failed to normalize SPIFFE ID for minting")
return nil, err
}
if len(req.Audience) == 0 {
log.Error("Request must specify at least one audience")
return nil, status.Error(codes.InvalidArgument, "request must specify at least one audience")
}
token, err := h.ServerCA.SignJWTSVID(ctx, ca.JWTSVIDParams{
SpiffeID: spiffeID,
TTL: time.Duration(req.Ttl) * time.Second,
Audience: req.Audience,
})
if err != nil {
log.WithError(err).Error("Failed to sign JWT-SVID")
return nil, status.Error(codes.Internal, err.Error())
}
return &registration.MintJWTSVIDResponse{
Token: token,
}, nil
}
// GetNodeSelectors returns node (agent) selectors
func (h *Handler) GetNodeSelectors(ctx context.Context, req *registration.GetNodeSelectorsRequest) (*registration.GetNodeSelectorsResponse, error) {
log := h.Log.WithField(telemetry.Method, telemetry.GetNodeSelectors)
ds := h.Catalog.GetDataStore()
r := &datastore.GetNodeSelectorsRequest{
SpiffeId: req.SpiffeId,
}
resp, err := ds.GetNodeSelectors(ctx, r)
if err != nil {
log.WithError(err).Error("Failed to get node selectors")
return nil, err
}
return &registration.GetNodeSelectorsResponse{
Selectors: &registration.NodeSelectors{
SpiffeId: resp.Selectors.SpiffeId,
Selectors: resp.Selectors.Selectors,
},
}, nil
}
func (h *Handler) deleteAttestedNode(ctx context.Context, agentID string) (*common.AttestedNode, error) {
if agentID == "" {
return nil, errors.New("empty agent ID")
}
ds := h.Catalog.GetDataStore()
req := &datastore.DeleteAttestedNodeRequest{
SpiffeId: agentID,
}
resp, err := ds.DeleteAttestedNode(ctx, req)
if err != nil {
return nil, err
}
return resp.Node, nil
}
func (h *Handler) normalizeSPIFFEIDForMinting(spiffeID string) (string, error) {
if spiffeID == "" {
return "", status.Error(codes.InvalidArgument, "request missing SPIFFE ID")
}
spiffeID, err := idutil.NormalizeSpiffeID(spiffeID, idutil.AllowTrustDomainWorkload(h.TrustDomain.Host))
if err != nil {
return "", status.Errorf(codes.InvalidArgument, err.Error())
}
return spiffeID, nil
}
func (h *Handler) isEntryUnique(ctx context.Context, ds datastore.DataStore, entry *common.RegistrationEntry) (bool, error) {
// First we get all the entries that match the entry's SPIFFE ID, parent ID, and selectors.
req := &datastore.ListRegistrationEntriesRequest{
BySpiffeId: &wrappers.StringValue{
Value: entry.SpiffeId,
},
ByParentId: &wrappers.StringValue{
Value: entry.ParentId,
},
BySelectors: &datastore.BySelectors{
Match: datastore.BySelectors_MATCH_EXACT,
Selectors: entry.Selectors,
},
}
res, err := ds.ListRegistrationEntries(ctx, req)
if err != nil {
return false, err
}
return len(res.Entries) == 0, nil
}
func (h *Handler) getDataStore() datastore.DataStore {
return h.Catalog.GetDataStore()
}
func (h *Handler) prepareRegistrationEntry(entry *common.RegistrationEntry, forUpdate bool) (*common.RegistrationEntry, error) {
entry = cloneRegistrationEntry(entry)
if forUpdate && entry.EntryId == "" {
return nil, errors.New("missing registration entry id")
}
var err error
for _, dns := range entry.DnsNames {
err = validateDNS(dns)
if err != nil {
return nil, fmt.Errorf("dns name %v failed validation: %v", dns, err)
}
}
entry.ParentId, err = idutil.NormalizeSpiffeID(entry.ParentId, idutil.AllowAnyInTrustDomain(h.TrustDomain.Host))
if err != nil {
return nil, err
}
// Validate Spiffe ID
entry.SpiffeId, err = idutil.NormalizeSpiffeID(entry.SpiffeId, idutil.AllowTrustDomainWorkload(h.TrustDomain.Host))
if err != nil {
return nil, err
}
return entry, nil
}
func (h *Handler) AuthorizeCall(ctx context.Context, fullMethod string) (context.Context, error) {
// For the time being, authorization is not per-method. In other words, all or nothing.
callerID, err := authorizeCaller(ctx, h.getDataStore())
if err != nil {
h.Log.WithError(err).Error("Failed to authorize caller")
return nil, err
}
if callerID != "" {
ctx = withCallerID(ctx, callerID)
}
return ctx, nil
}
func cloneRegistrationEntry(entry *common.RegistrationEntry) *common.RegistrationEntry {
return proto.Clone(entry).(*common.RegistrationEntry)
}
func convertDeleteBundleMode(in registration.DeleteFederatedBundleRequest_Mode) (datastore.DeleteBundleRequest_Mode, error) {
switch in {
case registration.DeleteFederatedBundleRequest_RESTRICT:
return datastore.DeleteBundleRequest_RESTRICT, nil
case registration.DeleteFederatedBundleRequest_DISSOCIATE:
return datastore.DeleteBundleRequest_DISSOCIATE, nil
case registration.DeleteFederatedBundleRequest_DELETE:
return datastore.DeleteBundleRequest_DELETE, nil
}
return datastore.DeleteBundleRequest_RESTRICT, fmt.Errorf("unhandled delete mode %q", in)
}
func getSpiffeIDFromCert(cert *x509.Certificate) (string, error) {
if len(cert.URIs) == 0 {
return "", errors.New("no SPIFFE ID in certificate")
}
spiffeID, err := idutil.NormalizeSpiffeIDURL(cert.URIs[0], idutil.AllowAny())
if err != nil {
return "", err
}
return spiffeID.String(), nil
}
func authorizeCaller(ctx context.Context, ds datastore.DataStore) (spiffeID string, err error) {
ctxPeer, ok := peer.FromContext(ctx)
if !ok {
return "", status.Error(codes.PermissionDenied, "no peer information for caller")
}
switch authInfo := ctxPeer.AuthInfo.(type) {
case credentials.TLSInfo:
// The caller came over TLS and must present an authorized SPIFFE ID
if len(authInfo.State.VerifiedChains) == 0 {
return "", status.Errorf(codes.PermissionDenied, "no verified client certificate")
}
chain := authInfo.State.VerifiedChains[0]
if len(chain) == 0 {
// the tls package should never supply an empty verified chain, but
// we'll just be defensive here.
return "", status.Errorf(codes.PermissionDenied, "verified chain is empty")
}
cert := chain[0]
spiffeID, err = getSpiffeIDFromCert(cert)
if err != nil {
return "", status.Error(codes.PermissionDenied, err.Error())
}
case peertracker.AuthInfo:
// The caller came over UDS and is therefore authorized but does not
// provide a spiffeID. The file permissions on the UDS are restricted to
// processes belonging to the same user or group as the server.
return "", nil
default:
// The caller came over an unknown transport
return "", status.Errorf(codes.PermissionDenied, "unsupported peer auth info type (%T)", authInfo)
}
resp, err := ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{
BySpiffeId: &wrappers.StringValue{
Value: spiffeID,
},
})
if err != nil {
return "", err
}
for _, entry := range resp.Entries {
if entry.Admin {
return spiffeID, nil
}
}
return "", status.Errorf(codes.PermissionDenied, "SPIFFE ID %q is not authorized", spiffeID)
}
type callerIDKey struct{}
func withCallerID(ctx context.Context, callerID string) context.Context {
return context.WithValue(ctx, callerIDKey{}, callerID)
}
func getCallerID(ctx context.Context) string {
callerID, _ := ctx.Value(callerIDKey{}).(string)
return callerID
}
func validateDNS(dns string) error {
// follow https://tools.ietf.org/html/rfc5280#section-4.2.1.6
// do not allow empty or the technically valid DNS " "
dns = strings.TrimSpace(dns)
if len(dns) == 0 {
return errors.New("empty or only whitespace")
}
// handle up to 255 characters
if len(dns) > 255 {
return errors.New("length exceeded")
}
// a DNS is split into labels by "."
splitDNS := strings.Split(dns, ".")
for _, label := range splitDNS {
if err := validateDNSLabel(label); err != nil {
return err
}
}
return nil
}
func validateDNSLabel(label string) error {
// follow https://tools.ietf.org/html/rfc5280#section-4.2.1.6 guidance
// <label> ::= <let-dig> [ [ <ldh-str> ] <let-dig> ]
// <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
if len(label) == 0 {
return errors.New("label is empty")
}
if len(label) > 63 {
return fmt.Errorf("label length exceeded: %v", label)
}
if match := isDNSLabel(label); !match {
return fmt.Errorf("label does not match regex: %v", label)
}
return nil
}
| 1 | 12,630 |
SPIRE uses the leadingLowerCase convention for unexported constants
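For illustration, a minimal sketch of that convention; the constant names below are hypothetical and not taken from SPIRE:
package example
import "time"
// Hypothetical constants showing the convention: unexported constants use a
// leading lower-case letter, exported ones use UpperCamelCase.
const defaultListTimeout = 30 * time.Second // unexported: leadingLowerCase
const DefaultPageSize = 100                 // exported: visible outside the package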
|
spiffe-spire
|
go
|
@@ -1,11 +1,12 @@
// Copyright (c) Microsoft. All Rights Reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
-using Microsoft.CodeAnalysis.Sarif.Readers;
using System;
using System.CodeDom.Compiler;
using System.Collections.Generic;
using System.Runtime.Serialization;
+using Microsoft.CodeAnalysis.Sarif.Readers;
+using Newtonsoft.Json;
namespace Microsoft.CodeAnalysis.Sarif
{
| 1 |
// Copyright (c) Microsoft. All Rights Reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using Microsoft.CodeAnalysis.Sarif.Readers;
using System;
using System.CodeDom.Compiler;
using System.Collections.Generic;
using System.Runtime.Serialization;
namespace Microsoft.CodeAnalysis.Sarif
{
/// <summary>
/// An annotation used to express code flows through a method or other locations that are related to a result.
/// </summary>
[DataContract]
[GeneratedCode("Microsoft.Json.Schema.ToDotNet", "0.36.0.0")]
public partial class AnnotatedCodeLocation : PropertyBagHolder, ISarifNode
{
public static IEqualityComparer<AnnotatedCodeLocation> ValueComparer => AnnotatedCodeLocationEqualityComparer.Instance;
public bool ValueEquals(AnnotatedCodeLocation other) => ValueComparer.Equals(this, other);
public int ValueGetHashCode() => ValueComparer.GetHashCode(this);
/// <summary>
/// Gets a value indicating the type of object implementing <see cref="ISarifNode" />.
/// </summary>
public SarifNodeKind SarifNodeKind
{
get
{
return SarifNodeKind.AnnotatedCodeLocation;
}
}
/// <summary>
/// An identifier for the location, unique within the scope of the code flow within which it occurs.
/// </summary>
[DataMember(Name = "id", IsRequired = false, EmitDefaultValue = false)]
public string Id { get; set; }
/// <summary>
/// A file location to which this annotation refers.
/// </summary>
[DataMember(Name = "physicalLocation", IsRequired = true)]
public PhysicalLocation PhysicalLocation { get; set; }
/// <summary>
/// The fully qualified name of the method or function that is executing.
/// </summary>
[DataMember(Name = "fullyQualifiedLogicalName", IsRequired = false, EmitDefaultValue = false)]
public string FullyQualifiedLogicalName { get; set; }
/// <summary>
/// A key used to retrieve the annotation logicalLocation from the logicalLocations dictionary.
/// </summary>
[DataMember(Name = "logicalLocationKey", IsRequired = false, EmitDefaultValue = false)]
public string LogicalLocationKey { get; set; }
/// <summary>
/// The name of the module that contains the code that is executing.
/// </summary>
[DataMember(Name = "module", IsRequired = false, EmitDefaultValue = false)]
public string Module { get; set; }
/// <summary>
/// The thread identifier of the code that is executing.
/// </summary>
[DataMember(Name = "threadId", IsRequired = false, EmitDefaultValue = false)]
public int ThreadId { get; set; }
/// <summary>
/// A message relevant to this annotation.
/// </summary>
[DataMember(Name = "message", IsRequired = false, EmitDefaultValue = false)]
public string Message { get; set; }
/// <summary>
/// A descriptive identifier that categorizes the annotation.
/// </summary>
[DataMember(Name = "kind", IsRequired = false, EmitDefaultValue = false)]
public AnnotatedCodeLocationKind Kind { get; set; }
/// <summary>
/// True if this location is essential to understanding the code flow in which it occurs.
/// </summary>
[DataMember(Name = "essential", IsRequired = false, EmitDefaultValue = false)]
public bool Essential { get; set; }
/// <summary>
/// Key/value pairs that provide additional information about the code location.
/// </summary>
[DataMember(Name = "properties", IsRequired = false, EmitDefaultValue = false)]
internal override IDictionary<string, SerializedPropertyInfo> Properties { get; set; }
/// <summary>
/// Initializes a new instance of the <see cref="AnnotatedCodeLocation" /> class.
/// </summary>
public AnnotatedCodeLocation()
{
}
/// <summary>
/// Initializes a new instance of the <see cref="AnnotatedCodeLocation" /> class from the supplied values.
/// </summary>
/// <param name="id">
/// An initialization value for the <see cref="P: Id" /> property.
/// </param>
/// <param name="physicalLocation">
/// An initialization value for the <see cref="P: PhysicalLocation" /> property.
/// </param>
/// <param name="fullyQualifiedLogicalName">
/// An initialization value for the <see cref="P: FullyQualifiedLogicalName" /> property.
/// </param>
/// <param name="logicalLocationKey">
/// An initialization value for the <see cref="P: LogicalLocationKey" /> property.
/// </param>
/// <param name="module">
/// An initialization value for the <see cref="P: Module" /> property.
/// </param>
/// <param name="threadId">
/// An initialization value for the <see cref="P: ThreadId" /> property.
/// </param>
/// <param name="message">
/// An initialization value for the <see cref="P: Message" /> property.
/// </param>
/// <param name="kind">
/// An initialization value for the <see cref="P: Kind" /> property.
/// </param>
/// <param name="essential">
/// An initialization value for the <see cref="P: Essential" /> property.
/// </param>
/// <param name="properties">
/// An initialization value for the <see cref="P: Properties" /> property.
/// </param>
public AnnotatedCodeLocation(string id, PhysicalLocation physicalLocation, string fullyQualifiedLogicalName, string logicalLocationKey, string module, int threadId, string message, AnnotatedCodeLocationKind kind, bool essential, IDictionary<string, SerializedPropertyInfo> properties)
{
Init(id, physicalLocation, fullyQualifiedLogicalName, logicalLocationKey, module, threadId, message, kind, essential, properties);
}
/// <summary>
/// Initializes a new instance of the <see cref="AnnotatedCodeLocation" /> class from the specified instance.
/// </summary>
/// <param name="other">
/// The instance from which the new instance is to be initialized.
/// </param>
/// <exception cref="ArgumentNullException">
/// Thrown if <paramref name="other" /> is null.
/// </exception>
public AnnotatedCodeLocation(AnnotatedCodeLocation other)
{
if (other == null)
{
throw new ArgumentNullException(nameof(other));
}
Init(other.Id, other.PhysicalLocation, other.FullyQualifiedLogicalName, other.LogicalLocationKey, other.Module, other.ThreadId, other.Message, other.Kind, other.Essential, other.Properties);
}
ISarifNode ISarifNode.DeepClone()
{
return DeepCloneCore();
}
/// <summary>
/// Creates a deep copy of this instance.
/// </summary>
public AnnotatedCodeLocation DeepClone()
{
return (AnnotatedCodeLocation)DeepCloneCore();
}
private ISarifNode DeepCloneCore()
{
return new AnnotatedCodeLocation(this);
}
private void Init(string id, PhysicalLocation physicalLocation, string fullyQualifiedLogicalName, string logicalLocationKey, string module, int threadId, string message, AnnotatedCodeLocationKind kind, bool essential, IDictionary<string, SerializedPropertyInfo> properties)
{
Id = id;
if (physicalLocation != null)
{
PhysicalLocation = new PhysicalLocation(physicalLocation);
}
FullyQualifiedLogicalName = fullyQualifiedLogicalName;
LogicalLocationKey = logicalLocationKey;
Module = module;
ThreadId = threadId;
Message = message;
Kind = kind;
Essential = essential;
if (properties != null)
{
Properties = new Dictionary<string, SerializedPropertyInfo>(properties);
}
}
}
}
| 1 | 10,937 |
The newest JSchema code generator orders `System` usings first.
|
microsoft-sarif-sdk
|
.cs
|
@@ -82,10 +82,17 @@ public class HiveIcebergSerDe extends AbstractSerDe {
}
String[] selectedColumns = ColumnProjectionUtils.getReadColumnNames(configuration);
- Schema projectedSchema = selectedColumns.length > 0 ? tableSchema.select(selectedColumns) : tableSchema;
+ Schema projectedSchema = tableSchema;
+
+ boolean caseSensitive = configuration.getBoolean(InputFormatConfig.CASE_SENSITIVE,
+ InputFormatConfig.CASE_SENSITIVE_DEFAULT);
+ if (selectedColumns.length > 0) {
+ projectedSchema = caseSensitive ? tableSchema.select(selectedColumns)
+ : tableSchema.caseInsensitiveSelect(selectedColumns);
+ }
try {
- this.inspector = IcebergObjectInspector.create(projectedSchema);
+ this.inspector = IcebergObjectInspector.create(projectedSchema, caseSensitive);
} catch (Exception e) {
throw new SerDeException(e);
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.mr.hive;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import javax.annotation.Nullable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde.serdeConstants;
import org.apache.hadoop.hive.serde2.AbstractSerDe;
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.SerDeStats;
import org.apache.hadoop.hive.serde2.SerDeUtils;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
import org.apache.hadoop.io.Writable;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.hive.HiveSchemaUtil;
import org.apache.iceberg.mr.Catalogs;
import org.apache.iceberg.mr.InputFormatConfig;
import org.apache.iceberg.mr.hive.serde.objectinspector.IcebergObjectInspector;
import org.apache.iceberg.mr.mapred.Container;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HiveIcebergSerDe extends AbstractSerDe {
private static final Logger LOG = LoggerFactory.getLogger(HiveIcebergSerDe.class);
private ObjectInspector inspector;
@Override
public void initialize(@Nullable Configuration configuration, Properties serDeProperties) throws SerDeException {
// HiveIcebergSerDe.initialize is called multiple places in Hive code:
// - When we are trying to create a table - HiveDDL data is stored at the serDeProperties, but no Iceberg table
// is created yet.
// - When we are compiling the Hive query on HiveServer2 side - We only have table information (location/name),
// and we have to read the schema using the table data. This is called multiple times so there is room for
// optimizing here.
// - When we are executing the Hive query in the execution engine - We do not want to load the table data on every
// executor, but serDeProperties are populated by HiveIcebergStorageHandler.configureInputJobProperties() and
// the resulting properties are serialized and distributed to the executors
// temporarily disabling vectorization in Tez, since it doesn't work with projection pruning (fix: TEZ-4248)
// TODO: remove this once TEZ-4248 has been released and the Tez dependencies updated here
assertNotVectorizedTez(configuration);
Schema tableSchema;
if (configuration.get(InputFormatConfig.TABLE_SCHEMA) != null) {
tableSchema = SchemaParser.fromJson(configuration.get(InputFormatConfig.TABLE_SCHEMA));
} else if (serDeProperties.get(InputFormatConfig.TABLE_SCHEMA) != null) {
tableSchema = SchemaParser.fromJson((String) serDeProperties.get(InputFormatConfig.TABLE_SCHEMA));
} else {
try {
// always prefer the original table schema if there is one
tableSchema = Catalogs.loadTable(configuration, serDeProperties).schema();
LOG.info("Using schema from existing table {}", SchemaParser.toJson(tableSchema));
} catch (Exception e) {
// If we cannot load the table, try the provided hive schema
tableSchema = hiveSchemaOrThrow(serDeProperties, e);
}
}
String[] selectedColumns = ColumnProjectionUtils.getReadColumnNames(configuration);
Schema projectedSchema = selectedColumns.length > 0 ? tableSchema.select(selectedColumns) : tableSchema;
try {
this.inspector = IcebergObjectInspector.create(projectedSchema);
} catch (Exception e) {
throw new SerDeException(e);
}
}
private void assertNotVectorizedTez(Configuration configuration) {
if ("tez".equals(configuration.get("hive.execution.engine")) &&
"true".equals(configuration.get("hive.vectorized.execution.enabled"))) {
throw new UnsupportedOperationException("Vectorized execution on Tez is currently not supported when using " +
"Iceberg tables. Please set hive.vectorized.execution.enabled=false and rerun the query.");
}
}
@Override
public Class<? extends Writable> getSerializedClass() {
return Container.class;
}
@Override
public Writable serialize(Object o, ObjectInspector objectInspector) {
throw new UnsupportedOperationException("Serialization is not supported.");
}
@Override
public SerDeStats getSerDeStats() {
return null;
}
@Override
public Object deserialize(Writable writable) {
return ((Container<?>) writable).get();
}
@Override
public ObjectInspector getObjectInspector() {
return inspector;
}
/**
* Gets the hive schema from the serDeProperties, and throws an exception if it is not provided. In the latter case
* it adds the previousException as a root cause.
* @param serDeProperties The source of the hive schema
* @param previousException If we had an exception previously
* @return The hive schema parsed from the serDeProperties
* @throws SerDeException If there is no schema information in the serDeProperties
*/
private static Schema hiveSchemaOrThrow(Properties serDeProperties, Exception previousException)
throws SerDeException {
// Read the configuration parameters
String columnNames = serDeProperties.getProperty(serdeConstants.LIST_COLUMNS);
String columnTypes = serDeProperties.getProperty(serdeConstants.LIST_COLUMN_TYPES);
String columnNameDelimiter = serDeProperties.containsKey(serdeConstants.COLUMN_NAME_DELIMITER) ?
serDeProperties.getProperty(serdeConstants.COLUMN_NAME_DELIMITER) : String.valueOf(SerDeUtils.COMMA);
if (columnNames != null && columnTypes != null && columnNameDelimiter != null &&
!columnNames.isEmpty() && !columnTypes.isEmpty() && !columnNameDelimiter.isEmpty()) {
// Parse the configuration parameters
List<String> names = new ArrayList<>();
Collections.addAll(names, columnNames.split(columnNameDelimiter));
Schema hiveSchema = HiveSchemaUtil.convert(names, TypeInfoUtils.getTypeInfosFromTypeString(columnTypes));
LOG.info("Using hive schema {}", SchemaParser.toJson(hiveSchema));
return hiveSchema;
} else {
throw new SerDeException("Please provide an existing table or a valid schema", previousException);
}
}
}
| 1 | 31,404 |
I understand that we want the possibility to configure the IcebergInputFormat to be case sensitive or case insensitive, since it can be used by other MR jobs as well. Do we want to allow Hive users to shoot themselves in the foot by enabling case sensitivity? My first guess would be that we should not use the configuration here and just go with `false`, but if you have a specific use case in mind, I can easily be convinced
|
apache-iceberg
|
java
|
@@ -79,7 +79,7 @@ func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperat
common.SyncResources.Mux.Lock()
status, err := c.cStorPoolAddEventHandler(cStorPoolGot)
common.SyncResources.Mux.Unlock()
-
+ pool.PoolAddEventHandled = true
return status, err
case common.QOpDestroy:
| 1 |
/*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package poolcontroller
import (
"fmt"
"os"
"reflect"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/pool"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the cStorPoolUpdated resource
// with the current status of the resource.
func (c *CStorPoolController) syncHandler(key string, operation common.QueueOperation) error {
cStorPoolGot, err := c.getPoolResource(key)
if err != nil {
return err
}
status, err := c.cStorPoolEventHandler(operation, cStorPoolGot)
if status == "" {
return nil
}
cStorPoolGot.Status.Phase = apis.CStorPoolPhase(status)
if err != nil {
glog.Errorf(err.Error())
_, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cStorPoolGot)
if err != nil {
return err
}
glog.Infof("cStorPool:%v, %v; Status: %v", cStorPoolGot.Name,
string(cStorPoolGot.GetUID()), cStorPoolGot.Status.Phase)
return err
}
_, err = c.clientset.OpenebsV1alpha1().CStorPools().Update(cStorPoolGot)
if err != nil {
return err
}
glog.Infof("cStorPool:%v, %v; Status: %v", cStorPoolGot.Name,
string(cStorPoolGot.GetUID()), cStorPoolGot.Status.Phase)
return nil
}
// cStorPoolEventHandler is to handle cstor pool related events.
func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperation, cStorPoolGot *apis.CStorPool) (string, error) {
pool.RunnerVar = util.RealRunner{}
switch operation {
case common.QOpAdd:
glog.Infof("Processing cStorPool added event: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
// lock is to synchronize pool and volumereplica. Until certain pool related
// operations are over, the volumereplica threads will be held.
common.SyncResources.Mux.Lock()
status, err := c.cStorPoolAddEventHandler(cStorPoolGot)
common.SyncResources.Mux.Unlock()
return status, err
case common.QOpDestroy:
glog.Infof("Processing cStorPool Destroy event %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
status, err := c.cStorPoolDestroyEventHandler(cStorPoolGot)
return status, err
}
return string(apis.CStorPoolStatusInvalid), nil
}
func (c *CStorPoolController) cStorPoolAddEventHandler(cStorPoolGot *apis.CStorPool) (string, error) {
// CheckValidPool is to check if pool attributes are correct.
err := pool.CheckValidPool(cStorPoolGot)
if err != nil {
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate))
return string(apis.CStorPoolStatusOffline), err
}
/* If pool is already present.
Pool CR status is online. This means the pool (main container) is running successfully,
but watcher container got restarted.
Pool CR status is init/online. If entire pod got restarted, both zrepl and watcher
are started.
a) Zrepl could have come up first, in this case, watcher will update after
the specified interval of 120s.
b) Watcher could have come up first, in this case, there is a possibility
that zrepl goes down and comes up and the watcher sees that no pool is there,
so it will break the loop and attempt to import the pool. */
// cnt is the number of attempts to wait and handle the case of an already present pool.
cnt := common.NoOfPoolWaitAttempts
existingPool, _ := pool.GetPoolName()
isPoolExists := len(existingPool) != 0
for i := 0; isPoolExists && i < cnt; i++ {
// GetVolumes is called because, while importing a pool, volumes corresponding
// to the pool are also imported. This needs to be handled and made visible
// to cvr controller.
common.InitialImportedPoolVol, _ = volumereplica.GetVolumes()
// GetPoolName is to get pool name for particular no. of attempts.
existingPool, _ := pool.GetPoolName()
if common.CheckIfPresent(existingPool, string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) {
// In the last attempt, ignore and update the status.
if i == cnt-1 {
isPoolExists = false
if IsPendingStatus(cStorPoolGot) || IsEmptyStatus(cStorPoolGot) {
// Pool CR status is init. This means pool deployment was done
// successfully, but before updating the CR to Online status,
// the watcher container got restarted.
glog.Infof("Pool %v is online", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent))
common.SyncResources.IsImported = true
return string(apis.CStorPoolStatusOnline), nil
}
glog.Infof("Pool %v already present", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent))
common.SyncResources.IsImported = true
return string(apis.CStorPoolStatusErrorDuplicate), fmt.Errorf("Duplicate resource request")
}
glog.Infof("Attempt %v: Waiting...", i+1)
time.Sleep(common.PoolWaitInterval)
} else {
// If no pool is present while trying for getpoolname, set isPoolExists to false and
// break the loop, to import the pool later.
isPoolExists = false
}
}
var importPoolErr error
var status string
cachefileFlags := []bool{true, false}
for _, cachefileFlag := range cachefileFlags {
status, importPoolErr = c.importPool(cStorPoolGot, cachefileFlag)
if status == string(apis.CStorPoolStatusOnline) {
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported))
common.SyncResources.IsImported = true
return status, nil
}
}
// If InitialImportedPoolVol is not empty, notify the cvr controller
// through the channel.
if len(common.InitialImportedPoolVol) != 0 {
common.SyncResources.IsImported = true
} else {
common.SyncResources.IsImported = false
}
// IsEmptyStatus and IsPendingStatus check whether the cstorpool object is still in its initial (`init`) state.
if IsEmptyStatus(cStorPoolGot) || IsPendingStatus(cStorPoolGot) {
// LabelClear is to clear pool label
err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList)
if err != nil {
glog.Errorf(err.Error(), cStorPoolGot.GetUID())
} else {
glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID()))
}
// CreatePool is to create cstor pool.
err = pool.CreatePool(cStorPoolGot)
if err != nil {
glog.Errorf("Pool creation failure: %v", string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureCreate), string(common.MessageResourceFailCreate))
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Pool creation successful: %v", string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated))
return string(apis.CStorPoolStatusOnline), nil
}
glog.Infof("Not init status: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
return string(apis.CStorPoolStatusOffline), importPoolErr
}
func (c *CStorPoolController) cStorPoolDestroyEventHandler(cStorPoolGot *apis.CStorPool) (string, error) {
// DeletePool is to delete cstor pool.
err := pool.DeletePool(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID))
if err != nil {
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy))
return string(apis.CStorPoolStatusDeletionFailed), err
}
// LabelClear is to clear pool label
err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList)
if err != nil {
glog.Errorf(err.Error(), cStorPoolGot.GetUID())
} else {
glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID()))
}
// removeFinalizer is to remove finalizer of cStorPool resource.
err = c.removeFinalizer(cStorPoolGot)
if err != nil {
return string(apis.CStorPoolStatusOffline), err
}
return "", nil
}
// getPoolResource returns object corresponding to the resource key
func (c *CStorPoolController) getPoolResource(key string) (*apis.CStorPool, error) {
// Convert the key (namespace/name) string into a distinct name
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil, nil
}
cStorPoolGot, err := c.clientset.OpenebsV1alpha1().CStorPools().Get(name, metav1.GetOptions{})
if err != nil {
// The cStorPool resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("cStorPoolGot '%s' in work queue no longer exists", key))
return nil, nil
}
return nil, err
}
return cStorPoolGot, nil
}
// removeFinalizer is to remove finalizer of cstorpool resource.
func (c *CStorPoolController) removeFinalizer(cStorPoolGot *apis.CStorPool) error {
if len(cStorPoolGot.Finalizers) > 0 {
cStorPoolGot.Finalizers = []string{}
}
_, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cStorPoolGot)
if err != nil {
return err
}
glog.Infof("Removed Finalizer: %v, %v", cStorPoolGot.Name, string(cStorPoolGot.GetUID()))
return nil
}
func (c *CStorPoolController) importPool(cStorPoolGot *apis.CStorPool, cachefileFlag bool) (string, error) {
err := pool.ImportPool(cStorPoolGot, cachefileFlag)
if err == nil {
err = pool.SetCachefile(cStorPoolGot)
if err != nil {
common.SyncResources.IsImported = false
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Set cachefile successful: %v", string(cStorPoolGot.GetUID()))
// GetVolumes is called because, while importing a pool, volumes corresponding
// to the pool are also imported. This needs to be handled and made visible
// to cvr controller.
common.InitialImportedPoolVol, err = volumereplica.GetVolumes()
if err != nil {
common.SyncResources.IsImported = false
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Import Pool with cachefile successful: %v", string(cStorPoolGot.GetUID()))
return string(apis.CStorPoolStatusOnline), nil
}
return "", nil
}
// IsRightCStorPoolMgmt is to check if the pool request is for particular pod/application.
func IsRightCStorPoolMgmt(cStorPool *apis.CStorPool) bool {
if os.Getenv(string(common.OpenEBSIOCStorID)) == string(cStorPool.ObjectMeta.UID) {
return true
}
return false
}
// IsDestroyEvent is to check if the call is for cStorPool destroy.
func IsDestroyEvent(cStorPool *apis.CStorPool) bool {
if cStorPool.ObjectMeta.DeletionTimestamp != nil {
return true
}
return false
}
// IsOnlyStatusChange is to check only status change of cStorPool object.
func IsOnlyStatusChange(oldCStorPool, newCStorPool *apis.CStorPool) bool {
if reflect.DeepEqual(oldCStorPool.Spec, newCStorPool.Spec) &&
!reflect.DeepEqual(oldCStorPool.Status, newCStorPool.Status) {
return true
}
return false
}
// IsEmptyStatus is to check if the status of cStorPool object is empty.
func IsEmptyStatus(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusEmpty) {
glog.Infof("cStorPool empty status: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.Infof("Not empty status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsPendingStatus is to check if the status of cStorPool object is pending.
func IsPendingStatus(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusPending) {
glog.Infof("cStorPool pending: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.V(4).Infof("Not pending status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsErrorDuplicate is to check if the status of cStorPool object is error-duplicate.
func IsErrorDuplicate(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusErrorDuplicate) {
glog.Infof("cStorPool duplication error: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.V(4).Infof("Not error duplicate status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsDeletionFailedBefore is to make sure no other operation should happen if the
// status of cStorPool is deletion-failed.
func IsDeletionFailedBefore(cStorPool *apis.CStorPool) bool {
if cStorPool.Status.Phase == apis.CStorPoolStatusDeletionFailed {
return true
}
return false
}
| 1 | 9,546 |
Will the above call to cStorPoolAddEventHandler() get executed in the same thread as this line?
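For context, a minimal sketch (hypothetical names, not OpenEBS code) of the ordering concern behind this question: the handler call and the flag assignment run sequentially on the same goroutine, but whether the flag should be set while the mutex is still held depends on which other goroutines read it.
package main
import (
"fmt"
"sync"
)
// Toy model: a mutex-guarded add handler and a flag read by other goroutines.
// Setting the flag before Unlock publishes it together with the handler's
// effects; setting it after Unlock leaves a window where another goroutine
// can acquire the lock and still observe the flag as false.
type resources struct {
mux             sync.Mutex
addEventHandled bool
}
func (r *resources) handleAdd() {
r.mux.Lock()
// ... pool add handling would happen here ...
r.addEventHandled = true // published while still holding the lock
r.mux.Unlock()
}
func main() {
r := &resources{}
done := make(chan struct{})
go func() {
r.handleAdd()
close(done)
}()
<-done
r.mux.Lock()
fmt.Println("add event handled:", r.addEventHandled)
r.mux.Unlock()
}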
|
openebs-maya
|
go
|
@@ -600,11 +600,11 @@ func (c *coroutineLoweringPass) lowerFuncsPass() {
continue
}
- if len(fn.normalCalls) == 0 {
- // No suspend points. Lower without turning it into a coroutine.
+ if len(fn.normalCalls) == 0 && fn.fn.FirstBasicBlock().FirstInstruction().IsAAllocaInst().IsNil() {
+ // No suspend points or stack allocations. Lower without turning it into a coroutine.
c.lowerFuncFast(fn)
} else {
- // There are suspend points, so it is necessary to turn this into a coroutine.
+ // There are suspend points or stack allocations, so it is necessary to turn this into a coroutine.
c.lowerFuncCoro(fn)
}
}
| 1 |
package transform
// This file lowers asynchronous functions and goroutine starts when using the coroutines scheduler.
// This is accomplished by inserting LLVM intrinsics which are used in order to save the states of functions.
import (
"errors"
"go/token"
"strconv"
"github.com/tinygo-org/tinygo/compiler/llvmutil"
"tinygo.org/x/go-llvm"
)
// LowerCoroutines turns async functions into coroutines.
// This must be run with the coroutines scheduler.
//
// Before this pass, goroutine starts are expressed as a call to an intrinsic called "internal/task.start".
// This intrinsic accepts the function pointer and a pointer to a struct containing the function's arguments.
//
// Before this pass, an intrinsic called "internal/task.Pause" is used to express suspensions of the current goroutine.
//
// This pass first accumulates a list of blocking functions.
// A function is considered "blocking" if it calls "internal/task.Pause" or any other blocking function.
//
// Blocking calls are implemented by turning blocking functions into a coroutine.
// The body of each blocking function is modified to start a new coroutine, and to return after the first suspend.
// After calling a blocking function, the caller coroutine suspends.
// The caller also provides a buffer to store the return value into.
// When a blocking function returns, the return value is written into this buffer and then the caller is queued to run.
//
// Goroutine starts which invoke non-blocking functions are implemented as direct calls.
// Goroutine starts are replaced with the creation of a new task data structure followed by a call to the start of the blocking function.
// The task structure is populated with a "noop" coroutine before invoking the blocking function.
// When the blocking function returns, it resumes this "noop" coroutine which does nothing.
// The goroutine starter is able to continue after the first suspend of the started goroutine.
//
// The transformation of a function to a coroutine is accomplished using LLVM's coroutines system (https://llvm.org/docs/Coroutines.html).
// The simplest implementation of a coroutine inserts a suspend point after every blocking call.
//
// Transforming blocking functions into coroutines and calls into suspend points is extremely expensive.
// In many cases, a blocking call is followed immediately by a function terminator (a return or an "unreachable" instruction).
// This is a blocking "tail call".
// In a non-returning tail call (call to a non-returning function, such as an infinite loop), the coroutine can exit without any extra work.
// In a returning tail call, the returned value must either be the return of the call or a value known before the call.
// If the return value of the caller is the return of the callee, the coroutine can exit without any extra work and the tailed call will instead return to the caller of the caller.
// If the return value is known in advance, this result can be stored into the parent's return buffer before the call so that a suspend is unnecessary.
// If the callee returns a value that the caller does not need, a return buffer can be allocated on the heap so that it outlives the coroutine.
//
// In the implementation of time.Sleep, the current task is pushed onto a timer queue and then suspended.
// Since the only suspend point is a call to "internal/task.Pause" followed by a return, there is no need to transform this into a coroutine.
// This generalizes to all blocking functions in which all suspend points can be elided.
// This optimization saves a substantial amount of binary size.
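//
// As a hedged illustration of that elidable shape (the identifiers below are made up for this sketch, not taken from this module):
//
//	func sleep(d int64) {
//		pushCurrentTaskOntoTimerQueue(d) // hypothetical helper: queue the current task to be woken later
//		task.Pause()                     // the only suspend point, a blocking tail call
//	}                                    // followed directly by a void return, so no coroutine frame is required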
func LowerCoroutines(mod llvm.Module, needStackSlots bool) error {
ctx := mod.Context()
builder := ctx.NewBuilder()
defer builder.Dispose()
target := llvm.NewTargetData(mod.DataLayout())
defer target.Dispose()
pass := &coroutineLoweringPass{
mod: mod,
ctx: ctx,
builder: builder,
target: target,
needStackSlots: needStackSlots,
}
err := pass.load()
if err != nil {
return err
}
// Supply task operands to async calls.
pass.supplyTaskOperands()
// Analyze async returns.
pass.returnAnalysisPass()
// Categorize async calls.
pass.categorizeCalls()
// Lower async functions.
pass.lowerFuncsPass()
// Lower calls to internal/task.Current.
pass.lowerCurrent()
// Lower goroutine starts.
pass.lowerStartsPass()
// Fix annotations on async call params.
pass.fixAnnotations()
if needStackSlots {
// Set up garbage collector tracking of tasks at start.
err = pass.trackGoroutines()
if err != nil {
return err
}
}
return nil
}
// CoroutinesError is an error returned when coroutine lowering failed, for
// example because an async function is exported.
type CoroutinesError struct {
Msg string
Pos token.Position
Traceback []CoroutinesErrorLine
}
// CoroutinesErrorLine is a single line of a CoroutinesError traceback.
type CoroutinesErrorLine struct {
Name string // function name
Position token.Position // position in the function
}
// Error implements the error interface by returning a simple error message
// without the stack.
func (err CoroutinesError) Error() string {
return err.Msg
}
type asyncCallInfo struct {
fn llvm.Value
call llvm.Value
}
// asyncFunc is a metadata container for an asynchronous function.
type asyncFunc struct {
// fn is the underlying function pointer.
fn llvm.Value
// rawTask is the parameter where the task pointer is passed in.
rawTask llvm.Value
// callers is a set of all functions which call this async function.
callers map[llvm.Value]struct{}
// returns is a list of returns in the function, along with metadata.
returns []asyncReturn
// calls is a list of all calls in the asyncFunc.
// normalCalls is a list of all intermediate suspending calls in the asyncFunc.
// tailCalls is a list of all tail calls in the asyncFunc.
calls, normalCalls, tailCalls []llvm.Value
}
// asyncReturn is a metadata container for a return from an asynchronous function.
type asyncReturn struct {
// block is the basic block terminated by the return.
block llvm.BasicBlock
// kind is the kind of the return.
kind returnKind
}
// coroutineLoweringPass is a goroutine lowering pass which is used with the "coroutines" scheduler.
type coroutineLoweringPass struct {
mod llvm.Module
ctx llvm.Context
builder llvm.Builder
target llvm.TargetData
// asyncFuncs is a map of all asyncFuncs.
// The map keys are function pointers.
asyncFuncs map[llvm.Value]*asyncFunc
asyncFuncsOrdered []*asyncFunc
// calls is a slice of all of the async calls in the module.
calls []llvm.Value
i8ptr llvm.Type
// memory management functions from the runtime
alloc, free llvm.Value
// coroutine intrinsics
start, pause, current llvm.Value
setState, setRetPtr, getRetPtr, returnTo, returnCurrent llvm.Value
createTask llvm.Value
// llvm.coro intrinsics
coroId, coroSize, coroBegin, coroSuspend, coroEnd, coroFree, coroSave llvm.Value
trackPointer llvm.Value
needStackSlots bool
}
// findAsyncFuncs finds all asynchronous functions.
// A function is considered asynchronous if it calls an asynchronous function or intrinsic.
func (c *coroutineLoweringPass) findAsyncFuncs() error {
asyncs := map[llvm.Value]*asyncFunc{}
asyncsOrdered := []llvm.Value{}
calls := []llvm.Value{}
callsAsyncFunction := map[llvm.Value]asyncCallInfo{}
// Use a breadth-first search to find all async functions.
worklist := []llvm.Value{c.pause}
for len(worklist) > 0 {
// Pop a function off the worklist.
fn := worklist[0]
worklist = worklist[1:]
// Get task pointer argument.
task := fn.LastParam()
if fn != c.pause && (task.IsNil() || task.Name() != "parentHandle") {
// Exported functions must not do async operations.
err := CoroutinesError{
Msg: "blocking operation in exported function: " + fn.Name(),
Pos: getPosition(fn),
}
f := fn
for !f.IsNil() && f != c.pause {
data := callsAsyncFunction[f]
err.Traceback = append(err.Traceback, CoroutinesErrorLine{f.Name(), getPosition(data.call)})
f = data.fn
}
return err
}
// Search all uses of the function while collecting callers.
callers := map[llvm.Value]struct{}{}
for use := fn.FirstUse(); !use.IsNil(); use = use.NextUse() {
user := use.User()
if user.IsACallInst().IsNil() {
// User is not a call instruction, so this is irrelevant.
continue
}
if user.CalledValue() != fn {
// Not the called value.
continue
}
// Add to calls list.
calls = append(calls, user)
// Get the caller.
caller := user.InstructionParent().Parent()
// Add as caller.
callers[caller] = struct{}{}
if _, ok := asyncs[caller]; ok {
// Already marked caller as async.
continue
}
// Mark the caller as async.
// Use nil as a temporary value. It will be replaced later.
asyncs[caller] = nil
asyncsOrdered = append(asyncsOrdered, caller)
// Track which calls caused this function to be marked async (for
// better diagnostics).
callsAsyncFunction[caller] = asyncCallInfo{
fn: fn,
call: user,
}
// Put the caller on the worklist.
worklist = append(worklist, caller)
}
asyncs[fn] = &asyncFunc{
fn: fn,
rawTask: task,
callers: callers,
}
}
// Flip the order of the async functions so that the top ones are lowered first.
for i := 0; i < len(asyncsOrdered)/2; i++ {
asyncsOrdered[i], asyncsOrdered[len(asyncsOrdered)-(i+1)] = asyncsOrdered[len(asyncsOrdered)-(i+1)], asyncsOrdered[i]
}
// Map the elements of asyncsOrdered to *asyncFunc.
asyncFuncsOrdered := make([]*asyncFunc, len(asyncsOrdered))
for i, v := range asyncsOrdered {
asyncFuncsOrdered[i] = asyncs[v]
}
c.asyncFuncs = asyncs
c.asyncFuncsOrdered = asyncFuncsOrdered
c.calls = calls
return nil
}
func (c *coroutineLoweringPass) load() error {
// Find memory management functions from the runtime.
c.alloc = c.mod.NamedFunction("runtime.alloc")
if c.alloc.IsNil() {
return ErrMissingIntrinsic{"runtime.alloc"}
}
c.free = c.mod.NamedFunction("runtime.free")
if c.free.IsNil() {
return ErrMissingIntrinsic{"runtime.free"}
}
// Find intrinsics.
c.pause = c.mod.NamedFunction("internal/task.Pause")
if c.pause.IsNil() {
return ErrMissingIntrinsic{"internal/task.Pause"}
}
c.start = c.mod.NamedFunction("internal/task.start")
if c.start.IsNil() {
return ErrMissingIntrinsic{"internal/task.start"}
}
c.current = c.mod.NamedFunction("internal/task.Current")
if c.current.IsNil() {
return ErrMissingIntrinsic{"internal/task.Current"}
}
c.setState = c.mod.NamedFunction("(*internal/task.Task).setState")
if c.setState.IsNil() {
return ErrMissingIntrinsic{"(*internal/task.Task).setState"}
}
c.setRetPtr = c.mod.NamedFunction("(*internal/task.Task).setReturnPtr")
if c.setRetPtr.IsNil() {
return ErrMissingIntrinsic{"(*internal/task.Task).setReturnPtr"}
}
c.getRetPtr = c.mod.NamedFunction("(*internal/task.Task).getReturnPtr")
if c.getRetPtr.IsNil() {
return ErrMissingIntrinsic{"(*internal/task.Task).getReturnPtr"}
}
c.returnTo = c.mod.NamedFunction("(*internal/task.Task).returnTo")
if c.returnTo.IsNil() {
return ErrMissingIntrinsic{"(*internal/task.Task).returnTo"}
}
c.returnCurrent = c.mod.NamedFunction("(*internal/task.Task).returnCurrent")
if c.returnCurrent.IsNil() {
return ErrMissingIntrinsic{"(*internal/task.Task).returnCurrent"}
}
c.createTask = c.mod.NamedFunction("internal/task.createTask")
if c.createTask.IsNil() {
return ErrMissingIntrinsic{"internal/task.createTask"}
}
if c.needStackSlots {
c.trackPointer = c.mod.NamedFunction("runtime.trackPointer")
if c.trackPointer.IsNil() {
return ErrMissingIntrinsic{"runtime.trackPointer"}
}
}
// Find async functions.
err := c.findAsyncFuncs()
if err != nil {
return err
}
// Get i8* type.
c.i8ptr = llvm.PointerType(c.ctx.Int8Type(), 0)
// Build LLVM coroutine intrinsic.
coroIdType := llvm.FunctionType(c.ctx.TokenType(), []llvm.Type{c.ctx.Int32Type(), c.i8ptr, c.i8ptr, c.i8ptr}, false)
c.coroId = llvm.AddFunction(c.mod, "llvm.coro.id", coroIdType)
sizeT := c.alloc.Param(0).Type()
coroSizeType := llvm.FunctionType(sizeT, nil, false)
c.coroSize = llvm.AddFunction(c.mod, "llvm.coro.size.i"+strconv.Itoa(sizeT.IntTypeWidth()), coroSizeType)
coroBeginType := llvm.FunctionType(c.i8ptr, []llvm.Type{c.ctx.TokenType(), c.i8ptr}, false)
c.coroBegin = llvm.AddFunction(c.mod, "llvm.coro.begin", coroBeginType)
coroSuspendType := llvm.FunctionType(c.ctx.Int8Type(), []llvm.Type{c.ctx.TokenType(), c.ctx.Int1Type()}, false)
c.coroSuspend = llvm.AddFunction(c.mod, "llvm.coro.suspend", coroSuspendType)
coroEndType := llvm.FunctionType(c.ctx.Int1Type(), []llvm.Type{c.i8ptr, c.ctx.Int1Type()}, false)
c.coroEnd = llvm.AddFunction(c.mod, "llvm.coro.end", coroEndType)
coroFreeType := llvm.FunctionType(c.i8ptr, []llvm.Type{c.ctx.TokenType(), c.i8ptr}, false)
c.coroFree = llvm.AddFunction(c.mod, "llvm.coro.free", coroFreeType)
coroSaveType := llvm.FunctionType(c.ctx.TokenType(), []llvm.Type{c.i8ptr}, false)
c.coroSave = llvm.AddFunction(c.mod, "llvm.coro.save", coroSaveType)
return nil
}
func (c *coroutineLoweringPass) track(ptr llvm.Value) {
if c.needStackSlots {
if ptr.Type() != c.i8ptr {
ptr = c.builder.CreateBitCast(ptr, c.i8ptr, "track.bitcast")
}
c.builder.CreateCall(c.trackPointer, []llvm.Value{ptr, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
}
}
// lowerStartSync lowers a goroutine start of a synchronous function to a synchronous call.
func (c *coroutineLoweringPass) lowerStartSync(start llvm.Value) {
c.builder.SetInsertPointBefore(start)
// Get function to call.
fn := start.Operand(0).Operand(0)
// Create the list of params for the call.
paramTypes := fn.Type().ElementType().ParamTypes()
params := llvmutil.EmitPointerUnpack(c.builder, c.mod, start.Operand(1), paramTypes[:len(paramTypes)-1])
params = append(params, llvm.Undef(c.i8ptr))
// Generate call to function.
c.builder.CreateCall(fn, params, "")
// Remove start call.
start.EraseFromParentAsInstruction()
}
// supplyTaskOperands fills in the task operands of async calls.
func (c *coroutineLoweringPass) supplyTaskOperands() {
var curCalls []llvm.Value
for use := c.current.FirstUse(); !use.IsNil(); use = use.NextUse() {
curCalls = append(curCalls, use.User())
}
for _, call := range append(curCalls, c.calls...) {
c.builder.SetInsertPointBefore(call)
task := c.asyncFuncs[call.InstructionParent().Parent()].rawTask
call.SetOperand(call.OperandsCount()-2, task)
}
}
// returnKind is a classification of a type of function terminator.
type returnKind uint8
const (
// returnNormal is a terminator that returns a value normally from a function.
returnNormal returnKind = iota
// returnVoid is a terminator that exits normally without returning a value.
returnVoid
// returnVoidTail is a terminator which is a tail call to a void-returning function in a void-returning function.
returnVoidTail
// returnTail is a terminator which is a tail call to a value-returning function where the value returned by the callee is the caller's return value.
returnTail
// returnDeadTail is a terminator which is a call to a non-returning asynchronous function.
returnDeadTail
// returnAlternateTail is a terminator which is a tail call to a value-returning function where a previously acquired value is returned by the caller instead of the callee's result.
returnAlternateTail
// returnDitchedTail is a terminator which is a tail call to a value-returning function from a caller that returns void, so the callee's result is discarded.
returnDitchedTail
// returnDelayedValue is a terminator in which a void-returning tail call is followed by a return of a previous value.
returnDelayedValue
)
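// As a rough source-level illustration of the tail-call kinds above (a hedged sketch, not code from this module):
//
//	return blockingCall()    // returnTail: the caller returns whatever the callee returns
//
//	blockingVoidCall()
//	return x                 // returnDelayedValue: x was computed before the void tail call
//
//	_ = blockingValueCall()
//	return x                 // returnAlternateTail: the callee's result is discarded and x is returned instead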
// isAsyncCall returns whether the specified call is async.
func (c *coroutineLoweringPass) isAsyncCall(call llvm.Value) bool {
_, ok := c.asyncFuncs[call.CalledValue()]
return ok
}
// analyzeFuncReturns analyzes and classifies the returns of a function.
func (c *coroutineLoweringPass) analyzeFuncReturns(fn *asyncFunc) {
returns := []asyncReturn{}
if fn.fn == c.pause {
// Skip pause.
fn.returns = returns
return
}
for _, bb := range fn.fn.BasicBlocks() {
last := bb.LastInstruction()
switch last.InstructionOpcode() {
case llvm.Ret:
// Check if it is a void return.
isVoid := fn.fn.Type().ElementType().ReturnType().TypeKind() == llvm.VoidTypeKind
// Analyze previous instruction.
prev := llvm.PrevInstruction(last)
switch {
case prev.IsNil():
fallthrough
case prev.IsACallInst().IsNil():
fallthrough
case !c.isAsyncCall(prev):
// This is not any form of asynchronous tail call.
if isVoid {
returns = append(returns, asyncReturn{
block: bb,
kind: returnVoid,
})
} else {
returns = append(returns, asyncReturn{
block: bb,
kind: returnNormal,
})
}
case isVoid:
if prev.CalledValue().Type().ElementType().ReturnType().TypeKind() == llvm.VoidTypeKind {
// This is a tail call to a void-returning function from a function with a void return.
returns = append(returns, asyncReturn{
block: bb,
kind: returnVoidTail,
})
} else {
// This is a tail call to a value-returning function from a function with a void return.
// The returned value will be ditched.
returns = append(returns, asyncReturn{
block: bb,
kind: returnDitchedTail,
})
}
case last.Operand(0) == prev:
// This is a regular tail call. The return of the callee is returned to the parent.
returns = append(returns, asyncReturn{
block: bb,
kind: returnTail,
})
case prev.CalledValue().Type().ElementType().ReturnType().TypeKind() == llvm.VoidTypeKind:
// This is a tail call that returns a previous value after waiting on a void function.
returns = append(returns, asyncReturn{
block: bb,
kind: returnDelayedValue,
})
default:
// This is a tail call that returns a value that is available before the function call.
returns = append(returns, asyncReturn{
block: bb,
kind: returnAlternateTail,
})
}
case llvm.Unreachable:
prev := llvm.PrevInstruction(last)
if prev.IsNil() || prev.IsACallInst().IsNil() || !c.isAsyncCall(prev) {
// This unreachable instruction does not behave as an asynchronous return.
continue
}
// This is an asynchronous tail call to a function that does not return.
returns = append(returns, asyncReturn{
block: bb,
kind: returnDeadTail,
})
}
}
fn.returns = returns
}
// returnAnalysisPass runs an analysis pass which classifies the returns of all async functions.
func (c *coroutineLoweringPass) returnAnalysisPass() {
for _, async := range c.asyncFuncsOrdered {
c.analyzeFuncReturns(async)
}
}
// categorizeCalls categorizes all asynchronous calls into regular vs. async and matches them to their callers.
func (c *coroutineLoweringPass) categorizeCalls() {
// Sort calls into their respective callers.
for _, call := range c.calls {
caller := c.asyncFuncs[call.InstructionParent().Parent()]
caller.calls = append(caller.calls, call)
}
// Separate regular and tail calls.
for _, async := range c.asyncFuncsOrdered {
// Search returns for tail calls.
tails := map[llvm.Value]struct{}{}
for _, ret := range async.returns {
switch ret.kind {
case returnVoidTail, returnTail, returnDeadTail, returnAlternateTail, returnDitchedTail, returnDelayedValue:
// This is a tail return. The previous instruction is a tail call.
tails[llvm.PrevInstruction(ret.block.LastInstruction())] = struct{}{}
}
}
// Separate tail calls and regular calls.
normalCalls, tailCalls := []llvm.Value{}, []llvm.Value{}
for _, call := range async.calls {
if _, ok := tails[call]; ok {
// This is a tail call.
tailCalls = append(tailCalls, call)
} else {
// This is a regular call.
normalCalls = append(normalCalls, call)
}
}
async.normalCalls = normalCalls
async.tailCalls = tailCalls
}
}
// lowerFuncsPass lowers all functions, turning them into coroutines if necessary.
func (c *coroutineLoweringPass) lowerFuncsPass() {
for _, fn := range c.asyncFuncs {
if fn.fn == c.pause {
// Skip. It is an intrinsic.
continue
}
if len(fn.normalCalls) == 0 {
// No suspend points. Lower without turning it into a coroutine.
c.lowerFuncFast(fn)
} else {
// There are suspend points, so it is necessary to turn this into a coroutine.
c.lowerFuncCoro(fn)
}
}
}
func (async *asyncFunc) hasValueStoreReturn() bool {
for _, ret := range async.returns {
switch ret.kind {
case returnNormal, returnAlternateTail, returnDelayedValue:
return true
}
}
return false
}
// heapAlloc creates a heap allocation large enough to hold the supplied type.
// The allocation is returned as a raw i8* pointer.
// This allocation is not automatically tracked by the garbage collector, and should thus be stored into a tracked memory object immediately.
func (c *coroutineLoweringPass) heapAlloc(t llvm.Type, name string) llvm.Value {
sizeT := c.alloc.FirstParam().Type()
size := llvm.ConstInt(sizeT, c.target.TypeAllocSize(t), false)
return c.builder.CreateCall(c.alloc, []llvm.Value{size, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, name)
}
// lowerFuncFast lowers an async function that has no suspend points.
func (c *coroutineLoweringPass) lowerFuncFast(fn *asyncFunc) {
// Get return type.
retType := fn.fn.Type().ElementType().ReturnType()
// Get task value.
c.insertPointAfterAllocas(fn.fn)
task := c.builder.CreateCall(c.current, []llvm.Value{llvm.Undef(c.i8ptr), fn.rawTask}, "task")
// Get return pointer if applicable.
var rawRetPtr, retPtr llvm.Value
if fn.hasValueStoreReturn() {
rawRetPtr = c.builder.CreateCall(c.getRetPtr, []llvm.Value{task, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "ret.ptr")
retType = fn.fn.Type().ElementType().ReturnType()
retPtr = c.builder.CreateBitCast(rawRetPtr, llvm.PointerType(retType, 0), "ret.ptr.bitcast")
}
// Lower returns.
for _, ret := range fn.returns {
// Get terminator.
terminator := ret.block.LastInstruction()
// Get tail call if applicable.
var call llvm.Value
switch ret.kind {
case returnVoidTail, returnTail, returnDeadTail, returnAlternateTail, returnDitchedTail, returnDelayedValue:
call = llvm.PrevInstruction(terminator)
}
switch ret.kind {
case returnNormal:
c.builder.SetInsertPointBefore(terminator)
// Store value into return pointer.
c.builder.CreateStore(terminator.Operand(0), retPtr)
// Resume caller.
c.builder.CreateCall(c.returnCurrent, []llvm.Value{task, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
// Erase return argument.
terminator.SetOperand(0, llvm.Undef(retType))
case returnVoid:
c.builder.SetInsertPointBefore(terminator)
// Resume caller.
c.builder.CreateCall(c.returnCurrent, []llvm.Value{task, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnVoidTail:
// Nothing to do. There is already a tail call followed by a void return.
case returnTail:
// Erase return argument.
terminator.SetOperand(0, llvm.Undef(retType))
case returnDeadTail:
// Replace unreachable with immediate return, without resuming the caller.
c.builder.SetInsertPointBefore(terminator)
if retType.TypeKind() == llvm.VoidTypeKind {
c.builder.CreateRetVoid()
} else {
c.builder.CreateRet(llvm.Undef(retType))
}
terminator.EraseFromParentAsInstruction()
case returnAlternateTail:
c.builder.SetInsertPointBefore(call)
// Store return value.
c.builder.CreateStore(terminator.Operand(0), retPtr)
// Heap-allocate a return buffer for the discarded return.
alternateBuf := c.heapAlloc(call.Type(), "ret.alternate")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, alternateBuf, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
// Erase return argument.
terminator.SetOperand(0, llvm.Undef(retType))
case returnDitchedTail:
c.builder.SetInsertPointBefore(call)
// Heap-allocate a return buffer for the discarded return.
ditchBuf := c.heapAlloc(call.Type(), "ret.ditch")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, ditchBuf, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnDelayedValue:
c.builder.SetInsertPointBefore(call)
// Store value into return pointer.
c.builder.CreateStore(terminator.Operand(0), retPtr)
// Erase return argument.
terminator.SetOperand(0, llvm.Undef(retType))
}
// Delete call if it is a pause, because it has already been lowered.
if !call.IsNil() && call.CalledValue() == c.pause {
call.EraseFromParentAsInstruction()
}
}
}
// insertPointAfterAllocas sets the insert point of the builder to be immediately after the last alloca in the entry block.
func (c *coroutineLoweringPass) insertPointAfterAllocas(fn llvm.Value) {
inst := fn.EntryBasicBlock().FirstInstruction()
for !inst.IsAAllocaInst().IsNil() {
inst = llvm.NextInstruction(inst)
}
c.builder.SetInsertPointBefore(inst)
}
// lowerCallReturn lowers the return value of an async call by creating a return buffer and loading the returned value from it.
func (c *coroutineLoweringPass) lowerCallReturn(caller *asyncFunc, call llvm.Value) {
// Get return type.
retType := call.Type()
if retType.TypeKind() == llvm.VoidTypeKind {
// Void return. Nothing to do.
return
}
// Create alloca for return buffer.
alloca := llvmutil.CreateInstructionAlloca(c.builder, c.mod, retType, call, "call.return")
// Store new return buffer into task before call.
c.builder.SetInsertPointBefore(call)
task := c.builder.CreateCall(c.current, []llvm.Value{llvm.Undef(c.i8ptr), caller.rawTask}, "call.task")
retPtr := c.builder.CreateBitCast(alloca, c.i8ptr, "call.return.bitcast")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, retPtr, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
// Load return value after call.
c.builder.SetInsertPointBefore(llvm.NextInstruction(call))
ret := c.builder.CreateLoad(alloca, "call.return.load")
// Replace call value with loaded return.
call.ReplaceAllUsesWith(ret)
}
// lowerFuncCoro transforms an async function into a coroutine by lowering async operations to `llvm.coro` intrinsics.
// See https://llvm.org/docs/Coroutines.html for more information on these intrinsics.
func (c *coroutineLoweringPass) lowerFuncCoro(fn *asyncFunc) {
returnType := fn.fn.Type().ElementType().ReturnType()
// Prepare coroutine state.
c.insertPointAfterAllocas(fn.fn)
// %coro.id = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
coroId := c.builder.CreateCall(c.coroId, []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
llvm.ConstNull(c.i8ptr),
llvm.ConstNull(c.i8ptr),
llvm.ConstNull(c.i8ptr),
}, "coro.id")
// %coro.size = call i32 @llvm.coro.size.i32()
coroSize := c.builder.CreateCall(c.coroSize, []llvm.Value{}, "coro.size")
// %coro.alloc = call i8* runtime.alloc(i32 %coro.size)
coroAlloc := c.builder.CreateCall(c.alloc, []llvm.Value{coroSize, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "coro.alloc")
// %coro.state = call noalias i8* @llvm.coro.begin(token %coro.id, i8* %coro.alloc)
coroState := c.builder.CreateCall(c.coroBegin, []llvm.Value{coroId, coroAlloc}, "coro.state")
c.track(coroState)
// Store state into task.
task := c.builder.CreateCall(c.current, []llvm.Value{llvm.Undef(c.i8ptr), fn.rawTask}, "task")
parentState := c.builder.CreateCall(c.setState, []llvm.Value{task, coroState, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "task.state.parent")
// Get return pointer if needed.
var retPtrRaw, retPtr llvm.Value
if returnType.TypeKind() != llvm.VoidTypeKind {
retPtrRaw = c.builder.CreateCall(c.getRetPtr, []llvm.Value{task, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "task.retPtr")
retPtr = c.builder.CreateBitCast(retPtrRaw, llvm.PointerType(fn.fn.Type().ElementType().ReturnType(), 0), "task.retPtr.bitcast")
}
// Build suspend block.
// This is executed when the coroutine is about to suspend.
suspend := c.ctx.AddBasicBlock(fn.fn, "suspend")
c.builder.SetInsertPointAtEnd(suspend)
// %unused = call i1 @llvm.coro.end(i8* %coro.state, i1 false)
c.builder.CreateCall(c.coroEnd, []llvm.Value{coroState, llvm.ConstInt(c.ctx.Int1Type(), 0, false)}, "unused")
// Insert return.
if returnType.TypeKind() == llvm.VoidTypeKind {
c.builder.CreateRetVoid()
} else {
c.builder.CreateRet(llvm.Undef(returnType))
}
// Build cleanup block.
// This is executed before the function returns in order to clean up resources.
cleanup := c.ctx.AddBasicBlock(fn.fn, "cleanup")
c.builder.SetInsertPointAtEnd(cleanup)
// %coro.memFree = call i8* @llvm.coro.free(token %coro.id, i8* %coro.state)
coroMemFree := c.builder.CreateCall(c.coroFree, []llvm.Value{coroId, coroState}, "coro.memFree")
// call i8* runtime.free(i8* %coro.memFree)
c.builder.CreateCall(c.free, []llvm.Value{coroMemFree, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
// Branch to suspend block.
c.builder.CreateBr(suspend)
// Restore old state before tail calls.
for _, call := range fn.tailCalls {
if !llvm.NextInstruction(call).IsAUnreachableInst().IsNil() {
// Callee never returns, so the state restore is ineffectual.
continue
}
c.builder.SetInsertPointBefore(call)
c.builder.CreateCall(c.setState, []llvm.Value{task, parentState, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "coro.state.restore")
}
// Lower returns.
for _, ret := range fn.returns {
// Get terminator instruction.
terminator := ret.block.LastInstruction()
// Get tail call if applicable.
var call llvm.Value
switch ret.kind {
case returnVoidTail, returnTail, returnDeadTail, returnAlternateTail, returnDitchedTail, returnDelayedValue:
call = llvm.PrevInstruction(terminator)
}
switch ret.kind {
case returnNormal:
c.builder.SetInsertPointBefore(terminator)
// Store value into return pointer.
c.builder.CreateStore(terminator.Operand(0), retPtr)
// Resume caller.
c.builder.CreateCall(c.returnTo, []llvm.Value{task, parentState, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnVoid:
c.builder.SetInsertPointBefore(terminator)
// Resume caller.
c.builder.CreateCall(c.returnTo, []llvm.Value{task, parentState, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnVoidTail, returnDeadTail:
// Nothing to do.
case returnTail:
c.builder.SetInsertPointBefore(call)
// Restore the return pointer so that the caller can store into it.
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, retPtrRaw, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnAlternateTail:
c.builder.SetInsertPointBefore(call)
// Store return value.
c.builder.CreateStore(terminator.Operand(0), retPtr)
// Heap-allocate a return buffer for the discarded return.
alternateBuf := c.heapAlloc(call.Type(), "ret.alternate")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, alternateBuf, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnDitchedTail:
c.builder.SetInsertPointBefore(call)
// Heap-allocate a return buffer for the discarded return.
ditchBuf := c.heapAlloc(call.Type(), "ret.ditch")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, ditchBuf, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
case returnDelayedValue:
c.builder.SetInsertPointBefore(call)
// Store return value.
c.builder.CreateStore(terminator.Operand(0), retPtr)
}
// Delete call if it is a pause, because it has already been lowered.
if !call.IsNil() && call.CalledValue() == c.pause {
call.EraseFromParentAsInstruction()
}
// Replace terminator with branch to cleanup.
terminator.EraseFromParentAsInstruction()
c.builder.SetInsertPointAtEnd(ret.block)
c.builder.CreateBr(cleanup)
}
// Lower regular calls.
for _, call := range fn.normalCalls {
// Lower return value of call.
c.lowerCallReturn(fn, call)
// Get originating basic block.
bb := call.InstructionParent()
// Split block.
wakeup := llvmutil.SplitBasicBlock(c.builder, call, llvm.NextBasicBlock(bb), "wakeup")
// Insert suspension and switch.
c.builder.SetInsertPointAtEnd(bb)
// %coro.save = call token @llvm.coro.save(i8* %coro.state)
save := c.builder.CreateCall(c.coroSave, []llvm.Value{coroState}, "coro.save")
// %call.suspend = llvm.coro.suspend(token %coro.save, i1 false)
// switch i8 %call.suspend, label %suspend [i8 0, label %wakeup
// i8 1, label %cleanup]
suspendValue := c.builder.CreateCall(c.coroSuspend, []llvm.Value{save, llvm.ConstInt(c.ctx.Int1Type(), 0, false)}, "call.suspend")
sw := c.builder.CreateSwitch(suspendValue, suspend, 2)
sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 0, false), wakeup)
sw.AddCase(llvm.ConstInt(c.ctx.Int8Type(), 1, false), cleanup)
// Delete call if it is a pause, because it has already been lowered.
if call.CalledValue() == c.pause {
call.EraseFromParentAsInstruction()
}
c.builder.SetInsertPointBefore(wakeup.FirstInstruction())
c.track(coroState)
}
}
// lowerCurrent lowers calls to internal/task.Current to bitcasts.
func (c *coroutineLoweringPass) lowerCurrent() error {
taskType := c.current.Type().ElementType().ReturnType()
deleteQueue := []llvm.Value{}
for use := c.current.FirstUse(); !use.IsNil(); use = use.NextUse() {
// Get user.
user := use.User()
if user.IsACallInst().IsNil() || user.CalledValue() != c.current {
return errorAt(user, "unexpected non-call use of task.Current")
}
// Replace with bitcast.
c.builder.SetInsertPointBefore(user)
raw := user.Operand(1)
if !raw.IsAUndefValue().IsNil() || raw.IsNull() {
return errors.New("undefined task")
}
task := c.builder.CreateBitCast(raw, taskType, "task.current")
user.ReplaceAllUsesWith(task)
deleteQueue = append(deleteQueue, user)
}
// Delete calls.
for _, inst := range deleteQueue {
inst.EraseFromParentAsInstruction()
}
return nil
}
// lowerStart lowers a goroutine start into a task creation and call or a synchronous call.
func (c *coroutineLoweringPass) lowerStart(start llvm.Value) {
c.builder.SetInsertPointBefore(start)
// Get function to call.
fn := start.Operand(0).Operand(0)
if _, ok := c.asyncFuncs[fn]; !ok {
// Turn into synchronous call.
c.lowerStartSync(start)
return
}
// Create the list of params for the call.
paramTypes := fn.Type().ElementType().ParamTypes()
params := llvmutil.EmitPointerUnpack(c.builder, c.mod, start.Operand(1), paramTypes[:len(paramTypes)-1])
// Create task.
task := c.builder.CreateCall(c.createTask, []llvm.Value{llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "start.task")
rawTask := c.builder.CreateBitCast(task, c.i8ptr, "start.task.bitcast")
params = append(params, rawTask)
// Generate a return buffer if necessary.
returnType := fn.Type().ElementType().ReturnType()
if returnType.TypeKind() == llvm.VoidTypeKind {
// No return buffer necessary for a void return.
} else {
// Check for any undead returns.
var undead bool
for _, ret := range c.asyncFuncs[fn].returns {
if ret.kind != returnDeadTail {
// This return results in a value being eventually stored.
undead = true
break
}
}
if undead {
// The function stores a value into a return buffer, so we need to create one.
retBuf := c.heapAlloc(returnType, "ret.ditch")
c.builder.CreateCall(c.setRetPtr, []llvm.Value{task, retBuf, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
}
}
// Generate call to function.
c.builder.CreateCall(fn, params, "")
// Erase start call.
start.EraseFromParentAsInstruction()
}
// lowerStartsPass lowers all goroutine starts.
func (c *coroutineLoweringPass) lowerStartsPass() {
starts := []llvm.Value{}
for use := c.start.FirstUse(); !use.IsNil(); use = use.NextUse() {
starts = append(starts, use.User())
}
for _, start := range starts {
c.lowerStart(start)
}
}
func (c *coroutineLoweringPass) fixAnnotations() {
for f := range c.asyncFuncs {
// These properties were added by the functionattrs pass. Remove
// them, because now we start using the parameter.
// https://llvm.org/docs/Passes.html#functionattrs-deduce-function-attributes
for _, kind := range []string{"nocapture", "readnone"} {
kindID := llvm.AttributeKindID(kind)
n := f.ParamsCount()
for i := 0; i <= n; i++ {
f.RemoveEnumAttributeAtIndex(i, kindID)
}
}
}
}
// trackGoroutines adds runtime.trackPointer calls to track goroutine starts and data.
func (c *coroutineLoweringPass) trackGoroutines() error {
trackPointer := c.mod.NamedFunction("runtime.trackPointer")
if trackPointer.IsNil() {
return ErrMissingIntrinsic{"runtime.trackPointer"}
}
trackFunctions := []llvm.Value{c.createTask, c.setState, c.getRetPtr}
for _, fn := range trackFunctions {
for use := fn.FirstUse(); !use.IsNil(); use = use.NextUse() {
call := use.User()
c.builder.SetInsertPointBefore(llvm.NextInstruction(call))
ptr := call
if ptr.Type() != c.i8ptr {
ptr = c.builder.CreateBitCast(call, c.i8ptr, "")
}
c.builder.CreateCall(trackPointer, []llvm.Value{ptr, llvm.Undef(c.i8ptr), llvm.Undef(c.i8ptr)}, "")
}
}
return nil
}
| 1 | 13,029 |
I don't think alloca instructions are always at the start of the entry block. I think it would be safer to check the entire entry block for alloca instructions, just in case some are not the first instruction. (Technically they can be anywhere in the function but we check in other places that this isn't possible).
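For illustration, a hedged sketch of such a check, using only go-llvm calls that already appear in this file (the helper name is made up):

func entryBlockContainsAlloca(fn llvm.Value) bool {
	// Walk the whole entry block instead of looking only at its first instruction.
	for inst := fn.EntryBasicBlock().FirstInstruction(); !inst.IsNil(); inst = llvm.NextInstruction(inst) {
		if !inst.IsAAllocaInst().IsNil() {
			return true
		}
	}
	return false
}

The condition in lowerFuncsPass could then test this helper rather than inspecting FirstBasicBlock().FirstInstruction() directly.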
|
tinygo-org-tinygo
|
go
|
@@ -135,11 +135,19 @@ func chainsForIfaces(ifaceMetadata []string,
epMarkMapper rules.EndpointMarkMapper,
host bool,
tableKind string) []*iptables.Chain {
+ const (
+ ProtoUDP = 17
+ ProtoIPIP = 4
+ VXLANPort = 0
+ VXLANVNI = 0
+ )
+
log.WithFields(log.Fields{
"ifaces": ifaceMetadata,
"host": host,
"tableKind": tableKind,
}).Debug("Calculating chains for interface")
+
chains := []*iptables.Chain{}
dispatchOut := []iptables.Rule{}
dispatchIn := []iptables.Rule{}
| 1 |
// Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"errors"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/ip"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/testutils"
"github.com/projectcalico/libcalico-go/lib/set"
)
var wlDispatchEmpty = []*iptables.Chain{
{
Name: "cali-to-wl-dispatch",
Rules: []iptables.Rule{
{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
},
},
{
Name: "cali-from-wl-dispatch",
Rules: []iptables.Rule{
{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
},
},
{
Name: "cali-from-endpoint-mark",
Rules: []iptables.Rule{
{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
},
},
{
Name: "cali-set-endpoint-mark",
Rules: []iptables.Rule{
iptables.Rule{
Match: iptables.Match().InInterface("cali+"),
Action: iptables.DropAction{},
Comment: "Unknown endpoint",
},
iptables.Rule{
Match: iptables.Match().InInterface("tap+"),
Action: iptables.DropAction{},
Comment: "Unknown endpoint",
},
{
Action: iptables.SetMaskedMarkAction{Mark: 0x0100, Mask: 0xff00},
Comment: "Non-Cali endpoint mark",
},
},
},
}
var hostDispatchEmptyNormal = []*iptables.Chain{
{
Name: "cali-to-host-endpoint",
Rules: []iptables.Rule{},
},
{
Name: "cali-from-host-endpoint",
Rules: []iptables.Rule{},
},
}
var hostDispatchEmptyForward = []*iptables.Chain{
{
Name: "cali-to-hep-forward",
Rules: []iptables.Rule{},
},
{
Name: "cali-from-hep-forward",
Rules: []iptables.Rule{},
},
}
var fromHostDispatchEmpty = []*iptables.Chain{
{
Name: "cali-from-host-endpoint",
Rules: []iptables.Rule{},
},
}
func hostChainsForIfaces(ifaceMetadata []string, epMarkMapper rules.EndpointMarkMapper) []*iptables.Chain {
return append(chainsForIfaces(ifaceMetadata, epMarkMapper, true, "normal"),
chainsForIfaces(ifaceMetadata, epMarkMapper, true, "applyOnForward")...,
)
}
func rawChainsForIfaces(ifaceMetadata []string, epMarkMapper rules.EndpointMarkMapper) []*iptables.Chain {
return chainsForIfaces(ifaceMetadata, epMarkMapper, true, "untracked")
}
func preDNATChainsForIfaces(ifaceMetadata []string, epMarkMapper rules.EndpointMarkMapper) []*iptables.Chain {
return chainsForIfaces(ifaceMetadata, epMarkMapper, true, "preDNAT")
}
func wlChainsForIfaces(ifaceMetadata []string, epMarkMapper rules.EndpointMarkMapper) []*iptables.Chain {
return chainsForIfaces(ifaceMetadata, epMarkMapper, false, "normal")
}
func chainsForIfaces(ifaceMetadata []string,
epMarkMapper rules.EndpointMarkMapper,
host bool,
tableKind string) []*iptables.Chain {
log.WithFields(log.Fields{
"ifaces": ifaceMetadata,
"host": host,
"tableKind": tableKind,
}).Debug("Calculating chains for interface")
chains := []*iptables.Chain{}
dispatchOut := []iptables.Rule{}
dispatchIn := []iptables.Rule{}
epMarkSet := []iptables.Rule{}
epMarkFrom := []iptables.Rule{}
hostOrWlLetter := "w"
hostOrWlDispatch := "wl-dispatch"
outPrefix := "cali-from-"
inPrefix := "cali-to-"
epMarkSetName := "cali-set-endpoint-mark"
epMarkFromName := "cali-from-endpoint-mark"
epMarkSetOnePrefix := "cali-sm-"
epmarkFromPrefix := outPrefix[:6]
if host {
hostOrWlLetter = "h"
hostOrWlDispatch = "host-endpoint"
if tableKind == "applyOnForward" {
hostOrWlLetter = "hfw"
hostOrWlDispatch = "hep-forward"
}
outPrefix = "cali-to-"
inPrefix = "cali-from-"
epmarkFromPrefix = inPrefix[:6]
}
for _, ifaceMetadata := range ifaceMetadata {
var ifaceName, polName string
nameParts := strings.Split(ifaceMetadata, "_")
ifaceKind := "normal"
ingress := true
egress := true
if len(nameParts) == 1 {
// Just an interface name "eth0", apply no tweaks.
log.Debug("Interface name only")
ifaceName = nameParts[0]
polName = ""
} else if len(nameParts) == 2 {
// Interface name and a policy name "eth0_polA".
log.Debug("Interface name and policy name")
ifaceName = nameParts[0]
polName = nameParts[1]
} else {
// Interface name, policy name and untracked "eth0_polA_untracked"
// or applyOnForward "eth0_polA_applyOnForward".
log.Debug("Interface name policy name and untracked/ingress/egress")
ifaceName = nameParts[0]
polName = nameParts[1]
switch nameParts[2] {
case "ingress":
egress = false
case "egress":
ingress = false
default:
ifaceKind = nameParts[2]
}
}
epMark, err := epMarkMapper.GetEndpointMark(ifaceName)
if err != nil {
log.WithFields(log.Fields{
"ifaces": ifaceMetadata,
"host": host,
"tableKind": tableKind,
}).Debug("Failed to get endpoint mark for interface")
continue
}
if tableKind != ifaceKind && tableKind != "normal" && tableKind != "applyOnForward" {
continue
}
outRules := []iptables.Rule{}
if tableKind != "untracked" {
outRules = append(outRules,
iptables.Rule{
Match: iptables.Match().ConntrackState("RELATED,ESTABLISHED"),
Action: iptables.AcceptAction{},
},
)
outRules = append(outRules, iptables.Rule{
Match: iptables.Match().ConntrackState("INVALID"),
Action: iptables.DropAction{},
})
}
if host && tableKind != "applyOnForward" {
outRules = append(outRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.JumpAction{Target: "cali-failsafe-out"},
})
}
outRules = append(outRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.ClearMarkAction{Mark: 8},
})
if egress && polName != "" && tableKind == ifaceKind {
outRules = append(outRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.ClearMarkAction{Mark: 16},
Comment: "Start of policies",
})
outRules = append(outRules, iptables.Rule{
Match: iptables.Match().MarkClear(16),
Action: iptables.JumpAction{Target: "cali-po-" + polName},
})
if tableKind == "untracked" {
outRules = append(outRules, iptables.Rule{
Match: iptables.Match().MarkSingleBitSet(8),
Action: iptables.NoTrackAction{},
})
}
outRules = append(outRules, iptables.Rule{
Match: iptables.Match().MarkSingleBitSet(8),
Action: iptables.ReturnAction{},
Comment: "Return if policy accepted",
})
if tableKind == "normal" || tableKind == "applyOnForward" {
// Only end with a drop rule in the filter chain. In the raw chain,
// we consider the policy as unfinished, because some of the
// policy may live in the filter chain.
outRules = append(outRules, iptables.Rule{
Match: iptables.Match().MarkClear(16),
Action: iptables.DropAction{},
Comment: "Drop if no policies passed packet",
})
}
} else if tableKind == "applyOnForward" {
// Expect forwarded traffic to be allowed when there are no
// applicable policies.
outRules = append(outRules, iptables.Rule{
Action: iptables.SetMarkAction{Mark: 8},
Comment: "Allow forwarded traffic by default",
})
outRules = append(outRules, iptables.Rule{
Action: iptables.ReturnAction{},
Comment: "Return for accepted forward traffic",
})
}
if tableKind == "normal" {
outRules = append(outRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Drop if no profiles matched",
})
}
inRules := []iptables.Rule{}
if tableKind != "untracked" {
inRules = append(inRules,
iptables.Rule{
Match: iptables.Match().ConntrackState("RELATED,ESTABLISHED"),
Action: iptables.AcceptAction{},
},
)
inRules = append(inRules, iptables.Rule{
Match: iptables.Match().ConntrackState("INVALID"),
Action: iptables.DropAction{},
})
}
if host && tableKind != "applyOnForward" {
inRules = append(inRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.JumpAction{Target: "cali-failsafe-in"},
})
}
inRules = append(inRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.ClearMarkAction{Mark: 8},
})
if ingress && polName != "" && tableKind == ifaceKind {
inRules = append(inRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.ClearMarkAction{Mark: 16},
Comment: "Start of policies",
})
// For untracked policy, we expect a tier with a policy in it.
inRules = append(inRules, iptables.Rule{
Match: iptables.Match().MarkClear(16),
Action: iptables.JumpAction{Target: "cali-pi-" + polName},
})
if tableKind == "untracked" {
inRules = append(inRules, iptables.Rule{
Match: iptables.Match().MarkSingleBitSet(8),
Action: iptables.NoTrackAction{},
})
}
inRules = append(inRules, iptables.Rule{
Match: iptables.Match().MarkSingleBitSet(8),
Action: iptables.ReturnAction{},
Comment: "Return if policy accepted",
})
if tableKind == "normal" || tableKind == "applyOnForward" {
// Only end with a drop rule in the filter chain. In the raw chain,
// we consider the policy as unfinished, because some of the
// policy may live in the filter chain.
inRules = append(inRules, iptables.Rule{
Match: iptables.Match().MarkClear(16),
Action: iptables.DropAction{},
Comment: "Drop if no policies passed packet",
})
}
} else if tableKind == "applyOnForward" {
// Expect forwarded traffic to be allowed when there are no
// applicable policies.
inRules = append(inRules, iptables.Rule{
Action: iptables.SetMarkAction{Mark: 8},
Comment: "Allow forwarded traffic by default",
})
inRules = append(inRules, iptables.Rule{
Action: iptables.ReturnAction{},
Comment: "Return for accepted forward traffic",
})
}
if tableKind == "normal" {
inRules = append(inRules, iptables.Rule{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Drop if no profiles matched",
})
}
if tableKind == "preDNAT" {
chains = append(chains,
&iptables.Chain{
Name: inPrefix[:6] + hostOrWlLetter + "-" + ifaceName,
Rules: inRules,
},
)
} else {
chains = append(chains,
&iptables.Chain{
Name: outPrefix[:6] + hostOrWlLetter + "-" + ifaceName,
Rules: outRules,
},
&iptables.Chain{
Name: inPrefix[:6] + hostOrWlLetter + "-" + ifaceName,
Rules: inRules,
},
)
}
if host {
dispatchOut = append(dispatchOut,
iptables.Rule{
Match: iptables.Match().OutInterface(ifaceName),
Action: iptables.GotoAction{Target: outPrefix[:6] + hostOrWlLetter + "-" + ifaceName},
},
)
dispatchIn = append(dispatchIn,
iptables.Rule{
Match: iptables.Match().InInterface(ifaceName),
Action: iptables.GotoAction{Target: inPrefix[:6] + hostOrWlLetter + "-" + ifaceName},
},
)
} else {
dispatchOut = append(dispatchOut,
iptables.Rule{
Match: iptables.Match().InInterface(ifaceName),
Action: iptables.GotoAction{Target: outPrefix[:6] + hostOrWlLetter + "-" + ifaceName},
},
)
dispatchIn = append(dispatchIn,
iptables.Rule{
Match: iptables.Match().OutInterface(ifaceName),
Action: iptables.GotoAction{Target: inPrefix[:6] + hostOrWlLetter + "-" + ifaceName},
},
)
}
if tableKind != "preDNAT" && tableKind != "untracked" {
chains = append(chains,
&iptables.Chain{
Name: epMarkSetOnePrefix + ifaceName,
Rules: []iptables.Rule{
iptables.Rule{
Action: iptables.SetMaskedMarkAction{Mark: epMark, Mask: epMarkMapper.GetMask()},
},
},
},
)
epMarkSet = append(epMarkSet,
iptables.Rule{
Match: iptables.Match().InInterface(ifaceName),
Action: iptables.GotoAction{Target: epMarkSetOnePrefix + ifaceName},
},
)
epMarkFrom = append(epMarkFrom,
iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(epMark, epMarkMapper.GetMask()),
Action: iptables.GotoAction{Target: epmarkFromPrefix + hostOrWlLetter + "-" + ifaceName},
},
)
}
}
if !host {
dispatchOut = append(dispatchOut,
iptables.Rule{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
)
dispatchIn = append(dispatchIn,
iptables.Rule{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
)
}
if tableKind != "preDNAT" && tableKind != "untracked" {
epMarkSet = append(epMarkSet,
iptables.Rule{
Match: iptables.Match().InInterface("cali+"),
Action: iptables.DropAction{},
Comment: "Unknown endpoint",
},
iptables.Rule{
Match: iptables.Match().InInterface("tap+"),
Action: iptables.DropAction{},
Comment: "Unknown endpoint",
},
iptables.Rule{
Action: iptables.SetMaskedMarkAction{Mark: 0x0100, Mask: 0xff00},
Comment: "Non-Cali endpoint mark",
},
)
epMarkFrom = append(epMarkFrom,
iptables.Rule{
Match: iptables.Match(),
Action: iptables.DropAction{},
Comment: "Unknown interface",
},
)
chains = append(chains,
&iptables.Chain{
Name: epMarkSetName,
Rules: epMarkSet,
},
&iptables.Chain{
Name: epMarkFromName,
Rules: epMarkFrom,
},
)
}
if tableKind == "preDNAT" {
chains = append(chains,
&iptables.Chain{
Name: inPrefix + hostOrWlDispatch,
Rules: dispatchIn,
},
)
} else {
chains = append(chains,
&iptables.Chain{
Name: outPrefix + hostOrWlDispatch,
Rules: dispatchOut,
},
&iptables.Chain{
Name: inPrefix + hostOrWlDispatch,
Rules: dispatchIn,
},
)
}
return chains
}
type mockRouteTable struct {
currentRoutes map[string][]routetable.Target
currentL2Routes map[string][]routetable.L2Target
}
func (t *mockRouteTable) SetRoutes(ifaceName string, targets []routetable.Target) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"targets": targets,
}).Debug("SetRoutes")
t.currentRoutes[ifaceName] = targets
}
func (t *mockRouteTable) SetL2Routes(ifaceName string, targets []routetable.L2Target) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"targets": targets,
}).Debug("SetL2Routes")
t.currentL2Routes[ifaceName] = targets
}
func (t *mockRouteTable) checkRoutes(ifaceName string, expected []routetable.Target) {
Expect(t.currentRoutes[ifaceName]).To(Equal(expected))
}
func (t *mockRouteTable) checkL2Routes(ifaceName string, expected []routetable.Target) {
Expect(t.currentL2Routes[ifaceName]).To(Equal(expected))
}
type statusReportRecorder struct {
currentState map[interface{}]string
}
func (r *statusReportRecorder) endpointStatusUpdateCallback(ipVersion uint8, id interface{}, status string) {
log.WithFields(log.Fields{
"ipVersion": ipVersion,
"id": id,
"status": status,
}).Debug("endpointStatusUpdateCallback")
if status == "" {
delete(r.currentState, id)
} else {
r.currentState[id] = status
}
}
type hostEpSpec struct {
id string
name string
ipv4Addrs []string
ipv6Addrs []string
polName string
}
func endpointManagerTests(ipVersion uint8) func() {
return func() {
const (
ipv4 = "10.0.240.10"
ipv4Eth1 = "10.0.240.30"
ipv6 = "2001:db8::10.0.240.10"
)
var (
epMgr *endpointManager
rawTable *mockTable
mangleTable *mockTable
filterTable *mockTable
rrConfigNormal rules.Config
eth0Addrs set.Set
loAddrs set.Set
eth1Addrs set.Set
routeTable *mockRouteTable
mockProcSys *testProcSys
statusReportRec *statusReportRecorder
)
BeforeEach(func() {
rrConfigNormal = rules.Config{
IPIPEnabled: true,
IPIPTunnelAddress: nil,
IPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, "cali", nil, nil),
IPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, "cali", nil, nil),
IptablesMarkAccept: 0x8,
IptablesMarkPass: 0x10,
IptablesMarkScratch0: 0x20,
IptablesMarkScratch1: 0x40,
IptablesMarkEndpoint: 0xff00,
IptablesMarkNonCaliEndpoint: 0x0100,
KubeIPVSSupportEnabled: true,
WorkloadIfacePrefixes: []string{"cali", "tap"},
}
eth0Addrs = set.New()
eth0Addrs.Add(ipv4)
eth0Addrs.Add(ipv6)
loAddrs = set.New()
loAddrs.Add("127.0.1.1")
loAddrs.Add("::1")
eth1Addrs = set.New()
eth1Addrs.Add(ipv4Eth1)
})
JustBeforeEach(func() {
renderer := rules.NewRenderer(rrConfigNormal)
rawTable = newMockTable("raw")
mangleTable = newMockTable("mangle")
filterTable = newMockTable("filter")
routeTable = &mockRouteTable{
currentRoutes: map[string][]routetable.Target{},
}
mockProcSys = &testProcSys{state: map[string]string{}}
statusReportRec = &statusReportRecorder{currentState: map[interface{}]string{}}
epMgr = newEndpointManagerWithShims(
rawTable,
mangleTable,
filterTable,
renderer,
routeTable,
ipVersion,
rules.NewEndpointMarkMapper(rrConfigNormal.IptablesMarkEndpoint, rrConfigNormal.IptablesMarkNonCaliEndpoint),
rrConfigNormal.KubeIPVSSupportEnabled,
[]string{"cali"},
statusReportRec.endpointStatusUpdateCallback,
mockProcSys.write,
newCallbacks(),
)
})
It("should be constructable", func() {
Expect(epMgr).ToNot(BeNil())
})
configureHostEp := func(spec *hostEpSpec) func() {
tiers := []*proto.TierInfo{}
untrackedTiers := []*proto.TierInfo{}
preDNATTiers := []*proto.TierInfo{}
forwardTiers := []*proto.TierInfo{}
if spec.polName != "" {
parts := strings.Split(spec.polName, "_")
if len(parts) == 1 {
tiers = append(tiers, &proto.TierInfo{
Name: "default",
IngressPolicies: []string{spec.polName},
EgressPolicies: []string{spec.polName},
})
} else if len(parts) == 2 && parts[1] == "untracked" {
untrackedTiers = append(untrackedTiers, &proto.TierInfo{
Name: "default",
IngressPolicies: []string{parts[0]},
EgressPolicies: []string{parts[0]},
})
} else if len(parts) == 2 && parts[1] == "preDNAT" {
preDNATTiers = append(preDNATTiers, &proto.TierInfo{
Name: "default",
IngressPolicies: []string{parts[0]},
})
} else if len(parts) == 2 && parts[1] == "applyOnForward" {
forwardTiers = append(forwardTiers, &proto.TierInfo{
Name: "default",
IngressPolicies: []string{parts[0]},
EgressPolicies: []string{parts[0]},
})
} else if len(parts) == 2 && parts[1] == "ingress" {
tiers = append(tiers, &proto.TierInfo{
Name: "default",
IngressPolicies: []string{parts[0]},
})
} else if len(parts) == 2 && parts[1] == "egress" {
tiers = append(tiers, &proto.TierInfo{
Name: "default",
EgressPolicies: []string{parts[0]},
})
} else {
panic("Failed to parse policy name " + spec.polName)
}
}
return func() {
epMgr.OnUpdate(&proto.HostEndpointUpdate{
Id: &proto.HostEndpointID{
EndpointId: spec.id,
},
Endpoint: &proto.HostEndpoint{
Name: spec.name,
ProfileIds: []string{},
Tiers: tiers,
UntrackedTiers: untrackedTiers,
PreDnatTiers: preDNATTiers,
ForwardTiers: forwardTiers,
ExpectedIpv4Addrs: spec.ipv4Addrs,
ExpectedIpv6Addrs: spec.ipv6Addrs,
},
})
epMgr.CompleteDeferredWork()
}
}
expectChainsFor := func(names ...string) func() {
return func() {
filterTable.checkChains([][]*iptables.Chain{
wlDispatchEmpty,
hostChainsForIfaces(names, epMgr.epMarkMapper),
})
rawTable.checkChains([][]*iptables.Chain{
rawChainsForIfaces(names, epMgr.epMarkMapper),
})
mangleTable.checkChains([][]*iptables.Chain{
preDNATChainsForIfaces(names, epMgr.epMarkMapper),
})
}
}
expectEmptyChains := func() func() {
return func() {
filterTable.checkChains([][]*iptables.Chain{
wlDispatchEmpty,
hostDispatchEmptyNormal,
hostDispatchEmptyForward,
})
rawTable.checkChains([][]*iptables.Chain{
hostDispatchEmptyNormal,
})
mangleTable.checkChains([][]*iptables.Chain{
fromHostDispatchEmpty,
})
}
}
removeHostEp := func(id string) func() {
return func() {
epMgr.OnUpdate(&proto.HostEndpointRemove{
Id: &proto.HostEndpointID{
EndpointId: id,
},
})
epMgr.CompleteDeferredWork()
}
}
Context("with host interfaces eth0, lo", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&ifaceUpdate{
Name: "eth0",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "eth0",
Addrs: eth0Addrs,
})
epMgr.OnUpdate(&ifaceUpdate{
Name: "lo",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "lo",
Addrs: loAddrs,
})
epMgr.CompleteDeferredWork()
})
It("should have empty dispatch chains", expectEmptyChains())
It("should make no status reports", func() {
Expect(statusReportRec.currentState).To(BeEmpty())
})
// Configure host endpoints with tier names here, so we can check which of
// the host endpoints gets used in the programming for a particular host
// interface. When more than one host endpoint matches a given interface,
// we expect the one used to be the one with the alphabetically earliest ID.
Describe("with host endpoint with tier matching eth0", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA",
}))
It("should have expected chains", expectChainsFor("eth0_polA"))
It("should report id1 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
}))
})
Context("with another host ep (>ID) that matches the IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id2",
ipv4Addrs: []string{ipv4},
polName: "polB",
}))
It("should have expected chains", expectChainsFor("eth0_polA"))
It("should report id1 up, but id2 now in error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
proto.HostEndpointID{EndpointId: "id2"}: "error",
}))
})
Context("with the first host ep removed", func() {
JustBeforeEach(removeHostEp("id1"))
It("should have expected chains", expectChainsFor("eth0_polB"))
It("should report id2 up only", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id2"}: "up",
}))
})
Context("with both host eps removed", func() {
JustBeforeEach(removeHostEp("id2"))
It("should have empty dispatch chains", expectEmptyChains())
})
})
})
Context("with another host ep (<ID) that matches the IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB",
}))
It("should have expected chains", expectChainsFor("eth0_polB"))
It("should report id0 up, but id1 now in error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id0"}: "up",
proto.HostEndpointID{EndpointId: "id1"}: "error",
}))
})
Context("with the first host ep removed", func() {
JustBeforeEach(removeHostEp("id1"))
It("should have expected chains", expectChainsFor("eth0_polB"))
It("should report id0 up only", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id0"}: "up",
}))
})
Context("with both host eps removed", func() {
JustBeforeEach(removeHostEp("id0"))
It("should have empty dispatch chains", expectEmptyChains())
It("should remove all status reports", func() {
Expect(statusReportRec.currentState).To(BeEmpty())
})
})
})
})
Describe("replaced with untracked version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_untracked",
}))
It("should have expected chains", expectChainsFor("eth0_polA_untracked"))
})
Describe("replaced with applyOnForward version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_applyOnForward",
}))
It("should have expected chains", expectChainsFor("eth0_polA_applyOnForward"))
})
Describe("replaced with pre-DNAT version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_preDNAT",
}))
It("should have expected chains", expectChainsFor("eth0_polA_preDNAT"))
})
Describe("replaced with ingress-only version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_ingress",
}))
It("should have expected chains", expectChainsFor("eth0_polA_ingress"))
})
Describe("replaced with egress-only version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_egress",
}))
It("should have expected chains", expectChainsFor("eth0_polA_egress"))
})
})
Describe("with host endpoint with untracked tier matching eth0", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_untracked",
}))
It("should have expected chains", expectChainsFor("eth0_polA_untracked"))
Context("with another host ep (<ID) that matches the IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_untracked",
}))
It("should have expected chains", expectChainsFor("eth0_polB_untracked"))
Context("with the first host ep removed", func() {
JustBeforeEach(removeHostEp("id1"))
It("should have expected chains", expectChainsFor("eth0_polB_untracked"))
Context("with both host eps removed", func() {
JustBeforeEach(removeHostEp("id0"))
It("should have empty dispatch chains", expectEmptyChains())
})
})
})
Describe("replaced with a tracked version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA",
}))
It("should have expected chains", expectChainsFor("eth0_polA"))
})
})
Context("with a host ep that matches the IPv4 address with untracked policy", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_untracked",
}))
It("should have expected chains", expectChainsFor("eth0_polB_untracked"))
})
Describe("with host endpoint with applyOnForward tier matching eth0", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_applyOnForward",
}))
It("should have expected chains", expectChainsFor("eth0_polA_applyOnForward"))
Context("with another host ep (<ID) that matches the IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_applyOnForward",
}))
It("should have expected chains", expectChainsFor("eth0_polB_applyOnForward"))
Context("with the first host ep removed", func() {
JustBeforeEach(removeHostEp("id1"))
It("should have expected chains", expectChainsFor("eth0_polB_applyOnForward"))
Context("with both host eps removed", func() {
JustBeforeEach(removeHostEp("id0"))
It("should have empty dispatch chains", expectEmptyChains())
})
})
})
Describe("replaced with a tracked version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA",
}))
It("should have expected chains", expectChainsFor("eth0_polA"))
})
})
Context("with a host ep that matches the IPv4 address with applyOnForward policy", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_applyOnForward",
}))
It("should have expected chains", expectChainsFor("eth0_polB_applyOnForward"))
})
Describe("with host endpoint with pre-DNAT tier matching eth0", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA_preDNAT",
}))
It("should have expected chains", expectChainsFor("eth0_polA_preDNAT"))
Context("with another host ep (<ID) that matches the IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_preDNAT",
}))
It("should have expected chains", expectChainsFor("eth0_polB_preDNAT"))
Context("with the first host ep removed", func() {
JustBeforeEach(removeHostEp("id1"))
It("should have expected chains", expectChainsFor("eth0_polB_preDNAT"))
Context("with both host eps removed", func() {
JustBeforeEach(removeHostEp("id0"))
It("should have empty dispatch chains", expectEmptyChains())
})
})
})
Describe("replaced with a tracked version", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
polName: "polA",
}))
It("should have expected chains", expectChainsFor("eth0_polA"))
})
})
Context("with a host ep that matches the IPv4 address with pre-DNAT policy", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4},
polName: "polB_preDNAT",
}))
It("should have expected chains", expectChainsFor("eth0_polB_preDNAT"))
})
Describe("with host endpoint matching eth0", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id1",
name: "eth0",
}))
It("should have expected chains", expectChainsFor("eth0"))
It("should report id1 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
}))
})
Context("with another host interface eth1", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&ifaceUpdate{
Name: "eth1",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "eth1",
Addrs: eth1Addrs,
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", expectChainsFor("eth0"))
It("should report id1 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
}))
})
Context("with host ep matching eth1's IP", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id22",
ipv4Addrs: []string{ipv4Eth1},
}))
It("should have expected chains", expectChainsFor("eth0", "eth1"))
It("should report id1 and id22 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
proto.HostEndpointID{EndpointId: "id22"}: "up",
}))
})
})
Context("with host ep matching both eth0 and eth1 IPs", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id0",
ipv4Addrs: []string{ipv4Eth1, ipv4},
}))
It("should have expected chains", expectChainsFor("eth0", "eth1"))
// The "id0" host endpoint matches both eth0 and
// eth1, and is preferred for eth0 over "id1"
// because of alphabetical ordering. "id1" is then
// unused, and so reported as in error.
It("should report id1 error and id0 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "error",
proto.HostEndpointID{EndpointId: "id0"}: "up",
}))
})
})
Context("with host ep matching eth1", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id22",
name: "eth1",
}))
It("should have expected chains", expectChainsFor("eth0", "eth1"))
It("should report id1 and id22 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id1"}: "up",
proto.HostEndpointID{EndpointId: "id22"}: "up",
}))
})
})
})
})
Describe("with host endpoint matching non-existent interface", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth1",
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report endpoint in error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "error",
}))
})
})
Describe("with host endpoint matching IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id4",
ipv4Addrs: []string{ipv4},
}))
It("should have expected chains", expectChainsFor("eth0"))
It("should report id4 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id4"}: "up",
}))
})
})
Describe("with host endpoint matching IPv6 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id5",
ipv6Addrs: []string{ipv6},
}))
It("should have expected chains", expectChainsFor("eth0"))
It("should report id5 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id5"}: "up",
}))
})
})
Describe("with host endpoint matching IPv4 address and correct interface name", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth0",
ipv4Addrs: []string{ipv4},
}))
It("should have expected chains", expectChainsFor("eth0"))
It("should report id3 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "up",
}))
})
})
Describe("with host endpoint matching IPv6 address and correct interface name", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth0",
ipv6Addrs: []string{ipv6},
}))
It("should have expected chains", expectChainsFor("eth0"))
It("should report id3 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "up",
}))
})
})
Describe("with host endpoint matching IPv4 address and wrong interface name", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth1",
ipv4Addrs: []string{ipv4},
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report id3 error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "error",
}))
})
})
Describe("with host endpoint matching IPv6 address and wrong interface name", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth1",
ipv6Addrs: []string{ipv6},
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report id3 error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "error",
}))
})
})
Describe("with host endpoint with unmatched IPv4 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id4",
ipv4Addrs: []string{"8.8.8.8"},
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report id4 error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id4"}: "error",
}))
})
})
Describe("with host endpoint with unmatched IPv6 address", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id5",
ipv6Addrs: []string{"fe08::2"},
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report id5 error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id5"}: "error",
}))
})
})
})
Context("with host endpoint configured before interface signaled", func() {
JustBeforeEach(configureHostEp(&hostEpSpec{
id: "id3",
name: "eth0",
}))
It("should have empty dispatch chains", expectEmptyChains())
It("should report id3 error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "error",
}))
})
Context("with interface signaled", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&ifaceUpdate{
Name: "eth0",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "eth0",
Addrs: eth0Addrs,
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", expectChainsFor("eth0"))
It("should report id3 up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
proto.HostEndpointID{EndpointId: "id3"}: "up",
}))
})
})
})
expectWlChainsFor := func(names ...string) func() {
return func() {
filterTable.checkChains([][]*iptables.Chain{
hostDispatchEmptyNormal,
hostDispatchEmptyForward,
wlChainsForIfaces(names, epMgr.epMarkMapper),
})
mangleTable.checkChains([][]*iptables.Chain{
fromHostDispatchEmpty,
})
}
}
Describe("workload endpoints", func() {
Context("with a workload endpoint", func() {
wlEPID1 := proto.WorkloadEndpointID{
OrchestratorId: "k8s",
WorkloadId: "pod-11",
EndpointId: "endpoint-id-11",
}
var tiers []*proto.TierInfo
BeforeEach(func() {
tiers = []*proto.TierInfo{}
})
JustBeforeEach(func() {
epMgr.OnUpdate(&proto.WorkloadEndpointUpdate{
Id: &wlEPID1,
Endpoint: &proto.WorkloadEndpoint{
State: "active",
Mac: "01:02:03:04:05:06",
Name: "cali12345-ab",
ProfileIds: []string{},
Tiers: tiers,
Ipv4Nets: []string{"10.0.240.2/24"},
Ipv6Nets: []string{"2001:db8:2::2/128"},
},
})
epMgr.CompleteDeferredWork()
})
Context("with policy", func() {
BeforeEach(func() {
tiers = []*proto.TierInfo{&proto.TierInfo{
Name: "default",
IngressPolicies: []string{"policy1"},
EgressPolicies: []string{"policy1"},
}}
})
It("should have expected chains", expectWlChainsFor("cali12345-ab_policy1"))
})
Context("with ingress-only policy", func() {
BeforeEach(func() {
tiers = []*proto.TierInfo{&proto.TierInfo{
Name: "default",
IngressPolicies: []string{"policy1"},
}}
})
It("should have expected chains", expectWlChainsFor("cali12345-ab_policy1_ingress"))
})
Context("with egress-only policy", func() {
BeforeEach(func() {
tiers = []*proto.TierInfo{&proto.TierInfo{
Name: "default",
EgressPolicies: []string{"policy1"},
}}
})
It("should have expected chains", expectWlChainsFor("cali12345-ab_policy1_egress"))
})
It("should have expected chains", expectWlChainsFor("cali12345-ab"))
It("should set routes", func() {
if ipVersion == 6 {
routeTable.checkRoutes("cali12345-ab", []routetable.Target{{
CIDR: ip.MustParseCIDROrIP("2001:db8:2::2/128"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
}})
} else {
routeTable.checkRoutes("cali12345-ab", []routetable.Target{{
CIDR: ip.MustParseCIDROrIP("10.0.240.0/24"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
}})
}
})
It("should report endpoint down", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
wlEPID1: "down",
}))
})
Context("with updates for the workload's iface and proc/sys failure", func() {
JustBeforeEach(func() {
mockProcSys.Fail = true
epMgr.OnUpdate(&ifaceUpdate{
Name: "cali12345-ab",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "cali12345-ab",
Addrs: set.New(),
})
epMgr.CompleteDeferredWork()
})
It("should report the interface in error", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
wlEPID1: "error",
}))
})
})
Context("with updates for the workload's iface", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&ifaceUpdate{
Name: "cali12345-ab",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "cali12345-ab",
Addrs: set.New(),
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", expectWlChainsFor("cali12345-ab"))
It("should report endpoint up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
wlEPID1: "up",
}))
})
It("should write /proc/sys entries", func() {
if ipVersion == 6 {
mockProcSys.checkState(map[string]string{
"/proc/sys/net/ipv6/conf/cali12345-ab/proxy_ndp": "1",
"/proc/sys/net/ipv6/conf/cali12345-ab/forwarding": "1",
})
} else {
mockProcSys.checkState(map[string]string{
"/proc/sys/net/ipv4/conf/cali12345-ab/forwarding": "1",
"/proc/sys/net/ipv4/conf/cali12345-ab/rp_filter": "1",
"/proc/sys/net/ipv4/conf/cali12345-ab/route_localnet": "1",
"/proc/sys/net/ipv4/conf/cali12345-ab/proxy_arp": "1",
"/proc/sys/net/ipv4/neigh/cali12345-ab/proxy_delay": "0",
})
}
})
Context("with floating IPs added to the endpoint", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&proto.WorkloadEndpointUpdate{
Id: &wlEPID1,
Endpoint: &proto.WorkloadEndpoint{
State: "active",
Mac: "01:02:03:04:05:06",
Name: "cali12345-ab",
ProfileIds: []string{},
Tiers: []*proto.TierInfo{},
Ipv4Nets: []string{"10.0.240.2/24"},
Ipv6Nets: []string{"2001:db8:2::2/128"},
Ipv4Nat: []*proto.NatInfo{
{ExtIp: "172.16.1.3", IntIp: "10.0.240.2"},
{ExtIp: "172.18.1.4", IntIp: "10.0.240.2"},
},
Ipv6Nat: []*proto.NatInfo{
{ExtIp: "2001:db8:3::2", IntIp: "2001:db8:2::2"},
{ExtIp: "2001:db8:4::2", IntIp: "2001:db8:4::2"},
},
},
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", expectWlChainsFor("cali12345-ab"))
It("should set routes", func() {
if ipVersion == 6 {
routeTable.checkRoutes("cali12345-ab", []routetable.Target{
{
CIDR: ip.MustParseCIDROrIP("2001:db8:2::2/128"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
{
CIDR: ip.MustParseCIDROrIP("2001:db8:3::2/128"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
{
CIDR: ip.MustParseCIDROrIP("2001:db8:4::2/128"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
})
} else {
routeTable.checkRoutes("cali12345-ab", []routetable.Target{
{
CIDR: ip.MustParseCIDROrIP("10.0.240.0/24"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
{
CIDR: ip.MustParseCIDROrIP("172.16.1.3/32"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
{
CIDR: ip.MustParseCIDROrIP("172.18.1.4/32"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
},
})
}
})
})
Context("with the endpoint removed", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&proto.WorkloadEndpointRemove{
Id: &wlEPID1,
})
epMgr.CompleteDeferredWork()
})
It("should have empty dispatch chains", expectEmptyChains())
It("should have removed routes", func() {
routeTable.checkRoutes("cali12345-ab", nil)
})
It("should report endpoint gone", func() {
Expect(statusReportRec.currentState).To(BeEmpty())
})
})
Context("changing the endpoint to another up interface", func() {
JustBeforeEach(func() {
epMgr.OnUpdate(&ifaceUpdate{
Name: "cali12345-cd",
State: "up",
})
epMgr.OnUpdate(&ifaceAddrsUpdate{
Name: "cali12345-cd",
Addrs: set.New(),
})
epMgr.OnUpdate(&proto.WorkloadEndpointUpdate{
Id: &wlEPID1,
Endpoint: &proto.WorkloadEndpoint{
State: "active",
Mac: "01:02:03:04:05:06",
Name: "cali12345-cd",
ProfileIds: []string{},
Tiers: []*proto.TierInfo{},
Ipv4Nets: []string{"10.0.240.2/24"},
Ipv6Nets: []string{"2001:db8:2::2/128"},
},
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", expectWlChainsFor("cali12345-cd"))
It("should have removed routes for old iface", func() {
routeTable.checkRoutes("cali12345-ab", nil)
})
It("should report endpoint up", func() {
Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{
wlEPID1: "up",
}))
})
It("should have set routes for new iface", func() {
if ipVersion == 6 {
routeTable.checkRoutes("cali12345-cd", []routetable.Target{{
CIDR: ip.MustParseCIDROrIP("2001:db8:2::2/128"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
}})
} else {
routeTable.checkRoutes("cali12345-cd", []routetable.Target{{
CIDR: ip.MustParseCIDROrIP("10.0.240.0/24"),
DestMAC: testutils.MustParseMAC("01:02:03:04:05:06"),
}})
}
})
})
})
})
Context("with an inactive workload endpoint", func() {
wlEPID1 := proto.WorkloadEndpointID{
OrchestratorId: "k8s",
WorkloadId: "pod-11",
EndpointId: "endpoint-id-11",
}
JustBeforeEach(func() {
epMgr.OnUpdate(&proto.WorkloadEndpointUpdate{
Id: &wlEPID1,
Endpoint: &proto.WorkloadEndpoint{
State: "inactive",
Mac: "01:02:03:04:05:06",
Name: "cali12345-ab",
ProfileIds: []string{},
Tiers: []*proto.TierInfo{},
Ipv4Nets: []string{"10.0.240.2/24"},
Ipv6Nets: []string{"2001:db8:2::2/128"},
},
})
epMgr.CompleteDeferredWork()
})
It("should have expected chains", func() {
Expect(filterTable.currentChains["cali-tw-cali12345-ab"]).To(Equal(
&iptables.Chain{
Name: "cali-tw-cali12345-ab",
Rules: []iptables.Rule{{
Action: iptables.DropAction{},
Comment: "Endpoint admin disabled",
}},
},
))
Expect(filterTable.currentChains["cali-fw-cali12345-ab"]).To(Equal(
&iptables.Chain{
Name: "cali-fw-cali12345-ab",
Rules: []iptables.Rule{{
Action: iptables.DropAction{},
Comment: "Endpoint admin disabled",
}},
},
))
_, ok := mangleTable.currentChains["cali-tw-cali12345-ab"]
Expect(ok).To(BeFalse())
_, ok = mangleTable.currentChains["cali-fw-cali12345-ab"]
Expect(ok).To(BeFalse())
})
It("should remove routes", func() {
routeTable.checkRoutes("cali12345-ab", nil)
})
})
})
}
}
var _ = Describe("EndpointManager IPv4", endpointManagerTests(4))
var _ = Describe("EndpointManager IPv6", endpointManagerTests(6))
type testProcSys struct {
state map[string]string
Fail bool
}
var (
procSysFail = errors.New("mock proc sys failure")
)
func (t *testProcSys) write(path, value string) error {
log.WithFields(log.Fields{
"path": path,
"value": value,
}).Info("testProcSys writer")
if t.Fail {
return procSysFail
}
t.state[path] = value
return nil
}
func (t *testProcSys) checkState(expected map[string]string) {
Expect(t.state).To(Equal(expected))
}
| 1 | 16,983 |
Why isn't this 4789 (which I think is the Felix default)?
|
projectcalico-felix
|
go
|
@@ -57,6 +57,10 @@ const (
MessageResourceFailCreate EventReason = "Resource creation failed"
// SuccessImported holds status for corresponding imported resource.
SuccessImported EventReason = "Imported"
+ // FailureImported holds status for corresponding imported resource.
+ FailureImported EventReason = "Import failure"
+ // FailureImportOperations holds status for corresponding imported resource.
+ FailureImportOperations EventReason = "Failure Import operations"
// MessageResourceImported holds message for corresponding imported resource.
MessageResourceImported EventReason = "Resource imported successfully"
// FailureStatusSync holds status for corresponding failed status sync of resource.
| 1 |
/*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"reflect"
"sync"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/pool"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
"github.com/openebs/maya/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//EventReason is used as part of the Event reason when a resource goes through different phases
type EventReason string
const (
// ToDo: Improve the messages and event reason. ( Put these in a similar k8s style)
// SuccessSynced is used as part of the Event 'reason' when a resource is synced.
SuccessSynced EventReason = "Synced"
// FailedSynced is used as part of the Event 'reason' when resource sync fails.
FailedSynced EventReason = "FailedSync"
// MessageCreateSynced holds message for corresponding create request sync.
MessageCreateSynced EventReason = "Received Resource create event"
// MessageModifySynced holds message for corresponding modify request sync.
MessageModifySynced EventReason = "Received Resource modify event"
// MessageDestroySynced holds message for corresponding destroy request sync.
MessageDestroySynced EventReason = "Received Resource destroy event"
// StatusSynced holds message for corresponding status request sync.
StatusSynced EventReason = "Resource status sync event"
// SuccessCreated holds status for corresponding created resource.
SuccessCreated EventReason = "Created"
// MessageResourceCreated holds message for corresponding created resource.
MessageResourceCreated EventReason = "Resource created successfully"
// FailureCreate holds status for corresponding failed create resource.
FailureCreate EventReason = "FailCreate"
// MessageResourceFailCreate holds message for corresponding failed create resource.
MessageResourceFailCreate EventReason = "Resource creation failed"
// SuccessImported holds status for corresponding imported resource.
SuccessImported EventReason = "Imported"
// MessageResourceImported holds message for corresponding imported resource.
MessageResourceImported EventReason = "Resource imported successfully"
// FailureStatusSync holds status for corresponding failed status sync of resource.
FailureStatusSync EventReason = "FailStatusSync"
// FailureCapacitySync holds status for corresponding failed capacity sync of resource.
FailureCapacitySync EventReason = "FailCapacitySync"
// MessageResourceFailStatusSync holds message for corresponding failed status sync of resource.
MessageResourceFailStatusSync EventReason = "Resource status sync failed"
// MessageResourceFailCapacitySync holds message for corresponding failed capacity sync of resource.
MessageResourceFailCapacitySync EventReason = "Resource capacity sync failed"
// MessageResourceSyncSuccess holds message for corresponding successful sync of resource.
MessageResourceSyncSuccess EventReason = "Resource successfully synced"
// MessageResourceSyncFailure holds message for corresponding failed sync of resource.
MessageResourceSyncFailure EventReason = "Resource sync failed:"
// FailureDestroy holds status for corresponding failed destroy resource.
FailureDestroy EventReason = "FailDestroy"
// MessageResourceFailDestroy holds message for corresponding failed destroy resource.
MessageResourceFailDestroy EventReason = "Resource Destroy failed"
// FailureValidate holds status for corresponding failed validate resource.
FailureValidate EventReason = "FailValidate"
// MessageResourceFailValidate holds message for corresponding failed validate resource.
MessageResourceFailValidate EventReason = "Resource validation failed"
// AlreadyPresent holds status for corresponding already present resource.
AlreadyPresent EventReason = "AlreadyPresent"
// MessageResourceAlreadyPresent holds message for corresponding already present resource.
MessageResourceAlreadyPresent EventReason = "Resource already present"
)
// Periodic interval duration.
const (
// CRDRetryInterval is used if CRD is not present.
CRDRetryInterval = 10 * time.Second
// PoolNameHandlerInterval is used when expected pool is not present.
PoolNameHandlerInterval = 5 * time.Second
// SharedInformerInterval is used to sync watcher controller.
SharedInformerInterval = 30 * time.Second
// ResourceWorkerInterval is used for resource sync.
ResourceWorkerInterval = time.Second
// InitialZreplRetryInterval is used while initially starting controller.
InitialZreplRetryInterval = 3 * time.Second
// ContinuousZreplRetryInterval is used while controller has started running.
ContinuousZreplRetryInterval = 1 * time.Second
)
const (
// NoOfPoolWaitAttempts is number of attempts to wait in case of pod/container restarts.
NoOfPoolWaitAttempts = 30
// PoolWaitInterval is the interval to wait for pod/container restarts.
PoolWaitInterval = 2 * time.Second
)
// InitialImportedPoolVol is to store pool-volume names while pod restart.
var InitialImportedPoolVol []string
// QueueLoad is for storing the key and type of operation before entering workqueue
type QueueLoad struct {
Key string
Operation QueueOperation
}
// Environment is for environment variables passed for cstor-pool-mgmt.
type Environment string
const (
// OpenEBSIOCStorID is the environment variable specified in pod.
OpenEBSIOCStorID Environment = "OPENEBS_IO_CSTOR_ID"
)
//QueueOperation represents the type of operation on resource
type QueueOperation string
//Different type of operations on the controller
const (
QOpAdd QueueOperation = "add"
QOpDestroy QueueOperation = "destroy"
QOpModify QueueOperation = "modify"
// QOpSync is the operation for syncing(reconciling) on cstor pool object.
QOpSync QueueOperation = "Sync"
)
// namespace defines kubernetes namespace specified for cvr.
type namespace string
// Different types of k8s namespaces.
const (
defaultNameSpace namespace = "default"
)
// SyncResources is to synchronize pool and volumereplica.
var SyncResources SyncCStorPoolCVR
// SyncCStorPoolCVR is to hold synchronization related variables.
type SyncCStorPoolCVR struct {
// Mux is mutex variable to block cvr until certain pool operations are complete.
Mux *sync.Mutex
// IsImported is boolean flag to check at cvr until certain pool import operations are complete.
IsImported bool
}
// PoolNameHandler tries to get pool name and blocks for
// particular number of attempts.
func PoolNameHandler(cVR *apis.CStorVolumeReplica, cnt int) bool {
for i := 0; ; i++ {
poolname, _ := pool.GetPoolName()
if reflect.DeepEqual(poolname, []string{}) ||
!CheckIfPresent(poolname, string(pool.PoolPrefix)+cVR.Labels["cstorpool.openebs.io/uid"]) {
glog.Warningf("Attempt %v: No pool found", i+1)
time.Sleep(PoolNameHandlerInterval)
if i > cnt {
return false
}
} else if CheckIfPresent(poolname, string(pool.PoolPrefix)+cVR.Labels["cstorpool.openebs.io/uid"]) {
return true
}
}
}
// CheckForCStorPoolCRD is Blocking call for checking status of CStorPool CRD.
func CheckForCStorPoolCRD(clientset clientset.Interface) {
for {
_, err := clientset.OpenebsV1alpha1().CStorPools().List(metav1.ListOptions{})
if err != nil {
glog.Errorf("CStorPool CRD not found. Retrying after %v, error: %v", CRDRetryInterval, err)
time.Sleep(CRDRetryInterval)
continue
}
glog.Info("CStorPool CRD found")
break
}
}
// CheckForCStorVolumeReplicaCRD is Blocking call for checking status of CStorVolumeReplica CRD.
func CheckForCStorVolumeReplicaCRD(clientset clientset.Interface) {
for {
// Since this blocking function is restricted to check if CVR CRD is present
// or not, we are trying to handle only the error of CVR CR List api indirectly.
// CRD has only two types of scope, cluster and namespaced. If CR list api
// for default namespace works fine, then CR list api works for all namespaces.
_, err := clientset.OpenebsV1alpha1().CStorVolumeReplicas(string(defaultNameSpace)).List(metav1.ListOptions{})
if err != nil {
glog.Errorf("CStorVolumeReplica CRD not found. Retrying after %v, error: %v", CRDRetryInterval, err)
time.Sleep(CRDRetryInterval)
continue
}
glog.Info("CStorVolumeReplica CRD found")
break
}
}
// CheckForInitialImportedPoolVol is to check if volume is already
// imported with pool.
func CheckForInitialImportedPoolVol(InitialImportedPoolVol []string, fullvolname string) bool {
for i, initialVol := range InitialImportedPoolVol {
if initialVol == fullvolname {
if i < len(InitialImportedPoolVol) {
InitialImportedPoolVol = append(InitialImportedPoolVol[:i], InitialImportedPoolVol[i+1:]...)
}
return true
}
}
return false
}
// CheckIfPresent is to check if search string is present in array of string.
func CheckIfPresent(arrStr []string, searchStr string) bool {
for _, str := range arrStr {
if str == searchStr {
return true
}
}
return false
}
// CheckForCStorPool tries to get pool name and blocks forever because
// volumereplica can be created only if pool is present.
func CheckForCStorPool() {
for {
poolname, _ := pool.GetPoolName()
if reflect.DeepEqual(poolname, []string{}) {
glog.Warningf("CStorPool not found. Retrying after %v", PoolNameHandlerInterval)
time.Sleep(PoolNameHandlerInterval)
continue
}
glog.Info("CStorPool found")
break
}
}
// Init is to instantiate variable used between pool and volumereplica while
// starting controller.
func Init() {
// Instantiate mutex variable.
SyncResources.Mux = &sync.Mutex{}
// Making RunnerVar to use RealRunner
pool.RunnerVar = util.RealRunner{}
volumereplica.RunnerVar = util.RealRunner{}
}
| 1 | 15,582 |
Can this be `Import operations failure`? In addition, is this the right time to change the variable names? e.g. `ReasonImportOperationsFailure`, `ReasonImportFailure`, `ReasonImportSuccessful`, `ReasonCreateResourceFailure`, `ReasonImportResourceSuccessful`. The naming convention I have assumed here is: `Reason<Action><Entity/Adjective of the Entity><Success/Failure>`.
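For illustration, a rough sketch of how those constants could read under that convention (the identifiers are only the suggestions above and the string values are placeholders, not code that exists in this file):

```go
// Hypothetical renaming following Reason<Action><Entity><Success/Failure>.
// Identifiers and message strings are illustrative only.
const (
	ReasonImportSuccessful         EventReason = "Imported"
	ReasonImportFailure            EventReason = "Import failure"
	ReasonImportOperationsFailure  EventReason = "Import operations failure"
	ReasonCreateResourceFailure    EventReason = "Resource creation failed"
	ReasonImportResourceSuccessful EventReason = "Resource imported successfully"
)
```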
|
openebs-maya
|
go
|
@@ -457,4 +457,5 @@ void Init_binary_protocol_accelerated() {
rb_define_method(bpa_class, "read_set_end", rb_thrift_binary_proto_read_set_end, 0);
rbuf_ivar_id = rb_intern("@rbuf");
+ rb_global_variable(&rbuf_ivar_id);
}
| 1 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <ruby.h>
#include <stdbool.h>
#include <stdint.h>
#include <constants.h>
#include <struct.h>
#include <macros.h>
#include <bytes.h>
VALUE rb_thrift_binary_proto_native_qmark(VALUE self) {
return Qtrue;
}
static int VERSION_1;
static int VERSION_MASK;
static int TYPE_MASK;
static int BAD_VERSION;
static ID rbuf_ivar_id;
static void write_byte_direct(VALUE trans, int8_t b) {
WRITE(trans, (char*)&b, 1);
}
static void write_i16_direct(VALUE trans, int16_t value) {
char data[2];
data[1] = value;
data[0] = (value >> 8);
WRITE(trans, data, 2);
}
static void write_i32_direct(VALUE trans, int32_t value) {
char data[4];
data[3] = value;
data[2] = (value >> 8);
data[1] = (value >> 16);
data[0] = (value >> 24);
WRITE(trans, data, 4);
}
static void write_i64_direct(VALUE trans, int64_t value) {
char data[8];
data[7] = value;
data[6] = (value >> 8);
data[5] = (value >> 16);
data[4] = (value >> 24);
data[3] = (value >> 32);
data[2] = (value >> 40);
data[1] = (value >> 48);
data[0] = (value >> 56);
WRITE(trans, data, 8);
}
static void write_string_direct(VALUE trans, VALUE str) {
if (TYPE(str) != T_STRING) {
rb_raise(rb_eStandardError, "Value should be a string");
}
str = convert_to_utf8_byte_buffer(str);
write_i32_direct(trans, RSTRING_LEN(str));
rb_funcall(trans, write_method_id, 1, str);
}
//--------------------------------
// interface writing methods
//--------------------------------
VALUE rb_thrift_binary_proto_write_message_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_struct_begin(VALUE self, VALUE name) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_struct_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_field_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_map_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_list_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_set_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_write_message_begin(VALUE self, VALUE name, VALUE type, VALUE seqid) {
VALUE trans = GET_TRANSPORT(self);
VALUE strict_write = GET_STRICT_WRITE(self);
if (strict_write == Qtrue) {
write_i32_direct(trans, VERSION_1 | FIX2INT(type));
write_string_direct(trans, name);
write_i32_direct(trans, FIX2INT(seqid));
} else {
write_string_direct(trans, name);
write_byte_direct(trans, FIX2INT(type));
write_i32_direct(trans, FIX2INT(seqid));
}
return Qnil;
}
VALUE rb_thrift_binary_proto_write_field_begin(VALUE self, VALUE name, VALUE type, VALUE id) {
VALUE trans = GET_TRANSPORT(self);
write_byte_direct(trans, FIX2INT(type));
write_i16_direct(trans, FIX2INT(id));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_field_stop(VALUE self) {
write_byte_direct(GET_TRANSPORT(self), TTYPE_STOP);
return Qnil;
}
VALUE rb_thrift_binary_proto_write_map_begin(VALUE self, VALUE ktype, VALUE vtype, VALUE size) {
VALUE trans = GET_TRANSPORT(self);
write_byte_direct(trans, FIX2INT(ktype));
write_byte_direct(trans, FIX2INT(vtype));
write_i32_direct(trans, FIX2INT(size));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_list_begin(VALUE self, VALUE etype, VALUE size) {
VALUE trans = GET_TRANSPORT(self);
write_byte_direct(trans, FIX2INT(etype));
write_i32_direct(trans, FIX2INT(size));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_set_begin(VALUE self, VALUE etype, VALUE size) {
rb_thrift_binary_proto_write_list_begin(self, etype, size);
return Qnil;
}
VALUE rb_thrift_binary_proto_write_bool(VALUE self, VALUE b) {
write_byte_direct(GET_TRANSPORT(self), RTEST(b) ? 1 : 0);
return Qnil;
}
VALUE rb_thrift_binary_proto_write_byte(VALUE self, VALUE byte) {
CHECK_NIL(byte);
write_byte_direct(GET_TRANSPORT(self), NUM2INT(byte));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_i16(VALUE self, VALUE i16) {
CHECK_NIL(i16);
write_i16_direct(GET_TRANSPORT(self), FIX2INT(i16));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_i32(VALUE self, VALUE i32) {
CHECK_NIL(i32);
write_i32_direct(GET_TRANSPORT(self), NUM2INT(i32));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_i64(VALUE self, VALUE i64) {
CHECK_NIL(i64);
write_i64_direct(GET_TRANSPORT(self), NUM2LL(i64));
return Qnil;
}
VALUE rb_thrift_binary_proto_write_double(VALUE self, VALUE dub) {
CHECK_NIL(dub);
// Unfortunately, bitwise_cast doesn't work in C. Bad C!
union {
double f;
int64_t t;
} transfer;
transfer.f = RFLOAT_VALUE(rb_Float(dub));
write_i64_direct(GET_TRANSPORT(self), transfer.t);
return Qnil;
}
VALUE rb_thrift_binary_proto_write_string(VALUE self, VALUE str) {
CHECK_NIL(str);
VALUE trans = GET_TRANSPORT(self);
write_string_direct(trans, str);
return Qnil;
}
VALUE rb_thrift_binary_proto_write_binary(VALUE self, VALUE buf) {
CHECK_NIL(buf);
VALUE trans = GET_TRANSPORT(self);
buf = force_binary_encoding(buf);
write_i32_direct(trans, RSTRING_LEN(buf));
rb_funcall(trans, write_method_id, 1, buf);
return Qnil;
}
//---------------------------------------
// interface reading methods
//---------------------------------------
VALUE rb_thrift_binary_proto_read_string(VALUE self);
VALUE rb_thrift_binary_proto_read_binary(VALUE self);
VALUE rb_thrift_binary_proto_read_byte(VALUE self);
VALUE rb_thrift_binary_proto_read_i32(VALUE self);
VALUE rb_thrift_binary_proto_read_i16(VALUE self);
static char read_byte_direct(VALUE self) {
VALUE byte = rb_funcall(GET_TRANSPORT(self), read_byte_method_id, 0);
return (char)(FIX2INT(byte));
}
static int16_t read_i16_direct(VALUE self) {
VALUE rbuf = rb_ivar_get(self, rbuf_ivar_id);
rb_funcall(GET_TRANSPORT(self), read_into_buffer_method_id, 2, rbuf, INT2FIX(2));
return (int16_t)(((uint8_t)(RSTRING_PTR(rbuf)[1])) | ((uint16_t)((RSTRING_PTR(rbuf)[0]) << 8)));
}
static int32_t read_i32_direct(VALUE self) {
VALUE rbuf = rb_ivar_get(self, rbuf_ivar_id);
rb_funcall(GET_TRANSPORT(self), read_into_buffer_method_id, 2, rbuf, INT2FIX(4));
return ((uint8_t)(RSTRING_PTR(rbuf)[3])) |
(((uint8_t)(RSTRING_PTR(rbuf)[2])) << 8) |
(((uint8_t)(RSTRING_PTR(rbuf)[1])) << 16) |
(((uint8_t)(RSTRING_PTR(rbuf)[0])) << 24);
}
static int64_t read_i64_direct(VALUE self) {
VALUE rbuf = rb_ivar_get(self, rbuf_ivar_id);
rb_funcall(GET_TRANSPORT(self), read_into_buffer_method_id, 2, rbuf, INT2FIX(8));
uint64_t hi = ((uint8_t)(RSTRING_PTR(rbuf)[3])) |
(((uint8_t)(RSTRING_PTR(rbuf)[2])) << 8) |
(((uint8_t)(RSTRING_PTR(rbuf)[1])) << 16) |
(((uint8_t)(RSTRING_PTR(rbuf)[0])) << 24);
uint32_t lo = ((uint8_t)(RSTRING_PTR(rbuf)[7])) |
(((uint8_t)(RSTRING_PTR(rbuf)[6])) << 8) |
(((uint8_t)(RSTRING_PTR(rbuf)[5])) << 16) |
(((uint8_t)(RSTRING_PTR(rbuf)[4])) << 24);
return (hi << 32) | lo;
}
static VALUE get_protocol_exception(VALUE code, VALUE message) {
VALUE args[2];
args[0] = code;
args[1] = message;
return rb_class_new_instance(2, (VALUE*)&args, protocol_exception_class);
}
VALUE rb_thrift_binary_proto_read_message_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_struct_begin(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_struct_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_field_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_map_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_list_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_set_end(VALUE self) {
return Qnil;
}
VALUE rb_thrift_binary_proto_read_message_begin(VALUE self) {
VALUE strict_read = GET_STRICT_READ(self);
VALUE name, seqid;
int type;
int version = read_i32_direct(self);
if (version < 0) {
if ((version & VERSION_MASK) != VERSION_1) {
rb_exc_raise(get_protocol_exception(INT2FIX(BAD_VERSION), rb_str_new2("Missing version identifier")));
}
type = version & TYPE_MASK;
name = rb_thrift_binary_proto_read_string(self);
seqid = rb_thrift_binary_proto_read_i32(self);
} else {
if (strict_read == Qtrue) {
rb_exc_raise(get_protocol_exception(INT2FIX(BAD_VERSION), rb_str_new2("No version identifier, old protocol client?")));
}
name = READ(self, version);
type = read_byte_direct(self);
seqid = rb_thrift_binary_proto_read_i32(self);
}
return rb_ary_new3(3, name, INT2FIX(type), seqid);
}
VALUE rb_thrift_binary_proto_read_field_begin(VALUE self) {
int type = read_byte_direct(self);
if (type == TTYPE_STOP) {
return rb_ary_new3(3, Qnil, INT2FIX(type), INT2FIX(0));
} else {
VALUE id = rb_thrift_binary_proto_read_i16(self);
return rb_ary_new3(3, Qnil, INT2FIX(type), id);
}
}
VALUE rb_thrift_binary_proto_read_map_begin(VALUE self) {
VALUE ktype = rb_thrift_binary_proto_read_byte(self);
VALUE vtype = rb_thrift_binary_proto_read_byte(self);
VALUE size = rb_thrift_binary_proto_read_i32(self);
return rb_ary_new3(3, ktype, vtype, size);
}
VALUE rb_thrift_binary_proto_read_list_begin(VALUE self) {
VALUE etype = rb_thrift_binary_proto_read_byte(self);
VALUE size = rb_thrift_binary_proto_read_i32(self);
return rb_ary_new3(2, etype, size);
}
VALUE rb_thrift_binary_proto_read_set_begin(VALUE self) {
return rb_thrift_binary_proto_read_list_begin(self);
}
VALUE rb_thrift_binary_proto_read_bool(VALUE self) {
char byte = read_byte_direct(self);
return byte != 0 ? Qtrue : Qfalse;
}
VALUE rb_thrift_binary_proto_read_byte(VALUE self) {
return INT2FIX(read_byte_direct(self));
}
VALUE rb_thrift_binary_proto_read_i16(VALUE self) {
return INT2FIX(read_i16_direct(self));
}
VALUE rb_thrift_binary_proto_read_i32(VALUE self) {
return INT2NUM(read_i32_direct(self));
}
VALUE rb_thrift_binary_proto_read_i64(VALUE self) {
return LL2NUM(read_i64_direct(self));
}
VALUE rb_thrift_binary_proto_read_double(VALUE self) {
union {
double f;
int64_t t;
} transfer;
transfer.t = read_i64_direct(self);
return rb_float_new(transfer.f);
}
VALUE rb_thrift_binary_proto_read_string(VALUE self) {
VALUE buffer = rb_thrift_binary_proto_read_binary(self);
return convert_to_string(buffer);
}
VALUE rb_thrift_binary_proto_read_binary(VALUE self) {
int size = read_i32_direct(self);
return READ(self, size);
}
void Init_binary_protocol_accelerated() {
VALUE thrift_binary_protocol_class = rb_const_get(thrift_module, rb_intern("BinaryProtocol"));
VERSION_1 = rb_num2ll(rb_const_get(thrift_binary_protocol_class, rb_intern("VERSION_1")));
VERSION_MASK = rb_num2ll(rb_const_get(thrift_binary_protocol_class, rb_intern("VERSION_MASK")));
TYPE_MASK = rb_num2ll(rb_const_get(thrift_binary_protocol_class, rb_intern("TYPE_MASK")));
VALUE bpa_class = rb_define_class_under(thrift_module, "BinaryProtocolAccelerated", thrift_binary_protocol_class);
rb_define_method(bpa_class, "native?", rb_thrift_binary_proto_native_qmark, 0);
rb_define_method(bpa_class, "write_message_begin", rb_thrift_binary_proto_write_message_begin, 3);
rb_define_method(bpa_class, "write_field_begin", rb_thrift_binary_proto_write_field_begin, 3);
rb_define_method(bpa_class, "write_field_stop", rb_thrift_binary_proto_write_field_stop, 0);
rb_define_method(bpa_class, "write_map_begin", rb_thrift_binary_proto_write_map_begin, 3);
rb_define_method(bpa_class, "write_list_begin", rb_thrift_binary_proto_write_list_begin, 2);
rb_define_method(bpa_class, "write_set_begin", rb_thrift_binary_proto_write_set_begin, 2);
rb_define_method(bpa_class, "write_byte", rb_thrift_binary_proto_write_byte, 1);
rb_define_method(bpa_class, "write_bool", rb_thrift_binary_proto_write_bool, 1);
rb_define_method(bpa_class, "write_i16", rb_thrift_binary_proto_write_i16, 1);
rb_define_method(bpa_class, "write_i32", rb_thrift_binary_proto_write_i32, 1);
rb_define_method(bpa_class, "write_i64", rb_thrift_binary_proto_write_i64, 1);
rb_define_method(bpa_class, "write_double", rb_thrift_binary_proto_write_double, 1);
rb_define_method(bpa_class, "write_string", rb_thrift_binary_proto_write_string, 1);
rb_define_method(bpa_class, "write_binary", rb_thrift_binary_proto_write_binary, 1);
// unused methods
rb_define_method(bpa_class, "write_message_end", rb_thrift_binary_proto_write_message_end, 0);
rb_define_method(bpa_class, "write_struct_begin", rb_thrift_binary_proto_write_struct_begin, 1);
rb_define_method(bpa_class, "write_struct_end", rb_thrift_binary_proto_write_struct_end, 0);
rb_define_method(bpa_class, "write_field_end", rb_thrift_binary_proto_write_field_end, 0);
rb_define_method(bpa_class, "write_map_end", rb_thrift_binary_proto_write_map_end, 0);
rb_define_method(bpa_class, "write_list_end", rb_thrift_binary_proto_write_list_end, 0);
rb_define_method(bpa_class, "write_set_end", rb_thrift_binary_proto_write_set_end, 0);
rb_define_method(bpa_class, "read_message_begin", rb_thrift_binary_proto_read_message_begin, 0);
rb_define_method(bpa_class, "read_field_begin", rb_thrift_binary_proto_read_field_begin, 0);
rb_define_method(bpa_class, "read_map_begin", rb_thrift_binary_proto_read_map_begin, 0);
rb_define_method(bpa_class, "read_list_begin", rb_thrift_binary_proto_read_list_begin, 0);
rb_define_method(bpa_class, "read_set_begin", rb_thrift_binary_proto_read_set_begin, 0);
rb_define_method(bpa_class, "read_byte", rb_thrift_binary_proto_read_byte, 0);
rb_define_method(bpa_class, "read_bool", rb_thrift_binary_proto_read_bool, 0);
rb_define_method(bpa_class, "read_i16", rb_thrift_binary_proto_read_i16, 0);
rb_define_method(bpa_class, "read_i32", rb_thrift_binary_proto_read_i32, 0);
rb_define_method(bpa_class, "read_i64", rb_thrift_binary_proto_read_i64, 0);
rb_define_method(bpa_class, "read_double", rb_thrift_binary_proto_read_double, 0);
rb_define_method(bpa_class, "read_string", rb_thrift_binary_proto_read_string, 0);
rb_define_method(bpa_class, "read_binary", rb_thrift_binary_proto_read_binary, 0);
// unused methods
rb_define_method(bpa_class, "read_message_end", rb_thrift_binary_proto_read_message_end, 0);
rb_define_method(bpa_class, "read_struct_begin", rb_thrift_binary_proto_read_struct_begin, 0);
rb_define_method(bpa_class, "read_struct_end", rb_thrift_binary_proto_read_struct_end, 0);
rb_define_method(bpa_class, "read_field_end", rb_thrift_binary_proto_read_field_end, 0);
rb_define_method(bpa_class, "read_map_end", rb_thrift_binary_proto_read_map_end, 0);
rb_define_method(bpa_class, "read_list_end", rb_thrift_binary_proto_read_list_end, 0);
rb_define_method(bpa_class, "read_set_end", rb_thrift_binary_proto_read_set_end, 0);
rbuf_ivar_id = rb_intern("@rbuf");
}
| 1 | 17,047 |
I suspect this isn't needed because I think `rb_intern` gets marked already.
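For context, a minimal sketch of the usual distinction, assuming standard Ruby C API behaviour (`cached_str` and `cached_ivar` are made-up names): `rb_global_variable` is for C globals that hold a VALUE, so the GC can mark the referenced object, whereas an ID returned by `rb_intern` names an interned symbol that the runtime keeps alive on its own, which is why the added registration looks unnecessary.

```c
#include <ruby.h>

static VALUE cached_str; /* heap object: register it so the GC marks it */
static ID cached_ivar;   /* interned ID: kept alive by the runtime */

void init_example(void) {
  cached_str = rb_str_new_cstr("example");
  rb_global_variable(&cached_str); /* protects cached_str across GC */
  cached_ivar = rb_intern("@rbuf"); /* no extra protection expected */
}
```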
|
apache-thrift
|
c
|
@@ -96,7 +96,7 @@ public class InternalSelenseTestBase extends SeleneseTestBase {
return;
}
- log.info("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium");
+ log.finest("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium");
Path dir =
InProject.locate("java/client/build/production/com/thoughtworks/selenium/webdriven");
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.thoughtworks.selenium;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
import static org.openqa.selenium.UnexpectedAlertBehaviour.IGNORE;
import static org.openqa.selenium.build.DevMode.isInDevMode;
import static org.openqa.selenium.remote.CapabilityType.UNEXPECTED_ALERT_BEHAVIOUR;
import com.google.common.base.Throwables;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.Resources;
import com.thoughtworks.selenium.testing.SeleniumTestEnvironment;
import com.thoughtworks.selenium.webdriven.WebDriverBackedSelenium;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.junit.rules.TestWatcher;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
import org.openqa.selenium.build.BuckBuild;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.JavascriptExecutor;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WrapsDriver;
import org.openqa.selenium.chrome.ChromeOptions;
import org.openqa.selenium.edge.EdgeOptions;
import org.openqa.selenium.environment.GlobalTestEnvironment;
import org.openqa.selenium.firefox.FirefoxOptions;
import org.openqa.selenium.ie.InternetExplorerOptions;
import org.openqa.selenium.opera.OperaOptions;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.build.DevMode;
import org.openqa.selenium.build.InProject;
import org.openqa.selenium.safari.SafariOptions;
import org.openqa.selenium.testing.drivers.Browser;
import org.openqa.selenium.testing.drivers.WebDriverBuilder;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;
public class InternalSelenseTestBase extends SeleneseTestBase {
private static final Logger log = Logger.getLogger(InternalSelenseTestBase.class.getName());
private static final ImmutableSet<String> ATOM_TARGETS = ImmutableSet.of(
"findElement",
"findOption",
"fireEvent",
"fireEventAt",
"getAttribute",
"getText",
"linkLocator",
"isElementPresent",
"isSomethingSelected",
"isTextPresent",
"isVisible",
"setCursorPosition",
"type");
private static Selenium INSTANCE;
private static final AtomicBoolean MUST_BUILD = new AtomicBoolean(true);
@BeforeClass
public static void buildJavascriptLibraries() throws IOException {
if (!DevMode.isInDevMode() || !MUST_BUILD.compareAndSet(true, false)) {
return;
}
log.info("In dev mode. Copying required files in case we're using a WebDriver-backed Selenium");
Path dir =
InProject.locate("java/client/build/production/com/thoughtworks/selenium/webdriven");
Files.createDirectories(dir);
for (String target : ATOM_TARGETS) {
Path atom = new BuckBuild().of("//javascript/selenium-atoms:" + target).go(isInDevMode());
Files.copy(atom, dir.resolve(atom.getFileName()), REPLACE_EXISTING);
}
Path sizzle = InProject.locate("third_party/js/sizzle/sizzle.js");
Files.copy(sizzle, dir.resolve("sizzle.js"), REPLACE_EXISTING);
Path seDir = InProject.locate("java/client/test/com/thoughtworks/selenium");
Path destDir =
InProject.locate("java/client/build/production/com/thoughtworks/selenium");
Files.list(seDir)
.filter(path -> path.getFileName().toString().endsWith(".js"))
.forEach(path -> {
try {
Files.copy(path, destDir.resolve(path.getFileName()), REPLACE_EXISTING);
} catch (IOException e) {
throw new RuntimeException(e);
}
});
}
@BeforeClass
public static void initializeServer() {
GlobalTestEnvironment.get(SeleniumTestEnvironment.class);
}
public TestWatcher traceMethodName = new TestWatcher() {
@Override
protected void starting(Description description) {
super.starting(description);
log.info(">>> Starting " + description);
}
@Override
protected void finished(Description description) {
super.finished(description);
log.info("<<< Finished " + description);
}
};
public ExternalResource initializeSelenium = new ExternalResource() {
@Override
protected void before() {
selenium = INSTANCE;
if (selenium != null) {
return;
}
MutableCapabilities caps = new MutableCapabilities(createCapabilities());
caps.setCapability(UNEXPECTED_ALERT_BEHAVIOUR, IGNORE);
String baseUrl = whereIs("selenium-server/");
WebDriver driver = new WebDriverBuilder().get(caps);
selenium = new WebDriverBackedSelenium(driver, baseUrl);
selenium.setBrowserLogLevel("debug");
INSTANCE = selenium;
}
};
private Capabilities createCapabilities() {
String property = System.getProperty("selenium.browser", "ff");
Browser browser = Browser.valueOf(property);
switch (browser) {
case CHROME:
return new ChromeOptions();
case EDGE:
return new EdgeOptions();
case IE:
return new InternetExplorerOptions();
case FIREFOX:
case MARIONETTE:
return new FirefoxOptions();
case OPERA:
case OPERABLINK:
return new OperaOptions();
case SAFARI:
return new SafariOptions();
default:
fail("Attempt to use an unsupported browser: " + property);
// we never get here, but keep null checks happy anyway
return new DesiredCapabilities();
}
}
public ExternalResource addNecessaryJavascriptCommands = new ExternalResource() {
@Override
protected void before() {
if (!(selenium instanceof WebDriverBackedSelenium)) {
return;
}
// We need to be a on page where we can execute JS
WebDriver driver = ((WrapsDriver) selenium).getWrappedDriver();
driver.get(whereIs("/selenium-server"));
try {
URL scriptUrl =
Resources.getResource(getClass(), "/com/thoughtworks/selenium/testHelpers.js");
String script = Resources.toString(scriptUrl, StandardCharsets.UTF_8);
((JavascriptExecutor) driver).executeScript(script);
} catch (IOException e) {
fail("Cannot read script: " + Throwables.getStackTraceAsString(e));
}
}
};
public ExternalResource returnFocusToMainWindow = new ExternalResource() {
@Override
protected void before() {
if (selenium == null) {
return;
}
selenium.selectWindow("");
selenium.windowFocus();
}
};
public TestWatcher filter = new TestWatcher() {
@Override
public Statement apply(Statement base, Description description) {
String onlyRun = System.getProperty("only_run");
Assume.assumeTrue(onlyRun == null ||
Arrays.asList(onlyRun.split(",")).contains(description.getTestClass().getSimpleName()));
String mth = System.getProperty("method");
Assume.assumeTrue(mth == null ||
Arrays.asList(mth.split(",")).contains(description.getMethodName()));
return super.apply(base, description);
}
};
@Rule
public TestRule chain =
RuleChain.outerRule(filter)
.around(initializeSelenium)
.around(returnFocusToMainWindow)
.around(addNecessaryJavascriptCommands)
.around(traceMethodName);
@After
public void checkVerifications() {
checkForVerificationErrors();
}
private String whereIs(String location) {
return GlobalTestEnvironment.get().getAppServer().whereIs(location);
}
@AfterClass
public static void destroyDriver() {
if (Boolean.getBoolean("webdriver.singletestsuite.leaverunning")) {
return;
}
Selenium selenium = INSTANCE;
if (selenium != null) {
selenium.stop();
INSTANCE = null;
}
}
}
| 1 | 16,448 |
This change is incorrect: the current log level is correct.
|
SeleniumHQ-selenium
|
rb
|
@@ -80,7 +80,8 @@ public class BKDWriter implements Closeable {
//public static final int VERSION_CURRENT = VERSION_START;
public static final int VERSION_LEAF_STORES_BOUNDS = 5;
public static final int VERSION_SELECTIVE_INDEXING = 6;
- public static final int VERSION_CURRENT = VERSION_SELECTIVE_INDEXING;
+ public static final int VERSION_LOW_CARDINALITY_LEAVES= 7;
+ public static final int VERSION_CURRENT = VERSION_LOW_CARDINALITY_LEAVES;
/** How many bytes each docs takes in the fixed-width offline format */
private final int bytesPerDoc;
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util.bkd;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.IntFunction;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.MutablePointValues;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.PointValues.IntersectVisitor;
import org.apache.lucene.index.PointValues.Relation;
import org.apache.lucene.store.ByteBuffersDataOutput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.TrackingDirectoryWrapper;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.PriorityQueue;
// TODO
// - allow variable length byte[] (across docs and dims), but this is quite a bit more hairy
// - we could also index "auto-prefix terms" here, and use better compression, and maybe only use for the "fully contained" case so we'd
// only index docIDs
// - the index could be efficiently encoded as an FST, so we don't have wasteful
// (monotonic) long[] leafBlockFPs; or we could use MonotonicLongValues ... but then
// the index is already plenty small: 60M OSM points --> 1.1 MB with 128 points
// per leaf, and you can reduce that by putting more points per leaf
// - we could use threads while building; the higher nodes are very parallelizable
/**
* Recursively builds a block KD-tree to assign all incoming points in N-dim space to smaller
* and smaller N-dim rectangles (cells) until the number of points in a given
* rectangle is <= <code>maxPointsInLeafNode</code>. The tree is
* fully balanced, which means the leaf nodes will have between 50% and 100% of
* the requested <code>maxPointsInLeafNode</code>. Values that fall exactly
* on a cell boundary may be in either cell.
*
* <p>The number of dimensions can be 1 to 8, but every byte[] value is fixed length.
*
* <p>This consumes heap during writing: it allocates a <code>Long[numLeaves]</code>,
* a <code>byte[numLeaves*(1+bytesPerDim)]</code> and then uses up to the specified
* {@code maxMBSortInHeap} heap space for writing.
*
* <p>
* <b>NOTE</b>: This can write at most Integer.MAX_VALUE * <code>maxPointsInLeafNode</code> / (1+bytesPerDim)
* total points.
*
* @lucene.experimental */
public class BKDWriter implements Closeable {
public static final String CODEC_NAME = "BKD";
public static final int VERSION_START = 4; // version used by Lucene 7.0
//public static final int VERSION_CURRENT = VERSION_START;
public static final int VERSION_LEAF_STORES_BOUNDS = 5;
public static final int VERSION_SELECTIVE_INDEXING = 6;
public static final int VERSION_CURRENT = VERSION_SELECTIVE_INDEXING;
/** How many bytes each docs takes in the fixed-width offline format */
private final int bytesPerDoc;
/** Default maximum number of point in each leaf block */
public static final int DEFAULT_MAX_POINTS_IN_LEAF_NODE = 1024;
/** Default maximum heap to use, before spilling to (slower) disk */
public static final float DEFAULT_MAX_MB_SORT_IN_HEAP = 16.0f;
/** Maximum number of dimensions */
public static final int MAX_DIMS = 8;
/** How many dimensions we are storing at the leaf (data) nodes */
protected final int numDataDims;
/** How many dimensions we are indexing in the internal nodes */
protected final int numIndexDims;
/** How many bytes each value in each dimension takes. */
protected final int bytesPerDim;
/** numDataDims * bytesPerDim */
protected final int packedBytesLength;
/** numIndexDims * bytesPerDim */
protected final int packedIndexBytesLength;
final TrackingDirectoryWrapper tempDir;
final String tempFileNamePrefix;
final double maxMBSortInHeap;
final byte[] scratchDiff;
final byte[] scratch1;
final byte[] scratch2;
final BytesRef scratchBytesRef1 = new BytesRef();
final BytesRef scratchBytesRef2 = new BytesRef();
final int[] commonPrefixLengths;
protected final FixedBitSet docsSeen;
private PointWriter pointWriter;
private boolean finished;
private IndexOutput tempInput;
protected final int maxPointsInLeafNode;
private final int maxPointsSortInHeap;
/** Minimum per-dim values, packed */
protected final byte[] minPackedValue;
/** Maximum per-dim values, packed */
protected final byte[] maxPackedValue;
protected long pointCount;
/** An upper bound on how many points the caller will add (includes deletions) */
private final long totalPointCount;
private final int maxDoc;
public BKDWriter(int maxDoc, Directory tempDir, String tempFileNamePrefix, int numDataDims, int numIndexDims, int bytesPerDim,
int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) throws IOException {
verifyParams(numDataDims, numIndexDims, maxPointsInLeafNode, maxMBSortInHeap, totalPointCount);
// We use tracking dir to deal with removing files on exception, so each place that
// creates temp files doesn't need crazy try/finally/success logic:
this.tempDir = new TrackingDirectoryWrapper(tempDir);
this.tempFileNamePrefix = tempFileNamePrefix;
this.maxPointsInLeafNode = maxPointsInLeafNode;
this.numDataDims = numDataDims;
this.numIndexDims = numIndexDims;
this.bytesPerDim = bytesPerDim;
this.totalPointCount = totalPointCount;
this.maxDoc = maxDoc;
docsSeen = new FixedBitSet(maxDoc);
packedBytesLength = numDataDims * bytesPerDim;
packedIndexBytesLength = numIndexDims * bytesPerDim;
scratchDiff = new byte[bytesPerDim];
scratch1 = new byte[packedBytesLength];
scratch2 = new byte[packedBytesLength];
commonPrefixLengths = new int[numDataDims];
minPackedValue = new byte[packedIndexBytesLength];
maxPackedValue = new byte[packedIndexBytesLength];
// dimensional values (numDims * bytesPerDim) + docID (int)
bytesPerDoc = packedBytesLength + Integer.BYTES;
// Maximum number of points we hold in memory at any time
maxPointsSortInHeap = (int) ((maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc));
// Finally, we must be able to hold at least the leaf node in heap during build:
if (maxPointsSortInHeap < maxPointsInLeafNode) {
throw new IllegalArgumentException("maxMBSortInHeap=" + maxMBSortInHeap + " only allows for maxPointsSortInHeap=" + maxPointsSortInHeap + ", but this is less than maxPointsInLeafNode=" + maxPointsInLeafNode + "; either increase maxMBSortInHeap or decrease maxPointsInLeafNode");
}
this.maxMBSortInHeap = maxMBSortInHeap;
}
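// For illustration (assumed values, not taken from any particular index): with numDataDims=2 and
// bytesPerDim=4, each doc occupies bytesPerDoc = 2*4 + 4 = 12 bytes, so the default
// maxMBSortInHeap=16.0 yields maxPointsSortInHeap = (16 * 1024 * 1024) / 12 ~= 1,398,101 points,
// comfortably above the default 1024-point leaf requirement checked above.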
public static void verifyParams(int numDataDims, int numIndexDims, int maxPointsInLeafNode, double maxMBSortInHeap, long totalPointCount) {
// We encode dim in a single byte in the splitPackedValues, but we only expose 4 bits for it now, in case we want to use
// remaining 4 bits for another purpose later
if (numDataDims < 1 || numDataDims > MAX_DIMS) {
throw new IllegalArgumentException("numDataDims must be 1 .. " + MAX_DIMS + " (got: " + numDataDims + ")");
}
if (numIndexDims < 1 || numIndexDims > numDataDims) {
throw new IllegalArgumentException("numIndexDims must be 1 .. " + numDataDims + " (got: " + numIndexDims + ")");
}
if (maxPointsInLeafNode <= 0) {
throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
}
if (maxPointsInLeafNode > ArrayUtil.MAX_ARRAY_LENGTH) {
throw new IllegalArgumentException("maxPointsInLeafNode must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " + ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsInLeafNode);
}
if (maxMBSortInHeap < 0.0) {
throw new IllegalArgumentException("maxMBSortInHeap must be >= 0.0 (got: " + maxMBSortInHeap + ")");
}
if (totalPointCount < 0) {
throw new IllegalArgumentException("totalPointCount must be >=0 (got: " + totalPointCount + ")");
}
}
private void initPointWriter() throws IOException {
assert pointWriter == null : "Point writer is already initialized";
//total point count is an estimate, but the final point count must be equal to or lower than that number.
if (totalPointCount > maxPointsSortInHeap) {
pointWriter = new OfflinePointWriter(tempDir, tempFileNamePrefix, packedBytesLength, "spill", 0);
tempInput = ((OfflinePointWriter)pointWriter).out;
} else {
pointWriter = new HeapPointWriter(Math.toIntExact(totalPointCount), packedBytesLength);
}
}
public void add(byte[] packedValue, int docID) throws IOException {
if (packedValue.length != packedBytesLength) {
throw new IllegalArgumentException("packedValue should be length=" + packedBytesLength + " (got: " + packedValue.length + ")");
}
if (pointCount >= totalPointCount) {
throw new IllegalStateException("totalPointCount=" + totalPointCount + " was passed when we were created, but we just hit " + (pointCount + 1) + " values");
}
if (pointCount == 0) {
initPointWriter();
System.arraycopy(packedValue, 0, minPackedValue, 0, packedIndexBytesLength);
System.arraycopy(packedValue, 0, maxPackedValue, 0, packedIndexBytesLength);
} else {
for(int dim=0;dim<numIndexDims;dim++) {
int offset = dim*bytesPerDim;
if (Arrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
System.arraycopy(packedValue, offset, minPackedValue, offset, bytesPerDim);
}
if (Arrays.compareUnsigned(packedValue, offset, offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) > 0) {
System.arraycopy(packedValue, offset, maxPackedValue, offset, bytesPerDim);
}
}
}
pointWriter.append(packedValue, docID);
pointCount++;
docsSeen.set(docID);
}
/** How many points have been added so far */
public long getPointCount() {
return pointCount;
}
private static class MergeReader {
final BKDReader bkd;
final BKDReader.IntersectState state;
final MergeState.DocMap docMap;
/** Current doc ID */
public int docID;
/** Which doc in this block we are up to */
private int docBlockUpto;
/** How many docs in the current block */
private int docsInBlock;
/** Which leaf block we are up to */
private int blockID;
private final byte[] packedValues;
public MergeReader(BKDReader bkd, MergeState.DocMap docMap) throws IOException {
this.bkd = bkd;
state = new BKDReader.IntersectState(bkd.in.clone(),
bkd.numDataDims,
bkd.packedBytesLength,
bkd.packedIndexBytesLength,
bkd.maxPointsInLeafNode,
null,
null);
this.docMap = docMap;
state.in.seek(bkd.getMinLeafBlockFP());
this.packedValues = new byte[bkd.maxPointsInLeafNode * bkd.packedBytesLength];
}
public boolean next() throws IOException {
//System.out.println("MR.next this=" + this);
while (true) {
if (docBlockUpto == docsInBlock) {
if (blockID == bkd.leafNodeOffset) {
//System.out.println(" done!");
return false;
}
//System.out.println(" new block @ fp=" + state.in.getFilePointer());
docsInBlock = bkd.readDocIDs(state.in, state.in.getFilePointer(), state.scratchDocIDs);
assert docsInBlock > 0;
docBlockUpto = 0;
bkd.visitDocValues(state.commonPrefixLengths, state.scratchDataPackedValue, state.scratchMinIndexPackedValue, state.scratchMaxIndexPackedValue, state.in, state.scratchDocIDs, docsInBlock, new IntersectVisitor() {
int i = 0;
@Override
public void visit(int docID) {
throw new UnsupportedOperationException();
}
@Override
public void visit(int docID, byte[] packedValue) {
assert docID == state.scratchDocIDs[i];
System.arraycopy(packedValue, 0, packedValues, i * bkd.packedBytesLength, bkd.packedBytesLength);
i++;
}
@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
return Relation.CELL_CROSSES_QUERY;
}
});
blockID++;
}
final int index = docBlockUpto++;
int oldDocID = state.scratchDocIDs[index];
int mappedDocID;
if (docMap == null) {
mappedDocID = oldDocID;
} else {
mappedDocID = docMap.get(oldDocID);
}
if (mappedDocID != -1) {
// Not deleted!
docID = mappedDocID;
System.arraycopy(packedValues, index * bkd.packedBytesLength, state.scratchDataPackedValue, 0, bkd.packedBytesLength);
return true;
}
}
}
}
private static class BKDMergeQueue extends PriorityQueue<MergeReader> {
private final int bytesPerDim;
public BKDMergeQueue(int bytesPerDim, int maxSize) {
super(maxSize);
this.bytesPerDim = bytesPerDim;
}
@Override
public boolean lessThan(MergeReader a, MergeReader b) {
assert a != b;
int cmp = Arrays.compareUnsigned(a.state.scratchDataPackedValue, 0, bytesPerDim, b.state.scratchDataPackedValue, 0, bytesPerDim);
if (cmp < 0) {
return true;
} else if (cmp > 0) {
return false;
}
// Tie break by sorting smaller docIDs earlier:
return a.docID < b.docID;
}
}
/** Write a field from a {@link MutablePointValues}. This way of writing
* points is faster than regular writes with {@link BKDWriter#add} since
* there is opportunity for reordering points before writing them to
* disk. This method does not use transient disk in order to reorder points.
*/
public long writeField(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
if (numDataDims == 1) {
return writeField1Dim(out, fieldName, reader);
} else {
return writeFieldNDims(out, fieldName, reader);
}
}
/* In the 2+D case, we recursively pick the split dimension, compute the
* median value and partition other values around it. */
private long writeFieldNDims(IndexOutput out, String fieldName, MutablePointValues values) throws IOException {
if (pointCount != 0) {
throw new IllegalStateException("cannot mix add and writeField");
}
// Catch user silliness:
if (finished == true) {
throw new IllegalStateException("already finished");
}
// Mark that we already finished:
finished = true;
long countPerLeaf = pointCount = values.size();
long innerNodeCount = 1;
while (countPerLeaf > maxPointsInLeafNode) {
countPerLeaf = (countPerLeaf+1)/2;
innerNodeCount *= 2;
}
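// For illustration (assumed values): with 1,000,000 incoming points and maxPointsInLeafNode=1024,
// the loop above halves countPerLeaf ten times (1,000,000 -> 500,000 -> ... -> 977), so
// innerNodeCount becomes 1024 and the tree gets 1024 leaves holding at most 977 points each.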
int numLeaves = Math.toIntExact(innerNodeCount);
checkMaxLeafNodeCount(numLeaves);
final byte[] splitPackedValues = new byte[numLeaves * (bytesPerDim + 1)];
final long[] leafBlockFPs = new long[numLeaves];
// compute the min/max for this slice
Arrays.fill(minPackedValue, (byte) 0xff);
Arrays.fill(maxPackedValue, (byte) 0);
for (int i = 0; i < Math.toIntExact(pointCount); ++i) {
values.getValue(i, scratchBytesRef1);
for(int dim=0;dim<numIndexDims;dim++) {
int offset = dim*bytesPerDim;
if (Arrays.compareUnsigned(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, scratchBytesRef1.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, minPackedValue, offset, bytesPerDim);
}
if (Arrays.compareUnsigned(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, scratchBytesRef1.offset + offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) > 0) {
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset, maxPackedValue, offset, bytesPerDim);
}
}
docsSeen.set(values.getDocID(i));
}
final int[] parentSplits = new int[numIndexDims];
build(1, numLeaves, values, 0, Math.toIntExact(pointCount), out,
minPackedValue, maxPackedValue, parentSplits,
splitPackedValues, leafBlockFPs,
new int[maxPointsInLeafNode]);
assert Arrays.equals(parentSplits, new int[numIndexDims]);
long indexFP = out.getFilePointer();
writeIndex(out, Math.toIntExact(countPerLeaf), leafBlockFPs, splitPackedValues);
return indexFP;
}
/* In the 1D case, we can simply sort points in ascending order and use the
* same writing logic as we use at merge time. */
private long writeField1Dim(IndexOutput out, String fieldName, MutablePointValues reader) throws IOException {
MutablePointsReaderUtils.sort(maxDoc, packedIndexBytesLength, reader, 0, Math.toIntExact(reader.size()));
final OneDimensionBKDWriter oneDimWriter = new OneDimensionBKDWriter(out);
reader.intersect(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
oneDimWriter.add(packedValue, docID);
}
@Override
public void visit(int docID) throws IOException {
throw new IllegalStateException();
}
@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
return Relation.CELL_CROSSES_QUERY;
}
});
return oneDimWriter.finish();
}
/** More efficient bulk-add for incoming {@link BKDReader}s. This does a merge sort of the already
* sorted values and currently only works when numDims==1. This returns -1 if all documents containing
* dimensional values were deleted. */
public long merge(IndexOutput out, List<MergeState.DocMap> docMaps, List<BKDReader> readers) throws IOException {
assert docMaps == null || readers.size() == docMaps.size();
BKDMergeQueue queue = new BKDMergeQueue(bytesPerDim, readers.size());
for(int i=0;i<readers.size();i++) {
BKDReader bkd = readers.get(i);
MergeState.DocMap docMap;
if (docMaps == null) {
docMap = null;
} else {
docMap = docMaps.get(i);
}
MergeReader reader = new MergeReader(bkd, docMap);
if (reader.next()) {
queue.add(reader);
}
}
OneDimensionBKDWriter oneDimWriter = new OneDimensionBKDWriter(out);
while (queue.size() != 0) {
MergeReader reader = queue.top();
// System.out.println("iter reader=" + reader);
oneDimWriter.add(reader.state.scratchDataPackedValue, reader.docID);
if (reader.next()) {
queue.updateTop();
} else {
// This segment was exhausted
queue.pop();
}
}
return oneDimWriter.finish();
}
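// A minimal usage sketch (hypothetical names, only intended to show the call shape): a caller that
// already holds sorted 1D segments could do roughly
//   BKDWriter writer = new BKDWriter(maxDoc, dir, "tmp", 1, 1, 4, 1024, 16.0, totalPoints);
//   long indexFP = writer.merge(dataOut, docMaps, readers); // -1 if every doc with values was deleted
// instead of re-adding each point through add(), letting the merge sort above stream the values.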
// Reused when writing leaf blocks
private final ByteBuffersDataOutput scratchOut = ByteBuffersDataOutput.newResettableInstance();
private class OneDimensionBKDWriter {
final IndexOutput out;
final List<Long> leafBlockFPs = new ArrayList<>();
final List<byte[]> leafBlockStartValues = new ArrayList<>();
final byte[] leafValues = new byte[maxPointsInLeafNode * packedBytesLength];
final int[] leafDocs = new int[maxPointsInLeafNode];
private long valueCount;
private int leafCount;
OneDimensionBKDWriter(IndexOutput out) {
if (numIndexDims != 1) {
throw new UnsupportedOperationException("numIndexDims must be 1 but got " + numIndexDims);
}
if (pointCount != 0) {
throw new IllegalStateException("cannot mix add and merge");
}
// Catch user silliness:
if (finished == true) {
throw new IllegalStateException("already finished");
}
// Mark that we already finished:
finished = true;
this.out = out;
lastPackedValue = new byte[packedBytesLength];
}
// for asserts
final byte[] lastPackedValue;
private int lastDocID;
void add(byte[] packedValue, int docID) throws IOException {
assert valueInOrder(valueCount + leafCount,
0, lastPackedValue, packedValue, 0, docID, lastDocID);
System.arraycopy(packedValue, 0, leafValues, leafCount * packedBytesLength, packedBytesLength);
leafDocs[leafCount] = docID;
docsSeen.set(docID);
leafCount++;
if (valueCount + leafCount > totalPointCount) {
throw new IllegalStateException("totalPointCount=" + totalPointCount + " was passed when we were created, but we just hit " + (valueCount + leafCount) + " values");
}
if (leafCount == maxPointsInLeafNode) {
// We write a block once we hit exactly the max count ... this is different from
// when we write N > 1 dimensional points where we write between max/2 and max per leaf block
writeLeafBlock();
leafCount = 0;
}
assert (lastDocID = docID) >= 0; // only assign when asserts are enabled
}
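// For illustration (assumed values): with maxPointsInLeafNode=1024, adding 2,500 points in sorted
// order writes two full leaf blocks of 1024 values here, and finish() below flushes the remaining
// 452 values as a final, partially filled block.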
public long finish() throws IOException {
if (leafCount > 0) {
writeLeafBlock();
leafCount = 0;
}
if (valueCount == 0) {
return -1;
}
pointCount = valueCount;
long indexFP = out.getFilePointer();
int numInnerNodes = leafBlockStartValues.size();
//System.out.println("BKDW: now rotate numInnerNodes=" + numInnerNodes + " leafBlockStarts=" + leafBlockStartValues.size());
byte[] index = new byte[(1+numInnerNodes) * (1+bytesPerDim)];
rotateToTree(1, 0, numInnerNodes, index, leafBlockStartValues);
long[] arr = new long[leafBlockFPs.size()];
for(int i=0;i<leafBlockFPs.size();i++) {
arr[i] = leafBlockFPs.get(i);
}
writeIndex(out, maxPointsInLeafNode, arr, index);
return indexFP;
}
private void writeLeafBlock() throws IOException {
assert leafCount != 0;
if (valueCount == 0) {
System.arraycopy(leafValues, 0, minPackedValue, 0, packedIndexBytesLength);
}
System.arraycopy(leafValues, (leafCount - 1) * packedBytesLength, maxPackedValue, 0, packedIndexBytesLength);
valueCount += leafCount;
if (leafBlockFPs.size() > 0) {
// Save the first (minimum) value in each leaf block except the first, to build the split value index in the end:
leafBlockStartValues.add(ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength));
}
leafBlockFPs.add(out.getFilePointer());
checkMaxLeafNodeCount(leafBlockFPs.size());
// Find per-dim common prefix:
int offset = (leafCount - 1) * packedBytesLength;
int prefix = Arrays.mismatch(leafValues, 0, bytesPerDim, leafValues, offset, offset + bytesPerDim);
if (prefix == -1) {
prefix = bytesPerDim;
}
commonPrefixLengths[0] = prefix;
assert scratchOut.size() == 0;
writeLeafBlockDocs(scratchOut, leafDocs, 0, leafCount);
writeCommonPrefixes(scratchOut, commonPrefixLengths, leafValues);
scratchBytesRef1.length = packedBytesLength;
scratchBytesRef1.bytes = leafValues;
final IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {
@Override
public BytesRef apply(int i) {
scratchBytesRef1.offset = packedBytesLength * i;
return scratchBytesRef1;
}
};
assert valuesInOrderAndBounds(leafCount, 0, ArrayUtil.copyOfSubArray(leafValues, 0, packedBytesLength),
ArrayUtil.copyOfSubArray(leafValues, (leafCount - 1) * packedBytesLength, leafCount * packedBytesLength),
packedValues, leafDocs, 0);
writeLeafBlockPackedValues(scratchOut, commonPrefixLengths, leafCount, 0, packedValues);
scratchOut.copyTo(out);
scratchOut.reset();
}
}
// TODO: there must be a simpler way?
private void rotateToTree(int nodeID, int offset, int count, byte[] index, List<byte[]> leafBlockStartValues) {
//System.out.println("ROTATE: nodeID=" + nodeID + " offset=" + offset + " count=" + count + " bpd=" + bytesPerDim + " index.length=" + index.length);
if (count == 1) {
// Leaf index node
//System.out.println(" leaf index node");
//System.out.println(" index[" + nodeID + "] = blockStartValues[" + offset + "]");
System.arraycopy(leafBlockStartValues.get(offset), 0, index, nodeID*(1+bytesPerDim)+1, bytesPerDim);
} else if (count > 1) {
// Internal index node: binary partition of count
int countAtLevel = 1;
int totalCount = 0;
while (true) {
int countLeft = count - totalCount;
//System.out.println(" cycle countLeft=" + countLeft + " countAtLevel=" + countAtLevel);
if (countLeft <= countAtLevel) {
// This is the last level, possibly partially filled:
int lastLeftCount = Math.min(countAtLevel/2, countLeft);
assert lastLeftCount >= 0;
int leftHalf = (totalCount-1)/2 + lastLeftCount;
int rootOffset = offset + leftHalf;
/*
System.out.println(" last left count " + lastLeftCount);
System.out.println(" leftHalf " + leftHalf + " rightHalf=" + (count-leftHalf-1));
System.out.println(" rootOffset=" + rootOffset);
*/
System.arraycopy(leafBlockStartValues.get(rootOffset), 0, index, nodeID*(1+bytesPerDim)+1, bytesPerDim);
//System.out.println(" index[" + nodeID + "] = blockStartValues[" + rootOffset + "]");
// TODO: we could optimize/specialize, when we know it's simply fully balanced binary tree
// under here, to save this while loop on each recursion
// Recurse left
rotateToTree(2*nodeID, offset, leftHalf, index, leafBlockStartValues);
// Recurse right
rotateToTree(2*nodeID+1, rootOffset+1, count-leftHalf-1, index, leafBlockStartValues);
return;
}
totalCount += countAtLevel;
countAtLevel *= 2;
}
} else {
assert count == 0;
}
}
// TODO: if we fixed each partition step to just record the file offset at the "split point", we could probably handle variable length
// encoding and not have our own ByteSequencesReader/Writer
// useful for debugging:
/*
private void printPathSlice(String desc, PathSlice slice, int dim) throws IOException {
System.out.println(" " + desc + " dim=" + dim + " count=" + slice.count + ":");
try(PointReader r = slice.writer.getReader(slice.start, slice.count)) {
int count = 0;
while (r.next()) {
byte[] v = r.packedValue();
System.out.println(" " + count + ": " + new BytesRef(v, dim*bytesPerDim, bytesPerDim));
count++;
if (count == slice.count) {
break;
}
}
}
}
*/
private void checkMaxLeafNodeCount(int numLeaves) {
if ((1+bytesPerDim) * (long) numLeaves > ArrayUtil.MAX_ARRAY_LENGTH) {
throw new IllegalStateException("too many nodes; increase maxPointsInLeafNode (currently " + maxPointsInLeafNode + ") and reindex");
}
}
/** Writes the BKD tree to the provided {@link IndexOutput} and returns the file offset where index was written. */
public long finish(IndexOutput out) throws IOException {
// System.out.println("\nBKDTreeWriter.finish pointCount=" + pointCount + " out=" + out + " heapWriter=" + heapPointWriter);
// TODO: specialize the 1D case? it's much faster at indexing time (no partitioning on recurse...)
// Catch user silliness:
if (finished == true) {
throw new IllegalStateException("already finished");
}
if (pointCount == 0) {
throw new IllegalStateException("must index at least one point");
}
//mark as finished
finished = true;
pointWriter.close();
BKDRadixSelector.PathSlice points = new BKDRadixSelector.PathSlice(pointWriter, 0, pointCount);
//clean up pointers
tempInput = null;
pointWriter = null;
long countPerLeaf = pointCount;
long innerNodeCount = 1;
while (countPerLeaf > maxPointsInLeafNode) {
countPerLeaf = (countPerLeaf+1)/2;
innerNodeCount *= 2;
}
int numLeaves = (int) innerNodeCount;
checkMaxLeafNodeCount(numLeaves);
// NOTE: we could save the 1+ here, to use a bit less heap at search time, but then we'd need a somewhat costly check at each
// step of the recursion to recompute the split dim:
// Indexed by nodeID, but first (root) nodeID is 1. We do 1+ because the lead byte at each recursion says which dim we split on.
byte[] splitPackedValues = new byte[Math.toIntExact(numLeaves*(1+bytesPerDim))];
// +1 because leaf count is power of 2 (e.g. 8), and innerNodeCount is power of 2 minus 1 (e.g. 7)
long[] leafBlockFPs = new long[numLeaves];
// Make sure the math above "worked":
assert pointCount / numLeaves <= maxPointsInLeafNode: "pointCount=" + pointCount + " numLeaves=" + numLeaves + " maxPointsInLeafNode=" + maxPointsInLeafNode;
//We re-use the selector so we do not need to create an object every time.
BKDRadixSelector radixSelector = new BKDRadixSelector(numDataDims, bytesPerDim, maxPointsSortInHeap, tempDir, tempFileNamePrefix);
boolean success = false;
try {
final int[] parentSplits = new int[numIndexDims];
build(1, numLeaves, points,
out, radixSelector,
minPackedValue, maxPackedValue,
parentSplits,
splitPackedValues,
leafBlockFPs);
assert Arrays.equals(parentSplits, new int[numIndexDims]);
// If no exception, we should have cleaned everything up:
assert tempDir.getCreatedFiles().isEmpty();
//long t2 = System.nanoTime();
//System.out.println("write time: " + ((t2-t1)/1000000.0) + " msec");
success = true;
} finally {
if (success == false) {
IOUtils.deleteFilesIgnoringExceptions(tempDir, tempDir.getCreatedFiles());
}
}
//System.out.println("Total nodes: " + innerNodeCount);
// Write index:
long indexFP = out.getFilePointer();
writeIndex(out, Math.toIntExact(countPerLeaf), leafBlockFPs, splitPackedValues);
return indexFP;
}
/** Packs the two arrays, representing a balanced binary tree, into a compact byte[] structure. */
private byte[] packIndex(long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
int numLeaves = leafBlockFPs.length;
// Possibly rotate the leaf block FPs, if the index is not a fully balanced binary tree (only happens
// if it was created by OneDimensionBKDWriter). In this case the leaf nodes may straddle the two bottom
// levels of the binary tree:
if (numIndexDims == 1 && numLeaves > 1) {
int levelCount = 2;
while (true) {
if (numLeaves >= levelCount && numLeaves <= 2*levelCount) {
int lastLevel = 2*(numLeaves - levelCount);
assert lastLevel >= 0;
if (lastLevel != 0) {
// Last level is partially filled, so we must rotate the leaf FPs to match. We do this here, after loading
// at read-time, so that we can still delta code them on disk at write:
long[] newLeafBlockFPs = new long[numLeaves];
System.arraycopy(leafBlockFPs, lastLevel, newLeafBlockFPs, 0, leafBlockFPs.length - lastLevel);
System.arraycopy(leafBlockFPs, 0, newLeafBlockFPs, leafBlockFPs.length - lastLevel, lastLevel);
leafBlockFPs = newLeafBlockFPs;
}
break;
}
levelCount *= 2;
}
}
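// For illustration (assumed values): with numLeaves=5 the loop above stops at levelCount=4
// (4 <= 5 <= 8), so lastLevel = 2*(5-4) = 2 and the first two leaf FPs are moved to the end of the
// array, matching the node numbering used for a partially filled bottom level.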
/** Reused while packing the index */
ByteBuffersDataOutput writeBuffer = ByteBuffersDataOutput.newResettableInstance();
// This is the "file" we append the byte[] to:
List<byte[]> blocks = new ArrayList<>();
byte[] lastSplitValues = new byte[bytesPerDim * numIndexDims];
//System.out.println("\npack index");
int totalSize = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, 0l, blocks, 1, lastSplitValues, new boolean[numIndexDims], false);
// Compact the byte[] blocks into single byte index:
byte[] index = new byte[totalSize];
int upto = 0;
for(byte[] block : blocks) {
System.arraycopy(block, 0, index, upto, block.length);
upto += block.length;
}
assert upto == totalSize;
return index;
}
/** Appends the current contents of writeBuffer as another block on the growing in-memory file */
private int appendBlock(ByteBuffersDataOutput writeBuffer, List<byte[]> blocks) throws IOException {
byte[] block = writeBuffer.toArrayCopy();
blocks.add(block);
writeBuffer.reset();
return block.length;
}
/**
* lastSplitValues is per-dimension split value previously seen; we use this to prefix-code the split byte[] on each inner node
*/
private int recursePackIndex(ByteBuffersDataOutput writeBuffer, long[] leafBlockFPs, byte[] splitPackedValues, long minBlockFP, List<byte[]> blocks,
int nodeID, byte[] lastSplitValues, boolean[] negativeDeltas, boolean isLeft) throws IOException {
if (nodeID >= leafBlockFPs.length) {
int leafID = nodeID - leafBlockFPs.length;
//System.out.println("recursePack leaf nodeID=" + nodeID);
// In the unbalanced case it's possible the left most node only has one child:
if (leafID < leafBlockFPs.length) {
long delta = leafBlockFPs[leafID] - minBlockFP;
if (isLeft) {
assert delta == 0;
return 0;
} else {
assert nodeID == 1 || delta > 0: "nodeID=" + nodeID;
writeBuffer.writeVLong(delta);
return appendBlock(writeBuffer, blocks);
}
} else {
return 0;
}
} else {
long leftBlockFP;
if (isLeft == false) {
leftBlockFP = getLeftMostLeafBlockFP(leafBlockFPs, nodeID);
long delta = leftBlockFP - minBlockFP;
assert nodeID == 1 || delta > 0 : "expected nodeID=1 or delta > 0; got nodeID=" + nodeID + " and delta=" + delta;
writeBuffer.writeVLong(delta);
} else {
// The left tree's left most leaf block FP is always the minimal FP:
leftBlockFP = minBlockFP;
}
int address = nodeID * (1+bytesPerDim);
int splitDim = splitPackedValues[address++] & 0xff;
//System.out.println("recursePack inner nodeID=" + nodeID + " splitDim=" + splitDim + " splitValue=" + new BytesRef(splitPackedValues, address, bytesPerDim));
// find common prefix with last split value in this dim:
int prefix = Arrays.mismatch(splitPackedValues, address, address + bytesPerDim, lastSplitValues,
splitDim * bytesPerDim, splitDim * bytesPerDim + bytesPerDim);
if (prefix == -1) {
prefix = bytesPerDim;
}
//System.out.println("writeNodeData nodeID=" + nodeID + " splitDim=" + splitDim + " numDims=" + numDims + " bytesPerDim=" + bytesPerDim + " prefix=" + prefix);
int firstDiffByteDelta;
if (prefix < bytesPerDim) {
//System.out.println(" delta byte cur=" + Integer.toHexString(splitPackedValues[address+prefix]&0xFF) + " prev=" + Integer.toHexString(lastSplitValues[splitDim * bytesPerDim + prefix]&0xFF) + " negated?=" + negativeDeltas[splitDim]);
firstDiffByteDelta = (splitPackedValues[address+prefix]&0xFF) - (lastSplitValues[splitDim * bytesPerDim + prefix]&0xFF);
if (negativeDeltas[splitDim]) {
firstDiffByteDelta = -firstDiffByteDelta;
}
//System.out.println(" delta=" + firstDiffByteDelta);
assert firstDiffByteDelta > 0;
} else {
firstDiffByteDelta = 0;
}
// pack the prefix, splitDim and delta first diff byte into a single vInt:
int code = (firstDiffByteDelta * (1+bytesPerDim) + prefix) * numIndexDims + splitDim;
//System.out.println(" code=" + code);
//System.out.println(" splitValue=" + new BytesRef(splitPackedValues, address, bytesPerDim));
writeBuffer.writeVInt(code);
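// For illustration (assumed values): with bytesPerDim=4, numIndexDims=2, prefix=2,
// firstDiffByteDelta=3 and splitDim=1, code = (3*(1+4) + 2)*2 + 1 = 35; a reader can recover
// splitDim = 35 % 2, prefix = (35/2) % 5 and firstDiffByteDelta = (35/2) / 5.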
// write the split value, prefix coded vs. our parent's split value:
int suffix = bytesPerDim - prefix;
byte[] savSplitValue = new byte[suffix];
if (suffix > 1) {
writeBuffer.writeBytes(splitPackedValues, address+prefix+1, suffix-1);
}
byte[] cmp = lastSplitValues.clone();
System.arraycopy(lastSplitValues, splitDim * bytesPerDim + prefix, savSplitValue, 0, suffix);
// copy our split value into lastSplitValues for our children to prefix-code against
System.arraycopy(splitPackedValues, address+prefix, lastSplitValues, splitDim * bytesPerDim + prefix, suffix);
int numBytes = appendBlock(writeBuffer, blocks);
// placeholder for left-tree numBytes; we need this so that at search time if we only need to recurse into the right sub-tree we can
// quickly seek to its starting point
int idxSav = blocks.size();
blocks.add(null);
boolean savNegativeDelta = negativeDeltas[splitDim];
negativeDeltas[splitDim] = true;
int leftNumBytes = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, leftBlockFP, blocks, 2*nodeID, lastSplitValues, negativeDeltas, true);
if (nodeID * 2 < leafBlockFPs.length) {
writeBuffer.writeVInt(leftNumBytes);
} else {
assert leftNumBytes == 0: "leftNumBytes=" + leftNumBytes;
}
byte[] bytes2 = writeBuffer.toArrayCopy();
writeBuffer.reset();
// replace our placeholder:
blocks.set(idxSav, bytes2);
negativeDeltas[splitDim] = false;
int rightNumBytes = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, leftBlockFP, blocks, 2*nodeID+1, lastSplitValues, negativeDeltas, false);
negativeDeltas[splitDim] = savNegativeDelta;
// restore lastSplitValues to what caller originally passed us:
System.arraycopy(savSplitValue, 0, lastSplitValues, splitDim * bytesPerDim + prefix, suffix);
assert Arrays.equals(lastSplitValues, cmp);
return numBytes + bytes2.length + leftNumBytes + rightNumBytes;
}
}
private long getLeftMostLeafBlockFP(long[] leafBlockFPs, int nodeID) {
// TODO: can we do this cheaper, e.g. a closed form solution instead of while loop? Or
// change the recursion while packing the index to return this left-most leaf block FP
// from each recursion instead?
//
// Still, the overall cost here is minor: this method's cost is O(log(N)), and while writing
// we call it O(N) times (N = number of leaf blocks)
while (nodeID < leafBlockFPs.length) {
nodeID *= 2;
}
int leafID = nodeID - leafBlockFPs.length;
long result = leafBlockFPs[leafID];
if (result < 0) {
throw new AssertionError(result + " for leaf " + leafID);
}
return result;
}
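// For illustration (assumed values): with 8 leaf blocks (leafBlockFPs.length == 8) and nodeID=3,
// the loop walks 3 -> 6 -> 12, so leafID = 12 - 8 = 4 and the FP of leaf 4, the left-most leaf
// under inner node 3, is returned.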
private void writeIndex(IndexOutput out, int countPerLeaf, long[] leafBlockFPs, byte[] splitPackedValues) throws IOException {
byte[] packedIndex = packIndex(leafBlockFPs, splitPackedValues);
writeIndex(out, countPerLeaf, leafBlockFPs.length, packedIndex);
}
private void writeIndex(IndexOutput out, int countPerLeaf, int numLeaves, byte[] packedIndex) throws IOException {
CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
out.writeVInt(numDataDims);
out.writeVInt(numIndexDims);
out.writeVInt(countPerLeaf);
out.writeVInt(bytesPerDim);
assert numLeaves > 0;
out.writeVInt(numLeaves);
out.writeBytes(minPackedValue, 0, packedIndexBytesLength);
out.writeBytes(maxPackedValue, 0, packedIndexBytesLength);
out.writeVLong(pointCount);
out.writeVInt(docsSeen.cardinality());
out.writeVInt(packedIndex.length);
out.writeBytes(packedIndex, 0, packedIndex.length);
}
private void writeLeafBlockDocs(DataOutput out, int[] docIDs, int start, int count) throws IOException {
assert count > 0: "maxPointsInLeafNode=" + maxPointsInLeafNode;
out.writeVInt(count);
DocIdsWriter.writeDocIds(docIDs, start, count, out);
}
private void writeLeafBlockPackedValues(DataOutput out, int[] commonPrefixLengths, int count, int sortedDim, IntFunction<BytesRef> packedValues) throws IOException {
int prefixLenSum = Arrays.stream(commonPrefixLengths).sum();
if (prefixLenSum == packedBytesLength) {
// all values in this block are equal
out.writeByte((byte) -1);
} else {
if (numIndexDims != 1) {
writeActualBounds(out, commonPrefixLengths, count, packedValues);
}
assert commonPrefixLengths[sortedDim] < bytesPerDim;
out.writeByte((byte) sortedDim);
int compressedByteOffset = sortedDim * bytesPerDim + commonPrefixLengths[sortedDim];
commonPrefixLengths[sortedDim]++;
for (int i = 0; i < count; ) {
// do run-length compression on the byte at compressedByteOffset
int runLen = runLen(packedValues, i, Math.min(i + 0xff, count), compressedByteOffset);
assert runLen <= 0xff;
BytesRef first = packedValues.apply(i);
byte prefixByte = first.bytes[first.offset + compressedByteOffset];
out.writeByte(prefixByte);
out.writeByte((byte) runLen);
writeLeafBlockPackedValuesRange(out, commonPrefixLengths, i, i + runLen, packedValues);
i += runLen;
assert i <= count;
}
}
}
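// For illustration (assumed values): if the five values of a leaf carry the bytes 3,3,3,7,7 at
// compressedByteOffset, the loop above emits two runs: the byte 0x03 with runLen=3 followed by
// those three suffixes, then 0x07 with runLen=2 followed by the remaining two suffixes.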
private void writeActualBounds(DataOutput out, int[] commonPrefixLengths, int count, IntFunction<BytesRef> packedValues) throws IOException {
for (int dim = 0; dim < numIndexDims; ++dim) {
int commonPrefixLength = commonPrefixLengths[dim];
int suffixLength = bytesPerDim - commonPrefixLength;
if (suffixLength > 0) {
BytesRef[] minMax = computeMinMax(count, packedValues, dim * bytesPerDim + commonPrefixLength, suffixLength);
BytesRef min = minMax[0];
BytesRef max = minMax[1];
out.writeBytes(min.bytes, min.offset, min.length);
out.writeBytes(max.bytes, max.offset, max.length);
}
}
}
/** Return an array that contains the min and max values for the [offset, offset+length] interval
* of the given {@link BytesRef}s. */
private static BytesRef[] computeMinMax(int count, IntFunction<BytesRef> packedValues, int offset, int length) {
assert length > 0;
BytesRefBuilder min = new BytesRefBuilder();
BytesRefBuilder max = new BytesRefBuilder();
BytesRef first = packedValues.apply(0);
min.copyBytes(first.bytes, first.offset + offset, length);
max.copyBytes(first.bytes, first.offset + offset, length);
for (int i = 1; i < count; ++i) {
BytesRef candidate = packedValues.apply(i);
if (Arrays.compareUnsigned(min.bytes(), 0, length, candidate.bytes, candidate.offset + offset, candidate.offset + offset + length) > 0) {
min.copyBytes(candidate.bytes, candidate.offset + offset, length);
} else if (Arrays.compareUnsigned(max.bytes(), 0, length, candidate.bytes, candidate.offset + offset, candidate.offset + offset + length) < 0) {
max.copyBytes(candidate.bytes, candidate.offset + offset, length);
}
}
return new BytesRef[]{min.get(), max.get()};
}
private void writeLeafBlockPackedValuesRange(DataOutput out, int[] commonPrefixLengths, int start, int end, IntFunction<BytesRef> packedValues) throws IOException {
for (int i = start; i < end; ++i) {
BytesRef ref = packedValues.apply(i);
assert ref.length == packedBytesLength;
for(int dim=0;dim<numDataDims;dim++) {
int prefix = commonPrefixLengths[dim];
out.writeBytes(ref.bytes, ref.offset + dim*bytesPerDim + prefix, bytesPerDim-prefix);
}
}
}
private static int runLen(IntFunction<BytesRef> packedValues, int start, int end, int byteOffset) {
BytesRef first = packedValues.apply(start);
byte b = first.bytes[first.offset + byteOffset];
for (int i = start + 1; i < end; ++i) {
BytesRef ref = packedValues.apply(i);
byte b2 = ref.bytes[ref.offset + byteOffset];
assert Byte.toUnsignedInt(b2) >= Byte.toUnsignedInt(b);
if (b != b2) {
return i - start;
}
}
return end - start;
}
private void writeCommonPrefixes(DataOutput out, int[] commonPrefixes, byte[] packedValue) throws IOException {
for(int dim=0;dim<numDataDims;dim++) {
out.writeVInt(commonPrefixes[dim]);
//System.out.println(commonPrefixes[dim] + " of " + bytesPerDim);
out.writeBytes(packedValue, dim*bytesPerDim, commonPrefixes[dim]);
}
}
@Override
public void close() throws IOException {
finished = true;
if (tempInput != null) {
// NOTE: this should only happen on exception, e.g. caller calls close w/o calling finish:
try {
tempInput.close();
} finally {
tempDir.deleteFile(tempInput.getName());
tempInput = null;
}
}
}
/** Called on exception, to check whether the checksum is also corrupt in this source, and add that
* information (checksum matched or didn't) as a suppressed exception. */
private Error verifyChecksum(Throwable priorException, PointWriter writer) throws IOException {
assert priorException != null;
// TODO: we could improve this, to always validate checksum as we recurse, if we shared left and
// right reader after recursing to children, and possibly within recursed children,
// since all together they make a single pass through the file. But this is a sizable re-org,
// and would mean leaving readers (IndexInputs) open for longer:
if (writer instanceof OfflinePointWriter) {
// We are reading from a temp file; go verify the checksum:
String tempFileName = ((OfflinePointWriter) writer).name;
if (tempDir.getCreatedFiles().contains(tempFileName)) {
try (ChecksumIndexInput in = tempDir.openChecksumInput(tempFileName, IOContext.READONCE)) {
CodecUtil.checkFooter(in, priorException);
}
}
}
// We are reading from heap; nothing to add:
throw IOUtils.rethrowAlways(priorException);
}
/** Called only in assert */
private boolean valueInBounds(BytesRef packedValue, byte[] minPackedValue, byte[] maxPackedValue) {
for(int dim=0;dim<numIndexDims;dim++) {
int offset = bytesPerDim*dim;
if (Arrays.compareUnsigned(packedValue.bytes, packedValue.offset + offset, packedValue.offset + offset + bytesPerDim, minPackedValue, offset, offset + bytesPerDim) < 0) {
return false;
}
if (Arrays.compareUnsigned(packedValue.bytes, packedValue.offset + offset, packedValue.offset + offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) > 0) {
return false;
}
}
return true;
}
/**
* Pick the next dimension to split.
* @param minPackedValue the min values for all dimensions
* @param maxPackedValue the max values for all dimensions
* @param parentSplits how many times each dim has been split on the parent levels
* @return the dimension to split
*/
protected int split(byte[] minPackedValue, byte[] maxPackedValue, int[] parentSplits) {
// First look at whether there is a dimension that has been split less than half as often as
// the dim that has the most splits, and return it if such a dimension exists and its values
// are not all equal. This helps ensure all dimensions are indexed.
int maxNumSplits = 0;
for (int numSplits : parentSplits) {
maxNumSplits = Math.max(maxNumSplits, numSplits);
}
for (int dim = 0; dim < numIndexDims; ++dim) {
final int offset = dim * bytesPerDim;
if (parentSplits[dim] < maxNumSplits / 2 &&
Arrays.compareUnsigned(minPackedValue, offset, offset + bytesPerDim, maxPackedValue, offset, offset + bytesPerDim) != 0) {
return dim;
}
}
// Find which dim has the largest span so we can split on it:
int splitDim = -1;
for(int dim=0;dim<numIndexDims;dim++) {
NumericUtils.subtract(bytesPerDim, dim, maxPackedValue, minPackedValue, scratchDiff);
if (splitDim == -1 || Arrays.compareUnsigned(scratchDiff, 0, bytesPerDim, scratch1, 0, bytesPerDim) > 0) {
System.arraycopy(scratchDiff, 0, scratch1, 0, bytesPerDim);
splitDim = dim;
}
}
//System.out.println("SPLIT: " + splitDim);
return splitDim;
}
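// For illustration (assumed values): with numIndexDims=2 and parentSplits = {6, 1}, dimension 1 has
// been split less than half as often as dimension 0 (1 < 6/2), so it is returned as long as its min
// and max packed values differ; otherwise the second loop falls back to the widest-range dimension.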
/** Pull a partition back into heap once the point count is low enough while recursing. */
private HeapPointWriter switchToHeap(PointWriter source) throws IOException {
int count = Math.toIntExact(source.count());
try (PointReader reader = source.getReader(0, source.count());
HeapPointWriter writer = new HeapPointWriter(count, packedBytesLength)) {
for(int i=0;i<count;i++) {
boolean hasNext = reader.next();
assert hasNext;
writer.append(reader.pointValue());
}
source.destroy();
return writer;
} catch (Throwable t) {
throw verifyChecksum(t, source);
}
}
/* Recursively reorders the provided reader and writes the bkd-tree on the fly; this method is used
* when we are writing a new segment directly from IndexWriter's indexing buffer (MutablePointValues). */
private void build(int nodeID, int leafNodeOffset,
MutablePointValues reader, int from, int to,
IndexOutput out,
byte[] minPackedValue, byte[] maxPackedValue,
int[] parentSplits,
byte[] splitPackedValues,
long[] leafBlockFPs,
int[] spareDocIds) throws IOException {
if (nodeID >= leafNodeOffset) {
// leaf node
final int count = to - from;
assert count <= maxPointsInLeafNode;
// Compute common prefixes
Arrays.fill(commonPrefixLengths, bytesPerDim);
reader.getValue(from, scratchBytesRef1);
for (int i = from + 1; i < to; ++i) {
reader.getValue(i, scratchBytesRef2);
for (int dim=0;dim<numDataDims;dim++) {
final int offset = dim * bytesPerDim;
int dimensionPrefixLength = commonPrefixLengths[dim];
commonPrefixLengths[dim] = Arrays.mismatch(scratchBytesRef1.bytes, scratchBytesRef1.offset + offset,
scratchBytesRef1.offset + offset + dimensionPrefixLength,
scratchBytesRef2.bytes, scratchBytesRef2.offset + offset,
scratchBytesRef2.offset + offset + dimensionPrefixLength);
if (commonPrefixLengths[dim] == -1) {
commonPrefixLengths[dim] = dimensionPrefixLength;
}
}
}
// Find the dimension that has the least number of unique bytes at commonPrefixLengths[dim]
FixedBitSet[] usedBytes = new FixedBitSet[numDataDims];
for (int dim = 0; dim < numDataDims; ++dim) {
if (commonPrefixLengths[dim] < bytesPerDim) {
usedBytes[dim] = new FixedBitSet(256);
}
}
for (int i = from + 1; i < to; ++i) {
for (int dim=0;dim<numDataDims;dim++) {
if (usedBytes[dim] != null) {
byte b = reader.getByteAt(i, dim * bytesPerDim + commonPrefixLengths[dim]);
usedBytes[dim].set(Byte.toUnsignedInt(b));
}
}
}
int sortedDim = 0;
int sortedDimCardinality = Integer.MAX_VALUE;
for (int dim = 0; dim < numDataDims; ++dim) {
if (usedBytes[dim] != null) {
final int cardinality = usedBytes[dim].cardinality();
if (cardinality < sortedDimCardinality) {
sortedDim = dim;
sortedDimCardinality = cardinality;
}
}
}
// sort by sortedDim
MutablePointsReaderUtils.sortByDim(sortedDim, bytesPerDim, commonPrefixLengths,
reader, from, to, scratchBytesRef1, scratchBytesRef2);
// Save the block file pointer:
leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
assert scratchOut.size() == 0;
// Write doc IDs
int[] docIDs = spareDocIds;
for (int i = from; i < to; ++i) {
docIDs[i - from] = reader.getDocID(i);
}
//System.out.println("writeLeafBlock pos=" + out.getFilePointer());
writeLeafBlockDocs(scratchOut, docIDs, 0, count);
// Write the common prefixes:
reader.getValue(from, scratchBytesRef1);
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset, scratch1, 0, packedBytesLength);
writeCommonPrefixes(scratchOut, commonPrefixLengths, scratch1);
// Write the full values:
IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {
@Override
public BytesRef apply(int i) {
reader.getValue(from + i, scratchBytesRef1);
return scratchBytesRef1;
}
};
assert valuesInOrderAndBounds(count, sortedDim, minPackedValue, maxPackedValue, packedValues,
docIDs, 0);
writeLeafBlockPackedValues(scratchOut, commonPrefixLengths, count, sortedDim, packedValues);
scratchOut.copyTo(out);
scratchOut.reset();
} else {
// inner node
// compute the split dimension and partition around it
final int splitDim = split(minPackedValue, maxPackedValue, parentSplits);
final int mid = (from + to + 1) >>> 1;
int commonPrefixLen = Arrays.mismatch(minPackedValue, splitDim * bytesPerDim,
splitDim * bytesPerDim + bytesPerDim, maxPackedValue, splitDim * bytesPerDim,
splitDim * bytesPerDim + bytesPerDim);
if (commonPrefixLen == -1) {
commonPrefixLen = bytesPerDim;
}
MutablePointsReaderUtils.partition(maxDoc, splitDim, bytesPerDim, commonPrefixLen,
reader, from, to, mid, scratchBytesRef1, scratchBytesRef2);
// set the split value
final int address = nodeID * (1+bytesPerDim);
splitPackedValues[address] = (byte) splitDim;
reader.getValue(mid, scratchBytesRef1);
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim, splitPackedValues, address + 1, bytesPerDim);
byte[] minSplitPackedValue = ArrayUtil.copyOfSubArray(minPackedValue, 0, packedIndexBytesLength);
byte[] maxSplitPackedValue = ArrayUtil.copyOfSubArray(maxPackedValue, 0, packedIndexBytesLength);
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
System.arraycopy(scratchBytesRef1.bytes, scratchBytesRef1.offset + splitDim * bytesPerDim,
maxSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
// recurse
parentSplits[splitDim]++;
build(nodeID * 2, leafNodeOffset, reader, from, mid, out,
minPackedValue, maxSplitPackedValue, parentSplits,
splitPackedValues, leafBlockFPs, spareDocIds);
build(nodeID * 2 + 1, leafNodeOffset, reader, mid, to, out,
minSplitPackedValue, maxPackedValue, parentSplits,
splitPackedValues, leafBlockFPs, spareDocIds);
parentSplits[splitDim]--;
}
}
/** The point writer contains the data that is going to be split using radix selection.
* This method is used when we are merging previously written segments, in the numDims > 1 case. */
private void build(int nodeID, int leafNodeOffset,
BKDRadixSelector.PathSlice points,
IndexOutput out,
BKDRadixSelector radixSelector,
byte[] minPackedValue, byte[] maxPackedValue,
int[] parentSplits,
byte[] splitPackedValues,
long[] leafBlockFPs) throws IOException {
if (nodeID >= leafNodeOffset) {
// Leaf node: write block
// We can write the block in any order so by default we write it sorted by the dimension that has the
// least number of unique bytes at commonPrefixLengths[dim], which makes compression more efficient
HeapPointWriter heapSource;
if (points.writer instanceof HeapPointWriter == false) {
// Adversarial cases can cause this, e.g. merging big segments with most of the points deleted
heapSource = switchToHeap(points.writer);
} else {
heapSource = (HeapPointWriter) points.writer;
}
int from = Math.toIntExact(points.start);
int to = Math.toIntExact(points.start + points.count);
//we store common prefix on scratch1
computeCommonPrefixLength(heapSource, scratch1, from, to);
int sortedDim = 0;
int sortedDimCardinality = Integer.MAX_VALUE;
FixedBitSet[] usedBytes = new FixedBitSet[numDataDims];
for (int dim = 0; dim < numDataDims; ++dim) {
if (commonPrefixLengths[dim] < bytesPerDim) {
usedBytes[dim] = new FixedBitSet(256);
}
}
//Find the dimension to compress
for (int dim = 0; dim < numDataDims; dim++) {
int prefix = commonPrefixLengths[dim];
if (prefix < bytesPerDim) {
int offset = dim * bytesPerDim;
for (int i = from; i < to; ++i) {
PointValue value = heapSource.getPackedValueSlice(i);
BytesRef packedValue = value.packedValue();
int bucket = packedValue.bytes[packedValue.offset + offset + prefix] & 0xff;
usedBytes[dim].set(bucket);
}
int cardinality = usedBytes[dim].cardinality();
if (cardinality < sortedDimCardinality) {
sortedDim = dim;
sortedDimCardinality = cardinality;
}
}
}
// sort the chosen dimension
radixSelector.heapRadixSort(heapSource, from, to, sortedDim, commonPrefixLengths[sortedDim]);
// Save the block file pointer:
leafBlockFPs[nodeID - leafNodeOffset] = out.getFilePointer();
//System.out.println(" write leaf block @ fp=" + out.getFilePointer());
// Write docIDs first, as their own chunk, so that at intersect time we can add all docIDs w/o
// loading the values:
int count = to - from;
assert count > 0: "nodeID=" + nodeID + " leafNodeOffset=" + leafNodeOffset;
writeLeafBlockDocs(out, heapSource.docIDs, from, count);
// TODO: minor opto: we don't really have to write the actual common prefixes, because BKDReader on recursing can regenerate it for us
// from the index, much like how terms dict does so from the FST:
// Write the common prefixes:
writeCommonPrefixes(out, commonPrefixLengths, scratch1);
// Write the full values:
IntFunction<BytesRef> packedValues = new IntFunction<BytesRef>() {
final BytesRef scratch = new BytesRef();
{
scratch.length = packedBytesLength;
}
@Override
public BytesRef apply(int i) {
PointValue value = heapSource.getPackedValueSlice(from + i);
return value.packedValue();
}
};
assert valuesInOrderAndBounds(count, sortedDim, minPackedValue, maxPackedValue, packedValues,
heapSource.docIDs, from);
writeLeafBlockPackedValues(out, commonPrefixLengths, count, sortedDim, packedValues);
} else {
// Inner node: partition/recurse
int splitDim;
if (numIndexDims > 1) {
splitDim = split(minPackedValue, maxPackedValue, parentSplits);
} else {
splitDim = 0;
}
assert nodeID < splitPackedValues.length : "nodeID=" + nodeID + " splitValues.length=" + splitPackedValues.length;
// How many points will be in the left tree:
long rightCount = points.count / 2;
long leftCount = points.count - rightCount;
BKDRadixSelector.PathSlice[] slices = new BKDRadixSelector.PathSlice[2];
int commonPrefixLen = Arrays.mismatch(minPackedValue, splitDim * bytesPerDim,
splitDim * bytesPerDim + bytesPerDim, maxPackedValue, splitDim * bytesPerDim,
splitDim * bytesPerDim + bytesPerDim);
if (commonPrefixLen == -1) {
commonPrefixLen = bytesPerDim;
}
byte[] splitValue = radixSelector.select(points, slices, points.start, points.start + points.count, points.start + leftCount, splitDim, commonPrefixLen);
int address = nodeID * (1 + bytesPerDim);
splitPackedValues[address] = (byte) splitDim;
System.arraycopy(splitValue, 0, splitPackedValues, address + 1, bytesPerDim);
byte[] minSplitPackedValue = new byte[packedIndexBytesLength];
System.arraycopy(minPackedValue, 0, minSplitPackedValue, 0, packedIndexBytesLength);
byte[] maxSplitPackedValue = new byte[packedIndexBytesLength];
System.arraycopy(maxPackedValue, 0, maxSplitPackedValue, 0, packedIndexBytesLength);
System.arraycopy(splitValue, 0, minSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
System.arraycopy(splitValue, 0, maxSplitPackedValue, splitDim * bytesPerDim, bytesPerDim);
parentSplits[splitDim]++;
// Recurse on left tree:
build(2 * nodeID, leafNodeOffset, slices[0],
out, radixSelector, minPackedValue, maxSplitPackedValue,
parentSplits, splitPackedValues, leafBlockFPs);
// Recurse on right tree:
build(2 * nodeID + 1, leafNodeOffset, slices[1],
out, radixSelector, minSplitPackedValue, maxPackedValue
, parentSplits, splitPackedValues, leafBlockFPs);
parentSplits[splitDim]--;
}
}
private void computeCommonPrefixLength(HeapPointWriter heapPointWriter, byte[] commonPrefix, int from, int to) {
Arrays.fill(commonPrefixLengths, bytesPerDim);
PointValue value = heapPointWriter.getPackedValueSlice(from);
BytesRef packedValue = value.packedValue();
for (int dim = 0; dim < numDataDims; dim++) {
System.arraycopy(packedValue.bytes, packedValue.offset + dim * bytesPerDim, commonPrefix, dim * bytesPerDim, bytesPerDim);
}
for (int i = from + 1; i < to; i++) {
value = heapPointWriter.getPackedValueSlice(i);
packedValue = value.packedValue();
for (int dim = 0; dim < numDataDims; dim++) {
if (commonPrefixLengths[dim] != 0) {
int j = Arrays.mismatch(commonPrefix, dim * bytesPerDim, dim * bytesPerDim + commonPrefixLengths[dim], packedValue.bytes, packedValue.offset + dim * bytesPerDim, packedValue.offset + dim * bytesPerDim + commonPrefixLengths[dim]);
if (j != -1) {
commonPrefixLengths[dim] = j;
}
}
}
}
}
// only called from assert
private boolean valuesInOrderAndBounds(int count, int sortedDim, byte[] minPackedValue, byte[] maxPackedValue,
IntFunction<BytesRef> values, int[] docs, int docsOffset) throws IOException {
byte[] lastPackedValue = new byte[packedBytesLength];
int lastDoc = -1;
for (int i=0;i<count;i++) {
BytesRef packedValue = values.apply(i);
assert packedValue.length == packedBytesLength;
assert valueInOrder(i, sortedDim, lastPackedValue, packedValue.bytes, packedValue.offset,
docs[docsOffset + i], lastDoc);
lastDoc = docs[docsOffset + i];
// Make sure this value does in fact fall within this leaf cell:
assert valueInBounds(packedValue, minPackedValue, maxPackedValue);
}
return true;
}
// only called from assert
private boolean valueInOrder(long ord, int sortedDim, byte[] lastPackedValue, byte[] packedValue, int packedValueOffset,
int doc, int lastDoc) {
int dimOffset = sortedDim * bytesPerDim;
if (ord > 0) {
int cmp = Arrays.compareUnsigned(lastPackedValue, dimOffset, dimOffset + bytesPerDim, packedValue, packedValueOffset + dimOffset, packedValueOffset + dimOffset + bytesPerDim);
if (cmp > 0) {
throw new AssertionError("values out of order: last value=" + new BytesRef(lastPackedValue) + " current value=" + new BytesRef(packedValue, packedValueOffset, packedBytesLength) + " ord=" + ord);
}
if (cmp == 0 && doc < lastDoc) {
throw new AssertionError("docs out of order: last doc=" + lastDoc + " current doc=" + doc + " ord=" + ord);
}
}
System.arraycopy(packedValue, packedValueOffset, lastPackedValue, 0, packedBytesLength);
return true;
}
}
| 1 | 29,486 |
Can you add a space before the equal sign? There are a couple other places where spaces are missing in this PR.
|
apache-lucene-solr
|
java
|
@@ -268,7 +268,10 @@ public class ExecutorServlet extends HttpServlet implements ConnectorParams {
continue;
}
- if (flow.getUpdateTime() > updateTime) {
+ // After removing runningFlows cache from web server, flow info will be fetched from DB directly,
+ // updateTime will be synced between web server and executor. So below if condition will always be true.
+ // TODO: will remove UpdaterThread in executorManager and deprecate handleAjaxUpdateRequest()
+ if (flow.getUpdateTime() >= updateTime) {
updateList.add(flow.toUpdateObject(updateTime));
}
}
| 1 |
/*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import com.google.common.base.Preconditions;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.log4j.Logger;
import org.codehaus.jackson.map.ObjectMapper;
import azkaban.Constants;
import azkaban.executor.ConnectorParams;
import azkaban.executor.ExecutableFlowBase;
import azkaban.executor.Executor;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import static java.util.Objects.requireNonNull;
public class ExecutorServlet extends HttpServlet implements ConnectorParams {
private static final long serialVersionUID = 1L;
private static final Logger logger = Logger.getLogger(ExecutorServlet.class
.getName());
public static final String JSON_MIME_TYPE = "application/json";
private AzkabanExecutorServer application;
private FlowRunnerManager flowRunnerManager;
public ExecutorServlet() {
super();
}
@Override
public void init(ServletConfig config) throws ServletException {
application =
(AzkabanExecutorServer) config.getServletContext().getAttribute(
Constants.AZKABAN_SERVLET_CONTEXT_KEY);
if (application == null) {
throw new IllegalStateException(
"No batch application is defined in the servlet context!");
}
flowRunnerManager = application.getFlowRunnerManager();
}
protected void writeJSON(HttpServletResponse resp, Object obj)
throws IOException {
resp.setContentType(JSON_MIME_TYPE);
ObjectMapper mapper = new ObjectMapper();
OutputStream stream = resp.getOutputStream();
mapper.writeValue(stream, obj);
}
@Override
public void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
HashMap<String, Object> respMap = new HashMap<String, Object>();
// logger.info("ExecutorServer called by " + req.getRemoteAddr());
try {
if (!hasParam(req, ACTION_PARAM)) {
logger.error("Parameter action not set");
respMap.put("error", "Parameter action not set");
} else {
String action = getParam(req, ACTION_PARAM);
if (action.equals(UPDATE_ACTION)) {
// logger.info("Updated called");
handleAjaxUpdateRequest(req, respMap);
} else if (action.equals(PING_ACTION)) {
respMap.put("status", "alive");
} else if (action.equals(RELOAD_JOBTYPE_PLUGINS_ACTION)) {
logger.info("Reloading Jobtype plugins");
handleReloadJobTypePlugins(respMap);
} else if (action.equals(ACTIVATE)) {
logger.warn("Setting ACTIVE flag to true");
setActive(true, respMap);
} else if (action.equals(GET_STATUS)) {
logger.debug("Get Executor Status: ");
getStatus(respMap);
} else if (action.equals(DEACTIVATE)) {
logger.warn("Setting ACTIVE flag to false");
setActive(false, respMap);
} else if (action.equals(SHUTDOWN)) {
shutdown(respMap);
} else {
int execid = Integer.parseInt(getParam(req, EXECID_PARAM));
String user = getParam(req, USER_PARAM, null);
logger.info("User " + user + " has called action " + action + " on "
+ execid);
if (action.equals(METADATA_ACTION)) {
handleFetchMetaDataEvent(execid, req, resp, respMap);
} else if (action.equals(LOG_ACTION)) {
handleFetchLogEvent(execid, req, resp, respMap);
} else if (action.equals(ATTACHMENTS_ACTION)) {
handleFetchAttachmentsEvent(execid, req, resp, respMap);
} else if (action.equals(EXECUTE_ACTION)) {
handleAjaxExecute(req, respMap, execid);
} else if (action.equals(STATUS_ACTION)) {
handleAjaxFlowStatus(respMap, execid);
} else if (action.equals(CANCEL_ACTION)) {
logger.info("Cancel called.");
handleAjaxCancel(respMap, execid, user);
} else if (action.equals(PAUSE_ACTION)) {
logger.info("Paused called.");
handleAjaxPause(respMap, execid, user);
} else if (action.equals(RESUME_ACTION)) {
logger.info("Resume called.");
handleAjaxResume(respMap, execid, user);
} else if (action.equals(MODIFY_EXECUTION_ACTION)) {
logger.info("Modify Execution Action");
handleModifyExecutionRequest(respMap, execid, user, req);
} else {
logger.error("action: '" + action + "' not supported.");
respMap.put("error", "action: '" + action + "' not supported.");
}
}
}
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
writeJSON(resp, respMap);
resp.flushBuffer();
}
private void handleModifyExecutionRequest(Map<String, Object> respMap,
int execId, String user, HttpServletRequest req) throws ServletException {
if (!hasParam(req, MODIFY_EXECUTION_ACTION_TYPE)) {
respMap.put(RESPONSE_ERROR, "Modification type not set.");
}
String modificationType = getParam(req, MODIFY_EXECUTION_ACTION_TYPE);
try {
if (MODIFY_RETRY_FAILURES.equals(modificationType)) {
flowRunnerManager.retryFailures(execId, user);
}
} catch (ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
private void handleFetchLogEvent(int execId, HttpServletRequest req,
HttpServletResponse resp, Map<String, Object> respMap)
throws ServletException {
String type = getParam(req, "type");
int startByte = getIntParam(req, "offset");
int length = getIntParam(req, "length");
resp.setContentType("text/plain");
resp.setCharacterEncoding("utf-8");
if (type.equals("flow")) {
LogData result;
try {
result = flowRunnerManager.readFlowLogs(execId, startByte, length);
respMap.putAll(result.toObject());
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
} else {
int attempt = getIntParam(req, "attempt", 0);
String jobId = getParam(req, "jobId");
try {
LogData result =
flowRunnerManager.readJobLogs(execId, jobId, attempt, startByte,
length);
respMap.putAll(result.toObject());
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
}
private void handleFetchAttachmentsEvent(int execId, HttpServletRequest req,
HttpServletResponse resp, Map<String, Object> respMap)
throws ServletException {
String jobId = getParam(req, "jobId");
int attempt = getIntParam(req, "attempt", 0);
try {
List<Object> result =
flowRunnerManager.readJobAttachments(execId, jobId, attempt);
respMap.put("attachments", result);
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
private void handleFetchMetaDataEvent(int execId, HttpServletRequest req,
HttpServletResponse resp, Map<String, Object> respMap)
throws ServletException {
int startByte = getIntParam(req, "offset");
int length = getIntParam(req, "length");
resp.setContentType("text/plain");
resp.setCharacterEncoding("utf-8");
int attempt = getIntParam(req, "attempt", 0);
String jobId = getParam(req, "jobId");
try {
JobMetaData result =
flowRunnerManager.readJobMetaData(execId, jobId, attempt, startByte,
length);
respMap.putAll(result.toObject());
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put("error", e.getMessage());
}
}
@SuppressWarnings("unchecked")
private void handleAjaxUpdateRequest(HttpServletRequest req,
Map<String, Object> respMap) throws ServletException, IOException {
ArrayList<Object> updateTimesList =
(ArrayList<Object>) JSONUtils.parseJSONFromString(getParam(req,
UPDATE_TIME_LIST_PARAM));
ArrayList<Object> execIDList =
(ArrayList<Object>) JSONUtils.parseJSONFromString(getParam(req,
EXEC_ID_LIST_PARAM));
ArrayList<Object> updateList = new ArrayList<Object>();
for (int i = 0; i < execIDList.size(); ++i) {
long updateTime = JSONUtils.getLongFromObject(updateTimesList.get(i));
int execId = (Integer) execIDList.get(i);
ExecutableFlowBase flow = flowRunnerManager.getExecutableFlow(execId);
if (flow == null) {
Map<String, Object> errorResponse = new HashMap<String, Object>();
errorResponse.put(RESPONSE_ERROR, "Flow does not exist");
errorResponse.put(UPDATE_MAP_EXEC_ID, execId);
updateList.add(errorResponse);
continue;
}
if (flow.getUpdateTime() > updateTime) {
updateList.add(flow.toUpdateObject(updateTime));
}
}
respMap.put(RESPONSE_UPDATED_FLOWS, updateList);
}
private void handleAjaxExecute(HttpServletRequest req,
Map<String, Object> respMap, int execId) throws ServletException {
try {
flowRunnerManager.submitFlow(execId);
} catch (ExecutorManagerException e) {
e.printStackTrace();
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxFlowStatus(Map<String, Object> respMap, int execid) {
ExecutableFlowBase flow = flowRunnerManager.getExecutableFlow(execid);
if (flow == null) {
respMap.put(STATUS_PARAM, RESPONSE_NOTFOUND);
} else {
respMap.put(STATUS_PARAM, flow.getStatus().toString());
respMap.put(RESPONSE_UPDATETIME, flow.getUpdateTime());
}
}
private void handleAjaxPause(Map<String, Object> respMap, int execid,
String user) throws ServletException {
if (user == null) {
respMap.put(RESPONSE_ERROR, "user has not been set");
return;
}
try {
flowRunnerManager.pauseFlow(execid, user);
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxResume(Map<String, Object> respMap, int execid,
String user) throws ServletException {
if (user == null) {
respMap.put(RESPONSE_ERROR, "user has not been set");
return;
}
try {
flowRunnerManager.resumeFlow(execid, user);
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (ExecutorManagerException e) {
e.printStackTrace();
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void handleAjaxCancel(Map<String, Object> respMap, int execid,
String user) throws ServletException {
if (user == null) {
respMap.put(RESPONSE_ERROR, "user has not been set");
return;
}
try {
flowRunnerManager.cancelFlow(execid, user);
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (ExecutorManagerException e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void handleReloadJobTypePlugins(Map<String, Object> respMap)
throws ServletException {
try {
flowRunnerManager.reloadJobTypePlugins();
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void setActive(boolean value, Map<String, Object> respMap)
throws ServletException {
try {
setActiveInternal(value);
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void setActiveInternal(boolean value)
throws ExecutorManagerException {
ExecutorLoader executorLoader = application.getExecutorLoader();
Executor executor = executorLoader.fetchExecutor(application.getHost(), application.getPort());
Preconditions.checkState(executor != null, "Unable to obtain self entry in DB");
if (executor.isActive() != value) {
executor.setActive(value);
executorLoader.updateExecutor(executor);
flowRunnerManager.setExecutorActive(value);
} else {
logger.warn("Set active action ignored. Executor is already " + (value? "active" : "inactive"));
}
}
/**
* Prepare the executor for shutdown.
*
* @param respMap json response object
* @throws ServletException
*/
private void shutdown(Map<String, Object> respMap)
throws ServletException {
try {
logger.warn("Shutting down executor...");
// Set the executor to inactive. Will receive no new flows.
setActiveInternal(false);
application.shutdown();
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
private void getStatus(Map<String, Object> respMap)
throws ServletException {
try {
ExecutorLoader executorLoader = application.getExecutorLoader();
final Executor executor = requireNonNull(executorLoader.fetchExecutor(application.getHost(), application.getPort()),
"The executor can not be null");
respMap.put("executor_id", Integer.toString(executor.getId()));
respMap.put("isActive", String.valueOf(executor.isActive()));
respMap.put(STATUS_PARAM, RESPONSE_SUCCESS);
} catch (Exception e) {
logger.error(e.getMessage(), e);
respMap.put(RESPONSE_ERROR, e.getMessage());
}
}
@Override
public void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
}
/**
* Duplicated code with AbstractAzkabanServlet, but ne
*/
public boolean hasParam(HttpServletRequest request, String param) {
return request.getParameter(param) != null;
}
public String getParam(HttpServletRequest request, String name)
throws ServletException {
String p = request.getParameter(name);
if (p == null)
throw new ServletException("Missing required parameter '" + name + "'.");
else
return p;
}
public String getParam(HttpServletRequest request, String name,
String defaultVal) {
String p = request.getParameter(name);
if (p == null) {
return defaultVal;
}
return p;
}
public int getIntParam(HttpServletRequest request, String name)
throws ServletException {
String p = getParam(request, name);
return Integer.parseInt(p);
}
public int getIntParam(HttpServletRequest request, String name, int defaultVal) {
if (hasParam(request, name)) {
try {
return getIntParam(request, name);
} catch (Exception e) {
return defaultVal;
}
}
return defaultVal;
}
}
| 1 | 13,078 |
Please follow the TODO naming convention.
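The comment doesn't spell out the expected convention; a common form (an assumption here, not necessarily Azkaban's house rule) tags the TODO with an owner, so the line added in the diff above might become something like:

// TODO(username): remove UpdaterThread in executorManager and deprecate handleAjaxUpdateRequest()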
|
azkaban-azkaban
|
java
|
@@ -28,6 +28,9 @@ import (
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC."
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=clusterissuers,scope=Cluster
type ClusterIssuer struct {
| 1 |
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cmacme "github.com/jetstack/cert-manager/pkg/apis/acme/v1alpha2"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=clusterissuers,scope=Cluster
type ClusterIssuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterIssuerList is a list of Issuers
type ClusterIssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ClusterIssuer `json:"items"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=issuers
type Issuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IssuerList is a list of Issuers
type IssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Issuer `json:"items"`
}
// IssuerSpec is the specification of an Issuer. This includes any
// configuration required for the issuer.
type IssuerSpec struct {
IssuerConfig `json:",inline"`
}
type IssuerConfig struct {
// +optional
ACME *cmacme.ACMEIssuer `json:"acme,omitempty"`
// +optional
CA *CAIssuer `json:"ca,omitempty"`
// +optional
Vault *VaultIssuer `json:"vault,omitempty"`
// +optional
SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"`
// +optional
Venafi *VenafiIssuer `json:"venafi,omitempty"`
}
// VenafiIssuer describes issuer configuration details for Venafi Cloud.
type VenafiIssuer struct {
// Zone is the Venafi Policy Zone to use for this issuer.
// All requests made to the Venafi platform will be restricted by the named
// zone policy.
// This field is required.
Zone string `json:"zone"`
// TPP specifies Trust Protection Platform configuration settings.
// Only one of TPP or Cloud may be specified.
// +optional
TPP *VenafiTPP `json:"tpp,omitempty"`
// Cloud specifies the Venafi cloud configuration settings.
// Only one of TPP or Cloud may be specified.
// +optional
Cloud *VenafiCloud `json:"cloud,omitempty"`
}
// VenafiTPP defines connection configuration details for a Venafi TPP instance
type VenafiTPP struct {
// URL is the base URL for the Venafi TPP instance
URL string `json:"url"`
// CredentialsRef is a reference to a Secret containing the username and
// password for the TPP server.
// The secret must contain two keys, 'username' and 'password'.
CredentialsRef cmmeta.LocalObjectReference `json:"credentialsRef"`
	// CABundle is a PEM encoded TLS certificate to use to verify connections to
// the TPP instance.
// If specified, system roots will not be used and the issuing CA for the
// TPP instance must be verifiable using the provided root.
// If not specified, the connection will be verified using the cert-manager
// system root certificates.
// +optional
CABundle []byte `json:"caBundle,omitempty"`
}
// VenafiCloud defines connection configuration details for Venafi Cloud
type VenafiCloud struct {
// URL is the base URL for Venafi Cloud
URL string `json:"url"`
// APITokenSecretRef is a secret key selector for the Venafi Cloud API token.
APITokenSecretRef cmmeta.SecretKeySelector `json:"apiTokenSecretRef"`
}
type SelfSignedIssuer struct{}
type VaultIssuer struct {
// Vault authentication
Auth VaultAuth `json:"auth"`
// Server is the vault connection address
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
// Base64 encoded CA bundle to validate Vault server certificate. Only used
// if the Server URL is using HTTPS protocol. This parameter is ignored for
// plain HTTP protocol connection. If not set the system root certificates
// are used to validate the TLS connection.
// +optional
CABundle []byte `json:"caBundle,omitempty"`
}
// Vault authentication can be configured:
// - With a secret containing a token. Cert-manager is using this token as-is.
// - With a secret containing an AppRole. This AppRole is used to authenticate to
// Vault and retrieve a token.
type VaultAuth struct {
// This Secret contains the Vault token key
// +optional
TokenSecretRef *cmmeta.SecretKeySelector `json:"tokenSecretRef,omitempty"`
	// This Secret contains an AppRole and Secret
// +optional
AppRole *VaultAppRole `json:"appRole,omitempty"`
// This contains a Role and Secret with a ServiceAccount token to
// authenticate with vault.
// +optional
Kubernetes *VaultKubernetesAuth `json:"kubernetes,omitempty"`
}
type VaultAppRole struct {
// Where the authentication path is mounted in Vault.
Path string `json:"path"`
RoleId string `json:"roleId"`
SecretRef cmmeta.SecretKeySelector `json:"secretRef"`
}
// Authenticate against Vault using a Kubernetes ServiceAccount token stored in
// a Secret.
type VaultKubernetesAuth struct {
// The value here will be used as part of the path used when authenticating
// with vault, for example if you set a value of "foo", the path used will be
// `/v1/auth/foo/login`. If unspecified, the default value "kubernetes" will
// be used.
// +optional
Path string `json:"mountPath,omitempty"`
// The required Secret field containing a Kubernetes ServiceAccount JWT used
// for authenticating with Vault. Use of 'ambient credentials' is not
// supported.
SecretRef cmmeta.SecretKeySelector `json:"secretRef"`
// A required field containing the Vault Role to assume. A Role binds a
// Kubernetes ServiceAccount with a set of Vault policies.
Role string `json:"role"`
}
type CAIssuer struct {
// SecretName is the name of the secret used to sign Certificates issued
// by this Issuer.
SecretName string `json:"secretName"`
}
// IssuerStatus contains status information about an Issuer
type IssuerStatus struct {
// +optional
Conditions []IssuerCondition `json:"conditions,omitempty"`
// +optional
ACME *cmacme.ACMEIssuerStatus `json:"acme,omitempty"`
}
// IssuerCondition contains condition information for an Issuer.
type IssuerCondition struct {
// Type of the condition, currently ('Ready').
Type IssuerConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status cmmeta.ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
// +optional
Message string `json:"message,omitempty"`
}
// IssuerConditionType represents an Issuer condition value.
type IssuerConditionType string
const (
// IssuerConditionReady represents the fact that a given Issuer condition
// is in ready state.
IssuerConditionReady IssuerConditionType = "Ready"
)
| 1 | 19,822 |
These are no longer used; changes should instead be made directly to the files in `deploy/chart/crds`. /assign @JoshVanL /unassign
|
jetstack-cert-manager
|
go
|
@@ -257,9 +257,11 @@ class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
# (h, w, num_anchors*num_attrib) -> (h*w*num_anchors, num_attrib)
pred_map = pred_map.permute(1, 2, 0).reshape(-1, self.num_attrib)
- pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])
- bbox_pred = self.bbox_coder.decode(multi_lvl_anchors[i],
- pred_map[..., :4], stride)
+ xy = torch.sigmoid(pred_map[..., :2])
+ xywh = torch.cat((xy, pred_map[..., 2:4]), axis=-1)
+ bbox_pred = self.bbox_coder.decode(multi_lvl_anchors[i], xywh,
+ stride)
+
# conf and cls
conf_pred = torch.sigmoid(pred_map[..., 4]).view(-1)
cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
| 1 |
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, normal_init
from mmcv.runner import force_fp32
from mmdet.core import (build_anchor_generator, build_assigner,
build_bbox_coder, build_sampler, images_to_levels,
multi_apply, multiclass_nms)
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
"""YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
Args:
num_classes (int): The number of object classes (w/o background)
in_channels (List[int]): Number of input channels per scale.
out_channels (List[int]): The number of output channels per scale
before the final 1x1 layer. Default: (1024, 512, 256).
anchor_generator (dict): Config dict for anchor generator
bbox_coder (dict): Config of bounding box coder.
featmap_strides (List[int]): The stride of each scale.
Should be in descending order. Default: (32, 16, 8).
        one_hot_smoother (float): Set a non-zero value to enable label smoothing.
Default: 0.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
loss_cls (dict): Config of classification loss.
loss_conf (dict): Config of confidence loss.
loss_xy (dict): Config of xy coordinate loss.
loss_wh (dict): Config of wh coordinate loss.
train_cfg (dict): Training config of YOLOV3 head. Default: None.
test_cfg (dict): Testing config of YOLOV3 head. Default: None.
"""
def __init__(self,
num_classes,
in_channels,
out_channels=(1024, 512, 256),
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'),
featmap_strides=[32, 16, 8],
one_hot_smoother=0.,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_conf=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_xy=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_wh=dict(type='MSELoss', loss_weight=1.0),
train_cfg=None,
test_cfg=None):
super(YOLOV3Head, self).__init__()
# Check params
assert (len(in_channels) == len(out_channels) == len(featmap_strides))
self.num_classes = num_classes
self.in_channels = in_channels
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
if hasattr(self.train_cfg, 'sampler'):
sampler_cfg = self.train_cfg.sampler
else:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.one_hot_smoother = one_hot_smoother
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.bbox_coder = build_bbox_coder(bbox_coder)
self.anchor_generator = build_anchor_generator(anchor_generator)
self.loss_cls = build_loss(loss_cls)
self.loss_conf = build_loss(loss_conf)
self.loss_xy = build_loss(loss_xy)
self.loss_wh = build_loss(loss_wh)
# usually the numbers of anchors for each level are the same
# except SSD detectors
self.num_anchors = self.anchor_generator.num_base_anchors[0]
assert len(
self.anchor_generator.num_base_anchors) == len(featmap_strides)
self._init_layers()
@property
def num_levels(self):
return len(self.featmap_strides)
@property
def num_attrib(self):
"""int: number of attributes in pred_map, bboxes (4) +
objectness (1) + num_classes"""
return 5 + self.num_classes
def _init_layers(self):
self.convs_bridge = nn.ModuleList()
self.convs_pred = nn.ModuleList()
for i in range(self.num_levels):
conv_bridge = ConvModule(
self.in_channels[i],
self.out_channels[i],
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
conv_pred = nn.Conv2d(self.out_channels[i],
self.num_anchors * self.num_attrib, 1)
self.convs_bridge.append(conv_bridge)
self.convs_pred.append(conv_pred)
def init_weights(self):
"""Initialize weights of the head."""
for m in self.convs_pred:
normal_init(m, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
            tuple[Tensor]: A tuple of multi-level prediction maps, each is a
4D-tensor of shape (batch_size, 5+num_classes, height, width).
"""
assert len(feats) == self.num_levels
pred_maps = []
for i in range(self.num_levels):
x = feats[i]
x = self.convs_bridge[i](x)
pred_map = self.convs_pred[i](x)
pred_maps.append(pred_map)
return tuple(pred_maps),
@force_fp32(apply_to=('pred_maps', ))
def get_bboxes(self,
pred_maps,
img_metas,
cfg=None,
rescale=False,
with_nms=True):
"""Transform network output for a batch into bbox predictions.
Args:
pred_maps (list[Tensor]): Raw predictions for a batch of images.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class label of the
corresponding box.
"""
result_list = []
num_levels = len(pred_maps)
for img_id in range(len(img_metas)):
pred_maps_list = [
pred_maps[i][img_id].detach() for i in range(num_levels)
]
scale_factor = img_metas[img_id]['scale_factor']
proposals = self._get_bboxes_single(pred_maps_list, scale_factor,
cfg, rescale, with_nms)
result_list.append(proposals)
return result_list
def _get_bboxes_single(self,
pred_maps_list,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
pred_maps_list (list[Tensor]): Prediction maps for different scales
of each single image in the batch.
scale_factor (ndarray): Scale factor of the image arrange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple(Tensor):
det_bboxes (Tensor): BBox predictions in shape (n, 5), where
the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
det_labels (Tensor): A (n,) tensor where each item is the
predicted class label of the corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(pred_maps_list) == self.num_levels
multi_lvl_bboxes = []
multi_lvl_cls_scores = []
multi_lvl_conf_scores = []
num_levels = len(pred_maps_list)
featmap_sizes = [
pred_maps_list[i].shape[-2:] for i in range(num_levels)
]
multi_lvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, pred_maps_list[0][0].device)
for i in range(self.num_levels):
# get some key info for current scale
pred_map = pred_maps_list[i]
stride = self.featmap_strides[i]
# (h, w, num_anchors*num_attrib) -> (h*w*num_anchors, num_attrib)
pred_map = pred_map.permute(1, 2, 0).reshape(-1, self.num_attrib)
pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])
bbox_pred = self.bbox_coder.decode(multi_lvl_anchors[i],
pred_map[..., :4], stride)
# conf and cls
conf_pred = torch.sigmoid(pred_map[..., 4]).view(-1)
cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
-1, self.num_classes) # Cls pred one-hot.
# Filtering out all predictions with conf < conf_thr
conf_thr = cfg.get('conf_thr', -1)
conf_inds = conf_pred.ge(conf_thr).nonzero().flatten()
bbox_pred = bbox_pred[conf_inds, :]
cls_pred = cls_pred[conf_inds, :]
conf_pred = conf_pred[conf_inds]
# Get top-k prediction
nms_pre = cfg.get('nms_pre', -1)
if 0 < nms_pre < conf_pred.size(0):
_, topk_inds = conf_pred.topk(nms_pre)
bbox_pred = bbox_pred[topk_inds, :]
cls_pred = cls_pred[topk_inds, :]
conf_pred = conf_pred[topk_inds]
# Save the result of current scale
multi_lvl_bboxes.append(bbox_pred)
multi_lvl_cls_scores.append(cls_pred)
multi_lvl_conf_scores.append(conf_pred)
# Merge the results of different scales together
multi_lvl_bboxes = torch.cat(multi_lvl_bboxes)
multi_lvl_cls_scores = torch.cat(multi_lvl_cls_scores)
multi_lvl_conf_scores = torch.cat(multi_lvl_conf_scores)
if with_nms and (multi_lvl_conf_scores.size(0) == 0):
return torch.zeros((0, 5)), torch.zeros((0, ))
if rescale:
multi_lvl_bboxes /= multi_lvl_bboxes.new_tensor(scale_factor)
# In mmdet 2.x, the class_id for background is num_classes.
# i.e., the last column.
padding = multi_lvl_cls_scores.new_zeros(multi_lvl_cls_scores.shape[0],
1)
multi_lvl_cls_scores = torch.cat([multi_lvl_cls_scores, padding],
dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(
multi_lvl_bboxes,
multi_lvl_cls_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=multi_lvl_conf_scores)
return det_bboxes, det_labels
else:
return (multi_lvl_bboxes, multi_lvl_cls_scores,
multi_lvl_conf_scores)
@force_fp32(apply_to=('pred_maps', ))
def loss(self,
pred_maps,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
pred_maps (list[Tensor]): Prediction map for each scale level,
shape (N, num_anchors * num_attrib, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
num_imgs = len(img_metas)
device = pred_maps[0][0].device
featmap_sizes = [
pred_maps[i].shape[-2:] for i in range(self.num_levels)
]
multi_level_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device)
anchor_list = [multi_level_anchors for _ in range(num_imgs)]
responsible_flag_list = []
for img_id in range(len(img_metas)):
responsible_flag_list.append(
self.anchor_generator.responsible_flags(
featmap_sizes, gt_bboxes[img_id], device))
target_maps_list, neg_maps_list = self.get_targets(
anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
self.loss_single, pred_maps, target_maps_list, neg_maps_list)
return dict(
loss_cls=losses_cls,
loss_conf=losses_conf,
loss_xy=losses_xy,
loss_wh=losses_wh)
def loss_single(self, pred_map, target_map, neg_map):
"""Compute loss of a single image from a batch.
Args:
pred_map (Tensor): Raw predictions for a single level.
target_map (Tensor): The Ground-Truth target for a single level.
neg_map (Tensor): The negative masks for a single level.
Returns:
tuple:
loss_cls (Tensor): Classification loss.
loss_conf (Tensor): Confidence loss.
loss_xy (Tensor): Regression loss of x, y coordinate.
loss_wh (Tensor): Regression loss of w, h coordinate.
"""
num_imgs = len(pred_map)
pred_map = pred_map.permute(0, 2, 3,
1).reshape(num_imgs, -1, self.num_attrib)
neg_mask = neg_map.float()
pos_mask = target_map[..., 4]
pos_and_neg_mask = neg_mask + pos_mask
pos_mask = pos_mask.unsqueeze(dim=-1)
if torch.max(pos_and_neg_mask) > 1.:
warnings.warn('There is overlap between pos and neg sample.')
pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
pred_xy = pred_map[..., :2]
pred_wh = pred_map[..., 2:4]
pred_conf = pred_map[..., 4]
pred_label = pred_map[..., 5:]
target_xy = target_map[..., :2]
target_wh = target_map[..., 2:4]
target_conf = target_map[..., 4]
target_label = target_map[..., 5:]
loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
loss_conf = self.loss_conf(
pred_conf, target_conf, weight=pos_and_neg_mask)
loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
return loss_cls, loss_conf, loss_xy, loss_wh
def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
gt_labels_list):
"""Compute target maps for anchors in multiple images.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_total_anchors, 4).
responsible_flag_list (list[list[Tensor]]): Multi level responsible
flags of each image. Each element is a tensor of shape
(num_total_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
Returns:
tuple: Usually returns a tuple containing learning targets.
- target_map_list (list[Tensor]): Target map of each level.
- neg_map_list (list[Tensor]): Negative map of each level.
"""
num_imgs = len(anchor_list)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
results = multi_apply(self._get_targets_single, anchor_list,
responsible_flag_list, gt_bboxes_list,
gt_labels_list)
all_target_maps, all_neg_maps = results
assert num_imgs == len(all_target_maps) == len(all_neg_maps)
target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
return target_maps_list, neg_maps_list
def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
gt_labels):
"""Generate matching bounding box prior and converted GT.
Args:
anchors (list[Tensor]): Multi-level anchors of the image.
responsible_flags (list[Tensor]): Multi-level responsible flags of
anchors
gt_bboxes (Tensor): Ground truth bboxes of single image.
gt_labels (Tensor): Ground truth labels of single image.
Returns:
tuple:
                target_map (Tensor): Prediction target map of each
scale level, shape (num_total_anchors,
5+num_classes)
neg_map (Tensor): Negative map of each scale level,
shape (num_total_anchors,)
"""
anchor_strides = []
for i in range(len(anchors)):
anchor_strides.append(
torch.tensor(self.featmap_strides[i],
device=gt_bboxes.device).repeat(len(anchors[i])))
concat_anchors = torch.cat(anchors)
concat_responsible_flags = torch.cat(responsible_flags)
anchor_strides = torch.cat(anchor_strides)
assert len(anchor_strides) == len(concat_anchors) == \
len(concat_responsible_flags)
assign_result = self.assigner.assign(concat_anchors,
concat_responsible_flags,
gt_bboxes)
sampling_result = self.sampler.sample(assign_result, concat_anchors,
gt_bboxes)
target_map = concat_anchors.new_zeros(
concat_anchors.size(0), self.num_attrib)
target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
anchor_strides[sampling_result.pos_inds])
target_map[sampling_result.pos_inds, 4] = 1
gt_labels_one_hot = F.one_hot(
gt_labels, num_classes=self.num_classes).float()
if self.one_hot_smoother != 0: # label smooth
gt_labels_one_hot = gt_labels_one_hot * (
1 - self.one_hot_smoother
) + self.one_hot_smoother / self.num_classes
target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
sampling_result.pos_assigned_gt_inds]
neg_map = concat_anchors.new_zeros(
concat_anchors.size(0), dtype=torch.uint8)
neg_map[sampling_result.neg_inds] = 1
return target_map, neg_map
def aug_test(self, feats, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
| 1 | 21,626 |
Is it really necessary to work around it this way? The old version works fine for me when converting to ONNX.
|
open-mmlab-mmdetection
|
py
|
@@ -71,7 +71,7 @@ type s3Blob struct {
func (bb *s3Blob) Write(p []byte) (int, error) {
if bb.final {
- return 0, errors.New("blob already closed")
+ return 0, errors.New("Blob already closed")
}
n, err := bb.buf.Write(p)
| 1 |
package s3
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"strings"
"gopkg.in/amz.v3/aws"
"gopkg.in/amz.v3/s3"
"github.com/restic/restic/backend"
)
const maxKeysInList = 1000
const connLimit = 10
const backendPrefix = "restic"
func s3path(t backend.Type, name string) string {
if t == backend.Config {
return backendPrefix + "/" + string(t)
}
return backendPrefix + "/" + string(t) + "/" + name
}
type S3Backend struct {
bucket *s3.Bucket
connChan chan struct{}
path string
}
// Open a backend using an S3 bucket object
func OpenS3Bucket(bucket *s3.Bucket, bucketname string) *S3Backend {
connChan := make(chan struct{}, connLimit)
for i := 0; i < connLimit; i++ {
connChan <- struct{}{}
}
return &S3Backend{bucket: bucket, path: bucketname, connChan: connChan}
}
// Open opens the S3 backend at bucket and region.
func Open(regionname, bucketname string) (backend.Backend, error) {
auth, err := aws.EnvAuth()
if err != nil {
return nil, err
}
client := s3.New(auth, aws.Regions[regionname])
s3bucket, s3err := client.Bucket(bucketname)
if s3err != nil {
return nil, s3err
}
return OpenS3Bucket(s3bucket, bucketname), nil
}
// Location returns this backend's location (the bucket name).
func (be *S3Backend) Location() string {
return be.path
}
type s3Blob struct {
b *S3Backend
buf *bytes.Buffer
final bool
}
func (bb *s3Blob) Write(p []byte) (int, error) {
if bb.final {
return 0, errors.New("blob already closed")
}
n, err := bb.buf.Write(p)
return n, err
}
func (bb *s3Blob) Read(p []byte) (int, error) {
return bb.buf.Read(p)
}
func (bb *s3Blob) Close() error {
bb.final = true
bb.buf.Reset()
return nil
}
func (bb *s3Blob) Size() uint {
return uint(bb.buf.Len())
}
func (bb *s3Blob) Finalize(t backend.Type, name string) error {
if bb.final {
return errors.New("Already finalized")
}
bb.final = true
path := s3path(t, name)
// Check key does not already exist
_, err := bb.b.bucket.GetReader(path)
if err == nil {
return errors.New("key already exists!")
}
<-bb.b.connChan
err = bb.b.bucket.PutReader(path, bb.buf, int64(bb.buf.Len()), "binary/octet-stream", "private")
bb.b.connChan <- struct{}{}
bb.buf.Reset()
return err
}
// Create creates a new Blob. The data is available only after Finalize()
// has been called on the returned Blob.
func (be *S3Backend) Create() (backend.Blob, error) {
blob := s3Blob{
b: be,
buf: &bytes.Buffer{},
}
return &blob, nil
}
// Get returns a reader that yields the content stored under the given
// name. The reader should be closed after draining it.
func (be *S3Backend) Get(t backend.Type, name string) (io.ReadCloser, error) {
path := s3path(t, name)
return be.bucket.GetReader(path)
}
// GetReader returns an io.ReadCloser for the Blob with the given name of
// type t at offset and length. If length is 0, the reader reads until EOF.
func (be *S3Backend) GetReader(t backend.Type, name string, offset, length uint) (io.ReadCloser, error) {
rc, err := be.Get(t, name)
if err != nil {
return nil, err
}
n, errc := io.CopyN(ioutil.Discard, rc, int64(offset))
if errc != nil {
return nil, errc
} else if n != int64(offset) {
return nil, fmt.Errorf("less bytes read than expected, read: %d, expected: %d", n, offset)
}
if length == 0 {
return rc, nil
}
return backend.LimitReadCloser(rc, int64(length)), nil
}
// Test returns true if a blob of the given type and name exists in the backend.
func (be *S3Backend) Test(t backend.Type, name string) (bool, error) {
found := false
path := s3path(t, name)
_, err := be.bucket.GetReader(path)
if err == nil {
found = true
}
// If error, then not found
return found, nil
}
// Remove removes the blob with the given name and type.
func (be *S3Backend) Remove(t backend.Type, name string) error {
path := s3path(t, name)
return be.bucket.Del(path)
}
// List returns a channel that yields all names of blobs of type t. A
// goroutine is started for this. If the channel done is closed, sending
// stops.
func (be *S3Backend) List(t backend.Type, done <-chan struct{}) <-chan string {
ch := make(chan string)
prefix := s3path(t, "")
listresp, err := be.bucket.List(prefix, "/", "", maxKeysInList)
if err != nil {
close(ch)
return ch
}
matches := make([]string, len(listresp.Contents))
for idx, key := range listresp.Contents {
matches[idx] = strings.TrimPrefix(key.Key, prefix)
}
// Continue making requests to get full list.
for listresp.IsTruncated {
listresp, err = be.bucket.List(prefix, "/", listresp.NextMarker, maxKeysInList)
if err != nil {
close(ch)
return ch
}
for _, key := range listresp.Contents {
matches = append(matches, strings.TrimPrefix(key.Key, prefix))
}
}
go func() {
defer close(ch)
for _, m := range matches {
if m == "" {
continue
}
select {
case ch <- m:
case <-done:
return
}
}
}()
return ch
}
// Remove keys for a specified backend type
func (be *S3Backend) removeKeys(t backend.Type) {
doneChan := make(chan struct{})
for key := range be.List(backend.Data, doneChan) {
be.Remove(backend.Data, key)
}
doneChan <- struct{}{}
}
// Delete removes all restic keys
func (be *S3Backend) Delete() error {
be.removeKeys(backend.Data)
be.removeKeys(backend.Key)
be.removeKeys(backend.Lock)
be.removeKeys(backend.Snapshot)
be.removeKeys(backend.Index)
be.removeKeys(backend.Config)
return nil
}
// Close does nothing
func (be *S3Backend) Close() error { return nil }
| 1 | 6,732 |
Same as above: don't start error strings with a capital letter, and don't fix unneeded things in an unrelated commit ;)
|
restic-restic
|
go
|
@@ -168,6 +168,7 @@ module Beaker
options[:recursive]=File.directory?(source) if options[:recursive].nil?
+ @logger.debug "going to upload! #{source} to #{@hostname}:#{target}"
@ssh.scp.upload! source, target, options
result = Result.new(@hostname, [source, target])
| 1 |
require 'socket'
require 'timeout'
require 'net/scp'
module Beaker
class SshConnection
attr_accessor :logger
RETRYABLE_EXCEPTIONS = [
SocketError,
Timeout::Error,
Errno::ETIMEDOUT,
Errno::EHOSTDOWN,
Errno::EHOSTUNREACH,
Errno::ECONNREFUSED,
Errno::ECONNRESET,
Errno::ENETUNREACH,
Net::SSH::Disconnect,
Net::SSH::AuthenticationFailed,
]
def initialize hostname, user = nil, ssh_opts = {}, options = {}
@hostname = hostname
@user = user
@ssh_opts = ssh_opts
@logger = options[:logger]
end
def self.connect hostname, user = 'root', ssh_opts = {}, options = {}
connection = new hostname, user, ssh_opts, options
connection.connect
connection
end
def connect
try = 1
last_wait = 0
wait = 1
@ssh ||= begin
Net::SSH.start(@hostname, @user, @ssh_opts)
rescue *RETRYABLE_EXCEPTIONS => e
if try <= 11
@logger.warn "Try #{try} -- Host #{@hostname} unreachable: #{e.message}"
@logger.warn "Trying again in #{wait} seconds"
sleep wait
(last_wait, wait) = wait, last_wait + wait
try += 1
retry
else
# why is the logger not passed into this class?
@logger.error "Failed to connect to #{@hostname}"
raise
end
end
@logger.debug "Created ssh connection to #{@hostname}, user: #{@user}, opts: #{@ssh_opts}"
self
end
def close
begin
@ssh.close if @ssh
rescue
@ssh.shutdown!
end
@ssh = nil
end
def try_to_execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
result = Result.new(@hostname, command)
# why are we getting to this point on a dry run anyways?
# also... the host creates connections through the class method,
# which automatically connects, so you can't do a dry run unless you also
# can connect to your hosts?
return result if options[:dry_run]
@ssh.open_channel do |channel|
request_terminal_for( channel, command ) if options[:pty]
channel.exec(command) do |terminal, success|
abort "FAILED: to execute command on a new channel on #{@hostname}" unless success
register_stdout_for terminal, result, stdout_callback
register_stderr_for terminal, result, stderr_callback
register_exit_code_for terminal, result
process_stdin_for( terminal, options[:stdin] ) if options[:stdin]
end
end
# Process SSH activity until we stop doing that - which is when our
# channel is finished with...
@ssh.loop
result.finalize!
@logger.last_result = result
result
end
def execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
attempt = true
begin
result = try_to_execute(command, options, stdout_callback, stderr_callback)
rescue *RETRYABLE_EXCEPTIONS => e
if attempt
attempt = false
@logger.error "Command execution failed, attempting to reconnect to #{@hostname}"
close
connect
retry
else
raise
end
end
result
end
def request_terminal_for channel, command
channel.request_pty do |ch, success|
if success
@logger.info "Allocated a PTY on #{@hostname} for #{command.inspect}"
else
abort "FAILED: could not allocate a pty when requested on " +
"#{@hostname} for #{command.inspect}"
end
end
end
def register_stdout_for channel, output, callback = nil
channel.on_data do |ch, data|
callback[data] if callback
output.stdout << data
output.output << data
end
end
def register_stderr_for channel, output, callback = nil
channel.on_extended_data do |ch, type, data|
if type == 1
callback[data] if callback
output.stderr << data
output.output << data
end
end
end
def register_exit_code_for channel, output
channel.on_request("exit-status") do |ch, data|
output.exit_code = data.read_long
end
end
def process_stdin_for channel, stdin
# queue stdin data, force it to packets, and signal eof: this
# triggers action in many remote commands, notably including
# 'puppet apply'. It must be sent at some point before the rest
# of the action.
channel.send_data stdin.to_s
channel.process
channel.eof!
end
def scp_to source, target, options = {}, dry_run = false
return if dry_run
options[:recursive]=File.directory?(source) if options[:recursive].nil?
@ssh.scp.upload! source, target, options
result = Result.new(@hostname, [source, target])
# Setting these values allows reporting via result.log(test_name)
result.stdout = "SCP'ed file #{source} to #{@hostname}:#{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
return result
end
def scp_from source, target, options = {}, dry_run = false
return if dry_run
options[:recursive] = true if options[:recursive].nil?
@ssh.scp.download! source, target, options
result = Result.new(@hostname, [source, target])
# Setting these values allows reporting via result.log(test_name)
result.stdout = "SCP'ed file #{@hostname}:#{source} to #{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
result
end
end
end
| 1 | 6,363 |
Is this a testing debug statement?
|
voxpupuli-beaker
|
rb
|
@@ -849,7 +849,11 @@ var connect = function(self, url, options, callback) {
}
// Parse the string
- var object = parse(url, options);
+ var object;
+ parse(url, options, function(err, connectionStringObject) {
+ if (err) return console.log(err);
+ object = connectionStringObject;
+ });
var _finalOptions = createUnifiedOptions({}, object);
_finalOptions = mergeOptions(_finalOptions, object, false);
_finalOptions = createUnifiedOptions(_finalOptions, options);
| 1 |
'use strict';
var parse = require('./url_parser'),
Server = require('./topologies/server'),
Mongos = require('./topologies/mongos'),
ReplSet = require('./topologies/replset'),
EventEmitter = require('events').EventEmitter,
inherits = require('util').inherits,
Define = require('./metadata'),
ReadPreference = require('./read_preference'),
Logger = require('mongodb-core').Logger,
MongoError = require('mongodb-core').MongoError,
handleCallback = require('./utils').handleCallback,
Db = require('./db'),
f = require('util').format,
assign = require('./utils').assign,
shallowClone = require('./utils').shallowClone,
authenticate = require('./authenticate'),
ServerSessionPool = require('mongodb-core').Sessions.ServerSessionPool,
executeOperation = require('./utils').executeOperation;
/**
* @fileOverview The **MongoClient** class is a class that allows for making Connections to MongoDB.
*
* @example
* var MongoClient = require('mongodb').MongoClient,
* test = require('assert');
* // Connection url
* var url = 'mongodb://localhost:27017/test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, db) {
* // Get an additional db
* db.close();
* });
*/
var validOptionNames = [
'poolSize',
'ssl',
'sslValidate',
'sslCA',
'sslCert',
'sslKey',
'sslPass',
'sslCRL',
'autoReconnect',
'noDelay',
'keepAlive',
'keepAliveInitialDelay',
'connectTimeoutMS',
'family',
'socketTimeoutMS',
'reconnectTries',
'reconnectInterval',
'ha',
'haInterval',
'replicaSet',
'secondaryAcceptableLatencyMS',
'acceptableLatencyMS',
'connectWithNoPrimary',
'authSource',
'w',
'wtimeout',
'j',
'forceServerObjectId',
'serializeFunctions',
'ignoreUndefined',
'raw',
'bufferMaxEntries',
'readPreference',
'pkFactory',
'promiseLibrary',
'readConcern',
'maxStalenessSeconds',
'loggerLevel',
'logger',
'promoteValues',
'promoteBuffers',
'promoteLongs',
'domainsEnabled',
'checkServerIdentity',
'validateOptions',
'appname',
'auth',
'user',
'password',
'authMechanism',
'compression',
'fsync',
'readPreferenceTags',
'numberOfRetries',
'auto_reconnect'
];
var ignoreOptionNames = ['native_parser'];
var legacyOptionNames = ['server', 'replset', 'replSet', 'mongos', 'db'];
function validOptions(options) {
var _validOptions = validOptionNames.concat(legacyOptionNames);
for (var name in options) {
if (ignoreOptionNames.indexOf(name) !== -1) {
continue;
}
if (_validOptions.indexOf(name) === -1 && options.validateOptions) {
return new MongoError(f('option %s is not supported', name));
} else if (_validOptions.indexOf(name) === -1) {
console.warn(f('the options [%s] is not supported', name));
}
if (legacyOptionNames.indexOf(name) !== -1) {
console.warn(
f(
'the server/replset/mongos options are deprecated, ' +
'all their options are supported at the top level of the options object [%s]',
validOptionNames
)
);
}
}
}
/**
* Creates a new MongoClient instance
* @class
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
* @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family=4] Version of IP stack. Defaults to 4
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w=null] The write concern
* @param {number} [options.wtimeout=null] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory=null] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern=null] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
 * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR
* @param {object} [options.compression=null] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags=null] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {MongoClient} a MongoClient instance
*/
function MongoClient(url, options) {
  if (!(this instanceof MongoClient)) return new MongoClient(url, options);
// Set up event emitter
EventEmitter.call(this);
// The internal state
this.s = {
url: url,
options: options || {},
promiseLibrary: null,
dbCache: {}
};
// Get the promiseLibrary
var promiseLibrary = this.s.options.promiseLibrary || Promise;
// Add the promise to the internal state
this.s.promiseLibrary = promiseLibrary;
}
/**
* @ignore
*/
inherits(MongoClient, EventEmitter);
var define = (MongoClient.define = new Define('MongoClient', MongoClient, false));
/**
* The callback format for results
* @callback MongoClient~connectCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {Db} db The connected database.
*/
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
* @deprecated MongoClient.connect is deprecated, please use new MongoClient().connect() to connect.
*/
MongoClient.prototype.connect = function(callback) {
// Validate options object
var err = validOptions(this.s.options);
if (typeof callback === 'string') {
throw new TypeError('`connect` only accepts a callback');
}
return executeOperation(this, connectOp, [this, err, callback], {
skipSessions: true
});
};
const connectOp = (self, err, callback) => {
// Did we have a validation error
if (err) return callback(err);
// Fallback to callback based connect
connect(self, self.s.url, self.s.options, function(err) {
if (err) return callback(err);
callback(null, self);
});
};
define.classMethod('connect', { callback: true, promise: true, returns: [MongoClient] });
/**
* Logout user from server, fire off on all connections and remove all auth info
* @method
* @param {object} [options=null] Optional settings.
* @param {string} [options.dbName=null] Logout against different database than current.
* @param {Db~resultCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.logout = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Establish the correct database name
var dbName = this.s.options.authSource ? this.s.options.authSource : this.s.options.dbName;
return executeOperation(this, logout, [this, dbName, callback], {
skipSessions: true
});
};
const logout = (self, dbName, callback) => {
self.topology.logout(dbName, function(err) {
if (err) return callback(err);
callback(null, true);
});
};
define.classMethod('logout', { callback: true, promise: true });
/**
* Close the db and its underlying connections
* @method
* @param {boolean} force Force close, emitting no events
* @param {Db~noResultCallback} [callback] The result callback
* @return {Promise} returns Promise if no callback passed
*/
MongoClient.prototype.close = function(force, callback) {
var self = this;
if (typeof force === 'function') (callback = force), (force = false);
  // Close the topology connection
this.topology.close(force);
// Emit close event
self.emit('close', self);
// Fire close event on any cached db instances
for (var name in this.s.dbCache) {
this.s.dbCache[name].emit('close');
}
// Remove listeners after emit
self.removeAllListeners('close');
// Callback after next event loop tick
if (typeof callback === 'function')
return process.nextTick(function() {
handleCallback(callback, null);
});
// Return dummy promise
return new this.s.promiseLibrary(function(resolve) {
resolve();
});
};
define.classMethod('close', { callback: true, promise: true });
/**
* Create a new Db instance sharing the current socket connections. Be aware that the new db instances are
* related in a parent-child relationship to the original instance so that events are correctly emitted on child
* db instances. Child db instances are cached so performing db('db1') twice will return the same instance.
* You can control these behaviors with the options noListener and returnNonCachedInstance.
*
* @method
* @param {string} name The name of the database we want to use.
* @param {object} [options=null] Optional settings.
* @param {boolean} [options.noListener=false] Do not make the db an event listener to the original connection.
* @param {boolean} [options.returnNonCachedInstance=false] Control if you want to return a cached instance or have a new one created
* @return {Db}
*/
MongoClient.prototype.db = function(dbName, options) {
options = options || {};
  // Copy the options and add our internal override of the not shared flag
var finalOptions = assign({}, this.s.options, options);
// Do we have the db in the cache already
if (this.s.dbCache[dbName] && finalOptions.returnNonCachedInstance !== true) {
return this.s.dbCache[dbName];
}
// Add promiseLibrary
finalOptions.promiseLibrary = this.s.promiseLibrary;
// If no topology throw an error message
if (!this.topology) {
throw new MongoError('MongoClient must be connected before calling MongoClient.prototype.db');
}
// Return the db object
var db = new Db(dbName, this.topology, finalOptions);
// Add the db to the cache
this.s.dbCache[dbName] = db;
// Return the database
return db;
};
/**
* Check if MongoClient is connected
*
* @method
 * @param {object} [options=null] Optional settings.
 * @return {boolean}
*/
MongoClient.prototype.isConnected = function(options) {
options = options || {};
if (!this.topology) return false;
return this.topology.isConnected(options);
};
/**
* Connect to MongoDB using a url as documented at
*
* docs.mongodb.org/manual/reference/connection-string/
*
* Note that for replicasets the replicaSet query parameter is required in the 2.0 driver
*
* @method
* @static
* @param {string} url The connection URI string
* @param {object} [options] Optional settings
* @param {number} [options.poolSize=5] The maximum size of the individual server pool
* @param {boolean} [options.ssl=false] Enable SSL connection.
* @param {boolean} [options.sslValidate=true] Validate mongod server certificate against Certificate Authority
* @param {buffer} [options.sslCA=undefined] SSL Certificate store binary buffer
* @param {buffer} [options.sslCert=undefined] SSL Certificate binary buffer
* @param {buffer} [options.sslKey=undefined] SSL Key file binary buffer
* @param {string} [options.sslPass=undefined] SSL Certificate pass phrase
* @param {buffer} [options.sslCRL=undefined] SSL Certificate revocation list binary buffer
* @param {boolean} [options.autoReconnect=true] Enable autoReconnect for single server instances
* @param {boolean} [options.noDelay=true] TCP Connection no delay
* @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled
 * @param {number} [options.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket
* @param {number} [options.connectTimeoutMS=30000] TCP Connection timeout setting
* @param {number} [options.family=4] Version of IP stack. Defaults to 4
* @param {number} [options.socketTimeoutMS=360000] TCP Socket timeout setting
* @param {number} [options.reconnectTries=30] Server attempt to reconnect #times
* @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries
* @param {boolean} [options.ha=true] Control if high availability monitoring runs for Replicaset or Mongos proxies
* @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry
* @param {string} [options.replicaSet=undefined] The Replicaset set name
* @param {number} [options.secondaryAcceptableLatencyMS=15] Cutoff latency point in MS for Replicaset member selection
* @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for Mongos proxies selection
* @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available
* @param {string} [options.authSource=undefined] Define the database to authenticate against
* @param {(number|string)} [options.w=null] The write concern
* @param {number} [options.wtimeout=null] The write concern timeout
* @param {boolean} [options.j=false] Specify a journal write concern
* @param {boolean} [options.forceServerObjectId=false] Force server to assign _id values instead of driver
* @param {boolean} [options.serializeFunctions=false] Serialize functions on any object
* @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields
* @param {boolean} [options.raw=false] Return document results as raw BSON buffers
* @param {number} [options.bufferMaxEntries=-1] Sets a cap on how many operations the driver will buffer up before giving up on getting a working connection, default is -1 which is unlimited
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST)
* @param {object} [options.pkFactory=null] A primary key factory object for generation of custom _id keys
* @param {object} [options.promiseLibrary=null] A Promise library class the application wishes to use such as Bluebird, must be ES6 compatible
* @param {object} [options.readConcern=null] Specify a read concern for the collection (only MongoDB 3.2 or higher supported)
* @param {string} [options.readConcern.level='local'] Specify a read concern level for the collection operations, one of [local|majority]. (only MongoDB 3.2 or higher supported)
* @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed)
* @param {string} [options.loggerLevel=undefined] The logging level (error/warn/info/debug)
* @param {object} [options.logger=undefined] Custom logger object
* @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types
* @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers
* @param {boolean} [options.promoteLongs=true] Promotes long values to number if they fit inside the 53 bits resolution
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit
 * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function
* @param {object} [options.validateOptions=false] Validate MongoClient passed in options for correctness
* @param {string} [options.appname=undefined] The name of the application that created this MongoClient instance. MongoDB 3.4 and newer will print this value in the server log upon establishing each connection. It is also recorded in the slow query log and profile collections
* @param {string} [options.auth.user=undefined] The username for auth
* @param {string} [options.auth.password=undefined] The password for auth
* @param {string} [options.authMechanism=undefined] Mechanism for authentication: MDEFAULT, GSSAPI, PLAIN, MONGODB-X509, SCRAM-SHA-1 or MONGODB-CR
* @param {object} [options.compression=null] Type of compression to use: snappy or zlib
* @param {boolean} [options.fsync=false] Specify a file sync write concern
* @param {array} [options.readPreferenceTags=null] Read preference tags
* @param {number} [options.numberOfRetries=5] The number of retries for a tailable cursor
* @param {boolean} [options.auto_reconnect=true] Enable auto reconnecting for single server instances
* @param {MongoClient~connectCallback} [callback] The command result callback
* @return {Promise} returns Promise if no callback passed
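 *
 * A minimal usage sketch (hypothetical; assumes a mongod running locally on the default port and a database named 'test'):
 * @example
 * MongoClient.connect('mongodb://localhost:27017/test', function(err, client) {
 *   if (err) throw err;
 *   // The callback receives the connected MongoClient instance
 *   var db = client.db('test');
 *   // ... use db, then release the underlying connections
 *   client.close();
 * });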
*/
MongoClient.connect = function(url, options, callback) {
var args = Array.prototype.slice.call(arguments, 1);
callback = typeof args[args.length - 1] === 'function' ? args.pop() : undefined;
options = args.length ? args.shift() : null;
options = options || {};
// Create client
var mongoclient = new MongoClient(url, options);
// Execute the connect method
return mongoclient.connect(callback);
};
define.staticMethod('connect', { callback: true, promise: true });
/**
* Starts a new session on the server
*
* @param {object} [options] optional settings for a driver session
* @param {MongoClient~sessionCallback} [callback] The callback called with a newly establish session, or an error if one occurred
* @return {Promise} if no callback is specified, a promise will be returned for the newly established session
*/
MongoClient.prototype.startSession = function(options) {
options = options || {};
if (!this.topology) {
throw new MongoError('Must connect to a server before calling this method');
}
const capabilities = this.topology.capabilities();
if (capabilities && !capabilities.hasSessionSupport) {
throw new MongoError('Current topology does not support sessions');
}
return this.topology.startSession(options);
};
var mergeOptions = function(target, source, flatten) {
for (var name in source) {
if (source[name] && typeof source[name] === 'object' && flatten) {
target = mergeOptions(target, source[name], flatten);
} else {
target[name] = source[name];
}
}
return target;
};
var createUnifiedOptions = function(finalOptions, options) {
var childOptions = [
'mongos',
'server',
'db',
'replset',
'db_options',
'server_options',
'rs_options',
'mongos_options'
];
var noMerge = ['readconcern', 'compression'];
for (var name in options) {
if (noMerge.indexOf(name.toLowerCase()) !== -1) {
finalOptions[name] = options[name];
} else if (childOptions.indexOf(name.toLowerCase()) !== -1) {
finalOptions = mergeOptions(finalOptions, options[name], false);
} else {
if (
options[name] &&
typeof options[name] === 'object' &&
!Buffer.isBuffer(options[name]) &&
!Array.isArray(options[name])
) {
finalOptions = mergeOptions(finalOptions, options[name], true);
} else {
finalOptions[name] = options[name];
}
}
}
return finalOptions;
};
function translateOptions(options) {
// If we have a readPreference passed in by the db options
if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') {
options.readPreference = new ReadPreference(options.readPreference || options.read_preference);
}
// Do we have readPreference tags, add them
if (options.readPreference && (options.readPreferenceTags || options.read_preference_tags)) {
options.readPreference.tags = options.readPreferenceTags || options.read_preference_tags;
}
// Do we have maxStalenessSeconds
if (options.maxStalenessSeconds) {
options.readPreference.maxStalenessSeconds = options.maxStalenessSeconds;
}
// Set the socket and connection timeouts
if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000;
if (options.connectTimeoutMS == null) options.connectTimeoutMS = 30000;
// Create server instances
return options.servers.map(function(serverObj) {
return serverObj.domain_socket
? new Server(serverObj.domain_socket, 27017, options)
: new Server(serverObj.host, serverObj.port, options);
});
}
var events = [
'timeout',
'close',
'serverOpening',
'serverDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed',
'serverClosed',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'joined',
'left',
'ping',
'ha',
'all',
'fullsetup'
];
//
// Collect all events in order from SDAM
//
function collectEvents(self, topology) {
var collectedEvents = [];
if (self instanceof MongoClient) {
events.forEach(function(event) {
topology.on(event, function(object1, object2) {
collectedEvents.push({
event: event,
object1: object1,
object2: object2
});
});
});
}
return collectedEvents;
}
//
// Clear out all event
//
function clearAllEvents(topology) {
events.forEach(function(event) {
topology.removeAllListeners(event);
});
}
//
// Replay any events due to single server connection switching to Mongos
//
function replayEvents(self, events) {
for (var i = 0; i < events.length; i++) {
self.emit(events[i].event, events[i].object1, events[i].object2);
}
}
function relayEvents(self, topology) {
var events = [
'serverOpening',
'serverDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed',
'serverClosed',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'joined',
'left',
'ping',
'ha'
];
events.forEach(function(event) {
topology.on(event, function(object1, object2) {
self.emit(event, object1, object2);
});
});
}
function assignTopology(client, topology) {
client.topology = topology;
topology.s.sessionPool = new ServerSessionPool(topology.s.coreTopology);
}
function createServer(self, options, callback) {
// Pass in the promise library
options.promiseLibrary = self.s.promiseLibrary;
// Set default options
var servers = translateOptions(options);
  // Propagate the events to the client
var collectedEvents = collectEvents(self, servers[0]);
// Connect to topology
servers[0].connect(function(err, topology) {
if (err) return callback(err);
// Clear out all the collected event listeners
clearAllEvents(servers[0]);
    // Relay all the events
relayEvents(self, servers[0]);
// Add listeners
addListeners(self, servers[0]);
// Check if we are really speaking to a mongos
var ismaster = topology.lastIsMaster();
// Set the topology
assignTopology(self, topology);
// Do we actually have a mongos
if (ismaster && ismaster.msg === 'isdbgrid') {
// Destroy the current connection
topology.close();
// Create mongos connection instead
return createMongos(self, options, callback);
}
// Fire all the events
replayEvents(self, collectedEvents);
// Otherwise callback
callback(err, topology);
});
}
function createReplicaset(self, options, callback) {
// Pass in the promise library
options.promiseLibrary = self.s.promiseLibrary;
// Set default options
var servers = translateOptions(options);
// Create the topology
var topology = new ReplSet(servers, options);
// Add listeners
addListeners(self, topology);
  // Propagate the events to the client
relayEvents(self, topology);
// Open the connection
topology.connect(options, function(err, topology) {
if (err) return callback(err);
assignTopology(self, topology);
callback(null, topology);
});
}
function createMongos(self, options, callback) {
// Pass in the promise library
options.promiseLibrary = self.s.promiseLibrary;
// Set default options
var servers = translateOptions(options);
// Create the topology
var topology = new Mongos(servers, options);
// Add listeners
addListeners(self, topology);
  // Propagate the events to the client
relayEvents(self, topology);
// Open the connection
topology.connect(options, function(err, topology) {
if (err) return callback(err);
assignTopology(self, topology);
callback(null, topology);
});
}
function createListener(self, event) {
return function(v1, v2) {
if (event === 'open' || event === 'fullsetup' || event === 'all' || event === 'reconnect') {
return self.emit(event, self);
}
self.emit(event, v1, v2);
};
}
function addListeners(self, topology) {
topology.on('authenticated', createListener(self, 'authenticated'));
topology.on('error', createListener(self, 'error'));
topology.on('timeout', createListener(self, 'timeout'));
topology.on('close', createListener(self, 'close'));
topology.on('parseError', createListener(self, 'parseError'));
topology.once('open', createListener(self, 'open'));
topology.once('fullsetup', createListener(self, 'fullsetup'));
topology.once('all', createListener(self, 'all'));
topology.on('reconnect', createListener(self, 'reconnect'));
}
function connectHandler(client, options, callback) {
return function(err, topology) {
if (err) {
return process.nextTick(function() {
try {
callback(err, null);
} catch (err) {
if (topology) topology.close();
throw err;
}
});
}
// No authentication just reconnect
if (!options.auth) {
return process.nextTick(function() {
try {
callback(err, topology);
} catch (err) {
if (topology) topology.close();
throw err;
}
});
}
// Authenticate
authenticate(client, options.user, options.password, options, function(err, success) {
if (success) {
process.nextTick(function() {
try {
callback(null, topology);
} catch (err) {
if (topology) topology.close();
throw err;
}
});
} else {
if (topology) topology.close();
process.nextTick(function() {
try {
callback(err ? err : new Error('Could not authenticate user ' + options.auth[0]), null);
} catch (err) {
if (topology) topology.close();
throw err;
}
});
}
});
};
}
/*
* Connect using MongoClient
*/
var connect = function(self, url, options, callback) {
options = options || {};
options = shallowClone(options);
// If callback is null throw an exception
if (callback == null) {
throw new Error('no callback function provided');
}
// Get a logger for MongoClient
var logger = Logger('MongoClient', options);
// Did we pass in a Server/ReplSet/Mongos
if (url instanceof Server || url instanceof ReplSet || url instanceof Mongos) {
// Set the topology
assignTopology(self, url);
// Add listeners
addListeners(self, url);
// Connect
return url.connect(
options,
connectHandler(self, options, function(err, topology) {
if (err) return connectCallback(err, topology);
if (options.user || options.password || options.authMechanism) {
return authenticate(self, options.user, options.password, options, function(err) {
if (err) return connectCallback(err, topology);
connectCallback(err, topology);
});
}
connectCallback(err, topology);
})
);
}
// Parse the string
var object = parse(url, options);
var _finalOptions = createUnifiedOptions({}, object);
_finalOptions = mergeOptions(_finalOptions, object, false);
_finalOptions = createUnifiedOptions(_finalOptions, options);
// Check if we have connection and socket timeout set
if (_finalOptions.socketTimeoutMS == null) _finalOptions.socketTimeoutMS = 360000;
if (_finalOptions.connectTimeoutMS == null) _finalOptions.connectTimeoutMS = 30000;
if (_finalOptions.db_options && _finalOptions.db_options.auth) {
delete _finalOptions.db_options.auth;
}
// Store the merged options object
self.s.options = _finalOptions;
// Failure modes
if (object.servers.length === 0) {
throw new Error('connection string must contain at least one seed host');
}
function connectCallback(err, topology) {
if (err && err.message === 'no mongos proxies found in seed list') {
if (logger.isWarn()) {
logger.warn(
f(
'seed list contains no mongos proxies, replicaset connections requires the parameter replicaSet to be supplied in the URI or options object, mongodb://server:port/db?replicaSet=name'
)
);
}
// Return a more specific error message for MongoClient.connect
return callback(
new MongoError(
'seed list contains no mongos proxies, replicaset connections requires the parameter replicaSet to be supplied in the URI or options object, mongodb://server:port/db?replicaSet=name'
)
);
}
// Return the error and db instance
callback(err, topology);
}
// Do we have a replicaset then skip discovery and go straight to connectivity
if (_finalOptions.replicaSet || _finalOptions.rs_name) {
return createReplicaset(
self,
_finalOptions,
connectHandler(self, _finalOptions, connectCallback)
);
} else if (object.servers.length > 1) {
return createMongos(self, _finalOptions, connectHandler(self, _finalOptions, connectCallback));
} else {
return createServer(self, _finalOptions, connectHandler(self, _finalOptions, connectCallback));
}
};
module.exports = MongoClient;
| 1 | 13,965 |
You'll need to wrap the _whole thing_ within this async call (or refactor it so everything below moves into a new temporary function). The code below this section depends on `object` having a value after parsing has occurred, but as written you're kicking off a race condition: `object` starts out null and only _might_ be assigned the parsed parts quickly enough. Instead, you need to do everything that depends on the parsed object in the callback of `parse`.
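A rough sketch of the shape I mean (illustrative only; it assumes the new `parse` takes a node-style `(err, object)` callback and reuses the helpers already defined in this file):

```js
// Hypothetical sketch: everything that needs the parsed `object` moves into the
// callback of the asynchronous `parse`, so it can never observe a null value.
var connect = function(self, url, options, callback) {
  options = shallowClone(options || {});
  if (callback == null) throw new Error('no callback function provided');

  parse(url, options, function(err, object) {
    if (err) return callback(err);

    // Build the merged options from the parsed object, exactly as before
    var _finalOptions = createUnifiedOptions({}, object);
    _finalOptions = mergeOptions(_finalOptions, object, false);
    _finalOptions = createUnifiedOptions(_finalOptions, options);

    // Pick the topology and connect, still inside the parse callback
    if (_finalOptions.replicaSet || _finalOptions.rs_name) {
      return createReplicaset(self, _finalOptions, connectHandler(self, _finalOptions, callback));
    } else if (object.servers.length > 1) {
      return createMongos(self, _finalOptions, connectHandler(self, _finalOptions, callback));
    }
    return createServer(self, _finalOptions, connectHandler(self, _finalOptions, callback));
  });
};
```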
|
mongodb-node-mongodb-native
|
js
|
@@ -238,9 +238,9 @@ thread ([this]() {
void nano::worker::run ()
{
+ std::unique_lock<std::mutex> lk (mutex);
while (!stopped)
{
- std::unique_lock<std::mutex> lk (mutex);
if (!queue.empty ())
{
auto func = queue.front ();
| 1 |
#include <nano/lib/utility.hpp>
#include <boost/dll/runtime_symbol_info.hpp>
#include <iostream>
// Some builds (mac) fail due to "Boost.Stacktrace requires `_Unwind_Backtrace` function".
#ifndef _WIN32
#ifndef _GNU_SOURCE
#define BEFORE_GNU_SOURCE 0
#define _GNU_SOURCE
#else
#define BEFORE_GNU_SOURCE 1
#endif
#endif
// On Windows this include defines min/max macros, so keep below other includes
// to reduce conflicts with other std functions
#include <boost/stacktrace.hpp>
#ifndef _WIN32
#if !BEFORE_GNU_SOURCE
#undef _GNU_SOURCE
#endif
#endif
namespace nano
{
seq_con_info_composite::seq_con_info_composite (const std::string & name) :
name (name)
{
}
bool seq_con_info_composite::is_composite () const
{
return true;
}
void seq_con_info_composite::add_component (std::unique_ptr<seq_con_info_component> child)
{
children.push_back (std::move (child));
}
const std::vector<std::unique_ptr<seq_con_info_component>> & seq_con_info_composite::get_children () const
{
return children;
}
const std::string & seq_con_info_composite::get_name () const
{
return name;
}
seq_con_info_leaf::seq_con_info_leaf (const seq_con_info & info) :
info (info)
{
}
bool seq_con_info_leaf::is_composite () const
{
return false;
}
const seq_con_info & seq_con_info_leaf::get_info () const
{
return info;
}
void dump_crash_stacktrace ()
{
boost::stacktrace::safe_dump_to ("nano_node_backtrace.dump");
}
namespace thread_role
{
/*
* nano::thread_role namespace
*
* Manage thread role
*/
static thread_local nano::thread_role::name current_thread_role = nano::thread_role::name::unknown;
nano::thread_role::name get ()
{
return current_thread_role;
}
std::string get_string (nano::thread_role::name role)
{
std::string thread_role_name_string;
switch (role)
{
case nano::thread_role::name::unknown:
thread_role_name_string = "<unknown>";
break;
case nano::thread_role::name::io:
thread_role_name_string = "I/O";
break;
case nano::thread_role::name::work:
thread_role_name_string = "Work pool";
break;
case nano::thread_role::name::packet_processing:
thread_role_name_string = "Pkt processing";
break;
case nano::thread_role::name::alarm:
thread_role_name_string = "Alarm";
break;
case nano::thread_role::name::vote_processing:
thread_role_name_string = "Vote processing";
break;
case nano::thread_role::name::block_processing:
thread_role_name_string = "Blck processing";
break;
case nano::thread_role::name::request_loop:
thread_role_name_string = "Request loop";
break;
case nano::thread_role::name::wallet_actions:
thread_role_name_string = "Wallet actions";
break;
case nano::thread_role::name::work_watcher:
thread_role_name_string = "Work watcher";
break;
case nano::thread_role::name::bootstrap_initiator:
thread_role_name_string = "Bootstrap init";
break;
case nano::thread_role::name::voting:
thread_role_name_string = "Voting";
break;
case nano::thread_role::name::signature_checking:
thread_role_name_string = "Signature check";
break;
case nano::thread_role::name::rpc_request_processor:
thread_role_name_string = "RPC processor";
break;
case nano::thread_role::name::rpc_process_container:
thread_role_name_string = "RPC process";
break;
case nano::thread_role::name::confirmation_height_processing:
thread_role_name_string = "Conf height";
break;
case nano::thread_role::name::worker:
thread_role_name_string = "Worker";
break;
}
/*
* We want to constrain the thread names to 15
* characters, since this is the smallest maximum
* length supported by the platforms we support
* (specifically, Linux)
*/
assert (thread_role_name_string.size () < 16);
return (thread_role_name_string);
}
std::string get_string ()
{
return get_string (current_thread_role);
}
void set (nano::thread_role::name role)
{
auto thread_role_name_string (get_string (role));
nano::thread_role::set_os_name (thread_role_name_string);
nano::thread_role::current_thread_role = role;
}
}
}
void nano::thread_attributes::set (boost::thread::attributes & attrs)
{
auto attrs_l (&attrs);
attrs_l->set_stack_size (8000000); //8MB
}
nano::thread_runner::thread_runner (boost::asio::io_context & io_ctx_a, unsigned service_threads_a) :
io_guard (boost::asio::make_work_guard (io_ctx_a))
{
boost::thread::attributes attrs;
nano::thread_attributes::set (attrs);
for (auto i (0u); i < service_threads_a; ++i)
{
threads.push_back (boost::thread (attrs, [&io_ctx_a]() {
nano::thread_role::set (nano::thread_role::name::io);
try
{
io_ctx_a.run ();
}
catch (std::exception const & ex)
{
std::cerr << ex.what () << std::endl;
#ifndef NDEBUG
throw;
#endif
}
catch (...)
{
#ifndef NDEBUG
/*
* In a release build, catch and swallow the
* io_context exception, in debug mode pass it
* on
*/
throw;
#endif
}
}));
}
}
nano::thread_runner::~thread_runner ()
{
join ();
}
void nano::thread_runner::join ()
{
io_guard.reset ();
for (auto & i : threads)
{
if (i.joinable ())
{
i.join ();
}
}
}
void nano::thread_runner::stop_event_processing ()
{
io_guard.get_executor ().context ().stop ();
}
nano::worker::worker () :
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::worker);
this->run ();
})
{
}
void nano::worker::run ()
{
while (!stopped)
{
std::unique_lock<std::mutex> lk (mutex);
if (!queue.empty ())
{
auto func = queue.front ();
queue.pop_front ();
lk.unlock ();
func ();
// So that we reduce locking for anything being pushed as that will
// most likely be on an io-thread
std::this_thread::yield ();
lk.lock ();
}
else
{
cv.wait (lk);
}
}
}
nano::worker::~worker ()
{
stop ();
}
void nano::worker::push_task (std::function<void()> func_a)
{
{
std::lock_guard<std::mutex> guard (mutex);
queue.emplace_back (func_a);
}
cv.notify_one ();
}
void nano::worker::stop ()
{
stopped = true;
cv.notify_one ();
if (thread.joinable ())
{
thread.join ();
}
}
std::unique_ptr<nano::seq_con_info_component> nano::collect_seq_con_info (nano::worker & worker, const std::string & name)
{
auto composite = std::make_unique<seq_con_info_composite> (name);
size_t count = 0;
{
std::lock_guard<std::mutex> guard (worker.mutex);
count = worker.queue.size ();
}
auto sizeof_element = sizeof (decltype (worker.queue)::value_type);
composite->add_component (std::make_unique<nano::seq_con_info_leaf> (nano::seq_con_info{ "queue", count, sizeof_element }));
return composite;
}
void nano::remove_all_files_in_dir (boost::filesystem::path const & dir)
{
for (auto & p : boost::filesystem::directory_iterator (dir))
{
auto path = p.path ();
if (boost::filesystem::is_regular_file (path))
{
boost::filesystem::remove (path);
}
}
}
void nano::move_all_files_to_dir (boost::filesystem::path const & from, boost::filesystem::path const & to)
{
for (auto & p : boost::filesystem::directory_iterator (from))
{
auto path = p.path ();
if (boost::filesystem::is_regular_file (path))
{
boost::filesystem::rename (path, to / path.filename ());
}
}
}
/*
* Backing code for "release_assert", which is itself a macro
*/
void release_assert_internal (bool check, const char * check_expr, const char * file, unsigned int line)
{
if (check)
{
return;
}
std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << "\n\n";
// Output stack trace to cerr
auto stacktrace = boost::stacktrace::stacktrace ();
std::stringstream ss;
ss << stacktrace;
auto backtrace_str = ss.str ();
std::cerr << backtrace_str << std::endl;
// "abort" at the end of this function will go into any signal handlers (the daemon ones will generate a stack trace and load memory address files on non-Windows systems).
	// There is no async-signal-safe way to generate stacktraces on Windows, so this must be done before aborting
#ifdef _WIN32
{
		// Try to construct the stacktrace dump in the same folder as the running executable, otherwise use the current directory.
boost::system::error_code err;
auto running_executable_filepath = boost::dll::program_location (err);
std::string filename = "nano_node_backtrace_release_assert.txt";
std::string filepath = filename;
if (!err)
{
filepath = (running_executable_filepath.parent_path () / filename).string ();
}
std::ofstream file (filepath);
nano::set_secure_perm_file (filepath);
file << backtrace_str;
}
#endif
abort ();
}
| 1 | 15,882 |
This is now going to be protecting `stopped` when it doesn't need to. If you are going to do this, can you make `stopped` non-atomic and protect it in `stop`? It's cheap to make a `unique_lock`, fyi: it's just a pointer to a mutex and a `bool`. The `lk.lock ()` in the loop is superfluous, so it probably shouldn't have been there to begin with.
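Roughly what I have in mind (a sketch only; it assumes `stopped` becomes a plain `bool` member that is only ever read and written while `mutex` is held):

```cpp
// Sketch: `stopped` is a plain bool, guarded by `mutex` just like the queue.
void nano::worker::run ()
{
	std::unique_lock<std::mutex> lk (mutex);
	while (!stopped)
	{
		if (!queue.empty ())
		{
			auto func = queue.front ();
			queue.pop_front ();
			lk.unlock ();
			func ();
			// Yield so io-threads pushing work aren't starved, then re-acquire the
			// lock before touching `queue` and `stopped` again (needed now that the
			// lock lives outside the loop).
			std::this_thread::yield ();
			lk.lock ();
		}
		else
		{
			cv.wait (lk);
		}
	}
}

void nano::worker::stop ()
{
	{
		std::lock_guard<std::mutex> guard (mutex);
		stopped = true;
	}
	cv.notify_one ();
	if (thread.joinable ())
	{
		thread.join ();
	}
}
```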
|
nanocurrency-nano-node
|
cpp
|
@@ -27,6 +27,10 @@ import (
"strings"
"time"
+ "github.com/mysteriumnetwork/node/cmd/commands"
+
+ remote_config "github.com/mysteriumnetwork/node/config/remote"
+
"github.com/chzyer/readline"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
| 1 |
/*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cli
import (
"errors"
"flag"
"fmt"
"io"
stdlog "log"
"path/filepath"
"strings"
"time"
"github.com/chzyer/readline"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
"github.com/mysteriumnetwork/node/cmd"
"github.com/mysteriumnetwork/node/cmd/commands/cli/clio"
"github.com/mysteriumnetwork/node/config"
"github.com/mysteriumnetwork/node/config/urfavecli/clicontext"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/datasize"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/services"
tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client"
"github.com/mysteriumnetwork/node/tequilapi/contract"
"github.com/mysteriumnetwork/node/utils"
)
// CommandName is the name which is used to call this command
const CommandName = "cli"
const serviceHelp = `service <action> [args]
start <ProviderID> <ServiceType> [options]
stop <ServiceID>
status <ServiceID>
list
sessions
example: service start 0x7d5ee3557775aed0b85d691b036769c17349db23 openvpn --openvpn.port=1194 --openvpn.proto=UDP`
// NewCommand constructs a CLI-based Mysterium UI with the possibility to control quitting
func NewCommand() *cli.Command {
return &cli.Command{
Name: CommandName,
Usage: "Starts a CLI client with a Tequilapi",
Before: clicontext.LoadUserConfigQuietly,
Flags: []cli.Flag{&config.FlagAgreedTermsConditions},
Action: func(ctx *cli.Context) error {
config.ParseFlagsNode(ctx)
nodeOptions := node.GetOptions()
cmdCLI := &cliApp{
historyFile: filepath.Join(nodeOptions.Directories.Data, ".cli_history"),
tequilapi: tequilapi_client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort),
}
cmd.RegisterSignalCallback(utils.SoftKiller(cmdCLI.Kill))
return describeQuit(cmdCLI.Run(ctx))
},
}
}
func describeQuit(err error) error {
if err == nil || err == io.EOF || err == readline.ErrInterrupt {
log.Info().Msg("Stopping application")
return nil
}
log.Error().Err(err).Stack().Msg("Terminating application due to error")
return err
}
// cliApp describes CLI based Mysterium UI
type cliApp struct {
historyFile string
tequilapi *tequilapi_client.Client
fetchedProposals []contract.ProposalDTO
completer *readline.PrefixCompleter
reader *readline.Instance
currentConsumerID string
}
const redColor = "\033[31m%s\033[0m"
const identityDefaultPassphrase = ""
const statusConnected = "Connected"
var versionSummary = metadata.VersionAsSummary(metadata.LicenseCopyright(
"type 'license --warranty'",
"type 'license --conditions'",
))
func (c *cliApp) handleTOS(ctx *cli.Context) error {
if ctx.Bool(config.FlagAgreedTermsConditions.Name) {
c.acceptTOS()
return nil
}
agreedC := config.Current.GetBool(contract.TermsConsumerAgreed)
if !agreedC {
return errors.New("You must agree with provider and consumer terms of use in order to use this command")
}
agreedP := config.Current.GetBool(contract.TermsProviderAgreed)
if !agreedP {
return errors.New("You must agree with provider and consumer terms of use in order to use this command")
}
version := config.Current.GetString(contract.TermsVersion)
if version != metadata.CurrentTermsVersion {
return fmt.Errorf("You've agreed to terms of use version %s, but version %s is required", version, metadata.CurrentTermsVersion)
}
return nil
}
func (c *cliApp) acceptTOS() {
t := true
if err := c.tequilapi.UpdateTerms(contract.TermsRequest{
AgreedConsumer: &t,
AgreedProvider: &t,
AgreedVersion: metadata.CurrentTermsVersion,
}); err != nil {
clio.Info("Failed to save terms of use agreement, you will have to re-agree on next launch")
}
}
// Run runs CLI interface synchronously, in the same thread while blocking it
func (c *cliApp) Run(ctx *cli.Context) (err error) {
if err := c.handleTOS(ctx); err != nil {
clio.PrintTOSError(err)
return nil
}
c.completer = newAutocompleter(c.tequilapi, c.fetchedProposals)
c.fetchedProposals = c.fetchProposals()
if ctx.Args().Len() > 0 {
c.handleActions(strings.Join(ctx.Args().Slice(), " "))
return nil
}
c.reader, err = readline.NewEx(&readline.Config{
Prompt: fmt.Sprintf(redColor, "» "),
HistoryFile: c.historyFile,
AutoComplete: c.completer,
InterruptPrompt: "^C",
EOFPrompt: "exit",
})
if err != nil {
return err
}
	// TODO Should take over the output of CommandRun
stdlog.SetOutput(c.reader.Stderr())
for {
line, err := c.reader.Readline()
if err == readline.ErrInterrupt && len(line) > 0 {
continue
} else if err != nil {
c.quit()
return err
}
c.handleActions(line)
}
}
// Kill stops cli
func (c *cliApp) Kill() error {
c.reader.Clean()
return c.reader.Close()
}
func (c *cliApp) handleActions(line string) {
line = strings.TrimSpace(line)
staticCmds := []struct {
command string
handler func()
}{
{"exit", c.quit},
{"quit", c.quit},
{"help", c.help},
{"status", c.status},
{"healthcheck", c.healthcheck},
{"nat", c.natStatus},
{"location", c.location},
{"disconnect", c.disconnect},
{"stop", c.stopClient},
}
argCmds := []struct {
command string
handler func(argsString string)
}{
{"connect", c.connect},
{"identities", c.identities},
{"order", c.order},
{"payout", c.payout},
{"version", c.version},
{"license", c.license},
{"proposals", c.proposals},
{"service", c.service},
{"stake", c.stake},
{"mmn", c.mmnApiKey},
}
for _, cmd := range staticCmds {
if line == cmd.command {
cmd.handler()
return
}
}
for _, cmd := range argCmds {
if strings.HasPrefix(line, cmd.command) {
argsString := strings.TrimSpace(line[len(cmd.command):])
cmd.handler(argsString)
return
}
}
if len(line) > 0 {
c.help()
}
}
func (c *cliApp) service(argsString string) {
args := strings.Fields(argsString)
if len(args) == 0 {
fmt.Println(serviceHelp)
return
}
action := args[0]
switch action {
case "start":
if len(args) < 3 {
fmt.Println(serviceHelp)
return
}
c.serviceStart(args[1], args[2], args[3:]...)
case "stop":
if len(args) < 2 {
fmt.Println(serviceHelp)
return
}
c.serviceStop(args[1])
case "status":
if len(args) < 2 {
fmt.Println(serviceHelp)
return
}
c.serviceGet(args[1])
case "list":
c.serviceList()
case "sessions":
c.serviceSessions()
default:
clio.Info(fmt.Sprintf("Unknown action provided: %s", action))
fmt.Println(serviceHelp)
}
}
func (c *cliApp) serviceStart(providerID, serviceType string, args ...string) {
serviceOpts, err := parseStartFlags(serviceType, args...)
if err != nil {
clio.Info("Failed to parse service options:", err)
return
}
service, err := c.tequilapi.ServiceStart(contract.ServiceStartRequest{
ProviderID: providerID,
Type: serviceType,
PaymentMethod: contract.ServicePaymentMethod{
PriceGB: serviceOpts.PaymentPricePerGB,
PriceMinute: serviceOpts.PaymentPricePerMinute,
},
AccessPolicies: contract.ServiceAccessPolicies{IDs: serviceOpts.AccessPolicyList},
Options: serviceOpts.TypeOptions,
})
if err != nil {
clio.Info("Failed to start service: ", err)
return
}
clio.Status(service.Status,
"ID: "+service.ID,
"ProviderID: "+service.Proposal.ProviderID,
"Type: "+service.Proposal.ServiceType)
}
func (c *cliApp) serviceStop(id string) {
if err := c.tequilapi.ServiceStop(id); err != nil {
clio.Info("Failed to stop service: ", err)
return
}
clio.Status("Stopping", "ID: "+id)
}
func (c *cliApp) serviceList() {
services, err := c.tequilapi.Services()
if err != nil {
clio.Info("Failed to get a list of services: ", err)
return
}
for _, service := range services {
clio.Status(service.Status,
"ID: "+service.ID,
"ProviderID: "+service.Proposal.ProviderID,
"Type: "+service.Proposal.ServiceType)
}
}
func (c *cliApp) serviceSessions() {
sessions, err := c.tequilapi.Sessions()
if err != nil {
clio.Info("Failed to get a list of sessions: ", err)
return
}
clio.Status("Current sessions", len(sessions.Items))
for _, session := range sessions.Items {
clio.Status(
"ID: "+session.ID,
"ConsumerID: "+session.ConsumerID,
fmt.Sprintf("Data: %s/%s", datasize.FromBytes(session.BytesReceived).String(), datasize.FromBytes(session.BytesSent).String()),
fmt.Sprintf("Tokens: %s", money.New(session.Tokens)),
)
}
}
func (c *cliApp) serviceGet(id string) {
service, err := c.tequilapi.Service(id)
if err != nil {
clio.Info("Failed to get service info: ", err)
return
}
clio.Status(service.Status,
"ID: "+service.ID,
"ProviderID: "+service.Proposal.ProviderID,
"Type: "+service.Proposal.ServiceType)
}
func (c *cliApp) connect(argsString string) {
args := strings.Fields(argsString)
helpMsg := "Please type in the provider identity. connect <consumer-identity> <provider-identity> <service-type> [dns=auto|provider|system|1.1.1.1] [disable-kill-switch]"
if len(args) < 3 {
clio.Info(helpMsg)
return
}
consumerID, providerID, serviceType := args[0], args[1], args[2]
if !services.IsTypeValid(serviceType) {
clio.Warn(fmt.Sprintf("Invalid service type, expected one of: %s", strings.Join(services.Types(), ",")))
return
}
var disableKillSwitch bool
var dns connection.DNSOption
var err error
for _, arg := range args[3:] {
if strings.HasPrefix(arg, "dns=") {
kv := strings.Split(arg, "=")
dns, err = connection.NewDNSOption(kv[1])
if err != nil {
clio.Warn("Invalid value: ", err)
clio.Info(helpMsg)
return
}
continue
}
switch arg {
case "disable-kill-switch":
disableKillSwitch = true
default:
clio.Warn("Unexpected arg:", arg)
clio.Info(helpMsg)
return
}
}
connectOptions := contract.ConnectOptions{
DNS: dns,
DisableKillSwitch: disableKillSwitch,
}
clio.Status("CONNECTING", "from:", consumerID, "to:", providerID)
hermesID := config.GetString(config.FlagHermesID)
	// Don't throw an error here in case the user identity has a password on it
	// or we failed to unlock it automatically. We can still try to connect;
	// if the identity is locked, it will notify us anyway.
_ = c.tequilapi.Unlock(consumerID, "")
_, err = c.tequilapi.ConnectionCreate(consumerID, providerID, hermesID, serviceType, connectOptions)
if err != nil {
clio.Error(err)
return
}
c.currentConsumerID = consumerID
clio.Success("Connected.")
}
func (c *cliApp) payout(argsString string) {
args := strings.Fields(argsString)
const usage = "payout command:\n set"
if len(args) == 0 {
clio.Info(usage)
return
}
action := args[0]
switch action {
case "set":
payoutSignature := "payout set <identity> <ethAddress>"
if len(args) < 2 {
clio.Info("Please provide identity. You can select one by pressing tab.\n", payoutSignature)
return
}
var identity, ethAddress string
if len(args) > 2 {
identity, ethAddress = args[1], args[2]
} else {
clio.Info("Please type in identity and Ethereum address.\n", payoutSignature)
return
}
err := c.tequilapi.Payout(identity, ethAddress)
if err != nil {
clio.Warn(err)
return
}
clio.Success(fmt.Sprintf("Payout address %s registered.", ethAddress))
default:
clio.Warnf("Unknown sub-command '%s'\n", action)
fmt.Println(usage)
return
}
}
func (c *cliApp) mmnApiKey(argsString string) {
args := strings.Fields(argsString)
var profileUrl = config.GetString(config.FlagMMNAddress) + "user/profile"
var usage = "Set MMN's API key and claim this node:\nmmn <api-key>\nTo get the token, visit: " + profileUrl + "\n"
if len(args) == 0 {
clio.Info(usage)
return
}
apiKey := args[0]
err := c.tequilapi.SetMMNApiKey(contract.MMNApiKeyRequest{
ApiKey: apiKey,
})
if err != nil {
clio.Warn(err)
return
}
clio.Success(fmt.Sprint("MMN API key configured."))
}
func (c *cliApp) disconnect() {
err := c.tequilapi.ConnectionDestroy()
if err != nil {
clio.Warn(err)
return
}
c.currentConsumerID = ""
clio.Success("Disconnected.")
}
func (c *cliApp) status() {
status, err := c.tequilapi.ConnectionStatus()
if err != nil {
clio.Warn(err)
} else {
clio.Info("Status:", status.Status)
clio.Info("SID:", status.SessionID)
}
ip, err := c.tequilapi.ConnectionIP()
if err != nil {
clio.Warn(err)
} else {
clio.Info("IP:", ip.IP)
}
location, err := c.tequilapi.ConnectionLocation()
if err != nil {
clio.Warn(err)
} else {
clio.Info(fmt.Sprintf("Location: %s, %s (%s - %s)", location.City, location.Country, location.UserType, location.ISP))
}
if status.Status == statusConnected {
clio.Info("Proposal:", status.Proposal)
statistics, err := c.tequilapi.ConnectionStatistics()
if err != nil {
clio.Warn(err)
} else {
clio.Info(fmt.Sprintf("Connection duration: %s", time.Duration(statistics.Duration)*time.Second))
clio.Info(fmt.Sprintf("Data: %s/%s", datasize.FromBytes(statistics.BytesReceived), datasize.FromBytes(statistics.BytesSent)))
clio.Info(fmt.Sprintf("Throughput: %s/%s", datasize.BitSpeed(statistics.ThroughputReceived), datasize.BitSpeed(statistics.ThroughputSent)))
clio.Info(fmt.Sprintf("Spent: %s", money.New(statistics.TokensSpent)))
}
}
}
func (c *cliApp) healthcheck() {
healthcheck, err := c.tequilapi.Healthcheck()
if err != nil {
clio.Warn(err)
return
}
clio.Info(fmt.Sprintf("Uptime: %v", healthcheck.Uptime))
clio.Info(fmt.Sprintf("Process: %v", healthcheck.Process))
clio.Info(fmt.Sprintf("Version: %v", healthcheck.Version))
buildString := metadata.FormatString(healthcheck.BuildInfo.Commit, healthcheck.BuildInfo.Branch, healthcheck.BuildInfo.BuildNumber)
clio.Info(buildString)
}
func (c *cliApp) natStatus() {
status, err := c.tequilapi.NATStatus()
if err != nil {
clio.Warn("Failed to retrieve NAT traversal status:", err)
return
}
if status.Error == "" {
clio.Infof("NAT traversal status: %q\n", status.Status)
} else {
clio.Infof("NAT traversal status: %q (error: %q)\n", status.Status, status.Error)
}
}
func (c *cliApp) proposals(filter string) {
proposals := c.fetchProposals()
c.fetchedProposals = proposals
filterMsg := ""
if filter != "" {
filterMsg = fmt.Sprintf("(filter: '%s')", filter)
}
clio.Info(fmt.Sprintf("Found %v proposals %s", len(proposals), filterMsg))
for _, proposal := range proposals {
country := proposal.ServiceDefinition.LocationOriginate.Country
if country == "" {
country = "Unknown"
}
var policies []string
if proposal.AccessPolicies != nil {
for _, policy := range *proposal.AccessPolicies {
policies = append(policies, policy.ID)
}
}
msg := fmt.Sprintf("- provider id: %v\ttype: %v\tcountry: %v\taccess policies: %v", proposal.ProviderID, proposal.ServiceType, country, strings.Join(policies, ","))
if filter == "" ||
strings.Contains(proposal.ProviderID, filter) ||
strings.Contains(country, filter) {
clio.Info(msg)
}
}
}
func (c *cliApp) fetchProposals() []contract.ProposalDTO {
upperTimeBound := config.GetBigInt(config.FlagPaymentsConsumerPricePerMinuteUpperBound)
lowerTimeBound := config.GetBigInt(config.FlagPaymentsConsumerPricePerMinuteLowerBound)
upperGBBound := config.GetBigInt(config.FlagPaymentsConsumerPricePerGBUpperBound)
lowerGBBound := config.GetBigInt(config.FlagPaymentsConsumerPricePerGBLowerBound)
proposals, err := c.tequilapi.ProposalsByPrice(lowerTimeBound, upperTimeBound, lowerGBBound, upperGBBound)
if err != nil {
clio.Warn(err)
return []contract.ProposalDTO{}
}
return proposals
}
func (c *cliApp) location() {
location, err := c.tequilapi.OriginLocation()
if err != nil {
clio.Warn(err)
return
}
clio.Info(fmt.Sprintf("Location: %s, %s (%s - %s)", location.City, location.Country, location.UserType, location.ISP))
}
func (c *cliApp) help() {
clio.Info("Mysterium CLI commands:")
fmt.Println(c.completer.Tree(" "))
}
// quit stops cli and client commands and exits application
func (c *cliApp) quit() {
stop := utils.SoftKiller(c.Kill)
stop()
}
func (c *cliApp) stopClient() {
err := c.tequilapi.Stop()
if err != nil {
clio.Warn("Cannot stop client:", err)
}
clio.Success("Client stopped")
}
func (c *cliApp) version(argsString string) {
fmt.Println(versionSummary)
}
func (c *cliApp) license(argsString string) {
if argsString == "warranty" {
fmt.Print(metadata.LicenseWarranty)
} else if argsString == "conditions" {
fmt.Print(metadata.LicenseConditions)
} else {
clio.Info("identities command:\n warranty\n conditions")
}
}
func getIdentityOptionList(tequilapi *tequilapi_client.Client) func(string) []string {
return func(line string) []string {
var identities []string
ids, err := tequilapi.GetIdentities()
if err != nil {
clio.Warn(err)
return identities
}
for _, id := range ids {
identities = append(identities, id.Address)
}
return identities
}
}
func getProposalOptionList(proposals []contract.ProposalDTO) func(string) []string {
return func(line string) []string {
var providerIDS []string
for _, proposal := range proposals {
providerIDS = append(providerIDS, proposal.ProviderID)
}
return providerIDS
}
}
func newAutocompleter(tequilapi *tequilapi_client.Client, proposals []contract.ProposalDTO) *readline.PrefixCompleter {
connectOpts := []readline.PrefixCompleterInterface{
readline.PcItem("dns=auto"),
readline.PcItem("dns=provider"),
readline.PcItem("dns=system"),
readline.PcItem("dns=1.1.1.1"),
}
return readline.NewPrefixCompleter(
readline.PcItem(
"connect",
readline.PcItemDynamic(
getIdentityOptionList(tequilapi),
readline.PcItemDynamic(
getProposalOptionList(proposals),
readline.PcItem("noop", connectOpts...),
readline.PcItem("openvpn", connectOpts...),
readline.PcItem("wireguard", connectOpts...),
),
),
),
readline.PcItem(
"service",
readline.PcItem("start", readline.PcItemDynamic(
getIdentityOptionList(tequilapi),
readline.PcItem("noop"),
readline.PcItem("openvpn"),
readline.PcItem("wireguard"),
)),
readline.PcItem("stop"),
readline.PcItem("list"),
readline.PcItem("status"),
readline.PcItem("sessions"),
),
readline.PcItem(
"identities",
readline.PcItem("list"),
readline.PcItem("get", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
readline.PcItem("new"),
readline.PcItem("unlock", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
readline.PcItem("register", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
readline.PcItem("beneficiary", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
readline.PcItem("settle", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
readline.PcItem("referralcode", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
),
readline.PcItem("status"),
readline.PcItem(
"stake",
readline.PcItem("increase"),
readline.PcItem("decrease"),
),
readline.PcItem("orders",
readline.PcItem("create"),
readline.PcItem("get"),
readline.PcItem("get-all"),
readline.PcItem("currencies"),
),
readline.PcItem("healthcheck"),
readline.PcItem("nat"),
readline.PcItem("proposals"),
readline.PcItem("location"),
readline.PcItem("disconnect"),
readline.PcItem("mmn"),
readline.PcItem("help"),
readline.PcItem("quit"),
readline.PcItem("stop"),
readline.PcItem(
"payout",
readline.PcItem("set", readline.PcItemDynamic(getIdentityOptionList(tequilapi))),
),
readline.PcItem(
"license",
readline.PcItem("warranty"),
readline.PcItem("conditions"),
),
)
}
func parseStartFlags(serviceType string, args ...string) (services.StartOptions, error) {
var flags []cli.Flag
config.RegisterFlagsServiceStart(&flags)
config.RegisterFlagsServiceOpenvpn(&flags)
config.RegisterFlagsServiceWireguard(&flags)
config.RegisterFlagsServiceNoop(&flags)
set := flag.NewFlagSet("", flag.ContinueOnError)
for _, f := range flags {
f.Apply(set)
}
if err := set.Parse(args); err != nil {
return services.StartOptions{}, err
}
ctx := cli.NewContext(nil, set, nil)
config.ParseFlagsServiceStart(ctx)
config.ParseFlagsServiceOpenvpn(ctx)
config.ParseFlagsServiceWireguard(ctx)
config.ParseFlagsServiceNoop(ctx)
return services.GetStartOptions(serviceType)
}
| 1 | 16,852 |
Let's not give packages alias names if we don't have to. If a package requires a rename, we should change the actual package name while/if we can.
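For instance (illustrative only, assuming the package's declared name is `remote`), the import can stand on its own and call sites simply use that name:

```go
import (
	"github.com/mysteriumnetwork/node/cmd/commands"
	"github.com/mysteriumnetwork/node/config/remote"
)
```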
|
mysteriumnetwork-node
|
go
|
@@ -1128,12 +1128,16 @@ encode_opnd_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
static inline bool
decode_opnd_imm6(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
+ if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
+ return false;
return decode_opnd_int(10, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm6(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
+ if (!TEST(1U << 31, enc) && TEST(1U << 15, enc))
+ return false;
return encode_opnd_int(10, 6, false, 0, 0, opnd, enc_out);
}
| 1 |
/* **********************************************************
* Copyright (c) 2017 Google, Inc. All rights reserved.
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* AArch64 decoder and encoder functions.
* This file is rather large and should perhaps be split up, but there are many
* opportunities for inlining which could be lost if it were split into separate
* translation units, and it is helpful to have the per-operand-type decode/encode
* functions next to each other.
*/
#include "../globals.h"
#include "arch.h"
#include "decode.h"
#include "disassemble.h"
#include "instr.h"
#include "instr_create.h"
#include "codec.h"
/* Decode immediate argument of bitwise operations.
* Returns zero if the encoding is invalid.
*/
static ptr_uint_t
decode_bitmask(uint enc)
{
uint pos = enc >> 6 & 63;
uint len = enc & 63;
ptr_uint_t x;
if (TEST(1U << 12, enc)) {
if (len == 63)
return 0;
x = ((ptr_uint_t)1 << (len + 1)) - 1;
return x >> pos | x << 1 << (63 - pos);
} else {
uint i, t = 32;
while ((t & len) != 0)
t >>= 1;
if (t < 2)
return 0;
x = len & (t - 1);
if (x == t - 1)
return 0;
x = ((ptr_uint_t)1 << (x + 1)) - 1;
pos &= t - 1;
x = x >> pos | x << (t - pos);
for (i = 2; i < 64; i *= 2) {
if (t <= i)
x |= x << i;
}
return x;
}
}
/* Encode immediate argument of bitwise operations.
* Returns -1 if the value cannot be encoded.
*/
static int
encode_bitmask(ptr_uint_t x)
{
int neg, rep, pos, len;
neg = 0;
if ((x & 1) != 0)
neg = 1, x = ~x;
if (x == 0)
return -1;
if (x >> 2 == (x & (((ptr_uint_t)1 << (64 - 2)) - 1)))
rep = 2, x &= ((ptr_uint_t)1 << 2) - 1;
else if (x >> 4 == (x & (((ptr_uint_t)1 << (64 - 4)) - 1)))
rep = 4, x &= ((ptr_uint_t)1 << 4) - 1;
else if (x >> 8 == (x & (((ptr_uint_t)1 << (64 - 8)) - 1)))
rep = 8, x &= ((ptr_uint_t)1 << 8) - 1;
else if (x >> 16 == (x & (((ptr_uint_t)1 << (64 - 16)) - 1)))
rep = 16, x &= ((ptr_uint_t)1 << 16) - 1;
else if (x >> 32 == (x & (((ptr_uint_t)1 << (64 - 32)) - 1)))
rep = 32, x &= ((ptr_uint_t)1 << 32) - 1;
else
rep = 64;
pos = 0;
(x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, pos += 32);
(x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, pos += 16);
(x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, pos += 8);
(x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, pos += 4);
(x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, pos += 2);
(x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, pos += 1);
len = 0;
(~x & (((ptr_uint_t)1 << 32) - 1)) != 0 ? 0 : (x >>= 32, len += 32);
(~x & (((ptr_uint_t)1 << 16) - 1)) != 0 ? 0 : (x >>= 16, len += 16);
(~x & (((ptr_uint_t)1 << 8) - 1)) != 0 ? 0 : (x >>= 8, len += 8);
(~x & (((ptr_uint_t)1 << 4) - 1)) != 0 ? 0 : (x >>= 4, len += 4);
(~x & (((ptr_uint_t)1 << 2) - 1)) != 0 ? 0 : (x >>= 2, len += 2);
(~x & (((ptr_uint_t)1 << 1) - 1)) != 0 ? 0 : (x >>= 1, len += 1);
if (x != 0)
return -1;
if (neg) {
pos = (pos + len) & (rep - 1);
len = rep - len;
}
return (0x1000 & rep << 6) | (((rep - 1) ^ 31) << 1 & 63) |
((rep - pos) & (rep - 1)) << 6 | (len - 1);
}
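/* Illustrative round trip (a sketch, not part of the original source), assuming the
* 13-bit N:immr:imms layout used above (bit 12 = N, bits 6-11 = immr, bits 0-5 = imms):
* decode_bitmask(0x03c) yields 0x5555555555555555 (imms=0b111100 selects 2-bit elements
* with one bit set, immr=0, replicated across 64 bits), and
* encode_bitmask(0x5555555555555555) returns 0x03c, so the two functions invert each
* other for valid logical immediates.
*/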
/* Extract signed integer from subfield of word. */
static inline ptr_int_t
extract_int(uint enc, int pos, int len)
{
uint u = ((enc >> pos & (((uint)1 << (len - 1)) - 1)) -
(enc >> pos & ((uint)1 << (len - 1))));
return u << 1 < u ? -(ptr_int_t)~u - 1 : u;
}
/* Extract unsigned integer from subfield of word. */
static inline ptr_uint_t
extract_uint(uint enc, int pos, int len)
{
return enc >> pos & (((uint)1 << len) - 1);
}
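/* Worked example (illustrative only): with enc = 0x38, extract_uint(enc, 3, 3) returns 7,
* while extract_int(enc, 3, 3) sign-extends the same three bits and returns -1.
*/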
static inline bool
try_encode_int(OUT uint *bits, int len, int scale, ptr_int_t val)
{
/* If any of lowest 'scale' bits are set, or 'val' is out of range, fail. */
if (((ptr_uint_t)val & ((1U << scale) - 1)) != 0 ||
val < -((ptr_int_t)1 << (len + scale - 1)) ||
val >= (ptr_int_t)1 << (len + scale - 1))
return false;
*bits = (ptr_uint_t)val >> scale & ((1U << len) - 1);
return true;
}
static inline bool
try_encode_imm(OUT uint *imm, int bits, opnd_t opnd)
{
ptr_int_t value;
if (!opnd_is_immed_int(opnd))
return false;
value = opnd_get_immed_int(opnd);
if (!(0 <= value && value < (uint)1 << bits))
return false;
*imm = value;
return true;
}
static inline bool
encode_pc_off(OUT uint *poff, int bits, byte *pc, instr_t *instr, opnd_t opnd)
{
ptr_uint_t off, range;
ASSERT(0 < bits && bits <= 32);
if (opnd.kind == PC_kind)
off = opnd.value.pc - pc;
else if (opnd.kind == INSTR_kind)
off = (byte *)opnd_get_instr(opnd)->note - (byte *)instr->note;
else
return false;
range = (ptr_uint_t)1 << bits;
if (TEST(~((range - 1) << 2), off + (range << 1)))
return false;
*poff = off >> 2 & (range - 1);
return true;
}
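/* Example of the range check above (assuming a 19-bit field such as the one used by
* conditional branches): with bits = 19 the offset must be a multiple of 4 and lie in
* [-2^20, 2^20 - 4], i.e. roughly +/-1 MiB of the instruction.
*/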
static inline opnd_t
decode_sysreg(uint imm15)
{
reg_t sysreg;
switch (imm15) {
case 0x5a10: sysreg = DR_REG_NZCV; break;
case 0x5a20: sysreg = DR_REG_FPCR; break;
case 0x5a21: sysreg = DR_REG_FPSR; break;
case 0x5e82: sysreg = DR_REG_TPIDR_EL0; break;
default:
return opnd_create_immed_uint(imm15, OPSZ_2);
}
return opnd_create_reg(sysreg);
}
static inline bool
encode_sysreg(OUT uint *imm15, opnd_t opnd)
{
if (opnd_is_reg(opnd)) {
switch (opnd_get_reg(opnd)) {
case DR_REG_NZCV: *imm15 = 0x5a10; break;
case DR_REG_FPCR: *imm15 = 0x5a20; break;
case DR_REG_FPSR: *imm15 = 0x5a21; break;
case DR_REG_TPIDR_EL0: *imm15 = 0x5e82; break;
default:
return false;
}
return true;
}
if (opnd_is_immed_int(opnd)) {
uint imm;
if (try_encode_imm(&imm, 15, opnd) && !opnd_is_reg(decode_sysreg(imm))) {
*imm15 = imm;
return true;
}
return false;
}
return false;
}
/* Decode integer register. Input 'n' is number from 0 to 31, where
* 31 can mean stack pointer or zero register, depending on 'is_sp'.
*/
static inline reg_id_t
decode_reg(uint n, bool is_x, bool is_sp)
{
return (n < 31 ? (is_x ? DR_REG_X0 : DR_REG_W0) + n :
is_sp ? (is_x ? DR_REG_XSP : DR_REG_WSP) : (is_x ? DR_REG_XZR : DR_REG_WZR));
}
/* Encode integer register. */
static inline bool
encode_reg(OUT uint *num, OUT bool *is_x, reg_id_t reg, bool is_sp)
{
if (DR_REG_X0 <= reg && reg <= DR_REG_X30) {
*num = reg - DR_REG_X0;
*is_x = true;
return true;
}
if (DR_REG_W0 <= reg && reg <= DR_REG_W30) {
*num = reg - DR_REG_W0;
*is_x = false;
return true;
}
if (is_sp && (reg == DR_REG_XSP || reg == DR_REG_WSP)) {
*num = 31;
*is_x = (reg == DR_REG_XSP);
return true;
}
if (!is_sp && (reg == DR_REG_XZR || reg == DR_REG_WZR)) {
*num = 31;
*is_x = (reg == DR_REG_XZR);
return true;
}
return false;
}
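/* Example of the register-31 disambiguation (illustrative): decode_reg(31, true, true)
* gives DR_REG_XSP while decode_reg(31, true, false) gives DR_REG_XZR; conversely,
* encode_reg() accepts XSP/WSP only when is_sp is set and XZR/WZR only when it is not,
* so the two functions agree on which meaning register number 31 carries.
*/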
/* Decode SIMD/FP register. */
static inline opnd_t
decode_vreg(uint scale, uint n)
{
reg_id_t reg = DR_REG_NULL;
ASSERT(n < 32 && scale < 5);
switch (scale) {
case 0: reg = DR_REG_B0 + n; break;
case 1: reg = DR_REG_H0 + n; break;
case 2: reg = DR_REG_S0 + n; break;
case 3: reg = DR_REG_D0 + n; break;
case 4: reg = DR_REG_Q0 + n; break;
}
return opnd_create_reg(reg);
}
/* Encode SIMD/FP register. */
static inline bool
encode_vreg(INOUT opnd_size_t *x, OUT uint *r, opnd_t opnd)
{
reg_id_t reg;
opnd_size_t sz;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
if ((uint)(reg - DR_REG_B0) < 32) {
n = reg - DR_REG_B0;
sz = OPSZ_1;
} else if ((uint)(reg - DR_REG_H0) < 32) {
n = reg - DR_REG_H0;
sz = OPSZ_2;
} else if ((uint)(reg - DR_REG_S0) < 32) {
n = reg - DR_REG_S0;
sz = OPSZ_4;
} else if ((uint)(reg - DR_REG_D0) < 32) {
n = reg - DR_REG_D0;
sz = OPSZ_8;
} else if ((uint)(reg - DR_REG_Q0) < 32) {
n = reg - DR_REG_Q0;
sz = OPSZ_16;
} else
return false;
if (*x == OPSZ_NA)
*x = sz;
else if (*x != sz)
return false;
*r = n;
return true;
}
static opnd_t
create_base_imm(uint enc, int disp, int bytes)
{
/* The base register number comes from bits 5 to 9. It may be SP. */
return opnd_create_base_disp(decode_reg(extract_uint(enc, 5, 5), true, true),
DR_REG_NULL, 0, disp, opnd_size_from_bytes(bytes));
}
static bool
is_base_imm(opnd_t opnd, OUT uint *regnum)
{
uint n;
bool is_x;
if (!opnd_is_base_disp(opnd) || opnd_get_index(opnd) != DR_REG_NULL ||
!encode_reg(&n, &is_x, opnd_get_base(opnd), true) || !is_x)
return false;
*regnum = n;
return true;
}
/* Used for mem7* operand types, which have a 7-bit offset and are used by
* load/store (pair) instructions. Returns the scale (log base 2 of number
* of bytes) of the memory argument, a function of bits 26, 30 and 31.
*/
static int
mem7_scale(uint enc)
{
return 2 + (TEST(1U << 26, enc) ?
extract_uint(enc, 30, 2) : extract_uint(enc, 31, 1));
}
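/* For example (assuming the usual LDP/STP encodings): a pair of X registers has bit 31
* set and bit 26 clear, so mem7_scale() returns 3 and the 7-bit offset is scaled by 8,
* while a pair of W registers yields scale 2 and a scaling of 4.
*/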
/* Used for memlit operand type, used by load (literal). Returns the size
* of the memory operand, a function of bits 26, 30 and 31.
*/
static opnd_size_t
memlit_size(uint enc)
{
opnd_size_t size = OPSZ_0;
switch (extract_uint(enc, 30, 2)) {
case 0: size = OPSZ_4; break;
case 1: size = OPSZ_8; break;
case 2: size = TEST(1U << 26, enc) ? OPSZ_16 : OPSZ_4;
}
return size;
}
/* Returns the number of registers accessed by SIMD load structure and replicate,
* a function of bits 13 and 21.
*/
static int
memvr_regcount(uint enc)
{
return ((enc >> 13 & 1) << 1 | (enc >> 21 & 1)) + 1;
}
/* Used for memvs operand type, used by SIMD load/store single structure.
* Returns the number of bytes read or written, which is a function of
* bits 10, 11, 13, 14, 15 and 21.
*/
static int
memvs_size(uint enc)
{
int scale = extract_uint(enc, 14, 2);
/* Number of elements in structure, 1 to 4. */
int elems = memvr_regcount(enc);
int size = extract_uint(enc, 10, 2);
if (scale == 2 && size == 1)
scale = 3;
return elems * (1 << scale);
}
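/* As an illustration (not from the original source): a single-structure store of one
* D element has scale bits 0b10 and size bits 0b01, so the special case above bumps the
* scale to 3 and memvs_size() reports 1 * 8 = 8 bytes transferred.
*/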
/* Returns the number of registers accessed by SIMD load/store multiple structures,
* a function of bits 12-15.
*/
static int
multistruct_regcount(uint enc)
{
switch (extract_uint(enc, 12, 4)) {
case 0: return 4;
case 2: return 4;
case 4: return 3;
case 6: return 3;
case 7: return 1;
case 8: return 2;
case 10: return 2;
}
ASSERT(false);
return 0;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding a generalised type of operand.
*/
/* adr_page: used for adr, adrp */
static bool
decode_opnd_adr_page(int scale, uint enc, byte *pc, OUT opnd_t *opnd)
{
uint bits = (enc >> 3 & 0x1ffffc) | (enc >> 29 & 3);
byte *addr = ((byte *)((ptr_uint_t)pc >> scale << scale) +
extract_int(bits, 0, 21) * ((ptr_int_t)1 << scale));
*opnd = opnd_create_rel_addr(addr, OPSZ_0);
return true;
}
static bool
encode_opnd_adr_page(int scale, byte *pc, opnd_t opnd, OUT uint *enc_out,
instr_t *instr)
{
ptr_int_t offset;
uint bits;
if (opnd_is_rel_addr(opnd)) {
offset = (ptr_int_t)opnd_get_addr(opnd) -
(ptr_int_t)((ptr_uint_t)pc >> scale << scale);
} else if (opnd_is_instr(opnd)) {
offset = (ptr_int_t)
((byte *)opnd_get_instr(opnd)->note - (byte *)instr->note);
} else
return false;
if (!try_encode_int(&bits, 21, scale, offset))
return false;
*enc_out = (bits & 3) << 29 | (bits & 0x1ffffc) << 3;
return true;
}
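/* A concrete case (illustrative): for ADRP the caller passes scale = 12, so with
* pc = 0x400123 and an encoded immediate of 1 the decoded operand address is
* (0x400123 >> 12 << 12) + 1 * 0x1000 = 0x401000; ADR uses scale = 0 and works on byte
* offsets instead.
*/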
/* dq_plus: used for dq0, dq0p1, dq0p2, dq0p3 */
static inline bool
decode_opnd_dq_plus(int add, int qpos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg((TEST(1U << qpos, enc) ? DR_REG_Q0 : DR_REG_D0) +
(extract_uint(enc, 0, 5) + add) % 32);
return true;
}
static inline bool
encode_opnd_dq_plus(int add, int qpos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
q = (uint)(opnd_get_reg(opnd) - DR_REG_Q0) < 32;
num = opnd_get_reg(opnd) - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = (num - add) % 32 | (uint)q << qpos;
return true;
}
/* index: used for opnd_index0, ..., opnd_index3 */
static bool
decode_opnd_index(int n, uint enc, OUT opnd_t *opnd)
{
uint bits = (enc >> 30 & 1) << 3 | (enc >> 10 & 7);
*opnd = opnd_create_immed_int(bits >> n, OPSZ_4b);
return true;
}
static bool
encode_opnd_index(int n, opnd_t opnd, OUT uint *enc_out)
{
ptr_int_t val;
uint bits;
if (!opnd_is_immed_int(opnd))
return false;
val = opnd_get_immed_int(opnd);
if (val < 0 || val >= 16 >> n)
return false;
bits = val << n;
*enc_out = (bits >> 3 & 1) << 30 | (bits & 7) << 10;
return true;
}
/* int: used for almost every operand type that is an immediate integer */
static bool
decode_opnd_int(int pos, int len, bool signd, int scale, opnd_size_t size,
dr_opnd_flags_t flags, uint enc, OUT opnd_t *opnd)
{
ptr_int_t val = signd ? extract_int(enc, pos, len) : extract_uint(enc, pos, len);
*opnd = opnd_add_flags(opnd_create_immed_int(val * ((ptr_int_t)1 << scale), size),
flags);
return true;
}
static bool
encode_opnd_int(int pos, int len, bool signd, int scale,
dr_opnd_flags_t flags, opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t val;
if (!opnd_is_immed_int(opnd) || (opnd_get_flags(opnd) & flags) != flags)
return false;
val = opnd_get_immed_int(opnd);
if ((val & (((ptr_uint_t)1 << scale) - 1)) != 0)
return false;
if ((val + (signd ? ((ptr_uint_t)1 << (len + scale - 1)) : 0)) >> (len + scale) != 0)
return false;
*enc_out = (val >> scale & (((ptr_uint_t)1 << (len - 1)) * 2 - 1)) << pos;
return true;
}
/* imm_bf: used for bitfield immediate operands */
static bool
decode_opnd_imm_bf(int pos, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return decode_opnd_int(pos, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static bool
encode_opnd_imm_bf(int pos, uint enc, opnd_t opnd, uint *enc_out)
{
if (!TEST(1U << 31, enc) && extract_uint(enc, pos, 6) >= 32)
return false;
return encode_opnd_int(pos, 6, false, 0, 0, opnd, enc_out);
}
/* mem0_scale: used for mem0, mem0p */
static inline bool
decode_opnd_mem0_scale(int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, 0, 1 << scale);
return true;
}
static inline bool
encode_opnd_mem0_scale(int scale, opnd_t opnd, OUT uint *enc_out)
{
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(1 << scale) ||
opnd_get_disp(opnd) != 0)
return false;
*enc_out = xn << 5;
return true;
}
/* mem12_scale: used for mem12, mem12q, prf12 */
static inline bool
decode_opnd_mem12_scale(int scale, bool prfm, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, extract_uint(enc, 10, 12) << scale,
prfm ? 0 : 1 << scale);
return true;
}
static inline bool
encode_opnd_mem12_scale(int scale, bool prfm, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != (prfm ? OPSZ_0 : opnd_size_from_bytes(1 << scale)))
return false;
disp = opnd_get_disp(opnd);
if (disp < 0 || disp >> scale > 0xfff || disp >> scale << scale != disp)
return false;
*enc_out = xn << 5 | (uint)disp >> scale << 10;
return true;
}
/* mem7_postindex: used for mem7, mem7post */
static inline bool
decode_opnd_mem7_postindex(bool post, uint enc, OUT opnd_t *opnd)
{
int scale = mem7_scale(enc);
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 15, 7) * (1 << scale),
2 << scale);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem7_postindex(bool post, uint enc, opnd_t opnd, OUT uint *enc_out)
{
int scale = mem7_scale(enc);
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) ||
opnd_get_size(opnd) != opnd_size_from_bytes(2 << scale))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? disp != 0 :
((uint)disp & ((1 << scale) - 1)) != 0 ||
(uint)disp + (0x40 << scale) >= (0x80 << scale))
return false;
*enc_out = xn << 5 | ((uint)disp >> scale & 0x7f) << 15;
return true;
}
/* mem9_bytes: used for mem9, mem9post, mem9q, mem9qpost, prf9 */
static inline bool
decode_opnd_mem9_bytes(int bytes, bool post, uint enc, OUT opnd_t *opnd)
{
*opnd = create_base_imm(enc, post ? 0 : extract_int(enc, 12, 9), bytes);
opnd->value.base_disp.pre_index = !post;
return true;
}
static inline bool
encode_opnd_mem9_bytes(int bytes, bool post, opnd_t opnd, OUT uint *enc_out)
{
int disp;
uint xn;
if (!is_base_imm(opnd, &xn) || opnd_get_size(opnd) != opnd_size_from_bytes(bytes))
return false;
disp = opnd_get_disp(opnd);
if (disp == 0 && opnd.value.base_disp.pre_index == post)
return false;
if (post ? (disp != 0) : (disp < -256 || disp > 255))
return false;
*enc_out = xn << 5 | ((uint)disp & 0x1ff) << 12;
return true;
}
/* memreg_size: used for memreg, memregq, prfreg */
static inline bool
decode_opnd_memreg_size(opnd_size_t size, uint enc, OUT opnd_t *opnd)
{
if (!TEST(1U << 14, enc))
return false;
*opnd = opnd_create_base_disp_aarch64(decode_reg(enc >> 5 & 31, true, true),
decode_reg(enc >> 16 & 31, true, false),
enc >> 13 & 7, TEST(1U << 12, enc),
0, 0, size);
return true;
}
static inline bool
encode_opnd_memreg_size(opnd_size_t size, opnd_t opnd, OUT uint *enc_out)
{
uint rn, rm, option;
bool xn, xm, scaled;
if (!opnd_is_base_disp(opnd) || opnd_get_size(opnd) != size ||
opnd_get_disp(opnd) != 0)
return false;
option = opnd_get_index_extend(opnd, &scaled, NULL);
if (!TEST(2, option))
return false;
if (!encode_reg(&rn, &xn, opnd_get_base(opnd), true) || !xn ||
!encode_reg(&rm, &xm, opnd_get_index(opnd), false) || !xm)
return false;
*enc_out = rn << 5 | rm << 16 | option << 13 | (uint)scaled << 12;
return true;
}
/* q0p: used for q0p1, q0p2, q0p3 */
static bool
decode_opnd_q0p(int add, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(4, (extract_uint(enc, 0, 5) + add) % 32);
return true;
}
static bool
encode_opnd_q0p(int add, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != OPSZ_16)
return false;
*enc_out = (r - add) % 32;
return true;
}
/* rn: used for many integer register operands where bit 31 specifies W or X */
static inline bool
decode_opnd_rn(bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(decode_reg(extract_uint(enc, pos, 5),
TEST(1U << 31, enc), is_sp));
return true;
}
static inline bool
encode_opnd_rn(bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
uint num;
bool is_x;
if (!opnd_is_reg(opnd) || !encode_reg(&num, &is_x, opnd_get_reg(opnd), is_sp))
return false;
*enc_out = (uint)is_x << 31 | num << pos;
return true;
}
/* vector_reg: used for many FP/SIMD register operands */
static bool
decode_opnd_vector_reg(int pos, int scale, uint enc, OUT opnd_t *opnd)
{
*opnd = decode_vreg(scale, extract_uint(enc, pos, 5));
return true;
}
static bool
encode_opnd_vector_reg(int pos, int scale, opnd_t opnd, OUT uint *enc_out)
{
opnd_size_t size = OPSZ_NA;
uint r;
if (!encode_vreg(&size, &r, opnd) || size != opnd_size_from_bytes(1 << scale))
return false;
*enc_out = r << pos;
return true;
}
/* vtn: used for vt0, ..., vt3 */
static bool
decode_opnd_vtn(int add, uint enc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 2) == 3 && extract_uint(enc, 30, 1) == 0)
return false;
*opnd = opnd_create_reg((TEST(1U << 30, enc) ? DR_REG_Q0 : DR_REG_D0) +
((extract_uint(enc, 0, 5) + add) % 32));
return true;
}
static bool
encode_opnd_vtn(int add, uint enc, opnd_t opnd, OUT uint *enc_out)
{
reg_t reg;
uint num;
bool q;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
q = (uint)(reg - DR_REG_Q0) < 32;
if (extract_uint(enc, 10, 2) == 3 && !q)
return false;
num = reg - (q ? DR_REG_Q0 : DR_REG_D0);
if (num >= 32)
return false;
*enc_out = (num - add) % 32 | (uint)q << 30;
return true;
}
/* wxn: used for many integer register operands with fixed size (W or X) */
static bool
decode_opnd_wxn(bool is_x, bool is_sp, int pos, uint enc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(decode_reg(enc >> pos & 31, is_x, is_sp));
return true;
}
static bool
encode_opnd_wxn(bool is_x, bool is_sp, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31) {
*enc_out = n << pos;
return true;
}
if (reg == (is_sp ?
(is_x ? DR_REG_XSP : DR_REG_WSP) :
(is_x ? DR_REG_XZR : DR_REG_WZR))) {
*enc_out = (uint)31 << pos;
return true;
}
return false;
}
/* wxnp: used for CASP, even/odd register pairs */
static bool
decode_opnd_wxnp(bool is_x, int plus, int pos, uint enc, OUT opnd_t *opnd)
{
if ((enc >> pos & 1) != 0)
return false;
*opnd = opnd_create_reg(decode_reg(((enc >> pos) + plus) & 31, is_x, false));
return true;
}
static bool
encode_opnd_wxnp(bool is_x, int plus, int pos, opnd_t opnd, OUT uint *enc_out)
{
reg_id_t reg;
uint n;
if (!opnd_is_reg(opnd))
return false;
reg = opnd_get_reg(opnd);
n = reg - (is_x ? DR_REG_X0 : DR_REG_W0);
if (n < 31 && (n - plus) % 2 == 0) {
*enc_out = ((n - plus) & 31) << pos;
return true;
}
if (reg == (is_x ? DR_REG_XZR : DR_REG_WZR) && ((uint)31 - plus) % 2 == 0) {
*enc_out = (((uint)31 - plus) & 31) << pos;
return true;
}
return false;
}
/*******************************************************************************
* Pairs of functions for decoding and encoding each type of operand, as listed in
* "codec.txt". Try to keep these short: perhaps a tail call to a function in the
* previous section.
*/
/* b0: B register at bit position 0 */
static inline bool
decode_opnd_b0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 0, enc, opnd);
}
static inline bool
encode_opnd_b0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 0, opnd, enc_out);
}
/* cond: condition operand for conditional compare */
static inline bool
decode_opnd_cond(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 4, false, 0, OPSZ_4b, DR_OPND_IS_CONDITION, enc, opnd);
}
static inline bool
encode_opnd_cond(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 4, false, 0, 0, opnd, enc_out);
}
/* d0: D register at bit position 0 */
static inline bool
decode_opnd_d0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 3, enc, opnd);
}
static inline bool
encode_opnd_d0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 3, opnd, enc_out);
}
/* d10: D register at bit position 10 */
static inline bool
decode_opnd_d10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 3, enc, opnd);
}
static inline bool
encode_opnd_d10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 3, opnd, enc_out);
}
/* dq0: D/Q register at bit position 0; bit 30 selects Q reg */
static inline bool
decode_opnd_dq0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(0, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(0, 30, opnd, enc_out);
}
/* dq0p1: as dq0 but add 1 mod 32 to reg number */
static inline bool
decode_opnd_dq0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(1, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(1, 30, opnd, enc_out);
}
/* dq0p2: as dq0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_dq0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(2, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(2, 30, opnd, enc_out);
}
/* dq0p3: as dq0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_dq0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_dq_plus(3, 30, enc, opnd);
}
static inline bool
encode_opnd_dq0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_dq_plus(3, 30, opnd, enc_out);
}
/* ext: extend type, dr_extend_type_t */
static inline bool
decode_opnd_ext(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(13, 3, false, 0, OPSZ_3b, DR_OPND_IS_EXTEND, enc, opnd);
}
static inline bool
encode_opnd_ext(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(13, 3, false, 0, DR_OPND_IS_EXTEND, opnd, enc_out);
}
/* extam: extend amount, a left shift from 0 to 4 */
static inline bool
decode_opnd_extam(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
return decode_opnd_int(10, 3, false, 0, OPSZ_3b, 0, enc, opnd);
}
static inline bool
encode_opnd_extam(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(10, 3, false, 0, 0, opnd, &t) ||
extract_uint(t, 10, 3) > 4) /* shift amount must be <= 4 */
return false;
*enc_out = t;
return true;
}
/* h0: H register at bit position 0 */
static inline bool
decode_opnd_h0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 1, enc, opnd);
}
static inline bool
encode_opnd_h0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 1, opnd, enc_out);
}
/* ign10: ignored register field at bit position 10 in load/store exclusive */
static inline bool
decode_opnd_ign10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_ign10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 5, false, 0, 0, opnd, enc_out);
}
/* ign16: ignored register field at bit position 16 in load/store exclusive */
static inline bool
decode_opnd_ign16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_ign16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out);
}
/* imm12: 12-bit immediate operand of ADD/SUB */
static inline bool
decode_opnd_imm12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 12, false, 0, OPSZ_12b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 12, false, 0, 0, opnd, enc_out);
}
/* imm12sh: shift amount for 12-bit immediate of ADD/SUB, 0 or 16 */
static inline bool
decode_opnd_imm12sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 1, false, 4, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm12sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 1, false, 4, 0, opnd, enc_out);
}
/* imm16: 16-bit immediate operand of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 16, false, 0, OPSZ_2, 0, enc, opnd);
}
static inline bool
encode_opnd_imm16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 16, false, 0, 0, opnd, enc_out);
}
/* imm16sh: shift amount for 16-bit immediate of MOVK/MOVN/MOVZ/SVC */
static inline bool
decode_opnd_imm16sh(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (!TEST(1U << 31, enc) && TEST(1U << 22, enc))
return false;
return decode_opnd_int(21, 2, false, 4, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm16sh(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(21, 2, false, 4, 0, opnd, &t) ||
(!TEST(1U << 31, enc) && TEST(1U << 22, t)))
return false;
*enc_out = t;
return true;
}
/* imm4: immediate operand for some system instructions */
static inline bool
decode_opnd_imm4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(8, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(8, 4, false, 0, 0, opnd, enc_out);
}
/* imm5: immediate operand for conditional compare (immediate) */
static inline bool
decode_opnd_imm5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(16, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(16, 5, false, 0, 0, opnd, enc_out);
}
/* imm6: shift amount for logical and arithmetical instructions */
static inline bool
decode_opnd_imm6(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 6, false, 0, OPSZ_6b, 0, enc, opnd);
}
static inline bool
encode_opnd_imm6(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 6, false, 0, 0, opnd, enc_out);
}
/* immr: first immediate operand for bitfield operation */
static inline bool
decode_opnd_immr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(16, enc, opnd);
}
static inline bool
encode_opnd_immr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(16, enc, opnd, enc_out);
}
/* imms: second immediate operand for bitfield operation */
static inline bool
decode_opnd_imms(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_imm_bf(10, enc, opnd);
}
static inline bool
encode_opnd_imms(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_imm_bf(10, enc, opnd, enc_out);
}
/* impx30: implicit X30 operand, used by BLR */
static inline bool
decode_opnd_impx30(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_reg(DR_REG_X30);
return true;
}
static inline bool
encode_opnd_impx30(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (!opnd_is_reg(opnd) || opnd_get_reg(opnd) != DR_REG_X30)
return false;
*enc_out = 0;
return true;
}
/* index0: index of B subreg in Q register: 0-15 */
static inline bool
decode_opnd_index0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(0, enc, opnd);
}
static inline bool
encode_opnd_index0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(0, opnd, enc_out);
}
/* index1: index of H subreg in Q register: 0-7 */
static inline bool
decode_opnd_index1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(1, enc, opnd);
}
static inline bool
encode_opnd_index1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(1, opnd, enc_out);
}
/* index2: index of S subreg in Q register: 0-3 */
static inline bool
decode_opnd_index2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(2, enc, opnd);
}
static inline bool
encode_opnd_index2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(2, opnd, enc_out);
}
/* index3: index of D subreg in Q register: 0-1 */
static inline bool
decode_opnd_index3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_index(3, enc, opnd);
}
static inline bool
encode_opnd_index3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_index(3, opnd, enc_out);
}
/* lsl: constant LSL for ADD/MOV, no encoding bits */
static inline bool
decode_opnd_lsl(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
uint t = DR_SHIFT_LSL;
return decode_opnd_int(0, 2, false, 0, OPSZ_2b, DR_OPND_IS_SHIFT, t, opnd);
}
static inline bool
encode_opnd_lsl(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(0, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) ||
t != DR_SHIFT_LSL)
return false;
*enc_out = 0;
return true;
}
/* mem0: memory operand with no offset, gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 2), enc, opnd);
}
static inline bool
encode_opnd_mem0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 2), opnd, enc_out);
}
/* mem0p: as mem0, but a pair of registers, so double size */
static inline bool
decode_opnd_mem0p(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, enc, opnd);
}
static inline bool
encode_opnd_mem0p(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem0_scale(extract_uint(enc, 30, 1) + 3, opnd, enc_out);
}
/* mem12: memory operand with 12-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* mem12q: memory operand with 12-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem12q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(4, false, enc, opnd);
}
static inline bool
encode_opnd_mem12q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(4, false, opnd, enc_out);
}
/* mem7: memory operand with 7-bit offset; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_mem7(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(false, enc, opnd);
}
static inline bool
encode_opnd_mem7(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(false, enc, opnd, enc_out);
}
/* mem7off: just the 7-bit offset from mem7 */
static inline bool
decode_opnd_mem7off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(15, 7, true, mem7_scale(enc), OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem7off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(15, 7, true, mem7_scale(enc), 0, opnd, enc_out);
}
/* mem7post: post-indexed mem7, so offset is zero */
static inline bool
decode_opnd_mem7post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem7_postindex(true, enc, opnd);
}
static inline bool
encode_opnd_mem7post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem7_postindex(true, enc, opnd, enc_out);
}
/* mem9: memory operand with 9-bit offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_mem9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, enc, opnd);
}
static inline bool
encode_opnd_mem9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), false, opnd, enc_out);
}
/* mem9off: just the 9-bit offset from mem9 */
static inline bool
decode_opnd_mem9off(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(12, 9, true, 0, OPSZ_PTR, 0, enc, opnd);
}
static inline bool
encode_opnd_mem9off(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(12, 9, true, 0, 0, opnd, enc_out);
}
/* mem9post: post-indexed mem9, so offset is zero */
static inline bool
decode_opnd_mem9post(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, enc, opnd);
}
static inline bool
encode_opnd_mem9post(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(1 << extract_uint(enc, 30, 2), true, opnd, enc_out);
}
/* mem9q: memory operand with 9-bit offset; size is 16 bytes */
static inline bool
decode_opnd_mem9q(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, false, enc, opnd);
}
static inline bool
encode_opnd_mem9q(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, false, opnd, enc_out);
}
/* mem9qpost: post-indexed mem9q, so offset is zero */
static inline bool
decode_opnd_mem9qpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(16, true, enc, opnd);
}
static inline bool
encode_opnd_mem9qpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(16, true, opnd, enc_out);
}
/* memlit: memory operand for literal load; gets size from bits 26, 30 and 31 */
static inline bool
decode_opnd_memlit(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = opnd_create_rel_addr(pc + 4 * extract_int(enc, 5, 19), memlit_size(enc));
return true;
}
static inline bool
encode_opnd_memlit(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
ptr_uint_t off;
if (!opnd_is_rel_addr(opnd) || opnd_get_size(opnd) != memlit_size(enc))
return false;
off = (byte *)opnd_get_addr(opnd) - pc;
if ((off & 3) != 0 || off + (1U << 20) >= 1U << 21)
return false;
*enc_out = (off >> 2 & 0x7ffff) << 5;
return true;
}
/* memreg: memory operand with register offset; gets size from bits 30 and 31 */
static inline bool
decode_opnd_memreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
enc, opnd);
}
static inline bool
encode_opnd_memreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(opnd_size_from_bytes(1 << extract_uint(enc, 30, 2)),
opnd, enc_out);
}
/* memregq: memory operand with register offset; size is 16 bytes */
static inline bool
decode_opnd_memregq(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_16, enc, opnd);
}
static inline bool
encode_opnd_memregq(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_16, opnd, enc_out);
}
/* memvm: memory operand for SIMD load/store multiple structures */
static inline bool
decode_opnd_memvm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int regs = multistruct_regcount(enc);
opnd_size_t size;
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
size = opnd_get_size(opnd);
if (size != opnd_size_from_bytes(regs * 8) &&
size != opnd_size_from_bytes(regs * 16))
return false;
*enc_out = rn << 5 | (uint)(size == opnd_size_from_bytes(regs * 16)) << 30;
return true;
}
/* memvr: memory operand for SIMD load structure and replicate */
static inline bool
decode_opnd_memvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int bytes, regcount;
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
bytes = opnd_size_in_bytes(opnd_get_size(opnd));
regcount = memvr_regcount(enc);
if (bytes % regcount != 0)
return false;
bytes /= regcount;
if (bytes < 1 || bytes > 8 || (bytes & (bytes - 1)) != 0 ||
opnd_size_from_bytes(bytes * regcount) != opnd_get_size(opnd))
return false;
*enc_out = (rn << 5 |
(bytes == 1 ? 0 : bytes == 2 ? 1 : bytes == 4 ? 2 : 3) << 10);
return true;
}
/* memvrpost: post-indexed memvr */
static inline bool
decode_opnd_memvrpost(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvrpost(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0 ||
opnd_get_size(opnd) != opnd_size_from_bytes(bytes))
return false;
*enc_out = rn << 5;
return true;
}
/* memvs: memory operand for SIMD load/store single structure */
static inline bool
decode_opnd_memvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int bytes = memvs_size(enc);
*opnd = create_base_imm(enc, 0, bytes);
return true;
}
static inline bool
encode_opnd_memvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint rn;
if (!is_base_imm(opnd, &rn) || opnd_get_disp(opnd) != 0)
return false;
if (opnd_get_size(opnd) != opnd_size_from_bytes(memvs_size(enc)))
return false;
*enc_out = rn << 5;
return true;
}
/* nzcv: flag bit specifier for conditional compare */
static inline bool
decode_opnd_nzcv(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 4, false, 0, OPSZ_4b, 0, enc, opnd);
}
static inline bool
encode_opnd_nzcv(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 4, false, 0, 0, opnd, enc_out);
}
/* prf12: prefetch variant of mem12 */
static inline bool
decode_opnd_prf12(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem12_scale(3, true, enc, opnd);
}
static inline bool
encode_opnd_prf12(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem12_scale(3, true, opnd, enc_out);
}
/* prf9: prefetch variant of mem9 */
static inline bool
decode_opnd_prf9(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_mem9_bytes(0, false, enc, opnd);
}
static inline bool
encode_opnd_prf9(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_mem9_bytes(0, false, opnd, enc_out);
}
/* prfop: prefetch operation, such as PLDL1KEEP */
static inline bool
decode_opnd_prfop(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(0, 5, false, 0, OPSZ_5b, 0, enc, opnd);
}
static inline bool
encode_opnd_prfop(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(0, 5, false, 0, 0, opnd, enc_out);
}
/* prfreg: prefetch variant of memreg */
static inline bool
decode_opnd_prfreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_memreg_size(OPSZ_0, enc, opnd);
}
static inline bool
encode_opnd_prfreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_memreg_size(OPSZ_0, opnd, enc_out);
}
/* q0: Q register at bit position 0 */
static inline bool
decode_opnd_q0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 4, enc, opnd);
}
static inline bool
encode_opnd_q0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 4, opnd, enc_out);
}
/* q0p1: as q0 but add 1 mod 32 to reg number */
static inline bool
decode_opnd_q0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(1, enc, opnd);
}
static inline bool
encode_opnd_q0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(1, opnd, enc_out);
}
/* q0p2: as q0 but add 2 mod 32 to reg number */
static inline bool
decode_opnd_q0p2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(2, enc, opnd);
}
static inline bool
encode_opnd_q0p2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(2, opnd, enc_out);
}
/* q0p3: as q0 but add 3 mod 32 to reg number */
static inline bool
decode_opnd_q0p3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_q0p(3, enc, opnd);
}
static inline bool
encode_opnd_q0p3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_q0p(3, opnd, enc_out);
}
/* q10: Q register at bit position 10 */
static inline bool
decode_opnd_q10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 4, enc, opnd);
}
static inline bool
encode_opnd_q10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 4, opnd, enc_out);
}
/* s0: S register at bit position 0 */
static inline bool
decode_opnd_s0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(0, 2, enc, opnd);
}
static inline bool
encode_opnd_s0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(0, 2, opnd, enc_out);
}
/* s10: S register at bit position 10 */
static inline bool
decode_opnd_s10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vector_reg(10, 2, enc, opnd);
}
static inline bool
encode_opnd_s10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vector_reg(10, 2, opnd, enc_out);
}
/* shift3: shift type for ADD/SUB: LSL, LSR or ASR */
static inline bool
decode_opnd_shift3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
if (extract_uint(enc, 22, 2) == 3)
return false;
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, &t) ||
extract_uint(t, 22, 2) == 3)
return false;
*enc_out = t;
return true;
}
/* shift4: shift type for logical operation: LSL, LSR, ASR or ROR */
static inline bool
decode_opnd_shift4(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(22, 2, false, 0, OPSZ_3b, DR_OPND_IS_SHIFT, enc, opnd);
}
static inline bool
encode_opnd_shift4(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(22, 2, false, 0, DR_OPND_IS_SHIFT, opnd, enc_out);
}
/* sysops: immediate operand for SYS instruction */
static inline bool
decode_opnd_sysops(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(5, 14, false, 0, OPSZ_2, 0, enc, opnd);
}
static inline bool
encode_opnd_sysops(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(5, 14, false, 0, 0, opnd, enc_out);
}
/* sysreg: system register, operand of MRS/MSR */
static inline bool
decode_opnd_sysreg(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
*opnd = decode_sysreg(extract_uint(enc, 5, 15));
return true;
}
static inline bool
encode_opnd_sysreg(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
uint t;
if (!encode_sysreg(&t, opnd))
return false;
*enc_out = t << 5;
return true;
}
/* vmsz: B/H/S/D for load/store multiple structures */
static inline bool
decode_opnd_vmsz(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_int(10, 2, false, 0, OPSZ_2b, 0, enc, opnd);
}
static inline bool
encode_opnd_vmsz(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_int(10, 2, false, 0, 0, opnd, enc_out);
}
/* vt0: first register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(0, enc, opnd);
}
static inline bool
encode_opnd_vt0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(0, enc, opnd, enc_out);
}
/* vt1: second register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(1, enc, opnd);
}
static inline bool
encode_opnd_vt1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(1, enc, opnd, enc_out);
}
/* vt2: third register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt2(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(2, enc, opnd);
}
static inline bool
encode_opnd_vt2(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(2, enc, opnd, enc_out);
}
/* vt3: fourth register operand of SIMD load/store multiple structures */
static inline bool
decode_opnd_vt3(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_vtn(3, enc, opnd);
}
static inline bool
encode_opnd_vt3(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_vtn(3, enc, opnd, enc_out);
}
/* w0: W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 0, enc, opnd);
}
static inline bool
encode_opnd_w0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 0, opnd, enc_out);
}
/* w0p0: even-numbered W register or WZR at bit position 0 */
static inline bool
decode_opnd_w0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 0, opnd, enc_out);
}
/* w0p1: even-numbered W register or WZR at bit position 0, add 1 */
static inline bool
decode_opnd_w0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_w0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 0, opnd, enc_out);
}
/* w10: W register or WZR at bit position 10 */
static inline bool
decode_opnd_w10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 10, enc, opnd);
}
static inline bool
encode_opnd_w10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 10, opnd, enc_out);
}
/* w16: W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 16, enc, opnd);
}
static inline bool
encode_opnd_w16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 16, opnd, enc_out);
}
/* w16p0: even-numbered W register or WZR at bit position 16 */
static inline bool
decode_opnd_w16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 0, 16, opnd, enc_out);
}
/* w16p1: even-numbered W register or WZR at bit position 16, add 1 */
static inline bool
decode_opnd_w16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(false, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_w16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(false, 1, 16, opnd, enc_out);
}
/* w5: W register or WZR at bit position 5 */
static inline bool
decode_opnd_w5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(false, false, 5, enc, opnd);
}
static inline bool
encode_opnd_w5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(false, false, 5, opnd, enc_out);
}
/* wx0: W/X register or WZR/XZR at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 0, opnd, enc_out);
}
/* wx0sp: W/X register or WSP/XSP at bit position 0; bit 31 selects X reg */
static inline bool
decode_opnd_wx0sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 0, enc, opnd);
}
static inline bool
encode_opnd_wx0sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 0, opnd, enc_out);
}
/* wx10: W/X register or WZR/XZR at bit position 10; bit 31 selects X reg */
static inline bool
decode_opnd_wx10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 10, enc, opnd);
}
static inline bool
encode_opnd_wx10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 10, opnd, enc_out);
}
/* wx16: W/X register or WZR/XZR at bit position 16; bit 31 selects X reg */
static inline bool
decode_opnd_wx16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 16, enc, opnd);
}
static inline bool
encode_opnd_wx16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 16, opnd, enc_out);
}
/* wx5: W/X register or WZR/XZR at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(false, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(false, 5, opnd, enc_out);
}
/* wx5sp: W/X register or WSP/XSP at bit position 5; bit 31 selects X reg */
static inline bool
decode_opnd_wx5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_rn(true, 5, enc, opnd);
}
static inline bool
encode_opnd_wx5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_rn(true, 5, opnd, enc_out);
}
/* x0: X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 0, enc, opnd);
}
static inline bool
encode_opnd_x0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 0, opnd, enc_out);
}
/* x0p0: even-numbered X register or XZR at bit position 0 */
static inline bool
decode_opnd_x0p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 0, opnd, enc_out);
}
/* x0p1: even-numbered X register or XZR at bit position 0, add 1 */
static inline bool
decode_opnd_x0p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 0, enc, opnd);
}
static inline bool
encode_opnd_x0p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 0, opnd, enc_out);
}
/* x10: X register or XZR at bit position 10 */
static inline bool
decode_opnd_x10(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 10, enc, opnd);
}
static inline bool
encode_opnd_x10(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 10, opnd, enc_out);
}
/* x16: X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 16, enc, opnd);
}
static inline bool
encode_opnd_x16(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 16, opnd, enc_out);
}
/* x16p0: even-numbered X register or XZR at bit position 16 */
static inline bool
decode_opnd_x16p0(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 0, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p0(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 0, 16, opnd, enc_out);
}
/* x16p1: even-numbered X register or XZR at bit position 16, add 1 */
static inline bool
decode_opnd_x16p1(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxnp(true, 1, 16, enc, opnd);
}
static inline bool
encode_opnd_x16p1(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxnp(true, 1, 16, opnd, enc_out);
}
/* x16imm: immediate operand for SIMD load/store multiple structures (post-indexed) */
static inline bool
decode_opnd_x16imm(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16imm(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != (8 << extract_uint(enc, 30, 1)) * multistruct_regcount(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x16immvr: immediate operand for SIMD load structure and replicate (post-indexed) */
static inline bool
decode_opnd_x16immvr(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvr_regcount(enc) << extract_uint(enc, 10, 2);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvr(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvr_regcount(enc) << extract_uint(enc, 10, 2))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x16immvs: immediate operand for SIMD load/store single structure (post-indexed) */
static inline bool
decode_opnd_x16immvs(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
int num = extract_uint(enc, 16, 5);
if (num < 31)
*opnd = opnd_create_reg(DR_REG_X0 + num);
else {
int bytes = memvs_size(enc);
*opnd = opnd_create_immed_int(bytes, OPSZ_1);
}
return true;
}
static inline bool
encode_opnd_x16immvs(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
if (opnd_is_reg(opnd)) {
uint num = opnd_get_reg(opnd) - DR_REG_X0;
if (num == 31)
return false;
*enc_out = num << 16;
return true;
} else if (opnd_is_immed_int(opnd)) {
ptr_int_t bytes = opnd_get_immed_int(opnd);
if (bytes != memvs_size(enc))
return false;
*enc_out = 31U << 16;
return true;
}
return false;
}
/* x5: X register or XZR at position 5 */
static inline bool
decode_opnd_x5(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, false, 5, enc, opnd);
}
static inline bool
encode_opnd_x5(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, false, 5, opnd, enc_out);
}
/* x5sp: X register or XSP at bit position 5 */
static inline bool
decode_opnd_x5sp(uint enc, int opcode, byte *pc, OUT opnd_t *opnd)
{
return decode_opnd_wxn(true, true, 5, enc, opnd);
}
static inline bool
encode_opnd_x5sp(uint enc, int opcode, byte *pc, opnd_t opnd, OUT uint *enc_out)
{
return encode_opnd_wxn(true, true, 5, opnd, enc_out);
}
/*******************************************************************************
* Pairs of functions for decoding and encoding opndsets, as listed in "codec.txt".
* Currently all branch instructions are handled in this way.
*/
/* adr: used for ADR and ADRP */
static inline bool
decode_opnds_adr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd;
if (!decode_opnd_adr_page(opcode == OP_adrp ? 12 : 0, enc, pc, &opnd))
return false;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
instr_set_src(instr, 0, opnd);
return true;
}
static inline uint
encode_opnds_adr(byte *pc, instr_t *instr, uint enc)
{
int opcode = instr_get_opcode(instr);
uint rd, adr;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
encode_opnd_adr_page(opcode == OP_adrp ? 12 : 0,
pc, instr_get_src(instr, 0), &adr, instr) &&
encode_opnd_wxn(true, false, 0, instr_get_dst(instr, 0), &rd))
return (enc | adr | rd);
return ENCFAIL;
}
/* b: used for B and BL */
static inline bool
decode_opnds_b(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
if (opcode == OP_bl) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd_create_reg(DR_REG_X30));
} else
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 0, 26) * 4));
return true;
}
static inline uint
encode_opnds_b(byte *pc, instr_t *instr, uint enc)
{
int opcode = instr_get_opcode(instr);
bool is_bl = (opcode == OP_bl);
uint off, x30;
if (instr_num_dsts(instr) == (is_bl ? 1 : 0) &&
instr_num_srcs(instr) == 1 &&
(!is_bl || encode_opnd_impx30(enc, opcode, pc, instr_get_dst(instr, 0), &x30)) &&
encode_pc_off(&off, 26, pc, instr, instr_get_src(instr, 0)))
return (enc | off);
return ENCFAIL;
}
/* bcond: used for B.cond */
static inline bool
decode_opnds_bcond(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_predicate(instr, DR_PRED_EQ + (enc & 15));
return true;
}
static inline uint
encode_opnds_bcond(byte *pc, instr_t *instr, uint enc)
{
uint off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 1 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0)) &&
(uint)(instr_get_predicate(instr) - DR_PRED_EQ) < 16)
return (enc | off << 5 | (instr_get_predicate(instr) - DR_PRED_EQ));
return ENCFAIL;
}
/* cbz: used for CBNZ and CBZ */
static inline bool
decode_opnds_cbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 19) * 4));
instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
TEST(1U << 31, enc), false)));
return true;
}
static inline uint
encode_opnds_cbz(byte *pc, instr_t *instr, uint enc)
{
uint rt, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
encode_pc_off(&off, 19, pc, instr, instr_get_src(instr, 0)) &&
encode_opnd_rn(false, 0, instr_get_src(instr, 1), &rt))
return (enc | off << 5 | rt);
return ENCFAIL;
}
/* logic_imm: used for AND, ANDS, EOR and ORR.
* Logical (immediate) instructions are awkward because there are sometimes
* many ways of representing the same immediate value. We add the raw encoding
* as an additional operand when the encoding is not the canonical one.
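 * (One source of this redundancy, per the architectural DecodeBitMasks
 * pseudocode: for element sizes smaller than 64 the unused high bits of
 * immr are ignored, so several raw encodings can decode to the same value.)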
*/
static inline bool
decode_opnds_logic_imm(uint enc, dcontext_t *dcontext, byte *pc,
instr_t *instr, int opcode)
{
bool is_x = TEST(1U << 31, enc);
uint imm_enc = extract_uint(enc, 10, 13); /* encoding of bitmask */
ptr_uint_t imm_val = decode_bitmask(imm_enc); /* value of bitmask */
bool canonical = encode_bitmask(imm_val) == imm_enc;
if (imm_val == 0 || (!is_x && TEST(1U << 12, imm_enc)))
return false;
if (!is_x)
imm_val &= 0xffffffff;
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 1, 2 + (canonical ? 0 : 1));
instr_set_dst(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
is_x, opcode != OP_ands)));
instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 5, 5),
is_x, false)));
instr_set_src(instr, 1, opnd_create_immed_uint(imm_val, is_x ? OPSZ_8 : OPSZ_4));
if (!canonical)
instr_set_src(instr, 2, opnd_create_immed_uint(imm_enc, OPSZ_2));
return true;
}
static inline uint
encode_opnds_logic_imm(byte *pc, instr_t *instr, uint enc)
{
int opcode = instr_get_opcode(instr);
int srcs = instr_num_srcs(instr);
opnd_t opnd_val;
ptr_uint_t imm_val;
uint rd, rn;
if (srcs < 2 || srcs > 3 || instr_num_dsts(instr) != 1)
return ENCFAIL;
opnd_val = instr_get_src(instr, 1);
if (!encode_opnd_rn(opcode != OP_ands, 0, instr_get_dst(instr, 0), &rd) ||
!encode_opnd_rn(false, 5, instr_get_src(instr, 0), &rn) ||
TEST(1U << 31, rd ^ rn) ||
!opnd_is_immed_int(opnd_val))
return ENCFAIL;
imm_val = opnd_get_immed_int(opnd_val);
if (!TEST(1U << 31, rd)) {
if ((imm_val >> 32) != 0)
return ENCFAIL;
imm_val |= imm_val << 32;
}
if (srcs == 3) {
opnd_t opnd_enc = instr_get_src(instr, 2);
ptr_int_t imm_enc;
if (!opnd_is_immed_int(opnd_enc))
return ENCFAIL;
imm_enc = opnd_get_immed_int(opnd_enc);
if (imm_enc < 0 || imm_enc > 0x1fff || decode_bitmask(imm_enc) != imm_val)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
} else {
int imm_enc = encode_bitmask(imm_val);
if (imm_enc < 0)
return ENCFAIL;
return (enc | rd | rn | (uint)imm_enc << 10);
}
}
/* msr: used for MSR.
* With MSR the destination register may or may not be one of the system registers
* that we recognise.
*/
static inline bool
decode_opnds_msr(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
opnd_t opnd = decode_sysreg(extract_uint(enc, 5, 15));
instr_set_opcode(instr, opcode);
if (opnd_is_reg(opnd)) {
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_dst(instr, 0, opnd);
} else {
instr_set_num_opnds(dcontext, instr, 0, 2);
instr_set_src(instr, 1, opnd);
}
instr_set_src(instr, 0, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
return true;
}
static inline uint
encode_opnds_msr(byte *pc, instr_t *instr, uint enc)
{
uint imm15, xt;
if (instr_num_dsts(instr) == 1 && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
encode_sysreg(&imm15, instr_get_dst(instr, 0)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt))
return (enc | xt | imm15 << 5);
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 2 &&
opnd_is_immed_int(instr_get_src(instr, 1)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 0), &xt) &&
encode_sysreg(&imm15, instr_get_src(instr, 1)))
return (enc | xt | imm15 << 5);
return ENCFAIL;
}
/* tbz: used for TBNZ and TBZ */
static inline bool
decode_opnds_tbz(uint enc, dcontext_t *dcontext, byte *pc, instr_t *instr, int opcode)
{
instr_set_opcode(instr, opcode);
instr_set_num_opnds(dcontext, instr, 0, 3);
instr_set_src(instr, 0, opnd_create_pc(pc + extract_int(enc, 5, 14) * 4));
instr_set_src(instr, 1, opnd_create_reg(decode_reg(extract_uint(enc, 0, 5),
true, false)));
instr_set_src(instr, 2, opnd_create_immed_int((enc >> 19 & 31) | (enc >> 26 & 32),
OPSZ_5b));
return true;
}
static inline uint
encode_opnds_tbz(byte *pc, instr_t *instr, uint enc)
{
uint xt, imm6, off;
if (instr_num_dsts(instr) == 0 && instr_num_srcs(instr) == 3 &&
encode_pc_off(&off, 14, pc, instr, instr_get_src(instr, 0)) &&
encode_opnd_wxn(true, false, 0, instr_get_src(instr, 1), &xt) &&
encode_opnd_int(0, 6, false, 0, 0, instr_get_src(instr, 2), &imm6))
return (enc | off << 5 | xt | (imm6 & 31) << 19 | (imm6 & 32) << 26);
return ENCFAIL;
}
/******************************************************************************/
/* Include automatically generated decoder and encoder. */
#include "decode_gen.h"
#include "encode_gen.h"
/******************************************************************************/
byte *
decode_common(dcontext_t *dcontext, byte *pc, byte *orig_pc, instr_t *instr)
{
byte *next_pc = pc + 4;
uint enc = *(uint *)pc;
uint eflags = 0;
int opc;
CLIENT_ASSERT(instr->opcode == OP_INVALID || instr->opcode == OP_UNDECODED,
"decode: instr is already decoded, may need to call instr_reset()");
if (!decoder(enc, dcontext, orig_pc, instr)) {
/* We use OP_xx for instructions not yet handled by the decoder.
* If an A64 instruction accesses a general-purpose register
* (except X30) then the number of that register appears in one
* of four possible places in the instruction word, so we can
* pessimistically assume that an unrecognised instruction reads
* and writes all four of those registers, and this is
* sufficient to enable correct (though often excessive) mangling.
*/
instr_set_opcode(instr, OP_xx);
instr_set_num_opnds(dcontext, instr, 4, 5);
instr->src0 = OPND_CREATE_INT32(enc);
instr->srcs[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->dsts[0] = opnd_create_reg(DR_REG_X0 + (enc & 31));
instr->srcs[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->dsts[1] = opnd_create_reg(DR_REG_X0 + (enc >> 5 & 31));
instr->srcs[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->dsts[2] = opnd_create_reg(DR_REG_X0 + (enc >> 10 & 31));
instr->srcs[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
instr->dsts[3] = opnd_create_reg(DR_REG_X0 + (enc >> 16 & 31));
}
/* XXX i#2374: This determination of flag usage should be separate from the decoding
* of operands. Also, we should perhaps add flag information in codec.txt instead of
* listing all the opcodes, although the list is short and unlikely to change.
*/
opc = instr_get_opcode(instr);
if ((opc == OP_mrs && instr_num_srcs(instr) == 1 &&
opnd_is_reg(instr_get_src(instr, 0)) &&
opnd_get_reg(instr_get_src(instr, 0)) == DR_REG_NZCV) ||
opc == OP_bcond ||
opc == OP_adc || opc == OP_adcs || opc == OP_sbc || opc == OP_sbcs ||
opc == OP_csel || opc == OP_csinc || opc == OP_csinv || opc == OP_csneg ||
opc == OP_ccmn || opc == OP_ccmp) {
/* FIXME i#1569: When handled by decoder, add:
* opc == OP_fcsel
*/
eflags |= EFLAGS_READ_NZCV;
}
if ((opc == OP_msr && instr_num_dsts(instr) == 1 &&
opnd_is_reg(instr_get_dst(instr, 0)) &&
opnd_get_reg(instr_get_dst(instr, 0)) == DR_REG_NZCV) ||
opc == OP_adcs || opc == OP_adds || opc == OP_sbcs || opc == OP_subs ||
opc == OP_ands || opc == OP_bics ||
opc == OP_ccmn || opc == OP_ccmp) {
/* FIXME i#1569: When handled by decoder, add:
* opc == OP_fccmp || opc == OP_fccmpe || opc == OP_fcmp || opc == OP_fcmpe
*/
eflags |= EFLAGS_WRITE_NZCV;
}
instr->eflags = eflags;
instr_set_eflags_valid(instr, true);
instr_set_operands_valid(instr, true);
if (orig_pc != pc) {
/* We do not want to copy when encoding and condone an invalid
* relative target.
*/
instr_set_raw_bits_valid(instr, false);
instr_set_translation(instr, orig_pc);
} else {
/* We set raw bits AFTER setting all srcs and dsts because setting
* a src or dst marks instr as having invalid raw bits.
*/
ASSERT(CHECK_TRUNCATE_TYPE_uint(next_pc - pc));
instr_set_raw_bits(instr, pc, (uint)(next_pc - pc));
}
return next_pc;
}
uint
encode_common(byte *pc, instr_t *i)
{
ASSERT(((ptr_int_t)pc & 3) == 0);
return encoder(pc, i);
}
| 1 | 12,158 |
Not sure I see where this is coming from. Is this a bug fix? If so, it should be mentioned in the commit message. Same below.
|
DynamoRIO-dynamorio
|
c
|
@@ -125,9 +125,6 @@ function generateTopologyTests(testSuites, testContext, filter) {
describe(suiteName, {
metadata: { requires },
test: function () {
- beforeEach(() => prepareDatabaseForSuite(testSuite, testContext));
- afterEach(() => testContext.cleanupAfterSuite());
-
testSuite.tests.forEach(spec => {
it(spec.description, function () {
if (
| 1 |
'use strict';
const path = require('path');
const fs = require('fs');
const chai = require('chai');
const expect = chai.expect;
const { EJSON } = require('bson');
const TestRunnerContext = require('./context').TestRunnerContext;
const resolveConnectionString = require('./utils').resolveConnectionString;
const hasOwnProperty = Object.prototype.hasOwnProperty;
// Promise.try alternative https://stackoverflow.com/questions/60624081/promise-try-without-bluebird/60624164?noredirect=1#comment107255389_60624164
function promiseTry(callback) {
return new Promise((resolve, reject) => {
try {
resolve(callback());
} catch (e) {
reject(e);
}
});
}
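// Illustrative usage, mirroring how testOperation calls it further below:
//   promiseTry(() => obj[operationName].apply(obj, args))
// so that a synchronous throw inside the callback surfaces as a rejected promise.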
chai.use(require('chai-subset'));
chai.use(require('./matcher').default);
chai.config.includeStack = true;
chai.config.showDiff = true;
chai.config.truncateThreshold = 0;
function escape(string) {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
function isPlainObject(value) {
return value !== null && typeof value === 'object' && Array.isArray(value) === false;
}
function translateClientOptions(options) {
Object.keys(options).forEach(key => {
if (key === 'readConcernLevel') {
options.readConcern = { level: options.readConcernLevel };
delete options[key];
} else if (key === 'autoEncryptOpts') {
options.autoEncryption = Object.assign({}, options.autoEncryptOpts);
if (options.autoEncryptOpts.keyVaultNamespace == null) {
options.autoEncryption.keyVaultNamespace = 'keyvault.datakeys';
}
if (options.autoEncryptOpts.kmsProviders) {
delete options.kmsProviders;
options.autoEncryption.kmsProviders = options.autoEncryptOpts.kmsProviders;
const kmsProviders = options.autoEncryption.kmsProviders;
if (kmsProviders.aws) {
const awsProvider = kmsProviders.aws;
awsProvider.accessKeyId = process.env.AWS_ACCESS_KEY_ID || 'NOT_PROVIDED';
awsProvider.secretAccessKey = process.env.AWS_SECRET_ACCESS_KEY || 'NOT_PROVIDED';
}
if (kmsProviders.local && kmsProviders.local.key) {
const localKey = kmsProviders.local.key;
if (localKey._bsontype && localKey._bsontype === 'Binary' && localKey.sub_type === 0) {
// this is read in as BSON Binary subtype 0, extract the Buffer
kmsProviders.local.key = kmsProviders.local.key.buffer;
}
}
}
delete options.autoEncryptOpts;
}
});
return options;
}
function gatherTestSuites(specPath) {
return fs
.readdirSync(specPath)
.filter(x => x.indexOf('.json') !== -1)
.map(x =>
Object.assign(EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), {
name: path.basename(x, '.json')
})
);
}
function parseTopologies(topologies) {
if (topologies == null) {
return ['replicaset', 'sharded', 'single'];
}
return topologies;
}
function parseRunOn(runOn) {
return runOn.map(config => {
const topology = parseTopologies(config.topology);
const version = [];
if (config.minServerVersion) {
version.push(`>= ${config.minServerVersion}`);
}
if (config.maxServerVersion) {
version.push(`<= ${config.maxServerVersion}`);
}
const mongodb = version.join(' ');
return { topology, mongodb };
});
}
function generateTopologyTests(testSuites, testContext, filter) {
testSuites.forEach(testSuite => {
// TODO: remove this when SPEC-1255 is completed
let runOn = testSuite.runOn;
if (!testSuite.runOn) {
runOn = [{ minServerVersion: testSuite.minServerVersion }];
if (testSuite.maxServerVersion) {
runOn.push({ maxServerVersion: testSuite.maxServerVersion });
}
}
const environmentRequirementList = parseRunOn(runOn);
environmentRequirementList.forEach(requires => {
const suiteName = `${testSuite.name} - ${requires.topology.join()}`;
describe(suiteName, {
metadata: { requires },
test: function () {
beforeEach(() => prepareDatabaseForSuite(testSuite, testContext));
afterEach(() => testContext.cleanupAfterSuite());
testSuite.tests.forEach(spec => {
it(spec.description, function () {
if (
spec.skipReason ||
(filter && typeof filter === 'function' && !filter(spec, this.configuration))
) {
return this.skip();
}
let testPromise = Promise.resolve();
if (spec.failPoint) {
testPromise = testPromise.then(() => testContext.enableFailPoint(spec.failPoint));
}
// run the actual test
testPromise = testPromise.then(() =>
runTestSuiteTest(this.configuration, spec, testContext)
);
if (spec.failPoint) {
testPromise = testPromise.then(() => testContext.disableFailPoint(spec.failPoint));
}
return testPromise.then(() => validateOutcome(spec, testContext));
});
});
}
});
});
});
}
// Test runner helpers
function prepareDatabaseForSuite(suite, context) {
context.dbName = suite.database_name || 'spec_db';
context.collectionName = suite.collection_name || 'spec_collection';
const db = context.sharedClient.db(context.dbName);
const setupPromise = db
.admin()
.command({ killAllSessions: [] })
.catch(err => {
if (err.message.match(/no such (cmd|command)/) || err.code === 11601) {
return;
}
throw err;
});
if (context.collectionName == null || context.dbName === 'admin') {
return setupPromise;
}
const coll = db.collection(context.collectionName);
return setupPromise
.then(() => coll.drop({ writeConcern: 'majority' }))
.catch(err => {
if (!err.message.match(/ns not found/)) throw err;
})
.then(() => {
if (suite.key_vault_data) {
const dataKeysCollection = context.sharedClient.db('keyvault').collection('datakeys');
return dataKeysCollection
.drop({ w: 'majority' })
.catch(err => {
if (!err.message.match(/ns not found/)) {
throw err;
}
})
.then(() => {
if (suite.key_vault_data.length) {
return dataKeysCollection.insertMany(suite.key_vault_data, { w: 'majority' });
}
});
}
})
.then(() => {
const options = { w: 'majority' };
if (suite.json_schema) {
options.validator = { $jsonSchema: suite.json_schema };
}
return db.createCollection(context.collectionName, options);
})
.then(() => {
if (suite.data && Array.isArray(suite.data) && suite.data.length > 0) {
return coll.insertMany(suite.data, { w: 'majority' });
}
})
.then(() => {
return context.runForAllClients(client => {
return client
.db(context.dbName)
.collection(context.collectionName)
.distinct('x')
.catch(() => {});
});
});
}
function parseSessionOptions(options) {
const result = Object.assign({}, options);
if (result.defaultTransactionOptions && result.defaultTransactionOptions.readPreference) {
result.defaultTransactionOptions.readPreference = normalizeReadPreference(
result.defaultTransactionOptions.readPreference.mode
);
}
return result;
}
const IGNORED_COMMANDS = new Set(['ismaster', 'configureFailPoint', 'endSessions']);
const SDAM_EVENTS = new Set([
'serverOpening',
'serverClosed',
'serverDescriptionChanged',
'topologyOpening',
'topologyClosed',
'topologyDescriptionChanged',
'serverHeartbeatStarted',
'serverHeartbeatSucceeded',
'serverHeartbeatFailed'
]);
const CMAP_EVENTS = new Set([
'connectionPoolCreated',
'connectionPoolClosed',
'connectionCreated',
'connectionReady',
'connectionClosed',
'connectionCheckOutStarted',
'connectionCheckOutFailed',
'connectionCheckedOut',
'connectionCheckedIn',
'connectionPoolCleared'
]);
let displayCommands = false;
function runTestSuiteTest(configuration, spec, context) {
context.commandEvents = [];
const clientOptions = translateClientOptions(
Object.assign(
{
heartbeatFrequencyMS: 100,
minHeartbeatFrequencyMS: 100,
useRecoveryToken: true,
monitorCommands: true
},
spec.clientOptions
)
);
const url = resolveConnectionString(configuration, spec);
const client = configuration.newClient(url, clientOptions);
CMAP_EVENTS.forEach(eventName => client.on(eventName, event => context.cmapEvents.push(event)));
SDAM_EVENTS.forEach(eventName => client.on(eventName, event => context.sdamEvents.push(event)));
client.on('commandStarted', event => {
if (IGNORED_COMMANDS.has(event.commandName)) {
return;
}
context.commandEvents.push(event);
// very useful for debugging
if (displayCommands) {
// console.dir(event, { depth: 5 });
}
});
return client.connect().then(client => {
context.testClient = client;
const sessionOptions = Object.assign({}, spec.transactionOptions);
spec.sessionOptions = spec.sessionOptions || {};
const database = client.db(context.dbName);
let session0, session1;
let savedSessionData;
try {
session0 = client.startSession(
Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0))
);
session1 = client.startSession(
Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1))
);
savedSessionData = {
session0: JSON.parse(EJSON.stringify(session0.id)),
session1: JSON.parse(EJSON.stringify(session1.id))
};
} catch (err) {
// ignore
}
// enable to see useful APM debug information at the time of actual test run
// displayCommands = true;
const operationContext = {
client,
database,
collectionName: context.collectionName,
session0,
session1,
testRunner: context
};
let testPromise = Promise.resolve();
return testPromise
.then(() => testOperations(spec, operationContext))
.catch(err => {
        // If the driver throws an exception / returns an error while executing this series
        // of operations, rethrow it so the failure can be reported by the caller.
throw err;
})
.then(() => {
const promises = [];
if (session0) promises.push(session0.endSession());
if (session1) promises.push(session1.endSession());
return Promise.all(promises);
})
.then(() => validateExpectations(context.commandEvents, spec, savedSessionData));
});
}
function validateOutcome(testData, testContext) {
if (testData.outcome && testData.outcome.collection) {
const outcomeCollection = testData.outcome.collection.name || testContext.collectionName;
// use the client without transactions to verify
return testContext.sharedClient
.db(testContext.dbName)
.collection(outcomeCollection)
.find({}, { readPreference: 'primary', readConcern: { level: 'local' } })
.sort({ _id: 1 })
.toArray()
.then(docs => {
expect(docs).to.matchMongoSpec(testData.outcome.collection.data);
});
}
return Promise.resolve();
}
function validateExpectations(commandEvents, spec, savedSessionData) {
if (!spec.expectations || !Array.isArray(spec.expectations) || spec.expectations.length === 0) {
return;
}
const actualEvents = normalizeCommandShapes(commandEvents);
const rawExpectedEvents = spec.expectations.map(x => x.command_started_event);
const expectedEvents = normalizeCommandShapes(rawExpectedEvents);
expect(actualEvents).to.have.length(expectedEvents.length);
expectedEvents.forEach((expected, idx) => {
const actual = actualEvents[idx];
if (expected.commandName != null) {
expect(actual.commandName).to.equal(expected.commandName);
}
if (expected.databaseName != null) {
expect(actual.databaseName).to.equal(expected.databaseName);
}
const actualCommand = actual.command;
const expectedCommand = expected.command;
expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand);
});
}
function normalizeCommandShapes(commands) {
return commands.map(command =>
JSON.parse(
EJSON.stringify(
{
command: command.command,
commandName: command.command_name ? command.command_name : command.commandName,
databaseName: command.database_name ? command.database_name : command.databaseName
},
{ relaxed: true }
)
)
);
}
function extractCrudResult(result, operation) {
if (Array.isArray(result) || !isPlainObject(result)) {
return result;
}
if (result.value) {
// some of our findAndModify results return more than just an id, so we need to pluck
const resultKeys = Object.keys(operation.result);
if (resultKeys.length === 1 && resultKeys[0] === '_id') {
return { _id: result.value._id };
}
return result.value;
}
return Object.keys(operation.result).reduce((crudResult, key) => {
if (hasOwnProperty.call(result, key) && result[key] != null) {
// FIXME(major): update crud results are broken and need to be changed
crudResult[key] = key === 'upsertedId' ? result[key]._id : result[key];
}
return crudResult;
}, {});
}
function isTransactionCommand(command) {
return ['startTransaction', 'commitTransaction', 'abortTransaction'].indexOf(command) !== -1;
}
function isTestRunnerCommand(context, commandName) {
const testRunnerContext = context.testRunner;
let methods = new Set();
let object = testRunnerContext;
while (object !== Object.prototype) {
Object.getOwnPropertyNames(object)
.filter(prop => typeof object[prop] === 'function' && prop !== 'constructor')
.map(prop => methods.add(prop));
object = Object.getPrototypeOf(object);
}
return methods.has(commandName);
}
function extractBulkRequests(requests) {
return requests.map(request => ({ [request.name]: request.arguments }));
}
function translateOperationName(operationName) {
if (operationName === 'runCommand') return 'command';
if (operationName === 'listDatabaseNames') return 'listDatabases';
if (operationName === 'listCollectionNames') return 'listCollections';
return operationName;
}
function normalizeReadPreference(mode) {
return mode.charAt(0).toLowerCase() + mode.substr(1);
}
function resolveOperationArgs(operationName, operationArgs, context) {
const result = [];
function pluck(fromObject, toArray, fields) {
for (const field of fields) {
if (fromObject[field]) toArray.push(fromObject[field]);
}
}
// TODO: migrate all operations here
if (operationName === 'distinct') {
pluck(operationArgs, result, ['fieldName', 'filter']);
if (result.length === 1) result.push({});
} else {
return;
}
// compile the options
const options = {};
if (operationArgs.options) {
Object.assign(options, operationArgs.options);
if (options.readPreference) {
options.readPreference = normalizeReadPreference(options.readPreference.mode);
}
}
if (operationArgs.session) {
if (isTransactionCommand(operationName)) return;
options.session = context[operationArgs.session];
}
result.push(options);
// determine if there is a callback to add
if (operationArgs.callback) {
result.push(() =>
testOperations(operationArgs.callback, context, { swallowOperationErrors: false })
);
}
return result;
}
const CURSOR_COMMANDS = new Set(['find', 'aggregate', 'listIndexes', 'listCollections']);
const ADMIN_COMMANDS = new Set(['listDatabases']);
function maybeSession(operation, context) {
return (
operation &&
operation.arguments &&
operation.arguments.session &&
context[operation.arguments.session]
);
}
const kOperations = new Map([
[
'recordPrimary',
(operation, testRunner, context /*, options */) => {
testRunner.recordPrimary(context.client);
}
],
[
'waitForPrimaryChange',
(operation, testRunner, context /*, options */) => {
return testRunner.waitForPrimaryChange(context.client);
}
],
[
'runOnThread',
(operation, testRunner, context, options) => {
const args = operation.arguments;
const threadName = args.name;
const subOperation = args.operation;
return testRunner.runOnThread(
threadName,
testOperation(subOperation, context[subOperation.object], context, options)
);
}
],
[
'createIndex',
(operation, collection, context /*, options */) => {
const fieldOrSpec = operation.arguments.keys;
const options = { session: maybeSession(operation, context) };
if (operation.arguments.name) options.name = operation.arguments.name;
return collection.createIndex(fieldOrSpec, options);
}
],
[
'createCollection',
(operation, db, context /*, options */) => {
const collectionName = operation.arguments.collection;
const session = maybeSession(operation, context);
return db.createCollection(collectionName, { session });
}
],
[
'dropCollection',
(operation, db, context /*, options */) => {
const collectionName = operation.arguments.collection;
const session = maybeSession(operation, context);
return db.dropCollection(collectionName, { session });
}
],
[
'dropIndex',
    (operation, collection, context /*, options */) => {
const indexName = operation.arguments.name;
const session = maybeSession(operation, context);
return collection.dropIndex(indexName, { session });
}
],
[
'mapReduce',
(operation, collection, context /*, options */) => {
const args = operation.arguments;
const map = args.map;
const reduce = args.reduce;
const options = { session: maybeSession(operation, context) };
if (args.out) options.out = args.out;
return collection.mapReduce(map, reduce, options);
}
]
]);
/**
* @param {object} operation the operation definition from the spec test
* @param {object} obj the object to call the operation on
* @param {object} context a context object containing sessions used for the test
* @param {object} [options] Optional settings
 * @param {boolean} [options.swallowOperationErrors] Generally we want to observe operation errors, validate them against our expectations, and then swallow them. In cases like `withTransaction` we want to use the same `testOperations` to build the lambda, and in those cases it is not desirable to swallow the errors, since we need to test this behavior.
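 * For example, the callback handling below builds nested operations with
 * `testOperations(operation.arguments.callback, context, { swallowOperationErrors: false })`
 * so that an error thrown inside a transaction callback propagates instead of being swallowed.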
*/
function testOperation(operation, obj, context, options) {
options = options || { swallowOperationErrors: true };
const opOptions = {};
let args = [];
const operationName = translateOperationName(operation.name);
let opPromise;
if (kOperations.has(operationName)) {
opPromise = kOperations.get(operationName)(operation, obj, context, options);
} else {
if (operation.arguments) {
args = resolveOperationArgs(operationName, operation.arguments, context);
if (args == null) {
args = [];
Object.keys(operation.arguments).forEach(key => {
if (key === 'callback') {
args.push(() =>
testOperations(operation.arguments.callback, context, {
swallowOperationErrors: false
})
);
return;
}
if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) {
return args.unshift(operation.arguments[key]);
}
if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') {
return args.unshift(operation.arguments[key]);
}
if (key === 'command') return args.unshift(operation.arguments[key]);
if (key === 'requests')
return args.unshift(extractBulkRequests(operation.arguments[key]));
if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]);
if (key === 'session') {
if (isTransactionCommand(operationName)) return;
opOptions.session = context[operation.arguments.session];
return;
}
if (key === 'returnDocument') {
opOptions.returnOriginal = operation.arguments[key] === 'Before' ? true : false;
return;
}
if (key === 'options') {
Object.assign(opOptions, operation.arguments[key]);
if (opOptions.readPreference) {
opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode);
}
return;
}
if (key === 'readPreference') {
opOptions[key] = normalizeReadPreference(operation.arguments[key].mode);
return;
}
opOptions[key] = operation.arguments[key];
});
}
}
if (
args.length === 0 &&
!isTransactionCommand(operationName) &&
!isTestRunnerCommand(context, operationName)
) {
args.push({});
}
if (Object.keys(opOptions).length > 0) {
// NOTE: this is awful, but in order to provide options for some methods we need to add empty
// query objects.
if (operationName === 'distinct') {
args.push({});
}
args.push(opOptions);
}
if (ADMIN_COMMANDS.has(operationName)) {
obj = obj.db().admin();
}
if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') {
opOptions.nameOnly = true;
}
if (CURSOR_COMMANDS.has(operationName)) {
// `find` creates a cursor, so we need to call `toArray` on it
const cursor = obj[operationName].apply(obj, args);
opPromise = cursor.toArray();
} else {
// wrap this in a `promiseTry` because some operations might throw
opPromise = promiseTry(() => obj[operationName].apply(obj, args));
}
}
if (operation.error) {
opPromise = opPromise.then(
() => {
throw new Error('expected an error!');
},
() => {}
);
}
if (operation.result) {
const result = operation.result;
if (
result.errorContains != null ||
result.errorCodeName ||
result.errorLabelsContain ||
result.errorLabelsOmit
) {
return opPromise.then(
() => {
throw new Error('expected an error!');
},
err => {
const errorContains = result.errorContains;
const errorCodeName = result.errorCodeName;
const errorLabelsContain = result.errorLabelsContain;
const errorLabelsOmit = result.errorLabelsOmit;
if (errorLabelsContain) {
expect(err).to.have.property('errorLabels');
expect(err.errorLabels).to.include.members(errorLabelsContain);
}
if (errorLabelsOmit) {
if (err.errorLabels && Array.isArray(err.errorLabels) && err.errorLabels.length !== 0) {
expect(errorLabelsOmit).to.not.include.members(err.errorLabels);
}
}
if (operation.result.errorContains) {
expect(err.message).to.match(new RegExp(escape(errorContains), 'i'));
}
if (errorCodeName) {
expect(err.codeName).to.equal(errorCodeName);
}
if (!options.swallowOperationErrors) {
throw err;
}
}
);
}
return opPromise.then(opResult => {
const actual = extractCrudResult(opResult, operation);
expect(actual).to.matchMongoSpec(operation.result);
});
}
return opPromise;
}
function convertCollectionOptions(options) {
const result = {};
Object.keys(options).forEach(key => {
if (key === 'readPreference') {
result[key] = normalizeReadPreference(options[key].mode);
} else {
result[key] = options[key];
}
});
return result;
}
function testOperations(testData, operationContext, options) {
options = options || { swallowOperationErrors: true };
return testData.operations.reduce((combined, operation) => {
return combined.then(() => {
const object = operation.object || 'collection';
if (object === 'collection') {
const db = operationContext.database;
const collectionName = operationContext.collectionName;
const collectionOptions = operation.collectionOptions || {};
operationContext[object] = db.collection(
collectionName,
convertCollectionOptions(collectionOptions)
);
}
return testOperation(operation, operationContext[object], operationContext, options);
});
}, Promise.resolve());
}
module.exports = {
TestRunnerContext,
gatherTestSuites,
generateTopologyTests,
parseRunOn
};
| 1 | 18,798 |
I reverted this change in the last commit, and I think we should revert it here as well. I know this leads to confusing test output for skipped tests, but I would prefer to tackle that in a separate PR specifically focused on that problem. In fact, I think the issue might really be with the leak checker rather than with how this test runner is defined. Please file a ticket for this (if one doesn't already exist) and revert this change.
|
mongodb-node-mongodb-native
|
js
|
@@ -213,6 +213,9 @@ class LoadAnnotations:
annotation. Default: False.
poly2mask (bool): Whether to convert the instance masks from polygons
to bitmaps. Default: True.
+ normed_bbox (bool): Whether to convert bbox from relative value to
+ absolute value. Only used in OpenImage Dataset.
+ Default: False.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
| 1 |
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmdet.core import BitmapMasks, PolygonMasks
from ..builder import PIPELINES
try:
from panopticapi.utils import rgb2id
except ImportError:
rgb2id = None
@PIPELINES.register_module()
class LoadImageFromFile:
"""Load an image from file.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename"). Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
Defaults to 'color'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='color',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = osp.join(results['img_prefix'],
results['img_info']['filename'])
else:
filename = results['img_info']['filename']
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['img_fields'] = ['img']
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadImageFromWebcam(LoadImageFromFile):
"""Load an image from webcam.
    Similar to :obj:`LoadImageFromFile`, but the image read from webcam is in
``results['img']``.
"""
def __call__(self, results):
"""Call functions to add image meta information.
Args:
results (dict): Result dict with Webcam read image in
``results['img']``.
Returns:
dict: The dict contains loaded image and meta information.
"""
img = results['img']
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = None
results['ori_filename'] = None
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['img_fields'] = ['img']
return results
@PIPELINES.register_module()
class LoadMultiChannelImageFromFiles:
"""Load multi-channel images from a list of separate channel files.
Required keys are "img_prefix" and "img_info" (a dict that must contain the
key "filename", which is expected to be a list of filenames).
Added or updated keys are "filename", "img", "img_shape",
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
Args:
to_float32 (bool): Whether to convert the loaded image to a float32
            numpy array. If set to False, the loaded image is a uint8 array.
Defaults to False.
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
            Defaults to 'unchanged'.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
to_float32=False,
color_type='unchanged',
file_client_args=dict(backend='disk')):
self.to_float32 = to_float32
self.color_type = color_type
self.file_client_args = file_client_args.copy()
self.file_client = None
def __call__(self, results):
"""Call functions to load multiple images and get images meta
information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded images and meta information.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
if results['img_prefix'] is not None:
filename = [
osp.join(results['img_prefix'], fname)
for fname in results['img_info']['filename']
]
else:
filename = results['img_info']['filename']
img = []
for name in filename:
img_bytes = self.file_client.get(name)
img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
img = np.stack(img, axis=-1)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
results['ori_filename'] = results['img_info']['filename']
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'to_float32={self.to_float32}, '
f"color_type='{self.color_type}', "
f'file_client_args={self.file_client_args})')
return repr_str
@PIPELINES.register_module()
class LoadAnnotations:
"""Load multiple types of annotations.
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Default: True.
with_label (bool): Whether to parse and load the label annotation.
Default: True.
with_mask (bool): Whether to parse and load the mask annotation.
Default: False.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Default: False.
poly2mask (bool): Whether to convert the instance masks from polygons
to bitmaps. Default: True.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=False,
with_seg=False,
poly2mask=True,
file_client_args=dict(backend='disk')):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.file_client_args = file_client_args.copy()
self.file_client = None
def _load_bboxes(self, results):
"""Private function to load bounding box annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded bounding box annotations.
"""
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
results['bbox_fields'].append('gt_bboxes')
return results
def _load_labels(self, results):
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded label annotations.
"""
results['gt_labels'] = results['ann_info']['labels'].copy()
return results
def _poly2mask(self, mask_ann, img_h, img_w):
"""Private function to convert masks represented with polygon to
bitmaps.
Args:
mask_ann (list | dict): Polygon mask annotation input.
img_h (int): The height of output mask.
img_w (int): The width of output mask.
Returns:
            numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).
"""
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_polygons(self, polygons):
"""Convert polygons to list of ndarray and filter invalid polygons.
Args:
polygons (list[list]): Polygons of one instance.
Returns:
list[numpy.ndarray]: Processed polygons.
"""
polygons = [np.array(p) for p in polygons]
valid_polygons = []
for polygon in polygons:
if len(polygon) % 2 == 0 and len(polygon) >= 6:
valid_polygons.append(polygon)
return valid_polygons
def _load_masks(self, results):
"""Private function to load mask annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded mask annotations.
                If ``self.poly2mask`` is set ``True``, `gt_mask` will contain
                :obj:`BitmapMasks`. Otherwise, :obj:`PolygonMasks` is used.
"""
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = results['ann_info']['masks']
if self.poly2mask:
gt_masks = BitmapMasks(
[self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
else:
gt_masks = PolygonMasks(
[self.process_polygons(polygons) for polygons in gt_masks], h,
w)
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
return results
def _load_semantic_seg(self, results):
"""Private function to load semantic segmentation annotations.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: The dict contains loaded semantic segmentation annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
filename = osp.join(results['seg_prefix'],
results['ann_info']['seg_map'])
img_bytes = self.file_client.get(filename)
results['gt_semantic_seg'] = mmcv.imfrombytes(
img_bytes, flag='unchanged').squeeze()
results['seg_fields'].append('gt_semantic_seg')
return results
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded bounding box, label, mask and
semantic segmentation annotations.
"""
if self.with_bbox:
results = self._load_bboxes(results)
if results is None:
return None
if self.with_label:
results = self._load_labels(results)
if self.with_mask:
results = self._load_masks(results)
if self.with_seg:
results = self._load_semantic_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(with_bbox={self.with_bbox}, '
repr_str += f'with_label={self.with_label}, '
repr_str += f'with_mask={self.with_mask}, '
repr_str += f'with_seg={self.with_seg}, '
repr_str += f'poly2mask={self.poly2mask}, '
        repr_str += f'file_client_args={self.file_client_args})'
return repr_str
@PIPELINES.register_module()
class LoadPanopticAnnotations(LoadAnnotations):
"""Load multiple types of panoptic annotations.
Args:
with_bbox (bool): Whether to parse and load the bbox annotation.
Default: True.
with_label (bool): Whether to parse and load the label annotation.
Default: True.
with_mask (bool): Whether to parse and load the mask annotation.
Default: True.
with_seg (bool): Whether to parse and load the semantic segmentation
annotation. Default: True.
file_client_args (dict): Arguments to instantiate a FileClient.
See :class:`mmcv.fileio.FileClient` for details.
Defaults to ``dict(backend='disk')``.
"""
def __init__(self,
with_bbox=True,
with_label=True,
with_mask=True,
with_seg=True,
file_client_args=dict(backend='disk')):
if rgb2id is None:
raise RuntimeError(
'panopticapi is not installed, please install it by: '
'pip install git+https://github.com/cocodataset/'
'panopticapi.git.')
super(LoadPanopticAnnotations,
self).__init__(with_bbox, with_label, with_mask, with_seg, True,
file_client_args)
def _load_masks_and_semantic_segs(self, results):
"""Private function to load mask and semantic segmentation annotations.
In gt_semantic_seg, the foreground label is from `0` to
`num_things - 1`, the background label is from `num_things` to
`num_things + num_stuff - 1`, 255 means the ignored label (`VOID`).
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded mask and semantic segmentation
annotations. `BitmapMasks` is used for mask annotations.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
filename = osp.join(results['seg_prefix'],
results['ann_info']['seg_map'])
img_bytes = self.file_client.get(filename)
pan_png = mmcv.imfrombytes(
img_bytes, flag='color', channel_order='rgb').squeeze()
pan_png = rgb2id(pan_png)
gt_masks = []
gt_seg = np.zeros_like(pan_png) + 255 # 255 as ignore
for mask_info in results['ann_info']['masks']:
mask = (pan_png == mask_info['id'])
gt_seg = np.where(mask, mask_info['category'], gt_seg)
            # Collect masks only for valid 'thing' instances
if mask_info.get('is_thing'):
gt_masks.append(mask.astype(np.uint8))
if self.with_mask:
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = BitmapMasks(gt_masks, h, w)
results['gt_masks'] = gt_masks
results['mask_fields'].append('gt_masks')
if self.with_seg:
results['gt_semantic_seg'] = gt_seg
results['seg_fields'].append('gt_semantic_seg')
return results
def __call__(self, results):
"""Call function to load multiple types panoptic annotations.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded bounding box, label, mask and
semantic segmentation annotations.
"""
if self.with_bbox:
results = self._load_bboxes(results)
if results is None:
return None
if self.with_label:
results = self._load_labels(results)
if self.with_mask or self.with_seg:
# The tasks completed by '_load_masks' and '_load_semantic_segs'
# in LoadAnnotations are merged to one function.
results = self._load_masks_and_semantic_segs(results)
return results
@PIPELINES.register_module()
class LoadProposals:
"""Load proposal pipeline.
Required key is "proposals". Updated keys are "proposals", "bbox_fields".
Args:
num_max_proposals (int, optional): Maximum number of proposals to load.
If not specified, all proposals will be loaded.
"""
def __init__(self, num_max_proposals=None):
self.num_max_proposals = num_max_proposals
def __call__(self, results):
"""Call function to load proposals from file.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded proposal annotations.
"""
proposals = results['proposals']
if proposals.shape[1] not in (4, 5):
raise AssertionError(
'proposals should have shapes (n, 4) or (n, 5), '
f'but found {proposals.shape}')
proposals = proposals[:, :4]
if self.num_max_proposals is not None:
proposals = proposals[:self.num_max_proposals]
if len(proposals) == 0:
proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
results['proposals'] = proposals
results['bbox_fields'].append('proposals')
return results
def __repr__(self):
return self.__class__.__name__ + \
f'(num_max_proposals={self.num_max_proposals})'
@PIPELINES.register_module()
class FilterAnnotations:
"""Filter invalid annotations.
Args:
min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
boxes.
"""
def __init__(self, min_gt_bbox_wh):
# TODO: add more filter options
self.min_gt_bbox_wh = min_gt_bbox_wh
def __call__(self, results):
assert 'gt_bboxes' in results
gt_bboxes = results['gt_bboxes']
w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
if not keep.any():
return None
else:
keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
for key in keys:
if key in results:
results[key] = results[key][keep]
return results
| 1 | 26,123 |
Can this part be done in `dataset.get_ann_info`, just like CocoDataset converts xywh to xyxy in `_parse_ann_info`?
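A rough sketch of what that could look like (illustrative only; attribute and key names such as `data_infos`, `ann`, `height` and `width` are assumptions about the dataset layout, not the actual OpenImages implementation):

```python
import numpy as np

# Illustrative only: convert relative bboxes to absolute values inside the
# dataset's get_ann_info(), mirroring how CocoDataset converts xywh to xyxy
# in _parse_ann_info, instead of adding a normed_bbox flag to LoadAnnotations.
class OpenImagesDatasetSketch:
    def get_ann_info(self, idx):
        info = self.data_infos[idx]  # assumed per-image record
        bboxes = np.array(info['ann']['bboxes'], dtype=np.float32)  # relative x1, y1, x2, y2
        labels = np.array(info['ann']['labels'], dtype=np.int64)
        h, w = info['height'], info['width']  # assumed to be stored on the record
        # Scale normalized [0, 1] coordinates to absolute pixel coordinates.
        bboxes[:, 0::2] *= w
        bboxes[:, 1::2] *= h
        return dict(bboxes=bboxes, labels=labels)
```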
|
open-mmlab-mmdetection
|
py
|
@@ -32,10 +32,10 @@ func (*testTraceProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer
func TestMultipleGlobalTracerProvider(t *testing.T) {
p1 := testTraceProvider{}
p2 := trace.NoopProvider{}
- global.SetTraceProvider(&p1)
- global.SetTraceProvider(&p2)
+ global.SetTracerProvider(&p1)
+ global.SetTracerProvider(&p2)
- got := global.TraceProvider()
+ got := global.TracerProvider()
want := &p2
if got != want {
t.Fatalf("Provider: got %p, want %p\n", got, want)
| 1 |
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global_test
import (
"testing"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/trace"
)
type testTraceProvider struct{}
var _ trace.Provider = &testTraceProvider{}
func (*testTraceProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer {
return &trace.NoopTracer{}
}
func TestMultipleGlobalTracerProvider(t *testing.T) {
p1 := testTraceProvider{}
p2 := trace.NoopProvider{}
global.SetTraceProvider(&p1)
global.SetTraceProvider(&p2)
got := global.TraceProvider()
want := &p2
if got != want {
t.Fatalf("Provider: got %p, want %p\n", got, want)
}
}
| 1 | 13,209 |
Should we change the name of this test struct to be `testTracerProvider` as well?
|
open-telemetry-opentelemetry-go
|
go
|
@@ -21,13 +21,14 @@ import (
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
- "github.com/iotexproject/iotex-core/explorer"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/probe"
+ "github.com/iotexproject/iotex-core/protogen/iotexapi"
"github.com/iotexproject/iotex-core/server/itx"
"github.com/iotexproject/iotex-core/testutil"
"github.com/iotexproject/iotex-core/tools/util"
+ "google.golang.org/grpc"
)
const (
| 1 |
// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
// usage: make minicluster
package main
import (
"context"
"flag"
"fmt"
"math"
"sync"
"time"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/explorer"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/probe"
"github.com/iotexproject/iotex-core/server/itx"
"github.com/iotexproject/iotex-core/testutil"
"github.com/iotexproject/iotex-core/tools/util"
)
const (
numNodes = 4
numAdmins = 2
)
func main() {
	// timeout indicates the duration of running the nightly build, in seconds. Default is 100
var timeout int
	// aps indicates how many actions are injected per second. Default is 1
var aps float64
// smart contract deployment data. Default is "608060405234801561001057600080fd5b506102f5806100206000396000f3006080604052600436106100615763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632885ad2c8114610066578063797d9fbd14610070578063cd5e3c5d14610091578063d0e30db0146100b8575b600080fd5b61006e6100c0565b005b61006e73ffffffffffffffffffffffffffffffffffffffff600435166100cb565b34801561009d57600080fd5b506100a6610159565b60408051918252519081900360200190f35b61006e610229565b6100c9336100cb565b565b60006100d5610159565b6040805182815290519192507fbae72e55df73720e0f671f4d20a331df0c0dc31092fda6c573f35ff7f37f283e919081900360200190a160405173ffffffffffffffffffffffffffffffffffffffff8316906305f5e100830280156108fc02916000818181858888f19350505050158015610154573d6000803e3d6000fd5b505050565b604080514460208083019190915260001943014082840152825180830384018152606090920192839052815160009360059361021a9360029391929182918401908083835b602083106101bd5780518252601f19909201916020918201910161019e565b51815160209384036101000a600019018019909216911617905260405191909301945091925050808303816000865af11580156101fe573d6000803e3d6000fd5b5050506040513d602081101561021357600080fd5b5051610261565b81151561022357fe5b06905090565b60408051348152905133917fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c919081900360200190a2565b600080805b60208110156102c25780600101602060ff160360080260020a848260208110151561028d57fe5b7f010000000000000000000000000000000000000000000000000000000000000091901a810204029190910190600101610266565b50929150505600a165627a7a72305820a426929891673b0a04d7163b60113d28e7d0f48ea667680ba48126c182b872c10029"
var deployExecData string
// smart contract interaction data. Default is "d0e30db0"
var interactExecData string
flag.IntVar(&timeout, "timeout", 100, "duration of running nightly build")
flag.Float64Var(&aps, "aps", 1, "actions to be injected per second")
flag.StringVar(&deployExecData, "deploy-data", "608060405234801561001057600080fd5b506102f5806100206000396000f3006080604052600436106100615763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632885ad2c8114610066578063797d9fbd14610070578063cd5e3c5d14610091578063d0e30db0146100b8575b600080fd5b61006e6100c0565b005b61006e73ffffffffffffffffffffffffffffffffffffffff600435166100cb565b34801561009d57600080fd5b506100a6610159565b60408051918252519081900360200190f35b61006e610229565b6100c9336100cb565b565b60006100d5610159565b6040805182815290519192507fbae72e55df73720e0f671f4d20a331df0c0dc31092fda6c573f35ff7f37f283e919081900360200190a160405173ffffffffffffffffffffffffffffffffffffffff8316906305f5e100830280156108fc02916000818181858888f19350505050158015610154573d6000803e3d6000fd5b505050565b604080514460208083019190915260001943014082840152825180830384018152606090920192839052815160009360059361021a9360029391929182918401908083835b602083106101bd5780518252601f19909201916020918201910161019e565b51815160209384036101000a600019018019909216911617905260405191909301945091925050808303816000865af11580156101fe573d6000803e3d6000fd5b5050506040513d602081101561021357600080fd5b5051610261565b81151561022357fe5b06905090565b60408051348152905133917fe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c919081900360200190a2565b600080805b60208110156102c25780600101602060ff160360080260020a848260208110151561028d57fe5b7f010000000000000000000000000000000000000000000000000000000000000091901a810204029190910190600101610266565b50929150505600a165627a7a72305820a426929891673b0a04d7163b60113d28e7d0f48ea667680ba48126c182b872c10029",
"smart contract deployment data")
flag.StringVar(&interactExecData, "interact-data", "d0e30db0", "smart contract interaction data")
flag.Parse()
// path of config file containing all the public/private key pairs of addresses getting transfers
// from Creator in genesis block
injectorConfigPath := "./tools/minicluster/gentsfaddrs.yaml"
chainAddrs, err := util.LoadAddresses(injectorConfigPath, uint32(1))
if err != nil {
log.L().Fatal("Failed to load addresses from config path", zap.Error(err))
}
admins := chainAddrs[len(chainAddrs)-numAdmins:]
delegates := chainAddrs[:len(chainAddrs)-numAdmins]
// path of config file containing all the transfers and self-nominations in genesis block
genesisConfigPath := "./tools/minicluster/testnet_actions.yaml"
// Set mini-cluster configurations
configs := make([]config.Config, numNodes)
for i := 0; i < numNodes; i++ {
chainDBPath := fmt.Sprintf("./chain%d.db", i+1)
trieDBPath := fmt.Sprintf("./trie%d.db", i+1)
networkPort := 4689 + i
explorerPort := 14004 + i
config := newConfig(genesisConfigPath, chainDBPath, trieDBPath, chainAddrs[i].PriKey,
networkPort, explorerPort)
if i == 0 {
config.Network.BootstrapNodes = []string{}
config.Network.MasterKey = "bootnode"
}
configs[i] = config
}
// Create mini-cluster
svrs := make([]*itx.Server, numNodes)
for i := 0; i < numNodes; i++ {
svr, err := itx.NewServer(configs[i])
if err != nil {
log.L().Fatal("Failed to create server.", zap.Error(err))
}
svrs[i] = svr
}
// Start mini-cluster
for i := 0; i < numNodes; i++ {
go itx.StartServer(context.Background(), svrs[i], probe.New(7788), configs[i])
}
if err := testutil.WaitUntil(10*time.Millisecond, 2*time.Second, func() (bool, error) {
return svrs[0].ChainService(uint32(1)).Explorer().Port() == 14004, nil
}); err != nil {
log.L().Fatal("Failed to start explorer JSON-RPC server", zap.Error(err))
}
// target address for jrpc connection. Default is "127.0.0.1:14004"
jrpcAddr := "127.0.0.1:14004"
client := explorer.NewExplorerProxy("http://" + jrpcAddr)
counter, err := util.InitCounter(client, chainAddrs)
if err != nil {
log.L().Fatal("Failed to initialize nonce counter", zap.Error(err))
}
// Inject actions to first node
if aps > 0 {
// transfer gas limit. Default is 1000000
transferGasLimit := 1000000
// transfer gas price. Default is 10
transferGasPrice := 10
// transfer payload. Default is ""
transferPayload := ""
// vote gas limit. Default is 1000000
voteGasLimit := 1000000
// vote gas price. Default is 10
voteGasPrice := 10
// execution amount. Default is 0
executionAmount := 0
// execution gas limit. Default is 1200000
executionGasLimit := 1200000
// execution gas price. Default is 10
executionGasPrice := 10
// maximum number of rpc retries. Default is 5
retryNum := 5
// sleeping period between two consecutive rpc retries in seconds. Default is 1
retryInterval := 1
// reset interval indicates the interval to reset nonce counter in seconds. Default is 60
resetInterval := 60
d := time.Duration(timeout) * time.Second
// First deploy a smart contract which can be interacted by injected executions
eHash, err := util.DeployContract(client, counter, delegates, executionGasLimit, executionGasPrice,
deployExecData, retryNum, retryInterval)
if err != nil {
log.L().Fatal("Failed to deploy smart contract", zap.Error(err))
}
// Wait until the smart contract is successfully deployed
var receipt *action.Receipt
if err := testutil.WaitUntil(100*time.Millisecond, 60*time.Second, func() (bool, error) {
receipt, err = svrs[0].ChainService(uint32(1)).Blockchain().GetReceiptByActionHash(eHash)
return receipt != nil, nil
}); err != nil {
log.L().Fatal("Failed to get receipt of execution deployment", zap.Error(err))
}
contract := receipt.ContractAddress
wg := &sync.WaitGroup{}
util.InjectByAps(wg, aps, counter, transferGasLimit, transferGasPrice, transferPayload, voteGasLimit, voteGasPrice,
contract, executionAmount, executionGasLimit, executionGasPrice, interactExecData, client, admins, delegates, d,
retryNum, retryInterval, resetInterval)
wg.Wait()
chains := make([]blockchain.Blockchain, numNodes)
stateHeights := make([]uint64, numNodes)
bcHeights := make([]uint64, numNodes)
idealHeight := make([]uint64, numNodes)
var netTimeout int
var minTimeout int
for i := 0; i < numNodes; i++ {
chains[i] = svrs[i].ChainService(configs[i].Chain.ID).Blockchain()
stateHeights[i], err = chains[i].GetFactory().Height()
if err != nil {
log.S().Errorf("Node %d: Can not get State height", i)
}
bcHeights[i] = chains[i].TipHeight()
minTimeout = int(configs[i].Consensus.RollDPoS.Delay/time.Second - configs[i].Genesis.BlockInterval/time.Second)
netTimeout = 0
if timeout > minTimeout {
netTimeout = timeout - minTimeout
}
idealHeight[i] = uint64((time.Duration(netTimeout) * time.Second) / configs[i].Genesis.BlockInterval)
log.S().Infof("Node#%d blockchain height: %d", i, bcHeights[i])
log.S().Infof("Node#%d state height: %d", i, stateHeights[i])
log.S().Infof("Node#%d ideal height: %d", i, idealHeight[i])
if bcHeights[i] != stateHeights[i] {
log.S().Errorf("Node#%d: State height does not match blockchain height", i)
}
if math.Abs(float64(bcHeights[i]-idealHeight[i])) > 1 {
log.S().Errorf("blockchain in Node#%d is behind the expected height", i)
}
}
for i := 0; i < numNodes; i++ {
for j := i + 1; j < numNodes; j++ {
if math.Abs(float64(bcHeights[i]-bcHeights[j])) > 1 {
log.S().Errorf("blockchain in Node#%d and blockchain in Node#%d are not sync", i, j)
} else {
log.S().Infof("blockchain in Node#%d and blockchain in Node#%d are sync", i, j)
}
}
}
}
}
func newConfig(
genesisConfigPath,
chainDBPath,
trieDBPath string,
producerPriKey keypair.PrivateKey,
networkPort,
explorerPort int,
) config.Config {
cfg := config.Default
cfg.NodeType = config.DelegateType
cfg.Network.Port = networkPort
cfg.Network.BootstrapNodes = []string{"/ip4/127.0.0.1/tcp/4689/ipfs/12D3KooWJwW6pUpTkxPTMv84RPLPMQVEAjZ6fvJuX4oZrvW5DAGQ"}
cfg.Chain.ID = 1
cfg.Chain.GenesisActionsPath = genesisConfigPath
cfg.Chain.ChainDBPath = chainDBPath
cfg.Chain.TrieDBPath = trieDBPath
cfg.Chain.NumCandidates = numNodes
cfg.Chain.EnableIndex = true
cfg.Chain.EnableAsyncIndexWrite = true
producerPubKey := &producerPriKey.PublicKey
cfg.Chain.ProducerPubKey = keypair.EncodePublicKey(producerPubKey)
cfg.Chain.ProducerPrivKey = keypair.EncodePrivateKey(producerPriKey)
cfg.Consensus.Scheme = config.RollDPoSScheme
cfg.Consensus.RollDPoS.FSM.UnmatchedEventInterval = 4 * time.Second
cfg.Consensus.RollDPoS.FSM.AcceptBlockTTL = 3 * time.Second
cfg.Consensus.RollDPoS.FSM.AcceptProposalEndorsementTTL = 3 * time.Second
cfg.Consensus.RollDPoS.FSM.AcceptLockEndorsementTTL = 3 * time.Second
cfg.Consensus.RollDPoS.FSM.EventChanSize = 100000
cfg.Consensus.RollDPoS.ToleratedOvertime = 2 * time.Second
cfg.Consensus.RollDPoS.Delay = 10 * time.Second
cfg.ActPool.MaxNumActsToPick = 2000
cfg.System.HTTPMetricsPort = 0
cfg.Explorer.Enabled = true
cfg.Explorer.Port = explorerPort
cfg.Genesis.Blockchain.BlockInterval = 10 * time.Second
cfg.Genesis.Blockchain.NumSubEpochs = 2
cfg.Genesis.Blockchain.NumDelegates = numNodes
cfg.Genesis.Blockchain.TimeBasedRotation = true
return cfg
}
| 1 | 15,583 |
"google.golang.org/grpc" should be together with "go.uber.org/zap" all ""github.com/iotexproject/iotex-core/*" stay together
|
iotexproject-iotex-core
|
go
|
@@ -700,6 +700,12 @@ class WebEngineTab(browsertab.AbstractTab):
self._inject_early_js('js', js_code, subframes=True)
self._init_stylesheet()
+ js_code_print = utils.read_file('javascript/print.js')
+ self._inject_early_js('js',
+ js_code_print,
+ subframes=True,
+ world=QWebEngineScript.MainWorld)
+
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(self._inject_userscripts)
self._inject_userscripts()
| 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.config import configdata, config
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._tab.search.search_displayed:
# We are currently in search mode.
# convert the search to a blue selection so we can operate on it
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
self._tab.run_js_async(
javascript.assemble('caret', 'setPlatform', sys.platform))
self._js_call('setInitialCursor', self._selection_cb)
def _selection_cb(self, enabled):
"""Emit selection_toggled based on setInitialCursor."""
if enabled is None:
log.webview.debug("Ignoring selection status None")
return
self.selection_toggled.emit(enabled)
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
if mode != usertypes.KeyMode.caret:
return
self.drop_selection()
self._js_call('disableCaret')
def move_to_next_line(self, count=1):
for _ in range(count):
self._js_call('moveDown')
def move_to_prev_line(self, count=1):
for _ in range(count):
self._js_call('moveUp')
def move_to_next_char(self, count=1):
for _ in range(count):
self._js_call('moveRight')
def move_to_prev_char(self, count=1):
for _ in range(count):
self._js_call('moveLeft')
def move_to_end_of_word(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfWord')
def move_to_next_word(self, count=1):
for _ in range(count):
self._js_call('moveToNextWord')
def move_to_prev_word(self, count=1):
for _ in range(count):
self._js_call('moveToPreviousWord')
def move_to_start_of_line(self):
self._js_call('moveToStartOfLine')
def move_to_end_of_line(self):
self._js_call('moveToEndOfLine')
def move_to_start_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfNextBlock')
def move_to_start_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfPrevBlock')
def move_to_end_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfNextBlock')
def move_to_end_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfPrevBlock')
def move_to_start_of_document(self):
self._js_call('moveToStartOfDocument')
def move_to_end_of_document(self):
self._js_call('moveToEndOfDocument')
def toggle_selection(self):
self._js_call('toggleSelection', self.selection_toggled.emit)
def drop_selection(self):
self._js_call('dropSelection')
def selection(self, callback):
# Not using selectedText() as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-53134
# Even on Qt 5.10 selectedText() seems to work poorly, see
# https://github.com/qutebrowser/qutebrowser/issues/3523
self._tab.run_js_async(javascript.assemble('caret', 'getSelection'),
callback)
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
# let's click the link via a fake-click
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
def _js_call(self, command, callback=None):
self._tab.run_js_async(javascript.assemble('caret', command), callback)
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 1000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def to_anchor(self, name):
url = self._tab.url()
url.setFragment(name)
self._tab.openurl(url)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.predicted_navigation.emit(item.url())
self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
if items:
self._tab.predicted_navigation.emit(items[-1].url)
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
@pyqtSlot()
def _on_load_finished():
self._tab.scroller.to_point(cur_data['scroll-pos'])
self._tab.load_finished.disconnect(_on_load_finished)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
self._tab.load_finished.connect(_on_load_finished)
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
_load_finished_fake:
Used in place of unreliable loadFinished
"""
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
_load_finished_fake = pyqtSignal(bool)
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._child_event_filter = None
self._saved_zoom = None
self._reload_url = None
config.instance.changed.connect(self._on_config_changed)
self._init_js()
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['scrolling.bar', 'content.user_stylesheets']:
self._init_stylesheet()
self._update_stylesheet()
def _update_stylesheet(self):
"""Update the custom stylesheet in existing tabs."""
css = shared.get_user_stylesheet()
code = javascript.assemble('stylesheet', 'set_css', css)
self.run_js_async(code)
def _inject_early_js(self, name, js_code, *,
world=QWebEngineScript.ApplicationWorld,
subframes=False):
"""Inject the given script to run early on a page load.
This runs the script both on DocumentCreation and DocumentReady as on
some internal pages, DocumentCreation will not work.
That is a WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66011
"""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
injection_points = {
'creation': QWebEngineScript.DocumentCreation,
'ready': QWebEngineScript.DocumentReady,
}
script = QWebEngineScript()
script.setInjectionPoint(injection_points[injection])
script.setSourceCode(js_code)
script.setWorldId(world)
script.setRunsOnSubFrames(subframes)
script.setName('_qute_{}_{}'.format(name, injection))
scripts.insert(script)
def _remove_early_js(self, name):
"""Remove an early QWebEngineScript."""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
full_name = '_qute_{}_{}'.format(name, injection)
script = scripts.findScript(full_name)
if not script.isNull():
scripts.remove(script)
def _init_js(self):
"""Initialize global qutebrowser JavaScript."""
js_code = javascript.wrap_global(
'scripts',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
utils.read_file('javascript/caret.js'),
)
# FIXME:qtwebengine what about subframes=True?
self._inject_early_js('js', js_code, subframes=True)
self._init_stylesheet()
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(self._inject_userscripts)
self._inject_userscripts()
def _init_stylesheet(self):
"""Initialize custom stylesheets.
Partially inspired by QupZilla:
https://github.com/QupZilla/qupzilla/blob/v2.0/src/lib/app/mainapplication.cpp#L1063-L1101
"""
self._remove_early_js('stylesheet')
css = shared.get_user_stylesheet()
js_code = javascript.wrap_global(
'stylesheet',
utils.read_file('javascript/stylesheet.js'),
javascript.assemble('stylesheet', 'set_css', css),
)
self._inject_early_js('stylesheet', js_code, subframes=True)
def _inject_userscripts(self):
"""Register user JavaScript files with the global profiles."""
# The Greasemonkey metadata block support in QtWebEngine only starts at
# Qt 5.8. With 5.7.1, we need to inject the scripts ourselves in
# response to urlChanged.
if not qtutils.version_check('5.8'):
return
# Since we are inserting scripts into profile.scripts they won't
# just get replaced by new gm scripts like if we were injecting them
# ourselves so we need to remove all gm scripts, while not removing
# any other stuff that might have been added. Like the one for
# stylesheets.
greasemonkey = objreg.get('greasemonkey')
scripts = self._widget.page().scripts()
for script in scripts.toList():
if script.name().startswith("GM-"):
log.greasemonkey.debug('Removing script: {}'
.format(script.name()))
removed = scripts.remove(script)
assert removed, script.name()
# Then add the new scripts.
for script in greasemonkey.all_scripts():
# @run-at (and @include/@exclude/@match) is parsed by
# QWebEngineScript.
new_script = QWebEngineScript()
new_script.setWorldId(QWebEngineScript.MainWorld)
new_script.setSourceCode(script.code())
new_script.setName("GM-{}".format(script.name))
new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
log.greasemonkey.debug('adding script: {}'
.format(new_script.name()))
scripts.insert(new_script)
def _install_event_filter(self):
fp = self._widget.focusProxy()
if fp is not None:
fp.installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url, *, predict=True):
"""Open the given URL in this tab.
Arguments:
url: The QUrl to open.
predict: If set to False, predicted_navigation is not emitted.
"""
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url, predict=predict)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started], url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
self._show_error_page(url, "Proxy authentication required")
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
abort_on = [self.shutting_down, self.load_started]
answer = shared.authentication_required(url, authenticator,
abort_on)
if not netrc_success and answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
self._show_error_page(url, "Authentication required")
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
@pyqtSlot(int)
def _on_load_progress_workaround(self, perc):
"""Use loadProgress(100) to emit loadFinished(True).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if perc == 100 and self.load_status() != usertypes.LoadStatus.error:
self._load_finished_fake.emit(True)
@pyqtSlot(bool)
def _on_load_finished_workaround(self, ok):
"""Use only loadFinished(False).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if not ok:
self._load_finished_fake.emit(False)
def _error_page_workaround(self, html):
"""Check if we're displaying a Chromium error page.
This gets only called if we got loadFinished(False) without JavaScript,
so we can display at least some error page.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
Needs to check the page content as a WORKAROUND for
https://bugreports.qt.io/browse/QTBUG-66661
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
self._show_error_page(self.url(), error=match.group(1))
@pyqtSlot(bool)
def _on_load_finished(self, ok):
"""Display a static error page if JavaScript is disabled."""
super()._on_load_finished(ok)
js_enabled = self.settings.test_attribute('content.javascript.enabled')
if not ok and not js_enabled:
self.dump_async(self._error_page_workaround)
if ok and self._reload_url is not None:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
log.config.debug(
"Loading {} again because of config change".format(
self._reload_url.toDisplayString()))
QTimer.singleShot(100, lambda url=self._reload_url:
self.openurl(url, predict=False))
self._reload_url = None
if not qtutils.version_check('5.10', compiled=False):
# We can't do this when we have the loadFinished workaround as that
# sometimes clears icons without loading a new page.
# In general, this is handled by Qt, but when loading takes long,
# the old icon is still displayed.
self.icon_changed.emit(QIcon())
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""If we know we're going to visit an URL soon, change the settings."""
super()._on_predicted_navigation(url)
self.settings.update_for_url(url)
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if not navigation.accepted or not navigation.is_main_frame:
return
needs_reload = {
'content.plugins',
'content.javascript.enabled',
'content.javascript.can_access_clipboard',
'content.javascript.can_access_clipboard',
'content.print_element_backgrounds',
'input.spatial_navigation',
'input.spatial_navigation',
}
assert needs_reload.issubset(configdata.DATA)
changed = self.settings.update_for_url(navigation.url)
if (changed & needs_reload and navigation.navigation_type !=
navigation.Type.link_clicked):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
self._reload_url = navigation.url
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
page.navigation_request.connect(self._on_navigation_request)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
page.loadProgress.connect(self._on_load_progress_workaround)
self._load_finished_fake.connect(self._on_history_trigger)
self._load_finished_fake.connect(self._restore_zoom)
self._load_finished_fake.connect(self._on_load_finished)
page.loadFinished.connect(self._on_load_finished_workaround)
else:
# for older Qt versions which break with the above
page.loadProgress.connect(self._on_load_progress)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
self.predicted_navigation.connect(self._on_predicted_navigation)
def event_target(self):
fp = self._widget.focusProxy()
assert fp is not None
return fp
| 1 | 21,361 |
No need for a separate variable here - just call `utils.read_file` directly below.
|
qutebrowser-qutebrowser
|
py
|
@@ -150,8 +150,18 @@ func (ctx *ChannelContext) SendResp(message model.Message) {
ctx.anonChsLock.RLock()
defer ctx.anonChsLock.RUnlock()
+
+ deadline := time.Now().Add(MessageTimeoutDefault)
+ sendTimer := time.NewTimer(time.Until(deadline))
+
if channel, exist := ctx.anonChannels[anonName]; exist {
- channel <- message
+ select {
+ case channel <- message:
+ case <-sendTimer.C:
+ klog.Warning("timeout when SendResp message")
+ return
+ }
+ sendTimer.Stop()
return
}
| 1 |
package context
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"k8s.io/klog"
"github.com/kubeedge/beehive/pkg/core/model"
)
// constants for channel context
const (
ChannelSizeDefault = 1024
MessageTimeoutDefault = 30 * time.Second
TickerTimeoutDefault = 20 * time.Millisecond
)
// ChannelContext is object for Context channel
type ChannelContext struct {
//ConfigFactory goarchaius.ConfigurationFactory
channels map[string]chan model.Message
chsLock sync.RWMutex
typeChannels map[string]map[string]chan model.Message
typeChsLock sync.RWMutex
anonChannels map[string]chan model.Message
anonChsLock sync.RWMutex
}
// NewChannelContext creates and returns object of new channel context
// TODO: Singleton
func NewChannelContext() *ChannelContext {
channelMap := make(map[string]chan model.Message)
moduleChannels := make(map[string]map[string]chan model.Message)
anonChannels := make(map[string]chan model.Message)
return &ChannelContext{
channels: channelMap,
typeChannels: moduleChannels,
anonChannels: anonChannels,
}
}
// Cleanup close modules
func (ctx *ChannelContext) Cleanup(module string) {
if channel := ctx.getChannel(module); channel != nil {
ctx.delChannel(module)
// decrease probable exception of channel closing
time.Sleep(20 * time.Millisecond)
close(channel)
}
}
// Send sends msg to a module. Todo: do not get stuck
func (ctx *ChannelContext) Send(module string, message model.Message) {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
if exception := recover(); exception != nil {
klog.Warningf("Recover when send message, exception: %+v", exception)
}
}()
if channel := ctx.getChannel(module); channel != nil {
channel <- message
return
}
klog.Warningf("Get bad module name :%s when send message, do nothing", module)
}
// Receive msg from channel of module
func (ctx *ChannelContext) Receive(module string) (model.Message, error) {
if channel := ctx.getChannel(module); channel != nil {
content := <-channel
return content, nil
}
klog.Warningf("Failed to get channel for module:%s when receive message", module)
return model.Message{}, fmt.Errorf("failed to get channel for module(%s)", module)
}
func getAnonChannelName(msgID string) string {
return msgID
}
// SendSync sends message in a sync way
func (ctx *ChannelContext) SendSync(module string, message model.Message, timeout time.Duration) (model.Message, error) {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
if exception := recover(); exception != nil {
klog.Warningf("Recover when sendsync message, exception: %+v", exception)
}
}()
if timeout <= 0 {
timeout = MessageTimeoutDefault
}
deadline := time.Now().Add(timeout)
// make sure to set sync flag
message.Header.Sync = true
// check req/resp channel
reqChannel := ctx.getChannel(module)
if reqChannel == nil {
return model.Message{}, fmt.Errorf("bad request module name(%s)", module)
}
sendTimer := time.NewTimer(timeout)
select {
case reqChannel <- message:
case <-sendTimer.C:
return model.Message{}, errors.New("timeout to send message")
}
sendTimer.Stop()
// new anonymous channel for response
anonChan := make(chan model.Message)
anonName := getAnonChannelName(message.GetID())
ctx.anonChsLock.Lock()
ctx.anonChannels[anonName] = anonChan
ctx.anonChsLock.Unlock()
defer func() {
ctx.anonChsLock.Lock()
delete(ctx.anonChannels, anonName)
close(anonChan)
ctx.anonChsLock.Unlock()
}()
var resp model.Message
respTimer := time.NewTimer(time.Until(deadline))
select {
case resp = <-anonChan:
case <-respTimer.C:
return model.Message{}, errors.New("timeout to get response")
}
respTimer.Stop()
return resp, nil
}
// SendResp send resp for this message when using sync mode
func (ctx *ChannelContext) SendResp(message model.Message) {
anonName := getAnonChannelName(message.GetParentID())
ctx.anonChsLock.RLock()
defer ctx.anonChsLock.RUnlock()
if channel, exist := ctx.anonChannels[anonName]; exist {
channel <- message
return
}
klog.V(4).Infof("Get bad anonName:%s when sendresp message, do nothing", anonName)
}
// SendToGroup sends msg to modules. Todo: do not get stuck
func (ctx *ChannelContext) SendToGroup(moduleType string, message model.Message) {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
if exception := recover(); exception != nil {
klog.Warningf("Recover when sendToGroup message, exception: %+v", exception)
}
}()
send := func(ch chan model.Message) {
select {
case ch <- message:
default:
klog.Warningf("the message channel is full, message: %+v", message)
select {
case ch <- message:
}
}
}
if channelList := ctx.getTypeChannel(moduleType); channelList != nil {
for _, channel := range channelList {
go send(channel)
}
return
}
klog.Warningf("Get bad module type:%s when sendToGroup message, do nothing", moduleType)
}
// SendToGroupSync : broadcast the message to each module channel, the module sends the response back on the anon channel
// check timeout and the size of anon channel
func (ctx *ChannelContext) SendToGroupSync(moduleType string, message model.Message, timeout time.Duration) error {
// avoid exception because of channel closing
// TODO: need reconstruction
defer func() {
if exception := recover(); exception != nil {
klog.Warningf("Recover when sendToGroupsync message, exception: %+v", exception)
}
}()
if timeout <= 0 {
timeout = MessageTimeoutDefault
}
deadline := time.Now().Add(timeout)
channelList := ctx.getTypeChannel(moduleType)
if channelList == nil {
return fmt.Errorf("failed to get module type(%s) channel list", moduleType)
}
// each module must sync a response,
// let anonChan size be the number of modules
channelNumber := len(channelList)
anonChan := make(chan model.Message, channelNumber)
anonName := getAnonChannelName(message.GetID())
ctx.anonChsLock.Lock()
ctx.anonChannels[anonName] = anonChan
ctx.anonChsLock.Unlock()
cleanup := func() error {
ctx.anonChsLock.Lock()
delete(ctx.anonChannels, anonName)
close(anonChan)
ctx.anonChsLock.Unlock()
var uninvitedGuests int
// cleanup anonchan and check parentid for resp
for resp := range anonChan {
if resp.GetParentID() != message.GetID() {
uninvitedGuests++
}
}
if uninvitedGuests != 0 {
klog.Errorf("Get some unexpected:%d resp when sendToGroupsync message", uninvitedGuests)
return fmt.Errorf("got some unexpected(%d) resp", uninvitedGuests)
}
return nil
}
// make sure to set sync flag before sending
message.Header.Sync = true
var timeoutCounter int32
send := func(ch chan model.Message) {
sendTimer := time.NewTimer(time.Until(deadline))
select {
case ch <- message:
sendTimer.Stop()
case <-sendTimer.C:
atomic.AddInt32(&timeoutCounter, 1)
}
}
for _, channel := range channelList {
go send(channel)
}
sendTimer := time.NewTimer(time.Until(deadline))
ticker := time.NewTicker(TickerTimeoutDefault)
for {
// anonChan is full
if len(anonChan) == channelNumber {
break
}
select {
case <-ticker.C:
case <-sendTimer.C:
cleanup()
if timeoutCounter != 0 {
errInfo := fmt.Sprintf("timeout to send message, several %d timeout when send", timeoutCounter)
return fmt.Errorf(errInfo)
}
klog.Error("Timeout to sendToGroupsync message")
return fmt.Errorf("Timeout to send message")
}
}
return cleanup()
}
// New Channel
func (ctx *ChannelContext) newChannel() chan model.Message {
channel := make(chan model.Message, ChannelSizeDefault)
return channel
}
// getChannel return chan
func (ctx *ChannelContext) getChannel(module string) chan model.Message {
ctx.chsLock.RLock()
defer ctx.chsLock.RUnlock()
if _, exist := ctx.channels[module]; exist {
return ctx.channels[module]
}
klog.Warningf("Failed to get channel, type:%s", module)
return nil
}
// addChannel return chan
func (ctx *ChannelContext) addChannel(module string, moduleCh chan model.Message) {
ctx.chsLock.Lock()
defer ctx.chsLock.Unlock()
ctx.channels[module] = moduleCh
}
// deleteChannel by module name
func (ctx *ChannelContext) delChannel(module string) {
// delete module channel from channels map
ctx.chsLock.Lock()
_, exist := ctx.channels[module]
if !exist {
klog.Warningf("Failed to get channel, module:%s", module)
return
}
delete(ctx.channels, module)
ctx.chsLock.Unlock()
// delete module channel from typechannels map
ctx.typeChsLock.Lock()
for _, moduleMap := range ctx.typeChannels {
if _, exist := moduleMap[module]; exist {
delete(moduleMap, module)
break
}
}
ctx.typeChsLock.Unlock()
}
// getTypeChannel return chan
func (ctx *ChannelContext) getTypeChannel(moduleType string) map[string]chan model.Message {
ctx.typeChsLock.RLock()
defer ctx.typeChsLock.RUnlock()
if _, exist := ctx.typeChannels[moduleType]; exist {
return ctx.typeChannels[moduleType]
}
klog.Warningf("Failed to get type channel, type:%s", moduleType)
return nil
}
func (ctx *ChannelContext) getModuleByChannel(ch chan model.Message) string {
ctx.chsLock.RLock()
defer ctx.chsLock.RUnlock()
for module, channel := range ctx.channels {
if channel == ch {
return module
}
}
klog.Warning("Failed to get module by channel")
return ""
}
// addTypeChannel put modules into moduleType map
func (ctx *ChannelContext) addTypeChannel(module, group string, moduleCh chan model.Message) {
ctx.typeChsLock.Lock()
defer ctx.typeChsLock.Unlock()
if _, exist := ctx.typeChannels[group]; !exist {
ctx.typeChannels[group] = make(map[string]chan model.Message)
}
ctx.typeChannels[group][module] = moduleCh
}
// AddModule adds module into module context
func (ctx *ChannelContext) AddModule(module string) {
channel := ctx.newChannel()
ctx.addChannel(module, channel)
}
// AddModuleGroup adds modules into module context group
func (ctx *ChannelContext) AddModuleGroup(module, group string) {
if channel := ctx.getChannel(module); channel != nil {
ctx.addTypeChannel(module, group, channel)
return
}
klog.Warningf("Get bad module name %s when addmodulegroup", module)
}
| 1 | 18,325 |
Thanks for the fix. Small question: here the message is just written to the channel; could it block?
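For context, a minimal standalone sketch (not the project's code) of why a bare send on an unbuffered channel can block, and how the select-with-timer pattern used in the patch above bounds that wait:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ch := make(chan string) // unbuffered: a send blocks until someone receives

	// A bare `ch <- "resp"` here would block forever, since no goroutine receives.

	// Bounded send: give up after a timeout instead of blocking indefinitely.
	timer := time.NewTimer(100 * time.Millisecond)
	defer timer.Stop()
	select {
	case ch <- "resp":
		fmt.Println("delivered")
	case <-timer.C:
		fmt.Println("timed out waiting for a receiver")
	}
}
```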
|
kubeedge-kubeedge
|
go
|
@@ -29,7 +29,8 @@ type SyncStage byte
const (
Headers SyncStage = iota // Headers are downloaded, their Proof-Of-Work validity and chaining is verified
- Bodies // Block bodies are downloaded, TxHash and UncleHash are getting verified, "From" recovered from signatures
+ Bodies // Block bodies are downloaded, TxHash and UncleHash are getting verified
+ Senders // "From" recovered from signatures, bodies re-written
Execution // Executing each block w/o building a trie
HashCheck // Checking the root hash
Finish // Nominal stage after all other stages
| 1 |
// Copyright 2020 The turbo-geth Authors
// This file is part of the turbo-geth library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package downloader
import (
"encoding/binary"
"fmt"
"github.com/ledgerwatch/turbo-geth/common/dbutils"
"github.com/ledgerwatch/turbo-geth/ethdb"
)
// SyncStage represents the stages of synchronisation in the SyncMode.StagedSync mode
type SyncStage byte
const (
Headers SyncStage = iota // Headers are downloaded, their Proof-Of-Work validity and chaining is verified
Bodies // Block bodies are downloaded, TxHash and UncleHash are getting verified, "From" recovered from signatures
Execution // Executing each block w/o building a trie
HashCheck // Checking the root hash
Finish // Nominal stage after all other stages
)
// GetStageProgress retrieves the saved progress of the given sync stage from the database
func GetStageProgress(db ethdb.Getter, stage SyncStage) (uint64, error) {
v, err := db.Get(dbutils.SyncStageProgress, []byte{byte(stage)})
if err != nil && err != ethdb.ErrKeyNotFound {
return 0, err
}
if len(v) == 0 {
return 0, nil
}
if len(v) != 8 {
return 0, fmt.Errorf("stage progress value must be of length 8, got %d", len(v))
}
return binary.BigEndian.Uint64(v), nil
}
// SaveStageProgress saves the progress of the given stage in the database
func SaveStageProgress(db ethdb.Putter, stage SyncStage, progress uint64) error {
var v [8]byte
binary.BigEndian.PutUint64(v[:], progress)
return db.Put(dbutils.SyncStageProgress, []byte{byte(stage)}, v[:])
}
// GetStageInvalidation retrieves the invalidation for the given stage
// Invalidation means that the stage needs to rollback to the invalidation
// point and be redone
func GetStageInvalidation(db ethdb.Getter, stage SyncStage) (uint64, error) {
v, err := db.Get(dbutils.SyncStageInvalidation, []byte{byte(stage)})
if err != nil && err != ethdb.ErrKeyNotFound {
return 0, err
}
if len(v) == 0 {
return 0, nil
}
if len(v) != 8 {
return 0, fmt.Errorf("stage invalidation value must be of length 8, got %d", len(v))
}
return binary.BigEndian.Uint64(v), nil
}
// SaveStageInvalidation saves the invalidation point of the given stage in the database
func SaveStageInvalidation(db ethdb.Putter, stage SyncStage, invalidation uint64) error {
var v [8]byte
binary.BigEndian.PutUint64(v[:], invalidation)
return db.Put(dbutils.SyncStageInvalidation, []byte{byte(stage)}, v[:])
}
| 1 | 21,479 |
Maybe we could have something like `Bodies []byte("bodies")` instead of the stages being numbers? With numbers we are making it harder to insert stages mid-process.
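A rough sketch of what label-based stage keys could look like, reusing the bucket and helpers from the file above; the stage labels and the Senders entry are assumptions for illustration, not the actual turbo-geth change.

package downloader

import (
	"encoding/binary"

	"github.com/ledgerwatch/turbo-geth/common/dbutils"
	"github.com/ledgerwatch/turbo-geth/ethdb"
)

// SyncStage identifies a sync stage by a label rather than a numeric id,
// so a new stage can be inserted mid-process without renumbering the rest.
type SyncStage []byte

var (
	Headers   = SyncStage("headers")
	Bodies    = SyncStage("bodies")
	Senders   = SyncStage("senders")
	Execution = SyncStage("execution")
)

// GetStageProgress keys the stored progress by the stage label instead of a single byte.
func GetStageProgress(db ethdb.Getter, stage SyncStage) (uint64, error) {
	v, err := db.Get(dbutils.SyncStageProgress, []byte(stage))
	if err != nil && err != ethdb.ErrKeyNotFound {
		return 0, err
	}
	if len(v) == 0 {
		return 0, nil
	}
	return binary.BigEndian.Uint64(v), nil
}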
|
ledgerwatch-erigon
|
go
|
@@ -82,11 +82,11 @@ type Endpoint struct {
// GroupMember represents resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct {
- // ExternalEntity maintains the reference to the ExternalEntity.
- ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,1,opt,name=externalEntity"`
-
// Pod maintains the reference to the Pod.
- Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,2,opt,name=pod"`
+ Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
+
+ // ExternalEntity maintains the reference to the ExternalEntity.
+ ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,2,opt,name=externalEntity"`
// Endpoints maintains a list of EndPoints associated with this groupMember.
Endpoints []Endpoint `json:"endpoints,omitempty" protobuf:"bytes,3,rep,name=endpoints"`
| 1 |
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroup is the message format of antrea/pkg/controller/types.AppliedToGroup in an API response.
type AppliedToGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Pods is a list of Pods selected by this group.
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
// GroupMembers is a list of resources selected by this group. This will eventually replace Pods
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// PodReference represents a Pod Reference.
type PodReference struct {
// The name of this pod.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this pod.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// NamedPort represents a Port with a name on Pod.
type NamedPort struct {
// Port represents the Port number.
Port int32 `json:"port,omitempty" protobuf:"varint,1,opt,name=port"`
// Name represents the associated name with this Port number.
Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
// Protocol for port. Must be UDP, TCP, or SCTP.
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol"`
}
// GroupMemberPod represents a GroupMember related to Pods.
type GroupMemberPod struct {
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,1,opt,name=pod"`
// IP maintains the IPAddress associated with the Pod.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,2,opt,name=ip"`
// Ports maintain the named port mapping of this Pod.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// ExternalEntityReference represents a ExternalEntity Reference.
type ExternalEntityReference struct {
// The name of this ExternalEntity.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The namespace of this ExternalEntity.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Endpoint represents an external endpoint.
type Endpoint struct {
// IP is the IP address of the Endpoint.
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Ports is the list NamedPort of the Endpoint.
Ports []NamedPort `json:"ports,omitempty" protobuf:"bytes,2,rep,name=ports"`
}
// GroupMember represents resource member to be populated in Groups.
// This supersedes GroupMemberPod, and will eventually replace it.
type GroupMember struct {
// ExternalEntity maintains the reference to the ExternalEntity.
ExternalEntity *ExternalEntityReference `json:"externalEntity,omitempty" protobuf:"bytes,1,opt,name=externalEntity"`
// Pod maintains the reference to the Pod.
Pod *PodReference `json:"pod,omitempty" protobuf:"bytes,2,opt,name=pod"`
// Endpoints maintains a list of EndPoints associated with this groupMember.
Endpoints []Endpoint `json:"endpoints,omitempty" protobuf:"bytes,3,rep,name=endpoints"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupPatch describes the incremental update of an AppliedToGroup.
type AppliedToGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AppliedToGroupList is a list of AppliedToGroup objects.
type AppliedToGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AppliedToGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroup is the message format of antrea/pkg/controller/types.AddressGroup in an API response.
type AddressGroup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Pods []GroupMemberPod `json:"pods,omitempty" protobuf:"bytes,2,rep,name=pods"`
GroupMembers []GroupMember `json:"groupMembers,omitempty" protobuf:"bytes,3,rep,name=groupMembers"`
}
// IPAddress describes a single IP address. Either an IPv4 or IPv6 address must be set.
type IPAddress []byte
// IPNet describes an IP network.
type IPNet struct {
IP IPAddress `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
PrefixLength int32 `json:"prefixLength,omitempty" protobuf:"varint,2,opt,name=prefixLength"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupPatch describes the incremental update of an AddressGroup.
type AddressGroupPatch struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
AddedPods []GroupMemberPod `json:"addedPods,omitempty" protobuf:"bytes,2,rep,name=addedPods"`
RemovedPods []GroupMemberPod `json:"removedPods,omitempty" protobuf:"bytes,3,rep,name=removedPods"`
AddedGroupMembers []GroupMember `json:"addedGroupMembers,omitempty" protobuf:"bytes,4,rep,name=addedGroupMembers"`
RemovedGroupMembers []GroupMember `json:"removedGroupMembers,omitempty" protobuf:"bytes,5,rep,name=removedGroupMembers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// AddressGroupList is a list of AddressGroup objects.
type AddressGroupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []AddressGroup `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// TierPriority specifies the relative ordering among Tiers. A lower
// TierPriority indicates higher precedence.
type TierPriority uint32
// +genclient
// +genclient:onlyVerbs=list,get,watch
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicy is the message format of antrea/pkg/controller/types.NetworkPolicy in an API response.
type NetworkPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Rules is a list of rules to be applied to the selected Pods.
Rules []NetworkPolicyRule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
// AppliedToGroups is a list of names of AppliedToGroups to which this policy applies.
AppliedToGroups []string `json:"appliedToGroups,omitempty" protobuf:"bytes,3,rep,name=appliedToGroups"`
// Priority represents the relative priority of this Network Policy as compared to
// other Network Policies. Priority will be unset (nil) for K8s Network Policy.
Priority *float64 `json:"priority,omitempty" protobuf:"fixed64,4,opt,name=priority"`
// TierPriority represents the priority of the Tier associated with this Network
// Policy. The TierPriority will remain nil for K8s NetworkPolicy.
TierPriority *TierPriority `json:"tierPriority,omitempty" protobuf:"varint,5,opt,name=tierPriority"`
}
// Direction defines traffic direction of NetworkPolicyRule.
type Direction string
const (
DirectionIn Direction = "In"
DirectionOut Direction = "Out"
)
// NetworkPolicyRule describes a particular set of traffic that is allowed.
type NetworkPolicyRule struct {
// The direction of this rule.
// If it's set to In, From must be set and To must not be set.
// If it's set to Out, To must be set and From must not be set.
Direction Direction `json:"direction,omitempty" protobuf:"bytes,1,opt,name=direction"`
// From represents sources which should be able to access the pods selected by the policy.
From NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
// To represents destinations which should be able to be accessed by the pods selected by the policy.
To NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,3,opt,name=to"`
// Services is a list of services which should be matched.
Services []Service `json:"services,omitempty" protobuf:"bytes,4,rep,name=services"`
// Priority defines the priority of the Rule as compared to other rules in the
// NetworkPolicy.
Priority int32 `json:"priority,omitempty" protobuf:"varint,5,opt,name=priority"`
// Action specifies the action to be applied on the rule. i.e. Allow/Drop. An empty
// action “nil” defaults to Allow action, which would be the case for rules created for
// K8s Network Policy.
Action *secv1alpha1.RuleAction `json:"action,omitempty" protobuf:"bytes,6,opt,name=action,casttype=github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1.RuleAction"`
}
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// Service describes a port to allow traffic on.
type Service struct {
// The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this
// field defaults to TCP.
// +optional
Protocol *Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol"`
// The port name or number on the given protocol. If not specified, this matches all port numbers.
// +optional
Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
}
// NetworkPolicyPeer describes a peer of NetworkPolicyRules.
// It could be a list of names of AddressGroups and/or a list of IPBlock.
type NetworkPolicyPeer struct {
// A list of names of AddressGroups.
AddressGroups []string `json:"addressGroups,omitempty" protobuf:"bytes,1,rep,name=addressGroups"`
// A list of IPBlock.
IPBlocks []IPBlock `json:"ipBlocks,omitempty" protobuf:"bytes,2,rep,name=ipBlocks"`
}
// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24"). The except entry describes CIDRs that should
// not be included within this rule.
type IPBlock struct {
// CIDR is an IPNet represents the IP Block.
CIDR IPNet `json:"cidr" protobuf:"bytes,1,name=cidr"`
// Except is a slice of IPNets that should not be included within an IP Block.
// Except values will be rejected if they are outside the CIDR range.
// +optional
Except []IPNet `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NetworkPolicyList is a list of NetworkPolicy objects.
type NetworkPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}
| 1 | 22,673 |
Out of curiosity, why make this change? If there is no real reason, could you revert it to avoid unnecessary changes to several generated files?
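As general background for this question (not a statement about the intent of the patch): protobuf encodes each field under its tag number, so reordering Go struct fields while keeping the numbers is cosmetic, whereas swapping the numbers, as the diff above does, changes the wire key of every serialized field. A tiny illustrative sketch, with nothing here taken from the antrea codebase:

package main

import "fmt"

// wireKey computes the key that prefixes a field on the wire:
// (tag number << 3) | wire type. The Go declaration order never appears
// in the encoding; only the tag number does.
func wireKey(tagNumber, wireType int) int {
	return tagNumber<<3 | wireType
}

func main() {
	const lengthDelimited = 2 // wire type used for embedded messages
	fmt.Println(wireKey(1, lengthDelimited)) // 10: the field tagged 1
	fmt.Println(wireKey(2, lengthDelimited)) // 18: the field tagged 2
}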
|
antrea-io-antrea
|
go
|
@@ -334,6 +334,10 @@ func (manager *connectionManager) Disconnect() error {
manager.cleanConnection()
manager.setStatus(statusNotConnected())
+ manager.eventPublisher.Publish(StateEventTopic, StateEvent{
+ State: NotConnected,
+ SessionInfo: manager.sessionInfo,
+ })
return nil
}
| 1 |
/*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package connection
import (
"context"
"errors"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/communication"
"github.com/mysteriumnetwork/node/consumer"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/firewall"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
"github.com/mysteriumnetwork/node/session/promise"
)
const managerLogPrefix = "[connection-manager] "
var (
// ErrNoConnection error indicates that action applied to manager expects active connection (i.e. disconnect)
ErrNoConnection = errors.New("no connection exists")
// ErrAlreadyExists error indicates that action applied to manager expects no active connection (i.e. connect)
ErrAlreadyExists = errors.New("connection already exists")
// ErrConnectionCancelled indicates that connection in progress was cancelled by request of api user
ErrConnectionCancelled = errors.New("connection was cancelled")
// ErrConnectionFailed indicates that Connect method didn't reach "Connected" phase due to connection error
ErrConnectionFailed = errors.New("connection has failed")
// ErrUnsupportedServiceType indicates that target proposal contains unsupported service type
ErrUnsupportedServiceType = errors.New("unsupported service type in proposal")
)
// Creator creates new connection by given options and uses state channel to report state changes
type Creator func(serviceType string, stateChannel StateChannel, statisticsChannel StatisticsChannel) (Connection, error)
// SessionInfo contains all the relevant info of the current session
type SessionInfo struct {
SessionID session.ID
ConsumerID identity.Identity
Proposal market.ServiceProposal
}
// Publisher is responsible for publishing given events
type Publisher interface {
Publish(topic string, data interface{})
}
// PaymentIssuer handles the payments for service
type PaymentIssuer interface {
Start() error
Stop()
}
// PaymentIssuerFactory creates a new payment issuer from the given params
type PaymentIssuerFactory func(
initialState promise.PaymentInfo,
paymentDefinition dto.PaymentPerTime,
messageChan chan balance.Message,
dialog communication.Dialog,
consumer, provider identity.Identity) (PaymentIssuer, error)
type connectionManager struct {
//these are passed on creation
newDialog DialogCreator
paymentIssuerFactory PaymentIssuerFactory
newConnection Creator
eventPublisher Publisher
resolver ip.Resolver
//these are populated by Connect at runtime
ctx context.Context
status Status
statusLock sync.RWMutex
sessionInfo SessionInfo
cleanup []func() error
cancel func()
discoLock sync.Mutex
}
// NewManager creates connection manager with given dependencies
func NewManager(
dialogCreator DialogCreator,
paymentIssuerFactory PaymentIssuerFactory,
connectionCreator Creator,
eventPublisher Publisher,
resolver ip.Resolver,
) *connectionManager {
return &connectionManager{
newDialog: dialogCreator,
paymentIssuerFactory: paymentIssuerFactory,
newConnection: connectionCreator,
status: statusNotConnected(),
eventPublisher: eventPublisher,
cleanup: make([]func() error, 0),
resolver: resolver,
}
}
func (manager *connectionManager) Connect(consumerID identity.Identity, proposal market.ServiceProposal, params ConnectParams) (err error) {
if manager.Status().State != NotConnected {
return ErrAlreadyExists
}
manager.ctx, manager.cancel = context.WithCancel(context.Background())
manager.setStatus(statusConnecting())
defer func() {
if err != nil {
manager.setStatus(statusNotConnected())
}
}()
providerID := identity.FromAddress(proposal.ProviderID)
dialog, err := manager.createDialog(consumerID, providerID, proposal.ProviderContacts[0])
if err != nil {
return err
}
stateChannel := make(chan State, 10)
statisticsChannel := make(chan consumer.SessionStatistics, 10)
connection, err := manager.newConnection(proposal.ServiceType, stateChannel, statisticsChannel)
if err != nil {
return err
}
sessionDTO, paymentInfo, err := manager.createSession(connection, dialog, consumerID, proposal)
if err != nil {
return err
}
err = manager.launchPayments(paymentInfo, dialog, consumerID, providerID)
if err != nil {
return err
}
err = manager.startConnection(connection, consumerID, proposal, params, sessionDTO, stateChannel, statisticsChannel)
if err == context.Canceled {
return ErrConnectionCancelled
}
return err
}
func (manager *connectionManager) launchPayments(paymentInfo *promise.PaymentInfo, dialog communication.Dialog, consumerID, providerID identity.Identity) error {
var promiseState promise.PaymentInfo
if paymentInfo != nil {
promiseState.FreeCredit = paymentInfo.FreeCredit
promiseState.LastPromise = paymentInfo.LastPromise
}
messageChan := make(chan balance.Message, 1)
// TODO: set the time and proper payment info
payment := dto.PaymentPerTime{
Price: money.Money{
Currency: money.CurrencyMyst,
Amount: uint64(0),
},
Duration: time.Minute,
}
payments, err := manager.paymentIssuerFactory(promiseState, payment, messageChan, dialog, consumerID, providerID)
if err != nil {
return err
}
manager.cleanup = append(manager.cleanup, func() error {
payments.Stop()
return nil
})
go manager.payForService(payments)
return nil
}
func (manager *connectionManager) cleanConnection() {
manager.cancel()
for i := len(manager.cleanup) - 1; i > 0; i-- {
err := manager.cleanup[i]()
if err != nil {
log.Warn(managerLogPrefix, "cleanup error:", err)
}
}
manager.cleanup = make([]func() error, 0)
}
func (manager *connectionManager) createDialog(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) {
dialog, err := manager.newDialog(consumerID, providerID, contact)
if err != nil {
return nil, err
}
manager.cleanup = append(manager.cleanup, dialog.Close)
return dialog, err
}
func (manager *connectionManager) createSession(c Connection, dialog communication.Dialog, consumerID identity.Identity, proposal market.ServiceProposal) (session.SessionDto, *promise.PaymentInfo, error) {
sessionCreateConfig, err := c.GetConfig()
if err != nil {
return session.SessionDto{}, nil, err
}
consumerInfo := session.ConsumerInfo{
// TODO: once we're supporting payments from another identity make the changes accordingly
IssuerID: consumerID,
}
s, paymentInfo, err := session.RequestSessionCreate(dialog, proposal.ID, sessionCreateConfig, consumerInfo)
if err != nil {
return session.SessionDto{}, nil, err
}
manager.cleanup = append(manager.cleanup, func() error { return session.RequestSessionDestroy(dialog, s.ID) })
// set the session info for future use
manager.sessionInfo = SessionInfo{
SessionID: s.ID,
ConsumerID: consumerID,
Proposal: proposal,
}
manager.eventPublisher.Publish(SessionEventTopic, SessionEvent{
Status: SessionCreatedStatus,
SessionInfo: manager.sessionInfo,
})
manager.cleanup = append(manager.cleanup, func() error {
manager.eventPublisher.Publish(SessionEventTopic, SessionEvent{
Status: SessionEndedStatus,
SessionInfo: manager.sessionInfo,
})
return nil
})
return s, paymentInfo, nil
}
func (manager *connectionManager) startConnection(
connection Connection,
consumerID identity.Identity,
proposal market.ServiceProposal,
params ConnectParams,
sessionDTO session.SessionDto,
stateChannel chan State,
statisticsChannel chan consumer.SessionStatistics) (err error) {
defer func() {
if err != nil {
log.Info(managerLogPrefix, "Cancelling connection initiation: ", err)
logDisconnectError(manager.Disconnect())
}
}()
connectOptions := ConnectOptions{
SessionID: sessionDTO.ID,
SessionConfig: sessionDTO.Config,
ConsumerID: consumerID,
ProviderID: identity.FromAddress(proposal.ProviderID),
Proposal: proposal,
}
if err = connection.Start(connectOptions); err != nil {
return err
}
manager.cleanup = append(manager.cleanup, func() error {
connection.Stop()
return nil
})
//consume statistics right after start - openvpn3 will publish them even before connected state
go manager.consumeStats(statisticsChannel)
err = manager.waitForConnectedState(stateChannel, sessionDTO.ID)
if err != nil {
return err
}
if !params.DisableKillSwitch {
// TODO: Implement fw based kill switch for respective OS
// we may need to wait for tun device setup to be finished
firewall.NewKillSwitch().Enable()
}
go manager.consumeConnectionStates(stateChannel)
go manager.connectionWaiter(connection)
return nil
}
func (manager *connectionManager) Status() Status {
manager.statusLock.RLock()
defer manager.statusLock.RUnlock()
return manager.status
}
func (manager *connectionManager) setStatus(cs Status) {
manager.statusLock.Lock()
manager.status = cs
manager.statusLock.Unlock()
}
func (manager *connectionManager) Disconnect() error {
manager.discoLock.Lock()
defer manager.discoLock.Unlock()
if manager.Status().State == NotConnected {
return ErrNoConnection
}
manager.setStatus(statusDisconnecting())
manager.cleanConnection()
manager.setStatus(statusNotConnected())
return nil
}
func (manager *connectionManager) payForService(payments PaymentIssuer) {
err := payments.Start()
if err != nil {
log.Error(managerLogPrefix, "payment error: ", err)
err = manager.Disconnect()
if err != nil {
log.Error(managerLogPrefix, "could not disconnect gracefully:", err)
}
}
}
func warnOnClean() {
log.Warn(managerLogPrefix, "Trying to close when there is nothing to close. Possible bug or race condition")
}
func (manager *connectionManager) connectionWaiter(connection Connection) {
err := connection.Wait()
if err != nil {
log.Warn(managerLogPrefix, "Connection exited with error: ", err)
} else {
log.Info(managerLogPrefix, "Connection exited")
}
logDisconnectError(manager.Disconnect())
}
func (manager *connectionManager) waitForConnectedState(stateChannel <-chan State, sessionID session.ID) error {
for {
select {
case state, more := <-stateChannel:
if !more {
return ErrConnectionFailed
}
switch state {
case Connected:
manager.onStateChanged(state)
return nil
default:
manager.onStateChanged(state)
}
case <-manager.ctx.Done():
return manager.ctx.Err()
}
}
}
func (manager *connectionManager) consumeConnectionStates(stateChannel <-chan State) {
for state := range stateChannel {
manager.onStateChanged(state)
}
log.Debug(managerLogPrefix, "State updater stopCalled")
logDisconnectError(manager.Disconnect())
}
func (manager *connectionManager) consumeStats(statisticsChannel <-chan consumer.SessionStatistics) {
for stats := range statisticsChannel {
manager.eventPublisher.Publish(StatisticsEventTopic, stats)
}
}
func (manager *connectionManager) onStateChanged(state State) {
manager.eventPublisher.Publish(StateEventTopic, StateEvent{
State: state,
SessionInfo: manager.sessionInfo,
})
switch state {
case Connected:
manager.setStatus(statusConnected(manager.sessionInfo.SessionID, manager.sessionInfo.Proposal))
case Reconnecting:
manager.setStatus(statusReconnecting())
}
}
func logDisconnectError(err error) {
if err != nil && err != ErrNoConnection {
log.Error(managerLogPrefix, "Disconnect error", err)
}
}
| 1 | 14,316 |
Could we publish these events inside `manager.setStatus()`?
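A minimal sketch of what that suggestion could look like, slotted into the connection manager quoted above: setStatus publishes the state event itself, so callers such as Disconnect no longer need their own Publish call. Mapping the stored Status to a State via cs.State is an assumption here.

// setStatus updates the connection status and publishes the matching
// state event in one place, instead of each caller publishing separately.
func (manager *connectionManager) setStatus(cs Status) {
	manager.statusLock.Lock()
	manager.status = cs
	manager.statusLock.Unlock()

	manager.eventPublisher.Publish(StateEventTopic, StateEvent{
		State:       cs.State,
		SessionInfo: manager.sessionInfo,
	})
}

Disconnect would then only call manager.setStatus(statusNotConnected()) and could drop the explicit Publish added in the diff above.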
|
mysteriumnetwork-node
|
go
|
@@ -134,7 +134,9 @@ module.exports = class Plugin {
}
this.uppy.log(`Not installing ${callerPluginName}`)
- throw new Error(`Invalid target option given to ${callerPluginName}`)
+ throw new Error(`Invalid target option given to ${callerPluginName}. Please make sure that the element
+ exists on the page, or that the plugin you are targeting has been installed. Check that the <script> tag initializing Uppy
+ comes at the bottom of the page, before the </body> (see https://github.com/transloadit/uppy/issues/1042).`)
}
render (state) {
| 1 |
const preact = require('preact')
const findDOMElement = require('@uppy/utils/lib/findDOMElement')
/**
* Defer a frequent call to the microtask queue.
*/
function debounce (fn) {
let calling = null
let latestArgs = null
return (...args) => {
latestArgs = args
if (!calling) {
calling = Promise.resolve().then(() => {
calling = null
// At this point `args` may be different from the most
// recent state, if multiple calls happened since this task
// was queued. So we use the `latestArgs`, which definitely
// is the most recent call.
return fn(...latestArgs)
})
}
return calling
}
}
/**
* Boilerplate that all Plugins share - and should not be used
* directly. It also shows which methods final plugins should implement/override,
 * thus deciding on structure.
 *
 * @param {object} uppy Uppy core object
 * @param {object} opts object with plugin options
* @return {array | string} files or success/fail message
*/
module.exports = class Plugin {
constructor (uppy, opts) {
this.uppy = uppy
this.opts = opts || {}
this.update = this.update.bind(this)
this.mount = this.mount.bind(this)
this.install = this.install.bind(this)
this.uninstall = this.uninstall.bind(this)
}
getPluginState () {
const { plugins } = this.uppy.getState()
return plugins[this.id] || {}
}
setPluginState (update) {
const { plugins } = this.uppy.getState()
this.uppy.setState({
plugins: {
...plugins,
[this.id]: {
...plugins[this.id],
...update
}
}
})
}
update (state) {
if (typeof this.el === 'undefined') {
return
}
if (this._updateUI) {
this._updateUI(state)
}
}
/**
* Check if supplied `target` is a DOM element or an `object`.
* If it’s an object — target is a plugin, and we search `plugins`
* for a plugin with same name and return its target.
*
* @param {String|Object} target
*
*/
mount (target, plugin) {
const callerPluginName = plugin.id
const targetElement = findDOMElement(target)
if (targetElement) {
this.isTargetDOMEl = true
// API for plugins that require a synchronous rerender.
this.rerender = (state) => {
// plugin could be removed, but this.rerender is debounced below,
// so it could still be called even after uppy.removePlugin or uppy.close
// hence the check
if (!this.uppy.getPlugin(this.id)) return
this.el = preact.render(this.render(state), targetElement, this.el)
}
this._updateUI = debounce(this.rerender)
this.uppy.log(`Installing ${callerPluginName} to a DOM element`)
// clear everything inside the target container
if (this.opts.replaceTargetContent) {
targetElement.innerHTML = ''
}
this.el = preact.render(this.render(this.uppy.getState()), targetElement)
return this.el
}
let targetPlugin
if (typeof target === 'object' && target instanceof Plugin) {
// Targeting a plugin *instance*
targetPlugin = target
} else if (typeof target === 'function') {
// Targeting a plugin type
const Target = target
// Find the target plugin instance.
this.uppy.iteratePlugins((plugin) => {
if (plugin instanceof Target) {
targetPlugin = plugin
return false
}
})
}
if (targetPlugin) {
const targetPluginName = targetPlugin.id
this.uppy.log(`Installing ${callerPluginName} to ${targetPluginName}`)
this.el = targetPlugin.addTarget(plugin)
return this.el
}
this.uppy.log(`Not installing ${callerPluginName}`)
throw new Error(`Invalid target option given to ${callerPluginName}`)
}
render (state) {
throw (new Error('Extend the render method to add your plugin to a DOM element'))
}
addTarget (plugin) {
throw (new Error('Extend the addTarget method to add your plugin to another plugin\'s target'))
}
unmount () {
if (this.isTargetDOMEl && this.el && this.el.parentNode) {
this.el.parentNode.removeChild(this.el)
}
}
install () {
}
uninstall () {
this.unmount()
}
}
| 1 | 11,120 |
This could be misread as `<body>`; maybe use something redundant but very clear, like "before the closing `</body>` tag"?
|
transloadit-uppy
|
js
|
@@ -510,12 +510,11 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
- using (var server = new TestServer(httpContext =>
+ using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 11;
- httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello,"), 0, 6);
- httpContext.Response.Body.Write(Encoding.ASCII.GetBytes(" world"), 0, 6);
- return Task.CompletedTask;
+ await httpContext.Response.Body.WriteAsync(Encoding.ASCII.GetBytes("hello,"), 0, 6);
+ await httpContext.Response.Body.WriteAsync(Encoding.ASCII.GetBytes(" world"), 0, 6);
}, serviceContext))
{
using (var connection = server.CreateConnection())
| 1 |
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Http;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
using Microsoft.AspNetCore.Testing;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Primitives;
using Moq;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
{
public class ResponseTests
{
public static TheoryData<ListenOptions> ConnectionAdapterData => new TheoryData<ListenOptions>
{
new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)),
new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0))
{
ConnectionAdapters = { new PassThroughConnectionAdapter() }
}
};
[Fact]
public async Task LargeDownload()
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
var bytes = new byte[1024];
for (int i = 0; i < bytes.Length; i++)
{
bytes[i] = (byte)i;
}
context.Response.ContentLength = bytes.Length * 1024;
for (int i = 0; i < 1024; i++)
{
await context.Response.Body.WriteAsync(bytes, 0, bytes.Length);
}
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/");
response.EnsureSuccessStatusCode();
var responseBody = await response.Content.ReadAsStreamAsync();
// Read the full response body
var total = 0;
var bytes = new byte[1024];
var count = await responseBody.ReadAsync(bytes, 0, bytes.Length);
while (count > 0)
{
for (int i = 0; i < count; i++)
{
Assert.Equal(total % 256, bytes[i]);
total++;
}
count = await responseBody.ReadAsync(bytes, 0, bytes.Length);
}
}
}
}
[Theory, MemberData(nameof(NullHeaderData))]
public async Task IgnoreNullHeaderValues(string headerName, StringValues headerValue, string expectedValue)
{
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
context.Response.Headers.Add(headerName, headerValue);
await context.Response.WriteAsync("");
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/");
response.EnsureSuccessStatusCode();
var headers = response.Headers;
if (expectedValue == null)
{
Assert.False(headers.Contains(headerName));
}
else
{
Assert.True(headers.Contains(headerName));
Assert.Equal(headers.GetValues(headerName).Single(), expectedValue);
}
}
}
}
[Fact]
public async Task OnCompleteCalledEvenWhenOnStartingNotCalled()
{
var onStartingCalled = false;
var onCompletedCalled = false;
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(context =>
{
context.Response.OnStarting(() => Task.Run(() => onStartingCalled = true));
context.Response.OnCompleted(() => Task.Run(() => onCompletedCalled = true));
// Prevent OnStarting call (see Frame<T>.ProcessRequestsAsync()).
throw new Exception();
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/");
Assert.Equal(HttpStatusCode.InternalServerError, response.StatusCode);
Assert.False(onStartingCalled);
Assert.True(onCompletedCalled);
}
}
}
[Fact]
public async Task OnStartingThrowsWhenSetAfterResponseHasAlreadyStarted()
{
InvalidOperationException ex = null;
var hostBuilder = new WebHostBuilder()
.UseKestrel()
.UseUrls("http://127.0.0.1:0/")
.Configure(app =>
{
app.Run(async context =>
{
await context.Response.WriteAsync("hello, world");
await context.Response.Body.FlushAsync();
ex = Assert.Throws<InvalidOperationException>(() => context.Response.OnStarting(_ => Task.CompletedTask, null));
});
});
using (var host = hostBuilder.Build())
{
host.Start();
using (var client = new HttpClient())
{
var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/");
// Despite the error, the response had already started
Assert.Equal(HttpStatusCode.OK, response.StatusCode);
Assert.NotNull(ex);
}
}
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeAppException()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
throw new Exception();
},
expectedClientStatusCode: HttpStatusCode.InternalServerError,
expectedServerStatusCode: HttpStatusCode.InternalServerError);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAborted()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
context.Abort();
return Task.CompletedTask;
},
expectedClientStatusCode: null,
expectedServerStatusCode: 0);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAbortedAppException()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
context.Abort();
throw new Exception();
},
expectedClientStatusCode: null,
expectedServerStatusCode: 0);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformed()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
context =>
{
return Task.CompletedTask;
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedRead()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
async context =>
{
await context.Request.Body.ReadAsync(new byte[1], 0, 1);
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
[Fact]
public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedReadIgnored()
{
return ResponseStatusCodeSetBeforeHttpContextDispose(
async context =>
{
try
{
await context.Request.Body.ReadAsync(new byte[1], 0, 1);
}
catch (BadHttpRequestException)
{
}
},
expectedClientStatusCode: null,
expectedServerStatusCode: HttpStatusCode.BadRequest,
sendMalformedRequest: true);
}
private static async Task ResponseStatusCodeSetBeforeHttpContextDispose(
RequestDelegate handler,
HttpStatusCode? expectedClientStatusCode,
HttpStatusCode expectedServerStatusCode,
bool sendMalformedRequest = false)
{
var mockHttpContextFactory = new Mock<IHttpContextFactory>();
mockHttpContextFactory.Setup(f => f.Create(It.IsAny<IFeatureCollection>()))
.Returns<IFeatureCollection>(fc => new DefaultHttpContext(fc));
var disposedTcs = new TaskCompletionSource<int>();
mockHttpContextFactory.Setup(f => f.Dispose(It.IsAny<HttpContext>()))
.Callback<HttpContext>(c =>
{
disposedTcs.TrySetResult(c.Response.StatusCode);
});
using (var server = new TestServer(handler, new TestServiceContext(), new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)),
services => services.AddSingleton(mockHttpContextFactory.Object)))
{
if (!sendMalformedRequest)
{
using (var client = new HttpClient())
{
try
{
var response = await client.GetAsync($"http://127.0.0.1:{server.Port}/");
Assert.Equal(expectedClientStatusCode, response.StatusCode);
}
catch
{
if (expectedClientStatusCode != null)
{
throw;
}
}
}
}
else
{
using (var connection = new TestConnection(server.Port))
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Transfer-Encoding: chunked",
"",
"gg");
await connection.ReceiveForcedEnd(
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var disposedStatusCode = await disposedTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
Assert.Equal(expectedServerStatusCode, (HttpStatusCode)disposedStatusCode);
}
}
// https://github.com/aspnet/KestrelHttpServer/pull/1111/files#r80584475 explains the reason for this test.
[Fact]
public async Task SingleErrorResponseSentWhenAppSwallowsBadRequestException()
{
BadHttpRequestException readException = null;
using (var server = new TestServer(async httpContext =>
{
readException = await Assert.ThrowsAsync<BadHttpRequestException>(
async () => await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1));
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Transfer-Encoding: chunked",
"",
"gg");
await connection.ReceiveForcedEnd(
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
Assert.NotNull(readException);
}
[Fact]
public async Task TransferEncodingChunkedSetOnUnknownLengthHttp11Response()
{
using (var server = new TestServer(async httpContext =>
{
await httpContext.Response.WriteAsync("hello, ");
await httpContext.Response.WriteAsync("world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Transfer-Encoding: chunked",
"",
"7",
"hello, ",
"5",
"world",
"0",
"",
"");
}
}
}
[Theory]
[InlineData(StatusCodes.Status204NoContent)]
[InlineData(StatusCodes.Status205ResetContent)]
[InlineData(StatusCodes.Status304NotModified)]
public async Task TransferEncodingChunkedNotSetOnNonBodyResponse(int statusCode)
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.StatusCode = statusCode;
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 {Encoding.ASCII.GetString(ReasonPhrases.ToStatusBytes(statusCode))}",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
}
}
}
[Fact]
public async Task TransferEncodingNotSetOnHeadResponse()
{
using (var server = new TestServer(httpContext =>
{
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
}
}
}
[Fact]
public async Task ResponseBodyNotWrittenOnHeadResponseAndLoggedOnlyOnce()
{
const string response = "hello, world";
var logTcs = new TaskCompletionSource<object>();
var mockKestrelTrace = new Mock<IKestrelTrace>();
mockKestrelTrace
.Setup(trace => trace.ConnectionHeadResponseBodyWrite(It.IsAny<string>(), response.Length))
.Callback<string, long>((connectionId, count) => logTcs.SetResult(null));
using (var server = new TestServer(async httpContext =>
{
await httpContext.Response.WriteAsync(response);
await httpContext.Response.Body.FlushAsync();
}, new TestServiceContext { Log = mockKestrelTrace.Object }))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"",
"");
// Wait for message to be logged before disposing the socket.
// Disposing the socket will abort the connection and Frame._requestAborted
// might be 1 by the time ProduceEnd() gets called and the message is logged.
await logTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
}
}
mockKestrelTrace.Verify(kestrelTrace =>
kestrelTrace.ConnectionHeadResponseBodyWrite(It.IsAny<string>(), response.Length), Times.Once);
}
[Fact]
public async Task ThrowsAndClosesConnectionWhenAppWritesMoreThanContentLengthWrite()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 11;
httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello,"), 0, 6);
httpContext.Response.Body.Write(Encoding.ASCII.GetBytes(" world"), 0, 6);
return Task.CompletedTask;
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"",
"hello,");
await connection.WaitForConnectionClose().TimeoutAfter(TimeSpan.FromSeconds(30));
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 11).",
logMessage.Exception.Message);
}
[Fact]
public async Task ThrowsAndClosesConnectionWhenAppWritesMoreThanContentLengthWriteAsync()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 11;
await httpContext.Response.WriteAsync("hello,");
await httpContext.Response.WriteAsync(" world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"",
"hello,");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 11).",
logMessage.Exception.Message);
}
[Fact]
public async Task InternalServerErrorAndConnectionClosedOnWriteWithMoreThanContentLengthAndResponseNotStarted()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = 5;
httpContext.Response.Body.Write(response, 0, response.Length);
return Task.CompletedTask;
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
$"HTTP/1.1 500 Internal Server Error",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 5).",
logMessage.Exception.Message);
}
[Fact]
public async Task InternalServerErrorAndConnectionClosedOnWriteAsyncWithMoreThanContentLengthAndResponseNotStarted()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = 5;
return httpContext.Response.Body.WriteAsync(response, 0, response.Length);
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
$"HTTP/1.1 500 Internal Server Error",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error);
Assert.Equal(
$"Response Content-Length mismatch: too many bytes written (12 of 5).",
logMessage.Exception.Message);
}
[Fact]
public async Task WhenAppWritesLessThanContentLengthErrorLogged()
{
var logTcs = new TaskCompletionSource<object>();
var mockTrace = new Mock<IKestrelTrace>();
mockTrace
.Setup(trace => trace.ApplicationError(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<InvalidOperationException>()))
.Callback<string, string, Exception>((connectionId, requestId, ex) =>
{
logTcs.SetResult(null);
});
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 13;
await httpContext.Response.WriteAsync("hello, world");
}, new TestServiceContext { Log = mockTrace.Object }))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
// Don't use ReceiveEnd here, otherwise the FIN might
// abort the request before the server checks the
// response content length, in which case the check
// will be skipped.
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 13",
"",
"hello, world");
// Wait for error message to be logged.
await logTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
// The server should close the connection in this situation.
await connection.WaitForConnectionClose().TimeoutAfter(TimeSpan.FromSeconds(10));
}
}
mockTrace.Verify(trace =>
trace.ApplicationError(
It.IsAny<string>(),
It.IsAny<string>(),
It.Is<InvalidOperationException>(ex =>
ex.Message.Equals($"Response Content-Length mismatch: too few bytes written (12 of 13).", StringComparison.Ordinal))));
}
[Fact]
public async Task WhenAppWritesLessThanContentLengthButRequestIsAbortedErrorNotLogged()
{
var requestAborted = new SemaphoreSlim(0);
var mockTrace = new Mock<IKestrelTrace>();
using (var server = new TestServer(async httpContext =>
{
httpContext.RequestAborted.Register(() =>
{
requestAborted.Release(2);
});
httpContext.Response.ContentLength = 12;
await httpContext.Response.WriteAsync("hello,");
// Wait until the request is aborted so we know Frame will skip the response content length check.
Assert.True(await requestAborted.WaitAsync(TimeSpan.FromSeconds(10)));
}, new TestServiceContext { Log = mockTrace.Object }))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 12",
"",
"hello,");
}
// Verify the request was really aborted. A timeout in
// the app would cause a server error and skip the content length
// check altogether, making the test pass for the wrong reason.
// Await before disposing the server to prevent races between the
// abort triggered by the connection RST and the abort called when
// disposing the server.
Assert.True(await requestAborted.WaitAsync(TimeSpan.FromSeconds(10)));
}
// With the server disposed we know all connections were drained and all messages were logged.
mockTrace.Verify(trace => trace.ApplicationError(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<InvalidOperationException>()), Times.Never);
}
[Fact]
public async Task WhenAppSetsContentLengthButDoesNotWriteBody500ResponseSentAndConnectionDoesNotClose()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 5;
return Task.CompletedTask;
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 500 Internal Server Error",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 500 Internal Server Error",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
var error = testLogger.Messages.Where(message => message.LogLevel == LogLevel.Error);
Assert.Equal(2, error.Count());
Assert.All(error, message => message.Equals("Response Content-Length mismatch: too few bytes written (0 of 5)."));
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task WhenAppSetsContentLengthToZeroAndDoesNotWriteNoErrorIsThrown(bool flushResponse)
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 0;
if (flushResponse)
{
await httpContext.Response.Body.FlushAsync();
}
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length.
[Fact]
public async Task WhenAppSetsTransferEncodingAndContentLengthWritingLessIsNotAnError()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = "chunked";
httpContext.Response.ContentLength = 13;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 13",
"Transfer-Encoding: chunked",
"",
"hello, world");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// If a message is received with both a Transfer-Encoding and a
// Content-Length header field, the Transfer-Encoding overrides the
// Content-Length.
[Fact]
public async Task WhenAppSetsTransferEncodingAndContentLengthWritingMoreIsNotAnError()
{
var testLogger = new TestApplicationErrorLogger();
var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) };
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = "chunked";
httpContext.Response.ContentLength = 11;
await httpContext.Response.WriteAsync("hello, world");
}, serviceContext))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
$"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 11",
"Transfer-Encoding: chunked",
"",
"hello, world");
}
}
Assert.Equal(0, testLogger.ApplicationErrorsLogged);
}
[Fact]
public async Task HeadResponseCanContainContentLengthHeader()
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 42;
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 42",
"",
"");
}
}
}
[Fact]
public async Task HeadResponseBodyNotWrittenWithAsyncWrite()
{
var flushed = new SemaphoreSlim(0, 1);
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 12;
await httpContext.Response.WriteAsync("hello, world");
await flushed.WaitAsync();
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 12",
"",
"");
flushed.Release();
}
}
}
[Fact]
public async Task HeadResponseBodyNotWrittenWithSyncWrite()
{
var flushed = new SemaphoreSlim(0, 1);
using (var server = new TestServer(httpContext =>
{
httpContext.Response.ContentLength = 12;
httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello, world"), 0, 12);
flushed.Wait();
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 12",
"",
"");
flushed.Release();
}
}
}
[Fact]
public async Task ZeroLengthWritesFlushHeaders()
{
var flushed = new SemaphoreSlim(0, 1);
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.ContentLength = 12;
await httpContext.Response.WriteAsync("");
flushed.Wait();
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 12",
"",
"");
flushed.Release();
await connection.ReceiveEnd("hello, world");
}
}
}
[Fact]
public async Task WriteAfterConnectionCloseNoops()
{
var connectionClosed = new ManualResetEventSlim();
var requestStarted = new ManualResetEventSlim();
var tcs = new TaskCompletionSource<object>();
using (var server = new TestServer(async httpContext =>
{
try
{
requestStarted.Set();
connectionClosed.Wait();
httpContext.Response.ContentLength = 12;
await httpContext.Response.WriteAsync("hello, world");
tcs.TrySetResult(null);
}
catch (Exception ex)
{
tcs.TrySetException(ex);
}
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
requestStarted.Wait();
connection.Shutdown(SocketShutdown.Send);
await connection.WaitForConnectionClose().TimeoutAfter(TimeSpan.FromSeconds(30));
}
connectionClosed.Set();
await tcs.Task.TimeoutAfter(TimeSpan.FromSeconds(30));
}
}
[Fact]
public async Task AppCanWriteOwnBadRequestResponse()
{
var expectedResponse = string.Empty;
var responseWritten = new SemaphoreSlim(0);
using (var server = new TestServer(async httpContext =>
{
try
{
await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1);
}
catch (BadHttpRequestException ex)
{
expectedResponse = ex.Message;
httpContext.Response.StatusCode = StatusCodes.Status400BadRequest;
httpContext.Response.ContentLength = ex.Message.Length;
await httpContext.Response.WriteAsync(ex.Message);
responseWritten.Release();
}
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Transfer-Encoding: chunked",
"",
"gg");
await responseWritten.WaitAsync().TimeoutAfter(TimeSpan.FromSeconds(30));
await connection.ReceiveEnd(
"HTTP/1.1 400 Bad Request",
$"Date: {server.Context.DateHeaderValue}",
$"Content-Length: {expectedResponse.Length}",
"",
expectedResponse);
}
}
}
[Theory]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCoding(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
[Theory]
[InlineData("gzip")]
[InlineData("chunked, gzip")]
public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCodingEvenIfConnectionKeepAliveSetInResponse(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Connection"] = "keep-alive";
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
[Theory]
[InlineData("chunked")]
[InlineData("gzip, chunked")]
public async Task ConnectionKeptAliveWhenChunkedIsFinalTransferCoding(string responseTransferEncoding)
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding;
// App would have to chunk manually, but here we don't care
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
// Make sure connection was kept open
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: {responseTransferEncoding}",
"",
"hello, world");
}
}
}
[Fact]
public async Task FirstWriteVerifiedAfterOnStarting()
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.OnStarting(() =>
{
// Change response to chunked
httpContext.Response.ContentLength = null;
return Task.CompletedTask;
});
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = response.Length - 1;
// If OnStarting is not run before verifying writes, an error response will be sent.
httpContext.Response.Body.Write(response, 0, response.Length);
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"c",
"hello, world",
"0",
"",
"");
}
}
}
[Fact]
public async Task SubsequentWriteVerifiedAfterOnStarting()
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.OnStarting(() =>
{
// Change response to chunked
httpContext.Response.ContentLength = null;
return Task.CompletedTask;
});
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = response.Length - 1;
// If OnStarting is not run before verifying writes, an error response will be sent.
httpContext.Response.Body.Write(response, 0, response.Length / 2);
httpContext.Response.Body.Write(response, response.Length / 2, response.Length - response.Length / 2);
return Task.CompletedTask;
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"6",
"hello,",
"6",
" world",
"0",
"",
"");
}
}
}
[Fact]
public async Task FirstWriteAsyncVerifiedAfterOnStarting()
{
using (var server = new TestServer(httpContext =>
{
httpContext.Response.OnStarting(() =>
{
// Change response to chunked
httpContext.Response.ContentLength = null;
return Task.CompletedTask;
});
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = response.Length - 1;
// If OnStarting is not run before verifying writes, an error response will be sent.
return httpContext.Response.Body.WriteAsync(response, 0, response.Length);
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"c",
"hello, world",
"0",
"",
"");
}
}
}
[Fact]
public async Task SubsequentWriteAsyncVerifiedAfterOnStarting()
{
using (var server = new TestServer(async httpContext =>
{
httpContext.Response.OnStarting(() =>
{
// Change response to chunked
httpContext.Response.ContentLength = null;
return Task.CompletedTask;
});
var response = Encoding.ASCII.GetBytes("hello, world");
httpContext.Response.ContentLength = response.Length - 1;
// If OnStarting is not run before verifying writes, an error response will be sent.
await httpContext.Response.Body.WriteAsync(response, 0, response.Length / 2);
await httpContext.Response.Body.WriteAsync(response, response.Length / 2, response.Length - response.Length / 2);
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"6",
"hello,",
"6",
" world",
"0",
"",
"");
}
}
}
[Fact]
public async Task WhenResponseAlreadyStartedResponseEndedBeforeConsumingRequestBody()
{
using (var server = new TestServer(async httpContext =>
{
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Content-Length: 1",
"",
"");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"c",
"hello, world",
"");
// If the expected behavior is regressed, this will hang because the
// server will try to consume the request body before flushing the chunked
// terminator.
await connection.Receive(
"0",
"",
"");
}
}
}
[Fact]
public async Task WhenResponseNotStartedResponseEndedAfterConsumingRequestBody()
{
using (var server = new TestServer(httpContext => Task.CompletedTask))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Transfer-Encoding: chunked",
"",
"gg");
// If the expected behavior is regressed, this will receive
// a success response because the server flushed the response
// before reading the malformed chunk header in the request.
await connection.ReceiveForcedEnd(
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Fact]
public async Task Sending100ContinueDoesNotStartResponse()
{
using (var server = new TestServer(httpContext =>
{
return httpContext.Request.Body.ReadAsync(new byte[1], 0, 1);
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Transfer-Encoding: chunked",
"Expect: 100-continue",
"",
"");
await connection.Receive(
"HTTP/1.1 100 Continue",
"",
"");
// Let the app finish
await connection.Send(
"1",
"a",
"");
// This will be consumed by Frame when it attempts to
// consume the request body and will cause an error.
await connection.Send(
"gg");
// If 100 Continue sets Frame.HasResponseStarted to true,
// a success response will be produced before the server sees the
// bad chunk header above, making this test fail.
await connection.ReceiveForcedEnd(
"HTTP/1.1 400 Bad Request",
"Connection: close",
$"Date: {server.Context.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Fact]
public async Task Sending100ContinueAndResponseSendsChunkTerminatorBeforeConsumingRequestBody()
{
using (var server = new TestServer(async httpContext =>
{
await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1);
await httpContext.Response.WriteAsync("hello, world");
}))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Content-Length: 2",
"Expect: 100-continue",
"",
"");
await connection.Receive(
"HTTP/1.1 100 Continue",
"",
"");
await connection.Send(
"a");
await connection.Receive(
"HTTP/1.1 200 OK",
$"Date: {server.Context.DateHeaderValue}",
$"Transfer-Encoding: chunked",
"",
"c",
"hello, world",
"");
// If the expected behavior is regressed, this will hang because the
// server will try to consume the request body before flushing the chunked
// terminator.
await connection.Receive(
"0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task Http11ResponseSentToHttp10Request(ListenOptions listenOptions)
{
var serviceContext = new TestServiceContext();
using (var server = new TestServer(TestApp.EchoApp, serviceContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.0",
"Content-Length: 11",
"",
"Hello World");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {serviceContext.DateHeaderValue}",
"",
"Hello World");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ZeroContentLengthSetAutomaticallyAfterNoWrites(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(TestApp.EmptyApp, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"GET / HTTP/1.0",
"Connection: keep-alive",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
"Connection: keep-alive",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ZeroContentLengthSetAutomaticallyForNonKeepAliveRequests(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(async httpContext =>
{
Assert.Equal(0, await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1).TimeoutAfter(TimeSpan.FromSeconds(10)));
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"Connection: close",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ZeroContentLengthNotSetAutomaticallyForHeadRequests(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(TestApp.EmptyApp, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"HEAD / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ZeroContentLengthNotSetAutomaticallyForCertainStatusCodes(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(async httpContext =>
{
var request = httpContext.Request;
var response = httpContext.Response;
using (var reader = new StreamReader(request.Body, Encoding.ASCII))
{
var statusString = await reader.ReadLineAsync();
response.StatusCode = int.Parse(statusString);
}
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Content-Length: 3",
"",
"204POST / HTTP/1.1",
"Host:",
"Content-Length: 3",
"",
"205POST / HTTP/1.1",
"Host:",
"Content-Length: 3",
"",
"304POST / HTTP/1.1",
"Host:",
"Content-Length: 3",
"",
"200");
await connection.ReceiveEnd(
"HTTP/1.1 204 No Content",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 205 Reset Content",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 304 Not Modified",
$"Date: {testContext.DateHeaderValue}",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ConnectionClosedAfter101Response(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(async httpContext =>
{
var request = httpContext.Request;
var stream = await httpContext.Features.Get<IHttpUpgradeFeature>().UpgradeAsync();
var response = Encoding.ASCII.GetBytes("hello, world");
await stream.WriteAsync(response, 0, response.Length);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"Connection: Upgrade",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 101 Switching Protocols",
"Connection: Upgrade",
$"Date: {testContext.DateHeaderValue}",
"",
"hello, world");
}
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"Connection: keep-alive, Upgrade",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 101 Switching Protocols",
"Connection: Upgrade",
$"Date: {testContext.DateHeaderValue}",
"",
"hello, world");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowingResultsIn500Response(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.CompletedTask;
}, null);
// Anything added to the ResponseHeaders dictionary is ignored
response.Headers["Content-Length"] = "11";
throw new Exception();
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"GET / HTTP/1.1",
"Host:",
"Connection: close",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 500 Internal Server Error",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 500 Internal Server Error",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
Assert.False(onStartingCalled);
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowingInOnStartingResultsInFailedWritesAnd500Response(ListenOptions listenOptions)
{
var callback1Called = false;
var callback2CallCount = 0;
var testContext = new TestServiceContext();
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var onStartingException = new Exception();
var response = httpContext.Response;
response.OnStarting(_ =>
{
callback1Called = true;
throw onStartingException;
}, null);
response.OnStarting(_ =>
{
callback2CallCount++;
throw onStartingException;
}, null);
var writeException = await Assert.ThrowsAsync<ObjectDisposedException>(async () => await response.Body.FlushAsync());
Assert.Same(onStartingException, writeException.InnerException);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 500 Internal Server Error",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 500 Internal Server Error",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
// The first registered OnStarting callback should not have been called,
// since callbacks run in LIFO order and the second registered callback threw first.
Assert.False(callback1Called);
Assert.Equal(2, callback2CallCount);
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowingInOnCompletedIsLoggedAndClosesConnection(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
var onCompletedCalled1 = false;
var onCompletedCalled2 = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnCompleted(_ =>
{
onCompletedCalled1 = true;
throw new Exception();
}, null);
response.OnCompleted(_ =>
{
onCompletedCalled2 = true;
throw new Exception();
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
}
// All OnCompleted callbacks should be called even if they throw.
Assert.Equal(2, testLogger.ApplicationErrorsLogged);
Assert.True(onCompletedCalled1);
Assert.True(onCompletedCalled2);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowingAfterWritingKillsConnection(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.FromResult<object>(null);
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
throw new Exception();
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
}
Assert.True(onStartingCalled);
Assert.Equal(1, testLogger.ApplicationErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowingAfterPartialWriteKillsConnection(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
bool onStartingCalled = false;
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.OnStarting(_ =>
{
onStartingCalled = true;
return Task.FromResult<object>(null);
}, null);
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello"), 0, 5);
throw new Exception();
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello");
}
}
Assert.True(onStartingCalled);
Assert.Equal(1, testLogger.ApplicationErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ThrowsOnWriteWithRequestAbortedTokenAfterRequestIsAborted(ListenOptions listenOptions)
{
// This should match _maxBytesPreCompleted in SocketOutput
var maxBytesPreCompleted = 65536;
// Ensure string is long enough to disable write-behind buffering
var largeString = new string('a', maxBytesPreCompleted + 1);
var writeTcs = new TaskCompletionSource<object>();
var requestAbortedWh = new ManualResetEventSlim();
var requestStartWh = new ManualResetEventSlim();
using (var server = new TestServer(async httpContext =>
{
requestStartWh.Set();
var response = httpContext.Response;
var request = httpContext.Request;
var lifetime = httpContext.Features.Get<IHttpRequestLifetimeFeature>();
lifetime.RequestAborted.Register(() => requestAbortedWh.Set());
Assert.True(requestAbortedWh.Wait(TimeSpan.FromSeconds(10)));
try
{
await response.WriteAsync(largeString, lifetime.RequestAborted);
}
catch (Exception ex)
{
writeTcs.SetException(ex);
throw;
}
writeTcs.SetException(new Exception("This shouldn't be reached."));
}, new TestServiceContext(), listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.1",
"Host:",
"Content-Length: 0",
"",
"");
Assert.True(requestStartWh.Wait(TimeSpan.FromSeconds(10)));
}
// Write failed - can throw TaskCanceledException or OperationCanceledException,
// depending on how far the canceled write goes.
await Assert.ThrowsAnyAsync<OperationCanceledException>(async () => await writeTcs.Task).TimeoutAfter(TimeSpan.FromSeconds(15));
// RequestAborted tripped
Assert.True(requestAbortedWh.Wait(TimeSpan.FromSeconds(1)));
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task NoErrorsLoggedWhenServerEndsConnectionBeforeClient(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
var testLogger = new TestApplicationErrorLogger();
testContext.Log = new KestrelTrace(testLogger);
using (var server = new TestServer(async httpContext =>
{
var response = httpContext.Response;
response.Headers["Content-Length"] = new[] { "11" };
await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.0",
"",
"");
await connection.ReceiveForcedEnd(
"HTTP/1.1 200 OK",
"Connection: close",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 11",
"",
"Hello World");
}
}
Assert.Equal(0, testLogger.TotalErrorsLogged);
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task NoResponseSentWhenConnectionIsClosedByServerBeforeClientFinishesSendingRequest(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
using (var server = new TestServer(httpContext =>
{
httpContext.Abort();
return Task.CompletedTask;
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"POST / HTTP/1.0",
"Content-Length: 1",
"",
"");
await connection.ReceiveForcedEnd();
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task ResponseHeadersAreResetOnEachRequest(ListenOptions listenOptions)
{
var testContext = new TestServiceContext();
IHeaderDictionary originalResponseHeaders = null;
var firstRequest = true;
using (var server = new TestServer(httpContext =>
{
var responseFeature = httpContext.Features.Get<IHttpResponseFeature>();
if (firstRequest)
{
originalResponseHeaders = responseFeature.Headers;
responseFeature.Headers = new FrameResponseHeaders();
firstRequest = false;
}
else
{
Assert.Same(originalResponseHeaders, responseFeature.Headers);
}
return Task.CompletedTask;
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
"Content-Length: 0",
"",
"");
}
}
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task OnStartingCallbacksAreCalledInLastInFirstOutOrder(ListenOptions listenOptions)
{
const string response = "hello, world";
var testContext = new TestServiceContext();
var callOrder = new Stack<int>();
var onStartingTcs = new TaskCompletionSource<object>();
using (var server = new TestServer(async context =>
{
context.Response.OnStarting(_ =>
{
callOrder.Push(1);
onStartingTcs.SetResult(null);
return Task.CompletedTask;
}, null);
context.Response.OnStarting(_ =>
{
callOrder.Push(2);
return Task.CompletedTask;
}, null);
context.Response.ContentLength = response.Length;
await context.Response.WriteAsync(response);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
$"Content-Length: {response.Length}",
"",
"hello, world");
// Wait for all callbacks to be called.
await onStartingTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
}
}
Assert.Equal(1, callOrder.Pop());
Assert.Equal(2, callOrder.Pop());
}
[Theory]
[MemberData(nameof(ConnectionAdapterData))]
public async Task OnCompletedCallbacksAreCalledInLastInFirstOutOrder(ListenOptions listenOptions)
{
const string response = "hello, world";
var testContext = new TestServiceContext();
var callOrder = new Stack<int>();
var onCompletedTcs = new TaskCompletionSource<object>();
using (var server = new TestServer(async context =>
{
context.Response.OnCompleted(_ =>
{
callOrder.Push(1);
onCompletedTcs.SetResult(null);
return Task.CompletedTask;
}, null);
context.Response.OnCompleted(_ =>
{
callOrder.Push(2);
return Task.CompletedTask;
}, null);
context.Response.ContentLength = response.Length;
await context.Response.WriteAsync(response);
}, testContext, listenOptions))
{
using (var connection = server.CreateConnection())
{
await connection.Send(
"GET / HTTP/1.1",
"Host:",
"",
"");
await connection.ReceiveEnd(
"HTTP/1.1 200 OK",
$"Date: {testContext.DateHeaderValue}",
$"Content-Length: {response.Length}",
"",
"hello, world");
// Wait for all callbacks to be called.
await onCompletedTcs.Task.TimeoutAfter(TimeSpan.FromSeconds(10));
}
}
Assert.Equal(1, callOrder.Pop());
Assert.Equal(2, callOrder.Pop());
}
public static TheoryData<string, StringValues, string> NullHeaderData
{
get
{
var dataset = new TheoryData<string, StringValues, string>();
// Unknown headers
dataset.Add("NullString", (string)null, null);
dataset.Add("EmptyString", "", "");
dataset.Add("NullStringArray", new string[] { null }, null);
dataset.Add("EmptyStringArray", new string[] { "" }, "");
dataset.Add("MixedStringArray", new string[] { null, "" }, "");
// Known headers
dataset.Add("Location", (string)null, null);
dataset.Add("Location", "", "");
dataset.Add("Location", new string[] { null }, null);
dataset.Add("Location", new string[] { "" }, "");
dataset.Add("Location", new string[] { null, "" }, "");
return dataset;
}
}
}
}
| 1 | 13,575 |
This test is specifically checking behavior on sync I/O (note there are tests with Write in the name, and other tests with WriteAsync). Enable sync I/O here.
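A minimal sketch of what the suggested change could look like, assuming the IHttpBodyControlFeature API from Microsoft.AspNetCore.Http.Features is available; the SyncWriteTestApp name is illustrative and not part of the repository, and on Kestrel versions that still allow synchronous I/O by default the feature toggle is simply a no-op.

using System.Text;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;

static class SyncWriteTestApp
{
    // Request delegate for a sync-write test: opts the request into synchronous
    // I/O if the server exposes the feature, then writes the response body with
    // the blocking Write overload under test.
    public static Task Run(HttpContext httpContext)
    {
        var bodyControl = httpContext.Features.Get<IHttpBodyControlFeature>();
        if (bodyControl != null)
        {
            bodyControl.AllowSynchronousIO = true;
        }

        var payload = Encoding.ASCII.GetBytes("hello, world");
        httpContext.Response.ContentLength = payload.Length;
        httpContext.Response.Body.Write(payload, 0, payload.Length);
        return Task.CompletedTask;
    }
}

In a sync-write test it could stand in for the inline lambda, e.g. new TestServer(SyncWriteTestApp.Run).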
|
aspnet-KestrelHttpServer
|
.cs
|
@@ -0,0 +1,11 @@
+class WeeklyIterationDripMailerPreview < ActionMailer::Preview
+ def weekly_update
+ user = User.new(name: 'John Doe')
+ video = Video.first.tap do |video|
+ video.email_body_text = 'Email body text'
+ video.email_cta_label = 'Click Here!'
+ end
+
+ WeeklyIterationDripMailer.weekly_update(user: user, video: video)
+ end
+end
| 1 | 1 | 18,307 |
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
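A tiny illustrative snippet (not part of the patch) showing the rule the linter message describes:

# Illustrative only -- restates the string-quoting rule above.
user_name = "John Doe"                 # plain string: prefer double quotes
cta_label = "Click Here!"              # no escaping needed, so double quotes again
raw_text  = 'He said "hello, world"'   # single quotes avoid backslash-escaping the inner double quotes
puts "#{user_name}: #{cta_label} (#{raw_text})"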
|
thoughtbot-upcase
|
rb
|
|
@@ -56,6 +56,8 @@ type CStorVolumeClaimSpec struct {
// CStorVolumeRef has the information about where CstorVolumeClaim
// is created from.
CStorVolumeRef *corev1.ObjectReference `json:"cstorVolumeRef,omitempty"`
+ // CstorVolumeSource contains the source volumeID+Snapshot combination
+ CstorVolumeSource string `json:"cstorVolumeSource,omitempty"`
}
// CStorVolumeClaimPublish contains info related to attachment of a volume to a node.
| 1 |
/*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// CStorVolumeClaim describes a cstor volume claim resource created as
// custom resource. CStorVolumeClaim is a request for creating cstor volume
// related resources like deployment, svc etc.
type CStorVolumeClaim struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec defines a specification of a cstor volume claim required
// to provision cstor volume resources
Spec CStorVolumeClaimSpec `json:"spec"`
// Publish contains info related to attachment of a volume to a node.
// i.e. NodeId etc.
Publish CStorVolumeClaimPublish `json:"publish,omitempty"`
// Status represents the current information/status for the cstor volume
// claim, populated by the controller.
Status CStorVolumeClaimStatus `json:"status"`
VersionDetails VersionDetails `json:"versionDetails"`
}
// CStorVolumeClaimSpec is the spec for a CStorVolumeClaim resource
type CStorVolumeClaimSpec struct {
// Capacity represents the actual resources of the underlying
// cstor volume.
Capacity corev1.ResourceList `json:"capacity"`
// ReplicaCount represents the actual replica count for the underlying
// cstor volume
ReplicaCount int `json:"replicaCount"`
// CStorVolumeRef has the information about where CstorVolumeClaim
// is created from.
CStorVolumeRef *corev1.ObjectReference `json:"cstorVolumeRef,omitempty"`
}
// CStorVolumeClaimPublish contains info related to attachment of a volume to a node.
// i.e. NodeId etc.
type CStorVolumeClaimPublish struct {
// NodeID contains publish info related to attachment of a volume to a node.
NodeID string `json:"nodeId,omitempty"`
}
// CStorVolumeClaimPhase represents the current phase of CStorVolumeClaim.
type CStorVolumeClaimPhase string
const (
//CStorVolumeClaimPhasePending indicates that the cvc is still waiting for
//the cstorvolume to be created and bound
CStorVolumeClaimPhasePending CStorVolumeClaimPhase = "Pending"
//CStorVolumeClaimPhaseBound indicates that the cstorvolume has been
//provisioned and bound to the cstor volume claim
CStorVolumeClaimPhaseBound CStorVolumeClaimPhase = "Bound"
//CStorVolumeClaimPhaseFailed indicates that the cstorvolume provisioning
//has failed
CStorVolumeClaimPhaseFailed CStorVolumeClaimPhase = "Failed"
)
// CStorVolumeClaimStatus is for handling status of CstorVolume Claim.
// defines the observed state of CStorVolumeClaim
type CStorVolumeClaimStatus struct {
// Phase represents the current phase of CStorVolumeClaim.
Phase CStorVolumeClaimPhase `json:"phase"`
// Capacity represents the actual resources of the underlying volume.
Capacity corev1.ResourceList `json:"capacity,omitempty"`
Conditions []CStorVolumeClaimCondition `json:"condition,omitempty"`
}
// CStorVolumeClaimCondition contains details about state of cstor volume
type CStorVolumeClaimCondition struct {
// Current Condition of cstor volume claim. If underlying persistent volume is being
// resized then the Condition will be set to 'ResizeStarted' etc
Type CStorVolumeClaimConditionType `json:"type"`
// Last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a brief CamelCase string that describes any failure
Reason string `json:"reason"`
// Human-readable message indicating details about last transition.
Message string `json:"message"`
}
// CStorVolumeClaimConditionType is a valid value of CstorVolumeClaimCondition.Type
type CStorVolumeClaimConditionType string
// These constants are CVC condition types related to resize operation.
const (
// CStorVolumeClaimResizing ...
CStorVolumeClaimResizing CStorVolumeClaimConditionType = "Resizing"
// CStorVolumeClaimResizeFailed ...
CStorVolumeClaimResizeFailed CStorVolumeClaimConditionType = "VolumeResizeFailed"
// CStorVolumeClaimResizeSuccess ...
CStorVolumeClaimResizeSuccess CStorVolumeClaimConditionType = "VolumeResizeSuccessful"
// CStorVolumeClaimResizePending ...
CStorVolumeClaimResizePending CStorVolumeClaimConditionType = "VolumeResizePending"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:openapi-gen=true
// CStorVolumeClaimList is a list of CStorVolumeClaim resources
type CStorVolumeClaimList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []CStorVolumeClaim `json:"items"`
}
| 1 | 17,608 |
nit: Should we have something like `volumeName@snapShotname`, and have this filled only if it is a clone creation?
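A hypothetical sketch of the reviewer's suggestion; the splitVolumeSource helper and the example values are illustrative and not part of the maya codebase.

package main

import (
	"fmt"
	"strings"
)

// splitVolumeSource splits a "volumeName@snapshotName" value, returning
// isClone=false for an empty or snapshot-less source.
func splitVolumeSource(src string) (volume, snapshot string, isClone bool) {
	if src == "" {
		return "", "", false
	}
	parts := strings.SplitN(src, "@", 2)
	if len(parts) != 2 {
		return parts[0], "", false
	}
	return parts[0], parts[1], true
}

func main() {
	vol, snap, clone := splitVolumeSource("pvc-1234@snap-weekly")
	fmt.Println(vol, snap, clone) // pvc-1234 snap-weekly true
}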
|
openebs-maya
|
go
|
@@ -164,10 +164,13 @@ func WriteWordpressConfig(wordpressConfig *WordpressConfig, filePath string) err
return err
}
- // Ensure target directory is writable.
+ // Ensure target directory exists and is writeable
dir := filepath.Dir(filePath)
- err = os.Chmod(dir, 0755)
- if err != nil {
+ if err = os.Chmod(dir, 0755); os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return err
+ }
+ } else if err != nil {
return err
}
| 1 |
package ddevapp
import (
"os"
"path/filepath"
"text/template"
"fmt"
"github.com/Masterminds/sprig"
"github.com/drud/ddev/pkg/archive"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/output"
"github.com/drud/ddev/pkg/util"
)
// WordpressConfig encapsulates all the configurations for a WordPress site.
type WordpressConfig struct {
WPGeneric bool
DeployName string
DeployURL string
DatabaseName string
DatabaseUsername string
DatabasePassword string
DatabaseHost string
AuthKey string
SecureAuthKey string
LoggedInKey string
NonceKey string
AuthSalt string
SecureAuthSalt string
LoggedInSalt string
NonceSalt string
Docroot string
TablePrefix string
Signature string
}
// NewWordpressConfig produces a WordpressConfig object with defaults.
func NewWordpressConfig() *WordpressConfig {
return &WordpressConfig{
WPGeneric: false,
DatabaseName: "db",
DatabaseUsername: "db",
DatabasePassword: "db",
DatabaseHost: "db",
Docroot: "/var/www/html/docroot",
TablePrefix: "wp_",
AuthKey: util.RandString(64),
AuthSalt: util.RandString(64),
LoggedInKey: util.RandString(64),
LoggedInSalt: util.RandString(64),
NonceKey: util.RandString(64),
NonceSalt: util.RandString(64),
SecureAuthKey: util.RandString(64),
SecureAuthSalt: util.RandString(64),
Signature: DdevFileSignature,
}
}
// wordPressHooks adds a wp-specific hooks example for post-import-db
const wordPressHooks = `
# Un-comment and enter the production url and local url
# to replace in your database after import.
# post-import-db:
# - exec: wp search-replace <production-url> <local-url>`
// getWordpressHooks for appending as byte array
func getWordpressHooks() []byte {
return []byte(wordPressHooks)
}
// getWordpressUploadDir just returns a static upload files directory string.
func getWordpressUploadDir(app *DdevApp) string {
return "wp-content/uploads"
}
const (
wordpressTemplate = `<?php
{{ $config := . }}
/**
{{ $config.Signature }}: Automatically generated WordPress wp-config.php file.
ddev manages this file and may delete or overwrite the file unless this comment is removed.
*/
// ** MySQL settings - You can get this info from your web host ** //
/** The name of the database for WordPress */
define('DB_NAME', '{{ $config.DatabaseName }}');
/** MySQL database username */
define('DB_USER', '{{ $config.DatabaseUsername }}');
/** MySQL database password */
define('DB_PASSWORD', '{{ $config.DatabasePassword }}');
/** MySQL hostname */
define('DB_HOST', '{{ $config.DatabaseHost }}');
/** Database Charset to use in creating database tables. */
define('DB_CHARSET', 'utf8mb4');
/** The Database Collate type. Don't change this if in doubt. */
define('DB_COLLATE', '');
/**
* WordPress Database Table prefix.
*/
$table_prefix = '{{ $config.TablePrefix }}';
/**
* For developers: WordPress debugging mode.
*/
define('WP_DEBUG', false);
/**#@+
* Authentication Unique Keys and Salts.
*/
define( 'AUTH_KEY', '{{ $config.AuthKey }}' );
define( 'SECURE_AUTH_KEY', '{{ $config.SecureAuthKey }}' );
define( 'LOGGED_IN_KEY', '{{ $config.LoggedInKey }}' );
define( 'NONCE_KEY', '{{ $config.NonceKey }}' );
define( 'AUTH_SALT', '{{ $config.AuthSalt }}' );
define( 'SECURE_AUTH_SALT', '{{ $config.SecureAuthSalt }}' );
define( 'LOGGED_IN_SALT', '{{ $config.LoggedInSalt }}' );
define( 'NONCE_SALT', '{{ $config.NonceSalt }}' );
/* That's all, stop editing! Happy blogging. */
/** Absolute path to the WordPress directory. */
if ( !defined('ABSPATH') )
define('ABSPATH', dirname(__FILE__) . '/');
/**
Sets up WordPress vars and included files.
wp-settings.php is typically included in wp-config.php. This check ensures it is not
included again if this file is written to wp-config-local.php.
*/
if (basename(__FILE__) == "wp-config.php") {
require_once(ABSPATH . '/wp-settings.php');
}
`
)
// createWordpressSettingsFile creates a wordpress settings file from a
// template. Returns fullpath to location of file + err
func createWordpressSettingsFile(app *DdevApp) (string, error) {
settingsFilePath := app.SiteSettingsPath
if settingsFilePath == "" {
settingsFilePath = app.SiteLocalSettingsPath
}
output.UserOut.Printf("Generating %s file for database connection.", filepath.Base(settingsFilePath))
wpConfig := NewWordpressConfig()
wpConfig.DeployURL = app.GetHTTPURL()
err := WriteWordpressConfig(wpConfig, settingsFilePath)
return settingsFilePath, err
}
// WriteWordpressConfig dynamically produces valid wp-config.php file by combining a configuration
// object with a data-driven template.
func WriteWordpressConfig(wordpressConfig *WordpressConfig, filePath string) error {
tmpl, err := template.New("wordpressConfig").Funcs(sprig.TxtFuncMap()).Parse(wordpressTemplate)
if err != nil {
return err
}
// Ensure target directory is writable.
dir := filepath.Dir(filePath)
err = os.Chmod(dir, 0755)
if err != nil {
return err
}
file, err := os.Create(filePath)
if err != nil {
return err
}
err = tmpl.Execute(file, wordpressConfig)
if err != nil {
return err
}
util.CheckClose(file)
return nil
}
// setWordpressSiteSettingsPaths sets the expected settings files paths for
// a wordpress site.
func setWordpressSiteSettingsPaths(app *DdevApp) {
settingsFileBasePath := filepath.Join(app.AppRoot, app.Docroot)
var settingsFilePath, localSettingsFilePath string
settingsFilePath = filepath.Join(settingsFileBasePath, "wp-config.php")
localSettingsFilePath = filepath.Join(settingsFileBasePath, "wp-config-local.php")
app.SiteSettingsPath = settingsFilePath
app.SiteLocalSettingsPath = localSettingsFilePath
}
// isWordpressApp returns true if the app is of type wordpress
func isWordpressApp(app *DdevApp) bool {
if _, err := os.Stat(filepath.Join(app.AppRoot, app.Docroot, "wp-login.php")); err == nil {
return true
}
return false
}
// wordpressPostImportDBAction just emits a warning about updating URLs as is
// required with wordpress when running on a different URL.
func wordpressPostImportDBAction(app *DdevApp) error {
util.Warning("Wordpress sites require a search/replace of the database when the URL is changed. You can run \"ddev exec wp search-replace [http://www.myproductionsite.example] %s\" to update the URLs across your database. For more information, see http://wp-cli.org/commands/search-replace/", app.GetHTTPURL())
return nil
}
// wordpressImportFilesAction defines the Wordpress workflow for importing project files.
// The Wordpress workflow is currently identical to the Drupal import-files workflow.
func wordpressImportFilesAction(app *DdevApp, importPath, extPath string) error {
destPath := filepath.Join(app.GetAppRoot(), app.GetDocroot(), app.GetUploadDir())
// parent of destination dir should exist
if !fileutil.FileExists(filepath.Dir(destPath)) {
return fmt.Errorf("unable to import to %s: parent directory does not exist", destPath)
}
// parent of destination dir should be writable.
if err := os.Chmod(filepath.Dir(destPath), 0755); err != nil {
return err
}
// If the destination path exists, remove it as was warned
if fileutil.FileExists(destPath) {
if err := os.RemoveAll(destPath); err != nil {
return fmt.Errorf("failed to cleanup %s before import: %v", destPath, err)
}
}
if isTar(importPath) {
if err := archive.Untar(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if isZip(importPath) {
if err := archive.Unzip(importPath, destPath, extPath); err != nil {
return fmt.Errorf("failed to extract provided archive: %v", err)
}
return nil
}
if err := fileutil.CopyDir(importPath, destPath); err != nil {
return err
}
return nil
}
| 1 | 13,064 |
This seems like an improved pattern :)
|
drud-ddev
|
php
|
@@ -53,7 +53,11 @@ def get_hdfs_syntax():
override this setting with "cdh3" or "apache1" in the hadoop section of the config in
order to use the old syntax
"""
- return configuration.get_config().get("hadoop", "version", "cdh4").lower()
+ try:
+ import snakebite
+ return configuration.get_config().get("hadoop", "version", "snakebite").lower()
+ except ImportError:
+ return configuration.get_config().get("hadoop", "version", "cdh4").lower()
def load_hadoop_cmd():
| 1 |
# Copyright (c) 2012 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import subprocess
import os
import random
import tempfile
import urlparse
import luigi.format
import datetime
import re
from luigi.target import FileSystem, FileSystemTarget, FileAlreadyExists
import configuration
class HDFSCliError(Exception):
def __init__(self, command, returncode, stdout, stderr):
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
msg = ("Command %r failed [exit code %d]\n" +
"---stdout---\n" +
"%s\n" +
"---stderr---\n" +
"%s" +
"------------") % (command, returncode, stdout, stderr)
super(HDFSCliError, self).__init__(msg)
def call_check(command):
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise HDFSCliError(command, p.returncode, stdout, stderr)
return stdout
def get_hdfs_syntax():
"""
CDH4 (hadoop 2+) has a slightly different syntax for interacting with
hdfs via the command line. The default version is CDH4, but one can
override this setting with "cdh3" or "apache1" in the hadoop section of the config in
order to use the old syntax
"""
return configuration.get_config().get("hadoop", "version", "cdh4").lower()
def load_hadoop_cmd():
return luigi.configuration.get_config().get('hadoop', 'command', 'hadoop')
def tmppath(path=None):
return tempfile.gettempdir() + '/' + (path + "-" if path else "") + "luigitemp-%08d" % random.randrange(1e9)
class HdfsClient(FileSystem):
"""This client uses Apache 2.x syntax for file system commands, which also matched CDH4"""
def exists(self, path):
""" Use `hadoop fs -stat to check file existance
"""
cmd = [load_hadoop_cmd(), 'fs', '-stat', path]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
return True
else:
not_found_pattern = "^stat: `.*': No such file or directory$"
not_found_re = re.compile(not_found_pattern)
for line in stderr.split('\n'):
if not_found_re.match(line):
return False
raise HDFSCliError(cmd, p.returncode, stdout, stderr)
def rename(self, path, dest):
parent_dir = os.path.dirname(dest)
if parent_dir != '' and not self.exists(parent_dir):
self.mkdir(parent_dir)
call_check([load_hadoop_cmd(), 'fs', '-mv', path, dest])
def remove(self, path, recursive=True, skip_trash=False):
if recursive:
cmd = [load_hadoop_cmd(), 'fs', '-rm', '-r']
else:
cmd = [load_hadoop_cmd(), 'fs', '-rm']
if skip_trash:
cmd = cmd + ['-skipTrash']
cmd = cmd + [path]
call_check(cmd)
def chmod(self, path, permissions, recursive=False):
if recursive:
cmd = [load_hadoop_cmd(), 'fs', '-chmod', '-R', permissions, path]
else:
cmd = [load_hadoop_cmd(), 'fs', '-chmod', permissions, path]
call_check(cmd)
def chown(self, path, owner, group, recursive=False):
if owner is None:
owner = ''
if group is None:
group = ''
ownership = "%s:%s" % (owner, group)
if recursive:
cmd = [load_hadoop_cmd(), 'fs', '-chown', '-R', ownership, path]
else:
cmd = [load_hadoop_cmd(), 'fs', '-chown', ownership, path]
call_check(cmd)
def count(self, path):
cmd = [load_hadoop_cmd(), 'fs', '-count', path]
stdout = call_check(cmd)
(dir_count, file_count, content_size, ppath) = stdout.split()
results = {'content_size': content_size, 'dir_count': dir_count, 'file_count': file_count}
return results
def copy(self, path, destination):
call_check([load_hadoop_cmd(), 'fs', '-cp', path, destination])
def put(self, local_path, destination):
call_check([load_hadoop_cmd(), 'fs', '-put', local_path, destination])
def get(self, path, local_destination):
call_check([load_hadoop_cmd(), 'fs', '-get', path, local_destination])
def getmerge(self, path, local_destination, new_line=False):
if new_line:
cmd = [load_hadoop_cmd(), 'fs', '-getmerge', '-nl', path, local_destination]
else:
cmd = [load_hadoop_cmd(), 'fs', '-getmerge', path, local_destination]
call_check(cmd)
def mkdir(self, path):
try:
call_check([load_hadoop_cmd(), 'fs', '-mkdir', path])
except HDFSCliError, ex:
if "File exists" in ex.stderr:
raise FileAlreadyExists(ex.stderr)
else:
raise
def listdir(self, path, ignore_directories=False, ignore_files=False,
include_size=False, include_type=False, include_time=False, recursive=False):
if not path:
path = "." # default to current/home catalog
if recursive:
cmd = [load_hadoop_cmd(), 'fs', '-ls', '-R', path]
else:
cmd = [load_hadoop_cmd(), 'fs', '-ls', path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
lines = proc.stdout
for line in lines:
if line.startswith('Found'):
continue # "hadoop fs -ls" outputs "Found %d items" as its first line
line = line.rstrip("\n")
if ignore_directories and line[0] == 'd':
continue
if ignore_files and line[0] == '-':
continue
data = line.split(' ')
file = data[-1]
size = int(data[-4])
line_type = line[0]
extra_data = ()
if include_size:
extra_data += (size,)
if include_type:
extra_data += (line_type,)
if include_time:
time_str = '%sT%s' % (data[-3], data[-2])
modification_time = datetime.datetime.strptime(time_str,
'%Y-%m-%dT%H:%M')
extra_data += (modification_time,)
if len(extra_data) > 0:
yield (file,) + extra_data
else:
yield file
class HdfsClientCdh3(HdfsClient):
"""This client uses CDH3 syntax for file system commands"""
def remove(self, path, recursive=True, skip_trash=False):
if recursive:
cmd = [load_hadoop_cmd(), 'fs', '-rmr']
else:
cmd = [load_hadoop_cmd(), 'fs', '-rm']
if skip_trash:
cmd = cmd + ['-skipTrash']
cmd = cmd + [path]
call_check(cmd)
class HdfsClientApache1(HdfsClientCdh3):
"""This client uses Apache 1.x syntax for file system commands,
which are similar to CDH3 except for the file existence check"""
def exists(self, path):
cmd = [load_hadoop_cmd(), 'fs', '-test', '-e', path]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
return True
elif p.returncode == 1:
return False
else:
raise HDFSCliError(cmd, p.returncode, stdout, stderr)
if get_hdfs_syntax() == "cdh4":
client = HdfsClient()
elif get_hdfs_syntax() == "cdh3":
client = HdfsClientCdh3()
elif get_hdfs_syntax() == "apache1":
client = HdfsClientApache1()
else:
raise Exception("Error: Unknown version specified in Hadoop version configuration parameter")
exists = client.exists
rename = client.rename
remove = client.remove
mkdir = client.mkdir
listdir = client.listdir
class HdfsReadPipe(luigi.format.InputPipeProcessWrapper):
def __init__(self, path):
super(HdfsReadPipe, self).__init__([load_hadoop_cmd(), 'fs', '-cat', path])
class HdfsAtomicWritePipe(luigi.format.OutputPipeProcessWrapper):
""" File like object for writing to HDFS
The referenced file is first written to a temporary location and then
renamed to final location on close(). If close() isn't called
the temporary file will be cleaned up when this object is
garbage collected
TODO: if this is buggy, change it so it first writes to a
local temporary file and then uploads it on completion
"""
def __init__(self, path):
self.path = path
self.tmppath = tmppath(self.path)
tmpdir = os.path.dirname(self.tmppath)
if get_hdfs_syntax() == "cdh4":
if subprocess.Popen([load_hadoop_cmd(), 'fs', '-mkdir', '-p', tmpdir]).wait():
raise RuntimeError("Could not create directory: %s" % tmpdir)
else:
if not exists(tmpdir) and subprocess.Popen([load_hadoop_cmd(), 'fs', '-mkdir', tmpdir]).wait():
raise RuntimeError("Could not create directory: %s" % tmpdir)
super(HdfsAtomicWritePipe, self).__init__([load_hadoop_cmd(), 'fs', '-put', '-', self.tmppath])
def abort(self):
print "Aborting %s('%s'). Removing temporary file '%s'" % (self.__class__.__name__, self.path, self.tmppath)
super(HdfsAtomicWritePipe, self).abort()
remove(self.tmppath)
def close(self):
super(HdfsAtomicWritePipe, self).close()
rename(self.tmppath, self.path)
class HdfsAtomicWriteDirPipe(luigi.format.OutputPipeProcessWrapper):
""" Writes a data<data_extension> file to a directory at <path> """
def __init__(self, path, data_extension=""):
self.path = path
self.tmppath = tmppath(self.path)
self.datapath = self.tmppath + ("/data%s" % data_extension)
super(HdfsAtomicWriteDirPipe, self).__init__([load_hadoop_cmd(), 'fs', '-put', '-', self.datapath])
def abort(self):
print "Aborting %s('%s'). Removing temporary dir '%s'" % (self.__class__.__name__, self.path, self.tmppath)
super(HdfsAtomicWriteDirPipe, self).abort()
remove(self.tmppath)
def close(self):
super(HdfsAtomicWriteDirPipe, self).close()
rename(self.tmppath, self.path)
class Plain(luigi.format.Format):
@classmethod
def hdfs_reader(cls, path):
return HdfsReadPipe(path)
@classmethod
def pipe_writer(cls, output_pipe):
return output_pipe
class PlainDir(luigi.format.Format):
@classmethod
def hdfs_reader(cls, path):
# exclude underscore-prefixed files/folders (created by MapReduce)
return HdfsReadPipe("%s/[^_]*" % path)
@classmethod
def hdfs_writer(cls, path):
return HdfsAtomicWriteDirPipe(path)
class HdfsTarget(FileSystemTarget):
fs = client # underlying file system
def __init__(self, path=None, format=Plain, is_tmp=False):
if path is None:
assert is_tmp
path = tmppath()
super(HdfsTarget, self).__init__(path)
self.format = format
self.is_tmp = is_tmp
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(path)
assert ":" not in path # colon is not allowed in hdfs filenames
def __del__(self):
#TODO: not sure is_tmp belongs in Targets construction arguments
if self.is_tmp and self.exists():
self.remove()
@property
def fn(self):
""" Deprecated. Use path property instead """
import warnings
warnings.warn("target.fn is deprecated and will be removed soon\
in luigi. Use target.path instead", stacklevel=2)
return self.path
def get_fn(self):
""" Deprecated. Use path property instead """
import warnings
warnings.warn("target.get_fn() is deprecated and will be removed soon\
in luigi. Use target.path instead", stacklevel=2)
return self.path
def glob_exists(self, expected_files):
ls = list(listdir(self.path))
if len(ls) == expected_files:
return True
return False
def open(self, mode='r'):
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
try:
return self.format.hdfs_reader(self.path)
except NotImplementedError:
return self.format.pipe_reader(HdfsReadPipe(self.path))
else:
try:
return self.format.hdfs_writer(self.path)
except NotImplementedError:
return self.format.pipe_writer(HdfsAtomicWritePipe(self.path))
def remove(self, skip_trash=False):
remove(self.path, skip_trash=skip_trash)
def rename(self, path, fail_if_exists=False):
# rename does not change self.path, so be careful with assumptions
if isinstance(path, HdfsTarget):
path = path.path
if fail_if_exists and exists(path):
raise RuntimeError('Destination exists: %s' % path)
rename(self.path, path)
def move(self, path, fail_if_exists=False):
self.rename(path, fail_if_exists=fail_if_exists)
def move_dir(self, path):
# mkdir will fail if directory already exists, thereby ensuring atomicity
if isinstance(path, HdfsTarget):
path = path.path
mkdir(path)
rename(self.path + '/*', path)
self.remove()
def is_writable(self):
if "/" in self.path:
# example path: /log/ap/2013-01-17/00
parts = self.path.split("/")
# start with the full path and then up the tree until we can check
length = len(parts)
for part in xrange(length):
path = "/".join(parts[0:length - part]) + "/"
if exists(path):
# if the path exists and we can write there, great!
if self._is_writable(path):
return True
# if it exists and we can't =( sad panda
else:
return False
# We went through all parts of the path and we still couldn't find
# one that exists.
return False
def _is_writable(self, path):
test_path = path + '.test_write_access-%09d' % random.randrange(1e10)
return_value = subprocess.call([load_hadoop_cmd(), 'fs', '-touchz', test_path])
if return_value != 0:
return False
else:
remove(test_path, recursive=False)
return True
| 1 | 8,856 |
I think it would be better to have a separate configuration option for specifying which HDFS interaction layer to use instead of using the existing hadoop version option. I think there are still cases where things might have to rely on Hadoop version for other things, and overwriting that information with "snakebite" would make that difficult. The existing hadoop version option could still be used to distinguish the different HDFS CLI flavours.
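A rough sketch of the separate-option idea, assuming it sits alongside the existing get_hdfs_syntax helper so the configuration import resolves; the "client" option name and the get_hdfs_client function are illustrative, not the actual luigi API.

import configuration

def get_hdfs_client():
    """Pick the HDFS interaction layer from its own config option.

    "version" keeps describing the Hadoop CLI flavour (cdh3/cdh4/apache1),
    while "client" selects snakebite vs. the CLI wrapper, falling back to
    the CLI when snakebite is not importable.
    """
    configured = configuration.get_config().get("hadoop", "client", "snakebite").lower()
    if configured == "snakebite":
        try:
            import snakebite  # only checking that the library is available
            return "snakebite"
        except ImportError:
            return "cli"
    return "cli"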
|
spotify-luigi
|
py
|
@@ -575,10 +575,15 @@ module Bolt
outputter.print_task_info(pal.get_task(task_name))
end
+ # Filters a list of content by matching substring.
+ #
+ private def filter_content(content, filter)
+ return content unless content && filter
+ content.select { |name,| name.include?(filter) }
+ end
+
def list_tasks
- tasks = pal.list_tasks
- tasks.select! { |task| task.first.include?(options[:filter]) } if options[:filter]
- tasks.select! { |task| config.project.tasks.include?(task.first) } unless config.project.tasks.nil?
+ tasks = filter_content(pal.list_tasks(filter_content: true), options[:filter])
outputter.print_tasks(tasks, pal.user_modulepath)
end
| 1 |
# frozen_string_literal: true
# Avoid requiring the CLI from other files. It has side-effects - such as loading r10k -
# that are undesirable when using Bolt as a library.
require 'uri'
require 'benchmark'
require 'json'
require 'io/console'
require 'logging'
require 'optparse'
require 'bolt/analytics'
require 'bolt/bolt_option_parser'
require 'bolt/config'
require 'bolt/error'
require 'bolt/executor'
require 'bolt/inventory'
require 'bolt/logger'
require 'bolt/module_installer'
require 'bolt/outputter'
require 'bolt/pal'
require 'bolt/plan_creator'
require 'bolt/plugin'
require 'bolt/project_migrator'
require 'bolt/puppetdb'
require 'bolt/rerun'
require 'bolt/secret'
require 'bolt/target'
require 'bolt/version'
module Bolt
class CLIExit < StandardError; end
class CLI
COMMANDS = {
'command' => %w[run],
'script' => %w[run],
'task' => %w[show run],
'plan' => %w[show run convert new],
'file' => %w[download upload],
'puppetfile' => %w[install show-modules generate-types],
'secret' => %w[encrypt decrypt createkeys],
'inventory' => %w[show],
'group' => %w[show],
'project' => %w[init migrate],
'module' => %w[add generate-types install show],
'apply' => %w[],
'guide' => %w[]
}.freeze
attr_reader :config, :options
def initialize(argv)
Bolt::Logger.initialize_logging
@logger = Bolt::Logger.logger(self)
@argv = argv
@options = {}
end
# Only call after @config has been initialized.
def inventory
@inventory ||= Bolt::Inventory.from_config(config, plugins)
end
private :inventory
def help?(remaining)
# Set the subcommand
options[:subcommand] = remaining.shift
if options[:subcommand] == 'help'
options[:help] = true
options[:subcommand] = remaining.shift
end
# This section handles parsing non-flag options which are
      # subcommand specific rather than part of the config
actions = COMMANDS[options[:subcommand]]
if actions && !actions.empty?
options[:action] = remaining.shift
end
options[:help]
end
private :help?
# Wrapper method that is called by the Bolt executable. Parses the command and
# then loads the project and config. Once config is loaded, it completes the
# setup process by configuring Bolt and logging messages.
#
# This separation is needed since the Bolt::Outputter class that normally handles
# printing errors relies on config being loaded. All setup that happens before
# config is loaded will have errors printed directly to stdout, while all errors
# raised after config is loaded are handled by the outputter.
def parse
parse_command
load_config
finalize_setup
end
# Parses the command and validates options. All errors that are raised here
# are not handled by the outputter, as it relies on config being loaded.
def parse_command
parser = BoltOptionParser.new(options)
# This part aims to handle both `bolt <mode> --help` and `bolt help <mode>`.
remaining = handle_parser_errors { parser.permute(@argv) } unless @argv.empty?
if @argv.empty? || help?(remaining)
# If the subcommand is not enabled, display the default
# help text
options[:subcommand] = nil unless COMMANDS.include?(options[:subcommand])
# Update the parser for the subcommand (or lack thereof)
parser.update
puts parser.help
raise Bolt::CLIExit
end
options[:object] = remaining.shift
# Handle reading a command from a file
if options[:subcommand] == 'command' && options[:object]
options[:object] = Bolt::Util.get_arg_input(options[:object])
end
# Only parse task_options for task or plan
if %w[task plan].include?(options[:subcommand])
task_options, remaining = remaining.partition { |s| s =~ /.+=/ }
if options[:task_options]
unless task_options.empty?
raise Bolt::CLIError,
"Parameters must be specified through either the --params " \
"option or param=value pairs, not both"
end
options[:params_parsed] = true
elsif task_options.any?
options[:params_parsed] = false
options[:task_options] = Hash[task_options.map { |a| a.split('=', 2) }]
else
options[:params_parsed] = true
options[:task_options] = {}
end
end
options[:leftovers] = remaining
# Default to verbose for everything except plans
unless options.key?(:verbose)
options[:verbose] = options[:subcommand] != 'plan'
end
validate(options)
# Deprecation warnings can't be issued until after config is loaded, so
# store them for later.
@parser_deprecations = parser.deprecations
rescue Bolt::Error => e
fatal_error(e)
raise e
end
# Loads the project and configuration. All errors that are raised here are not
# handled by the outputter, as it relies on config being loaded.
def load_config
@config = if ENV['BOLT_PROJECT']
project = Bolt::Project.create_project(ENV['BOLT_PROJECT'], 'environment')
Bolt::Config.from_project(project, options)
elsif options[:configfile]
Bolt::Config.from_file(options[:configfile], options)
else
project = if options[:boltdir]
dir = Pathname.new(options[:boltdir])
if (dir + Bolt::Project::BOLTDIR_NAME).directory?
Bolt::Project.create_project(dir + Bolt::Project::BOLTDIR_NAME)
else
Bolt::Project.create_project(dir)
end
else
Bolt::Project.find_boltdir(Dir.pwd)
end
Bolt::Config.from_project(project, options)
end
rescue Bolt::Error => e
fatal_error(e)
raise e
end
# Completes the setup process by configuring Bolt and log messages
def finalize_setup
Bolt::Logger.configure(config.log, config.color)
Bolt::Logger.analytics = analytics
# Logger must be configured before checking path case and project file, otherwise logs will not display
config.check_path_case('modulepath', config.modulepath)
config.project.check_deprecated_file
# Log messages created during parser and config initialization
config.logs.each { |log| @logger.send(log.keys[0], log.values[0]) }
@parser_deprecations.each { |dep| Bolt::Logger.deprecation_warning(dep[:type], dep[:msg]) }
config.deprecations.each { |dep| Bolt::Logger.deprecation_warning(dep[:type], dep[:msg]) }
warn_inventory_overrides_cli(options)
options
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
end
def update_targets(options)
target_opts = options.keys.select { |opt| %i[query rerun targets].include?(opt) }
target_string = "'--targets', '--rerun', or '--query'"
if target_opts.length > 1
raise Bolt::CLIError, "Only one targeting option #{target_string} may be specified"
elsif target_opts.empty? && options[:subcommand] != 'plan'
raise Bolt::CLIError, "Command requires a targeting option: #{target_string}"
end
targets = if options[:query]
query_puppetdb_nodes(options[:query])
elsif options[:rerun]
rerun.get_targets(options[:rerun])
else
options[:targets] || []
end
options[:target_args] = targets
options[:targets] = inventory.get_targets(targets)
end
def validate(options)
unless COMMANDS.include?(options[:subcommand])
command = Bolt::Util.powershell? ? 'Get-Command -Module PuppetBolt' : 'bolt help'
raise Bolt::CLIError,
"'#{options[:subcommand]}' is not a Bolt command. See '#{command}'."
end
actions = COMMANDS[options[:subcommand]]
if actions.any?
if options[:action].nil?
raise Bolt::CLIError,
"Expected an action of the form 'bolt #{options[:subcommand]} <action>'"
end
unless actions.include?(options[:action])
raise Bolt::CLIError,
"Expected action '#{options[:action]}' to be one of " \
"#{actions.join(', ')}"
end
end
if %w[task plan].include?(options[:subcommand]) && options[:action] == 'run'
if options[:object].nil?
raise Bolt::CLIError, "Must specify a #{options[:subcommand]} to run"
end
# This may mean that we parsed a parameter as the object
unless options[:object] =~ /\A([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*\Z/
raise Bolt::CLIError,
"Invalid #{options[:subcommand]} '#{options[:object]}'"
end
end
if options[:subcommand] == 'apply' && (options[:object] && options[:code])
raise Bolt::CLIError, "--execute is unsupported when specifying a manifest file"
end
if options[:subcommand] == 'apply' && (!options[:object] && !options[:code])
raise Bolt::CLIError, "a manifest file or --execute is required"
end
if options[:subcommand] == 'command' && (!options[:object] || options[:object].empty?)
raise Bolt::CLIError, "Must specify a command to run"
end
if options[:subcommand] == 'secret' &&
(options[:action] == 'decrypt' || options[:action] == 'encrypt') &&
!options[:object]
raise Bolt::CLIError, "Must specify a value to #{options[:action]}"
end
if options[:subcommand] == 'plan' && options[:action] == 'new' && !options[:object]
raise Bolt::CLIError, "Must specify a plan name."
end
if options[:subcommand] == 'module' && options[:action] == 'add' && !options[:object]
raise Bolt::CLIError, "Must specify a module name."
end
if options[:subcommand] == 'module' && options[:action] == 'install' && options[:object]
command = Bolt::Util.powershell? ? 'Add-BoltModule -Module' : 'bolt module add'
raise Bolt::CLIError, "Invalid argument '#{options[:object]}'. To add a new module to "\
"the project, run '#{command} #{options[:object]}'."
end
if options[:subcommand] != 'file' && options[:subcommand] != 'script' &&
!options[:leftovers].empty?
raise Bolt::CLIError,
"Unknown argument(s) #{options[:leftovers].join(', ')}"
end
if options[:boltdir] && options[:configfile]
raise Bolt::CLIError, "Only one of '--boltdir', '--project', or '--configfile' may be specified"
end
if options[:noop] &&
!(options[:subcommand] == 'task' && options[:action] == 'run') && options[:subcommand] != 'apply'
raise Bolt::CLIError,
"Option '--noop' may only be specified when running a task or applying manifest code"
end
if options[:env_vars]
unless %w[command script].include?(options[:subcommand]) && options[:action] == 'run'
raise Bolt::CLIError,
"Option '--env-var' may only be specified when running a command or script"
end
end
if options.key?(:debug) && options.key?(:log)
raise Bolt::CLIError, "Only one of '--debug' or '--log-level' may be specified"
end
end
def handle_parser_errors
yield
rescue OptionParser::MissingArgument => e
raise Bolt::CLIError, "Option '#{e.args.first}' needs a parameter"
rescue OptionParser::InvalidArgument => e
raise Bolt::CLIError, "Invalid parameter specified for option '#{e.args.first}': #{e.args[1]}"
rescue OptionParser::InvalidOption, OptionParser::AmbiguousOption => e
raise Bolt::CLIError, "Unknown argument '#{e.args.first}'"
end
def puppetdb_client
plugins.puppetdb_client
end
def plugins
@plugins ||= Bolt::Plugin.setup(config, pal, analytics)
end
def query_puppetdb_nodes(query)
puppetdb_client.query_certnames(query)
end
def warn_inventory_overrides_cli(opts)
inventory_source = if ENV[Bolt::Inventory::ENVIRONMENT_VAR]
Bolt::Inventory::ENVIRONMENT_VAR
elsif config.inventoryfile && Bolt::Util.file_stat(config.inventoryfile)
config.inventoryfile
else
begin
Bolt::Util.file_stat(config.default_inventoryfile)
config.default_inventoryfile
rescue Errno::ENOENT
nil
end
end
inventory_cli_opts = %i[authentication escalation transports].each_with_object([]) do |key, acc|
acc.concat(Bolt::BoltOptionParser::OPTIONS[key])
end
inventory_cli_opts.concat(%w[no-host-key-check no-ssl no-ssl-verify no-tty])
conflicting_options = Set.new(opts.keys.map(&:to_s)).intersection(inventory_cli_opts)
if inventory_source && conflicting_options.any?
@logger.warn("CLI arguments #{conflicting_options.to_a} may be overridden by Inventory: #{inventory_source}")
end
end
def execute(options)
message = nil
handler = Signal.trap :INT do |signo|
@logger.info(
"Exiting after receiving SIG#{Signal.signame(signo)} signal.#{message ? ' ' + message : ''}"
)
exit!
end
# Initialize inventory and targets. Errors here are better to catch early.
      # options[:target_args] will contain a string/array version of the targeting options; this is passed to plans
# options[:targets] will contain a resolved set of Target objects
unless %w[guide module project puppetfile secret].include?(options[:subcommand]) ||
%w[convert new show].include?(options[:action])
update_targets(options)
end
screen = "#{options[:subcommand]}_#{options[:action]}"
# submit a different screen for `bolt task show` and `bolt task show foo`
if options[:action] == 'show' && options[:object]
screen += '_object'
end
screen_view_fields = {
output_format: config.format,
# For continuity
boltdir_type: config.project.type
}
# Only include target and inventory info for commands that take a targets
# list. This avoids loading inventory for commands that don't need it.
if options.key?(:targets)
screen_view_fields.merge!(target_nodes: options[:targets].count,
inventory_nodes: inventory.node_names.count,
inventory_groups: inventory.group_names.count,
inventory_version: inventory.version)
end
analytics.screen_view(screen, **screen_view_fields)
case options[:action]
when 'show'
case options[:subcommand]
when 'task'
if options[:object]
show_task(options[:object])
else
list_tasks
end
when 'plan'
if options[:object]
show_plan(options[:object])
else
list_plans
end
when 'inventory'
if options[:detail]
show_targets
else
list_targets
end
when 'group'
list_groups
when 'module'
list_modules
end
return 0
when 'show-modules'
list_modules
return 0
when 'convert'
pal.convert_plan(options[:object])
return 0
end
message = 'There may be processes left executing on some nodes.'
if %w[task plan].include?(options[:subcommand]) && options[:task_options] && !options[:params_parsed] && pal
options[:task_options] = pal.parse_params(options[:subcommand], options[:object], options[:task_options])
end
case options[:subcommand]
when 'guide'
code = if options[:object]
show_guide(options[:object])
else
list_topics
end
when 'project'
case options[:action]
when 'init'
code = initialize_project
when 'migrate'
code = Bolt::ProjectMigrator.new(config, outputter).migrate
end
when 'plan'
case options[:action]
when 'new'
command = Bolt::Util.powershell? ? 'New-BoltPlan' : 'bolt plan new'
@logger.warn("Command '#{command}' is experimental and subject to changes.")
plan_name = options[:object]
# If this passes validation, it will return the path to the plan to create
Bolt::PlanCreator.validate_input(config.project, plan_name)
code = Bolt::PlanCreator.create_plan(config.project.plans_path,
plan_name,
outputter,
options[:puppet])
when 'run'
code = run_plan(options[:object], options[:task_options], options[:target_args], options)
end
when 'module'
case options[:action]
when 'add'
code = add_project_module(options[:object], config.project)
when 'install'
code = install_project_modules(config.project, options[:force], options[:resolve])
when 'generate-types'
code = generate_types
end
when 'puppetfile'
case options[:action]
when 'generate-types'
code = generate_types
when 'install'
code = install_puppetfile(
config.puppetfile_config,
config.puppetfile,
config.modulepath.first
)
end
when 'secret'
code = Bolt::Secret.execute(plugins, outputter, options)
when 'apply'
if options[:object]
validate_file('manifest', options[:object])
options[:code] = File.read(File.expand_path(options[:object]))
end
code = apply_manifest(options[:code], options[:targets], options[:object], options[:noop])
else
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop], config.modified_concurrency)
targets = options[:targets]
results = nil
outputter.print_head
elapsed_time = Benchmark.realtime do
executor_opts = {}
executor_opts[:description] = options[:description] if options.key?(:description)
executor_opts[:env_vars] = options[:env_vars] if options.key?(:env_vars)
executor.subscribe(outputter)
executor.subscribe(log_outputter)
results =
case options[:subcommand]
when 'command'
executor.run_command(targets, options[:object], executor_opts)
when 'script'
script = options[:object]
validate_file('script', script)
executor.run_script(targets, script, options[:leftovers], executor_opts)
when 'task'
pal.run_task(options[:object],
targets,
options[:task_options],
executor,
inventory,
options[:description])
when 'file'
src = options[:object]
dest = options[:leftovers].first
if src.nil?
raise Bolt::CLIError, "A source path must be specified"
end
if dest.nil?
raise Bolt::CLIError, "A destination path must be specified"
end
case options[:action]
when 'download'
dest = File.expand_path(dest, Dir.pwd)
executor.download_file(targets, src, dest, executor_opts)
when 'upload'
validate_file('source file', src, true)
executor.upload_file(targets, src, dest, executor_opts)
end
end
end
executor.shutdown
rerun.update(results)
outputter.print_summary(results, elapsed_time)
code = results.ok ? 0 : 2
end
code
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
ensure
# restore original signal handler
Signal.trap :INT, handler if handler
analytics&.finish
end
def show_task(task_name)
outputter.print_task_info(pal.get_task(task_name))
end
def list_tasks
tasks = pal.list_tasks
tasks.select! { |task| task.first.include?(options[:filter]) } if options[:filter]
tasks.select! { |task| config.project.tasks.include?(task.first) } unless config.project.tasks.nil?
outputter.print_tasks(tasks, pal.user_modulepath)
end
def show_plan(plan_name)
outputter.print_plan_info(pal.get_plan_info(plan_name))
end
def list_plans
plans = pal.list_plans
plans.select! { |plan| plan.first.include?(options[:filter]) } if options[:filter]
plans.select! { |plan| config.project.plans.include?(plan.first) } unless config.project.plans.nil?
outputter.print_plans(plans, pal.user_modulepath)
end
def list_targets
inventoryfile = config.inventoryfile || config.default_inventoryfile
# Retrieve the known group and target names. This needs to be done before
# updating targets, as that will add adhoc targets to the inventory.
known_names = inventory.target_names
update_targets(options)
inventory_targets, adhoc_targets = options[:targets].partition do |target|
known_names.include?(target.name)
end
target_list = {
inventory: inventory_targets,
adhoc: adhoc_targets
}
outputter.print_targets(target_list, inventoryfile)
end
def show_targets
update_targets(options)
outputter.print_target_info(options[:targets])
end
def list_groups
groups = inventory.group_names
outputter.print_groups(groups)
end
def run_plan(plan_name, plan_arguments, nodes, options)
unless nodes.empty?
if plan_arguments['nodes'] || plan_arguments['targets']
key = plan_arguments.include?('nodes') ? 'nodes' : 'targets'
raise Bolt::CLIError,
"A plan's '#{key}' parameter may be specified using the --#{key} option, but in that " \
"case it must not be specified as a separate #{key}=<value> parameter nor included " \
"in the JSON data passed in the --params option"
end
plan_params = pal.get_plan_info(plan_name)['parameters']
target_param = plan_params.dig('targets', 'type') =~ /TargetSpec/
node_param = plan_params.include?('nodes')
if node_param && target_param
msg = "Plan parameters include both 'nodes' and 'targets' with type 'TargetSpec', " \
"neither will populated with the value for --nodes or --targets."
@logger.warn(msg)
elsif node_param
plan_arguments['nodes'] = nodes.join(',')
elsif target_param
plan_arguments['targets'] = nodes.join(',')
end
end
plan_context = { plan_name: plan_name,
params: plan_arguments }
plan_context[:description] = options[:description] if options[:description]
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop], config.modified_concurrency)
if %w[human rainbow].include?(options.fetch(:format, 'human'))
executor.subscribe(outputter)
else
# Only subscribe to out::message events for JSON outputter
executor.subscribe(outputter, [:message])
end
executor.subscribe(log_outputter)
executor.start_plan(plan_context)
result = pal.run_plan(plan_name, plan_arguments, executor, inventory, puppetdb_client)
# If a non-bolt exception bubbles up the plan won't get finished
executor.finish_plan(result)
executor.shutdown
rerun.update(result)
outputter.print_plan_result(result)
result.ok? ? 0 : 1
end
def apply_manifest(code, targets, filename = nil, noop = false)
Puppet[:tasks] = false
ast = pal.parse_manifest(code, filename)
if defined?(ast.body) &&
(ast.body.is_a?(Puppet::Pops::Model::HostClassDefinition) ||
ast.body.is_a?(Puppet::Pops::Model::ResourceTypeDefinition))
message = "Manifest only contains definitions and will result in no changes on the targets. "\
"Definitions must be declared for their resources to be applied. You can read more "\
"about defining and declaring classes and types in the Puppet documentation at "\
"https://puppet.com/docs/puppet/latest/lang_classes.html and "\
"https://puppet.com/docs/puppet/latest/lang_defined_types.html"
@logger.warn(message)
end
executor = Bolt::Executor.new(config.concurrency, analytics, noop, config.modified_concurrency)
executor.subscribe(outputter) if options.fetch(:format, 'human') == 'human'
executor.subscribe(log_outputter)
# apply logging looks like plan logging, so tell the outputter we're in a
# plan even though we're not
executor.publish_event(type: :plan_start, plan: nil)
results = nil
elapsed_time = Benchmark.realtime do
pal.in_plan_compiler(executor, inventory, puppetdb_client) do |compiler|
compiler.call_function('apply_prep', targets)
end
results = pal.with_bolt_executor(executor, inventory, puppetdb_client) do
Puppet.lookup(:apply_executor).apply_ast(ast, targets, catch_errors: true, noop: noop)
end
end
executor.shutdown
outputter.print_apply_result(results, elapsed_time)
rerun.update(results)
results.ok ? 0 : 1
end
def list_modules
assert_puppetfile_or_module_command(config.project.modules)
outputter.print_module_list(pal.list_modules)
end
def generate_types
assert_puppetfile_or_module_command(config.project.modules)
# generate_types will surface a nice error with helpful message if it fails
pal.generate_types
0
end
# Initializes a specified directory as a Bolt project and installs any modules
# specified by the user, along with their dependencies
def initialize_project
# Dir.pwd will return backslashes on Windows, but Pathname always uses
# forward slashes to concatenate paths. This results in paths like
      # C:\User\Administrator/modules, which fail module install. This ensures
# forward slashes in the cwd path.
dir = File.expand_path(Dir.pwd)
name = options[:object] || File.basename(dir)
if name !~ Bolt::Module::MODULE_NAME_REGEX
if options[:object]
raise Bolt::ValidationError, "The provided project name '#{name}' is invalid; "\
"project name must begin with a lowercase letter and can include lowercase "\
"letters, numbers, and underscores."
else
command = Bolt::Util.powershell? ? 'New-BoltProject -Name <NAME>' : 'bolt project init <NAME>'
raise Bolt::ValidationError, "The current directory name '#{name}' is an invalid "\
"project name. Please specify a name using '#{command}'."
end
end
project = Pathname.new(dir)
old_config = project + 'bolt.yaml'
config = project + 'bolt-project.yaml'
puppetfile = project + 'Puppetfile'
moduledir = project + 'modules'
# Warn the user if the project directory already exists. We don't error
# here since users might not have installed any modules yet. If both
# bolt.yaml and bolt-project.yaml exist, this will just warn about
# bolt-project.yaml and subsequent Bolt actions will warn about both files
# existing.
if config.exist?
@logger.warn "Found existing project directory at #{project}. Skipping file creation."
elsif old_config.exist?
@logger.warn "Found existing #{old_config.basename} at #{project}. "\
"#{old_config.basename} is deprecated, please rename to #{config.basename}."
end
# If modules were specified, first check if there is already a Puppetfile
# at the project directory, erroring if there is. If there is no
# Puppetfile, install the specified modules. The module installer will
# resolve dependencies, generate a Puppetfile, and install the modules.
if options[:modules]
if puppetfile.exist?
raise Bolt::CLIError,
"Found existing Puppetfile at #{puppetfile}, unable to initialize "\
"project with modules."
end
installer = Bolt::ModuleInstaller.new(outputter, pal)
installer.install(options[:modules], puppetfile, moduledir)
end
# If either bolt.yaml or bolt-project.yaml exist, the user has already
# been warned and we can just finish project creation. Otherwise, create a
# bolt-project.yaml with the project name in it.
unless config.exist? || old_config.exist?
begin
content = { 'name' => name }
File.write(config.to_path, content.to_yaml)
outputter.print_message "Successfully created Bolt project at #{project}"
rescue StandardError => e
raise Bolt::FileError.new("Could not create bolt-project.yaml at #{project}: #{e.message}", nil)
end
end
0
end
# Installs modules declared in the project configuration file.
#
def install_project_modules(project, force, resolve)
assert_project_file(project)
assert_puppetfile_or_module_command(project.modules)
unless project.modules
outputter.print_message "Project configuration file #{project.project_file} does not "\
"specify any module dependencies. Nothing to do."
return 0
end
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = installer.install(project.modules,
project.puppetfile,
project.managed_moduledir,
force: force,
resolve: resolve)
ok ? 0 : 1
end
# Adds a single module to the project.
#
def add_project_module(name, project)
assert_project_file(project)
assert_puppetfile_or_module_command(project.modules)
modules = project.modules || []
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = installer.add(name,
modules,
project.puppetfile,
project.managed_moduledir,
project.project_file)
ok ? 0 : 1
end
# Asserts that there is a project configuration file.
#
def assert_project_file(project)
unless project.project_file?
msg = if project.config_file.exist?
command = Bolt::Util.powershell? ? 'Update-BoltProject' : 'bolt project migrate'
"Detected Bolt configuration file #{project.config_file}, unable to install "\
"modules. To update to a project configuration file, run '#{command}'."
else
command = Bolt::Util.powershell? ? 'New-BoltProject' : 'bolt project init'
"Could not find project configuration file #{project.project_file}, unable "\
"to install modules. To create a Bolt project, run '#{command}'."
end
raise Bolt::Error.new(msg, 'bolt/missing-project-config-error')
end
end
# Loads a Puppetfile and installs its modules.
#
def install_puppetfile(puppetfile_config, puppetfile, moduledir)
assert_puppetfile_or_module_command(config.project.modules)
outputter.print_message("Installing modules from Puppetfile")
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = installer.install_puppetfile(puppetfile, moduledir, puppetfile_config)
ok ? 0 : 1
end
# Raises an error if the 'puppetfile install' command is deprecated due to
# modules being configured.
#
def assert_puppetfile_or_module_command(modules)
if Bolt::Util.powershell?
case options[:action]
when 'generate-types'
old_command = 'Register-BoltPuppetfileTypes'
new_command = 'Register-BoltModuleTypes'
when 'install'
old_command = 'Install-BoltPuppetfile'
new_command = 'Install-BoltModule'
when 'show', 'show-modules'
old_command = 'Get-BoltPuppetfileModules'
new_command = 'Get-BoltModule'
end
else
old_command = "bolt puppetfile #{options[:action]}"
new_command = if options[:action] == 'show-modules'
'bolt module show'
else
"bolt module #{options[:action]}"
end
end
if modules && options[:subcommand] == 'puppetfile'
raise Bolt::CLIError,
"Unable to use command '#{old_command}' when 'modules' is configured in "\
"bolt-project.yaml. Use '#{new_command}' instead."
elsif modules.nil? && options[:subcommand] == 'module'
msg = "Unable to use command '#{new_command}' when 'modules' is not configured in "\
"bolt-project.yaml. "
msg += "Use '#{old_command}' instead." if options[:action] != 'add'
raise Bolt::CLIError, msg
end
end
def pal
@pal ||= Bolt::PAL.new(Bolt::Config::Modulepath.new(config.modulepath),
config.hiera_config,
config.project.resource_types,
config.compile_concurrency,
config.trusted_external,
config.apply_settings,
config.project)
end
# Collects the list of Bolt guides and maps them to their topics.
def guides
@guides ||= begin
root_path = File.expand_path(File.join(__dir__, '..', '..', 'guides'))
files = Dir.children(root_path).sort
files.each_with_object({}) do |file, guides|
next if file !~ /\.txt\z/
topic = File.basename(file, '.txt')
guides[topic] = File.join(root_path, file)
end
rescue SystemCallError => e
raise Bolt::FileError.new("#{e.message}: unable to load guides directory", root_path)
end
end
# Display the list of available Bolt guides.
def list_topics
outputter.print_topics(guides.keys)
0
end
# Display a specific Bolt guide.
def show_guide(topic)
if guides[topic]
analytics.event('Guide', 'known_topic', label: topic)
begin
guide = File.read(guides[topic])
rescue SystemCallError => e
          raise Bolt::FileError.new("#{e.message}: unable to load guide page", guides[topic])
end
outputter.print_guide(guide, topic)
else
analytics.event('Guide', 'unknown_topic', label: topic)
outputter.print_message("Did not find guide for topic '#{topic}'.\n\n")
list_topics
end
0
end
def validate_file(type, path, allow_dir = false)
if path.nil?
raise Bolt::CLIError, "A #{type} must be specified"
end
Bolt::Util.validate_file(type, path, allow_dir)
end
def rerun
@rerun ||= Bolt::Rerun.new(config.rerunfile, config.save_rerun)
end
def outputter
@outputter ||= Bolt::Outputter.for_format(config.format, config.color, options[:verbose], config.trace)
end
def log_outputter
@log_outputter ||= Bolt::Outputter::Logger.new(options[:verbose], config.trace)
end
def analytics
@analytics ||= begin
client = Bolt::Analytics.build_client
client.bundled_content = bundled_content
client
end
end
def bundled_content
# If the bundled content directory is empty, Bolt is likely installed as a gem.
if ENV['BOLT_GEM'].nil? && incomplete_install?
msg = <<~MSG.chomp
Bolt may be installed as a gem. To use Bolt reliably and with all of its
dependencies, uninstall the 'bolt' gem and install Bolt as a package:
https://puppet.com/docs/bolt/latest/bolt_installing.html
If you meant to install Bolt as a gem and want to disable this warning,
set the BOLT_GEM environment variable.
MSG
@logger.warn(msg)
end
# We only need to enumerate bundled content when running a task or plan
content = { 'Plan' => [],
'Task' => [],
'Plugin' => Bolt::Plugin::BUILTIN_PLUGINS }
if %w[plan task].include?(options[:subcommand]) && options[:action] == 'run'
default_content = Bolt::PAL.new(Bolt::Config::Modulepath.new([]), nil, nil)
content['Plan'] = default_content.list_plans.each_with_object([]) do |iter, col|
col << iter&.first
end
content['Task'] = default_content.list_tasks.each_with_object([]) do |iter, col|
col << iter&.first
end
end
content
end
# Gem installs include the aggregate, canary, and puppetdb_fact modules, while
# package installs include modules listed in the Bolt repo Puppetfile
def incomplete_install?
(Dir.children(Bolt::Config::Modulepath::MODULES_PATH) - %w[aggregate canary puppetdb_fact secure_env_vars]).empty?
end
    # Mimics the output from Outputter::Human#fatal_error. This should be used to print
# errors prior to config being loaded, as the outputter relies on config being loaded.
def fatal_error(error)
if $stdout.isatty
$stdout.puts("\033[31m#{error.message}\033[0m")
else
$stdout.puts(error.message)
end
end
end
end
| 1 | 16,881 |
Why are we filtering content twice? It seems like just filtering it in PAL should work.
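As a rough sketch of the single-filter alternative, assuming `pal.list_tasks` could accept the substring filter itself (the `filter:` keyword argument below is hypothetical, not Bolt's current PAL API):

    # Hypothetical: PAL applies both the project allow-list and the substring
    # filter, so the CLI no longer filters the same list a second time.
    def list_tasks
      tasks = pal.list_tasks(filter_content: true, filter: options[:filter])
      outputter.print_tasks(tasks, pal.user_modulepath)
    end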
|
puppetlabs-bolt
|
rb
|
@@ -17,11 +17,11 @@
*/
export const ACCOUNT_STATUS_NONE = 'none';
+export const ACCOUNT_STATUS_MULTIPLE = 'multiple';
export const ACCOUNT_STATUS_DISAPPROVED = 'disapproved';
export const ACCOUNT_STATUS_GRAYLISTED = 'graylisted';
-export const ACCOUNT_STATUS_MULTIPLE = 'multiple';
-export const ACCOUNT_STATUS_NO_CLIENT = 'no-client';
export const ACCOUNT_STATUS_PENDING = 'pending';
+export const ACCOUNT_STATUS_NO_CLIENT = 'no-client';
export const ACCOUNT_STATUS_APPROVED = 'approved';
// TODO: Expand the statuses provided here to be more specific in case the
| 1 |
/**
 * Status utilities.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
export const ACCOUNT_STATUS_NONE = 'none';
export const ACCOUNT_STATUS_DISAPPROVED = 'disapproved';
export const ACCOUNT_STATUS_GRAYLISTED = 'graylisted';
export const ACCOUNT_STATUS_MULTIPLE = 'multiple';
export const ACCOUNT_STATUS_NO_CLIENT = 'no-client';
export const ACCOUNT_STATUS_PENDING = 'pending';
export const ACCOUNT_STATUS_APPROVED = 'approved';
// TODO: Expand the statuses provided here to be more specific in case the
// AdSense API exposes more of this information.
// At the moment the AdSense API does not provide endpoints to retrieve the
// state of a site, whether it is approved, pending review, or whether the
// user needs to make changes to get it approved. For now, we only have these
// two constants, as making inferences based on the limited data we get back
// from the AdSense API has proved problematic in the past.
export const SITE_STATUS_NONE = 'none';
export const SITE_STATUS_ADDED = 'added';
/**
* Determines the AdSense account status for given input data.
*
* This utility function should be used in combination with data retrieved from
* the datastore, hence passing undefined (loading state) is supported.
*
* @since n.e.x.t
*
* @param {Object} data Input data to determine account status.
* @param {?Array} data.accounts List of account objects retrieved from the API.
* @param {?Array} data.clients List of client objects retrieved from the API.
* @param {?Array} data.alerts List of alert objects retrieved from the API.
* @param {?Object} data.error Error object if one of the API requests failed.
* @param {?string} data.previousAccountID Account ID, if already known from before.
* @param {?string} data.previousClientID Client ID, if already known from before.
* @return {?string} Account status determined, or undefined if one of the required
* parameters is undefined.
*/
export const determineAccountStatus = ( {
accounts,
clients,
alerts,
error,
previousAccountID,
previousClientID,
} ) => {
if ( 'undefined' === typeof accounts || 'undefined' === typeof previousAccountID ) {
return errorToStatus( error );
}
const accountID = determineAccountID( { accounts, previousAccountID } );
if ( ! accountID ) {
// If there are accounts, but the account ID cannot be determined, it
// means that there are multiple accounts and the user needs to select
// one.
if ( accounts.length ) {
return ACCOUNT_STATUS_MULTIPLE;
}
return ACCOUNT_STATUS_NONE;
}
if ( 'undefined' === typeof alerts ) {
return errorToStatus( error );
}
const hasGraylistedAlert = alerts.some( ( alert ) => {
return 'GRAYLISTED_PUBLISHER' === alert.type;
} );
if ( hasGraylistedAlert ) {
return ACCOUNT_STATUS_GRAYLISTED;
}
if ( 'undefined' === typeof clients || 'undefined' === typeof previousClientID ) {
return errorToStatus( error );
}
const clientID = determineClientID( { clients, previousClientID } );
if ( ! clientID ) {
return ACCOUNT_STATUS_NO_CLIENT;
}
return ACCOUNT_STATUS_APPROVED;
};
/**
* Determines the AdSense site status for given input data.
*
* This utility function should be used in combination with data retrieved from
* the datastore, hence passing undefined (loading state) is supported.
*
* The AdSense API currently only surfaces limited information about the approval
* status for a site, so at this point it is only possible to know whether the
* site has been added or not. In other words, the site being added does not
* necessarily mean it has been already approved - there still may be actions
* required for the user.
*
* @since n.e.x.t
*
* @param {Object} data Input data to determine site status.
* @param {?Array} data.urlChannels List of URL channel objects retrieved from the API.
* @param {?string} data.siteURL URL of this website.
* @return {?string} Site status determined, or undefined if one of the required
* parameters is undefined.
*/
export const determineSiteStatus = ( {
urlChannels,
siteURL,
} ) => {
if ( 'undefined' === typeof urlChannels || 'undefined' === typeof siteURL ) {
return undefined;
}
const hasSiteURL = urlChannels.some( ( urlChannel ) => {
return 0 <= siteURL.indexOf( urlChannel.urlPattern );
} );
if ( ! hasSiteURL ) {
return SITE_STATUS_NONE;
}
return SITE_STATUS_ADDED;
};
/**
* Determines the AdSense account ID for given input data.
*
* @since n.e.x.t
*
* @param {Object} data Input data to determine account ID.
* @param {?Array} data.accounts List of account objects retrieved from the API.
* @param {?string} data.previousAccountID Account ID, if already known from before.
* @return {?string} Account ID, empty string if no account ID could be determined,
* or undefined if one of the required parameters is undefined.
*/
export const determineAccountID = ( { accounts, previousAccountID } ) => {
// If loading, nothing to determine.
if ( 'undefined' === typeof accounts ) {
return undefined;
}
// If no accounts, the user needs to create one.
if ( ! accounts.length ) {
return '';
}
// If there are multiple accounts (very rare), we'll need the account ID.
if ( accounts.length > 1 ) {
// If no ID passed, the user will need to select an account first.
if ( ! previousAccountID ) {
return '';
}
// Ensure the passed account ID is actually available.
return accounts.reduce( ( acc, account ) => {
if ( account.id === previousAccountID ) {
return previousAccountID;
}
return acc;
}, '' );
}
// Choose the only account that the user has.
return accounts[ 0 ].id;
};
/**
* Determines the AdSense client ID for given input data.
*
* @since n.e.x.t
*
* @param {Object} data Input data to determine client ID.
* @param {?Array} data.clients List of client objects retrieved from the API.
* @param {?string} data.previousClientID Client ID, if already known from before.
* @return {?string} Client ID, empty string if no client ID could be determined,
* or undefined if one of the required parameters is undefined.
*/
export const determineClientID = ( { clients, previousClientID } ) => {
// If loading, nothing to determine.
if ( 'undefined' === typeof clients ) {
return undefined;
}
// Only AFC (AdSense For Content) clients matter for Site Kit.
const afcClients = clients.filter( ( client ) => {
return 'AFC' === client.productCode;
} );
// If no AFC clients, the user needs to create one.
if ( ! afcClients.length ) {
return '';
}
// If multiple AFC clients and client ID was already known, try looking it up.
if ( afcClients.length > 1 && previousClientID ) {
const clientID = afcClients.reduce( ( acc, client ) => {
if ( client.id === previousClientID ) {
return previousClientID;
}
return acc;
}, '' );
if ( clientID ) {
return clientID;
}
}
// Otherwise, just pick the first AFC client. There should only ever be one anyway.
return afcClients[ 0 ].id;
};
/**
* Transforms an AdSense API error to the appropriate status.
*
* @since n.e.x.t
* @access private
*
* @param {?Object} error Error object or undefined.
* @return {?string} Status based on error, or undefined if no relevant error.
*/
const errorToStatus = ( error ) => {
if ( ! error ) {
return undefined;
}
// These specific errors represent account statuses for our purposes.
if ( 'noAdSenseAccount' === error.data.reason ) {
return ACCOUNT_STATUS_NONE;
}
if ( 'disapprovedAccount' === error.data.reason ) {
return ACCOUNT_STATUS_DISAPPROVED;
}
if ( 'accountPendingReview' === error.data.reason ) {
return ACCOUNT_STATUS_PENDING;
}
return undefined;
};
| 1 | 28,051 |
These should probably move to `constants.js`
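A small sketch of that move; the file name and the assumption that a sibling `constants.js` would live next to this utility file are mine, not the plugin's actual layout:

// constants.js (hypothetical sibling file)
export const ACCOUNT_STATUS_NONE = 'none';
export const ACCOUNT_STATUS_MULTIPLE = 'multiple';
export const ACCOUNT_STATUS_DISAPPROVED = 'disapproved';
export const ACCOUNT_STATUS_GRAYLISTED = 'graylisted';
export const ACCOUNT_STATUS_PENDING = 'pending';
export const ACCOUNT_STATUS_NO_CLIENT = 'no-client';
export const ACCOUNT_STATUS_APPROVED = 'approved';

export const SITE_STATUS_NONE = 'none';
export const SITE_STATUS_ADDED = 'added';

The status utilities would then import these constants instead of declaring them, keeping the shared values separate from the logic.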
|
google-site-kit-wp
|
js
|
@@ -21,6 +21,8 @@ import com.google.common.base.Charsets;
import com.google.common.io.ByteStreams;
import com.google.common.net.MediaType;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
| 1 |
/*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.internal;
import com.google.common.base.Charsets;
import com.google.common.io.ByteStreams;
import com.google.common.net.MediaType;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.message.BasicHttpEntityEnclosingRequest;
import org.apache.http.message.BasicHttpRequest;
import org.apache.http.util.EntityUtils;
import org.openqa.grid.common.exception.ClientGoneException;
import org.openqa.grid.common.exception.GridException;
import org.openqa.grid.internal.listeners.CommandListener;
import org.openqa.grid.web.Hub;
import org.openqa.grid.web.servlet.handler.LegacySeleniumRequest;
import org.openqa.grid.web.servlet.handler.RequestType;
import org.openqa.grid.web.servlet.handler.SeleniumBasedRequest;
import org.openqa.grid.web.servlet.handler.SeleniumBasedResponse;
import org.openqa.grid.web.servlet.handler.WebDriverRequest;
import org.openqa.selenium.io.IOUtils;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.logging.Logger;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Represents a running test for the hub/registry. A test session is created when a TestSlot
 * becomes available for a test. <p/> The session is destroyed when the test ends (ended by the
 * client or timed out).
*/
@SuppressWarnings("JavaDoc")
public class TestSession {
private static final Logger log = Logger.getLogger(TestSession.class.getName());
static final int MAX_IDLE_TIME_BEFORE_CONSIDERED_ORPHANED = 5000;
private final String internalKey;
private final TestSlot slot;
private volatile ExternalSessionKey externalKey = null;
private volatile long sessionCreatedAt;
private volatile long lastActivity;
private final Map<String, Object> requestedCapabilities;
private Map<String, Object> objects = Collections.synchronizedMap(new HashMap<String, Object>());
private volatile boolean ignoreTimeout = false;
private final TimeSource timeSource;
private volatile boolean forwardingRequest;
private final int MAX_NETWORK_LATENCY = 1000;
public String getInternalKey() {
return internalKey;
}
/**
* Creates a test session on the specified testSlot.
*/
public TestSession(TestSlot slot, Map<String, Object> requestedCapabilities,
TimeSource timeSource) {
internalKey = UUID.randomUUID().toString();
this.slot = slot;
this.requestedCapabilities = requestedCapabilities;
this.timeSource = timeSource;
lastActivity = this.timeSource.currentTimeInMillis();
}
/**
   * The capabilities the client requested. They will match the TestSlot capabilities, but are not
   * equal.
*/
public Map<String, Object> getRequestedCapabilities() {
return requestedCapabilities;
}
/**
* Get the session key from the remote. It's up to the remote to guarantee the key is unique. If 2
* remotes return the same session key, the tests will overwrite each other.
*
* @return the key that was provided by the remote when the POST /session command was sent.
*/
public ExternalSessionKey getExternalKey() {
return externalKey;
}
/**
* associate this session to the session provided by the remote.
*/
public void setExternalKey(ExternalSessionKey externalKey) {
this.externalKey = externalKey;
sessionCreatedAt = lastActivity;
}
/**
   * Gives the time in milliseconds since the last access to this test session, or 0 if ignore
   * timeout has been set to true.
*
* @return time in millis
* @see TestSession#setIgnoreTimeout(boolean)
*/
public long getInactivityTime() {
if (ignoreTimeout) {
return 0;
} else {
return timeSource.currentTimeInMillis() - lastActivity;
}
}
public boolean isOrphaned() {
final long elapsedSinceCreation = timeSource.currentTimeInMillis() - sessionCreatedAt;
// The session needs to have been open for at least the time interval and we need to have not
// seen any new commands during that time frame.
return slot.getProtocol().isSelenium()
&& elapsedSinceCreation > MAX_IDLE_TIME_BEFORE_CONSIDERED_ORPHANED
&& sessionCreatedAt == lastActivity;
}
/**
* @return the TestSlot this session is executed against.
*/
public TestSlot getSlot() {
return slot;
}
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
result = prime * result + ((internalKey == null) ? 0 : internalKey.hashCode());
return result;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TestSession other = (TestSession) obj;
return internalKey.equals(other.internalKey);
}
@Override
public String toString() {
return externalKey != null ? "ext. key " + externalKey : internalKey
+ " (int. key, remote not contacted yet.)";
}
private HttpClient getClient() {
Registry reg = slot.getProxy().getRegistry();
int browserTimeout = reg.getConfiguration().getBrowserTimeout();
if (browserTimeout > 0){
final int selenium_server_cleanup_cycle = browserTimeout / 10;
browserTimeout += (selenium_server_cleanup_cycle + MAX_NETWORK_LATENCY);
browserTimeout *=2; // Lets not let this happen too often
}
return slot.getProxy().getHttpClientFactory().getGridHttpClient(browserTimeout, browserTimeout);
}
/**
* forwards the request to the node.
*/
public String forward(SeleniumBasedRequest request, HttpServletResponse response,
boolean newSessionRequest)
throws IOException {
String res = null;
String currentThreadName = Thread.currentThread().getName();
setThreadDisplayName();
forwardingRequest = true;
try {
if (slot.getProxy() instanceof CommandListener) {
((CommandListener) slot.getProxy()).beforeCommand(this, request, response);
}
lastActivity = timeSource.currentTimeInMillis();
HttpRequest proxyRequest = prepareProxyRequest(request/*, config*/);
HttpResponse proxyResponse = sendRequestToNode(proxyRequest);
lastActivity = timeSource.currentTimeInMillis();
HttpEntity responseBody = proxyResponse.getEntity();
try {
final int statusCode = proxyResponse.getStatusLine().getStatusCode();
response.setStatus(statusCode);
processResponseHeaders(request, response, slot.getRemoteURL(), proxyResponse);
byte[] consumedNewWebDriverSessionBody = null;
if (statusCode != HttpServletResponse.SC_INTERNAL_SERVER_ERROR &&
statusCode != HttpServletResponse.SC_NOT_FOUND) {
consumedNewWebDriverSessionBody = updateHubIfNewWebDriverSession(request, proxyResponse);
}
if (newSessionRequest && statusCode == HttpServletResponse.SC_INTERNAL_SERVER_ERROR) {
removeIncompleteNewSessionRequest();
}
if (statusCode == HttpServletResponse.SC_NOT_FOUND) {
removeSessionBrowserTimeout();
}
byte[] contentBeingForwarded = null;
if (responseBody != null) {
try {
InputStream in;
if (consumedNewWebDriverSessionBody == null) {
in = responseBody.getContent();
if (request.getRequestType() == RequestType.START_SESSION
&& request instanceof LegacySeleniumRequest) {
res = getResponseUtf8Content(in);
updateHubNewSeleniumSession(res);
in = new ByteArrayInputStream(res.getBytes("UTF-8"));
}
} else {
in = new ByteArrayInputStream(consumedNewWebDriverSessionBody);
}
final byte[] bytes = drainInputStream(in);
writeRawBody(response, bytes);
contentBeingForwarded = bytes;
} finally {
EntityUtils.consume(responseBody);
}
}
if (slot.getProxy() instanceof CommandListener) {
SeleniumBasedResponse wrappedResponse = new SeleniumBasedResponse(response);
wrappedResponse.setForwardedContent(contentBeingForwarded);
((CommandListener) slot.getProxy()).afterCommand(this, request, wrappedResponse);
}
response.flushBuffer();
} finally {
EntityUtils.consume(responseBody);
}
response.flushBuffer();
return res;
} finally {
forwardingRequest = false;
Thread.currentThread().setName(currentThreadName);
}
}
private void setThreadDisplayName() {
DateFormat dfmt = DateFormat.getTimeInstance();
String name = "Forwarding " + this + " to " + slot.getRemoteURL() + " at " +
dfmt.format(Calendar.getInstance().getTime());
Thread.currentThread().setName(name);
}
private void removeIncompleteNewSessionRequest() {
RemoteProxy proxy = slot.getProxy();
proxy.getRegistry().terminate(this, SessionTerminationReason.CREATIONFAILED);
}
private void removeSessionBrowserTimeout() {
RemoteProxy proxy = slot.getProxy();
proxy.getRegistry().terminate(this, SessionTerminationReason.BROWSER_TIMEOUT);
}
private void updateHubNewSeleniumSession(String content) {
ExternalSessionKey key = ExternalSessionKey.fromResponseBody(content);
setExternalKey(key);
}
private byte[] updateHubIfNewWebDriverSession(
SeleniumBasedRequest request, HttpResponse proxyResponse) throws IOException {
byte[] consumedData = null;
if (request.getRequestType() == RequestType.START_SESSION
&& request instanceof WebDriverRequest) {
Header h = proxyResponse.getFirstHeader("Location");
if (h == null) {
if (isSuccessJsonResponse(proxyResponse) && proxyResponse.getEntity() != null) {
InputStream stream = proxyResponse.getEntity().getContent();
consumedData = ByteStreams.toByteArray(stream);
stream.close();
String contentString = new String(consumedData, Charsets.UTF_8);
ExternalSessionKey key = ExternalSessionKey.fromJsonResponseBody(contentString);
if (key == null) {
throw new GridException(
"webdriver new session JSON response body did not contain a session ID");
}
setExternalKey(key);
return consumedData;
} else {
throw new GridException(
"new session request for webdriver should contain a location header "
+ "or an 'application/json;charset=UTF-8' response body with the session ID.");
}
}
ExternalSessionKey key = ExternalSessionKey.fromWebDriverRequest(h.getValue());
setExternalKey(key);
}
return consumedData;
}
private static boolean isSuccessJsonResponse(HttpResponse response) {
if (response.getStatusLine().getStatusCode() == HttpServletResponse.SC_OK) {
for (Header header : response.getHeaders("Content-Type")) {
MediaType type;
try {
type = MediaType.parse(header.getValue());
} catch (IllegalArgumentException ignored) {
continue;
}
if (MediaType.JSON_UTF_8.is(type)) {
return true;
}
}
}
return false;
}
private HttpResponse sendRequestToNode(HttpRequest proxyRequest) throws ClientProtocolException,
IOException {
HttpClient client = getClient();
URL remoteURL = slot.getRemoteURL();
HttpHost host = new HttpHost(remoteURL.getHost(), remoteURL.getPort());
return client.execute(host, proxyRequest);
}
private HttpRequest prepareProxyRequest(HttpServletRequest request
/*, ForwardConfiguration config*/)
throws IOException {
URL remoteURL = slot.getRemoteURL();
String pathSpec = request.getServletPath() + request.getContextPath();
String path = request.getRequestURI();
if (!path.startsWith(pathSpec)) {
throw new IllegalStateException("Expected path " + path + " to start with pathSpec "
+ pathSpec);
}
String end = path.substring(pathSpec.length());
String ok = remoteURL + end;
if (request.getQueryString() != null) {
ok += "?" + request.getQueryString();
}
String uri = new URL(remoteURL, ok).toExternalForm();
InputStream body = null;
if (request.getContentLength() > 0 || request.getHeader("Transfer-Encoding") != null) {
body = request.getInputStream();
}
HttpRequest proxyRequest;
if (body != null) {
BasicHttpEntityEnclosingRequest r =
new BasicHttpEntityEnclosingRequest(request.getMethod(), uri);
r.setEntity(new InputStreamEntity(body, request.getContentLength()));
proxyRequest = r;
} else {
proxyRequest = new BasicHttpRequest(request.getMethod(), uri);
}
for (Enumeration<?> e = request.getHeaderNames(); e.hasMoreElements(); ) {
String headerName = (String) e.nextElement();
if ("Content-Length".equalsIgnoreCase(headerName)) {
continue; // already set
}
proxyRequest.setHeader(headerName, request.getHeader(headerName));
}
return proxyRequest;
}
private void writeRawBody(HttpServletResponse response, byte[] rawBody) throws IOException {
OutputStream out = response.getOutputStream();
try {
      // We need to set the Content-Length header before we write to the output stream. Usually the
      // Content-Length header is already set because we take it from the proxied request. But it
      // won't be set when we consume chunked content, since that doesn't use Content-Length. As
      // we're not going to send a chunked response, we need to set the Content-Length in order
      // for the response to be valid.
if (!response.containsHeader("Content-Length")) {
response.setIntHeader("Content-Length", rawBody.length);
}
out.write(rawBody);
} catch (IOException e) {
throw new ClientGoneException(e);
} finally {
IOUtils.closeQuietly(out);
}
}
private byte[] drainInputStream(InputStream in) throws IOException {
try {
return ByteStreams.toByteArray(in);
} finally {
in.close();
}
}
private String getResponseUtf8Content(InputStream in) {
String res;
StringBuilder sb = new StringBuilder();
String line;
try {
BufferedReader reader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
while ((line = reader.readLine()) != null) {
// TODO freynaud bug ?
sb.append(line);/* .append("\n") */
}
in.close();
} catch (UnsupportedEncodingException e) {
throw new RuntimeException(e);
} catch (IOException e) {
throw new RuntimeException(e);
}
res = sb.toString();
return res;
}
private void processResponseHeaders(HttpServletRequest request, HttpServletResponse response,
URL remoteURL, HttpResponse proxyResponse)
throws MalformedURLException {
String pathSpec = request.getServletPath() + request.getContextPath();
for (Header header : proxyResponse.getAllHeaders()) {
String name = header.getName();
String value = header.getValue();
      // HttpEntity#getContent() chews up the chunk-size octet (i.e., the InputStream does not
      // actually map 1:1 to the underlying response body). This breaks any client expecting the
      // chunk size. We could try to recreate it, but since the chunks are already read in and
      // decoded, you'd end up with a single chunk, which isn't all that useful. So, we return the
      // response as a traditional response with a Content-Length header, obviating the need for
      // the Transfer-Encoding header.
if (name.equalsIgnoreCase("Transfer-Encoding") && value.equalsIgnoreCase("chunked")) {
continue;
}
// the location needs to point to the hub that will proxy
// everything.
if (name.equalsIgnoreCase("Location")) {
URL returnedLocation = new URL(value);
String driverPath = remoteURL.getPath();
String wrongPath = returnedLocation.getPath();
String correctPath = wrongPath.replace(driverPath, "");
Hub hub = slot.getProxy().getRegistry().getHub();
String location = "http://" + hub.getHost() + ":" + hub.getPort() + pathSpec + correctPath;
response.setHeader(name, location);
} else {
response.setHeader(name, value);
}
}
}
/**
* Allow you to retrieve an object previously stored on the test session.
*
* @return the object you stored
*/
public Object get(String key) {
return objects.get(key);
}
/**
* Allows you to store an object on the test session.
*
* @param key a non-null string
*/
public void put(String key, Object value) {
objects.put(key, value);
}
/**
* Sends a DELETE/testComplete (webdriver/selenium) session command to the remote, following web
* driver protocol.
*
* @return true is the remote replied successfully to the request.
*/
public boolean sendDeleteSessionRequest() {
URL remoteURL = slot.getRemoteURL();
HttpRequest request;
switch (slot.getProtocol()) {
case Selenium:
request =
new BasicHttpRequest("POST", remoteURL.toExternalForm()
+ "/?cmd=testComplete&sessionId=" + getExternalKey()
.getKey());
break;
case WebDriver:
String uri = remoteURL.toString() + "/session/" + externalKey;
request = new BasicHttpRequest("DELETE", uri);
break;
default:
throw new GridException("Error, protocol not implemented.");
}
HttpHost host = new HttpHost(remoteURL.getHost(), remoteURL.getPort());
HttpEntity responseBody = null;
boolean ok;
try {
HttpClient client = getClient();
HttpResponse response = client.execute(host, request);
responseBody = response.getEntity();
int code = response.getStatusLine().getStatusCode();
ok = (code >= 200) && (code <= 299);
} catch (Throwable e) {
ok = false;
// corrupted, or something else already sent the DELETE.
log.severe("Error releasing the session. Is the remote server corrupted?");
} finally {
try {
EntityUtils.consume(responseBody);
} catch (IOException e) {
log.warning("Consuming the response body when DELETE to the node" + e.getMessage());
}
}
return ok;
}
/**
* Allows the timeout to be bypassed for this session. ignore = true => the session will not
* time out. Calling setIgnoreTimeout(false) also updates lastActivity to now.
*/
public void setIgnoreTimeout(boolean ignore) {
if (!ignore) {
lastActivity = timeSource.currentTimeInMillis();
}
this.ignoreTimeout = ignore;
}
public boolean isForwardingRequest() {
return forwardingRequest;
}
}
| 1 | 11,531 |
Again, you don't want to depend on glassfish.
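The glassfish usage the reviewer objects to is not visible in this excerpt. Purely as an illustration of keeping the servlet-side IO free of extra container dependencies, a plain-JDK equivalent of the stream draining above might look like the following sketch (class and method names are invented for the example):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

final class StreamUtil {
  private StreamUtil() {}

  // Reads the stream to completion and closes it, using only the JDK.
  static byte[] drain(InputStream in) throws IOException {
    try {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      byte[] chunk = new byte[8192];
      int read;
      while ((read = in.read(chunk)) != -1) {
        buffer.write(chunk, 0, read);
      }
      return buffer.toByteArray();
    } finally {
      in.close();
    }
  }
}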
|
SeleniumHQ-selenium
|
java
|
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module Faker
+ module Space
+ class << self
+ extend Gem::Deprecate
+
+ def launch_vehicule
+ Faker::Space.launch_vehicule
+ end
+
+ deprecate :launch_vehicule, 'Faker::Space.launch_vehicule', 2019, 06
+ end
+ end
+end
| 1 | 1 | 9,214 |
it should be `Faker::Space.launch_vehicle`, otherwise it will crash
|
faker-ruby-faker
|
rb
|
|
@@ -630,6 +630,18 @@ namespace NLog.Targets
#endif
byte[] bytes = this.GetBytesToWrite(logEvent);
+ // Clean up old archives if this is the first time a log record has been written to
+ // this log file and the archiving system is date/time based.
+ if (this.ArchiveNumbering == ArchiveNumberingMode.Date && this.ArchiveEvery != FileArchivePeriod.None)
+ {
+ FileInfo fileInfo = new FileInfo(fileName);
+ if (!fileInfo.Exists)
+ {
+ string fileNamePattern = this.GetFileNamePattern(fileName, logEvent, fileInfo);
+ this.DeleteOldDateArchive(fileNamePattern);
+ }
+ }
+
if (this.ShouldAutoArchive(fileName, logEvent, bytes.Length))
{
this.InvalidateCacheItem(fileName);
| 1 |
//
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Targets
{
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Globalization;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;
using System.Threading;
using Common;
using Config;
using Internal;
using Internal.FileAppenders;
using Layouts;
using Time;
/// <summary>
/// Writes log messages to one or more files.
/// </summary>
/// <seealso href="https://github.com/nlog/nlog/wiki/File-target">Documentation on NLog Wiki</seealso>
[Target("File")]
public class FileTarget : TargetWithLayoutHeaderAndFooter, ICreateFileParameters
{
// Period is defined in days.
private const int InitializedFilesCleanupPeriod = 2;
private const int InitializedFilesCounterMax = 100;
private const int ArchiveAboveSizeDisabled = -1;
private readonly Dictionary<string, DateTime> initializedFiles = new Dictionary<string, DateTime>();
private LineEndingMode lineEndingMode = LineEndingMode.Default;
private IFileAppenderFactory appenderFactory;
private BaseFileAppender[] recentAppenders;
private Timer autoClosingTimer;
private int initializedFilesCounter;
private int maxArchiveFiles;
private readonly DynamicFileArchive fileArchive;
/// <summary>
/// Initializes a new instance of the <see cref="FileTarget" /> class.
/// </summary>
/// <remarks>
/// The default value of the layout is: <code>${longdate}|${level:uppercase=true}|${logger}|${message}</code>
/// </remarks>
public FileTarget()
{
this.ArchiveNumbering = ArchiveNumberingMode.Sequence;
this.maxArchiveFiles = 0;
this.ConcurrentWriteAttemptDelay = 1;
this.ArchiveEvery = FileArchivePeriod.None;
this.ArchiveAboveSize = FileTarget.ArchiveAboveSizeDisabled;
this.ConcurrentWriteAttempts = 10;
this.ConcurrentWrites = true;
#if SILVERLIGHT
this.Encoding = Encoding.UTF8;
#else
this.Encoding = Encoding.Default;
#endif
this.BufferSize = 32768;
this.AutoFlush = true;
#if !SILVERLIGHT
this.FileAttributes = Win32FileAttributes.Normal;
#endif
this.LineEnding = LineEndingMode.Default;
this.EnableFileDelete = true;
this.OpenFileCacheTimeout = -1;
this.OpenFileCacheSize = 5;
this.CreateDirs = true;
this.fileArchive = new DynamicFileArchive(MaxArchiveFiles);
this.ForceManaged = false;
this.ArchiveDateFormat = string.Empty;
}
/// <summary>
/// Gets or sets the name of the file to write to.
/// </summary>
/// <remarks>
/// This FileName string is a layout which may include instances of layout renderers.
/// This lets you use a single target to write to multiple files.
/// </remarks>
/// <example>
/// The following value makes NLog write logging events to files based on the log level in the directory where
/// the application runs.
/// <code>${basedir}/${level}.log</code>
/// All <c>Debug</c> messages will go to <c>Debug.log</c>, all <c>Info</c> messages will go to <c>Info.log</c> and so on.
/// You can combine as many of the layout renderers as you want to produce an arbitrary log file name.
/// </example>
/// <docgen category='Output Options' order='1' />
[RequiredParameter]
public Layout FileName { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to create directories if they do not exist.
/// </summary>
/// <remarks>
/// Setting this to false may improve performance a bit, but you'll receive an error
/// when attempting to write to a directory that's not present.
/// </remarks>
/// <docgen category='Output Options' order='10' />
[DefaultValue(true)]
[Advanced]
public bool CreateDirs { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to delete old log file on startup.
/// </summary>
/// <remarks>
/// This option works only when the "FileName" parameter denotes a single file.
/// </remarks>
/// <docgen category='Output Options' order='10' />
[DefaultValue(false)]
public bool DeleteOldFileOnStartup { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to archive old log file on startup.
/// </summary>
/// <remarks>
/// This option works only when the "FileName" parameter denotes a single file.
/// After archiving the old file, the current log file will be empty.
/// </remarks>
/// <docgen category='Output Options' order='10' />
[DefaultValue(false)]
public bool ArchiveOldFileOnStartup { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to replace file contents on each write instead of appending log message at the end.
/// </summary>
/// <docgen category='Output Options' order='10' />
[DefaultValue(false)]
[Advanced]
public bool ReplaceFileContentsOnEachWrite { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to keep log file open instead of opening and closing it on each logging event.
/// </summary>
/// <remarks>
/// Setting this property to <c>True</c> helps improve performance.
/// </remarks>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(false)]
public bool KeepFileOpen { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to enable log file(s) to be deleted.
/// </summary>
/// <docgen category='Output Options' order='10' />
[DefaultValue(true)]
public bool EnableFileDelete { get; set; }
/// <summary>
/// Gets or sets a value specifying the date format to use when archiving files.
/// </summary>
/// <remarks>
/// This option works only when the "ArchiveNumbering" parameter is set to Date.
/// </remarks>
/// <docgen category='Output Options' order='10' />
[DefaultValue("")]
public string ArchiveDateFormat { get; set; }
#if !SILVERLIGHT
/// <summary>
/// Gets or sets the file attributes (Windows only).
/// </summary>
/// <docgen category='Output Options' order='10' />
[Advanced]
public Win32FileAttributes FileAttributes { get; set; }
#endif
/// <summary>
/// Gets or sets the line ending mode.
/// </summary>
/// <docgen category='Layout Options' order='10' />
[Advanced]
public LineEndingMode LineEnding
{
get
{
return this.lineEndingMode;
}
set
{
this.lineEndingMode = value;
}
}
/// <summary>
/// Gets or sets a value indicating whether to automatically flush the file buffers after each log message.
/// </summary>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(true)]
public bool AutoFlush { get; set; }
/// <summary>
/// Gets or sets the number of files to be kept open. Setting this to a higher value may improve performance
/// in a situation where a single File target is writing to many files
/// (such as splitting by level or by logger).
/// </summary>
/// <remarks>
/// The files are managed on a LRU (least recently used) basis, which flushes
/// the files that have not been used for the longest period of time should the
/// cache become full. As a rule of thumb, you shouldn't set this parameter to
/// a very high value. A number like 10-15 shouldn't be exceeded, because you'd
/// be keeping a large number of files open which consumes system resources.
/// </remarks>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(5)]
[Advanced]
public int OpenFileCacheSize { get; set; }
/// <summary>
/// Gets or sets the maximum number of seconds that files are kept open. If this number is negative the files are
/// not automatically closed after a period of inactivity.
/// </summary>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(-1)]
[Advanced]
public int OpenFileCacheTimeout { get; set; }
/// <summary>
/// Gets or sets the log file buffer size in bytes.
/// </summary>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(32768)]
public int BufferSize { get; set; }
/// <summary>
/// Gets or sets the file encoding.
/// </summary>
/// <docgen category='Layout Options' order='10' />
public Encoding Encoding { get; set; }
/// <summary>
/// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on the same host are allowed.
/// </summary>
/// <remarks>
/// This makes multi-process logging possible. NLog uses a special technique
/// that lets it keep the files open for writing.
/// </remarks>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(true)]
public bool ConcurrentWrites { get; set; }
/// <summary>
/// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on different network hosts are allowed.
/// </summary>
/// <remarks>
/// This effectively prevents files from being kept open.
/// </remarks>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(false)]
public bool NetworkWrites { get; set; }
/// <summary>
/// Gets or sets the number of times the write is attempted on the file before NLog
/// discards the log message.
/// </summary>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(10)]
[Advanced]
public int ConcurrentWriteAttempts { get; set; }
/// <summary>
/// Gets or sets the delay in milliseconds to wait before attempting to write to the file again.
/// </summary>
/// <remarks>
/// The actual delay is a random value between 0 and the value specified
/// in this parameter. On each failed attempt the delay base is doubled
/// up to <see cref="ConcurrentWriteAttempts" /> times.
/// </remarks>
/// <example>
/// Assuming that ConcurrentWriteAttemptDelay is 10 the time to wait will be:<p/>
/// a random value between 0 and 10 milliseconds - 1st attempt<br/>
/// a random value between 0 and 20 milliseconds - 2nd attempt<br/>
/// a random value between 0 and 40 milliseconds - 3rd attempt<br/>
/// a random value between 0 and 80 milliseconds - 4th attempt<br/>
/// ...<p/>
/// and so on.
/// </example>
/// <docgen category='Performance Tuning Options' order='10' />
[DefaultValue(1)]
[Advanced]
public int ConcurrentWriteAttemptDelay { get; set; }
/// <summary>
/// Gets or sets the size in bytes above which log files will be automatically archived.
/// </summary>
/// <remarks>
/// Caution: Enabling this option can considerably slow down your file
/// logging in multi-process scenarios. If only one process is going to
/// be writing to the file, consider setting <c>ConcurrentWrites</c>
/// to <c>false</c> for maximum performance.
/// </remarks>
/// <docgen category='Archival Options' order='10' />
public long ArchiveAboveSize { get; set; }
/// <summary>
/// Gets or sets a value indicating whether to automatically archive log files every time the specified time passes.
/// </summary>
/// <remarks>
/// Files are moved to the archive as part of the write operation if the current period of time changes. For example
/// if the current <c>hour</c> changes from 10 to 11, the first write that will occur
/// on or after 11:00 will trigger the archiving.
/// <p>
/// Caution: Enabling this option can considerably slow down your file
/// logging in multi-process scenarios. If only one process is going to
/// be writing to the file, consider setting <c>ConcurrentWrites</c>
/// to <c>false</c> for maximum performance.
/// </p>
/// </remarks>
/// <docgen category='Archival Options' order='10' />
public FileArchivePeriod ArchiveEvery { get; set; }
/// <summary>
/// Gets or sets the name of the file to be used for an archive.
/// </summary>
/// <remarks>
/// It may contain a special placeholder {#####}
/// that will be replaced with a sequence of numbers depending on
/// the archiving strategy. The number of hash characters used determines
/// the number of numerical digits to be used for numbering files.
/// </remarks>
/// <docgen category='Archival Options' order='10' />
public Layout ArchiveFileName { get; set; }
/// <summary>
/// Gets or sets the maximum number of archive files that should be kept.
/// </summary>
/// <docgen category='Archival Options' order='10' />
[DefaultValue(0)]
public int MaxArchiveFiles
{
get
{
return maxArchiveFiles;
}
set
{
maxArchiveFiles = value;
fileArchive.MaxArchiveFileToKeep = value;
}
}
/// <summary>
/// Gets or sets a value indicating whether a managed file stream is forced, instead of using the native implementation.
/// </summary>
[DefaultValue(false)]
public bool ForceManaged { get; set; }
/// <summary>
/// Gets or sets the way file archives are numbered.
/// </summary>
/// <docgen category='Archival Options' order='10' />
public ArchiveNumberingMode ArchiveNumbering { get; set; }
#if NET4_5
/// <summary>
/// Gets or sets a value indicating whether to compress archive files into the zip archive format.
/// </summary>
/// <docgen category='Archival Options' order='10' />
[DefaultValue(false)]
public bool EnableArchiveFileCompression { get; set; }
#else
private const bool EnableArchiveFileCompression = false;
#endif
/// <summary>
/// Gets the characters that are appended after each line.
/// </summary>
protected internal string NewLineChars
{
get
{
return lineEndingMode.NewLineCharacters;
}
}
/// <summary>
/// Removes records of initialized files that have not been
/// accessed in the last two days.
/// </summary>
/// <remarks>
/// Files are marked 'initialized' for the purpose of writing footers when the logging finishes.
/// </remarks>
public void CleanupInitializedFiles()
{
this.CleanupInitializedFiles(DateTime.Now.AddDays(-FileTarget.InitializedFilesCleanupPeriod));
}
/// <summary>
/// Removes records of initialized files that have not been
/// accessed after the specified date.
/// </summary>
/// <param name="cleanupThreshold">The cleanup threshold.</param>
/// <remarks>
/// Files are marked 'initialized' for the purpose of writing footers when the logging finishes.
/// </remarks>
public void CleanupInitializedFiles(DateTime cleanupThreshold)
{
var filesToUninitialize = new List<string>();
// Select the files that need to be uninitialized.
foreach (var file in this.initializedFiles)
{
if (file.Value < cleanupThreshold)
{
filesToUninitialize.Add(file.Key);
}
}
// Uninitialize the files.
foreach (string fileName in filesToUninitialize)
{
this.WriteFooterAndUninitialize(fileName);
}
}
/// <summary>
/// Flushes all pending file operations.
/// </summary>
/// <param name="asyncContinuation">The asynchronous continuation.</param>
/// <remarks>
/// The timeout parameter is ignored, because file APIs don't provide
/// the needed functionality.
/// </remarks>
protected override void FlushAsync(AsyncContinuation asyncContinuation)
{
try
{
foreach (BaseFileAppender t in this.recentAppenders)
{
if (t == null)
{
break;
}
t.Flush();
}
asyncContinuation(null);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
asyncContinuation(exception);
}
}
private IFileAppenderFactory GetFileAppenderFactory()
{
if (!this.KeepFileOpen)
{
return RetryingMultiProcessFileAppender.TheFactory;
}
else
{
if (this.ArchiveAboveSize != FileTarget.ArchiveAboveSizeDisabled || this.ArchiveEvery != FileArchivePeriod.None)
{
if (this.NetworkWrites)
{
return RetryingMultiProcessFileAppender.TheFactory;
}
else if (this.ConcurrentWrites)
{
#if SILVERLIGHT
return RetryingMultiProcessFileAppender.TheFactory;
#elif MONO
//
// mono on Windows uses mutexes, on Unix - special appender
//
if (PlatformDetector.IsUnix)
{
return UnixMultiProcessFileAppender.TheFactory;
}
else
{
return MutexMultiProcessFileAppender.TheFactory;
}
#else
return MutexMultiProcessFileAppender.TheFactory;
#endif
}
else
{
return CountingSingleProcessFileAppender.TheFactory;
}
}
else
{
if (this.NetworkWrites)
{
return RetryingMultiProcessFileAppender.TheFactory;
}
else if (this.ConcurrentWrites)
{
#if SILVERLIGHT
return RetryingMultiProcessFileAppender.TheFactory;
#elif MONO
//
// mono on Windows uses mutexes, on Unix - special appender
//
if (PlatformDetector.IsUnix)
{
return UnixMultiProcessFileAppender.TheFactory;
}
else
{
return MutexMultiProcessFileAppender.TheFactory;
}
#else
return MutexMultiProcessFileAppender.TheFactory;
#endif
}
else
{
return SingleProcessFileAppender.TheFactory;
}
}
}
}
/// <summary>
/// Initializes file logging by creating data structures that
/// enable efficient multi-file logging.
/// </summary>
protected override void InitializeTarget()
{
base.InitializeTarget();
this.appenderFactory = GetFileAppenderFactory();
this.recentAppenders = new BaseFileAppender[this.OpenFileCacheSize];
if ((this.OpenFileCacheSize > 0 || this.EnableFileDelete) && this.OpenFileCacheTimeout > 0)
{
this.autoClosingTimer = new Timer(
this.AutoClosingTimerCallback,
null,
this.OpenFileCacheTimeout * 1000,
this.OpenFileCacheTimeout * 1000);
}
// Console.Error.WriteLine("Name: {0} Factory: {1}", this.Name, this.appenderFactory.GetType().FullName);
}
/// <summary>
/// Closes the file(s) opened for writing.
/// </summary>
protected override void CloseTarget()
{
base.CloseTarget();
foreach (string fileName in new List<string>(this.initializedFiles.Keys))
{
this.WriteFooterAndUninitialize(fileName);
}
if (this.autoClosingTimer != null)
{
this.autoClosingTimer.Change(Timeout.Infinite, Timeout.Infinite);
this.autoClosingTimer.Dispose();
this.autoClosingTimer = null;
}
if (this.recentAppenders != null)
{
for (int i = 0; i < this.recentAppenders.Length; ++i)
{
if (this.recentAppenders[i] == null)
{
break;
}
this.recentAppenders[i].Close();
this.recentAppenders[i] = null;
}
}
}
/// <summary>
/// Writes the specified logging event to a file specified in the FileName
/// parameter.
/// </summary>
/// <param name="logEvent">The logging event.</param>
protected override void Write(LogEventInfo logEvent)
{
#if !SILVERLIGHT
string fileName = CleanupInvalidFileNameChars(this.FileName.Render(logEvent));
#else
string fileName = this.FileName.Render(logEvent);
#endif
byte[] bytes = this.GetBytesToWrite(logEvent);
if (this.ShouldAutoArchive(fileName, logEvent, bytes.Length))
{
this.InvalidateCacheItem(fileName);
this.DoAutoArchive(fileName, logEvent);
}
this.WriteToFile(fileName, bytes, false);
}
/// <summary>
/// Writes the specified array of logging events to a file specified in the FileName
/// parameter.
/// </summary>
/// <param name="logEvents">An array of <see cref="LogEventInfo "/> objects.</param>
/// <remarks>
/// This function makes use of the fact that the events are batched by sorting
/// the requests by filename. This optimizes the number of open/close calls
/// and can help improve performance.
/// </remarks>
protected override void Write(AsyncLogEventInfo[] logEvents)
{
var buckets = logEvents.BucketSort(c => this.FileName.Render(c.LogEvent));
using (var ms = new MemoryStream())
{
var pendingContinuations = new List<AsyncContinuation>();
foreach (var bucket in buckets)
{
#if !SILVERLIGHT
string fileName = CleanupInvalidFileNameChars(bucket.Key);
#else
string fileName = bucket.Key;
#endif
ms.SetLength(0);
ms.Position = 0;
LogEventInfo firstLogEvent = null;
foreach (AsyncLogEventInfo ev in bucket.Value)
{
if (firstLogEvent == null)
{
firstLogEvent = ev.LogEvent;
}
byte[] bytes = this.GetBytesToWrite(ev.LogEvent);
ms.Write(bytes, 0, bytes.Length);
pendingContinuations.Add(ev.Continuation);
}
this.FlushCurrentFileWrites(fileName, firstLogEvent, ms, pendingContinuations);
}
}
}
/// <summary>
/// Formats the log event for write.
/// </summary>
/// <param name="logEvent">The log event to be formatted.</param>
/// <returns>A string representation of the log event.</returns>
protected virtual string GetFormattedMessage(LogEventInfo logEvent)
{
return this.Layout.Render(logEvent);
}
/// <summary>
/// Gets the bytes to be written to the file.
/// </summary>
/// <param name="logEvent">Log event.</param>
/// <returns>Array of bytes that are ready to be written.</returns>
protected virtual byte[] GetBytesToWrite(LogEventInfo logEvent)
{
string renderedText = this.GetFormattedMessage(logEvent) + this.NewLineChars;
return this.TransformBytes(this.Encoding.GetBytes(renderedText));
}
/// <summary>
/// Modifies the specified byte array before it gets sent to a file.
/// </summary>
/// <param name="value">The byte array.</param>
/// <returns>The modified byte array. The function can do the modification in-place.</returns>
protected virtual byte[] TransformBytes(byte[] value)
{
return value;
}
private static string ReplaceNumberPattern(string pattern, int value)
{
int firstPart = pattern.IndexOf("{#", StringComparison.Ordinal);
int lastPart = pattern.IndexOf("#}", StringComparison.Ordinal) + 2;
int numDigits = lastPart - firstPart - 2;
return pattern.Substring(0, firstPart) + Convert.ToString(value, 10).PadLeft(numDigits, '0') + pattern.Substring(lastPart);
}
private void FlushCurrentFileWrites(string currentFileName, LogEventInfo firstLogEvent, MemoryStream ms, List<AsyncContinuation> pendingContinuations)
{
Exception lastException = null;
try
{
if (currentFileName != null)
{
if (this.ShouldAutoArchive(currentFileName, firstLogEvent, (int)ms.Length))
{
this.WriteFooterAndUninitialize(currentFileName);
this.InvalidateCacheItem(currentFileName);
this.DoAutoArchive(currentFileName, firstLogEvent);
}
this.WriteToFile(currentFileName, ms.ToArray(), false);
}
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
lastException = exception;
}
foreach (AsyncContinuation cont in pendingContinuations)
{
cont(lastException);
}
pendingContinuations.Clear();
}
private bool ContainFileNamePattern(string fileName)
{
int startingIndex = fileName.IndexOf("{#", StringComparison.Ordinal);
int endingIndex = fileName.IndexOf("#}", StringComparison.Ordinal);
return (startingIndex != -1 && endingIndex != -1 && startingIndex < endingIndex);
}
private void RecursiveRollingRename(string fileName, string pattern, int archiveNumber)
{
if (this.MaxArchiveFiles > 0 && archiveNumber >= this.MaxArchiveFiles)
{
File.Delete(fileName);
return;
}
if (!File.Exists(fileName))
{
return;
}
string newFileName = ReplaceNumberPattern(pattern, archiveNumber);
if (File.Exists(fileName))
{
RecursiveRollingRename(newFileName, pattern, archiveNumber + 1);
}
InternalLogger.Trace("Renaming {0} to {1}", fileName, newFileName);
var shouldCompress = archiveNumber == 0;
try
{
RollArchiveForward(fileName, newFileName, shouldCompress);
}
catch (IOException)
{
// TODO: Check the value of CreateDirs property before creating directories.
string dir = Path.GetDirectoryName(newFileName);
if (!Directory.Exists(dir))
{
Directory.CreateDirectory(dir);
}
RollArchiveForward(fileName, newFileName, shouldCompress);
}
}
private void SequentialArchive(string fileName, string pattern)
{
FileNameTemplate fileTemplate = new FileNameTemplate(Path.GetFileName(pattern));
int trailerLength = fileTemplate.Template.Length - fileTemplate.EndAt;
string fileNameMask = fileTemplate.ReplacePattern("*");
string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern));
int nextNumber = -1;
int minNumber = -1;
var number2name = new Dictionary<int, string>();
try
{
#if SILVERLIGHT
foreach (string s in Directory.EnumerateFiles(dirName, fileNameMask))
#else
foreach (string s in Directory.GetFiles(dirName, fileNameMask))
#endif
{
string baseName = Path.GetFileName(s);
string number = baseName.Substring(fileTemplate.BeginAt, baseName.Length - trailerLength - fileTemplate.BeginAt);
int num;
try
{
num = Convert.ToInt32(number, CultureInfo.InvariantCulture);
}
catch (FormatException)
{
continue;
}
nextNumber = Math.Max(nextNumber, num);
minNumber = minNumber != -1 ? Math.Min(minNumber, num) : num;
number2name[num] = s;
}
nextNumber++;
}
catch (DirectoryNotFoundException)
{
Directory.CreateDirectory(dirName);
nextNumber = 0;
}
if (minNumber != -1 && this.MaxArchiveFiles != 0)
{
int minNumberToKeep = nextNumber - this.MaxArchiveFiles + 1;
for (int i = minNumber; i < minNumberToKeep; ++i)
{
string s;
if (number2name.TryGetValue(i, out s))
{
File.Delete(s);
}
}
}
string newFileName = ReplaceNumberPattern(pattern, nextNumber);
RollArchiveForward(fileName, newFileName, shouldCompress: true);
}
private static void ArchiveFile(string fileName, string archiveFileName, bool enableCompression)
{
#if NET4_5
if (enableCompression)
{
using (var archiveStream = new FileStream(archiveFileName, FileMode.Create))
using (var archive = new ZipArchive(archiveStream, ZipArchiveMode.Create))
using (var originalFileStream = new FileStream(fileName, FileMode.Open, FileAccess.Read, FileShare.Read))
{
var zipArchiveEntry = archive.CreateEntry(Path.GetFileName(fileName));
using (var destination = zipArchiveEntry.Open())
{
originalFileStream.CopyTo(destination);
}
}
File.Delete(fileName);
}
else
#endif
{
File.Move(fileName, archiveFileName);
}
}
private void RollArchiveForward(string existingFileName, string archiveFileName, bool shouldCompress)
{
ArchiveFile(existingFileName, archiveFileName, shouldCompress && EnableArchiveFileCompression);
string fileName = Path.GetFileName(existingFileName);
if (fileName == null) { return; }
// When the file has been moved, the original filename is
// no longer one of the initializedFiles. The initializedFilesCounter
// should be left alone, the amount is still valid.
if (this.initializedFiles.ContainsKey(fileName))
{
this.initializedFiles.Remove(fileName);
}
else if (this.initializedFiles.ContainsKey(existingFileName))
{
this.initializedFiles.Remove(existingFileName);
}
}
#if !NET_CF
private void DateAndSequentialArchive(string fileName, string pattern, LogEventInfo logEvent)
{
string baseNamePattern = Path.GetFileName(pattern);
if (string.IsNullOrEmpty(baseNamePattern))
{
return;
}
int placeholderFirstPart = baseNamePattern.IndexOf("{#", StringComparison.Ordinal);
int placeholderLastPart = baseNamePattern.IndexOf("#}", StringComparison.Ordinal) + 2;
int dateTrailerLength = baseNamePattern.Length - placeholderLastPart;
string fileNameMask = baseNamePattern.Substring(0, placeholderFirstPart) + "*" + baseNamePattern.Substring(placeholderLastPart);
string dateFormat = GetDateFormatString(this.ArchiveDateFormat);
string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern));
if (string.IsNullOrEmpty(dirName))
{
return;
}
bool isDaySwitch = false;
DateTime lastWriteTime;
long fileLength;
if (this.GetFileInfo(fileName, out lastWriteTime, out fileLength))
{
string formatString = GetDateFormatString(string.Empty);
string ts = lastWriteTime.ToString(formatString, CultureInfo.InvariantCulture);
string ts2 = logEvent.TimeStamp.ToLocalTime().ToString(formatString, CultureInfo.InvariantCulture);
isDaySwitch = ts != ts2;
}
int nextSequenceNumber = -1;
try
{
var directoryInfo = new DirectoryInfo(dirName);
#if SILVERLIGHT
List<string> files = directoryInfo.EnumerateFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName).ToList();
#else
List<string> files = directoryInfo.GetFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName).ToList();
#endif
var filesByDate = new List<string>();
for (int index = 0; index < files.Count; index++)
{
string archiveFileName = Path.GetFileName(files[index]);
if (string.IsNullOrEmpty(archiveFileName))
{
continue;
}
string datePart = archiveFileName.Substring(fileNameMask.LastIndexOf('*'), dateFormat.Length);
string numberPart = archiveFileName.Substring(fileNameMask.LastIndexOf('*') + dateFormat.Length + 1,
archiveFileName.Length - dateTrailerLength - (fileNameMask.LastIndexOf('*') + dateFormat.Length + 1));
int num;
try
{
num = Convert.ToInt32(numberPart, CultureInfo.InvariantCulture);
}
catch (FormatException)
{
continue;
}
if (datePart == GetArchiveDate(isDaySwitch).ToString(dateFormat))
{
nextSequenceNumber = Math.Max(nextSequenceNumber, num);
}
DateTime fileDate;
if (DateTime.TryParseExact(datePart, dateFormat, CultureInfo.InvariantCulture, DateTimeStyles.None,
out fileDate))
{
filesByDate.Add(files[index]);
}
}
nextSequenceNumber++;
// Cleanup archive files
for (int fileIndex = 0; fileIndex < filesByDate.Count; fileIndex++)
{
if (fileIndex > files.Count - this.MaxArchiveFiles)
break;
File.Delete(filesByDate[fileIndex]);
}
}
catch (DirectoryNotFoundException)
{
Directory.CreateDirectory(dirName);
nextSequenceNumber = 0;
}
DateTime newFileDate = GetArchiveDate(isDaySwitch);
string newFileName = Path.Combine(dirName,
fileNameMask.Replace("*", string.Format("{0}.{1}", newFileDate.ToString(dateFormat), nextSequenceNumber)));
RollArchiveForward(fileName, newFileName, shouldCompress: true);
}
private string ReplaceReplaceFileNamePattern(string pattern, string replacementValue)
{
return new FileNameTemplate(Path.GetFileName(pattern)).ReplacePattern(replacementValue);
}
private void DateArchive(string fileName, string pattern)
{
string fileNameMask = ReplaceReplaceFileNamePattern(pattern, "*");
string dirName = Path.GetDirectoryName(Path.GetFullPath(pattern));
string dateFormat = GetDateFormatString(this.ArchiveDateFormat);
try
{
DirectoryInfo directoryInfo = new DirectoryInfo(dirName);
#if SILVERLIGHT
List<string> files = directoryInfo.EnumerateFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName).ToList();
#else
List<string> files = directoryInfo.GetFiles(fileNameMask).OrderBy(n => n.CreationTime).Select(n => n.FullName).ToList();
#endif
List<string> filesByDate = new List<string>();
for (int index = 0; index < files.Count; index++)
{
string archiveFileName = Path.GetFileName(files[index]);
string datePart = archiveFileName.Substring(fileNameMask.LastIndexOf('*'), dateFormat.Length);
DateTime fileDate = DateTime.MinValue;
if (DateTime.TryParseExact(datePart, dateFormat, CultureInfo.InvariantCulture, DateTimeStyles.None, out fileDate))
{
filesByDate.Add(files[index]);
}
}
if (this.MaxArchiveFiles != 0)
{
for (int fileIndex = 0; fileIndex < filesByDate.Count; fileIndex++)
{
if (fileIndex > files.Count - this.MaxArchiveFiles)
break;
File.Delete(filesByDate[fileIndex]);
}
}
}
catch (DirectoryNotFoundException)
{
Directory.CreateDirectory(dirName);
}
DateTime newFileDate = GetArchiveDate(true);
string newFileName = Path.Combine(dirName, fileNameMask.Replace("*", newFileDate.ToString(dateFormat)));
RollArchiveForward(fileName, newFileName, shouldCompress: true);
}
#endif
private string GetDateFormatString(string defaultFormat)
{
// If archiveDateFormat is not set in the config file, use a default
// date format string based on the archive period.
string formatString = defaultFormat;
if (string.IsNullOrEmpty(formatString))
{
switch (this.ArchiveEvery)
{
case FileArchivePeriod.Year:
formatString = "yyyy";
break;
case FileArchivePeriod.Month:
formatString = "yyyyMM";
break;
default:
formatString = "yyyyMMdd";
break;
case FileArchivePeriod.Hour:
formatString = "yyyyMMddHH";
break;
case FileArchivePeriod.Minute:
formatString = "yyyyMMddHHmm";
break;
}
}
return formatString;
}
private DateTime GetArchiveDate(bool isNextCycle)
{
DateTime archiveDate = TimeSource.Current.Time;
// Because AutoArchive/DateArchive gets called after the FileArchivePeriod condition matches, decrement the archive period by 1
// (i.e. If ArchiveEvery = Day, the file will be archived with yesterday's date)
int addCount = isNextCycle ? -1 : 0;
switch (this.ArchiveEvery)
{
case FileArchivePeriod.Day:
archiveDate = archiveDate.AddDays(addCount);
break;
case FileArchivePeriod.Hour:
archiveDate = archiveDate.AddHours(addCount);
break;
case FileArchivePeriod.Minute:
archiveDate = archiveDate.AddMinutes(addCount);
break;
case FileArchivePeriod.Month:
archiveDate = archiveDate.AddMonths(addCount);
break;
case FileArchivePeriod.Year:
archiveDate = archiveDate.AddYears(addCount);
break;
}
return archiveDate;
}
private void DoAutoArchive(string fileName, LogEventInfo eventInfo)
{
FileInfo fi = new FileInfo(fileName);
if (!fi.Exists)
{
return;
}
// Console.WriteLine("DoAutoArchive({0})", fileName);
string fileNamePattern;
if (this.ArchiveFileName == null)
{
string ext = Path.GetExtension(fileName);
fileNamePattern = Path.ChangeExtension(fi.FullName, ".{#}" + ext);
}
else
{
// The archive file name is given. There are two possibilities:
// (1) the user supplied the file name with a pattern
// (2) the user supplied a normal file name
fileNamePattern = this.ArchiveFileName.Render(eventInfo);
}
if (!ContainFileNamePattern(fileNamePattern))
{
if (fileArchive.Archive(fileNamePattern, fi.FullName, CreateDirs, EnableArchiveFileCompression))
{
if (this.initializedFiles.ContainsKey(fi.FullName))
{
this.initializedFiles.Remove(fi.FullName);
}
}
}
else
{
switch (this.ArchiveNumbering)
{
case ArchiveNumberingMode.Rolling:
this.RecursiveRollingRename(fi.FullName, fileNamePattern, 0);
break;
case ArchiveNumberingMode.Sequence:
this.SequentialArchive(fi.FullName, fileNamePattern);
break;
#if !NET_CF
case ArchiveNumberingMode.Date:
this.DateArchive(fi.FullName, fileNamePattern);
break;
case ArchiveNumberingMode.DateAndSequence:
this.DateAndSequentialArchive(fi.FullName, fileNamePattern, eventInfo);
break;
#endif
}
}
}
private bool ShouldAutoArchive(string fileName, LogEventInfo ev, int upcomingWriteSize)
{
return ShouldAutoArchiveBasedOnFileSize(fileName, upcomingWriteSize) ||
ShouldAutoArchiveBasedOnTime(fileName, ev);
/*
if (this.ArchiveAboveSize == FileTarget.ArchiveAboveSizeDisabled && this.ArchiveEvery == FileArchivePeriod.None)
{
return false;
}
DateTime lastWriteTime;
long fileLength;
if (!this.GetFileInfo(fileName, out lastWriteTime, out fileLength))
{
return false;
}
if (this.ArchiveAboveSize != FileTarget.ArchiveAboveSizeDisabled)
{
if (fileLength + upcomingWriteSize > this.ArchiveAboveSize)
{
return true;
}
}
if (this.ArchiveEvery != FileArchivePeriod.None)
{
string formatString = GetDateFormatString(string.Empty);
string ts = lastWriteTime.ToString(formatString, CultureInfo.InvariantCulture);
string ts2 = ev.TimeStamp.ToLocalTime().ToString(formatString, CultureInfo.InvariantCulture);
if (ts != ts2)
{
return true;
}
}
return false;
*/
}
private bool ShouldAutoArchiveBasedOnFileSize(string fileName, int upcomingWriteSize)
{
if (this.ArchiveAboveSize == FileTarget.ArchiveAboveSizeDisabled)
{
return false;
}
DateTime lastWriteTime;
long fileLength;
if (!this.GetFileInfo(fileName, out lastWriteTime, out fileLength))
{
return false;
}
if (this.ArchiveAboveSize != FileTarget.ArchiveAboveSizeDisabled)
{
if (fileLength + upcomingWriteSize > this.ArchiveAboveSize)
{
return true;
}
}
return false;
}
private bool ShouldAutoArchiveBasedOnTime(string fileName, LogEventInfo logEvent)
{
if (this.ArchiveEvery == FileArchivePeriod.None)
{
return false;
}
DateTime lastWriteTime;
long fileLength;
if (!this.GetFileInfo(fileName, out lastWriteTime, out fileLength))
{
return false;
}
if (this.ArchiveEvery != FileArchivePeriod.None)
{
// file write time is in Utc and logEvent's timestamp is originated from TimeSource.Current,
// so we should ask the TimeSource to convert file time to TimeSource time:
lastWriteTime = TimeSource.Current.FromSystemTime(lastWriteTime);
string formatString = GetDateFormatString(string.Empty);
string fileLastChanged = lastWriteTime.ToString(formatString, CultureInfo.InvariantCulture);
string logEventRecorded = logEvent.TimeStamp.ToString(formatString, CultureInfo.InvariantCulture);
if (fileLastChanged != logEventRecorded)
{
return true;
}
}
return false;
}
private void AutoClosingTimerCallback(object state)
{
lock (this.SyncRoot)
{
if (!this.IsInitialized)
{
return;
}
try
{
DateTime timeToKill = DateTime.UtcNow.AddSeconds(-this.OpenFileCacheTimeout);
for (int i = 0; i < this.recentAppenders.Length; ++i)
{
if (this.recentAppenders[i] == null)
{
break;
}
if (this.recentAppenders[i].OpenTime < timeToKill)
{
for (int j = i; j < this.recentAppenders.Length; ++j)
{
if (this.recentAppenders[j] == null)
{
break;
}
this.recentAppenders[j].Close();
this.recentAppenders[j] = null;
}
break;
}
}
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
InternalLogger.Warn("Exception in AutoClosingTimerCallback: {0}", exception);
}
}
}
private BaseFileAppender AllocateFileAppender(string fileName)
{
//
// BaseFileAppender.Write is the most expensive operation here
// so the in-memory data structure doesn't have to be
// very sophisticated. It's a table-based LRU, where we move
// the used element to become the first one.
// The number of items is usually very limited so the
// performance should be equivalent to the one of the hashtable.
//
BaseFileAppender appenderToWrite = null;
int freeSpot = this.recentAppenders.Length - 1;
for (int i = 0; i < this.recentAppenders.Length; ++i)
{
// Use empty slot in recent appender list, if there is one.
if (this.recentAppenders[i] == null)
{
freeSpot = i;
break;
}
if (this.recentAppenders[i].FileName == fileName)
{
// found it, move it to the first place on the list
// (MRU)
// file open has a chance of failure
// if it fails in the constructor, we won't modify any data structures
BaseFileAppender app = this.recentAppenders[i];
for (int j = i; j > 0; --j)
{
this.recentAppenders[j] = this.recentAppenders[j - 1];
}
this.recentAppenders[0] = app;
appenderToWrite = app;
break;
}
}
if (appenderToWrite == null)
{
BaseFileAppender newAppender = this.appenderFactory.Open(fileName, this);
if (this.recentAppenders[freeSpot] != null)
{
this.recentAppenders[freeSpot].Close();
this.recentAppenders[freeSpot] = null;
}
for (int j = freeSpot; j > 0; --j)
{
this.recentAppenders[j] = this.recentAppenders[j - 1];
}
this.recentAppenders[0] = newAppender;
appenderToWrite = newAppender;
}
return appenderToWrite;
}
private byte[] GetHeaderBytes()
{
return this.GetLayoutBytes(this.Header);
/*
if (this.Header == null)
{
return null;
}
string renderedText = this.Header.Render(LogEventInfo.CreateNullEvent()) + this.NewLineChars;
return this.TransformBytes(this.Encoding.GetBytes(renderedText));
*/
}
private byte[] GetFooterBytes()
{
return this.GetLayoutBytes(this.Footer);
/*
if (this.Footer == null)
{
return null;
}
string renderedText = this.Footer.Render(LogEventInfo.CreateNullEvent()) + this.NewLineChars;
return this.TransformBytes(this.Encoding.GetBytes(renderedText));
*/
}
private void WriteToFile(string fileName, byte[] bytes, bool justData)
{
if (this.ReplaceFileContentsOnEachWrite)
{
ReplaceFileContent(fileName, bytes);
return;
}
bool writeHeader = InitializeFile(fileName, justData);
BaseFileAppender appender = AllocateFileAppender(fileName);
if (writeHeader)
{
this.WriteHeader(appender);
}
appender.Write(bytes);
}
private bool InitializeFile(string fileName, bool justData)
{
bool writeHeader = false;
if (!justData)
{
if (!this.initializedFiles.ContainsKey(fileName))
{
ProcessOnStartup(fileName);
this.initializedFiles[fileName] = DateTime.Now;
this.initializedFilesCounter++;
writeHeader = true;
if (this.initializedFilesCounter >= FileTarget.InitializedFilesCounterMax)
{
this.initializedFilesCounter = 0;
this.CleanupInitializedFiles();
}
}
this.initializedFiles[fileName] = DateTime.Now;
}
return writeHeader;
}
private void WriteFooterAndUninitialize(string fileName)
{
byte[] footerBytes = this.GetFooterBytes();
if (footerBytes != null)
{
if (File.Exists(fileName))
{
this.WriteToFile(fileName, footerBytes, true);
}
}
this.initializedFiles.Remove(fileName);
}
private void ProcessOnStartup(string fileName)
{
if (this.ArchiveOldFileOnStartup)
{
try
{
this.DoAutoArchive(fileName, null);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
InternalLogger.Warn("Unable to archive old log file '{0}': {1}", fileName, exception);
}
}
if (this.DeleteOldFileOnStartup)
{
try
{
File.Delete(fileName);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
InternalLogger.Warn("Unable to delete old log file '{0}': {1}", fileName, exception);
}
}
}
private void ReplaceFileContent(string fileName, byte[] bytes)
{
using (FileStream fs = File.Create(fileName))
{
byte[] headerBytes = this.GetHeaderBytes();
if (headerBytes != null)
{
fs.Write(headerBytes, 0, headerBytes.Length);
}
fs.Write(bytes, 0, bytes.Length);
byte[] footerBytes = this.GetFooterBytes();
if (footerBytes != null)
{
fs.Write(footerBytes, 0, footerBytes.Length);
}
}
}
private void WriteHeader(BaseFileAppender appender)
{
long fileLength;
DateTime lastWriteTime;
// Write header only on empty files or if file info cannot be obtained.
if (!appender.GetFileInfo(out lastWriteTime, out fileLength) || fileLength == 0)
{
byte[] headerBytes = this.GetHeaderBytes();
if (headerBytes != null)
{
appender.Write(headerBytes);
}
}
}
private bool GetFileInfo(string fileName, out DateTime lastWriteTime, out long fileLength)
{
foreach (BaseFileAppender appender in this.recentAppenders)
{
if (appender == null)
{
break;
}
if (appender.FileName == fileName)
{
appender.GetFileInfo(out lastWriteTime, out fileLength);
return true;
}
}
FileInfo fileInfo = new FileInfo(fileName);
if (fileInfo.Exists)
{
fileLength = fileInfo.Length;
lastWriteTime = fileInfo.LastWriteTimeUtc;
return true;
}
fileLength = -1;
lastWriteTime = DateTime.MinValue;
return false;
}
private byte[] GetLayoutBytes(Layout layout)
{
if (layout == null)
{
return null;
}
string renderedText = layout.Render(LogEventInfo.CreateNullEvent()) + this.NewLineChars;
return this.TransformBytes(this.Encoding.GetBytes(renderedText));
}
private void InvalidateCacheItem(string fileName)
{
for (int i = 0; i < this.recentAppenders.Length; ++i)
{
if (this.recentAppenders[i] == null)
{
break;
}
if (this.recentAppenders[i].FileName == fileName)
{
this.recentAppenders[i].Close();
for (int j = i; j < this.recentAppenders.Length - 1; ++j)
{
this.recentAppenders[j] = this.recentAppenders[j + 1];
}
this.recentAppenders[this.recentAppenders.Length - 1] = null;
break;
}
}
}
#if !SILVERLIGHT
private static string CleanupInvalidFileNameChars(string fileName)
{
var lastDirSeparator =
fileName.LastIndexOfAny(new[] { Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar });
var fileName1 = fileName.Substring(lastDirSeparator + 1);
var dirName = lastDirSeparator > 0 ? fileName.Substring(0, lastDirSeparator) : string.Empty;
fileName1 = Path.GetInvalidFileNameChars().Aggregate(fileName1, (current, c) => current.Replace(c, '_'));
return Path.Combine(dirName, fileName1);
}
#endif
private class DynamicFileArchive
{
public bool CreateDirectory { get; set; }
public int MaxArchiveFileToKeep { get; set; }
public DynamicFileArchive(int maxArchivedFiles)
: this()
{
this.MaxArchiveFileToKeep = maxArchivedFiles;
}
/// <summary>
/// Adds a file into archive.
/// </summary>
/// <param name="archiveFileName">File name of the archive</param>
/// <param name="fileName">Original file name</param>
/// <param name="createDirectory">Create a directory, if it does not exist</param>
/// <param name="enableCompression">Enables file compression</param>
/// <returns><c>true</c> if the file has been moved successfully; <c>false</c> otherwise</returns>
[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1031:DoNotCatchGeneralExceptionTypes")]
public bool Archive(string archiveFileName, string fileName, bool createDirectory, bool enableCompression)
{
if (MaxArchiveFileToKeep < 1)
{
InternalLogger.Warn("Archive is called. Even though the MaxArchiveFiles is set to less than 1");
return false;
}
if (!File.Exists(fileName))
{
InternalLogger.Error("Error while archiving, Source File : {0} Not found.", fileName);
return false;
}
DeleteOldArchiveFiles();
AddToArchive(archiveFileName, fileName, createDirectory, enableCompression);
archiveFileQueue.Enqueue(archiveFileName);
return true;
}
public DynamicFileArchive()
{
this.MaxArchiveFileToKeep = -1;
archiveFileQueue = new Queue<string>();
}
private readonly Queue<string> archiveFileQueue;
/// <summary>
///
/// </summary>
/// <param name="archiveFileName"></param>
/// <param name="fileName"></param>
/// <param name="createDirectory"></param>
/// <param name="enableCompression"></param>
private void AddToArchive(string archiveFileName, string fileName, bool createDirectory, bool enableCompression)
{
String alternativeFileName = archiveFileName;
if (archiveFileQueue.Contains(archiveFileName))
{
InternalLogger.Trace("AddToArchive file {0} already exist. Trying different file name.", archiveFileName);
alternativeFileName = FindSuitableFilename(archiveFileName, 1);
}
try
{
ArchiveFile(fileName, alternativeFileName, enableCompression);
}
catch (DirectoryNotFoundException)
{
if (createDirectory)
{
InternalLogger.Trace("AddToArchive directory not found. Creating {0}", Path.GetDirectoryName(archiveFileName));
try
{
Directory.CreateDirectory(Path.GetDirectoryName(archiveFileName));
ArchiveFile(fileName, alternativeFileName, enableCompression);
}
catch (Exception ex)
{
InternalLogger.Error("Cannot create archive directory, Exception : {0}", ex);
throw;
}
}
else
{
throw;
}
}
catch (Exception ex)
{
InternalLogger.Error("Cannot archive file {0}, Exception : {1}", fileName, ex);
throw;
}
}
/// <summary>
/// Removes old archive files when the number of files on the queue exceeds
/// MaxArchiveFileToKeep.
/// </summary>
private void DeleteOldArchiveFiles()
{
while (archiveFileQueue.Count >= MaxArchiveFileToKeep)
{
string oldestArchivedFileName = archiveFileQueue.Dequeue();
try
{
File.Delete(oldestArchivedFileName);
}
catch (Exception ex)
{
InternalLogger.Warn("Cannot delete old archive file : {0} , Exception : {1}", oldestArchivedFileName, ex);
}
}
}
/// <summary>
/// Creates a new unique filename by appending a number to it. This method tests that
/// the filename created does not exist.
///
/// This process can be slow as it increments the number sequentially from a specified
/// starting point until it finds a number which produces a filename which does not
/// exist.
///
/// Example:
/// Original Filename trace.log
/// Target Filename trace.15.log
/// </summary>
/// <param name="fileName">Original filename</param>
/// <param name="numberToStartWith">Number starting point</param>
/// <returns>File name suitable for archiving</returns>
private string FindSuitableFilename(string fileName, int numberToStartWith)
{
String targetFileName = Path.GetFileNameWithoutExtension(fileName) + ".{#}" + Path.GetExtension(fileName);
while (File.Exists(ReplaceNumberPattern(targetFileName, numberToStartWith)))
{
InternalLogger.Trace("AddToArchive file {0} already exist. Trying with different file name.", fileName);
numberToStartWith++;
}
return ReplaceNumberPattern(targetFileName, numberToStartWith);
}
}
private sealed class FileNameTemplate
{
/// <summary>
/// Characters determining the start of the <see cref="P:FileNameTemplate.Pattern"/>.
/// </summary>
public const string PatternStartCharacters = "{#";
/// <summary>
/// Characters determining the end of the <see cref="P:FileNameTemplate.Pattern"/>.
/// </summary>
public const string PatternEndCharacters = "#}";
/// <summary>
/// File name which is used as template for matching and replacements.
/// It is expected to contain a pattern to match.
/// </summary>
public string Template
{
get { return this.template; }
}
/// <summary>
/// Pattern found within <see cref="P:FileNameTemplate.Template"/>.
/// <see cref="String.Empty"/> is returned when the template does
/// not contain any pattern.
/// </summary>
public string Pattern
{
get
{
return this.pattern;
}
}
/// <summary>
/// The beginning position of the <see cref="P:FileNameTemplate.Pattern"/>
/// within the <see cref="P:FileNameTemplate.Template"/>. -1 is returned
/// when no pattern can be found.
/// </summary>
public int BeginAt
{
get
{
return startIndex;
}
}
/// <summary>
/// The ending position of the <see cref="P:FileNameTemplate.Pattern"/>
/// within the <see cref="P:FileNameTemplate.Template"/>. -1 is returned
/// when no pattern can be found.
/// </summary>
public int EndAt
{
get
{
return endIndex;
}
}
private readonly string template;
private readonly string pattern;
private readonly int startIndex;
private readonly int endIndex;
public FileNameTemplate(string template)
{
this.template = template;
this.startIndex = template.IndexOf(PatternStartCharacters, StringComparison.Ordinal);
this.endIndex = template.IndexOf(PatternEndCharacters, StringComparison.Ordinal) + PatternEndCharacters.Length;
this.pattern = this.HasPattern() ? template.Substring(this.startIndex, this.endIndex - this.startIndex) : String.Empty;
}
/// <summary>
/// Checks whether the <see cref="P:FileNameTemplate.Template"/>
/// contains the <see cref="P:FileNameTemplate.Pattern"/>.
/// </summary>
/// <returns>Returns <see langword="true" /> if pattern is found in
/// the template, <see langword="false" /> otherwise.</returns>
public bool HasPattern()
{
return (this.BeginAt != -1 && this.EndAt != -1 && this.BeginAt < this.EndAt);
}
/// <summary>
/// Replace the pattern with the specified String.
/// </summary>
/// <param name="replacementValue"></param>
/// <returns></returns>
public string ReplacePattern(string replacementValue)
{
return String.IsNullOrEmpty(replacementValue) ? this.Template : template.Substring(0, this.BeginAt) + replacementValue + template.Substring(this.EndAt);
}
}
}
}
| 1 | 11,011 |
Isn't it quite expensive to check whether the file exists on every write?
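One way to limit that cost, sketched here in Java purely for illustration (the project is C#, and the names below are invented): remember which file names have already been checked, so the existence test and the old-archive cleanup run at most once per file name rather than on every write.

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

class DateArchiveCleanup {
  // File names whose old date-based archives have already been cleaned up in this process.
  private final Set<String> alreadyHandled = new HashSet<>();

  // Runs the cleanup only the first time a given file name is written to.
  void cleanupOncePerFile(String fileName, Runnable deleteOldArchives) {
    if (!alreadyHandled.add(fileName)) {
      return; // hot path: no filesystem access after the first write
    }
    if (!Files.exists(Paths.get(fileName))) {
      deleteOldArchives.run();
    }
  }
}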
|
NLog-NLog
|
.cs
|
@@ -165,7 +165,9 @@ webdriver.Builder.prototype.build = function() {
} else {
var url = this.serverUrl_;
var client;
- if (url[0] == '/') {
+ if (webdriver.http.CorsClient.isAvailable()) {
+ client = new webdriver.http.XhrClient(url);
+ } else if (url[0] == '/') {
var origin = window.location.origin ||
(window.location.protocol + '//' + window.location.host);
client = new webdriver.http.XhrClient(origin + url);
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
goog.provide('webdriver.Builder');
goog.require('goog.Uri');
goog.require('goog.userAgent');
goog.require('webdriver.Capabilities');
goog.require('webdriver.FirefoxDomExecutor');
goog.require('webdriver.WebDriver');
goog.require('webdriver.http.CorsClient');
goog.require('webdriver.http.Executor');
goog.require('webdriver.http.XhrClient');
/**
* Creates new {@code webdriver.WebDriver} clients for use in a browser
* environment. Upon instantiation, each Builder will configure itself based
* on the following query parameters:
* <dl>
* <dt>wdurl
* <dd>Defines the WebDriver server to send commands to. If this is a
* relative URL, the builder will use the standard WebDriver wire
* protocol and a {@link webdriver.http.XhrClient}. Otherwise, it will
* use a {@link webdriver.http.CorsClient}; this only works when
* connecting to an instance of the Java Selenium server. The server URL
* may be changed using {@code #usingServer}.
*
* <dt>wdsid
* <dd>Defines the session to connect to. If omitted, will request a new
* session from the server.
* </dl>
*
* @param {Window=} opt_window The window to extract query parameters from.
* @constructor
* @final
* @struct
*/
webdriver.Builder = function(opt_window) {
var win = opt_window || window;
var data = new goog.Uri(win.location).getQueryData();
/** @private {string} */
this.serverUrl_ =
/** @type {string} */ (data.get(webdriver.Builder.SERVER_URL_PARAM,
webdriver.Builder.DEFAULT_SERVER_URL));
/** @private {string} */
this.sessionId_ =
/** @type {string} */ (data.get(webdriver.Builder.SESSION_ID_PARAM));
/** @private {!webdriver.Capabilities} */
this.capabilities_ = new webdriver.Capabilities();
};
/**
* Query parameter that defines which session to connect to.
* @type {string}
* @const
*/
webdriver.Builder.SESSION_ID_PARAM = 'wdsid';
/**
* Query parameter that defines the URL of the remote server to connect to.
* @type {string}
* @const
*/
webdriver.Builder.SERVER_URL_PARAM = 'wdurl';
/**
* The default server URL to use.
* @type {string}
* @const
*/
webdriver.Builder.DEFAULT_SERVER_URL = 'http://localhost:4444/wd/hub';
/**
* Configures which WebDriver server should be used for new sessions.
* @param {string} url URL of the server to use.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.usingServer = function(url) {
this.serverUrl_ = url;
return this;
};
/**
* @return {string} The URL of the WebDriver server this instance is configured
* to use.
*/
webdriver.Builder.prototype.getServerUrl = function() {
return this.serverUrl_;
};
/**
* Configures the builder to create a client that will use an existing WebDriver
* session.
* @param {string} id The existing session ID to use.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.usingSession = function(id) {
this.sessionId_ = id;
return this;
};
/**
* @return {string} The ID of the session, if any, this builder is configured
* to reuse.
*/
webdriver.Builder.prototype.getSession = function() {
return this.sessionId_;
};
/**
* Sets the desired capabilities when requesting a new session. This will
* overwrite any previously set desired capabilities.
* @param {!(Object|webdriver.Capabilities)} capabilities The desired
* capabilities for a new session.
* @return {!webdriver.Builder} This Builder instance for chain calling.
*/
webdriver.Builder.prototype.withCapabilities = function(capabilities) {
this.capabilities_ = new webdriver.Capabilities(capabilities);
return this;
};
/**
* Builds a new {@link webdriver.WebDriver} instance using this builder's
* current configuration.
* @return {!webdriver.WebDriver} A new WebDriver client.
*/
webdriver.Builder.prototype.build = function() {
if (goog.userAgent.GECKO && document.readyState != 'complete') {
throw Error('Cannot create driver instance before window.onload');
}
var executor;
if (webdriver.FirefoxDomExecutor.isAvailable()) {
executor = new webdriver.FirefoxDomExecutor();
return webdriver.WebDriver.createSession(executor, this.capabilities_);
} else {
var url = this.serverUrl_;
var client;
if (url[0] == '/') {
var origin = window.location.origin ||
(window.location.protocol + '//' + window.location.host);
client = new webdriver.http.XhrClient(origin + url);
} else {
client = new webdriver.http.CorsClient(url);
}
executor = new webdriver.http.Executor(client);
if (this.sessionId_) {
return webdriver.WebDriver.attachToSession(executor, this.sessionId_);
} else {
throw new Error('Unable to create a new client for this browser. The ' +
'WebDriver session ID has not been defined.');
}
}
};
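// Rough usage sketch (not part of the original file; the server URL and the
// session id are invented): on a page loaded as e.g.
// test.html?wdurl=/wd/hub&wdsid=abc123 the Builder picks both values up from
// the query string, so build() alone would be enough; the setters below simply
// override them explicitly.
function exampleBuildDriver() {
  var driver = new webdriver.Builder()
      .usingServer('/wd/hub')    // relative URL -> XhrClient against this page's origin
      .usingSession('abc123')    // reuse an existing server-side session
      .build();
  return driver;
}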
| 1 | 11,962 |
Please hide this behind a flag and add a setter function to the builder.
|
SeleniumHQ-selenium
|
py
|
@@ -1009,6 +1009,7 @@ func (vd *volAPI) Routes() []*Route {
{verb: "POST", path: backupPath("/restore", volume.APIVersion), fn: vd.cloudBackupRestore},
{verb: "GET", path: backupPath("", volume.APIVersion), fn: vd.cloudBackupEnumerate},
{verb: "DELETE", path: backupPath("", volume.APIVersion), fn: vd.cloudBackupDelete},
+ {verb: "DELETE", path: backupPath("/all", volume.APIVersion), fn: vd.cloudBackupDeleteAll},
{verb: "GET", path: backupPath("/status", volume.APIVersion), fn: vd.cloudBackupStatus},
{verb: "GET", path: backupPath("/catalog", volume.APIVersion), fn: vd.cloudBackupCatalog},
{verb: "GET", path: backupPath("/history", volume.APIVersion), fn: vd.cloudBackupHistory},
| 1 |
package server
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/api/errors"
"github.com/libopenstorage/openstorage/cluster"
"github.com/libopenstorage/openstorage/volume"
"github.com/libopenstorage/openstorage/volume/drivers"
)
const schedDriverPostFix = "-sched"
type volAPI struct {
restBase
}
func responseStatus(err error) string {
if err == nil {
return ""
}
return err.Error()
}
func newVolumeAPI(name string) restServer {
return &volAPI{restBase{version: volume.APIVersion, name: name}}
}
func (vd *volAPI) String() string {
return vd.name
}
func (vd *volAPI) getVolDriver(r *http.Request) (volume.VolumeDriver, error) {
// Check if the driver has registered by its user agent name
userAgent := r.Header.Get("User-Agent")
if len(userAgent) > 0 {
clientName := strings.Split(userAgent, "/")
if len(clientName) > 0 {
d, err := volumedrivers.Get(clientName[0])
if err == nil {
return d, nil
}
}
}
// Check if the driver has registered a scheduler-based driver
d, err := volumedrivers.Get(vd.name + schedDriverPostFix)
if err == nil {
return d, nil
}
// default
return volumedrivers.Get(vd.name)
}
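// As a hypothetical example of the lookup order above: a request carrying the
// header "User-Agent: pxd/1.0" would first resolve the driver registered as
// "pxd", then fall back to "<name>-sched", and finally to the driver named vd.name.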
func (vd *volAPI) parseID(r *http.Request) (string, error) {
vars := mux.Vars(r)
if id, ok := vars["id"]; ok {
return string(id), nil
}
return "", fmt.Errorf("could not parse snap ID")
}
func (vd *volAPI) nodeIPtoIds(nodes []string) ([]string, error) {
nodeIds := make([]string, 0)
// Get cluster instance
c, err := cluster.Inst()
if err != nil {
return nodeIds, err
}
if c == nil {
return nodeIds, fmt.Errorf("failed to get cluster instance.")
}
for _, idIp := range nodes {
if idIp != "" {
id, err := c.GetNodeIdFromIp(idIp)
if err != nil {
return nodeIds, err
}
nodeIds = append(nodeIds, id)
}
}
return nodeIds, err
}
// Convert any replica set node values which are IPs to the corresponding Node ID.
// Update the replica set node list.
func (vd *volAPI) updateReplicaSpecNodeIPstoIds(rspecRef *api.ReplicaSet) error {
if rspecRef != nil && len(rspecRef.Nodes) > 0 {
nodeIds, err := vd.nodeIPtoIds(rspecRef.Nodes)
if err != nil {
return err
}
if len(nodeIds) > 0 {
rspecRef.Nodes = nodeIds
}
}
return nil
}
// swagger:operation POST /osd-volumes volume createVolume
//
// Creates a single volume with given spec.
//
// ---
// produces:
// - application/json
// parameters:
// - name: spec
// in: body
// description: spec to create volume with
// required: true
// schema:
// "$ref": "#/definitions/VolumeCreateRequest"
// responses:
// '200':
// description: volume create response
// schema:
// "$ref": "#/definitions/VolumeCreateResponse"
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeCreateResponse"
func (vd *volAPI) create(w http.ResponseWriter, r *http.Request) {
var dcRes api.VolumeCreateResponse
var dcReq api.VolumeCreateRequest
method := "create"
if err := json.NewDecoder(r.Body).Decode(&dcReq); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
if dcReq.Spec != nil {
if err = vd.updateReplicaSpecNodeIPstoIds(dcReq.Spec.ReplicaSet); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
}
id, err := d.Create(dcReq.Locator, dcReq.Source, dcReq.Spec)
dcRes.VolumeResponse = &api.VolumeResponse{Error: responseStatus(err)}
dcRes.Id = id
vd.logRequest(method, id).Infoln("")
json.NewEncoder(w).Encode(&dcRes)
}
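// Hypothetical client-side sketch (not used by this server code): it shows the
// request/response shape handled by create() above. The server address, the
// volume name and the exact pointer layout of api.VolumeCreateRequest are
// assumptions for illustration only.
func exampleCreateVolumeCall() (string, error) {
	req := api.VolumeCreateRequest{
		Locator: &api.VolumeLocator{Name: "example-volume"},
	}
	body, err := json.Marshal(&req)
	if err != nil {
		return "", err
	}
	resp, err := http.Post("http://localhost:9001"+volPath("", volume.APIVersion),
		"application/json", strings.NewReader(string(body)))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var createResp api.VolumeCreateResponse
	if err := json.NewDecoder(resp.Body).Decode(&createResp); err != nil {
		return "", err
	}
	return createResp.Id, nil
}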
func processErrorForVolSetResponse(action *api.VolumeStateAction, err error, resp *api.VolumeSetResponse) {
if err == nil || resp == nil {
return
}
if action != nil && (action.Mount == api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF ||
action.Attach == api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF) {
switch err.(type) {
case *errors.ErrNotFound:
resp.VolumeResponse = &api.VolumeResponse{}
resp.Volume = &api.Volume{}
default:
resp.VolumeResponse = &api.VolumeResponse{
Error: err.Error(),
}
}
} else if err != nil {
resp.VolumeResponse = &api.VolumeResponse{
Error: err.Error(),
}
}
}
// swagger:operation PUT /osd-volumes/{id} volume setVolume
//
// Updates a single volume with given spec.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// - name: spec
// in: body
// description: spec to set volume with
// required: true
// schema:
// "$ref": "#/definitions/VolumeSetRequest"
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/VolumeSetResponse"
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeSetResponse"
func (vd *volAPI) volumeSet(w http.ResponseWriter, r *http.Request) {
var (
volumeID string
err error
req api.VolumeSetRequest
resp api.VolumeSetResponse
)
method := "volumeSet"
err = json.NewDecoder(r.Body).Decode(&req)
if err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
if volumeID, err = vd.parseID(r); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
setActions := ""
if req.Action != nil {
setActions = fmt.Sprintf("Mount=%v Attach=%v", req.Action.Mount, req.Action.Attach)
}
vd.logRequest(method, string(volumeID)).Infoln(setActions)
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
if req.Locator != nil || req.Spec != nil {
if req.Spec != nil {
if err = vd.updateReplicaSpecNodeIPstoIds(req.Spec.ReplicaSet); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
}
err = d.Set(volumeID, req.Locator, req.Spec)
}
for err == nil && req.Action != nil {
if req.Action.Attach != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
if req.Action.Attach == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
_, err = d.Attach(volumeID, req.Options)
} else {
err = d.Detach(volumeID, req.Options)
}
if err != nil {
break
}
}
if req.Action.Mount != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
if req.Action.Mount == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
if req.Action.MountPath == "" {
err = fmt.Errorf("Invalid mount path")
break
}
err = d.Mount(volumeID, req.Action.MountPath, req.Options)
} else {
err = d.Unmount(volumeID, req.Action.MountPath, req.Options)
}
if err != nil {
break
}
}
break
}
if err != nil {
processErrorForVolSetResponse(req.Action, err, &resp)
} else {
v, err := d.Inspect([]string{volumeID})
if err != nil {
processErrorForVolSetResponse(req.Action, err, &resp)
} else if v == nil || len(v) != 1 {
processErrorForVolSetResponse(req.Action, &errors.ErrNotFound{Type: "Volume", ID: volumeID}, &resp)
} else {
v0 := v[0]
resp.Volume = v0
}
}
json.NewEncoder(w).Encode(resp)
}
// swagger:operation GET /osd-volumes/{id} volume inspectVolume
//
// Inspect volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume get response
// schema:
// "$ref": "#/definitions/Volume"
func (vd *volAPI) inspect(w http.ResponseWriter, r *http.Request) {
var err error
var volumeID string
method := "inspect"
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse parse volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
dk, err := d.Inspect([]string{volumeID})
if err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusNotFound)
return
}
json.NewEncoder(w).Encode(dk)
}
// swagger:operation DELETE /osd-volumes/{id} volume deleteVolume
//
// Delete volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/VolumeResponse"
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeResponse"
func (vd *volAPI) delete(w http.ResponseWriter, r *http.Request) {
var volumeID string
var err error
method := "delete"
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse parse volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
vd.logRequest(method, volumeID).Infoln("")
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
volumeResponse := &api.VolumeResponse{}
if err := d.Delete(volumeID); err != nil {
volumeResponse.Error = err.Error()
}
json.NewEncoder(w).Encode(volumeResponse)
}
// swagger:operation GET /osd-volumes volume enumerateVolumes
//
// Enumerate all volumes
//
// ---
// consumes:
// - multipart/form-data
// produces:
// - application/json
// parameters:
// - name: Name
// in: query
// description: User specified volume name (Case Sensitive)
// required: false
// type: string
// - name: Label
// in: formData
// description: |
// Comma separated name value pairs
// example: {"label1","label2"}
// required: false
// type: string
// - name: ConfigLabel
// in: formData
// description: |
// Comma separated name value pairs
// example: {"label1","label2"}
// required: false
// type: string
// - name: VolumeID
// in: query
// description: Volume UUID
// required: false
// type: string
// format: uuid
// responses:
// '200':
// description: an array of volumes
// schema:
// type: array
// items:
// $ref: '#/definitions/Volume'
func (vd *volAPI) enumerate(w http.ResponseWriter, r *http.Request) {
var locator api.VolumeLocator
var configLabels map[string]string
var err error
var vols []*api.Volume
method := "enumerate"
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
params := r.URL.Query()
v := params[string(api.OptName)]
if v != nil {
locator.Name = v[0]
}
v = params[string(api.OptLabel)]
if v != nil {
if err = json.Unmarshal([]byte(v[0]), &locator.VolumeLabels); err != nil {
e := fmt.Errorf("Failed to parse parse VolumeLabels: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
}
}
v = params[string(api.OptConfigLabel)]
if v != nil {
if err = json.Unmarshal([]byte(v[0]), &configLabels); err != nil {
e := fmt.Errorf("Failed to parse parse configLabels: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
}
}
v = params[string(api.OptVolumeID)]
if v != nil {
ids := make([]string, len(v))
for i, s := range v {
ids[i] = string(s)
}
vols, err = d.Inspect(ids)
if err != nil {
e := fmt.Errorf("Failed to inspect volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
} else {
vols, err = d.Enumerate(&locator, configLabels)
if err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusInternalServerError)
return
}
}
json.NewEncoder(w).Encode(vols)
}
// swagger:operation POST /osd-snapshots snapshot createSnap
//
// Take a snapshot of volume in SnapCreateRequest
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: query
// description: id to get volume with
// required: true
// type: integer
// - name: spec
// in: body
// description: spec to create snap with
// required: true
// schema:
// "$ref": "#/definitions/SnapCreateRequest"
// responses:
// '200':
// description: an array of volumes
// schema:
// "$ref": '#/definitions/SnapCreateResponse'
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/SnapCreateResponse"
func (vd *volAPI) snap(w http.ResponseWriter, r *http.Request) {
var snapReq api.SnapCreateRequest
var snapRes api.SnapCreateResponse
method := "snap"
if err := json.NewDecoder(r.Body).Decode(&snapReq); err != nil {
vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
vd.logRequest(method, string(snapReq.Id)).Infoln("")
id, err := d.Snapshot(snapReq.Id, snapReq.Readonly, snapReq.Locator)
snapRes.VolumeCreateResponse = &api.VolumeCreateResponse{
Id: id,
VolumeResponse: &api.VolumeResponse{
Error: responseStatus(err),
},
}
json.NewEncoder(w).Encode(&snapRes)
}
// swagger:operation POST /osd-snapshots/restore/{id} snapshot restoreSnap
//
// Restore snapshot with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id of snapshot to restore
// required: true
// type: integer
// responses:
// '200':
// description: Restored volume
// schema:
// "$ref": '#/definitions/VolumeResponse'
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeResponse"
func (vd *volAPI) restore(w http.ResponseWriter, r *http.Request) {
var volumeID, snapID string
var err error
method := "restore"
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse parse volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
params := r.URL.Query()
v := params[api.OptSnapID]
if v != nil {
snapID = v[0]
} else {
vd.sendError(vd.name, method, w, "Missing "+api.OptSnapID+" param",
http.StatusBadRequest)
return
}
volumeResponse := &api.VolumeResponse{}
if err := d.Restore(volumeID, snapID); err != nil {
volumeResponse.Error = responseStatus(err)
}
json.NewEncoder(w).Encode(volumeResponse)
}
// swagger:operation GET /osd-snapshots snapshot enumerateSnaps
//
// Enumerate snapshots.
//
// ---
// consumes:
// - multipart/form-data
// produces:
// - application/json
// parameters:
// - name: name
// in: query
// description: Volume name that maps to this snap
// required: false
// type: string
// - name: VolumeLabels
// in: formData
// description: |
// Comma separated volume labels
// example: {"label1","label2"}
// required: false
// type: string
// - name: SnapLabels
// in: formData
// description: |
// Comma separated snap labels
// example: {"label1","label2"}
// required: false
// type: string
// - name: uuid
// in: query
// description: Snap UUID
// required: false
// type: string
// format: uuid
// responses:
// '200':
// description: an array of snapshots
// schema:
// type: array
// items:
// $ref: '#/definitions/Volume'
func (vd *volAPI) snapEnumerate(w http.ResponseWriter, r *http.Request) {
var err error
var labels map[string]string
var ids []string
method := "snapEnumerate"
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
params := r.URL.Query()
v := params[string(api.OptLabel)]
if v != nil {
if err = json.Unmarshal([]byte(v[0]), &labels); err != nil {
e := fmt.Errorf("Failed to parse parse VolumeLabels: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
}
}
v, ok := params[string(api.OptVolumeID)]
if v != nil && ok {
ids = make([]string, len(v))
for i, s := range v {
ids[i] = string(s)
}
}
snaps, err := d.SnapEnumerate(ids, labels)
if err != nil {
e := fmt.Errorf("Failed to enumerate snaps: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
json.NewEncoder(w).Encode(snaps)
}
// swagger:operation GET /osd-volumes/stats/{id} volume statsVolume
//
// Get stats for volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/Stats"
func (vd *volAPI) stats(w http.ResponseWriter, r *http.Request) {
var volumeID string
var err error
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse volumeID: %s", err.Error())
http.Error(w, e.Error(), http.StatusBadRequest)
return
}
params := r.URL.Query()
// By default always report /proc/diskstats style stats.
cumulative := true
if opt, ok := params[string(api.OptCumulative)]; ok {
if boolValue, err := strconv.ParseBool(strings.Join(opt[:], "")); err != nil {
e := fmt.Errorf("Failed to parse %s option: %s",
api.OptCumulative, err.Error())
http.Error(w, e.Error(), http.StatusBadRequest)
return
} else {
cumulative = boolValue
}
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
stats, err := d.Stats(volumeID, cumulative)
if err != nil {
e := fmt.Errorf("Failed to get stats: %s", err.Error())
http.Error(w, e.Error(), http.StatusBadRequest)
return
}
json.NewEncoder(w).Encode(stats)
}
// swagger:operation GET /osd-volumes/usedsize/{id} volume usedSizeVolume
//
// Get Used size of volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// type: integer
// format: int64
func (vd *volAPI) usedsize(w http.ResponseWriter, r *http.Request) {
var volumeID string
var err error
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse volumeID: %s", err.Error())
http.Error(w, e.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
used, err := d.UsedSize(volumeID)
if err != nil {
e := fmt.Errorf("Failed to get used size: %s", err.Error())
http.Error(w, e.Error(), http.StatusBadRequest)
return
}
json.NewEncoder(w).Encode(used)
}
// swagger:operation POST /osd-volumes/requests/{id} volume requestsVolume
//
// Get Requests for volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/ActiveRequests"
func (vd *volAPI) requests(w http.ResponseWriter, r *http.Request) {
var err error
method := "requests"
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
requests, err := d.GetActiveRequests()
if err != nil {
e := fmt.Errorf("Failed to get active requests: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
json.NewEncoder(w).Encode(requests)
}
// swagger:operation GET /osd-volumes/quiesce/{id} volume quiesceVolume
//
// Quiesce volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/VolumeResponse"
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeResponse"
func (vd *volAPI) quiesce(w http.ResponseWriter, r *http.Request) {
var volumeID string
var err error
method := "quiesce"
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse parse volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
params := r.URL.Query()
timeoutStr := params[api.OptTimeoutSec]
var timeoutSec uint64
if timeoutStr != nil {
var err error
timeoutSec, err = strconv.ParseUint(timeoutStr[0], 10, 64)
if err != nil {
vd.sendError(vd.name, method, w, api.OptTimeoutSec+" must be int",
http.StatusBadRequest)
return
}
}
quiesceIdParam := params[api.OptQuiesceID]
var quiesceId string
if len(quiesceIdParam) > 0 {
quiesceId = quiesceIdParam[0]
}
volumeResponse := &api.VolumeResponse{}
if err := d.Quiesce(volumeID, timeoutSec, quiesceId); err != nil {
volumeResponse.Error = responseStatus(err)
}
json.NewEncoder(w).Encode(volumeResponse)
}
// swagger:operation POST /osd-volumes/unquiesce/{id} volume unquiesceVolume
//
// Unquiesce volume with specified id.
//
// ---
// produces:
// - application/json
// parameters:
// - name: id
// in: path
// description: id to get volume with
// required: true
// type: integer
// responses:
// '200':
// description: volume set response
// schema:
// "$ref": "#/definitions/VolumeResponse"
// default:
// description: unexpected error
// schema:
// "$ref": "#/definitions/VolumeResponse"
func (vd *volAPI) unquiesce(w http.ResponseWriter, r *http.Request) {
var volumeID string
var err error
method := "unquiesce"
if volumeID, err = vd.parseID(r); err != nil {
e := fmt.Errorf("Failed to parse parse volumeID: %s", err.Error())
vd.sendError(vd.name, method, w, e.Error(), http.StatusBadRequest)
return
}
d, err := vd.getVolDriver(r)
if err != nil {
notFound(w, r)
return
}
volumeResponse := &api.VolumeResponse{}
if err := d.Unquiesce(volumeID); err != nil {
volumeResponse.Error = responseStatus(err)
}
json.NewEncoder(w).Encode(volumeResponse)
}
// swagger:operation GET /osd-volumes/versions volume listVersions
//
// Lists API versions supported by this volumeDriver.
//
// ---
// produces:
// - application/json
// responses:
// '200':
// description: Supported versions
// schema:
// type: array
// items:
// type: string
func (vd *volAPI) versions(w http.ResponseWriter, r *http.Request) {
versions := []string{
volume.APIVersion,
// Update supported versions by adding them here
}
json.NewEncoder(w).Encode(versions)
}
func volVersion(route, version string) string {
if version == "" {
return "/" + route
} else {
return "/" + version + "/" + route
}
}
func volPath(route, version string) string {
return volVersion(api.OsdVolumePath+route, version)
}
func snapPath(route, version string) string {
return volVersion(api.OsdSnapshotPath+route, version)
}
func credsPath(route, version string) string {
return volVersion(api.OsdCredsPath+route, version)
}
func backupPath(route, version string) string {
return volVersion(api.OsdBackupPath+route, version)
}
func (vd *volAPI) Routes() []*Route {
return []*Route{
{verb: "GET", path: "/" + api.OsdVolumePath + "/versions", fn: vd.versions},
{verb: "POST", path: volPath("", volume.APIVersion), fn: vd.create},
{verb: "PUT", path: volPath("/{id}", volume.APIVersion), fn: vd.volumeSet},
{verb: "GET", path: volPath("", volume.APIVersion), fn: vd.enumerate},
{verb: "GET", path: volPath("/{id}", volume.APIVersion), fn: vd.inspect},
{verb: "DELETE", path: volPath("/{id}", volume.APIVersion), fn: vd.delete},
{verb: "GET", path: volPath("/stats", volume.APIVersion), fn: vd.stats},
{verb: "GET", path: volPath("/stats/{id}", volume.APIVersion), fn: vd.stats},
{verb: "GET", path: volPath("/usedsize", volume.APIVersion), fn: vd.usedsize},
{verb: "GET", path: volPath("/usedsize/{id}", volume.APIVersion), fn: vd.usedsize},
{verb: "GET", path: volPath("/requests", volume.APIVersion), fn: vd.requests},
{verb: "GET", path: volPath("/requests/{id}", volume.APIVersion), fn: vd.requests},
{verb: "POST", path: volPath("/quiesce/{id}", volume.APIVersion), fn: vd.quiesce},
{verb: "POST", path: volPath("/unquiesce/{id}", volume.APIVersion), fn: vd.unquiesce},
{verb: "POST", path: snapPath("", volume.APIVersion), fn: vd.snap},
{verb: "GET", path: snapPath("", volume.APIVersion), fn: vd.snapEnumerate},
{verb: "POST", path: snapPath("/restore/{id}", volume.APIVersion), fn: vd.restore},
{verb: "GET", path: credsPath("", volume.APIVersion), fn: vd.credsEnumerate},
{verb: "POST", path: credsPath("", volume.APIVersion), fn: vd.credsCreate},
{verb: "DELETE", path: credsPath("/{uuid}", volume.APIVersion), fn: vd.credsDelete},
{verb: "PUT", path: credsPath("/validate/{uuid}", volume.APIVersion), fn: vd.credsValidate},
{verb: "POST", path: backupPath("", volume.APIVersion), fn: vd.cloudBackupCreate},
{verb: "POST", path: backupPath("/restore", volume.APIVersion), fn: vd.cloudBackupRestore},
{verb: "GET", path: backupPath("", volume.APIVersion), fn: vd.cloudBackupEnumerate},
{verb: "DELETE", path: backupPath("", volume.APIVersion), fn: vd.cloudBackupDelete},
{verb: "GET", path: backupPath("/status", volume.APIVersion), fn: vd.cloudBackupStatus},
{verb: "GET", path: backupPath("/catalog", volume.APIVersion), fn: vd.cloudBackupCatalog},
{verb: "GET", path: backupPath("/history", volume.APIVersion), fn: vd.cloudBackupHistory},
{verb: "PUT", path: backupPath("/statechange", volume.APIVersion), fn: vd.cloudBackupStateChange},
{verb: "POST", path: backupPath("/sched", volume.APIVersion), fn: vd.cloudBackupSchedCreate},
{verb: "DELETE", path: backupPath("/sched", volume.APIVersion), fn: vd.cloudBackupSchedDelete},
{verb: "GET", path: backupPath("/sched", volume.APIVersion), fn: vd.cloudBackupSchedEnumerate},
}
}
| 1 | 6,632 |
The old API should be deprecated if we don't intend to use it anymore. Again, this should happen through the swagger API and then be removed in the next release. We should also release-note this as a breaking change. (Same with all the other cloudsnap API renames.)
|
libopenstorage-openstorage
|
go
|
@@ -59,7 +59,7 @@ public class VertxRestTransport extends AbstractTransport {
}
if (!NetUtils.canTcpListen(ep.getSocketAddress().getAddress(), ep.getPort())) {
- log.info("can not listen {}, skip {}.", ep.getSocketAddress(), this.getClass().getName());
+ log.warn("can not listen {}, port may have been occupied, this may cause client report error when being accessed. skip {}.", ep.getSocketAddress(), this.getClass().getName());
return false;
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.transport.rest.vertx;
import org.apache.servicecomb.core.Const;
import org.apache.servicecomb.core.Invocation;
import org.apache.servicecomb.core.transport.AbstractTransport;
import org.apache.servicecomb.foundation.common.net.NetUtils;
import org.apache.servicecomb.foundation.common.net.URIEndpointObject;
import org.apache.servicecomb.foundation.vertx.SimpleJsonObject;
import org.apache.servicecomb.foundation.vertx.VertxUtils;
import org.apache.servicecomb.swagger.invocation.AsyncResponse;
import org.apache.servicecomb.transport.rest.client.RestTransportClient;
import org.apache.servicecomb.transport.rest.client.RestTransportClientManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import io.vertx.core.DeploymentOptions;
@Component
public class VertxRestTransport extends AbstractTransport {
private static final Logger log = LoggerFactory.getLogger(VertxRestTransport.class);
private RestTransportClient restClient;
@Override
public String getName() {
return Const.RESTFUL;
}
@Override
public int getOrder() {
return -1000;
}
@Override
public boolean canInit() {
setListenAddressWithoutSchema(TransportConfig.getAddress());
URIEndpointObject ep = (URIEndpointObject) getEndpoint().getAddress();
if (ep == null) {
return true;
}
if (!NetUtils.canTcpListen(ep.getSocketAddress().getAddress(), ep.getPort())) {
log.info("can not listen {}, skip {}.", ep.getSocketAddress(), this.getClass().getName());
return false;
}
return true;
}
@Override
public boolean init() throws Exception {
restClient = RestTransportClientManager.INSTANCE.getRestClient();
// Deploy the transport server
DeploymentOptions options = new DeploymentOptions().setInstances(TransportConfig.getThreadCount());
SimpleJsonObject json = new SimpleJsonObject();
json.put(ENDPOINT_KEY, getEndpoint());
json.put(RestTransportClient.class.getName(), restClient);
options.setConfig(json);
return VertxUtils.blockDeploy(transportVertx, TransportConfig.getRestServerVerticle(), options);
}
@Override
public void send(Invocation invocation, AsyncResponse asyncResp) throws Exception {
restClient.send(invocation, asyncResp);
}
}
| 1 | 9,806 |
So strange...... canTcpListen is there to detect whether this transport can init. When customers deploy both the vertx and the servlet rest transport, we must detect which one will be used; it is only a problem if both of them refuse to init. We have already printed that......
|
apache-servicecomb-java-chassis
|
java
|
@@ -38,6 +38,8 @@ import com.salesforce.androidsdk.util.JSONObjectHelper;
*/
public class QuerySpec {
private static final String SELECT_COUNT = "SELECT count(*) ";
+ private static final String SELECT_COUNT_FORM = SELECT_COUNT + " FROM (%s)";
+
// Constants
private static final String SELECT = "SELECT ";
private static final String FROM = "FROM ";
| 1 |
/*
* Copyright (c) 2012, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import org.json.JSONException;
import org.json.JSONObject;
import com.salesforce.androidsdk.smartstore.phonegap.SmartStorePlugin;
import com.salesforce.androidsdk.smartstore.store.SmartStore.SmartStoreException;
import com.salesforce.androidsdk.util.JSONObjectHelper;
/**
* Simple class to represent a query spec
*/
public class QuerySpec {
private static final String SELECT_COUNT = "SELECT count(*) ";
// Constants
private static final String SELECT = "SELECT ";
private static final String FROM = "FROM ";
private static final String WHERE = "WHERE ";
private static final String ORDER_BY = "ORDER BY ";
// Key members
public final QueryType queryType;
public final int pageSize;
public final String smartSql;
public final String countSmartSql;
// Exact/Range/Like
public final String soupName;
public final String path;
public final Order order;
// Exact
public final String matchKey;
// Range
public final String beginKey;
public final String endKey;
// Like
public final String likeKey;
// Private constructor for soup query spec
private QuerySpec(String soupName, String path, QueryType queryType, String matchKey, String beginKey, String endKey, String likeKey, Order order, int pageSize) {
this.soupName = soupName;
this.path = path;
this.queryType = queryType;
this.matchKey = matchKey;
this.beginKey = beginKey;
this.endKey = endKey;
this.likeKey = likeKey;
this.order = order;
this.pageSize = pageSize;
this.smartSql = computeSmartSql();
this.countSmartSql = computeCountSql();
}
// Private constructor for smart query spec
private QuerySpec(String smartSql, int pageSize) {
this.smartSql = smartSql;
this.countSmartSql = computeCountSql(smartSql);
this.pageSize = pageSize;
this.queryType = QueryType.smart;
// Not applicable
this.soupName = null;
this.path = null;
this.matchKey = null;
this.beginKey = null;
this.endKey = null;
this.likeKey = null;
this.order = null;
}
/**
* Return a query spec for an all query
* @param soupName
* @param path
* @param order
* @param pageSize
* @return
*/
public static QuerySpec buildAllQuerySpec(String soupName, String path, Order order, int pageSize) {
return buildRangeQuerySpec(soupName, path, null, null, order, pageSize);
}
/**
* Return a query spec for an exact match query
* @param soupName
* @param path
* @param exactMatchKey
* @param pageSize
* @return
*/
public static QuerySpec buildExactQuerySpec(String soupName, String path, String exactMatchKey, int pageSize) {
return new QuerySpec(soupName, path, QueryType.exact, exactMatchKey, null, null, null, Order.ascending /* meaningless - all rows will have the same value in the indexed column*/, pageSize);
}
/**
* Return a query spec for a range query
* @param soupName
* @param path
* @param beginKey
* @param endKey
* @param order
* @param pageSize
* @return
*/
public static QuerySpec buildRangeQuerySpec(String soupName, String path, String beginKey, String endKey, Order order, int pageSize) {
return new QuerySpec(soupName, path, QueryType.range, null, beginKey, endKey, null, order, pageSize);
}
/**
* Return a query spec for a like query
* @param soupName
* @param path
* @param likeKey
* @param order
* @param pageSize
* @return
*/
public static QuerySpec buildLikeQuerySpec(String soupName, String path, String likeKey, Order order, int pageSize) {
return new QuerySpec(soupName, path, QueryType.like, null, null, null, likeKey, order, pageSize);
}
/**
* Return a query spec for a smart query
* @param smartSql
* @param pageSize
* @return
*/
public static QuerySpec buildSmartQuerySpec(String smartSql, int pageSize) {
return new QuerySpec(smartSql, pageSize);
}
/**
* Compute smartSql for exact/like/range queries
*/
private String computeSmartSql() {
String selectClause = computeSelectClause();
String fromClause = computeFromClause();
String whereClause = computeWhereClause();
String orderClause = computeOrderClause();
return selectClause + fromClause + whereClause + orderClause;
}
/**
* Compute countSmartSql for exact/like/range queries
*/
private String computeCountSql() {
String fromClause = computeFromClause();
String whereClause = computeWhereClause();
return SELECT_COUNT + fromClause + whereClause;
}
/**
* Compute countSmartSql for smart queries
*/
private String computeCountSql(String smartSql) {
int fromLocation = smartSql.toLowerCase().indexOf(" from ");
return SELECT_COUNT + smartSql.substring(fromLocation);
}
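// Hypothetical worked example (soup name invented): for
//   buildSmartQuerySpec("SELECT {employees:name} FROM {employees}", 10)
// the lower-cased indexOf(" from ") above locates the FROM clause, and the
// derived count query becomes
//   "SELECT count(*)  FROM {employees}"
// i.e. everything before the FROM clause is replaced by SELECT count(*).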
/**
* @return select clause for exact/like/range queries
*/
private String computeSelectClause() {
return SELECT + computeFieldReference(SmartSqlHelper.SOUP) + " ";
}
/**
* @return from clause for exact/like/range queries
*/
private String computeFromClause() {
return FROM + computeSoupReference() + " ";
}
/**
* @return where clause for exact/like/range queries
*/
private String computeWhereClause() {
if (path == null) return "";
String field = computeFieldReference(path);
String pred = "";
switch(queryType) {
case exact: pred = field + " = ? "; break;
case like: pred = field + " LIKE ? "; break;
case range:
if (beginKey == null && endKey == null) { break; }
if (endKey == null) { pred = field + " >= ? "; break; }
if (beginKey == null) { pred = field + " <= ? "; break; }
else { pred = field + " >= ? AND " + field + " <= ? "; break; }
default:
throw new SmartStoreException("Fell through switch: " + queryType);
}
return (pred.equals("") ? "" : WHERE + pred);
}
/**
* @return order clause for exact/like/range queries
*/
private String computeOrderClause() {
if (path == null) return "";
return ORDER_BY + computeFieldReference(path) + " " + order.sql + " ";
}
/**
* @return soup reference for smart sql query
*/
private String computeSoupReference() {
return "{" + soupName + "}";
}
/**
* @param field
* @return field reference for smart sql query
*/
private String computeFieldReference(String field) {
return "{" + soupName + ":" + field + "}";
}
/**
* @return args going with the sql predicate returned by getKeyPredicate
*/
public String[] getArgs() {
switch(queryType) {
case exact:
return new String[] {matchKey};
case like:
return new String[] {likeKey};
case range:
if (beginKey == null && endKey == null)
return null;
else if (endKey == null)
return new String[] {beginKey};
else if (beginKey == null)
return new String[] {endKey};
else
return new String[] {beginKey, endKey};
case smart:
return null;
default:
throw new SmartStoreException("Fell through switch: " + queryType);
}
}
/**
* @param soupName
* @param querySpecJson
* @return
* @throws JSONException
*/
public static QuerySpec fromJSON(String soupName, JSONObject querySpecJson)
throws JSONException {
QueryType queryType = QueryType.valueOf(querySpecJson.getString(SmartStorePlugin.QUERY_TYPE));
String path = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.INDEX_PATH);
String matchKey = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.MATCH_KEY);
String beginKey = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.BEGIN_KEY);
String endKey = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.END_KEY);
String likeKey = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.LIKE_KEY);
String smartSql = JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.SMART_SQL);
Order order = Order.valueOf(JSONObjectHelper.optString(querySpecJson, SmartStorePlugin.ORDER, "ascending"));
int pageSize = querySpecJson.getInt(SmartStorePlugin.PAGE_SIZE);
// Building query spec
QuerySpec querySpec = null;
switch (queryType) {
case exact: querySpec = buildExactQuerySpec(soupName, path, matchKey, pageSize); break;
case range: querySpec = buildRangeQuerySpec(soupName, path, beginKey, endKey, order, pageSize); break;
case like: querySpec = buildLikeQuerySpec(soupName, path, likeKey, order, pageSize); break;
case smart: querySpec = buildSmartQuerySpec(smartSql, pageSize); break;
default: throw new RuntimeException("Fell through switch: " + queryType);
}
return querySpec;
}
/**
* Query type enum
*/
public enum QueryType {
exact,
range,
like,
smart;
}
/**
* Simple class to represent query order
*/
public enum Order {
ascending("ASC"), descending("DESC");
public final String sql;
Order(String sqlOrder) {
this.sql = sqlOrder;
}
}
}
| 1 | 14,498 |
Did you mean for this to be `FORM`, instead of `FROM`?
|
forcedotcom-SalesforceMobileSDK-Android
|
java
|
@@ -94,6 +94,13 @@ type AWSMachineSpec struct {
// +kubebuilder:validation:MaxItems=2
NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
+ // UncompressedUserData specifies whether the user data is gzip-compressed before it is sent to the ec2 instance.
+ // cloud-init has built-in support for gzip-compressed user data.
+ // User data stored in aws secret manager is always gzip-compressed.
+ //
+ // +optional
+ UncompressedUserData *bool `json:"uncompressedUserData,omitempty"`
+
// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
// +optional
| 1 |
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha3
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/errors"
)
const (
// MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before
// removing it from the apiserver.
MachineFinalizer = "awsmachine.infrastructure.cluster.x-k8s.io"
)
// AWSMachineSpec defines the desired state of AWSMachine
type AWSMachineSpec struct {
// ProviderID is the unique identifier as specified by the cloud provider.
ProviderID *string `json:"providerID,omitempty"`
// AMI is the reference to the AMI from which to create the machine instance.
AMI AWSResourceReference `json:"ami,omitempty"`
// ImageLookupOrg is the AWS Organization ID to use for image lookup if AMI is not set.
ImageLookupOrg string `json:"imageLookupOrg,omitempty"`
// ImageLookupBaseOS is the name of the base operating system to use for
// image lookup if the AMI is not set.
ImageLookupBaseOS string `json:"imageLookupBaseOS,omitempty"`
// InstanceType is the type of instance to create. Example: m4.xlarge
InstanceType string `json:"instanceType,omitempty"`
// AdditionalTags is an optional set of tags to add to an instance, in addition to the ones added by default by the
// AWS provider. If both the AWSCluster and the AWSMachine specify the same tag name with different values, the
// AWSMachine's value takes precedence.
// +optional
AdditionalTags Tags `json:"additionalTags,omitempty"`
// IAMInstanceProfile is a name of an IAM instance profile to assign to the instance
// +optional
IAMInstanceProfile string `json:"iamInstanceProfile,omitempty"`
// PublicIP specifies whether the instance should get a public IP.
// Precedence for this setting is as follows:
// 1. This field if set
// 2. Cluster/flavor setting
// 3. Subnet default
// +optional
PublicIP *bool `json:"publicIP,omitempty"`
// AdditionalSecurityGroups is an array of references to security groups that should be applied to the
// instance. These security groups would be set in addition to any security groups defined
// at the cluster level or in the actuator.
// +optional
AdditionalSecurityGroups []AWSResourceReference `json:"additionalSecurityGroups,omitempty"`
// FailureDomain is the failure domain unique identifier this Machine should be attached to, as defined in Cluster API.
// For this infrastructure provider, the ID is equivalent to an AWS Availability Zone.
// If multiple subnets are matched for the availability zone, the first one returned is picked.
FailureDomain *string `json:"failureDomain,omitempty"`
// Subnet is a reference to the subnet to use for this instance. If not specified,
// the cluster subnet will be used.
// +optional
Subnet *AWSResourceReference `json:"subnet,omitempty"`
// SSHKeyName is the name of the ssh key to attach to the instance. Valid values are empty string (do not use SSH keys), a valid SSH key name, or omitted (use the default SSH key name)
// +optional
SSHKeyName *string `json:"sshKeyName,omitempty"`
// RootVolume encapsulates the configuration options for the root volume
// +optional
RootVolume *RootVolume `json:"rootVolume,omitempty"`
// NetworkInterfaces is a list of ENIs to associate with the instance.
// A maximum of 2 may be specified.
// +optional
// +kubebuilder:validation:MaxItems=2
NetworkInterfaces []string `json:"networkInterfaces,omitempty"`
// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
// +optional
CloudInit CloudInit `json:"cloudInit,omitempty"`
}
// CloudInit defines options related to the bootstrapping systems where
// CloudInit is used.
type CloudInit struct {
// InsecureSkipSecretsManager, when set to true will not use AWS Secrets Manager
// to ensure privacy of userdata.
// By default, a cloud-init boothook shell script is prepended to download
// the userdata from Secrets Manager and additionally delete the secret.
InsecureSkipSecretsManager bool `json:"insecureSkipSecretsManager,omitempty"`
// SecretCount is the number of secrets used to form the complete secret
// +optional
SecretCount int32 `json:"secretCount,omitempty"`
// SecretPrefix is the prefix for the secret name. This is stored
// temporarily, and deleted when the machine registers as a node against
// the workload cluster.
// +optional
SecretPrefix string `json:"secretPrefix,omitempty"`
}
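// Rough illustration (all names and values invented) of how the CloudInit
// options above, together with the AWSMachineSpec fields, surface in a
// manifest:
//
//	apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
//	kind: AWSMachine
//	metadata:
//	  name: example-machine
//	spec:
//	  instanceType: m4.xlarge
//	  sshKeyName: example-key
//	  cloudInit:
//	    insecureSkipSecretsManager: true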
// AWSMachineStatus defines the observed state of AWSMachine
type AWSMachineStatus struct {
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// Addresses contains the AWS instance associated addresses.
Addresses []v1.NodeAddress `json:"addresses,omitempty"`
// InstanceState is the state of the AWS instance for this machine.
// +optional
InstanceState *InstanceState `json:"instanceState,omitempty"`
// FailureReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"`
// FailureMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
FailureMessage *string `json:"failureMessage,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=awsmachines,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this AWSMachine belongs"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.instanceState",description="EC2 instance state"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="Machine ready status"
// +kubebuilder:printcolumn:name="InstanceID",type="string",JSONPath=".spec.providerID",description="EC2 instance ID"
// +kubebuilder:printcolumn:name="Machine",type="string",JSONPath=".metadata.ownerReferences[?(@.kind==\"Machine\")].name",description="Machine object which owns with this AWSMachine"
// AWSMachine is the Schema for the awsmachines API
type AWSMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AWSMachineSpec `json:"spec,omitempty"`
Status AWSMachineStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AWSMachineList contains a list of AWSMachine
type AWSMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AWSMachine `json:"items"`
}
func init() {
SchemeBuilder.Register(&AWSMachine{}, &AWSMachineList{})
}
| 1 | 14,405 |
What are your thoughts on the long term solution for the naming of this variable? Do you think we should revert the logic when we are ready to make the next set of breaking API changes?
|
kubernetes-sigs-cluster-api-provider-aws
|
go
|
@@ -92,14 +92,15 @@ module RSpec
command = spec_command
begin
- puts command if verbose
+ $stdout.puts(command}) if verbose
success = system(command)
rescue
- puts failure_message if failure_message
+ $stderr.puts failure_message if failure_message
end
+
if fail_on_error && !success
- $stderr.puts "#{command} failed"
- exit $?.exitstatus
+ $stderr.puts("#{command} failed") if verbose
+ exit($?.exitstatus)
end
end
| 1 |
require 'rspec/support/warnings'
require 'rake'
require 'rake/tasklib'
require 'shellwords'
module RSpec
module Core
class RakeTask < ::Rake::TaskLib
include ::Rake::DSL if defined?(::Rake::DSL)
# Name of task.
#
# default:
# :spec
attr_accessor :name
# Glob pattern to match files.
#
# default:
# 'spec/**/*_spec.rb'
attr_accessor :pattern
# Whether or not to fail Rake when an error occurs (typically when examples fail).
#
# default:
# true
attr_accessor :fail_on_error
# A message to print to stderr when there are failures.
attr_accessor :failure_message
# Use verbose output. If this is set to true, the task will print the
# executed spec command to stdout.
#
# default:
# true
attr_accessor :verbose
# Command line options to pass to ruby.
#
# default:
# nil
attr_accessor :ruby_opts
# Path to rspec
#
# default:
# 'rspec'
attr_accessor :rspec_path
# Command line options to pass to rspec.
#
# default:
# nil
attr_accessor :rspec_opts
# @deprecated
# Use rspec_opts instead.
#
# Command line options to pass to rspec.
#
# default:
# nil
def spec_opts=(opts)
RSpec.deprecate('RSpec::Core::RakeTask#spec_opts=', :replacement => 'rspec_opts=')
@rspec_opts = opts
end
def initialize(*args, &task_block)
setup_ivars(args)
desc "Run RSpec code examples" unless ::Rake.application.last_comment
task name, *args do |_, task_args|
RakeFileUtils.send(:verbose, verbose) do
task_block.call(*[self, task_args].slice(0, task_block.arity)) if task_block
run_task verbose
end
end
end
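# Hypothetical Rakefile usage sketch (task name and option values invented):
# the attributes documented above are normally set inside the constructor
# block, which receives the task instance.
#
#   RSpec::Core::RakeTask.new(:spec) do |t|
#     t.pattern    = 'spec/models/**/*_spec.rb'
#     t.rspec_opts = '--format documentation'
#     t.verbose    = false
#   end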
def setup_ivars(args)
@name = args.shift || :spec
@ruby_opts, @rspec_opts = nil, nil
@verbose, @fail_on_error = true, true
@rspec_path = 'rspec'
@pattern = './spec{,/*/**}/*_spec.rb'
end
def run_task(verbose)
command = spec_command
begin
puts command if verbose
success = system(command)
rescue
puts failure_message if failure_message
end
if fail_on_error && !success
$stderr.puts "#{command} failed"
exit $?.exitstatus
end
end
private
def files_to_run
if ENV['SPEC']
FileList[ ENV['SPEC'] ].sort
else
FileList[ pattern ].sort.map(&:shellescape)
end
end
def spec_command
cmd_parts = []
cmd_parts << RUBY
cmd_parts << ruby_opts
cmd_parts << "-S" << rspec_path
cmd_parts << files_to_run
cmd_parts << rspec_opts
cmd_parts.flatten.reject(&blank).join(" ")
end
def blank
lambda {|s| s.nil? || s == ""}
end
end
end
end
| 1 | 11,114 |
There's a syntax error on this line.
|
rspec-rspec-core
|
rb
|
@@ -834,7 +834,7 @@ fpga_result clear_port_errors(fpga_handle afu_handle)
result = fpgaObjectRead64(port_error_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
- return result;
+ goto out;
}
printf("\n \n Port error CSR : 0x%lx \n", value);
| 1 |
// Copyright(c) 2017-2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <stdbool.h>
#include <malloc.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <time.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include "safe_string/safe_string.h"
#include <opae/fpga.h>
// SYSFS FME Errors
#define FME_SYSFS_FME_ERRORS "errors/fme-errors/errors"
#define FME_SYSFS_PCIE0_ERRORS "errors/pcie0_errors"
#define FME_SYSFS_PCIE1_ERRORS "errors/pcie1_errors"
#define FME_SYSFS_BBS_ERRORS "errors/bbs_errors"
#define FME_SYSFS_GBS_ERRORS "errors/gbs_errors"
#define FME_SYSFS_WARNING_ERRORS "errors/warning_errors"
#define FME_SYSFS_NONFATAL_ERRORS "errors/nonfatal_errors"
#define FME_SYSFS_CATFATAL_ERRORS "errors/catfatal_errors"
#define FME_SYSFS_INJECT_ERROR "errors/inject_error"
#define FME_SYSFS_CLEAR_ERRORS "errors/fme-errors/clear"
#define FME_SYSFS_ERR_REVISION "errors/revision"
#define PORT_SYSFS_ERR "errors/errors"
#define PORT_SYSFS_ERR_CLEAR "errors/clear"
// SYFS Thermal
#define FME_SYSFS_THERMAL_MGMT_TEMP "thermal_mgmt/temperature"
#define FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP "thermal_mgmt/*trip*"
// SYSFS Power
#define FME_SYSFS_POWER_MGMT_CONSUMED "power_mgmt/consumed"
// MMIO scratchpad
#define PORT_SCRATCHPAD0 0x0028
#define NLB_CSR_SCRATCHPAD (0x40000 + 0x0104 )
#define PORT_MMIO_LEN (0x40000 + 0x0512 )
#define MMO_WRITE64_VALUE 0xF1F1F1F1F1F1F1F1
#define MMO_WRITE32_VALUE 0xF1F1F1
#define FPGA_CSR_LEN 64
#define FPGA_INTEGRATED_DEVICEID 0xbcc0
#define FPGA_DISCRETE_DEVICEID 0x09c4
#define FPAG_DEVICEID_PATH "../device/device"
#define FPGA_PORT_RES_PATH "/sys/bus/pci/devices/%04x:%02x:%02x.%d/resource2"
#define FPGA_SET_BIT(val, index) val |= (1 << index)
#define FPGA_CLEAR_BIT(val, index) val &= ~(1 << index)
#define FPGA_TOGGLE_BIT(val, index) val ^= (1 << index)
#define FPGA_BIT_IS_SET(val, index) (((val) >> (index)) & 1)
/* Type definitions */
typedef struct {
uint32_t uint[16];
} cache_line;
int usleep(unsigned);
#ifndef CL
# define CL(x) ((x) * 64)
#endif // CL
#ifndef LOG2_CL
# define LOG2_CL 6
#endif // LOG2_CL
#ifndef MB
# define MB(x) ((x) * 1024 * 1024)
#endif // MB
#define CACHELINE_ALIGNED_ADDR(p) ((p) >> LOG2_CL)
#define LPBK1_BUFFER_SIZE MB(1)
#define LPBK1_BUFFER_ALLOCATION_SIZE MB(2)
#define LPBK1_DSM_SIZE MB(2)
#define CSR_SRC_ADDR 0x0120
#define CSR_DST_ADDR 0x0128
#define CSR_CTL 0x0138
#define CSR_CFG 0x0140
#define CSR_NUM_LINES 0x0130
#define DSM_STATUS_TEST_COMPLETE 0x40
#define CSR_AFU_DSM_BASEL 0x0110
#define CSR_AFU_DSM_BASEH 0x0114
/* SKX-P NLB0 AFU_ID */
#define SKX_P_NLB0_AFUID "D8424DC4-A4A3-C413-F89E-433683F9040B"
#define FME_ERROR_COUNT 7
static const char * const FME_ERROR[FME_ERROR_COUNT] = {
"Fabric error detected", \
"Fabric fifo under / overflow error detected", \
"KTI CDC Parity Error detected", \
"KTI CDC Parity Error detected", \
"IOMMU Parity error detected", \
"AFU PF/VF access mismatch detected", \
"Indicates an MBP event error detected", \
};
#define PCIE0_ERROR_COUNT 10
static const char * const PCIE0_ERROR[PCIE0_ERROR_COUNT] = {
"TLP format/type error detected", \
"TTLP MW address error detected", \
"TLP MW length error detected", \
"TLP MR address error detected", \
"TLP MR length error detected", \
"TLP CPL tag error detected", \
"TLP CPL status error detected", \
"TLP CPL timeout error detected", \
"CCI bridge parity error detected", \
"TLP with EP error detected", \
};
#define PCIE1_ERROR_COUNT 10
static const char * const PCIE1_ERROR[PCIE1_ERROR_COUNT] = {
"TLP format/type error detected", \
"TTLP MW address error detected", \
"TLP MW length error detected", \
"TLP MR address error detected", \
"TLP MR length error detected", \
"TLP CPL tag error detected", \
"TLP CPL status error detected", \
"TLP CPL timeout error detected", \
"CCI bridge parity error detected", \
"TLP with EP error detected", \
};
#define RAS_NONFATAL_ERROR_COUNT 13
static const char * const RAS_NONFATAL_ERROR [RAS_NONFATAL_ERROR_COUNT] = {
"Temperature threshold triggered AP1 detected", \
"Temperature threshold triggered AP2 detected", \
"PCIe error detected", \
"AFU port Fatal error detected", \
"ProcHot event error detected", \
"AFU PF/VF access mismatch error detected", \
"Injected Warning Error detected", \
"Reserved", \
"Reserved", \
"Temperature threshold triggered AP6 detected", \
"Power threshold triggered AP1 error detected", \
"Power threshold triggered AP2 error detected", \
"MBP event error detected", \
};
#define RAS_CATFATAL_ERROR_COUNT 12
static const char * const RAS_CATFATAL_ERROR[RAS_CATFATAL_ERROR_COUNT] = {
"KTI link layer error detected.", \
"tag-n-cache error detected.", \
"CCI error detected.", \
"KTI protocol error detected.", \
"Fatal DRAM error detected", \
"IOMMU fatal parity error detected.", \
"Fabric fatal error detected", \
"Poison error from any of PCIe ports detected", \
"Injected Fatal Error detected", \
"Catastrophic CRC error detected", \
"Catastrophic thermal runaway event detected", \
"Injected Catastrophic Error detected", \
};
#define RAS_INJECT_ERROR_COUNT 3
static const char * const RAS_INJECT_ERROR[RAS_INJECT_ERROR_COUNT] = {
"Set Catastrophic error .", \
"Set Fatal error.", \
"Ser Non-fatal error .", \
};
#define PORT_ERROR_COUNT 52
static const char * const PORT_ERROR[PORT_ERROR_COUNT] = {
"Tx Channel 0 overflow error detected.", \
"Tx Channel 0 invalid request encodingr error detected.", \
"Tx Channel 0 cl_len=3 not supported error detected.", \
"Tx Channel 0 request with cl_len=2 does NOT have a 2CL aligned address error detected.", \
"Tx Channel 0 request with cl_len=4 does NOT have a 4CL aligned address error detected.", \
"RSVD.", "RSVD.", "RSVD.","RSVD.",\
"AFU MMIO RD received while PORT is in reset error detected", \
"AFU MMIO WR received while PORT is in reset error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\
"Tx Channel 1 invalid request encoding error detected", \
"Tx Channel 1 cl_len=3 not supported error detected.", \
"Tx Channel 1 request with cl_len=2 does NOT have a 2CL aligned address error detected", \
"Tx Channel 1 request with cl_len=4 does NOT have a 4CL aligned address error detected", \
"Tx Channel 1 insufficient data payload Error detected", \
"Tx Channel 1 data payload overrun error detected", \
"Tx Channel 1 incorrect address on subsequent payloads error detected", \
"Tx Channel 1 Non-zero SOP detected for requests!=WrLine_* error detected", \
"Tx Channel 1 Illegal VC_SEL. Atomic request is only supported on VL0 error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.",\
"MMIO TimedOut error detected", \
"Tx Channel 2 fifo overflo error detected", \
"MMIO Read response received, with no matching request pending error detected", \
"RSVD.", "RSVD.", "RSVD.", "RSVD.", "RSVD.", \
"Number of pending requests: counter overflow error detected", \
"Request with Address violating SMM range error detected", \
"Request with Address violating second SMM range error detected", \
"Request with Address violating ME stolen range", \
"Request with Address violating Generic protected range error detected ", \
"Request with Address violating Legacy Range Low error detected", \
"Request with Address violating Legacy Range High error detected", \
"Request with Address violating VGA memory range error detected", \
"Page Fault error detected", \
"PMR Erro error detected", \
"AP6 event detected ", \
"VF FLR detected on port when PORT configured in PF access mode error detected ", \
};
// RAS Error Inject CSR
struct ras_inject_error {
union {
uint64_t csr;
struct {
/* Catastrophic error */
uint64_t catastrophicr_error : 1;
/* Fatal error */
uint64_t fatal_error : 1;
/* Non-fatal error */
uint64_t nonfatal_error : 1;
/* Reserved */
uint64_t rsvd : 61;
};
};
};
#define GETOPT_STRING ":hB:D:F:S:PQRNTCEGHIO"
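// The leading ':' in GETOPT_STRING makes getopt_long() return ':' (instead of
// '?') when an option is missing its required argument; ParseCmds() reports
// that case separately from an unknown option.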
struct option longopts[] = {
{"help", no_argument, NULL, 'h'},
{"segment", required_argument, NULL, 0xe},
{"bus", required_argument, NULL, 'B'},
{"device", required_argument, NULL, 'D'},
{"function", required_argument, NULL, 'F'},
{"socket-id", required_argument, NULL, 'S'},
{"print-error", no_argument, NULL, 'P'},
{"catast-error", no_argument, NULL, 'Q'},
{"fatal-error", no_argument, NULL, 'R'},
{"nofatal-error", no_argument, NULL, 'N'},
{"thermal-trip", no_argument, NULL, 'T'},
{"clearinj-error", no_argument, NULL, 'C'},
{"mwaddress-error", no_argument, NULL, 'E'},
{"mraddress-error", no_argument, NULL, 'G'},
{"mwlength-error", no_argument, NULL, 'H'},
{"mrlength-error", no_argument, NULL, 'I'},
{"pagefault-error", no_argument, NULL, 'O'},
{0,0,0,0}
};
// RAS Command line struct
struct RASCommandLine
{
uint32_t flags;
#define RASAPP_CMD_FLAG_HELP 0x00000001
#define RASAPP_CMD_FLAG_VERSION 0x00000002
#define RASAPP_CMD_PARSE_ERROR 0x00000003
#define RASAPP_CMD_FLAG_BUS 0x00000008
#define RASAPP_CMD_FLAG_DEV 0x00000010
#define RASAPP_CMD_FLAG_FUNC 0x00000020
#define RASAPP_CMD_FLAG_SOCKET 0x00000040
int segment;
int bus;
int device;
int function;
int socket;
bool print_error;
bool catast_error;
bool fatal_error;
bool nonfatal_error;
bool clear_injerror;
bool mwaddress_error;
bool mraddress_error;
bool mwlength_error;
bool mrlength_error;
bool pagefault_error;
};
struct RASCommandLine rasCmdLine = { 0, -1, -1, -1, -1, -1, false,
false, false, false,false,
false, false, false, false, false};
// RAS Command line input help
void RASAppShowHelp()
{
printf("Usage:\n");
printf("ras\n");
printf("<Segment> --segment=<SEGMENT NUMBER>\n");
printf("<Bus> --bus=<BUS NUMBER> "
"OR -B=<BUS NUMBER>\n");
printf("<Device> --device=<DEVICE NUMBER> "
"OR -D=<DEVICE NUMBER>\n");
printf("<Function> --function=<FUNCTION NUMBER> "
"OR -F=<FUNCTION NUMBER>\n");
printf("<Socket-id> --socket-id=<SOCKET NUMBER> "
"OR -S=<SOCKET NUMBER>\n");
printf("<Print Error> --print-error OR -P \n");
printf("<Catast Error> --catast-error OR -Q \n");
printf("<Fatal Error> --fatal-error OR -R \n");
printf("<NoFatal Error> --nofatal-error OR -N \n");
printf("<Clear Inj Error> --clearinj-error OR -C \n");
printf("<MW Address error> --mwaddress-error OR -E \n");
printf("<MR Address error> --mwaddress-error OR -G \n");
printf("<MW Length error> --mwlength-error OR -H \n");
printf("<MR Length error> --mrlength-error OR -I \n");
printf("<Page Fault Error> --pagefault-error OR -O \n");
printf("\n");
}
/*
* macro to check return codes, print error message, and goto cleanup label
* NOTE: this changes the program flow (uses goto)!
*/
#define ON_ERR_GOTO(res, label, desc) \
do { \
if ((res) != FPGA_OK) { \
print_err((desc), (res)); \
goto label; \
} \
} while (0)
void print_err(const char *s, fpga_result res)
{
fprintf(stderr, "Error %s: %s\n", s, fpgaErrStr(res));
}
fpga_result print_ras_errors(fpga_token token);
fpga_result print_pwr_temp(fpga_token token);
fpga_result clear_inject_ras_errors(fpga_handle fme_handle);
fpga_result inject_ras_errors(fpga_handle fme_handle, struct RASCommandLine *rasCmdLine);
fpga_result mmio_error(fpga_handle afu_handle, struct RASCommandLine *rasCmdLine);
fpga_result print_port_errors(fpga_token token);
fpga_result clear_port_errors(fpga_handle afu_handle);
fpga_result page_fault_errors();
int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[]);
int main( int argc, char** argv )
{
fpga_result result = 0;
fpga_properties fpga_filter = NULL;
uint32_t num_matches = 1;
fpga_properties afu_filter = NULL;
fpga_handle afu_handle = NULL ;
fpga_handle fme_handle = NULL;
fpga_token fme_token = NULL;
fpga_token afu_token = NULL;
// Parse command line
if ( argc < 2 ) {
RASAppShowHelp();
return 1;
} else if ( 0!= ParseCmds(&rasCmdLine, argc, argv) ) {
OPAE_ERR( "Error scanning command line \n.");
return 2;
}
printf(" ------- Command line Input Start ----\n\n");
printf(" Segment : %d\n", rasCmdLine.segment);
printf(" Bus : %d\n", rasCmdLine.bus);
printf(" Device : %d\n", rasCmdLine.device);
printf(" Function : %d\n", rasCmdLine.function);
printf(" Socket-id : %d\n", rasCmdLine.socket);
printf(" Print Error : %d\n", rasCmdLine.print_error);
printf(" Catas Error : %d\n", rasCmdLine.catast_error);
printf(" Fatal Error : %d\n", rasCmdLine.fatal_error);
printf(" NonFatal Error : %d\n", rasCmdLine.nonfatal_error);
printf(" Clear Error : %d\n", rasCmdLine.clear_injerror);
printf(" MW Address Error : %d\n", rasCmdLine.mwaddress_error);
printf(" MR Address Error : %d\n", rasCmdLine.mraddress_error);
printf(" MW Length Error : %d\n", rasCmdLine.mwlength_error);
printf(" MR Length Error : %d\n", rasCmdLine.mrlength_error);
printf(" Page Fault Error : %d\n", rasCmdLine.pagefault_error);
printf(" ------- Command line Input END ----\n\n");
// Enum FPGA device
result = fpgaInitialize(NULL);
ON_ERR_GOTO(result, out_exit, "Failed to initialize ");
result = fpgaGetProperties(NULL, &fpga_filter);
ON_ERR_GOTO(result, out_exit, "creating properties object");
result = fpgaGetProperties(NULL, &afu_filter);
ON_ERR_GOTO(result, out_destroy_prop, "creating properties object");
result = fpgaPropertiesSetObjectType(fpga_filter, FPGA_DEVICE);
ON_ERR_GOTO(result, out_destroy_prop, "setting object type");
if (rasCmdLine.segment > 0) {
result = fpgaPropertiesSetSegment(fpga_filter, rasCmdLine.segment);
ON_ERR_GOTO(result, out_destroy_prop, "setting segment");
}
if (rasCmdLine.bus > 0) {
result = fpgaPropertiesSetBus(fpga_filter, rasCmdLine.bus);
ON_ERR_GOTO(result, out_destroy_prop, "setting bus");
}
if (rasCmdLine.device > 0) {
result = fpgaPropertiesSetDevice(fpga_filter, rasCmdLine.device);
ON_ERR_GOTO(result, out_destroy_prop, "setting device");
}
if (rasCmdLine.function > 0) {
result = fpgaPropertiesSetFunction(fpga_filter, rasCmdLine.function);
ON_ERR_GOTO(result, out_destroy_prop, "setting function");
}
if (rasCmdLine.socket > 0) {
result = fpgaPropertiesSetSocketID(fpga_filter, rasCmdLine.socket);
ON_ERR_GOTO(result, out_destroy_prop, "setting socket");
}
result = fpgaEnumerate(&fpga_filter, 1, &fme_token, 1, &num_matches);
ON_ERR_GOTO(result, out_destroy_prop, "enumerating FPGAs");
if (num_matches < 1) {
fprintf(stderr, "FPGA Resource not found.\n");
result = fpgaDestroyProperties(&afu_filter);
result = fpgaDestroyProperties(&fpga_filter);
return FPGA_INVALID_PARAM;
}
fprintf(stderr, "FME Resource found.\n");
result = fpgaPropertiesSetParent(afu_filter, fme_token);
ON_ERR_GOTO(result, out_destroy_prop, "Error setting parent in properties");
result = fpgaEnumerate(&afu_filter, 1, &afu_token, 1, &num_matches);
ON_ERR_GOTO(result, out_destroy_prop, "enumerating FPGAs AFU");
if (num_matches < 1) {
fprintf(stderr, "AFU Resource not found.\n");
result = fpgaDestroyProperties(&fpga_filter);
result = fpgaDestroyProperties(&afu_filter);
return FPGA_INVALID_PARAM;
}
fprintf(stderr, "AFU Resource found.\n");
result = fpgaOpen(fme_token, &fme_handle, 0);
ON_ERR_GOTO(result, out_destroy_prop, "opening fpga");
result = fpgaOpen(afu_token, &afu_handle, 0);
ON_ERR_GOTO(result, out_close, "opening afu");
// Inject error
if (rasCmdLine.catast_error ||
rasCmdLine.fatal_error ||
rasCmdLine.nonfatal_error) {
// Inject RAS ERROR
result = inject_ras_errors(fme_handle, &rasCmdLine);
if (result != FPGA_OK) {
OPAE_ERR("Failed to print fme errors");
goto out_close;
}
}
// inject MMIO error
if ( (rasCmdLine.mwaddress_error == true) ||
(rasCmdLine.mraddress_error == true) ||
(rasCmdLine.mwlength_error == true) ||
(rasCmdLine.mrlength_error == true) ) {
result = mmio_error(afu_handle, &rasCmdLine);
if (result != FPGA_OK) {
OPAE_ERR("Failed set MMIO errors");
goto out_close;
}
}
// Clear Inject Error
if (rasCmdLine.clear_injerror ) {
// clear RAS ERROR
result = clear_inject_ras_errors(fme_handle);
if (result != FPGA_OK) {
OPAE_ERR("Failed to clear inject errors");
goto out_close;
}
// clear Port ERROR
result = clear_port_errors(afu_handle);
if (result != FPGA_OK) {
OPAE_ERR("Failed to clear port errors");
goto out_close;
}
}
if (rasCmdLine.pagefault_error) {
// Page fault error
result = page_fault_errors();
if (result != FPGA_OK) {
OPAE_ERR("Failed to trigger page fault errors");
goto out_close;
}
}
sleep(1);
if (rasCmdLine.print_error) {
// Print RAS Error
result = print_ras_errors(fme_token);
if (result != FPGA_OK) {
OPAE_ERR("Failed to print fme errors");
goto out_close;
}
// Print port Error
result = print_port_errors(afu_token);
if (result != FPGA_OK) {
OPAE_ERR("Failed to print port errors");
goto out_close;
}
// Print power and temp
result = print_pwr_temp(fme_token);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get power and temp");
goto out_close;
}
}
out_close:
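// Note: each cleanup call below reassigns 'result', so an error code set
// earlier in main() can be overwritten by the return value of a later
// fpgaClose()/fpgaDestroyToken()/fpgaDestroyProperties() call.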
if (fme_handle) {
result = fpgaClose(fme_handle);
if (result != FPGA_OK)
OPAE_ERR("closing fme");
}
if (afu_handle) {
result = fpgaClose(afu_handle);
ON_ERR_GOTO(result, out_destroy_prop, "closing afu");
}
out_destroy_prop:
if (fme_token) {
result = fpgaDestroyToken(&fme_token);
if (result != FPGA_OK)
OPAE_ERR("destroying token");
}
if (afu_token) {
result = fpgaDestroyToken(&afu_token);
if (result != FPGA_OK)
OPAE_ERR("destroying token");
}
if (afu_filter) {
result = fpgaDestroyProperties(&afu_filter);
if (result != FPGA_OK)
OPAE_ERR("destroying properties object");
}
if (fpga_filter) {
result = fpgaDestroyProperties(&fpga_filter);
ON_ERR_GOTO(result, out_exit, "destroying properties object");
}
out_exit:
return result;
}
// Print Error
fpga_result print_errors(fpga_token token,
const char * err_path,
const char * const* err_strings,
int size)
{
fpga_result result = FPGA_OK;
uint64_t value = 0;
int i = 0;
fpga_object err_object;
if (err_path == NULL ||
err_strings == NULL) {
OPAE_ERR("Invalid input sting");
return FPGA_INVALID_PARAM;
}
result = fpgaTokenGetObject(token, err_path, &err_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(err_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf(" CSR : 0x%lx \n", value);
for (i = 0; i < FPGA_CSR_LEN; i++) {
if ((i < size) && FPGA_BIT_IS_SET(value, i)) {
printf("\t %s \n", err_strings[i]);
}
}
result = fpgaDestroyObject(&err_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
return result;
}
// prints RAS errors
fpga_result print_ras_errors(fpga_token token)
{
fpga_result result = FPGA_OK;
uint64_t revision = 0;
fpga_object rev_object;
result = fpgaTokenGetObject(token, FME_SYSFS_ERR_REVISION, &rev_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(rev_object, &revision, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
result = fpgaDestroyObject(&rev_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("\n ==========================================\n");
printf(" ----------- PRINT FME ERROR START-------- \n \n");
printf(" fme error revison : %ld \n", revision);
// Revision 1
if (revision == 1) {
// Non Fatal Error
printf("\n ------- Non Fatal error ------------ \n");
result = print_errors(token,
FME_SYSFS_NONFATAL_ERRORS,
RAS_NONFATAL_ERROR,
RAS_NONFATAL_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get fme non fatal errors");
return result;
}
// Fatal Error
printf("\n ------- Fatal error ------------ \n");
result = print_errors(token,
FME_SYSFS_CATFATAL_ERRORS,
RAS_CATFATAL_ERROR,
RAS_CATFATAL_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get fme fatal errors");
return result;
}
// Injected error
printf("\n ------- Injected error ------------ \n");
result = print_errors(token,
FME_SYSFS_INJECT_ERROR,
RAS_INJECT_ERROR,
RAS_INJECT_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get fme Injected errors");
return result;
}
// FME error
printf("\n ------- FME error ------------ \n");
result = print_errors(token,
FME_SYSFS_FME_ERRORS,
FME_ERROR,
FME_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get fme errors");
return result;
}
// PCIe0 error
printf("\n ------- PCIe0 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE0_ERRORS,
PCIE0_ERROR,
PCIE0_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get pcie0 errors");
return result;
}
// PCIe1 error
printf("\n ------- PCIe1 error ------------ \n");
result = print_errors(token,
FME_SYSFS_PCIE1_ERRORS,
PCIE1_ERROR,
PCIE1_ERROR_COUNT);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get pcie1 errors");
return result;
}
}
else {
printf("\n Invalid FME Error Revision \n");
}
printf("\n ----------- PRINT FME ERROR END----------\n");
printf(" ========================================== \n \n");
return result;
}
// prints PORT errors
fpga_result print_port_errors(fpga_token token)
{
fpga_result result = FPGA_OK;
uint64_t value = 0;
int size = 0;
int i = 0;
fpga_object err_object;
printf("\n ==========================================\n");
printf(" ----------- PRINT PORT ERROR START-------- \n");
result = fpgaTokenGetObject(token, PORT_SYSFS_ERR, &err_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(err_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf("\n Port error CSR : 0x%lx \n", value);
size = PORT_ERROR_COUNT;
for (i = 0; i < 64; i++) {
if (FPGA_BIT_IS_SET(value, i) && (i < size)) {
printf("\t %s \n", PORT_ERROR[i]);
}
}
result = fpgaDestroyObject(&err_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("\n ----------- PRINT PORT ERROR END----------\n");
printf(" ========================================== \n \n");
return result;
}
// clear PORT errors
fpga_result clear_port_errors(fpga_handle afu_handle)
{
fpga_result result = FPGA_OK;
uint64_t value = 0;
fpga_object port_error_object;
printf(" ----------- Clear port error-------- \n \n");
// Power
result = fpgaHandleGetObject(afu_handle, PORT_SYSFS_ERR_CLEAR, &port_error_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(port_error_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf("\n \n Port error CSR : 0x%lx \n", value);
result = fpgaObjectWrite64(port_error_object, 0x0, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
result = fpgaDestroyObject(&port_error_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("----------- PORT ERROR CLEARED ERROR END-------- \n \n");
return result;
}
// Inject RAS errors
fpga_result inject_ras_errors(fpga_handle fme_handle,
struct RASCommandLine *rasCmdLine)
{
fpga_result result = FPGA_OK;
struct ras_inject_error inj_error = { { 0 } };
fpga_object inj_error_object;
printf("----------- INJECT ERROR START -------- \n \n");
// Power
result = fpgaHandleGetObject(fme_handle, FME_SYSFS_INJECT_ERROR, &inj_error_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(inj_error_object, &inj_error.csr, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf("Current inj_error csr: %ld \n", inj_error.csr);
if (rasCmdLine->catast_error) {
inj_error.catastrophicr_error = 1;
}
if (rasCmdLine->fatal_error) {
inj_error.fatal_error = 1;
}
if (rasCmdLine->nonfatal_error) {
inj_error.nonfatal_error = 1;
}
printf("Injected inj_error.csr: %ld \n", inj_error.csr);
result = fpgaObjectWrite64(inj_error_object, inj_error.csr, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
result = fpgaDestroyObject(&inj_error_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("----------- INJECT ERROR END-------- \n \n");
return result;
}
// Clear Inject RAS errors
fpga_result clear_inject_ras_errors(fpga_handle fme_handle)
{
fpga_result result = FPGA_OK;
struct ras_inject_error inj_error = { { 0 } };
fpga_object error_object;
printf("----------- INJECT ERROR START -------- \n \n");
// Clear Inject error
result = fpgaHandleGetObject(fme_handle, FME_SYSFS_INJECT_ERROR, &error_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(error_object, &inj_error.csr, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf(" Clear inj_error.csr: 0x%lx \n", inj_error.csr);;
result = fpgaObjectWrite64(error_object, 0x0, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Write Object ");
return result;
}
result = fpgaDestroyObject(&error_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("----------- INJECT ERROR END-------- \n \n");
printf("----------- FME ERROR START --------- \n \n");
// Clear FME error
result = fpgaHandleGetObject(fme_handle, FME_SYSFS_CLEAR_ERRORS, &error_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get handle Object");
return result;
}
result = fpgaObjectWrite64(error_object, 0x0, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Write Object ");
return result;
}
result = fpgaDestroyObject(&error_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("----------- FME ERROR END-------- \n \n");
return result;
}
// Print FPGA power and temperature
fpga_result print_pwr_temp(fpga_token token)
{
fpga_result result = FPGA_OK;
uint64_t value = 0;
fpga_object pwr_temp_object;
printf("\n ----------- POWER & THERMAL START-----------\n");
printf(" ========================================== \n \n");
// Power
result = fpgaTokenGetObject(token, FME_SYSFS_POWER_MGMT_CONSUMED, &pwr_temp_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(pwr_temp_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf(" Power consumed : %lu watts \n", value);
result = fpgaDestroyObject(&pwr_temp_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
// Thermal
result = fpgaTokenGetObject(token, FME_SYSFS_THERMAL_MGMT_TEMP, &pwr_temp_object, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(pwr_temp_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf(" Temperature : %lu Centigrade \n", value);
result = fpgaDestroyObject(&pwr_temp_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
result = fpgaTokenGetObject(token, FME_SYSFS_THERMAL_MGMT_THRESHOLD_TRIP,
&pwr_temp_object, FPGA_OBJECT_GLOB);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get Token Object");
return result;
}
result = fpgaObjectRead64(pwr_temp_object, &value, 0);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Read Object ");
return result;
}
printf(" Thermal Trip : %lu Centigrade \n", value);
result = fpgaDestroyObject(&pwr_temp_object);
if (result != FPGA_OK) {
OPAE_ERR("Failed to Destroy Object");
return result;
}
printf("\n ----------- POWER & THERMAL END -----------\n");
printf(" ========================================== \n \n");
return result;
}
// MMIO errors
fpga_result mmio_error(fpga_handle afu_handle, struct RASCommandLine *rasCmdLine)
{
char sysfs_path[256] = {0};
fpga_result result = FPGA_OK;
int bus = 0;
int device = 0;
int function = 0;
int fd = 0;
uint8_t *ptr = 0;
uint16_t value = 0;
fpga_properties props;
if (rasCmdLine == NULL ) {
OPAE_ERR("Invalid input ");
return FPGA_INVALID_PARAM;
}
if ( rasCmdLine->bus >0 )
bus = rasCmdLine->bus;
if ( rasCmdLine->device >0 )
device = rasCmdLine->device;
if ( rasCmdLine->function >0 )
function = rasCmdLine->function;
result = fpgaGetPropertiesFromHandle(afu_handle, &props);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get properties from handle");
return result;
}
result = fpgaPropertiesGetDeviceID(props, &value);
if (result != FPGA_OK) {
OPAE_ERR("Failed to get device id from properties");
return result;
}
if (fpgaDestroyProperties(&props)) {
OPAE_ERR("Failed to destroy properties");
}
printf(" Device ID : 0x%x \n", value);
if ((value != FPGA_INTEGRATED_DEVICEID) &&
(value != FPGA_DISCRETE_DEVICEID)) {
OPAE_ERR("Failed to read Device id");
return FPGA_NOT_SUPPORTED;
}
snprintf(sysfs_path, sizeof(sysfs_path),
FPGA_PORT_RES_PATH,0,bus,device,function);
fd = open(sysfs_path, O_RDWR);
if (fd < 0) {
OPAE_ERR("Failed to open FPGA PCIE BAR2");
return FPGA_EXCEPTION;
}
ptr = mmap(NULL, PORT_MMIO_LEN,
PROT_READ|PROT_WRITE,MAP_SHARED, fd, 0);
if (ptr == MAP_FAILED ) {
OPAE_ERR("Failed to map FPGA PCIE BAR2");
result = FPGA_EXCEPTION;
goto out_close ;
}
// Memory Write length error
if(rasCmdLine->mwlength_error) {
OPAE_DBG("Memory Write length error \n");
*((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3))
= (uint16_t)MMO_WRITE64_VALUE;
}
// Memory Read length error
if(rasCmdLine->mrlength_error) {
OPAE_DBG(" Memory Read length error \n");
value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3));
OPAE_DBG(" Memory Read length value %lx\n",value);
}
// Memory Read address error
if(rasCmdLine->mraddress_error) {
OPAE_DBG("Memory Read address error \n");
value = *((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3));
OPAE_DBG("Memory Read address value %lx\n", value);
value = *((volatile uint64_t *) (ptr + PORT_SCRATCHPAD0+3));
OPAE_DBG("Memory Read address value %lx\n", value);
}
// Memory Write address error
if(rasCmdLine->mwaddress_error) {
OPAE_DBG("Memory Write address error \n");
*((volatile uint16_t *) (ptr + NLB_CSR_SCRATCHPAD +3))
= (uint16_t)MMO_WRITE32_VALUE;
}
if(ptr)
munmap(ptr, PORT_MMIO_LEN);
out_close:
if(fd >=0)
close(fd);
return result;
}
// page fault errors
fpga_result page_fault_errors()
{
fpga_properties filter = NULL;
fpga_token accelerator_token;
fpga_handle accelerator_handle;
fpga_guid guid;
uint32_t num_matches;
volatile uint64_t *dsm_ptr = NULL;
volatile uint64_t *input_ptr = NULL;
volatile uint64_t *output_ptr = NULL;
uint64_t dsm_wsid;
uint64_t input_wsid;
uint64_t output_wsid;
fpga_result res = FPGA_OK;
if (uuid_parse(SKX_P_NLB0_AFUID, guid) < 0) {
fprintf(stderr, "Error parsing guid '%s'\n", SKX_P_NLB0_AFUID);
goto out_exit;
}
/* Look for accelerator with MY_ACCELERATOR_ID */
res = fpgaGetProperties(NULL, &filter);
ON_ERR_GOTO(res, out_exit, "creating properties object");
res = fpgaPropertiesSetObjectType(filter, FPGA_ACCELERATOR);
ON_ERR_GOTO(res, out_destroy_prop, "setting object type");
res = fpgaPropertiesSetGUID(filter, guid);
ON_ERR_GOTO(res, out_destroy_prop, "setting GUID");
if (rasCmdLine.bus >0){
res = fpgaPropertiesSetBus(filter, rasCmdLine.bus);
ON_ERR_GOTO(res, out_destroy_prop, "setting bus");
}
if (rasCmdLine.device >0) {
res = fpgaPropertiesSetDevice(filter, rasCmdLine.device);
ON_ERR_GOTO(res, out_destroy_prop, "setting device");
}
if (rasCmdLine.function >0){
res = fpgaPropertiesSetFunction(filter, rasCmdLine.function);
ON_ERR_GOTO(res, out_destroy_prop, "setting function");
}
res = fpgaEnumerate(&filter, 1, &accelerator_token, 1, &num_matches);
ON_ERR_GOTO(res, out_destroy_prop, "enumerating accelerators");
if (num_matches < 1) {
fprintf(stderr, "accelerator not found.\n");
res = fpgaDestroyProperties(&filter);
return FPGA_INVALID_PARAM;
}
/* Open accelerator and map MMIO */
res = fpgaOpen(accelerator_token, &accelerator_handle, FPGA_OPEN_SHARED);
ON_ERR_GOTO(res, out_destroy_tok, "opening accelerator");
res = fpgaMapMMIO(accelerator_handle, 0, NULL);
ON_ERR_GOTO(res, out_close, "mapping MMIO space");
/* Allocate buffers */
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_DSM_SIZE,
(void **)&dsm_ptr, &dsm_wsid, 0);
ON_ERR_GOTO(res, out_close, "allocating DSM buffer");
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE,
(void **)&input_ptr, &input_wsid, 0);
ON_ERR_GOTO(res, out_free_dsm, "allocating input buffer");
res = fpgaPrepareBuffer(accelerator_handle, LPBK1_BUFFER_ALLOCATION_SIZE,
(void **)&output_ptr, &output_wsid, 0);
ON_ERR_GOTO(res, out_free_input, "allocating output buffer");
printf("Running Test\n");
/* Initialize buffers */
memset_s((void *)dsm_ptr, LPBK1_DSM_SIZE, 0);
memset_s((void *)input_ptr, LPBK1_BUFFER_SIZE, 0xAF);
memset_s((void *)output_ptr, LPBK1_BUFFER_SIZE, 0xBE);
cache_line *cl_ptr = (cache_line *)input_ptr;
uint32_t i;
for (i = 0; i < LPBK1_BUFFER_SIZE / CL(1); ++i) {
cl_ptr[i].uint[15] = i+1; /* set the last uint in every cacheline */
}
/* Reset accelerator */
res = fpgaReset(accelerator_handle);
ON_ERR_GOTO(res, out_free_output, "resetting accelerator");
/* Program DMA addresses */
uint64_t iova;
res = fpgaGetIOAddress(accelerator_handle, dsm_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting DSM IOVA");
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_AFU_DSM_BASEL, iova);
ON_ERR_GOTO(res, out_free_output, "writing CSR_AFU_DSM_BASEL");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 0);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 1);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
res = fpgaGetIOAddress(accelerator_handle, input_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting input IOVA");
// Free Input buffer
res = fpgaReleaseBuffer(accelerator_handle, input_wsid);
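// The input buffer is released on purpose before the AFU is started: the
// device's later DMA read from CSR_SRC_ADDR then targets an unmapped page,
// which is how this test provokes a page fault.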
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_SRC_ADDR, CACHELINE_ALIGNED_ADDR(iova));
ON_ERR_GOTO(res, out_free_output, "writing CSR_SRC_ADDR");
res = fpgaGetIOAddress(accelerator_handle, output_wsid, &iova);
ON_ERR_GOTO(res, out_free_output, "getting output IOVA");
res = fpgaWriteMMIO64(accelerator_handle, 0, CSR_DST_ADDR, CACHELINE_ALIGNED_ADDR(iova));
ON_ERR_GOTO(res, out_free_output, "writing CSR_DST_ADDR");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_NUM_LINES, LPBK1_BUFFER_SIZE / CL(1));
ON_ERR_GOTO(res, out_free_output, "writing CSR_NUM_LINES");
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CFG, 0x42000);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
/* Start the test */
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 3);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
/* Wait for test completion */
usleep(10000);
/* Stop the device */
res = fpgaWriteMMIO32(accelerator_handle, 0, CSR_CTL, 7);
ON_ERR_GOTO(res, out_free_output, "writing CSR_CFG");
printf("Done Running Test\n");
/* Release buffers */
out_free_output:
res = fpgaReleaseBuffer(accelerator_handle, output_wsid);
ON_ERR_GOTO(res, out_free_input, "releasing output buffer");
out_free_input:
// res = fpgaReleaseBuffer(accelerator_handle, input_wsid);
// ON_ERR_GOTO(res, out_free_dsm, "releasing input buffer");
out_free_dsm:
res = fpgaReleaseBuffer(accelerator_handle, dsm_wsid);
ON_ERR_GOTO(res, out_unmap, "releasing DSM buffer");
/* Unmap MMIO space */
out_unmap:
res = fpgaUnmapMMIO(accelerator_handle, 0);
ON_ERR_GOTO(res, out_close, "unmapping MMIO space");
/* Release accelerator */
out_close:
res = fpgaClose(accelerator_handle);
ON_ERR_GOTO(res, out_destroy_tok, "closing accelerator");
/* Destroy token */
out_destroy_tok:
res = fpgaDestroyToken(&accelerator_token);
ON_ERR_GOTO(res, out_destroy_prop, "destroying token");
/* Destroy properties object */
out_destroy_prop:
res = fpgaDestroyProperties(&filter);
ON_ERR_GOTO(res, out_exit, "destroying properties object");
out_exit:
return res;
}
// parse Input command line
int ParseCmds(struct RASCommandLine *rasCmdLine, int argc, char *argv[])
{
int getopt_ret = 0;
int option_index = 0;
char *endptr = NULL;
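// Note: endptr is never checked after the strtol() calls below, so a
// non-numeric argument silently parses as 0 (or as its leading digits).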
while( -1 != ( getopt_ret = getopt_long(argc, argv,
GETOPT_STRING,
longopts,
&option_index))){
const char *tmp_optarg = optarg;
if ((optarg) &&
('=' == *tmp_optarg)){
++tmp_optarg;
}
switch(getopt_ret){
case 'h':
// Command line help
RASAppShowHelp();
return -2;
break;
case 0xe:
// segment number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->segment = strtol(tmp_optarg, &endptr, 0);
break;
case 'B':
// bus number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->bus = strtol(tmp_optarg, &endptr, 0);
break;
case 'D':
// Device number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->device = strtol(tmp_optarg, &endptr, 0);
break;
case 'F':
// Function number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->function = strtol(tmp_optarg, &endptr, 0);
break;
case 'S':
// Socket number
if (tmp_optarg == NULL ) break;
endptr = NULL;
rasCmdLine->socket = strtol(tmp_optarg, &endptr, 0);
break;
case 'P':
// Print Errors
rasCmdLine->print_error = true;
break;
case 'Q':
// Set Cast error
rasCmdLine->catast_error = true;
break;
case 'R':
// Set Fatal error
rasCmdLine->fatal_error = true;
break;
case 'O':
// Set page fault error
rasCmdLine->pagefault_error = true;
break;
case 'N':
// Set Non Fatal error
rasCmdLine->nonfatal_error = true;
break;
case 'C':
// Clear Injected Error
rasCmdLine->clear_injerror = true;
break;
case 'E':
// Set MW Address error
rasCmdLine->mwaddress_error = true;
break;
case 'G':
// Set MR Address error
rasCmdLine->mraddress_error = true;
break;
case 'H':
// Set MW Length error
rasCmdLine->mwlength_error = true;
break;
case 'I':
// Set MR Length error
rasCmdLine->mrlength_error = true;
break;
case ':': /* missing option argument */
printf("Missing option argument.\n");
return -1;
case '?':
default: /* invalid option */
printf("Invalid cmdline options.\n");
return -1;
}
}
return 0;
}
| 1 | 17,811 |
You lose the value of `result` when you `goto out`
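A minimal sketch of the pattern this comment is asking for, assuming only the
OPAE API used in the file above (fpgaClose(), FPGA_OK); keep_first() and
run_and_cleanup() are hypothetical names, not part of the SDK. The idea is to
fold each cleanup step's return code into the first recorded failure so a
later call cannot overwrite it:

#include <opae/fpga.h>

/* Hypothetical helper: report the first failure; a later cleanup result only
 * matters if everything before it succeeded. */
static fpga_result keep_first(fpga_result first, fpga_result later)
{
	return (first != FPGA_OK) ? first : later;
}

/* Hypothetical cleanup path that preserves the first error across the
 * goto-based cleanup labels. */
static fpga_result run_and_cleanup(fpga_handle fme, fpga_handle afu)
{
	fpga_result result = FPGA_OK;
	/* ... work that may set 'result' and jump to the label below ... */
	goto out;
out:
	if (fme)
		result = keep_first(result, fpgaClose(fme));
	if (afu)
		result = keep_first(result, fpgaClose(afu));
	return result; /* the first error survives the cleanup calls */
}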
|
OPAE-opae-sdk
|
c
|
@@ -497,8 +497,9 @@ Blockly.Scrollbar.prototype.onScroll_ = function() {
* @param {number} value The distance from the top/left end of the bar.
*/
Blockly.Scrollbar.prototype.set = function(value) {
+ var ratio = this.ratio_ == undefined ? 0 : this.ratio_;
// Move the scrollbar slider.
- this.svgKnob_.setAttribute(this.horizontal_ ? 'x' : 'y', value * this.ratio_);
+ this.svgKnob_.setAttribute(this.horizontal_ ? 'x' : 'y', value * ratio);
this.onScroll_();
};
| 1 |
/**
* @license
* Visual Blocks Editor
*
* Copyright 2011 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Library for creating scrollbars.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.Scrollbar');
goog.provide('Blockly.ScrollbarPair');
goog.require('goog.dom');
goog.require('goog.events');
/**
* Class for a pair of scrollbars. Horizontal and vertical.
* @param {!Blockly.Workspace} workspace Workspace to bind the scrollbars to.
* @constructor
*/
Blockly.ScrollbarPair = function(workspace) {
this.workspace_ = workspace;
this.hScroll = new Blockly.Scrollbar(workspace, true, true);
this.vScroll = new Blockly.Scrollbar(workspace, false, true);
this.corner_ = Blockly.createSvgElement('rect',
{'height': Blockly.Scrollbar.scrollbarThickness,
'width': Blockly.Scrollbar.scrollbarThickness,
'class': 'blocklyScrollbarBackground'}, null);
Blockly.Scrollbar.insertAfter_(this.corner_, workspace.getBubbleCanvas());
};
/**
* Previously recorded metrics from the workspace.
* @type {Object}
* @private
*/
Blockly.ScrollbarPair.prototype.oldHostMetrics_ = null;
/**
* Dispose of this pair of scrollbars.
* Unlink from all DOM elements to prevent memory leaks.
*/
Blockly.ScrollbarPair.prototype.dispose = function() {
goog.dom.removeNode(this.corner_);
this.corner_ = null;
this.workspace_ = null;
this.oldHostMetrics_ = null;
this.hScroll.dispose();
this.hScroll = null;
this.vScroll.dispose();
this.vScroll = null;
};
/**
* Recalculate both of the scrollbars' locations and lengths.
* Also reposition the corner rectangle.
*/
Blockly.ScrollbarPair.prototype.resize = function() {
// Look up the host metrics once, and use for both scrollbars.
var hostMetrics = this.workspace_.getMetrics();
if (!hostMetrics) {
// Host element is likely not visible.
return;
}
// Only change the scrollbars if there has been a change in metrics.
var resizeH = false;
var resizeV = false;
if (!this.oldHostMetrics_ ||
this.oldHostMetrics_.viewWidth != hostMetrics.viewWidth ||
this.oldHostMetrics_.viewHeight != hostMetrics.viewHeight ||
this.oldHostMetrics_.absoluteTop != hostMetrics.absoluteTop ||
this.oldHostMetrics_.absoluteLeft != hostMetrics.absoluteLeft) {
// The window has been resized or repositioned.
resizeH = true;
resizeV = true;
} else {
// Has the content been resized or moved?
if (!this.oldHostMetrics_ ||
this.oldHostMetrics_.contentWidth != hostMetrics.contentWidth ||
this.oldHostMetrics_.viewLeft != hostMetrics.viewLeft ||
this.oldHostMetrics_.contentLeft != hostMetrics.contentLeft) {
resizeH = true;
}
if (!this.oldHostMetrics_ ||
this.oldHostMetrics_.contentHeight != hostMetrics.contentHeight ||
this.oldHostMetrics_.viewTop != hostMetrics.viewTop ||
this.oldHostMetrics_.contentTop != hostMetrics.contentTop) {
resizeV = true;
}
}
if (resizeH) {
this.hScroll.resize(hostMetrics);
}
if (resizeV) {
this.vScroll.resize(hostMetrics);
}
// Reposition the corner square.
if (!this.oldHostMetrics_ ||
this.oldHostMetrics_.viewWidth != hostMetrics.viewWidth ||
this.oldHostMetrics_.absoluteLeft != hostMetrics.absoluteLeft) {
this.corner_.setAttribute('x', this.vScroll.xCoordinate);
}
if (!this.oldHostMetrics_ ||
this.oldHostMetrics_.viewHeight != hostMetrics.viewHeight ||
this.oldHostMetrics_.absoluteTop != hostMetrics.absoluteTop) {
this.corner_.setAttribute('y', this.hScroll.yCoordinate);
}
// Cache the current metrics to potentially short-cut the next resize event.
this.oldHostMetrics_ = hostMetrics;
};
/**
* Set the sliders of both scrollbars to be at a certain position.
* @param {number} x Horizontal scroll value.
* @param {number} y Vertical scroll value.
*/
Blockly.ScrollbarPair.prototype.set = function(x, y) {
this.hScroll.set(x);
this.vScroll.set(y);
};
// --------------------------------------------------------------------
/**
* Class for a pure SVG scrollbar.
* This technique offers a scrollbar that is guaranteed to work, but may not
* look or behave like the system's scrollbars.
* @param {!Blockly.Workspace} workspace Workspace to bind the scrollbar to.
* @param {boolean} horizontal True if horizontal, false if vertical.
* @param {boolean=} opt_pair True if the scrollbar is part of a horiz/vert pair.
* @constructor
*/
Blockly.Scrollbar = function(workspace, horizontal, opt_pair) {
this.workspace_ = workspace;
this.pair_ = opt_pair || false;
this.horizontal_ = horizontal;
this.createDom_();
if (horizontal) {
this.svgBackground_.setAttribute('height',
Blockly.Scrollbar.scrollbarThickness);
this.svgKnob_.setAttribute('height',
Blockly.Scrollbar.scrollbarThickness - 5);
this.svgKnob_.setAttribute('y', 2.5);
} else {
this.svgBackground_.setAttribute('width',
Blockly.Scrollbar.scrollbarThickness);
this.svgKnob_.setAttribute('width',
Blockly.Scrollbar.scrollbarThickness - 5);
this.svgKnob_.setAttribute('x', 2.5);
}
var scrollbar = this;
this.onMouseDownBarWrapper_ = Blockly.bindEvent_(this.svgBackground_,
'mousedown', scrollbar, scrollbar.onMouseDownBar_);
this.onMouseDownKnobWrapper_ = Blockly.bindEvent_(this.svgKnob_,
'mousedown', scrollbar, scrollbar.onMouseDownKnob_);
};
/**
* Width of vertical scrollbar or height of horizontal scrollbar.
* Increase the size of scrollbars on touch devices.
* Don't define if there is no document object (e.g. node.js).
*/
Blockly.Scrollbar.scrollbarThickness = 15;
if (goog.events.BrowserFeature.TOUCH_ENABLED) {
Blockly.Scrollbar.scrollbarThickness = 25;
}
/**
* Dispose of this scrollbar.
* Unlink from all DOM elements to prevent memory leaks.
*/
Blockly.Scrollbar.prototype.dispose = function() {
this.onMouseUpKnob_();
Blockly.unbindEvent_(this.onMouseDownBarWrapper_);
this.onMouseDownBarWrapper_ = null;
Blockly.unbindEvent_(this.onMouseDownKnobWrapper_);
this.onMouseDownKnobWrapper_ = null;
goog.dom.removeNode(this.svgGroup_);
this.svgGroup_ = null;
this.svgBackground_ = null;
this.svgKnob_ = null;
this.workspace_ = null;
};
/**
* Recalculate the scrollbar's location and its length.
* @param {Object=} opt_metrics A data structure describing all the
* required dimensions. If not provided, it will be fetched from the host
* object.
*/
Blockly.Scrollbar.prototype.resize = function(opt_metrics) {
// Determine the location, height and width of the host element.
var hostMetrics = opt_metrics;
if (!hostMetrics) {
hostMetrics = this.workspace_.getMetrics();
if (!hostMetrics) {
// Host element is likely not visible.
return;
}
}
/* hostMetrics is an object with the following properties.
* .viewHeight: Height of the visible rectangle,
* .viewWidth: Width of the visible rectangle,
* .contentHeight: Height of the contents,
* .contentWidth: Width of the content,
* .viewTop: Offset of top edge of visible rectangle from parent,
* .viewLeft: Offset of left edge of visible rectangle from parent,
* .contentTop: Offset of the top-most content from the y=0 coordinate,
* .contentLeft: Offset of the left-most content from the x=0 coordinate,
* .absoluteTop: Top-edge of view.
* .absoluteLeft: Left-edge of view.
*/
if (this.horizontal_) {
var outerLength = hostMetrics.viewWidth - 1;
if (this.pair_) {
// Shorten the scrollbar to make room for the corner square.
outerLength -= Blockly.Scrollbar.scrollbarThickness;
} else {
// Only show the scrollbar if needed.
// Ideally this would also apply to scrollbar pairs, but that's a bigger
// headache (due to interactions with the corner square).
this.setVisible(outerLength < hostMetrics.contentWidth);
}
this.ratio_ = outerLength / hostMetrics.contentWidth;
if (this.ratio_ === -Infinity || this.ratio_ === Infinity ||
isNaN(this.ratio_)) {
this.ratio_ = 0;
}
var innerLength = hostMetrics.viewWidth * this.ratio_;
var innerOffset = (hostMetrics.viewLeft - hostMetrics.contentLeft) *
this.ratio_;
this.svgKnob_.setAttribute('width', Math.max(0, innerLength));
this.xCoordinate = hostMetrics.absoluteLeft + 0.5;
if (this.pair_ && this.workspace_.RTL) {
this.xCoordinate += hostMetrics.absoluteLeft +
Blockly.Scrollbar.scrollbarThickness;
}
this.yCoordinate = hostMetrics.absoluteTop + hostMetrics.viewHeight -
Blockly.Scrollbar.scrollbarThickness - 0.5;
this.svgGroup_.setAttribute('transform',
'translate(' + this.xCoordinate + ',' + this.yCoordinate + ')');
this.svgBackground_.setAttribute('width', Math.max(0, outerLength));
this.svgKnob_.setAttribute('x', this.constrainKnob_(innerOffset));
} else {
var outerLength = hostMetrics.viewHeight - 1;
if (this.pair_) {
// Shorten the scrollbar to make room for the corner square.
outerLength -= Blockly.Scrollbar.scrollbarThickness;
} else {
// Only show the scrollbar if needed.
this.setVisible(outerLength < hostMetrics.contentHeight);
}
this.ratio_ = outerLength / hostMetrics.contentHeight;
if (this.ratio_ === -Infinity || this.ratio_ === Infinity ||
isNaN(this.ratio_)) {
this.ratio_ = 0;
}
var innerLength = hostMetrics.viewHeight * this.ratio_;
var innerOffset = (hostMetrics.viewTop - hostMetrics.contentTop) *
this.ratio_;
this.svgKnob_.setAttribute('height', Math.max(0, innerLength));
this.xCoordinate = hostMetrics.absoluteLeft + 0.5;
if (!this.workspace_.RTL) {
this.xCoordinate += hostMetrics.viewWidth -
Blockly.Scrollbar.scrollbarThickness - 1;
}
this.yCoordinate = hostMetrics.absoluteTop + 0.5;
this.svgGroup_.setAttribute('transform',
'translate(' + this.xCoordinate + ',' + this.yCoordinate + ')');
this.svgBackground_.setAttribute('height', Math.max(0, outerLength));
this.svgKnob_.setAttribute('y', this.constrainKnob_(innerOffset));
}
// Resizing may have caused some scrolling.
this.onScroll_();
};
/**
* Create all the DOM elements required for a scrollbar.
* The resulting widget is not sized.
* @private
*/
Blockly.Scrollbar.prototype.createDom_ = function() {
/* Create the following DOM:
<g class="blocklyScrollbarHorizontal">
<rect class="blocklyScrollbarBackground" />
<rect class="blocklyScrollbarKnob" rx="8" ry="8" />
</g>
*/
var className = 'blocklyScrollbar' +
(this.horizontal_ ? 'Horizontal' : 'Vertical');
this.svgGroup_ = Blockly.createSvgElement('g', {'class': className}, null);
this.svgBackground_ = Blockly.createSvgElement('rect',
{'class': 'blocklyScrollbarBackground'}, this.svgGroup_);
var radius = Math.floor((Blockly.Scrollbar.scrollbarThickness - 5) / 2);
this.svgKnob_ = Blockly.createSvgElement('rect',
{'class': 'blocklyScrollbarKnob', 'rx': radius, 'ry': radius},
this.svgGroup_);
Blockly.Scrollbar.insertAfter_(this.svgGroup_,
this.workspace_.getBubbleCanvas());
};
/**
* Is the scrollbar visible. Non-paired scrollbars disappear when they aren't
* needed.
* @return {boolean} True if visible.
*/
Blockly.Scrollbar.prototype.isVisible = function() {
return this.svgGroup_.getAttribute('display') != 'none';
};
/**
* Set whether the scrollbar is visible.
* Only applies to non-paired scrollbars.
* @param {boolean} visible True if visible.
*/
Blockly.Scrollbar.prototype.setVisible = function(visible) {
if (visible == this.isVisible()) {
return;
}
// Ideally this would also apply to scrollbar pairs, but that's a bigger
// headache (due to interactions with the corner square).
if (this.pair_) {
throw 'Unable to toggle visibility of paired scrollbars.';
}
if (visible) {
this.svgGroup_.setAttribute('display', 'block');
} else {
// Hide the scrollbar.
this.workspace_.setMetrics({x: 0, y: 0});
this.svgGroup_.setAttribute('display', 'none');
}
};
/**
* Scroll by one pageful.
* Called when scrollbar background is clicked.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.Scrollbar.prototype.onMouseDownBar_ = function(e) {
this.onMouseUpKnob_();
if (Blockly.isRightButton(e)) {
// Right-click.
// Scrollbars have no context menu.
e.stopPropagation();
return;
}
var mouseXY = Blockly.mouseToSvg(e, this.workspace_.getParentSvg());
var mouseLocation = this.horizontal_ ? mouseXY.x : mouseXY.y;
var knobXY = Blockly.getSvgXY_(this.svgKnob_, this.workspace_);
var knobStart = this.horizontal_ ? knobXY.x : knobXY.y;
var knobLength = parseFloat(
this.svgKnob_.getAttribute(this.horizontal_ ? 'width' : 'height'));
var knobValue = parseFloat(
this.svgKnob_.getAttribute(this.horizontal_ ? 'x' : 'y'));
var pageLength = knobLength * 0.95;
if (mouseLocation <= knobStart) {
// Decrease the scrollbar's value by a page.
knobValue -= pageLength;
} else if (mouseLocation >= knobStart + knobLength) {
// Increase the scrollbar's value by a page.
knobValue += pageLength;
}
this.svgKnob_.setAttribute(this.horizontal_ ? 'x' : 'y',
this.constrainKnob_(knobValue));
this.onScroll_();
e.stopPropagation();
};
/**
* Start a dragging operation.
* Called when scrollbar knob is clicked.
* @param {!Event} e Mouse down event.
* @private
*/
Blockly.Scrollbar.prototype.onMouseDownKnob_ = function(e) {
this.onMouseUpKnob_();
if (Blockly.isRightButton(e)) {
// Right-click.
// Scrollbars have no context menu.
e.stopPropagation();
return;
}
// Look up the current translation and record it.
this.startDragKnob = parseFloat(
this.svgKnob_.getAttribute(this.horizontal_ ? 'x' : 'y'));
// Record the current mouse position.
this.startDragMouse = this.horizontal_ ? e.clientX : e.clientY;
Blockly.Scrollbar.onMouseUpWrapper_ = Blockly.bindEvent_(document,
'mouseup', this, this.onMouseUpKnob_);
Blockly.Scrollbar.onMouseMoveWrapper_ = Blockly.bindEvent_(document,
'mousemove', this, this.onMouseMoveKnob_);
e.stopPropagation();
};
/**
* Drag the scrollbar's knob.
* @param {!Event} e Mouse up event.
* @private
*/
Blockly.Scrollbar.prototype.onMouseMoveKnob_ = function(e) {
var currentMouse = this.horizontal_ ? e.clientX : e.clientY;
var mouseDelta = currentMouse - this.startDragMouse;
var knobValue = this.startDragKnob + mouseDelta;
// Position the bar.
this.svgKnob_.setAttribute(this.horizontal_ ? 'x' : 'y',
this.constrainKnob_(knobValue));
this.onScroll_();
};
/**
* Stop binding to the global mouseup and mousemove events.
* @private
*/
Blockly.Scrollbar.prototype.onMouseUpKnob_ = function() {
Blockly.removeAllRanges();
Blockly.hideChaff(true);
if (Blockly.Scrollbar.onMouseUpWrapper_) {
Blockly.unbindEvent_(Blockly.Scrollbar.onMouseUpWrapper_);
Blockly.Scrollbar.onMouseUpWrapper_ = null;
}
if (Blockly.Scrollbar.onMouseMoveWrapper_) {
Blockly.unbindEvent_(Blockly.Scrollbar.onMouseMoveWrapper_);
Blockly.Scrollbar.onMouseMoveWrapper_ = null;
}
};
/**
* Constrain the knob's position within the minimum (0) and maximum
* (length of scrollbar) values allowed for the scrollbar.
* @param {number} value Value that is potentially out of bounds.
* @return {number} Constrained value.
* @private
*/
Blockly.Scrollbar.prototype.constrainKnob_ = function(value) {
if (value <= 0 || isNaN(value)) {
value = 0;
} else {
var axis = this.horizontal_ ? 'width' : 'height';
var barLength = parseFloat(this.svgBackground_.getAttribute(axis));
var knobLength = parseFloat(this.svgKnob_.getAttribute(axis));
value = Math.min(value, barLength - knobLength);
}
return value;
};
/**
* Called when scrollbar is moved.
* @private
*/
Blockly.Scrollbar.prototype.onScroll_ = function() {
var knobValue = parseFloat(
this.svgKnob_.getAttribute(this.horizontal_ ? 'x' : 'y'));
var barLength = parseFloat(
this.svgBackground_.getAttribute(this.horizontal_ ? 'width' : 'height'));
var ratio = knobValue / barLength;
if (isNaN(ratio)) {
ratio = 0;
}
var xyRatio = {};
if (this.horizontal_) {
xyRatio.x = ratio;
} else {
xyRatio.y = ratio;
}
this.workspace_.setMetrics(xyRatio);
};
/**
* Set the scrollbar slider's position.
* @param {number} value The distance from the top/left end of the bar.
*/
Blockly.Scrollbar.prototype.set = function(value) {
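// Note: this.ratio_ is only assigned in resize(), so calling set() before the
// first resize multiplies by undefined and yields NaN (the patch above guards
// against that).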
// Move the scrollbar slider.
this.svgKnob_.setAttribute(this.horizontal_ ? 'x' : 'y', value * this.ratio_);
this.onScroll_();
};
/**
* Insert a node after a reference node.
* Contrast with node.insertBefore function.
* @param {!Element} newNode New element to insert.
* @param {!Element} refNode Existing element to precede new node.
* @private
*/
Blockly.Scrollbar.insertAfter_ = function(newNode, refNode) {
var siblingNode = refNode.nextSibling;
var parentNode = refNode.parentNode;
if (!parentNode) {
throw 'Reference node has no parent.';
}
if (siblingNode) {
parentNode.insertBefore(newNode, siblingNode);
} else {
parentNode.appendChild(newNode);
}
};
| 1 | 7,439 |
Just a style thing, why not `this.ratio_ || 0`?
|
LLK-scratch-blocks
|
js
|
@@ -186,7 +186,8 @@ func CIDRFromCalicoNet(ipNet calinet.IPNet) CIDR {
func CIDRFromIPNet(ipNet *net.IPNet) CIDR {
ones, _ := ipNet.Mask.Size()
- ip := FromNetIP(ipNet.IP)
+ // Mask the IP before creating the CIDR so that we have it in canonical format.
+ ip := FromNetIP(getMaskedIp(ipNet))
if ip.Version() == 4 {
return V4CIDR{
addr: ip.(V4Addr),
| 1 |
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The ip package contains yet another IP address (and CIDR) type :-). The
// types differ from the ones in the net package in that they are backed by
// fixed-sized arrays of the appropriate size. The key advantage of
// using a fixed-size array is that it makes the types hashable so they can
// be used as map keys. In addition, they can be converted to net.IP by
// slicing.
package ip
import (
"errors"
"fmt"
"net"
"strings"
log "github.com/sirupsen/logrus"
calinet "github.com/projectcalico/libcalico-go/lib/net"
)
var ErrInvalidIP = errors.New("Failed to parse IP address")
// Addr represents either an IPv4 or IPv6 IP address.
type Addr interface {
// Version returns the IP version; 4 or 6.
Version() uint8
// AsNetIP returns a net.IP, which is backed by/shares storage with
// this object.
AsNetIP() net.IP
AsCalicoNetIP() calinet.IP
AsCIDR() CIDR
String() string
}
type V4Addr [4]byte
func (a V4Addr) Version() uint8 {
return 4
}
func (a V4Addr) AsNetIP() net.IP {
return net.IP(a[0:net.IPv4len])
}
func (a V4Addr) AsCalicoNetIP() calinet.IP {
return calinet.IP{IP: a.AsNetIP()}
}
func (a V4Addr) AsCIDR() CIDR {
return V4CIDR{
addr: a,
prefix: 32,
}
}
func (a V4Addr) String() string {
return a.AsNetIP().String()
}
type V6Addr [16]byte
func (a V6Addr) Version() uint8 {
return 6
}
func (a V6Addr) AsNetIP() net.IP {
return net.IP(a[0:net.IPv6len])
}
func (a V6Addr) AsCalicoNetIP() calinet.IP {
return calinet.IP{IP: a.AsNetIP()}
}
func (a V6Addr) AsCIDR() CIDR {
return V6CIDR{
addr: a,
prefix: 128,
}
}
func (a V6Addr) String() string {
return a.AsNetIP().String()
}
type CIDR interface {
Version() uint8
Addr() Addr
Prefix() uint8
String() string
ToIPNet() net.IPNet
}
type V4CIDR struct {
addr V4Addr
prefix uint8
}
func (c V4CIDR) Version() uint8 {
return 4
}
func (c V4CIDR) Addr() Addr {
return c.addr
}
func (c V4CIDR) Prefix() uint8 {
return c.prefix
}
func (c V4CIDR) ToIPNet() net.IPNet {
return net.IPNet{
IP: c.Addr().AsNetIP(),
Mask: net.CIDRMask(int(c.Prefix()), 32),
}
}
func (c V4CIDR) String() string {
return fmt.Sprintf("%s/%v", c.addr.String(), c.prefix)
}
type V6CIDR struct {
addr V6Addr
prefix uint8
}
func (c V6CIDR) Version() uint8 {
return 6
}
func (c V6CIDR) Addr() Addr {
return c.addr
}
func (c V6CIDR) Prefix() uint8 {
return c.prefix
}
func (c V6CIDR) ToIPNet() net.IPNet {
return net.IPNet{
IP: c.Addr().AsNetIP(),
Mask: net.CIDRMask(int(c.Prefix()), 128),
}
}
func (c V6CIDR) String() string {
return fmt.Sprintf("%s/%v", c.addr.String(), c.prefix)
}
func FromString(s string) Addr {
return FromNetIP(net.ParseIP(s))
}
func FromNetIP(netIP net.IP) Addr {
// Note: we have to use To4() here because the net package often represents an IPv4 address
// using 16 bytes. The only way to distinguish an IPv4 address using that API is To4(),
// which returns nil if the IP is a v6 address or nil.
if v4NetIP := netIP.To4(); v4NetIP != nil {
ip := V4Addr{}
copy(ip[:], v4NetIP)
return ip
}
if v6NetIP := netIP.To16(); v6NetIP != nil {
ip := V6Addr{}
copy(ip[:], v6NetIP)
return ip
}
return nil
}
func CIDRFromCalicoNet(ipNet calinet.IPNet) CIDR {
return CIDRFromIPNet(&ipNet.IPNet)
}
func CIDRFromIPNet(ipNet *net.IPNet) CIDR {
ones, _ := ipNet.Mask.Size()
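// Note: ipNet.IP is used unmasked below; the patch above masks it first so
// the stored CIDR address is in canonical network form (the standard
// library's (net.IP).Mask method can apply the mask).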
ip := FromNetIP(ipNet.IP)
if ip.Version() == 4 {
return V4CIDR{
addr: ip.(V4Addr),
prefix: uint8(ones),
}
} else {
return V6CIDR{
addr: ip.(V6Addr),
prefix: uint8(ones),
}
}
}
// CIDRFromNetIP converts the given IP into our CIDR representation as a /32 or /128.
func CIDRFromNetIP(netIP net.IP) CIDR {
return FromNetIP(netIP).AsCIDR()
}
// MustParseCIDROrIP parses the given IP address or CIDR, treating IP addresses as "full length"
// CIDRs. For example, "10.0.0.1" is treated as "10.0.0.1/32". It panics on failure.
func MustParseCIDROrIP(s string) CIDR {
cidr, err := ParseCIDROrIP(s)
if err != nil {
log.WithError(err).WithField("cidr", s).Panic("Failed to parse CIDR")
}
return cidr
}
// ParseCIDROrIP parses the given IP address or CIDR, treating IP addresses as "full length"
// CIDRs. For example, "10.0.0.1" is treated as "10.0.0.1/32".
func ParseCIDROrIP(s string) (CIDR, error) {
if !strings.Contains(s, "/") {
ip := net.ParseIP(s)
if ip == nil {
return nil, ErrInvalidIP
}
return CIDRFromNetIP(ip), nil
}
_, netCIDR, err := net.ParseCIDR(s)
if err != nil {
return nil, err
}
return CIDRFromIPNet(netCIDR), nil
}
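// Illustrative usage sketch, not part of the original package: it demonstrates the
// property called out in the package comment, namely that Addr values are comparable
// (so they can be used directly as map keys) and that parsed CIDRs round-trip to
// net.IPNet. The exampleUsage name is hypothetical.
func exampleUsage() {
	seen := map[Addr]bool{}
	seen[FromString("10.0.0.1")] = true
	// ParseCIDROrIP treats a bare address as a full-length CIDR, i.e. 10.0.0.1/32.
	cidr, err := ParseCIDROrIP("10.0.0.1")
	if err != nil {
		log.WithError(err).Panic("unexpected parse failure in example")
	}
	ipNet := cidr.ToIPNet() // net.IPNet with IP 10.0.0.1 and a /32 mask
	_, _ = ipNet, seen
}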
| 1 | 16,402 |
I think the stdlib's `net.IP` has a `Mask()` method; does that do what we need?
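For reference, a minimal sketch of what that stdlib method does, assuming the operation in question is zeroing host bits: the signature is `func (ip net.IP) Mask(mask net.IPMask) net.IP`, so it returns a masked address rather than a prefix length.

	masked := net.ParseIP("10.0.0.129").Mask(net.CIDRMask(24, 32))
	// masked.String() == "10.0.0.0"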
|
projectcalico-felix
|
c
|
@@ -106,9 +106,9 @@ func strOr(s string, ss ...string) string {
return ""
}
-// substitute runs replacer on string elements within a complex data structure
+// Substitute runs replacer on string elements within a complex data structure
// (except those contained in private data structure fields).
-func substitute(v reflect.Value, replacer *strings.Replacer) {
+func Substitute(v reflect.Value, replacer *strings.Replacer) {
traverseData(v, func(val reflect.Value) dErr {
switch val.Interface().(type) {
case string:
| 1 |
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisy
import (
"math/rand"
"os"
"os/user"
"reflect"
"regexp"
"strings"
"time"
)
func getUser() string {
if cu, err := user.Current(); err == nil {
return cu.Username
}
if hn, err := os.Hostname(); err == nil {
return hn
}
return "unknown"
}
func namedSubexp(re *regexp.Regexp, s string) map[string]string {
match := re.FindStringSubmatch(s)
if match == nil {
return nil
}
result := make(map[string]string)
l := len(match)
for i, name := range re.SubexpNames() {
if i == 0 || name == "" {
continue
}
result[name] = ""
if i < l {
result[name] = match[i]
}
}
return result
}
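// Illustrative sketch, not part of the original file: how namedSubexp behaves with a
// regexp that uses named capture groups. The pattern and exampleNamedSubexp are
// hypothetical, chosen only to show the shape of the result.
var exampleDiskRgx = regexp.MustCompile(`zones/(?P<zone>[^/]+)/disks/(?P<disk>[^/]+)$`)

func exampleNamedSubexp() map[string]string {
	// Returns map[disk:d1 zone:us-central1-a]; returns nil when the string does not match.
	return namedSubexp(exampleDiskRgx, "projects/p/zones/us-central1-a/disks/d1")
}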
// filter creates a copy of ss, excluding any instances of s.
func filter(ss []string, s string) []string {
result := []string{}
for _, element := range ss {
if element != s {
result = append(result, element)
}
}
return result
}
func minInt(x int, ys ...int) int {
for _, y := range ys {
if y < x {
x = y
}
}
return x
}
func randString(n int) string {
gen := rand.New(rand.NewSource(time.Now().UnixNano()))
letters := "bdghjlmnpqrstvwxyz0123456789"
b := make([]byte, n)
for i := range b {
b[i] = letters[gen.Int63()%int64(len(letters))]
}
return string(b)
}
func strIn(s string, ss []string) bool {
for _, x := range ss {
if s == x {
return true
}
}
return false
}
func strLitPtr(s string) *string {
return &s
}
func strOr(s string, ss ...string) string {
ss = append([]string{s}, ss...)
for _, st := range ss {
if st != "" {
return st
}
}
return ""
}
// substitute runs replacer on string elements within a complex data structure
// (except those contained in private data structure fields).
func substitute(v reflect.Value, replacer *strings.Replacer) {
traverseData(v, func(val reflect.Value) dErr {
switch val.Interface().(type) {
case string:
val.SetString(replacer.Replace(val.String()))
}
return nil
})
}
// traverseData traverses complex data structures and runs
// a function, f, on its basic data types.
// Traverses arrays, maps, slices, and public fields of structs.
// For example, f will be run on bool, int, string, etc.
// Slices, maps, and structs will not have f called on them, but will
// traverse their subelements.
// Errors returned from f will be returned by traverseData.
func traverseData(v reflect.Value, f func(reflect.Value) dErr) dErr {
if !v.CanSet() {
// Don't run on private fields.
return nil
}
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil
case reflect.Interface, reflect.Ptr, reflect.UnsafePointer:
if v.IsNil() {
return nil
}
// I'm a pointer, dereference me.
return traverseData(v.Elem(), f)
}
switch v.Kind() {
case reflect.Array, reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := traverseData(v.Index(i), f); err != nil {
return err
}
}
case reflect.Map:
kvs := v.MapKeys()
for _, kv := range kvs {
vv := v.MapIndex(kv)
// Create new mutable copies of the key and value.
// Modify the copies.
newKv := reflect.New(kv.Type()).Elem()
newKv.Set(kv)
newVv := reflect.New(vv.Type()).Elem()
newVv.Set(vv)
if err := traverseData(newKv, f); err != nil {
return err
}
if err := traverseData(newVv, f); err != nil {
return err
}
// Delete the old key-value.
v.SetMapIndex(kv, reflect.Value{})
// Set the new key-value.
v.SetMapIndex(newKv, newVv)
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if err := traverseData(v.Field(i), f); err != nil {
return err
}
}
default:
// As far as I can tell, this is a basic data type. Run f on it.
return f(v)
}
return nil
}
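// Illustrative sketch, not part of the original file: how substitute (and, through it,
// traverseData) is typically driven. The exampleConfig type and the ${USER} placeholder
// are hypothetical.
type exampleConfig struct {
	Name string
	Tags []string
}

func exampleSubstitute() exampleConfig {
	cfg := exampleConfig{Name: "${USER}-image", Tags: []string{"${USER}"}}
	r := strings.NewReplacer("${USER}", getUser())
	// Pass an addressable reflect.Value so exported string fields can be rewritten in place.
	substitute(reflect.ValueOf(&cfg).Elem(), r)
	return cfg // Name and Tags[0] now contain the current username
}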
func xor(x, y bool) bool {
return x != y
}
| 1 | 7,038 |
Why does substitute need to be public? Are we allowing/encouraging variable usage when using Daisy as a library?
|
GoogleCloudPlatform-compute-image-tools
|
go
|
@@ -335,6 +335,11 @@ class WebEngineCaret(browsertab.AbstractCaret):
"""
if js_elem is None:
return
+ if js_elem == "focused":
+ # we had a focused element, not a selected one. Just send <enter>
+ self._tab.key_press(Qt.Key_Enter)
+ return
+
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
| 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent, QIcon
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.config import configdata, config
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
args = objreg.get('args')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, args=args, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
# Clear visited links on web history clear
hist = objreg.get('web-history')
for p in [webenginesettings.default_profile,
webenginesettings.private_profile]:
hist.history_cleared.connect(p.clearAllVisitedLinks)
hist.url_cleared.connect(lambda url, profile=p:
profile.clearVisitedLinks([url]))
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._tab.search.search_displayed:
# We are currently in search mode.
# convert the search to a blue selection so we can operate on it
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
self._tab.run_js_async(
javascript.assemble('caret', 'setPlatform', sys.platform))
self._js_call('setInitialCursor', self._selection_cb)
def _selection_cb(self, enabled):
"""Emit selection_toggled based on setInitialCursor."""
if enabled is None:
log.webview.debug("Ignoring selection status None")
return
self.selection_toggled.emit(enabled)
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
if mode != usertypes.KeyMode.caret:
return
self.drop_selection()
self._js_call('disableCaret')
def move_to_next_line(self, count=1):
for _ in range(count):
self._js_call('moveDown')
def move_to_prev_line(self, count=1):
for _ in range(count):
self._js_call('moveUp')
def move_to_next_char(self, count=1):
for _ in range(count):
self._js_call('moveRight')
def move_to_prev_char(self, count=1):
for _ in range(count):
self._js_call('moveLeft')
def move_to_end_of_word(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfWord')
def move_to_next_word(self, count=1):
for _ in range(count):
self._js_call('moveToNextWord')
def move_to_prev_word(self, count=1):
for _ in range(count):
self._js_call('moveToPreviousWord')
def move_to_start_of_line(self):
self._js_call('moveToStartOfLine')
def move_to_end_of_line(self):
self._js_call('moveToEndOfLine')
def move_to_start_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfNextBlock')
def move_to_start_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfPrevBlock')
def move_to_end_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfNextBlock')
def move_to_end_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfPrevBlock')
def move_to_start_of_document(self):
self._js_call('moveToStartOfDocument')
def move_to_end_of_document(self):
self._js_call('moveToEndOfDocument')
def toggle_selection(self):
self._js_call('toggleSelection', self.selection_toggled.emit)
def drop_selection(self):
self._js_call('dropSelection')
def selection(self, callback):
# Not using selectedText() as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-53134
# Even on Qt 5.10 selectedText() seems to work poorly, see
# https://github.com/qutebrowser/qutebrowser/issues/3523
self._tab.run_js_async(javascript.assemble('caret', 'getSelection'),
callback)
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
# let's click the link via a fake-click
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
def _js_call(self, command, callback=None):
self._tab.run_js_async(javascript.assemble('caret', command), callback)
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 1000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def to_anchor(self, name):
url = self._tab.url()
url.setFragment(name)
self._tab.openurl(url)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.predicted_navigation.emit(item.url())
self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
if items:
self._tab.predicted_navigation.emit(items[-1].url)
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
@pyqtSlot()
def _on_load_finished():
self._tab.scroller.to_point(cur_data['scroll-pos'])
self._tab.load_finished.disconnect(_on_load_finished)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
self._tab.load_finished.connect(_on_load_finished)
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
_load_finished_fake:
Used in place of unreliable loadFinished
"""
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
_load_finished_fake = pyqtSignal(bool)
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._child_event_filter = None
self._saved_zoom = None
self._reload_url = None
config.instance.changed.connect(self._on_config_changed)
self._init_js()
@pyqtSlot(str)
def _on_config_changed(self, option):
if option in ['scrolling.bar', 'content.user_stylesheets']:
self._init_stylesheet()
self._update_stylesheet()
def _update_stylesheet(self):
"""Update the custom stylesheet in existing tabs."""
css = shared.get_user_stylesheet()
code = javascript.assemble('stylesheet', 'set_css', css)
self.run_js_async(code)
def _inject_early_js(self, name, js_code, *,
world=QWebEngineScript.ApplicationWorld,
subframes=False):
"""Inject the given script to run early on a page load.
This runs the script both on DocumentCreation and DocumentReady because,
on some internal pages, DocumentCreation will not work.
That is a WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66011
"""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
injection_points = {
'creation': QWebEngineScript.DocumentCreation,
'ready': QWebEngineScript.DocumentReady,
}
script = QWebEngineScript()
script.setInjectionPoint(injection_points[injection])
script.setSourceCode(js_code)
script.setWorldId(world)
script.setRunsOnSubFrames(subframes)
script.setName('_qute_{}_{}'.format(name, injection))
scripts.insert(script)
def _remove_early_js(self, name):
"""Remove an early QWebEngineScript."""
scripts = self._widget.page().scripts()
for injection in ['creation', 'ready']:
full_name = '_qute_{}_{}'.format(name, injection)
script = scripts.findScript(full_name)
if not script.isNull():
scripts.remove(script)
def _init_js(self):
"""Initialize global qutebrowser JavaScript."""
js_code = javascript.wrap_global(
'scripts',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
utils.read_file('javascript/caret.js'),
)
# FIXME:qtwebengine what about subframes=True?
self._inject_early_js('js', js_code, subframes=True)
self._init_stylesheet()
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(self._inject_userscripts)
self._inject_userscripts()
def _init_stylesheet(self):
"""Initialize custom stylesheets.
Partially inspired by QupZilla:
https://github.com/QupZilla/qupzilla/blob/v2.0/src/lib/app/mainapplication.cpp#L1063-L1101
"""
self._remove_early_js('stylesheet')
css = shared.get_user_stylesheet()
js_code = javascript.wrap_global(
'stylesheet',
utils.read_file('javascript/stylesheet.js'),
javascript.assemble('stylesheet', 'set_css', css),
)
self._inject_early_js('stylesheet', js_code, subframes=True)
def _inject_userscripts(self):
"""Register user JavaScript files with the global profiles."""
# The Greasemonkey metadata block support in QtWebEngine only starts at
# Qt 5.8. With 5.7.1, we need to inject the scripts ourselves in
# response to urlChanged.
if not qtutils.version_check('5.8'):
return
# Since we are inserting scripts into profile.scripts, they won't simply
# be replaced by new GM scripts as they would be if we were injecting
# them ourselves. So we need to remove all GM scripts first, while
# keeping anything else that might have been added, such as the
# stylesheet script.
greasemonkey = objreg.get('greasemonkey')
scripts = self._widget.page().scripts()
for script in scripts.toList():
if script.name().startswith("GM-"):
log.greasemonkey.debug('Removing script: {}'
.format(script.name()))
removed = scripts.remove(script)
assert removed, script.name()
# Then add the new scripts.
for script in greasemonkey.all_scripts():
# @run-at (and @include/@exclude/@match) is parsed by
# QWebEngineScript.
new_script = QWebEngineScript()
new_script.setWorldId(QWebEngineScript.MainWorld)
new_script.setSourceCode(script.code())
new_script.setName("GM-{}".format(script.name))
new_script.setRunsOnSubFrames(script.runs_on_sub_frames)
log.greasemonkey.debug('adding script: {}'
.format(new_script.name()))
scripts.insert(new_script)
def _install_event_filter(self):
fp = self._widget.focusProxy()
if fp is not None:
fp.installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url, *, predict=True):
"""Open the given URL in this tab.
Arguments:
url: The QUrl to open.
predict: If set to False, predicted_navigation is not emitted.
"""
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url, predict=predict)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started], url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
self._show_error_page(url, "Proxy authentication required")
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
abort_on = [self.shutting_down, self.load_started]
answer = shared.authentication_required(url, authenticator,
abort_on)
if not netrc_success and answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
self._show_error_page(url, "Authentication required")
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
@pyqtSlot(int)
def _on_load_progress_workaround(self, perc):
"""Use loadProgress(100) to emit loadFinished(True).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if perc == 100 and self.load_status() != usertypes.LoadStatus.error:
self._load_finished_fake.emit(True)
@pyqtSlot(bool)
def _on_load_finished_workaround(self, ok):
"""Use only loadFinished(False).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if not ok:
self._load_finished_fake.emit(False)
def _error_page_workaround(self, html):
"""Check if we're displaying a Chromium error page.
This gets only called if we got loadFinished(False) without JavaScript,
so we can display at least some error page.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
Needs to check the page content as a WORKAROUND for
https://bugreports.qt.io/browse/QTBUG-66661
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
self._show_error_page(self.url(), error=match.group(1))
@pyqtSlot(bool)
def _on_load_finished(self, ok):
"""Display a static error page if JavaScript is disabled."""
super()._on_load_finished(ok)
js_enabled = self.settings.test_attribute('content.javascript.enabled')
if not ok and not js_enabled:
self.dump_async(self._error_page_workaround)
if ok and self._reload_url is not None:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
log.config.debug(
"Loading {} again because of config change".format(
self._reload_url.toDisplayString()))
QTimer.singleShot(100, functools.partial(self.openurl,
self._reload_url,
predict=False))
self._reload_url = None
if not qtutils.version_check('5.10', compiled=False):
# We can't do this when we have the loadFinished workaround as that
# sometimes clears icons without loading a new page.
# In general, this is handled by Qt, but when loading takes long,
# the old icon is still displayed.
self.icon_changed.emit(QIcon())
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""If we know we're going to visit an URL soon, change the settings."""
super()._on_predicted_navigation(url)
self.settings.update_for_url(url)
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if qtutils.version_check('5.11.0', exact=True, compiled=False):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-68224
layout = self._widget.layout()
count = layout.count()
children = self._widget.findChildren(QWidget)
if not count and children:
log.webview.warning("Found children not in layout: {}, "
"focus proxy {} (QTBUG-68224)".format(
children, self._widget.focusProxy()))
if count > 1:
log.webview.debug("Found {} widgets! (QTBUG-68224)"
.format(count))
for i in range(count):
item = layout.itemAt(i)
if item is None:
continue
widget = item.widget()
if widget is not self._widget.focusProxy():
log.webview.debug("Removing widget {} (QTBUG-68224)"
.format(widget))
layout.removeWidget(widget)
if not navigation.accepted or not navigation.is_main_frame:
return
settings_needing_reload = {
'content.plugins',
'content.javascript.enabled',
'content.javascript.can_access_clipboard',
'content.print_element_backgrounds',
'input.spatial_navigation',
}
assert settings_needing_reload.issubset(configdata.DATA)
changed = self.settings.update_for_url(navigation.url)
reload_needed = changed & settings_needing_reload
# On Qt < 5.11, we don't need a reload when type == link_clicked.
# On Qt 5.11.0, we always need a reload.
# TODO on Qt > 5.11.0, we hopefully never need a reload:
# https://codereview.qt-project.org/#/c/229525/1
if not qtutils.version_check('5.11.0', exact=True, compiled=False):
if navigation.navigation_type != navigation.Type.link_clicked:
reload_needed = False
if reload_needed:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
self._reload_url = navigation.url
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
page.navigation_request.connect(self._on_navigation_request)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
page.loadProgress.connect(self._on_load_progress_workaround)
self._load_finished_fake.connect(self._on_history_trigger)
self._load_finished_fake.connect(self._restore_zoom)
self._load_finished_fake.connect(self._on_load_finished)
page.loadFinished.connect(self._on_load_finished_workaround)
else:
# for older Qt versions which break with the above
page.loadProgress.connect(self._on_load_progress)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
self.predicted_navigation.connect(self._on_predicted_navigation)
def event_target(self):
return self._widget.focusProxy()
| 1 | 21,460 |
We should probably use ctrl-enter if `tab` is given, just like in `follow_selected`?
|
qutebrowser-qutebrowser
|
py
|
@@ -146,7 +146,7 @@ func New(
dynamicCollection := dynamicconfig.NewCollection(params.DynamicConfig, logger)
- dispatcher := params.RPCFactory.CreateDispatcher()
+ dispatcher := params.RPCFactory.CreateTChannelDispatcher()
membershipMonitor, err := params.MembershipFactory.Create(dispatcher)
if err != nil {
return nil, err
| 1 |
// Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package resource
import (
"math/rand"
"os"
"sync/atomic"
"time"
"github.com/uber-go/tally"
"go.temporal.io/temporal/.gen/go/temporal/workflowserviceclient"
"go.uber.org/yarpc"
"github.com/temporalio/temporal/client"
"github.com/temporalio/temporal/client/admin"
"github.com/temporalio/temporal/client/frontend"
"github.com/temporalio/temporal/client/history"
"github.com/temporalio/temporal/client/matching"
"github.com/temporalio/temporal/common"
"github.com/temporalio/temporal/common/archiver"
"github.com/temporalio/temporal/common/archiver/provider"
"github.com/temporalio/temporal/common/cache"
"github.com/temporalio/temporal/common/clock"
"github.com/temporalio/temporal/common/cluster"
"github.com/temporalio/temporal/common/log"
"github.com/temporalio/temporal/common/log/loggerimpl"
"github.com/temporalio/temporal/common/log/tag"
"github.com/temporalio/temporal/common/membership"
"github.com/temporalio/temporal/common/messaging"
"github.com/temporalio/temporal/common/metrics"
"github.com/temporalio/temporal/common/persistence"
persistenceClient "github.com/temporalio/temporal/common/persistence/client"
"github.com/temporalio/temporal/common/service"
"github.com/temporalio/temporal/common/service/dynamicconfig"
)
type (
// VisibilityManagerInitializer is the function each service should implement
// for visibility manager initialization
VisibilityManagerInitializer func(
persistenceBean persistenceClient.Bean,
logger log.Logger,
) (persistence.VisibilityManager, error)
// Impl contains all common resources shared across frontend / matching / history / worker
Impl struct {
status int32
// static infos
numShards int
serviceName string
hostName string
metricsScope tally.Scope
clusterMetadata cluster.Metadata
// other common resources
domainCache cache.DomainCache
timeSource clock.TimeSource
payloadSerializer persistence.PayloadSerializer
metricsClient metrics.Client
messagingClient messaging.Client
archivalMetadata archiver.ArchivalMetadata
archiverProvider provider.ArchiverProvider
// membership infos
membershipMonitor membership.Monitor
frontendServiceResolver membership.ServiceResolver
matchingServiceResolver membership.ServiceResolver
historyServiceResolver membership.ServiceResolver
workerServiceResolver membership.ServiceResolver
// internal services clients
publicClient workflowserviceclient.Interface
frontendRawClient frontend.Client
frontendClient frontend.Client
matchingRawClient matching.Client
matchingClient matching.Client
historyRawClient history.Client
historyClient history.Client
clientBean client.Bean
// persistence clients
persistenceBean persistenceClient.Bean
visibilityMgr persistence.VisibilityManager
// loggers
logger log.Logger
throttledLogger log.Logger
// for registering handlers
dispatcher *yarpc.Dispatcher
// internal vars
pprofInitializer common.PProfInitializer
runtimeMetricsReporter *metrics.RuntimeMetricsReporter
membershipFactory service.MembershipMonitorFactory
rpcFactory common.RPCFactory
}
)
var _ Resource = (*Impl)(nil)
// New creates a new resource containing common dependencies
func New(
params *service.BootstrapParams,
serviceName string,
throttledLoggerMaxRPS dynamicconfig.IntPropertyFn,
visibilityManagerInitializer VisibilityManagerInitializer,
) (*Impl, error) {
logger := params.Logger.WithTags(tag.Service(serviceName))
throttledLogger := loggerimpl.NewThrottledLogger(logger, throttledLoggerMaxRPS)
numShards := params.PersistenceConfig.NumHistoryShards
hostName, err := os.Hostname()
if err != nil {
return nil, err
}
dynamicCollection := dynamicconfig.NewCollection(params.DynamicConfig, logger)
dispatcher := params.RPCFactory.CreateDispatcher()
membershipMonitor, err := params.MembershipFactory.Create(dispatcher)
if err != nil {
return nil, err
}
clientBean, err := client.NewClientBean(
client.NewRPCClientFactory(
params.RPCFactory,
membershipMonitor,
params.MetricsClient,
dynamicCollection,
numShards,
logger,
),
params.DispatcherProvider,
params.ClusterMetadata,
)
if err != nil {
return nil, err
}
persistenceBean, err := persistenceClient.NewBeanFromFactory(persistenceClient.NewFactory(
¶ms.PersistenceConfig,
params.ClusterMetadata.GetCurrentClusterName(),
params.MetricsClient,
logger,
))
if err != nil {
return nil, err
}
visibilityMgr, err := visibilityManagerInitializer(
persistenceBean,
logger,
)
if err != nil {
return nil, err
}
frontendServiceResolver, err := membershipMonitor.GetResolver(common.FrontendServiceName)
if err != nil {
return nil, err
}
matchingServiceResolver, err := membershipMonitor.GetResolver(common.MatchingServiceName)
if err != nil {
return nil, err
}
historyServiceResolver, err := membershipMonitor.GetResolver(common.HistoryServiceName)
if err != nil {
return nil, err
}
workerServiceResolver, err := membershipMonitor.GetResolver(common.WorkerServiceName)
if err != nil {
return nil, err
}
domainCache := cache.NewDomainCache(
persistenceBean.GetMetadataManager(),
params.ClusterMetadata,
params.MetricsClient,
logger,
)
frontendRawClient := clientBean.GetFrontendClient()
frontendClient := frontend.NewRetryableClient(
frontendRawClient,
common.CreateFrontendServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
matchingRawClient, err := clientBean.GetMatchingClient(domainCache.GetDomainName)
if err != nil {
return nil, err
}
matchingClient := matching.NewRetryableClient(
matchingRawClient,
common.CreateMatchingServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
historyRawClient := clientBean.GetHistoryClient()
historyClient := history.NewRetryableClient(
historyRawClient,
common.CreateHistoryServiceRetryPolicy(),
common.IsWhitelistServiceTransientError,
)
return &Impl{
status: common.DaemonStatusInitialized,
// static infos
numShards: numShards,
serviceName: params.Name,
hostName: hostName,
metricsScope: params.MetricScope,
clusterMetadata: params.ClusterMetadata,
// other common resources
domainCache: domainCache,
timeSource: clock.NewRealTimeSource(),
payloadSerializer: persistence.NewPayloadSerializer(),
metricsClient: params.MetricsClient,
messagingClient: params.MessagingClient,
archivalMetadata: params.ArchivalMetadata,
archiverProvider: params.ArchiverProvider,
// membership infos
membershipMonitor: membershipMonitor,
frontendServiceResolver: frontendServiceResolver,
matchingServiceResolver: matchingServiceResolver,
historyServiceResolver: historyServiceResolver,
workerServiceResolver: workerServiceResolver,
// internal services clients
publicClient: params.PublicClient,
frontendRawClient: frontendRawClient,
frontendClient: frontendClient,
matchingRawClient: matchingRawClient,
matchingClient: matchingClient,
historyRawClient: historyRawClient,
historyClient: historyClient,
clientBean: clientBean,
// persistence clients
persistenceBean: persistenceBean,
visibilityMgr: visibilityMgr,
// loggers
logger: logger,
throttledLogger: throttledLogger,
// for registering handlers
dispatcher: dispatcher,
// internal vars
pprofInitializer: params.PProfInitializer,
runtimeMetricsReporter: metrics.NewRuntimeMetricsReporter(
params.MetricScope,
time.Minute,
logger,
params.InstanceID,
),
membershipFactory: params.MembershipFactory,
rpcFactory: params.RPCFactory,
}, nil
}
// Start starts all resources
func (h *Impl) Start() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusInitialized,
common.DaemonStatusStarted,
) {
return
}
h.metricsScope.Counter(metrics.RestartCount).Inc(1)
h.runtimeMetricsReporter.Start()
if err := h.pprofInitializer.Start(); err != nil {
h.logger.WithTags(tag.Error(err)).Fatal("fail to start PProf")
}
if err := h.dispatcher.Start(); err != nil {
h.logger.WithTags(tag.Error(err)).Fatal("fail to start yarpc dispatcher")
}
if err := h.membershipMonitor.Start(); err != nil {
h.logger.WithTags(tag.Error(err)).Fatal("fail to start membership monitor")
}
h.domainCache.Start()
// The service is now started up
h.logger.Info("service started")
// seed the random generator once for this service
rand.Seed(time.Now().UTC().UnixNano())
}
// Stop stops all resources
func (h *Impl) Stop() {
if !atomic.CompareAndSwapInt32(
&h.status,
common.DaemonStatusStarted,
common.DaemonStatusStopped,
) {
return
}
h.domainCache.Stop()
h.membershipMonitor.Stop()
if err := h.dispatcher.Stop(); err != nil {
h.logger.WithTags(tag.Error(err)).Error("failed to stop dispatcher")
}
h.runtimeMetricsReporter.Stop()
h.persistenceBean.Close()
}
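// Illustrative sketch, not part of the original file: Start and Stop above are made
// idempotent by an atomic status transition. The exampleDaemon type is hypothetical and
// only isolates that guard.
type exampleDaemon struct{ status int32 }

func (d *exampleDaemon) Start() {
	if !atomic.CompareAndSwapInt32(&d.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) {
		return // a second Start call (or a Start after Stop) is a no-op
	}
	// start sub-components exactly once here
}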
// GetServiceName return service name
func (h *Impl) GetServiceName() string {
return h.serviceName
}
// GetHostName return host name
func (h *Impl) GetHostName() string {
return h.hostName
}
// GetHostInfo return host info
func (h *Impl) GetHostInfo() (*membership.HostInfo, error) {
return h.membershipMonitor.WhoAmI()
}
// GetClusterMetadata return cluster metadata
func (h *Impl) GetClusterMetadata() cluster.Metadata {
return h.clusterMetadata
}
// other common resources
// GetDomainCache return domain cache
func (h *Impl) GetDomainCache() cache.DomainCache {
return h.domainCache
}
// GetTimeSource return time source
func (h *Impl) GetTimeSource() clock.TimeSource {
return h.timeSource
}
// GetPayloadSerializer return binary payload serializer
func (h *Impl) GetPayloadSerializer() persistence.PayloadSerializer {
return h.payloadSerializer
}
// GetMetricsClient return metrics client
func (h *Impl) GetMetricsClient() metrics.Client {
return h.metricsClient
}
// GetMessagingClient return messaging client
func (h *Impl) GetMessagingClient() messaging.Client {
return h.messagingClient
}
// GetArchivalMetadata return archival metadata
func (h *Impl) GetArchivalMetadata() archiver.ArchivalMetadata {
return h.archivalMetadata
}
// GetArchiverProvider return archival provider
func (h *Impl) GetArchiverProvider() provider.ArchiverProvider {
return h.archiverProvider
}
// membership infos
// GetMembershipMonitor return the membership monitor
func (h *Impl) GetMembershipMonitor() membership.Monitor {
return h.membershipMonitor
}
// GetFrontendServiceResolver return frontend service resolver
func (h *Impl) GetFrontendServiceResolver() membership.ServiceResolver {
return h.frontendServiceResolver
}
// GetMatchingServiceResolver return matching service resolver
func (h *Impl) GetMatchingServiceResolver() membership.ServiceResolver {
return h.matchingServiceResolver
}
// GetHistoryServiceResolver return history service resolver
func (h *Impl) GetHistoryServiceResolver() membership.ServiceResolver {
return h.historyServiceResolver
}
// GetWorkerServiceResolver return worker service resolver
func (h *Impl) GetWorkerServiceResolver() membership.ServiceResolver {
return h.workerServiceResolver
}
// internal services clients
// GetPublicClient return public lib client
func (h *Impl) GetPublicClient() workflowserviceclient.Interface {
return h.publicClient
}
// GetFrontendRawClient return frontend client without retry policy
func (h *Impl) GetFrontendRawClient() frontend.Client {
return h.frontendRawClient
}
// GetFrontendClient return frontend client with retry policy
func (h *Impl) GetFrontendClient() frontend.Client {
return h.frontendClient
}
// GetMatchingRawClient return matching client without retry policy
func (h *Impl) GetMatchingRawClient() matching.Client {
return h.matchingRawClient
}
// GetMatchingClient return matching client with retry policy
func (h *Impl) GetMatchingClient() matching.Client {
return h.matchingClient
}
// GetHistoryRawClient return history client without retry policy
func (h *Impl) GetHistoryRawClient() history.Client {
return h.historyRawClient
}
// GetHistoryClient return history client with retry policy
func (h *Impl) GetHistoryClient() history.Client {
return h.historyClient
}
// GetRemoteAdminClient return remote admin client for given cluster name
func (h *Impl) GetRemoteAdminClient(
cluster string,
) admin.Client {
return h.clientBean.GetRemoteAdminClient(cluster)
}
// GetRemoteFrontendClient return remote frontend client for given cluster name
func (h *Impl) GetRemoteFrontendClient(
cluster string,
) frontend.Client {
return h.clientBean.GetRemoteFrontendClient(cluster)
}
// GetClientBean return RPC client bean
func (h *Impl) GetClientBean() client.Bean {
return h.clientBean
}
// persistence clients
// GetMetadataManager return metadata manager
func (h *Impl) GetMetadataManager() persistence.MetadataManager {
return h.persistenceBean.GetMetadataManager()
}
// GetTaskManager return task manager
func (h *Impl) GetTaskManager() persistence.TaskManager {
return h.persistenceBean.GetTaskManager()
}
// GetVisibilityManager return visibility manager
func (h *Impl) GetVisibilityManager() persistence.VisibilityManager {
return h.visibilityMgr
}
// GetDomainReplicationQueue return domain replication queue
func (h *Impl) GetDomainReplicationQueue() persistence.DomainReplicationQueue {
return h.persistenceBean.GetDomainReplicationQueue()
}
// GetShardManager return shard manager
func (h *Impl) GetShardManager() persistence.ShardManager {
return h.persistenceBean.GetShardManager()
}
// GetHistoryManager return history manager
func (h *Impl) GetHistoryManager() persistence.HistoryManager {
return h.persistenceBean.GetHistoryManager()
}
// GetExecutionManager return execution manager for given shard ID
func (h *Impl) GetExecutionManager(
shardID int,
) (persistence.ExecutionManager, error) {
return h.persistenceBean.GetExecutionManager(shardID)
}
// GetPersistenceBean return persistence bean
func (h *Impl) GetPersistenceBean() persistenceClient.Bean {
return h.persistenceBean
}
// loggers
// GetLogger return logger
func (h *Impl) GetLogger() log.Logger {
return h.logger
}
// GetThrottledLogger return throttled logger
func (h *Impl) GetThrottledLogger() log.Logger {
return h.throttledLogger
}
// GetDispatcher return YARPC dispatcher, used for registering handlers
func (h *Impl) GetDispatcher() *yarpc.Dispatcher {
return h.dispatcher
}
| 1 | 9,080 |
This file looks like the `ServiceLocator` anti-pattern, and luckily it's not used anywhere. Can I remove it (in a separate PR)?
|
temporalio-temporal
|
go
|
@@ -466,6 +466,10 @@ public class DatasetUtil {
* size for tabular files.
*/
public static String getDownloadSize(DatasetVersion dsv, boolean original) {
+ return FileSizeChecker.bytesToHumanReadable(getDownloadSizeNumeric(dsv, original));
+ }
+
+ public static Long getDownloadSizeNumeric(DatasetVersion dsv, boolean original) {
long bytes = 0l;
for (FileMetadata fileMetadata : dsv.getFileMetadatas()) {
DataFile dataFile = fileMetadata.getDataFile();
| 1 |
package edu.harvard.iq.dataverse.dataset;
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.FileMetadata;
import edu.harvard.iq.dataverse.dataaccess.DataAccess;
import static edu.harvard.iq.dataverse.dataaccess.DataAccess.getStorageIO;
import edu.harvard.iq.dataverse.dataaccess.StorageIO;
import edu.harvard.iq.dataverse.dataaccess.ImageThumbConverter;
import edu.harvard.iq.dataverse.settings.SettingsServiceBean;
import edu.harvard.iq.dataverse.util.FileUtil;
import java.awt.image.BufferedImage;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Logger;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import javax.imageio.ImageIO;
import org.apache.commons.io.IOUtils;
import edu.harvard.iq.dataverse.datasetutility.FileSizeChecker;
public class DatasetUtil {
private static final Logger logger = Logger.getLogger(DatasetUtil.class.getCanonicalName());
public static String datasetLogoFilenameFinal = "dataset_logo_original";
public static String datasetLogoThumbnail = "dataset_logo";
public static String thumbExtension = ".thumb";
public static List<DatasetThumbnail> getThumbnailCandidates(Dataset dataset, boolean considerDatasetLogoAsCandidate, int size) {
List<DatasetThumbnail> thumbnails = new ArrayList<>();
if (dataset == null) {
return thumbnails;
}
if (considerDatasetLogoAsCandidate) {
// Path path = Paths.get(dataset.getFileSystemDirectory() + File.separator + datasetLogoThumbnail + thumb48addedByImageThumbConverter);
// if (Files.exists(path)) {
// logger.fine("Thumbnail created from dataset logo exists!");
// File file = path.toFile();
// try {
// byte[] bytes = Files.readAllBytes(file.toPath());
StorageIO<Dataset> dataAccess = null;
try{
dataAccess = DataAccess.getStorageIO(dataset);
}
catch(IOException ioex){
}
InputStream in = null;
try {
in = dataAccess.getAuxFileAsInputStream(datasetLogoThumbnail + thumbExtension + size);
} catch (Exception ioex) {
}
if (in != null) {
logger.fine("Thumbnail created from dataset logo exists!");
try {
byte[] bytes = IOUtils.toByteArray(in);
String base64image = Base64.getEncoder().encodeToString(bytes);
DatasetThumbnail datasetThumbnail = new DatasetThumbnail(FileUtil.DATA_URI_SCHEME + base64image, null);
thumbnails.add(datasetThumbnail);
} catch (IOException ex) {
logger.warning("Unable to rescale image: " + ex);
}
} else {
logger.fine("There is no thumbnail created from a dataset logo");
}
IOUtils.closeQuietly(in);
}
for (FileMetadata fileMetadata : dataset.getLatestVersion().getFileMetadatas()) {
DataFile dataFile = fileMetadata.getDataFile();
if (dataFile != null && FileUtil.isThumbnailSupported(dataFile)
&& ImageThumbConverter.isThumbnailAvailable(dataFile)
&& !dataFile.isRestricted()) {
String imageSourceBase64 = null;
imageSourceBase64 = ImageThumbConverter.getImageThumbnailAsBase64(dataFile, size);
if (imageSourceBase64 != null) {
DatasetThumbnail datasetThumbnail = new DatasetThumbnail(imageSourceBase64, dataFile);
thumbnails.add(datasetThumbnail);
}
}
}
return thumbnails;
}
/**
* Note "datasetVersionId" can be null. If needed, it helps the "efficiency"
* of "attemptToAutomaticallySelectThumbnailFromDataFiles"
*
* @param dataset
* @param datasetVersion
* @return
*/
public static DatasetThumbnail getThumbnail(Dataset dataset, DatasetVersion datasetVersion, int size) {
if (dataset == null) {
return null;
}
StorageIO<Dataset> dataAccess = null;
try{
dataAccess = DataAccess.getStorageIO(dataset);
}
catch(IOException ioex){
logger.warning("getThumbnail(): Failed to initialize dataset StorageIO for " + dataset.getStorageIdentifier() + " (" + ioex.getMessage() + ")");
}
InputStream in = null;
try {
if (dataAccess == null) {
logger.warning("getThumbnail(): Failed to initialize dataset StorageIO for " + dataset.getStorageIdentifier());
} else {
in = dataAccess.getAuxFileAsInputStream(datasetLogoThumbnail + ".thumb" + size);
}
} catch (IOException ex) {
in = null;
logger.fine("Dataset-level thumbnail file does not exist, or failed to open; will try to find an image file that can be used as the thumbnail.");
}
if (in != null) {
try {
byte[] bytes = IOUtils.toByteArray(in);
String base64image = Base64.getEncoder().encodeToString(bytes);
DatasetThumbnail datasetThumbnail = new DatasetThumbnail(FileUtil.DATA_URI_SCHEME + base64image, null);
logger.fine("will get thumbnail from dataset logo");
return datasetThumbnail;
} catch (IOException ex) {
logger.fine("Unable to read thumbnail image from file: " + ex);
return null;
            } finally {
IOUtils.closeQuietly(in);
}
} else {
DataFile thumbnailFile = dataset.getThumbnailFile();
if (thumbnailFile == null) {
if (dataset.isUseGenericThumbnail()) {
logger.fine("Dataset (id :" + dataset.getId() + ") does not have a thumbnail and is 'Use Generic'.");
return null;
} else {
thumbnailFile = attemptToAutomaticallySelectThumbnailFromDataFiles(dataset, datasetVersion);
if (thumbnailFile == null) {
logger.fine("Dataset (id :" + dataset.getId() + ") does not have a thumbnail available that could be selected automatically.");
return null;
} else {
String imageSourceBase64 = ImageThumbConverter.getImageThumbnailAsBase64(thumbnailFile, size);
DatasetThumbnail defaultDatasetThumbnail = new DatasetThumbnail(imageSourceBase64, thumbnailFile);
logger.fine("thumbnailFile (id :" + thumbnailFile.getId() + ") will get thumbnail through automatic selection from DataFile id " + thumbnailFile.getId());
return defaultDatasetThumbnail;
}
}
} else if (thumbnailFile.isRestricted()) {
logger.fine("Dataset (id :" + dataset.getId() + ") has a thumbnail the user selected but the file must have later been restricted. Returning null.");
return null;
} else {
String imageSourceBase64 = ImageThumbConverter.getImageThumbnailAsBase64(thumbnailFile, size);
DatasetThumbnail userSpecifiedDatasetThumbnail = new DatasetThumbnail(imageSourceBase64, thumbnailFile);
logger.fine("Dataset (id :" + dataset.getId() + ") will get thumbnail the user specified from DataFile id " + thumbnailFile.getId());
return userSpecifiedDatasetThumbnail;
}
}
}
public static DatasetThumbnail getThumbnail(Dataset dataset, int size) {
if (dataset == null) {
return null;
}
return getThumbnail(dataset, null, size);
}
public static boolean deleteDatasetLogo(Dataset dataset) {
if (dataset == null) {
return false;
}
try {
StorageIO<Dataset> storageIO = getStorageIO(dataset);
if (storageIO == null) {
logger.warning("Null storageIO in deleteDatasetLogo()");
return false;
}
storageIO.deleteAuxObject(datasetLogoFilenameFinal);
storageIO.deleteAuxObject(datasetLogoThumbnail + thumbExtension + ImageThumbConverter.DEFAULT_DATASETLOGO_SIZE);
storageIO.deleteAuxObject(datasetLogoThumbnail + thumbExtension + ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE);
} catch (IOException ex) {
logger.info("Failed to delete dataset logo: " + ex.getMessage());
return false;
}
return true;
//TODO: Is this required?
// File originalFile = new File(dataset.getFileSystemDirectory().toString(), datasetLogoFilenameFinal);
// boolean originalFileDeleted = originalFile.delete();
// File thumb48 = new File(dataset.getFileSystemDirectory().toString(), File.separator + datasetLogoThumbnail + thumb48addedByImageThumbConverter);
// boolean thumb48Deleted = thumb48.delete();
// if (originalFileDeleted && thumb48Deleted) {
// return true;
// } else {
// logger.info("One of the files wasn't deleted. Original deleted: " + originalFileDeleted + ". thumb48 deleted: " + thumb48Deleted + ".");
// return false;
// }
}
/**
* Pass an optional datasetVersion in case the file system is checked
*
* @param dataset
* @param datasetVersion
* @return
*/
public static DataFile attemptToAutomaticallySelectThumbnailFromDataFiles(Dataset dataset, DatasetVersion datasetVersion) {
if (dataset == null) {
return null;
}
if (dataset.isUseGenericThumbnail()) {
logger.fine("Bypassing logic to find a thumbnail because a generic icon for the dataset is desired.");
return null;
}
if (datasetVersion == null) {
logger.fine("getting a published version of the dataset");
// We want to use published files only when automatically selecting
// dataset thumbnails.
datasetVersion = dataset.getReleasedVersion();
}
// No published version? - No [auto-selected] thumbnail for you.
if (datasetVersion == null) {
return null;
}
for (FileMetadata fmd : datasetVersion.getFileMetadatas()) {
DataFile testFile = fmd.getDataFile();
// We don't want to use a restricted image file as the dedicated thumbnail:
if (!testFile.isRestricted() && FileUtil.isThumbnailSupported(testFile) && ImageThumbConverter.isThumbnailAvailable(testFile, ImageThumbConverter.DEFAULT_DATASETLOGO_SIZE)) {
return testFile;
}
}
logger.fine("In attemptToAutomaticallySelectThumbnailFromDataFiles and interated through all the files but couldn't find a thumbnail.");
return null;
}
public static Dataset persistDatasetLogoToStorageAndCreateThumbnails(Dataset dataset, InputStream inputStream) {
if (dataset == null) {
return null;
}
File tmpFile = null;
try {
tmpFile = FileUtil.inputStreamToFile(inputStream);
} catch (IOException ex) {
logger.severe(ex.getMessage());
}
StorageIO<Dataset> dataAccess = null;
try{
dataAccess = DataAccess.getStorageIO(dataset);
}
catch(IOException ioex){
            //TODO: Add a suitable warning message
logger.warning("Failed to save the file, storage id " + dataset.getStorageIdentifier() + " (" + ioex.getMessage() + ")");
}
//File originalFile = new File(datasetDirectory.toString(), datasetLogoFilenameFinal);
try {
//this goes through Swift API/local storage/s3 to write the dataset thumbnail into a container
dataAccess.savePathAsAux(tmpFile.toPath(), datasetLogoFilenameFinal);
} catch (IOException ex) {
logger.severe("Failed to move original file from " + tmpFile.getAbsolutePath() + " to its DataAccess location" + ": " + ex);
}
BufferedImage fullSizeImage = null;
try {
fullSizeImage = ImageIO.read(tmpFile);
} catch (IOException ex) {
IOUtils.closeQuietly(inputStream);
logger.severe(ex.getMessage());
return null;
}
if (fullSizeImage == null) {
logger.fine("fullSizeImage was null!");
IOUtils.closeQuietly(inputStream);
return null;
}
int width = fullSizeImage.getWidth();
int height = fullSizeImage.getHeight();
FileChannel src = null;
try {
src = new FileInputStream(tmpFile).getChannel();
} catch (FileNotFoundException ex) {
IOUtils.closeQuietly(inputStream);
logger.severe(ex.getMessage());
return null;
}
FileChannel dest = null;
try {
dest = new FileOutputStream(tmpFile).getChannel();
} catch (FileNotFoundException ex) {
IOUtils.closeQuietly(inputStream);
logger.severe(ex.getMessage());
return null;
}
try {
dest.transferFrom(src, 0, src.size());
} catch (IOException ex) {
logger.severe(ex.getMessage());
return null;
}
File tmpFileForResize = null;
try {
//The stream was used around line 274 above, so this creates an empty file (OK since all it is used for is getting a path, but not reusing it here would make it easier to close it above.)
tmpFileForResize = FileUtil.inputStreamToFile(inputStream);
} catch (IOException ex) {
logger.severe(ex.getMessage());
return null;
} finally {
IOUtils.closeQuietly(inputStream);
}
// We'll try to pre-generate the rescaled versions in both the
// DEFAULT_DATASET_LOGO (currently 140) and DEFAULT_CARDIMAGE_SIZE (48)
String thumbFileLocation = ImageThumbConverter.rescaleImage(fullSizeImage, width, height, ImageThumbConverter.DEFAULT_DATASETLOGO_SIZE, tmpFileForResize.toPath().toString());
logger.fine("thumbFileLocation = " + thumbFileLocation);
logger.fine("tmpFileLocation=" + tmpFileForResize.toPath().toString());
//now we must save the updated thumbnail
try {
dataAccess.savePathAsAux(Paths.get(thumbFileLocation), datasetLogoThumbnail+thumbExtension+ImageThumbConverter.DEFAULT_DATASETLOGO_SIZE);
} catch (IOException ex) {
logger.severe("Failed to move updated thumbnail file from " + tmpFile.getAbsolutePath() + " to its DataAccess location" + ": " + ex);
}
thumbFileLocation = ImageThumbConverter.rescaleImage(fullSizeImage, width, height, ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE, tmpFileForResize.toPath().toString());
logger.fine("thumbFileLocation = " + thumbFileLocation);
logger.fine("tmpFileLocation=" + tmpFileForResize.toPath().toString());
//now we must save the updated thumbnail
try {
dataAccess.savePathAsAux(Paths.get(thumbFileLocation), datasetLogoThumbnail+thumbExtension+ImageThumbConverter.DEFAULT_CARDIMAGE_SIZE);
} catch (IOException ex) {
logger.severe("Failed to move updated thumbnail file from " + tmpFile.getAbsolutePath() + " to its DataAccess location" + ": " + ex);
}
//This deletes the tempfiles created for rescaling and encoding
boolean tmpFileWasDeleted = tmpFile.delete();
boolean originalTempFileWasDeleted = tmpFileForResize.delete();
try {
Files.delete(Paths.get(thumbFileLocation));
} catch (IOException ioex) {
logger.fine("Failed to delete temporary thumbnail file");
}
logger.fine("Thumbnail saved to " + thumbFileLocation + ". Temporary file deleted : " + tmpFileWasDeleted + ". Original file deleted : " + originalTempFileWasDeleted);
return dataset;
}
public static InputStream getThumbnailAsInputStream(Dataset dataset, int size) {
if (dataset == null) {
return null;
}
DatasetThumbnail datasetThumbnail = dataset.getDatasetThumbnail(size);
if (datasetThumbnail == null) {
return null;
} else {
String base64Image = datasetThumbnail.getBase64image();
String leadingStringToRemove = FileUtil.DATA_URI_SCHEME;
String encodedImg = base64Image.substring(leadingStringToRemove.length());
byte[] decodedImg = null;
try {
decodedImg = Base64.getDecoder().decode(encodedImg.getBytes("UTF-8"));
logger.fine("returning this many bytes for " + "dataset id: " + dataset.getId() + ", persistentId: " + dataset.getIdentifier() + " :" + decodedImg.length);
} catch (UnsupportedEncodingException ex) {
logger.info("dataset thumbnail could not be decoded for dataset id " + dataset.getId() + ": " + ex);
return null;
}
ByteArrayInputStream nonDefaultDatasetThumbnail = new ByteArrayInputStream(decodedImg);
logger.fine("For dataset id " + dataset.getId() + " a thumbnail was found and is being returned.");
return nonDefaultDatasetThumbnail;
}
}
/**
* The dataset logo is the file that a user uploads which is *not* one of
     * the data files. Compare to the dataverse logo. We do not save the original
* file that is uploaded. Rather, we delete it after first creating at least
* one thumbnail from it.
*/
public static boolean isDatasetLogoPresent(Dataset dataset, int size) {
if (dataset == null) {
return false;
}
StorageIO<Dataset> dataAccess = null;
try {
dataAccess = DataAccess.getStorageIO(dataset);
return dataAccess.isAuxObjectCached(datasetLogoThumbnail + thumbExtension + size);
} catch (IOException ioex) {
}
return false;
}
public static List<DatasetField> getDatasetSummaryFields(DatasetVersion datasetVersion, String customFields) {
List<DatasetField> datasetFields = new ArrayList<>();
//if customFields are empty, go with default fields.
        if (customFields == null || customFields.isEmpty()) {
            customFields = "dsDescription,subject,keyword,publication,notesText";
        }
        String[] customFieldList = customFields.split(",");
        Map<String, DatasetField> datasetFieldsSet = new HashMap<>();
        for (DatasetField dsf : datasetVersion.getFlatDatasetFields()) {
            datasetFieldsSet.put(dsf.getDatasetFieldType().getName(), dsf);
        }
        for (String cfl : customFieldList) {
            DatasetField df = datasetFieldsSet.get(cfl);
            if (df != null) {
                datasetFields.add(df);
            }
        }
return datasetFields;
}
public static boolean isAppropriateStorageDriver(Dataset dataset){
// ToDo - rsync was written before multiple store support and currently is hardcoded to use the "s3" store.
// When those restrictions are lifted/rsync can be configured per store, this test should check that setting
        // instead of testing for the "s3" store.
//This method is used by both the dataset and edit files page so one change here
//will fix both
return dataset.getEffectiveStorageDriverId().equals("s3");
}
/**
     * Given a dataset version, return its size in human-readable units such as
     * 42.9 MB. There is a GetDatasetStorageSizeCommand but it's overly complex
* for the use case.
*
* @param original Use the original file size rather than the archival file
* size for tabular files.
*/
public static String getDownloadSize(DatasetVersion dsv, boolean original) {
long bytes = 0l;
for (FileMetadata fileMetadata : dsv.getFileMetadatas()) {
DataFile dataFile = fileMetadata.getDataFile();
if (original && dataFile.isTabularData()) {
bytes += dataFile.getOriginalFileSize() == null ? 0 : dataFile.getOriginalFileSize();
} else {
bytes += dataFile.getFilesize();
}
}
return FileSizeChecker.bytesToHumanReadable(bytes);
}
}
| 1 | 44306 |
We can just have this method call the next one, passing in the file metadatas, to deduplicate the logic.
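A minimal sketch of that deduplication, assuming a hypothetical list-based overload (the name getDownloadSizeNumericBySelectedFiles and its exact signature are illustrative, not the actual Dataverse API); the DatasetVersion variant would simply delegate to it:

    public static Long getDownloadSizeNumeric(DatasetVersion dsv, boolean original) {
        // Delegate to the list-based overload so the summing logic lives in one place.
        return getDownloadSizeNumericBySelectedFiles(dsv.getFileMetadatas(), original);
    }

    // Hypothetical overload; the real method name may differ.
    public static Long getDownloadSizeNumericBySelectedFiles(List<FileMetadata> fileMetadatas, boolean original) {
        long bytes = 0L;
        for (FileMetadata fileMetadata : fileMetadatas) {
            DataFile dataFile = fileMetadata.getDataFile();
            if (original && dataFile.isTabularData()) {
                // Tabular files may lack a recorded original size; treat null as 0.
                bytes += dataFile.getOriginalFileSize() == null ? 0 : dataFile.getOriginalFileSize();
            } else {
                bytes += dataFile.getFilesize();
            }
        }
        return bytes;
    }

Callers that already hold a subset of FileMetadata objects could then reuse the same summing logic without building a DatasetVersion.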
|
IQSS-dataverse
|
java
|
@@ -480,6 +480,17 @@ class TabBar(QTabBar):
new_idx = super().insertTab(idx, icon, '')
self.set_page_title(new_idx, text)
+ def wheelEvent(self, event):
+ """Override wheelEvent to make the action configurable."""
+ if config.get('tabs', 'mousewheel-tab-switching'):
+ super().wheelEvent(event)
+ else:
+ tabbed_browser = objreg.get('tabbed-browser', scope='window',
+ window=self._win_id)
+ focused_tab = tabbed_browser.currentWidget()
+ if focused_tab is not None:
+ focused_tab.wheelEvent(event)
+
class TabBarStyle(QCommonStyle):
| 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The tab widget used for TabbedBrowser from browser.py.
Module attributes:
PM_TabBarPadding: The PixelMetric value for TabBarStyle to get the padding
between items.
"""
import functools
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize, QRect, QPoint, QTimer
from PyQt5.QtWidgets import (QTabWidget, QTabBar, QSizePolicy, QCommonStyle,
QStyle, QStylePainter, QStyleOptionTab)
from PyQt5.QtGui import QIcon, QPalette, QColor
from qutebrowser.utils import qtutils, objreg, utils
from qutebrowser.config import config
from qutebrowser.browser import webview
PM_TabBarPadding = QStyle.PM_CustomBase
class TabWidget(QTabWidget):
"""The tab widget used for TabbedBrowser.
Signals:
tab_index_changed: Emitted when the current tab was changed.
arg 0: The index of the tab which is now focused.
arg 1: The total count of tabs.
"""
tab_index_changed = pyqtSignal(int, int)
def __init__(self, win_id, parent=None):
super().__init__(parent)
bar = TabBar(win_id)
self.setTabBar(bar)
bar.tabCloseRequested.connect(self.tabCloseRequested)
bar.tabMoved.connect(functools.partial(
QTimer.singleShot, 0, self.update_tab_titles))
bar.currentChanged.connect(self.emit_tab_index_changed)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.setDocumentMode(True)
self.setElideMode(Qt.ElideRight)
self.setUsesScrollButtons(True)
bar.setDrawBase(False)
self.init_config()
objreg.get('config').changed.connect(self.init_config)
@config.change_filter('tabs')
def init_config(self):
"""Initialize attributes based on the config."""
tabbar = self.tabBar()
self.setMovable(config.get('tabs', 'movable'))
self.setTabsClosable(False)
position = config.get('tabs', 'position')
selection_behavior = config.get('tabs', 'select-on-remove')
self.setTabPosition(position)
tabbar.vertical = position in (QTabWidget.West, QTabWidget.East)
tabbar.setSelectionBehaviorOnRemove(selection_behavior)
tabbar.refresh()
def set_tab_indicator_color(self, idx, color):
"""Set the tab indicator color.
Args:
idx: The tab index.
color: A QColor.
"""
bar = self.tabBar()
bar.set_tab_data(idx, 'indicator-color', color)
bar.update(bar.tabRect(idx))
def set_page_title(self, idx, title):
"""Set the tab title user data."""
self.tabBar().set_tab_data(idx, 'page-title', title)
self.update_tab_title(idx)
def page_title(self, idx):
"""Get the tab title user data."""
return self.tabBar().page_title(idx)
def update_tab_title(self, idx):
"""Update the tab text for the given tab."""
widget = self.widget(idx)
page_title = self.page_title(idx).replace('&', '&&')
fields = {}
if widget.load_status == webview.LoadStatus.loading:
fields['perc'] = '[{}%] '.format(widget.progress)
else:
fields['perc'] = ''
fields['perc_raw'] = widget.progress
fields['title'] = page_title
fields['index'] = idx + 1
fields['id'] = widget.tab_id
fields['title_sep'] = ' - ' if page_title else ''
fmt = config.get('tabs', 'title-format')
self.tabBar().setTabText(idx, fmt.format(**fields))
@config.change_filter('tabs', 'title-format')
def update_tab_titles(self):
"""Update all texts."""
for idx in range(self.count()):
self.update_tab_title(idx)
def tabInserted(self, idx):
"""Update titles when a tab was inserted."""
super().tabInserted(idx)
self.update_tab_titles()
def tabRemoved(self, idx):
"""Update titles when a tab was removed."""
super().tabRemoved(idx)
self.update_tab_titles()
def addTab(self, page, icon_or_text, text_or_empty=None):
"""Override addTab to use our own text setting logic.
Unfortunately QTabWidget::addTab has these two overloads:
- QWidget * page, const QIcon & icon, const QString & label
- QWidget * page, const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
page: The QWidget to add.
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
new_idx = super().addTab(page, '')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().addTab(page, icon, '')
self.set_page_title(new_idx, text)
return new_idx
def insertTab(self, idx, page, icon_or_text, text_or_empty=None):
"""Override insertTab to use our own text setting logic.
Unfortunately QTabWidget::insertTab has these two overloads:
- int index, QWidget * page, const QIcon & icon,
const QString & label
- int index, QWidget * page, const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
idx: Where to insert the widget.
page: The QWidget to add.
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
new_idx = super().insertTab(idx, page, '')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().insertTab(idx, page, icon, '')
self.set_page_title(new_idx, text)
return new_idx
@pyqtSlot(int)
def emit_tab_index_changed(self, index):
"""Emit the tab_index_changed signal if the current tab changed."""
self.tab_index_changed.emit(index, self.count())
class TabBar(QTabBar):
"""Custom tab bar with our own style.
FIXME: Dragging tabs doesn't look as nice as it does in QTabBar. However,
fixing this would be a lot of effort, so we'll postpone it until we're
reimplementing drag&drop for other reasons.
https://github.com/The-Compiler/qutebrowser/issues/126
Attributes:
vertical: When the tab bar is currently vertical.
win_id: The window ID this TabBar belongs to.
"""
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
self.setStyle(TabBarStyle(self.style()))
self.set_font()
config_obj = objreg.get('config')
config_obj.changed.connect(self.set_font)
self.vertical = False
self.setAutoFillBackground(True)
self.set_colors()
config_obj.changed.connect(self.set_colors)
QTimer.singleShot(0, self._tabhide)
config_obj.changed.connect(self.autohide)
config_obj.changed.connect(self.alwayshide)
config_obj.changed.connect(self.on_tab_colors_changed)
def __repr__(self):
return utils.get_repr(self, count=self.count())
@config.change_filter('tabs', 'hide-auto')
def autohide(self):
"""Hide tab bar if needed when tabs->hide-auto got changed."""
self._tabhide()
@config.change_filter('tabs', 'hide-always')
def alwayshide(self):
"""Hide tab bar if needed when tabs->hide-always got changed."""
self._tabhide()
def _tabhide(self):
"""Hide the tab bar if needed."""
hide_auto = config.get('tabs', 'hide-auto')
hide_always = config.get('tabs', 'hide-always')
if hide_always or (hide_auto and self.count() == 1):
self.hide()
else:
self.show()
def set_tab_data(self, idx, key, value):
"""Set tab data as a dictionary."""
if not 0 <= idx < self.count():
raise IndexError("Tab index ({}) out of range ({})!".format(
idx, self.count()))
data = self.tabData(idx)
if data is None:
data = {}
data[key] = value
self.setTabData(idx, data)
def tab_data(self, idx, key):
"""Get tab data for a given key."""
if not 0 <= idx < self.count():
raise IndexError("Tab index ({}) out of range ({})!".format(
idx, self.count()))
data = self.tabData(idx)
if data is None:
data = {}
return data[key]
def page_title(self, idx):
"""Get the tab title user data.
Args:
idx: The tab index to get the title for.
"""
try:
return self.tab_data(idx, 'page-title')
except KeyError:
return ''
def refresh(self):
"""Properly repaint the tab bar and relayout tabs."""
        # This is a horrible hack, but we need to do this so the underlying Qt
        # code sets layoutDirty so it actually re-lays out the tabs.
self.setIconSize(self.iconSize())
@config.change_filter('fonts', 'tabbar')
def set_font(self):
"""Set the tab bar font."""
self.setFont(config.get('fonts', 'tabbar'))
@config.change_filter('colors', 'tabs.bg.bar')
def set_colors(self):
"""Set the tab bar colors."""
p = self.palette()
p.setColor(QPalette.Window, config.get('colors', 'tabs.bg.bar'))
self.setPalette(p)
@pyqtSlot(str, str)
def on_tab_colors_changed(self, section, option):
"""Set the tab colors."""
if section == 'colors' and option.startswith('tabs.'):
self.update()
def mousePressEvent(self, e):
"""Override mousePressEvent to close tabs if configured."""
button = config.get('tabs', 'close-mouse-button')
if (e.button() == Qt.RightButton and button == 'right' or
e.button() == Qt.MiddleButton and button == 'middle'):
idx = self.tabAt(e.pos())
if idx != -1:
e.accept()
self.tabCloseRequested.emit(idx)
return
super().mousePressEvent(e)
def minimumTabSizeHint(self, index):
"""Set the minimum tab size to indicator/icon/... text.
Args:
index: The index of the tab to get a size hint for.
Return:
A QSize.
"""
icon = self.tabIcon(index)
padding_count = 2
if icon.isNull():
icon_size = QSize(0, 0)
else:
extent = self.style().pixelMetric(QStyle.PM_TabBarIconSize, None,
self)
icon_size = icon.actualSize(QSize(extent, extent))
padding_count += 1
indicator_width = config.get('tabs', 'indicator-width')
if indicator_width != 0:
indicator_width += config.get('tabs', 'indicator-space')
padding_width = self.style().pixelMetric(PM_TabBarPadding, None, self)
height = self.fontMetrics().height()
width = (self.fontMetrics().width('\u2026') +
icon_size.width() + padding_count * padding_width +
indicator_width)
return QSize(width, height)
def tabSizeHint(self, index):
"""Override tabSizeHint so all tabs are the same size.
https://wiki.python.org/moin/PyQt/Customising%20tab%20bars
Args:
index: The index of the tab.
Return:
A QSize.
"""
minimum_size = self.minimumTabSizeHint(index)
height = self.fontMetrics().height()
if self.vertical:
confwidth = str(config.get('tabs', 'width'))
if confwidth.endswith('%'):
main_window = objreg.get('main-window', scope='window',
window=self._win_id)
perc = int(confwidth.rstrip('%'))
width = main_window.width() * perc / 100
else:
width = int(confwidth)
size = QSize(max(minimum_size.width(), width), height)
elif self.count() == 0:
# This happens on startup on OS X.
# We return it directly rather than setting `size' because we don't
# want to ensure it's valid in this special case.
return QSize()
elif self.count() * minimum_size.width() > self.width():
# If we don't have enough space, we return the minimum size so we
# get scroll buttons as soon as needed.
size = minimum_size
else:
# If we *do* have enough space, tabs should occupy the whole window
# width.
size = QSize(self.width() / self.count(), height)
qtutils.ensure_valid(size)
return size
def paintEvent(self, _e):
"""Override paintEvent to draw the tabs like we want to."""
p = QStylePainter(self)
tab = QStyleOptionTab()
selected = self.currentIndex()
for idx in range(self.count()):
self.initStyleOption(tab, idx)
if idx == selected:
bg_color = config.get('colors', 'tabs.bg.selected')
fg_color = config.get('colors', 'tabs.fg.selected')
elif idx % 2:
bg_color = config.get('colors', 'tabs.bg.odd')
fg_color = config.get('colors', 'tabs.fg.odd')
else:
bg_color = config.get('colors', 'tabs.bg.even')
fg_color = config.get('colors', 'tabs.fg.even')
tab.palette.setColor(QPalette.Window, bg_color)
tab.palette.setColor(QPalette.WindowText, fg_color)
try:
indicator_color = self.tab_data(idx, 'indicator-color')
except KeyError:
indicator_color = QColor()
tab.palette.setColor(QPalette.Base, indicator_color)
if tab.rect.right() < 0 or tab.rect.left() > self.width():
# Don't bother drawing a tab if the entire tab is outside of
# the visible tab bar.
continue
p.drawControl(QStyle.CE_TabBarTab, tab)
def tabInserted(self, idx):
"""Update visibility when a tab was inserted."""
super().tabInserted(idx)
self._tabhide()
def tabRemoved(self, idx):
"""Update visibility when a tab was removed."""
super().tabRemoved(idx)
self._tabhide()
def addTab(self, icon_or_text, text_or_empty=None):
"""Override addTab to use our own text setting logic.
Unfortunately QTabBar::addTab has these two overloads:
- const QIcon & icon, const QString & label
- const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
new_idx = super().addTab('')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().addTab(icon, '')
        self.set_page_title(new_idx, text)
        return new_idx
def insertTab(self, idx, icon_or_text, text_or_empty=None):
"""Override insertTab to use our own text setting logic.
Unfortunately QTabBar::insertTab has these two overloads:
- int index, const QIcon & icon, const QString & label
- int index, const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
idx: Where to insert the widget.
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
            new_idx = super().insertTab(idx, '')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().insertTab(idx, icon, '')
        self.set_page_title(new_idx, text)
        return new_idx
class TabBarStyle(QCommonStyle):
"""Qt style used by TabBar to fix some issues with the default one.
This fixes the following things:
- Remove the focus rectangle Ubuntu draws on tabs.
- Force text to be left-aligned even though Qt has "centered"
hardcoded.
Unfortunately PyQt doesn't support QProxyStyle, so we need to do this the
hard way...
Based on:
http://stackoverflow.com/a/17294081
https://code.google.com/p/makehuman/source/browse/trunk/makehuman/lib/qtgui.py
Attributes:
_style: The base/"parent" style.
"""
def __init__(self, style):
"""Initialize all functions we're not overriding.
This simply calls the corresponding function in self._style.
Args:
style: The base/"parent" style.
"""
self._style = style
for method in ('drawComplexControl', 'drawItemPixmap',
'generatedIconPixmap', 'hitTestComplexControl',
'itemPixmapRect', 'itemTextRect',
'polish', 'styleHint', 'subControlRect', 'unpolish',
'drawItemText', 'sizeFromContents', 'drawPrimitive'):
target = getattr(self._style, method)
setattr(self, method, functools.partial(target))
super().__init__()
def drawControl(self, element, opt, p, widget=None):
"""Override drawControl to draw odd tabs in a different color.
Draws the given element with the provided painter with the style
options specified by option.
Args:
element: ControlElement
option: const QStyleOption *
painter: QPainter *
widget: const QWidget *
"""
if element == QStyle.CE_TabBarTab:
# We override this so we can control TabBarTabShape/TabBarTabLabel.
self.drawControl(QStyle.CE_TabBarTabShape, opt, p, widget)
self.drawControl(QStyle.CE_TabBarTabLabel, opt, p, widget)
elif element == QStyle.CE_TabBarTabShape:
p.fillRect(opt.rect, opt.palette.window())
indicator_color = opt.palette.base().color()
indicator_width = config.get('tabs', 'indicator-width')
if indicator_color.isValid() and indicator_width != 0:
topleft = opt.rect.topLeft()
topleft += QPoint(config.get('tabs', 'indicator-space'), 2)
p.fillRect(topleft.x(), topleft.y(), indicator_width,
opt.rect.height() - 4, indicator_color)
# We use super() rather than self._style here because we don't want
# any sophisticated drawing.
super().drawControl(QStyle.CE_TabBarTabShape, opt, p, widget)
elif element == QStyle.CE_TabBarTabLabel:
text_rect, icon_rect = self._tab_layout(opt)
if not opt.icon.isNull():
qtutils.ensure_valid(icon_rect)
icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled
else QIcon.Disabled)
icon_state = (QIcon.On if opt.state & QStyle.State_Selected
else QIcon.Off)
icon = opt.icon.pixmap(opt.iconSize, icon_mode, icon_state)
p.drawPixmap(icon_rect.x(), icon_rect.y(), icon)
alignment = Qt.AlignLeft | Qt.AlignVCenter | Qt.TextHideMnemonic
self._style.drawItemText(p, text_rect, alignment, opt.palette,
opt.state & QStyle.State_Enabled,
opt.text, QPalette.WindowText)
else:
# For any other elements we just delegate the work to our real
# style.
self._style.drawControl(element, opt, p, widget)
def pixelMetric(self, metric, option=None, widget=None):
"""Override pixelMetric to not shift the selected tab.
Args:
metric: PixelMetric
option: const QStyleOption *
widget: const QWidget *
Return:
An int.
"""
if (metric == QStyle.PM_TabBarTabShiftHorizontal or
metric == QStyle.PM_TabBarTabShiftVertical or
metric == QStyle.PM_TabBarTabHSpace or
metric == QStyle.PM_TabBarTabVSpace):
return 0
elif metric == PM_TabBarPadding:
return 4
else:
return self._style.pixelMetric(metric, option, widget)
def subElementRect(self, sr, opt, widget=None):
"""Override subElementRect to use our own _tab_layout implementation.
Args:
sr: SubElement
opt: QStyleOption
widget: QWidget
Return:
A QRect.
"""
if sr == QStyle.SE_TabBarTabText:
text_rect, _icon_rect = self._tab_layout(opt)
return text_rect
else:
return self._style.subElementRect(sr, opt, widget)
def _tab_layout(self, opt):
"""Compute the text/icon rect from the opt rect.
This is based on Qt's QCommonStylePrivate::tabLayout
(qtbase/src/widgets/styles/qcommonstyle.cpp) as we can't use the
private implementation.
Args:
opt: QStyleOptionTab
Return:
A (text_rect, icon_rect) tuple (both QRects).
"""
padding = self.pixelMetric(PM_TabBarPadding, opt)
icon_rect = QRect()
text_rect = QRect(opt.rect)
qtutils.ensure_valid(text_rect)
indicator_width = config.get('tabs', 'indicator-width')
text_rect.adjust(padding, 0, 0, 0)
if indicator_width != 0:
text_rect.adjust(indicator_width +
config.get('tabs', 'indicator-space'), 0, 0, 0)
if not opt.icon.isNull():
icon_rect = self._get_icon_rect(opt, text_rect)
text_rect.adjust(icon_rect.width() + padding, 0, 0, 0)
text_rect = self._style.visualRect(opt.direction, opt.rect, text_rect)
return (text_rect, icon_rect)
def _get_icon_rect(self, opt, text_rect):
"""Get a QRect for the icon to draw.
Args:
opt: QStyleOptionTab
text_rect: The QRect for the text.
Return:
A QRect.
"""
icon_size = opt.iconSize
if not icon_size.isValid():
icon_extent = self.pixelMetric(QStyle.PM_SmallIconSize)
icon_size = QSize(icon_extent, icon_extent)
icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled
else QIcon.Disabled)
icon_state = (QIcon.On if opt.state & QStyle.State_Selected
else QIcon.Off)
tab_icon_size = opt.icon.actualSize(icon_size, icon_mode, icon_state)
tab_icon_size = QSize(min(tab_icon_size.width(), icon_size.width()),
min(tab_icon_size.height(), icon_size.height()))
icon_rect = QRect(text_rect.left(),
text_rect.center().y() - tab_icon_size.height() / 2,
tab_icon_size.width(), tab_icon_size.height())
icon_rect = self._style.visualRect(opt.direction, opt.rect, icon_rect)
qtutils.ensure_valid(icon_rect)
return icon_rect
| 1 | 12949 |
What happens here if you send the `wheelEvent` directly to the `tabbed_browser` and let it take care of propagating it to its children (i.e. the focused tab)? If that works, I guess that'd be cleaner.
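A rough sketch of that alternative as a TabBar method, reusing the config option and objreg lookup from the patch above; whether the tab widget actually routes the event on to the focused tab this way is exactly the open question, so treat this as an untested variant rather than the final implementation:

    def wheelEvent(self, event):
        """Override wheelEvent to make the action configurable."""
        if config.get('tabs', 'mousewheel-tab-switching'):
            super().wheelEvent(event)
        else:
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=self._win_id)
            # Hand the event to the tab widget itself and let Qt's normal
            # event handling route it, instead of looking up the focused
            # tab manually.
            tabbed_browser.wheelEvent(event)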
|
qutebrowser-qutebrowser
|
py
|
@@ -21,6 +21,10 @@ from .compat import (PANDAS_INSTALLED, DataFrame, Series, is_dtype_sparse,
from .libpath import find_lib_path
+def _log_callback(msg):
+ """Redirect logs from native library into Python console"""
+ print("{0:s}".format(decode_string(msg)), end='')
+
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
| 1 |
# coding: utf-8
"""Wrapper for C API of LightGBM."""
from __future__ import absolute_import
import copy
import ctypes
import os
import warnings
from tempfile import NamedTemporaryFile
from collections import OrderedDict
import numpy as np
import scipy.sparse
from .compat import (PANDAS_INSTALLED, DataFrame, Series, is_dtype_sparse,
DataTable,
decode_string, string_type,
integer_types, numeric_types,
json, json_default_with_numpy,
range_, zip_)
from .libpath import find_lib_path
def _load_lib():
"""Load LightGBM library."""
lib_path = find_lib_path()
if len(lib_path) == 0:
return None
lib = ctypes.cdll.LoadLibrary(lib_path[0])
lib.LGBM_GetLastError.restype = ctypes.c_char_p
return lib
_LIB = _load_lib()
def _safe_call(ret):
"""Check the return value from C API call.
Parameters
----------
ret : int
The return value from C API calls.
"""
if ret != 0:
raise LightGBMError(decode_string(_LIB.LGBM_GetLastError()))
def is_numeric(obj):
"""Check whether object is a number or not, include numpy number, etc."""
try:
float(obj)
return True
except (TypeError, ValueError):
# TypeError: obj is not a string or a number
# ValueError: invalid literal
return False
def is_numpy_1d_array(data):
"""Check whether data is a numpy 1-D array."""
return isinstance(data, np.ndarray) and len(data.shape) == 1
def is_1d_list(data):
"""Check whether data is a 1-D list."""
return isinstance(data, list) and (not data or is_numeric(data[0]))
def list_to_1d_numpy(data, dtype=np.float32, name='list'):
"""Convert data to numpy 1-D array."""
if is_numpy_1d_array(data):
if data.dtype == dtype:
return data
else:
return data.astype(dtype=dtype, copy=False)
elif is_1d_list(data):
return np.array(data, dtype=dtype, copy=False)
elif isinstance(data, Series):
if _get_bad_pandas_dtypes([data.dtypes]):
raise ValueError('Series.dtypes must be int, float or bool')
return np.array(data, dtype=dtype, copy=False) # SparseArray should be supported as well
else:
raise TypeError("Wrong type({0}) for {1}.\n"
"It should be list, numpy 1-D array or pandas Series".format(type(data).__name__, name))
def cfloat32_array_to_numpy(cptr, length):
"""Convert a ctypes float pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
return np.fromiter(cptr, dtype=np.float32, count=length)
else:
raise RuntimeError('Expected float pointer')
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer')
def cint32_array_to_numpy(cptr, length):
"""Convert a ctypes int pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_int32)):
return np.fromiter(cptr, dtype=np.int32, count=length)
else:
raise RuntimeError('Expected int pointer')
def c_str(string):
"""Convert a Python string to C string."""
return ctypes.c_char_p(string.encode('utf-8'))
def c_array(ctype, values):
"""Convert a Python array to C array."""
return (ctype * len(values))(*values)
def param_dict_to_str(data):
"""Convert Python dictionary to string, which is passed to C API."""
if data is None or not data:
return ""
pairs = []
for key, val in data.items():
if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):
pairs.append(str(key) + '=' + ','.join(map(str, val)))
elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):
pairs.append(str(key) + '=' + str(val))
elif val is not None:
raise TypeError('Unknown type of parameter:%s, got:%s'
% (key, type(val).__name__))
return ' '.join(pairs)
class _TempFile(object):
def __enter__(self):
with NamedTemporaryFile(prefix="lightgbm_tmp_", delete=True) as f:
self.name = f.name
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if os.path.isfile(self.name):
os.remove(self.name)
def readlines(self):
with open(self.name, "r+") as f:
ret = f.readlines()
return ret
def writelines(self, lines):
with open(self.name, "w+") as f:
f.writelines(lines)
class LightGBMError(Exception):
"""Error thrown by LightGBM."""
pass
class _ConfigAliases(object):
aliases = {"bin_construct_sample_cnt": {"bin_construct_sample_cnt",
"subsample_for_bin"},
"boosting": {"boosting",
"boosting_type",
"boost"},
"categorical_feature": {"categorical_feature",
"cat_feature",
"categorical_column",
"cat_column"},
"data_random_seed": {"data_random_seed",
"data_seed"},
"early_stopping_round": {"early_stopping_round",
"early_stopping_rounds",
"early_stopping",
"n_iter_no_change"},
"enable_bundle": {"enable_bundle",
"is_enable_bundle",
"bundle"},
"eval_at": {"eval_at",
"ndcg_eval_at",
"ndcg_at",
"map_eval_at",
"map_at"},
"group_column": {"group_column",
"group",
"group_id",
"query_column",
"query",
"query_id"},
"header": {"header",
"has_header"},
"ignore_column": {"ignore_column",
"ignore_feature",
"blacklist"},
"is_enable_sparse": {"is_enable_sparse",
"is_sparse",
"enable_sparse",
"sparse"},
"label_column": {"label_column",
"label"},
"machines": {"machines",
"workers",
"nodes"},
"metric": {"metric",
"metrics",
"metric_types"},
"num_class": {"num_class",
"num_classes"},
"num_iterations": {"num_iterations",
"num_iteration",
"n_iter",
"num_tree",
"num_trees",
"num_round",
"num_rounds",
"num_boost_round",
"n_estimators"},
"objective": {"objective",
"objective_type",
"app",
"application"},
"pre_partition": {"pre_partition",
"is_pre_partition"},
"two_round": {"two_round",
"two_round_loading",
"use_two_round_loading"},
"verbosity": {"verbosity",
"verbose"},
"weight_column": {"weight_column",
"weight"}}
@classmethod
def get(cls, *args):
ret = set()
for i in args:
ret |= cls.aliases.get(i, {i})
return ret
MAX_INT32 = (1 << 31) - 1
"""Macro definition of data type in C API of LightGBM"""
C_API_DTYPE_FLOAT32 = 0
C_API_DTYPE_FLOAT64 = 1
C_API_DTYPE_INT32 = 2
C_API_DTYPE_INT64 = 3
"""Matrix is row major in Python"""
C_API_IS_ROW_MAJOR = 1
"""Macro definition of prediction type in C API of LightGBM"""
C_API_PREDICT_NORMAL = 0
C_API_PREDICT_RAW_SCORE = 1
C_API_PREDICT_LEAF_INDEX = 2
C_API_PREDICT_CONTRIB = 3
"""Data type of data field"""
FIELD_TYPE_MAPPER = {"label": C_API_DTYPE_FLOAT32,
"weight": C_API_DTYPE_FLOAT32,
"init_score": C_API_DTYPE_FLOAT64,
"group": C_API_DTYPE_INT32}
def convert_from_sliced_object(data):
"""Fix the memory of multi-dimensional sliced object."""
if isinstance(data, np.ndarray) and isinstance(data.base, np.ndarray):
if not data.flags.c_contiguous:
warnings.warn("Usage of np.ndarray subset (sliced data) is not recommended "
"due to it will double the peak memory cost in LightGBM.")
return np.copy(data)
return data
def c_float_array(data):
"""Get pointer of float numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.float32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
type_data = C_API_DTYPE_FLOAT32
elif data.dtype == np.float64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
type_data = C_API_DTYPE_FLOAT64
else:
raise TypeError("Expected np.float32 or np.float64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def c_int_array(data):
"""Get pointer of int numpy array / list."""
if is_1d_list(data):
data = np.array(data, copy=False)
if is_numpy_1d_array(data):
data = convert_from_sliced_object(data)
assert data.flags.c_contiguous
if data.dtype == np.int32:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
type_data = C_API_DTYPE_INT32
elif data.dtype == np.int64:
ptr_data = data.ctypes.data_as(ctypes.POINTER(ctypes.c_int64))
type_data = C_API_DTYPE_INT64
else:
raise TypeError("Expected np.int32 or np.int64, met type({})"
.format(data.dtype))
else:
raise TypeError("Unknown type({})".format(type(data).__name__))
return (ptr_data, type_data, data) # return `data` to avoid the temporary copy is freed
def _get_bad_pandas_dtypes(dtypes):
pandas_dtype_mapper = {'int8': 'int', 'int16': 'int', 'int32': 'int',
'int64': 'int', 'uint8': 'int', 'uint16': 'int',
'uint32': 'int', 'uint64': 'int', 'bool': 'int',
'float16': 'float', 'float32': 'float', 'float64': 'float'}
bad_indices = [i for i, dtype in enumerate(dtypes) if (dtype.name not in pandas_dtype_mapper
and (not is_dtype_sparse(dtype)
or dtype.subtype.name not in pandas_dtype_mapper))]
return bad_indices
def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
if isinstance(data, DataFrame):
if len(data.shape) != 2 or data.shape[0] < 1:
raise ValueError('Input data must be 2 dimensional and non empty.')
if feature_name == 'auto' or feature_name is None:
data = data.rename(columns=str)
cat_cols = list(data.select_dtypes(include=['category']).columns)
cat_cols_not_ordered = [col for col in cat_cols if not data[col].cat.ordered]
if pandas_categorical is None: # train dataset
pandas_categorical = [list(data[col].cat.categories) for col in cat_cols]
else:
if len(cat_cols) != len(pandas_categorical):
raise ValueError('train and valid dataset categorical_feature do not match.')
for col, category in zip_(cat_cols, pandas_categorical):
if list(data[col].cat.categories) != list(category):
data[col] = data[col].cat.set_categories(category)
if len(cat_cols): # cat_cols is list
data = data.copy() # not alter origin DataFrame
data[cat_cols] = data[cat_cols].apply(lambda x: x.cat.codes).replace({-1: np.nan})
if categorical_feature is not None:
if feature_name is None:
feature_name = list(data.columns)
if categorical_feature == 'auto': # use cat cols from DataFrame
categorical_feature = cat_cols_not_ordered
else: # use cat cols specified by user
categorical_feature = list(categorical_feature)
if feature_name == 'auto':
feature_name = list(data.columns)
bad_indices = _get_bad_pandas_dtypes(data.dtypes)
if bad_indices:
raise ValueError("DataFrame.dtypes for data must be int, float or bool.\n"
"Did not expect the data types in the following fields: "
+ ', '.join(data.columns[bad_indices]))
data = data.values
if data.dtype != np.float32 and data.dtype != np.float64:
data = data.astype(np.float32)
else:
if feature_name == 'auto':
feature_name = None
if categorical_feature == 'auto':
categorical_feature = None
return data, feature_name, categorical_feature, pandas_categorical
def _label_from_pandas(label):
if isinstance(label, DataFrame):
if len(label.columns) > 1:
raise ValueError('DataFrame for label cannot have multiple columns')
if _get_bad_pandas_dtypes(label.dtypes):
raise ValueError('DataFrame.dtypes for label must be int, float or bool')
label = np.ravel(label.values.astype(np.float32, copy=False))
return label
def _dump_pandas_categorical(pandas_categorical, file_name=None):
pandas_str = ('\npandas_categorical:'
+ json.dumps(pandas_categorical, default=json_default_with_numpy)
+ '\n')
if file_name is not None:
with open(file_name, 'a') as f:
f.write(pandas_str)
return pandas_str
def _load_pandas_categorical(file_name=None, model_str=None):
pandas_key = 'pandas_categorical:'
offset = -len(pandas_key)
if file_name is not None:
max_offset = -os.path.getsize(file_name)
with open(file_name, 'rb') as f:
while True:
if offset < max_offset:
offset = max_offset
f.seek(offset, os.SEEK_END)
lines = f.readlines()
if len(lines) >= 2:
break
offset *= 2
last_line = decode_string(lines[-1]).strip()
if not last_line.startswith(pandas_key):
last_line = decode_string(lines[-2]).strip()
elif model_str is not None:
idx = model_str.rfind('\n', 0, offset)
last_line = model_str[idx:].strip()
if last_line.startswith(pandas_key):
return json.loads(last_line[len(pandas_key):])
else:
return None
class _InnerPredictor(object):
"""_InnerPredictor of LightGBM.
Not exposed to user.
Used only for prediction, usually used for continued training.
.. note::
Can be converted from Booster, but cannot be converted to Booster.
"""
def __init__(self, model_file=None, booster_handle=None, pred_parameter=None):
"""Initialize the _InnerPredictor.
Parameters
----------
model_file : string or None, optional (default=None)
Path to the model file.
booster_handle : object or None, optional (default=None)
Handle of Booster.
        pred_parameter : dict or None, optional (default=None)
            Other parameters for the prediction.
"""
self.handle = ctypes.c_void_p()
self.__is_manage_handle = True
if model_file is not None:
"""Prediction task"""
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = out_num_iterations.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif booster_handle is not None:
self.__is_manage_handle = False
self.handle = booster_handle
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.num_class = out_num_class.value
self.num_total_iteration = self.current_iteration()
self.pandas_categorical = None
else:
raise TypeError('Need model_file or booster_handle to create a predictor')
pred_parameter = {} if pred_parameter is None else pred_parameter
self.pred_parameter = param_dict_to_str(pred_parameter)
def __del__(self):
try:
if self.__is_manage_handle:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __getstate__(self):
this = self.__dict__.copy()
this.pop('handle', None)
return this
def predict(self, data, num_iteration=-1,
raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False,
is_reshape=True):
"""Predict logic.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
When data type is string, it represents the path of txt file.
num_iteration : int, optional (default=-1)
Iteration used for prediction.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
data_has_header : bool, optional (default=False)
Whether data has header.
Used only for txt data.
is_reshape : bool, optional (default=True)
Whether to reshape to (nrow, ncol).
Returns
-------
result : numpy array
Prediction result.
"""
if isinstance(data, Dataset):
raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
predict_type = C_API_PREDICT_NORMAL
if raw_score:
predict_type = C_API_PREDICT_RAW_SCORE
if pred_leaf:
predict_type = C_API_PREDICT_LEAF_INDEX
if pred_contrib:
predict_type = C_API_PREDICT_CONTRIB
int_data_has_header = 1 if data_has_header else 0
if num_iteration > self.num_total_iteration:
num_iteration = self.num_total_iteration
if isinstance(data, string_type):
with _TempFile() as f:
_safe_call(_LIB.LGBM_BoosterPredictForFile(
self.handle,
c_str(data),
ctypes.c_int(int_data_has_header),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
c_str(f.name)))
lines = f.readlines()
nrow = len(lines)
preds = [float(token) for line in lines for token in line.split('\t')]
preds = np.array(preds, dtype=np.float64, copy=False)
elif isinstance(data, scipy.sparse.csr_matrix):
preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type)
elif isinstance(data, scipy.sparse.csc_matrix):
preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type)
elif isinstance(data, np.ndarray):
preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
elif isinstance(data, list):
try:
data = np.array(data)
except BaseException:
raise ValueError('Cannot convert data list to numpy array.')
preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
elif isinstance(data, DataTable):
preds, nrow = self.__pred_for_np2d(data.to_numpy(), num_iteration, predict_type)
else:
try:
warnings.warn('Converting data to scipy sparse matrix.')
csr = scipy.sparse.csr_matrix(data)
except BaseException:
raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type)
if pred_leaf:
preds = preds.astype(np.int32)
if is_reshape and preds.size != nrow:
if preds.size % nrow == 0:
preds = preds.reshape(nrow, -1)
else:
                raise ValueError('Length of predict result (%d) cannot be divided by nrow (%d)'
                                 % (preds.size, nrow))
return preds
def __get_num_preds(self, num_iteration, nrow, predict_type):
"""Get size of prediction result."""
if nrow > MAX_INT32:
            raise LightGBMError('LightGBM cannot perform prediction for data '
                                'with number of rows greater than MAX_INT32 (%d).\n'
                                'You can split your data into chunks '
                                'and then concatenate predictions for them' % MAX_INT32)
n_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterCalcNumPredict(
self.handle,
ctypes.c_int(nrow),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
ctypes.byref(n_preds)))
return n_preds.value
def __pred_for_np2d(self, mat, num_iteration, predict_type):
"""Predict for a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray or list must be 2 dimensional')
def inner_predict(mat, num_iteration, predict_type, preds=None):
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
n_preds = self.__get_num_preds(num_iteration, mat.shape[0], predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
_safe_call(_LIB.LGBM_BoosterPredictForMat(
self.handle,
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, mat.shape[0]
nrow = mat.shape[0]
if nrow > MAX_INT32:
sections = np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff([0] + list(sections) + [nrow])]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for chunk, (start_idx_pred, end_idx_pred) in zip_(np.array_split(mat, sections),
zip_(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(chunk, num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(mat, num_iteration, predict_type)
def __pred_for_csr(self, csr, num_iteration, predict_type):
"""Predict for a CSR data."""
def inner_predict(csr, num_iteration, predict_type, preds=None):
nrow = len(csr.indptr) - 1
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
if preds is None:
preds = np.zeros(n_preds, dtype=np.float64)
elif len(preds.shape) != 1 or len(preds) != n_preds:
raise ValueError("Wrong length of pre-allocated predict array")
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr.indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSR(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
nrow = len(csr.indptr) - 1
if nrow > MAX_INT32:
sections = [0] + list(np.arange(start=MAX_INT32, stop=nrow, step=MAX_INT32)) + [nrow]
# __get_num_preds() cannot work with nrow > MAX_INT32, so calculate overall number of predictions piecemeal
n_preds = [self.__get_num_preds(num_iteration, i, predict_type) for i in np.diff(sections)]
n_preds_sections = np.array([0] + n_preds, dtype=np.intp).cumsum()
preds = np.zeros(sum(n_preds), dtype=np.float64)
for (start_idx, end_idx), (start_idx_pred, end_idx_pred) in zip_(zip_(sections, sections[1:]),
zip_(n_preds_sections, n_preds_sections[1:])):
# avoid memory consumption by arrays concatenation operations
inner_predict(csr[start_idx:end_idx], num_iteration, predict_type, preds[start_idx_pred:end_idx_pred])
return preds, nrow
else:
return inner_predict(csr, num_iteration, predict_type)
def __pred_for_csc(self, csc, num_iteration, predict_type):
"""Predict for a CSC data."""
nrow = csc.shape[0]
if nrow > MAX_INT32:
return self.__pred_for_csr(csc.tocsr(), num_iteration, predict_type)
n_preds = self.__get_num_preds(num_iteration, nrow, predict_type)
preds = np.zeros(n_preds, dtype=np.float64)
out_num_preds = ctypes.c_int64(0)
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc.indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_BoosterPredictForCSC(
self.handle,
ptr_indptr,
ctypes.c_int32(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
ctypes.c_int(predict_type),
ctypes.c_int(num_iteration),
c_str(self.pred_parameter),
ctypes.byref(out_num_preds),
preds.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if n_preds != out_num_preds.value:
raise ValueError("Wrong length for predict results")
return preds, nrow
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
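# A minimal usage sketch for the Dataset class defined below. It assumes the public
# `lightgbm` package imported as `lgb`; `X` and `y` are illustrative placeholders
# for a 2-D feature matrix and a 1-D label array.
#
#     import numpy as np
#     import lightgbm as lgb
#     X = np.random.rand(100, 5)
#     y = np.random.rand(100)
#     train_set = lgb.Dataset(X, label=y, params={'max_bin': 255})
#     train_set.construct()  # lazy initialization happens here or at training time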
class Dataset(object):
"""Dataset in LightGBM."""
def __init__(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, silent=False,
feature_name='auto', categorical_feature='auto', params=None,
free_raw_data=True):
"""Initialize Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
reference : Dataset or None, optional (default=None)
If this is Dataset for validation, training data should be used as reference.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for Dataset.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
The output cannot be monotonically constrained with respect to a categorical feature.
params : dict or None, optional (default=None)
Other parameters for Dataset.
free_raw_data : bool, optional (default=True)
If True, raw data is freed after constructing inner Dataset.
"""
self.handle = None
self.data = data
self.label = label
self.reference = reference
self.weight = weight
self.group = group
self.init_score = init_score
self.silent = silent
self.feature_name = feature_name
self.categorical_feature = categorical_feature
self.params = copy.deepcopy(params)
self.free_raw_data = free_raw_data
self.used_indices = None
self.need_slice = True
self._predictor = None
self.pandas_categorical = None
self.params_back_up = None
self.feature_penalty = None
self.monotone_constraints = None
self.version = 0
def __del__(self):
try:
self._free_handle()
except AttributeError:
pass
def get_params(self):
"""Get the used parameters in the Dataset.
Returns
-------
params : dict or None
The used parameters in this Dataset object.
"""
if self.params is not None:
            # min_data, nthreads and verbose are intentionally not included in this list
dataset_params = _ConfigAliases.get("bin_construct_sample_cnt",
"categorical_feature",
"data_random_seed",
"enable_bundle",
"feature_pre_filter",
"forcedbins_filename",
"group_column",
"header",
"ignore_column",
"is_enable_sparse",
"label_column",
"max_bin",
"max_bin_by_feature",
"min_data_in_bin",
"pre_partition",
"two_round",
"use_missing",
"weight_column",
"zero_as_missing")
return {k: v for k, v in self.params.items() if k in dataset_params}
def _free_handle(self):
if self.handle is not None:
_safe_call(_LIB.LGBM_DatasetFree(self.handle))
self.handle = None
self.need_slice = True
if self.used_indices is not None:
self.data = None
return self
def _set_init_score_by_predictor(self, predictor, data, used_indices=None):
data_has_header = False
if isinstance(data, string_type):
# check data has header or not
data_has_header = any(self.params.get(alias, False) for alias in _ConfigAliases.get("header"))
num_data = self.num_data()
if predictor is not None:
init_score = predictor.predict(data,
raw_score=True,
data_has_header=data_has_header,
is_reshape=False)
if used_indices is not None:
assert not self.need_slice
if isinstance(data, string_type):
sub_init_score = np.zeros(num_data * predictor.num_class, dtype=np.float32)
assert num_data == len(used_indices)
for i in range_(len(used_indices)):
for j in range_(predictor.num_class):
sub_init_score[i * predictor.num_class + j] = init_score[used_indices[i] * predictor.num_class + j]
init_score = sub_init_score
if predictor.num_class > 1:
# need to regroup init_score
new_init_score = np.zeros(init_score.size, dtype=np.float32)
for i in range_(num_data):
for j in range_(predictor.num_class):
new_init_score[j * num_data + i] = init_score[i * predictor.num_class + j]
init_score = new_init_score
elif self.init_score is not None:
init_score = np.zeros(self.init_score.shape, dtype=np.float32)
else:
return self
self.set_init_score(init_score)
def _lazy_init(self, data, label=None, reference=None,
weight=None, group=None, init_score=None, predictor=None,
silent=False, feature_name='auto',
categorical_feature='auto', params=None):
if data is None:
self.handle = None
return self
if reference is not None:
self.pandas_categorical = reference.pandas_categorical
categorical_feature = reference.categorical_feature
data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data,
feature_name,
categorical_feature,
self.pandas_categorical)
label = _label_from_pandas(label)
# process for args
params = {} if params is None else params
args_names = (getattr(self.__class__, '_lazy_init')
.__code__
.co_varnames[:getattr(self.__class__, '_lazy_init').__code__.co_argcount])
for key, _ in params.items():
if key in args_names:
warnings.warn('{0} keyword has been found in `params` and will be ignored.\n'
'Please use {0} argument of the Dataset constructor to pass this parameter.'
.format(key))
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
# get categorical features
if categorical_feature is not None:
categorical_indices = set()
feature_dict = {}
if feature_name is not None:
feature_dict = {name: i for i, name in enumerate(feature_name)}
for name in categorical_feature:
if isinstance(name, string_type) and name in feature_dict:
categorical_indices.add(feature_dict[name])
elif isinstance(name, integer_types):
categorical_indices.add(name)
else:
raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
.format(type(name).__name__, name))
if categorical_indices:
for cat_alias in _ConfigAliases.get("categorical_feature"):
if cat_alias in params:
warnings.warn('{} in param dict is overridden.'.format(cat_alias))
params.pop(cat_alias, None)
params['categorical_column'] = sorted(categorical_indices)
params_str = param_dict_to_str(params)
self.params = params
# process for reference dataset
ref_dataset = None
if isinstance(reference, Dataset):
ref_dataset = reference.construct().handle
elif reference is not None:
raise TypeError('Reference dataset should be None or dataset instance')
# start construct data
if isinstance(data, string_type):
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromFile(
c_str(data),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
elif isinstance(data, scipy.sparse.csr_matrix):
self.__init_from_csr(data, params_str, ref_dataset)
elif isinstance(data, scipy.sparse.csc_matrix):
self.__init_from_csc(data, params_str, ref_dataset)
elif isinstance(data, np.ndarray):
self.__init_from_np2d(data, params_str, ref_dataset)
elif isinstance(data, list) and len(data) > 0 and all(isinstance(x, np.ndarray) for x in data):
self.__init_from_list_np2d(data, params_str, ref_dataset)
elif isinstance(data, DataTable):
self.__init_from_np2d(data.to_numpy(), params_str, ref_dataset)
else:
try:
csr = scipy.sparse.csr_matrix(data)
self.__init_from_csr(csr, params_str, ref_dataset)
except BaseException:
raise TypeError('Cannot initialize Dataset from {}'.format(type(data).__name__))
if label is not None:
self.set_label(label)
if self.get_label() is None:
raise ValueError("Label should not be None")
if weight is not None:
self.set_weight(weight)
if group is not None:
self.set_group(group)
if isinstance(predictor, _InnerPredictor):
if self._predictor is None and init_score is not None:
warnings.warn("The init_score will be overridden by the prediction of init_model.")
self._set_init_score_by_predictor(predictor, data)
elif init_score is not None:
self.set_init_score(init_score)
elif predictor is not None:
raise TypeError('Wrong predictor type {}'.format(type(predictor).__name__))
# set feature names
return self.set_feature_name(feature_name)
def __init_from_np2d(self, mat, params_str, ref_dataset):
"""Initialize data from a 2-D numpy matrix."""
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
self.handle = ctypes.c_void_p()
if mat.dtype == np.float32 or mat.dtype == np.float64:
data = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
data = np.array(mat.reshape(mat.size), dtype=np.float32)
ptr_data, type_ptr_data, _ = c_float_array(data)
_safe_call(_LIB.LGBM_DatasetCreateFromMat(
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int(mat.shape[0]),
ctypes.c_int(mat.shape[1]),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_list_np2d(self, mats, params_str, ref_dataset):
"""Initialize data from a list of 2-D numpy matrices."""
ncol = mats[0].shape[1]
nrow = np.zeros((len(mats),), np.int32)
if mats[0].dtype == np.float64:
ptr_data = (ctypes.POINTER(ctypes.c_double) * len(mats))()
else:
ptr_data = (ctypes.POINTER(ctypes.c_float) * len(mats))()
holders = []
type_ptr_data = None
for i, mat in enumerate(mats):
if len(mat.shape) != 2:
raise ValueError('Input numpy.ndarray must be 2 dimensional')
if mat.shape[1] != ncol:
raise ValueError('Input arrays must have same number of columns')
nrow[i] = mat.shape[0]
if mat.dtype == np.float32 or mat.dtype == np.float64:
mats[i] = np.array(mat.reshape(mat.size), dtype=mat.dtype, copy=False)
else: # change non-float data to float data, need to copy
mats[i] = np.array(mat.reshape(mat.size), dtype=np.float32)
chunk_ptr_data, chunk_type_ptr_data, holder = c_float_array(mats[i])
if type_ptr_data is not None and chunk_type_ptr_data != type_ptr_data:
raise ValueError('Input chunks must have same type')
ptr_data[i] = chunk_ptr_data
type_ptr_data = chunk_type_ptr_data
holders.append(holder)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_DatasetCreateFromMats(
ctypes.c_int(len(mats)),
ctypes.cast(ptr_data, ctypes.POINTER(ctypes.POINTER(ctypes.c_double))),
ctypes.c_int(type_ptr_data),
nrow.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(ncol),
ctypes.c_int(C_API_IS_ROW_MAJOR),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csr(self, csr, params_str, ref_dataset):
"""Initialize data from a CSR matrix."""
if len(csr.indices) != len(csr.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csr.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csr.data)
assert csr.shape[1] <= MAX_INT32
csr.indices = csr.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSR(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csr.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csr.indptr)),
ctypes.c_int64(len(csr.data)),
ctypes.c_int64(csr.shape[1]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def __init_from_csc(self, csc, params_str, ref_dataset):
"""Initialize data from a CSC matrix."""
if len(csc.indices) != len(csc.data):
raise ValueError('Length mismatch: {} vs {}'.format(len(csc.indices), len(csc.data)))
self.handle = ctypes.c_void_p()
ptr_indptr, type_ptr_indptr, __ = c_int_array(csc.indptr)
ptr_data, type_ptr_data, _ = c_float_array(csc.data)
assert csc.shape[0] <= MAX_INT32
csc.indices = csc.indices.astype(np.int32, copy=False)
_safe_call(_LIB.LGBM_DatasetCreateFromCSC(
ptr_indptr,
ctypes.c_int(type_ptr_indptr),
csc.indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ptr_data,
ctypes.c_int(type_ptr_data),
ctypes.c_int64(len(csc.indptr)),
ctypes.c_int64(len(csc.data)),
ctypes.c_int64(csc.shape[0]),
c_str(params_str),
ref_dataset,
ctypes.byref(self.handle)))
return self
def construct(self):
"""Lazy init.
Returns
-------
self : Dataset
Constructed Dataset object.
"""
if self.handle is None:
if self.reference is not None:
reference_params = self.reference.get_params()
if self.get_params() != reference_params:
warnings.warn('Overriding the parameters from Reference Dataset.')
self._update_params(reference_params)
if self.used_indices is None:
# create valid
self._lazy_init(self.data, label=self.label, reference=self.reference,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name, params=self.params)
else:
# construct subset
used_indices = list_to_1d_numpy(self.used_indices, np.int32, name='used_indices')
assert used_indices.flags.c_contiguous
if self.reference.group is not None:
group_info = np.array(self.reference.group).astype(np.int32, copy=False)
_, self.group = np.unique(np.repeat(range_(len(group_info)), repeats=group_info)[self.used_indices],
return_counts=True)
self.handle = ctypes.c_void_p()
params_str = param_dict_to_str(self.params)
_safe_call(_LIB.LGBM_DatasetGetSubset(
self.reference.construct().handle,
used_indices.ctypes.data_as(ctypes.POINTER(ctypes.c_int32)),
ctypes.c_int(used_indices.shape[0]),
c_str(params_str),
ctypes.byref(self.handle)))
if not self.free_raw_data:
self.get_data()
if self.group is not None:
self.set_group(self.group)
if self.get_label() is None:
raise ValueError("Label should not be None.")
if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
self.get_data()
self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
else:
# create train
self._lazy_init(self.data, label=self.label,
weight=self.weight, group=self.group,
init_score=self.init_score, predictor=self._predictor,
silent=self.silent, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=self.params)
if self.free_raw_data:
self.data = None
return self
def create_valid(self, data, label=None, weight=None, group=None,
init_score=None, silent=False, params=None):
"""Create validation data align with current Dataset.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse or list of numpy arrays
Data source of Dataset.
If string, it represents the path to txt file.
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None, optional (default=None)
Label of the data.
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
Group/query size for Dataset.
init_score : list, numpy 1-D array, pandas Series or None, optional (default=None)
Init score for Dataset.
silent : bool, optional (default=False)
Whether to print messages during construction.
params : dict or None, optional (default=None)
Other parameters for validation Dataset.
Returns
-------
valid : Dataset
Validation Dataset with reference to self.
"""
ret = Dataset(data, label=label, reference=self,
weight=weight, group=group, init_score=init_score,
silent=silent, params=params, free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
return ret
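    # A hedged example of aligning validation data with a training Dataset
    # (`X_train`, `y_train`, `X_val`, `y_val` are illustrative placeholders):
    #
    #     train_set = Dataset(X_train, label=y_train)
    #     valid_set = train_set.create_valid(X_val, label=y_val)
    #     # valid_set keeps train_set as its reference, so both share the same binning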
def subset(self, used_indices, params=None):
"""Get subset of current Dataset.
Parameters
----------
used_indices : list of int
Indices used to create the subset.
params : dict or None, optional (default=None)
These parameters will be passed to Dataset constructor.
Returns
-------
subset : Dataset
Subset of the current Dataset.
"""
if params is None:
params = self.params
ret = Dataset(None, reference=self, feature_name=self.feature_name,
categorical_feature=self.categorical_feature, params=params,
free_raw_data=self.free_raw_data)
ret._predictor = self._predictor
ret.pandas_categorical = self.pandas_categorical
ret.used_indices = sorted(used_indices)
return ret
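    # A small sketch of taking a row subset (indices are illustrative):
    #
    #     fold = dataset.subset([0, 2, 4, 6])  # lazy; rows are sliced when construct() runs
    #     fold.construct()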
def save_binary(self, filename):
"""Save Dataset to a binary file.
.. note::
Please note that `init_score` is not saved in binary file.
If you need it, please set it again after loading Dataset.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self
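    # A hedged round-trip example: a Dataset saved as binary can be reloaded by passing
    # the file path back to the constructor (the file name is illustrative):
    #
    #     dataset.save_binary('train.bin')
    #     reloaded = Dataset('train.bin')
    #     # note: init_score is not stored in the binary file (see the docstring above)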
def _update_params(self, params):
if not params:
return self
params = copy.deepcopy(params)
def update():
if not self.params:
self.params = params
else:
self.params_back_up = copy.deepcopy(self.params)
self.params.update(params)
if self.handle is None:
update()
elif params is not None:
ret = _LIB.LGBM_DatasetUpdateParamChecking(
c_str(param_dict_to_str(self.params)),
c_str(param_dict_to_str(params)))
if ret != 0:
# could be updated if data is not freed
if self.data is not None:
update()
self._free_handle()
else:
raise LightGBMError(decode_string(_LIB.LGBM_GetLastError()))
return self
def _reverse_update_params(self):
if self.handle is None:
self.params = copy.deepcopy(self.params_back_up)
self.params_back_up = None
return self
def set_field(self, field_name, data):
"""Set property into the Dataset.
Parameters
----------
field_name : string
The field name of the information.
data : list, numpy 1-D array, pandas Series or None
The array of data to be set.
Returns
-------
self : Dataset
Dataset with set property.
"""
if self.handle is None:
raise Exception("Cannot set %s before construct dataset" % field_name)
if data is None:
# set to None
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
None,
ctypes.c_int(0),
ctypes.c_int(FIELD_TYPE_MAPPER[field_name])))
return self
dtype = np.float32
if field_name == 'group':
dtype = np.int32
elif field_name == 'init_score':
dtype = np.float64
data = list_to_1d_numpy(data, dtype, name=field_name)
if data.dtype == np.float32 or data.dtype == np.float64:
ptr_data, type_data, _ = c_float_array(data)
elif data.dtype == np.int32:
ptr_data, type_data, _ = c_int_array(data)
else:
raise TypeError("Expected np.float32/64 or np.int32, met type({})".format(data.dtype))
if type_data != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Input type error for set_field")
_safe_call(_LIB.LGBM_DatasetSetField(
self.handle,
c_str(field_name),
ptr_data,
ctypes.c_int(len(data)),
ctypes.c_int(type_data)))
self.version += 1
return self
def get_field(self, field_name):
"""Get property from the Dataset.
Parameters
----------
field_name : string
The field name of the information.
Returns
-------
info : numpy array
A numpy array with information from the Dataset.
"""
if self.handle is None:
raise Exception("Cannot get %s before construct Dataset" % field_name)
tmp_out_len = ctypes.c_int()
out_type = ctypes.c_int()
ret = ctypes.POINTER(ctypes.c_void_p)()
_safe_call(_LIB.LGBM_DatasetGetField(
self.handle,
c_str(field_name),
ctypes.byref(tmp_out_len),
ctypes.byref(ret),
ctypes.byref(out_type)))
if out_type.value != FIELD_TYPE_MAPPER[field_name]:
raise TypeError("Return type error for get_field")
if tmp_out_len.value == 0:
return None
if out_type.value == C_API_DTYPE_INT32:
return cint32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_int32)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT32:
return cfloat32_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_float)), tmp_out_len.value)
elif out_type.value == C_API_DTYPE_FLOAT64:
return cfloat64_array_to_numpy(ctypes.cast(ret, ctypes.POINTER(ctypes.c_double)), tmp_out_len.value)
else:
raise TypeError("Unknown type")
def set_categorical_feature(self, categorical_feature):
"""Set categorical features.
Parameters
----------
categorical_feature : list of int or strings
Names or indices of categorical features.
Returns
-------
self : Dataset
Dataset with set categorical features.
"""
if self.categorical_feature == categorical_feature:
return self
if self.data is not None:
if self.categorical_feature is None:
self.categorical_feature = categorical_feature
return self._free_handle()
elif categorical_feature == 'auto':
warnings.warn('Using categorical_feature in Dataset.')
return self
else:
warnings.warn('categorical_feature in Dataset is overridden.\n'
'New categorical_feature is {}'.format(sorted(list(categorical_feature))))
self.categorical_feature = categorical_feature
return self._free_handle()
else:
raise LightGBMError("Cannot set categorical feature after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def _set_predictor(self, predictor):
"""Set predictor for continued training.
It is not recommended for user to call this function.
Please use init_model argument in engine.train() or engine.cv() instead.
"""
if predictor is self._predictor and (predictor is None or predictor.current_iteration() == self._predictor.current_iteration()):
return self
if self.handle is None:
self._predictor = predictor
elif self.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.data)
elif self.used_indices is not None and self.reference is not None and self.reference.data is not None:
self._predictor = predictor
self._set_init_score_by_predictor(self._predictor, self.reference.data, self.used_indices)
else:
raise LightGBMError("Cannot set predictor after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self
def set_reference(self, reference):
"""Set reference Dataset.
Parameters
----------
reference : Dataset
Reference that is used as a template to construct the current Dataset.
Returns
-------
self : Dataset
Dataset with set reference.
"""
self.set_categorical_feature(reference.categorical_feature) \
.set_feature_name(reference.feature_name) \
._set_predictor(reference._predictor)
        # we're done if self and reference share a common upstream reference
if self.get_ref_chain().intersection(reference.get_ref_chain()):
return self
if self.data is not None:
self.reference = reference
return self._free_handle()
else:
raise LightGBMError("Cannot set reference after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
def set_feature_name(self, feature_name):
"""Set feature name.
Parameters
----------
feature_name : list of strings
Feature names.
Returns
-------
self : Dataset
Dataset with set feature name.
"""
if feature_name != 'auto':
self.feature_name = feature_name
if self.handle is not None and feature_name is not None and feature_name != 'auto':
if len(feature_name) != self.num_feature():
raise ValueError("Length of feature_name({}) and num_feature({}) don't match"
.format(len(feature_name), self.num_feature()))
c_feature_name = [c_str(name) for name in feature_name]
_safe_call(_LIB.LGBM_DatasetSetFeatureNames(
self.handle,
c_array(ctypes.c_char_p, c_feature_name),
ctypes.c_int(len(feature_name))))
return self
def set_label(self, label):
"""Set label of Dataset.
Parameters
----------
label : list, numpy 1-D array, pandas Series / one-column DataFrame or None
The label information to be set into Dataset.
Returns
-------
self : Dataset
Dataset with set label.
"""
self.label = label
if self.handle is not None:
label = list_to_1d_numpy(_label_from_pandas(label), name='label')
self.set_field('label', label)
self.label = self.get_field('label') # original values can be modified at cpp side
return self
def set_weight(self, weight):
"""Set weight of each instance.
Parameters
----------
weight : list, numpy 1-D array, pandas Series or None
Weight to be set for each data point.
Returns
-------
self : Dataset
Dataset with set weight.
"""
if weight is not None and np.all(weight == 1):
weight = None
self.weight = weight
if self.handle is not None and weight is not None:
weight = list_to_1d_numpy(weight, name='weight')
self.set_field('weight', weight)
self.weight = self.get_field('weight') # original values can be modified at cpp side
return self
def set_init_score(self, init_score):
"""Set init score of Booster to start from.
Parameters
----------
init_score : list, numpy 1-D array, pandas Series or None
Init score for Booster.
Returns
-------
self : Dataset
Dataset with set init score.
"""
self.init_score = init_score
if self.handle is not None and init_score is not None:
init_score = list_to_1d_numpy(init_score, np.float64, name='init_score')
self.set_field('init_score', init_score)
self.init_score = self.get_field('init_score') # original values can be modified at cpp side
return self
def set_group(self, group):
"""Set group size of Dataset (used for ranking).
Parameters
----------
group : list, numpy 1-D array, pandas Series or None
Group size of each group.
Returns
-------
self : Dataset
Dataset with set group.
"""
self.group = group
if self.handle is not None and group is not None:
group = list_to_1d_numpy(group, np.int32, name='group')
self.set_field('group', group)
return self
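    # A sketch for ranking data: group sizes must sum to the number of rows.
    # For example, 3 queries with 10, 20 and 30 documents (values illustrative):
    #
    #     dataset.set_group([10, 20, 30])  # the Dataset is expected to have 60 rows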
def get_label(self):
"""Get the label of the Dataset.
Returns
-------
label : numpy array or None
The label information from the Dataset.
"""
if self.label is None:
self.label = self.get_field('label')
return self.label
def get_weight(self):
"""Get the weight of the Dataset.
Returns
-------
weight : numpy array or None
Weight for each data point from the Dataset.
"""
if self.weight is None:
self.weight = self.get_field('weight')
return self.weight
def get_init_score(self):
"""Get the initial score of the Dataset.
Returns
-------
init_score : numpy array or None
Init score of Booster.
"""
if self.init_score is None:
self.init_score = self.get_field('init_score')
return self.init_score
def get_data(self):
"""Get the raw data of the Dataset.
Returns
-------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame, scipy.sparse, list of numpy arrays or None
Raw data used in the Dataset construction.
"""
if self.handle is None:
raise Exception("Cannot get data before construct Dataset")
if self.need_slice and self.used_indices is not None and self.reference is not None:
self.data = self.reference.data
if self.data is not None:
if isinstance(self.data, np.ndarray) or scipy.sparse.issparse(self.data):
self.data = self.data[self.used_indices, :]
elif isinstance(self.data, DataFrame):
self.data = self.data.iloc[self.used_indices].copy()
elif isinstance(self.data, DataTable):
self.data = self.data[self.used_indices, :]
else:
warnings.warn("Cannot subset {} type of raw data.\n"
"Returning original raw data".format(type(self.data).__name__))
self.need_slice = False
if self.data is None:
raise LightGBMError("Cannot call `get_data` after freed raw data, "
"set free_raw_data=False when construct Dataset to avoid this.")
return self.data
def get_group(self):
"""Get the group of the Dataset.
Returns
-------
group : numpy array or None
Group size of each group.
"""
if self.group is None:
self.group = self.get_field('group')
if self.group is not None:
# group data from LightGBM is boundaries data, need to convert to group size
self.group = np.diff(self.group)
return self.group
def num_data(self):
"""Get the number of rows in the Dataset.
Returns
-------
number_of_rows : int
The number of rows in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumData(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_data before construct dataset")
def num_feature(self):
"""Get the number of columns (features) in the Dataset.
Returns
-------
number_of_columns : int
The number of columns (features) in the Dataset.
"""
if self.handle is not None:
ret = ctypes.c_int()
_safe_call(_LIB.LGBM_DatasetGetNumFeature(self.handle,
ctypes.byref(ret)))
return ret.value
else:
raise LightGBMError("Cannot get num_feature before construct dataset")
def get_ref_chain(self, ref_limit=100):
"""Get a chain of Dataset objects.
        Starts with ``self``, then goes to ``self.reference`` (if it exists),
        then to ``self.reference.reference``, etc.
        until we hit ``ref_limit`` or a reference loop.
Parameters
----------
ref_limit : int, optional (default=100)
The limit number of references.
Returns
-------
ref_chain : set of Dataset
Chain of references of the Datasets.
"""
head = self
ref_chain = set()
while len(ref_chain) < ref_limit:
if isinstance(head, Dataset):
ref_chain.add(head)
if (head.reference is not None) and (head.reference not in ref_chain):
head = head.reference
else:
break
else:
break
return ref_chain
def add_features_from(self, other):
"""Add features from other Dataset to the current Dataset.
Both Datasets must be constructed before calling this method.
Parameters
----------
other : Dataset
The Dataset to take features from.
Returns
-------
self : Dataset
Dataset with the new features added.
"""
if self.handle is None or other.handle is None:
raise ValueError('Both source and target Datasets must be constructed before adding features')
_safe_call(_LIB.LGBM_DatasetAddFeaturesFrom(self.handle, other.handle))
return self
def _dump_text(self, filename):
"""Save Dataset to a text file.
This format cannot be loaded back in by LightGBM, but is useful for debugging purposes.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self.
"""
_safe_call(_LIB.LGBM_DatasetDumpText(
self.construct().handle,
c_str(filename)))
return self
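# A minimal training-loop sketch using only the classes defined in this module
# (parameter values and data names are illustrative assumptions):
#
#     train_set = Dataset(X_train, label=y_train)
#     booster = Booster(params={'objective': 'regression', 'verbose': -1},
#                       train_set=train_set)
#     for _ in range(10):
#         booster.update()         # one boosting iteration on the training data
#     print(booster.eval_train())  # built-in metric(s) evaluated on the training data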
class Booster(object):
"""Booster in LightGBM."""
def __init__(self, params=None, train_set=None, model_file=None, model_str=None, silent=False):
"""Initialize the Booster.
Parameters
----------
params : dict or None, optional (default=None)
Parameters for Booster.
train_set : Dataset or None, optional (default=None)
Training dataset.
model_file : string or None, optional (default=None)
Path to the model file.
model_str : string or None, optional (default=None)
Model will be loaded from this string.
silent : bool, optional (default=False)
Whether to print messages during construction.
"""
self.handle = None
self.network = False
self.__need_reload_eval_info = True
self._train_data_name = "training"
self.__attr = {}
self.__set_objective_to_none = False
self.best_iteration = -1
self.best_score = {}
params = {} if params is None else copy.deepcopy(params)
# user can set verbose with params, it has higher priority
if not any(verbose_alias in params for verbose_alias in _ConfigAliases.get("verbosity")) and silent:
params["verbose"] = -1
if train_set is not None:
# Training task
if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, got {}'
                                .format(type(train_set).__name__))
# set network if necessary
for alias in _ConfigAliases.get("machines"):
if alias in params:
machines = params[alias]
if isinstance(machines, string_type):
num_machines = len(machines.split(','))
elif isinstance(machines, (list, set)):
num_machines = len(machines)
machines = ','.join(machines)
else:
raise ValueError("Invalid machines in params.")
self.set_network(machines,
local_listen_port=params.get("local_listen_port", 12400),
listen_time_out=params.get("listen_time_out", 120),
num_machines=params.setdefault("num_machines", num_machines))
break
# construct booster object
train_set.construct()
# copy the parameters from train_set
params.update(train_set.get_params())
params_str = param_dict_to_str(params)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreate(
train_set.handle,
c_str(params_str),
ctypes.byref(self.handle)))
# save reference to data
self.train_set = train_set
self.valid_sets = []
self.name_valid_sets = []
self.__num_dataset = 1
self.__init_predictor = train_set._predictor
if self.__init_predictor is not None:
_safe_call(_LIB.LGBM_BoosterMerge(
self.handle,
self.__init_predictor.handle))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
# buffer for inner predict
self.__inner_predict_buffer = [None]
self.__is_predicted_cur_iter = [False]
self.__get_eval_info()
self.pandas_categorical = train_set.pandas_categorical
self.train_set_version = train_set.version
elif model_file is not None:
# Prediction task
out_num_iterations = ctypes.c_int(0)
self.handle = ctypes.c_void_p()
_safe_call(_LIB.LGBM_BoosterCreateFromModelfile(
c_str(model_file),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(file_name=model_file)
elif model_str is not None:
self.model_from_string(model_str, not silent)
else:
            raise TypeError('Need at least one of training dataset, model file or model string '
                            'to create Booster instance')
self.params = params
def __del__(self):
try:
if self.network:
self.free_network()
except AttributeError:
pass
try:
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
except AttributeError:
pass
def __copy__(self):
return self.__deepcopy__(None)
def __deepcopy__(self, _):
model_str = self.model_to_string(num_iteration=-1)
booster = Booster(model_str=model_str)
return booster
def __getstate__(self):
this = self.__dict__.copy()
handle = this['handle']
this.pop('train_set', None)
this.pop('valid_sets', None)
if handle is not None:
this["handle"] = self.model_to_string(num_iteration=-1)
return this
def __setstate__(self, state):
model_str = state.get('handle', None)
if model_str is not None:
handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(handle)))
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
"""Free Booster's Datasets.
Returns
-------
self : Booster
Booster without Datasets.
"""
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
self.__num_dataset = 0
return self
def _free_buffer(self):
self.__inner_predict_buffer = []
self.__is_predicted_cur_iter = []
return self
def set_network(self, machines, local_listen_port=12400,
listen_time_out=120, num_machines=1):
"""Set the network configuration.
Parameters
----------
machines : list, set or string
Names of machines.
local_listen_port : int, optional (default=12400)
TCP listen port for local machines.
listen_time_out : int, optional (default=120)
Socket time-out in minutes.
num_machines : int, optional (default=1)
The number of machines for parallel learning application.
Returns
-------
self : Booster
Booster with set network.
"""
_safe_call(_LIB.LGBM_NetworkInit(c_str(machines),
ctypes.c_int(local_listen_port),
ctypes.c_int(listen_time_out),
ctypes.c_int(num_machines)))
self.network = True
return self
def free_network(self):
"""Free Booster's network.
Returns
-------
self : Booster
Booster with freed network.
"""
_safe_call(_LIB.LGBM_NetworkFree())
self.network = False
return self
def trees_to_dataframe(self):
"""Parse the fitted model and return in an easy-to-read pandas DataFrame.
Returns
-------
result : pandas DataFrame
Returns a pandas DataFrame of the parsed model.
"""
if not PANDAS_INSTALLED:
raise LightGBMError('This method cannot be run without pandas installed')
if self.num_trees() == 0:
raise LightGBMError('There are no trees in this Booster and thus nothing to parse')
def _is_split_node(tree):
return 'split_index' in tree.keys()
def create_node_record(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
def _get_node_index(tree, tree_index):
tree_num = str(tree_index) + '-' if tree_index is not None else ''
is_split = _is_split_node(tree)
node_type = 'S' if is_split else 'L'
                # a single-node tree won't have `leaf_index`, so default to 0
node_num = str(tree.get('split_index' if is_split else 'leaf_index', 0))
return tree_num + node_type + node_num
def _get_split_feature(tree, feature_names):
if _is_split_node(tree):
if feature_names is not None:
feature_name = feature_names[tree['split_feature']]
else:
feature_name = tree['split_feature']
else:
feature_name = None
return feature_name
def _is_single_node_tree(tree):
return set(tree.keys()) == {'leaf_value'}
# Create the node record, and populate universal data members
node = OrderedDict()
node['tree_index'] = tree_index
node['node_depth'] = node_depth
node['node_index'] = _get_node_index(tree, tree_index)
node['left_child'] = None
node['right_child'] = None
node['parent_index'] = parent_node
node['split_feature'] = _get_split_feature(tree, feature_names)
node['split_gain'] = None
node['threshold'] = None
node['decision_type'] = None
node['missing_direction'] = None
node['missing_type'] = None
node['value'] = None
node['weight'] = None
node['count'] = None
# Update values to reflect node type (leaf or split)
if _is_split_node(tree):
node['left_child'] = _get_node_index(tree['left_child'], tree_index)
node['right_child'] = _get_node_index(tree['right_child'], tree_index)
node['split_gain'] = tree['split_gain']
node['threshold'] = tree['threshold']
node['decision_type'] = tree['decision_type']
node['missing_direction'] = 'left' if tree['default_left'] else 'right'
node['missing_type'] = tree['missing_type']
node['value'] = tree['internal_value']
node['weight'] = tree['internal_weight']
node['count'] = tree['internal_count']
else:
node['value'] = tree['leaf_value']
if not _is_single_node_tree(tree):
node['weight'] = tree['leaf_weight']
node['count'] = tree['leaf_count']
return node
def tree_dict_to_node_list(tree, node_depth=1, tree_index=None,
feature_names=None, parent_node=None):
node = create_node_record(tree,
node_depth=node_depth,
tree_index=tree_index,
feature_names=feature_names,
parent_node=parent_node)
res = [node]
if _is_split_node(tree):
# traverse the next level of the tree
children = ['left_child', 'right_child']
for child in children:
subtree_list = tree_dict_to_node_list(
tree[child],
node_depth=node_depth + 1,
tree_index=tree_index,
feature_names=feature_names,
parent_node=node['node_index'])
                    # In tree format, "subtree_list" is a list of node records (dicts),
                    # and we extend the result list with them.
res.extend(subtree_list)
return res
model_dict = self.dump_model()
feature_names = model_dict['feature_names']
model_list = []
for tree in model_dict['tree_info']:
model_list.extend(tree_dict_to_node_list(tree['tree_structure'],
tree_index=tree['tree_index'],
feature_names=feature_names))
return DataFrame(model_list, columns=model_list[0].keys())
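    # A hedged usage sketch (requires pandas; `booster` is an already trained model):
    #
    #     df = booster.trees_to_dataframe()
    #     df[['tree_index', 'node_index', 'split_feature', 'split_gain']].head()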
def set_train_data_name(self, name):
"""Set the name to the training Dataset.
Parameters
----------
name : string
Name for the training Dataset.
Returns
-------
self : Booster
Booster with set training Dataset name.
"""
self._train_data_name = name
return self
def add_valid(self, data, name):
"""Add validation data.
Parameters
----------
data : Dataset
Validation data.
name : string
Name of validation data.
Returns
-------
self : Booster
Booster with set validation data.
"""
if not isinstance(data, Dataset):
            raise TypeError('Validation data should be Dataset instance, got {}'
                            .format(type(data).__name__))
if data._predictor is not self.__init_predictor:
raise LightGBMError("Add validation data failed, "
"you should use same predictor for these data")
_safe_call(_LIB.LGBM_BoosterAddValidData(
self.handle,
data.construct().handle))
self.valid_sets.append(data)
self.name_valid_sets.append(name)
self.__num_dataset += 1
self.__inner_predict_buffer.append(None)
self.__is_predicted_cur_iter.append(False)
return self
def reset_parameter(self, params):
"""Reset parameters of Booster.
Parameters
----------
params : dict
New parameters for Booster.
Returns
-------
self : Booster
Booster with new parameters.
"""
params_str = param_dict_to_str(params)
if params_str:
_safe_call(_LIB.LGBM_BoosterResetParameter(
self.handle,
c_str(params_str)))
self.params.update(params)
return self
def update(self, train_set=None, fobj=None):
"""Update Booster for one iteration.
Parameters
----------
train_set : Dataset or None, optional (default=None)
Training data.
If None, last training data is used.
fobj : callable or None, optional (default=None)
Customized objective function.
Should accept two parameters: preds, train_data,
and return (grad, hess).
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
grad : list or numpy 1-D array
The value of the first order derivative (gradient) for each sample point.
hess : list or numpy 1-D array
The value of the second order derivative (Hessian) for each sample point.
                For binary task, preds is the probability of the positive class (or the margin in case of specified ``fobj``).
                For multi-class task, preds is grouped by class_id first, then by row_id.
                If you want to get i-th row preds in j-th class, access it as score[j * num_data + i],
                and you should group grad and hess in this way as well.
Returns
-------
is_finished : bool
Whether the update was successfully finished.
"""
# need reset training data
if train_set is None and self.train_set_version != self.train_set.version:
train_set = self.train_set
is_the_same_train_set = False
else:
is_the_same_train_set = train_set is self.train_set and self.train_set_version == train_set.version
if train_set is not None and not is_the_same_train_set:
if not isinstance(train_set, Dataset):
                raise TypeError('Training data should be Dataset instance, got {}'
                                .format(type(train_set).__name__))
if train_set._predictor is not self.__init_predictor:
raise LightGBMError("Replace training data failed, "
"you should use same predictor for these data")
self.train_set = train_set
_safe_call(_LIB.LGBM_BoosterResetTrainingData(
self.handle,
self.train_set.construct().handle))
self.__inner_predict_buffer[0] = None
self.train_set_version = self.train_set.version
is_finished = ctypes.c_int(0)
if fobj is None:
if self.__set_objective_to_none:
raise LightGBMError('Cannot update due to null objective function.')
_safe_call(_LIB.LGBM_BoosterUpdateOneIter(
self.handle,
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
else:
if not self.__set_objective_to_none:
self.reset_parameter({"objective": "none"}).__set_objective_to_none = True
grad, hess = fobj(self.__inner_predict(0), self.train_set)
return self.__boost(grad, hess)
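    # A sketch of a custom objective matching the fobj contract documented above
    # (an L2-style objective, purely illustrative):
    #
    #     def l2_objective(preds, train_data):
    #         labels = train_data.get_label()
    #         grad = preds - labels          # first-order derivative
    #         hess = np.ones_like(preds)     # second-order derivative
    #         return grad, hess
    #
    #     booster.update(fobj=l2_objective)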
def __boost(self, grad, hess):
"""Boost Booster for one iteration with customized gradient statistics.
.. note::
For binary task, the score is probability of positive class (or margin in case of custom objective).
For multi-class task, the score is group by class_id first, then group by row_id.
If you want to get i-th row score in j-th class, the access way is score[j * num_data + i]
and you should group grad and hess in this way as well.
Parameters
----------
grad : list or numpy 1-D array
The first order derivative (gradient).
hess : list or numpy 1-D array
The second order derivative (Hessian).
Returns
-------
is_finished : bool
Whether the boost was successfully finished.
"""
grad = list_to_1d_numpy(grad, name='gradient')
hess = list_to_1d_numpy(hess, name='hessian')
assert grad.flags.c_contiguous
assert hess.flags.c_contiguous
if len(grad) != len(hess):
raise ValueError("Lengths of gradient({}) and hessian({}) don't match"
.format(len(grad), len(hess)))
is_finished = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterUpdateOneIterCustom(
self.handle,
grad.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
hess.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
ctypes.byref(is_finished)))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return is_finished.value == 1
def rollback_one_iter(self):
"""Rollback one iteration.
Returns
-------
self : Booster
Booster with rolled back one iteration.
"""
_safe_call(_LIB.LGBM_BoosterRollbackOneIter(
self.handle))
self.__is_predicted_cur_iter = [False for _ in range_(self.__num_dataset)]
return self
def current_iteration(self):
"""Get the index of the current iteration.
Returns
-------
cur_iter : int
The index of the current iteration.
"""
out_cur_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetCurrentIteration(
self.handle,
ctypes.byref(out_cur_iter)))
return out_cur_iter.value
def num_model_per_iteration(self):
"""Get number of models per iteration.
Returns
-------
model_per_iter : int
The number of models per iteration.
"""
model_per_iter = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumModelPerIteration(
self.handle,
ctypes.byref(model_per_iter)))
return model_per_iter.value
def num_trees(self):
"""Get number of weak sub-models.
Returns
-------
num_trees : int
The number of weak sub-models.
"""
num_trees = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterNumberOfTotalModel(
self.handle,
ctypes.byref(num_trees)))
return num_trees.value
def upper_bound(self):
"""Get upper bound value of a model.
Returns
-------
upper_bound : double
Upper bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetUpperBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def lower_bound(self):
"""Get lower bound value of a model.
Returns
-------
lower_bound : double
Lower bound value of the model.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLowerBoundValue(
self.handle,
ctypes.byref(ret)))
return ret.value
def eval(self, data, name, feval=None):
"""Evaluate for data.
Parameters
----------
data : Dataset
Data for the evaluating.
name : string
Name of the data.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, eval_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
eval_data : Dataset
The evaluation dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
                For binary task, preds is the probability of the positive class (or the margin in case of specified ``fobj``).
                For multi-class task, preds is grouped by class_id first, then by row_id.
                If you want to get i-th row preds in j-th class, access it as preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
if not isinstance(data, Dataset):
raise TypeError("Can only eval for Dataset instance")
data_idx = -1
if data is self.train_set:
data_idx = 0
else:
for i in range_(len(self.valid_sets)):
if data is self.valid_sets[i]:
data_idx = i + 1
break
# need to push new valid data
if data_idx == -1:
self.add_valid(data, name)
data_idx = self.__num_dataset - 1
return self.__inner_eval(name, data_idx, feval)
def eval_train(self, feval=None):
"""Evaluate for training data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
train_data : Dataset
The training dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
                For binary task, preds is the probability of the positive class (or the margin in case of specified ``fobj``).
                For multi-class task, preds is grouped by class_id first, then by row_id.
                If you want to get i-th row preds in j-th class, access it as preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return self.__inner_eval(self._train_data_name, 0, feval)
def eval_valid(self, feval=None):
"""Evaluate for validation data.
Parameters
----------
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, valid_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
preds : list or numpy 1-D array
The predicted values.
valid_data : Dataset
The validation dataset.
eval_name : string
The name of evaluation function (without whitespaces).
eval_result : float
The eval result.
is_higher_better : bool
                    Whether a higher eval result is better, e.g. AUC is ``is_higher_better``.
                For binary task, preds is the probability of the positive class (or the margin in case of specified ``fobj``).
                For multi-class task, preds is grouped by class_id first, then by row_id.
                If you want to get i-th row preds in j-th class, access it as preds[j * num_data + i].
Returns
-------
result : list
List with evaluation results.
"""
return [item for i in range_(1, self.__num_dataset)
for item in self.__inner_eval(self.name_valid_sets[i - 1], i, feval)]
def save_model(self, filename, num_iteration=None, start_iteration=0):
"""Save Booster to file.
Parameters
----------
filename : string
Filename to save Booster.
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
Returns
-------
self : Booster
Returns self.
"""
if num_iteration is None:
num_iteration = self.best_iteration
_safe_call(_LIB.LGBM_BoosterSaveModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
c_str(filename)))
_dump_pandas_categorical(self.pandas_categorical, filename)
return self
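    # A hedged save/load round trip (the file name is illustrative):
    #
    #     booster.save_model('model.txt', num_iteration=booster.best_iteration)
    #     restored = Booster(model_file='model.txt')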
def shuffle_models(self, start_iteration=0, end_iteration=-1):
"""Shuffle models.
Parameters
----------
start_iteration : int, optional (default=0)
The first iteration that will be shuffled.
end_iteration : int, optional (default=-1)
The last iteration that will be shuffled.
If <= 0, means the last available iteration.
Returns
-------
self : Booster
Booster with shuffled models.
"""
_safe_call(_LIB.LGBM_BoosterShuffleModels(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(end_iteration)))
return self
def model_from_string(self, model_str, verbose=True):
"""Load Booster from a string.
Parameters
----------
model_str : string
Model will be loaded from this string.
verbose : bool, optional (default=True)
Whether to print messages while loading model.
Returns
-------
self : Booster
Loaded Booster object.
"""
if self.handle is not None:
_safe_call(_LIB.LGBM_BoosterFree(self.handle))
self._free_buffer()
self.handle = ctypes.c_void_p()
out_num_iterations = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterLoadModelFromString(
c_str(model_str),
ctypes.byref(out_num_iterations),
ctypes.byref(self.handle)))
out_num_class = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumClasses(
self.handle,
ctypes.byref(out_num_class)))
if verbose:
            print('Finished loading model; %d iterations were used in total' % int(out_num_iterations.value))
self.__num_class = out_num_class.value
self.pandas_categorical = _load_pandas_categorical(model_str=model_str)
return self
def model_to_string(self, num_iteration=None, start_iteration=0):
"""Save Booster to string.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be saved.
If None, if the best iteration exists, it is saved; otherwise, all iterations are saved.
If <= 0, all iterations are saved.
start_iteration : int, optional (default=0)
Start index of the iteration that should be saved.
Returns
-------
str_repr : string
String representation of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, re-allocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterSaveModelToString(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = string_buffer.value.decode('utf-8')
ret += _dump_pandas_categorical(self.pandas_categorical)
return ret
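# Illustrative round-trip sketch (not part of the library source); assumes `bst` is a
# trained Booster:
#
#     model_str = bst.model_to_string(num_iteration=-1)   # <= 0 serializes all iterations
#     bst_copy = Booster(model_str=model_str)              # rebuild a Booster from the string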
def dump_model(self, num_iteration=None, start_iteration=0):
"""Dump Booster to JSON format.
Parameters
----------
num_iteration : int or None, optional (default=None)
Index of the iteration that should be dumped.
If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
If <= 0, all iterations are dumped.
start_iteration : int, optional (default=0)
Start index of the iteration that should be dumped.
Returns
-------
json_repr : dict
JSON format of Booster.
"""
if num_iteration is None:
num_iteration = self.best_iteration
buffer_len = 1 << 20
tmp_out_len = ctypes.c_int64(0)
string_buffer = ctypes.create_string_buffer(buffer_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int64(buffer_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
actual_len = tmp_out_len.value
# if buffer length is not long enough, reallocate a buffer
if actual_len > buffer_len:
string_buffer = ctypes.create_string_buffer(actual_len)
ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
_safe_call(_LIB.LGBM_BoosterDumpModel(
self.handle,
ctypes.c_int(start_iteration),
ctypes.c_int(num_iteration),
ctypes.c_int64(actual_len),
ctypes.byref(tmp_out_len),
ptr_string_buffer))
ret = json.loads(string_buffer.value.decode('utf-8'))
ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
default=json_default_with_numpy))
return ret
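# Illustrative sketch (not part of the library source); assumes `bst` is a trained
# Booster. The returned dict can be inspected like any JSON object:
#
#     model_json = bst.dump_model()
#     n_trees = len(model_json['tree_info'])
#     names = model_json.get('feature_names')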
def predict(self, data, num_iteration=None,
raw_score=False, pred_leaf=False, pred_contrib=False,
data_has_header=False, is_reshape=True, **kwargs):
"""Make a prediction.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for prediction.
If string, it represents the path to txt file.
num_iteration : int or None, optional (default=None)
Limit number of iterations in the prediction.
If None, if the best iteration exists, it is used; otherwise, all iterations are used.
If <= 0, all iterations are used (no limits).
raw_score : bool, optional (default=False)
Whether to predict raw scores.
pred_leaf : bool, optional (default=False)
Whether to predict leaf index.
pred_contrib : bool, optional (default=False)
Whether to predict feature contributions.
.. note::
If you want to get more explanations for your model's predictions using SHAP values,
like SHAP interaction values,
you can install the shap package (https://github.com/slundberg/shap).
Note that unlike the shap package, with ``pred_contrib`` we return a matrix with an extra
column, where the last column is the expected value.
data_has_header : bool, optional (default=False)
Whether the data has header.
Used only if data is string.
is_reshape : bool, optional (default=True)
If True, result is reshaped to [nrow, ncol].
**kwargs
Other parameters for the prediction.
Returns
-------
result : numpy array
Prediction result.
"""
predictor = self._to_predictor(copy.deepcopy(kwargs))
if num_iteration is None:
num_iteration = self.best_iteration
return predictor.predict(data, num_iteration,
raw_score, pred_leaf, pred_contrib,
data_has_header, is_reshape)
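# Illustrative sketch (not part of the library source); assumes `bst` is a trained
# Booster and `X_new` is a numpy array with the same feature layout as the training data:
#
#     preds = bst.predict(X_new)                         # best_iteration is used if it exists
#     raw = bst.predict(X_new, raw_score=True)           # scores before the objective transform
#     contribs = bst.predict(X_new, pred_contrib=True)   # per-feature contributions + expected-value column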
def refit(self, data, label, decay_rate=0.9, **kwargs):
"""Refit the existing Booster by new data.
Parameters
----------
data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
Data source for refit.
If string, it represents the path to txt file.
label : list, numpy 1-D array or pandas Series / one-column DataFrame
Label for refit.
decay_rate : float, optional (default=0.9)
Decay rate of refit,
will use ``leaf_output = decay_rate * old_leaf_output + (1.0 - decay_rate) * new_leaf_output`` to refit trees.
**kwargs
Other parameters for refit.
These parameters will be passed to ``predict`` method.
Returns
-------
result : Booster
Refitted Booster.
"""
if self.__set_objective_to_none:
raise LightGBMError('Cannot refit due to null objective function.')
predictor = self._to_predictor(copy.deepcopy(kwargs))
leaf_preds = predictor.predict(data, -1, pred_leaf=True)
nrow, ncol = leaf_preds.shape
train_set = Dataset(data, label, silent=True)
new_params = copy.deepcopy(self.params)
new_params['refit_decay_rate'] = decay_rate
new_booster = Booster(new_params, train_set)
# Copy models
_safe_call(_LIB.LGBM_BoosterMerge(
new_booster.handle,
predictor.handle))
leaf_preds = leaf_preds.reshape(-1)
ptr_data, type_ptr_data, _ = c_int_array(leaf_preds)
_safe_call(_LIB.LGBM_BoosterRefit(
new_booster.handle,
ptr_data,
ctypes.c_int(nrow),
ctypes.c_int(ncol)))
new_booster.network = self.network
new_booster.__attr = self.__attr.copy()
return new_booster
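# Illustrative sketch (not part of the library source); assumes `bst` was trained with a
# non-None objective and `X_new` / `y_new` are new data with the original feature layout:
#
#     bst_refit = bst.refit(X_new, y_new, decay_rate=0.9)
#
# Only leaf outputs are blended between old and new values as described above; the tree
# structure itself is kept unchanged.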
def get_leaf_output(self, tree_id, leaf_id):
"""Get the output of a leaf.
Parameters
----------
tree_id : int
The index of the tree.
leaf_id : int
The index of the leaf in the tree.
Returns
-------
result : float
The output of the leaf.
"""
ret = ctypes.c_double(0)
_safe_call(_LIB.LGBM_BoosterGetLeafValue(
self.handle,
ctypes.c_int(tree_id),
ctypes.c_int(leaf_id),
ctypes.byref(ret)))
return ret.value
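# Illustrative sketch (not part of the library source); assumes `bst` is a trained
# Booster with at least one tree:
#
#     value = bst.get_leaf_output(tree_id=0, leaf_id=0)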
def _to_predictor(self, pred_parameter=None):
"""Convert to predictor."""
predictor = _InnerPredictor(booster_handle=self.handle, pred_parameter=pred_parameter)
predictor.pandas_categorical = self.pandas_categorical
return predictor
def num_feature(self):
"""Get number of features.
Returns
-------
num_feature : int
The number of features.
"""
out_num_feature = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetNumFeature(
self.handle,
ctypes.byref(out_num_feature)))
return out_num_feature.value
def feature_name(self):
"""Get names of features.
Returns
-------
result : list
List with names of features.
"""
num_feature = self.num_feature()
# Get name of features
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [ctypes.create_string_buffer(reserved_string_buffer_size) for i in range_(num_feature)]
ptr_string_buffers = (ctypes.c_char_p * num_feature)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetFeatureNames(
self.handle,
num_feature,
ctypes.byref(tmp_out_len),
reserved_string_buffer_size,
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if num_feature != tmp_out_len.value:
raise ValueError("Length of feature names doesn't equal with num_feature")
if reserved_string_buffer_size < required_string_buffer_size.value:
raise BufferError(
"Allocated feature name buffer size ({}) was inferior to the needed size ({})."
.format(reserved_string_buffer_size, required_string_buffer_size.value)
)
return [string_buffers[i].value.decode('utf-8') for i in range_(num_feature)]
def feature_importance(self, importance_type='split', iteration=None):
"""Get feature importances.
Parameters
----------
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
iteration : int or None, optional (default=None)
Limit number of iterations in the feature importance calculation.
If None, if the best iteration exists, it is used; otherwise, all trees are used.
If <= 0, all trees are used (no limits).
Returns
-------
result : numpy array
Array with feature importances.
"""
if iteration is None:
iteration = self.best_iteration
if importance_type == "split":
importance_type_int = 0
elif importance_type == "gain":
importance_type_int = 1
else:
importance_type_int = -1
result = np.zeros(self.num_feature(), dtype=np.float64)
_safe_call(_LIB.LGBM_BoosterFeatureImportance(
self.handle,
ctypes.c_int(iteration),
ctypes.c_int(importance_type_int),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if importance_type_int == 0:
return result.astype(np.int32)
else:
return result
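# Illustrative sketch (not part of the library source); assumes `bst` is a trained
# Booster. Pairing importances with feature names gives a quick ranking:
#
#     imp = bst.feature_importance(importance_type='gain')
#     ranked = sorted(zip(bst.feature_name(), imp), key=lambda t: t[1], reverse=True)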
def get_split_value_histogram(self, feature, bins=None, xgboost_style=False):
"""Get split value histogram for the specified feature.
Parameters
----------
feature : int or string
The feature name or index the histogram is calculated for.
If int, interpreted as index.
If string, interpreted as name.
.. warning::
Categorical features are not supported.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, or int and > number of unique split values and ``xgboost_style=True``,
the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
xgboost_style : bool, optional (default=False)
Whether the returned result should be in the same form as it is in XGBoost.
If False, the returned value is tuple of 2 numpy arrays as it is in ``numpy.histogram()`` function.
If True, the returned value is matrix, in which the first column is the right edges of non-empty bins
and the second one is the histogram values.
Returns
-------
result_tuple : tuple of 2 numpy arrays
If ``xgboost_style=False``, the values of the histogram of used splitting values for the specified feature
and the bin edges.
result_array_like : numpy array or pandas DataFrame (if pandas is installed)
If ``xgboost_style=True``, the histogram of used splitting values for the specified feature.
"""
def add(root):
"""Recursively add thresholds."""
if 'split_index' in root: # non-leaf
if feature_names is not None and isinstance(feature, string_type):
split_feature = feature_names[root['split_feature']]
else:
split_feature = root['split_feature']
if split_feature == feature:
if isinstance(root['threshold'], string_type):
raise LightGBMError('Cannot compute split value histogram for the categorical feature')
else:
values.append(root['threshold'])
add(root['left_child'])
add(root['right_child'])
model = self.dump_model()
feature_names = model.get('feature_names')
tree_infos = model['tree_info']
values = []
for tree_info in tree_infos:
add(tree_info['tree_structure'])
if bins is None or isinstance(bins, integer_types) and xgboost_style:
n_unique = len(np.unique(values))
bins = max(min(n_unique, bins) if bins is not None else n_unique, 1)
hist, bin_edges = np.histogram(values, bins=bins)
if xgboost_style:
ret = np.column_stack((bin_edges[1:], hist))
ret = ret[ret[:, 1] > 0]
if PANDAS_INSTALLED:
return DataFrame(ret, columns=['SplitValue', 'Count'])
else:
return ret
else:
return hist, bin_edges
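# Illustrative sketch (not part of the library source); assumes `bst` is a trained
# Booster and 'feature_1' is a hypothetical non-categorical feature present in the model:
#
#     hist, bin_edges = bst.get_split_value_histogram('feature_1', bins=20)
#     table = bst.get_split_value_histogram('feature_1', xgboost_style=True)  # DataFrame if pandas is installed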
def __inner_eval(self, data_name, data_idx, feval=None):
"""Evaluate training or validation data."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
self.__get_eval_info()
ret = []
if self.__num_inner_eval > 0:
result = np.zeros(self.__num_inner_eval, dtype=np.float64)
tmp_out_len = ctypes.c_int(0)
_safe_call(_LIB.LGBM_BoosterGetEval(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
result.ctypes.data_as(ctypes.POINTER(ctypes.c_double))))
if tmp_out_len.value != self.__num_inner_eval:
raise ValueError("Wrong length of eval results")
for i in range_(self.__num_inner_eval):
ret.append((data_name, self.__name_inner_eval[i],
result[i], self.__higher_better_inner_eval[i]))
if feval is not None:
if data_idx == 0:
cur_data = self.train_set
else:
cur_data = self.valid_sets[data_idx - 1]
feval_ret = feval(self.__inner_predict(data_idx), cur_data)
if isinstance(feval_ret, list):
for eval_name, val, is_higher_better in feval_ret:
ret.append((data_name, eval_name, val, is_higher_better))
else:
eval_name, val, is_higher_better = feval_ret
ret.append((data_name, eval_name, val, is_higher_better))
return ret
def __inner_predict(self, data_idx):
"""Predict for training and validation dataset."""
if data_idx >= self.__num_dataset:
raise ValueError("Data_idx should be smaller than number of dataset")
if self.__inner_predict_buffer[data_idx] is None:
if data_idx == 0:
n_preds = self.train_set.num_data() * self.__num_class
else:
n_preds = self.valid_sets[data_idx - 1].num_data() * self.__num_class
self.__inner_predict_buffer[data_idx] = np.zeros(n_preds, dtype=np.float64)
# avoid predicting many times in one iteration
if not self.__is_predicted_cur_iter[data_idx]:
tmp_out_len = ctypes.c_int64(0)
data_ptr = self.__inner_predict_buffer[data_idx].ctypes.data_as(ctypes.POINTER(ctypes.c_double))
_safe_call(_LIB.LGBM_BoosterGetPredict(
self.handle,
ctypes.c_int(data_idx),
ctypes.byref(tmp_out_len),
data_ptr))
if tmp_out_len.value != len(self.__inner_predict_buffer[data_idx]):
raise ValueError("Wrong length of predict results for data %d" % (data_idx))
self.__is_predicted_cur_iter[data_idx] = True
return self.__inner_predict_buffer[data_idx]
def __get_eval_info(self):
"""Get inner evaluation count and names."""
if self.__need_reload_eval_info:
self.__need_reload_eval_info = False
out_num_eval = ctypes.c_int(0)
# Get num of inner evals
_safe_call(_LIB.LGBM_BoosterGetEvalCounts(
self.handle,
ctypes.byref(out_num_eval)))
self.__num_inner_eval = out_num_eval.value
if self.__num_inner_eval > 0:
# Get name of evals
tmp_out_len = ctypes.c_int(0)
reserved_string_buffer_size = 255
required_string_buffer_size = ctypes.c_size_t(0)
string_buffers = [
ctypes.create_string_buffer(reserved_string_buffer_size) for i in range_(self.__num_inner_eval)
]
ptr_string_buffers = (ctypes.c_char_p * self.__num_inner_eval)(*map(ctypes.addressof, string_buffers))
_safe_call(_LIB.LGBM_BoosterGetEvalNames(
self.handle,
self.__num_inner_eval,
ctypes.byref(tmp_out_len),
reserved_string_buffer_size,
ctypes.byref(required_string_buffer_size),
ptr_string_buffers))
if self.__num_inner_eval != tmp_out_len.value:
raise ValueError("Length of eval names doesn't equal with num_evals")
if reserved_string_buffer_size < required_string_buffer_size.value:
raise BufferError(
"Allocated eval name buffer size ({}) was inferior to the needed size ({})."
.format(reserved_string_buffer_size, required_string_buffer_size.value)
)
self.__name_inner_eval = \
[string_buffers[i].value.decode('utf-8') for i in range_(self.__num_inner_eval)]
self.__higher_better_inner_eval = \
[name.startswith(('auc', 'ndcg@', 'map@')) for name in self.__name_inner_eval]
def attr(self, key):
"""Get attribute string from the Booster.
Parameters
----------
key : string
The name of the attribute.
Returns
-------
value : string or None
The attribute value.
Returns None if attribute does not exist.
"""
return self.__attr.get(key, None)
def set_attr(self, **kwargs):
"""Set attributes to the Booster.
Parameters
----------
**kwargs
The attributes to set.
Setting a value to None deletes an attribute.
Returns
-------
self : Booster
Booster with set attributes.
"""
for key, value in kwargs.items():
if value is not None:
if not isinstance(value, string_type):
raise ValueError("Only string values are accepted")
self.__attr[key] = value
else:
self.__attr.pop(key, None)
return self
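# Illustrative sketch (not part of the library source); the attribute name 'run_id' is
# hypothetical and only string values are accepted:
#
#     bst.set_attr(run_id='exp-42')
#     assert bst.attr('run_id') == 'exp-42'
#     bst.set_attr(run_id=None)   # setting None deletes the attribute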
| 1 | 24,070 |
It seems Python 2 doesn't support `end=''`. @StrikerRUS @henry0312 any better solution? Or should we drop support for Python 2?
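A minimal sketch of the usual workarounds, assuming the aim is to keep Python 2 support (the message text and the variable `n` below are hypothetical):

    # must appear at the top of the module, before other imports
    from __future__ import print_function
    import sys

    print('processed %d rows' % n, end='')      # valid on both Python 2 and 3 once the future import is in place
    sys.stdout.write('processed %d rows' % n)   # alternative that avoids print() entirely
    sys.stdout.flush()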
|
microsoft-LightGBM
|
cpp
|
@@ -4135,6 +4135,12 @@ bool PreCallValidateCreateImageView(layer_data *device_data, const VkImageViewCr
}
}
}
+ else
+ {
+ skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-parameter",
+ "vkCreateImageView() image handle is VK_NULL_HANDLE. Image is either not created or it is destroyed at this point.");
+ }
return skip;
}
| 1 |
/* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
* Copyright (C) 2015-2018 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
#include <sstream>
#include <string>
#include "vk_enum_string_helper.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "buffer_validation.h"
void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
auto it = pCB->imageLayoutMap.find(imgpair);
if (it != pCB->imageLayoutMap.end()) {
it->second.layout = layout;
} else {
assert(imgpair.hasSubresource);
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(device_data, pCB, imgpair.image, imgpair.subresource, node)) {
node.initialLayout = layout;
}
SetLayout(device_data, pCB, imgpair, {node.initialLayout, layout});
}
}
template <class OBJECT, class LAYOUT>
void SetLayout(layer_data *device_data, OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
}
template <class OBJECT, class LAYOUT>
void SetLayout(layer_data *device_data, OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout,
VkImageAspectFlags aspectMask) {
if (imgpair.subresource.aspectMask & aspectMask) {
imgpair.subresource.aspectMask = aspectMask;
SetLayout(device_data, pObject, imgpair, layout);
}
}
// Set the layout in supplied map
void SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout layout) {
auto it = imageLayoutMap.find(imgpair);
if (it != imageLayoutMap.end()) {
it->second.layout = layout; // Update
} else {
imageLayoutMap[imgpair].layout = layout; // Insert
}
}
bool FindLayoutVerifyNode(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, ImageSubresourcePair imgpair,
IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
if (imgsubIt == pCB->imageLayoutMap.end()) {
return false;
}
if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image),
kVUID_Core_DrawState_InvalidLayout,
"Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
HandleToUint64(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout),
string_VkImageLayout(imgsubIt->second.layout));
}
if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image),
kVUID_Core_DrawState_InvalidLayout,
"Cannot query for VkImage 0x%" PRIx64
" layout when combined aspect mask %d has multiple initial layout types: %s and %s",
HandleToUint64(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout),
string_VkImageLayout(imgsubIt->second.initialLayout));
}
node = imgsubIt->second;
return true;
}
bool FindLayoutVerifyLayout(layer_data const *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
const debug_report_data *report_data = core_validation::GetReportData(device_data);
VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = (*core_validation::GetImageLayoutMap(device_data)).find(imgpair);
if (imgsubIt == (*core_validation::GetImageLayoutMap(device_data)).end()) {
return false;
}
if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(imgpair.image),
kVUID_Core_DrawState_InvalidLayout,
"Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
HandleToUint64(imgpair.image), oldAspectMask, string_VkImageLayout(layout),
string_VkImageLayout(imgsubIt->second.layout));
}
layout = imgsubIt->second.layout;
return true;
}
// Find layout(s) on the command buffer level
bool FindCmdBufLayout(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, VkImage image, VkImageSubresource range,
IMAGE_CMD_BUF_LAYOUT_NODE &node) {
ImageSubresourcePair imgpair = {image, true, range};
node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {image, false, VkImageSubresource()};
auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
if (imgsubIt == pCB->imageLayoutMap.end()) return false;
// TODO: This is ostensibly a find function but it changes state here
node = imgsubIt->second;
}
return true;
}
// Find layout(s) on the global level
bool FindGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = (*core_validation::GetImageLayoutMap(device_data)).find(imgpair);
if (imgsubIt == (*core_validation::GetImageLayoutMap(device_data)).end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
bool FindLayouts(layer_data *device_data, VkImage image, std::vector<VkImageLayout> &layouts) {
auto sub_data = (*core_validation::GetImageSubresourceMap(device_data)).find(image);
if (sub_data == (*core_validation::GetImageSubresourceMap(device_data)).end()) return false;
auto image_state = GetImageState(device_data, image);
if (!image_state) return false;
bool ignoreGlobal = false;
// TODO: Make this robust for >1 aspect mask. For now it will just ignore potential errors in this case.
if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
ignoreGlobal = true;
}
for (auto imgsubpair : sub_data->second) {
if (ignoreGlobal && !imgsubpair.hasSubresource) continue;
auto img_data = (*core_validation::GetImageLayoutMap(device_data)).find(imgsubpair);
if (img_data != (*core_validation::GetImageLayoutMap(device_data)).end()) {
layouts.push_back(img_data->second.layout);
}
}
return true;
}
bool FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
imgpair.subresource.aspectMask = aspectMask;
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) {
return false;
}
layout = imgsubIt->second.layout;
return true;
}
// find layout in supplied map
bool FindLayout(layer_data *device_data, const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
// Image+subresource not found, look for image handle w/o subresource
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
auto imgsubIt = imageLayoutMap.find(imgpair);
if (imgsubIt == imageLayoutMap.end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
// Set the layout on the global level
void SetGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
VkImage &image = imgpair.image;
auto &lmap = (*core_validation::GetImageLayoutMap(device_data));
auto data = lmap.find(imgpair);
if (data != lmap.end()) {
data->second.layout = layout; // Update
} else {
lmap[imgpair].layout = layout; // Insert
}
auto &image_subresources = (*core_validation::GetImageSubresourceMap(device_data))[image];
auto subresource = std::find(image_subresources.begin(), image_subresources.end(), imgpair);
if (subresource == image_subresources.end()) {
image_subresources.push_back(imgpair);
}
}
// Set the layout on the cmdbuf level
void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
auto it = pCB->imageLayoutMap.find(imgpair);
if (it != pCB->imageLayoutMap.end()) {
it->second = node; // Update
} else {
pCB->imageLayoutMap[imgpair] = node; // Insert
}
}
// Set image layout for given VkImageSubresourceRange struct
void SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
VkImageSubresourceRange image_subresource_range, const VkImageLayout &layout) {
assert(image_state);
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
for (uint32_t level_index = 0; level_index < image_subresource_range.levelCount; ++level_index) {
uint32_t level = image_subresource_range.baseMipLevel + level_index;
for (uint32_t layer_index = 0; layer_index < image_subresource_range.layerCount; layer_index++) {
uint32_t layer = image_subresource_range.baseArrayLayer + layer_index;
VkImageSubresource sub = {image_subresource_range.aspectMask, level, layer};
// TODO: If ImageView was created with depth or stencil, transition both layouts as the aspectMask is ignored and both
// are used. Verify that the extra implicit layout is OK for descriptor set layout validation
if (image_subresource_range.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (FormatIsDepthAndStencil(image_state->createInfo.format)) {
sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
}
}
SetLayout(device_data, cb_node, image_state->image, sub, layout);
}
}
}
// Set image layout for given VkImageSubresourceLayers struct
void SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
VkImageSubresourceLayers image_subresource_layers, const VkImageLayout &layout) {
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
VkImageSubresourceRange image_subresource_range;
image_subresource_range.aspectMask = image_subresource_layers.aspectMask;
image_subresource_range.baseArrayLayer = image_subresource_layers.baseArrayLayer;
image_subresource_range.layerCount = image_subresource_layers.layerCount;
image_subresource_range.baseMipLevel = image_subresource_layers.mipLevel;
image_subresource_range.levelCount = 1;
SetImageLayout(device_data, cb_node, image_state, image_subresource_range, layout);
}
// Set image layout for all slices of an image view
void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state,
const VkImageLayout &layout) {
assert(view_state);
IMAGE_STATE *image_state = GetImageState(device_data, view_state->create_info.image);
VkImageSubresourceRange sub_range = view_state->create_info.subresourceRange;
// When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
SetImageLayout(device_data, cb_node, image_state, sub_range, layout);
}
void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImageView imageView, const VkImageLayout &layout) {
auto view_state = GetImageViewState(device_data, imageView);
SetImageViewLayout(device_data, cb_node, view_state, layout);
}
bool ValidateRenderPassLayoutAgainstFramebufferImageUsage(layer_data *device_data, RenderPassCreateVersion rp_version,
VkImageLayout layout, VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) {
bool skip = false;
const auto report_data = core_validation::GetReportData(device_data);
auto image_state = GetImageState(device_data, image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
if (!image_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"Render Pass begin with renderpass 0x%" PRIx64 " uses framebuffer 0x%" PRIx64 " where pAttachments[%" PRIu32
"] = image view 0x%" PRIx64 ", which refers to an invalid image",
HandleToUint64(renderpass), HandleToUint64(framebuffer), attachment_index, HandleToUint64(image_view));
return skip;
}
auto image_usage = image_state->createInfo.usage;
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
if (GetDeviceExtensions(device_data)->vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2KHR-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), vuid,
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"Layout/usage mismatch for attachment %u in render pass 0x%" PRIx64
" - the %s is %s but the image attached to framebuffer 0x%" PRIx64 " via image view 0x%" PRIx64
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
attachment_index, HandleToUint64(framebuffer), variable_name, string_VkImageLayout(layout),
HandleToUint64(renderpass), HandleToUint64(image_view));
}
}
return skip;
}
bool VerifyFramebufferAndRenderPassLayouts(layer_data *device_data, RenderPassCreateVersion rp_version, GLOBAL_CB_NODE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) {
bool skip = false;
auto const pRenderPassInfo = GetRenderPassState(device_data, pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebufferInfo = framebuffer_state->createInfo;
const auto report_data = core_validation::GetReportData(device_data);
auto render_pass = GetRenderPassState(device_data, pRenderPassBegin->renderPass)->renderPass;
auto framebuffer = framebuffer_state->framebuffer;
if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
const VkImageView &image_view = framebufferInfo.pAttachments[i];
auto view_state = GetImageViewState(device_data, image_view);
if (!view_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(pRenderPassBegin->renderPass), "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass() :framebuffer 0x%" PRIx64 " pAttachments[%" PRIu32 "] = 0x%" PRIx64
" is not a valid VkImageView handle",
HandleToUint64(framebuffer_state->framebuffer), i, HandleToUint64(image_view));
continue;
}
const VkImage &image = view_state->create_info.image;
const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
auto initial_layout = pRenderPassInfo->pAttachments[i].initialLayout;
auto final_layout = pRenderPassInfo->pAttachments[i].finalLayout;
// TODO: Do not iterate over every possibility - consolidate where possible
for (uint32_t j = 0; j < subRange.levelCount; j++) {
uint32_t level = subRange.baseMipLevel + j;
for (uint32_t k = 0; k < subRange.layerCount; k++) {
uint32_t layer = subRange.baseArrayLayer + k;
VkImageSubresource sub = {subRange.aspectMask, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(device_data, pCB, image, sub, node)) {
// Missing layouts will be added during state update
continue;
}
if (initial_layout != VK_IMAGE_LAYOUT_UNDEFINED && initial_layout != node.layout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the previous known layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(initial_layout), string_VkImageLayout(node.layout));
}
}
}
ValidateRenderPassLayoutAgainstFramebufferImageUsage(device_data, rp_version, initial_layout, image, image_view,
framebuffer, render_pass, i, "initial layout");
ValidateRenderPassLayoutAgainstFramebufferImageUsage(device_data, rp_version, final_layout, image, image_view, framebuffer,
render_pass, i, "final layout");
}
for (uint32_t j = 0; j < pRenderPassInfo->subpassCount; ++j) {
auto &subpass = pRenderPassInfo->pSubpasses[j];
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = framebufferInfo.pAttachments[attachment_ref.attachment];
auto view_state = GetImageViewState(device_data, image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(device_data, rp_version, attachment_ref.layout, image,
image_view, framebuffer, render_pass,
attachment_ref.attachment, "input attachment layout");
}
}
}
for (uint32_t k = 0; k < pRenderPassInfo->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = framebufferInfo.pAttachments[attachment_ref.attachment];
auto view_state = GetImageViewState(device_data, image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(device_data, rp_version, attachment_ref.layout, image,
image_view, framebuffer, render_pass,
attachment_ref.attachment, "color attachment layout");
if (subpass.pResolveAttachments) {
ValidateRenderPassLayoutAgainstFramebufferImageUsage(
device_data, rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
attachment_ref.attachment, "resolve attachment layout");
}
}
}
}
if (pRenderPassInfo->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = framebufferInfo.pAttachments[attachment_ref.attachment];
auto view_state = GetImageViewState(device_data, image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(device_data, rp_version, attachment_ref.layout, image,
image_view, framebuffer, render_pass,
attachment_ref.attachment, "input attachment layout");
}
}
}
}
return skip;
}
void TransitionAttachmentRefLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2KHR &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = GetAttachmentImageViewState(device_data, pFramebuffer, ref.attachment);
if (image_view) {
SetImageViewLayout(device_data, pCB, image_view, ref.layout);
}
}
}
void TransitionSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(device_data, pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(device_data, pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(device_data, pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
bool ValidateImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE const *pCB, const VkImageMemoryBarrier *mem_barrier,
uint32_t level, uint32_t layer, VkImageAspectFlags aspect) {
if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
return false;
}
VkImageSubresource sub = {aspect, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(device_data, pCB, mem_barrier->image, sub, node)) {
return false;
}
bool skip = false;
if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (node.layout != mem_barrier->oldLayout) {
skip = log_msg(core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
"VUID-VkImageMemoryBarrier-oldLayout-01197",
"For image 0x%" PRIx64
" you cannot transition the layout of aspect=%d level=%d layer=%d from %s when current layout is %s.",
HandleToUint64(mem_barrier->image), aspect, level, layer, string_VkImageLayout(mem_barrier->oldLayout),
string_VkImageLayout(node.layout));
}
return skip;
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void TransitionBeginRenderPassLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First transition into initialLayout
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(device_data, framebuffer_state, i);
if (view_state) {
SetImageViewLayout(device_data, cb_state, view_state, rpci->pAttachments[i].initialLayout);
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(device_data, cb_state, render_pass_state, 0, framebuffer_state);
}
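// Illustrative note (not part of the layer): for an attachment declared with
// initialLayout = VK_IMAGE_LAYOUT_UNDEFINED that subpass 0 uses as a color attachment in
// VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, the loop above first records UNDEFINED for the
// whole attachment view, and TransitionSubpassLayouts() then overwrites it with
// COLOR_ATTACHMENT_OPTIMAL, so later layout queries in this command buffer see the
// subpass 0 layout as the current one.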
void TransitionImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
uint32_t level, uint32_t layer, VkImageAspectFlags aspect_mask, VkImageAspectFlags aspect) {
if (!(aspect_mask & aspect)) {
return;
}
VkImageSubresource sub = {aspect, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(device_data, pCB, mem_barrier->image, sub, node)) {
pCB->image_layout_change_count++; // Change the version of this data to force revalidation
SetLayout(device_data, pCB, mem_barrier->image, sub,
IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
return;
}
if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid
}
SetLayout(device_data, pCB, mem_barrier->image, sub, mem_barrier->newLayout);
}
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 !=
(aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR | VK_IMAGE_ASPECT_PLANE_2_BIT_KHR))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool ValidateBarrierLayoutToImageUsage(layer_data *device_data, const VkImageMemoryBarrier *img_barrier, bool new_not_old,
VkImageUsageFlags usage_flags, const char *func_name) {
const auto report_data = core_validation::GetReportData(device_data);
bool skip = false;
const VkImageLayout layout = (new_not_old) ? img_barrier->newLayout : img_barrier->oldLayout;
std::string msg_code = kVUIDUndefined; // sentinel value meaning "no error"
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01208";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01209";
}
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01210";
}
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
if ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01211";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01212";
}
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
if ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-01213";
}
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
if ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0) {
msg_code = "VUID-VkImageMemoryBarrier-oldLayout-02088";
}
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (msg_code != kVUIDUndefined) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(img_barrier->image), msg_code,
"%s: Image barrier 0x%p %sLayout=%s is not compatible with image 0x%" PRIx64 " usage flags 0x%" PRIx32 ".",
func_name, static_cast<const void *>(img_barrier), ((new_not_old) ? "new" : "old"),
string_VkImageLayout(layout), HandleToUint64(img_barrier->image), usage_flags);
}
return skip;
}
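// Illustrative example (not part of the layer; the values below are hypothetical): a
// barrier such as
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
// recorded for an image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT is reported by
// the function above under "VUID-VkImageMemoryBarrier-oldLayout-01213".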
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
// required, copies should be made and smart or unique pointers used in some other structure (or this one refactored)
const VkImageMemoryBarrier *barrier;
};
using ImageBarrierScoreboardSubresMap = std::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry>;
using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Verify image barriers are compatible with the images they reference.
bool ValidateBarriersToImages(layer_data *device_data, GLOBAL_CB_NODE const *cb_state, uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) {
bool skip = false;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
auto img_barrier = &pImageMemoryBarriers[i];
if (!img_barrier) continue;
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts
// at a per sub-resource level
if (img_barrier->oldLayout != img_barrier->newLayout) {
ImageBarrierScoreboardEntry new_entry{i, img_barrier};
auto image_it = layout_transitions.find(img_barrier->image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second;
auto subres_it = subres_map.find(img_barrier->subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
if ((entry.barrier->newLayout != img_barrier->oldLayout) &&
(img_barrier->oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier->subresourceRange;
skip = log_msg(
core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
"VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: pImageMemoryBarrier[%u] conflicts with earlier entry pImageMemoryBarrier[%u]. Image 0x%" PRIx64
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
func_name, i, entry.index, HandleToUint64(img_barrier->image), range.aspectMask, range.baseMipLevel,
range.levelCount, range.baseArrayLayer, range.layerCount, string_VkImageLayout(img_barrier->oldLayout),
string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier->subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier->image][img_barrier->subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(device_data, img_barrier->image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |= ValidateBarrierLayoutToImageUsage(device_data, img_barrier, false, usage_flags, func_name);
skip |= ValidateBarrierLayoutToImageUsage(device_data, img_barrier, true, usage_flags, func_name);
// Make sure layout is able to be transitioned, currently only presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= log_msg(
core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image), 0,
"Attempting to transition shared presentable image 0x%" PRIx64
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
HandleToUint64(img_barrier->image), string_VkImageLayout(img_barrier->oldLayout),
string_VkImageLayout(img_barrier->newLayout));
}
}
VkImageCreateInfo *image_create_info = &(GetImageState(device_data, img_barrier->image)->createInfo);
// For a Depth/Stencil image both aspects MUST be set
if (FormatIsDepthAndStencil(image_create_info->format)) {
auto const aspect_mask = img_barrier->subresourceRange.aspectMask;
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
skip |=
log_msg(core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image),
"VUID-VkImageMemoryBarrier-image-01207",
"%s: Image barrier 0x%p references image 0x%" PRIx64
" of format %s that must have the depth and stencil aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, static_cast<const void *>(img_barrier), HandleToUint64(img_barrier->image),
string_VkFormat(image_create_info->format), aspect_mask);
}
}
uint32_t level_count = ResolveRemainingLevels(&img_barrier->subresourceRange, image_create_info->mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&img_barrier->subresourceRange, image_create_info->arrayLayers);
for (uint32_t j = 0; j < level_count; j++) {
uint32_t level = img_barrier->subresourceRange.baseMipLevel + j;
for (uint32_t k = 0; k < layer_count; k++) {
uint32_t layer = img_barrier->subresourceRange.baseArrayLayer + k;
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT);
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT);
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT);
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer,
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer,
VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
skip |= ValidateImageAspectLayout(device_data, cb_state, img_barrier, level, layer,
VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
}
}
}
return skip;
}
static bool IsReleaseOp(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkImageMemoryBarrier const *barrier) {
if (!IsTransferOp(barrier)) return false;
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
return pool && IsReleaseOp<VkImageMemoryBarrier, true>(pool, barrier);
}
template <typename Barrier>
bool ValidateQFOTransferBarrierUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
uint32_t barrier_count, const Barrier *barriers) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
bool skip = false;
const auto report_data = core_validation::GetReportData(device_data);
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
const char *transfer_type = nullptr;
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
const BarrierRecord *barrier_record = nullptr;
if (IsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) && !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barriers[b]);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!IsSpecial(barriers[b].srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barriers[b]);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInCB(),
"%s: %s at index %" PRIu32 " %s queue ownership of %s (0x%" PRIx64 "), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier recorded in this command buffer.",
func_name, barrier_name, b, transfer_type, handle_name, HandleToUint64(barrier_record->handle),
barrier_record->srcQueueFamilyIndex, barrier_record->dstQueueFamilyIndex);
}
}
return skip;
}
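// Record the QFO transfer barriers in this command buffer's release/acquire sets for later submit-time validation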
template <typename Barrier>
void RecordQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t barrier_count, const Barrier *barriers) {
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
if (IsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barriers[b]) && !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barriers[b]);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!IsSpecial(barriers[b].srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barriers[b]);
}
}
}
bool ValidateBarriersQFOTransferUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(device_data, func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
skip |= ValidateQFOTransferBarrierUniqueness(device_data, func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
return skip;
}
void RecordBarriersQFOTransfers(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) {
RecordQFOTransferBarriers(device_data, cb_state, bufferBarrierCount, pBufferMemBarriers);
RecordQFOTransferBarriers(device_data, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
template <typename BarrierRecord, typename Scoreboard>
static bool ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const GLOBAL_CB_NODE *cb_state,
const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state));
if (!inserted.second && inserted.first->second != cb_state) {
        // This is a duplication (but don't report duplicates from the same CB, as we do that at record time)
skip = log_msg(
report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (0x%" PRIx64 "), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier submitted in this batch from command buffer 0x%" PRIx64 ".",
"vkQueueSubmit()", BarrierRecord::BarrierName(), operation, BarrierRecord::HandleName(), HandleToUint64(barrier.handle),
barrier.srcQueueFamilyIndex, barrier.dstQueueFamilyIndex, HandleToUint64(inserted.first->second));
}
return skip;
}
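// Submit-time check of a command buffer's recorded QFO barriers: duplicate releases (against the global pending-release
// map or this submission's scoreboard) are warnings, and acquires without a matching pending release are errors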
template <typename Barrier>
static bool ValidateQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
QFOTransferCBScoreboards<Barrier> *scoreboards) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
bool skip = false;
const auto report_data = core_validation::GetReportData(device_data);
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
core_validation::GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (0x%" PRIx64 "), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, HandleToUint64(found->handle),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), BarrierRecord::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s aquiring ownership of %s (0x%" PRIx64
"), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, HandleToUint64(acquire.handle),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
bool ValidateQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
bool skip = false;
skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(device_data, cb_state, qfo_image_scoreboards);
skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(device_data, cb_state, qfo_buffer_scoreboards);
return skip;
}
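// Submit-time record: add this command buffer's release barriers to the global pending-release map and erase the
// pending releases consumed by its acquire barriers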
template <typename Barrier>
static void RecordQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
core_validation::GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<Barrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
void RecordQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(device_data, cb_state);
RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(device_data, cb_state);
}
// Remove the pending QFO release records from the global set
// Note that the type of the handle argument is constrained to match the Barrier type
// The defaulted BarrierRecord argument allows us to declare the type once, but is not intended to be specified by the caller
template <typename Barrier, typename BarrierRecord = QFOTransferBarrier<Barrier>>
static void EraseQFOReleaseBarriers(layer_data *device_data, const typename BarrierRecord::HandleType &handle) {
GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
core_validation::GetGlobalQFOReleaseBarrierMap(device_data, typename BarrierRecord::Tag());
global_release_barriers.erase(handle);
}
// Avoid making the template globally visible by exporting the one instance of it we need.
void EraseQFOImageRelaseBarriers(layer_data *device_data, const VkImage &image) {
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(device_data, image);
}
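// Apply the layout transitions described by the given image memory barriers to the layouts tracked for this command buffer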
void TransitionImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t memBarrierCount,
const VkImageMemoryBarrier *pImgMemBarriers) {
for (uint32_t i = 0; i < memBarrierCount; ++i) {
auto mem_barrier = &pImgMemBarriers[i];
if (!mem_barrier) continue;
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
if (IsReleaseOp(device_data, cb_state, mem_barrier)) {
continue;
}
VkImageCreateInfo *image_create_info = &(GetImageState(device_data, mem_barrier->image)->createInfo);
uint32_t level_count = ResolveRemainingLevels(&mem_barrier->subresourceRange, image_create_info->mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&mem_barrier->subresourceRange, image_create_info->arrayLayers);
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if ((0 != (image_create_info->flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) &&
(mem_barrier->subresourceRange.baseArrayLayer == 0) && (layer_count == 1)) {
layer_count = image_create_info->extent.depth; // Treat each depth slice as a layer subresource
}
// For multiplanar formats, IMAGE_ASPECT_COLOR is equivalent to adding the aspect of the individual planes
VkImageAspectFlags aspect_mask = mem_barrier->subresourceRange.aspectMask;
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
if (FormatIsMultiplane(image_create_info->format)) {
if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
aspect_mask &= ~VK_IMAGE_ASPECT_COLOR_BIT;
aspect_mask |= (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT);
if (FormatPlaneCount(image_create_info->format) > 2) {
aspect_mask |= VK_IMAGE_ASPECT_PLANE_2_BIT;
}
}
}
}
for (uint32_t j = 0; j < level_count; j++) {
uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
for (uint32_t k = 0; k < layer_count; k++) {
uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_COLOR_BIT);
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_DEPTH_BIT);
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_STENCIL_BIT);
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_METADATA_BIT);
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
TransitionImageAspectLayout(device_data, cb_state, mem_barrier, level, layer, aspect_mask,
VK_IMAGE_ASPECT_PLANE_2_BIT_KHR);
}
}
}
}
}
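// Verify that the given subresource layers are in the explicit layout the caller specified and, when an optimal layout
// is provided, that the explicit layout is an acceptable alternative (GENERAL or SHARED_PRESENT where applicable)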
bool VerifyImageLayout(layer_data const *device_data, GLOBAL_CB_NODE const *cb_node, IMAGE_STATE *image_state,
VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout,
const char *caller, const std::string &layout_invalid_msg_code, const std::string &layout_mismatch_msg_code,
bool *error) {
const auto report_data = core_validation::GetReportData(device_data);
const auto image = image_state->image;
bool skip = false;
for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
uint32_t layer = i + subLayers.baseArrayLayer;
VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (FindCmdBufLayout(device_data, cb_node, image, sub, node)) {
if (node.layout != explicit_layout) {
*error = true;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_mismatch_msg_code,
"%s: Cannot use image 0x%" PRIx64
" (layer=%u mip=%u) with specific layout %s that doesn't match the actual current layout %s.",
caller, HandleToUint64(image), layer, subLayers.mipLevel, string_VkImageLayout(explicit_layout),
string_VkImageLayout(node.layout));
}
}
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_node->commandBuffer),
kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance image 0x%" PRIx64 " layout should be %s instead of GENERAL.", caller,
HandleToUint64(image), string_VkImageLayout(optimal_layout));
}
} else if (GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
layout_invalid_msg_code,
"Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                    string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), layout_invalid_msg_code,
"%s: Layout for image 0x%" PRIx64 " is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
HandleToUint64(image), string_VkImageLayout(explicit_layout), string_VkImageLayout(optimal_layout));
}
}
return skip;
}
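// At render pass end, transition each framebuffer attachment's tracked layout to the finalLayout from the render pass create info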
void TransitionFinalSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto renderPass = GetRenderPassState(device_data, pRenderPassBegin->renderPass);
if (!renderPass) return;
const VkRenderPassCreateInfo2KHR *pRenderPassInfo = renderPass->createInfo.ptr();
if (framebuffer_state) {
for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
auto view_state = GetAttachmentImageViewState(device_data, framebuffer_state, i);
if (view_state) {
SetImageViewLayout(device_data, pCB, view_state, pRenderPassInfo->pAttachments[i].finalLayout);
}
}
}
}
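// Validate vkCreateImage parameters against device limits, format capabilities, and enabled features/extensions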
bool PreCallValidateCreateImage(layer_data *device_data, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-format-00943", "vkCreateImage: VkFormat for image must not be VK_FORMAT_UNDEFINED.");
return skip;
}
const char *format_string = string_VkFormat(pCreateInfo->format);
if ((pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && (VK_IMAGE_TYPE_2D != pCreateInfo->imageType)) {
std::stringstream ss;
ss << "vkCreateImage: Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-00949", "%s.", ss.str().c_str());
}
const VkPhysicalDeviceLimits *device_limits = &(GetPhysicalDeviceProperties(device_data)->limits);
VkImageFormatProperties format_limits; // Format limits may exceed general device limits
GetImageFormatProperties(device_data, pCreateInfo, &format_limits);
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
std::stringstream ss;
ss << "vkCreateImage: Image mip levels exceed image format maxMipLevels for format " << format_string;
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-mipLevels-02255", "%s.", ss.str().c_str());
}
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
std::stringstream ss;
ss << "vkCreateImage: Image usage flags include a frame buffer attachment bit and image width exceeds device "
"maxFramebufferWidth";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00964", "%s.", ss.str().c_str());
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
std::stringstream ss;
ss << "vkCreateImage: Image usage flags include a frame buffer attachment bit and image height exceeds device "
"maxFramebufferHeight";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-usage-00965", "%s.", ss.str().c_str());
}
uint64_t total_size = (uint64_t)pCreateInfo->extent.width * (uint64_t)pCreateInfo->extent.height *
(uint64_t)pCreateInfo->extent.depth * (uint64_t)pCreateInfo->arrayLayers *
(uint64_t)pCreateInfo->samples * (uint64_t)FormatSize(pCreateInfo->format);
// Round up to imageGranularity boundary
VkDeviceSize imageGranularity = GetPhysicalDeviceProperties(device_data)->limits.bufferImageGranularity;
uint64_t ig_mask = imageGranularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
if (total_size > format_limits.maxResourceSize) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
kVUID_Core_Image_InvalidFormatLimitsViolation,
"CreateImage resource size exceeds allowable maximum Image resource size = 0x%" PRIxLEAST64
", maximum resource size = 0x%" PRIxLEAST64 " ",
total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-arrayLayers-02256",
"CreateImage arrayLayers=%d exceeds allowable maximum supported by format of %d.", pCreateInfo->arrayLayers,
format_limits.maxArrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, 0,
"VUID-VkImageCreateInfo-samples-02258", "CreateImage samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_SPARSE_ALIASED_BIT) &&
(!GetEnabledFeatures(device_data)->core.sparseResidencyAliased)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkImageCreateInfo-flags-01924",
"vkCreateImage(): the sparseResidencyAliased device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT set.");
}
if (GetDeviceExtensions(device_data)->vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC_LDR(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
// TODO: Add Maintenance2 VUID
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUIDUndefined,
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
return skip;
}
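// Record state for a newly created image and seed the subresource/layout maps with its initial layout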
void PostCallRecordCreateImage(layer_data *device_data, const VkImageCreateInfo *pCreateInfo, VkImage *pImage) {
IMAGE_LAYOUT_NODE image_state;
image_state.layout = pCreateInfo->initialLayout;
image_state.format = pCreateInfo->format;
GetImageMap(device_data)->insert(std::make_pair(*pImage, std::unique_ptr<IMAGE_STATE>(new IMAGE_STATE(*pImage, pCreateInfo))));
ImageSubresourcePair subpair{*pImage, false, VkImageSubresource()};
(*core_validation::GetImageSubresourceMap(device_data))[*pImage].push_back(subpair);
(*core_validation::GetImageLayoutMap(device_data))[subpair] = image_state;
}
bool PreCallValidateDestroyImage(layer_data *device_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
const CHECK_DISABLED *disabled = core_validation::GetDisables(device_data);
*image_state = core_validation::GetImageState(device_data, image);
*obj_struct = {HandleToUint64(image), kVulkanObjectTypeImage};
if (disabled->destroy_image) return false;
bool skip = false;
if (*image_state) {
skip |= core_validation::ValidateObjectNotInUse(device_data, *image_state, *obj_struct, "vkDestroyImage",
"VUID-vkDestroyImage-image-01000");
}
return skip;
}
void PreCallRecordDestroyImage(layer_data *device_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
core_validation::InvalidateCommandBuffers(device_data, image_state->cb_bindings, obj_struct);
// Clean up memory mapping, bindings and range references for image
for (auto mem_binding : image_state->GetBoundMemory()) {
auto mem_info = core_validation::GetMemObjInfo(device_data, mem_binding);
if (mem_info) {
core_validation::RemoveImageMemoryRange(obj_struct.handle, mem_info);
}
}
core_validation::ClearMemoryObjectBindings(device_data, obj_struct.handle, kVulkanObjectTypeImage);
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(device_data, image);
// Remove image from imageMap
core_validation::GetImageMap(device_data)->erase(image);
std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *imageSubresourceMap =
core_validation::GetImageSubresourceMap(device_data);
const auto &sub_entry = imageSubresourceMap->find(image);
if (sub_entry != imageSubresourceMap->end()) {
for (const auto &pair : sub_entry->second) {
core_validation::GetImageLayoutMap(device_data)->erase(pair);
}
imageSubresourceMap->erase(sub_entry);
}
}
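// Validate that an image and subresource range are usable with vkCmdClearColorImage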
bool ValidateImageAttributes(layer_data *device_data, IMAGE_STATE *image_state, VkImageSubresourceRange range) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageAspect, str);
}
if (FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
} else if (FormatIsCompressed(image_state->createInfo.format)) {
char const str[] = "vkCmdClearColorImage called with compressed image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00007", "%s.", str);
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
char const str[] = "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-vkCmdClearColorImage-image-00002", "%s.", str);
}
return skip;
}
uint32_t ResolveRemainingLevels(const VkImageSubresourceRange *range, uint32_t mip_levels) {
// Return correct number of mip levels taking into account VK_REMAINING_MIP_LEVELS
uint32_t mip_level_count = range->levelCount;
if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
mip_level_count = mip_levels - range->baseMipLevel;
}
return mip_level_count;
}
uint32_t ResolveRemainingLayers(const VkImageSubresourceRange *range, uint32_t layers) {
// Return correct number of layers taking into account VK_REMAINING_ARRAY_LAYERS
uint32_t array_layer_count = range->layerCount;
if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
array_layer_count = layers - range->baseArrayLayer;
}
return array_layer_count;
}
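// Verify that the layout given to a clear command is allowed and matches any layouts already recorded for the affected
// subresources in this command buffer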
bool VerifyClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state,
VkImageSubresourceRange range, VkImageLayout dest_image_layout, const char *func_name) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
uint32_t level_count = ResolveRemainingLevels(&range, image_state->createInfo.mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&range, image_state->createInfo.arrayLayers);
if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), kVUID_Core_DrawState_InvalidImageLayout,
"%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
}
} else if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR == dest_image_layout) {
if (!GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image) {
// TODO: Add unique error id when available.
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                HandleToUint64(image_state->image), kVUIDUndefined,
"Must enable VK_KHR_shared_presentable_image extension before creating images with a layout type "
"of VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.");
} else {
if (image_state->shared_presentable) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                        HandleToUint64(image_state->image), kVUIDUndefined,
"Layout for shared presentable cleared image is %s but can only be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
string_VkImageLayout(dest_image_layout));
}
}
} else {
std::string error_code = "VUID-vkCmdClearColorImage-imageLayout-00005";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00012";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), error_code,
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
}
for (uint32_t level_index = 0; level_index < level_count; ++level_index) {
uint32_t level = level_index + range.baseMipLevel;
for (uint32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
uint32_t layer = layer_index + range.baseArrayLayer;
VkImageSubresource sub = {range.aspectMask, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (FindCmdBufLayout(device_data, cb_node, image_state->image, sub, node)) {
if (node.layout != dest_image_layout) {
std::string error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
error_code, "%s: Cannot clear an image whose layout is %s and doesn't match the current layout %s.",
func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout));
}
}
}
}
return skip;
}
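// Record the destination layout for any cleared subresources that don't already have a layout tracked in this command buffer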
void RecordClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
VkImageLayout dest_image_layout) {
VkImageCreateInfo *image_create_info = &(GetImageState(device_data, image)->createInfo);
uint32_t level_count = ResolveRemainingLevels(&range, image_create_info->mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&range, image_create_info->arrayLayers);
for (uint32_t level_index = 0; level_index < level_count; ++level_index) {
uint32_t level = level_index + range.baseMipLevel;
for (uint32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
uint32_t layer = layer_index + range.baseArrayLayer;
VkImageSubresource sub = {range.aspectMask, level, layer};
IMAGE_CMD_BUF_LAYOUT_NODE node;
if (!FindCmdBufLayout(device_data, cb_node, image, sub, node)) {
SetLayout(device_data, cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
}
}
}
}
bool PreCallValidateCmdClearColorImage(layer_data *dev_data, VkCommandBuffer commandBuffer, VkImage image,
VkImageLayout imageLayout, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto image_state = GetImageState(dev_data, image);
if (cb_node && image_state) {
skip |=
ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmdQueueFlags(dev_data, cb_node, "vkCmdClearColorImage()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdClearColorImage-commandBuffer-cmdpool");
skip |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (GetApiVersion(dev_data) >= VK_API_VERSION_1_1 || GetDeviceExtensions(dev_data)->vk_khr_maintenance1) {
skip |=
ValidateImageFormatFeatureFlags(dev_data, image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993", "VUID-vkCmdClearColorImage-image-01993");
}
skip |= InsideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(dev_data, image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(dev_data, image_state, pRanges[i]);
skip |= VerifyClearImageLayout(dev_data, cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
}
return skip;
}
// This state recording routine is shared between ClearColorImage and ClearDepthStencilImage
void PreCallRecordCmdClearImage(layer_data *dev_data, VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
auto cb_node = GetCBNode(dev_data, commandBuffer);
auto image_state = GetImageState(dev_data, image);
if (cb_node && image_state) {
AddCommandBufferBindingImage(dev_data, cb_node, image_state);
for (uint32_t i = 0; i < rangeCount; ++i) {
RecordClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout);
}
}
}
bool PreCallValidateCmdClearDepthStencilImage(layer_data *device_data, VkCommandBuffer commandBuffer, VkImage image,
VkImageLayout imageLayout, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
auto cb_node = GetCBNode(device_data, commandBuffer);
auto image_state = GetImageState(device_data, image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(device_data, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdClearDepthStencilImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearDepthStencilImage-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (GetApiVersion(device_data) >= VK_API_VERSION_1_1 || GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(device_data, image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
"vkCmdClearDepthStencilImage", "VUID-vkCmdClearDepthStencilImage-image-01994",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |=
InsideRenderPass(device_data, cb_node, "vkCmdClearDepthStencilImage()", "VUID-vkCmdClearDepthStencilImage-renderpass");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(device_data, image_state, pRanges[i], param_name.c_str());
skip |=
VerifyClearImageLayout(device_data, cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
char const str[] =
"vkCmdClearDepthStencilImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_InvalidImageAspect, str);
}
}
if (image_state && !FormatIsDepthOrStencil(image_state->createInfo.format)) {
char const str[] = "vkCmdClearDepthStencilImage called without a depth/stencil image.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00014", "%s.", str);
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
char const str[] =
"vkCmdClearDepthStencilImage() called with an image that was not created with the VK_IMAGE_USAGE_TRANSFER_DST_BIT "
"set.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-vkCmdClearDepthStencilImage-image-00009", "%s.", str);
}
}
return skip;
}
// Returns true if the ranges [start, start + start_offset) and [end, end + end_offset) overlap
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
// Returns true if source area of first copy region intersects dest area of second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
static bool RegionIntersects(const VkImageCopy *rgn0, const VkImageCopy *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
switch (type) {
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
// Returns non-zero if offset and extent exceed image extents
static const uint32_t x_bit = 1;
static const uint32_t y_bit = 2;
static const uint32_t z_bit = 4;
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
// Extents/depths cannot be negative but checks left in for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= z_bit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= y_bit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= x_bit;
}
return result;
}
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// For image copies between compressed/uncompressed formats, the extent is provided in source image texels
// Destination image texel extents must be adjusted by block size for the dest validation checks
VkExtent3D GetAdjustedDestImageExtent(VkFormat src_format, VkFormat dst_format, VkExtent3D extent) {
VkExtent3D adjusted_extent = extent;
if ((FormatIsCompressed(src_format) && (!FormatIsCompressed(dst_format)))) {
VkExtent3D block_size = FormatCompressedTexelBlockExtent(src_format);
adjusted_extent.width /= block_size.width;
adjusted_extent.height /= block_size.height;
adjusted_extent.depth /= block_size.depth;
} else if ((!FormatIsCompressed(src_format) && (FormatIsCompressed(dst_format)))) {
VkExtent3D block_size = FormatCompressedTexelBlockExtent(dst_format);
adjusted_extent.width *= block_size.width;
adjusted_extent.height *= block_size.height;
adjusted_extent.depth *= block_size.depth;
}
return adjusted_extent;
}
// Returns the effective extent of an image subresource, adjusted for mip level and array depth.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
const uint32_t mip = subresource->mipLevel;
// Return zero extent if mip level doesn't exist
if (mip >= img->createInfo.mipLevels) {
return VkExtent3D{0, 0, 0};
}
    // Don't allow mip adjustment to create 0 dim, but pass along a 0 if that's what the subresource specified
VkExtent3D extent = img->createInfo.extent;
if (img->createInfo.flags & VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV) {
extent.width = (0 == extent.width ? 0 : std::max(2U, 1 + ((extent.width - 1) >> mip)));
extent.height = (0 == extent.height ? 0 : std::max(2U, 1 + ((extent.height - 1) >> mip)));
extent.depth = (0 == extent.depth ? 0 : std::max(2U, 1 + ((extent.depth - 1) >> mip)));
} else {
extent.width = (0 == extent.width ? 0 : std::max(1U, extent.width >> mip));
extent.height = (0 == extent.height ? 0 : std::max(1U, extent.height >> mip));
extent.depth = (0 == extent.depth ? 0 : std::max(1U, extent.depth >> mip));
}
// Image arrays have an effective z extent that isn't diminished by mip level
if (VK_IMAGE_TYPE_3D != img->createInfo.imageType) {
extent.depth = img->createInfo.arrayLayers;
}
return extent;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Test if the extent argument has any dimensions set to 0.
static inline bool IsExtentSizeZero(const VkExtent3D *extent) {
return ((extent->width == 0) || (extent->height == 0) || (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
if (pPool) {
granularity =
GetPhysDevProperties(device_data)->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format)) {
auto block_size = FormatCompressedTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
}
}
return granularity;
}
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member,
std::string vuid) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
// integer multiples of the image transfer granularity.
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
const VkImageType image_type, const uint32_t i, const char *function, const char *member,
std::string vuid) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
// integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
// subresource extent dimensions.
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width,
granularity->height, granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height,
extent->depth, subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
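// Check that a copy region's mipLevel is within the image's mip level count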
bool ValidateImageMipLevel(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const std::string &vuid) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"In %s, pRegions[%u].%s.mipLevel is %u, but provided image %" PRIx64 " has %u mip levels.", function, i,
member, mip_level, HandleToUint64(img->image), img->createInfo.mipLevels);
}
return skip;
}
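// Check that a copy region's baseArrayLayer and layerCount fit within the image's array layer count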
bool ValidateImageArrayLayerRange(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
const uint32_t base_layer, const uint32_t layer_count, const uint32_t i, const char *function,
const char *member, const std::string &vuid) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), vuid,
"In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
"%u, but provided image %" PRIx64 " has %u array layers.",
function, i, member, base_layer, layer_count, HandleToUint64(img->image), img->createInfo.arrayLayers);
}
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
const IMAGE_STATE *img, const VkBufferImageCopy *region,
const uint32_t i, const char *function, const std::string &vuid) {
bool skip = false;
VkExtent3D granularity = GetScaledItg(device_data, cb_node, img);
    skip |= CheckItgOffset(device_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset", vuid);
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
    skip |= CheckItgExtent(device_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent,
img->createInfo.imageType, i, function, "imageExtent", vuid);
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
bool ValidateCopyImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
const IMAGE_STATE *src_img, const IMAGE_STATE *dst_img,
const VkImageCopy *region, const uint32_t i, const char *function) {
bool skip = false;
// Source image checks
VkExtent3D granularity = GetScaledItg(device_data, cb_node, src_img);
    skip |= CheckItgOffset(device_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset",
"VUID-vkCmdCopyImage-srcOffset-01783");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(src_img, &region->srcSubresource);
const VkExtent3D extent = region->extent;
    skip |= CheckItgExtent(device_data, cb_node, &extent, &region->srcOffset, &granularity, &subresource_extent,
src_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-srcOffset-01783");
// Destination image checks
granularity = GetScaledItg(device_data, cb_node, dst_img);
    skip |= CheckItgOffset(device_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset",
"VUID-vkCmdCopyImage-dstOffset-01784");
// Adjust dest extent, if necessary
const VkExtent3D dest_effective_extent =
GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
    subresource_extent = GetImageSubresourceExtent(dst_img, &region->dstSubresource);
    skip |= CheckItgExtent(device_data, cb_node, &dest_effective_extent, &region->dstOffset, &granularity, &subresource_extent,
dst_img->createInfo.imageType, i, function, "extent", "VUID-vkCmdCopyImage-dstOffset-01784");
return skip;
}
// Validate contents of a VkImageCopy struct
bool ValidateImageCopyData(const layer_data *device_data, const debug_report_data *report_data, const uint32_t regionCount,
const VkImageCopy *ic_regions, const IMAGE_STATE *src_state, const IMAGE_STATE *dst_state) {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageCopy region = ic_regions[i];
// For comp<->uncomp copies, the copy extent for the dest image must be adjusted
const VkExtent3D src_copy_extent = region.extent;
const VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
// Do all checks on source image
//
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00146",
"vkCmdCopyImage(): pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must "
"be 0 and 1, respectively.",
i, region.srcOffset.y, src_copy_extent.height);
}
}
// VUID-VkImageCopy-srcImage-01785
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01785",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.srcOffset.z, src_copy_extent.depth);
}
// VUID-VkImageCopy-srcImage-01787
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-01787",
"vkCmdCopyImage(): pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.srcOffset.z);
}
if (GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] srcSubresource.baseArrayLayer is %d and "
"srcSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
}
// Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
bool ext_ycbcr = GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion;
if (FormatIsCompressed(src_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
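            // Single-plane _422 formats are grouped with compressed formats here because they use a multi-texel block
            // (nominally 2x1x1), so the same offset/extent alignment rules apply.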
const VkExtent3D block_size = FormatCompressedTexelBlockExtent(src_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
(SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
(SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(src_state, &(region.srcSubresource));
if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
(src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
(src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
"height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-srcImage-01730" : "VUID-VkImageCopy-extent-00160";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(src_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
}
} // Compressed
// Do all checks on dest image
//
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-00152",
"vkCmdCopyImage(): pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
"these must be 0 and 1, respectively.",
i, region.dstOffset.y, dst_copy_extent.height);
}
}
// VUID-VkImageCopy-dstImage-01786
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01786",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
"and 1, respectively.",
i, region.dstOffset.z, dst_copy_extent.depth);
}
// VUID-VkImageCopy-dstImage-01788
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-dstImage-01788",
"vkCmdCopyImage(): pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.", i,
region.dstOffset.z);
}
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
// VU01199 changed with mnt1
if (GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), "VUID-VkImageCopy-srcImage-00141",
"vkCmdCopyImage(): pRegion[%d] dstSubresource.baseArrayLayer is %d and "
"dstSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
}
// Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
if (FormatIsCompressed(dst_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
const VkExtent3D block_size = FormatCompressedTexelBlockExtent(dst_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
(SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
(SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = GetImageSubresourceExtent(dst_state, &(region.dstSubresource));
if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
(dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
(dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
"texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
"height (%d).",
i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
std::string vuid = ext_ycbcr ? "VUID-VkImageCopy-dstImage-01734" : "VUID-VkImageCopy-extent-00165";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(dst_state->image), vuid,
"vkCmdCopyImage(): pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).",
i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth);
}
} // Compressed
}
return skip;
}
// vkCmdCopyImage checks that only apply if the multiplane extension is enabled
bool CopyImageMultiplaneValidation(const layer_data *dev_data, VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state,
const IMAGE_STATE *dst_image_state, const VkImageCopy region) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(dev_data);
// Neither image is multiplane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) {
// If neither image is multi-plane the aspectMask member of src and dst must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
std::stringstream ss;
ss << "vkCmdCopyImage: Copy between non-multiplane images with differing aspectMasks ( 0x" << std::hex
<< region.srcSubresource.aspectMask << " and 0x" << region.dstSubresource.aspectMask << " )";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01551", "%s.", ss.str().c_str());
}
} else {
// Source image multiplane checks
uint32_t planes = FormatPlaneCount(src_image_state->createInfo.format);
VkImageAspectFlags aspect = region.srcSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01552", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Source image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01553", "%s.", ss.str().c_str());
}
// Single-plane to multi-plane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Source image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01557", "%s.", ss.str().c_str());
}
// Dest image multiplane checks
planes = FormatPlaneCount(dst_image_state->createInfo.format);
aspect = region.dstSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 2-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01554", "%s.", ss.str().c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT_KHR) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT_KHR) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT_KHR)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Dest image aspect mask (0x" << std::hex << aspect << ") is invalid for 3-plane format";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstImage-01555", "%s.", ss.str().c_str());
}
// Multi-plane to single-plane
if ((FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Dest image aspect mask (0x" << std::hex << aspect << ") is not VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcImage-01556", "%s.", ss.str().c_str());
}
}
return skip;
}
bool PreCallValidateCmdCopyImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
IMAGE_STATE *dst_image_state, uint32_t region_count, const VkImageCopy *regions,
VkImageLayout src_image_layout, VkImageLayout dst_image_layout) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
skip = ValidateImageCopyData(device_data, report_data, region_count, regions, src_image_state, dst_image_state);
VkCommandBuffer command_buffer = cb_node->commandBuffer;
for (uint32_t i = 0; i < region_count; i++) {
const VkImageCopy region = regions[i];
// For comp/uncomp copies, the copy extent for the dest image must be adjusted
VkExtent3D src_copy_extent = region.extent;
VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_image_state->createInfo.format, dst_image_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
        skip |= ValidateImageSubresourceLayers(device_data, cb_node, &region.srcSubresource, "vkCmdCopyImage", "srcSubresource", i);
        skip |= ValidateImageSubresourceLayers(device_data, cb_node, &region.dstSubresource, "vkCmdCopyImage", "dstSubresource", i);
skip |= ValidateImageMipLevel(device_data, cb_node, src_image_state, region.srcSubresource.mipLevel, i, "vkCmdCopyImage",
"srcSubresource", "VUID-vkCmdCopyImage-srcSubresource-01696");
skip |= ValidateImageMipLevel(device_data, cb_node, dst_image_state, region.dstSubresource.mipLevel, i, "vkCmdCopyImage",
"dstSubresource", "VUID-vkCmdCopyImage-dstSubresource-01697");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
region.srcSubresource.layerCount, i, "vkCmdCopyImage", "srcSubresource",
"VUID-vkCmdCopyImage-srcSubresource-01698");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
region.dstSubresource.layerCount, i, "vkCmdCopyImage", "dstSubresource",
"VUID-vkCmdCopyImage-dstSubresource-01699");
if (GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
// No chance of mismatch if we're overriding depth slice count
if (!slice_override) {
// The number of depth slices in srcSubresource and dstSubresource must match
// Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
uint32_t src_slices =
(VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
: region.srcSubresource.layerCount);
uint32_t dst_slices =
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
: region.dstSubresource.layerCount);
if (src_slices != dst_slices) {
std::stringstream ss;
ss << "vkCmdCopyImage: number of depth slices in source and destination subresources for pRegions[" << i
<< "] do not match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
}
}
} else {
// For each region the layerCount member of srcSubresource and dstSubresource must match
if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
std::stringstream ss;
ss << "vkCmdCopyImage: number of layers in source and destination subresources for pRegions[" << i
<< "] do not match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-extent-00140", "%s.", ss.str().c_str());
}
}
// Do multiplane-specific checks, if extension enabled
if (GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
skip |= CopyImageMultiplaneValidation(device_data, command_buffer, src_image_state, dst_image_state, region);
}
if (!GetDeviceExtensions(device_data)->vk_khr_sampler_ycbcr_conversion) {
// not multi-plane, the aspectMask member of srcSubresource and dstSubresource must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
char const str[] = "vkCmdCopyImage: Src and dest aspectMasks for each region must match";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00137", "%s.", str);
}
}
// For each region, the aspectMask member of srcSubresource must be present in the source image
if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_image_state->createInfo.format)) {
std::stringstream ss;
ss << "vkCmdCopyImage: pRegion[" << i
<< "] srcSubresource.aspectMask cannot specify aspects not present in source image";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00142", "%s.", ss.str().c_str());
}
// For each region, the aspectMask member of dstSubresource must be present in the destination image
if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_image_state->createInfo.format)) {
std::stringstream ss;
ss << "vkCmdCopyImage: pRegion[" << i << "] dstSubresource.aspectMask cannot specify aspects not present in dest image";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-aspectMask-00143", "%s.", ss.str().c_str());
}
// Check region extents for 1D-1D, 2D-2D, and 3D-3D copies
if (src_image_state->createInfo.imageType == dst_image_state->createInfo.imageType) {
// The source region specified by a given element of regions must be a region that is contained within srcImage
VkExtent3D img_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
            if (0 != ExceedsBounds(&region.srcOffset, &src_copy_extent, &img_extent)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Source pRegion[" << i << "] with mipLevel [ " << region.srcSubresource.mipLevel
<< " ], offset [ " << region.srcOffset.x << ", " << region.srcOffset.y << ", " << region.srcOffset.z
<< " ], extent [ " << src_copy_extent.width << ", " << src_copy_extent.height << ", " << src_copy_extent.depth
<< " ] exceeds the source image dimensions";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00122", "%s.", ss.str().c_str());
}
// The destination region specified by a given element of regions must be a region that is contained within dst_image
img_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
            if (0 != ExceedsBounds(&region.dstOffset, &dst_copy_extent, &img_extent)) {
std::stringstream ss;
ss << "vkCmdCopyImage: Dest pRegion[" << i << "] with mipLevel [ " << region.dstSubresource.mipLevel
<< " ], offset [ " << region.dstOffset.x << ", " << region.dstOffset.y << ", " << region.dstOffset.z
<< " ], extent [ " << dst_copy_extent.width << ", " << dst_copy_extent.height << ", " << dst_copy_extent.depth
<< " ] exceeds the destination image dimensions";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00123", "%s.", ss.str().c_str());
}
}
        // Each dimension's offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = GetImageSubresourceExtent(src_image_state, &(region.srcSubresource));
if (slice_override) src_copy_extent.depth = depth_slices;
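        // ExceedsBounds returns a bitmask (x_bit/y_bit/z_bit) identifying each dimension where offset + extent exceeds the
        // subresource extent, so each axis can be reported against its own VUID.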
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00144",
"vkCmdCopyImage: Source image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00145",
"vkCmdCopyImage: Source image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-srcOffset-00147",
"vkCmdCopyImage: Source image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
}
// Adjust dest extent if necessary
subresource_extent = GetImageSubresourceExtent(dst_image_state, &(region.dstSubresource));
if (slice_override) dst_copy_extent.depth = depth_slices;
extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
if (extent_check & x_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00150",
"vkCmdCopyImage: Dest image pRegion %1d x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
}
if (extent_check & y_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00151",
"vkCmdCopyImage: Dest image pRegion %1d y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
}
if (extent_check & z_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-VkImageCopy-dstOffset-00153",
"vkCmdCopyImage: Dest image pRegion %1d z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
}
// The union of all source regions, and the union of all destination regions, specified by the elements of regions,
// must not overlap in memory
if (src_image_state->image == dst_image_state->image) {
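            // Self-copy: compare this region's source footprint against the destination footprint of every region (including
            // itself), since source and destination are the same image.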
for (uint32_t j = 0; j < region_count; j++) {
                if (RegionIntersects(&region, &regions[j], src_image_state->createInfo.imageType,
FormatIsMultiplane(src_image_state->createInfo.format))) {
std::stringstream ss;
ss << "vkCmdCopyImage: pRegions[" << i << "] src overlaps with pRegions[" << j << "].";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-pRegions-00124", "%s.", ss.str().c_str());
}
}
}
}
// The formats of src_image and dst_image must be compatible. Formats are considered compatible if their texel size in bytes
    // is the same for both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT because
    // both texels are 4 bytes in size. Depth/stencil formats must match exactly.
if (FormatIsDepthOrStencil(src_image_state->createInfo.format) || FormatIsDepthOrStencil(dst_image_state->createInfo.format)) {
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image depth/stencil formats.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), kVUID_Core_DrawState_MismatchedImageFormat, str);
}
} else {
if (!FormatSizesAreEqual(src_image_state->createInfo.format, dst_image_state->createInfo.format, region_count, regions)) {
char const str[] = "vkCmdCopyImage called with unmatched source and dest image format sizes.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00135", "%s.", str);
}
}
// Source and dest image sample counts must match
if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
char const str[] = "vkCmdCopyImage() called on image pair with non-identical sample counts.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(command_buffer), "VUID-vkCmdCopyImage-srcImage-00136", "%s", str);
}
skip |= ValidateMemoryIsBoundToImage(device_data, src_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-srcImage-00127");
skip |= ValidateMemoryIsBoundToImage(device_data, dst_image_state, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-dstImage-00132");
// Validate that SRC & DST images have correct usage flags set
skip |= ValidateImageUsageFlags(device_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyImage-srcImage-00126", "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(device_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyImage-dstImage-00131", "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
if (GetApiVersion(device_data) >= VK_API_VERSION_1_1 || GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
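        // VK_FORMAT_FEATURE_TRANSFER_SRC/DST_BIT only exist with maintenance1 (core in Vulkan 1.1), so these format-feature
        // checks are gated on its availability.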
skip |=
ValidateImageFormatFeatureFlags(device_data, src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-srcImage-01995", "VUID-vkCmdCopyImage-srcImage-01995");
skip |=
ValidateImageFormatFeatureFlags(device_data, dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdCopyImage()",
"VUID-vkCmdCopyImage-dstImage-01996", "VUID-vkCmdCopyImage-dstImage-01996");
}
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdCopyImage()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyImage-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass");
bool hit_error = false;
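    // Images created for shared presentation may legitimately remain in VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, so a different
    // layout VUID is selected for them below.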
const std::string invalid_src_layout_vuid =
(src_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-srcImageLayout-01917"
: "VUID-vkCmdCopyImage-srcImageLayout-00129";
const std::string invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-dstImageLayout-01395"
: "VUID-vkCmdCopyImage-dstImageLayout-00134";
for (uint32_t i = 0; i < region_count; ++i) {
skip |= VerifyImageLayout(device_data, cb_node, src_image_state, regions[i].srcSubresource, src_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImage()", invalid_src_layout_vuid,
"VUID-vkCmdCopyImage-srcImageLayout-00128", &hit_error);
skip |= VerifyImageLayout(device_data, cb_node, dst_image_state, regions[i].dstSubresource, dst_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyImage()", invalid_dst_layout_vuid,
"VUID-vkCmdCopyImage-dstImageLayout-00133", &hit_error);
        skip |= ValidateCopyImageTransferGranularityRequirements(device_data, cb_node, src_image_state, dst_image_state,
                                                                 &regions[i], i, "vkCmdCopyImage()");
}
return skip;
}
void PreCallRecordCmdCopyImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
IMAGE_STATE *dst_image_state, uint32_t region_count, const VkImageCopy *regions,
VkImageLayout src_image_layout, VkImageLayout dst_image_layout) {
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < region_count; ++i) {
SetImageLayout(device_data, cb_node, src_image_state, regions[i].srcSubresource, src_image_layout);
SetImageLayout(device_data, cb_node, dst_image_state, regions[i].dstSubresource, dst_image_layout);
}
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(device_data, cb_node, src_image_state);
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
(sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height))
return false;
return true;
}
bool PreCallValidateCmdClearAttachments(layer_data *device_data, VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) {
GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (cb_node) {
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdClearAttachments()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdClearAttachments-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to any draw command and clears the entire render area
        if (!cb_node->hasDrawCmd && (rectCount > 0) &&
            (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times when an app needs to use ClearAttachments (generally when reusing a buffer inside of a render
            // pass). This warning should be made more specific. It'd be best to avoid triggering this test if it's a use that
            // must call CmdClearAttachments.
skip |= log_msg(
report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_ClearCmdBeforeDraw,
"vkCmdClearAttachments() issued on command buffer object 0x%" PRIx64
" prior to any Draw Cmds. It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
HandleToUint64(commandBuffer));
}
skip |= OutsideRenderPass(device_data, cb_node, "vkCmdClearAttachments()", "VUID-vkCmdClearAttachments-renderpass");
}
// Validate that attachment is in reference list of active subpass
    if (cb_node && cb_node->activeRenderPass) {
const VkRenderPassCreateInfo2KHR *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
auto framebuffer = GetFramebufferState(device_data, cb_node->activeFramebuffer);
for (uint32_t i = 0; i < attachmentCount; i++) {
auto clear_desc = &pAttachments[i];
VkImageView image_view = VK_NULL_HANDLE;
if (0 == clear_desc->aspectMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-requiredbitmask", " ");
} else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00020", " ");
} else if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
if (clear_desc->colorAttachment >= subpass_desc->colorAttachmentCount) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-aspectMask-00015",
"vkCmdClearAttachments() color attachment index %d out of range for active subpass %d.",
clear_desc->colorAttachment, cb_node->activeSubpass);
} else if (subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(commandBuffer),
kVUID_Core_DrawState_MissingAttachmentReference,
"vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
clear_desc->colorAttachment);
} else {
image_view = framebuffer->createInfo
.pAttachments[subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment];
}
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] =
"vkCmdClearAttachments() aspectMask [%d] must set only VK_IMAGE_ASPECT_COLOR_BIT of a color attachment.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-00019", str, i);
}
} else { // Must be depth and/or stencil
if (((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) &&
((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT)) {
char const str[] = "vkCmdClearAttachments() aspectMask [%d] is not a valid combination of bits.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-VkClearAttachment-aspectMask-parameter", str, i);
}
if (!subpass_desc->pDepthStencilAttachment ||
(subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), kVUID_Core_DrawState_MissingAttachmentReference,
"vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
} else {
image_view = framebuffer->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment];
}
}
if (image_view) {
auto image_view_state = GetImageViewState(device_data, image_view);
for (uint32_t j = 0; j < rectCount; j++) {
// The rectangular region specified by a given element of pRects must be contained within the render area of
// the current render pass instance
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
if (false == ContainsRect(cb_node->activeRenderPassBeginInfo.renderArea, pRects[j].rect)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
} else {
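                        // Secondary command buffer: the render area is not known until vkCmdExecuteCommands, so defer the
                        // containment check via a lambda evaluated against the primary command buffer's render pass begin info.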
const auto local_rect =
pRects[j].rect; // local copy of rect captured by value below to preserve original contents
cb_node->cmd_execute_commands_functions.emplace_back([=](GLOBAL_CB_NODE *prim_cb, VkFramebuffer fb) {
if (false == ContainsRect(prim_cb->activeRenderPassBeginInfo.renderArea, local_rect)) {
return log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
return false;
});
}
// The layers specified by a given element of pRects must be contained within every attachment that
// pAttachments refers to
auto attachment_layer_count = image_view_state->create_info.subresourceRange.layerCount;
if ((pRects[j].baseArrayLayer >= attachment_layer_count) ||
(pRects[j].baseArrayLayer + pRects[j].layerCount > attachment_layer_count)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(commandBuffer), "VUID-vkCmdClearAttachments-pRects-00017",
"vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers "
"of pAttachment[%d].",
j, i);
}
}
}
}
}
return skip;
}
bool PreCallValidateCmdResolveImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
VkImageLayout src_image_layout, IMAGE_STATE *dst_image_state, VkImageLayout dst_image_layout,
uint32_t regionCount, const VkImageResolve *pRegions) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateMemoryIsBoundToImage(device_data, src_image_state, "vkCmdResolveImage()",
"VUID-vkCmdResolveImage-srcImage-00256");
skip |= ValidateMemoryIsBoundToImage(device_data, dst_image_state, "vkCmdResolveImage()",
"VUID-vkCmdResolveImage-dstImage-00258");
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdResolveImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdResolveImage-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
skip |= InsideRenderPass(device_data, cb_node, "vkCmdResolveImage()", "VUID-vkCmdResolveImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(device_data, dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT,
"vkCmdResolveImage()", "VUID-vkCmdResolveImage-dstImage-02003",
"VUID-vkCmdResolveImage-dstImage-02003");
bool hit_error = false;
const std::string invalid_src_layout_vuid =
(src_image_state->shared_presentable &&
core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-srcImageLayout-01400"
: "VUID-vkCmdResolveImage-srcImageLayout-00261";
const std::string invalid_dst_layout_vuid =
(dst_image_state->shared_presentable &&
core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-dstImageLayout-01401"
: "VUID-vkCmdResolveImage-dstImageLayout-00263";
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
skip |= ValidateImageSubresourceLayers(device_data, cb_node, &pRegions[i].srcSubresource, "vkCmdResolveImage()",
"srcSubresource", i);
skip |= ValidateImageSubresourceLayers(device_data, cb_node, &pRegions[i].dstSubresource, "vkCmdResolveImage()",
"dstSubresource", i);
skip |= VerifyImageLayout(device_data, cb_node, src_image_state, pRegions[i].srcSubresource, src_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdResolveImage()", invalid_src_layout_vuid,
"VUID-vkCmdResolveImage-srcImageLayout-00260", &hit_error);
skip |= VerifyImageLayout(device_data, cb_node, dst_image_state, pRegions[i].dstSubresource, dst_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdResolveImage()", invalid_dst_layout_vuid,
"VUID-vkCmdResolveImage-dstImageLayout-00262", &hit_error);
skip |= ValidateImageMipLevel(device_data, cb_node, src_image_state, pRegions[i].srcSubresource.mipLevel, i,
"vkCmdResolveImage()", "srcSubresource", "VUID-vkCmdResolveImage-srcSubresource-01709");
skip |= ValidateImageMipLevel(device_data, cb_node, dst_image_state, pRegions[i].dstSubresource.mipLevel, i,
"vkCmdResolveImage()", "dstSubresource", "VUID-vkCmdResolveImage-dstSubresource-01710");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, src_image_state, pRegions[i].srcSubresource.baseArrayLayer,
pRegions[i].srcSubresource.layerCount, i, "vkCmdResolveImage()", "srcSubresource",
"VUID-vkCmdResolveImage-srcSubresource-01711");
            skip |= ValidateImageArrayLayerRange(device_data, cb_node, dst_image_state, pRegions[i].dstSubresource.baseArrayLayer,
                                                 pRegions[i].dstSubresource.layerCount, i, "vkCmdResolveImage()", "dstSubresource",
                                                 "VUID-vkCmdResolveImage-dstSubresource-01712");
// layer counts must match
if (pRegions[i].srcSubresource.layerCount != pRegions[i].dstSubresource.layerCount) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-layerCount-00267",
"vkCmdResolveImage: layerCount in source and destination subresource of pRegions[%d] does not match.", i);
}
// For each region, src and dest image aspect must be color only
if ((pRegions[i].srcSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(pRegions[i].dstSubresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
char const str[] =
"vkCmdResolveImage: src and dest aspectMasks for each region must specify only VK_IMAGE_ASPECT_COLOR_BIT";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageResolve-aspectMask-00266", "%s.", str);
}
}
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
char const str[] = "vkCmdResolveImage called with unmatched source and dest formats.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageFormat, str);
}
if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
char const str[] = "vkCmdResolveImage called with unmatched source and dest image types.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_MismatchedImageType, str);
}
if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
char const str[] = "vkCmdResolveImage called with source sample count less than 2.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-srcImage-00257", "%s.", str);
}
if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
char const str[] = "vkCmdResolveImage called with dest sample count greater than 1.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdResolveImage-dstImage-00259", "%s.", str);
}
} else {
assert(0);
}
return skip;
}
void PreCallRecordCmdResolveImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
IMAGE_STATE *dst_image_state) {
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(device_data, cb_node, src_image_state);
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
bool PreCallValidateCmdBlitImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
IMAGE_STATE *dst_image_state, uint32_t region_count, const VkImageBlit *regions,
VkImageLayout src_image_layout, VkImageLayout dst_image_layout, VkFilter filter) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if (cb_node) {
skip |= ValidateCmd(device_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
}
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateImageSampleCount(device_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
"VUID-vkCmdBlitImage-srcImage-00233");
skip |= ValidateImageSampleCount(device_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
"VUID-vkCmdBlitImage-dstImage-00234");
skip |=
ValidateMemoryIsBoundToImage(device_data, src_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-srcImage-00220");
skip |=
ValidateMemoryIsBoundToImage(device_data, dst_image_state, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-dstImage-00225");
skip |=
ValidateImageUsageFlags(device_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdBlitImage-srcImage-00219", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateImageUsageFlags(device_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdBlitImage-dstImage-00224", "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdBlitImage()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBlitImage-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
skip |= InsideRenderPass(device_data, cb_node, "vkCmdBlitImage()", "VUID-vkCmdBlitImage-renderpass");
skip |= ValidateImageFormatFeatureFlags(device_data, src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-srcImage-01999", "VUID-vkCmdBlitImage-srcImage-01999");
skip |= ValidateImageFormatFeatureFlags(device_data, dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-dstImage-02000", "VUID-vkCmdBlitImage-dstImage-02000");
// TODO: Need to validate image layouts, which will include layout validation for shared presentable images
VkFormat src_format = src_image_state->createInfo.format;
VkFormat dst_format = dst_image_state->createInfo.format;
VkImageType src_type = src_image_state->createInfo.imageType;
VkImageType dst_type = dst_image_state->createInfo.imageType;
if (VK_FILTER_LINEAR == filter) {
skip |= ValidateImageFormatFeatureFlags(device_data, src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT,
"vkCmdBlitImage()", "VUID-vkCmdBlitImage-filter-02001",
"VUID-vkCmdBlitImage-filter-02001");
} else if (VK_FILTER_CUBIC_IMG == filter) {
skip |= ValidateImageFormatFeatureFlags(device_data, src_image_state,
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG, "vkCmdBlitImage()",
"VUID-vkCmdBlitImage-filter-02002", "VUID-vkCmdBlitImage-filter-02002");
}
if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-filter-00237",
"vkCmdBlitImage: source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified.");
}
if ((VK_SAMPLE_COUNT_1_BIT != src_image_state->createInfo.samples) ||
(VK_SAMPLE_COUNT_1_BIT != dst_image_state->createInfo.samples)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00228",
"vkCmdBlitImage: source or dest image has sample count other than VK_SAMPLE_COUNT_1_BIT.");
}
// Validate consistency for unsigned formats
if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage: If one of srcImage and dstImage images has unsigned integer format, "
<< "the other one must also have unsigned integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00230", "%s.", ss.str().c_str());
}
// Validate consistency for signed formats
if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
std::stringstream ss;
ss << "vkCmdBlitImage: If one of srcImage and dstImage images has signed integer format, "
<< "the other one must also have signed integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00229", "%s.", ss.str().c_str());
}
// Validate filter for Depth/Stencil formats
if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
std::stringstream ss;
ss << "vkCmdBlitImage: If the format of srcImage is a depth, stencil, or depth stencil "
<< "then filter must be VK_FILTER_NEAREST.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00232", "%s.", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
std::stringstream ss;
ss << "vkCmdBlitImage: If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is "
<< string_VkFormat(dst_format);
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-srcImage-00231", "%s.", ss.str().c_str());
}
} // Depth or Stencil
// Do per-region checks
const std::string invalid_src_layout_vuid =
(src_image_state->shared_presentable &&
core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-srcImageLayout-01398"
: "VUID-vkCmdBlitImage-srcImageLayout-00222";
const std::string invalid_dst_layout_vuid =
(dst_image_state->shared_presentable &&
core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-dstImageLayout-01399"
: "VUID-vkCmdBlitImage-dstImageLayout-00227";
for (uint32_t i = 0; i < region_count; i++) {
const VkImageBlit rgn = regions[i];
bool hit_error = false;
skip |= VerifyImageLayout(device_data, cb_node, src_image_state, rgn.srcSubresource, src_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdBlitImage()", invalid_src_layout_vuid,
"VUID-vkCmdBlitImage-srcImageLayout-00221", &hit_error);
skip |= VerifyImageLayout(device_data, cb_node, dst_image_state, rgn.dstSubresource, dst_image_layout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdBlitImage()", invalid_dst_layout_vuid,
"VUID-vkCmdBlitImage-dstImageLayout-00226", &hit_error);
skip |=
ValidateImageSubresourceLayers(device_data, cb_node, &rgn.srcSubresource, "vkCmdBlitImage()", "srcSubresource", i);
skip |=
ValidateImageSubresourceLayers(device_data, cb_node, &rgn.dstSubresource, "vkCmdBlitImage()", "dstSubresource", i);
skip |= ValidateImageMipLevel(device_data, cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, "vkCmdBlitImage()",
"srcSubresource", "VUID-vkCmdBlitImage-srcSubresource-01705");
skip |= ValidateImageMipLevel(device_data, cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, "vkCmdBlitImage()",
"dstSubresource", "VUID-vkCmdBlitImage-dstSubresource-01706");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer,
rgn.srcSubresource.layerCount, i, "vkCmdBlitImage()", "srcSubresource",
"VUID-vkCmdBlitImage-srcSubresource-01707");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer,
rgn.dstSubresource.layerCount, i, "vkCmdBlitImage()", "dstSubresource",
"VUID-vkCmdBlitImage-dstSubresource-01708");
// Warn for zero-sized regions
if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) ||
(rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage: pRegions[" << i << "].srcOffsets specify a zero-volume area.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) ||
(rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) {
std::stringstream ss;
ss << "vkCmdBlitImage: pRegions[" << i << "].dstOffsets specify a zero-volume area.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
// Check that src/dst layercounts match
if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-layerCount-00239",
"vkCmdBlitImage: layerCount in source and destination subresource of pRegions[%d] does not match.", i);
}
if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00238",
"vkCmdBlitImage: aspectMask members for pRegion[%d] do not match.", i);
}
if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00241",
"vkCmdBlitImage: region [%d] source aspectMask (0x%x) specifies aspects not present in source "
"image format %s.",
i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format));
}
if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-aspectMask-00242",
"vkCmdBlitImage: region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.", i,
rgn.dstSubresource.aspectMask, string_VkFormat(dst_format));
}
// Validate source image offsets
VkExtent3D src_extent = GetImageSubresourceExtent(src_image_state, &(rgn.srcSubresource));
if (VK_IMAGE_TYPE_1D == src_type) {
if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00245",
"vkCmdBlitImage: region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values "
"of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) {
if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00247",
"vkCmdBlitImage: region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"srcOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z);
}
}
bool oob = false;
if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) ||
(rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) {
oob = true;
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00243",
"vkCmdBlitImage: region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).",
i, rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width);
}
if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) ||
(rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00244",
"vkCmdBlitImage: region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).", i,
rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height);
}
if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) ||
(rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) {
oob = true;
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcOffset-00246",
"vkCmdBlitImage: region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).",
i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth);
}
if (oob) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00215",
"vkCmdBlitImage: region [%d] source image blit region exceeds image dimensions.", i);
}
// Validate dest image offsets
VkExtent3D dst_extent = GetImageSubresourceExtent(dst_image_state, &(rgn.dstSubresource));
if (VK_IMAGE_TYPE_1D == dst_type) {
if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00250",
"vkCmdBlitImage: region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of "
"(%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) {
if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstImage-00252",
"vkCmdBlitImage: region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"dstOffset[].z values of (%1d, %1d). These must be (0, 1).",
i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
}
}
oob = false;
if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
(rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
oob = true;
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00248",
"vkCmdBlitImage: region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).",
i, rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
}
if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
(rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
oob = true;
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00249",
"vkCmdBlitImage: region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).", i,
rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
}
if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
(rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
oob = true;
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-dstOffset-00251",
"vkCmdBlitImage: region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).",
i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
}
if (oob) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-vkCmdBlitImage-pRegions-00216",
"vkCmdBlitImage: region [%d] destination image blit region exceeds image dimensions.", i);
}
if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
(0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageBlit-srcImage-00240",
"vkCmdBlitImage: region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
"layerCount other than 1.",
i);
}
}
} // per-region checks
} else {
assert(0);
}
return skip;
}
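// Record-side handler for vkCmdBlitImage: record the src/dst subresource layouts for each region and bind both images to
// the command buffer.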
void PreCallRecordCmdBlitImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
IMAGE_STATE *dst_image_state, uint32_t region_count, const VkImageBlit *regions,
VkImageLayout src_image_layout, VkImageLayout dst_image_layout) {
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < region_count; ++i) {
SetImageLayout(device_data, cb_node, src_image_state, regions[i].srcSubresource, src_image_layout);
SetImageLayout(device_data, cb_node, dst_image_state, regions[i].dstSubresource, dst_image_layout);
}
// Update bindings between images and cmd buffer
AddCommandBufferBindingImage(device_data, cb_node, src_image_state);
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE
// layout.
bool ValidateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB,
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const &globalImageLayoutMap,
std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &overlayLayoutMap) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
for (auto cb_image_data : pCB->imageLayoutMap) {
VkImageLayout imageLayout;
if (FindLayout(device_data, overlayLayoutMap, cb_image_data.first, imageLayout) ||
FindLayout(device_data, globalImageLayoutMap, cb_image_data.first, imageLayout)) {
if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (imageLayout != cb_image_data.second.initialLayout) {
if (cb_image_data.first.hasSubresource) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout,
"Cannot submit cmd buffer using image (0x%" PRIx64
") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], with layout %s when first use is %s.",
HandleToUint64(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pCB->commandBuffer), kVUID_Core_DrawState_InvalidImageLayout,
"Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when first use is %s.",
HandleToUint64(cb_image_data.first.image), string_VkImageLayout(imageLayout),
string_VkImageLayout(cb_image_data.second.initialLayout));
}
}
SetLayout(overlayLayoutMap, cb_image_data.first, cb_image_data.second.layout);
}
}
return skip;
}
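// Propagate the final image layouts recorded in this command buffer into the global layout map.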
void UpdateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB) {
for (auto cb_image_data : pCB->imageLayoutMap) {
VkImageLayout imageLayout;
FindGlobalLayout(device_data, cb_image_data.first, imageLayout);
SetGlobalLayout(device_data, cb_image_data.first, cb_image_data.second.layout);
}
}
// Print readable FlagBits in FlagMask
static std::string StringVkAccessFlags(VkAccessFlags accessMask) {
std::string result;
std::string separator;
if (accessMask == 0) {
result = "[None]";
} else {
result = "[";
        for (uint32_t i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
separator = " | ";
}
}
result = result + "]";
}
return result;
}
#if 0  // This function appears vestigial and is not called anywhere. All VkAccessFlagBits VUIDs are implicit and are checked
       // by parameter validation. To be removed (disabled 6/18).
// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set. If required_bit is zero, accessMask
// must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(core_validation::layer_data *device_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
const char *type) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
if (accessMask & ~(required_bit | optional_bits)) {
// TODO: Verify against Valid Use
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cmdBuffer), kVUID_Core_DrawState_InvalidBarrier,
"Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", type, accessMask,
StringVkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
}
} else {
if (!required_bit) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cmdBuffer), kVUID_Core_DrawState_InvalidBarrier,
"%s AccessMask %d %s must contain at least one of access bits %d %s when layout is %s, unless the app "
"has previously added a barrier for this transition.",
type, accessMask, StringVkAccessFlags(accessMask).c_str(), optional_bits,
StringVkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
} else {
std::string opt_bits;
if (optional_bits != 0) {
std::stringstream ss;
ss << optional_bits;
opt_bits = "and may have optional bits " + ss.str() + ' ' + StringVkAccessFlags(optional_bits);
}
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cmdBuffer), kVUID_Core_DrawState_InvalidBarrier,
"%s AccessMask %d %s must have required access bit %d %s %s when layout is %s, unless the app has "
"previously added a barrier for this transition.",
type, accessMask, StringVkAccessFlags(accessMask).c_str(), required_bit,
StringVkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
}
}
return skip;
}
#endif
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
bool ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
const VkImageLayout first_layout, const uint32_t attachment,
const VkAttachmentDescription2KHR &attachment_description) {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
// Verify that initial loadOp on READ_ONLY attachments is not CLEAR
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2KHR-pAttachments-03053" : "VUID-VkRenderPassCreateInfo-pAttachments-00836";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01566";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL) {
vuid = use_rp2 ? kVUID_Core_DrawState_InvalidRenderpass : "VUID-VkRenderPassCreateInfo-pAttachments-01567";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
}
}
return skip;
}
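// Validate the attachment layouts declared in a render pass create info: warn about loadOp/initialLayout combinations that
// are likely unintended and check that each attachment reference (input, color, resolve, depth/stencil) uses an acceptable
// layout.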
bool ValidateLayouts(const core_validation::layer_data *device_data, RenderPassCreateVersion rp_version, VkDevice device,
const VkRenderPassCreateInfo2KHR *pCreateInfo) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkFormat format = pCreateInfo->pAttachments[i].format;
if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
if ((FormatIsColor(format) || FormatHasDepth(format)) &&
pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Render pass has an attachment with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
"VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.");
}
if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidRenderpass,
"Render pass has an attachment with stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
"== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.");
}
}
}
// Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
// Check input attachments first, so we can detect first-use-as-input for VU #00349
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto attach_index = subpass.pInputAttachments[j].attachment;
if (attach_index == VK_ATTACHMENT_UNUSED) continue;
switch (subpass.pInputAttachments[j].layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// These are ideal.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal. TODO: reconsider this warning based on other constraints.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for input attachment reference %u in subpass %u is %s but must be "
"DEPTH_STENCIL_READ_ONLY, SHADER_READ_ONLY_OPTIMAL, or GENERAL.",
j, i, string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR:
if (GetDeviceExtensions(device_data)->vk_khr_maintenance2) {
break;
} else {
// Intentionally fall through to generic error message
}
// fall through
default:
// No other layouts are acceptable
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pInputAttachments[j].layout));
}
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
bool used_as_depth =
(subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attach_index);
bool used_as_color = false;
for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
used_as_color = (subpass.pColorAttachments[k].attachment == attach_index);
}
if (!used_as_depth && !used_as_color &&
pCreateInfo->pAttachments[attach_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: attachment %u is first used as an input attachment in subpass %u with loadOp=CLEAR.",
                                    function_name, attach_index, i);
}
}
attach_first_use[attach_index] = false;
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
auto attach_index = subpass.pColorAttachments[j].attachment;
if (attach_index == VK_ATTACHMENT_UNUSED) continue;
            // TODO: Need a way to validate shared presentable images here; currently just allowing
            // VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR as an acceptable layout, but need to make sure shared presentable images
            // ONLY use that layout
switch (subpass.pColorAttachments[j].layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// This is ideal.
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
// TODO: See note above, just assuming that attachment is shared presentable and allowing this for now.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal; TODO: reconsider this warning based on other constraints?
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for color attachment reference %u in subpass %u is %s but should be "
"COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
j, i, string_VkImageLayout(subpass.pColorAttachments[j].layout));
break;
default:
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pColorAttachments[j].layout));
}
if (subpass.pResolveAttachments && (subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_UNDEFINED ||
subpass.pResolveAttachments[j].layout == VK_IMAGE_LAYOUT_PREINITIALIZED)) {
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for color attachment reference %u in subpass %u is %s but should be "
"COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
j, i, string_VkImageLayout(subpass.pColorAttachments[j].layout));
}
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
}
attach_first_use[attach_index] = false;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
switch (subpass.pDepthStencilAttachment->layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
// These are ideal.
break;
case VK_IMAGE_LAYOUT_GENERAL:
// May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than
// doing a bunch of transitions.
skip |= log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_InvalidImageLayout,
"GENERAL layout for depth attachment may not give optimal performance.");
break;
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
vuid = use_rp2 ? "VUID-VkAttachmentReference2KHR-layout-03077" : "VUID-VkAttachmentReference-layout-00857";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"Layout for depth attachment reference in subpass %u is %s but must be a valid depth/stencil "
"layout or GENERAL.",
i, string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR:
if (GetDeviceExtensions(device_data)->vk_khr_maintenance2) {
break;
} else {
// Intentionally fall through to generic error message
}
// fall through
default:
// No other layouts are acceptable
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
kVUID_Core_DrawState_InvalidImageLayout,
"Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
}
auto attach_index = subpass.pDepthStencilAttachment->attachment;
if (attach_first_use[attach_index]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pDepthStencilAttachment->layout,
attach_index, pCreateInfo->pAttachments[attach_index]);
}
attach_first_use[attach_index] = false;
}
}
return skip;
}
// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
bool ValidateMapImageLayouts(core_validation::layer_data *device_data, VkDevice device, DEVICE_MEM_INFO const *mem_info,
VkDeviceSize offset, VkDeviceSize end_offset) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
// Iterate over all bound image ranges and verify that for any that overlap the map ranges, the layouts are
// VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
// TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
for (auto image_handle : mem_info->bound_images) {
auto img_it = mem_info->bound_ranges.find(image_handle);
if (img_it != mem_info->bound_ranges.end()) {
if (RangesIntersect(device_data, &img_it->second, offset, end_offset)) {
std::vector<VkImageLayout> layouts;
if (FindLayouts(device_data, VkImage(image_handle), layouts)) {
for (auto layout : layouts) {
if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
HandleToUint64(mem_info->mem), kVUID_Core_DrawState_InvalidImageLayout,
"Mapping an image with layout %s can result in undefined behavior if this memory is used "
"by the device. Only GENERAL or PREINITIALIZED should be used.",
string_VkImageLayout(layout));
}
}
}
}
}
}
return skip;
}
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
static bool ValidateUsageFlags(const layer_data *device_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
VulkanObjectType obj_type, std::string msgCode, char const *func_name, char const *usage_str) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[obj_type];
if (strict) {
correct_usage = ((actual & desired) == desired);
} else {
correct_usage = ((actual & desired) != 0);
}
if (!correct_usage) {
if (msgCode == kVUIDUndefined) {
// TODO: Fix callers with kVUIDUndefined to use correct validation checks.
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_type], obj_handle,
kVUID_Core_MemTrack_InvalidUsageFlag,
"Invalid usage flag for %s 0x%" PRIx64 " used by %s. In this case, %s should have %s set during creation.",
type_str, obj_handle, func_name, type_str, usage_str);
} else {
skip =
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[obj_type], obj_handle, msgCode,
"Invalid usage flag for %s 0x%" PRIx64 " used by %s. In this case, %s should have %s set during creation.",
type_str, obj_handle, func_name, type_str, usage_str);
}
}
return skip;
}
// Helper function to validate usage flags for images. For given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool ValidateImageUsageFlags(layer_data *device_data, IMAGE_STATE const *image_state, VkFlags desired, bool strict,
const std::string &msgCode, char const *func_name, char const *usage_string) {
return ValidateUsageFlags(device_data, image_state->createInfo.usage, desired, strict, HandleToUint64(image_state->image),
kVulkanObjectTypeImage, msgCode, func_name, usage_string);
}
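// Verify that the image's format supports the desired format-feature flags for its tiling mode (linear vs. optimal).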
bool ValidateImageFormatFeatureFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
char const *func_name, const std::string &linear_vuid, const std::string &optimal_vuid) {
VkFormatProperties format_properties = GetFormatProperties(dev_data, image_state->createInfo.format);
const debug_report_data *report_data = core_validation::GetReportData(dev_data);
bool skip = false;
if (image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if ((format_properties.linearTilingFeatures & desired) != desired) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), linear_vuid,
"In %s, invalid linearTilingFeatures (0x%08X) for format %u used by image %" PRIx64 ".", func_name,
format_properties.linearTilingFeatures, image_state->createInfo.format, HandleToUint64(image_state->image));
}
} else if (image_state->createInfo.tiling == VK_IMAGE_TILING_OPTIMAL) {
if ((format_properties.optimalTilingFeatures & desired) != desired) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), optimal_vuid,
"In %s, invalid optimalTilingFeatures (0x%08X) for format %u used by image %" PRIx64 ".", func_name,
format_properties.optimalTilingFeatures, image_state->createInfo.format,
HandleToUint64(image_state->image));
}
}
return skip;
}
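// Validate a VkImageSubresourceLayers struct used by transfer commands: layerCount must be non-zero, aspectMask must not
// include METADATA, and COLOR must not be combined with DEPTH or STENCIL.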
bool ValidateImageSubresourceLayers(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
const VkImageSubresourceLayers *subresource_layers, char const *func_name, char const *member,
uint32_t i) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(dev_data);
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-layerCount-01700",
"In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
if (subresource_layers->aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00168",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
}
// if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
if ((subresource_layers->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(subresource_layers->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->commandBuffer), "VUID-VkImageSubresourceLayers-aspectMask-00167",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, i, member);
}
return skip;
}
// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool ValidateBufferUsageFlags(const layer_data *device_data, BUFFER_STATE const *buffer_state, VkFlags desired, bool strict,
const std::string &msgCode, char const *func_name, char const *usage_string) {
return ValidateUsageFlags(device_data, buffer_state->createInfo.usage, desired, strict, HandleToUint64(buffer_state->buffer),
kVulkanObjectTypeBuffer, msgCode, func_name, usage_string);
}
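// When the view's range is not VK_WHOLE_SIZE, check that it is non-zero, a multiple of the format's texel size, within
// maxTexelBufferElements, and that offset + range fits inside the buffer.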
bool ValidateBufferViewRange(const layer_data *device_data, const BUFFER_STATE *buffer_state,
const VkBufferViewCreateInfo *pCreateInfo, const VkPhysicalDeviceLimits *device_limits) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
// Range must be greater than 0
        if (range == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00928",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
range);
}
// Range must be a multiple of the element size of format
const size_t format_size = FormatSize(pCreateInfo->format);
if (range % format_size != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00929",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
"(" PRINTF_SIZE_T_SPECIFIER ").",
range, format_size);
}
// Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
if (range / format_size > device_limits->maxTexelBufferElements) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-range-00930",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (" PRINTF_SIZE_T_SPECIFIER
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, format_size, device_limits->maxTexelBufferElements);
}
// The sum of range and offset must be less than or equal to the size of buffer
if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00931",
"If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
range, pCreateInfo->offset, buffer_state->createInfo.size);
}
}
return skip;
}
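// Check that the buffer's texel-buffer usage flags are backed by the corresponding bufferFeatures of the view's format.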
bool ValidateBufferViewBuffer(const layer_data *device_data, const BUFFER_STATE *buffer_state,
const VkBufferViewCreateInfo *pCreateInfo) {
bool skip = false;
const debug_report_data *report_data = GetReportData(device_data);
const VkFormatProperties format_properties = GetFormatProperties(device_data, pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00933",
"If buffer was created with `usage` containing VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format must "
"be supported for uniform texel buffers");
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-buffer-00934",
"If buffer was created with `usage` containing VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format must "
"be supported for storage texel buffers");
}
return skip;
}
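// Validate vkCreateBuffer: sparse create flags may only be used when the matching device features are enabled.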
bool PreCallValidateCreateBuffer(layer_data *device_data, const VkBufferCreateInfo *pCreateInfo) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) && (!GetEnabledFeatures(device_data)->core.sparseBinding)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00915",
"vkCreateBuffer(): the sparseBinding device feature is disabled: Buffers cannot be created with the "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT set.");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) &&
(!GetEnabledFeatures(device_data)->core.sparseResidencyBuffer)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00916",
"vkCreateBuffer(): the sparseResidencyBuffer device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT set.");
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_ALIASED_BIT) &&
(!GetEnabledFeatures(device_data)->core.sparseResidencyAliased)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
"VUID-VkBufferCreateInfo-flags-00917",
"vkCreateBuffer(): the sparseResidencyAliased device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_SPARSE_ALIASED_BIT set.");
}
return skip;
}
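// Record the new buffer by adding a BUFFER_STATE entry to the buffer map.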
void PostCallRecordCreateBuffer(layer_data *device_data, const VkBufferCreateInfo *pCreateInfo, VkBuffer *pBuffer) {
// TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
GetBufferMap(device_data)
->insert(std::make_pair(*pBuffer, std::unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
}
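// Validate vkCreateBufferView: the buffer must be memory-backed and created with a texel-buffer usage flag, and the view's
// offset and range must satisfy device limits and fit within the buffer.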
bool PreCallValidateCreateBufferView(const layer_data *device_data, const VkBufferViewCreateInfo *pCreateInfo) {
bool skip = false;
const debug_report_data *report_data = core_validation::GetReportData(device_data);
BUFFER_STATE *buffer_state = GetBufferState(device_data, pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCreateBufferView()",
"VUID-VkBufferViewCreateInfo-buffer-00935");
// In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
// UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
skip |= ValidateBufferUsageFlags(device_data, buffer_state,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
"VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
"VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
// Buffer view offset must be less than the size of buffer
if (pCreateInfo->offset >= buffer_state->createInfo.size) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00925",
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be less than the size of the buffer (%" PRIuLEAST64 ").",
pCreateInfo->offset, buffer_state->createInfo.size);
}
const VkPhysicalDeviceLimits *device_limits = &(GetPhysicalDeviceProperties(device_data)->limits);
// Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer_state->buffer), "VUID-VkBufferViewCreateInfo-offset-00926",
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
}
skip |= ValidateBufferViewRange(device_data, buffer_state, pCreateInfo, device_limits);
skip |= ValidateBufferViewBuffer(device_data, buffer_state, pCreateInfo);
}
return skip;
}
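// Record the new buffer view by adding a BUFFER_VIEW_STATE entry to the buffer view map.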
void PostCallRecordCreateBufferView(layer_data *device_data, const VkBufferViewCreateInfo *pCreateInfo, VkBufferView *pView) {
(*GetBufferViewMap(device_data))[*pView] = std::unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
}
// For the given format verify that the aspect masks make sense
bool ValidateImageAspectMask(const layer_data *device_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
const char *func_name, const char *vuid) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
VkDebugReportObjectTypeEXT objectType = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;
if (image != VK_NULL_HANDLE) {
objectType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
}
if (FormatIsColor(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.", func_name);
}
} else if (FormatIsDepthAndStencil(format)) {
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth/stencil image formats must have at least one of VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
} else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name);
}
} else if (FormatIsDepthOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.", func_name);
}
} else if (FormatIsStencilOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
} else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set.", func_name);
}
} else if (FormatIsMultiplane(format)) {
VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
if (3 == FormatPlaneCount(format)) {
valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
}
if ((aspect_mask & valid_flags) != aspect_mask) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, HandleToUint64(image), vuid,
"%s: Multi-plane image formats may have only VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs "
"set, where n = [0, 1, 2].",
func_name);
}
}
return skip;
}
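// Per-caller VUID strings consumed by ValidateImageSubresourceRange() below.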
struct SubresourceRangeErrorCodes {
std::string base_mip_err, mip_count_err, base_layer_err, layer_count_err;
};
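// Shared subresource-range check: the base mip level / array layer must be within the image, and when levelCount /
// layerCount is not a VK_REMAINING_* value it must be non-zero and base + count must not exceed the image's counts.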
bool ValidateImageSubresourceRange(const layer_data *device_data, const uint32_t image_mip_count, const uint32_t image_layer_count,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name, const char *param_name,
const char *image_layer_count_var_name, const uint64_t image_handle,
SubresourceRangeErrorCodes errorCodes) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
// Validate mip levels
if (subresourceRange.baseMipLevel >= image_mip_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.base_mip_err,
"%s: %s.baseMipLevel (= %" PRIu32
") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
}
if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
if (subresourceRange.levelCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.mip_count_err, "%s: %s.levelCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};
if (necessary_mip_count > image_mip_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.mip_count_err,
"%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
necessary_mip_count, image_mip_count);
}
}
}
// Validate array layers
if (subresourceRange.baseArrayLayer >= image_layer_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.base_layer_err,
"%s: %s.baseArrayLayer (= %" PRIu32
") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
}
if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
if (subresourceRange.layerCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.layer_count_err, "%s: %s.layerCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_layer_count =
uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};
if (necessary_layer_count > image_layer_count) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, image_handle,
errorCodes.layer_count_err,
"%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
necessary_layer_count, image_layer_count_var_name, image_layer_count);
}
}
}
return skip;
}
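// Choose the vkCreateImageView subresource-range VUIDs, accounting for 3D images viewed as 2D arrays under maintenance1,
// and forward to the shared range check.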
bool ValidateCreateImageViewSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
bool is_imageview_2d_type, const VkImageSubresourceRange &subresourceRange) {
bool is_khr_maintenance1 = GetDeviceExtensions(device_data)->vk_khr_maintenance1;
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR);
bool is_3D_to_2D_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;
const auto image_layer_count = is_3D_to_2D_map ? image_state->createInfo.extent.depth : image_state->createInfo.arrayLayers;
const auto image_layer_count_var_name = is_3D_to_2D_map ? "extent.depth" : "arrayLayers";
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
subresourceRangeErrorCodes.base_layer_err = is_khr_maintenance1 ? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-image-01484"
: "VUID-VkImageViewCreateInfo-image-01482")
: "VUID-VkImageViewCreateInfo-subresourceRange-01480";
subresourceRangeErrorCodes.layer_count_err = is_khr_maintenance1
? (is_3D_to_2D_map ? "VUID-VkImageViewCreateInfo-subresourceRange-01485"
: "VUID-VkImageViewCreateInfo-subresourceRange-01483")
: "VUID-VkImageViewCreateInfo-subresourceRange-01719";
return ValidateImageSubresourceRange(device_data, image_state->createInfo.mipLevels, image_layer_count, subresourceRange,
"vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name,
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
bool ValidateCmdClearColorSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange, const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693";
return ValidateImageSubresourceRange(device_data, image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers,
subresourceRange, "vkCmdClearColorImage", param_name, "arrayLayers",
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
bool ValidateCmdClearDepthSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange, const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
subresourceRangeErrorCodes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476";
subresourceRangeErrorCodes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695";
return ValidateImageSubresourceRange(device_data, image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers,
subresourceRange, "vkCmdClearDepthStencilImage", param_name, "arrayLayers",
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
bool ValidateImageBarrierSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724";
subresourceRangeErrorCodes.base_layer_err = "VUID-VkImageMemoryBarrier-subresourceRange-01488";
subresourceRangeErrorCodes.layer_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01725";
return ValidateImageSubresourceRange(device_data, image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers,
subresourceRange, cmd_name, param_name, "arrayLayers", HandleToUint64(image_state->image),
subresourceRangeErrorCodes);
}
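// Validate vkCreateImageView: usage flags, memory binding, subresource range, view/image format compatibility (including
// multi-planar and MUTABLE_FORMAT rules), aspect mask, and view type vs. image type.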
bool PreCallValidateCreateImageView(layer_data *device_data, const VkImageViewCreateInfo *create_info) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
IMAGE_STATE *image_state = GetImageState(device_data, create_info->image);
if (image_state) {
skip |= ValidateImageUsageFlags(
device_data, image_state,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV,
false, kVUIDUndefined, "vkCreateImageView()",
"VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|SHADING_RATE_IMAGE]_BIT");
// If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
skip |=
ValidateMemoryIsBoundToImage(device_data, image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020");
// Checks imported from image layer
skip |= ValidateCreateImageViewSubresourceRange(
device_data, image_state,
create_info->viewType == VK_IMAGE_VIEW_TYPE_2D || create_info->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
create_info->subresourceRange);
VkImageCreateFlags image_flags = image_state->createInfo.flags;
VkFormat image_format = image_state->createInfo.format;
VkImageUsageFlags image_usage = image_state->createInfo.usage;
VkImageTiling image_tiling = image_state->createInfo.tiling;
VkFormat view_format = create_info->format;
VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
VkImageType image_type = image_state->createInfo.imageType;
VkImageViewType view_type = create_info->viewType;
// If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
auto chained_ivuci_struct = lvl_find_in_chain<VkImageViewUsageCreateInfoKHR>(create_info->pNext);
if (chained_ivuci_struct) {
if (chained_ivuci_struct->usage & ~image_usage) {
std::stringstream ss;
ss << "vkCreateImageView(): Chained VkImageViewUsageCreateInfo usage field (0x" << std::hex
<< chained_ivuci_struct->usage << ") must not include flags not present in underlying image's usage (0x"
<< image_usage << ").";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewUsageCreateInfo-usage-01587", "%s",
ss.str().c_str());
}
image_usage = chained_ivuci_struct->usage;
}
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
if (FormatIsMultiplane(image_format)) {
// View format must match the multiplane compatible format
uint32_t plane = 3; // invalid
switch (aspect_mask) {
case VK_IMAGE_ASPECT_PLANE_0_BIT:
plane = 0;
break;
case VK_IMAGE_ASPECT_PLANE_1_BIT:
plane = 1;
break;
case VK_IMAGE_ASPECT_PLANE_2_BIT:
plane = 2;
break;
default:
break;
}
VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, plane);
if (view_format != compat_format) {
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not compatible with plane " << plane << " of underlying image format "
<< string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-01586", "%s",
ss.str().c_str());
}
} else {
if ((!GetDeviceExtensions(device_data)->vk_khr_maintenance2 ||
!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR))) {
// Format MUST be compatible (in the same format compatibility class) as the format the image was created with
if (FormatCompatibilityClass(image_format) != FormatCompatibilityClass(view_format)) {
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not in the same format compatibility class as image (" << HandleToUint64(create_info->image)
<< ") format " << string_VkFormat(image_format)
<< ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
<< "can support ImageViews with differing formats but they must be in the same compatibility class.";
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-01018", "%s",
ss.str().c_str());
}
}
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
if (image_format != view_format) {
std::stringstream ss;
ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
<< HandleToUint64(create_info->image) << " format " << string_VkFormat(image_format)
<< ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-01019", "%s", ss.str().c_str());
}
}
// Validate correct image aspect bits for desired formats and format consistency
skip |= ValidateImageAspectMask(device_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
switch (image_type) {
case VK_IMAGE_TYPE_1D:
if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
break;
case VK_IMAGE_TYPE_2D:
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
!(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-01003",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
case VK_IMAGE_TYPE_3D:
if (GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-01005",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
} else {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
} else {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
default:
break;
}
VkFormatProperties format_properties = GetFormatProperties(device_data, view_format);
bool check_tiling_features = false;
VkFormatFeatureFlags tiling_features = 0;
if (image_tiling == VK_IMAGE_TILING_LINEAR) {
tiling_features = format_properties.linearTilingFeatures;
check_tiling_features = true;
} else if (image_tiling == VK_IMAGE_TILING_OPTIMAL) {
tiling_features = format_properties.optimalTilingFeatures;
check_tiling_features = true;
}
if (check_tiling_features) {
if (tiling_features == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-None-02273",
"vkCreateImageView() pCreateInfo->format %s cannot be used with an image having the %s flag set.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-usage-02274",
"vkCreateImageView() pCreateInfo->format %s cannot be used with an image having the %s and "
"VK_IMAGE_USAGE_SAMPLED_BIT flags set.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-usage-02275",
"vkCreateImageView() pCreateInfo->format %s cannot be used with an image having the %s and "
"VK_IMAGE_USAGE_STORAGE_BIT flags set.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-usage-02276",
"vkCreateImageView() pCreateInfo->format %s cannot be used with an image having the %s and "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT flags set.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-usage-02277",
"vkCreateImageView() pCreateInfo->format %s cannot be used with an image having the %s and "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT flags set.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
}
}
if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02086",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, viewType must be "
"VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
if (view_format != VK_FORMAT_R8_UINT) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(create_info->image), "VUID-VkImageViewCreateInfo-image-02087",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
}
}
}
return skip;
}
void PostCallRecordCreateImageView(layer_data *device_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
auto image_view_map = GetImageViewMap(device_data);
(*image_view_map)[view] = std::unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
auto image_state = GetImageState(device_data, create_info->image);
auto &sub_res_range = (*image_view_map)[view].get()->create_info.subresourceRange;
sub_res_range.levelCount = ResolveRemainingLevels(&sub_res_range, image_state->createInfo.mipLevels);
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
}
bool PreCallValidateCmdCopyBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *src_buffer_state,
BUFFER_STATE *dst_buffer_state) {
bool skip = false;
skip |=
ValidateMemoryIsBoundToBuffer(device_data, src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
skip |=
ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |=
ValidateBufferUsageFlags(device_data, src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyBuffer-srcBuffer-00118", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyBuffer-dstBuffer-00120", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdCopyBuffer()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-renderpass");
return skip;
}
void PreCallRecordCmdCopyBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *src_buffer_state,
BUFFER_STATE *dst_buffer_state) {
// Update bindings between buffers and cmd buffer
AddCommandBufferBindingBuffer(device_data, cb_node, src_buffer_state);
AddCommandBufferBindingBuffer(device_data, cb_node, dst_buffer_state);
}
static bool ValidateIdleBuffer(layer_data *device_data, VkBuffer buffer) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = false;
auto buffer_state = GetBufferState(device_data, buffer);
if (!buffer_state) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, HandleToUint64(buffer),
kVUID_Core_DrawState_DoubleDestroy, "Cannot free buffer 0x%" PRIx64 " that has not been allocated.",
HandleToUint64(buffer));
} else {
if (buffer_state->in_use.load()) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
HandleToUint64(buffer), "VUID-vkDestroyBuffer-buffer-00922",
"Cannot free buffer 0x%" PRIx64 " that is in use by a command buffer.", HandleToUint64(buffer));
}
}
return skip;
}
bool PreCallValidateDestroyImageView(layer_data *device_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
VK_OBJECT *obj_struct) {
*image_view_state = GetImageViewState(device_data, image_view);
*obj_struct = {HandleToUint64(image_view), kVulkanObjectTypeImageView};
if (GetDisables(device_data)->destroy_image_view) return false;
bool skip = false;
if (*image_view_state) {
skip |= ValidateObjectNotInUse(device_data, *image_view_state, *obj_struct, "vkDestroyImageView",
"VUID-vkDestroyImageView-imageView-01026");
}
return skip;
}
void PreCallRecordDestroyImageView(layer_data *device_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
VK_OBJECT obj_struct) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(device_data, image_view_state->cb_bindings, obj_struct);
(*GetImageViewMap(device_data)).erase(image_view);
}
bool PreCallValidateDestroyBuffer(layer_data *device_data, VkBuffer buffer, BUFFER_STATE **buffer_state, VK_OBJECT *obj_struct) {
*buffer_state = GetBufferState(device_data, buffer);
*obj_struct = {HandleToUint64(buffer), kVulkanObjectTypeBuffer};
if (GetDisables(device_data)->destroy_buffer) return false;
bool skip = false;
if (*buffer_state) {
skip |= ValidateIdleBuffer(device_data, buffer);
}
return skip;
}
void PreCallRecordDestroyBuffer(layer_data *device_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
InvalidateCommandBuffers(device_data, buffer_state->cb_bindings, obj_struct);
for (auto mem_binding : buffer_state->GetBoundMemory()) {
auto mem_info = GetMemObjInfo(device_data, mem_binding);
if (mem_info) {
core_validation::RemoveBufferMemoryRange(HandleToUint64(buffer), mem_info);
}
}
ClearMemoryObjectBindings(device_data, HandleToUint64(buffer), kVulkanObjectTypeBuffer);
EraseQFOReleaseBarriers<VkBufferMemoryBarrier>(device_data, buffer);
GetBufferMap(device_data)->erase(buffer_state->buffer);
}
bool PreCallValidateDestroyBufferView(layer_data *device_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
VK_OBJECT *obj_struct) {
*buffer_view_state = GetBufferViewState(device_data, buffer_view);
*obj_struct = {HandleToUint64(buffer_view), kVulkanObjectTypeBufferView};
if (GetDisables(device_data)->destroy_buffer_view) return false;
bool skip = false;
if (*buffer_view_state) {
skip |= ValidateObjectNotInUse(device_data, *buffer_view_state, *obj_struct, "vkDestroyBufferView",
"VUID-vkDestroyBufferView-bufferView-00936");
}
return skip;
}
void PreCallRecordDestroyBufferView(layer_data *device_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
VK_OBJECT obj_struct) {
// Any bound cmd buffers are now invalid
InvalidateCommandBuffers(device_data, buffer_view_state->cb_bindings, obj_struct);
GetBufferViewMap(device_data)->erase(buffer_view);
}
bool PreCallValidateCmdFillBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(device_data, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
skip |= ValidateCmdQueueFlags(device_data, cb_node, "vkCmdFillBuffer()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdFillBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(device_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(device_data, buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdFillBuffer-dstBuffer-00029", "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= InsideRenderPass(device_data, cb_node, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-renderpass");
return skip;
}
void PreCallRecordCmdFillBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
// Update bindings between buffer and cmd buffer
AddCommandBufferBindingBuffer(device_data, cb_node, buffer_state);
}
bool ValidateBufferImageCopyData(const debug_report_data *report_data, uint32_t regionCount, const VkBufferImageCopy *pRegions,
IMAGE_STATE *image_state, const char *function) {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00199",
"%s(): pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
"and 1, respectively.",
function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
}
}
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-srcImage-00201",
"%s(): pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
"must be 0 and 1, respectively.",
function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
}
}
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-baseArrayLayer-00213",
"%s(): pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
"For 3D images these must be 0 and 1, respectively.",
function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount);
}
}
        // If the calling command's VkImage parameter's format is not a depth/stencil format,
// then bufferOffset must be a multiple of the calling command's VkImage parameter's texel size
auto texel_size = FormatSize(image_state->createInfo.format);
if (!FormatIsDepthAndStencil(image_state->createInfo.format) && SafeModulo(pRegions[i].bufferOffset, texel_size) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00193",
"%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple of this format's texel size (" PRINTF_SIZE_T_SPECIFIER ").",
function, i, pRegions[i].bufferOffset, texel_size);
}
// BufferOffset must be a multiple of 4
if (SafeModulo(pRegions[i].bufferOffset, 4) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00194",
"%s(): pRegion[%d] bufferOffset 0x%" PRIxLEAST64 " must be a multiple of 4.", function, i,
pRegions[i].bufferOffset);
}
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00195",
"%s(): pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
}
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00196",
"%s(): pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
}
// subresource aspectMask must have exactly 1 bit set
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(pRegions[i].imageSubresource.aspectMask);
if (aspect_mask_bits.count() != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00212",
"%s: aspectMasks for imageSubresource in each region must have only a single bit set.", function);
}
// image subresource aspect bit must match format
if (!VerifyAspectsPresent(pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format)) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-aspectMask-00211",
"%s(): pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
function, i, pRegions[i].imageSubresource.aspectMask, image_state->createInfo.format);
}
// Checks that apply only to compressed images
if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
auto block_size = FormatCompressedTexelBlockExtent(image_state->createInfo.format);
// BufferRowLength must be a multiple of block width
if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferRowLength-00203",
"%s(): pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d)..",
function, i, pRegions[i].bufferRowLength, block_size.width);
}
// BufferRowHeight must be a multiple of block height
if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferImageHeight-00204",
"%s(): pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d)..",
function, i, pRegions[i].bufferImageHeight, block_size.height);
}
// image offsets must be multiples of block dimensions
if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
(SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
(SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageOffset-00205",
"%s(): pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel "
"width & height (%d, %d)..",
function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width, block_size.height);
}
// bufferOffset must be a multiple of block size (linear bytes)
size_t block_size_in_bytes = FormatSize(image_state->createInfo.format);
if (SafeModulo(pRegions[i].bufferOffset, block_size_in_bytes) != 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-bufferOffset-00206",
"%s(): pRegion[%d] bufferOffset (0x%" PRIxLEAST64
") must be a multiple of the compressed image's texel block size (" PRINTF_SIZE_T_SPECIFIER ")..",
function, i, pRegions[i].bufferOffset, block_size_in_bytes);
}
// imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width
VkExtent3D mip_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) &&
(pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00207",
"%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width "
"(%d), or when added to offset.x (%d) must equal the image subresource width (%d)..",
function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x,
mip_extent.width);
}
// imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) &&
(pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00208",
"%s(): pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height "
"(%d), or when added to offset.y (%d) must equal the image subresource height (%d)..",
function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y,
mip_extent.height);
}
// imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) &&
(pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image_state->image), "VUID-VkBufferImageCopy-imageExtent-00209",
"%s(): pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth "
"(%d), or when added to offset.z (%d) must equal the image subresource depth (%d)..",
function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z,
mip_extent.depth);
}
}
}
return skip;
}
static bool ValidateImageBounds(const debug_report_data *report_data, const IMAGE_STATE *image_state, const uint32_t regionCount,
const VkBufferImageCopy *pRegions, const char *func_name, std::string msg_code) {
bool skip = false;
const VkImageCreateInfo *image_info = &(image_state->createInfo);
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D extent = pRegions[i].imageExtent;
VkOffset3D offset = pRegions[i].imageOffset;
if (IsExtentSizeZero(&extent)) // Warn on zero area subresource
{
skip |=
log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
kVUID_Core_Image_ZeroAreaSubregion, "%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area",
func_name, i, extent.width, extent.height, extent.depth);
}
VkExtent3D image_extent = GetImageSubresourceExtent(image_state, &(pRegions[i].imageSubresource));
// If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1)
if (FormatIsCompressed(image_info->format)) {
auto block_extent = FormatCompressedTexelBlockExtent(image_info->format);
if (image_extent.width % block_extent.width) {
image_extent.width += (block_extent.width - (image_extent.width % block_extent.width));
}
if (image_extent.height % block_extent.height) {
image_extent.height += (block_extent.height - (image_extent.height % block_extent.height));
}
if (image_extent.depth % block_extent.depth) {
image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth));
}
}
if (0 != ExceedsBounds(&offset, &extent, &image_extent)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
msg_code, "%s: pRegion[%d] exceeds image bounds..", func_name, i);
}
}
return skip;
}
static inline bool ValidateBufferBounds(const debug_report_data *report_data, IMAGE_STATE *image_state, BUFFER_STATE *buff_state,
uint32_t regionCount, const VkBufferImageCopy *pRegions, const char *func_name,
std::string msg_code) {
bool skip = false;
VkDeviceSize buffer_size = buff_state->createInfo.size;
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D copy_extent = pRegions[i].imageExtent;
VkDeviceSize buffer_width = (0 == pRegions[i].bufferRowLength ? copy_extent.width : pRegions[i].bufferRowLength);
VkDeviceSize buffer_height = (0 == pRegions[i].bufferImageHeight ? copy_extent.height : pRegions[i].bufferImageHeight);
VkDeviceSize unit_size = FormatSize(image_state->createInfo.format); // size (bytes) of texel or block
// Handle special buffer packing rules for specific depth/stencil formats
if (pRegions[i].imageSubresource.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
unit_size = FormatSize(VK_FORMAT_S8_UINT);
} else if (pRegions[i].imageSubresource.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
switch (image_state->createInfo.format) {
case VK_FORMAT_D16_UNORM_S8_UINT:
unit_size = FormatSize(VK_FORMAT_D16_UNORM);
break;
case VK_FORMAT_D32_SFLOAT_S8_UINT:
unit_size = FormatSize(VK_FORMAT_D32_SFLOAT);
break;
case VK_FORMAT_X8_D24_UNORM_PACK32: // Fall through
case VK_FORMAT_D24_UNORM_S8_UINT:
unit_size = 4;
break;
default:
break;
}
}
if (FormatIsCompressed(image_state->createInfo.format) || FormatIsSinglePlane_422(image_state->createInfo.format)) {
// Switch to texel block units, rounding up for any partially-used blocks
auto block_dim = FormatCompressedTexelBlockExtent(image_state->createInfo.format);
buffer_width = (buffer_width + block_dim.width - 1) / block_dim.width;
buffer_height = (buffer_height + block_dim.height - 1) / block_dim.height;
copy_extent.width = (copy_extent.width + block_dim.width - 1) / block_dim.width;
copy_extent.height = (copy_extent.height + block_dim.height - 1) / block_dim.height;
copy_extent.depth = (copy_extent.depth + block_dim.depth - 1) / block_dim.depth;
}
// Either depth or layerCount may be greater than 1 (not both). This is the number of 'slices' to copy
uint32_t z_copies = std::max(copy_extent.depth, pRegions[i].imageSubresource.layerCount);
        if (IsExtentSizeZero(&copy_extent) || (0 == z_copies)) {
// TODO: Issue warning here? Already warned in ValidateImageBounds()...
} else {
// Calculate buffer offset of final copied byte, + 1.
VkDeviceSize max_buffer_offset = (z_copies - 1) * buffer_height * buffer_width; // offset to slice
max_buffer_offset += ((copy_extent.height - 1) * buffer_width) + copy_extent.width; // add row,col
max_buffer_offset *= unit_size; // convert to bytes
max_buffer_offset += pRegions[i].bufferOffset; // add initial offset (bytes)
if (buffer_size < max_buffer_offset) {
skip |=
log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)0,
msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes..", func_name, i, buffer_size);
}
}
}
return skip;
}
bool PreCallValidateCmdCopyImageToBuffer(layer_data *device_data, VkImageLayout srcImageLayout, GLOBAL_CB_NODE *cb_node,
IMAGE_STATE *src_image_state, BUFFER_STATE *dst_buffer_state, uint32_t regionCount,
const VkBufferImageCopy *pRegions, const char *func_name) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = ValidateBufferImageCopyData(report_data, regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer");
// Validate command buffer state
skip |= ValidateCmd(device_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
// Command pool must support graphics, compute, or transfer operations
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
VkQueueFlags queue_flags = GetPhysDevProperties(device_data)->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool",
"Cannot call vkCmdCopyImageToBuffer() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(report_data, src_image_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00182");
skip |= ValidateBufferBounds(report_data, src_image_state, dst_buffer_state, regionCount, pRegions, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-pRegions-00183");
skip |= ValidateImageSampleCount(device_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage",
"VUID-vkCmdCopyImageToBuffer-srcImage-00188");
skip |= ValidateMemoryIsBoundToImage(device_data, src_image_state, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-srcImage-00187");
skip |= ValidateMemoryIsBoundToBuffer(device_data, dst_buffer_state, "vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-dstBuffer-00192");
// Validate that SRC image & DST buffer have correct usage flags set
skip |= ValidateImageUsageFlags(device_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyImageToBuffer-srcImage-00186", "vkCmdCopyImageToBuffer()",
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateBufferUsageFlags(device_data, dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyImageToBuffer-dstBuffer-00191", "vkCmdCopyImageToBuffer()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
if (GetApiVersion(device_data) >= VK_API_VERSION_1_1 || GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(device_data, src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT,
"vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-srcImage-01998",
"VUID-vkCmdCopyImageToBuffer-srcImage-01998");
}
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass");
bool hit_error = false;
const std::string src_invalid_layout_vuid =
(src_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(device_data, cb_node, &pRegions[i].imageSubresource, "vkCmdCopyImageToBuffer()",
"imageSubresource", i);
skip |= VerifyImageLayout(device_data, cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "vkCmdCopyImageToBuffer()", src_invalid_layout_vuid,
"VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(device_data, cb_node, src_image_state, &pRegions[i], i,
"vkCmdCopyImageToBuffer()",
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794");
skip |= ValidateImageMipLevel(device_data, cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i,
"vkCmdCopyImageToBuffer()", "imageSubresource",
"VUID-vkCmdCopyImageToBuffer-imageSubresource-01703");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyImageToBuffer()",
"imageSubresource", "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704");
}
return skip;
}
void PreCallRecordCmdCopyImageToBuffer(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *src_image_state,
BUFFER_STATE *dst_buffer_state, uint32_t region_count, const VkBufferImageCopy *regions,
VkImageLayout src_image_layout) {
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < region_count; ++i) {
SetImageLayout(device_data, cb_node, src_image_state, regions[i].imageSubresource, src_image_layout);
}
// Update bindings between buffer/image and cmd buffer
AddCommandBufferBindingImage(device_data, cb_node, src_image_state);
AddCommandBufferBindingBuffer(device_data, cb_node, dst_buffer_state);
}
bool PreCallValidateCmdCopyBufferToImage(layer_data *device_data, VkImageLayout dstImageLayout, GLOBAL_CB_NODE *cb_node,
BUFFER_STATE *src_buffer_state, IMAGE_STATE *dst_image_state, uint32_t regionCount,
const VkBufferImageCopy *pRegions, const char *func_name) {
const debug_report_data *report_data = core_validation::GetReportData(device_data);
bool skip = ValidateBufferImageCopyData(report_data, regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage");
// Validate command buffer state
skip |= ValidateCmd(device_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
// Command pool must support graphics, compute, or transfer operations
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
VkQueueFlags queue_flags = GetPhysDevProperties(device_data)->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool",
"Cannot call vkCmdCopyBufferToImage() on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities..");
}
skip |= ValidateImageBounds(report_data, dst_image_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00172");
skip |= ValidateBufferBounds(report_data, dst_image_state, src_buffer_state, regionCount, pRegions, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-pRegions-00171");
skip |= ValidateImageSampleCount(device_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage",
"VUID-vkCmdCopyBufferToImage-dstImage-00179");
skip |= ValidateMemoryIsBoundToBuffer(device_data, src_buffer_state, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-srcBuffer-00176");
skip |= ValidateMemoryIsBoundToImage(device_data, dst_image_state, "vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-dstImage-00178");
skip |= ValidateBufferUsageFlags(device_data, src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-vkCmdCopyBufferToImage-srcBuffer-00174", "vkCmdCopyBufferToImage()",
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateImageUsageFlags(device_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyBufferToImage-dstImage-00177", "vkCmdCopyBufferToImage()",
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
if (GetApiVersion(device_data) >= VK_API_VERSION_1_1 || GetDeviceExtensions(device_data)->vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(device_data, dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
"vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-dstImage-01997",
"VUID-vkCmdCopyBufferToImage-dstImage-01997");
}
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass");
bool hit_error = false;
const std::string dst_invalid_layout_vuid =
(dst_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181";
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(device_data, cb_node, &pRegions[i].imageSubresource, "vkCmdCopyBufferToImage()",
"imageSubresource", i);
skip |= VerifyImageLayout(device_data, cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "vkCmdCopyBufferToImage()", dst_invalid_layout_vuid,
"VUID-vkCmdCopyBufferToImage-dstImageLayout-00180", &hit_error);
skip |= ValidateCopyBufferImageTransferGranularityRequirements(device_data, cb_node, dst_image_state, &pRegions[i], i,
"vkCmdCopyBufferToImage()",
"VUID-vkCmdCopyBufferToImage-imageOffset-01793");
skip |= ValidateImageMipLevel(device_data, cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i,
"vkCmdCopyBufferToImage()", "imageSubresource",
"VUID-vkCmdCopyBufferToImage-imageSubresource-01701");
skip |= ValidateImageArrayLayerRange(device_data, cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, "vkCmdCopyBufferToImage()",
"imageSubresource", "VUID-vkCmdCopyBufferToImage-imageSubresource-01702");
}
return skip;
}
void PreCallRecordCmdCopyBufferToImage(layer_data *device_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *src_buffer_state,
IMAGE_STATE *dst_image_state, uint32_t region_count, const VkBufferImageCopy *regions,
VkImageLayout dst_image_layout) {
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < region_count; ++i) {
SetImageLayout(device_data, cb_node, dst_image_state, regions[i].imageSubresource, dst_image_layout);
}
AddCommandBufferBindingBuffer(device_data, cb_node, src_buffer_state);
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
bool PreCallValidateGetImageSubresourceLayout(layer_data *device_data, VkImage image, const VkImageSubresource *pSubresource) {
const auto report_data = core_validation::GetReportData(device_data);
bool skip = false;
const VkImageAspectFlags sub_aspect = pSubresource->aspectMask;
// The aspectMask member of pSubresource must only have a single bit set
const int num_bits = sizeof(sub_aspect) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(sub_aspect);
if (aspect_mask_bits.count() != 1) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-aspectMask-00997",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set.");
}
IMAGE_STATE *image_entry = GetImageState(device_data, image);
if (!image_entry) {
return skip;
}
// image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-image-00996",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR.");
}
// mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-mipLevel-01716",
"vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel,
image_entry->createInfo.mipLevels);
}
// arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-vkGetImageSubresourceLayout-arrayLayer-01717",
"vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.",
pSubresource->arrayLayer, image_entry->createInfo.arrayLayers);
}
// subresource's aspect must be compatible with image's format.
const VkFormat img_format = image_entry->createInfo.format;
if (FormatIsMultiplane(img_format)) {
VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT_KHR | VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
std::string vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version
if (FormatPlaneCount(img_format) > 2u) {
allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version
}
if (sub_aspect != (sub_aspect & allowed_flags)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), vuid,
"vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32
") must be a single-plane specifier flag.",
sub_aspect);
}
} else if (FormatIsColor(img_format)) {
if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= log_msg(
report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(image),
"VUID-VkImageSubresource-aspectMask-parameter",
"vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be VK_IMAGE_ASPECT_COLOR.");
}
} else if (FormatIsDepthOrStencil(img_format)) {
if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
HandleToUint64(image), "VUID-VkImageSubresource-aspectMask-parameter",
"vkGetImageSubresourceLayout(): For depth/stencil formats, VkImageSubresource.aspectMask must be "
"either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT.");
}
}
return skip;
}
| 1 | 9,328 |
This test already exists in the object_tracker layer.
|
KhronosGroup-Vulkan-ValidationLayers
|
cpp
|
@@ -64,7 +64,8 @@ public class EthGetCode extends AbstractBlockParameterOrBlockHashMethod {
@Override
protected String resultByBlockHash(final JsonRpcRequestContext request, final Hash blockHash) {
final Address address = request.getRequiredParameter(0, Address.class);
- if (GoQuorumOptions.goQuorumCompatibilityMode && privacyParameters.isPresent()) {
+ final boolean isGoQuorumCompatibilityMode = GoQuorumOptions.goQuorumCompatibilityMode;
+ if (isGoQuorumCompatibilityMode && privacyParameters.isPresent()) {
// get from private state if we can
final Optional<BlockHeader> blockHeader =
blockchainQueries.get().getBlockHeaderByHash(blockHash);
| 1 |
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods;
import static org.hyperledger.besu.ethereum.goquorum.GoQuorumPrivateStateUtil.getPrivateWorldStateAtBlock;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.parameters.BlockParameterOrBlockHash;
import org.hyperledger.besu.ethereum.api.query.BlockchainQueries;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.evm.account.Account;
import java.util.Optional;
import java.util.function.Supplier;
import org.apache.tuweni.bytes.Bytes;
public class EthGetCode extends AbstractBlockParameterOrBlockHashMethod {
final Optional<PrivacyParameters> privacyParameters;
public EthGetCode(
final BlockchainQueries blockchainQueries,
final Optional<PrivacyParameters> privacyParameters) {
super(blockchainQueries);
this.privacyParameters = privacyParameters;
}
public EthGetCode(
final Supplier<BlockchainQueries> blockchainQueries,
final Optional<PrivacyParameters> privacyParameters) {
super(blockchainQueries);
this.privacyParameters = privacyParameters;
}
@Override
public String getName() {
return RpcMethod.ETH_GET_CODE.getMethodName();
}
@Override
protected BlockParameterOrBlockHash blockParameterOrBlockHash(
final JsonRpcRequestContext request) {
return request.getRequiredParameter(1, BlockParameterOrBlockHash.class);
}
@Override
protected String resultByBlockHash(final JsonRpcRequestContext request, final Hash blockHash) {
final Address address = request.getRequiredParameter(0, Address.class);
if (GoQuorumOptions.goQuorumCompatibilityMode && privacyParameters.isPresent()) {
// get from private state if we can
final Optional<BlockHeader> blockHeader =
blockchainQueries.get().getBlockHeaderByHash(blockHash);
if (blockHeader.isPresent()) {
final MutableWorldState privateState =
getPrivateWorldStateAtBlock(
privacyParameters.get().getGoQuorumPrivacyParameters(), blockHeader.get());
final Account privAccount = privateState.get(address);
if (privAccount != null) {
return privAccount.getCode().toHexString();
}
}
}
return getBlockchainQueries().getCode(address, blockHash).map(Bytes::toString).orElse(null);
}
}
| 1 | 26,053 |
Same here: can we presume that if `privacyParameters` is set we need goQuorum handling? Or is there a goQuorum sub-option in `privacyParameters`?
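For illustration, a minimal sketch of the guard this question is getting at, assuming `getGoQuorumPrivacyParameters()` (already called in `resultByBlockHash` below) returns an `Optional`; the names are taken from this file, but the snippet is only a sketch, not a drop-in change:

```java
// Hypothetical guard: key off the privacy parameters themselves rather than the
// global GoQuorumOptions flag. Assumes getGoQuorumPrivacyParameters() returns an Optional.
final boolean goQuorumPrivacyEnabled =
    privacyParameters.isPresent()
        && privacyParameters.get().getGoQuorumPrivacyParameters().isPresent();
if (goQuorumPrivacyEnabled) {
  // get from private state if we can
}
```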
|
hyperledger-besu
|
java
|
@@ -122,6 +122,10 @@ class FastAppend extends SnapshotProducer<AppendFiles> implements AppendFiles {
public List<ManifestFile> apply(TableMetadata base) {
List<ManifestFile> newManifests = Lists.newArrayList();
+ if (base.currentSnapshot() != null) {
+ newManifests.addAll(base.currentSnapshot().deleteManifests());
+ }
+
try {
ManifestFile manifest = writeManifest();
if (manifest != null) {
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.iceberg.events.CreateSnapshotEvent;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import static org.apache.iceberg.TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED;
import static org.apache.iceberg.TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT;
/**
* {@link AppendFiles Append} implementation that adds a new manifest file for the write.
* <p>
* This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
*/
class FastAppend extends SnapshotProducer<AppendFiles> implements AppendFiles {
private final String tableName;
private final TableOperations ops;
private final PartitionSpec spec;
private final boolean snapshotIdInheritanceEnabled;
private final SnapshotSummary.Builder summaryBuilder = SnapshotSummary.builder();
private final List<DataFile> newFiles = Lists.newArrayList();
private final List<ManifestFile> appendManifests = Lists.newArrayList();
private final List<ManifestFile> rewrittenAppendManifests = Lists.newArrayList();
private ManifestFile newManifest = null;
private boolean hasNewFiles = false;
FastAppend(String tableName, TableOperations ops) {
super(ops);
this.tableName = tableName;
this.ops = ops;
this.spec = ops.current().spec();
this.snapshotIdInheritanceEnabled = ops.current()
.propertyAsBoolean(SNAPSHOT_ID_INHERITANCE_ENABLED, SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT);
}
@Override
protected AppendFiles self() {
return this;
}
@Override
public AppendFiles set(String property, String value) {
summaryBuilder.set(property, value);
return this;
}
@Override
protected String operation() {
return DataOperations.APPEND;
}
@Override
protected Map<String, String> summary() {
return summaryBuilder.build();
}
@Override
public FastAppend appendFile(DataFile file) {
this.hasNewFiles = true;
newFiles.add(file);
summaryBuilder.addedFile(spec, file);
return this;
}
@Override
public FastAppend appendManifest(ManifestFile manifest) {
Preconditions.checkArgument(!manifest.hasExistingFiles(), "Cannot append manifest with existing files");
Preconditions.checkArgument(!manifest.hasDeletedFiles(), "Cannot append manifest with deleted files");
Preconditions.checkArgument(
manifest.snapshotId() == null || manifest.snapshotId() == -1,
"Snapshot id must be assigned during commit");
if (snapshotIdInheritanceEnabled && manifest.snapshotId() == null) {
summaryBuilder.addedManifest(manifest);
appendManifests.add(manifest);
} else {
// the manifest must be rewritten with this update's snapshot ID
ManifestFile copiedManifest = copyManifest(manifest);
rewrittenAppendManifests.add(copiedManifest);
}
return this;
}
private ManifestFile copyManifest(ManifestFile manifest) {
TableMetadata current = ops.current();
InputFile toCopy = ops.io().newInputFile(manifest.path());
OutputFile newManifestPath = newManifestOutput();
return ManifestFiles.copyAppendManifest(
current.formatVersion(), toCopy, current.specsById(), newManifestPath, snapshotId(), summaryBuilder);
}
@Override
public List<ManifestFile> apply(TableMetadata base) {
List<ManifestFile> newManifests = Lists.newArrayList();
try {
ManifestFile manifest = writeManifest();
if (manifest != null) {
newManifests.add(manifest);
}
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to write manifest");
}
// TODO: add sequence numbers here
Iterable<ManifestFile> appendManifestsWithMetadata = Iterables.transform(
Iterables.concat(appendManifests, rewrittenAppendManifests),
manifest -> GenericManifestFile.copyOf(manifest).withSnapshotId(snapshotId()).build());
Iterables.addAll(newManifests, appendManifestsWithMetadata);
if (base.currentSnapshot() != null) {
newManifests.addAll(base.currentSnapshot().manifests());
}
return newManifests;
}
@Override
public Object updateEvent() {
long snapshotId = snapshotId();
long sequenceNumber = ops.current().snapshot(snapshotId).sequenceNumber();
return new CreateSnapshotEvent(
tableName,
operation(),
snapshotId,
sequenceNumber,
summary());
}
@Override
protected void cleanUncommitted(Set<ManifestFile> committed) {
if (newManifest != null && !committed.contains(newManifest)) {
deleteFile(newManifest.path());
}
// clean up only rewrittenAppendManifests as they are always owned by the table
// don't clean up appendManifests as they are added to the manifest list and are not compacted
for (ManifestFile manifest : rewrittenAppendManifests) {
if (!committed.contains(manifest)) {
deleteFile(manifest.path());
}
}
}
private ManifestFile writeManifest() throws IOException {
if (hasNewFiles && newManifest != null) {
deleteFile(newManifest.path());
newManifest = null;
}
if (newManifest == null && newFiles.size() > 0) {
ManifestWriter writer = newManifestWriter(spec);
try {
writer.addAll(newFiles);
} finally {
writer.close();
}
this.newManifest = writer.toManifestFile();
hasNewFiles = false;
}
return newManifest;
}
}
| 1 | 20,258 |
The addition of delete files in `newManifests` far above the addition of data files threw me a bit. Is it intentional to ensure the delete files are at the front of the list?
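For comparison, here is a sketch of the alternative this question implies, keeping all carried-over manifests together at the tail of `apply` instead of putting the delete manifests first; it reuses `manifests()` and `deleteManifests()` as they appear in this patch and is illustrative only, not a tested change:

```java
// Hypothetical rearrangement inside FastAppend.apply(): append the snapshot's existing
// delete manifests alongside its existing data manifests, rather than at the front.
if (base.currentSnapshot() != null) {
  newManifests.addAll(base.currentSnapshot().manifests());        // existing data manifests
  newManifests.addAll(base.currentSnapshot().deleteManifests());  // existing delete manifests
}
```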
|
apache-iceberg
|
java
|
@@ -68,7 +68,7 @@ module Bolt
# though, since that will resolve any nested references and we want to
# leave it to the group to do that lazily.
groups = @plugins.resolve_top_level_references(groups)
-
+ raise ValidationError.new("Groups must be an Array", nil) unless groups.is_a?(Array)
@groups = Array(groups).map { |g| Group.new(g, plugins) }
end
| 1 |
# frozen_string_literal: true
require 'bolt/inventory/group'
require 'bolt/inventory/inventory'
require 'bolt/inventory/target'
module Bolt
class Inventory
class Group
attr_accessor :name, :groups
# Regex used to validate group names and target aliases.
NAME_REGEX = /\A[a-z0-9_][a-z0-9_-]*\Z/.freeze
# NOTE: All keys should have a corresponding schema property in schemas/bolt-inventory.schema.json
DATA_KEYS = %w[config facts vars features plugin_hooks].freeze
TARGET_KEYS = DATA_KEYS + %w[name alias uri]
GROUP_KEYS = DATA_KEYS + %w[name groups targets]
CONFIG_KEYS = Bolt::Config::INVENTORY_OPTIONS.keys
def initialize(input, plugins)
@logger = Bolt::Logger.logger(self)
@plugins = plugins
input = @plugins.resolve_top_level_references(input) if @plugins.reference?(input)
raise ValidationError.new("Group does not have a name", nil) unless input.key?('name')
@name = @plugins.resolve_references(input['name'])
raise ValidationError.new("Group name must be a String, not #{@name.inspect}", nil) unless @name.is_a?(String)
raise ValidationError.new("Invalid group name #{@name}", @name) unless @name =~ NAME_REGEX
validate_group_input(input)
@input = input
validate_data_keys(@input)
targets = @plugins.resolve_top_level_references(input.fetch('targets', []))
@unresolved_targets = {}
@resolved_targets = {}
@aliases = {}
@string_targets = []
Array(targets).each do |target|
# If target is a string, it can either be trivially defining a target
# or it could be a name/alias of a target defined in another group.
# We can't tell the difference until all groups have been resolved,
# so we store the string on its own here and process it later.
case target
when String
@string_targets << target
# Handle plugins at this level so that lookups cannot trigger recursive lookups
when Hash
add_target_definition(target)
else
raise ValidationError.new("Target entry must be a String or Hash, not #{target.class}", @name)
end
end
groups = input.fetch('groups', [])
# 'groups' can be a _plugin reference, in which case we want to resolve
# it. That can itself return a reference, so we want to keep resolving
# them until we have a value. We don't just use resolve_references
# though, since that will resolve any nested references and we want to
# leave it to the group to do that lazily.
groups = @plugins.resolve_top_level_references(groups)
@groups = Array(groups).map { |g| Group.new(g, plugins) }
end
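# Resolve (and cache) the data for a single target the first time it is requested.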
def target_data(target_name)
if @unresolved_targets.key?(target_name)
target = @unresolved_targets.delete(target_name)
resolved_data = resolve_data_keys(target, target_name).merge(
'name' => target['name'],
'uri' => target['uri'],
'alias' => target['alias'],
# groups come from group_data
'groups' => []
)
@resolved_targets[target_name] = resolved_data
else
@resolved_targets[target_name]
end
end
def all_target_names
@unresolved_targets.keys + @resolved_targets.keys
end
def add_target_definition(target)
# This check ensures target lookup plugins do not return bare strings.
# Remove it if we decide to allow task plugins to return string Target
# names.
unless target.is_a?(Hash)
raise ValidationError.new("Target entry must be a Hash, not #{target.class}", @name)
end
target['name'] = @plugins.resolve_references(target['name']) if target.key?('name')
target['uri'] = @plugins.resolve_references(target['uri']) if target.key?('uri')
target['alias'] = @plugins.resolve_references(target['alias']) if target.key?('alias')
t_name = target['name'] || target['uri']
if t_name.nil? || t_name.empty?
raise ValidationError.new("No name or uri for target: #{target}", @name)
end
unless t_name.is_a? String
raise ValidationError.new("Target name must be a String, not #{t_name.class}", @name)
end
unless t_name.ascii_only?
raise ValidationError.new("Target name must be ASCII characters: #{target}", @name)
end
if contains_target?(t_name)
@logger.debug("Ignoring duplicate target in #{@name}: #{target}")
return
end
unless (unexpected_keys = target.keys - TARGET_KEYS).empty?
msg = "Found unexpected key(s) #{unexpected_keys.join(', ')} in target #{t_name}"
@logger.warn(msg)
end
validate_data_keys(target, t_name)
if target.include?('alias')
aliases = target['alias']
aliases = [aliases] if aliases.is_a?(String)
unless aliases.is_a?(Array)
msg = "Alias entry on #{t_name} must be a String or Array, not #{aliases.class}"
raise ValidationError.new(msg, @name)
end
insert_alia(t_name, aliases)
end
@unresolved_targets[t_name] = target
end
def remove_target(target)
@resolved_targets.delete(target.name)
@unresolved_targets.delete(target.name)
end
def add_target(target)
@resolved_targets[target.name] = { 'name' => target.name }
end
def insert_alia(target_name, aliases)
aliases.each do |alia|
raise ValidationError.new("Invalid alias #{alia}", @name) unless alia =~ NAME_REGEX
if (found = @aliases[alia])
raise ValidationError.new(alias_conflict(alia, found, target_name), @name)
end
@aliases[alia] = target_name
end
end
def clear_alia(target_name)
@aliases.reject! { |_alias, name| name == target_name }
end
def data_merge(data1, data2)
if data2.nil? || data1.nil?
return data2 || data1
end
{
'config' => Bolt::Util.deep_merge(data1['config'], data2['config']),
'name' => data1['name'] || data2['name'],
'uri' => data1['uri'] || data2['uri'],
# Collect all aliases across all groups for each target uri
'alias' => [*data1['alias'], *data2['alias']],
# Shallow merge instead of deep merge so that vars with a hash value
# are assigned a new hash, rather than merging the existing value
# with the value meant to replace it
'vars' => data1['vars'].merge(data2['vars']),
'facts' => Bolt::Util.deep_merge(data1['facts'], data2['facts']),
'features' => data1['features'] | data2['features'],
'plugin_hooks' => data1['plugin_hooks'].merge(data2['plugin_hooks']),
'groups' => data2['groups'] + data1['groups']
}
end
def resolve_string_targets(aliases, known_targets)
@string_targets.each do |string_target|
# If this is the name of a target defined elsewhere, then insert the
# target into this group as just a name. Otherwise, add a new target
# with the string as the URI.
if known_targets.include?(string_target)
@unresolved_targets[string_target] = { 'name' => string_target }
# If this is an alias for an existing target, then add it to this group
elsif (canonical_name = aliases[string_target])
if contains_target?(canonical_name)
@logger.debug("Ignoring duplicate target in #{@name}: #{canonical_name}")
else
@unresolved_targets[canonical_name] = { 'name' => canonical_name }
end
# If it's not the name or alias of an existing target, then make a
# new target using the string as the URI
elsif contains_target?(string_target)
@logger.debug("Ignoring duplicate target in #{@name}: #{string_target}")
else
@unresolved_targets[string_target] = { 'uri' => string_target }
end
end
@groups.each { |g| g.resolve_string_targets(aliases, known_targets) }
end
private def alias_conflict(name, target1, target2)
"Alias #{name} refers to multiple targets: #{target1} and #{target2}"
end
private def group_alias_conflict(name)
"Group #{name} conflicts with alias of the same name"
end
private def group_target_conflict(name)
"Group #{name} conflicts with target of the same name"
end
private def alias_target_conflict(name)
"Target name #{name} conflicts with alias of the same name"
end
def validate_group_input(input)
raise ValidationError.new("Expected group to be a Hash, not #{input.class}", nil) unless input.is_a?(Hash)
# DEPRECATION: remove this before finalization
if input.key?('target-lookups')
msg = "'target-lookups' are no longer a separate key. Merge 'target-lookups' and 'targets' lists and replace 'plugin' with '_plugin'" # rubocop:disable Layout/LineLength
raise ValidationError.new(msg, @name)
end
if input.key?('nodes')
command = Bolt::Util.powershell? ? 'Update-BoltProject' : 'bolt project migrate'
msg = <<~MSG.chomp
Found 'nodes' key in group #{@name}. This looks like a v1 inventory file, which is
no longer supported by Bolt. Migrate to a v2 inventory file automatically using
'#{command}'.
MSG
raise ValidationError.new(msg, nil)
end
unless (unexpected_keys = input.keys - GROUP_KEYS).empty?
msg = "Found unexpected key(s) #{unexpected_keys.join(', ')} in group #{@name}"
@logger.warn(msg)
end
Bolt::Util.walk_keys(input) do |key|
if @plugins.reference?(key)
raise ValidationError.new("Group keys cannot be specified as _plugin references", @name)
else
key
end
end
end
def validate(used_group_names = Set.new, used_target_names = Set.new, used_aliases = {})
# Test if this group name conflicts with anything used before.
raise ValidationError.new("Tried to redefine group #{@name}", @name) if used_group_names.include?(@name)
raise ValidationError.new(group_target_conflict(@name), @name) if used_target_names.include?(@name)
raise ValidationError.new(group_alias_conflict(@name), @name) if used_aliases.include?(@name)
used_group_names << @name
# Collect target names and aliases into a list used to validate that subgroups don't conflict.
# The used-name sets ensure that previously used group names don't conflict with new target names/aliases.
@unresolved_targets.merge(@resolved_targets).each do |t_name, t_data|
# Require targets to be parseable as a Target.
begin
# Catch malformed URI here
Bolt::Inventory::Target.parse_uri(t_data['uri'])
rescue Bolt::ParseError => e
@logger.debug(e)
raise ValidationError.new("Invalid target uri #{t_data['uri']}", @name)
end
raise ValidationError.new(group_target_conflict(t_name), @name) if used_group_names.include?(t_name)
if used_aliases.include?(t_name)
raise ValidationError.new(alias_target_conflict(t_name), @name)
end
used_target_names << t_name
end
@aliases.each do |n, target|
raise ValidationError.new(group_alias_conflict(n), @name) if used_group_names.include?(n)
if used_target_names.include?(n)
raise ValidationError.new(alias_target_conflict(n), @name)
end
if used_aliases.include?(n)
raise ValidationError.new(alias_conflict(n, target, used_aliases[n]), @name)
end
used_aliases[n] = target
end
@groups.each do |g|
g.validate(used_group_names, used_target_names, used_aliases)
rescue ValidationError => e
e.add_parent(@name)
raise e
end
nil
end
def resolve_data_keys(data, target = nil)
result = {
'config' => @plugins.resolve_references(data.fetch('config', {})),
'vars' => @plugins.resolve_references(data.fetch('vars', {})),
'facts' => @plugins.resolve_references(data.fetch('facts', {})),
'features' => @plugins.resolve_references(data.fetch('features', [])),
'plugin_hooks' => @plugins.resolve_references(data.fetch('plugin_hooks', {}))
}
validate_data_keys(result, target)
result['features'] = Set.new(result['features'].flatten)
result
end
def validate_data_keys(data, target = nil)
{
'config' => Hash,
'vars' => Hash,
'facts' => Hash,
'features' => Array,
'plugin_hooks' => Hash
}.each do |key, expected_type|
next if !data.key?(key) || data[key].is_a?(expected_type) || @plugins.reference?(data[key])
msg = +"Expected #{key} to be of type #{expected_type}, not #{data[key].class}"
msg << " for target #{target}" if target
raise ValidationError.new(msg, @name)
end
unless @plugins.reference?(data['config'])
unexpected_keys = data.fetch('config', {}).keys - CONFIG_KEYS
if unexpected_keys.any?
msg = +"Found unexpected key(s) #{unexpected_keys.join(', ')} in config for"
msg << " target #{target} in" if target
msg << " group #{@name}"
@logger.warn(msg)
end
end
end
def group_data
@group_data ||= resolve_data_keys(@input).merge('groups' => [@name])
end
# Returns targets contained directly within the group, ignoring subgroups
def local_targets
Set.new(@unresolved_targets.keys) + Set.new(@resolved_targets.keys)
end
def contains_target?(target_name)
@unresolved_targets.key?(target_name) || @resolved_targets.key?(target_name)
end
# Returns all targets contained within the group, which includes targets from subgroups.
def all_targets
@groups.inject(local_targets) do |acc, g|
acc.merge(g.all_targets)
end
end
# Returns a mapping of aliases to targets contained within the group, which includes subgroups.
def target_aliases
@groups.inject(@aliases) do |acc, g|
acc.merge(g.target_aliases)
end
end
# Return a mapping of group names to group.
def collect_groups
@groups.inject(name => self) do |acc, g|
acc.merge(g.collect_groups)
end
end
def target_collect(target_name)
child_data = @groups.map { |group| group.target_collect(target_name) }
# Data from earlier groups wins
child_result = child_data.inject do |acc, group_data|
data_merge(group_data, acc)
end
# Children override the parent
data_merge(target_data(target_name), child_result)
end
def group_collect(target_name)
child_data = @groups.map { |group| group.group_collect(target_name) }
# Data from earlier groups wins
child_result = child_data.inject do |acc, group_data|
data_merge(group_data, acc)
end
# If this group has the target or one of the child groups has the
# target, return the data, otherwise return nil
if child_result || contains_target?(target_name)
# Children override the parent
data_merge(group_data, child_result)
end
end
end
end
end
| 1 | 17,183 |
Interesting. The call to `Array(groups)` below this would seem to indicate this doesn't _have_ to be an array. I think the actual problem is that passing a hash to `Array()` converts the hash to an array of key/value pairs rather than wrapping it in an array. So maybe `Array(groups)` should be replaced with `groups = [groups] unless groups.is_a?(Array)`.
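For illustration, a quick IRB check (the `'web'` group hash is just a made-up example) of why `Array()` misbehaves on a hash here:

```ruby
Array({ 'name' => 'web' })  # => [["name", "web"]]   hash flattened into key/value pairs
[{ 'name' => 'web' }]       # => [{"name"=>"web"}]   the wrapping we actually want
```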
|
puppetlabs-bolt
|
rb
|
@@ -140,4 +140,10 @@ public class TableProperties {
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
+
+ public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms";
+ public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days
+
+ public static final String RETAIN_MIN_NUM_SNAPSHOTS = "history.expire.retain-min-num-snapshots";
+ public static final int RETAIN_MIN_NUM_SNAPSHOTS_DEFAULT = 1;
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables deleting the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = Long.MAX_VALUE;
public static final String WRITE_PARTITIONED_FANOUT_ENABLED = "write.partitioned.fanout.enabled";
public static final boolean WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
}
| 1 | 29,316 |
This is not a particularly good default but we need it to keep the old behavior.
|
apache-iceberg
|
java
|
@@ -117,13 +117,6 @@ func TestPaymentBrokerRedeemWithCondition(t *testing.T) {
blockHeightParam := types.NewBlockHeight(43)
redeemerParams := []interface{}{blockHeightParam}
- sys := setup(t)
- require.NoError(t, sys.st.SetActor(context.TODO(), toAddress, actor.NewActor(pbTestActorCid, types.NewZeroAttoFIL())))
-
- callRedeem := func(condition *types.Predicate, params []interface{}) (*consensus.ApplicationResult, error) {
- return sys.applySignatureMessage(sys.target, 100, types.NewBlockHeight(0), 0, "redeem", 0, condition, params...)
- }
-
// All the following tests attempt to call PBTestActor.ParamsNotZero with a condition.
// PBTestActor.ParamsNotZero takes 3 parameters: an Address, a uint64 sector id, and a BlockHeight
// If any of these are zero values the method throws an error indicating the condition is false.
| 1 |
package paymentbroker_test
import (
"context"
"fmt"
"math/big"
"strings"
"testing"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
"github.com/ipfs/go-hamt-ipld"
"github.com/ipfs/go-ipfs-blockstore"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/actor/builtin"
. "github.com/filecoin-project/go-filecoin/actor/builtin/paymentbroker"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/state"
th "github.com/filecoin-project/go-filecoin/testhelpers"
tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm"
"github.com/filecoin-project/go-filecoin/vm/errors"
)
var mockSigner, _ = types.NewMockSignersAndKeyInfo(10)
var pbTestActorCid = types.NewCidForTestGetter()()
func TestPaymentBrokerGenesis(t *testing.T) {
tf.UnitTest(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, st, _ := requireGenesis(ctx, t, address.NewForTestGetter()())
paymentBroker := state.MustGetActor(st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(0), paymentBroker.Balance)
}
func TestPaymentBrokerCreateChannel(t *testing.T) {
tf.UnitTest(t)
ctx := context.Background()
payer := address.TestAddress
target := address.NewForTestGetter()()
_, st, vms := requireGenesis(ctx, t, target)
pdata := core.MustConvertParams(target, big.NewInt(10))
msg := types.NewMessage(payer, address.PaymentBrokerAddress, 0, types.NewAttoFILFromFIL(1000), "createChannel", pdata)
result, err := th.ApplyTestMessage(st, vms, msg, types.NewBlockHeight(0))
require.NoError(t, err)
require.NoError(t, result.ExecutionError)
st.Flush(ctx)
channelID := types.NewChannelIDFromBytes(result.Receipt.Return[0])
paymentBroker := state.MustGetActor(st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(1000), paymentBroker.Balance)
channel := requireGetPaymentChannel(t, ctx, st, vms, payer, channelID)
assert.Equal(t, types.NewAttoFILFromFIL(1000), channel.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(0), channel.AmountRedeemed)
assert.Equal(t, target, channel.Target)
assert.Equal(t, types.NewBlockHeight(10), channel.AgreedEol)
assert.Equal(t, types.NewBlockHeight(10), channel.Eol)
}
func TestPaymentBrokerUpdate(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
result, err := sys.ApplyRedeemMessage(sys.target, 100, 0)
require.NoError(t, err)
require.Equal(t, uint8(0), result.Receipt.ExitCode)
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(900), paymentBroker.Balance)
payee := state.MustGetActor(sys.st, sys.target)
assert.Equal(t, types.NewAttoFILFromFIL(100), payee.Balance)
channel := sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewAttoFILFromFIL(1000), channel.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(100), channel.AmountRedeemed)
assert.Equal(t, sys.target, channel.Target)
}
func TestPaymentBrokerRedeemWithCondition(t *testing.T) {
tf.UnitTest(t)
addrGetter := address.NewForTestGetter()
toAddress := addrGetter()
method := "paramsNotZero"
addrParam := addrGetter()
sectorIdParam := uint64(6)
payerParams := []interface{}{addrParam, sectorIdParam}
blockHeightParam := types.NewBlockHeight(43)
redeemerParams := []interface{}{blockHeightParam}
sys := setup(t)
require.NoError(t, sys.st.SetActor(context.TODO(), toAddress, actor.NewActor(pbTestActorCid, types.NewZeroAttoFIL())))
callRedeem := func(condition *types.Predicate, params []interface{}) (*consensus.ApplicationResult, error) {
return sys.applySignatureMessage(sys.target, 100, types.NewBlockHeight(0), 0, "redeem", 0, condition, params...)
}
// All the following tests attempt to call PBTestActor.ParamsNotZero with a condition.
// PBTestActor.ParamsNotZero takes 3 parameters: an Address, a uint64 sector id, and a BlockHeight
// If any of these are zero values the method throws an error indicating the condition is false.
// The Address and the sector id will be included within the condition predicate, and the block
// height will be added as a redeemer supplied parameter to redeem.
t.Run("Redeem should succeed if condition is met", func(t *testing.T) {
condition := &types.Predicate{To: toAddress, Method: method, Params: payerParams}
appResult, err := callRedeem(condition, redeemerParams)
require.NoError(t, err)
require.NoError(t, appResult.ExecutionError)
})
t.Run("Redeem should fail if condition is _NOT_ met", func(t *testing.T) {
badAddressParam := address.Undef
badParams := []interface{}{badAddressParam, sectorIdParam}
condition := &types.Predicate{To: toAddress, Method: method, Params: badParams}
appResult, err := callRedeem(condition, redeemerParams)
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: got undefined address")
})
t.Run("Redeem should fail if condition goes to non-existent actor", func(t *testing.T) {
badToAddress := addrGetter()
condition := &types.Predicate{To: badToAddress, Method: method, Params: payerParams}
appResult, err := callRedeem(condition, redeemerParams)
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: actor code not found")
})
t.Run("Redeem should fail if condition goes to non-existent method", func(t *testing.T) {
badMethod := "nonexistentMethod"
condition := &types.Predicate{To: toAddress, Method: badMethod, Params: payerParams}
appResult, err := callRedeem(condition, redeemerParams)
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: actor does not export method")
})
t.Run("Redeem should fail if condition has the wrong number of condition parameters", func(t *testing.T) {
badParams := []interface{}{}
condition := &types.Predicate{To: toAddress, Method: method, Params: badParams}
appResult, err := callRedeem(condition, redeemerParams)
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: invalid params")
})
t.Run("Redeem should fail if condition has the wrong number of supplied parameters", func(t *testing.T) {
badRedeemerParams := []interface{}{}
condition := &types.Predicate{To: toAddress, Method: method, Params: payerParams}
appResult, err := callRedeem(condition, badRedeemerParams)
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: invalid params")
})
}
func TestPaymentBrokerRedeemReversesCancellations(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// Cancel the payment channel
pdata := core.MustConvertParams(sys.channelID)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(1000), "cancel", pdata)
result, err := sys.ApplyMessage(msg, 100)
require.NoError(t, result.ExecutionError)
require.NoError(t, err)
require.Equal(t, uint8(0), result.Receipt.ExitCode)
// Expect that the EOL of the payment channel now reflects the cancellation
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
channel := sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewBlockHeight(20000), channel.AgreedEol)
assert.Equal(t, types.NewBlockHeight(10100), channel.Eol)
// Redeem the payment channel
result, err = sys.ApplyRedeemMessageWithBlockHeight(sys.target, 500, 0, 10000)
require.NoError(t, err)
// Expect that the EOL has been reset to its originally agreed upon value
// meaning that the cancellation has been reversed
paymentBroker = state.MustGetActor(sys.st, address.PaymentBrokerAddress)
channel = sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewBlockHeight(20000), channel.AgreedEol)
assert.Equal(t, types.NewBlockHeight(20000), channel.Eol)
}
func TestPaymentBrokerUpdateErrorsWithIncorrectChannel(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// update message from payer instead of target results in error
result, err := sys.ApplyRedeemMessage(sys.payer, 100, 1)
require.NoError(t, err)
require.NotEqual(t, uint8(0), result.Receipt.ExitCode)
// invalid channel id results in revert error
sys.channelID = types.NewChannelID(39932)
result, err = sys.ApplyRedeemMessage(sys.target, 100, 0)
require.NoError(t, err)
require.NotEqual(t, uint8(0), result.Receipt.ExitCode)
require.Contains(t, result.ExecutionError.Error(), "unknown")
}
func TestPaymentBrokerUpdateErrorsWhenNotFromTarget(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
wrongTargetAddress := sys.addressGetter()
wrongTargetActor := th.RequireNewAccountActor(t, types.NewAttoFILFromFIL(0))
sys.st.SetActor(sys.ctx, wrongTargetAddress, wrongTargetActor)
result, err := sys.ApplyRedeemMessage(wrongTargetAddress, 100, 0)
require.NoError(t, err)
require.NotEqual(t, uint8(0), result.Receipt.ExitCode)
require.Contains(t, result.ExecutionError.Error(), "wrong target account")
}
func TestPaymentBrokerUpdateErrorsWhenRedeemingMoreThanChannelContains(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
result, err := sys.ApplyRedeemMessage(sys.target, 1100, 0)
require.NoError(t, err)
require.NotEqual(t, uint8(0), result.Receipt.ExitCode)
require.Contains(t, result.ExecutionError.Error(), "exceeds amount")
}
func TestPaymentBrokerUpdateErrorsWhenRedeemingFundsAlreadyRedeemed(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// redeem some
result, err := sys.ApplyRedeemMessage(sys.target, 500, 0)
require.NoError(t, result.ExecutionError)
require.NoError(t, err)
require.Equal(t, uint8(0), result.Receipt.ExitCode)
// redeeming funds already redeemed is an error
result, err = sys.ApplyRedeemMessage(sys.target, 400, 1)
require.NoError(t, err)
require.NotEqual(t, uint8(0), result.Receipt.ExitCode)
require.Contains(t, result.ExecutionError.Error(), "update amount")
}
func TestPaymentBrokerUpdateErrorsWhenAtEol(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// set block height to Eol
result, err := sys.ApplyRedeemMessageWithBlockHeight(sys.target, 500, 0, 20000)
require.NoError(t, err)
// expect an error
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
assert.True(t, strings.Contains(strings.ToLower(result.ExecutionError.Error()), "block height"), "Error should relate to block height")
}
func TestPaymentBrokerUpdateErrorsBeforeValidAt(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
result, err := sys.ApplySignatureMessageWithValidAtAndBlockHeight(sys.target, 100, 0, 8, 3, "redeem")
require.NoError(t, err)
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
assert.True(t, strings.Contains(strings.ToLower(result.ExecutionError.Error()), "block height too low"), "Error should relate to height lower than validAt")
}
func TestPaymentBrokerUpdateSuccessWithValidAt(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// Redeem at block height == validAt != 0.
result, err := sys.ApplySignatureMessageWithValidAtAndBlockHeight(sys.target, 100, 0, 4, 4, "redeem")
require.NoError(t, err)
require.Equal(t, uint8(0), result.Receipt.ExitCode)
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(900), paymentBroker.Balance)
payee := state.MustGetActor(sys.st, sys.target)
assert.Equal(t, types.NewAttoFILFromFIL(100), payee.Balance)
channel := sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewAttoFILFromFIL(1000), channel.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(100), channel.AmountRedeemed)
assert.Equal(t, sys.target, channel.Target)
// Redeem after block height == validAt.
result, err = sys.ApplySignatureMessageWithValidAtAndBlockHeight(sys.target, 200, 0, 4, 6, "redeem")
require.NoError(t, err)
require.Equal(t, uint8(0), result.Receipt.ExitCode)
paymentBroker = state.MustGetActor(sys.st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(800), paymentBroker.Balance)
payee = state.MustGetActor(sys.st, sys.target)
assert.Equal(t, types.NewAttoFILFromFIL(200), payee.Balance)
channel = sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewAttoFILFromFIL(1000), channel.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(200), channel.AmountRedeemed)
assert.Equal(t, sys.target, channel.Target)
}
func TestPaymentBrokerClose(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
payerActor := state.MustGetActor(sys.st, sys.payer)
payerBalancePriorToClose := payerActor.Balance
result, err := sys.ApplyCloseMessage(sys.target, 100, 0)
require.NoError(t, err)
require.NoError(t, result.ExecutionError)
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
// all funds have been redeemed or returned
assert.Equal(t, types.NewAttoFILFromFIL(0), paymentBroker.Balance)
targetActor := state.MustGetActor(sys.st, sys.target)
// targetActor has been paid
assert.Equal(t, types.NewAttoFILFromFIL(100), targetActor.Balance)
// remaining balance is returned to payer
payerActor = state.MustGetActor(sys.st, sys.payer)
assert.Equal(t, payerBalancePriorToClose.Add(types.NewAttoFILFromFIL(900)), payerActor.Balance)
}
func TestPaymentBrokerCloseErrorsBeforeValidAt(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
result, err := sys.ApplySignatureMessageWithValidAtAndBlockHeight(sys.target, 100, 0, 8, 3, "close")
require.NoError(t, err)
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
assert.True(t, strings.Contains(strings.ToLower(result.ExecutionError.Error()), "block height too low"), "Error should relate to height lower than validAt")
}
func TestPaymentBrokerCloseInvalidSig(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
amt := types.NewAttoFILFromFIL(100)
signature, err := sys.Signature(amt, sys.defaultValidAt, nil)
require.NoError(t, err)
// make the signature invalid
signature[0] = 0
signature[1] = 1
var condition *types.Predicate
pdata := core.MustConvertParams(sys.payer, sys.channelID, amt, sys.defaultValidAt, condition, signature, []interface{}{})
msg := types.NewMessage(sys.target, address.PaymentBrokerAddress, 0, types.NewAttoFILFromFIL(0), "close", pdata)
res, err := sys.ApplyMessage(msg, 0)
require.EqualError(t, res.ExecutionError, Errors[ErrInvalidSignature].Error())
require.NoError(t, err)
}
func TestPaymentBrokerCloseWithCondition(t *testing.T) {
tf.UnitTest(t)
addrGetter := address.NewForTestGetter()
toAddress := addrGetter()
sys := setup(t)
require.NoError(t, sys.st.SetActor(context.TODO(), toAddress, actor.NewActor(pbTestActorCid, types.NewZeroAttoFIL())))
t.Run("Close should succeed if condition is met", func(t *testing.T) {
condition := &types.Predicate{To: toAddress, Method: "paramsNotZero", Params: []interface{}{addrGetter(), uint64(6)}}
appResult, err := sys.applySignatureMessage(sys.target, 100, types.NewBlockHeight(0), 0, "close", 0, condition, types.NewBlockHeight(43))
require.NoError(t, err)
require.NoError(t, appResult.ExecutionError)
})
t.Run("Close should fail if condition is _NOT_ met", func(t *testing.T) {
condition := &types.Predicate{To: toAddress, Method: "paramsNotZero", Params: []interface{}{address.Undef, uint64(6)}}
appResult, err := sys.applySignatureMessage(sys.target, 100, types.NewBlockHeight(0), 0, "close", 0, condition, types.NewBlockHeight(43))
require.NoError(t, err)
require.Error(t, appResult.ExecutionError)
require.Contains(t, appResult.ExecutionError.Error(), "failed to validate voucher condition: got undefined address")
})
}
func TestPaymentBrokerRedeemInvalidSig(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
amt := types.NewAttoFILFromFIL(100)
signature, err := sys.Signature(amt, sys.defaultValidAt, nil)
require.NoError(t, err)
// make the signature invalid
signature[0] = 0
signature[1] = 1
var condition *types.Predicate
pdata := core.MustConvertParams(sys.payer, sys.channelID, amt, sys.defaultValidAt, condition, signature, []interface{}{})
msg := types.NewMessage(sys.target, address.PaymentBrokerAddress, 0, types.NewAttoFILFromFIL(0), "redeem", pdata)
res, err := sys.ApplyMessage(msg, 0)
require.EqualError(t, res.ExecutionError, Errors[ErrInvalidSignature].Error())
require.NoError(t, err)
}
func TestPaymentBrokerReclaim(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
payer := state.MustGetActor(sys.st, sys.payer)
payerBalancePriorToClose := payer.Balance
pdata := core.MustConvertParams(sys.channelID)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(0), "reclaim", pdata)
// block height is after Eol
res, err := sys.ApplyMessage(msg, 20001)
require.NoError(t, err)
require.NoError(t, res.ExecutionError)
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
// all funds have been redeemed or returned
assert.Equal(t, types.NewAttoFILFromFIL(0), paymentBroker.Balance)
// entire balance is returned to payer
payer = state.MustGetActor(sys.st, sys.payer)
assert.Equal(t, payerBalancePriorToClose.Add(types.NewAttoFILFromFIL(1000)), payer.Balance)
}
func TestPaymentBrokerReclaimFailsBeforeChannelEol(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
pdata := core.MustConvertParams(sys.channelID)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(0), "reclaim", pdata)
// block height is before Eol
result, err := sys.ApplyMessage(msg, 0)
require.NoError(t, err)
// fails
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
assert.Contains(t, result.ExecutionError.Error(), "reclaim")
assert.Contains(t, result.ExecutionError.Error(), "eol")
}
func TestPaymentBrokerExtend(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// extend channel
pdata := core.MustConvertParams(sys.channelID, types.NewBlockHeight(30000))
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(1000), "extend", pdata)
result, err := sys.ApplyMessage(msg, 9)
require.NoError(t, result.ExecutionError)
require.NoError(t, err)
assert.Equal(t, uint8(0), result.Receipt.ExitCode)
// try to request too high an amount after the eol for the original channel
result, err = sys.ApplyRedeemMessageWithBlockHeight(sys.target, 1100, 0, 12)
require.NoError(t, result.ExecutionError)
// expect success
require.NoError(t, err)
assert.Equal(t, uint8(0), result.Receipt.ExitCode)
// check value
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
assert.Equal(t, types.NewAttoFILFromFIL(900), paymentBroker.Balance) // 1000 + 1000 - 1100
// get payment channel
channel := sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewAttoFILFromFIL(2000), channel.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(1100), channel.AmountRedeemed)
assert.Equal(t, types.NewBlockHeight(30000), channel.AgreedEol)
assert.Equal(t, types.NewBlockHeight(30000), channel.Eol)
}
func TestPaymentBrokerExtendFailsWithNonExistentChannel(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// extend channel
pdata := core.MustConvertParams(types.NewChannelID(383), types.NewBlockHeight(30000))
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(1000), "extend", pdata)
result, err := sys.ApplyMessage(msg, 9)
require.NoError(t, err)
require.EqualError(t, result.ExecutionError, "payment channel is unknown")
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
}
func TestPaymentBrokerExtendRefusesToShortenTheEol(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
// extend channel setting block height to 5 (<10)
pdata := core.MustConvertParams(sys.channelID, types.NewBlockHeight(5))
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(1000), "extend", pdata)
result, err := sys.ApplyMessage(msg, 9)
require.NoError(t, err)
assert.NotEqual(t, uint8(0), result.Receipt.ExitCode)
assert.Contains(t, result.ExecutionError.Error(), "payment channel eol may not be decreased")
}
func TestPaymentBrokerCancel(t *testing.T) {
tf.UnitTest(t)
sys := setup(t)
pdata := core.MustConvertParams(sys.channelID)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, types.NewAttoFILFromFIL(1000), "cancel", pdata)
result, err := sys.ApplyMessage(msg, 100)
require.NoError(t, result.ExecutionError)
require.NoError(t, err)
assert.Equal(t, uint8(0), result.Receipt.ExitCode)
paymentBroker := state.MustGetActor(sys.st, address.PaymentBrokerAddress)
channel := sys.retrieveChannel(paymentBroker)
assert.Equal(t, types.NewBlockHeight(20000), channel.AgreedEol)
assert.Equal(t, types.NewBlockHeight(10100), channel.Eol)
}
func TestPaymentBrokerLs(t *testing.T) {
tf.UnitTest(t)
t.Run("Successfully returns channels", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
payer := address.TestAddress
target1 := address.NewForTestGetter()()
target2 := address.NewForTestGetter()()
_, st, vms := requireGenesis(ctx, t, target1)
targetActor2 := th.RequireNewAccountActor(t, types.NewAttoFILFromFIL(0))
st.SetActor(ctx, target2, targetActor2)
channelID1 := establishChannel(ctx, st, vms, payer, target1, 0, types.NewAttoFILFromFIL(1000), types.NewBlockHeight(10))
channelID2 := establishChannel(ctx, st, vms, payer, target2, 1, types.NewAttoFILFromFIL(2000), types.NewBlockHeight(20))
// retrieve channels
args, err := abi.ToEncodedValues(payer)
require.NoError(t, err)
returnValue, exitCode, err := consensus.CallQueryMethod(ctx, st, vms, address.PaymentBrokerAddress, "ls", args, payer, types.NewBlockHeight(9))
require.NoError(t, err)
assert.Equal(t, uint8(0), exitCode)
channels := make(map[string]*PaymentChannel)
err = cbor.DecodeInto(returnValue[0], &channels)
require.NoError(t, err)
assert.Equal(t, 2, len(channels))
pc1, found := channels[channelID1.String()]
require.True(t, found)
assert.Equal(t, target1, pc1.Target)
assert.Equal(t, types.NewAttoFILFromFIL(1000), pc1.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(0), pc1.AmountRedeemed)
assert.Equal(t, types.NewBlockHeight(10), pc1.AgreedEol)
assert.Equal(t, types.NewBlockHeight(10), pc1.Eol)
pc2, found := channels[channelID2.String()]
require.True(t, found)
assert.Equal(t, target2, pc2.Target)
assert.Equal(t, types.NewAttoFILFromFIL(2000), pc2.Amount)
assert.Equal(t, types.NewAttoFILFromFIL(0), pc2.AmountRedeemed)
assert.Equal(t, types.NewBlockHeight(20), pc2.AgreedEol)
})
t.Run("Returns empty map when payer has no channels", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
payer := address.TestAddress
target1 := address.NewForTestGetter()()
_, st, vms := requireGenesis(ctx, t, target1)
// retrieve channels
args, err := abi.ToEncodedValues(payer)
require.NoError(t, err)
returnValue, exitCode, err := consensus.CallQueryMethod(ctx, st, vms, address.PaymentBrokerAddress, "ls", args, payer, types.NewBlockHeight(9))
require.NoError(t, err)
assert.Equal(t, uint8(0), exitCode)
channels := make(map[string]*PaymentChannel)
err = cbor.DecodeInto(returnValue[0], &channels)
require.NoError(t, err)
assert.Equal(t, 0, len(channels))
})
}
func TestNewPaymentBrokerVoucher(t *testing.T) {
tf.UnitTest(t)
var nilCondition *types.Predicate
t.Run("Returns valid voucher", func(t *testing.T) {
sys := setup(t)
// create voucher
voucherAmount := types.NewAttoFILFromFIL(100)
pdata := core.MustConvertParams(sys.channelID, voucherAmount, sys.defaultValidAt, nilCondition)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, nil, "voucher", pdata)
res, err := sys.ApplyMessage(msg, 9)
assert.NoError(t, err)
assert.NoError(t, res.ExecutionError)
assert.Equal(t, uint8(0), res.Receipt.ExitCode)
voucher := types.PaymentVoucher{}
err = cbor.DecodeInto(res.Receipt.Return[0], &voucher)
require.NoError(t, err)
assert.Equal(t, *sys.channelID, voucher.Channel)
assert.Equal(t, sys.payer, voucher.Payer)
assert.Equal(t, sys.target, voucher.Target)
assert.Equal(t, *voucherAmount, voucher.Amount)
assert.Nil(t, voucher.Condition)
})
t.Run("Errors when channel does not exist", func(t *testing.T) {
sys := setup(t)
notChannelID := types.NewChannelID(999)
// create voucher
voucherAmount := types.NewAttoFILFromFIL(100)
_, exitCode, err := sys.CallQueryMethod("voucher", 9, notChannelID, voucherAmount, sys.defaultValidAt, nilCondition)
assert.NotEqual(t, uint8(0), exitCode)
assert.Contains(t, fmt.Sprintf("%v", err), "unknown")
})
t.Run("Errors when voucher exceed channel amount", func(t *testing.T) {
sys := setup(t)
// create voucher
voucherAmount := types.NewAttoFILFromFIL(2000)
args := core.MustConvertParams(sys.channelID, voucherAmount, sys.defaultValidAt, nilCondition)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, nil, "voucher", args)
res, err := sys.ApplyMessage(msg, 9)
assert.NoError(t, err)
assert.NotEqual(t, uint8(0), res.Receipt.ExitCode)
assert.Contains(t, fmt.Sprintf("%s", res.ExecutionError), "exceeds amount")
})
t.Run("Returns valid voucher with condition", func(t *testing.T) {
sys := setup(t)
condition := &types.Predicate{
To: address.NewForTestGetter()(),
Method: "someMethod",
Params: []interface{}{"encoded params"},
}
// create voucher
voucherAmount := types.NewAttoFILFromFIL(100)
pdata := core.MustConvertParams(sys.channelID, voucherAmount, sys.defaultValidAt, condition)
msg := types.NewMessage(sys.payer, address.PaymentBrokerAddress, 1, nil, "voucher", pdata)
res, err := sys.ApplyMessage(msg, 9)
assert.NoError(t, err)
assert.NoError(t, res.ExecutionError)
assert.Equal(t, uint8(0), res.Receipt.ExitCode)
voucher := types.PaymentVoucher{}
err = cbor.DecodeInto(res.Receipt.Return[0], &voucher)
require.NoError(t, err)
assert.Equal(t, *sys.channelID, voucher.Channel)
assert.Equal(t, sys.payer, voucher.Payer)
assert.Equal(t, sys.target, voucher.Target)
assert.Equal(t, *voucherAmount, voucher.Amount)
})
}
func TestSignVoucher(t *testing.T) {
payer := mockSigner.Addresses[0]
value := types.NewAttoFILFromFIL(10)
channelId := types.NewChannelID(3)
blockHeight := types.NewBlockHeight(393)
condition := &types.Predicate{
To: address.NewForTestGetter()(),
Method: "someMethod",
Params: []interface{}{"encoded params"},
}
var nilCondition *types.Predicate
t.Run("validates signatures with empty condition", func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
sig, err := SignVoucher(channelId, value, blockHeight, payer, nilCondition, mockSigner)
require.NoError(err)
assert.True(VerifyVoucherSignature(payer, channelId, value, blockHeight, nilCondition, sig))
assert.False(VerifyVoucherSignature(payer, channelId, value, blockHeight, condition, sig))
})
t.Run("validates signatures with condition", func(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
sig, err := SignVoucher(channelId, value, blockHeight, payer, condition, mockSigner)
require.NoError(err)
assert.True(VerifyVoucherSignature(payer, channelId, value, blockHeight, condition, sig))
assert.False(VerifyVoucherSignature(payer, channelId, value, blockHeight, nilCondition, sig))
})
}
func establishChannel(ctx context.Context, st state.Tree, vms vm.StorageMap, from address.Address, target address.Address, nonce uint64, amt *types.AttoFIL, eol *types.BlockHeight) *types.ChannelID {
pdata := core.MustConvertParams(target, eol)
msg := types.NewMessage(from, address.PaymentBrokerAddress, nonce, amt, "createChannel", pdata)
result, err := th.ApplyTestMessage(st, vms, msg, types.NewBlockHeight(0))
if err != nil {
panic(err)
}
if result.ExecutionError != nil {
panic(result.ExecutionError)
}
channelID := types.NewChannelIDFromBytes(result.Receipt.Return[0])
return channelID
}
func requireGenesis(ctx context.Context, t *testing.T, targetAddresses ...address.Address) (*hamt.CborIpldStore, state.Tree, vm.StorageMap) {
bs := blockstore.NewBlockstore(datastore.NewMapDatastore())
vms := vm.NewStorageMap(bs)
cst := hamt.NewCborStore()
blk, err := consensus.DefaultGenesis(cst, bs)
require.NoError(t, err)
builtinsWithTestActor := map[cid.Cid]exec.ExecutableActor{}
for cid, actor := range builtin.Actors {
builtinsWithTestActor[cid] = actor
}
builtinsWithTestActor[pbTestActorCid] = &PBTestActor{}
st, err := state.LoadStateTree(ctx, cst, blk.StateRoot, builtinsWithTestActor)
require.NoError(t, err)
for _, addr := range targetAddresses {
targetActor := th.RequireNewAccountActor(t, types.NewAttoFILFromFIL(0))
st.SetActor(ctx, addr, targetActor)
}
return cst, st, vms
}
// system is a helper struct to allow for easier testing of sending various messages to the paymentbroker actor.
// TODO: could be abstracted to be used in other actor tests.
type system struct {
t *testing.T
ctx context.Context
payer address.Address
target address.Address
defaultValidAt *types.BlockHeight
channelID *types.ChannelID
st state.Tree
vms vm.StorageMap
addressGetter func() address.Address
}
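// setup creates a genesis state with a funded payer, a target actor and an established payment channel for use in tests.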
func setup(t *testing.T) system {
t.Helper()
ctx := context.Background()
payer := mockSigner.Addresses[0]
addrGetter := address.NewForTestGetter()
target := addrGetter()
defaultValidAt := types.NewBlockHeight(uint64(0))
_, st, vms := requireGenesis(ctx, t, target)
payerActor := th.RequireNewAccountActor(t, types.NewAttoFILFromFIL(50000))
state.MustSetActor(st, payer, payerActor)
channelID := establishChannel(ctx, st, vms, payer, target, 0, types.NewAttoFILFromFIL(1000), types.NewBlockHeight(20000))
return system{
t: t,
addressGetter: addrGetter,
ctx: ctx,
payer: payer,
target: target,
channelID: channelID,
defaultValidAt: defaultValidAt,
st: st,
vms: vms,
}
}
func (sys *system) Signature(amt *types.AttoFIL, validAt *types.BlockHeight, condition *types.Predicate) ([]byte, error) {
sig, err := SignVoucher(sys.channelID, amt, validAt, sys.payer, condition, mockSigner)
if err != nil {
return nil, err
}
return ([]byte)(sig), nil
}
func (sys *system) CallQueryMethod(method string, height uint64, params ...interface{}) ([][]byte, uint8, error) {
sys.t.Helper()
args := core.MustConvertParams(params...)
return consensus.CallQueryMethod(sys.ctx, sys.st, sys.vms, address.PaymentBrokerAddress, method, args, sys.payer, types.NewBlockHeight(height))
}
func (sys *system) ApplyRedeemMessage(target address.Address, amtInt uint64, nonce uint64) (*consensus.ApplicationResult, error) {
sys.t.Helper()
return sys.applySignatureMessage(target, amtInt, sys.defaultValidAt, nonce, "redeem", 0, nil)
}
func (sys *system) ApplyRedeemMessageWithBlockHeight(target address.Address, amtInt uint64, nonce uint64, height uint64) (*consensus.ApplicationResult, error) {
sys.t.Helper()
return sys.applySignatureMessage(target, amtInt, sys.defaultValidAt, nonce, "redeem", height, nil)
}
func (sys *system) ApplyCloseMessage(target address.Address, amtInt uint64, nonce uint64) (*consensus.ApplicationResult, error) {
sys.t.Helper()
return sys.applySignatureMessage(target, amtInt, sys.defaultValidAt, nonce, "close", 0, nil)
}
func (sys *system) ApplySignatureMessageWithValidAtAndBlockHeight(target address.Address, amtInt uint64, nonce uint64, validAt uint64, height uint64, method string) (*consensus.ApplicationResult, error) {
sys.t.Helper()
if method != "redeem" && method != "close" {
sys.t.Fatalf("method %s is not a signature method", method)
}
return sys.applySignatureMessage(target, amtInt, types.NewBlockHeight(validAt), nonce, method, height, nil)
}
func (sys *system) retrieveChannel(paymentBroker *actor.Actor) *PaymentChannel {
// retrieve channels
args, err := abi.ToEncodedValues(sys.payer)
require.NoError(sys.t, err)
returnValue, exitCode, err := consensus.CallQueryMethod(sys.ctx, sys.st, sys.vms, address.PaymentBrokerAddress, "ls", args, sys.payer, types.NewBlockHeight(9))
require.NoError(sys.t, err)
assert.Equal(sys.t, uint8(0), exitCode)
channels := make(map[string]*PaymentChannel)
err = cbor.DecodeInto(returnValue[0], &channels)
require.NoError(sys.t, err)
channel := channels[sys.channelID.KeyString()]
require.NotNil(sys.t, channel)
return channel
}
// applySignatureMessage signs voucher parameters and then creates a redeem or close message with all
// the voucher parameters and the signature, sends it to the payment broker, and returns the result
func (sys *system) applySignatureMessage(target address.Address, amtInt uint64, validAt *types.BlockHeight, nonce uint64, method string, height uint64, condition *types.Predicate, suppliedParams ...interface{}) (*consensus.ApplicationResult, error) {
sys.t.Helper()
amt := types.NewAttoFILFromFIL(amtInt)
signature, err := sys.Signature(amt, validAt, condition)
require.NoError(sys.t, err)
pdata := core.MustConvertParams(sys.payer, sys.channelID, amt, validAt, condition, signature, suppliedParams)
msg := types.NewMessage(target, address.PaymentBrokerAddress, nonce, types.NewAttoFILFromFIL(0), method, pdata)
return sys.ApplyMessage(msg, height)
}
func (sys *system) ApplyMessage(msg *types.Message, height uint64) (*consensus.ApplicationResult, error) {
return th.ApplyTestMessage(sys.st, sys.vms, msg, types.NewBlockHeight(height))
}
func requireGetPaymentChannel(t *testing.T, ctx context.Context, st state.Tree, vms vm.StorageMap, payer address.Address, channelId *types.ChannelID) *PaymentChannel {
var paymentMap map[string]*PaymentChannel
pdata := core.MustConvertParams(payer)
values, ec, err := consensus.CallQueryMethod(ctx, st, vms, address.PaymentBrokerAddress, "ls", pdata, payer, types.NewBlockHeight(0))
require.Zero(t, ec)
require.NoError(t, err)
actor.UnmarshalStorage(values[0], &paymentMap)
result, ok := paymentMap[channelId.KeyString()]
require.True(t, ok)
return result
}
// PBTestActor is a fake actor for use in tests.
type PBTestActor struct{}
var _ exec.ExecutableActor = (*PBTestActor)(nil)
// Exports returns the list of fake actor exported functions.
func (ma *PBTestActor) Exports() exec.Exports {
return exec.Exports{
"paramsNotZero": &exec.FunctionSignature{
Params: []abi.Type{abi.Address, abi.SectorID, abi.BlockHeight},
Return: nil,
},
}
}
// InitializeState initializes the fake actor's state; for this test actor it is a no-op.
func (ma *PBTestActor) InitializeState(storage exec.Storage, initializerData interface{}) error {
return nil
}
func (ma *PBTestActor) ParamsNotZero(ctx exec.VMContext, addr address.Address, sector uint64, bh *types.BlockHeight) (uint8, error) {
if addr == address.Undef {
return 1, errors.NewRevertError("got undefined address")
}
if sector == 0 {
return 1, errors.NewRevertError("got zero sector")
}
if types.NewBlockHeight(0).Equal(bh) {
return 1, errors.NewRevertError("got zero block height")
}
return 0, nil
}
| 1 | 18,906 |
We have other tests to assert all properties of a condition are valid. It's enough to assert the condition is not nil here.
|
filecoin-project-venus
|
go
|
@@ -30,7 +30,7 @@ import (
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
- opsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/ops/v1alpha1"
+ opsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/crd/v1alpha1"
binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
)
| 1 |
// Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package traceflow
import (
"context"
"errors"
"fmt"
"net"
"time"
"github.com/contiv/libOpenflow/openflow13"
"github.com/contiv/libOpenflow/protocol"
"github.com/contiv/ofnet/ofctrl"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
opsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/ops/v1alpha1"
binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
)
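// HandlePacketIn parses a Traceflow packet-in message and records this Node's result in the Traceflow CRD status.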
func (c *Controller) HandlePacketIn(pktIn *ofctrl.PacketIn) error {
if !c.traceflowListerSynced() {
return errors.New("traceflow controller is not started")
}
oldTf, nodeResult, err := c.parsePacketIn(pktIn)
if err != nil {
klog.Errorf("parsePacketIn error: %+v", err)
return err
}
// Retry on CRD update conflicts, which are caused by multiple agents updating the same CRD at the same time.
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
tf, err := c.traceflowInformer.Lister().Get(oldTf.Name)
if err != nil {
klog.Warningf("Get traceflow failed: %+v", err)
return err
}
update := tf.DeepCopy()
update.Status.Results = append(update.Status.Results, *nodeResult)
_, err = c.traceflowClient.OpsV1alpha1().Traceflows().UpdateStatus(context.TODO(), update, v1.UpdateOptions{})
if err != nil {
klog.Warningf("Update traceflow failed: %+v", err)
return err
}
klog.Infof("Updated traceflow %s: %+v", tf.Name, nodeResult)
return nil
})
if err != nil {
klog.Errorf("Update traceflow error: %+v", err)
}
return err
}
func (c *Controller) parsePacketIn(pktIn *ofctrl.PacketIn) (*opsv1alpha1.Traceflow, *opsv1alpha1.NodeResult, error) {
matchers := pktIn.GetMatches()
var match *ofctrl.MatchField
// Get data plane tag.
// Directly read data plane tag from packet.
var tag uint8
if pktIn.Data.Ethertype == protocol.IPv4_MSG {
ipPacket, ok := pktIn.Data.Data.(*protocol.IPv4)
if !ok {
return nil, nil, errors.New("invalid traceflow IPv4 packet")
}
tag = ipPacket.DSCP
} else if pktIn.Data.Ethertype == protocol.IPv6_MSG {
ipv6Packet, ok := pktIn.Data.Data.(*protocol.IPv6)
if !ok {
return nil, nil, errors.New("invalid traceflow IPv6 packet")
}
tag = ipv6Packet.TrafficClass >> 2
} else {
return nil, nil, fmt.Errorf("unsupported traceflow packet Ethertype: %d", pktIn.Data.Ethertype)
}
// Get traceflow CRD from cache by data plane tag.
tf, err := c.GetRunningTraceflowCRD(uint8(tag))
if err != nil {
return nil, nil, err
}
obs := make([]opsv1alpha1.Observation, 0)
isSender := c.isSender(uint8(tag))
tableID := pktIn.TableId
if isSender {
ob := new(opsv1alpha1.Observation)
ob.Component = opsv1alpha1.SpoofGuard
ob.Action = opsv1alpha1.Forwarded
obs = append(obs, *ob)
} else {
ob := new(opsv1alpha1.Observation)
ob.Component = opsv1alpha1.Forwarding
ob.Action = opsv1alpha1.Received
ob.ComponentInfo = openflow.GetFlowTableName(openflow.ClassifierTable)
obs = append(obs, *ob)
}
// Collect Service DNAT.
ctNwDst := ""
ipDst := ""
switch pktIn.Data.Ethertype {
case protocol.IPv4_MSG:
ipPacket, ok := pktIn.Data.Data.(*protocol.IPv4)
if !ok {
return nil, nil, errors.New("invalid traceflow IPv4 packet")
}
ctNwDst, err = getCTDstValue(matchers, false)
if err != nil {
return nil, nil, err
}
ipDst = ipPacket.NWDst.String()
case protocol.IPv6_MSG:
ipPacket, ok := pktIn.Data.Data.(*protocol.IPv6)
if !ok {
return nil, nil, errors.New("invalid traceflow IPv6 packet")
}
ctNwDst, err = getCTDstValue(matchers, true)
if err != nil {
return nil, nil, err
}
ipDst = ipPacket.NWDst.String()
default:
return nil, nil, fmt.Errorf("unsupported traceflow packet ether type %d", pktIn.Data.Ethertype)
}
if isValidCtNw(ctNwDst) && ipDst != ctNwDst {
ob := &opsv1alpha1.Observation{
Component: opsv1alpha1.LB,
Action: opsv1alpha1.Forwarded,
TranslatedDstIP: ipDst,
}
obs = append(obs, *ob)
}
// Collect egress conjunctionID and get NetworkPolicy from cache.
if match = getMatchRegField(matchers, uint32(openflow.EgressReg)); match != nil {
egressInfo, err := getRegValue(match, nil)
if err != nil {
return nil, nil, err
}
ob := getNetworkPolicyObservation(tableID, false)
npRef := c.networkPolicyQuerier.GetNetworkPolicyByRuleFlowID(egressInfo)
if npRef != nil {
ob.NetworkPolicy = npRef.ToString()
}
obs = append(obs, *ob)
}
// Collect ingress conjunctionID and get NetworkPolicy from cache.
if match = getMatchRegField(matchers, uint32(openflow.IngressReg)); match != nil {
ingressInfo, err := getRegValue(match, nil)
if err != nil {
return nil, nil, err
}
ob := getNetworkPolicyObservation(tableID, true)
npRef := c.networkPolicyQuerier.GetNetworkPolicyByRuleFlowID(ingressInfo)
if npRef != nil {
ob.NetworkPolicy = npRef.ToString()
}
obs = append(obs, *ob)
}
// Get drop table.
if tableID == uint8(openflow.EgressMetricTable) || tableID == uint8(openflow.IngressMetricTable) {
ob := getNetworkPolicyObservation(tableID, tableID == uint8(openflow.IngressMetricTable))
if match = getMatchRegField(matchers, uint32(openflow.CNPDenyConjIDReg)); match != nil {
notAllowConjInfo, err := getRegValue(match, nil)
if err != nil {
return nil, nil, err
}
npRef := c.networkPolicyQuerier.GetNetworkPolicyByRuleFlowID(notAllowConjInfo)
if npRef != nil {
ob.NetworkPolicy = npRef.ToString()
}
}
obs = append(obs, *ob)
} else if tableID == uint8(openflow.EgressDefaultTable) || tableID == uint8(openflow.IngressDefaultTable) {
ob := getNetworkPolicyObservation(tableID, tableID == uint8(openflow.IngressDefaultTable))
obs = append(obs, *ob)
}
// Get output table.
if tableID == uint8(openflow.L2ForwardingOutTable) {
ob := new(opsv1alpha1.Observation)
tunnelDstIP := ""
isIPv6 := c.nodeConfig.NodeIPAddr.IP.To4() == nil
if match = getMatchTunnelDstField(matchers, isIPv6); match != nil {
tunnelDstIP, err = getTunnelDstValue(match)
if err != nil {
return nil, nil, err
}
}
var outputPort uint32
if match = getMatchRegField(matchers, uint32(openflow.PortCacheReg)); match != nil {
outputPort, err = getRegValue(match, nil)
if err != nil {
return nil, nil, err
}
}
gatewayIP := c.nodeConfig.GatewayConfig.IPv4
if pktIn.Data.Ethertype == protocol.IPv6_MSG {
gatewayIP = c.nodeConfig.GatewayConfig.IPv6
}
if c.networkConfig.TrafficEncapMode.SupportsEncap() && outputPort == config.DefaultTunOFPort {
ob.TunnelDstIP = tunnelDstIP
ob.Action = opsv1alpha1.Forwarded
} else if ipDst == gatewayIP.String() && outputPort == config.HostGatewayOFPort {
ob.Action = opsv1alpha1.Delivered
} else if c.networkConfig.TrafficEncapMode.SupportsEncap() && outputPort == config.HostGatewayOFPort {
ob.Action = opsv1alpha1.ForwardedOutOfOverlay
} else if outputPort == config.HostGatewayOFPort { // noEncap
ob.Action = opsv1alpha1.Forwarded
} else {
// Output port is Pod port, packet is delivered.
ob.Action = opsv1alpha1.Delivered
}
ob.ComponentInfo = openflow.GetFlowTableName(binding.TableIDType(tableID))
ob.Component = opsv1alpha1.Forwarding
obs = append(obs, *ob)
}
nodeResult := opsv1alpha1.NodeResult{Node: c.nodeConfig.Name, Timestamp: time.Now().Unix(), Observations: obs}
return tf, &nodeResult, nil
}
func getMatchRegField(matchers *ofctrl.Matchers, regNum uint32) *ofctrl.MatchField {
return matchers.GetMatchByName(fmt.Sprintf("NXM_NX_REG%d", regNum))
}
func getMatchTunnelDstField(matchers *ofctrl.Matchers, isIPv6 bool) *ofctrl.MatchField {
if isIPv6 {
return matchers.GetMatchByName("NXM_NX_TUN_IPV6_DST")
}
return matchers.GetMatchByName("NXM_NX_TUN_IPV4_DST")
}
func getRegValue(regMatch *ofctrl.MatchField, rng *openflow13.NXRange) (uint32, error) {
regValue, ok := regMatch.GetValue().(*ofctrl.NXRegister)
if !ok {
return 0, errors.New("register value cannot be retrieved")
}
if rng != nil {
return ofctrl.GetUint32ValueWithRange(regValue.Data, rng), nil
}
return regValue.Data, nil
}
func getTunnelDstValue(regMatch *ofctrl.MatchField) (string, error) {
regValue, ok := regMatch.GetValue().(net.IP)
if !ok {
return "", errors.New("tunnel destination value cannot be got")
}
return regValue.String(), nil
}
func getCTDstValue(matchers *ofctrl.Matchers, isIPv6 bool) (string, error) {
var match *ofctrl.MatchField
if isIPv6 {
match = matchers.GetMatchByName("NXM_NX_CT_IPV6_DST")
} else {
match = matchers.GetMatchByName("NXM_NX_CT_NW_DST")
}
if match == nil {
return "", nil
}
regValue, ok := match.GetValue().(net.IP)
if !ok {
return "", errors.New("packet-in conntrack destination value cannot be retrieved from metadata")
}
return regValue.String(), nil
}
func getNetworkPolicyObservation(tableID uint8, ingress bool) *opsv1alpha1.Observation {
ob := new(opsv1alpha1.Observation)
ob.Component = opsv1alpha1.NetworkPolicy
if ingress {
switch tableID {
case uint8(openflow.IngressMetricTable), uint8(openflow.IngressDefaultTable):
// Packet dropped by ANP/default drop rule
ob.ComponentInfo = openflow.GetFlowTableName(binding.TableIDType(tableID))
ob.Action = opsv1alpha1.Dropped
default:
ob.ComponentInfo = openflow.GetFlowTableName(openflow.IngressRuleTable)
ob.Action = opsv1alpha1.Forwarded
}
} else {
switch tableID {
case uint8(openflow.EgressMetricTable), uint8(openflow.EgressDefaultTable):
// Packet dropped by ANP/default drop rule
ob.ComponentInfo = openflow.GetFlowTableName(binding.TableIDType(tableID))
ob.Action = opsv1alpha1.Dropped
default:
ob.ComponentInfo = openflow.GetFlowTableName(openflow.EgressRuleTable)
ob.Action = opsv1alpha1.Forwarded
}
}
return ob
}
func isValidCtNw(ipStr string) bool {
ip := net.ParseIP(ipStr)
if ip == nil {
return false
}
// Reserved by IETF [RFC3513][RFC4291]
_, cidr, _ := net.ParseCIDR("0000::/8")
if cidr.Contains(ip) {
return false
}
return true
}
| 1 | 32,486 |
Shouldn't the import alias be renamed to `crdv1alpha1`, per convention? It seems you took care of renaming it everywhere else...
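A sketch of the rename being asked for, assuming the alias simply mirrors the new package path from the patch above; the commented usage line is adapted from parsePacketIn in this file:
import (
	crdv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/crd/v1alpha1"
)
// Every reference would then follow the alias, for example:
// obs := make([]crdv1alpha1.Observation, 0)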
|
antrea-io-antrea
|
go
|
@@ -86,6 +86,19 @@ const (
// DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required
DefaultMinSupportedCNIVersion = "0.3.0"
+
+ // pauseContainerTarball is the path to the pause container tarball
+ pauseContainerTarballPath = "/images/amazon-ecs-pause.tar"
+)
+
+var (
+ // PauseContainerTag is the tag for the pause container image. The linker's load
+ // flags are used to populate this value from the Makefile
+ PauseContainerTag = ""
+
+ // PauseContainerImageName is the name of the pause container image. The linker's
+ // load flags are used to populate this value from the Makefile
+ PauseContainerImageName = ""
)
// Merge merges two config files, preferring the ones on the left. Any nil or
| 1 |
// Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/cihub/seelog"
)
const (
// http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DockerReservedPort = 2375
DockerReservedSSLPort = 2376
SSHPort = 22
// AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent.
AgentIntrospectionPort = 51678
// AgentCredentialsPort is used to serve the credentials for tasks.
AgentCredentialsPort = 51679
// DefaultClusterName is the name of the default cluster.
DefaultClusterName = "default"
// DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to
// clean up task's containers.
DefaultTaskCleanupWaitDuration = 3 * time.Hour
// DefaultDockerStopTimeout specifies the value for container stop timeout duration
DefaultDockerStopTimeout = 30 * time.Second
// DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to
// remove the images pulled by agent.
DefaultImageCleanupTimeInterval = 30 * time.Minute
// DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs
// image cleanup.
DefaultNumImagesToDeletePerCycle = 5
// DefaultImageDeletionAge specifies the default value for the minimum amount of elapsed time after an image
// has been pulled before it can be deleted.
DefaultImageDeletionAge = 1 * time.Hour
// minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up
// a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field.
minimumTaskCleanupWaitDuration = 1 * time.Minute
// minimumDockerStopTimeout specifies the minimum value for docker StopContainer API
minimumDockerStopTimeout = 1 * time.Second
// minimumImageCleanupInterval specifies the minimum time for agent to wait before performing
// image cleanup.
minimumImageCleanupInterval = 10 * time.Minute
// minimumNumImagesToDeletePerCycle specifies the minimum number of images that to be deleted when
// performing image cleanup.
minimumNumImagesToDeletePerCycle = 1
// defaultCNIPluginsPath is the default path where cni binaries are located
defaultCNIPluginsPath = "/amazon-ecs-cni-plugins"
// DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required
DefaultMinSupportedCNIVersion = "0.3.0"
)
// Merge merges two config files, preferring the ones on the left. Any nil or
// zero values present in the left that are not present in the right will be
// overridden
func (lhs *Config) Merge(rhs Config) *Config {
left := reflect.ValueOf(lhs).Elem()
right := reflect.ValueOf(&rhs).Elem()
for i := 0; i < left.NumField(); i++ {
leftField := left.Field(i)
if utils.ZeroOrNil(leftField.Interface()) {
leftField.Set(reflect.ValueOf(right.Field(i).Interface()))
}
}
return lhs //make it chainable
}
// complete returns true if all fields of the config are populated / nonzero
func (cfg *Config) complete() bool {
cfgElem := reflect.ValueOf(cfg).Elem()
for i := 0; i < cfgElem.NumField(); i++ {
if utils.ZeroOrNil(cfgElem.Field(i).Interface()) {
return false
}
}
return true
}
// checkMissingAndDepreciated checks all zero-valued fields for tags of the form
// missing:STRING and acts based on that string. Current options are: fatal,
// warn. Fatal will result in an error being returned, warn will result in a
// warning that the field is missing being logged.
func (cfg *Config) checkMissingAndDepreciated() error {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
fatalFields := []string{}
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if utils.ZeroOrNil(cfgField.Interface()) {
missingTag := cfgStructField.Field(i).Tag.Get("missing")
if len(missingTag) == 0 {
continue
}
switch missingTag {
case "warn":
seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
case "fatal":
seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name)
fatalFields = append(fatalFields, cfgStructField.Field(i).Name)
default:
seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag)
}
} else {
// present
deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated")
if len(deprecatedTag) == 0 {
continue
}
seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag)
}
}
if len(fatalFields) > 0 {
return errors.New("Missing required fields: " + strings.Join(fatalFields, ", "))
}
return nil
}
// trimWhitespace trims whitespace from all string config values with the
// `trim` tag
func (cfg *Config) trimWhitespace() {
cfgElem := reflect.ValueOf(cfg).Elem()
cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type()
for i := 0; i < cfgElem.NumField(); i++ {
cfgField := cfgElem.Field(i)
if !cfgField.CanInterface() {
continue
}
trimTag := cfgStructField.Field(i).Tag.Get("trim")
if len(trimTag) == 0 {
continue
}
if cfgField.Kind() != reflect.String {
seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i)
continue
}
str := cfgField.Interface().(string)
cfgField.SetString(strings.TrimSpace(str))
}
}
func fileConfig() (Config, error) {
config_file := utils.DefaultIfBlank(os.Getenv("ECS_AGENT_CONFIG_FILE_PATH"), "/etc/ecs_container_agent/config.json")
config := Config{}
file, err := os.Open(config_file)
if err != nil {
return config, nil
}
data, err := ioutil.ReadAll(file)
if err != nil {
seelog.Errorf("Unable to read config file, err %v", err)
return config, err
}
if strings.TrimSpace(string(data)) == "" {
// empty file, not an error
return config, nil
}
err = json.Unmarshal(data, &config)
if err != nil {
seelog.Errorf("Error reading config json data, err %v", err)
}
// Handle any deprecated keys correctly here
if utils.ZeroOrNil(config.Cluster) && !utils.ZeroOrNil(config.ClusterArn) {
config.Cluster = config.ClusterArn
}
return config, nil
}
// environmentConfig reads the given configs from the environment and attempts
// to convert them to the given type
func environmentConfig() (Config, error) {
var errs []error
endpoint := os.Getenv("ECS_BACKEND_HOST")
clusterRef := os.Getenv("ECS_CLUSTER")
awsRegion := os.Getenv("AWS_DEFAULT_REGION")
dockerEndpoint := os.Getenv("DOCKER_HOST")
engineAuthType := os.Getenv("ECS_ENGINE_AUTH_TYPE")
engineAuthData := os.Getenv("ECS_ENGINE_AUTH_DATA")
var checkpoint bool
dataDir := os.Getenv("ECS_DATADIR")
if dataDir != "" {
// if we have a directory to checkpoint to, default it to be on
checkpoint = utils.ParseBool(os.Getenv("ECS_CHECKPOINT"), true)
} else {
// if the directory is not set, default to checkpointing off for
// backwards compatibility
checkpoint = utils.ParseBool(os.Getenv("ECS_CHECKPOINT"), false)
}
// Format: json array, e.g. [1,2,3]
reservedPortEnv := os.Getenv("ECS_RESERVED_PORTS")
portDecoder := json.NewDecoder(strings.NewReader(reservedPortEnv))
var reservedPorts []uint16
err := portDecoder.Decode(&reservedPorts)
// EOF means the string was blank, as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
err := fmt.Errorf("Invalid format for \"ECS_RESERVED_PORTS\" environment variable; expected a JSON array like [1,2,3]. err %v", err)
seelog.Warn(err)
}
reservedPortUDPEnv := os.Getenv("ECS_RESERVED_PORTS_UDP")
portDecoderUDP := json.NewDecoder(strings.NewReader(reservedPortUDPEnv))
var reservedPortsUDP []uint16
err = portDecoderUDP.Decode(&reservedPortsUDP)
// EOF means the string was blank, as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
err := fmt.Errorf("Invalid format for \"ECS_RESERVED_PORTS_UDP\" environment variable; expected a JSON array like [1,2,3]. err %v", err)
seelog.Warn(err)
}
updateDownloadDir := os.Getenv("ECS_UPDATE_DOWNLOAD_DIR")
updatesEnabled := utils.ParseBool(os.Getenv("ECS_UPDATES_ENABLED"), false)
disableMetrics := utils.ParseBool(os.Getenv("ECS_DISABLE_METRICS"), false)
reservedMemory := parseEnvVariableUint16("ECS_RESERVED_MEMORY")
var dockerStopTimeout time.Duration
parsedStopTimeout := parseEnvVariableDuration("ECS_CONTAINER_STOP_TIMEOUT")
if parsedStopTimeout >= minimumDockerStopTimeout {
dockerStopTimeout = parsedStopTimeout
} else if parsedStopTimeout != 0 {
seelog.Warnf("Discarded invalid value for docker stop timeout, parsed as: %v", parsedStopTimeout)
}
taskCleanupWaitDuration := parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION")
availableLoggingDriversEnv := os.Getenv("ECS_AVAILABLE_LOGGING_DRIVERS")
loggingDriverDecoder := json.NewDecoder(strings.NewReader(availableLoggingDriversEnv))
var availableLoggingDrivers []dockerclient.LoggingDriver
err = loggingDriverDecoder.Decode(&availableLoggingDrivers)
// EOF means the string was blank, as opposed to UnexpectedEOF, which means an
// invalid parse
// Blank is not a warning; we have sane defaults
if err != io.EOF && err != nil {
err := fmt.Errorf("Invalid format for \"ECS_AVAILABLE_LOGGING_DRIVERS\" environment variable; expected a JSON array like [\"json-file\",\"syslog\"]. err %v", err)
seelog.Warn(err)
}
privilegedDisabled := utils.ParseBool(os.Getenv("ECS_DISABLE_PRIVILEGED"), false)
seLinuxCapable := utils.ParseBool(os.Getenv("ECS_SELINUX_CAPABLE"), false)
appArmorCapable := utils.ParseBool(os.Getenv("ECS_APPARMOR_CAPABLE"), false)
taskENIEnabled := utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_ENI"), false)
taskIAMRoleEnabled := utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE"), false)
taskIAMRoleEnabledForNetworkHost := utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false)
credentialsAuditLogFile := os.Getenv("ECS_AUDIT_LOGFILE")
credentialsAuditLogDisabled := utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false)
imageCleanupDisabled := utils.ParseBool(os.Getenv("ECS_DISABLE_IMAGE_CLEANUP"), false)
minimumImageDeletionAge := parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE")
imageCleanupInterval := parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL")
numImagesToDeletePerCycleEnvVal := os.Getenv("ECS_NUM_IMAGES_DELETE_PER_CYCLE")
numImagesToDeletePerCycle, err := strconv.Atoi(numImagesToDeletePerCycleEnvVal)
if numImagesToDeletePerCycleEnvVal != "" && err != nil {
seelog.Warnf("Invalid format for \"ECS_NUM_IMAGES_DELETE_PER_CYCLE\", expected an integer. err %v", err)
}
cniPluginsPath := os.Getenv("ECS_CNI_PLUGINS_PATH")
instanceAttributesEnv := os.Getenv("ECS_INSTANCE_ATTRIBUTES")
attributeDecoder := json.NewDecoder(strings.NewReader(instanceAttributesEnv))
var instanceAttributes map[string]string
err = attributeDecoder.Decode(&instanceAttributes)
if err != io.EOF && err != nil {
err := fmt.Errorf("Invalid format for ECS_INSTANCE_ATTRIBUTES. Expected a json hash")
seelog.Warn(err)
errs = append(errs, err)
}
for attributeKey, attributeValue := range instanceAttributes {
seelog.Debugf("Setting instance attribute %v: %v", attributeKey, attributeValue)
}
if len(errs) > 0 {
err = utils.NewMultiError(errs...)
} else {
err = nil
}
return Config{
Cluster: clusterRef,
APIEndpoint: endpoint,
AWSRegion: awsRegion,
DockerEndpoint: dockerEndpoint,
ReservedPorts: reservedPorts,
ReservedPortsUDP: reservedPortsUDP,
DataDir: dataDir,
Checkpoint: checkpoint,
EngineAuthType: engineAuthType,
EngineAuthData: NewSensitiveRawMessage([]byte(engineAuthData)),
UpdatesEnabled: updatesEnabled,
UpdateDownloadDir: updateDownloadDir,
DisableMetrics: disableMetrics,
ReservedMemory: reservedMemory,
AvailableLoggingDrivers: availableLoggingDrivers,
PrivilegedDisabled: privilegedDisabled,
SELinuxCapable: seLinuxCapable,
AppArmorCapable: appArmorCapable,
TaskCleanupWaitDuration: taskCleanupWaitDuration,
TaskENIEnabled: taskENIEnabled,
TaskIAMRoleEnabled: taskIAMRoleEnabled,
DockerStopTimeout: dockerStopTimeout,
CredentialsAuditLogFile: credentialsAuditLogFile,
CredentialsAuditLogDisabled: credentialsAuditLogDisabled,
TaskIAMRoleEnabledForNetworkHost: taskIAMRoleEnabledForNetworkHost,
ImageCleanupDisabled: imageCleanupDisabled,
MinimumImageDeletionAge: minimumImageDeletionAge,
ImageCleanupInterval: imageCleanupInterval,
NumImagesToDeletePerCycle: numImagesToDeletePerCycle,
InstanceAttributes: instanceAttributes,
CNIPluginsPath: cniPluginsPath,
}, err
}
func parseEnvVariableUint16(envVar string) uint16 {
envVal := os.Getenv(envVar)
var var16 uint16
if envVal != "" {
var64, err := strconv.ParseUint(envVal, 10, 16)
if err != nil {
seelog.Warnf("Invalid format for \""+envVar+"\" environment variable; expected unsigned integer. err %v", err)
} else {
var16 = uint16(var64)
}
}
return var16
}
func parseEnvVariableDuration(envVar string) time.Duration {
var duration time.Duration
envVal := os.Getenv(envVar)
if envVal == "" {
seelog.Debugf("Environment variable empty: %v", envVar)
} else {
var err error
duration, err = time.ParseDuration(envVal)
if err != nil {
seelog.Warnf("Could not parse duration value: %v for Environment Variable %v : %v", envVal, envVar, err)
}
}
return duration
}
func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config {
iid, err := ec2client.InstanceIdentityDocument()
if err != nil {
seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error())
return Config{}
}
return Config{AWSRegion: iid.Region}
}
// NewConfig returns a config struct created by merging environment variables,
// a config file, and EC2 Metadata info.
// The 'config' struct it returns can be used, even if an error is returned. An
// error is returned, however, if the config is incomplete in some way that is
// considered fatal.
func NewConfig(ec2client ec2.EC2MetadataClient) (config *Config, err error) {
var errs []error
var errTmp error
envConfig, errTmp := environmentConfig() //Environment overrides all else
if errTmp != nil {
errs = append(errs, errTmp)
}
config = &envConfig
defer func() {
config.trimWhitespace()
config.Merge(DefaultConfig())
errTmp = config.validateAndOverrideBounds()
if errTmp != nil {
errs = append(errs, errTmp)
}
if len(errs) != 0 {
err = utils.NewMultiError(errs...)
} else {
err = nil
}
}()
if config.complete() {
// No need to do file / network IO
return config, nil
}
fcfg, errTmp := fileConfig()
if errTmp != nil {
errs = append(errs, errTmp)
}
config.Merge(fcfg)
if config.AWSRegion == "" {
// Get it from metadata only if we need to (network io)
config.Merge(ec2MetadataConfig(ec2client))
}
return config, err
}
// validateAndOverrideBounds performs validation over members of the Config struct
// and check the value against the minimum required value.
func (config *Config) validateAndOverrideBounds() error {
err := config.checkMissingAndDepreciated()
if err != nil {
return err
}
if config.DockerStopTimeout < minimumDockerStopTimeout {
return fmt.Errorf("Invalid negative DockerStopTimeout: %v", config.DockerStopTimeout.String())
}
var badDrivers []string
for _, driver := range config.AvailableLoggingDrivers {
_, ok := dockerclient.LoggingDriverMinimumVersion[driver]
if !ok {
badDrivers = append(badDrivers, string(driver))
}
}
if len(badDrivers) > 0 {
return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", "))
}
// If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration,
// print a warning and override it
if config.TaskCleanupWaitDuration < minimumTaskCleanupWaitDuration {
seelog.Warnf("Invalid value for image cleanup duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), config.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration)
config.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration
}
if config.ImageCleanupInterval < minimumImageCleanupInterval {
seelog.Warnf("Invalid value for image cleanup duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), config.ImageCleanupInterval, minimumImageCleanupInterval)
config.ImageCleanupInterval = DefaultImageCleanupTimeInterval
}
if config.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle {
seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overriden with the default value: %d. Parsed value: %d, minimum value: %d.", DefaultImageDeletionAge, config.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle)
config.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle
}
config.platformOverrides()
return nil
}
// String returns a lossy string representation of the config suitable for human readable display.
// Consequently, it *should not* return any sensitive information.
func (config *Config) String() string {
return fmt.Sprintf("Cluster: %v, Region: %v, DataDir: %v, Checkpoint: %v, AuthType: %v, UpdatesEnabled: %v, DisableMetrics: %v, ReservedMem: %v, TaskCleanupWaitDuration: %v, DockerStopTimeout: %v", config.Cluster, config.AWSRegion, config.DataDir, config.Checkpoint, config.EngineAuthType, config.UpdatesEnabled, config.DisableMetrics, config.ReservedMemory, config.TaskCleanupWaitDuration, config.DockerStopTimeout)
}
| 1 | 15,550 |
nit: Can Linux-specific paths be moved to a conditionally-compiled file for Linux?
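A minimal sketch of the conditional compilation the reviewer has in mind, assuming a hypothetical config_linux.go next to this file; only the constant name and value come from the patch above, and the build constraint is the standard Go mechanism:
// +build linux
package config
// pauseContainerTarballPath is the path to the pause container tarball.
// It is only meaningful on Linux hosts, so it lives in a Linux-only file.
const pauseContainerTarballPath = "/images/amazon-ecs-pause.tar"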
|
aws-amazon-ecs-agent
|
go
|
@@ -81,6 +81,10 @@ public abstract class SessionMap implements HasReadyState, Routable {
public abstract void remove(SessionId id);
+ public int getCount() {
+ return -10;
+ };
+
public URI getUri(SessionId id) throws NoSuchSessionException {
return get(id).getUri();
}
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.sessionmap;
import org.openqa.selenium.NoSuchSessionException;
import org.openqa.selenium.grid.data.Session;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.remote.SessionId;
import org.openqa.selenium.remote.http.HttpHandler;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import org.openqa.selenium.remote.http.Routable;
import org.openqa.selenium.remote.http.Route;
import org.openqa.selenium.remote.tracing.Tracer;
import org.openqa.selenium.status.HasReadyState;
import java.net.URI;
import java.util.Map;
import static org.openqa.selenium.remote.http.Route.combine;
import static org.openqa.selenium.remote.http.Route.delete;
import static org.openqa.selenium.remote.http.Route.post;
/**
* Provides a stable API for looking up where on the Grid a particular webdriver instance is
* running.
* <p>
* This class responds to the following URLs:
* <table summary="HTTP commands the SessionMap understands">
* <tr>
* <th>Verb</th>
* <th>URL Template</th>
* <th>Meaning</th>
* </tr>
* <tr>
* <td>DELETE</td>
* <td>/se/grid/session/{sessionId}</td>
* <td>Removes a {@link URI} from the session map. Calling this method more than once for the same
* {@link SessionId} will not throw an error.</td>
* </tr>
* <tr>
* <td>GET</td>
* <td>/se/grid/session/{sessionId}</td>
* <td>Retrieves the {@link URI} associated the {@link SessionId}, or throws a
* {@link org.openqa.selenium.NoSuchSessionException} should the session not be present.</td>
* </tr>
* <tr>
* <td>POST</td>
* <td>/se/grid/session/{sessionId}</td>
* <td>Registers the session with session map. In theory, the session map never expires a session
* from its mappings, but realistically, sessions may end up being removed for many reasons.
* </td>
* </tr>
* </table>
*/
public abstract class SessionMap implements HasReadyState, Routable {
protected final Tracer tracer;
private final Route routes;
public abstract boolean add(Session session);
public abstract Session get(SessionId id) throws NoSuchSessionException;
public abstract void remove(SessionId id);
public URI getUri(SessionId id) throws NoSuchSessionException {
return get(id).getUri();
}
public SessionMap(Tracer tracer) {
this.tracer = Require.nonNull("Tracer", tracer);
Json json = new Json();
routes = combine(
Route.get("/se/grid/session/{sessionId}/uri")
.to(params -> new GetSessionUri(this, sessionIdFrom(params))),
post("/se/grid/session")
.to(() -> new AddToSessionMap(tracer, json, this)),
Route.get("/se/grid/session/{sessionId}")
.to(params -> new GetFromSessionMap(tracer, this, sessionIdFrom(params))),
delete("/se/grid/session/{sessionId}")
.to(params -> new RemoveFromSession(tracer, this, sessionIdFrom(params))));
}
private SessionId sessionIdFrom(Map<String, String> params) {
return new SessionId(params.get("sessionId"));
}
@Override
public boolean matches(HttpRequest req) {
return routes.matches(req);
}
@Override
public HttpResponse execute(HttpRequest req) {
return routes.execute(req);
}
}
| 1 | 17,774 |
This is not the right approach. The `Distributor` maintains a model of the current state of the Grid. That model already contains the information about every active session. We don't need to modify `SessionMap` to expose it further.
|
SeleniumHQ-selenium
|
rb
|
@@ -279,8 +279,10 @@ func (r *runner) waitForJournal(ctx context.Context) error {
// unrecognized attributes are ignored. The list ends with a blank
// line.
func (r *runner) handleList(ctx context.Context, args []string) (err error) {
- if len(args) > 0 {
- return errors.New("Lists for non-fetches unsupported for now")
+ if len(args) == 1 && args[0] == "for-push" {
+ r.log.CDebugf(ctx, "Treating for-push the same as a regular list")
+ } else if len(args) > 0 {
+ return errors.Errorf("Bad list request: %v", args)
}
repo, err := r.initRepoIfNeeded(ctx)
| 1 |
// Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package kbfsgit
import (
"bufio"
"context"
"fmt"
"io"
"os"
"path"
"strings"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
gogit "gopkg.in/src-d/go-git.v4"
gogitcfg "gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
)
const (
gitCmdCapabilities = "capabilities"
gitCmdList = "list"
gitCmdFetch = "fetch"
gitCmdPush = "push"
// Debug tag ID for an individual git command passed to the process.
ctxCommandOpID = "GITCMDID"
kbfsgitPrefix = "keybase://"
repoSplitter = "/"
kbfsRepoDir = ".kbfs_git"
publicName = "public"
privateName = "private"
teamName = "team"
// localRepoRemoteName is the name of the remote that gets added
// locally to the config of the KBFS bare repo, pointing to the
// git repo stored at the `gitDir` passed to `newRunner`.
//
// In go-git, there is no way to hook two go-git.Repository
// instances together to do fetches/pulls between them. One of the
// two repos has to be defined as a "remote" to the other one in
// order to use the nice Fetch and Pull commands. (There might be
// other more involved ways to transfer objects manually
// one-by-one, but that seems like it would be pretty sad.)
//
// Since there is no standard remote protocol for keybase yet
// (that's what we're building!), it's not supported by go-git
// itself. That means our only option is to treat the local
// on-disk repo as a "remote" with respect to the bare KBFS repo,
// and do everything in reverse: for example, when a user does a
// push, we actually fetch from the local repo and write the
// objects into the bare repo.
localRepoRemoteName = "local"
)
type ctxCommandTagKey int
const (
ctxCommandIDKey ctxCommandTagKey = iota
)
func getHandleFromFolderName(ctx context.Context, config libkbfs.Config,
tlfName string, t tlf.Type) (*libkbfs.TlfHandle, error) {
for {
tlfHandle, err := libkbfs.ParseTlfHandle(
ctx, config.KBPKI(), tlfName, t)
switch e := errors.Cause(err).(type) {
case libkbfs.TlfNameNotCanonical:
tlfName = e.NameToTry
case nil:
return tlfHandle, nil
default:
return nil, err
}
}
}
type runner struct {
config libkbfs.Config
log logger.Logger
h *libkbfs.TlfHandle
remote string
repo string
gitDir string
uniqID string
input io.Reader
output io.Writer
}
// newRunner creates a new runner for git commands. It expects `repo`
// to be in the form "keybase://private/user/reponame". `remote`
// is the local name assigned to that URL, while `gitDir` is the
// filepath leading to the .git directory of the caller's local
// on-disk repo
func newRunner(ctx context.Context, config libkbfs.Config,
remote, repo, gitDir string, input io.Reader, output io.Writer) (
*runner, error) {
tlfAndRepo := strings.TrimPrefix(repo, kbfsgitPrefix)
parts := strings.Split(tlfAndRepo, repoSplitter)
if len(parts) != 3 {
return nil, errors.Errorf("Repo should be in the format "+
"%s<tlfType>%s<tlf>%s<repo>, but got %s",
kbfsgitPrefix, repoSplitter, repoSplitter, tlfAndRepo)
}
var t tlf.Type
switch parts[0] {
case publicName:
t = tlf.Public
case privateName:
t = tlf.Private
case teamName:
t = tlf.SingleTeam
default:
return nil, errors.Errorf("Unrecognized TLF type: %s", parts[0])
}
h, err := getHandleFromFolderName(ctx, config, parts[1], t)
if err != nil {
return nil, err
}
// Use the device ID and PID to make a unique ID (for generating
// temp files in KBFS).
session, err := libkbfs.GetCurrentSessionIfPossible(
ctx, config.KBPKI(), h.Type() == tlf.Public)
if err != nil {
return nil, err
}
uniqID := fmt.Sprintf("%s-%d", session.VerifyingKey.String(), os.Getpid())
return &runner{
config: config,
log: config.MakeLogger(""),
h: h,
remote: remote,
repo: parts[2],
gitDir: gitDir,
uniqID: uniqID,
input: input,
output: output}, nil
}
// handleCapabilities: from https://git-scm.com/docs/git-remote-helpers
//
// Lists the capabilities of the helper, one per line, ending with a
// blank line. Each capability may be preceded with *, which marks
// them mandatory for git versions using the remote helper to
// understand. Any unknown mandatory capability is a fatal error.
func (r *runner) handleCapabilities() error {
caps := []string{
gitCmdFetch,
gitCmdPush,
}
for _, c := range caps {
_, err := r.output.Write([]byte(c + "\n"))
if err != nil {
return err
}
}
_, err := r.output.Write([]byte("\n"))
return err
}
func (r *runner) initRepoIfNeeded(ctx context.Context) (
*gogit.Repository, error) {
rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
ctx, r.h, libkbfs.MasterBranch)
if err != nil {
return nil, err
}
lookupOrCreateDir := func(n libkbfs.Node, name string) (
libkbfs.Node, error) {
newNode, _, err := r.config.KBFSOps().Lookup(ctx, n, name)
switch errors.Cause(err).(type) {
case libkbfs.NoSuchNameError:
newNode, _, err = r.config.KBFSOps().CreateDir(ctx, n, name)
if err != nil {
return nil, err
}
case nil:
default:
return nil, err
}
return newNode, nil
}
repoDir, err := lookupOrCreateDir(rootNode, kbfsRepoDir)
if err != nil {
return nil, err
}
_, err = lookupOrCreateDir(repoDir, r.repo)
if err != nil {
return nil, err
}
fs, err := libfs.NewFS(
ctx, r.config, r.h, path.Join(kbfsRepoDir, r.repo), r.uniqID)
if err != nil {
return nil, err
}
// We don't persist remotes to the config on disk for two
// reasons. 1) gogit/gcfg has a bug where it can't handle
// backslashes in remote URLs, and 2) we don't want to persist the
// remotes anyway since they'll contain local paths and wouldn't
// make sense to other devices, plus that could leak local info.
storer, err := newConfigWithoutRemotesStorer(fs)
if err != nil {
return nil, err
}
// TODO: This needs to take a server lock when initializing a
// repo.
r.log.CDebugf(ctx, "Attempting to init or open repo %s", r.repo)
repo, err := gogit.Init(storer, nil)
if err == gogit.ErrRepositoryAlreadyExists {
repo, err = gogit.Open(storer, nil)
}
if err != nil {
return nil, err
}
return repo, nil
}
func (r *runner) waitForJournal(ctx context.Context) error {
rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
ctx, r.h, libkbfs.MasterBranch)
if err != nil {
return err
}
err = r.config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
return err
}
jServer, err := libkbfs.GetJournalServer(r.config)
if err != nil {
r.log.CDebugf(ctx, "No journal server: %+v", err)
return nil
}
err = jServer.Wait(ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
return err
}
// Make sure that everything is truly flushed.
status, err := jServer.JournalStatus(rootNode.GetFolderBranch().Tlf)
if err != nil {
return err
}
if status.RevisionStart != kbfsmd.RevisionUninitialized {
r.log.CDebugf(ctx, "Journal status: %+v", status)
return errors.New("Journal is non-empty after a wait")
}
return nil
}
// handleList: From https://git-scm.com/docs/git-remote-helpers
//
// Lists the refs, one per line, in the format "<value> <name> [<attr>
// …]". The value may be a hex sha1 hash, "@<dest>" for a symref, or
// "?" to indicate that the helper could not get the value of the
// ref. A space-separated list of attributes follows the name;
// unrecognized attributes are ignored. The list ends with a blank
// line.
func (r *runner) handleList(ctx context.Context, args []string) (err error) {
if len(args) > 0 {
return errors.New("Lists for non-fetches unsupported for now")
}
repo, err := r.initRepoIfNeeded(ctx)
if err != nil {
return err
}
refs, err := repo.References()
if err != nil {
return err
}
for {
ref, err := refs.Next()
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return err
}
value := ""
switch ref.Type() {
case plumbing.HashReference:
value = ref.Hash().String()
case plumbing.SymbolicReference:
value = "@" + ref.Target().String()
default:
value = "?"
}
refStr := value + " " + ref.Name().String() + "\n"
_, err = r.output.Write([]byte(refStr))
if err != nil {
return err
}
}
err = r.waitForJournal(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Done waiting for journal")
_, err = r.output.Write([]byte("\n"))
return err
}
// handleFetchBatch: From https://git-scm.com/docs/git-remote-helpers
//
// fetch <sha1> <name>
// Fetches the given object, writing the necessary objects to the
// database. Fetch commands are sent in a batch, one per line,
// terminated with a blank line. Outputs a single blank line when all
// fetch commands in the same batch are complete. Only objects which
// were reported in the output of list with a sha1 may be fetched this
// way.
//
// Optionally may output a lock <file> line indicating a file under
// GIT_DIR/objects/pack which is keeping a pack until refs can be
// suitably updated.
func (r *runner) handleFetchBatch(ctx context.Context, args [][]string) (
err error) {
repo, err := r.initRepoIfNeeded(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Fetching %d refs into %s", len(args), r.gitDir)
remote, err := repo.CreateRemote(&gogitcfg.RemoteConfig{
Name: localRepoRemoteName,
URL: r.gitDir,
})
if err != nil {
return err
}
for _, fetch := range args {
if len(fetch) != 2 {
return errors.Errorf("Bad fetch request: %v", fetch)
}
refInBareRepo := fetch[1]
// Push into a local ref with a temporary name, because the
// git process that invoked us will get confused if we make a
// ref with the same name. Later, delete this temporary ref.
localTempRef := plumbing.ReferenceName(refInBareRepo).Short() +
"-" + r.uniqID
refSpec := fmt.Sprintf(
"%s:refs/remotes/%s/%s", refInBareRepo, r.remote, localTempRef)
r.log.CDebugf(ctx, "Fetching %s", refSpec)
// Now "push" into the local repo to get it to store objects
// from the KBFS bare repo.
err = remote.Push(&gogit.PushOptions{
RemoteName: localRepoRemoteName,
RefSpecs: []gogitcfg.RefSpec{gogitcfg.RefSpec(refSpec)},
})
if err != nil && err != gogit.NoErrAlreadyUpToDate {
return err
}
// Delete the temporary refspec now that the objects are
// safely stored in the local repo.
refSpec = fmt.Sprintf(":refs/remotes/%s/%s", r.remote, localTempRef)
err = remote.Push(&gogit.PushOptions{
RemoteName: localRepoRemoteName,
RefSpecs: []gogitcfg.RefSpec{gogitcfg.RefSpec(refSpec)},
})
if err != nil && err != gogit.NoErrAlreadyUpToDate {
return err
}
}
err = r.waitForJournal(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Done waiting for journal")
_, err = r.output.Write([]byte("\n"))
return err
}
func (r *runner) processCommands(ctx context.Context) (err error) {
r.log.CDebugf(ctx, "Ready to process")
reader := bufio.NewReader(r.input)
var fetchBatch [][]string
for {
cmd, err := reader.ReadString('\n')
if errors.Cause(err) == io.EOF {
r.log.CDebugf(ctx, "Done processing commands")
return nil
} else if err != nil {
return err
}
ctx := libkbfs.CtxWithRandomIDReplayable(
ctx, ctxCommandIDKey, ctxCommandOpID, r.log)
cmdParts := strings.Fields(cmd)
if len(cmdParts) == 0 {
if len(fetchBatch) > 0 {
r.log.CDebugf(ctx, "Processing fetch batch")
err = r.handleFetchBatch(ctx, fetchBatch)
if err != nil {
return err
}
fetchBatch = nil
continue
} else {
r.log.CDebugf(ctx, "Done processing commands")
return nil
}
}
r.log.CDebugf(ctx, "Received command: %s", cmd)
switch cmdParts[0] {
case gitCmdCapabilities:
err = r.handleCapabilities()
case gitCmdList:
err = r.handleList(ctx, cmdParts[1:])
case gitCmdFetch:
fetchBatch = append(fetchBatch, cmdParts[1:])
default:
err = errors.Errorf("Unsupported command: %s", cmdParts[0])
}
if err != nil {
return err
}
}
}
| 1 | 17,603 |
The Git docs seem to suggest `for-push` is an optimization. Do we have a plan/ticket for treating them differently?
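For reference, a sketch of what the helper sees in both cases, using the output format documented on handleList above; the hash and ref names here are made up:
list
list for-push
Either request is answered with "<value> <name>" lines followed by a blank line, for example:
9f3c2d1e0b7a65448392817263545f6a7b8c9d0e refs/heads/master
@refs/heads/master HEAD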
|
keybase-kbfs
|
go
|
@@ -449,7 +449,8 @@ debug_tls:When 1, print some TLS session secrets when they are computed.
"mobileip", "netbios", "netflow", "ntp", "ppp", "pptp",
"radius", "rip", "rtp", "skinny", "smb", "snmp",
"tftp", "x509", "bluetooth", "dhcp6", "llmnr",
- "sctp", "vrrp", "ipsec", "lltd", "vxlan", "eap"]
+ "sctp", "vrrp", "ipsec", "lltd", "vxlan", "eap",
+ "ltp", "bp"]
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_advanced = isCryptographyAdvanced()
| 1 |
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import os,time,socket,sys
from scapy import VERSION
from scapy.data import *
from scapy import base_classes
from scapy.themes import NoTheme, apply_ipython_style
from scapy.error import log_scapy
import scapy.modules.six as six
############
## Config ##
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76-max(len(i),10)
if len(r) > wlen:
r = r[:wlen-3]+"..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name, default, hook, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default=default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
class ProgPath(ConfClass):
pdfreader = "acroread"
psreader = "gv"
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__," ".join(str(x) for x in self.fields))
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
if item in self:
return self[item]
return default
def __repr__(self):
lst = []
for num,layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num,"%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer,num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num,"%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x,y in lst)
class LayersList(list):
def __repr__(self):
s=[]
for l in self:
s.append("%-20s: %s" % (l.__name__,l.name))
return "\n".join(s)
def register(self, layer):
self.append(layer)
class CommandsList(list):
def __repr__(self):
s=[]
for l in sorted(self,key=lambda x:x.__name__):
if l.__doc__:
doc = l.__doc__.split("\n")[0]
else:
doc = "--"
s.append("%-20s: %s" % (l.__name__,doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self,item)
if self.timeout is not None:
t = self._timetable[item]
if time.time()-t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item,v)
def update(self, other):
for key, value in other.iteritems():
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0=time.time()
return ((k,v) for (k,v) in six.iteritems(self.__dict__) if t0-self._timetable[k] < self.timeout)
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0=time.time()
return (k for k in six.iterkeys(self.__dict__) if t0-self._timetable[k] < self.timeout)
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0=time.time()
return (v for (k,v) in six.iteritems(self.__dict__) if t0-self._timetable[k] < self.timeout)
def items(self):
if self.timeout is None:
return dict.items(self)
t0=time.time()
return [(k,v) for (k,v) in six.iteritems(self.__dict__) if t0-self._timetable[k] < self.timeout]
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0=time.time()
return [k for k in six.iterkeys(self.__dict__) if t0-self._timetable[k] < self.timeout]
def values(self):
if self.timeout is None:
return dict.values(self)
t0=time.time()
return [v for (k,v) in six.iteritems(self.__dict__) if t0-self._timetable[k] < self.timeout]
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout)
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk+1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self,cache.name,cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self,co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
class LogLevel(object):
def __get__(self, obj, otype):
return obj._logLevel
def __set__(self,obj,val):
log_scapy.setLevel(val)
obj._logLevel = val
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
from distutils.version import LooseVersion
return LooseVersion(cryptography.__version__) >= LooseVersion("1.7")
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
import cryptography
except ImportError:
return False
from distutils.version import LooseVersion
lib_valid = LooseVersion(cryptography.__version__) >= LooseVersion("2.0")
if not lib_valid:
return False
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
X25519PrivateKey.generate()
except:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
stealth : if 1, prevents any unwanted packet to go out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks)
if 2, strictly checks that they are equals
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks)
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation
iface : selects the default output interface for srp() and sendp(). default: "eth0"
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan)
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration
debug_tls : when 1, print some TLS session secrets when they are computed.
"""
version = VERSION
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
logLevel = LogLevel()
checkIPID = 0
checkIPsrc = 1
checkIPaddr = 1
checkIPinIP = True
check_TCPerror_seqack = 0
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = 1
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L2socket = None
L2listen = None
BTsocket = None
min_pkt_size = 60
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = 0
debug_tls = 0
wepkey = ""
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = 1
debug_dissector = 0
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
warning_next_only_once = False
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = isPyPy()
use_pcap = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = False
use_winpcapy = False
use_npcap = False
ipv6_enabled = socket.has_ipv6
ethertypes = ETHER_TYPES
protocols = IP_PROTOS
services_tcp = TCP_SERVICES
services_udp = UDP_SERVICES
extensions_paths = "."
manufdb = MANUFDB
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = '/usr/share/GeoIP/GeoIPCity.dat'
geoip_city_ipv6 = '/usr/share/GeoIP/GeoIPCityv6.dat'
load_layers = ["l2", "inet", "dhcp", "dns", "dot11", "gprs",
"hsrp", "inet6", "ir", "isakmp", "l2tp", "mgcp",
"mobileip", "netbios", "netflow", "ntp", "ppp", "pptp",
"radius", "rip", "rtp", "skinny", "smb", "snmp",
"tftp", "x509", "bluetooth", "dhcp6", "llmnr",
"sctp", "vrrp", "ipsec", "lltd", "vxlan", "eap"]
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_advanced = isCryptographyAdvanced()
fancy_prompt = True
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.")
for m in ["inet6","dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
if not Conf.crypto_valid:
log_scapy.warning("Crypto-related methods disabled for IPsec, Dot11 "
"and TLS layers (needs python-cryptography v1.7+).")
conf=Conf()
conf.logLevel=30 # 30=Warning
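# A hedged tuning sketch using the conf instance created just above: the
# attribute names come from the Conf class documented earlier, but the values
# and the interface name are illustrative only, not recommendations.
conf.verb = 0              # almost mute: suppress send/receive chatter
conf.checkIPaddr = 0       # accept answers coming from a different IP (e.g. DHCP)
conf.iface = "eth1"        # hypothetical interface name; pick one that exists locally
conf.promisc = 0           # open listening sockets in non-promiscuous mode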
def crypto_validator(func):
"""
This is a decorator to be used for any method relying on the cryptography library.
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'; a usage sketch follows this definition.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.")
return func(*args, **kwargs)
return func_in
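# A minimal usage sketch of the crypto_validator decorator defined above; the
# class and method names are hypothetical, and the body stands in for real
# python-cryptography calls.
class ExampleCipherSuite(object):
    @crypto_validator
    def derive_key(self, secret):
        # If conf.crypto_valid is False, the decorator raises ImportError
        # before this body ever runs.
        return secret[::-1]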
| 1 | 11,551 |
This is not necessary since you moved the layers to `contrib/`.
|
secdev-scapy
|
py
|
@@ -1088,7 +1088,7 @@ module Bolt
end
define('--log-level LEVEL',
"Set the log level for the console. Available options are",
- "trace, debug, info, warn, error, fatal, any.") do |level|
+ "trace, debug, info, warn, error, fatal.") do |level|
@options[:log] = { 'console' => { 'level' => level } }
end
define('--clear-cache',
| 1 |
# frozen_string_literal: true
# Note this file includes very few 'requires' because it expects to be used from the CLI.
require 'optparse'
module Bolt
class BoltOptionParser < OptionParser
PROJECT_PATHS = %w[project].freeze
OPTIONS = { inventory: %w[targets query rerun],
authentication: %w[user password password-prompt private-key host-key-check ssl ssl-verify],
escalation: %w[run-as sudo-password sudo-password-prompt sudo-executable],
run_context: %w[concurrency inventoryfile save-rerun cleanup],
global_config_setters: PROJECT_PATHS + %w[modulepath],
transports: %w[transport connect-timeout tty native-ssh ssh-command copy-command],
display: %w[format color verbose trace stream],
global: %w[help version log-level clear-cache] }.freeze
ACTION_OPTS = OPTIONS.values.flatten.freeze
def get_help_text(subcommand, action = nil)
case subcommand
when 'apply'
{ flags: ACTION_OPTS + %w[noop execute compile-concurrency hiera-config],
banner: APPLY_HELP }
when 'command'
case action
when 'run'
{ flags: ACTION_OPTS + %w[env-var],
banner: COMMAND_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: COMMAND_HELP }
end
when 'file'
case action
when 'upload'
{ flags: ACTION_OPTS + %w[tmpdir],
banner: FILE_UPLOAD_HELP }
when 'download'
{ flags: ACTION_OPTS,
banner: FILE_DOWNLOAD_HELP }
else
{ flags: OPTIONS[:global],
banner: FILE_HELP }
end
when 'inventory'
case action
when 'show'
{ flags: OPTIONS[:inventory] + OPTIONS[:global] +
PROJECT_PATHS + %w[format inventoryfile detail],
banner: INVENTORY_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: INVENTORY_HELP }
end
when 'group'
case action
when 'show'
{ flags: OPTIONS[:global] + PROJECT_PATHS + %w[format inventoryfile],
banner: GROUP_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: GROUP_HELP }
end
when 'guide'
{ flags: OPTIONS[:global] + %w[format],
banner: GUIDE_HELP }
when 'lookup'
{ flags: ACTION_OPTS + %w[hiera-config plan-hierarchy],
banner: LOOKUP_HELP }
when 'module'
case action
when 'add'
{ flags: OPTIONS[:global] + PROJECT_PATHS,
banner: MODULE_ADD_HELP }
when 'generate-types'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: MODULE_GENERATETYPES_HELP }
when 'install'
{ flags: OPTIONS[:global] + PROJECT_PATHS + %w[force resolve],
banner: MODULE_INSTALL_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: MODULE_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: MODULE_HELP }
end
when 'plan'
case action
when 'convert'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters],
banner: PLAN_CONVERT_HELP }
when 'new'
{ flags: OPTIONS[:global] + PROJECT_PATHS + %w[pp],
banner: PLAN_NEW_HELP }
when 'run'
{ flags: ACTION_OPTS + %w[params compile-concurrency tmpdir hiera-config],
banner: PLAN_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: PLAN_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: PLAN_HELP }
end
when 'plugin'
case action
when 'show'
{ flags: OPTIONS[:global] + %w[color format modulepath project],
banner: PLUGIN_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: PLUGIN_HELP }
end
when 'project'
case action
when 'init'
{ flags: OPTIONS[:global] + %w[modules],
banner: PROJECT_INIT_HELP }
when 'migrate'
{ flags: OPTIONS[:global] + PROJECT_PATHS + %w[inventoryfile],
banner: PROJECT_MIGRATE_HELP }
else
{ flags: OPTIONS[:global],
banner: PROJECT_HELP }
end
when 'script'
case action
when 'run'
{ flags: ACTION_OPTS + %w[tmpdir env-var],
banner: SCRIPT_RUN_HELP }
else
{ flags: OPTIONS[:global],
banner: SCRIPT_HELP }
end
when 'secret'
case action
when 'createkeys'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin force],
banner: SECRET_CREATEKEYS_HELP }
when 'decrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_DECRYPT_HELP }
when 'encrypt'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[plugin],
banner: SECRET_ENCRYPT_HELP }
else
{ flags: OPTIONS[:global],
banner: SECRET_HELP }
end
when 'task'
case action
when 'run'
{ flags: ACTION_OPTS + %w[params tmpdir noop],
banner: TASK_RUN_HELP }
when 'show'
{ flags: OPTIONS[:global] + OPTIONS[:global_config_setters] + %w[filter format],
banner: TASK_SHOW_HELP }
else
{ flags: OPTIONS[:global],
banner: TASK_HELP }
end
else
{ flags: OPTIONS[:global],
banner: BANNER }
end
end
COLORS = {
cyan: "36"
}.freeze
def self.colorize(color, string)
if $stdout.isatty
"\033[#{COLORS[color]}m#{string}\033[0m"
else
string
end
end
BANNER = <<~HELP
#{colorize(:cyan, 'Name')}
bolt
#{colorize(:cyan, 'Usage')}
bolt <subcommand> [action] [options]
#{colorize(:cyan, 'Description')}
Bolt is an orchestration tool that automates the manual work it takes to
maintain your infrastructure.
#{colorize(:cyan, 'Subcommands')}
apply Apply Puppet manifest code
command Run a command remotely
file Copy files between the controller and targets
group Show the list of groups in the inventory
guide View guides for Bolt concepts and features
inventory Show the list of targets an action would run on
module Manage Bolt project modules
lookup Look up a value with Hiera
plan Convert, create, show, and run Bolt plans
plugin Show available plugins
project Create and migrate Bolt projects
script Upload a local script and run it remotely
secret Create encryption keys and encrypt and decrypt values
task Show and run Bolt tasks
#{colorize(:cyan, 'Guides')}
For a list of guides on Bolt's concepts and features, run 'bolt guide'.
Find Bolt's documentation at https://bolt.guide.
HELP
APPLY_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
apply
#{colorize(:cyan, 'Usage')}
bolt apply [manifest] {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Apply Puppet manifest code on the specified targets.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-apply.
#{colorize(:cyan, 'Examples')}
bolt apply manifest.pp -t target
bolt apply -e "file { '/etc/puppetlabs': ensure => present }" -t target
HELP
COMMAND_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
command
#{colorize(:cyan, 'Usage')}
bolt command <action> [options]
#{colorize(:cyan, 'Description')}
Run a command on the specified targets.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-commands.
#{colorize(:cyan, 'Actions')}
run Run a command on the specified targets.
HELP
COMMAND_RUN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
run
#{colorize(:cyan, 'Usage')}
bolt command run <command> {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Run a command on the specified targets.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-commands.
#{colorize(:cyan, 'Examples')}
bolt command run 'uptime' -t target1,target2
HELP
FILE_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
file
#{colorize(:cyan, 'Usage')}
bolt file <action> [options]
#{colorize(:cyan, 'Description')}
Copy files and directories between the controller and targets.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-commands.
#{colorize(:cyan, 'Actions')}
download Download a file or directory to the controller
upload Upload a local file or directory from the controller
HELP
FILE_DOWNLOAD_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
download
#{colorize(:cyan, 'Usage')}
bolt file download <source> <destination> {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Download a file or directory from one or more targets.
Downloaded files and directories are saved to a subdirectory
matching the target's name under the destination directory. The
destination directory is expanded relative to the downloads
subdirectory of the project directory.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-commands.
#{colorize(:cyan, 'Examples')}
bolt file download /etc/ssh_config ssh_config -t all
HELP
FILE_UPLOAD_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
upload
#{colorize(:cyan, 'Usage')}
bolt file upload <source> <destination> {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Upload a local file or directory.
#{colorize(:cyan, 'Documentation')}
For documentation see http://pup.pt/bolt-commands.
#{colorize(:cyan, 'Examples')}
bolt file upload /tmp/source /etc/profile.d/login.sh -t target1
HELP
GROUP_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
group
#{colorize(:cyan, 'Usage')}
bolt group <action> [options]
#{colorize(:cyan, 'Description')}
Show the list of groups in the inventory.
#{colorize(:cyan, 'Documentation')}
To learn more about the inventory run 'bolt guide inventory'.
#{colorize(:cyan, 'Actions')}
show Show the list of groups in the inventory
HELP
GROUP_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt group show [options]
#{colorize(:cyan, 'Description')}
Show the list of groups in the inventory.
#{colorize(:cyan, 'Documentation')}
To learn more about the inventory run 'bolt guide inventory'.
HELP
GUIDE_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
guide
#{colorize(:cyan, 'Usage')}
bolt guide [topic] [options]
#{colorize(:cyan, 'Description')}
View guides for Bolt's concepts and features.
Omitting a topic will display a list of available guides,
while providing a topic will display the relevant guide.
#{colorize(:cyan, 'Examples')}
View a list of available guides
bolt guide
View the 'project' guide page
bolt guide project
HELP
INVENTORY_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
inventory
#{colorize(:cyan, 'Usage')}
bolt inventory <action> [options]
#{colorize(:cyan, 'Description')}
Show the list of targets an action would run on.
#{colorize(:cyan, 'Documentation')}
To learn more about the inventory run 'bolt guide inventory'.
#{colorize(:cyan, 'Actions')}
show Show the list of targets an action would run on
HELP
INVENTORY_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt inventory show [options]
#{colorize(:cyan, 'Description')}
Show the list of targets an action would run on. This command will list
all targets in the project's inventory by default.
To filter the targets in the list, use the --targets, --query, or --rerun
options. To view detailed configuration and data for targets, use the
--detail option. To learn more about the inventory run 'bolt guide inventory'.
#{colorize(:cyan, 'Documentation')}
To learn more about the inventory run 'bolt guide inventory'.
HELP
LOOKUP_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
lookup
#{colorize(:cyan, 'Usage')}
bolt lookup <key> {--targets TARGETS | --query QUERY | --rerun FILTER | --plan-hierarchy}
[options]
#{colorize(:cyan, 'Description')}
Look up a value with Hiera.
#{colorize(:cyan, 'Documentation')}
Learn more about using Hiera with Bolt at https://pup.pt/bolt-hiera.
#{colorize(:cyan, 'Examples')}
bolt lookup password --targets servers
bolt lookup password --plan-hierarchy variable=value
HELP
MODULE_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
module
#{colorize(:cyan, 'Usage')}
bolt module <action> [options]
#{colorize(:cyan, 'Description')}
Manage Bolt project modules.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt modules run 'bolt guide module'.
#{colorize(:cyan, 'Actions')}
add Add a module to the project
generate-types Generate type references to register in plans
install Install the project's modules
show List modules available to the Bolt project
HELP
MODULE_ADD_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
add
#{colorize(:cyan, 'Usage')}
bolt module add <module> [options]
#{colorize(:cyan, 'Description')}
Add a module to the project.
Module declarations are loaded from the project's configuration
file. Bolt will automatically resolve all module dependencies,
generate a Puppetfile, and install the modules.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt modules, run 'bolt guide module'.
HELP
MODULE_GENERATETYPES_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
generate-types
#{colorize(:cyan, 'Usage')}
bolt module generate-types [options]
#{colorize(:cyan, 'Description')}
Generate type references to register in plans. To learn more about
Bolt modules, run 'bolt guide module'.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt modules, run 'bolt guide module'.
HELP
MODULE_INSTALL_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
install
#{colorize(:cyan, 'Usage')}
bolt module install [options]
#{colorize(:cyan, 'Description')}
Install the project's modules.
Module declarations are loaded from the project's configuration
file. Bolt will automatically resolve all module dependencies,
generate a Puppetfile, and install the modules.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt modules, run 'bolt guide module'.
HELP
MODULE_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt module show [options]
#{colorize(:cyan, 'Description')}
List modules available to the Bolt project.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt modules, run 'bolt guide module'.
HELP
PLAN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
plan
#{colorize(:cyan, 'Usage')}
bolt plan <action> [options]
#{colorize(:cyan, 'Description')}
Convert, create, show, and run Bolt plans.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plans at https://pup.pt/bolt-plans.
#{colorize(:cyan, 'Actions')}
convert Convert a YAML plan to a Bolt plan
new Create a new plan in the current project
run Run a plan on the specified targets
show Show available plans and plan documentation
HELP
PLAN_CONVERT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
convert
#{colorize(:cyan, 'Usage')}
bolt plan convert <plan name> [options]
#{colorize(:cyan, 'Description')}
Convert a YAML plan to a Puppet language plan and print the converted
plan to stdout.
Converting a YAML plan might result in a plan that is syntactically
correct but has different behavior. Always verify a converted plan's
functionality. Note that the converted plan is not written to a file.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plans at https://pup.pt/bolt-plans.
#{colorize(:cyan, 'Examples')}
bolt plan convert myproject::myplan
bolt plan convert path/to/plan/myplan.yaml
HELP
PLAN_NEW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
new
#{colorize(:cyan, 'Usage')}
bolt plan new <plan name> [options]
#{colorize(:cyan, 'Description')}
Create a new plan in the current project.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plans at https://pup.pt/bolt-plans.
#{colorize(:cyan, 'Examples')}
bolt plan new myproject::myplan
HELP
PLAN_RUN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
run
#{colorize(:cyan, 'Usage')}
bolt plan run <plan name> [parameters] [options]
#{colorize(:cyan, 'Description')}
Run a plan on the specified targets.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plans at https://pup.pt/bolt-plans.
#{colorize(:cyan, 'Examples')}
bolt plan run canary --targets target1,target2 command=hostname
HELP
PLAN_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt plan show [plan name] [options]
#{colorize(:cyan, 'Description')}
Show available plans and plan documentation.
Omitting the name of a plan will display a list of plans available
in the Bolt project.
Providing the name of a plan will display detailed documentation for
the plan, including a list of available parameters.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plans at https://pup.pt/bolt-plans.
#{colorize(:cyan, 'Examples')}
Display a list of available plans
bolt plan show
Display documentation for the aggregate::count plan
bolt plan show aggregate::count
HELP
PLUGIN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
plugin
#{colorize(:cyan, 'Usage')}
bolt plugin <action> [options]
#{colorize(:cyan, 'Description')}
Show available plugins.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plugins at https://pup.pt/bolt-plugins.
#{colorize(:cyan, 'Actions')}
show Show available plugins
HELP
PLUGIN_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt plugin show [options]
#{colorize(:cyan, 'Description')}
Show available plugins.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt plugins at https://pup.pt/bolt-plugins.
HELP
PROJECT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
project
#{colorize(:cyan, 'Usage')}
bolt project <action> [options]
#{colorize(:cyan, 'Description')}
Create and migrate Bolt projects.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt projects, run 'bolt guide project'.
#{colorize(:cyan, 'Actions')}
init Create a new Bolt project
migrate Migrate a Bolt project to the latest version
HELP
PROJECT_INIT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
init
#{colorize(:cyan, 'Usage')}
bolt project init [name] [options]
#{colorize(:cyan, 'Description')}
Create a new Bolt project in the current working directory.
Specify a name for the Bolt project. Defaults to the basename of the current working directory.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt projects, run 'bolt guide project'.
#{colorize(:cyan, 'Examples')}
Create a new Bolt project using the directory as the project name.
bolt project init
Create a new Bolt project with a specified name.
bolt project init myproject
Create a new Bolt project with existing modules.
bolt project init --modules puppetlabs-apt,puppetlabs-ntp
HELP
PROJECT_MIGRATE_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
migrate
#{colorize(:cyan, 'Usage')}
bolt project migrate [options]
#{colorize(:cyan, 'Description')}
Migrate a Bolt project to use current best practices and the latest version of
configuration files.
#{colorize(:cyan, 'Documentation')}
To learn more about Bolt projects, run 'bolt guide project'.
HELP
SCRIPT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
script
#{colorize(:cyan, 'Usage')}
bolt script <action> [options]
#{colorize(:cyan, 'Description')}
Run a script on the specified targets.
#{colorize(:cyan, 'Documentation')}
Learn more about running scripts at https://pup.pt/bolt-commands.
#{colorize(:cyan, 'Actions')}
run Run a script on the specified targets.
HELP
SCRIPT_RUN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
run
#{colorize(:cyan, 'Usage')}
bolt script run <script> [arguments] {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Run a script on the specified targets.
Arguments passed to a script are passed literally and are not interpolated
by the shell. Any arguments containing spaces or special characters should
be quoted.
#{colorize(:cyan, 'Documentation')}
Learn more about running scripts at https://pup.pt/bolt-commands.
#{colorize(:cyan, 'Examples')}
bolt script run myscript.sh 'echo hello' --targets target1,target2
HELP
SECRET_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
secret
#{colorize(:cyan, 'Usage')}
bolt secret <action> [options]
#{colorize(:cyan, 'Description')}
Create encryption keys and encrypt and decrypt values.
#{colorize(:cyan, 'Documentation')}
Learn more about secrets plugins at http://pup.pt/bolt-plugins.
#{colorize(:cyan, 'Actions')}
createkeys Create new encryption keys
encrypt Encrypt a value
decrypt Decrypt a value
HELP
SECRET_CREATEKEYS_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
createkeys
#{colorize(:cyan, 'Usage')}
bolt secret createkeys [options]
#{colorize(:cyan, 'Description')}
Create new encryption keys.
#{colorize(:cyan, 'Documentation')}
Learn more about secrets plugins at http://pup.pt/bolt-plugins.
HELP
SECRET_DECRYPT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
decrypt
#{colorize(:cyan, 'Usage')}
bolt secret decrypt <ciphertext> [options]
#{colorize(:cyan, 'Description')}
Decrypt a value.
#{colorize(:cyan, 'Documentation')}
Learn more about secrets plugins at http://pup.pt/bolt-plugins.
HELP
SECRET_ENCRYPT_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
encrypt
#{colorize(:cyan, 'Usage')}
bolt secret encrypt <plaintext> [options]
#{colorize(:cyan, 'Description')}
Encrypt a value.
#{colorize(:cyan, 'Documentation')}
Learn more about secrets plugins at http://pup.pt/bolt-plugins.
HELP
TASK_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
task
#{colorize(:cyan, 'Usage')}
bolt task <action> [options]
#{colorize(:cyan, 'Description')}
Show and run Bolt tasks.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt tasks at http://pup.pt/bolt-tasks.
#{colorize(:cyan, 'Actions')}
run Run a Bolt task
show Show available tasks and task documentation
HELP
TASK_RUN_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
run
#{colorize(:cyan, 'Usage')}
bolt task run <task name> [parameters] {--targets TARGETS | --query QUERY | --rerun FILTER}
[options]
#{colorize(:cyan, 'Description')}
Run a task on the specified targets.
Parameters take the form parameter=value.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt tasks at http://pup.pt/bolt-tasks.
#{colorize(:cyan, 'Examples')}
bolt task run package --targets target1,target2 action=status name=bash
HELP
TASK_SHOW_HELP = <<~HELP
#{colorize(:cyan, 'Name')}
show
#{colorize(:cyan, 'Usage')}
bolt task show [task name] [options]
#{colorize(:cyan, 'Description')}
Show available tasks and task documentation.
Omitting the name of a task will display a list of tasks available
in the Bolt project.
Providing the name of a task will display detailed documentation for
the task, including a list of available parameters.
#{colorize(:cyan, 'Documentation')}
Learn more about Bolt tasks at http://pup.pt/bolt-tasks.
#{colorize(:cyan, 'Examples')}
Display a list of available tasks
bolt task show
Display documentation for the canary task
bolt task show canary
HELP
def initialize(options)
super()
@options = options
separator "\n#{self.class.colorize(:cyan, 'Inventory options')}"
define('-t', '--targets TARGETS', 'Identifies the targets of the command.',
"For more information, see 'bolt guide targets'.") do |targets|
@options[:targets] ||= []
@options[:targets] << Bolt::Util.get_arg_input(targets)
end
define('-q', '--query QUERY', 'Query PuppetDB to determine the targets.') do |query|
@options[:query] = query
end
define("--rerun FILTER", "Retry on targets from the last run.",
"Available filters are 'all', 'failure', and 'success'.") do |rerun|
@options[:rerun] = rerun
end
define('--noop', 'See what changes Bolt will make without actually executing the changes.') do |_|
@options[:noop] = true
end
define('--params PARAMETERS',
"Parameters to a task or plan as json, a json file '@<file>', or on stdin '-'.") do |params|
@options[:task_options] = parse_params(params)
end
define('-e', '--execute CODE',
"Puppet manifest code to apply to the targets.") do |code|
@options[:code] = code
end
define('--detail', 'Show resolved configuration for the targets.') do |detail|
@options[:detail] = detail
end
separator "\n#{self.class.colorize(:cyan, 'Authentication options')}"
define('-u', '--user USER', 'User to authenticate as.') do |user|
@options[:user] = user
end
define('-p', '--password PASSWORD',
'Password to authenticate with.') do |password|
@options[:password] = password
end
define('--password-prompt', 'Prompt for user to input password.') do |_password|
$stderr.print "Please enter your password: "
@options[:password] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--private-key KEY', 'Path to private ssh key to authenticate with.') do |key|
@options[:'private-key'] = File.expand_path(key)
end
define('--[no-]host-key-check', 'Check host keys with SSH.') do |host_key_check|
@options[:'host-key-check'] = host_key_check
end
define('--[no-]ssl', 'Use SSL with WinRM.') do |ssl|
@options[:ssl] = ssl
end
define('--[no-]ssl-verify', 'Verify remote host SSL certificate with WinRM.') do |ssl_verify|
@options[:'ssl-verify'] = ssl_verify
end
separator "\n#{self.class.colorize(:cyan, 'Escalation options')}"
define('--run-as USER', 'User to run as using privilege escalation.') do |user|
@options[:'run-as'] = user
end
define('--sudo-password PASSWORD',
'Password for privilege escalation.') do |password|
@options[:'sudo-password'] = password
end
define('--sudo-password-prompt', 'Prompt for user to input escalation password.') do |_password|
$stderr.print "Please enter your privilege escalation password: "
@options[:'sudo-password'] = $stdin.noecho(&:gets).chomp
$stderr.puts
end
define('--sudo-executable EXEC', "Experimental. Specify an executable for running as another user.") do |exec|
@options[:'sudo-executable'] = exec
end
separator "\n#{self.class.colorize(:cyan, 'Run context options')}"
define('-c', '--concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous connections.') do |concurrency|
@options[:concurrency] = concurrency
end
define('--compile-concurrency CONCURRENCY', Integer,
'Maximum number of simultaneous manifest block compiles (default: number of cores).') do |concurrency|
@options[:'compile-concurrency'] = concurrency
end
define('--[no-]cleanup',
'Whether to clean up temporary files created on targets.') do |cleanup|
@options[:cleanup] = cleanup
end
define('-m', '--modulepath MODULES',
"List of directories containing modules, separated by '#{File::PATH_SEPARATOR}'",
'Directories are case-sensitive.') do |modulepath|
# When specified from the CLI, modulepath entries are relative to pwd
@options[:modulepath] = modulepath.split(File::PATH_SEPARATOR).map do |moduledir|
File.expand_path(moduledir)
end
end
define('--project PATH',
'Path to load the Bolt project from (default: autodiscovered from current dir).') do |path|
@options[:project] = path
end
define('--hiera-config PATH',
'Specify where to load Hiera config from (default: <project>/hiera.yaml).') do |path|
@options[:'hiera-config'] = File.expand_path(path)
end
define('-i', '--inventoryfile PATH',
'Specify where to load inventory from (default: <project>/inventory.yaml).') do |path|
if ENV.include?(Bolt::Inventory::ENVIRONMENT_VAR)
raise Bolt::CLIError, "Cannot pass inventory file when #{Bolt::Inventory::ENVIRONMENT_VAR} is set"
end
@options[:inventoryfile] = File.expand_path(path)
end
define('--[no-]save-rerun', 'Whether to update the rerun file after this command.') do |save|
@options[:'save-rerun'] = save
end
separator "\n#{self.class.colorize(:cyan, 'Remote environment options')}"
define('--env-var ENVIRONMENT_VARIABLES', 'Environment variables to set on the target.') do |envvar|
unless envvar.include?('=')
raise Bolt::CLIError, "Environment variables must be specified using 'myenvvar=key' format"
end
@options[:env_vars] ||= {}
@options[:env_vars].store(*envvar.split('=', 2))
end
separator "\n#{self.class.colorize(:cyan, 'Transport options')}"
define('--transport TRANSPORT', TRANSPORTS.keys.map(&:to_s),
"Specify a default transport: #{TRANSPORTS.keys.join(', ')}.",
"For more information, see 'bolt guide transports'.") do |t|
@options[:transport] = t
end
define('--[no-]native-ssh',
'Experimental. Whether to shell out to native SSH or use the net-ssh Ruby library.') do |bool|
@options[:'native-ssh'] = bool
end
define('--ssh-command EXEC', "Experimental. Executable to use instead of the net-ssh Ruby library.") do |exec|
@options[:'ssh-command'] = exec
end
define('--copy-command EXEC',
"Experimental. Command to copy files to remote hosts if using native SSH.") do |exec|
@options[:'copy-command'] = exec
end
define('--connect-timeout TIMEOUT', Integer, 'Connection timeout in seconds (defaults vary).') do |timeout|
@options[:'connect-timeout'] = timeout
end
define('--[no-]tty', 'Request a pseudo TTY on targets that support it.') do |tty|
@options[:tty] = tty
end
define('--tmpdir DIR', 'The directory to upload and execute temporary files on the target.') do |tmpdir|
@options[:tmpdir] = tmpdir
end
separator "\n#{self.class.colorize(:cyan, 'Module options')}"
define('--[no-]resolve',
'Use --no-resolve to install modules listed in the Puppetfile without resolving modules configured',
'in Bolt project configuration.') do |resolve|
@options[:resolve] = resolve
end
separator "\n#{self.class.colorize(:cyan, 'Lookup options')}"
define('--plan-hierarchy', 'Look up a value with Hiera in the context of a specific plan.') do |_|
@options[:plan_hierarchy] = true
end
separator "\n#{self.class.colorize(:cyan, 'Plan options')}"
define('--pp', 'Create a new Puppet language plan.') do |_|
@options[:puppet] = true
end
separator "\n#{self.class.colorize(:cyan, 'Display options')}"
define('--filter FILTER', 'Filter tasks and plans by a matching substring.') do |filter|
unless /^[a-z0-9_:]+$/.match(filter)
msg = "Illegal characters in filter string '#{filter}'. Filters can "\
"only include lowercase letters, numbers, underscores, and colons."
raise Bolt::CLIError, msg
end
@options[:filter] = filter
end
define('--format FORMAT', 'Output format to use: human, json, or rainbow.') do |format|
@options[:format] = format
end
define('--[no-]color', 'Whether to show output in color.') do |color|
@options[:color] = color
end
define('-v', '--[no-]verbose', 'Display verbose logging.') do |value|
@options[:verbose] = value
end
define('--stream',
'Stream output from scripts and commands to the console.',
'Run with --no-verbose to prevent Bolt from displaying output',
'a second time after the action is completed.') do |_|
@options[:stream] = true
end
define('--trace', 'Display error stack traces.') do |_|
@options[:trace] = true
end
separator "\n#{self.class.colorize(:cyan, 'Additional options')}"
define('--modules MODULES',
'A comma-separated list of modules to install from the Puppet Forge',
'when initializing a project. Resolves and installs all dependencies.') do |modules|
@options[:modules] = modules.split(',').map { |mod| { 'name' => mod } }
end
define('--force', 'Force a destructive action.') do |_force|
@options[:force] = true
end
separator "\n#{self.class.colorize(:cyan, 'Global options')}"
define('-h', '--help', 'Display help.') do |_|
@options[:help] = true
end
define('--version', 'Display the version.') do |_|
puts Bolt::VERSION
raise Bolt::CLIExit
end
define('--log-level LEVEL',
"Set the log level for the console. Available options are",
"trace, debug, info, warn, error, fatal, any.") do |level|
@options[:log] = { 'console' => { 'level' => level } }
end
define('--clear-cache',
"Clear plugin cache before executing.") do |_|
@options[:clear_cache] = true
end
define('--plugin PLUGIN', 'Select the plugin to use.') do |plug|
@options[:plugin] = plug
end
end
def remove_excluded_opts(option_list)
# Remove any options that are not available for the specified subcommand
top.list.delete_if do |opt|
opt.respond_to?(:switch_name) && !option_list.include?(opt.switch_name)
end
# Remove any separators if all options of that type have been removed
top.list.delete_if do |opt|
i = top.list.index(opt)
opt.is_a?(String) && top.list[i + 1].is_a?(String)
end
end
def update
help_text = get_help_text(@options[:subcommand], @options[:action])
# Update the banner according to the subcommand
self.banner = help_text[:banner]
# Builds the option list for the specified subcommand and removes all excluded
# options from the help text
remove_excluded_opts(help_text[:flags])
end
def parse_params(params)
json = Bolt::Util.get_arg_input(params)
JSON.parse(json)
rescue JSON::ParserError => e
raise Bolt::CLIError, "Unable to parse --params value as JSON: #{e}"
end
end
end
| 1 | 18,720 |
I think this also needs to be removed from `resources/bolt_bash_completion.sh`, `rakelib/pwsh.rake`, `lib/bolt_spec/plans.rb`, and `lib/bolt/config/options.rb`.
|
puppetlabs-bolt
|
rb
|
@@ -63,7 +63,7 @@ describe 'PasswordResetsController' do
account.update_attribute(:email, '[email protected]')
assert_difference(['ActionMailer::Base.deliveries.size'], 1) do
- post :create, password_reset: {email: account.email.downcase}
+ post :create, password_reset: { email: account.email.downcase }
end
must_respond_with :redirect
flash[:success].must_equal I18n.t('password_resets.create.success')
| 1 |
require 'test_helper'
describe 'PasswordResetsController' do
let(:token) { SecureRandom.hex(16) }
let(:original_password) { Faker::Internet.password }
let(:account) do
create(:account, reset_password_tokens: { token => Time.current + 1.hour },
password: original_password, password_confirmation: original_password)
end
describe 'new' do
it 'must respond with success' do
get :new
must_respond_with :success
must_render_template 'password_resets/new'
end
it 'must redirect if logged in' do
login_as create(:account)
get :new
must_respond_with :redirect
flash[:notice].must_match(/already logged in/)
end
end
describe 'create' do
it 'must validate the presence of email' do
assert_difference(['ActionMailer::Base.deliveries.size'], 0) do
post :create, password_reset: { email: '' }
end
must_render_template 'password_resets/new'
assigns(:password_reset).errors.messages[:email].first.must_match(/required/i)
end
it 'must complain about missing account' do
assert_difference(['ActionMailer::Base.deliveries.size'], 0) do
post :create, password_reset: { email: 'not_a_valid_email' }
end
must_render_template 'password_resets/new'
assigns(:password_reset).errors.messages[:email].first.must_match(/no account/i)
end
it 'must successfully send out an email with reset password link' do
account = create(:account)
assert_difference(['ActionMailer::Base.deliveries.size'], 1) do
post :create, password_reset: { email: account.email }
end
must_respond_with :redirect
flash[:success].must_equal I18n.t('password_resets.create.success')
account.reload
account.reset_password_tokens.keys.first.must_be :present?
end
it 'must perform a case insensitive match' do
account = create(:account)
account.update_attribute(:email, '[email protected]')
assert_difference(['ActionMailer::Base.deliveries.size'], 1) do
post :create, password_reset: {email: account.email.downcase}
end
must_respond_with :redirect
flash[:success].must_equal I18n.t('password_resets.create.success')
end
end
describe 'confirm' do
it 'wont reset password for invalid account' do
get :confirm, account_id: Faker::Name.name, token: token
must_respond_with 404
end
it 'wont reset password for invalid token' do
get :confirm, account_id: account.login, token: 'foo'
must_respond_with 404
end
it 'wont allow expired tokens' do
account.update! reset_password_tokens: { token => Time.current }
get :confirm, account_id: account.login, token: token
must_redirect_to new_password_reset_path
flash[:error].must_match(/has expired/)
end
it 'must render the confirmation form correctly' do
get :confirm, account_id: account.login, token: token
must_respond_with :success
must_render_template 'password_resets/confirm'
end
end
describe 'reset' do
it 'wont allow blank password' do
patch :reset, account_id: account.login, token: token, account: { password: '' }
assigns(:account).errors.messages[:password].must_be :present?
must_render_template :confirm
Account::Authenticator.new(login: account.login, password: original_password).must_be :authenticated?
end
it 'must successfully reset the password and clear the token' do
password = Faker::Internet.password
patch :reset, account_id: account.login, token: token,
account: { password: password, password_confirmation: password }
flash[:success].must_match(/reset success/)
must_respond_with :redirect
Account::Authenticator.new(login: account.login, password: password).must_be :authenticated?
account.reload
account.reset_password_tokens.must_be :empty?
end
end
end
| 1 | 8,379 |
I fixed this today with a direct push to master. My build was failing otherwise.
|
blackducksoftware-ohloh-ui
|
rb
|
@@ -0,0 +1,16 @@
+/**
+ * BSD-style license; for more info see http://pmd.sourceforge.net/license.html
+ */
+
+package net.sourceforge.pmd.autofix.rule;
+
+import net.sourceforge.pmd.autofix.RuleViolationFix;
+import net.sourceforge.pmd.lang.ast.Node;
+
+public class DeleteRuleViolationFix implements RuleViolationFix {
+
+ @Override
+ public void applyFixesToNode(final Node node) {
+ node.remove();
+ }
+}
| 1 | 1 | 13,446 |
not sure why this is in a "rule" child package from the interface...
|
pmd-pmd
|
java
|
|
@@ -19,12 +19,14 @@ class Folder extends BaseItem {
return {
id: null,
title: '',
+ icon: '',
};
}
static fieldToLabel(field) {
const fieldsToLabels = {
title: _('title'),
+ icon: _('icon'),
last_note_user_updated_time: _('updated date'),
};
| 1 |
const BaseModel = require('lib/BaseModel.js');
const { time } = require('lib/time-utils.js');
const Note = require('lib/models/Note.js');
const { Database } = require('lib/database.js');
const { _ } = require('lib/locale.js');
const BaseItem = require('lib/models/BaseItem.js');
const { substrWithEllipsis } = require('lib/string-utils.js');
class Folder extends BaseItem {
static tableName() {
return 'folders';
}
static modelType() {
return BaseModel.TYPE_FOLDER;
}
static newFolder() {
return {
id: null,
title: '',
};
}
static fieldToLabel(field) {
const fieldsToLabels = {
title: _('title'),
last_note_user_updated_time: _('updated date'),
};
return field in fieldsToLabels ? fieldsToLabels[field] : field;
}
static noteIds(parentId) {
return this.db()
.selectAll('SELECT id FROM notes WHERE is_conflict = 0 AND parent_id = ?', [parentId])
.then(rows => {
let output = [];
for (let i = 0; i < rows.length; i++) {
let row = rows[i];
output.push(row.id);
}
return output;
});
}
static async subFolderIds(parentId) {
const rows = await this.db().selectAll('SELECT id FROM folders WHERE parent_id = ?', [parentId]);
return rows.map(r => r.id);
}
static async noteCount(parentId) {
let r = await this.db().selectOne('SELECT count(*) as total FROM notes WHERE is_conflict = 0 AND parent_id = ?', [parentId]);
return r ? r.total : 0;
}
static markNotesAsConflict(parentId) {
let query = Database.updateQuery('notes', { is_conflict: 1 }, { parent_id: parentId });
return this.db().exec(query);
}
static async delete(folderId, options = null) {
if (!options) options = {};
if (!('deleteChildren' in options)) options.deleteChildren = true;
let folder = await Folder.load(folderId);
if (!folder) return; // noop
if (options.deleteChildren) {
let noteIds = await Folder.noteIds(folderId);
for (let i = 0; i < noteIds.length; i++) {
await Note.delete(noteIds[i]);
}
let subFolderIds = await Folder.subFolderIds(folderId);
for (let i = 0; i < subFolderIds.length; i++) {
await Folder.delete(subFolderIds[i]);
}
}
await super.delete(folderId, options);
this.dispatch({
type: 'FOLDER_DELETE',
id: folderId,
});
}
static conflictFolderTitle() {
return _('Conflicts');
}
static conflictFolderId() {
return 'c04f1c7c04f1c7c04f1c7c04f1c7c04f';
}
static conflictFolder() {
return {
type_: this.TYPE_FOLDER,
id: this.conflictFolderId(),
parent_id: '',
title: this.conflictFolderTitle(),
updated_time: time.unixMs(),
user_updated_time: time.unixMs(),
};
}
// Calculates note counts for all folders and adds the note_count attribute to each folder
// Note: this only calculates the overall number of notes for this folder and all its descendants
static async addNoteCounts(folders, includeCompletedTodos = true) {
const foldersById = {};
folders.forEach((f) => {
foldersById[f.id] = f;
f.note_count = 0;
});
const where = !includeCompletedTodos ? 'WHERE (notes.is_todo = 0 OR notes.todo_completed = 0)' : '';
const sql = `SELECT folders.id as folder_id, count(notes.parent_id) as note_count
FROM folders LEFT JOIN notes ON notes.parent_id = folders.id
${where} GROUP BY folders.id`;
const noteCounts = await this.db().selectAll(sql);
noteCounts.forEach((noteCount) => {
let parentId = noteCount.folder_id;
do {
let folder = foldersById[parentId];
if (!folder) break; // https://github.com/laurent22/joplin/issues/2079
folder.note_count = (folder.note_count || 0) + noteCount.note_count;
parentId = folder.parent_id;
} while (parentId);
});
}
// Folders that contain notes that have been modified recently go on top.
// The remaining folders, which don't contain any notes, are sorted by their own user_updated_time
static async orderByLastModified(folders, dir = 'DESC') {
dir = dir.toUpperCase();
const sql = 'select parent_id, max(user_updated_time) content_updated_time from notes where parent_id != "" group by parent_id';
const rows = await this.db().selectAll(sql);
const folderIdToTime = {};
for (let i = 0; i < rows.length; i++) {
const row = rows[i];
folderIdToTime[row.parent_id] = row.content_updated_time;
}
const findFolderParent = folderId => {
const folder = BaseModel.byId(folders, folderId);
if (!folder) return null; // For the rare case of notes that are associated with a no longer existing folder
if (!folder.parent_id) return null;
for (let i = 0; i < folders.length; i++) {
if (folders[i].id === folder.parent_id) return folders[i];
}
// In some rare cases, some folders may not have a parent, for example
// if it has not been downloaded via sync yet.
// https://github.com/laurent22/joplin/issues/2088
return null;
};
const applyChildTimeToParent = folderId => {
const parent = findFolderParent(folderId);
if (!parent) return;
if (folderIdToTime[parent.id] && folderIdToTime[parent.id] >= folderIdToTime[folderId]) {
// Don't change so that parent has the same time as the last updated child
} else {
folderIdToTime[parent.id] = folderIdToTime[folderId];
}
applyChildTimeToParent(parent.id);
};
for (let folderId in folderIdToTime) {
if (!folderIdToTime.hasOwnProperty(folderId)) continue;
applyChildTimeToParent(folderId);
}
const mod = dir === 'DESC' ? +1 : -1;
const output = folders.slice();
output.sort((a, b) => {
const aTime = folderIdToTime[a.id] ? folderIdToTime[a.id] : a.user_updated_time;
const bTime = folderIdToTime[b.id] ? folderIdToTime[b.id] : b.user_updated_time;
if (aTime < bTime) return +1 * mod;
if (aTime > bTime) return -1 * mod;
return 0;
});
return output;
}
static async all(options = null) {
let output = await super.all(options);
if (options && options.includeConflictFolder) {
let conflictCount = await Note.conflictedCount();
if (conflictCount) output.push(this.conflictFolder());
}
return output;
}
static async childrenIds(folderId, recursive) {
if (recursive === false) throw new Error('Not implemented');
const folders = await this.db().selectAll('SELECT id FROM folders WHERE parent_id = ?', [folderId]);
let output = [];
for (let i = 0; i < folders.length; i++) {
const f = folders[i];
output.push(f.id);
const subChildrenIds = await this.childrenIds(f.id, true);
output = output.concat(subChildrenIds);
}
return output;
}
static async allAsTree(folders = null, options = null) {
const all = folders ? folders : await this.all(options);
// https://stackoverflow.com/a/49387427/561309
function getNestedChildren(models, parentId) {
const nestedTreeStructure = [];
const length = models.length;
for (let i = 0; i < length; i++) {
const model = models[i];
if (model.parent_id == parentId) {
const children = getNestedChildren(models, model.id);
if (children.length > 0) {
model.children = children;
}
nestedTreeStructure.push(model);
}
}
return nestedTreeStructure;
}
return getNestedChildren(all, '');
}
static folderPath(folders, folderId) {
const idToFolders = {};
for (let i = 0; i < folders.length; i++) {
idToFolders[folders[i].id] = folders[i];
}
const path = [];
while (folderId) {
const folder = idToFolders[folderId];
if (!folder) break; // Shouldn't happen
path.push(folder);
folderId = folder.parent_id;
}
path.reverse();
return path;
}
static folderPathString(folders, folderId, maxTotalLength = 80) {
const path = this.folderPath(folders, folderId);
let currentTotalLength = 0;
for (let i = 0; i < path.length; i++) {
currentTotalLength += path[i].title.length;
}
let pieceLength = maxTotalLength;
if (currentTotalLength > maxTotalLength) {
pieceLength = maxTotalLength / path.length;
}
const output = [];
for (let i = 0; i < path.length; i++) {
output.push(substrWithEllipsis(path[i].title, 0, pieceLength));
}
return output.join(' / ');
}
static buildTree(folders) {
const idToFolders = {};
for (let i = 0; i < folders.length; i++) {
idToFolders[folders[i].id] = folders[i];
idToFolders[folders[i].id].children = [];
}
const rootFolders = [];
for (let folderId in idToFolders) {
if (!idToFolders.hasOwnProperty(folderId)) continue;
const folder = idToFolders[folderId];
if (!folder.parent_id) {
rootFolders.push(folder);
} else {
if (!idToFolders[folder.parent_id]) {
// It means the notebook is referring to a folder that doesn't exist. In theory it shouldn't happen
// but sometimes does - https://github.com/laurent22/joplin/issues/1068#issuecomment-450594708
rootFolders.push(folder);
} else {
idToFolders[folder.parent_id].children.push(folder);
}
}
}
return rootFolders;
}
static load(id) {
if (id == this.conflictFolderId()) return this.conflictFolder();
return super.load(id);
}
static defaultFolder() {
return this.modelSelectOne('SELECT * FROM folders ORDER BY created_time DESC LIMIT 1');
}
static async canNestUnder(folderId, targetFolderId) {
if (folderId === targetFolderId) return false;
const conflictFolderId = Folder.conflictFolderId();
if (folderId == conflictFolderId || targetFolderId == conflictFolderId) return false;
if (!targetFolderId) return true;
while (true) {
let folder = await Folder.load(targetFolderId);
if (!folder.parent_id) break;
if (folder.parent_id === folderId) return false;
targetFolderId = folder.parent_id;
}
return true;
}
static async moveToFolder(folderId, targetFolderId) {
if (!(await this.canNestUnder(folderId, targetFolderId))) throw new Error(_('Cannot move notebook to this location'));
// When moving a notebook to a different folder, the user timestamp is not updated.
// However updated_time is updated so that the note can be synced later on.
const modifiedFolder = {
id: folderId,
parent_id: targetFolderId,
updated_time: time.unixMs(),
};
return Folder.save(modifiedFolder, { autoTimestamp: false });
}
// These "duplicateCheck" and "reservedTitleCheck" should only be done when a user is
// manually creating a folder. They shouldn't be done for example when the folders
// are being synced to avoid any strange side-effects. Technically it's possible to
// have folders and notes with duplicate titles (or no title), or with reserved words.
static async save(o, options = null) {
if (!options) options = {};
if (options.userSideValidation === true) {
if (!('duplicateCheck' in options)) options.duplicateCheck = true;
if (!('reservedTitleCheck' in options)) options.reservedTitleCheck = true;
if (!('stripLeftSlashes' in options)) options.stripLeftSlashes = true;
}
if (options.stripLeftSlashes === true && o.title) {
while (o.title.length && (o.title[0] == '/' || o.title[0] == '\\')) {
o.title = o.title.substr(1);
}
}
// We allow folders with duplicate titles so that folders with the same title can exist under different parent folders. For example:
//
// PHP
// Code samples
// Doc
// Java
// My project
// Doc
// if (options.duplicateCheck === true && o.title) {
// let existingFolder = await Folder.loadByTitle(o.title);
// if (existingFolder && existingFolder.id != o.id) throw new Error(_('A notebook with this title already exists: "%s"', o.title));
// }
if (options.reservedTitleCheck === true && o.title) {
if (o.title == Folder.conflictFolderTitle()) throw new Error(_('Notebooks cannot be named "%s", which is a reserved title.', o.title));
}
return super.save(o, options).then(folder => {
this.dispatch({
type: 'FOLDER_UPDATE_ONE',
item: folder,
});
return folder;
});
}
}
module.exports = Folder;
| 1 | 11,706 |
Not needed, please remove.
|
laurent22-joplin
|
js
|
@@ -66,7 +66,7 @@ static h2o_iovec_t rewrite_location(h2o_mem_pool_t *pool, const char *location,
goto NoRewrite;
if (loc_parsed.scheme != &H2O_URL_SCHEME_HTTP)
goto NoRewrite;
- if (!h2o_lcstris(loc_parsed.host.base, loc_parsed.host.len, match->host.base, match->host.len))
+ if (!h2o_url_compare_hosts(loc_parsed.host, match->host, match->host_is_unix_path))
goto NoRewrite;
if (h2o_url_get_port(&loc_parsed) != h2o_url_get_port(match))
goto NoRewrite;
| 1 |
/*
* Copyright (c) 2014,2015 DeNA Co., Ltd., Kazuho Oku, Masahiro Nagano
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "picohttpparser.h"
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/http1client.h"
#include "h2o/tunnel.h"
struct rp_generator_t {
h2o_generator_t super;
h2o_req_t *src_req;
h2o_http1client_t *client;
struct {
h2o_iovec_t bufs[2]; /* first buf is the request line and headers, the second is the POST content */
int is_head;
} up_req;
h2o_buffer_t *last_content_before_send;
h2o_doublebuffer_t sending;
int is_websocket_handshake;
int had_body_error; /* set if an error happened while fetching the body so that we can propagate the error */
};
struct rp_ws_upgrade_info_t {
h2o_context_t *ctx;
h2o_timeout_t *timeout;
h2o_socket_t *upstream_sock;
};
static h2o_http1client_ctx_t *get_client_ctx(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
if (overrides != NULL && overrides->client_ctx != NULL)
return overrides->client_ctx;
return &req->conn->ctx->proxy.client_ctx;
}
static h2o_iovec_t rewrite_location(h2o_mem_pool_t *pool, const char *location, size_t location_len, h2o_url_t *match,
const h2o_url_scheme_t *req_scheme, h2o_iovec_t req_authority, h2o_iovec_t req_basepath)
{
h2o_url_t loc_parsed;
if (h2o_url_parse(location, location_len, &loc_parsed) != 0)
goto NoRewrite;
if (loc_parsed.scheme != &H2O_URL_SCHEME_HTTP)
goto NoRewrite;
if (!h2o_lcstris(loc_parsed.host.base, loc_parsed.host.len, match->host.base, match->host.len))
goto NoRewrite;
if (h2o_url_get_port(&loc_parsed) != h2o_url_get_port(match))
goto NoRewrite;
if (loc_parsed.path.len < match->path.len)
goto NoRewrite;
if (memcmp(loc_parsed.path.base, match->path.base, match->path.len) != 0)
goto NoRewrite;
return h2o_concat(pool, req_scheme->name, h2o_iovec_init(H2O_STRLIT("://")), req_authority, req_basepath,
h2o_iovec_init(loc_parsed.path.base + match->path.len, loc_parsed.path.len - match->path.len));
NoRewrite:
return (h2o_iovec_t){NULL};
}
static h2o_iovec_t build_request_merge_headers(h2o_mem_pool_t *pool, h2o_iovec_t merged, h2o_iovec_t added, int separator)
{
if (added.len == 0)
return merged;
if (merged.len == 0)
return added;
size_t newlen = merged.len + 2 + added.len;
char *buf = h2o_mem_alloc_pool(pool, newlen);
memcpy(buf, merged.base, merged.len);
buf[merged.len] = separator;
buf[merged.len + 1] = ' ';
memcpy(buf + merged.len + 2, added.base, added.len);
merged.base = buf;
merged.len = newlen;
return merged;
}
/*
* A request with neither a Content-Length nor a Transfer-Encoding header implies a zero-length request body (see the 6th rule of
* RFC 7230 section 3.3.3).
* OTOH, section 3.3.3 states:
*
* A user agent SHOULD send a Content-Length in a request message when
* no Transfer-Encoding is sent and the request method defines a meaning
* for an enclosed payload body. For example, a Content-Length header
* field is normally sent in a POST request even when the value is 0
* (indicating an empty payload body). A user agent SHOULD NOT send a
* Content-Length header field when the request message does not contain
* a payload body and the method semantics do not anticipate such a
* body.
*
* PUT and POST define a meaning for the payload body, so let's emit a
* Content-Length header if one doesn't already exist, since the server
* might otherwise send a '411 Length Required' response.
*
* see also: ML thread starting at https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0580.html
*/
static int req_requires_content_length(h2o_req_t *req)
{
int is_put_or_post =
(req->method.len >= 1 && req->method.base[0] == 'P' && (h2o_memis(req->method.base, req->method.len, H2O_STRLIT("POST")) ||
h2o_memis(req->method.base, req->method.len, H2O_STRLIT("PUT"))));
return is_put_or_post && h2o_find_header(&req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, -1) == -1;
}
static h2o_iovec_t build_request(h2o_req_t *req, int keepalive, int is_websocket_handshake, int use_proxy_protocol)
{
h2o_iovec_t buf;
size_t offset = 0, remote_addr_len = SIZE_MAX;
char remote_addr[NI_MAXHOST];
struct sockaddr_storage ss;
socklen_t sslen;
h2o_iovec_t cookie_buf = {NULL}, xff_buf = {NULL}, via_buf = {NULL};
int preserve_x_forwarded_proto = req->conn->ctx->globalconf->proxy.preserve_x_forwarded_proto;
int emit_x_forwarded_headers = req->conn->ctx->globalconf->proxy.emit_x_forwarded_headers;
/* for x-f-f */
if ((sslen = req->conn->callbacks->get_peername(req->conn, (void *)&ss)) != 0)
remote_addr_len = h2o_socket_getnumerichost((void *)&ss, sslen, remote_addr);
/* build the request line and headers */
buf.len = req->method.len + req->path.len + req->authority.len + 512;
if (use_proxy_protocol)
buf.len += H2O_PROXY_HEADER_MAX_LENGTH;
buf.base = h2o_mem_alloc_pool(&req->pool, buf.len);
#define RESERVE(sz) \
do { \
size_t required = offset + sz + 4 /* for "\r\n\r\n" */; \
if (required > buf.len) { \
do { \
buf.len *= 2; \
} while (required > buf.len); \
char *newp = h2o_mem_alloc_pool(&req->pool, buf.len); \
memcpy(newp, buf.base, offset); \
buf.base = newp; \
} \
} while (0)
#define APPEND(s, l) \
do { \
memcpy(buf.base + offset, (s), (l)); \
offset += (l); \
} while (0)
#define APPEND_STRLIT(lit) APPEND((lit), sizeof(lit) - 1)
#define FLATTEN_PREFIXED_VALUE(prefix, value, add_size) \
do { \
RESERVE(sizeof(prefix) - 1 + value.len + 2 + add_size); \
APPEND_STRLIT(prefix); \
if (value.len != 0) { \
APPEND(value.base, value.len); \
if (add_size != 0) { \
buf.base[offset++] = ','; \
buf.base[offset++] = ' '; \
} \
} \
} while (0)
if (use_proxy_protocol)
offset += h2o_stringify_proxy_header(req->conn, buf.base + offset);
APPEND(req->method.base, req->method.len);
buf.base[offset++] = ' ';
APPEND(req->path.base, req->path.len);
APPEND_STRLIT(" HTTP/1.1\r\nconnection: ");
if (is_websocket_handshake) {
APPEND_STRLIT("upgrade\r\nupgrade: websocket\r\nhost: ");
} else if (keepalive) {
APPEND_STRLIT("keep-alive\r\nhost: ");
} else {
APPEND_STRLIT("close\r\nhost: ");
}
APPEND(req->authority.base, req->authority.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
assert(offset <= buf.len);
if (req->entity.base != NULL || req_requires_content_length(req)) {
RESERVE(sizeof("content-length: " H2O_UINT64_LONGEST_STR) - 1);
offset += sprintf(buf.base + offset, "content-length: %zu\r\n", req->entity.len);
}
{
const h2o_header_t *h, *h_end;
for (h = req->headers.entries, h_end = h + req->headers.size; h != h_end; ++h) {
if (h2o_iovec_is_token(h->name)) {
const h2o_token_t *token = (void *)h->name;
if (token->proxy_should_drop) {
continue;
} else if (token == H2O_TOKEN_COOKIE) {
/* merge the cookie headers; see HTTP/2 8.1.2.5 and HTTP/1 (RFC6265 5.4) */
/* FIXME current algorithm is O(n^2) against the number of cookie headers */
cookie_buf = build_request_merge_headers(&req->pool, cookie_buf, h->value, ';');
continue;
} else if (token == H2O_TOKEN_VIA) {
via_buf = build_request_merge_headers(&req->pool, via_buf, h->value, ',');
continue;
} else if (token == H2O_TOKEN_X_FORWARDED_FOR) {
if (!emit_x_forwarded_headers) {
goto AddHeader;
}
xff_buf = build_request_merge_headers(&req->pool, xff_buf, h->value, ',');
continue;
}
}
if (!preserve_x_forwarded_proto && h2o_lcstris(h->name->base, h->name->len, H2O_STRLIT("x-forwarded-proto")))
continue;
AddHeader:
RESERVE(h->name->len + h->value.len + 2);
APPEND(h->name->base, h->name->len);
buf.base[offset++] = ':';
buf.base[offset++] = ' ';
APPEND(h->value.base, h->value.len);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
}
if (cookie_buf.len != 0) {
FLATTEN_PREFIXED_VALUE("cookie: ", cookie_buf, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (emit_x_forwarded_headers) {
if (!preserve_x_forwarded_proto) {
FLATTEN_PREFIXED_VALUE("x-forwarded-proto: ", req->input.scheme->name, 0);
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
if (remote_addr_len != SIZE_MAX) {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, remote_addr_len);
APPEND(remote_addr, remote_addr_len);
} else {
FLATTEN_PREFIXED_VALUE("x-forwarded-for: ", xff_buf, 0);
}
buf.base[offset++] = '\r';
buf.base[offset++] = '\n';
}
FLATTEN_PREFIXED_VALUE("via: ", via_buf, sizeof("1.1 ") - 1 + req->input.authority.len);
if (req->version < 0x200) {
buf.base[offset++] = '1';
buf.base[offset++] = '.';
buf.base[offset++] = '0' + (0x100 <= req->version && req->version <= 0x109 ? req->version - 0x100 : 0);
} else {
buf.base[offset++] = '2';
}
buf.base[offset++] = ' ';
APPEND(req->input.authority.base, req->input.authority.len);
APPEND_STRLIT("\r\n\r\n");
#undef RESERVE
#undef APPEND
#undef APPEND_STRLIT
#undef FLATTEN_PREFIXED_VALUE
/* set the length */
assert(offset <= buf.len);
buf.len = offset;
return buf;
}
static void do_close(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
}
static void do_send(struct rp_generator_t *self)
{
h2o_iovec_t vecs[1];
size_t veccnt;
h2o_send_state_t ststate;
assert(self->sending.bytes_inflight == 0);
vecs[0] = h2o_doublebuffer_prepare(&self->sending,
self->client != NULL ? &self->client->sock->input : &self->last_content_before_send,
self->src_req->preferred_chunk_size);
if (self->client == NULL && vecs[0].len == self->sending.buf->size && self->last_content_before_send->size == 0) {
veccnt = vecs[0].len != 0 ? 1 : 0;
ststate = H2O_SEND_STATE_FINAL;
} else {
if (vecs[0].len == 0)
return;
veccnt = 1;
ststate = H2O_SEND_STATE_IN_PROGRESS;
}
if (self->had_body_error)
ststate = H2O_SEND_STATE_ERROR;
h2o_send(self->src_req, vecs, veccnt, ststate);
}
static void do_proceed(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
h2o_doublebuffer_consume(&self->sending);
do_send(self);
}
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
struct rp_ws_upgrade_info_t *info = _info;
if (sock != NULL) {
h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
} else {
h2o_socket_close(info->upstream_sock);
}
free(info);
}
static inline void on_websocket_upgrade(struct rp_generator_t *self, h2o_timeout_t *timeout)
{
h2o_req_t *req = self->src_req;
h2o_socket_t *sock = h2o_http1client_steal_socket(self->client);
struct rp_ws_upgrade_info_t *info = h2o_mem_alloc(sizeof(*info));
info->upstream_sock = sock;
info->timeout = timeout;
info->ctx = req->conn->ctx;
h2o_http1_upgrade(req, NULL, 0, on_websocket_upgrade_complete, info);
}
static int on_body(h2o_http1client_t *client, const char *errstr)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
/* detach the content */
self->last_content_before_send = self->client->sock->input;
h2o_buffer_init(&self->client->sock->input, &h2o_socket_buffer_prototype);
self->client = NULL;
if (errstr != h2o_http1client_error_is_eos) {
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
self->had_body_error = 1;
}
}
if (self->sending.bytes_inflight == 0)
do_send(self);
return 0;
}
static char compress_hint_to_enum(const char *val, size_t len)
{
if (h2o_lcstris(val, len, H2O_STRLIT("on"))) {
return H2O_COMPRESS_HINT_ENABLE;
}
if (h2o_lcstris(val, len, H2O_STRLIT("off"))) {
return H2O_COMPRESS_HINT_DISABLE;
}
return H2O_COMPRESS_HINT_AUTO;
}
static h2o_http1client_body_cb on_head(h2o_http1client_t *client, const char *errstr, int minor_version, int status,
h2o_iovec_t msg, h2o_http1client_header_t *headers, size_t num_headers)
{
struct rp_generator_t *self = client->data;
h2o_req_t *req = self->src_req;
size_t i;
if (errstr != NULL && errstr != h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(req, "Gateway Error", errstr, 0);
return NULL;
}
/* copy the response (note: all the headers must be copied; http1client discards the input once we return from this callback) */
req->res.status = status;
req->res.reason = h2o_strdup(&req->pool, msg.base, msg.len).base;
for (i = 0; i != num_headers; ++i) {
const h2o_token_t *token = h2o_lookup_token(headers[i].name, headers[i].name_len);
h2o_iovec_t value;
if (token != NULL) {
if (token->proxy_should_drop) {
goto Skip;
}
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if (req->res.content_length != SIZE_MAX ||
(req->res.content_length = h2o_strtosize(headers[i].value, headers[i].value_len)) == SIZE_MAX) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", "invalid response from upstream (malformed content-length)");
h2o_send_error_502(req, "Gateway Error", "invalid response from upstream", 0);
return NULL;
}
goto Skip;
} else if (token == H2O_TOKEN_LOCATION) {
if (req->res_is_delegated && (300 <= status && status <= 399) && status != 304) {
self->client = NULL;
h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
h2o_send_redirect_internal(req, method, headers[i].value, headers[i].value_len, 1);
return NULL;
}
if (req->overrides != NULL && req->overrides->location_rewrite.match != NULL) {
value =
rewrite_location(&req->pool, headers[i].value, headers[i].value_len, req->overrides->location_rewrite.match,
req->input.scheme, req->input.authority, req->overrides->location_rewrite.path_prefix);
if (value.base != NULL)
goto AddHeader;
}
goto AddHeaderDuped;
} else if (token == H2O_TOKEN_LINK) {
h2o_push_path_in_link_header(req, headers[i].value, headers[i].value_len);
} else if (token == H2O_TOKEN_X_COMPRESS_HINT) {
req->compress_hint = compress_hint_to_enum(headers[i].value, headers[i].value_len);
goto Skip;
}
/* default behaviour, transfer the header downstream */
AddHeaderDuped:
value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
AddHeader:
h2o_add_header(&req->pool, &req->res.headers, token, value.base, value.len);
Skip:;
} else {
h2o_iovec_t name = h2o_strdup(&req->pool, headers[i].name, headers[i].name_len);
h2o_iovec_t value = h2o_strdup(&req->pool, headers[i].value, headers[i].value_len);
h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, value.base, value.len);
}
}
if (self->is_websocket_handshake && req->res.status == 101) {
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
assert(client_ctx->websocket_timeout != NULL);
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, H2O_STRLIT("websocket"));
on_websocket_upgrade(self, client_ctx->websocket_timeout);
self->client = NULL;
return NULL;
}
/* declare the start of the response */
h2o_start_response(req, &self->super);
if (errstr == h2o_http1client_error_is_eos) {
self->client = NULL;
h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
return NULL;
}
return on_body;
}
static int on_1xx(h2o_http1client_t *client, int minor_version, int status, h2o_iovec_t msg, h2o_http1client_header_t *headers,
size_t num_headers)
{
struct rp_generator_t *self = client->data;
size_t i;
for (i = 0; i != num_headers; ++i) {
if (h2o_memis(headers[i].name, headers[i].name_len, H2O_STRLIT("link")))
h2o_push_path_in_link_header(self->src_req, headers[i].value, headers[i].value_len);
}
return 0;
}
static h2o_http1client_head_cb on_connect(h2o_http1client_t *client, const char *errstr, h2o_iovec_t **reqbufs, size_t *reqbufcnt,
int *method_is_head)
{
struct rp_generator_t *self = client->data;
if (errstr != NULL) {
self->client = NULL;
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(self->src_req, "Gateway Error", errstr, 0);
return NULL;
}
*reqbufs = self->up_req.bufs;
*reqbufcnt = self->up_req.bufs[1].base != NULL ? 2 : 1;
*method_is_head = self->up_req.is_head;
self->client->informational_cb = on_1xx;
return on_head;
}
static void on_generator_dispose(void *_self)
{
struct rp_generator_t *self = _self;
if (self->client != NULL) {
h2o_http1client_cancel(self->client);
self->client = NULL;
}
h2o_buffer_dispose(&self->last_content_before_send);
h2o_doublebuffer_dispose(&self->sending);
}
static struct rp_generator_t *proxy_send_prepare(h2o_req_t *req, int keepalive, int use_proxy_protocol)
{
struct rp_generator_t *self = h2o_mem_alloc_shared(&req->pool, sizeof(*self), on_generator_dispose);
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
self->super.proceed = do_proceed;
self->super.stop = do_close;
self->src_req = req;
if (client_ctx->websocket_timeout != NULL && h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT("websocket"))) {
self->is_websocket_handshake = 1;
} else {
self->is_websocket_handshake = 0;
}
self->had_body_error = 0;
self->up_req.bufs[0] = build_request(req, keepalive, self->is_websocket_handshake, use_proxy_protocol);
self->up_req.bufs[1] = req->entity;
self->up_req.is_head = h2o_memis(req->method.base, req->method.len, H2O_STRLIT("HEAD"));
h2o_buffer_init(&self->last_content_before_send, &h2o_socket_buffer_prototype);
h2o_doublebuffer_init(&self->sending, &h2o_socket_buffer_prototype);
return self;
}
void h2o__proxy_process_request(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
h2o_http1client_ctx_t *client_ctx = get_client_ctx(req);
struct rp_generator_t *self;
if (overrides != NULL) {
if (overrides->socketpool != NULL) {
if (overrides->use_proxy_protocol)
assert(!"proxy protocol cannot be used for a persistent upstream connection");
self = proxy_send_prepare(req, 1, 0);
h2o_http1client_connect_with_pool(&self->client, self, client_ctx, overrides->socketpool, on_connect);
return;
} else if (overrides->hostport.host.base != NULL) {
self = proxy_send_prepare(req, 0, overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, req->overrides->hostport.host, req->overrides->hostport.port,
0, on_connect);
return;
}
}
{ /* default logic */
h2o_iovec_t host;
uint16_t port;
if (h2o_url_parse_hostport(req->authority.base, req->authority.len, &host, &port) == NULL) {
h2o_req_log_error(req, "lib/core/proxy.c", "invalid URL supplied for internal redirection:%s://%.*s%.*s",
req->scheme->name.base, (int)req->authority.len, req->authority.base, (int)req->path.len,
req->path.base);
h2o_send_error_502(req, "Gateway Error", "internal error", 0);
return;
}
if (port == 65535)
port = req->scheme->default_port;
self = proxy_send_prepare(req, 0, overrides != NULL && overrides->use_proxy_protocol);
h2o_http1client_connect(&self->client, self, client_ctx, host, port, req->scheme == &H2O_URL_SCHEME_HTTPS, on_connect);
return;
}
}
| 1 | 11,809 |
Shouldn't the logic be:
* the result is a mismatch if either is a unix path but the other is not
* if both are unix paths, then perform case-sensitive comparison
* if both are not unix paths, then perform case-insensitive comparison
|
h2o-h2o
|
c
|
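A minimal sketch of the comparison rule proposed in the review comment above. This is not h2o code: the function and parameter names are hypothetical, it assumes a target counts as a unix socket path when it starts with '/', and it is written in Python purely to illustrate the rule rather than as an implementation for the C codebase.
def is_unix_path(target: str) -> bool:
    # Assumption: a unix socket target is identified by a leading '/'.
    return target.startswith("/")
def targets_match(a: str, b: str) -> bool:
    """Compare two upstream targets using the rule proposed above."""
    if is_unix_path(a) != is_unix_path(b):
        return False  # one is a unix path, the other is not -> mismatch
    if is_unix_path(a):
        return a == b  # both are unix paths -> case-sensitive comparison
    return a.lower() == b.lower()  # neither is a unix path -> case-insensitive comparison
For example, targets_match("example.COM", "example.com") is True, while targets_match("/tmp/upstream.sock", "/tmp/UPSTREAM.sock") is False.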
@@ -11,6 +11,6 @@ import (
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2"
_ "github.com/shurcooL/vfsgen"
- _ "google.golang.org/protobuf/cmd/protoc-gen-go"
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
+ _ "google.golang.org/protobuf/cmd/protoc-gen-go"
)
| 1 |
// +build tools
// This package tracks build dependencies so they are not removed when `go mod tidy` is run.
// https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module
package tools
import (
_ "github.com/bufbuild/buf/cmd/protoc-gen-buf-check-lint"
_ "github.com/envoyproxy/protoc-gen-validate"
_ "github.com/fullstorydev/grpcurl"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2"
_ "github.com/shurcooL/vfsgen"
_ "google.golang.org/protobuf/cmd/protoc-gen-go"
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc"
)
| 1 | 10,222 |
This is just rearranging it alphabetically
|
lyft-clutch
|
go
|
@@ -798,6 +798,7 @@ def test_chromium_version_unpatched(qapp):
assert version._chromium_version() not in ['', 'unknown', 'unavailable']
+# pylint: disable=too-many-locals
@pytest.mark.parametrize(['git_commit', 'frozen', 'style', 'with_webkit',
'known_distribution'], [
(True, False, True, True, True), # normal
| 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.version."""
import io
import sys
import os.path
import subprocess
import contextlib
import builtins
import types
import importlib
import logging
import textwrap
import pkg_resources
import pytest
import qutebrowser
from qutebrowser.utils import version, usertypes, qtutils
from qutebrowser.browser import pdfjs
@pytest.mark.parametrize('os_release, expected', [
# No file
(None, None),
# Invalid file
("\n# foo\n foo=bar=baz",
version.DistributionInfo(id=None, parsed=version.Distribution.unknown,
version=None, pretty='Unknown')),
# Archlinux
("""
NAME="Arch Linux"
PRETTY_NAME="Arch Linux"
ID=arch
ID_LIKE=archlinux
ANSI_COLOR="0;36"
HOME_URL="https://www.archlinux.org/"
SUPPORT_URL="https://bbs.archlinux.org/"
BUG_REPORT_URL="https://bugs.archlinux.org/"
""",
version.DistributionInfo(
id='arch', parsed=version.Distribution.arch, version=None,
pretty='Arch Linux')),
# Ubuntu 14.04
("""
NAME="Ubuntu"
VERSION="14.04.5 LTS, Trusty Tahr"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 14.04.5 LTS"
VERSION_ID="14.04"
""",
version.DistributionInfo(
id='ubuntu', parsed=version.Distribution.ubuntu,
version=pkg_resources.parse_version('14.4'),
pretty='Ubuntu 14.04.5 LTS')),
# Ubuntu 17.04
("""
NAME="Ubuntu"
VERSION="17.04 (Zesty Zapus)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 17.04"
VERSION_ID="17.04"
""",
version.DistributionInfo(
id='ubuntu', parsed=version.Distribution.ubuntu,
version=pkg_resources.parse_version('17.4'),
pretty='Ubuntu 17.04')),
# Debian Jessie
("""
PRETTY_NAME="Debian GNU/Linux 8 (jessie)"
NAME="Debian GNU/Linux"
VERSION_ID="8"
VERSION="8 (jessie)"
ID=debian
""",
version.DistributionInfo(
id='debian', parsed=version.Distribution.debian,
version=pkg_resources.parse_version('8'),
pretty='Debian GNU/Linux 8 (jessie)')),
# Void Linux
("""
NAME="void"
ID="void"
DISTRIB_ID="void"
PRETTY_NAME="void"
""",
version.DistributionInfo(
id='void', parsed=version.Distribution.void,
version=None, pretty='void')),
# Gentoo
("""
NAME=Gentoo
ID=gentoo
PRETTY_NAME="Gentoo/Linux"
""",
version.DistributionInfo(
id='gentoo', parsed=version.Distribution.gentoo,
version=None, pretty='Gentoo/Linux')),
# Fedora
("""
NAME=Fedora
VERSION="25 (Twenty Five)"
ID=fedora
VERSION_ID=25
PRETTY_NAME="Fedora 25 (Twenty Five)"
""",
version.DistributionInfo(
id='fedora', parsed=version.Distribution.fedora,
version=pkg_resources.parse_version('25'),
pretty='Fedora 25 (Twenty Five)')),
# OpenSUSE
("""
NAME="openSUSE Leap"
VERSION="42.2"
ID=opensuse
ID_LIKE="suse"
VERSION_ID="42.2"
PRETTY_NAME="openSUSE Leap 42.2"
""",
version.DistributionInfo(
id='opensuse', parsed=version.Distribution.opensuse,
version=pkg_resources.parse_version('42.2'),
pretty='openSUSE Leap 42.2')),
# Linux Mint
("""
NAME="Linux Mint"
VERSION="18.1 (Serena)"
ID=linuxmint
ID_LIKE=ubuntu
PRETTY_NAME="Linux Mint 18.1"
VERSION_ID="18.1"
""",
version.DistributionInfo(
id='linuxmint', parsed=version.Distribution.linuxmint,
version=pkg_resources.parse_version('18.1'),
pretty='Linux Mint 18.1')),
# Manjaro
("""
NAME="Manjaro Linux"
ID=manjaro
PRETTY_NAME="Manjaro Linux"
""",
version.DistributionInfo(
id='manjaro', parsed=version.Distribution.manjaro,
version=None, pretty='Manjaro Linux')),
])
def test_distribution(tmpdir, monkeypatch, os_release, expected):
os_release_file = tmpdir / 'os-release'
if os_release is not None:
os_release_file.write(textwrap.dedent(os_release))
monkeypatch.setenv('QUTE_FAKE_OS_RELEASE', str(os_release_file))
assert version.distribution() == expected
class GitStrSubprocessFake:
"""Object returned by the git_str_subprocess_fake fixture.
This provides a function which is used to patch _git_str_subprocess.
Attributes:
retval: The value to return when called. Needs to be set before func is
called.
"""
UNSET = object()
def __init__(self):
self.retval = self.UNSET
def func(self, gitpath):
"""Function called instead of _git_str_subprocess.
Checks whether the path passed is what we expected, and returns
self.retval.
"""
if self.retval is self.UNSET:
raise ValueError("func got called without retval being set!")
retval = self.retval
self.retval = self.UNSET
gitpath = os.path.normpath(gitpath)
expected = os.path.abspath(os.path.join(
os.path.dirname(qutebrowser.__file__), os.pardir))
assert gitpath == expected
return retval
class TestGitStr:
"""Tests for _git_str()."""
@pytest.fixture
def commit_file_mock(self, mocker):
"""Fixture providing a mock for utils.read_file for git-commit-id.
On fixture teardown, it makes sure it got called with git-commit-id as
argument.
"""
mocker.patch('qutebrowser.utils.version.subprocess',
side_effect=AssertionError)
m = mocker.patch('qutebrowser.utils.version.utils.read_file')
yield m
m.assert_called_with('git-commit-id')
@pytest.fixture
def git_str_subprocess_fake(self, mocker, monkeypatch):
"""Fixture patching _git_str_subprocess with a GitStrSubprocessFake."""
mocker.patch('qutebrowser.utils.version.subprocess',
side_effect=AssertionError)
fake = GitStrSubprocessFake()
monkeypatch.setattr(version, '_git_str_subprocess', fake.func)
return fake
def test_frozen_ok(self, commit_file_mock, monkeypatch):
"""Test with sys.frozen=True and a successful git-commit-id read."""
monkeypatch.setattr(version.sys, 'frozen', True, raising=False)
commit_file_mock.return_value = 'deadbeef'
assert version._git_str() == 'deadbeef'
def test_frozen_oserror(self, caplog, commit_file_mock, monkeypatch):
"""Test with sys.frozen=True and OSError when reading git-commit-id."""
monkeypatch.setattr(version.sys, 'frozen', True, raising=False)
commit_file_mock.side_effect = OSError
with caplog.at_level(logging.ERROR, 'misc'):
assert version._git_str() is None
@pytest.mark.not_frozen
def test_normal_successful(self, git_str_subprocess_fake):
"""Test with git returning a successful result."""
git_str_subprocess_fake.retval = 'c0ffeebabe'
assert version._git_str() == 'c0ffeebabe'
@pytest.mark.frozen
def test_normal_successful_frozen(self, git_str_subprocess_fake):
"""Test with git returning a successful result."""
# The value is defined in scripts/freeze_tests.py.
assert version._git_str() == 'fake-frozen-git-commit'
def test_normal_error(self, commit_file_mock, git_str_subprocess_fake):
"""Test without repo (but git-commit-id)."""
git_str_subprocess_fake.retval = None
commit_file_mock.return_value = '1b4d1dea'
assert version._git_str() == '1b4d1dea'
def test_normal_path_oserror(self, mocker, git_str_subprocess_fake,
caplog):
"""Test with things raising OSError."""
m = mocker.patch('qutebrowser.utils.version.os')
m.path.join.side_effect = OSError
mocker.patch('qutebrowser.utils.version.utils.read_file',
side_effect=OSError)
with caplog.at_level(logging.ERROR, 'misc'):
assert version._git_str() is None
@pytest.mark.not_frozen
def test_normal_path_nofile(self, monkeypatch, caplog,
git_str_subprocess_fake, commit_file_mock):
"""Test with undefined __file__ but available git-commit-id."""
monkeypatch.delattr(version, '__file__')
commit_file_mock.return_value = '0deadcode'
with caplog.at_level(logging.ERROR, 'misc'):
assert version._git_str() == '0deadcode'
assert len(caplog.records) == 1
assert caplog.records[0].message == "Error while getting git path"
def _has_git():
"""Check if git is installed."""
try:
subprocess.check_call(['git', '--version'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except (OSError, subprocess.CalledProcessError):
return False
else:
return True
# Decorator for tests needing git, so they get skipped when it's unavailable.
needs_git = pytest.mark.skipif(not _has_git(), reason='Needs git installed.')
class TestGitStrSubprocess:
"""Tests for _git_str_subprocess."""
@pytest.fixture
def git_repo(self, tmpdir):
"""A fixture to create a temporary git repo.
Some things are tested against a real repo so we notice if something in
git would change, or we call git incorrectly.
"""
def _git(*args):
"""Helper closure to call git."""
env = os.environ.copy()
env.update({
'GIT_AUTHOR_NAME': 'qutebrowser testsuite',
'GIT_AUTHOR_EMAIL': '[email protected]',
'GIT_AUTHOR_DATE': 'Thu 1 Jan 01:00:00 CET 1970',
'GIT_COMMITTER_NAME': 'qutebrowser testsuite',
'GIT_COMMITTER_EMAIL': '[email protected]',
'GIT_COMMITTER_DATE': 'Thu 1 Jan 01:00:00 CET 1970',
})
if os.name == 'nt':
# If we don't call this with shell=True it might fail under
# some environments on Windows...
# http://bugs.python.org/issue24493
subprocess.check_call(
'git -C "{}" {}'.format(tmpdir, ' '.join(args)),
env=env, shell=True)
else:
subprocess.check_call(
['git', '-C', str(tmpdir)] + list(args), env=env)
(tmpdir / 'file').write_text("Hello World!", encoding='utf-8')
_git('init')
_git('add', 'file')
_git('commit', '-am', 'foo', '--no-verify', '--no-edit',
'--no-post-rewrite', '--quiet', '--no-gpg-sign')
_git('tag', 'foobar')
return tmpdir
@needs_git
def test_real_git(self, git_repo):
"""Test with a real git repository."""
ret = version._git_str_subprocess(str(git_repo))
assert ret == 'foobar (1970-01-01 01:00:00 +0100)'
def test_missing_dir(self, tmpdir):
"""Test with a directory which doesn't exist."""
ret = version._git_str_subprocess(str(tmpdir / 'does-not-exist'))
assert ret is None
@pytest.mark.parametrize('exc', [
OSError,
subprocess.CalledProcessError(1, 'foobar')
])
def test_exception(self, exc, mocker, tmpdir):
"""Test with subprocess.check_output raising an exception.
Args:
exc: The exception to raise.
"""
m = mocker.patch('qutebrowser.utils.version.os')
m.path.isdir.return_value = True
mocker.patch('qutebrowser.utils.version.subprocess.check_output',
side_effect=exc)
ret = version._git_str_subprocess(str(tmpdir))
assert ret is None
class ReleaseInfoFake:
"""An object providing fakes for glob.glob/open for test_release_info.
Attributes:
_files: The files which should be returned, or None if an exception
should be raised. A {filename: [lines]} dict.
"""
def __init__(self, files):
self._files = files
def glob_fake(self, pattern):
"""Fake for glob.glob.
Verifies the arguments and returns the files listed in self._files, or
a single fake file if an exception is expected.
"""
assert pattern == '/etc/*-release'
if self._files is None:
return ['fake-file']
else:
return sorted(list(self._files))
@contextlib.contextmanager
def open_fake(self, filename, mode, encoding):
"""Fake for open().
Verifies the arguments and returns a StringIO with the content listed
in self._files.
"""
assert mode == 'r'
assert encoding == 'utf-8'
if self._files is None:
raise OSError
yield io.StringIO(''.join(self._files[filename]))
@pytest.mark.parametrize('files, expected', [
# no files -> no output
({}, []),
# empty files are stripped
({'file': ['']}, []),
({'file': []}, []),
# newlines at EOL are stripped
(
{'file1': ['foo\n', 'bar\n'], 'file2': ['baz\n']},
[('file1', 'foo\nbar'), ('file2', 'baz')]
),
# blacklisted lines
(
{'file': ['HOME_URL=example.com\n', 'NAME=FOO']},
[('file', 'NAME=FOO')]
),
# only blacklisted lines
({'file': ['HOME_URL=example.com']}, []),
# broken file
(None, []),
])
def test_release_info(files, expected, caplog, monkeypatch):
"""Test _release_info().
Args:
files: The file dict passed to ReleaseInfoFake.
expected: The expected _release_info output.
"""
fake = ReleaseInfoFake(files)
monkeypatch.setattr(version.glob, 'glob', fake.glob_fake)
monkeypatch.setattr(version, 'open', fake.open_fake, raising=False)
with caplog.at_level(logging.ERROR, 'misc'):
assert version._release_info() == expected
if files is None:
assert len(caplog.records) == 1
assert caplog.records[0].message == "Error while reading fake-file."
def test_path_info(monkeypatch):
"""Test _path_info()."""
patches = {
'config': lambda: 'CONFIG PATH',
'data': lambda: 'DATA PATH',
'system_data': lambda: 'SYSTEM DATA PATH',
'cache': lambda: 'CACHE PATH',
'download': lambda: 'DOWNLOAD PATH',
'runtime': lambda: 'RUNTIME PATH',
}
for attr, val in patches.items():
monkeypatch.setattr(version.standarddir, attr, val)
pathinfo = version._path_info()
assert pathinfo['config'] == 'CONFIG PATH'
assert pathinfo['data'] == 'DATA PATH'
assert pathinfo['system_data'] == 'SYSTEM DATA PATH'
assert pathinfo['cache'] == 'CACHE PATH'
assert pathinfo['download'] == 'DOWNLOAD PATH'
assert pathinfo['runtime'] == 'RUNTIME PATH'
class ImportFake:
"""A fake for __import__ which is used by the import_fake fixture.
Attributes:
exists: A dict mapping module names to bools. If True, the import will
success. Otherwise, it'll fail with ImportError.
version_attribute: The name to use in the fake modules for the version
attribute.
version: The version to use for the modules.
_real_import: Saving the real __import__ builtin so the imports can be
done normally for modules not in self.exists.
"""
def __init__(self):
self.exists = {
'sip': True,
'colorama': True,
'pypeg2': True,
'jinja2': True,
'pygments': True,
'yaml': True,
'cssutils': True,
'typing': True,
'PyQt5.QtWebEngineWidgets': True,
'PyQt5.QtWebKitWidgets': True,
}
self.version_attribute = '__version__'
self.version = '1.2.3'
self._real_import = builtins.__import__
def _do_import(self, name):
"""Helper for fake_import and fake_importlib_import to do the work.
Return:
The imported fake module, or None if normal importing should be
used.
"""
if name not in self.exists:
# Not one of the modules to test -> use real import
return None
elif self.exists[name]:
ns = types.SimpleNamespace()
if self.version_attribute is not None:
setattr(ns, self.version_attribute, self.version)
return ns
else:
raise ImportError("Fake ImportError for {}.".format(name))
def fake_import(self, name, *args, **kwargs):
"""Fake for the builtin __import__."""
module = self._do_import(name)
if module is not None:
return module
else:
return self._real_import(name, *args, **kwargs)
def fake_importlib_import(self, name):
"""Fake for importlib.import_module."""
module = self._do_import(name)
if module is not None:
return module
else:
return importlib.import_module(name)
@pytest.fixture
def import_fake(monkeypatch):
"""Fixture to patch imports using ImportFake."""
fake = ImportFake()
monkeypatch.setattr('builtins.__import__', fake.fake_import)
monkeypatch.setattr(version.importlib, 'import_module',
fake.fake_importlib_import)
return fake
class TestModuleVersions:
"""Tests for _module_versions()."""
@pytest.mark.usefixtures('import_fake')
def test_all_present(self):
"""Test with all modules present in version 1.2.3."""
expected = ['sip: yes', 'colorama: 1.2.3', 'pypeg2: 1.2.3',
'jinja2: 1.2.3', 'pygments: 1.2.3', 'yaml: 1.2.3',
'cssutils: 1.2.3', 'typing: yes',
'PyQt5.QtWebEngineWidgets: yes',
'PyQt5.QtWebKitWidgets: yes']
assert version._module_versions() == expected
@pytest.mark.parametrize('module, idx, expected', [
('colorama', 1, 'colorama: no'),
('cssutils', 6, 'cssutils: no'),
('typing', 7, 'typing: no'),
])
def test_missing_module(self, module, idx, expected, import_fake):
"""Test with a module missing.
Args:
module: The name of the missing module.
idx: The index where the given text is expected.
expected: The expected text.
"""
import_fake.exists[module] = False
assert version._module_versions()[idx] == expected
@pytest.mark.parametrize('value, expected', [
('VERSION', ['sip: yes', 'colorama: 1.2.3', 'pypeg2: yes',
'jinja2: yes', 'pygments: yes', 'yaml: yes',
'cssutils: yes', 'typing: yes',
'PyQt5.QtWebEngineWidgets: yes',
'PyQt5.QtWebKitWidgets: yes']),
('SIP_VERSION_STR', ['sip: 1.2.3', 'colorama: yes', 'pypeg2: yes',
'jinja2: yes', 'pygments: yes', 'yaml: yes',
'cssutils: yes', 'typing: yes',
'PyQt5.QtWebEngineWidgets: yes',
'PyQt5.QtWebKitWidgets: yes']),
(None, ['sip: yes', 'colorama: yes', 'pypeg2: yes', 'jinja2: yes',
'pygments: yes', 'yaml: yes', 'cssutils: yes', 'typing: yes',
'PyQt5.QtWebEngineWidgets: yes',
'PyQt5.QtWebKitWidgets: yes']),
])
def test_version_attribute(self, value, expected, import_fake):
"""Test with a different version attribute.
VERSION is tested for old colorama versions, and None to make sure
things still work if some package suddenly doesn't have __version__.
Args:
value: The name of the version attribute.
expected: The expected return value.
"""
import_fake.version_attribute = value
assert version._module_versions() == expected
@pytest.mark.parametrize('name, has_version', [
('sip', False),
('colorama', True),
('pypeg2', True),
('jinja2', True),
('pygments', True),
('yaml', True),
('cssutils', True),
])
def test_existing_attributes(self, name, has_version):
"""Check if all dependencies have an expected __version__ attribute.
The aim of this test is to fail if modules suddenly don't have a
__version__ attribute anymore in a newer version.
Args:
name: The name of the module to check.
has_version: Whether a __version__ attribute is expected.
"""
module = importlib.import_module(name)
assert hasattr(module, '__version__') == has_version
def test_existing_sip_attribute(self):
"""Test if sip has a SIP_VERSION_STR attribute.
The aim of this test is to fail if that gets missing in some future
version of sip.
"""
import sip
assert isinstance(sip.SIP_VERSION_STR, str)
class TestOsInfo:
"""Tests for _os_info."""
def test_linux_fake(self, monkeypatch):
"""Test with a fake Linux.
No args because osver is set to '' if the OS is linux.
"""
monkeypatch.setattr(version.sys, 'platform', 'linux')
monkeypatch.setattr(version, '_release_info',
lambda: [('releaseinfo', 'Hello World')])
ret = version._os_info()
expected = ['OS Version: ', '',
'--- releaseinfo ---', 'Hello World']
assert ret == expected
def test_windows_fake(self, monkeypatch):
"""Test with a fake Windows."""
monkeypatch.setattr(version.sys, 'platform', 'win32')
monkeypatch.setattr(version.platform, 'win32_ver',
lambda: ('eggs', 'bacon', 'ham', 'spam'))
ret = version._os_info()
expected = ['OS Version: eggs, bacon, ham, spam']
assert ret == expected
@pytest.mark.parametrize('mac_ver, mac_ver_str', [
(('x', ('', '', ''), 'y'), 'x, y'),
(('', ('', '', ''), ''), ''),
(('x', ('1', '2', '3'), 'y'), 'x, 1.2.3, y'),
])
def test_os_x_fake(self, monkeypatch, mac_ver, mac_ver_str):
"""Test with a fake OS X.
Args:
mac_ver: The tuple to set platform.mac_ver() to.
mac_ver_str: The expected Mac version string in version._os_info().
"""
monkeypatch.setattr(version.sys, 'platform', 'darwin')
monkeypatch.setattr(version.platform, 'mac_ver', lambda: mac_ver)
ret = version._os_info()
expected = ['OS Version: {}'.format(mac_ver_str)]
assert ret == expected
def test_unknown_fake(self, monkeypatch):
"""Test with a fake unknown sys.platform."""
monkeypatch.setattr(version.sys, 'platform', 'toaster')
ret = version._os_info()
expected = ['OS Version: ?']
assert ret == expected
@pytest.mark.linux
def test_linux_real(self):
"""Make sure there are no exceptions with a real Linux."""
version._os_info()
@pytest.mark.windows
def test_windows_real(self):
"""Make sure there are no exceptions with a real Windows."""
version._os_info()
@pytest.mark.osx
def test_os_x_real(self):
"""Make sure there are no exceptions with a real OS X."""
version._os_info()
class TestPDFJSVersion:
"""Tests for _pdfjs_version."""
def test_not_found(self, mocker):
mocker.patch('qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path',
side_effect=pdfjs.PDFJSNotFound('/build/pdf.js'))
assert version._pdfjs_version() == 'no'
def test_unknown(self, monkeypatch):
monkeypatch.setattr(
'qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path',
lambda path: (b'foobar', None))
assert version._pdfjs_version() == 'unknown (bundled)'
@pytest.mark.parametrize('varname', [
'PDFJS.version', # older versions
'var pdfjsVersion', # newer versions
])
def test_known(self, monkeypatch, varname):
pdfjs_code = textwrap.dedent("""
// Initializing PDFJS global object (if still undefined)
if (typeof PDFJS === 'undefined') {
(typeof window !== 'undefined' ? window : this).PDFJS = {};
}
VARNAME = '1.2.109';
PDFJS.build = '875588d';
(function pdfjsWrapper() {
// Use strict in our context only - users might not want it
'use strict';
""".replace('VARNAME', varname)).strip().encode('utf-8')
monkeypatch.setattr(
'qutebrowser.utils.version.pdfjs.get_pdfjs_res_and_path',
lambda path: (pdfjs_code, '/foo/bar/pdf.js'))
assert version._pdfjs_version() == '1.2.109 (/foo/bar/pdf.js)'
def test_real_file(self):
"""Test against the real file if pdfjs was found."""
try:
pdfjs.get_pdfjs_res_and_path('build/pdf.js')
except pdfjs.PDFJSNotFound:
pytest.skip("No pdfjs found")
ver = version._pdfjs_version()
assert ver.split()[0] not in ['no', 'unknown'], ver
class FakeQSslSocket:
"""Fake for the QSslSocket Qt class.
Attributes:
_version: What QSslSocket::sslLibraryVersionString() should return.
"""
def __init__(self, version=None):
self._version = version
def supportsSsl(self):
"""Fake for QSslSocket::supportsSsl()."""
return True
def sslLibraryVersionString(self):
"""Fake for QSslSocket::sslLibraryVersionString()."""
if self._version is None:
raise AssertionError("Got called with version None!")
return self._version
@pytest.mark.parametrize('ua, expected', [
(None, 'unavailable'), # No QWebEngineProfile
('Mozilla/5.0', 'unknown'),
('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'QtWebEngine/5.8.0 Chrome/53.0.2785.148 Safari/537.36', '53.0.2785.148'),
])
def test_chromium_version(monkeypatch, caplog, ua, expected):
if ua is None:
monkeypatch.setattr(version, 'QWebEngineProfile', None)
else:
class FakeWebEngineProfile:
def httpUserAgent(self):
return ua
monkeypatch.setattr(version, 'QWebEngineProfile', FakeWebEngineProfile)
with caplog.at_level(logging.ERROR):
assert version._chromium_version() == expected
def test_chromium_version_unpatched(qapp):
pytest.importorskip('PyQt5.QtWebEngineWidgets')
assert version._chromium_version() not in ['', 'unknown', 'unavailable']
@pytest.mark.parametrize(['git_commit', 'frozen', 'style', 'with_webkit',
'known_distribution'], [
(True, False, True, True, True), # normal
(False, False, True, True, True), # no git commit
(True, True, True, True, True), # frozen
(True, True, False, True, True), # no style
(True, False, True, False, True), # no webkit
(True, False, True, 'ng', True), # QtWebKit-NG
(True, False, True, True, False), # unknown Linux distribution
])
def test_version_output(git_commit, frozen, style, with_webkit,
known_distribution, stubs, monkeypatch):
"""Test version.version()."""
class FakeWebEngineProfile:
def httpUserAgent(self):
return 'Toaster/4.0.4 Chrome/CHROMIUMVERSION Teapot/4.1.8'
import_path = os.path.abspath('/IMPORTPATH')
patches = {
'qutebrowser.__file__': os.path.join(import_path, '__init__.py'),
'qutebrowser.__version__': 'VERSION',
'_git_str': lambda: ('GIT COMMIT' if git_commit else None),
'platform.python_implementation': lambda: 'PYTHON IMPLEMENTATION',
'platform.python_version': lambda: 'PYTHON VERSION',
'PYQT_VERSION_STR': 'PYQT VERSION',
'earlyinit.qt_version': lambda: 'QT VERSION',
'_module_versions': lambda: ['MODULE VERSION 1', 'MODULE VERSION 2'],
'_pdfjs_version': lambda: 'PDFJS VERSION',
'QSslSocket': FakeQSslSocket('SSL VERSION'),
'platform.platform': lambda: 'PLATFORM',
'platform.architecture': lambda: ('ARCHITECTURE', ''),
'_os_info': lambda: ['OS INFO 1', 'OS INFO 2'],
'_path_info': lambda: {'PATH DESC': 'PATH NAME'},
'QApplication': (stubs.FakeQApplication(style='STYLE') if style else
stubs.FakeQApplication(instance=None)),
'QLibraryInfo.location': (lambda _loc: 'QT PATH'),
}
substitutions = {
'git_commit': '\nGit commit: GIT COMMIT' if git_commit else '',
'style': '\nStyle: STYLE' if style else '',
'qt': 'QT VERSION',
'frozen': str(frozen),
'import_path': import_path,
}
if with_webkit:
patches['qWebKitVersion'] = lambda: 'WEBKIT VERSION'
patches['objects.backend'] = usertypes.Backend.QtWebKit
patches['QWebEngineProfile'] = None
if with_webkit == 'ng':
backend = 'QtWebKit-NG'
patches['qtutils.is_qtwebkit_ng'] = lambda: True
else:
backend = 'legacy QtWebKit'
patches['qtutils.is_qtwebkit_ng'] = lambda: False
substitutions['backend'] = backend + ' (WebKit WEBKIT VERSION)'
else:
monkeypatch.delattr(version, 'qtutils.qWebKitVersion', raising=False)
patches['objects.backend'] = usertypes.Backend.QtWebEngine
patches['QWebEngineProfile'] = FakeWebEngineProfile
substitutions['backend'] = 'QtWebEngine (Chromium CHROMIUMVERSION)'
if known_distribution:
patches['distribution'] = lambda: version.DistributionInfo(
parsed=version.Distribution.arch, version=None,
pretty='LINUX DISTRIBUTION', id='arch')
substitutions['linuxdist'] = ('\nLinux distribution: '
'LINUX DISTRIBUTION (arch)')
substitutions['osinfo'] = ''
else:
patches['distribution'] = lambda: None
substitutions['linuxdist'] = ''
substitutions['osinfo'] = 'OS INFO 1\nOS INFO 2\n'
for attr, val in patches.items():
monkeypatch.setattr('qutebrowser.utils.version.' + attr, val)
if frozen:
monkeypatch.setattr(sys, 'frozen', True, raising=False)
else:
monkeypatch.delattr(sys, 'frozen', raising=False)
template = textwrap.dedent("""
qutebrowser vVERSION{git_commit}
Backend: {backend}
PYTHON IMPLEMENTATION: PYTHON VERSION
Qt: {qt}
PyQt: PYQT VERSION
MODULE VERSION 1
MODULE VERSION 2
pdf.js: PDFJS VERSION
SSL: SSL VERSION
{style}
Platform: PLATFORM, ARCHITECTURE{linuxdist}
Frozen: {frozen}
Imported from {import_path}
Qt library executable path: QT PATH, data path: QT PATH
{osinfo}
Paths:
PATH DESC: PATH NAME
""".lstrip('\n'))
expected = template.rstrip('\n').format(**substitutions)
assert version.version() == expected
@pytest.mark.skipif(not qtutils.version_check('5.4'),
reason="Needs Qt >= 5.4.")
def test_opengl_vendor():
"""Simply call version.opengl_vendor() and see if it doesn't crash."""
pytest.importorskip("PyQt5.QtOpenGL")
return version.opengl_vendor()
| 1 | 17,531 |
I think this disables it for the rest of the file - you should be able to use it inside of the function instead.
|
qutebrowser-qutebrowser
|
py
|
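A short illustration of the placement being suggested above. The test name, decorator, and body are made up for the example; only the position of the pylint comment matters. A `# pylint: disable=...` comment at module level (as in the patch) applies from that point to the end of the file, while one placed inside the function body should be scoped to that function only.
import pytest
@pytest.mark.parametrize('value', [1, 2, 3])
def test_example(value):
    # pylint: disable=too-many-locals
    # Placed here, the disable covers only this function instead of every
    # function defined later in the file.
    first, second, third = 1, 2, 3  # placeholder locals for the example
    assert value in (first, second, third)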
@@ -61,9 +61,9 @@ final class MediaExtension extends AbstractExtension
}
/**
- * @param MediaInterface $media
- * @param string $format
- * @param array $options
+ * @param MediaInterface|int|string $media
+ * @param string $format
+ * @param array $options
*
* @return string
*/
| 1 |
<?php
declare(strict_types=1);
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Twig\Extension;
use Sonata\Doctrine\Model\ManagerInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Provider\Pool;
use Sonata\MediaBundle\Twig\TokenParser\MediaTokenParser;
use Sonata\MediaBundle\Twig\TokenParser\PathTokenParser;
use Sonata\MediaBundle\Twig\TokenParser\ThumbnailTokenParser;
use Twig\Environment;
use Twig\Extension\AbstractExtension;
final class MediaExtension extends AbstractExtension
{
/**
* @var Pool
*/
protected $mediaService;
/**
* @var array
*/
protected $resources = [];
/**
* @var ManagerInterface
*/
protected $mediaManager;
/**
* @var Environment
*/
protected $twig;
public function __construct(Pool $mediaService, ManagerInterface $mediaManager, Environment $twig)
{
$this->mediaService = $mediaService;
$this->mediaManager = $mediaManager;
$this->twig = $twig;
}
public function getTokenParsers()
{
return [
new MediaTokenParser(static::class),
new ThumbnailTokenParser(static::class),
new PathTokenParser(static::class),
];
}
/**
* @param MediaInterface $media
* @param string $format
* @param array $options
*
* @return string
*/
public function media($media, $format, $options = [])
{
$media = $this->getMedia($media);
if (null === $media) {
return '';
}
$provider = $this
->getMediaService()
->getProvider($media->getProviderName());
$format = $provider->getFormatName($media, $format);
$options = $provider->getHelperProperties($media, $format, $options);
return $this->render($provider->getTemplate('helper_view'), [
'media' => $media,
'format' => $format,
'options' => $options,
]);
}
/**
* Returns the thumbnail for the provided media.
*
* @param MediaInterface $media
* @param string $format
* @param array $options
*
* @return string
*/
public function thumbnail($media, $format, $options = [])
{
$media = $this->getMedia($media);
if (null === $media) {
return '';
}
$provider = $this->getMediaService()
->getProvider($media->getProviderName());
$format = $provider->getFormatName($media, $format);
$format_definition = $provider->getFormat($format);
// build option
$defaultOptions = [
'title' => $media->getName(),
'alt' => $media->getName(),
];
if (\is_array($format_definition) && $format_definition['width']) {
$defaultOptions['width'] = $format_definition['width'];
}
if (\is_array($format_definition) && $format_definition['height']) {
$defaultOptions['height'] = $format_definition['height'];
}
$options = array_merge($defaultOptions, $options);
$options['src'] = $provider->generatePublicUrl($media, $format);
return $this->render($provider->getTemplate('helper_thumbnail'), [
'media' => $media,
'options' => $options,
]);
}
/**
* @param string $template
*
* @return mixed
*/
public function render($template, array $parameters = [])
{
if (!isset($this->resources[$template])) {
$this->resources[$template] = $this->twig->load($template);
}
return $this->resources[$template]->render($parameters);
}
/**
* @param MediaInterface $media
* @param string $format
*
* @return string
*/
public function path($media, $format)
{
$media = $this->getMedia($media);
if (!$media) {
return '';
}
$provider = $this->getMediaService()
->getProvider($media->getProviderName());
$format = $provider->getFormatName($media, $format);
return $provider->generatePublicUrl($media, $format);
}
/**
* @return Pool
*/
public function getMediaService()
{
return $this->mediaService;
}
/**
* @param mixed $media
*/
private function getMedia($media): ?MediaInterface
{
if (!$media instanceof MediaInterface && \strlen((string) $media) > 0) {
$media = $this->mediaManager->findOneBy([
'id' => $media,
]);
}
if (!$media instanceof MediaInterface) {
return null;
}
if (MediaInterface::STATUS_OK !== $media->getProviderStatus()) {
return null;
}
return $media;
}
}
| 1 | 11,999 |
This could receive the MediaInterface directly or an identifier. Is it correct to type it with int|string, or should we just use mixed?
|
sonata-project-SonataMediaBundle
|
php
|
@@ -89,4 +89,15 @@ final class MultiLeafFieldComparator implements LeafFieldComparator {
}
}
+ @Override
+ public Object leafValue(int docID) throws IOException {
+ Object[] valuesArray = new Object[comparators.length];
+
+ for (int i = 0; i < comparators.length; i++) {
+ valuesArray[i] = comparators[i].leafValue(docID);
+ }
+
+ return valuesArray;
+ }
+
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
final class MultiLeafFieldComparator implements LeafFieldComparator {
private final LeafFieldComparator[] comparators;
private final int[] reverseMul;
// we extract the first comparator to avoid array access in the common case
// that the first comparator compares worse than the bottom entry in the queue
private final LeafFieldComparator firstComparator;
private final int firstReverseMul;
MultiLeafFieldComparator(LeafFieldComparator[] comparators, int[] reverseMul) {
if (comparators.length != reverseMul.length) {
throw new IllegalArgumentException("Must have the same number of comparators and reverseMul, got "
+ comparators.length + " and " + reverseMul.length);
}
this.comparators = comparators;
this.reverseMul = reverseMul;
this.firstComparator = comparators[0];
this.firstReverseMul = reverseMul[0];
}
@Override
public void setBottom(int slot) throws IOException {
for (LeafFieldComparator comparator : comparators) {
comparator.setBottom(slot);
}
}
@Override
public int compareBottom(int doc) throws IOException {
int cmp = firstReverseMul * firstComparator.compareBottom(doc);
if (cmp != 0) {
return cmp;
}
for (int i = 1; i < comparators.length; ++i) {
cmp = reverseMul[i] * comparators[i].compareBottom(doc);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
@Override
public int compareTop(int doc) throws IOException {
int cmp = firstReverseMul * firstComparator.compareTop(doc);
if (cmp != 0) {
return cmp;
}
for (int i = 1; i < comparators.length; ++i) {
cmp = reverseMul[i] * comparators[i].compareTop(doc);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
@Override
public void copy(int slot, int doc) throws IOException {
for (LeafFieldComparator comparator : comparators) {
comparator.copy(slot, doc);
}
}
@Override
public void setScorer(Scorable scorer) throws IOException {
for (LeafFieldComparator comparator : comparators) {
comparator.setScorer(scorer);
}
}
}
| 1 | 30,168 |
Why not return `Object[]`?
|
apache-lucene-solr
|
java
|
@@ -46,12 +46,14 @@ public class GenericManifestFile
private String manifestPath = null;
private Long length = null;
private int specId = -1;
+ private long sequenceNumber = 0;
+ private long minSequenceNumber = 0;
private Long snapshotId = null;
private Integer addedFilesCount = null;
- private Long addedRowsCount = null;
private Integer existingFilesCount = null;
- private Long existingRowsCount = null;
private Integer deletedFilesCount = null;
+ private Long addedRowsCount = null;
+ private Long existingRowsCount = null;
private Long deletedRowsCount = null;
private List<PartitionFieldSummary> partitions = null;
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.MoreObjects;
import com.google.common.base.Objects;
import com.google.common.collect.Lists;
import java.io.Serializable;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.types.Types;
public class GenericManifestFile
implements ManifestFile, StructLike, IndexedRecord, SchemaConstructable, Serializable {
private static final Schema AVRO_SCHEMA = AvroSchemaUtil.convert(
ManifestFile.schema(), "manifest_file");
private transient Schema avroSchema; // not final for Java serialization
private int[] fromProjectionPos;
// data fields
private InputFile file = null;
private String manifestPath = null;
private Long length = null;
private int specId = -1;
private Long snapshotId = null;
private Integer addedFilesCount = null;
private Long addedRowsCount = null;
private Integer existingFilesCount = null;
private Long existingRowsCount = null;
private Integer deletedFilesCount = null;
private Long deletedRowsCount = null;
private List<PartitionFieldSummary> partitions = null;
/**
* Used by Avro reflection to instantiate this class when reading manifest files.
*/
public GenericManifestFile(org.apache.avro.Schema avroSchema) {
this.avroSchema = avroSchema;
List<Types.NestedField> fields = AvroSchemaUtil.convert(avroSchema)
.asNestedType()
.asStructType()
.fields();
List<Types.NestedField> allFields = ManifestFile.schema().asStruct().fields();
this.fromProjectionPos = new int[fields.size()];
for (int i = 0; i < fromProjectionPos.length; i += 1) {
boolean found = false;
for (int j = 0; j < allFields.size(); j += 1) {
if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
found = true;
fromProjectionPos[i] = j;
}
}
if (!found) {
throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
}
}
}
GenericManifestFile(InputFile file, int specId) {
this.avroSchema = AVRO_SCHEMA;
this.file = file;
this.manifestPath = file.location();
this.length = null; // lazily loaded from file
this.specId = specId;
this.snapshotId = null;
this.addedFilesCount = null;
this.addedRowsCount = null;
this.existingFilesCount = null;
this.existingRowsCount = null;
this.deletedFilesCount = null;
this.deletedRowsCount = null;
this.partitions = null;
this.fromProjectionPos = null;
}
public GenericManifestFile(String path, long length, int specId, Long snapshotId,
int addedFilesCount, int existingFilesCount, int deletedFilesCount,
List<PartitionFieldSummary> partitions) {
this.avroSchema = AVRO_SCHEMA;
this.manifestPath = path;
this.length = length;
this.specId = specId;
this.snapshotId = snapshotId;
this.addedFilesCount = addedFilesCount;
this.addedRowsCount = null;
this.existingFilesCount = existingFilesCount;
this.existingRowsCount = null;
this.deletedFilesCount = deletedFilesCount;
this.deletedRowsCount = null;
this.partitions = partitions;
this.fromProjectionPos = null;
}
public GenericManifestFile(String path, long length, int specId, Long snapshotId,
int addedFilesCount, long addedRowsCount, int existingFilesCount,
long existingRowsCount, int deletedFilesCount, long deletedRowsCount,
List<PartitionFieldSummary> partitions) {
this.avroSchema = AVRO_SCHEMA;
this.manifestPath = path;
this.length = length;
this.specId = specId;
this.snapshotId = snapshotId;
this.addedFilesCount = addedFilesCount;
this.addedRowsCount = addedRowsCount;
this.existingFilesCount = existingFilesCount;
this.existingRowsCount = existingRowsCount;
this.deletedFilesCount = deletedFilesCount;
this.deletedRowsCount = deletedRowsCount;
this.partitions = partitions;
this.fromProjectionPos = null;
}
/**
* Copy constructor.
*
* @param toCopy a generic manifest file to copy.
*/
private GenericManifestFile(GenericManifestFile toCopy) {
this.avroSchema = toCopy.avroSchema;
this.manifestPath = toCopy.manifestPath;
this.length = toCopy.length;
this.specId = toCopy.specId;
this.snapshotId = toCopy.snapshotId;
this.addedFilesCount = toCopy.addedFilesCount;
this.addedRowsCount = toCopy.addedRowsCount;
this.existingFilesCount = toCopy.existingFilesCount;
this.existingRowsCount = toCopy.existingRowsCount;
this.deletedFilesCount = toCopy.deletedFilesCount;
this.deletedRowsCount = toCopy.deletedRowsCount;
this.partitions = copyList(toCopy.partitions, PartitionFieldSummary::copy);
this.fromProjectionPos = toCopy.fromProjectionPos;
}
/**
* Constructor for Java serialization.
*/
GenericManifestFile() {
}
@Override
public String path() {
return manifestPath;
}
public Long lazyLength() {
if (length == null) {
if (file != null) {
// this was created from an input file and length is lazily loaded
this.length = file.getLength();
} else {
// this was loaded from a file without projecting length, throw an exception
return null;
}
}
return length;
}
@Override
public long length() {
return lazyLength();
}
@Override
public int partitionSpecId() {
return specId;
}
@Override
public Long snapshotId() {
return snapshotId;
}
@Override
public Integer addedFilesCount() {
return addedFilesCount;
}
@Override
public Long addedRowsCount() {
return addedRowsCount;
}
@Override
public Integer existingFilesCount() {
return existingFilesCount;
}
@Override
public Long existingRowsCount() {
return existingRowsCount;
}
@Override
public Integer deletedFilesCount() {
return deletedFilesCount;
}
@Override
public Long deletedRowsCount() {
return deletedRowsCount;
}
@Override
public List<PartitionFieldSummary> partitions() {
return partitions;
}
@Override
public int size() {
return ManifestFile.schema().columns().size();
}
@Override
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(get(pos));
}
@Override
public Object get(int i) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
return manifestPath;
case 1:
return lazyLength();
case 2:
return specId;
case 3:
return snapshotId;
case 4:
return addedFilesCount;
case 5:
return existingFilesCount;
case 6:
return deletedFilesCount;
case 7:
return partitions;
case 8:
return addedRowsCount;
case 9:
return existingRowsCount;
case 10:
return deletedRowsCount;
default:
throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
}
}
@Override
@SuppressWarnings("unchecked")
public <T> void set(int i, T value) {
int pos = i;
// if the schema was projected, map the incoming ordinal to the expected one
if (fromProjectionPos != null) {
pos = fromProjectionPos[i];
}
switch (pos) {
case 0:
// always coerce to String for Serializable
this.manifestPath = value.toString();
return;
case 1:
this.length = (Long) value;
return;
case 2:
this.specId = (Integer) value;
return;
case 3:
this.snapshotId = (Long) value;
return;
case 4:
this.addedFilesCount = (Integer) value;
return;
case 5:
this.existingFilesCount = (Integer) value;
return;
case 6:
this.deletedFilesCount = (Integer) value;
return;
case 7:
this.partitions = (List<PartitionFieldSummary>) value;
return;
case 8:
this.addedRowsCount = (Long) value;
return;
case 9:
this.existingRowsCount = (Long) value;
return;
case 10:
this.deletedRowsCount = (Long) value;
return;
default:
// ignore the object, it must be from a newer version of the format
}
}
@Override
public void put(int i, Object v) {
set(i, v);
}
@Override
public ManifestFile copy() {
return new GenericManifestFile(this);
}
@Override
public Schema getSchema() {
return avroSchema;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (!(other instanceof GenericManifestFile)) {
return false;
}
GenericManifestFile that = (GenericManifestFile) other;
return Objects.equal(manifestPath, that.manifestPath);
}
@Override
public int hashCode() {
return Objects.hashCode(manifestPath);
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("path", manifestPath)
.add("length", length)
.add("partition_spec_id", specId)
.add("added_snapshot_id", snapshotId)
.add("added_data_files_count", addedFilesCount)
.add("added_rows_count", addedRowsCount)
.add("existing_data_files_count", existingFilesCount)
.add("existing_rows_count", existingRowsCount)
.add("deleted_data_files_count", deletedFilesCount)
.add("deleted_rows_count", deletedRowsCount)
.add("partitions", partitions)
.toString();
}
public static CopyBuilder copyOf(ManifestFile manifestFile) {
return new CopyBuilder(manifestFile);
}
public static class CopyBuilder {
private final GenericManifestFile manifestFile;
private CopyBuilder(ManifestFile toCopy) {
if (toCopy instanceof GenericManifestFile) {
this.manifestFile = new GenericManifestFile((GenericManifestFile) toCopy);
} else {
this.manifestFile = new GenericManifestFile(
toCopy.path(), toCopy.length(), toCopy.partitionSpecId(), toCopy.snapshotId(),
toCopy.addedFilesCount(), toCopy.addedRowsCount(), toCopy.existingFilesCount(),
toCopy.existingRowsCount(), toCopy.deletedFilesCount(), toCopy.deletedRowsCount(),
copyList(toCopy.partitions(), PartitionFieldSummary::copy));
}
}
public CopyBuilder withSnapshotId(Long newSnapshotId) {
manifestFile.snapshotId = newSnapshotId;
return this;
}
public ManifestFile build() {
return manifestFile;
}
}
private static <E, R> List<R> copyList(List<E> list, Function<E, R> transform) {
if (list != null) {
List<R> copy = Lists.newArrayListWithExpectedSize(list.size());
for (E element : list) {
copy.add(transform.apply(element));
}
return Collections.unmodifiableList(copy);
}
return null;
}
}
| 1 | 19,095 |
Looks like we are trying to match the new ordering of fields in `ManifestFile`. Earlier, we co-located `...FilesCount` with `...RowsCount` to match the ordering of methods in `ManifestFile` and args in constructors. Is this change intentional?
|
apache-iceberg
|
java
|
@@ -933,11 +933,13 @@ void Monster::onThinkDefense(uint32_t interval)
Monster* summon = Monster::createMonster(summonBlock.name);
if (summon) {
const Position& summonPos = getPosition();
-
- addSummon(summon);
-
+ summon->setDropLoot(false);
+ summon->setLossSkill(false);
+ summon->setMaster(this);
if (!g_game.placeCreature(summon, summonPos, false, summonBlock.force)) {
- removeSummon(summon);
+ summon->setDropLoot(false);
+ summon->setLossSkill(true);
+ summon->setMaster(nullptr);
} else {
g_game.addMagicEffect(getPosition(), CONST_ME_MAGIC_BLUE);
g_game.addMagicEffect(summon->getPosition(), CONST_ME_TELEPORT);
| 1 |
/**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2017 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "monster.h"
#include "game.h"
#include "spells.h"
extern Game g_game;
extern Monsters g_monsters;
int32_t Monster::despawnRange;
int32_t Monster::despawnRadius;
uint32_t Monster::monsterAutoID = 0x40000000;
Monster* Monster::createMonster(const std::string& name)
{
MonsterType* mType = g_monsters.getMonsterType(name);
if (!mType) {
return nullptr;
}
return new Monster(mType);
}
Monster::Monster(MonsterType* mtype) :
Creature(),
strDescription(asLowerCaseString(mtype->nameDescription)),
mType(mtype)
{
defaultOutfit = mType->info.outfit;
currentOutfit = mType->info.outfit;
skull = mType->info.skull;
health = mType->info.health;
healthMax = mType->info.healthMax;
baseSpeed = mType->info.baseSpeed;
internalLight = mType->info.light;
hiddenHealth = mType->info.hiddenHealth;
// register creature events
for (const std::string& scriptName : mType->info.scripts) {
if (!registerCreatureEvent(scriptName)) {
std::cout << "[Warning - Monster::Monster] Unknown event name: " << scriptName << std::endl;
}
}
}
Monster::~Monster()
{
clearTargetList();
clearFriendList();
}
void Monster::addList()
{
g_game.addMonster(this);
}
void Monster::removeList()
{
g_game.removeMonster(this);
}
bool Monster::canSee(const Position& pos) const
{
return Creature::canSee(getPosition(), pos, 9, 9);
}
void Monster::onAttackedCreatureDisappear(bool)
{
attackTicks = 0;
extraMeleeAttack = true;
}
void Monster::onCreatureAppear(Creature* creature, bool isLogin)
{
Creature::onCreatureAppear(creature, isLogin);
if (mType->info.creatureAppearEvent != -1) {
// onCreatureAppear(self, creature)
LuaScriptInterface* scriptInterface = mType->info.scriptInterface;
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - Monster::onCreatureAppear] Call stack overflow" << std::endl;
return;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(mType->info.creatureAppearEvent, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(mType->info.creatureAppearEvent);
LuaScriptInterface::pushUserdata<Monster>(L, this);
LuaScriptInterface::setMetatable(L, -1, "Monster");
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
if (scriptInterface->callFunction(2)) {
return;
}
}
if (creature == this) {
//We just spawned; let's look around to see who is there.
if (isSummon()) {
isMasterInRange = canSee(getMaster()->getPosition());
}
updateTargetList();
updateIdleStatus();
} else {
onCreatureEnter(creature);
}
}
void Monster::onRemoveCreature(Creature* creature, bool isLogout)
{
Creature::onRemoveCreature(creature, isLogout);
if (mType->info.creatureDisappearEvent != -1) {
// onCreatureDisappear(self, creature)
LuaScriptInterface* scriptInterface = mType->info.scriptInterface;
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - Monster::onCreatureDisappear] Call stack overflow" << std::endl;
return;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(mType->info.creatureDisappearEvent, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(mType->info.creatureDisappearEvent);
LuaScriptInterface::pushUserdata<Monster>(L, this);
LuaScriptInterface::setMetatable(L, -1, "Monster");
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
if (scriptInterface->callFunction(2)) {
return;
}
}
if (creature == this) {
if (spawn) {
spawn->startSpawnCheck();
}
setIdle(true);
} else {
onCreatureLeave(creature);
}
}
void Monster::onCreatureMove(Creature* creature, const Tile* newTile, const Position& newPos,
const Tile* oldTile, const Position& oldPos, bool teleport)
{
Creature::onCreatureMove(creature, newTile, newPos, oldTile, oldPos, teleport);
if (mType->info.creatureMoveEvent != -1) {
// onCreatureMove(self, creature, oldPosition, newPosition)
LuaScriptInterface* scriptInterface = mType->info.scriptInterface;
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - Monster::onCreatureMove] Call stack overflow" << std::endl;
return;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(mType->info.creatureMoveEvent, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(mType->info.creatureMoveEvent);
LuaScriptInterface::pushUserdata<Monster>(L, this);
LuaScriptInterface::setMetatable(L, -1, "Monster");
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
LuaScriptInterface::pushPosition(L, oldPos);
LuaScriptInterface::pushPosition(L, newPos);
if (scriptInterface->callFunction(4)) {
return;
}
}
if (creature == this) {
if (isSummon()) {
isMasterInRange = canSee(getMaster()->getPosition());
}
updateTargetList();
updateIdleStatus();
} else {
bool canSeeNewPos = canSee(newPos);
bool canSeeOldPos = canSee(oldPos);
if (canSeeNewPos && !canSeeOldPos) {
onCreatureEnter(creature);
} else if (!canSeeNewPos && canSeeOldPos) {
onCreatureLeave(creature);
}
if (canSeeNewPos && isSummon() && getMaster() == creature) {
isMasterInRange = true; //Follow master again
}
updateIdleStatus();
if (!isSummon()) {
if (followCreature) {
const Position& followPosition = followCreature->getPosition();
const Position& position = getPosition();
int32_t offset_x = Position::getDistanceX(followPosition, position);
int32_t offset_y = Position::getDistanceY(followPosition, position);
if ((offset_x > 1 || offset_y > 1) && mType->info.changeTargetChance > 0) {
Direction dir = getDirectionTo(position, followPosition);
const Position& checkPosition = getNextPosition(dir, position);
Tile* tile = g_game.map.getTile(checkPosition);
if (tile) {
Creature* topCreature = tile->getTopCreature();
if (topCreature && followCreature != topCreature && isOpponent(topCreature)) {
selectTarget(topCreature);
}
}
}
} else if (isOpponent(creature)) {
//we have no target, let's try to pick this one
selectTarget(creature);
}
}
}
}
void Monster::onCreatureSay(Creature* creature, SpeakClasses type, const std::string& text)
{
Creature::onCreatureSay(creature, type, text);
if (mType->info.creatureSayEvent != -1) {
// onCreatureSay(self, creature, type, message)
LuaScriptInterface* scriptInterface = mType->info.scriptInterface;
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - Monster::onCreatureSay] Call stack overflow" << std::endl;
return;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(mType->info.creatureSayEvent, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(mType->info.creatureSayEvent);
LuaScriptInterface::pushUserdata<Monster>(L, this);
LuaScriptInterface::setMetatable(L, -1, "Monster");
LuaScriptInterface::pushUserdata<Creature>(L, creature);
LuaScriptInterface::setCreatureMetatable(L, -1, creature);
lua_pushnumber(L, type);
LuaScriptInterface::pushString(L, text);
scriptInterface->callVoidFunction(4);
}
}
void Monster::addFriend(Creature* creature)
{
assert(creature != this);
auto result = friendList.insert(creature);
if (result.second) {
creature->incrementReferenceCounter();
}
}
void Monster::removeFriend(Creature* creature)
{
auto it = friendList.find(creature);
if (it != friendList.end()) {
creature->decrementReferenceCounter();
friendList.erase(it);
}
}
void Monster::addTarget(Creature* creature, bool pushFront/* = false*/)
{
assert(creature != this);
if (std::find(targetList.begin(), targetList.end(), creature) == targetList.end()) {
creature->incrementReferenceCounter();
if (pushFront) {
targetList.push_front(creature);
} else {
targetList.push_back(creature);
}
}
}
void Monster::removeTarget(Creature* creature)
{
auto it = std::find(targetList.begin(), targetList.end(), creature);
if (it != targetList.end()) {
creature->decrementReferenceCounter();
targetList.erase(it);
}
}
void Monster::updateTargetList()
{
auto friendIterator = friendList.begin();
while (friendIterator != friendList.end()) {
Creature* creature = *friendIterator;
if (creature->getHealth() <= 0 || !canSee(creature->getPosition())) {
creature->decrementReferenceCounter();
friendIterator = friendList.erase(friendIterator);
} else {
++friendIterator;
}
}
auto targetIterator = targetList.begin();
while (targetIterator != targetList.end()) {
Creature* creature = *targetIterator;
if (creature->getHealth() <= 0 || !canSee(creature->getPosition())) {
creature->decrementReferenceCounter();
targetIterator = targetList.erase(targetIterator);
} else {
++targetIterator;
}
}
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, position, true);
spectators.erase(this);
for (Creature* spectator : spectators) {
if (canSee(spectator->getPosition())) {
onCreatureFound(spectator);
}
}
}
void Monster::clearTargetList()
{
for (Creature* creature : targetList) {
creature->decrementReferenceCounter();
}
targetList.clear();
}
void Monster::clearFriendList()
{
for (Creature* creature : friendList) {
creature->decrementReferenceCounter();
}
friendList.clear();
}
void Monster::onCreatureFound(Creature* creature, bool pushFront/* = false*/)
{
if (isFriend(creature)) {
addFriend(creature);
}
if (isOpponent(creature)) {
addTarget(creature, pushFront);
}
updateIdleStatus();
}
void Monster::onCreatureEnter(Creature* creature)
{
// std::cout << "onCreatureEnter - " << creature->getName() << std::endl;
if (getMaster() == creature) {
//Follow master again
isMasterInRange = true;
}
onCreatureFound(creature, true);
}
bool Monster::isFriend(const Creature* creature) const
{
if (isSummon() && getMaster()->getPlayer()) {
const Player* masterPlayer = getMaster()->getPlayer();
const Player* tmpPlayer = nullptr;
if (creature->getPlayer()) {
tmpPlayer = creature->getPlayer();
} else {
const Creature* creatureMaster = creature->getMaster();
if (creatureMaster && creatureMaster->getPlayer()) {
tmpPlayer = creatureMaster->getPlayer();
}
}
if (tmpPlayer && (tmpPlayer == getMaster() || masterPlayer->isPartner(tmpPlayer))) {
return true;
}
} else if (creature->getMonster() && !creature->isSummon()) {
return true;
}
return false;
}
bool Monster::isOpponent(const Creature* creature) const
{
if (isSummon() && getMaster()->getPlayer()) {
if (creature != getMaster()) {
return true;
}
} else {
if ((creature->getPlayer() && !creature->getPlayer()->hasFlag(PlayerFlag_IgnoredByMonsters)) ||
(creature->getMaster() && creature->getMaster()->getPlayer())) {
return true;
}
}
return false;
}
void Monster::onCreatureLeave(Creature* creature)
{
// std::cout << "onCreatureLeave - " << creature->getName() << std::endl;
if (getMaster() == creature) {
//Take random steps and only use defense abilities (e.g. heal) until its master comes back
isMasterInRange = false;
}
//update friendList
if (isFriend(creature)) {
removeFriend(creature);
}
//update targetList
if (isOpponent(creature)) {
removeTarget(creature);
if (targetList.empty()) {
updateIdleStatus();
}
}
}
bool Monster::searchTarget(TargetSearchType_t searchType /*= TARGETSEARCH_DEFAULT*/)
{
std::list<Creature*> resultList;
const Position& myPos = getPosition();
for (Creature* creature : targetList) {
if (followCreature != creature && isTarget(creature)) {
if (searchType == TARGETSEARCH_RANDOM || canUseAttack(myPos, creature)) {
resultList.push_back(creature);
}
}
}
switch (searchType) {
case TARGETSEARCH_NEAREST: {
Creature* target = nullptr;
if (!resultList.empty()) {
auto it = resultList.begin();
target = *it;
if (++it != resultList.end()) {
const Position& targetPosition = target->getPosition();
int32_t minRange = Position::getDistanceX(myPos, targetPosition) + Position::getDistanceY(myPos, targetPosition);
do {
const Position& pos = (*it)->getPosition();
int32_t distance = Position::getDistanceX(myPos, pos) + Position::getDistanceY(myPos, pos);
if (distance < minRange) {
target = *it;
minRange = distance;
}
} while (++it != resultList.end());
}
} else {
int32_t minRange = std::numeric_limits<int32_t>::max();
for (Creature* creature : targetList) {
if (!isTarget(creature)) {
continue;
}
const Position& pos = creature->getPosition();
int32_t distance = Position::getDistanceX(myPos, pos) + Position::getDistanceY(myPos, pos);
if (distance < minRange) {
target = creature;
minRange = distance;
}
}
}
if (target && selectTarget(target)) {
return true;
}
break;
}
case TARGETSEARCH_DEFAULT:
case TARGETSEARCH_ATTACKRANGE:
case TARGETSEARCH_RANDOM:
default: {
if (!resultList.empty()) {
auto it = resultList.begin();
std::advance(it, uniform_random(0, resultList.size() - 1));
return selectTarget(*it);
}
if (searchType == TARGETSEARCH_ATTACKRANGE) {
return false;
}
break;
}
}
//let's just pick the first target in the list
for (Creature* target : targetList) {
if (followCreature != target && selectTarget(target)) {
return true;
}
}
return false;
}
void Monster::onFollowCreatureComplete(const Creature* creature)
{
if (creature) {
auto it = std::find(targetList.begin(), targetList.end(), creature);
if (it != targetList.end()) {
Creature* target = (*it);
targetList.erase(it);
if (hasFollowPath) {
targetList.push_front(target);
} else if (!isSummon()) {
targetList.push_back(target);
} else {
target->decrementReferenceCounter();
}
}
}
}
BlockType_t Monster::blockHit(Creature* attacker, CombatType_t combatType, int32_t& damage,
bool checkDefense /* = false*/, bool checkArmor /* = false*/, bool /* field = false */)
{
BlockType_t blockType = Creature::blockHit(attacker, combatType, damage, checkDefense, checkArmor);
if (damage != 0) {
int32_t elementMod = 0;
auto it = mType->info.elementMap.find(combatType);
if (it != mType->info.elementMap.end()) {
elementMod = it->second;
}
if (elementMod != 0) {
damage = static_cast<int32_t>(std::round(damage * ((100 - elementMod) / 100.)));
if (damage <= 0) {
damage = 0;
blockType = BLOCK_ARMOR;
}
}
}
return blockType;
}
bool Monster::isTarget(const Creature* creature) const
{
if (creature->isRemoved() || !creature->isAttackable() ||
creature->getZone() == ZONE_PROTECTION || !canSeeCreature(creature)) {
return false;
}
if (creature->getPosition().z != getPosition().z) {
return false;
}
return true;
}
bool Monster::selectTarget(Creature* creature)
{
if (!isTarget(creature)) {
return false;
}
auto it = std::find(targetList.begin(), targetList.end(), creature);
if (it == targetList.end()) {
//Target not found in our target list.
return false;
}
if (isHostile() || isSummon()) {
if (setAttackedCreature(creature) && !isSummon()) {
g_dispatcher.addTask(createTask(std::bind(&Game::checkCreatureAttack, &g_game, getID())));
}
}
return setFollowCreature(creature);
}
void Monster::setIdle(bool idle)
{
if (isRemoved() || getHealth() <= 0) {
return;
}
isIdle = idle;
if (!isIdle) {
g_game.addCreatureCheck(this);
} else {
onIdleStatus();
clearTargetList();
clearFriendList();
Game::removeCreatureCheck(this);
}
}
void Monster::updateIdleStatus()
{
bool idle = false;
if (conditions.empty()) {
if (!isSummon() && targetList.empty()) {
idle = true;
}
}
setIdle(idle);
}
void Monster::onAddCondition(ConditionType_t type)
{
if (type == CONDITION_FIRE || type == CONDITION_ENERGY || type == CONDITION_POISON) {
updateMapCache();
}
updateIdleStatus();
}
void Monster::onEndCondition(ConditionType_t type)
{
if (type == CONDITION_FIRE || type == CONDITION_ENERGY || type == CONDITION_POISON) {
updateMapCache();
}
updateIdleStatus();
}
void Monster::onThink(uint32_t interval)
{
Creature::onThink(interval);
if (mType->info.thinkEvent != -1) {
// onThink(self, interval)
LuaScriptInterface* scriptInterface = mType->info.scriptInterface;
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - Monster::onThink] Call stack overflow" << std::endl;
return;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(mType->info.thinkEvent, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(mType->info.thinkEvent);
LuaScriptInterface::pushUserdata<Monster>(L, this);
LuaScriptInterface::setMetatable(L, -1, "Monster");
lua_pushnumber(L, interval);
if (scriptInterface->callFunction(2)) {
return;
}
}
if (!isInSpawnRange(position)) {
g_game.internalTeleport(this, masterPos);
setIdle(true);
} else {
updateIdleStatus();
if (!isIdle) {
addEventWalk();
if (isSummon()) {
if (!attackedCreature) {
if (getMaster() && getMaster()->getAttackedCreature()) {
//This happens if the monster is summoned during combat
selectTarget(getMaster()->getAttackedCreature());
} else if (getMaster() != followCreature) {
//Our master has not ordered us to attack anything, let's follow him around instead.
setFollowCreature(getMaster());
}
} else if (attackedCreature == this) {
setFollowCreature(nullptr);
} else if (followCreature != attackedCreature) {
//This happens just after a master orders an attack, so let's follow it as well.
setFollowCreature(attackedCreature);
}
} else if (!targetList.empty()) {
if (!followCreature || !hasFollowPath) {
searchTarget();
} else if (isFleeing()) {
if (attackedCreature && !canUseAttack(getPosition(), attackedCreature)) {
searchTarget(TARGETSEARCH_ATTACKRANGE);
}
}
}
onThinkTarget(interval);
onThinkYell(interval);
onThinkDefense(interval);
}
}
}
void Monster::doAttacking(uint32_t interval)
{
if (!attackedCreature || (isSummon() && attackedCreature == this)) {
return;
}
bool updateLook = true;
bool resetTicks = interval != 0;
attackTicks += interval;
const Position& myPos = getPosition();
const Position& targetPos = attackedCreature->getPosition();
for (const spellBlock_t& spellBlock : mType->info.attackSpells) {
bool inRange = false;
if (canUseSpell(myPos, targetPos, spellBlock, interval, inRange, resetTicks)) {
if (spellBlock.chance >= static_cast<uint32_t>(uniform_random(1, 100))) {
if (updateLook) {
updateLookDirection();
updateLook = false;
}
minCombatValue = spellBlock.minCombatValue;
maxCombatValue = spellBlock.maxCombatValue;
spellBlock.spell->castSpell(this, attackedCreature);
if (spellBlock.isMelee) {
extraMeleeAttack = false;
}
}
}
if (!inRange && spellBlock.isMelee) {
//melee swing out of reach
extraMeleeAttack = true;
}
}
if (updateLook) {
updateLookDirection();
}
if (resetTicks) {
attackTicks = 0;
}
}
bool Monster::canUseAttack(const Position& pos, const Creature* target) const
{
if (isHostile()) {
const Position& targetPos = target->getPosition();
uint32_t distance = std::max<uint32_t>(Position::getDistanceX(pos, targetPos), Position::getDistanceY(pos, targetPos));
for (const spellBlock_t& spellBlock : mType->info.attackSpells) {
if (spellBlock.range != 0 && distance <= spellBlock.range) {
return g_game.isSightClear(pos, targetPos, true);
}
}
return false;
}
return true;
}
bool Monster::canUseSpell(const Position& pos, const Position& targetPos,
const spellBlock_t& sb, uint32_t interval, bool& inRange, bool& resetTicks)
{
inRange = true;
if (sb.isMelee && isFleeing()) {
return false;
}
if (extraMeleeAttack) {
lastMeleeAttack = OTSYS_TIME();
} else if (sb.isMelee && (OTSYS_TIME() - lastMeleeAttack) < 1500) {
return false;
}
if (!sb.isMelee || !extraMeleeAttack) {
if (sb.speed > attackTicks) {
resetTicks = false;
return false;
}
if (attackTicks % sb.speed >= interval) {
//already used this spell for this round
return false;
}
}
if (sb.range != 0 && std::max<uint32_t>(Position::getDistanceX(pos, targetPos), Position::getDistanceY(pos, targetPos)) > sb.range) {
inRange = false;
return false;
}
return true;
}
void Monster::onThinkTarget(uint32_t interval)
{
if (!isSummon()) {
if (mType->info.changeTargetSpeed != 0) {
bool canChangeTarget = true;
if (targetChangeCooldown > 0) {
targetChangeCooldown -= interval;
if (targetChangeCooldown <= 0) {
targetChangeCooldown = 0;
targetChangeTicks = mType->info.changeTargetSpeed;
} else {
canChangeTarget = false;
}
}
if (canChangeTarget) {
targetChangeTicks += interval;
if (targetChangeTicks >= mType->info.changeTargetSpeed) {
targetChangeTicks = 0;
targetChangeCooldown = mType->info.changeTargetSpeed;
if (mType->info.changeTargetChance >= uniform_random(1, 100)) {
if (mType->info.targetDistance <= 1) {
searchTarget(TARGETSEARCH_RANDOM);
} else {
searchTarget(TARGETSEARCH_NEAREST);
}
}
}
}
}
}
}
void Monster::onThinkDefense(uint32_t interval)
{
bool resetTicks = true;
defenseTicks += interval;
for (const spellBlock_t& spellBlock : mType->info.defenseSpells) {
if (spellBlock.speed > defenseTicks) {
resetTicks = false;
continue;
}
if (defenseTicks % spellBlock.speed >= interval) {
//already used this spell for this round
continue;
}
if ((spellBlock.chance >= static_cast<uint32_t>(uniform_random(1, 100)))) {
minCombatValue = spellBlock.minCombatValue;
maxCombatValue = spellBlock.maxCombatValue;
spellBlock.spell->castSpell(this, this);
}
}
if (!isSummon() && summons.size() < mType->info.maxSummons && hasFollowPath) {
for (const summonBlock_t& summonBlock : mType->info.summons) {
if (summonBlock.speed > defenseTicks) {
resetTicks = false;
continue;
}
if (summons.size() >= mType->info.maxSummons) {
continue;
}
if (defenseTicks % summonBlock.speed >= interval) {
//already used this spell for this round
continue;
}
uint32_t summonCount = 0;
for (Creature* summon : summons) {
if (summon->getName() == summonBlock.name) {
++summonCount;
}
}
if (summonCount >= summonBlock.max) {
continue;
}
if (summonBlock.chance < static_cast<uint32_t>(uniform_random(1, 100))) {
continue;
}
Monster* summon = Monster::createMonster(summonBlock.name);
if (summon) {
const Position& summonPos = getPosition();
addSummon(summon);
if (!g_game.placeCreature(summon, summonPos, false, summonBlock.force)) {
removeSummon(summon);
} else {
g_game.addMagicEffect(getPosition(), CONST_ME_MAGIC_BLUE);
g_game.addMagicEffect(summon->getPosition(), CONST_ME_TELEPORT);
}
}
}
}
if (resetTicks) {
defenseTicks = 0;
}
}
void Monster::onThinkYell(uint32_t interval)
{
if (mType->info.yellSpeedTicks == 0) {
return;
}
yellTicks += interval;
if (yellTicks >= mType->info.yellSpeedTicks) {
yellTicks = 0;
if (!mType->info.voiceVector.empty() && (mType->info.yellChance >= static_cast<uint32_t>(uniform_random(1, 100)))) {
uint32_t index = uniform_random(0, mType->info.voiceVector.size() - 1);
const voiceBlock_t& vb = mType->info.voiceVector[index];
if (vb.yellText) {
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_YELL, vb.text, false);
} else {
g_game.internalCreatureSay(this, TALKTYPE_MONSTER_SAY, vb.text, false);
}
}
}
}
void Monster::onWalk()
{
Creature::onWalk();
}
bool Monster::pushItem(Item* item)
{
const Position& centerPos = item->getPosition();
static std::vector<std::pair<int32_t, int32_t>> relList {
{-1, -1}, {0, -1}, {1, -1},
{-1, 0}, {1, 0},
{-1, 1}, {0, 1}, {1, 1}
};
std::shuffle(relList.begin(), relList.end(), getRandomGenerator());
for (const auto& it : relList) {
Position tryPos(centerPos.x + it.first, centerPos.y + it.second, centerPos.z);
Tile* tile = g_game.map.getTile(tryPos);
if (tile && g_game.canThrowObjectTo(centerPos, tryPos)) {
if (g_game.internalMoveItem(item->getParent(), tile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr) == RETURNVALUE_NOERROR) {
return true;
}
}
}
return false;
}
void Monster::pushItems(Tile* tile)
{
//We can not use iterators here since we can push the item to another tile
//which will invalidate the iterator.
//start from the end to minimize the amount of traffic
if (TileItemVector* items = tile->getItemList()) {
uint32_t moveCount = 0;
uint32_t removeCount = 0;
int32_t downItemSize = tile->getDownItemCount();
for (int32_t i = downItemSize; --i >= 0;) {
Item* item = items->at(i);
if (item && item->hasProperty(CONST_PROP_MOVEABLE) && (item->hasProperty(CONST_PROP_BLOCKPATH)
|| item->hasProperty(CONST_PROP_BLOCKSOLID))) {
if (moveCount < 20 && Monster::pushItem(item)) {
++moveCount;
} else if (g_game.internalRemoveItem(item) == RETURNVALUE_NOERROR) {
++removeCount;
}
}
}
if (removeCount > 0) {
g_game.addMagicEffect(tile->getPosition(), CONST_ME_POFF);
}
}
}
bool Monster::pushCreature(Creature* creature)
{
static std::vector<Direction> dirList {
DIRECTION_NORTH,
DIRECTION_WEST, DIRECTION_EAST,
DIRECTION_SOUTH
};
std::shuffle(dirList.begin(), dirList.end(), getRandomGenerator());
for (Direction dir : dirList) {
const Position& tryPos = Spells::getCasterPosition(creature, dir);
Tile* toTile = g_game.map.getTile(tryPos);
if (toTile && !toTile->hasFlag(TILESTATE_BLOCKPATH)) {
if (g_game.internalMoveCreature(creature, dir) == RETURNVALUE_NOERROR) {
return true;
}
}
}
return false;
}
void Monster::pushCreatures(Tile* tile)
{
//We can not use iterators here since we can push a creature to another tile
//which will invalidate the iterator.
if (CreatureVector* creatures = tile->getCreatures()) {
uint32_t removeCount = 0;
Monster* lastPushedMonster = nullptr;
for (size_t i = 0; i < creatures->size();) {
Monster* monster = creatures->at(i)->getMonster();
if (monster && monster->isPushable()) {
if (monster != lastPushedMonster && Monster::pushCreature(monster)) {
lastPushedMonster = monster;
continue;
}
monster->changeHealth(-monster->getHealth());
monster->setDropLoot(false);
removeCount++;
}
++i;
}
if (removeCount > 0) {
g_game.addMagicEffect(tile->getPosition(), CONST_ME_BLOCKHIT);
}
}
}
bool Monster::getNextStep(Direction& direction, uint32_t& flags)
{
if (isIdle || getHealth() <= 0) {
//we don't have anyone watching, might as well stop walking
eventWalk = 0;
return false;
}
bool result = false;
if ((!followCreature || !hasFollowPath) && (!isSummon() || !isMasterInRange)) {
if (followCreature || getTimeSinceLastMove() > 1000) {
//choose a random direction
result = getRandomStep(getPosition(), direction);
}
} else if ((isSummon() && isMasterInRange) || followCreature) {
result = Creature::getNextStep(direction, flags);
if (result) {
flags |= FLAG_PATHFINDING;
} else {
//target dancing
if (attackedCreature && attackedCreature == followCreature) {
if (isFleeing()) {
result = getDanceStep(getPosition(), direction, false, false);
} else if (mType->info.staticAttackChance < static_cast<uint32_t>(uniform_random(1, 100))) {
result = getDanceStep(getPosition(), direction);
}
}
}
}
if (result && (canPushItems() || canPushCreatures())) {
const Position& pos = Spells::getCasterPosition(this, direction);
Tile* tile = g_game.map.getTile(pos);
if (tile) {
if (canPushItems()) {
Monster::pushItems(tile);
}
if (canPushCreatures()) {
Monster::pushCreatures(tile);
}
}
}
return result;
}
bool Monster::getRandomStep(const Position& creaturePos, Direction& direction) const
{
static std::vector<Direction> dirList{
DIRECTION_NORTH,
DIRECTION_WEST, DIRECTION_EAST,
DIRECTION_SOUTH
};
std::shuffle(dirList.begin(), dirList.end(), getRandomGenerator());
for (Direction dir : dirList) {
if (canWalkTo(creaturePos, dir)) {
direction = dir;
return true;
}
}
return false;
}
bool Monster::getDanceStep(const Position& creaturePos, Direction& direction,
bool keepAttack /*= true*/, bool keepDistance /*= true*/)
{
bool canDoAttackNow = canUseAttack(creaturePos, attackedCreature);
assert(attackedCreature != nullptr);
const Position& centerPos = attackedCreature->getPosition();
int_fast32_t offset_x = Position::getOffsetX(creaturePos, centerPos);
int_fast32_t offset_y = Position::getOffsetY(creaturePos, centerPos);
int_fast32_t distance_x = std::abs(offset_x);
int_fast32_t distance_y = std::abs(offset_y);
uint32_t centerToDist = std::max<uint32_t>(distance_x, distance_y);
std::vector<Direction> dirList;
if (!keepDistance || offset_y >= 0) {
uint32_t tmpDist = std::max<uint32_t>(distance_x, std::abs((creaturePos.getY() - 1) - centerPos.getY()));
if (tmpDist == centerToDist && canWalkTo(creaturePos, DIRECTION_NORTH)) {
bool result = true;
if (keepAttack) {
result = (!canDoAttackNow || canUseAttack(Position(creaturePos.x, creaturePos.y - 1, creaturePos.z), attackedCreature));
}
if (result) {
dirList.push_back(DIRECTION_NORTH);
}
}
}
if (!keepDistance || offset_y <= 0) {
uint32_t tmpDist = std::max<uint32_t>(distance_x, std::abs((creaturePos.getY() + 1) - centerPos.getY()));
if (tmpDist == centerToDist && canWalkTo(creaturePos, DIRECTION_SOUTH)) {
bool result = true;
if (keepAttack) {
result = (!canDoAttackNow || canUseAttack(Position(creaturePos.x, creaturePos.y + 1, creaturePos.z), attackedCreature));
}
if (result) {
dirList.push_back(DIRECTION_SOUTH);
}
}
}
if (!keepDistance || offset_x <= 0) {
uint32_t tmpDist = std::max<uint32_t>(std::abs((creaturePos.getX() + 1) - centerPos.getX()), distance_y);
if (tmpDist == centerToDist && canWalkTo(creaturePos, DIRECTION_EAST)) {
bool result = true;
if (keepAttack) {
result = (!canDoAttackNow || canUseAttack(Position(creaturePos.x + 1, creaturePos.y, creaturePos.z), attackedCreature));
}
if (result) {
dirList.push_back(DIRECTION_EAST);
}
}
}
if (!keepDistance || offset_x >= 0) {
uint32_t tmpDist = std::max<uint32_t>(std::abs((creaturePos.getX() - 1) - centerPos.getX()), distance_y);
if (tmpDist == centerToDist && canWalkTo(creaturePos, DIRECTION_WEST)) {
bool result = true;
if (keepAttack) {
result = (!canDoAttackNow || canUseAttack(Position(creaturePos.x - 1, creaturePos.y, creaturePos.z), attackedCreature));
}
if (result) {
dirList.push_back(DIRECTION_WEST);
}
}
}
if (!dirList.empty()) {
std::shuffle(dirList.begin(), dirList.end(), getRandomGenerator());
direction = dirList[uniform_random(0, dirList.size() - 1)];
return true;
}
return false;
}
bool Monster::getDistanceStep(const Position& targetPos, Direction& direction, bool flee /* = false */)
{
const Position& creaturePos = getPosition();
int_fast32_t dx = Position::getDistanceX(creaturePos, targetPos);
int_fast32_t dy = Position::getDistanceY(creaturePos, targetPos);
int32_t distance = std::max<int32_t>(dx, dy);
if (!flee && (distance > mType->info.targetDistance || !g_game.isSightClear(creaturePos, targetPos, true))) {
return false; // let the A* calculate it
} else if (!flee && distance == mType->info.targetDistance) {
return true; // we don't really care here, since it's what we wanted to reach (a dance step will take care of dancing in that position)
}
int_fast32_t offsetx = Position::getOffsetX(creaturePos, targetPos);
int_fast32_t offsety = Position::getOffsetY(creaturePos, targetPos);
if (dx <= 1 && dy <= 1) {
//seems like a target is near; in this case we need to slow down our movements (as a monster)
if (stepDuration < 2) {
stepDuration++;
}
} else if (stepDuration > 0) {
stepDuration--;
}
if (offsetx == 0 && offsety == 0) {
return getRandomStep(creaturePos, direction); // player is "on" the monster, so let's take some random step and the rest will be taken care of later.
}
if (dx == dy) {
//player is diagonal to the monster
if (offsetx >= 1 && offsety >= 1) {
// player is NW
//escape to SE, S or E [and some extra]
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (s && e) {
direction = boolean_random() ? DIRECTION_SOUTH : DIRECTION_EAST;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
} else if (canWalkTo(creaturePos, DIRECTION_SOUTHEAST)) {
direction = DIRECTION_SOUTHEAST;
return true;
}
/* fleeing */
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
if (flee) {
if (n && w) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_WEST;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
}
}
/* end of fleeing */
if (w && canWalkTo(creaturePos, DIRECTION_SOUTHWEST)) {
direction = DIRECTION_WEST;
} else if (n && canWalkTo(creaturePos, DIRECTION_NORTHEAST)) {
direction = DIRECTION_NORTH;
}
return true;
} else if (offsetx <= -1 && offsety <= -1) {
//player is SE
//escape to NW , W or N [and some extra]
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
if (w && n) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_NORTH;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
}
if (canWalkTo(creaturePos, DIRECTION_NORTHWEST)) {
direction = DIRECTION_NORTHWEST;
return true;
}
/* fleeing */
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (flee) {
if (s && e) {
direction = boolean_random() ? DIRECTION_SOUTH : DIRECTION_EAST;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
}
}
/* end of fleeing */
if (s && canWalkTo(creaturePos, DIRECTION_SOUTHWEST)) {
direction = DIRECTION_SOUTH;
} else if (e && canWalkTo(creaturePos, DIRECTION_NORTHEAST)) {
direction = DIRECTION_EAST;
}
return true;
} else if (offsetx >= 1 && offsety <= -1) {
//player is SW
//escape to NE, N, E [and some extra]
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (n && e) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_EAST;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
}
if (canWalkTo(creaturePos, DIRECTION_NORTHEAST)) {
direction = DIRECTION_NORTHEAST;
return true;
}
/* fleeing */
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
if (flee) {
if (s && w) {
direction = boolean_random() ? DIRECTION_SOUTH : DIRECTION_WEST;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
}
}
/* end of fleeing */
if (w && canWalkTo(creaturePos, DIRECTION_NORTHWEST)) {
direction = DIRECTION_WEST;
} else if (s && canWalkTo(creaturePos, DIRECTION_SOUTHEAST)) {
direction = DIRECTION_SOUTH;
}
return true;
} else if (offsetx <= -1 && offsety >= 1) {
// player is NE
//escape to SW, S, W [and some extra]
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
if (w && s) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_SOUTH;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
} else if (canWalkTo(creaturePos, DIRECTION_SOUTHWEST)) {
direction = DIRECTION_SOUTHWEST;
return true;
}
/* fleeing */
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (flee) {
if (n && e) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_EAST;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
}
}
/* end of fleeing */
if (e && canWalkTo(creaturePos, DIRECTION_SOUTHEAST)) {
direction = DIRECTION_EAST;
} else if (n && canWalkTo(creaturePos, DIRECTION_NORTHWEST)) {
direction = DIRECTION_NORTH;
}
return true;
}
}
//Now let's decide where the player is located relative to the monster (what direction) so we can decide where to escape.
if (dy > dx) {
Direction playerDir = offsety < 0 ? DIRECTION_SOUTH : DIRECTION_NORTH;
switch (playerDir) {
case DIRECTION_NORTH: {
// Player is to the NORTH, so obviously we need to check if we can go SOUTH, if not then let's choose WEST or EAST and again if we can't we need to decide about some diagonal movements.
if (canWalkTo(creaturePos, DIRECTION_SOUTH)) {
direction = DIRECTION_SOUTH;
return true;
}
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (w && e && offsetx == 0) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_EAST;
return true;
} else if (w && offsetx <= 0) {
direction = DIRECTION_WEST;
return true;
} else if (e && offsetx >= 0) {
direction = DIRECTION_EAST;
return true;
}
/* fleeing */
if (flee) {
if (w && e) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_EAST;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
}
}
/* end of fleeing */
bool sw = canWalkTo(creaturePos, DIRECTION_SOUTHWEST);
bool se = canWalkTo(creaturePos, DIRECTION_SOUTHEAST);
if (sw || se) {
// we can move both dirs
if (sw && se) {
direction = boolean_random() ? DIRECTION_SOUTHWEST : DIRECTION_SOUTHEAST;
} else if (w) {
direction = DIRECTION_WEST;
} else if (sw) {
direction = DIRECTION_SOUTHWEST;
} else if (e) {
direction = DIRECTION_EAST;
} else if (se) {
direction = DIRECTION_SOUTHEAST;
}
return true;
}
/* fleeing */
if (flee && canWalkTo(creaturePos, DIRECTION_NORTH)) {
// towards player, yea
direction = DIRECTION_NORTH;
return true;
}
/* end of fleeing */
break;
}
case DIRECTION_SOUTH: {
if (canWalkTo(creaturePos, DIRECTION_NORTH)) {
direction = DIRECTION_NORTH;
return true;
}
bool w = canWalkTo(creaturePos, DIRECTION_WEST);
bool e = canWalkTo(creaturePos, DIRECTION_EAST);
if (w && e && offsetx == 0) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_EAST;
return true;
} else if (w && offsetx <= 0) {
direction = DIRECTION_WEST;
return true;
} else if (e && offsetx >= 0) {
direction = DIRECTION_EAST;
return true;
}
/* fleeing */
if (flee) {
if (w && e) {
direction = boolean_random() ? DIRECTION_WEST : DIRECTION_EAST;
return true;
} else if (w) {
direction = DIRECTION_WEST;
return true;
} else if (e) {
direction = DIRECTION_EAST;
return true;
}
}
/* end of fleeing */
bool nw = canWalkTo(creaturePos, DIRECTION_NORTHWEST);
bool ne = canWalkTo(creaturePos, DIRECTION_NORTHEAST);
if (nw || ne) {
// we can move both dirs
if (nw && ne) {
direction = boolean_random() ? DIRECTION_NORTHWEST : DIRECTION_NORTHEAST;
} else if (w) {
direction = DIRECTION_WEST;
} else if (nw) {
direction = DIRECTION_NORTHWEST;
} else if (e) {
direction = DIRECTION_EAST;
} else if (ne) {
direction = DIRECTION_NORTHEAST;
}
return true;
}
/* fleeing */
if (flee && canWalkTo(creaturePos, DIRECTION_SOUTH)) {
// towards player, yea
direction = DIRECTION_SOUTH;
return true;
}
/* end of fleeing */
break;
}
default:
break;
}
} else {
Direction playerDir = offsetx < 0 ? DIRECTION_EAST : DIRECTION_WEST;
switch (playerDir) {
case DIRECTION_WEST: {
if (canWalkTo(creaturePos, DIRECTION_EAST)) {
direction = DIRECTION_EAST;
return true;
}
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
if (n && s && offsety == 0) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_SOUTH;
return true;
} else if (n && offsety <= 0) {
direction = DIRECTION_NORTH;
return true;
} else if (s && offsety >= 0) {
direction = DIRECTION_SOUTH;
return true;
}
/* fleeing */
if (flee) {
if (n && s) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_SOUTH;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
}
}
/* end of fleeing */
bool se = canWalkTo(creaturePos, DIRECTION_SOUTHEAST);
bool ne = canWalkTo(creaturePos, DIRECTION_NORTHEAST);
if (se || ne) {
if (se && ne) {
direction = boolean_random() ? DIRECTION_SOUTHEAST : DIRECTION_NORTHEAST;
} else if (s) {
direction = DIRECTION_SOUTH;
} else if (se) {
direction = DIRECTION_SOUTHEAST;
} else if (n) {
direction = DIRECTION_NORTH;
} else if (ne) {
direction = DIRECTION_NORTHEAST;
}
return true;
}
/* fleeing */
if (flee && canWalkTo(creaturePos, DIRECTION_WEST)) {
// towards player, yea
direction = DIRECTION_WEST;
return true;
}
/* end of fleeing */
break;
}
case DIRECTION_EAST: {
if (canWalkTo(creaturePos, DIRECTION_WEST)) {
direction = DIRECTION_WEST;
return true;
}
bool n = canWalkTo(creaturePos, DIRECTION_NORTH);
bool s = canWalkTo(creaturePos, DIRECTION_SOUTH);
if (n && s && offsety == 0) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_SOUTH;
return true;
} else if (n && offsety <= 0) {
direction = DIRECTION_NORTH;
return true;
} else if (s && offsety >= 0) {
direction = DIRECTION_SOUTH;
return true;
}
/* fleeing */
if (flee) {
if (n && s) {
direction = boolean_random() ? DIRECTION_NORTH : DIRECTION_SOUTH;
return true;
} else if (n) {
direction = DIRECTION_NORTH;
return true;
} else if (s) {
direction = DIRECTION_SOUTH;
return true;
}
}
/* end of fleeing */
bool nw = canWalkTo(creaturePos, DIRECTION_NORTHWEST);
bool sw = canWalkTo(creaturePos, DIRECTION_SOUTHWEST);
if (nw || sw) {
if (nw && sw) {
direction = boolean_random() ? DIRECTION_NORTHWEST : DIRECTION_SOUTHWEST;
} else if (n) {
direction = DIRECTION_NORTH;
} else if (nw) {
direction = DIRECTION_NORTHWEST;
} else if (s) {
direction = DIRECTION_SOUTH;
} else if (sw) {
direction = DIRECTION_SOUTHWEST;
}
return true;
}
/* fleeing */
if (flee && canWalkTo(creaturePos, DIRECTION_EAST)) {
// towards player, yea
direction = DIRECTION_EAST;
return true;
}
/* end of fleeing */
break;
}
default:
break;
}
}
return true;
}
bool Monster::canWalkTo(Position pos, Direction direction) const
{
pos = getNextPosition(direction, pos);
if (isInSpawnRange(pos)) {
if (getWalkCache(pos) == 0) {
return false;
}
Tile* tile = g_game.map.getTile(pos);
if (tile && tile->getTopVisibleCreature(this) == nullptr && tile->queryAdd(0, *this, 1, FLAG_PATHFINDING) == RETURNVALUE_NOERROR) {
return true;
}
}
return false;
}
void Monster::death(Creature*)
{
setAttackedCreature(nullptr);
for (Creature* summon : summons) {
summon->changeHealth(-summon->getHealth());
summon->setMaster(nullptr);
summon->decrementReferenceCounter();
}
summons.clear();
clearTargetList();
clearFriendList();
onIdleStatus();
}
Item* Monster::getCorpse(Creature* lastHitCreature, Creature* mostDamageCreature)
{
Item* corpse = Creature::getCorpse(lastHitCreature, mostDamageCreature);
if (corpse) {
if (mostDamageCreature) {
if (mostDamageCreature->getPlayer()) {
corpse->setCorpseOwner(mostDamageCreature->getID());
} else {
const Creature* mostDamageCreatureMaster = mostDamageCreature->getMaster();
if (mostDamageCreatureMaster && mostDamageCreatureMaster->getPlayer()) {
corpse->setCorpseOwner(mostDamageCreatureMaster->getID());
}
}
}
}
return corpse;
}
bool Monster::isInSpawnRange(const Position& pos) const
{
if (!spawn) {
return true;
}
if (Monster::despawnRadius == 0) {
return true;
}
if (!Spawns::isInZone(masterPos, Monster::despawnRadius, pos)) {
return false;
}
if (Monster::despawnRange == 0) {
return true;
}
if (Position::getDistanceZ(pos, masterPos) > Monster::despawnRange) {
return false;
}
return true;
}
bool Monster::getCombatValues(int32_t& min, int32_t& max)
{
if (minCombatValue == 0 && maxCombatValue == 0) {
return false;
}
min = minCombatValue;
max = maxCombatValue;
return true;
}
void Monster::updateLookDirection()
{
Direction newDir = getDirection();
if (attackedCreature) {
const Position& pos = getPosition();
const Position& attackedCreaturePos = attackedCreature->getPosition();
int_fast32_t offsetx = Position::getOffsetX(attackedCreaturePos, pos);
int_fast32_t offsety = Position::getOffsetY(attackedCreaturePos, pos);
int32_t dx = std::abs(offsetx);
int32_t dy = std::abs(offsety);
if (dx > dy) {
//look EAST/WEST
if (offsetx < 0) {
newDir = DIRECTION_WEST;
} else {
newDir = DIRECTION_EAST;
}
} else if (dx < dy) {
//look NORTH/SOUTH
if (offsety < 0) {
newDir = DIRECTION_NORTH;
} else {
newDir = DIRECTION_SOUTH;
}
} else {
Direction dir = getDirection();
if (offsetx < 0 && offsety < 0) {
if (dir == DIRECTION_SOUTH) {
newDir = DIRECTION_WEST;
} else if (dir == DIRECTION_EAST) {
newDir = DIRECTION_NORTH;
}
} else if (offsetx < 0 && offsety > 0) {
if (dir == DIRECTION_NORTH) {
newDir = DIRECTION_WEST;
} else if (dir == DIRECTION_EAST) {
newDir = DIRECTION_SOUTH;
}
} else if (offsetx > 0 && offsety < 0) {
if (dir == DIRECTION_SOUTH) {
newDir = DIRECTION_EAST;
} else if (dir == DIRECTION_WEST) {
newDir = DIRECTION_NORTH;
}
} else {
if (dir == DIRECTION_NORTH) {
newDir = DIRECTION_EAST;
} else if (dir == DIRECTION_WEST) {
newDir = DIRECTION_SOUTH;
}
}
}
}
g_game.internalCreatureTurn(this, newDir);
}
void Monster::dropLoot(Container* corpse, Creature*)
{
if (corpse && lootDrop) {
mType->createLoot(corpse);
}
}
void Monster::setNormalCreatureLight()
{
internalLight = mType->info.light;
}
void Monster::drainHealth(Creature* attacker, int32_t damage)
{
Creature::drainHealth(attacker, damage);
if (isInvisible()) {
removeCondition(CONDITION_INVISIBLE);
}
}
void Monster::changeHealth(int32_t healthChange, bool sendHealthChange/* = true*/)
{
//In case a player with ignore flag set attacks the monster
setIdle(false);
Creature::changeHealth(healthChange, sendHealthChange);
}
bool Monster::challengeCreature(Creature* creature)
{
if (isSummon()) {
return false;
}
bool result = selectTarget(creature);
if (result) {
targetChangeCooldown = 8000;
targetChangeTicks = 0;
}
return result;
}
bool Monster::convinceCreature(Creature* creature)
{
Player* player = creature->getPlayer();
if (player && !player->hasFlag(PlayerFlag_CanConvinceAll)) {
if (!mType->info.isConvinceable) {
return false;
}
}
if (isSummon()) {
if (getMaster()->getPlayer()) {
return false;
} else if (getMaster() == creature) {
return false;
}
Creature* oldMaster = getMaster();
oldMaster->removeSummon(this);
}
creature->addSummon(this);
setFollowCreature(nullptr);
setAttackedCreature(nullptr);
//destroy summons
for (Creature* summon : summons) {
summon->changeHealth(-summon->getHealth());
summon->setMaster(nullptr);
summon->decrementReferenceCounter();
}
summons.clear();
isMasterInRange = true;
updateTargetList();
updateIdleStatus();
//Notify surrounding about the change
SpectatorHashSet spectators;
g_game.map.getSpectators(spectators, getPosition(), true);
g_game.map.getSpectators(spectators, creature->getPosition(), true);
for (Creature* spectator : spectators) {
spectator->onCreatureConvinced(creature, this);
}
if (spawn) {
spawn->removeMonster(this);
spawn = nullptr;
}
return true;
}
void Monster::onCreatureConvinced(const Creature* convincer, const Creature* creature)
{
if (convincer != this && (isFriend(creature) || isOpponent(creature))) {
updateTargetList();
updateIdleStatus();
}
}
void Monster::getPathSearchParams(const Creature* creature, FindPathParams& fpp) const
{
Creature::getPathSearchParams(creature, fpp);
fpp.minTargetDist = 1;
fpp.maxTargetDist = mType->info.targetDistance;
if (isSummon()) {
if (getMaster() == creature) {
fpp.maxTargetDist = 2;
fpp.fullPathSearch = true;
} else if (mType->info.targetDistance <= 1) {
fpp.fullPathSearch = true;
} else {
fpp.fullPathSearch = !canUseAttack(getPosition(), creature);
}
} else if (isFleeing()) {
//Distance should be higher than the client view range (Map::maxClientViewportX/Map::maxClientViewportY)
fpp.maxTargetDist = Map::maxViewportX;
fpp.clearSight = false;
fpp.keepDistance = true;
fpp.fullPathSearch = false;
} else if (mType->info.targetDistance <= 1) {
fpp.fullPathSearch = true;
} else {
fpp.fullPathSearch = !canUseAttack(getPosition(), creature);
}
}
| 1 | 14,260 |
You're already setting drop loot to false just above.
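If the only concern is the duplicated call, a minimal sketch of the failure branch without it (method names copied from the patch above, not re-checked against the rest of the codebase) could be:

```cpp
Monster* summon = Monster::createMonster(summonBlock.name);
if (summon) {
	const Position& summonPos = getPosition();
	summon->setDropLoot(false);
	summon->setLossSkill(false);
	summon->setMaster(this);
	if (!g_game.placeCreature(summon, summonPos, false, summonBlock.force)) {
		// dropLoot is already false from the setup above; only reset the
		// fields that actually change when placement fails
		summon->setLossSkill(true);
		summon->setMaster(nullptr);
	} else {
		g_game.addMagicEffect(getPosition(), CONST_ME_MAGIC_BLUE);
		g_game.addMagicEffect(summon->getPosition(), CONST_ME_TELEPORT);
	}
}
```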
|
otland-forgottenserver
|
cpp
|
@@ -20,6 +20,7 @@ type Management struct {
listenerShutdownStarted chan bool
listenerShutdownWaiter sync.WaitGroup
+ once sync.Once
}
type ManagementMiddleware interface {
| 1 |
package openvpn
import (
"bufio"
"net"
"net/textproto"
"sync"
"time"
log "github.com/cihub/seelog"
)
// https://openvpn.net/index.php/open-source/documentation/miscellaneous/79-management-interface.html
type Management struct {
socketAddress string
logPrefix string
lineReceiver chan string
middlewares []ManagementMiddleware
listenerShutdownStarted chan bool
listenerShutdownWaiter sync.WaitGroup
}
type ManagementMiddleware interface {
Start(connection net.Conn) error
Stop() error
ConsumeLine(line string) (consumed bool, err error)
}
func NewManagement(socketAddress, logPrefix string, middlewares ...ManagementMiddleware) *Management {
return &Management{
socketAddress: socketAddress,
logPrefix: logPrefix,
lineReceiver: make(chan string),
middlewares: middlewares,
listenerShutdownStarted: make(chan bool),
listenerShutdownWaiter: sync.WaitGroup{},
}
}
func (management *Management) Start() error {
log.Info(management.logPrefix, "Connecting to socket:", management.socketAddress)
listener, err := net.Listen("unix", management.socketAddress)
if err != nil {
log.Error(management.logPrefix, err)
return err
}
go management.waitForShutdown(listener)
go management.deliverLines()
go management.waitForConnections(listener)
return nil
}
func (management *Management) Stop() {
log.Info(management.logPrefix, "Shutdown")
close(management.listenerShutdownStarted)
management.listenerShutdownWaiter.Wait()
log.Info(management.logPrefix, "Shutdown finished")
}
func (management *Management) waitForShutdown(listener net.Listener) {
<-management.listenerShutdownStarted
for _, middleware := range management.middlewares {
middleware.Stop()
}
listener.Close()
}
func (management *Management) waitForConnections(listener net.Listener) {
management.listenerShutdownWaiter.Add(1)
defer management.listenerShutdownWaiter.Done()
for {
connection, err := listener.Accept()
if err != nil {
select {
case <-management.listenerShutdownStarted:
log.Info(management.logPrefix, "Connection closed")
default:
log.Critical(management.logPrefix, "Connection accept error: ", err)
}
return
}
go management.serveNewConnection(connection)
}
}
func (management *Management) serveNewConnection(connection net.Conn) {
log.Info(management.logPrefix, "New connection started")
for _, middleware := range management.middlewares {
middleware.Start(connection)
}
reader := textproto.NewReader(bufio.NewReader(connection))
for {
line, err := reader.ReadLine()
if err != nil {
log.Warn(management.logPrefix, "Connection failed to read. ", err)
return
}
log.Debug(management.logPrefix, "Line received: ", line)
// Try to deliver the message
select {
case management.lineReceiver <- line:
case <-time.After(time.Second):
log.Error(management.logPrefix, "Failed to transport line: ", line)
}
}
}
func (management *Management) deliverLines() {
for {
line := <-management.lineReceiver
log.Trace(management.logPrefix, "Line delivering: ", line)
lineConsumed := false
for _, middleware := range management.middlewares {
consumed, err := middleware.ConsumeLine(line)
if err != nil {
log.Error(management.logPrefix, "Failed to deliver line: ", line, ". ", err)
}
lineConsumed = lineConsumed || consumed
}
if !lineConsumed {
log.Trace(management.logPrefix, "Line not delivered: ", line)
}
}
}
| 1 | 10,465 |
`once` is very generic and doesn't specify its purpose - `closeOnce`?
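A minimal sketch of the rename, assuming the field's only job is to make closing the shutdown channel idempotent (the `Do` usage below is inferred from the existing `Stop`, not taken from the patch):

```go
type Management struct {
	// ...existing fields...
	closeOnce sync.Once // guards close(listenerShutdownStarted)
}

func (management *Management) Stop() {
	log.Info(management.logPrefix, "Shutdown")
	// close the channel exactly once, so repeated Stop calls are safe
	management.closeOnce.Do(func() {
		close(management.listenerShutdownStarted)
	})
	management.listenerShutdownWaiter.Wait()
	log.Info(management.logPrefix, "Shutdown finished")
}
```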
|
mysteriumnetwork-node
|
go
|
@@ -79,7 +79,7 @@ public class HiveTableBaseTest extends HiveMetastoreTest {
return getTableLocationPath(tableName).toString();
}
- private static String metadataLocation(String tableName) {
+ protected static String metadataLocation(String tableName) {
return Paths.get(getTableBasePath(tableName), "metadata").toString();
}
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.File;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TableMetadataParser;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.types.Types;
import org.junit.After;
import org.junit.Before;
import static org.apache.iceberg.PartitionSpec.builderFor;
import static org.apache.iceberg.TableMetadataParser.getFileExtension;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
public class HiveTableBaseTest extends HiveMetastoreTest {
static final String TABLE_NAME = "tbl";
static final TableIdentifier TABLE_IDENTIFIER = TableIdentifier.of(DB_NAME, TABLE_NAME);
static final Schema schema = new Schema(Types.StructType.of(
required(1, "id", Types.LongType.get())).fields());
static final Schema altered = new Schema(Types.StructType.of(
required(1, "id", Types.LongType.get()),
optional(2, "data", Types.LongType.get())).fields());
private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build();
private Path tableLocation;
@Before
public void createTestTable() {
this.tableLocation = new Path(catalog.createTable(TABLE_IDENTIFIER, schema, partitionSpec).location());
}
@After
public void dropTestTable() throws Exception {
// drop the table data
tableLocation.getFileSystem(hiveConf).delete(tableLocation, true);
catalog.dropTable(TABLE_IDENTIFIER, false /* metadata only, location was already deleted */);
}
private static String getTableBasePath(String tableName) {
String databasePath = metastore.getDatabasePath(DB_NAME);
return Paths.get(databasePath, tableName).toAbsolutePath().toString();
}
protected static Path getTableLocationPath(String tableName) {
return new Path("file", null, Paths.get(getTableBasePath(tableName)).toString());
}
protected static String getTableLocation(String tableName) {
return getTableLocationPath(tableName).toString();
}
private static String metadataLocation(String tableName) {
return Paths.get(getTableBasePath(tableName), "metadata").toString();
}
private static List<String> metadataFiles(String tableName) {
return Arrays.stream(new File(metadataLocation(tableName)).listFiles())
.map(File::getAbsolutePath)
.collect(Collectors.toList());
}
protected static List<String> metadataVersionFiles(String tableName) {
return filterByExtension(tableName, getFileExtension(TableMetadataParser.Codec.NONE));
}
protected static List<String> manifestFiles(String tableName) {
return filterByExtension(tableName, ".avro");
}
private static List<String> filterByExtension(String tableName, String extension) {
return metadataFiles(tableName)
.stream()
.filter(f -> f.endsWith(extension))
.collect(Collectors.toList());
}
}
| 1 | 44,618 |
Why did this need to change from `private`? Oh, for the test below. And this is only a test class, too.
|
apache-iceberg
|
java
|
@@ -0,0 +1,7 @@
+require 'test_helper'
+
+class NotificationAcknowledgementTest < ActiveSupport::TestCase
+ # test "the truth" do
+ # assert true
+ # end
+end
| 1 | 1 | 17,616 |
Should test the `has?` method here
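A rough sketch of the kind of test the comment asks for; the fixture names and the exact `has?` signature are assumptions for illustration, not the real DMPRoadmap API:

require 'test_helper'

class NotificationAcknowledgementTest < ActiveSupport::TestCase
  # Hypothetical test: the `users`/`notifications` fixtures and `has?` living on
  # Notification are assumptions made purely for illustration.
  test 'has? reflects whether a user has acknowledged a notification' do
    user = users(:researcher)
    notification = notifications(:general)

    assert_not notification.has?(user)
    NotificationAcknowledgement.create!(user: user, notification: notification)
    assert notification.has?(user)
  end
end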
|
DMPRoadmap-roadmap
|
rb
|
|
@@ -78,7 +78,18 @@ final class SavedSearchesTest extends \VuFindTest\Integration\MinkTestCase
public function testSaveSearch()
{
$page = $this->performSearch('test');
- $this->clickCss($page, '.fa.fa-save');
+ $links = $page->findAll('css', '.searchtools a');
+ $saveLink = null;
+ foreach ($links as $link) {
+ if ($this->checkVisibility($link)
+ && str_contains($link->getHtml(), 'Save Search')
+ ) {
+ $saveLink = $link;
+ break;
+ }
+ }
+ $this->assertNotNull($link);
+ $link->click();
$this->snooze();
$this->clickCss($page, '.createAccountLink');
$this->snooze();
| 1 |
<?php
/**
* Mink saved searches test class.
*
* PHP version 7
*
* Copyright (C) Villanova University 2011.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Tests
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
*/
namespace VuFindTest\Mink;
/**
* Mink saved searches test class.
*
* Class must be final due to use of "new static()" by LiveDatabaseTrait.
*
* @category VuFind
* @package Tests
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
* @retry 4
*/
final class SavedSearchesTest extends \VuFindTest\Integration\MinkTestCase
{
use \VuFindTest\Feature\LiveDatabaseTrait;
use \VuFindTest\Feature\UserCreationTrait;
/**
* Standard setup method.
*
* @return void
*/
public static function setUpBeforeClass(): void
{
static::failIfUsersExist();
}
/**
* Standard setup method.
*
* @return void
*/
public function setUp(): void
{
// Give up if we're not running in CI:
if (!$this->continuousIntegrationRunning()) {
$this->markTestSkipped('Continuous integration not running.');
return;
}
}
/**
* Test saving and clearing a search.
*
* @retryCallback tearDownAfterClass
*
* @return void
*/
public function testSaveSearch()
{
$page = $this->performSearch('test');
$this->clickCss($page, '.fa.fa-save');
$this->snooze();
$this->clickCss($page, '.createAccountLink');
$this->snooze();
$this->fillInAccountForm($page);
$this->clickCss($page, 'input.btn.btn-primary');
$this->snooze();
$this->assertEquals(
'Search saved successfully.',
$this->findCss($page, '.alert.alert-success')->getText()
);
}
/**
* Test search history.
*
* @depends testSaveSearch
*
* @return void
*/
public function testSearchHistory()
{
// Use "foo \ bar" as our search because the backslash has been known
// to cause problems in some situations (e.g. PostgreSQL database with
// incorrect escaping); this allows us to catch regressions for a few
// different problems in a single test.
$page = $this->performSearch('foo \ bar');
$this->findAndAssertLink($page, 'Search History')->click();
$this->snooze();
// We should see our "foo \ bar" search in the history, but no saved
// searches because we are logged out:
$this->assertEquals(
'foo \ bar', $this->findAndAssertLink($page, 'foo \ bar')->getText()
);
$this->assertFalse(
$this->hasElementsMatchingText($page, 'h2', 'Saved Searches')
);
$this->assertNull($page->findLink('test'));
// Now log in and see if our saved search shows up (without making the
// unsaved search go away):
$this->clickCss($page, '#loginOptions a');
$this->snooze();
$this->fillInLoginForm($page, 'username1', 'test');
$this->submitLoginForm($page);
$this->assertEquals(
'foo \ bar', $this->findAndAssertLink($page, 'foo \ bar')->getText()
);
$this->assertTrue(
$this->hasElementsMatchingText($page, 'h2', 'Saved Searches')
);
$this->assertEquals(
'test', $this->findAndAssertLink($page, 'test')->getText()
);
// Now purge unsaved searches, confirm that unsaved search is gone
// but saved search is still present:
$this->findAndAssertLink($page, 'Purge unsaved searches')->click();
$this->snooze();
$this->assertNull($page->findLink('foo \ bar'));
$this->assertEquals(
'test', $this->findAndAssertLink($page, 'test')->getText()
);
}
/**
* Test that user A cannot delete user B's favorites.
*
* @depends testSaveSearch
* @retryCallback removeUsername2
*
* @return void
*/
public function testSavedSearchSecurity()
{
// Log in as user A and get the ID of their saved search:
$session = $this->getMinkSession();
$session->visit($this->getVuFindUrl() . '/Search/History');
$page = $session->getPage();
$this->clickCss($page, '#loginOptions a');
$this->snooze();
$this->fillInLoginForm($page, 'username1', 'test');
$this->submitLoginForm($page);
$delete = $this->findAndAssertLink($page, 'Delete')->getAttribute('href');
$this->findAndAssertLink($page, 'Log Out')->click();
$this->snooze();
// Use user A's delete link, but try to execute it as user B:
[$base, $params] = explode('?', $delete);
$session->visit($this->getVuFindUrl() . '/MyResearch/SaveSearch?' . $params);
$page = $session->getPage();
$this->clickCss($page, '.createAccountLink');
$this->snooze();
$this->fillInAccountForm(
$page, ['username' => 'username2', 'email' => '[email protected]']
);
$this->clickCss($page, 'input.btn.btn-primary');
$this->snooze();
$this->findAndAssertLink($page, 'Log Out')->click();
$this->snooze();
// Go back in as user A -- see if the saved search still exists.
$this->findAndAssertLink($page, 'Search History')->click();
$this->snooze();
$this->clickCss($page, '#loginOptions a');
$this->snooze();
$this->fillInLoginForm($page, 'username1', 'test');
$this->submitLoginForm($page);
$this->assertTrue(
$this->hasElementsMatchingText($page, 'h2', 'Saved Searches')
);
$this->assertEquals(
'test', $this->findAndAssertLink($page, 'test')->getText()
);
}
/**
* Test that notification settings work correctly.
*
* @depends testSaveSearch
*
* @return void
*/
public function testNotificationSettings()
{
// Add a search to history...
$page = $this->performSearch('journal');
// Now log in and go to search history...
$this->clickCss($page, '#loginOptions a');
$this->snooze();
$this->fillInLoginForm($page, 'username1', 'test');
$this->submitLoginForm($page);
$this->findAndAssertLink($page, 'Search History')->click();
$this->snooze();
// By default, there should be no alert option at all:
$scheduleSelector = 'select[name="schedule"]';
$this->assertNull($page->find('css', $scheduleSelector));
// Now reconfigure to allow alerts, and refresh the page:
$this->changeConfigs(
[
'config' => ['Account' => ['schedule_searches' => true]]
]
);
$session = $this->getMinkSession();
$session->reload();
$this->snooze();
$page = $session->getPage();
// Now there should be two alert options visible (one in saved, one in
// unsaved):
$this->assertEquals(2, count($page->findAll('css', $scheduleSelector)));
$this->assertEquals(
1, count($page->findAll('css', '#recent-searches ' . $scheduleSelector))
);
$this->assertEquals(
1, count($page->findAll('css', '#saved-searches ' . $scheduleSelector))
);
// At this point, our journals search should be in the unsaved list; let's
// set it up for alerts and confirm that this auto-saves it.
$select = $this->findCss($page, '#recent-searches ' . $scheduleSelector);
$select->selectOption(7);
$this->snooze();
$this->assertEquals(
2, count($page->findAll('css', '#saved-searches ' . $scheduleSelector))
);
// Now let's delete the saved search and confirm that this clears the
// alert subscription.
$this->findAndAssertLink($page, 'Delete')->click();
$this->snooze();
$select = $this->findCss($page, '#recent-searches ' . $scheduleSelector);
$this->assertEquals(0, $select->getValue());
}
/**
* Retry cleanup method in case of failure during testSavedSearchSecurity.
*
* @return void
*/
protected function removeUsername2()
{
static::removeUsers(['username2']);
}
/**
* Standard teardown method.
*
* @return void
*/
public static function tearDownAfterClass(): void
{
static::removeUsers(['username1', 'username2']);
}
}
| 1 | 31,917 |
I think you want to be asserting/clicking $saveLink, not $link.
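In other words, the last two added lines of the hunk would presumably become:

$this->assertNotNull($saveLink);
$saveLink->click();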
|
vufind-org-vufind
|
php
|
@@ -95,6 +95,7 @@ def sensor(
def asset_sensor(
asset_key: AssetKey,
pipeline_name: Optional[str] = None,
+ target_name: Optional[str] = None,
name: Optional[str] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = None,
| 1 |
import inspect
from functools import update_wrapper
from typing import TYPE_CHECKING, Callable, Generator, List, Optional, Sequence, Union
from dagster import check
from dagster.core.definitions.sensor import RunRequest, SensorDefinition, SkipReason
from dagster.core.errors import DagsterInvariantViolationError
from ...errors import DagsterInvariantViolationError
from ..events import AssetKey
from ..graph import GraphDefinition
from ..job import JobDefinition
from ..sensor import AssetSensorDefinition, RunRequest, SensorDefinition, SkipReason
if TYPE_CHECKING:
from ..sensor import SensorEvaluationContext
from ...events.log import EventLogEntry
def sensor(
pipeline_name: Optional[str] = None,
name: Optional[str] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = None,
minimum_interval_seconds: Optional[int] = None,
description: Optional[str] = None,
job: Optional[Union[GraphDefinition, JobDefinition]] = None,
jobs: Optional[Sequence[Union[GraphDefinition, JobDefinition]]] = None,
) -> Callable[
[
Callable[
["SensorEvaluationContext"],
Union[Generator[Union[RunRequest, SkipReason], None, None], RunRequest, SkipReason],
]
],
SensorDefinition,
]:
"""
Creates a sensor where the decorated function is used as the sensor's evaluation function. The
decorated function may:
1. Return a `RunRequest` object.
2. Yield multiple of `RunRequest` objects.
3. Return or yield a `SkipReason` object, providing a descriptive message of why no runs were
requested.
4. Return or yield nothing (skipping without providing a reason)
Takes a :py:class:`~dagster.SensorEvaluationContext`.
Args:
pipeline_name (str): Name of the target pipeline
name (Optional[str]): The name of the sensor. Defaults to the name of the decorated
function.
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute for runs for this sensor e.g.
``['*some_solid+', 'other_solid']``
mode (Optional[str]): The mode to apply when executing runs for this sensor.
(default: 'default')
minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse
between sensor evaluations.
description (Optional[str]): A human-readable description of the sensor.
job (Optional[Union[GraphDefinition, JobDefinition]]): Experimental
jobs (Optional[Sequence[Union[GraphDefinition, JobDefinition]]]): Experimental
"""
check.opt_str_param(name, "name")
def inner(
fn: Callable[
["SensorEvaluationContext"],
Union[Generator[Union[SkipReason, RunRequest], None, None], SkipReason, RunRequest],
]
) -> SensorDefinition:
check.callable_param(fn, "fn")
sensor_name = name or fn.__name__
sensor_def = SensorDefinition(
name=sensor_name,
pipeline_name=pipeline_name,
evaluation_fn=fn,
solid_selection=solid_selection,
mode=mode,
minimum_interval_seconds=minimum_interval_seconds,
description=description,
job=job,
jobs=jobs,
)
update_wrapper(sensor_def, wrapped=fn)
return sensor_def
return inner
def asset_sensor(
asset_key: AssetKey,
pipeline_name: Optional[str] = None,
name: Optional[str] = None,
solid_selection: Optional[List[str]] = None,
mode: Optional[str] = None,
minimum_interval_seconds: Optional[int] = None,
description: Optional[str] = None,
job: Optional[Union[GraphDefinition, JobDefinition]] = None,
) -> Callable[
[
Callable[
[
"SensorEvaluationContext",
"EventLogEntry",
],
Union[Generator[Union[RunRequest, SkipReason], None, None], RunRequest, SkipReason],
]
],
AssetSensorDefinition,
]:
"""
Creates an asset sensor where the decorated function is used as the asset sensor's evaluation
function. The decorated function may:
1. Return a `RunRequest` object.
2. Yield multiple of `RunRequest` objects.
3. Return or yield a `SkipReason` object, providing a descriptive message of why no runs were
requested.
4. Return or yield nothing (skipping without providing a reason)
Takes a :py:class:`~dagster.SensorEvaluationContext` and an EventLogEntry corresponding to an
AssetMaterialization event.
Args:
asset_key (AssetKey): The asset_key this sensor monitors.
pipeline_name (Optional[str]): Name of the target pipeline
name (Optional[str]): The name of the sensor. Defaults to the name of the decorated
function.
solid_selection (Optional[List[str]]): A list of solid subselection (including single
solid names) to execute for runs for this sensor e.g.
``['*some_solid+', 'other_solid']``
mode (Optional[str]): The mode to apply when executing runs for this sensor.
(default: 'default')
minimum_interval_seconds (Optional[int]): The minimum number of seconds that will elapse
between sensor evaluations.
description (Optional[str]): A human-readable description of the sensor.
"""
check.opt_str_param(name, "name")
def inner(
fn: Callable[
[
"SensorEvaluationContext",
"EventLogEntry",
],
Union[Generator[Union[SkipReason, RunRequest], None, None], SkipReason, RunRequest],
]
) -> AssetSensorDefinition:
check.callable_param(fn, "fn")
sensor_name = name or fn.__name__
def _wrapped_fn(context, event):
result = fn(context, event)
if inspect.isgenerator(result):
for item in result:
yield item
elif isinstance(result, (RunRequest, SkipReason)):
yield result
elif result is not None:
raise DagsterInvariantViolationError(
(
"Error in sensor {sensor_name}: Sensor unexpectedly returned output "
"{result} of type {type_}. Should only return SkipReason or "
"RunRequest objects."
).format(sensor_name=sensor_name, result=result, type_=type(result))
)
return AssetSensorDefinition(
name=sensor_name,
asset_key=asset_key,
pipeline_name=pipeline_name,
asset_materialization_fn=_wrapped_fn,
solid_selection=solid_selection,
mode=mode,
minimum_interval_seconds=minimum_interval_seconds,
description=description,
job=job,
)
return inner
| 1 | 15,802 |
We don't have `target_name` on `@sensor` - why introduce it for `@asset_sensor`? If we do want to introduce it everywhere, can we just call it `job_name` to avoid introducing a new term?
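A minimal signature sketch with the suggested name instead; types are simplified relative to the real decorator, and only the naming is the point:

from typing import List, Optional

def asset_sensor(
    asset_key,
    pipeline_name: Optional[str] = None,
    job_name: Optional[str] = None,  # suggested in place of `target_name`
    name: Optional[str] = None,
    solid_selection: Optional[List[str]] = None,
    mode: Optional[str] = None,
    minimum_interval_seconds: Optional[int] = None,
    description: Optional[str] = None,
):
    ...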
|
dagster-io-dagster
|
py
|
@@ -0,0 +1,9 @@
+using System.Collections.Generic;
+
+namespace Microsoft.DotNet.Build.Tasks.Feed
+{
+ public class SleetSettings
+ {
+ public List<SleetSource> Sources { get; set; }
+ }
+}
| 1 | 1 | 13,794 |
Missing copyright header (+other new files).
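For reference, the license header dotnet repositories typically prepend looks roughly like this (the exact wording this repository expects may differ):

// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Collections.Generic;

namespace Microsoft.DotNet.Build.Tasks.Feed
{
    public class SleetSettings
    {
        public List<SleetSource> Sources { get; set; }
    }
}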
|
dotnet-buildtools
|
.cs
|
|
@@ -104,12 +104,7 @@ class TypeToSparkType extends TypeUtil.SchemaVisitor<DataType> {
throw new UnsupportedOperationException(
"Spark does not support time fields");
case TIMESTAMP:
- Types.TimestampType timestamp = (Types.TimestampType) primitive;
- if (timestamp.shouldAdjustToUTC()) {
- return TimestampType$.MODULE$;
- }
- throw new UnsupportedOperationException(
- "Spark does not support timestamp without time zone fields");
+ return TimestampType$.MODULE$;
case STRING:
return StringType$.MODULE$;
case UUID:
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.util.List;
import org.apache.iceberg.Schema;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.spark.sql.types.ArrayType$;
import org.apache.spark.sql.types.BinaryType$;
import org.apache.spark.sql.types.BooleanType$;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType$;
import org.apache.spark.sql.types.DecimalType$;
import org.apache.spark.sql.types.DoubleType$;
import org.apache.spark.sql.types.FloatType$;
import org.apache.spark.sql.types.IntegerType$;
import org.apache.spark.sql.types.LongType$;
import org.apache.spark.sql.types.MapType$;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StringType$;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType$;
import org.apache.spark.sql.types.TimestampType$;
class TypeToSparkType extends TypeUtil.SchemaVisitor<DataType> {
TypeToSparkType() {
}
@Override
public DataType schema(Schema schema, DataType structType) {
return structType;
}
@Override
public DataType struct(Types.StructType struct, List<DataType> fieldResults) {
List<Types.NestedField> fields = struct.fields();
List<StructField> sparkFields = Lists.newArrayListWithExpectedSize(fieldResults.size());
for (int i = 0; i < fields.size(); i += 1) {
Types.NestedField field = fields.get(i);
DataType type = fieldResults.get(i);
StructField sparkField = StructField.apply(
field.name(), type, field.isOptional(), Metadata.empty());
if (field.doc() != null) {
sparkField = sparkField.withComment(field.doc());
}
sparkFields.add(sparkField);
}
return StructType$.MODULE$.apply(sparkFields);
}
@Override
public DataType field(Types.NestedField field, DataType fieldResult) {
return fieldResult;
}
@Override
public DataType list(Types.ListType list, DataType elementResult) {
return ArrayType$.MODULE$.apply(elementResult, list.isElementOptional());
}
@Override
public DataType map(Types.MapType map, DataType keyResult, DataType valueResult) {
return MapType$.MODULE$.apply(keyResult, valueResult, map.isValueOptional());
}
@Override
public DataType primitive(Type.PrimitiveType primitive) {
switch (primitive.typeId()) {
case BOOLEAN:
return BooleanType$.MODULE$;
case INTEGER:
return IntegerType$.MODULE$;
case LONG:
return LongType$.MODULE$;
case FLOAT:
return FloatType$.MODULE$;
case DOUBLE:
return DoubleType$.MODULE$;
case DATE:
return DateType$.MODULE$;
case TIME:
throw new UnsupportedOperationException(
"Spark does not support time fields");
case TIMESTAMP:
Types.TimestampType timestamp = (Types.TimestampType) primitive;
if (timestamp.shouldAdjustToUTC()) {
return TimestampType$.MODULE$;
}
throw new UnsupportedOperationException(
"Spark does not support timestamp without time zone fields");
case STRING:
return StringType$.MODULE$;
case UUID:
// use String
return StringType$.MODULE$;
case FIXED:
return BinaryType$.MODULE$;
case BINARY:
return BinaryType$.MODULE$;
case DECIMAL:
Types.DecimalType decimal = (Types.DecimalType) primitive;
return DecimalType$.MODULE$.apply(decimal.precision(), decimal.scale());
default:
throw new UnsupportedOperationException(
"Cannot convert unknown type to Spark: " + primitive);
}
}
}
| 1 | 34,280 |
Could we also do the flag check here, to see whether the "Handle without timezone" flag is enabled? We may be using this off the read path (e.g. in the migrate/snapshot code), and it would be good to catch it here too and make sure users know what is happening.
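A hypothetical sketch of such a guard; the flag name and how it would be threaded into TypeToSparkType are assumptions, not the project's actual API:

// Imports as in TypeToSparkType above; `handleTimestampWithoutZone` is an assumed flag.
static DataType timestampToSpark(Types.TimestampType timestamp, boolean handleTimestampWithoutZone) {
  if (timestamp.shouldAdjustToUTC() || handleTimestampWithoutZone) {
    return TimestampType$.MODULE$;
  }
  throw new UnsupportedOperationException(
      "Spark does not support timestamp without time zone fields; "
          + "enable the handle-timestamp-without-timezone option to convert them");
}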
|
apache-iceberg
|
java
|
@@ -2,7 +2,7 @@ import os
from typing import Dict, List
from dagster import SensorDefinition
-from dagster.core.definitions.pipeline_sensor import PipelineFailureSensorContext
+from dagster.core.definitions.pipeline_definition_definition_sensor import PipelineFailureSensorContext
from dagster_slack import make_slack_on_pipeline_failure_sensor
from hacker_news_assets.utils.slack_message import build_slack_message_blocks
| 1 |
import os
from typing import Dict, List
from dagster import SensorDefinition
from dagster.core.definitions.pipeline_sensor import PipelineFailureSensorContext
from dagster_slack import make_slack_on_pipeline_failure_sensor
from hacker_news_assets.utils.slack_message import build_slack_message_blocks
def slack_message_blocks_fn(context: PipelineFailureSensorContext, base_url: str) -> List[Dict]:
return build_slack_message_blocks(
title="👎 Pipeline Failure",
markdown_message=f'Pipeline "{context.pipeline_run.pipeline_name}" failed.',
pipeline_name=context.pipeline_run.pipeline_name,
run_id=context.pipeline_run.run_id,
mode=context.pipeline_run.mode,
run_page_url=f"{base_url}/instance/runs/{context.pipeline_run.run_id}",
)
def make_pipeline_failure_sensor(base_url: str) -> SensorDefinition:
return make_slack_on_pipeline_failure_sensor(
channel="#dogfooding-alert",
slack_token=os.environ.get("SLACK_DAGSTER_ETL_BOT_TOKEN", ""),
blocks_fn=lambda context: slack_message_blocks_fn(context, base_url),
pipeline_selection=[
"download_pipeline",
"buildkite_activity_pipeline",
"slack_stats_pipeline",
"github_community_pipeline",
],
)
| 1 | 16,491 |
Why is the file named `pipeline_definition_definition_sensor`?
|
dagster-io-dagster
|
py
|
@@ -6,6 +6,10 @@ _ts = None
def init_timescale_connection(logger, conf):
global _ts
+
+ if not conf["SQLALCHEMY_TIMESCALE_URI"]:
+ return None
+
while True:
try:
_ts = TimescaleListenStore(conf, logger)
| 1 |
import time
from listenbrainz.listenstore import TimescaleListenStore
_ts = None
def init_timescale_connection(logger, conf):
global _ts
while True:
try:
_ts = TimescaleListenStore(conf, logger)
break
except Exception as e:
logger.error("Couldn't create TimescaleListenStore instance: {}, sleeping and trying again..."
.format(str(e)), exc_info=True)
time.sleep(2)
return _ts
| 1 | 18,725 |
`None` is not needed; this can just be `return`. Not important for this release.
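I.e. the early exit can simply be a bare return:

def init_timescale_connection(logger, conf):
    global _ts

    if not conf["SQLALCHEMY_TIMESCALE_URI"]:
        return  # a bare return already yields None to the caller
    ...  # rest of the retry loop unchanged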
|
metabrainz-listenbrainz-server
|
py
|
@@ -104,7 +104,12 @@ const topPageTemplate = `
<div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
</div>
<div>
- <input id="target" placeholder="target address">
+ The dispensed Algos have no monetary value and should only be used to test applications.
+ <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.
+ <p>Please do not abuse it by requesting millions of Algos.
+ </div>
+ <div>
+ <input id="target" placeholder="target address" size="80">
<button id="dispense">Dispense</button>
</div>
<div>
| 1 |
// Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
// "bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strings"
"text/template"
"time"
"golang.org/x/crypto/acme/autocert"
"github.com/algorand/go-algorand/libgoal"
)
var configFile = flag.String("config", "", "JSON configuration file")
var autocertDir = flag.String("autocert", "", "Autocert cache directory")
var listenPort = flag.Int("port", 443, "Port to listen for incoming connections")
var httpsCert = flag.String("cert", "", "https certificate.pem file; mutually exclusive with autocert")
var httpsKey = flag.String("key", "", "https key.pem file; mutually exclusive with autocert")
var configMap map[string]dispenserSiteConfig
var client map[string]libgoal.Client
type recaptchaResponse struct {
Success bool `json:"success"`
ChallengeTS time.Time `json:"challenge_ts"`
Hostname string `json:"hostname"`
ErrorCodes []string `json:"error-codes"`
}
type dispenserSiteConfig struct {
RecaptchaSiteKey string `json:"recaptcha_sitekey"`
RecaptchaSecret string `json:"recaptcha_secret"`
Amount int `json:"amount"`
Fee int `json:"fee"`
Source string `json:"wallet"`
DataDir string `json:"data_dir"`
ExeDir string `json:"exe_dir"`
topPage string
}
const topPageTemplate = `
<html>
<head>
<title>Algorand dispenser</title>
<script src='https://www.google.com/recaptcha/api.js'>
</script>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"
integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8="
crossorigin="anonymous">
</script>
<script>
function loadparam() {
const queryString = window.location.search;
const urlParams = new URLSearchParams(queryString);
$('#target').val(urlParams.get('account'));
}
function onload() {
loadparam();
$('#dispense').click(function(e) {
var recaptcha = grecaptcha.getResponse();
var target = $('#target').val();
$('#status').html('Sending request..');
var req = $.post('/dispense', {
recaptcha: recaptcha,
target: target,
}, function(data) {
$('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
}).fail(function() {
$('#status').html('Code ' + req.status + ' ' + req.statusText + ': ' + req.responseText);
});
});
}
</script>
</head>
<body onload="onload()">
<h1>Algorand dispenser</h1>
<div class="g-recaptcha" data-sitekey="{{.RecaptchaSiteKey}}">
</div>
<div>
<input id="target" placeholder="target address">
<button id="dispense">Dispense</button>
</div>
<div>
Status: <span id="status"></span>
</div>
</body>
</html>
`
func getConfig(r *http.Request) dispenserSiteConfig {
return configMap[r.Host]
}
func handler(w http.ResponseWriter, r *http.Request) {
cfg := getConfig(r)
w.Header().Set("Content-Type", "text/html; charset=utf-8")
tmpl, err := template.New("top").Parse(topPageTemplate)
if err != nil {
log.Printf("Error parsing top page template: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = tmpl.Execute(w, cfg)
if err != nil {
log.Printf("Error executing template: %v\n", err)
}
}
func (cfg dispenserSiteConfig) checkRecaptcha(remoteip, response string) (r recaptchaResponse, err error) {
resp, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify",
url.Values{"secret": {cfg.RecaptchaSecret},
"response": {response},
"remoteip": {remoteip}})
if err != nil {
return
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = json.Unmarshal(body, &r)
return
}
func dispense(w http.ResponseWriter, r *http.Request) {
cfg := getConfig(r)
err := r.ParseForm()
if err != nil {
log.Printf("Error parsing form: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
recaptcha := r.Form.Get("recaptcha")
recap, err := cfg.checkRecaptcha(r.RemoteAddr, recaptcha)
if err != nil {
log.Printf("Error checking RECAPTCHA: %v\n", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if !recap.Success {
log.Printf("RECAPTCHA failed\n")
http.Error(w, "RECAPTCHA failed", http.StatusForbidden)
return
}
targets := r.Form["target"]
if len(targets) != 1 {
log.Printf("Corrupted target argument\n")
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
target := targets[0]
c, ok := client[r.Host]
if !ok {
http.Error(w, fmt.Sprintf("didn't find client for host %s", r.Host), http.StatusBadRequest)
return
}
tx, err := c.SendPaymentFromUnencryptedWallet(cfg.Source, target, uint64(cfg.Fee), uint64(cfg.Amount), nil)
if err != nil {
http.Error(w, fmt.Sprintf("failed to dispense money - %v", err), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(tx.ID().String())
}
func main() {
flag.Parse()
http.HandleFunc("/", handler)
http.HandleFunc("/dispense", dispense)
tmpl, err := template.New("top").Parse(topPageTemplate)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing top page template: %v\n", err)
os.Exit(1)
}
configText, err := ioutil.ReadFile(*configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read config file (%s): %v\n", *configFile, err)
os.Exit(1)
}
configMap = make(map[string]dispenserSiteConfig)
err = json.Unmarshal(configText, &configMap)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot load config file (%s): %v\n", *configFile, err)
os.Exit(1)
}
client = make(map[string]libgoal.Client)
var hosts []string
for h, cfg := range configMap {
// Make a cache dir for wallet handle tokens
cacheDir, err := ioutil.TempDir("", "dispenser")
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot make temp dir: %v\n", err)
os.Exit(1)
}
// Init libgoal Client
c, err := libgoal.MakeClientWithBinDir(cfg.ExeDir, cfg.DataDir, cacheDir, libgoal.FullClient)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot init libgoal %v\n", err)
os.Exit(1)
}
client[h] = c
hosts = append(hosts, h)
var buf strings.Builder
err = tmpl.Execute(&buf, cfg)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot execute template for site %s: %v\n", h, err)
os.Exit(1)
}
cfg.topPage = buf.String()
configMap[h] = cfg
}
useAutocert := false
if *autocertDir != "" || *httpsCert == "" || *httpsKey == "" {
useAutocert = true
}
if useAutocert {
cacheDir := *autocertDir
if cacheDir == "" {
cacheDir = os.Getenv("HOME") + "/.autocert"
}
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(hosts...),
Cache: autocert.DirCache(cacheDir),
}
go http.ListenAndServe(":80", m.HTTPHandler(nil))
log.Fatal(http.Serve(m.Listener(), nil))
} else {
log.Fatal(http.ListenAndServeTLS(fmt.Sprintf(":%d", *listenPort), *httpsCert, *httpsKey, nil))
}
}
| 1 | 42,051 |
I think it's missing closing `</p>` tags on lines 108 and 109, unless they don't matter...
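With the closing tags added, the new block from the hunk would read:

<div>
  The dispensed Algos have no monetary value and should only be used to test applications.
  <p>This service is gracefully provided to enable development on the Algorand blockchain test networks.</p>
  <p>Please do not abuse it by requesting millions of Algos.</p>
</div>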
|
algorand-go-algorand
|
go
|
@@ -579,10 +579,12 @@ public abstract class ProcessEngineConfigurationImpl extends ProcessEngineConfig
protected ScriptingEnvironment scriptingEnvironment;
protected List<ScriptEnvResolver> scriptEnvResolvers;
protected ScriptFactory scriptFactory;
+ protected String scriptEngineNameJavaScript;
protected boolean autoStoreScriptVariables = false;
protected boolean enableScriptCompilation = true;
protected boolean enableScriptEngineCaching = true;
protected boolean enableFetchScriptEngineFromProcessApplication = true;
+ protected boolean enableScriptEngineLoadExternalResources = false;
/**
* When set to false, the following behavior changes:
| 1 |
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.cfg;
import static org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd.MAX_THREADS_NUMBER;
import static org.camunda.bpm.engine.impl.util.EnsureUtil.ensureNotNull;
import javax.naming.InitialContext;
import javax.sql.DataSource;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArraySet;
import org.apache.ibatis.builder.xml.XMLConfigBuilder;
import org.apache.ibatis.datasource.pooled.PooledDataSource;
import org.apache.ibatis.mapping.Environment;
import org.apache.ibatis.session.Configuration;
import org.apache.ibatis.session.ExecutorType;
import org.apache.ibatis.session.SqlSessionFactory;
import org.apache.ibatis.session.defaults.DefaultSqlSessionFactory;
import org.apache.ibatis.transaction.TransactionFactory;
import org.apache.ibatis.transaction.jdbc.JdbcTransactionFactory;
import org.apache.ibatis.transaction.managed.ManagedTransactionFactory;
import org.camunda.bpm.dmn.engine.DmnEngine;
import org.camunda.bpm.dmn.engine.DmnEngineConfiguration;
import org.camunda.bpm.dmn.engine.impl.DefaultDmnEngineConfiguration;
import org.camunda.bpm.dmn.feel.impl.scala.function.FeelCustomFunctionProvider;
import org.camunda.bpm.engine.ArtifactFactory;
import org.camunda.bpm.engine.AuthorizationService;
import org.camunda.bpm.engine.CaseService;
import org.camunda.bpm.engine.DecisionService;
import org.camunda.bpm.engine.ExternalTaskService;
import org.camunda.bpm.engine.FilterService;
import org.camunda.bpm.engine.FormService;
import org.camunda.bpm.engine.HistoryService;
import org.camunda.bpm.engine.IdentityService;
import org.camunda.bpm.engine.ManagementService;
import org.camunda.bpm.engine.ProcessEngine;
import org.camunda.bpm.engine.ProcessEngineConfiguration;
import org.camunda.bpm.engine.ProcessEngineException;
import org.camunda.bpm.engine.RepositoryService;
import org.camunda.bpm.engine.RuntimeService;
import org.camunda.bpm.engine.TaskService;
import org.camunda.bpm.engine.authorization.Groups;
import org.camunda.bpm.engine.authorization.Permission;
import org.camunda.bpm.engine.authorization.Permissions;
import org.camunda.bpm.engine.impl.AuthorizationServiceImpl;
import org.camunda.bpm.engine.impl.DecisionServiceImpl;
import org.camunda.bpm.engine.impl.DefaultArtifactFactory;
import org.camunda.bpm.engine.impl.ExternalTaskServiceImpl;
import org.camunda.bpm.engine.impl.FilterServiceImpl;
import org.camunda.bpm.engine.impl.FormServiceImpl;
import org.camunda.bpm.engine.impl.HistoryServiceImpl;
import org.camunda.bpm.engine.impl.IdentityServiceImpl;
import org.camunda.bpm.engine.impl.ManagementServiceImpl;
import org.camunda.bpm.engine.impl.ModificationBatchJobHandler;
import org.camunda.bpm.engine.impl.OptimizeService;
import org.camunda.bpm.engine.impl.PriorityProvider;
import org.camunda.bpm.engine.impl.ProcessEngineImpl;
import org.camunda.bpm.engine.impl.ProcessEngineLogger;
import org.camunda.bpm.engine.impl.RepositoryServiceImpl;
import org.camunda.bpm.engine.impl.RestartProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.RuntimeServiceImpl;
import org.camunda.bpm.engine.impl.ServiceImpl;
import org.camunda.bpm.engine.impl.TaskServiceImpl;
import org.camunda.bpm.engine.impl.application.ProcessApplicationManager;
import org.camunda.bpm.engine.impl.batch.BatchJobHandler;
import org.camunda.bpm.engine.impl.batch.BatchMonitorJobHandler;
import org.camunda.bpm.engine.impl.batch.BatchSeedJobHandler;
import org.camunda.bpm.engine.impl.batch.deletion.DeleteHistoricProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.batch.deletion.DeleteProcessInstancesJobHandler;
import org.camunda.bpm.engine.impl.batch.externaltask.SetExternalTaskRetriesJobHandler;
import org.camunda.bpm.engine.impl.batch.job.SetJobRetriesJobHandler;
import org.camunda.bpm.engine.impl.batch.removaltime.BatchSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.removaltime.DecisionSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.removaltime.ProcessSetRemovalTimeJobHandler;
import org.camunda.bpm.engine.impl.batch.update.UpdateProcessInstancesSuspendStateJobHandler;
import org.camunda.bpm.engine.impl.batch.variables.BatchSetVariablesHandler;
import org.camunda.bpm.engine.impl.bpmn.behavior.ExternalTaskActivityBehavior;
import org.camunda.bpm.engine.impl.bpmn.deployer.BpmnDeployer;
import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParseListener;
import org.camunda.bpm.engine.impl.bpmn.parser.BpmnParser;
import org.camunda.bpm.engine.impl.bpmn.parser.DefaultFailedJobParseListener;
import org.camunda.bpm.engine.impl.calendar.BusinessCalendarManager;
import org.camunda.bpm.engine.impl.calendar.CycleBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.DueDateBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.DurationBusinessCalendar;
import org.camunda.bpm.engine.impl.calendar.MapBusinessCalendarManager;
import org.camunda.bpm.engine.impl.cfg.auth.AuthorizationCommandChecker;
import org.camunda.bpm.engine.impl.cfg.auth.DefaultAuthorizationProvider;
import org.camunda.bpm.engine.impl.cfg.auth.DefaultPermissionProvider;
import org.camunda.bpm.engine.impl.cfg.auth.PermissionProvider;
import org.camunda.bpm.engine.impl.cfg.auth.ResourceAuthorizationProvider;
import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantCommandChecker;
import org.camunda.bpm.engine.impl.cfg.multitenancy.TenantIdProvider;
import org.camunda.bpm.engine.impl.cfg.standalone.StandaloneTransactionContextFactory;
import org.camunda.bpm.engine.impl.cmd.HistoryCleanupCmd;
import org.camunda.bpm.engine.impl.cmmn.CaseServiceImpl;
import org.camunda.bpm.engine.impl.cmmn.deployer.CmmnDeployer;
import org.camunda.bpm.engine.impl.cmmn.entity.repository.CaseDefinitionManager;
import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseExecutionManager;
import org.camunda.bpm.engine.impl.cmmn.entity.runtime.CaseSentryPartManager;
import org.camunda.bpm.engine.impl.cmmn.handler.DefaultCmmnElementHandlerRegistry;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformFactory;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformListener;
import org.camunda.bpm.engine.impl.cmmn.transformer.CmmnTransformer;
import org.camunda.bpm.engine.impl.cmmn.transformer.DefaultCmmnTransformFactory;
import org.camunda.bpm.engine.impl.db.DbIdGenerator;
import org.camunda.bpm.engine.impl.db.entitymanager.DbEntityManagerFactory;
import org.camunda.bpm.engine.impl.db.entitymanager.cache.DbEntityCacheKeyMapping;
import org.camunda.bpm.engine.impl.db.sql.DbSqlPersistenceProviderFactory;
import org.camunda.bpm.engine.impl.db.sql.DbSqlSessionFactory;
import org.camunda.bpm.engine.impl.delegate.DefaultDelegateInterceptor;
import org.camunda.bpm.engine.impl.digest.Default16ByteSaltGenerator;
import org.camunda.bpm.engine.impl.digest.PasswordEncryptor;
import org.camunda.bpm.engine.impl.digest.PasswordManager;
import org.camunda.bpm.engine.impl.digest.SaltGenerator;
import org.camunda.bpm.engine.impl.digest.Sha512HashDigest;
import org.camunda.bpm.engine.impl.dmn.batch.DeleteHistoricDecisionInstancesJobHandler;
import org.camunda.bpm.engine.impl.dmn.configuration.DmnEngineConfigurationBuilder;
import org.camunda.bpm.engine.impl.dmn.deployer.DecisionDefinitionDeployer;
import org.camunda.bpm.engine.impl.dmn.deployer.DecisionRequirementsDefinitionDeployer;
import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionDefinitionManager;
import org.camunda.bpm.engine.impl.dmn.entity.repository.DecisionRequirementsDefinitionManager;
import org.camunda.bpm.engine.impl.el.CommandContextFunctionMapper;
import org.camunda.bpm.engine.impl.el.DateTimeFunctionMapper;
import org.camunda.bpm.engine.impl.el.ExpressionManager;
import org.camunda.bpm.engine.impl.event.CompensationEventHandler;
import org.camunda.bpm.engine.impl.event.ConditionalEventHandler;
import org.camunda.bpm.engine.impl.event.EventHandler;
import org.camunda.bpm.engine.impl.event.EventHandlerImpl;
import org.camunda.bpm.engine.impl.event.EventType;
import org.camunda.bpm.engine.impl.event.SignalEventHandler;
import org.camunda.bpm.engine.impl.externaltask.DefaultExternalTaskPriorityProvider;
import org.camunda.bpm.engine.impl.form.engine.FormEngine;
import org.camunda.bpm.engine.impl.form.engine.HtmlFormEngine;
import org.camunda.bpm.engine.impl.form.engine.JuelFormEngine;
import org.camunda.bpm.engine.impl.form.type.AbstractFormFieldType;
import org.camunda.bpm.engine.impl.form.type.BooleanFormType;
import org.camunda.bpm.engine.impl.form.type.DateFormType;
import org.camunda.bpm.engine.impl.form.type.FormTypes;
import org.camunda.bpm.engine.impl.form.type.LongFormType;
import org.camunda.bpm.engine.impl.form.type.StringFormType;
import org.camunda.bpm.engine.impl.form.validator.FormFieldValidator;
import org.camunda.bpm.engine.impl.form.validator.FormValidators;
import org.camunda.bpm.engine.impl.form.validator.MaxLengthValidator;
import org.camunda.bpm.engine.impl.form.validator.MaxValidator;
import org.camunda.bpm.engine.impl.form.validator.MinLengthValidator;
import org.camunda.bpm.engine.impl.form.validator.MinValidator;
import org.camunda.bpm.engine.impl.form.validator.ReadOnlyValidator;
import org.camunda.bpm.engine.impl.form.validator.RequiredValidator;
import org.camunda.bpm.engine.impl.history.DefaultHistoryRemovalTimeProvider;
import org.camunda.bpm.engine.impl.history.HistoryLevel;
import org.camunda.bpm.engine.impl.history.HistoryRemovalTimeProvider;
import org.camunda.bpm.engine.impl.history.event.HistoricDecisionInstanceManager;
import org.camunda.bpm.engine.impl.history.event.HostnameProvider;
import org.camunda.bpm.engine.impl.history.event.SimpleIpBasedProvider;
import org.camunda.bpm.engine.impl.history.handler.CompositeDbHistoryEventHandler;
import org.camunda.bpm.engine.impl.history.handler.CompositeHistoryEventHandler;
import org.camunda.bpm.engine.impl.history.handler.DbHistoryEventHandler;
import org.camunda.bpm.engine.impl.history.handler.HistoryEventHandler;
import org.camunda.bpm.engine.impl.history.parser.HistoryParseListener;
import org.camunda.bpm.engine.impl.history.producer.CacheAwareCmmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.CacheAwareHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.CmmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.DefaultDmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.DmnHistoryEventProducer;
import org.camunda.bpm.engine.impl.history.producer.HistoryEventProducer;
import org.camunda.bpm.engine.impl.history.transformer.CmmnHistoryTransformListener;
import org.camunda.bpm.engine.impl.identity.DefaultPasswordPolicyImpl;
import org.camunda.bpm.engine.impl.identity.ReadOnlyIdentityProvider;
import org.camunda.bpm.engine.impl.identity.WritableIdentityProvider;
import org.camunda.bpm.engine.impl.identity.db.DbIdentityServiceProvider;
import org.camunda.bpm.engine.impl.incident.DefaultIncidentHandler;
import org.camunda.bpm.engine.impl.incident.IncidentHandler;
import org.camunda.bpm.engine.impl.interceptor.CommandContextFactory;
import org.camunda.bpm.engine.impl.interceptor.CommandExecutor;
import org.camunda.bpm.engine.impl.interceptor.CommandExecutorImpl;
import org.camunda.bpm.engine.impl.interceptor.CommandInterceptor;
import org.camunda.bpm.engine.impl.interceptor.CrdbTransactionRetryInterceptor;
import org.camunda.bpm.engine.impl.interceptor.DelegateInterceptor;
import org.camunda.bpm.engine.impl.interceptor.SessionFactory;
import org.camunda.bpm.engine.impl.jobexecutor.AsyncContinuationJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultFailedJobCommandFactory;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobExecutor;
import org.camunda.bpm.engine.impl.jobexecutor.DefaultJobPriorityProvider;
import org.camunda.bpm.engine.impl.jobexecutor.FailedJobCommandFactory;
import org.camunda.bpm.engine.impl.jobexecutor.JobDeclaration;
import org.camunda.bpm.engine.impl.jobexecutor.JobExecutor;
import org.camunda.bpm.engine.impl.jobexecutor.JobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.NotifyAcquisitionRejectedJobsHandler;
import org.camunda.bpm.engine.impl.jobexecutor.ProcessEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.RejectedJobsHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateJobDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerActivateProcessDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerCatchIntermediateEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerExecuteNestedActivityJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerStartEventSubprocessJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendJobDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerSuspendProcessDefinitionHandler;
import org.camunda.bpm.engine.impl.jobexecutor.TimerTaskListenerJobHandler;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.BatchWindowManager;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.DefaultBatchWindowManager;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupBatch;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHandler;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupHelper;
import org.camunda.bpm.engine.impl.jobexecutor.historycleanup.HistoryCleanupJobHandler;
import org.camunda.bpm.engine.impl.metrics.MetricsRegistry;
import org.camunda.bpm.engine.impl.metrics.MetricsReporterIdProvider;
import org.camunda.bpm.engine.impl.metrics.parser.MetricsBpmnParseListener;
import org.camunda.bpm.engine.impl.metrics.parser.MetricsCmmnTransformListener;
import org.camunda.bpm.engine.impl.metrics.reporter.DbMetricsReporter;
import org.camunda.bpm.engine.impl.migration.DefaultMigrationActivityMatcher;
import org.camunda.bpm.engine.impl.migration.DefaultMigrationInstructionGenerator;
import org.camunda.bpm.engine.impl.migration.MigrationActivityMatcher;
import org.camunda.bpm.engine.impl.migration.MigrationInstructionGenerator;
import org.camunda.bpm.engine.impl.migration.batch.MigrationBatchJobHandler;
import org.camunda.bpm.engine.impl.migration.validation.activity.MigrationActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.NoCompensationHandlerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.activity.SupportedPassiveEventTriggerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncAfterMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.AsyncProcessStartMigrationValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingCompensationInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.MigratingTransitionInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedCompensationStartEventValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.NoUnmappedLeafInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.SupportedActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instance.VariableConflictActivityInstanceValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.AdditionalFlowScopeInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceBodyValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotAddMultiInstanceInnerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.CannotRemoveMultiInstanceInnerActivityValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.ConditionalEventUpdateEventTriggerValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.GatewayMappingValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.MigrationInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.OnlyOnceMappedActivityInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameBehaviorInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventScopeInstructionValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.SameEventTypeValidator;
import org.camunda.bpm.engine.impl.migration.validation.instruction.UpdateEventTriggersValidator;
import org.camunda.bpm.engine.impl.optimize.OptimizeManager;
import org.camunda.bpm.engine.impl.persistence.GenericManagerFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.Deployer;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.CacheFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.DefaultCacheFactory;
import org.camunda.bpm.engine.impl.persistence.deploy.cache.DeploymentCache;
import org.camunda.bpm.engine.impl.persistence.entity.AttachmentManager;
import org.camunda.bpm.engine.impl.persistence.entity.AuthorizationManager;
import org.camunda.bpm.engine.impl.persistence.entity.BatchManager;
import org.camunda.bpm.engine.impl.persistence.entity.ByteArrayManager;
import org.camunda.bpm.engine.impl.persistence.entity.CommentManager;
import org.camunda.bpm.engine.impl.persistence.entity.DeploymentManager;
import org.camunda.bpm.engine.impl.persistence.entity.EventSubscriptionManager;
import org.camunda.bpm.engine.impl.persistence.entity.ExecutionManager;
import org.camunda.bpm.engine.impl.persistence.entity.ExternalTaskManager;
import org.camunda.bpm.engine.impl.persistence.entity.FilterManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricActivityInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricBatchManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseActivityInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricCaseInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricDetailManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricExternalTaskLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricIdentityLinkLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricIncidentManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricJobLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricProcessInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricStatisticsManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricTaskInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.HistoricVariableInstanceManager;
import org.camunda.bpm.engine.impl.persistence.entity.IdentityInfoManager;
import org.camunda.bpm.engine.impl.persistence.entity.IdentityLinkManager;
import org.camunda.bpm.engine.impl.persistence.entity.IncidentManager;
import org.camunda.bpm.engine.impl.persistence.entity.JobDefinitionManager;
import org.camunda.bpm.engine.impl.persistence.entity.JobManager;
import org.camunda.bpm.engine.impl.persistence.entity.MeterLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.ProcessDefinitionManager;
import org.camunda.bpm.engine.impl.persistence.entity.PropertyManager;
import org.camunda.bpm.engine.impl.persistence.entity.ReportManager;
import org.camunda.bpm.engine.impl.persistence.entity.ResourceManager;
import org.camunda.bpm.engine.impl.persistence.entity.SchemaLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.StatisticsManager;
import org.camunda.bpm.engine.impl.persistence.entity.TableDataManager;
import org.camunda.bpm.engine.impl.persistence.entity.TaskManager;
import org.camunda.bpm.engine.impl.persistence.entity.TaskReportManager;
import org.camunda.bpm.engine.impl.persistence.entity.TenantManager;
import org.camunda.bpm.engine.impl.persistence.entity.UserOperationLogManager;
import org.camunda.bpm.engine.impl.persistence.entity.VariableInstanceManager;
import org.camunda.bpm.engine.impl.repository.DefaultDeploymentHandlerFactory;
import org.camunda.bpm.engine.impl.runtime.ConditionHandler;
import org.camunda.bpm.engine.impl.runtime.CorrelationHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultConditionHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultCorrelationHandler;
import org.camunda.bpm.engine.impl.runtime.DefaultDeserializationTypeValidator;
import org.camunda.bpm.engine.impl.scripting.ScriptFactory;
import org.camunda.bpm.engine.impl.scripting.engine.BeansResolverFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ResolverFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ScriptBindingsFactory;
import org.camunda.bpm.engine.impl.scripting.engine.ScriptingEngines;
import org.camunda.bpm.engine.impl.scripting.engine.VariableScopeResolverFactory;
import org.camunda.bpm.engine.impl.scripting.env.ScriptEnvResolver;
import org.camunda.bpm.engine.impl.scripting.env.ScriptingEnvironment;
import org.camunda.bpm.engine.impl.telemetry.TelemetryRegistry;
import org.camunda.bpm.engine.impl.telemetry.dto.Data;
import org.camunda.bpm.engine.impl.telemetry.dto.Database;
import org.camunda.bpm.engine.impl.telemetry.dto.Internals;
import org.camunda.bpm.engine.impl.telemetry.dto.Jdk;
import org.camunda.bpm.engine.impl.telemetry.dto.Product;
import org.camunda.bpm.engine.impl.telemetry.reporter.TelemetryReporter;
import org.camunda.bpm.engine.impl.util.IoUtil;
import org.camunda.bpm.engine.impl.util.ParseUtil;
import org.camunda.bpm.engine.impl.util.ProcessEngineDetails;
import org.camunda.bpm.engine.impl.util.ReflectUtil;
import org.camunda.bpm.engine.impl.variable.ValueTypeResolverImpl;
import org.camunda.bpm.engine.impl.variable.serializer.BooleanValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.ByteArrayValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.DateValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.DefaultVariableSerializers;
import org.camunda.bpm.engine.impl.variable.serializer.DoubleValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.FileValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.IntegerValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.JavaObjectSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.LongValueSerlializer;
import org.camunda.bpm.engine.impl.variable.serializer.NullValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.ShortValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.StringValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.TypedValueSerializer;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializerFactory;
import org.camunda.bpm.engine.impl.variable.serializer.VariableSerializers;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSession;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.EntityManagerSessionFactory;
import org.camunda.bpm.engine.impl.variable.serializer.jpa.JPAVariableSerializer;
import org.camunda.bpm.engine.management.Metrics;
import org.camunda.bpm.engine.repository.CaseDefinition;
import org.camunda.bpm.engine.repository.DecisionDefinition;
import org.camunda.bpm.engine.repository.DecisionRequirementsDefinition;
import org.camunda.bpm.engine.repository.DeploymentBuilder;
import org.camunda.bpm.engine.repository.DeploymentHandlerFactory;
import org.camunda.bpm.engine.runtime.Incident;
import org.camunda.bpm.engine.runtime.WhitelistingDeserializationTypeValidator;
import org.camunda.bpm.engine.task.TaskQuery;
import org.camunda.bpm.engine.test.mock.MocksResolverFactory;
import org.camunda.bpm.engine.variable.Variables;
import org.camunda.bpm.engine.variable.type.ValueType;
import org.camunda.connect.Connectors;
import org.camunda.connect.spi.Connector;
import org.camunda.connect.spi.ConnectorRequest;
/**
* @author Tom Baeyens
*/
public abstract class ProcessEngineConfigurationImpl extends ProcessEngineConfiguration {
protected final static ConfigurationLogger LOG = ConfigurationLogger.CONFIG_LOGGER;
public static final String DB_SCHEMA_UPDATE_CREATE = "create";
public static final String DB_SCHEMA_UPDATE_DROP_CREATE = "drop-create";
public static final int HISTORYLEVEL_NONE = HistoryLevel.HISTORY_LEVEL_NONE.getId();
public static final int HISTORYLEVEL_ACTIVITY = HistoryLevel.HISTORY_LEVEL_ACTIVITY.getId();
public static final int HISTORYLEVEL_AUDIT = HistoryLevel.HISTORY_LEVEL_AUDIT.getId();
public static final int HISTORYLEVEL_FULL = HistoryLevel.HISTORY_LEVEL_FULL.getId();
public static final String DEFAULT_WS_SYNC_FACTORY = "org.camunda.bpm.engine.impl.webservice.CxfWebServiceClientFactory";
public static final String DEFAULT_MYBATIS_MAPPING_FILE = "org/camunda/bpm/engine/impl/mapping/mappings.xml";
public static final int DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES = 3;
public static final int DEFAULT_INVOCATIONS_PER_BATCH_JOB = 1;
protected static final String PRODUCT_NAME = "Camunda BPM Runtime";
public static SqlSessionFactory cachedSqlSessionFactory;
// SERVICES /////////////////////////////////////////////////////////////////
protected RepositoryService repositoryService = new RepositoryServiceImpl();
protected RuntimeService runtimeService = new RuntimeServiceImpl();
protected HistoryService historyService = new HistoryServiceImpl();
protected IdentityService identityService = new IdentityServiceImpl();
protected TaskService taskService = new TaskServiceImpl();
protected FormService formService = new FormServiceImpl();
protected ManagementService managementService = new ManagementServiceImpl();
protected AuthorizationService authorizationService = new AuthorizationServiceImpl();
protected CaseService caseService = new CaseServiceImpl();
protected FilterService filterService = new FilterServiceImpl();
protected ExternalTaskService externalTaskService = new ExternalTaskServiceImpl();
protected DecisionService decisionService = new DecisionServiceImpl();
protected OptimizeService optimizeService = new OptimizeService();
// COMMAND EXECUTORS ////////////////////////////////////////////////////////
// Command executor and interceptor stack
/**
* the configurable list which will be {@link #initInterceptorChain(java.util.List) processed} to build the {@link #commandExecutorTxRequired}
*/
protected List<CommandInterceptor> customPreCommandInterceptorsTxRequired;
protected List<CommandInterceptor> customPostCommandInterceptorsTxRequired;
protected List<CommandInterceptor> commandInterceptorsTxRequired;
/**
* this will be initialized during the configurationComplete()
*/
protected CommandExecutor commandExecutorTxRequired;
/**
* the configurable list which will be {@link #initInterceptorChain(List) processed} to build the {@link #commandExecutorTxRequiresNew}
*/
protected List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew;
protected List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew;
protected List<CommandInterceptor> commandInterceptorsTxRequiresNew;
/**
* this will be initialized during the configurationComplete()
*/
protected CommandExecutor commandExecutorTxRequiresNew;
/**
* Separate command executor to be used for db schema operations. Must always use NON-JTA transactions
*/
protected CommandExecutor commandExecutorSchemaOperations;
/**
 * Allows specific commands to be retried when using CockroachDB. This is necessary because
 * OptimisticLockingExceptions can't be handled on CockroachDB and transactions must be rolled back.
* The commands where CockroachDB retries are possible are:
*
* <ul>
* <li>BootstrapEngineCommand</li>
* <li>AcquireJobsCmd</li>
* <li>DeployCmd</li>
* <li>FetchExternalTasksCmd</li>
* <li>HistoryCleanupCmd</li>
* <li>HistoryLevelSetupCommand</li>
* </ul>
*/
protected int commandRetries = 0;
// SESSION FACTORIES ////////////////////////////////////////////////////////
protected List<SessionFactory> customSessionFactories;
protected DbSqlSessionFactory dbSqlSessionFactory;
protected Map<Class<?>, SessionFactory> sessionFactories;
// DEPLOYERS ////////////////////////////////////////////////////////////////
protected List<Deployer> customPreDeployers;
protected List<Deployer> customPostDeployers;
protected List<Deployer> deployers;
protected DeploymentCache deploymentCache;
// CACHE ////////////////////////////////////////////////////////////////////
protected CacheFactory cacheFactory;
protected int cacheCapacity = 1000;
protected boolean enableFetchProcessDefinitionDescription = true;
// JOB EXECUTOR /////////////////////////////////////////////////////////////
protected List<JobHandler> customJobHandlers;
protected Map<String, JobHandler> jobHandlers;
protected JobExecutor jobExecutor;
protected PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider;
// EXTERNAL TASK /////////////////////////////////////////////////////////////
protected PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider;
// MYBATIS SQL SESSION FACTORY //////////////////////////////////////////////
protected SqlSessionFactory sqlSessionFactory;
protected TransactionFactory transactionFactory;
// ID GENERATOR /////////////////////////////////////////////////////////////
protected IdGenerator idGenerator;
protected DataSource idGeneratorDataSource;
protected String idGeneratorDataSourceJndiName;
// INCIDENT HANDLER /////////////////////////////////////////////////////////
protected Map<String, IncidentHandler> incidentHandlers;
protected List<IncidentHandler> customIncidentHandlers;
// BATCH ////////////////////////////////////////////////////////////////////
protected Map<String, BatchJobHandler<?>> batchHandlers;
protected List<BatchJobHandler<?>> customBatchJobHandlers;
/**
* Number of jobs created by a batch seed job invocation
*/
protected int batchJobsPerSeed = 100;
/**
* Number of invocations executed by a single batch job
*/
protected int invocationsPerBatchJob = DEFAULT_INVOCATIONS_PER_BATCH_JOB;
/**
* Map to set an individual value for each batch type to
* control the invocations per batch job. Unless specified
 * in this map, the value of 'invocationsPerBatchJob' is used.
*/
protected Map<String, Integer> invocationsPerBatchJobByBatchType;
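// A minimal configuration sketch (hypothetical batch type and value; assumes the usual
// setter for this field exists on the concrete configuration class):
//   Map<String, Integer> invocations = new HashMap<>();
//   invocations.put("instance-migration", 5);
//   engineConfiguration.setInvocationsPerBatchJobByBatchType(invocations);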
/**
* seconds to wait between polling for batch completion
*/
protected int batchPollTime = 30;
/**
* default priority for batch jobs
*/
protected long batchJobPriority = DefaultJobPriorityProvider.DEFAULT_PRIORITY;
// OTHER ////////////////////////////////////////////////////////////////////
protected List<FormEngine> customFormEngines;
protected Map<String, FormEngine> formEngines;
protected List<AbstractFormFieldType> customFormTypes;
protected FormTypes formTypes;
protected FormValidators formValidators;
protected Map<String, Class<? extends FormFieldValidator>> customFormFieldValidators;
protected List<TypedValueSerializer> customPreVariableSerializers;
protected List<TypedValueSerializer> customPostVariableSerializers;
protected VariableSerializers variableSerializers;
protected VariableSerializerFactory fallbackSerializerFactory;
protected String defaultSerializationFormat = Variables.SerializationDataFormats.JAVA.getName();
protected boolean javaSerializationFormatEnabled = false;
protected String defaultCharsetName = null;
protected Charset defaultCharset = null;
protected ExpressionManager expressionManager;
protected ScriptingEngines scriptingEngines;
protected List<ResolverFactory> resolverFactories;
protected ScriptingEnvironment scriptingEnvironment;
protected List<ScriptEnvResolver> scriptEnvResolvers;
protected ScriptFactory scriptFactory;
protected boolean autoStoreScriptVariables = false;
protected boolean enableScriptCompilation = true;
protected boolean enableScriptEngineCaching = true;
protected boolean enableFetchScriptEngineFromProcessApplication = true;
/**
* When set to false, the following behavior changes:
* <ul>
* <li>The automated schema maintenance (creating and dropping tables, see property <code>databaseSchemaUpdate</code>)
* does not cover the tables required for CMMN execution.</li>
* <li>CMMN resources are not deployed as {@link CaseDefinition} to the engine.</li>
* <li>Tasks from CMMN cases are not returned by the {@link TaskQuery}.</li>
* </ul>
*/
protected boolean cmmnEnabled = true;
/**
* When set to false, the following behavior changes:
* <ul>
* <li>The automated schema maintenance (creating and dropping tables, see property <code>databaseSchemaUpdate</code>)
* does not cover the tables required for DMN execution.</li>
* <li>DMN resources are not deployed as {@link DecisionDefinition} or
* {@link DecisionRequirementsDefinition} to the engine.</li>
* </ul>
*/
protected boolean dmnEnabled = true;
/**
* When set to <code>false</code>, the following behavior changes:
* <ul>
* <li>Standalone tasks can no longer be created via API.</li>
* <li>Standalone tasks are not returned by the TaskQuery.</li>
* </ul>
*/
protected boolean standaloneTasksEnabled = true;
protected boolean enableGracefulDegradationOnContextSwitchFailure = true;
protected BusinessCalendarManager businessCalendarManager;
protected String wsSyncFactoryClassName = DEFAULT_WS_SYNC_FACTORY;
protected CommandContextFactory commandContextFactory;
protected TransactionContextFactory transactionContextFactory;
protected BpmnParseFactory bpmnParseFactory;
// cmmn
protected CmmnTransformFactory cmmnTransformFactory;
protected DefaultCmmnElementHandlerRegistry cmmnElementHandlerRegistry;
// dmn
protected DefaultDmnEngineConfiguration dmnEngineConfiguration;
protected DmnEngine dmnEngine;
/**
* a list of DMN FEEL custom function providers
*/
protected List<FeelCustomFunctionProvider> dmnFeelCustomFunctionProviders;
/**
* Enable DMN FEEL legacy behavior
*/
protected boolean dmnFeelEnableLegacyBehavior = false;
protected HistoryLevel historyLevel;
/**
* a list of supported history levels
*/
protected List<HistoryLevel> historyLevels;
/**
* a list of supported custom history levels
*/
protected List<HistoryLevel> customHistoryLevels;
protected List<BpmnParseListener> preParseListeners;
protected List<BpmnParseListener> postParseListeners;
protected List<CmmnTransformListener> customPreCmmnTransformListeners;
protected List<CmmnTransformListener> customPostCmmnTransformListeners;
protected Map<Object, Object> beans;
protected boolean isDbIdentityUsed = true;
protected boolean isDbHistoryUsed = true;
protected DelegateInterceptor delegateInterceptor;
protected CommandInterceptor actualCommandExecutor;
protected RejectedJobsHandler customRejectedJobsHandler;
protected Map<String, EventHandler> eventHandlers;
protected List<EventHandler> customEventHandlers;
protected FailedJobCommandFactory failedJobCommandFactory;
protected String databaseTablePrefix = "";
/**
* In some situations you want to set the schema to use for table checks / generation if the database metadata
* doesn't return that correctly, see https://jira.codehaus.org/browse/ACT-1220,
* https://jira.codehaus.org/browse/ACT-1062
*/
protected String databaseSchema = null;
protected boolean isCreateDiagramOnDeploy = false;
protected ProcessApplicationManager processApplicationManager;
protected CorrelationHandler correlationHandler;
protected ConditionHandler conditionHandler;
/**
* session factory to be used for obtaining identity provider sessions
*/
protected SessionFactory identityProviderSessionFactory;
protected PasswordEncryptor passwordEncryptor;
protected List<PasswordEncryptor> customPasswordChecker;
protected PasswordManager passwordManager;
protected SaltGenerator saltGenerator;
protected Set<String> registeredDeployments;
protected DeploymentHandlerFactory deploymentHandlerFactory;
protected ResourceAuthorizationProvider resourceAuthorizationProvider;
protected List<ProcessEnginePlugin> processEnginePlugins = new ArrayList<>();
protected HistoryEventProducer historyEventProducer;
protected CmmnHistoryEventProducer cmmnHistoryEventProducer;
protected DmnHistoryEventProducer dmnHistoryEventProducer;
/**
* As an instance of {@link org.camunda.bpm.engine.impl.history.handler.CompositeHistoryEventHandler}
* it contains all the provided history event handlers that process history events.
*/
protected HistoryEventHandler historyEventHandler;
/**
* Allows users to add additional {@link HistoryEventHandler}
* instances to process history events.
*/
protected List<HistoryEventHandler> customHistoryEventHandlers = new ArrayList<>();
/**
* If true, the default {@link DbHistoryEventHandler} will be included in the list
* of history event handlers.
*/
protected boolean enableDefaultDbHistoryEventHandler = true;
protected PermissionProvider permissionProvider;
protected boolean isExecutionTreePrefetchEnabled = true;
/**
* If true the process engine will attempt to acquire an exclusive lock before
* creating a deployment.
*/
protected boolean isDeploymentLockUsed = true;
/**
* If true then several deployments will be processed strictly sequentially. When false they may be processed in parallel.
*/
protected boolean isDeploymentSynchronized = true;
/**
* Allows setting whether the process engine should try reusing the first level entity cache.
 * The default setting is false; enabling it improves the performance of asynchronous continuations.
*/
protected boolean isDbEntityCacheReuseEnabled = false;
protected boolean isInvokeCustomVariableListeners = true;
/**
* The process engine created by this configuration.
*/
protected ProcessEngineImpl processEngine;
/**
* used to create instances for listeners, JavaDelegates, etc
*/
protected ArtifactFactory artifactFactory;
protected DbEntityCacheKeyMapping dbEntityCacheKeyMapping = DbEntityCacheKeyMapping.defaultEntityCacheKeyMapping();
/**
* the metrics registry
*/
protected MetricsRegistry metricsRegistry;
protected DbMetricsReporter dbMetricsReporter;
protected boolean isMetricsEnabled = true;
protected boolean isDbMetricsReporterActivate = true;
protected MetricsReporterIdProvider metricsReporterIdProvider;
protected boolean isTaskMetricsEnabled = true;
/**
* the historic job log host name
*/
protected String hostname;
protected HostnameProvider hostnameProvider;
/**
* handling of expressions submitted via API; can be used as guards against remote code execution
*/
protected boolean enableExpressionsInAdhocQueries = false;
protected boolean enableExpressionsInStoredQueries = true;
/**
* If false, disables XML eXternal Entity (XXE) Processing. This provides protection against XXE Processing attacks.
*/
protected boolean enableXxeProcessing = false;
/**
* If true, user operation log entries are only written if there is an
* authenticated user present in the context. If false, user operation log
* entries are written regardless of authentication state.
*/
protected boolean restrictUserOperationLogToAuthenticatedUsers = true;
protected boolean disableStrictCallActivityValidation = false;
protected boolean isBpmnStacktraceVerbose = false;
protected boolean forceCloseMybatisConnectionPool = true;
protected TenantIdProvider tenantIdProvider = null;
protected List<CommandChecker> commandCheckers = null;
protected List<String> adminGroups;
protected List<String> adminUsers;
// Migration
protected MigrationActivityMatcher migrationActivityMatcher;
protected List<MigrationActivityValidator> customPreMigrationActivityValidators;
protected List<MigrationActivityValidator> customPostMigrationActivityValidators;
protected MigrationInstructionGenerator migrationInstructionGenerator;
protected List<MigrationInstructionValidator> customPreMigrationInstructionValidators;
protected List<MigrationInstructionValidator> customPostMigrationInstructionValidators;
protected List<MigrationInstructionValidator> migrationInstructionValidators;
protected List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators;
protected List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators;
protected List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators;
protected List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators;
protected List<MigratingCompensationInstanceValidator> migratingCompensationInstanceValidators;
// Default user permission for task
protected Permission defaultUserPermissionForTask;
/**
* Historic instance permissions are disabled by default
*/
protected boolean enableHistoricInstancePermissions = false;
protected boolean isUseSharedSqlSessionFactory = false;
//History cleanup configuration
protected String historyCleanupBatchWindowStartTime;
protected String historyCleanupBatchWindowEndTime = "00:00";
protected Date historyCleanupBatchWindowStartTimeAsDate;
protected Date historyCleanupBatchWindowEndTimeAsDate;
protected Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows = new HashMap<>();
//shortcuts for batch windows configuration available to be configured from XML
protected String mondayHistoryCleanupBatchWindowStartTime;
protected String mondayHistoryCleanupBatchWindowEndTime;
protected String tuesdayHistoryCleanupBatchWindowStartTime;
protected String tuesdayHistoryCleanupBatchWindowEndTime;
protected String wednesdayHistoryCleanupBatchWindowStartTime;
protected String wednesdayHistoryCleanupBatchWindowEndTime;
protected String thursdayHistoryCleanupBatchWindowStartTime;
protected String thursdayHistoryCleanupBatchWindowEndTime;
protected String fridayHistoryCleanupBatchWindowStartTime;
protected String fridayHistoryCleanupBatchWindowEndTime;
protected String saturdayHistoryCleanupBatchWindowStartTime;
protected String saturdayHistoryCleanupBatchWindowEndTime;
protected String sundayHistoryCleanupBatchWindowStartTime;
protected String sundayHistoryCleanupBatchWindowEndTime;
protected int historyCleanupDegreeOfParallelism = 1;
protected String historyTimeToLive;
protected String batchOperationHistoryTimeToLive;
protected Map<String, String> batchOperationsForHistoryCleanup;
protected Map<String, Integer> parsedBatchOperationsForHistoryCleanup;
/**
* Time to live for historic job log entries written by history cleanup jobs.
 * Must be an ISO-8601 conformant String specifying only a number of days. Only
* works in conjunction with removal-time-based cleanup strategy.
*/
protected String historyCleanupJobLogTimeToLive;
protected String taskMetricsTimeToLive;
protected Integer parsedTaskMetricsTimeToLive;
protected BatchWindowManager batchWindowManager = new DefaultBatchWindowManager();
protected HistoryRemovalTimeProvider historyRemovalTimeProvider;
protected String historyRemovalTimeStrategy;
protected String historyCleanupStrategy;
/**
* Size of batch in which history cleanup data will be deleted. {@link HistoryCleanupBatch#MAX_BATCH_SIZE} must be respected.
*/
private int historyCleanupBatchSize = 500;
/**
* Indicates the minimal amount of data to trigger the history cleanup.
*/
private int historyCleanupBatchThreshold = 10;
private boolean historyCleanupMetricsEnabled = true;
/**
* Controls whether engine participates in history cleanup or not.
*/
protected boolean historyCleanupEnabled = true;
private int failedJobListenerMaxRetries = DEFAULT_FAILED_JOB_LISTENER_MAX_RETRIES;
protected String failedJobRetryTimeCycle;
// login attempts ///////////////////////////////////////////////////////
protected int loginMaxAttempts = 10;
protected int loginDelayFactor = 2;
protected int loginDelayMaxTime = 60;
protected int loginDelayBase = 3;
// max results limit
protected int queryMaxResultsLimit = Integer.MAX_VALUE;
// logging context property names (with default values)
protected String loggingContextActivityId = "activityId";
protected String loggingContextApplicationName = "applicationName";
protected String loggingContextBusinessKey; // default == null => disabled by default
protected String loggingContextProcessDefinitionId = "processDefinitionId";
protected String loggingContextProcessInstanceId = "processInstanceId";
protected String loggingContextTenantId = "tenantId";
// telemetry ///////////////////////////////////////////////////////
/**
 * Sets the initial value of the telemetry configuration property, but only if
 * telemetry has never been enabled or disabled before.
* Subsequent changes can be done only via the
* {@link ManagementService#toggleTelemetry(boolean) Telemetry} API in {@link ManagementService}
*/
protected Boolean initializeTelemetry = null;
/** The endpoint to which telemetry is sent */
protected String telemetryEndpoint = "https://api.telemetry.camunda.cloud/pings";
/** The number of times the telemetry request is retried in case it fails */
protected int telemetryRequestRetries = 2;
protected TelemetryReporter telemetryReporter;
/** Determines if the telemetry reporter thread runs. For telemetry to be sent,
* this flag must be set to <code>true</code> and telemetry must be enabled via API
 * (see {@link ManagementService#toggleTelemetry(boolean)}). */
protected boolean isTelemetryReporterActivate = true;
/** http client used for sending telemetry */
protected Connector<? extends ConnectorRequest<?>> telemetryHttpConnector;
/** default: once every 24 hours */
protected long telemetryReportingPeriod = 24 * 60 * 60;
protected Data telemetryData;
/** the connection and socket timeout configuration of the telemetry request
* in milliseconds
* default: 15 seconds */
protected int telemetryRequestTimeout = 15 * 1000;
// buildProcessEngine ///////////////////////////////////////////////////////
@Override
public ProcessEngine buildProcessEngine() {
init();
processEngine = new ProcessEngineImpl(this);
invokePostProcessEngineBuild(processEngine);
return processEngine;
}
// init /////////////////////////////////////////////////////////////////////
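// init() is invoked from buildProcessEngine(); it runs the registered plugins' preInit()
// callbacks, initializes all engine components in dependency order, and finishes with the
// plugins' postInit() callbacks.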
protected void init() {
invokePreInit();
initDefaultCharset();
initHistoryLevel();
initHistoryEventProducer();
initCmmnHistoryEventProducer();
initDmnHistoryEventProducer();
initHistoryEventHandler();
initExpressionManager();
initBeans();
initArtifactFactory();
initFormEngines();
initFormTypes();
initFormFieldValidators();
initScripting();
initDmnEngine();
initBusinessCalendarManager();
initCommandContextFactory();
initTransactionContextFactory();
// Database type needs to be detected before CommandExecutors are initialized
initDataSource();
initCommandExecutors();
initServices();
initIdGenerator();
initFailedJobCommandFactory();
initDeployers();
initJobProvider();
initExternalTaskPriorityProvider();
initBatchHandlers();
initJobExecutor();
initTransactionFactory();
initSqlSessionFactory();
initIdentityProviderSessionFactory();
initSessionFactories();
initValueTypeResolver();
initTypeValidator();
initSerialization();
initJpa();
initDelegateInterceptor();
initEventHandlers();
initProcessApplicationManager();
initCorrelationHandler();
initConditionHandler();
initIncidentHandlers();
initPasswordDigest();
initDeploymentRegistration();
initDeploymentHandlerFactory();
initResourceAuthorizationProvider();
initPermissionProvider();
initHostName();
initMetrics();
initTelemetry();
initMigration();
initCommandCheckers();
initDefaultUserPermissionForTask();
initHistoryRemovalTime();
initHistoryCleanup();
initInvocationsPerBatchJobByBatchType();
initAdminUser();
initAdminGroups();
initPasswordPolicy();
invokePostInit();
}
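// Ensures a deserialization type validator is present and, when it supports whitelisting,
// applies the configured allowed classes and packages to it.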
protected void initTypeValidator() {
if (deserializationTypeValidator == null) {
deserializationTypeValidator = new DefaultDeserializationTypeValidator();
}
if (deserializationTypeValidator instanceof WhitelistingDeserializationTypeValidator) {
WhitelistingDeserializationTypeValidator validator = (WhitelistingDeserializationTypeValidator) deserializationTypeValidator;
validator.setAllowedClasses(deserializationAllowedClasses);
validator.setAllowedPackages(deserializationAllowedPackages);
}
}
public void initHistoryRemovalTime() {
initHistoryRemovalTimeProvider();
initHistoryRemovalTimeStrategy();
}
public void initHistoryRemovalTimeStrategy() {
if (historyRemovalTimeStrategy == null) {
historyRemovalTimeStrategy = HISTORY_REMOVAL_TIME_STRATEGY_END;
}
if (!HISTORY_REMOVAL_TIME_STRATEGY_START.equals(historyRemovalTimeStrategy) &&
!HISTORY_REMOVAL_TIME_STRATEGY_END.equals(historyRemovalTimeStrategy) &&
!HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
String.format("history removal time strategy must be set to '%s', '%s' or '%s'", HISTORY_REMOVAL_TIME_STRATEGY_START, HISTORY_REMOVAL_TIME_STRATEGY_END, HISTORY_REMOVAL_TIME_STRATEGY_NONE));
}
}
public void initHistoryRemovalTimeProvider() {
if (historyRemovalTimeProvider == null) {
historyRemovalTimeProvider = new DefaultHistoryRemovalTimeProvider();
}
}
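// Validates and applies the history cleanup configuration: cleanup strategy, degree of
// parallelism, global and per-weekday batch windows, batch size and threshold, and the
// various time-to-live settings.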
public void initHistoryCleanup() {
initHistoryCleanupStrategy();
//validate number of threads
if (historyCleanupDegreeOfParallelism < 1 || historyCleanupDegreeOfParallelism > MAX_THREADS_NUMBER) {
throw LOG.invalidPropertyValue("historyCleanupDegreeOfParallelism", String.valueOf(historyCleanupDegreeOfParallelism),
String.format("value for number of threads for history cleanup should be between 1 and %s", HistoryCleanupCmd.MAX_THREADS_NUMBER));
}
if (historyCleanupBatchWindowStartTime != null) {
initHistoryCleanupBatchWindowStartTime();
}
if (historyCleanupBatchWindowEndTime != null) {
initHistoryCleanupBatchWindowEndTime();
}
initHistoryCleanupBatchWindowsMap();
if (historyCleanupBatchSize > HistoryCleanupHandler.MAX_BATCH_SIZE || historyCleanupBatchSize <= 0) {
throw LOG.invalidPropertyValue("historyCleanupBatchSize", String.valueOf(historyCleanupBatchSize),
String.format("value for batch size should be between 1 and %s", HistoryCleanupHandler.MAX_BATCH_SIZE));
}
if (historyCleanupBatchThreshold < 0) {
throw LOG.invalidPropertyValue("historyCleanupBatchThreshold", String.valueOf(historyCleanupBatchThreshold),
"History cleanup batch threshold cannot be negative.");
}
initHistoryTimeToLive();
initBatchOperationsHistoryTimeToLive();
initHistoryCleanupJobLogTimeToLive();
initTaskMetricsTimeToLive();
}
protected void initHistoryCleanupStrategy() {
if (historyCleanupStrategy == null) {
historyCleanupStrategy = HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED;
}
if (!HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy) &&
!HISTORY_CLEANUP_STRATEGY_END_TIME_BASED.equals(historyCleanupStrategy)) {
throw LOG.invalidPropertyValue("historyCleanupStrategy", String.valueOf(historyCleanupStrategy),
String.format("history cleanup strategy must be either set to '%s' or '%s'", HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED, HISTORY_CLEANUP_STRATEGY_END_TIME_BASED));
}
if (HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED.equals(historyCleanupStrategy) &&
HISTORY_REMOVAL_TIME_STRATEGY_NONE.equals(historyRemovalTimeStrategy)) {
throw LOG.invalidPropertyValue("historyRemovalTimeStrategy", String.valueOf(historyRemovalTimeStrategy),
String.format("history removal time strategy cannot be set to '%s' in conjunction with '%s' history cleanup strategy", HISTORY_REMOVAL_TIME_STRATEGY_NONE, HISTORY_CLEANUP_STRATEGY_REMOVAL_TIME_BASED));
}
}
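// Translates the per-weekday shortcut properties (e.g. mondayHistoryCleanupBatchWindowStartTime)
// into the historyCleanupBatchWindows map, keyed by java.util.Calendar day constants.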
private void initHistoryCleanupBatchWindowsMap() {
if (mondayHistoryCleanupBatchWindowStartTime != null || mondayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.MONDAY, new BatchWindowConfiguration(mondayHistoryCleanupBatchWindowStartTime, mondayHistoryCleanupBatchWindowEndTime));
}
if (tuesdayHistoryCleanupBatchWindowStartTime != null || tuesdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.TUESDAY, new BatchWindowConfiguration(tuesdayHistoryCleanupBatchWindowStartTime, tuesdayHistoryCleanupBatchWindowEndTime));
}
if (wednesdayHistoryCleanupBatchWindowStartTime != null || wednesdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.WEDNESDAY, new BatchWindowConfiguration(wednesdayHistoryCleanupBatchWindowStartTime, wednesdayHistoryCleanupBatchWindowEndTime));
}
if (thursdayHistoryCleanupBatchWindowStartTime != null || thursdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.THURSDAY, new BatchWindowConfiguration(thursdayHistoryCleanupBatchWindowStartTime, thursdayHistoryCleanupBatchWindowEndTime));
}
if (fridayHistoryCleanupBatchWindowStartTime != null || fridayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.FRIDAY, new BatchWindowConfiguration(fridayHistoryCleanupBatchWindowStartTime, fridayHistoryCleanupBatchWindowEndTime));
}
if (saturdayHistoryCleanupBatchWindowStartTime != null || saturdayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.SATURDAY, new BatchWindowConfiguration(saturdayHistoryCleanupBatchWindowStartTime, saturdayHistoryCleanupBatchWindowEndTime));
}
if (sundayHistoryCleanupBatchWindowStartTime != null || sundayHistoryCleanupBatchWindowEndTime != null) {
historyCleanupBatchWindows.put(Calendar.SUNDAY, new BatchWindowConfiguration(sundayHistoryCleanupBatchWindowStartTime, sundayHistoryCleanupBatchWindowEndTime));
}
}
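// Initializes the map if absent; otherwise reports, via LOG.invalidBatchTypeForInvocationsPerBatchJob,
// every configured batch type that has no registered batch handler (built-in or custom).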
protected void initInvocationsPerBatchJobByBatchType() {
if (invocationsPerBatchJobByBatchType == null) {
invocationsPerBatchJobByBatchType = new HashMap<>();
} else {
Set<String> batchTypes = invocationsPerBatchJobByBatchType.keySet();
batchTypes.stream()
// batchHandlers contains custom & built-in batch handlers
.filter(batchType -> !batchHandlers.containsKey(batchType))
.forEach(LOG::invalidBatchTypeForInvocationsPerBatchJob);
}
}
protected void initHistoryTimeToLive() {
try {
ParseUtil.parseHistoryTimeToLive(historyTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("historyTimeToLive", historyTimeToLive, e);
}
}
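// Validates the global batchOperationHistoryTimeToLive and any per-operation overrides,
// applies the global value to operations without an explicit override, and pre-parses all
// values into parsedBatchOperationsForHistoryCleanup.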
protected void initBatchOperationsHistoryTimeToLive() {
try {
ParseUtil.parseHistoryTimeToLive(batchOperationHistoryTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("batchOperationHistoryTimeToLive", batchOperationHistoryTimeToLive, e);
}
if (batchOperationsForHistoryCleanup == null) {
batchOperationsForHistoryCleanup = new HashMap<>();
} else {
for (String batchOperation : batchOperationsForHistoryCleanup.keySet()) {
String timeToLive = batchOperationsForHistoryCleanup.get(batchOperation);
if (!batchHandlers.containsKey(batchOperation)) {
LOG.invalidBatchOperation(batchOperation, timeToLive);
}
try {
ParseUtil.parseHistoryTimeToLive(timeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("history time to live for " + batchOperation + " batch operations", timeToLive, e);
}
}
}
if (batchHandlers != null && batchOperationHistoryTimeToLive != null) {
for (String batchOperation : batchHandlers.keySet()) {
if (!batchOperationsForHistoryCleanup.containsKey(batchOperation)) {
batchOperationsForHistoryCleanup.put(batchOperation, batchOperationHistoryTimeToLive);
}
}
}
parsedBatchOperationsForHistoryCleanup = new HashMap<>();
if (batchOperationsForHistoryCleanup != null) {
for (String operation : batchOperationsForHistoryCleanup.keySet()) {
Integer historyTimeToLive = ParseUtil.parseHistoryTimeToLive(batchOperationsForHistoryCleanup.get(operation));
parsedBatchOperationsForHistoryCleanup.put(operation, historyTimeToLive);
}
}
}
private void initHistoryCleanupBatchWindowEndTime() {
try {
historyCleanupBatchWindowEndTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowEndTime);
} catch (ParseException e) {
throw LOG.invalidPropertyValue("historyCleanupBatchWindowEndTime", historyCleanupBatchWindowEndTime);
}
}
private void initHistoryCleanupBatchWindowStartTime() {
try {
historyCleanupBatchWindowStartTimeAsDate = HistoryCleanupHelper.parseTimeConfiguration(historyCleanupBatchWindowStartTime);
} catch (ParseException e) {
throw LOG.invalidPropertyValue("historyCleanupBatchWindowStartTime", historyCleanupBatchWindowStartTime);
}
}
protected void initHistoryCleanupJobLogTimeToLive() {
try {
ParseUtil.parseHistoryTimeToLive(historyCleanupJobLogTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("historyCleanupJobLogTimeToLive", historyCleanupJobLogTimeToLive, e);
}
}
protected void initTaskMetricsTimeToLive() {
try {
parsedTaskMetricsTimeToLive = ParseUtil.parseHistoryTimeToLive(taskMetricsTimeToLive);
} catch (Exception e) {
throw LOG.invalidPropertyValue("taskMetricsTimeToLive", taskMetricsTimeToLive, e);
}
}
protected void invokePreInit() {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
LOG.pluginActivated(plugin.toString(), getProcessEngineName());
plugin.preInit(this);
}
}
protected void invokePostInit() {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
plugin.postInit(this);
}
}
protected void invokePostProcessEngineBuild(ProcessEngine engine) {
for (ProcessEnginePlugin plugin : processEnginePlugins) {
plugin.postProcessEngineBuild(engine);
}
}
// failedJobCommandFactory ////////////////////////////////////////////////////////
protected void initFailedJobCommandFactory() {
if (failedJobCommandFactory == null) {
failedJobCommandFactory = new DefaultFailedJobCommandFactory();
}
if (postParseListeners == null) {
postParseListeners = new ArrayList<>();
}
postParseListeners.add(new DefaultFailedJobParseListener());
}
// incident handlers /////////////////////////////////////////////////////////////
protected void initIncidentHandlers() {
if (incidentHandlers == null) {
incidentHandlers = new HashMap<>();
DefaultIncidentHandler failedJobIncidentHandler = new DefaultIncidentHandler(Incident.FAILED_JOB_HANDLER_TYPE);
incidentHandlers.put(failedJobIncidentHandler.getIncidentHandlerType(), failedJobIncidentHandler);
DefaultIncidentHandler failedExternalTaskIncidentHandler = new DefaultIncidentHandler(Incident.EXTERNAL_TASK_HANDLER_TYPE);
incidentHandlers.put(failedExternalTaskIncidentHandler.getIncidentHandlerType(), failedExternalTaskIncidentHandler);
}
if (customIncidentHandlers != null) {
for (IncidentHandler incidentHandler : customIncidentHandlers) {
incidentHandlers.put(incidentHandler.getIncidentHandlerType(), incidentHandler);
}
}
}
// batch ///////////////////////////////////////////////////////////////////////
protected void initBatchHandlers() {
if (batchHandlers == null) {
batchHandlers = new HashMap<>();
MigrationBatchJobHandler migrationHandler = new MigrationBatchJobHandler();
batchHandlers.put(migrationHandler.getType(), migrationHandler);
ModificationBatchJobHandler modificationHandler = new ModificationBatchJobHandler();
batchHandlers.put(modificationHandler.getType(), modificationHandler);
DeleteProcessInstancesJobHandler deleteProcessJobHandler = new DeleteProcessInstancesJobHandler();
batchHandlers.put(deleteProcessJobHandler.getType(), deleteProcessJobHandler);
DeleteHistoricProcessInstancesJobHandler deleteHistoricProcessInstancesJobHandler = new DeleteHistoricProcessInstancesJobHandler();
batchHandlers.put(deleteHistoricProcessInstancesJobHandler.getType(), deleteHistoricProcessInstancesJobHandler);
SetJobRetriesJobHandler setJobRetriesJobHandler = new SetJobRetriesJobHandler();
batchHandlers.put(setJobRetriesJobHandler.getType(), setJobRetriesJobHandler);
SetExternalTaskRetriesJobHandler setExternalTaskRetriesJobHandler = new SetExternalTaskRetriesJobHandler();
batchHandlers.put(setExternalTaskRetriesJobHandler.getType(), setExternalTaskRetriesJobHandler);
RestartProcessInstancesJobHandler restartProcessInstancesJobHandler = new RestartProcessInstancesJobHandler();
batchHandlers.put(restartProcessInstancesJobHandler.getType(), restartProcessInstancesJobHandler);
UpdateProcessInstancesSuspendStateJobHandler suspendProcessInstancesJobHandler = new UpdateProcessInstancesSuspendStateJobHandler();
batchHandlers.put(suspendProcessInstancesJobHandler.getType(), suspendProcessInstancesJobHandler);
DeleteHistoricDecisionInstancesJobHandler deleteHistoricDecisionInstancesJobHandler = new DeleteHistoricDecisionInstancesJobHandler();
batchHandlers.put(deleteHistoricDecisionInstancesJobHandler.getType(), deleteHistoricDecisionInstancesJobHandler);
ProcessSetRemovalTimeJobHandler processSetRemovalTimeJobHandler = new ProcessSetRemovalTimeJobHandler();
batchHandlers.put(processSetRemovalTimeJobHandler.getType(), processSetRemovalTimeJobHandler);
DecisionSetRemovalTimeJobHandler decisionSetRemovalTimeJobHandler = new DecisionSetRemovalTimeJobHandler();
batchHandlers.put(decisionSetRemovalTimeJobHandler.getType(), decisionSetRemovalTimeJobHandler);
BatchSetRemovalTimeJobHandler batchSetRemovalTimeJobHandler = new BatchSetRemovalTimeJobHandler();
batchHandlers.put(batchSetRemovalTimeJobHandler.getType(), batchSetRemovalTimeJobHandler);
BatchSetVariablesHandler batchSetVariablesHandler = new BatchSetVariablesHandler();
batchHandlers.put(batchSetVariablesHandler.getType(), batchSetVariablesHandler);
}
if (customBatchJobHandlers != null) {
for (BatchJobHandler<?> customBatchJobHandler : customBatchJobHandlers) {
batchHandlers.put(customBatchJobHandler.getType(), customBatchJobHandler);
}
}
}
// command executors ////////////////////////////////////////////////////////
protected abstract Collection<? extends CommandInterceptor> getDefaultCommandInterceptorsTxRequired();
protected abstract Collection<? extends CommandInterceptor> getDefaultCommandInterceptorsTxRequiresNew();
protected void initCommandExecutors() {
initActualCommandExecutor();
initCommandInterceptorsTxRequired();
initCommandExecutorTxRequired();
initCommandInterceptorsTxRequiresNew();
initCommandExecutorTxRequiresNew();
initCommandExecutorDbSchemaOperations();
}
protected void initActualCommandExecutor() {
actualCommandExecutor = new CommandExecutorImpl();
}
protected void initCommandInterceptorsTxRequired() {
if (commandInterceptorsTxRequired == null) {
if (customPreCommandInterceptorsTxRequired != null) {
commandInterceptorsTxRequired = new ArrayList<>(customPreCommandInterceptorsTxRequired);
} else {
commandInterceptorsTxRequired = new ArrayList<>();
}
commandInterceptorsTxRequired.addAll(getDefaultCommandInterceptorsTxRequired());
if (customPostCommandInterceptorsTxRequired != null) {
commandInterceptorsTxRequired.addAll(customPostCommandInterceptorsTxRequired);
}
commandInterceptorsTxRequired.add(actualCommandExecutor);
}
}
protected void initCommandInterceptorsTxRequiresNew() {
if (commandInterceptorsTxRequiresNew == null) {
if (customPreCommandInterceptorsTxRequiresNew != null) {
commandInterceptorsTxRequiresNew = new ArrayList<>(customPreCommandInterceptorsTxRequiresNew);
} else {
commandInterceptorsTxRequiresNew = new ArrayList<>();
}
commandInterceptorsTxRequiresNew.addAll(getDefaultCommandInterceptorsTxRequiresNew());
if (customPostCommandInterceptorsTxRequiresNew != null) {
commandInterceptorsTxRequiresNew.addAll(customPostCommandInterceptorsTxRequiresNew);
}
commandInterceptorsTxRequiresNew.add(actualCommandExecutor);
}
}
protected void initCommandExecutorTxRequired() {
if (commandExecutorTxRequired == null) {
commandExecutorTxRequired = initInterceptorChain(commandInterceptorsTxRequired);
}
}
protected void initCommandExecutorTxRequiresNew() {
if (commandExecutorTxRequiresNew == null) {
commandExecutorTxRequiresNew = initInterceptorChain(commandInterceptorsTxRequiresNew);
}
}
protected void initCommandExecutorDbSchemaOperations() {
if (commandExecutorSchemaOperations == null) {
// In the default case, we use the same command executor for DB schema operations as for runtime operations.
// Configurations that use JTA transactions should override this method and provide a custom command executor
// that uses non-JTA transactions.
commandExecutorSchemaOperations = commandExecutorTxRequired;
}
}
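// Links each command interceptor to its successor and returns the head of the chain, which
// acts as the command executor entry point.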
protected CommandInterceptor initInterceptorChain(List<CommandInterceptor> chain) {
if (chain == null || chain.isEmpty()) {
throw new ProcessEngineException("invalid command interceptor chain configuration: " + chain);
}
for (int i = 0; i < chain.size() - 1; i++) {
chain.get(i).setNext(chain.get(i + 1));
}
return chain.get(0);
}
// services /////////////////////////////////////////////////////////////////
protected void initServices() {
initService(repositoryService);
initService(runtimeService);
initService(historyService);
initService(identityService);
initService(taskService);
initService(formService);
initService(managementService);
initService(authorizationService);
initService(caseService);
initService(filterService);
initService(externalTaskService);
initService(decisionService);
initService(optimizeService);
}
protected void initService(Object service) {
if (service instanceof ServiceImpl) {
((ServiceImpl) service).setCommandExecutor(commandExecutorTxRequired);
}
if (service instanceof RepositoryServiceImpl) {
((RepositoryServiceImpl) service).setDeploymentCharset(getDefaultCharset());
}
}
// DataSource ///////////////////////////////////////////////////////////////
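// A JNDI-bound data source takes precedence; otherwise a MyBatis PooledDataSource is built
// from the jdbc* properties. If no databaseType is configured explicitly, it is detected
// from the database metadata afterwards.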
protected void initDataSource() {
if (dataSource == null) {
if (dataSourceJndiName != null) {
try {
dataSource = (DataSource) new InitialContext().lookup(dataSourceJndiName);
} catch (Exception e) {
throw new ProcessEngineException("couldn't lookup datasource from " + dataSourceJndiName + ": " + e.getMessage(), e);
}
} else if (jdbcUrl != null) {
if ((jdbcDriver == null) || (jdbcUrl == null) || (jdbcUsername == null)) {
throw new ProcessEngineException("DataSource or JDBC properties have to be specified in a process engine configuration");
}
PooledDataSource pooledDataSource =
new PooledDataSource(ReflectUtil.getClassLoader(), jdbcDriver, jdbcUrl, jdbcUsername, jdbcPassword);
if (jdbcMaxActiveConnections > 0) {
pooledDataSource.setPoolMaximumActiveConnections(jdbcMaxActiveConnections);
}
if (jdbcMaxIdleConnections > 0) {
pooledDataSource.setPoolMaximumIdleConnections(jdbcMaxIdleConnections);
}
if (jdbcMaxCheckoutTime > 0) {
pooledDataSource.setPoolMaximumCheckoutTime(jdbcMaxCheckoutTime);
}
if (jdbcMaxWaitTime > 0) {
pooledDataSource.setPoolTimeToWait(jdbcMaxWaitTime);
}
if (jdbcPingEnabled) {
pooledDataSource.setPoolPingEnabled(true);
if (jdbcPingQuery != null) {
pooledDataSource.setPoolPingQuery(jdbcPingQuery);
}
pooledDataSource.setPoolPingConnectionsNotUsedFor(jdbcPingConnectionNotUsedFor);
}
dataSource = pooledDataSource;
}
if (dataSource instanceof PooledDataSource) {
// ACT-233: connection pool of iBatis is not properly initialized if this is not called!
((PooledDataSource) dataSource).forceCloseAll();
}
}
if (databaseType == null) {
initDatabaseType();
}
}
protected static Properties databaseTypeMappings = getDefaultDatabaseTypeMappings();
protected static final String MY_SQL_PRODUCT_NAME = "MySQL";
protected static final String MARIA_DB_PRODUCT_NAME = "MariaDB";
protected static final String POSTGRES_DB_PRODUCT_NAME = "PostgreSQL";
protected static final String CRDB_DB_PRODUCT_NAME = "CockroachDB";
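// Maps the product names reported by JDBC DatabaseMetaData to the internal databaseType
// identifiers used to select database-specific SQL statements.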
protected static Properties getDefaultDatabaseTypeMappings() {
Properties databaseTypeMappings = new Properties();
databaseTypeMappings.setProperty("H2", "h2");
databaseTypeMappings.setProperty(MY_SQL_PRODUCT_NAME, "mysql");
databaseTypeMappings.setProperty(MARIA_DB_PRODUCT_NAME, "mariadb");
databaseTypeMappings.setProperty("Oracle", "oracle");
databaseTypeMappings.setProperty(POSTGRES_DB_PRODUCT_NAME, "postgres");
databaseTypeMappings.setProperty(CRDB_DB_PRODUCT_NAME, "cockroachdb");
databaseTypeMappings.setProperty("Microsoft SQL Server", "mssql");
databaseTypeMappings.setProperty("DB2", "db2");
databaseTypeMappings.setProperty("DB2", "db2");
databaseTypeMappings.setProperty("DB2/NT", "db2");
databaseTypeMappings.setProperty("DB2/NT64", "db2");
databaseTypeMappings.setProperty("DB2 UDP", "db2");
databaseTypeMappings.setProperty("DB2/LINUX", "db2");
databaseTypeMappings.setProperty("DB2/LINUX390", "db2");
databaseTypeMappings.setProperty("DB2/LINUXX8664", "db2");
databaseTypeMappings.setProperty("DB2/LINUXZ64", "db2");
databaseTypeMappings.setProperty("DB2/400 SQL", "db2");
databaseTypeMappings.setProperty("DB2/6000", "db2");
databaseTypeMappings.setProperty("DB2 UDB iSeries", "db2");
databaseTypeMappings.setProperty("DB2/AIX64", "db2");
databaseTypeMappings.setProperty("DB2/HPUX", "db2");
databaseTypeMappings.setProperty("DB2/HP64", "db2");
databaseTypeMappings.setProperty("DB2/SUN", "db2");
databaseTypeMappings.setProperty("DB2/SUN64", "db2");
databaseTypeMappings.setProperty("DB2/PTX", "db2");
databaseTypeMappings.setProperty("DB2/2", "db2");
return databaseTypeMappings;
}
public void initDatabaseType() {
Connection connection = null;
try {
connection = dataSource.getConnection();
DatabaseMetaData databaseMetaData = connection.getMetaData();
String databaseProductName = databaseMetaData.getDatabaseProductName();
if (MY_SQL_PRODUCT_NAME.equals(databaseProductName)) {
databaseProductName = checkForMariaDb(databaseMetaData, databaseProductName);
}
if (POSTGRES_DB_PRODUCT_NAME.equals(databaseProductName)) {
databaseProductName = checkForCrdb(connection);
}
LOG.debugDatabaseproductName(databaseProductName);
databaseType = databaseTypeMappings.getProperty(databaseProductName);
ensureNotNull("couldn't deduct database type from database product name '" + databaseProductName + "'", "databaseType", databaseType);
LOG.debugDatabaseType(databaseType);
initDatabaseVendorAndVersion(databaseMetaData);
} catch (SQLException e) {
LOG.databaseConnectionAccessException(e);
} finally {
try {
if (connection != null) {
connection.close();
}
} catch (SQLException e) {
LOG.databaseConnectionCloseException(e);
}
}
}
/**
 * The product name reported by MariaDB is still 'MySQL'. This method
 * tries to find some evidence for MariaDB. If it is successful
 * it returns "MariaDB", otherwise the provided database name.
 */
protected String checkForMariaDb(DatabaseMetaData databaseMetaData, String databaseName) {
try {
String databaseProductVersion = databaseMetaData.getDatabaseProductVersion();
if (databaseProductVersion != null && databaseProductVersion.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
} catch (SQLException ignore) {
}
try {
String driverName = databaseMetaData.getDriverName();
if (driverName != null && driverName.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
} catch (SQLException ignore) {
}
String metaDataClassName = databaseMetaData.getClass().getName();
if (metaDataClassName != null && metaDataClassName.toLowerCase().contains("mariadb")) {
return MARIA_DB_PRODUCT_NAME;
}
return databaseName;
}
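// CockroachDB is PostgreSQL wire-compatible and reports itself as PostgreSQL; querying
// version() is used to tell the two databases apart.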
protected String checkForCrdb(Connection connection) {
try {
ResultSet result = connection.prepareStatement("select version() as version;").executeQuery();
if (result.next()) {
String versionData = result.getString(1);
if (versionData != null && versionData.toLowerCase().contains("cockroachdb")) {
return CRDB_DB_PRODUCT_NAME;
}
}
} catch (SQLException ignore) {
}
return POSTGRES_DB_PRODUCT_NAME;
}
protected void initDatabaseVendorAndVersion(DatabaseMetaData databaseMetaData) throws SQLException {
databaseVendor = databaseMetaData.getDatabaseProductName();
databaseVersion = databaseMetaData.getDatabaseProductVersion();
}
// myBatis SqlSessionFactory ////////////////////////////////////////////////
protected void initTransactionFactory() {
if (transactionFactory == null) {
if (transactionsExternallyManaged) {
transactionFactory = new ManagedTransactionFactory();
} else {
transactionFactory = new JdbcTransactionFactory();
}
}
}
protected void initSqlSessionFactory() {
// to protect access to cachedSqlSessionFactory see CAM-6682
synchronized (ProcessEngineConfigurationImpl.class) {
if (isUseSharedSqlSessionFactory) {
sqlSessionFactory = cachedSqlSessionFactory;
}
if (sqlSessionFactory == null) {
InputStream inputStream = null;
try {
inputStream = getMyBatisXmlConfigurationSteam();
// update the jdbc parameters to the configured ones...
Environment environment = new Environment("default", transactionFactory, dataSource);
Reader reader = new InputStreamReader(inputStream);
Properties properties = new Properties();
if (isUseSharedSqlSessionFactory) {
properties.put("prefix", "${@org.camunda.bpm.engine.impl.context.Context@getProcessEngineConfiguration().databaseTablePrefix}");
} else {
properties.put("prefix", databaseTablePrefix);
}
initSqlSessionFactoryProperties(properties, databaseTablePrefix, databaseType);
XMLConfigBuilder parser = new XMLConfigBuilder(reader, "", properties);
Configuration configuration = parser.getConfiguration();
configuration.setEnvironment(environment);
configuration = parser.parse();
configuration.setDefaultStatementTimeout(jdbcStatementTimeout);
if (isJdbcBatchProcessing()) {
configuration.setDefaultExecutorType(ExecutorType.BATCH);
}
sqlSessionFactory = new DefaultSqlSessionFactory(configuration);
if (isUseSharedSqlSessionFactory) {
cachedSqlSessionFactory = sqlSessionFactory;
}
} catch (Exception e) {
throw new ProcessEngineException("Error while building ibatis SqlSessionFactory: " + e.getMessage(), e);
} finally {
IoUtil.closeSilently(inputStream);
}
}
}
}
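// Populates the MyBatis variable-substitution properties with the database-specific SQL
// fragments (paging, bit operations, date arithmetic, constants, ...) maintained by
// DbSqlSessionFactory for the given database type.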
public static void initSqlSessionFactoryProperties(Properties properties, String databaseTablePrefix, String databaseType) {
if (databaseType != null) {
properties.put("limitBefore", DbSqlSessionFactory.databaseSpecificLimitBeforeStatements.get(databaseType));
properties.put("limitAfter", DbSqlSessionFactory.databaseSpecificLimitAfterStatements.get(databaseType));
properties.put("limitBeforeWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType));
properties.put("limitAfterWithoutOffset", DbSqlSessionFactory.databaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType));
properties.put("optimizeLimitBeforeWithoutOffset", DbSqlSessionFactory.optimizeDatabaseSpecificLimitBeforeWithoutOffsetStatements.get(databaseType));
properties.put("optimizeLimitAfterWithoutOffset", DbSqlSessionFactory.optimizeDatabaseSpecificLimitAfterWithoutOffsetStatements.get(databaseType));
properties.put("innerLimitAfter", DbSqlSessionFactory.databaseSpecificInnerLimitAfterStatements.get(databaseType));
properties.put("limitBetween", DbSqlSessionFactory.databaseSpecificLimitBetweenStatements.get(databaseType));
properties.put("limitBetweenFilter", DbSqlSessionFactory.databaseSpecificLimitBetweenFilterStatements.get(databaseType));
properties.put("limitBetweenAcquisition", DbSqlSessionFactory.databaseSpecificLimitBetweenAcquisitionStatements.get(databaseType));
properties.put("orderBy", DbSqlSessionFactory.databaseSpecificOrderByStatements.get(databaseType));
properties.put("limitBeforeNativeQuery", DbSqlSessionFactory.databaseSpecificLimitBeforeNativeQueryStatements.get(databaseType));
properties.put("distinct", DbSqlSessionFactory.databaseSpecificDistinct.get(databaseType));
properties.put("numericCast", DbSqlSessionFactory.databaseSpecificNumericCast.get(databaseType));
properties.put("countDistinctBeforeStart", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeStart.get(databaseType));
properties.put("countDistinctBeforeEnd", DbSqlSessionFactory.databaseSpecificCountDistinctBeforeEnd.get(databaseType));
properties.put("countDistinctAfterEnd", DbSqlSessionFactory.databaseSpecificCountDistinctAfterEnd.get(databaseType));
properties.put("escapeChar", DbSqlSessionFactory.databaseSpecificEscapeChar.get(databaseType));
properties.put("bitand1", DbSqlSessionFactory.databaseSpecificBitAnd1.get(databaseType));
properties.put("bitand2", DbSqlSessionFactory.databaseSpecificBitAnd2.get(databaseType));
properties.put("bitand3", DbSqlSessionFactory.databaseSpecificBitAnd3.get(databaseType));
properties.put("datepart1", DbSqlSessionFactory.databaseSpecificDatepart1.get(databaseType));
properties.put("datepart2", DbSqlSessionFactory.databaseSpecificDatepart2.get(databaseType));
properties.put("datepart3", DbSqlSessionFactory.databaseSpecificDatepart3.get(databaseType));
properties.put("trueConstant", DbSqlSessionFactory.databaseSpecificTrueConstant.get(databaseType));
properties.put("falseConstant", DbSqlSessionFactory.databaseSpecificFalseConstant.get(databaseType));
properties.put("dbSpecificDummyTable", DbSqlSessionFactory.databaseSpecificDummyTable.get(databaseType));
properties.put("dbSpecificIfNullFunction", DbSqlSessionFactory.databaseSpecificIfNull.get(databaseType));
properties.put("dayComparator", DbSqlSessionFactory.databaseSpecificDaysComparator.get(databaseType));
properties.put("collationForCaseSensitivity", DbSqlSessionFactory.databaseSpecificCollationForCaseSensitivity.get(databaseType));
Map<String, String> constants = DbSqlSessionFactory.dbSpecificConstants.get(databaseType);
for (Entry<String, String> entry : constants.entrySet()) {
properties.put(entry.getKey(), entry.getValue());
}
}
}
protected InputStream getMyBatisXmlConfigurationSteam() {
return ReflectUtil.getResourceAsStream(DEFAULT_MYBATIS_MAPPING_FILE);
}
// session factories ////////////////////////////////////////////////////////
protected void initIdentityProviderSessionFactory() {
if (identityProviderSessionFactory == null) {
identityProviderSessionFactory = new GenericManagerFactory(DbIdentityServiceProvider.class);
}
}
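// Registers the persistence session factories: the DbEntityManagerFactory, one
// GenericManagerFactory per entity manager class, the identity provider session factory
// (also registered as WritableIdentityProvider when supported), and any custom factories.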
protected void initSessionFactories() {
if (sessionFactories == null) {
sessionFactories = new HashMap<>();
initPersistenceProviders();
addSessionFactory(new DbEntityManagerFactory(idGenerator));
addSessionFactory(new GenericManagerFactory(AttachmentManager.class));
addSessionFactory(new GenericManagerFactory(CommentManager.class));
addSessionFactory(new GenericManagerFactory(DeploymentManager.class));
addSessionFactory(new GenericManagerFactory(ExecutionManager.class));
addSessionFactory(new GenericManagerFactory(HistoricActivityInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricCaseActivityInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricStatisticsManager.class));
addSessionFactory(new GenericManagerFactory(HistoricDetailManager.class));
addSessionFactory(new GenericManagerFactory(HistoricProcessInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricCaseInstanceManager.class));
addSessionFactory(new GenericManagerFactory(UserOperationLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricTaskInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricVariableInstanceManager.class));
addSessionFactory(new GenericManagerFactory(HistoricIncidentManager.class));
addSessionFactory(new GenericManagerFactory(HistoricIdentityLinkLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricJobLogManager.class));
addSessionFactory(new GenericManagerFactory(HistoricExternalTaskLogManager.class));
addSessionFactory(new GenericManagerFactory(IdentityInfoManager.class));
addSessionFactory(new GenericManagerFactory(IdentityLinkManager.class));
addSessionFactory(new GenericManagerFactory(JobManager.class));
addSessionFactory(new GenericManagerFactory(JobDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(ProcessDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(PropertyManager.class));
addSessionFactory(new GenericManagerFactory(ResourceManager.class));
addSessionFactory(new GenericManagerFactory(ByteArrayManager.class));
addSessionFactory(new GenericManagerFactory(TableDataManager.class));
addSessionFactory(new GenericManagerFactory(TaskManager.class));
addSessionFactory(new GenericManagerFactory(TaskReportManager.class));
addSessionFactory(new GenericManagerFactory(VariableInstanceManager.class));
addSessionFactory(new GenericManagerFactory(EventSubscriptionManager.class));
addSessionFactory(new GenericManagerFactory(StatisticsManager.class));
addSessionFactory(new GenericManagerFactory(IncidentManager.class));
addSessionFactory(new GenericManagerFactory(AuthorizationManager.class));
addSessionFactory(new GenericManagerFactory(FilterManager.class));
addSessionFactory(new GenericManagerFactory(MeterLogManager.class));
addSessionFactory(new GenericManagerFactory(ExternalTaskManager.class));
addSessionFactory(new GenericManagerFactory(ReportManager.class));
addSessionFactory(new GenericManagerFactory(BatchManager.class));
addSessionFactory(new GenericManagerFactory(HistoricBatchManager.class));
addSessionFactory(new GenericManagerFactory(TenantManager.class));
addSessionFactory(new GenericManagerFactory(SchemaLogManager.class));
addSessionFactory(new GenericManagerFactory(CaseDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(CaseExecutionManager.class));
addSessionFactory(new GenericManagerFactory(CaseSentryPartManager.class));
addSessionFactory(new GenericManagerFactory(DecisionDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(DecisionRequirementsDefinitionManager.class));
addSessionFactory(new GenericManagerFactory(HistoricDecisionInstanceManager.class));
addSessionFactory(new GenericManagerFactory(OptimizeManager.class));
sessionFactories.put(ReadOnlyIdentityProvider.class, identityProviderSessionFactory);
// check whether identityProviderSessionFactory implements WritableIdentityProvider
Class<?> identityProviderType = identityProviderSessionFactory.getSessionType();
if (WritableIdentityProvider.class.isAssignableFrom(identityProviderType)) {
sessionFactories.put(WritableIdentityProvider.class, identityProviderSessionFactory);
}
}
if (customSessionFactories != null) {
for (SessionFactory sessionFactory : customSessionFactories) {
addSessionFactory(sessionFactory);
}
}
}
protected void initPersistenceProviders() {
ensurePrefixAndSchemaFitToegether(databaseTablePrefix, databaseSchema);
dbSqlSessionFactory = new DbSqlSessionFactory(jdbcBatchProcessing);
dbSqlSessionFactory.setDatabaseType(databaseType);
dbSqlSessionFactory.setIdGenerator(idGenerator);
dbSqlSessionFactory.setSqlSessionFactory(sqlSessionFactory);
dbSqlSessionFactory.setDbIdentityUsed(isDbIdentityUsed);
dbSqlSessionFactory.setDbHistoryUsed(isDbHistoryUsed);
dbSqlSessionFactory.setCmmnEnabled(cmmnEnabled);
dbSqlSessionFactory.setDmnEnabled(dmnEnabled);
dbSqlSessionFactory.setDatabaseTablePrefix(databaseTablePrefix);
// hack for the case where the schema is defined via the databaseTablePrefix parameter and not via the databaseSchema parameter
if (databaseTablePrefix != null && databaseSchema == null && databaseTablePrefix.contains(".")) {
databaseSchema = databaseTablePrefix.split("\\.")[0];
}
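// Illustrative note: with databaseTablePrefix "MY_SCHEMA." and no explicit databaseSchema,
// the block above derives databaseSchema "MY_SCHEMA" from the part before the first dot.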
dbSqlSessionFactory.setDatabaseSchema(databaseSchema);
addSessionFactory(dbSqlSessionFactory);
addSessionFactory(new DbSqlPersistenceProviderFactory());
}
protected void initMigration() {
initMigrationInstructionValidators();
initMigrationActivityMatcher();
initMigrationInstructionGenerator();
initMigratingActivityInstanceValidators();
initMigratingTransitionInstanceValidators();
initMigratingCompensationInstanceValidators();
}
protected void initMigrationActivityMatcher() {
if (migrationActivityMatcher == null) {
migrationActivityMatcher = new DefaultMigrationActivityMatcher();
}
}
protected void initMigrationInstructionGenerator() {
if (migrationInstructionGenerator == null) {
migrationInstructionGenerator = new DefaultMigrationInstructionGenerator(migrationActivityMatcher);
}
List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>();
if (customPreMigrationActivityValidators != null) {
migrationActivityValidators.addAll(customPreMigrationActivityValidators);
}
migrationActivityValidators.addAll(getDefaultMigrationActivityValidators());
if (customPostMigrationActivityValidators != null) {
migrationActivityValidators.addAll(customPostMigrationActivityValidators);
}
migrationInstructionGenerator = migrationInstructionGenerator
.migrationActivityValidators(migrationActivityValidators)
.migrationInstructionValidators(migrationInstructionValidators);
}
protected void initMigrationInstructionValidators() {
if (migrationInstructionValidators == null) {
migrationInstructionValidators = new ArrayList<>();
if (customPreMigrationInstructionValidators != null) {
migrationInstructionValidators.addAll(customPreMigrationInstructionValidators);
}
migrationInstructionValidators.addAll(getDefaultMigrationInstructionValidators());
if (customPostMigrationInstructionValidators != null) {
migrationInstructionValidators.addAll(customPostMigrationInstructionValidators);
}
}
}
protected void initMigratingActivityInstanceValidators() {
if (migratingActivityInstanceValidators == null) {
migratingActivityInstanceValidators = new ArrayList<>();
if (customPreMigratingActivityInstanceValidators != null) {
migratingActivityInstanceValidators.addAll(customPreMigratingActivityInstanceValidators);
}
migratingActivityInstanceValidators.addAll(getDefaultMigratingActivityInstanceValidators());
if (customPostMigratingActivityInstanceValidators != null) {
migratingActivityInstanceValidators.addAll(customPostMigratingActivityInstanceValidators);
}
}
}
protected void initMigratingTransitionInstanceValidators() {
if (migratingTransitionInstanceValidators == null) {
migratingTransitionInstanceValidators = new ArrayList<>();
migratingTransitionInstanceValidators.addAll(getDefaultMigratingTransitionInstanceValidators());
}
}
protected void initMigratingCompensationInstanceValidators() {
if (migratingCompensationInstanceValidators == null) {
migratingCompensationInstanceValidators = new ArrayList<>();
migratingCompensationInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingCompensationInstanceValidators.add(new NoUnmappedCompensationStartEventValidator());
}
}
/**
* When providing both a schema and a prefix, the prefix has to be the schema name followed by a dot.
*/
protected void ensurePrefixAndSchemaFitToegether(String prefix, String schema) {
if (schema == null) {
return;
} else if (prefix == null || !prefix.startsWith(schema + ".")) {
throw new ProcessEngineException("When setting a schema the prefix has to be schema + '.'. Received schema: " + schema + " prefix: " + prefix);
}
}
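// Minimal sketch (illustrative, not part of the engine): a prefix/schema pair that satisfies the
// check above, using setters defined further down in this class.
//
//   ProcessEngineConfigurationImpl config = ...;   // any concrete subclass
//   config.setDatabaseSchema("MY_SCHEMA");
//   config.setDatabaseTablePrefix("MY_SCHEMA.");   // prefix = schema + '.'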
protected void addSessionFactory(SessionFactory sessionFactory) {
sessionFactories.put(sessionFactory.getSessionType(), sessionFactory);
}
// deployers ////////////////////////////////////////////////////////////////
protected void initDeployers() {
if (this.deployers == null) {
this.deployers = new ArrayList<>();
if (customPreDeployers != null) {
this.deployers.addAll(customPreDeployers);
}
this.deployers.addAll(getDefaultDeployers());
if (customPostDeployers != null) {
this.deployers.addAll(customPostDeployers);
}
}
if (deploymentCache == null) {
List<Deployer> deployers = new ArrayList<>();
if (customPreDeployers != null) {
deployers.addAll(customPreDeployers);
}
deployers.addAll(getDefaultDeployers());
if (customPostDeployers != null) {
deployers.addAll(customPostDeployers);
}
initCacheFactory();
deploymentCache = new DeploymentCache(cacheFactory, cacheCapacity);
deploymentCache.setDeployers(deployers);
}
}
protected Collection<? extends Deployer> getDefaultDeployers() {
List<Deployer> defaultDeployers = new ArrayList<>();
BpmnDeployer bpmnDeployer = getBpmnDeployer();
defaultDeployers.add(bpmnDeployer);
if (isCmmnEnabled()) {
CmmnDeployer cmmnDeployer = getCmmnDeployer();
defaultDeployers.add(cmmnDeployer);
}
if (isDmnEnabled()) {
DecisionRequirementsDefinitionDeployer decisionRequirementsDefinitionDeployer = getDecisionRequirementsDefinitionDeployer();
DecisionDefinitionDeployer decisionDefinitionDeployer = getDecisionDefinitionDeployer();
// the DecisionRequirementsDefinitionDeployer must be registered before the DecisionDefinitionDeployer
defaultDeployers.add(decisionRequirementsDefinitionDeployer);
defaultDeployers.add(decisionDefinitionDeployer);
}
return defaultDeployers;
}
protected BpmnDeployer getBpmnDeployer() {
BpmnDeployer bpmnDeployer = new BpmnDeployer();
bpmnDeployer.setExpressionManager(expressionManager);
bpmnDeployer.setIdGenerator(idGenerator);
if (bpmnParseFactory == null) {
bpmnParseFactory = new DefaultBpmnParseFactory();
}
BpmnParser bpmnParser = new BpmnParser(expressionManager, bpmnParseFactory);
if (preParseListeners != null) {
bpmnParser.getParseListeners().addAll(preParseListeners);
}
bpmnParser.getParseListeners().addAll(getDefaultBPMNParseListeners());
if (postParseListeners != null) {
bpmnParser.getParseListeners().addAll(postParseListeners);
}
bpmnDeployer.setBpmnParser(bpmnParser);
return bpmnDeployer;
}
protected List<BpmnParseListener> getDefaultBPMNParseListeners() {
List<BpmnParseListener> defaultListeners = new ArrayList<>();
if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) {
defaultListeners.add(new HistoryParseListener(historyEventProducer));
}
if (isMetricsEnabled) {
defaultListeners.add(new MetricsBpmnParseListener());
}
return defaultListeners;
}
protected CmmnDeployer getCmmnDeployer() {
CmmnDeployer cmmnDeployer = new CmmnDeployer();
cmmnDeployer.setIdGenerator(idGenerator);
if (cmmnTransformFactory == null) {
cmmnTransformFactory = new DefaultCmmnTransformFactory();
}
if (cmmnElementHandlerRegistry == null) {
cmmnElementHandlerRegistry = new DefaultCmmnElementHandlerRegistry();
}
CmmnTransformer cmmnTransformer = new CmmnTransformer(expressionManager, cmmnElementHandlerRegistry, cmmnTransformFactory);
if (customPreCmmnTransformListeners != null) {
cmmnTransformer.getTransformListeners().addAll(customPreCmmnTransformListeners);
}
cmmnTransformer.getTransformListeners().addAll(getDefaultCmmnTransformListeners());
if (customPostCmmnTransformListeners != null) {
cmmnTransformer.getTransformListeners().addAll(customPostCmmnTransformListeners);
}
cmmnDeployer.setTransformer(cmmnTransformer);
return cmmnDeployer;
}
protected List<CmmnTransformListener> getDefaultCmmnTransformListeners() {
List<CmmnTransformListener> defaultListener = new ArrayList<>();
if (!HistoryLevel.HISTORY_LEVEL_NONE.equals(historyLevel)) {
defaultListener.add(new CmmnHistoryTransformListener(cmmnHistoryEventProducer));
}
if (isMetricsEnabled) {
defaultListener.add(new MetricsCmmnTransformListener());
}
return defaultListener;
}
protected DecisionDefinitionDeployer getDecisionDefinitionDeployer() {
DecisionDefinitionDeployer decisionDefinitionDeployer = new DecisionDefinitionDeployer();
decisionDefinitionDeployer.setIdGenerator(idGenerator);
decisionDefinitionDeployer.setTransformer(dmnEngineConfiguration.getTransformer());
return decisionDefinitionDeployer;
}
protected DecisionRequirementsDefinitionDeployer getDecisionRequirementsDefinitionDeployer() {
DecisionRequirementsDefinitionDeployer drdDeployer = new DecisionRequirementsDefinitionDeployer();
drdDeployer.setIdGenerator(idGenerator);
drdDeployer.setTransformer(dmnEngineConfiguration.getTransformer());
return drdDeployer;
}
public DmnEngine getDmnEngine() {
return dmnEngine;
}
public void setDmnEngine(DmnEngine dmnEngine) {
this.dmnEngine = dmnEngine;
}
public DefaultDmnEngineConfiguration getDmnEngineConfiguration() {
return dmnEngineConfiguration;
}
public void setDmnEngineConfiguration(DefaultDmnEngineConfiguration dmnEngineConfiguration) {
this.dmnEngineConfiguration = dmnEngineConfiguration;
}
// job executor /////////////////////////////////////////////////////////////
protected void initJobExecutor() {
if (jobExecutor == null) {
jobExecutor = new DefaultJobExecutor();
}
jobHandlers = new HashMap<>();
TimerExecuteNestedActivityJobHandler timerExecuteNestedActivityJobHandler = new TimerExecuteNestedActivityJobHandler();
jobHandlers.put(timerExecuteNestedActivityJobHandler.getType(), timerExecuteNestedActivityJobHandler);
TimerCatchIntermediateEventJobHandler timerCatchIntermediateEvent = new TimerCatchIntermediateEventJobHandler();
jobHandlers.put(timerCatchIntermediateEvent.getType(), timerCatchIntermediateEvent);
TimerStartEventJobHandler timerStartEvent = new TimerStartEventJobHandler();
jobHandlers.put(timerStartEvent.getType(), timerStartEvent);
TimerStartEventSubprocessJobHandler timerStartEventSubprocess = new TimerStartEventSubprocessJobHandler();
jobHandlers.put(timerStartEventSubprocess.getType(), timerStartEventSubprocess);
AsyncContinuationJobHandler asyncContinuationJobHandler = new AsyncContinuationJobHandler();
jobHandlers.put(asyncContinuationJobHandler.getType(), asyncContinuationJobHandler);
ProcessEventJobHandler processEventJobHandler = new ProcessEventJobHandler();
jobHandlers.put(processEventJobHandler.getType(), processEventJobHandler);
TimerSuspendProcessDefinitionHandler suspendProcessDefinitionHandler = new TimerSuspendProcessDefinitionHandler();
jobHandlers.put(suspendProcessDefinitionHandler.getType(), suspendProcessDefinitionHandler);
TimerActivateProcessDefinitionHandler activateProcessDefinitionHandler = new TimerActivateProcessDefinitionHandler();
jobHandlers.put(activateProcessDefinitionHandler.getType(), activateProcessDefinitionHandler);
TimerSuspendJobDefinitionHandler suspendJobDefinitionHandler = new TimerSuspendJobDefinitionHandler();
jobHandlers.put(suspendJobDefinitionHandler.getType(), suspendJobDefinitionHandler);
TimerActivateJobDefinitionHandler activateJobDefinitionHandler = new TimerActivateJobDefinitionHandler();
jobHandlers.put(activateJobDefinitionHandler.getType(), activateJobDefinitionHandler);
TimerTaskListenerJobHandler taskListenerJobHandler = new TimerTaskListenerJobHandler();
jobHandlers.put(taskListenerJobHandler.getType(), taskListenerJobHandler);
BatchSeedJobHandler batchSeedJobHandler = new BatchSeedJobHandler();
jobHandlers.put(batchSeedJobHandler.getType(), batchSeedJobHandler);
BatchMonitorJobHandler batchMonitorJobHandler = new BatchMonitorJobHandler();
jobHandlers.put(batchMonitorJobHandler.getType(), batchMonitorJobHandler);
HistoryCleanupJobHandler historyCleanupJobHandler = new HistoryCleanupJobHandler();
jobHandlers.put(historyCleanupJobHandler.getType(), historyCleanupJobHandler);
for (JobHandler batchHandler : batchHandlers.values()) {
jobHandlers.put(batchHandler.getType(), batchHandler);
}
// if we have custom job handlers, register them
if (getCustomJobHandlers() != null) {
for (JobHandler customJobHandler : getCustomJobHandlers()) {
jobHandlers.put(customJobHandler.getType(), customJobHandler);
}
}
jobExecutor.setAutoActivate(jobExecutorActivate);
if (jobExecutor.getRejectedJobsHandler() == null) {
if (customRejectedJobsHandler != null) {
jobExecutor.setRejectedJobsHandler(customRejectedJobsHandler);
} else {
jobExecutor.setRejectedJobsHandler(new NotifyAcquisitionRejectedJobsHandler());
}
}
}
protected void initJobProvider() {
if (producePrioritizedJobs && jobPriorityProvider == null) {
jobPriorityProvider = new DefaultJobPriorityProvider();
}
}
// external task /////////////////////////////////////////////////////////////
protected void initExternalTaskPriorityProvider() {
if (producePrioritizedExternalTasks && externalTaskPriorityProvider == null) {
externalTaskPriorityProvider = new DefaultExternalTaskPriorityProvider();
}
}
// history //////////////////////////////////////////////////////////////////
public void initHistoryLevel() {
if (historyLevel != null) {
setHistory(historyLevel.getName());
}
if (historyLevels == null) {
historyLevels = new ArrayList<>();
historyLevels.add(HistoryLevel.HISTORY_LEVEL_NONE);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_ACTIVITY);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_AUDIT);
historyLevels.add(HistoryLevel.HISTORY_LEVEL_FULL);
}
if (customHistoryLevels != null) {
historyLevels.addAll(customHistoryLevels);
}
if (HISTORY_VARIABLE.equalsIgnoreCase(history)) {
historyLevel = HistoryLevel.HISTORY_LEVEL_ACTIVITY;
LOG.usingDeprecatedHistoryLevelVariable();
} else {
for (HistoryLevel historyLevel : historyLevels) {
if (historyLevel.getName().equalsIgnoreCase(history)) {
this.historyLevel = historyLevel;
}
}
}
// do allow null for history level in case of "auto"
if (historyLevel == null && !ProcessEngineConfiguration.HISTORY_AUTO.equalsIgnoreCase(history)) {
throw new ProcessEngineException("invalid history level: " + history);
}
}
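// Illustrative note: setHistory(ProcessEngineConfiguration.HISTORY_FULL) makes the loop above resolve
// historyLevel to HistoryLevel.HISTORY_LEVEL_FULL; the deprecated value "variable" maps to
// HISTORY_LEVEL_ACTIVITY, and "auto" intentionally leaves historyLevel null at this point.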
// id generator /////////////////////////////////////////////////////////////
protected void initIdGenerator() {
if (idGenerator == null) {
CommandExecutor idGeneratorCommandExecutor = null;
if (idGeneratorDataSource != null) {
ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration();
processEngineConfiguration.setDataSource(idGeneratorDataSource);
processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE);
processEngineConfiguration.init();
idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew();
} else if (idGeneratorDataSourceJndiName != null) {
ProcessEngineConfigurationImpl processEngineConfiguration = new StandaloneProcessEngineConfiguration();
processEngineConfiguration.setDataSourceJndiName(idGeneratorDataSourceJndiName);
processEngineConfiguration.setDatabaseSchemaUpdate(DB_SCHEMA_UPDATE_FALSE);
processEngineConfiguration.init();
idGeneratorCommandExecutor = processEngineConfiguration.getCommandExecutorTxRequiresNew();
} else {
idGeneratorCommandExecutor = commandExecutorTxRequiresNew;
}
DbIdGenerator dbIdGenerator = new DbIdGenerator();
dbIdGenerator.setIdBlockSize(idBlockSize);
dbIdGenerator.setCommandExecutor(idGeneratorCommandExecutor);
idGenerator = dbIdGenerator;
}
}
// OTHER ////////////////////////////////////////////////////////////////////
protected void initCommandContextFactory() {
if (commandContextFactory == null) {
commandContextFactory = new CommandContextFactory();
commandContextFactory.setProcessEngineConfiguration(this);
}
}
protected void initTransactionContextFactory() {
if (transactionContextFactory == null) {
transactionContextFactory = new StandaloneTransactionContextFactory();
}
}
protected void initValueTypeResolver() {
if (valueTypeResolver == null) {
valueTypeResolver = new ValueTypeResolverImpl();
}
}
protected void initDefaultCharset() {
if (defaultCharset == null) {
if (defaultCharsetName == null) {
defaultCharsetName = "UTF-8";
}
defaultCharset = Charset.forName(defaultCharsetName);
}
}
protected void initMetrics() {
if (isMetricsEnabled) {
if (metricsRegistry == null) {
metricsRegistry = new MetricsRegistry();
}
initDefaultMetrics(metricsRegistry);
if (dbMetricsReporter == null) {
dbMetricsReporter = new DbMetricsReporter(metricsRegistry, commandExecutorTxRequired);
}
}
}
protected void initHostName() {
if (hostname == null) {
if (hostnameProvider == null) {
hostnameProvider = new SimpleIpBasedProvider();
}
hostname = hostnameProvider.getHostname(this);
}
}
protected void initDefaultMetrics(MetricsRegistry metricsRegistry) {
metricsRegistry.createMeter(Metrics.ACTIVTY_INSTANCE_START);
metricsRegistry.createDbMeter(Metrics.ACTIVTY_INSTANCE_END);
metricsRegistry.createDbMeter(Metrics.JOB_ACQUISITION_ATTEMPT);
metricsRegistry.createDbMeter(Metrics.JOB_ACQUIRED_SUCCESS);
metricsRegistry.createDbMeter(Metrics.JOB_ACQUIRED_FAILURE);
metricsRegistry.createDbMeter(Metrics.JOB_SUCCESSFUL);
metricsRegistry.createDbMeter(Metrics.JOB_FAILED);
metricsRegistry.createDbMeter(Metrics.JOB_LOCKED_EXCLUSIVE);
metricsRegistry.createDbMeter(Metrics.JOB_EXECUTION_REJECTED);
metricsRegistry.createMeter(Metrics.ROOT_PROCESS_INSTANCE_START);
metricsRegistry.createMeter(Metrics.EXECUTED_DECISION_INSTANCES);
metricsRegistry.createMeter(Metrics.EXECUTED_DECISION_ELEMENTS);
}
protected void initSerialization() {
if (variableSerializers == null) {
variableSerializers = new DefaultVariableSerializers();
if (customPreVariableSerializers != null) {
for (TypedValueSerializer<?> customVariableType : customPreVariableSerializers) {
variableSerializers.addSerializer(customVariableType);
}
}
// register built-in serializers
variableSerializers.addSerializer(new NullValueSerializer());
variableSerializers.addSerializer(new StringValueSerializer());
variableSerializers.addSerializer(new BooleanValueSerializer());
variableSerializers.addSerializer(new ShortValueSerializer());
variableSerializers.addSerializer(new IntegerValueSerializer());
variableSerializers.addSerializer(new LongValueSerlializer());
variableSerializers.addSerializer(new DateValueSerializer());
variableSerializers.addSerializer(new DoubleValueSerializer());
variableSerializers.addSerializer(new ByteArrayValueSerializer());
variableSerializers.addSerializer(new JavaObjectSerializer());
variableSerializers.addSerializer(new FileValueSerializer());
if (customPostVariableSerializers != null) {
for (TypedValueSerializer<?> customVariableType : customPostVariableSerializers) {
variableSerializers.addSerializer(customVariableType);
}
}
}
}
protected void initFormEngines() {
if (formEngines == null) {
formEngines = new HashMap<>();
// html form engine = default form engine
FormEngine defaultFormEngine = new HtmlFormEngine();
formEngines.put(null, defaultFormEngine); // default form engine is looked up with null
formEngines.put(defaultFormEngine.getName(), defaultFormEngine);
FormEngine juelFormEngine = new JuelFormEngine();
formEngines.put(juelFormEngine.getName(), juelFormEngine);
}
if (customFormEngines != null) {
for (FormEngine formEngine : customFormEngines) {
formEngines.put(formEngine.getName(), formEngine);
}
}
}
protected void initFormTypes() {
if (formTypes == null) {
formTypes = new FormTypes();
formTypes.addFormType(new StringFormType());
formTypes.addFormType(new LongFormType());
formTypes.addFormType(new DateFormType("dd/MM/yyyy"));
formTypes.addFormType(new BooleanFormType());
}
if (customFormTypes != null) {
for (AbstractFormFieldType customFormType : customFormTypes) {
formTypes.addFormType(customFormType);
}
}
}
protected void initFormFieldValidators() {
if (formValidators == null) {
formValidators = new FormValidators();
formValidators.addValidator("min", MinValidator.class);
formValidators.addValidator("max", MaxValidator.class);
formValidators.addValidator("minlength", MinLengthValidator.class);
formValidators.addValidator("maxlength", MaxLengthValidator.class);
formValidators.addValidator("required", RequiredValidator.class);
formValidators.addValidator("readonly", ReadOnlyValidator.class);
}
if (customFormFieldValidators != null) {
for (Entry<String, Class<? extends FormFieldValidator>> validator : customFormFieldValidators.entrySet()) {
formValidators.addValidator(validator.getKey(), validator.getValue());
}
}
}
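// Illustrative note (hypothetical key and class, assuming the embedding application populates the map):
// an entry "futureDate" -> MyFutureDateValidator.class in customFormFieldValidators is registered by key
// above and becomes available to form fields under that validator id.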
protected void initScripting() {
if (resolverFactories == null) {
resolverFactories = new ArrayList<>();
resolverFactories.add(new MocksResolverFactory());
resolverFactories.add(new VariableScopeResolverFactory());
resolverFactories.add(new BeansResolverFactory());
}
if (scriptingEngines == null) {
scriptingEngines = new ScriptingEngines(new ScriptBindingsFactory(resolverFactories));
scriptingEngines.setEnableScriptEngineCaching(enableScriptEngineCaching);
}
if (scriptFactory == null) {
scriptFactory = new ScriptFactory();
}
if (scriptEnvResolvers == null) {
scriptEnvResolvers = new ArrayList<>();
}
if (scriptingEnvironment == null) {
scriptingEnvironment = new ScriptingEnvironment(scriptFactory, scriptEnvResolvers, scriptingEngines);
}
}
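// Illustrative note: additional resolvers can be contributed before init via setResolverFactories(...),
// e.g. adding a custom ResolverFactory so that script bindings can resolve application-specific names
// in addition to the mocks, variable-scope and beans resolvers registered above.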
protected void initDmnEngine() {
if (dmnEngine == null) {
if (dmnEngineConfiguration == null) {
dmnEngineConfiguration = (DefaultDmnEngineConfiguration) DmnEngineConfiguration.createDefaultDmnEngineConfiguration();
}
dmnEngineConfiguration = new DmnEngineConfigurationBuilder(dmnEngineConfiguration)
.dmnHistoryEventProducer(dmnHistoryEventProducer)
.scriptEngineResolver(scriptingEngines)
.expressionManager(expressionManager)
.feelCustomFunctionProviders(dmnFeelCustomFunctionProviders)
.enableFeelLegacyBehavior(dmnFeelEnableLegacyBehavior)
.build();
dmnEngine = dmnEngineConfiguration.buildEngine();
} else if (dmnEngineConfiguration == null) {
dmnEngineConfiguration = (DefaultDmnEngineConfiguration) dmnEngine.getConfiguration();
}
}
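// Minimal sketch (illustrative): supplying a custom DMN configuration before the engine is built;
// initDmnEngine() then wires in the history event producer, scripting engines and expression manager.
//
//   DefaultDmnEngineConfiguration dmnConfig =
//       (DefaultDmnEngineConfiguration) DmnEngineConfiguration.createDefaultDmnEngineConfiguration();
//   processEngineConfiguration.setDmnEngineConfiguration(dmnConfig);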
protected void initExpressionManager() {
if (expressionManager == null) {
expressionManager = new ExpressionManager(beans);
}
// add function mapper for command context (e.g. currentUser(), currentUserGroups())
expressionManager.addFunctionMapper(new CommandContextFunctionMapper());
// add function mapper for date time (e.g. now(), dateTime())
expressionManager.addFunctionMapper(new DateTimeFunctionMapper());
}
protected void initBusinessCalendarManager() {
if (businessCalendarManager == null) {
MapBusinessCalendarManager mapBusinessCalendarManager = new MapBusinessCalendarManager();
mapBusinessCalendarManager.addBusinessCalendar(DurationBusinessCalendar.NAME, new DurationBusinessCalendar());
mapBusinessCalendarManager.addBusinessCalendar(DueDateBusinessCalendar.NAME, new DueDateBusinessCalendar());
mapBusinessCalendarManager.addBusinessCalendar(CycleBusinessCalendar.NAME, new CycleBusinessCalendar());
businessCalendarManager = mapBusinessCalendarManager;
}
}
protected void initDelegateInterceptor() {
if (delegateInterceptor == null) {
delegateInterceptor = new DefaultDelegateInterceptor();
}
}
protected void initEventHandlers() {
if (eventHandlers == null) {
eventHandlers = new HashMap<>();
SignalEventHandler signalEventHandler = new SignalEventHandler();
eventHandlers.put(signalEventHandler.getEventHandlerType(), signalEventHandler);
CompensationEventHandler compensationEventHandler = new CompensationEventHandler();
eventHandlers.put(compensationEventHandler.getEventHandlerType(), compensationEventHandler);
EventHandler messageEventHandler = new EventHandlerImpl(EventType.MESSAGE);
eventHandlers.put(messageEventHandler.getEventHandlerType(), messageEventHandler);
EventHandler conditionalEventHandler = new ConditionalEventHandler();
eventHandlers.put(conditionalEventHandler.getEventHandlerType(), conditionalEventHandler);
}
if (customEventHandlers != null) {
for (EventHandler eventHandler : customEventHandlers) {
eventHandlers.put(eventHandler.getEventHandlerType(), eventHandler);
}
}
}
protected void initCommandCheckers() {
if (commandCheckers == null) {
commandCheckers = new ArrayList<>();
// add the default command checkers
commandCheckers.add(new TenantCommandChecker());
commandCheckers.add(new AuthorizationCommandChecker());
}
}
// JPA //////////////////////////////////////////////////////////////////////
protected void initJpa() {
if (jpaPersistenceUnitName != null) {
jpaEntityManagerFactory = JpaHelper.createEntityManagerFactory(jpaPersistenceUnitName);
}
if (jpaEntityManagerFactory != null) {
sessionFactories.put(EntityManagerSession.class, new EntityManagerSessionFactory(jpaEntityManagerFactory, jpaHandleTransaction, jpaCloseEntityManager));
JPAVariableSerializer jpaType = (JPAVariableSerializer) variableSerializers.getSerializerByName(JPAVariableSerializer.NAME);
// Add the JPA variable serializer if it is not registered yet
if (jpaType == null) {
// Register it right after the byte array serializer, if available
int serializableIndex = variableSerializers.getSerializerIndexByName(ValueType.BYTES.getName());
if (serializableIndex > -1) {
variableSerializers.addSerializer(new JPAVariableSerializer(), serializableIndex);
} else {
variableSerializers.addSerializer(new JPAVariableSerializer());
}
}
}
}
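// Minimal sketch (illustrative): enabling JPA variable support by handing an existing
// EntityManagerFactory to the configuration; the flags correspond to the setters defined below.
//
//   config.setJpaEntityManagerFactory(entityManagerFactory);  // factory created by the application
//   config.setJpaHandleTransaction(true);
//   config.setJpaCloseEntityManager(true);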
protected void initBeans() {
if (beans == null) {
beans = new HashMap<>();
}
}
protected void initArtifactFactory() {
if (artifactFactory == null) {
artifactFactory = new DefaultArtifactFactory();
}
}
protected void initProcessApplicationManager() {
if (processApplicationManager == null) {
processApplicationManager = new ProcessApplicationManager();
}
}
// correlation handler //////////////////////////////////////////////////////
protected void initCorrelationHandler() {
if (correlationHandler == null) {
correlationHandler = new DefaultCorrelationHandler();
}
}
// condition handler //////////////////////////////////////////////////////
protected void initConditionHandler() {
if (conditionHandler == null) {
conditionHandler = new DefaultConditionHandler();
}
}
// deployment handler //////////////////////////////////////////////////////
protected void initDeploymentHandlerFactory() {
if (deploymentHandlerFactory == null) {
deploymentHandlerFactory = new DefaultDeploymentHandlerFactory();
}
}
// history handlers /////////////////////////////////////////////////////
protected void initHistoryEventProducer() {
if (historyEventProducer == null) {
historyEventProducer = new CacheAwareHistoryEventProducer();
}
}
protected void initCmmnHistoryEventProducer() {
if (cmmnHistoryEventProducer == null) {
cmmnHistoryEventProducer = new CacheAwareCmmnHistoryEventProducer();
}
}
protected void initDmnHistoryEventProducer() {
if (dmnHistoryEventProducer == null) {
dmnHistoryEventProducer = new DefaultDmnHistoryEventProducer();
}
}
protected void initHistoryEventHandler() {
if (historyEventHandler == null) {
if (enableDefaultDbHistoryEventHandler) {
historyEventHandler = new CompositeDbHistoryEventHandler(customHistoryEventHandlers);
} else {
historyEventHandler = new CompositeHistoryEventHandler(customHistoryEventHandlers);
}
}
}
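// Illustrative note: handlers passed via setCustomHistoryEventHandlers(...) are always wrapped in a
// composite; setEnableDefaultDbHistoryEventHandler(false) drops the built-in database handler so that
// only the custom handlers receive history events.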
// password digest //////////////////////////////////////////////////////////
protected void initPasswordDigest() {
if (saltGenerator == null) {
saltGenerator = new Default16ByteSaltGenerator();
}
if (passwordEncryptor == null) {
passwordEncryptor = new Sha512HashDigest();
}
if (customPasswordChecker == null) {
customPasswordChecker = Collections.emptyList();
}
if (passwordManager == null) {
passwordManager = new PasswordManager(passwordEncryptor, customPasswordChecker);
}
}
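// Illustrative note: each collaborator can be replaced before init, e.g. a custom salt generator via
// setSaltGenerator(...); anything left null falls back to the defaults created above
// (16 byte salts, SHA-512 hashing, no additional password checkers).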
public void initPasswordPolicy() {
if (passwordPolicy == null && enablePasswordPolicy) {
passwordPolicy = new DefaultPasswordPolicyImpl();
}
}
protected void initDeploymentRegistration() {
if (registeredDeployments == null) {
registeredDeployments = new CopyOnWriteArraySet<>();
}
}
// cache factory //////////////////////////////////////////////////////////
protected void initCacheFactory() {
if (cacheFactory == null) {
cacheFactory = new DefaultCacheFactory();
}
}
// resource authorization provider //////////////////////////////////////////
protected void initResourceAuthorizationProvider() {
if (resourceAuthorizationProvider == null) {
resourceAuthorizationProvider = new DefaultAuthorizationProvider();
}
}
protected void initPermissionProvider() {
if (permissionProvider == null) {
permissionProvider = new DefaultPermissionProvider();
}
}
protected void initDefaultUserPermissionForTask() {
if (defaultUserPermissionForTask == null) {
if (Permissions.UPDATE.getName().equals(defaultUserPermissionNameForTask)) {
defaultUserPermissionForTask = Permissions.UPDATE;
} else if (Permissions.TASK_WORK.getName().equals(defaultUserPermissionNameForTask)) {
defaultUserPermissionForTask = Permissions.TASK_WORK;
} else {
throw LOG.invalidConfigDefaultUserPermissionNameForTask(defaultUserPermissionNameForTask, new String[]{Permissions.UPDATE.getName(), Permissions.TASK_WORK.getName()});
}
}
}
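// Illustrative note: defaultUserPermissionNameForTask accepts exactly the names of Permissions.UPDATE
// and Permissions.TASK_WORK; alternatively the resolved permission can be set directly via
// setDefaultUserPermissionForTask(Permissions.TASK_WORK), which skips the name lookup above.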
protected void initAdminUser() {
if (adminUsers == null) {
adminUsers = new ArrayList<>();
}
}
protected void initAdminGroups() {
if (adminGroups == null) {
adminGroups = new ArrayList<>();
}
if (adminGroups.isEmpty() || !(adminGroups.contains(Groups.CAMUNDA_ADMIN))) {
adminGroups.add(Groups.CAMUNDA_ADMIN);
}
}
protected void initTelemetry() {
if (telemetryRegistry == null) {
telemetryRegistry = new TelemetryRegistry();
}
if (telemetryData == null) {
initTelemetryData();
}
try {
if (telemetryHttpConnector == null) {
telemetryHttpConnector = Connectors.getConnector(Connectors.HTTP_CONNECTOR_ID);
}
} catch (Exception e) {
ProcessEngineLogger.TELEMETRY_LOGGER.unexpectedExceptionDuringHttpConnectorConfiguration(e);
}
if (telemetryHttpConnector == null) {
ProcessEngineLogger.TELEMETRY_LOGGER.unableToConfigureHttpConnectorWarning();
} else {
if (telemetryReporter == null) {
telemetryReporter = new TelemetryReporter(commandExecutorTxRequired,
telemetryEndpoint,
telemetryRequestRetries,
telemetryReportingPeriod,
telemetryData,
telemetryHttpConnector,
telemetryRegistry,
metricsRegistry,
telemetryRequestTimeout);
}
}
}
protected void initTelemetryData() {
Database database = new Database(databaseVendor, databaseVersion);
Jdk jdk = ParseUtil.parseJdkDetails();
Internals internals = new Internals(database, telemetryRegistry.getApplicationServer(), telemetryRegistry.getLicenseKey(), jdk);
String camundaIntegration = telemetryRegistry.getCamundaIntegration();
if (camundaIntegration != null && !camundaIntegration.isEmpty()) {
internals.getCamundaIntegration().add(camundaIntegration);
}
ProcessEngineDetails engineInfo = ParseUtil
.parseProcessEngineVersion(true);
Product product = new Product(PRODUCT_NAME, engineInfo.getVersion(), engineInfo.getEdition(), internals);
// installationId is null here; the id will be fetched from the database later on
telemetryData = new Data(null, product);
}
// getters and setters //////////////////////////////////////////////////////
@Override
public String getProcessEngineName() {
return processEngineName;
}
public HistoryLevel getHistoryLevel() {
return historyLevel;
}
public void setHistoryLevel(HistoryLevel historyLevel) {
this.historyLevel = historyLevel;
}
public HistoryLevel getDefaultHistoryLevel() {
if (historyLevels != null) {
for (HistoryLevel historyLevel : historyLevels) {
if (HISTORY_DEFAULT != null && HISTORY_DEFAULT.equalsIgnoreCase(historyLevel.getName())) {
return historyLevel;
}
}
}
return null;
}
@Override
public ProcessEngineConfigurationImpl setProcessEngineName(String processEngineName) {
this.processEngineName = processEngineName;
return this;
}
public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequired() {
return customPreCommandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequired(List<CommandInterceptor> customPreCommandInterceptorsTxRequired) {
this.customPreCommandInterceptorsTxRequired = customPreCommandInterceptorsTxRequired;
return this;
}
public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequired() {
return customPostCommandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequired(List<CommandInterceptor> customPostCommandInterceptorsTxRequired) {
this.customPostCommandInterceptorsTxRequired = customPostCommandInterceptorsTxRequired;
return this;
}
public List<CommandInterceptor> getCommandInterceptorsTxRequired() {
return commandInterceptorsTxRequired;
}
public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequired(List<CommandInterceptor> commandInterceptorsTxRequired) {
this.commandInterceptorsTxRequired = commandInterceptorsTxRequired;
return this;
}
public CommandExecutor getCommandExecutorTxRequired() {
return commandExecutorTxRequired;
}
public ProcessEngineConfigurationImpl setCommandExecutorTxRequired(CommandExecutor commandExecutorTxRequired) {
this.commandExecutorTxRequired = commandExecutorTxRequired;
return this;
}
public List<CommandInterceptor> getCustomPreCommandInterceptorsTxRequiresNew() {
return customPreCommandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCustomPreCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPreCommandInterceptorsTxRequiresNew) {
this.customPreCommandInterceptorsTxRequiresNew = customPreCommandInterceptorsTxRequiresNew;
return this;
}
public List<CommandInterceptor> getCustomPostCommandInterceptorsTxRequiresNew() {
return customPostCommandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCustomPostCommandInterceptorsTxRequiresNew(List<CommandInterceptor> customPostCommandInterceptorsTxRequiresNew) {
this.customPostCommandInterceptorsTxRequiresNew = customPostCommandInterceptorsTxRequiresNew;
return this;
}
public List<CommandInterceptor> getCommandInterceptorsTxRequiresNew() {
return commandInterceptorsTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCommandInterceptorsTxRequiresNew(List<CommandInterceptor> commandInterceptorsTxRequiresNew) {
this.commandInterceptorsTxRequiresNew = commandInterceptorsTxRequiresNew;
return this;
}
public CommandExecutor getCommandExecutorTxRequiresNew() {
return commandExecutorTxRequiresNew;
}
public ProcessEngineConfigurationImpl setCommandExecutorTxRequiresNew(CommandExecutor commandExecutorTxRequiresNew) {
this.commandExecutorTxRequiresNew = commandExecutorTxRequiresNew;
return this;
}
public RepositoryService getRepositoryService() {
return repositoryService;
}
public ProcessEngineConfigurationImpl setRepositoryService(RepositoryService repositoryService) {
this.repositoryService = repositoryService;
return this;
}
public RuntimeService getRuntimeService() {
return runtimeService;
}
public ProcessEngineConfigurationImpl setRuntimeService(RuntimeService runtimeService) {
this.runtimeService = runtimeService;
return this;
}
public HistoryService getHistoryService() {
return historyService;
}
public ProcessEngineConfigurationImpl setHistoryService(HistoryService historyService) {
this.historyService = historyService;
return this;
}
public IdentityService getIdentityService() {
return identityService;
}
public ProcessEngineConfigurationImpl setIdentityService(IdentityService identityService) {
this.identityService = identityService;
return this;
}
public TaskService getTaskService() {
return taskService;
}
public ProcessEngineConfigurationImpl setTaskService(TaskService taskService) {
this.taskService = taskService;
return this;
}
public FormService getFormService() {
return formService;
}
public ProcessEngineConfigurationImpl setFormService(FormService formService) {
this.formService = formService;
return this;
}
public ManagementService getManagementService() {
return managementService;
}
public AuthorizationService getAuthorizationService() {
return authorizationService;
}
public void setAuthorizationService(AuthorizationService authorizationService) {
this.authorizationService = authorizationService;
}
public ProcessEngineConfigurationImpl setManagementService(ManagementService managementService) {
this.managementService = managementService;
return this;
}
public CaseService getCaseService() {
return caseService;
}
public void setCaseService(CaseService caseService) {
this.caseService = caseService;
}
public FilterService getFilterService() {
return filterService;
}
public void setFilterService(FilterService filterService) {
this.filterService = filterService;
}
public ExternalTaskService getExternalTaskService() {
return externalTaskService;
}
public void setExternalTaskService(ExternalTaskService externalTaskService) {
this.externalTaskService = externalTaskService;
}
public DecisionService getDecisionService() {
return decisionService;
}
public OptimizeService getOptimizeService() {
return optimizeService;
}
public void setDecisionService(DecisionService decisionService) {
this.decisionService = decisionService;
}
public Map<Class<?>, SessionFactory> getSessionFactories() {
return sessionFactories;
}
public ProcessEngineConfigurationImpl setSessionFactories(Map<Class<?>, SessionFactory> sessionFactories) {
this.sessionFactories = sessionFactories;
return this;
}
public List<Deployer> getDeployers() {
return deployers;
}
public ProcessEngineConfigurationImpl setDeployers(List<Deployer> deployers) {
this.deployers = deployers;
return this;
}
public JobExecutor getJobExecutor() {
return jobExecutor;
}
public ProcessEngineConfigurationImpl setJobExecutor(JobExecutor jobExecutor) {
this.jobExecutor = jobExecutor;
return this;
}
public PriorityProvider<JobDeclaration<?, ?>> getJobPriorityProvider() {
return jobPriorityProvider;
}
public void setJobPriorityProvider(PriorityProvider<JobDeclaration<?, ?>> jobPriorityProvider) {
this.jobPriorityProvider = jobPriorityProvider;
}
public PriorityProvider<ExternalTaskActivityBehavior> getExternalTaskPriorityProvider() {
return externalTaskPriorityProvider;
}
public void setExternalTaskPriorityProvider(PriorityProvider<ExternalTaskActivityBehavior> externalTaskPriorityProvider) {
this.externalTaskPriorityProvider = externalTaskPriorityProvider;
}
public IdGenerator getIdGenerator() {
return idGenerator;
}
public ProcessEngineConfigurationImpl setIdGenerator(IdGenerator idGenerator) {
this.idGenerator = idGenerator;
return this;
}
public String getWsSyncFactoryClassName() {
return wsSyncFactoryClassName;
}
public ProcessEngineConfigurationImpl setWsSyncFactoryClassName(String wsSyncFactoryClassName) {
this.wsSyncFactoryClassName = wsSyncFactoryClassName;
return this;
}
public Map<String, FormEngine> getFormEngines() {
return formEngines;
}
public ProcessEngineConfigurationImpl setFormEngines(Map<String, FormEngine> formEngines) {
this.formEngines = formEngines;
return this;
}
public FormTypes getFormTypes() {
return formTypes;
}
public ProcessEngineConfigurationImpl setFormTypes(FormTypes formTypes) {
this.formTypes = formTypes;
return this;
}
public ScriptingEngines getScriptingEngines() {
return scriptingEngines;
}
public ProcessEngineConfigurationImpl setScriptingEngines(ScriptingEngines scriptingEngines) {
this.scriptingEngines = scriptingEngines;
return this;
}
public VariableSerializers getVariableSerializers() {
return variableSerializers;
}
public VariableSerializerFactory getFallbackSerializerFactory() {
return fallbackSerializerFactory;
}
public void setFallbackSerializerFactory(VariableSerializerFactory fallbackSerializerFactory) {
this.fallbackSerializerFactory = fallbackSerializerFactory;
}
public ProcessEngineConfigurationImpl setVariableTypes(VariableSerializers variableSerializers) {
this.variableSerializers = variableSerializers;
return this;
}
public ExpressionManager getExpressionManager() {
return expressionManager;
}
public ProcessEngineConfigurationImpl setExpressionManager(ExpressionManager expressionManager) {
this.expressionManager = expressionManager;
return this;
}
public BusinessCalendarManager getBusinessCalendarManager() {
return businessCalendarManager;
}
public ProcessEngineConfigurationImpl setBusinessCalendarManager(BusinessCalendarManager businessCalendarManager) {
this.businessCalendarManager = businessCalendarManager;
return this;
}
public CommandContextFactory getCommandContextFactory() {
return commandContextFactory;
}
public ProcessEngineConfigurationImpl setCommandContextFactory(CommandContextFactory commandContextFactory) {
this.commandContextFactory = commandContextFactory;
return this;
}
public TransactionContextFactory getTransactionContextFactory() {
return transactionContextFactory;
}
public ProcessEngineConfigurationImpl setTransactionContextFactory(TransactionContextFactory transactionContextFactory) {
this.transactionContextFactory = transactionContextFactory;
return this;
}
public BpmnParseFactory getBpmnParseFactory() {
return bpmnParseFactory;
}
public ProcessEngineConfigurationImpl setBpmnParseFactory(BpmnParseFactory bpmnParseFactory) {
this.bpmnParseFactory = bpmnParseFactory;
return this;
}
public List<Deployer> getCustomPreDeployers() {
return customPreDeployers;
}
public ProcessEngineConfigurationImpl setCustomPreDeployers(List<Deployer> customPreDeployers) {
this.customPreDeployers = customPreDeployers;
return this;
}
public List<Deployer> getCustomPostDeployers() {
return customPostDeployers;
}
public ProcessEngineConfigurationImpl setCustomPostDeployers(List<Deployer> customPostDeployers) {
this.customPostDeployers = customPostDeployers;
return this;
}
public void setCacheFactory(CacheFactory cacheFactory) {
this.cacheFactory = cacheFactory;
}
public void setCacheCapacity(int cacheCapacity) {
this.cacheCapacity = cacheCapacity;
}
public void setEnableFetchProcessDefinitionDescription(boolean enableFetchProcessDefinitionDescription) {
this.enableFetchProcessDefinitionDescription = enableFetchProcessDefinitionDescription;
}
public boolean getEnableFetchProcessDefinitionDescription() {
return this.enableFetchProcessDefinitionDescription;
}
public Permission getDefaultUserPermissionForTask() {
return defaultUserPermissionForTask;
}
public ProcessEngineConfigurationImpl setDefaultUserPermissionForTask(Permission defaultUserPermissionForTask) {
this.defaultUserPermissionForTask = defaultUserPermissionForTask;
return this;
}
public ProcessEngineConfigurationImpl setEnableHistoricInstancePermissions(boolean enable) {
this.enableHistoricInstancePermissions = enable;
return this;
}
public boolean isEnableHistoricInstancePermissions() {
return enableHistoricInstancePermissions;
}
public Map<String, JobHandler> getJobHandlers() {
return jobHandlers;
}
public ProcessEngineConfigurationImpl setJobHandlers(Map<String, JobHandler> jobHandlers) {
this.jobHandlers = jobHandlers;
return this;
}
public SqlSessionFactory getSqlSessionFactory() {
return sqlSessionFactory;
}
public ProcessEngineConfigurationImpl setSqlSessionFactory(SqlSessionFactory sqlSessionFactory) {
this.sqlSessionFactory = sqlSessionFactory;
return this;
}
public DbSqlSessionFactory getDbSqlSessionFactory() {
return dbSqlSessionFactory;
}
public ProcessEngineConfigurationImpl setDbSqlSessionFactory(DbSqlSessionFactory dbSqlSessionFactory) {
this.dbSqlSessionFactory = dbSqlSessionFactory;
return this;
}
public TransactionFactory getTransactionFactory() {
return transactionFactory;
}
public ProcessEngineConfigurationImpl setTransactionFactory(TransactionFactory transactionFactory) {
this.transactionFactory = transactionFactory;
return this;
}
public List<SessionFactory> getCustomSessionFactories() {
return customSessionFactories;
}
public ProcessEngineConfigurationImpl setCustomSessionFactories(List<SessionFactory> customSessionFactories) {
this.customSessionFactories = customSessionFactories;
return this;
}
public List<JobHandler> getCustomJobHandlers() {
return customJobHandlers;
}
public ProcessEngineConfigurationImpl setCustomJobHandlers(List<JobHandler> customJobHandlers) {
this.customJobHandlers = customJobHandlers;
return this;
}
public List<FormEngine> getCustomFormEngines() {
return customFormEngines;
}
public ProcessEngineConfigurationImpl setCustomFormEngines(List<FormEngine> customFormEngines) {
this.customFormEngines = customFormEngines;
return this;
}
public List<AbstractFormFieldType> getCustomFormTypes() {
return customFormTypes;
}
public ProcessEngineConfigurationImpl setCustomFormTypes(List<AbstractFormFieldType> customFormTypes) {
this.customFormTypes = customFormTypes;
return this;
}
public List<TypedValueSerializer> getCustomPreVariableSerializers() {
return customPreVariableSerializers;
}
public ProcessEngineConfigurationImpl setCustomPreVariableSerializers(List<TypedValueSerializer> customPreVariableTypes) {
this.customPreVariableSerializers = customPreVariableTypes;
return this;
}
public List<TypedValueSerializer> getCustomPostVariableSerializers() {
return customPostVariableSerializers;
}
public ProcessEngineConfigurationImpl setCustomPostVariableSerializers(List<TypedValueSerializer> customPostVariableTypes) {
this.customPostVariableSerializers = customPostVariableTypes;
return this;
}
public List<BpmnParseListener> getCustomPreBPMNParseListeners() {
return preParseListeners;
}
public void setCustomPreBPMNParseListeners(List<BpmnParseListener> preParseListeners) {
this.preParseListeners = preParseListeners;
}
public List<BpmnParseListener> getCustomPostBPMNParseListeners() {
return postParseListeners;
}
public void setCustomPostBPMNParseListeners(List<BpmnParseListener> postParseListeners) {
this.postParseListeners = postParseListeners;
}
/**
* @deprecated use {@link #getCustomPreBPMNParseListeners} instead.
*/
@Deprecated
public List<BpmnParseListener> getPreParseListeners() {
return preParseListeners;
}
/**
* @deprecated use {@link #setCustomPreBPMNParseListeners} instead.
*/
@Deprecated
public void setPreParseListeners(List<BpmnParseListener> preParseListeners) {
this.preParseListeners = preParseListeners;
}
/**
* @deprecated use {@link #getCustomPostBPMNParseListeners} instead.
*/
@Deprecated
public List<BpmnParseListener> getPostParseListeners() {
return postParseListeners;
}
/**
* @deprecated use {@link #setCustomPostBPMNParseListeners} instead.
*/
@Deprecated
public void setPostParseListeners(List<BpmnParseListener> postParseListeners) {
this.postParseListeners = postParseListeners;
}
public List<CmmnTransformListener> getCustomPreCmmnTransformListeners() {
return customPreCmmnTransformListeners;
}
public void setCustomPreCmmnTransformListeners(List<CmmnTransformListener> customPreCmmnTransformListeners) {
this.customPreCmmnTransformListeners = customPreCmmnTransformListeners;
}
public List<CmmnTransformListener> getCustomPostCmmnTransformListeners() {
return customPostCmmnTransformListeners;
}
public void setCustomPostCmmnTransformListeners(List<CmmnTransformListener> customPostCmmnTransformListeners) {
this.customPostCmmnTransformListeners = customPostCmmnTransformListeners;
}
public Map<Object, Object> getBeans() {
return beans;
}
public void setBeans(Map<Object, Object> beans) {
this.beans = beans;
}
@Override
public ProcessEngineConfigurationImpl setClassLoader(ClassLoader classLoader) {
super.setClassLoader(classLoader);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDatabaseType(String databaseType) {
super.setDatabaseType(databaseType);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDataSource(DataSource dataSource) {
super.setDataSource(dataSource);
return this;
}
@Override
public ProcessEngineConfigurationImpl setDatabaseSchemaUpdate(String databaseSchemaUpdate) {
super.setDatabaseSchemaUpdate(databaseSchemaUpdate);
return this;
}
@Override
public ProcessEngineConfigurationImpl setHistory(String history) {
super.setHistory(history);
return this;
}
@Override
public ProcessEngineConfigurationImpl setIdBlockSize(int idBlockSize) {
super.setIdBlockSize(idBlockSize);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcDriver(String jdbcDriver) {
super.setJdbcDriver(jdbcDriver);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPassword(String jdbcPassword) {
super.setJdbcPassword(jdbcPassword);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcUrl(String jdbcUrl) {
super.setJdbcUrl(jdbcUrl);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcUsername(String jdbcUsername) {
super.setJdbcUsername(jdbcUsername);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJobExecutorActivate(boolean jobExecutorActivate) {
super.setJobExecutorActivate(jobExecutorActivate);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerDefaultFrom(String mailServerDefaultFrom) {
super.setMailServerDefaultFrom(mailServerDefaultFrom);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerHost(String mailServerHost) {
super.setMailServerHost(mailServerHost);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerPassword(String mailServerPassword) {
super.setMailServerPassword(mailServerPassword);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerPort(int mailServerPort) {
super.setMailServerPort(mailServerPort);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerUseTLS(boolean useTLS) {
super.setMailServerUseTLS(useTLS);
return this;
}
@Override
public ProcessEngineConfigurationImpl setMailServerUsername(String mailServerUsername) {
super.setMailServerUsername(mailServerUsername);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxActiveConnections(int jdbcMaxActiveConnections) {
super.setJdbcMaxActiveConnections(jdbcMaxActiveConnections);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxCheckoutTime(int jdbcMaxCheckoutTime) {
super.setJdbcMaxCheckoutTime(jdbcMaxCheckoutTime);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxIdleConnections(int jdbcMaxIdleConnections) {
super.setJdbcMaxIdleConnections(jdbcMaxIdleConnections);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcMaxWaitTime(int jdbcMaxWaitTime) {
super.setJdbcMaxWaitTime(jdbcMaxWaitTime);
return this;
}
@Override
public ProcessEngineConfigurationImpl setTransactionsExternallyManaged(boolean transactionsExternallyManaged) {
super.setTransactionsExternallyManaged(transactionsExternallyManaged);
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaEntityManagerFactory(Object jpaEntityManagerFactory) {
this.jpaEntityManagerFactory = jpaEntityManagerFactory;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaHandleTransaction(boolean jpaHandleTransaction) {
this.jpaHandleTransaction = jpaHandleTransaction;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJpaCloseEntityManager(boolean jpaCloseEntityManager) {
this.jpaCloseEntityManager = jpaCloseEntityManager;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingEnabled(boolean jdbcPingEnabled) {
this.jdbcPingEnabled = jdbcPingEnabled;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingQuery(String jdbcPingQuery) {
this.jdbcPingQuery = jdbcPingQuery;
return this;
}
@Override
public ProcessEngineConfigurationImpl setJdbcPingConnectionNotUsedFor(int jdbcPingNotUsedFor) {
this.jdbcPingConnectionNotUsedFor = jdbcPingNotUsedFor;
return this;
}
public boolean isDbIdentityUsed() {
return isDbIdentityUsed;
}
public void setDbIdentityUsed(boolean isDbIdentityUsed) {
this.isDbIdentityUsed = isDbIdentityUsed;
}
public boolean isDbHistoryUsed() {
return isDbHistoryUsed;
}
public void setDbHistoryUsed(boolean isDbHistoryUsed) {
this.isDbHistoryUsed = isDbHistoryUsed;
}
public List<ResolverFactory> getResolverFactories() {
return resolverFactories;
}
public void setResolverFactories(List<ResolverFactory> resolverFactories) {
this.resolverFactories = resolverFactories;
}
public DeploymentCache getDeploymentCache() {
return deploymentCache;
}
public void setDeploymentCache(DeploymentCache deploymentCache) {
this.deploymentCache = deploymentCache;
}
public DeploymentHandlerFactory getDeploymentHandlerFactory() {
return deploymentHandlerFactory;
}
public ProcessEngineConfigurationImpl setDeploymentHandlerFactory(DeploymentHandlerFactory deploymentHandlerFactory) {
this.deploymentHandlerFactory = deploymentHandlerFactory;
return this;
}
public ProcessEngineConfigurationImpl setDelegateInterceptor(DelegateInterceptor delegateInterceptor) {
this.delegateInterceptor = delegateInterceptor;
return this;
}
public DelegateInterceptor getDelegateInterceptor() {
return delegateInterceptor;
}
public RejectedJobsHandler getCustomRejectedJobsHandler() {
return customRejectedJobsHandler;
}
public ProcessEngineConfigurationImpl setCustomRejectedJobsHandler(RejectedJobsHandler customRejectedJobsHandler) {
this.customRejectedJobsHandler = customRejectedJobsHandler;
return this;
}
public EventHandler getEventHandler(String eventType) {
return eventHandlers.get(eventType);
}
public void setEventHandlers(Map<String, EventHandler> eventHandlers) {
this.eventHandlers = eventHandlers;
}
public Map<String, EventHandler> getEventHandlers() {
return eventHandlers;
}
public List<EventHandler> getCustomEventHandlers() {
return customEventHandlers;
}
public void setCustomEventHandlers(List<EventHandler> customEventHandlers) {
this.customEventHandlers = customEventHandlers;
}
public FailedJobCommandFactory getFailedJobCommandFactory() {
return failedJobCommandFactory;
}
public ProcessEngineConfigurationImpl setFailedJobCommandFactory(FailedJobCommandFactory failedJobCommandFactory) {
this.failedJobCommandFactory = failedJobCommandFactory;
return this;
}
/**
* Allows configuring a database table prefix which is used for all runtime operations of the process engine.
* For example, if you specify a prefix named 'PRE1.', the engine will query for executions in a table named
* 'PRE1.ACT_RU_EXECUTION'.
* <p>
* <strong>NOTE: the prefix is not respected by automatic database schema management. If you use
* {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_CREATE_DROP}
* or {@link ProcessEngineConfiguration#DB_SCHEMA_UPDATE_TRUE}, the engine will create the database tables
* using the default names, regardless of the prefix configured here.</strong>
*
* @since 5.9
*/
public ProcessEngineConfiguration setDatabaseTablePrefix(String databaseTablePrefix) {
this.databaseTablePrefix = databaseTablePrefix;
return this;
}
public String getDatabaseTablePrefix() {
return databaseTablePrefix;
}
public boolean isCreateDiagramOnDeploy() {
return isCreateDiagramOnDeploy;
}
public ProcessEngineConfiguration setCreateDiagramOnDeploy(boolean createDiagramOnDeploy) {
this.isCreateDiagramOnDeploy = createDiagramOnDeploy;
return this;
}
public String getDatabaseSchema() {
return databaseSchema;
}
public void setDatabaseSchema(String databaseSchema) {
this.databaseSchema = databaseSchema;
}
public DataSource getIdGeneratorDataSource() {
return idGeneratorDataSource;
}
public void setIdGeneratorDataSource(DataSource idGeneratorDataSource) {
this.idGeneratorDataSource = idGeneratorDataSource;
}
public String getIdGeneratorDataSourceJndiName() {
return idGeneratorDataSourceJndiName;
}
public void setIdGeneratorDataSourceJndiName(String idGeneratorDataSourceJndiName) {
this.idGeneratorDataSourceJndiName = idGeneratorDataSourceJndiName;
}
public ProcessApplicationManager getProcessApplicationManager() {
return processApplicationManager;
}
public void setProcessApplicationManager(ProcessApplicationManager processApplicationManager) {
this.processApplicationManager = processApplicationManager;
}
public CommandExecutor getCommandExecutorSchemaOperations() {
return commandExecutorSchemaOperations;
}
public void setCommandExecutorSchemaOperations(CommandExecutor commandExecutorSchemaOperations) {
this.commandExecutorSchemaOperations = commandExecutorSchemaOperations;
}
public CorrelationHandler getCorrelationHandler() {
return correlationHandler;
}
public void setCorrelationHandler(CorrelationHandler correlationHandler) {
this.correlationHandler = correlationHandler;
}
public ConditionHandler getConditionHandler() {
return conditionHandler;
}
public void setConditionHandler(ConditionHandler conditionHandler) {
this.conditionHandler = conditionHandler;
}
public ProcessEngineConfigurationImpl setHistoryEventHandler(HistoryEventHandler historyEventHandler) {
this.historyEventHandler = historyEventHandler;
return this;
}
public HistoryEventHandler getHistoryEventHandler() {
return historyEventHandler;
}
public boolean isEnableDefaultDbHistoryEventHandler() {
return enableDefaultDbHistoryEventHandler;
}
public void setEnableDefaultDbHistoryEventHandler(boolean enableDefaultDbHistoryEventHandler) {
this.enableDefaultDbHistoryEventHandler = enableDefaultDbHistoryEventHandler;
}
public List<HistoryEventHandler> getCustomHistoryEventHandlers() {
return customHistoryEventHandlers;
}
public void setCustomHistoryEventHandlers(List<HistoryEventHandler> customHistoryEventHandlers) {
this.customHistoryEventHandlers = customHistoryEventHandlers;
}
public IncidentHandler getIncidentHandler(String incidentType) {
return incidentHandlers.get(incidentType);
}
public Map<String, IncidentHandler> getIncidentHandlers() {
return incidentHandlers;
}
public void setIncidentHandlers(Map<String, IncidentHandler> incidentHandlers) {
this.incidentHandlers = incidentHandlers;
}
public List<IncidentHandler> getCustomIncidentHandlers() {
return customIncidentHandlers;
}
public void setCustomIncidentHandlers(List<IncidentHandler> customIncidentHandlers) {
this.customIncidentHandlers = customIncidentHandlers;
}
public Map<String, BatchJobHandler<?>> getBatchHandlers() {
return batchHandlers;
}
public void setBatchHandlers(Map<String, BatchJobHandler<?>> batchHandlers) {
this.batchHandlers = batchHandlers;
}
public List<BatchJobHandler<?>> getCustomBatchJobHandlers() {
return customBatchJobHandlers;
}
public void setCustomBatchJobHandlers(List<BatchJobHandler<?>> customBatchJobHandlers) {
this.customBatchJobHandlers = customBatchJobHandlers;
}
public int getBatchJobsPerSeed() {
return batchJobsPerSeed;
}
public void setBatchJobsPerSeed(int batchJobsPerSeed) {
this.batchJobsPerSeed = batchJobsPerSeed;
}
public Map<String, Integer> getInvocationsPerBatchJobByBatchType() {
return invocationsPerBatchJobByBatchType;
}
public ProcessEngineConfigurationImpl setInvocationsPerBatchJobByBatchType(Map<String, Integer> invocationsPerBatchJobByBatchType) {
this.invocationsPerBatchJobByBatchType = invocationsPerBatchJobByBatchType;
return this;
}
public int getInvocationsPerBatchJob() {
return invocationsPerBatchJob;
}
public void setInvocationsPerBatchJob(int invocationsPerBatchJob) {
this.invocationsPerBatchJob = invocationsPerBatchJob;
}
public int getBatchPollTime() {
return batchPollTime;
}
public void setBatchPollTime(int batchPollTime) {
this.batchPollTime = batchPollTime;
}
public long getBatchJobPriority() {
return batchJobPriority;
}
public void setBatchJobPriority(long batchJobPriority) {
this.batchJobPriority = batchJobPriority;
}
public SessionFactory getIdentityProviderSessionFactory() {
return identityProviderSessionFactory;
}
public void setIdentityProviderSessionFactory(SessionFactory identityProviderSessionFactory) {
this.identityProviderSessionFactory = identityProviderSessionFactory;
}
public SaltGenerator getSaltGenerator() {
return saltGenerator;
}
public void setSaltGenerator(SaltGenerator saltGenerator) {
this.saltGenerator = saltGenerator;
}
public void setPasswordEncryptor(PasswordEncryptor passwordEncryptor) {
this.passwordEncryptor = passwordEncryptor;
}
public PasswordEncryptor getPasswordEncryptor() {
return passwordEncryptor;
}
public List<PasswordEncryptor> getCustomPasswordChecker() {
return customPasswordChecker;
}
public void setCustomPasswordChecker(List<PasswordEncryptor> customPasswordChecker) {
this.customPasswordChecker = customPasswordChecker;
}
public PasswordManager getPasswordManager() {
return passwordManager;
}
public void setPasswordManager(PasswordManager passwordManager) {
this.passwordManager = passwordManager;
}
public Set<String> getRegisteredDeployments() {
return registeredDeployments;
}
public void setRegisteredDeployments(Set<String> registeredDeployments) {
this.registeredDeployments = registeredDeployments;
}
public ResourceAuthorizationProvider getResourceAuthorizationProvider() {
return resourceAuthorizationProvider;
}
public void setResourceAuthorizationProvider(ResourceAuthorizationProvider resourceAuthorizationProvider) {
this.resourceAuthorizationProvider = resourceAuthorizationProvider;
}
public PermissionProvider getPermissionProvider() {
return permissionProvider;
}
public void setPermissionProvider(PermissionProvider permissionProvider) {
this.permissionProvider = permissionProvider;
}
public List<ProcessEnginePlugin> getProcessEnginePlugins() {
return processEnginePlugins;
}
public void setProcessEnginePlugins(List<ProcessEnginePlugin> processEnginePlugins) {
this.processEnginePlugins = processEnginePlugins;
}
public ProcessEngineConfigurationImpl setHistoryEventProducer(HistoryEventProducer historyEventProducer) {
this.historyEventProducer = historyEventProducer;
return this;
}
public HistoryEventProducer getHistoryEventProducer() {
return historyEventProducer;
}
public ProcessEngineConfigurationImpl setCmmnHistoryEventProducer(CmmnHistoryEventProducer cmmnHistoryEventProducer) {
this.cmmnHistoryEventProducer = cmmnHistoryEventProducer;
return this;
}
public CmmnHistoryEventProducer getCmmnHistoryEventProducer() {
return cmmnHistoryEventProducer;
}
public ProcessEngineConfigurationImpl setDmnHistoryEventProducer(DmnHistoryEventProducer dmnHistoryEventProducer) {
this.dmnHistoryEventProducer = dmnHistoryEventProducer;
return this;
}
public DmnHistoryEventProducer getDmnHistoryEventProducer() {
return dmnHistoryEventProducer;
}
public Map<String, Class<? extends FormFieldValidator>> getCustomFormFieldValidators() {
return customFormFieldValidators;
}
public void setCustomFormFieldValidators(Map<String, Class<? extends FormFieldValidator>> customFormFieldValidators) {
this.customFormFieldValidators = customFormFieldValidators;
}
public void setFormValidators(FormValidators formValidators) {
this.formValidators = formValidators;
}
public FormValidators getFormValidators() {
return formValidators;
}
public boolean isExecutionTreePrefetchEnabled() {
return isExecutionTreePrefetchEnabled;
}
public void setExecutionTreePrefetchEnabled(boolean isExecutionTreePrefetchingEnabled) {
this.isExecutionTreePrefetchEnabled = isExecutionTreePrefetchingEnabled;
}
public ProcessEngineImpl getProcessEngine() {
return processEngine;
}
/**
* If set to true, the process engine will save all script variables (created from JavaScript, Groovy ...)
* as process variables.
*/
public void setAutoStoreScriptVariables(boolean autoStoreScriptVariables) {
this.autoStoreScriptVariables = autoStoreScriptVariables;
}
/**
* @return true if the process engine should save all script variables (created from JavaScript, Groovy ...)
* as process variables.
*/
public boolean isAutoStoreScriptVariables() {
return autoStoreScriptVariables;
}
/**
* If set to true, the process engine will attempt to pre-compile script sources at runtime
* to optimize script task execution performance.
*/
public void setEnableScriptCompilation(boolean enableScriptCompilation) {
this.enableScriptCompilation = enableScriptCompilation;
}
/**
* @return true if compilation of script sources is enabled, false otherwise.
*/
public boolean isEnableScriptCompilation() {
return enableScriptCompilation;
}
public boolean isEnableGracefulDegradationOnContextSwitchFailure() {
return enableGracefulDegradationOnContextSwitchFailure;
}
/**
* <p>If set to true, the process engine will tolerate certain exceptions that may result
* from the fact that it cannot switch to the context of a process application that has made
* a deployment.</p>
*
* <p>Affects the following scenarios:</p>
* <ul>
* <li><b>Determining job priorities</b>: uses a default priority in case an expression fails to evaluate</li>
* </ul>
*/
public void setEnableGracefulDegradationOnContextSwitchFailure(boolean enableGracefulDegradationOnContextSwitchFailure) {
this.enableGracefulDegradationOnContextSwitchFailure = enableGracefulDegradationOnContextSwitchFailure;
}
/**
* @return true if the process engine acquires an exclusive lock when creating a deployment.
*/
public boolean isDeploymentLockUsed() {
return isDeploymentLockUsed;
}
/**
* If set to true, the process engine will acquire an exclusive lock when creating a deployment.
* This ensures that {@link DeploymentBuilder#enableDuplicateFiltering()} works correctly in a clustered environment.
*/
public void setDeploymentLockUsed(boolean isDeploymentLockUsed) {
this.isDeploymentLockUsed = isDeploymentLockUsed;
}
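// Usage sketch (not part of the original source): duplicate filtering on deployment, which is what
// the exclusive deployment lock protects in a clustered setup. Assumes a RepositoryService instance
// is available as "repositoryService".
//
//   repositoryService.createDeployment()
//     .enableDuplicateFiltering(true)
//     .addClasspathResource("invoice.bpmn")
//     .deploy();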
/**
* @return true if deployment processing must be synchronized
*/
public boolean isDeploymentSynchronized() {
return isDeploymentSynchronized;
}
/**
* Sets if deployment processing must be synchronized.
* @param deploymentSynchronized {@code true} when deployment must be synchronized,
* {@code false} when several deployments may be processed in parallel
*/
public void setDeploymentSynchronized(boolean deploymentSynchronized) {
isDeploymentSynchronized = deploymentSynchronized;
}
public boolean isCmmnEnabled() {
return cmmnEnabled;
}
public void setCmmnEnabled(boolean cmmnEnabled) {
this.cmmnEnabled = cmmnEnabled;
}
public boolean isDmnEnabled() {
return dmnEnabled;
}
public void setDmnEnabled(boolean dmnEnabled) {
this.dmnEnabled = dmnEnabled;
}
public boolean isStandaloneTasksEnabled() {
return standaloneTasksEnabled;
}
public ProcessEngineConfigurationImpl setStandaloneTasksEnabled(boolean standaloneTasksEnabled) {
this.standaloneTasksEnabled = standaloneTasksEnabled;
return this;
}
public ScriptFactory getScriptFactory() {
return scriptFactory;
}
public ScriptingEnvironment getScriptingEnvironment() {
return scriptingEnvironment;
}
public void setScriptFactory(ScriptFactory scriptFactory) {
this.scriptFactory = scriptFactory;
}
public void setScriptingEnvironment(ScriptingEnvironment scriptingEnvironment) {
this.scriptingEnvironment = scriptingEnvironment;
}
public List<ScriptEnvResolver> getEnvScriptResolvers() {
return scriptEnvResolvers;
}
public void setEnvScriptResolvers(List<ScriptEnvResolver> scriptEnvResolvers) {
this.scriptEnvResolvers = scriptEnvResolvers;
}
public ProcessEngineConfiguration setArtifactFactory(ArtifactFactory artifactFactory) {
this.artifactFactory = artifactFactory;
return this;
}
public ArtifactFactory getArtifactFactory() {
return artifactFactory;
}
public String getDefaultSerializationFormat() {
return defaultSerializationFormat;
}
public ProcessEngineConfigurationImpl setDefaultSerializationFormat(String defaultSerializationFormat) {
this.defaultSerializationFormat = defaultSerializationFormat;
return this;
}
public boolean isJavaSerializationFormatEnabled() {
return javaSerializationFormatEnabled;
}
public void setJavaSerializationFormatEnabled(boolean javaSerializationFormatEnabled) {
this.javaSerializationFormatEnabled = javaSerializationFormatEnabled;
}
public ProcessEngineConfigurationImpl setDefaultCharsetName(String defaultCharsetName) {
this.defaultCharsetName = defaultCharsetName;
return this;
}
public ProcessEngineConfigurationImpl setDefaultCharset(Charset defaultCharset) {
this.defaultCharset = defaultCharset;
return this;
}
public Charset getDefaultCharset() {
return defaultCharset;
}
public boolean isDbEntityCacheReuseEnabled() {
return isDbEntityCacheReuseEnabled;
}
public ProcessEngineConfigurationImpl setDbEntityCacheReuseEnabled(boolean isDbEntityCacheReuseEnabled) {
this.isDbEntityCacheReuseEnabled = isDbEntityCacheReuseEnabled;
return this;
}
public DbEntityCacheKeyMapping getDbEntityCacheKeyMapping() {
return dbEntityCacheKeyMapping;
}
public ProcessEngineConfigurationImpl setDbEntityCacheKeyMapping(DbEntityCacheKeyMapping dbEntityCacheKeyMapping) {
this.dbEntityCacheKeyMapping = dbEntityCacheKeyMapping;
return this;
}
public ProcessEngineConfigurationImpl setCustomHistoryLevels(List<HistoryLevel> customHistoryLevels) {
this.customHistoryLevels = customHistoryLevels;
return this;
}
public List<HistoryLevel> getHistoryLevels() {
return historyLevels;
}
public List<HistoryLevel> getCustomHistoryLevels() {
return customHistoryLevels;
}
public boolean isInvokeCustomVariableListeners() {
return isInvokeCustomVariableListeners;
}
public ProcessEngineConfigurationImpl setInvokeCustomVariableListeners(boolean isInvokeCustomVariableListeners) {
this.isInvokeCustomVariableListeners = isInvokeCustomVariableListeners;
return this;
}
public void close() {
if (forceCloseMybatisConnectionPool
&& dataSource instanceof PooledDataSource) {
// ACT-233: connection pool of Ibatis is not properly initialized if this is not called!
((PooledDataSource) dataSource).forceCloseAll();
}
}
public MetricsRegistry getMetricsRegistry() {
return metricsRegistry;
}
public ProcessEngineConfigurationImpl setMetricsRegistry(MetricsRegistry metricsRegistry) {
this.metricsRegistry = metricsRegistry;
return this;
}
public ProcessEngineConfigurationImpl setMetricsEnabled(boolean isMetricsEnabled) {
this.isMetricsEnabled = isMetricsEnabled;
return this;
}
public boolean isMetricsEnabled() {
return isMetricsEnabled;
}
public DbMetricsReporter getDbMetricsReporter() {
return dbMetricsReporter;
}
public ProcessEngineConfigurationImpl setDbMetricsReporter(DbMetricsReporter dbMetricsReporter) {
this.dbMetricsReporter = dbMetricsReporter;
return this;
}
public boolean isDbMetricsReporterActivate() {
return isDbMetricsReporterActivate;
}
public ProcessEngineConfigurationImpl setDbMetricsReporterActivate(boolean isDbMetricsReporterEnabled) {
this.isDbMetricsReporterActivate = isDbMetricsReporterEnabled;
return this;
}
/**
* @deprecated use {@link #getHostnameProvider()} instead.
*/
@Deprecated
public MetricsReporterIdProvider getMetricsReporterIdProvider() {
return metricsReporterIdProvider;
}
/**
* @deprecated use {@link #setHostnameProvider(HostnameProvider)} instead.
*/
@Deprecated
public ProcessEngineConfigurationImpl setMetricsReporterIdProvider(MetricsReporterIdProvider metricsReporterIdProvider) {
this.metricsReporterIdProvider = metricsReporterIdProvider;
return this;
}
public String getHostname() {
return hostname;
}
public ProcessEngineConfigurationImpl setHostname(String hostname) {
this.hostname = hostname;
return this;
}
public HostnameProvider getHostnameProvider() {
return hostnameProvider;
}
public ProcessEngineConfigurationImpl setHostnameProvider(HostnameProvider hostnameProvider) {
this.hostnameProvider = hostnameProvider;
return this;
}
public boolean isTaskMetricsEnabled() {
return isTaskMetricsEnabled;
}
public ProcessEngineConfigurationImpl setTaskMetricsEnabled(boolean isTaskMetricsEnabled) {
this.isTaskMetricsEnabled = isTaskMetricsEnabled;
return this;
}
public boolean isEnableScriptEngineCaching() {
return enableScriptEngineCaching;
}
public ProcessEngineConfigurationImpl setEnableScriptEngineCaching(boolean enableScriptEngineCaching) {
this.enableScriptEngineCaching = enableScriptEngineCaching;
return this;
}
public boolean isEnableFetchScriptEngineFromProcessApplication() {
return enableFetchScriptEngineFromProcessApplication;
}
public ProcessEngineConfigurationImpl setEnableFetchScriptEngineFromProcessApplication(boolean enable) {
this.enableFetchScriptEngineFromProcessApplication = enable;
return this;
}
public boolean isEnableExpressionsInAdhocQueries() {
return enableExpressionsInAdhocQueries;
}
public void setEnableExpressionsInAdhocQueries(boolean enableExpressionsInAdhocQueries) {
this.enableExpressionsInAdhocQueries = enableExpressionsInAdhocQueries;
}
public boolean isEnableExpressionsInStoredQueries() {
return enableExpressionsInStoredQueries;
}
public void setEnableExpressionsInStoredQueries(boolean enableExpressionsInStoredQueries) {
this.enableExpressionsInStoredQueries = enableExpressionsInStoredQueries;
}
public boolean isEnableXxeProcessing() {
return enableXxeProcessing;
}
public void setEnableXxeProcessing(boolean enableXxeProcessing) {
this.enableXxeProcessing = enableXxeProcessing;
}
public ProcessEngineConfigurationImpl setBpmnStacktraceVerbose(boolean isBpmnStacktraceVerbose) {
this.isBpmnStacktraceVerbose = isBpmnStacktraceVerbose;
return this;
}
public boolean isBpmnStacktraceVerbose() {
return this.isBpmnStacktraceVerbose;
}
public boolean isForceCloseMybatisConnectionPool() {
return forceCloseMybatisConnectionPool;
}
public ProcessEngineConfigurationImpl setForceCloseMybatisConnectionPool(boolean forceCloseMybatisConnectionPool) {
this.forceCloseMybatisConnectionPool = forceCloseMybatisConnectionPool;
return this;
}
public boolean isRestrictUserOperationLogToAuthenticatedUsers() {
return restrictUserOperationLogToAuthenticatedUsers;
}
public ProcessEngineConfigurationImpl setRestrictUserOperationLogToAuthenticatedUsers(boolean restrictUserOperationLogToAuthenticatedUsers) {
this.restrictUserOperationLogToAuthenticatedUsers = restrictUserOperationLogToAuthenticatedUsers;
return this;
}
public ProcessEngineConfigurationImpl setTenantIdProvider(TenantIdProvider tenantIdProvider) {
this.tenantIdProvider = tenantIdProvider;
return this;
}
public TenantIdProvider getTenantIdProvider() {
return this.tenantIdProvider;
}
public void setMigrationActivityMatcher(MigrationActivityMatcher migrationActivityMatcher) {
this.migrationActivityMatcher = migrationActivityMatcher;
}
public MigrationActivityMatcher getMigrationActivityMatcher() {
return migrationActivityMatcher;
}
public void setCustomPreMigrationActivityValidators(List<MigrationActivityValidator> customPreMigrationActivityValidators) {
this.customPreMigrationActivityValidators = customPreMigrationActivityValidators;
}
public List<MigrationActivityValidator> getCustomPreMigrationActivityValidators() {
return customPreMigrationActivityValidators;
}
public void setCustomPostMigrationActivityValidators(List<MigrationActivityValidator> customPostMigrationActivityValidators) {
this.customPostMigrationActivityValidators = customPostMigrationActivityValidators;
}
public List<MigrationActivityValidator> getCustomPostMigrationActivityValidators() {
return customPostMigrationActivityValidators;
}
public List<MigrationActivityValidator> getDefaultMigrationActivityValidators() {
List<MigrationActivityValidator> migrationActivityValidators = new ArrayList<>();
migrationActivityValidators.add(SupportedActivityValidator.INSTANCE);
migrationActivityValidators.add(SupportedPassiveEventTriggerActivityValidator.INSTANCE);
migrationActivityValidators.add(NoCompensationHandlerActivityValidator.INSTANCE);
return migrationActivityValidators;
}
public void setMigrationInstructionGenerator(MigrationInstructionGenerator migrationInstructionGenerator) {
this.migrationInstructionGenerator = migrationInstructionGenerator;
}
public MigrationInstructionGenerator getMigrationInstructionGenerator() {
return migrationInstructionGenerator;
}
public void setMigrationInstructionValidators(List<MigrationInstructionValidator> migrationInstructionValidators) {
this.migrationInstructionValidators = migrationInstructionValidators;
}
public List<MigrationInstructionValidator> getMigrationInstructionValidators() {
return migrationInstructionValidators;
}
public void setCustomPostMigrationInstructionValidators(List<MigrationInstructionValidator> customPostMigrationInstructionValidators) {
this.customPostMigrationInstructionValidators = customPostMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getCustomPostMigrationInstructionValidators() {
return customPostMigrationInstructionValidators;
}
public void setCustomPreMigrationInstructionValidators(List<MigrationInstructionValidator> customPreMigrationInstructionValidators) {
this.customPreMigrationInstructionValidators = customPreMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getCustomPreMigrationInstructionValidators() {
return customPreMigrationInstructionValidators;
}
public List<MigrationInstructionValidator> getDefaultMigrationInstructionValidators() {
List<MigrationInstructionValidator> migrationInstructionValidators = new ArrayList<>();
migrationInstructionValidators.add(new SameBehaviorInstructionValidator());
migrationInstructionValidators.add(new SameEventTypeValidator());
migrationInstructionValidators.add(new OnlyOnceMappedActivityInstructionValidator());
migrationInstructionValidators.add(new CannotAddMultiInstanceBodyValidator());
migrationInstructionValidators.add(new CannotAddMultiInstanceInnerActivityValidator());
migrationInstructionValidators.add(new CannotRemoveMultiInstanceInnerActivityValidator());
migrationInstructionValidators.add(new GatewayMappingValidator());
migrationInstructionValidators.add(new SameEventScopeInstructionValidator());
migrationInstructionValidators.add(new UpdateEventTriggersValidator());
migrationInstructionValidators.add(new AdditionalFlowScopeInstructionValidator());
migrationInstructionValidators.add(new ConditionalEventUpdateEventTriggerValidator());
return migrationInstructionValidators;
}
public void setMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators) {
this.migratingActivityInstanceValidators = migratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getMigratingActivityInstanceValidators() {
return migratingActivityInstanceValidators;
}
public void setCustomPostMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPostMigratingActivityInstanceValidators) {
this.customPostMigratingActivityInstanceValidators = customPostMigratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getCustomPostMigratingActivityInstanceValidators() {
return customPostMigratingActivityInstanceValidators;
}
public void setCustomPreMigratingActivityInstanceValidators(List<MigratingActivityInstanceValidator> customPreMigratingActivityInstanceValidators) {
this.customPreMigratingActivityInstanceValidators = customPreMigratingActivityInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getCustomPreMigratingActivityInstanceValidators() {
return customPreMigratingActivityInstanceValidators;
}
public List<MigratingTransitionInstanceValidator> getMigratingTransitionInstanceValidators() {
return migratingTransitionInstanceValidators;
}
public List<MigratingCompensationInstanceValidator> getMigratingCompensationInstanceValidators() {
return migratingCompensationInstanceValidators;
}
public List<MigratingActivityInstanceValidator> getDefaultMigratingActivityInstanceValidators() {
List<MigratingActivityInstanceValidator> migratingActivityInstanceValidators = new ArrayList<>();
migratingActivityInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingActivityInstanceValidators.add(new VariableConflictActivityInstanceValidator());
migratingActivityInstanceValidators.add(new SupportedActivityInstanceValidator());
return migratingActivityInstanceValidators;
}
public List<MigratingTransitionInstanceValidator> getDefaultMigratingTransitionInstanceValidators() {
List<MigratingTransitionInstanceValidator> migratingTransitionInstanceValidators = new ArrayList<>();
migratingTransitionInstanceValidators.add(new NoUnmappedLeafInstanceValidator());
migratingTransitionInstanceValidators.add(new AsyncAfterMigrationValidator());
migratingTransitionInstanceValidators.add(new AsyncProcessStartMigrationValidator());
migratingTransitionInstanceValidators.add(new AsyncMigrationValidator());
return migratingTransitionInstanceValidators;
}
public List<CommandChecker> getCommandCheckers() {
return commandCheckers;
}
public void setCommandCheckers(List<CommandChecker> commandCheckers) {
this.commandCheckers = commandCheckers;
}
public ProcessEngineConfigurationImpl setUseSharedSqlSessionFactory(boolean isUseSharedSqlSessionFactory) {
this.isUseSharedSqlSessionFactory = isUseSharedSqlSessionFactory;
return this;
}
public boolean isUseSharedSqlSessionFactory() {
return isUseSharedSqlSessionFactory;
}
public boolean getDisableStrictCallActivityValidation() {
return disableStrictCallActivityValidation;
}
public void setDisableStrictCallActivityValidation(boolean disableStrictCallActivityValidation) {
this.disableStrictCallActivityValidation = disableStrictCallActivityValidation;
}
public String getHistoryCleanupBatchWindowStartTime() {
return historyCleanupBatchWindowStartTime;
}
public void setHistoryCleanupBatchWindowStartTime(String historyCleanupBatchWindowStartTime) {
this.historyCleanupBatchWindowStartTime = historyCleanupBatchWindowStartTime;
}
public String getHistoryCleanupBatchWindowEndTime() {
return historyCleanupBatchWindowEndTime;
}
public void setHistoryCleanupBatchWindowEndTime(String historyCleanupBatchWindowEndTime) {
this.historyCleanupBatchWindowEndTime = historyCleanupBatchWindowEndTime;
}
public String getMondayHistoryCleanupBatchWindowStartTime() {
return mondayHistoryCleanupBatchWindowStartTime;
}
public void setMondayHistoryCleanupBatchWindowStartTime(String mondayHistoryCleanupBatchWindowStartTime) {
this.mondayHistoryCleanupBatchWindowStartTime = mondayHistoryCleanupBatchWindowStartTime;
}
public String getMondayHistoryCleanupBatchWindowEndTime() {
return mondayHistoryCleanupBatchWindowEndTime;
}
public void setMondayHistoryCleanupBatchWindowEndTime(String mondayHistoryCleanupBatchWindowEndTime) {
this.mondayHistoryCleanupBatchWindowEndTime = mondayHistoryCleanupBatchWindowEndTime;
}
public String getTuesdayHistoryCleanupBatchWindowStartTime() {
return tuesdayHistoryCleanupBatchWindowStartTime;
}
public void setTuesdayHistoryCleanupBatchWindowStartTime(String tuesdayHistoryCleanupBatchWindowStartTime) {
this.tuesdayHistoryCleanupBatchWindowStartTime = tuesdayHistoryCleanupBatchWindowStartTime;
}
public String getTuesdayHistoryCleanupBatchWindowEndTime() {
return tuesdayHistoryCleanupBatchWindowEndTime;
}
public void setTuesdayHistoryCleanupBatchWindowEndTime(String tuesdayHistoryCleanupBatchWindowEndTime) {
this.tuesdayHistoryCleanupBatchWindowEndTime = tuesdayHistoryCleanupBatchWindowEndTime;
}
public String getWednesdayHistoryCleanupBatchWindowStartTime() {
return wednesdayHistoryCleanupBatchWindowStartTime;
}
public void setWednesdayHistoryCleanupBatchWindowStartTime(String wednesdayHistoryCleanupBatchWindowStartTime) {
this.wednesdayHistoryCleanupBatchWindowStartTime = wednesdayHistoryCleanupBatchWindowStartTime;
}
public String getWednesdayHistoryCleanupBatchWindowEndTime() {
return wednesdayHistoryCleanupBatchWindowEndTime;
}
public void setWednesdayHistoryCleanupBatchWindowEndTime(String wednesdayHistoryCleanupBatchWindowEndTime) {
this.wednesdayHistoryCleanupBatchWindowEndTime = wednesdayHistoryCleanupBatchWindowEndTime;
}
public String getThursdayHistoryCleanupBatchWindowStartTime() {
return thursdayHistoryCleanupBatchWindowStartTime;
}
public void setThursdayHistoryCleanupBatchWindowStartTime(String thursdayHistoryCleanupBatchWindowStartTime) {
this.thursdayHistoryCleanupBatchWindowStartTime = thursdayHistoryCleanupBatchWindowStartTime;
}
public String getThursdayHistoryCleanupBatchWindowEndTime() {
return thursdayHistoryCleanupBatchWindowEndTime;
}
public void setThursdayHistoryCleanupBatchWindowEndTime(String thursdayHistoryCleanupBatchWindowEndTime) {
this.thursdayHistoryCleanupBatchWindowEndTime = thursdayHistoryCleanupBatchWindowEndTime;
}
public String getFridayHistoryCleanupBatchWindowStartTime() {
return fridayHistoryCleanupBatchWindowStartTime;
}
public void setFridayHistoryCleanupBatchWindowStartTime(String fridayHistoryCleanupBatchWindowStartTime) {
this.fridayHistoryCleanupBatchWindowStartTime = fridayHistoryCleanupBatchWindowStartTime;
}
public String getFridayHistoryCleanupBatchWindowEndTime() {
return fridayHistoryCleanupBatchWindowEndTime;
}
public void setFridayHistoryCleanupBatchWindowEndTime(String fridayHistoryCleanupBatchWindowEndTime) {
this.fridayHistoryCleanupBatchWindowEndTime = fridayHistoryCleanupBatchWindowEndTime;
}
public String getSaturdayHistoryCleanupBatchWindowStartTime() {
return saturdayHistoryCleanupBatchWindowStartTime;
}
public void setSaturdayHistoryCleanupBatchWindowStartTime(String saturdayHistoryCleanupBatchWindowStartTime) {
this.saturdayHistoryCleanupBatchWindowStartTime = saturdayHistoryCleanupBatchWindowStartTime;
}
public String getSaturdayHistoryCleanupBatchWindowEndTime() {
return saturdayHistoryCleanupBatchWindowEndTime;
}
public void setSaturdayHistoryCleanupBatchWindowEndTime(String saturdayHistoryCleanupBatchWindowEndTime) {
this.saturdayHistoryCleanupBatchWindowEndTime = saturdayHistoryCleanupBatchWindowEndTime;
}
public String getSundayHistoryCleanupBatchWindowStartTime() {
return sundayHistoryCleanupBatchWindowStartTime;
}
public void setSundayHistoryCleanupBatchWindowStartTime(String sundayHistoryCleanupBatchWindowStartTime) {
this.sundayHistoryCleanupBatchWindowStartTime = sundayHistoryCleanupBatchWindowStartTime;
}
public String getSundayHistoryCleanupBatchWindowEndTime() {
return sundayHistoryCleanupBatchWindowEndTime;
}
public void setSundayHistoryCleanupBatchWindowEndTime(String sundayHistoryCleanupBatchWindowEndTime) {
this.sundayHistoryCleanupBatchWindowEndTime = sundayHistoryCleanupBatchWindowEndTime;
}
public Date getHistoryCleanupBatchWindowStartTimeAsDate() {
return historyCleanupBatchWindowStartTimeAsDate;
}
public void setHistoryCleanupBatchWindowStartTimeAsDate(Date historyCleanupBatchWindowStartTimeAsDate) {
this.historyCleanupBatchWindowStartTimeAsDate = historyCleanupBatchWindowStartTimeAsDate;
}
public void setHistoryCleanupBatchWindowEndTimeAsDate(Date historyCleanupBatchWindowEndTimeAsDate) {
this.historyCleanupBatchWindowEndTimeAsDate = historyCleanupBatchWindowEndTimeAsDate;
}
public Date getHistoryCleanupBatchWindowEndTimeAsDate() {
return historyCleanupBatchWindowEndTimeAsDate;
}
public Map<Integer, BatchWindowConfiguration> getHistoryCleanupBatchWindows() {
return historyCleanupBatchWindows;
}
public void setHistoryCleanupBatchWindows(Map<Integer, BatchWindowConfiguration> historyCleanupBatchWindows) {
this.historyCleanupBatchWindows = historyCleanupBatchWindows;
}
public int getHistoryCleanupBatchSize() {
return historyCleanupBatchSize;
}
public void setHistoryCleanupBatchSize(int historyCleanupBatchSize) {
this.historyCleanupBatchSize = historyCleanupBatchSize;
}
public int getHistoryCleanupBatchThreshold() {
return historyCleanupBatchThreshold;
}
public void setHistoryCleanupBatchThreshold(int historyCleanupBatchThreshold) {
this.historyCleanupBatchThreshold = historyCleanupBatchThreshold;
}
public boolean isHistoryCleanupMetricsEnabled() {
return historyCleanupMetricsEnabled;
}
public void setHistoryCleanupMetricsEnabled(boolean historyCleanupMetricsEnabled) {
this.historyCleanupMetricsEnabled = historyCleanupMetricsEnabled;
}
public boolean isHistoryCleanupEnabled() {
return historyCleanupEnabled;
}
public ProcessEngineConfigurationImpl setHistoryCleanupEnabled(boolean historyCleanupEnabled) {
this.historyCleanupEnabled = historyCleanupEnabled;
return this;
}
public String getHistoryTimeToLive() {
return historyTimeToLive;
}
public void setHistoryTimeToLive(String historyTimeToLive) {
this.historyTimeToLive = historyTimeToLive;
}
public String getBatchOperationHistoryTimeToLive() {
return batchOperationHistoryTimeToLive;
}
public int getHistoryCleanupDegreeOfParallelism() {
return historyCleanupDegreeOfParallelism;
}
public void setHistoryCleanupDegreeOfParallelism(int historyCleanupDegreeOfParallelism) {
this.historyCleanupDegreeOfParallelism = historyCleanupDegreeOfParallelism;
}
public void setBatchOperationHistoryTimeToLive(String batchOperationHistoryTimeToLive) {
this.batchOperationHistoryTimeToLive = batchOperationHistoryTimeToLive;
}
public Map<String, String> getBatchOperationsForHistoryCleanup() {
return batchOperationsForHistoryCleanup;
}
public void setBatchOperationsForHistoryCleanup(Map<String, String> batchOperationsForHistoryCleanup) {
this.batchOperationsForHistoryCleanup = batchOperationsForHistoryCleanup;
}
public Map<String, Integer> getParsedBatchOperationsForHistoryCleanup() {
return parsedBatchOperationsForHistoryCleanup;
}
public void setParsedBatchOperationsForHistoryCleanup(Map<String, Integer> parsedBatchOperationsForHistoryCleanup) {
this.parsedBatchOperationsForHistoryCleanup = parsedBatchOperationsForHistoryCleanup;
}
public String getHistoryCleanupJobLogTimeToLive() {
return historyCleanupJobLogTimeToLive;
}
public ProcessEngineConfigurationImpl setHistoryCleanupJobLogTimeToLive(String historyCleanupJobLogTimeToLive) {
this.historyCleanupJobLogTimeToLive = historyCleanupJobLogTimeToLive;
return this;
}
public String getTaskMetricsTimeToLive() {
return taskMetricsTimeToLive;
}
public ProcessEngineConfigurationImpl setTaskMetricsTimeToLive(String taskMetricsTimeToLive) {
this.taskMetricsTimeToLive = taskMetricsTimeToLive;
return this;
}
public Integer getParsedTaskMetricsTimeToLive() {
return parsedTaskMetricsTimeToLive;
}
public ProcessEngineConfigurationImpl setParsedTaskMetricsTimeToLive(Integer parsedTaskMetricsTimeToLive) {
this.parsedTaskMetricsTimeToLive = parsedTaskMetricsTimeToLive;
return this;
}
public BatchWindowManager getBatchWindowManager() {
return batchWindowManager;
}
public void setBatchWindowManager(BatchWindowManager batchWindowManager) {
this.batchWindowManager = batchWindowManager;
}
public HistoryRemovalTimeProvider getHistoryRemovalTimeProvider() {
return historyRemovalTimeProvider;
}
public ProcessEngineConfigurationImpl setHistoryRemovalTimeProvider(HistoryRemovalTimeProvider removalTimeProvider) {
historyRemovalTimeProvider = removalTimeProvider;
return this;
}
public String getHistoryRemovalTimeStrategy() {
return historyRemovalTimeStrategy;
}
public ProcessEngineConfigurationImpl setHistoryRemovalTimeStrategy(String removalTimeStrategy) {
historyRemovalTimeStrategy = removalTimeStrategy;
return this;
}
public String getHistoryCleanupStrategy() {
return historyCleanupStrategy;
}
public ProcessEngineConfigurationImpl setHistoryCleanupStrategy(String historyCleanupStrategy) {
this.historyCleanupStrategy = historyCleanupStrategy;
return this;
}
public int getFailedJobListenerMaxRetries() {
return failedJobListenerMaxRetries;
}
public void setFailedJobListenerMaxRetries(int failedJobListenerMaxRetries) {
this.failedJobListenerMaxRetries = failedJobListenerMaxRetries;
}
public String getFailedJobRetryTimeCycle() {
return failedJobRetryTimeCycle;
}
public void setFailedJobRetryTimeCycle(String failedJobRetryTimeCycle) {
this.failedJobRetryTimeCycle = failedJobRetryTimeCycle;
}
public int getLoginMaxAttempts() {
return loginMaxAttempts;
}
public void setLoginMaxAttempts(int loginMaxAttempts) {
this.loginMaxAttempts = loginMaxAttempts;
}
public int getLoginDelayFactor() {
return loginDelayFactor;
}
public void setLoginDelayFactor(int loginDelayFactor) {
this.loginDelayFactor = loginDelayFactor;
}
public int getLoginDelayMaxTime() {
return loginDelayMaxTime;
}
public void setLoginDelayMaxTime(int loginDelayMaxTime) {
this.loginDelayMaxTime = loginDelayMaxTime;
}
public int getLoginDelayBase() {
return loginDelayBase;
}
public void setLoginDelayBase(int loginInitialDelay) {
this.loginDelayBase = loginInitialDelay;
}
public List<String> getAdminGroups() {
return adminGroups;
}
public void setAdminGroups(List<String> adminGroups) {
this.adminGroups = adminGroups;
}
public List<String> getAdminUsers() {
return adminUsers;
}
public void setAdminUsers(List<String> adminUsers) {
this.adminUsers = adminUsers;
}
public int getQueryMaxResultsLimit() {
return queryMaxResultsLimit;
}
public ProcessEngineConfigurationImpl setQueryMaxResultsLimit(int queryMaxResultsLimit) {
this.queryMaxResultsLimit = queryMaxResultsLimit;
return this;
}
public String getLoggingContextActivityId() {
return loggingContextActivityId;
}
public ProcessEngineConfigurationImpl setLoggingContextActivityId(String loggingContextActivityId) {
this.loggingContextActivityId = loggingContextActivityId;
return this;
}
public String getLoggingContextApplicationName() {
return loggingContextApplicationName;
}
public ProcessEngineConfigurationImpl setLoggingContextApplicationName(String loggingContextApplicationName) {
this.loggingContextApplicationName = loggingContextApplicationName;
return this;
}
public String getLoggingContextBusinessKey() {
return loggingContextBusinessKey;
}
public ProcessEngineConfigurationImpl setLoggingContextBusinessKey(String loggingContextBusinessKey) {
this.loggingContextBusinessKey = loggingContextBusinessKey;
return this;
}
public String getLoggingContextProcessDefinitionId() {
return loggingContextProcessDefinitionId;
}
public ProcessEngineConfigurationImpl setLoggingContextProcessDefinitionId(String loggingContextProcessDefinitionId) {
this.loggingContextProcessDefinitionId = loggingContextProcessDefinitionId;
return this;
}
public String getLoggingContextProcessInstanceId() {
return loggingContextProcessInstanceId;
}
public ProcessEngineConfigurationImpl setLoggingContextProcessInstanceId(String loggingContextProcessInstanceId) {
this.loggingContextProcessInstanceId = loggingContextProcessInstanceId;
return this;
}
public String getLoggingContextTenantId() {
return loggingContextTenantId;
}
public ProcessEngineConfigurationImpl setLoggingContextTenantId(String loggingContextTenantId) {
this.loggingContextTenantId = loggingContextTenantId;
return this;
}
public List<FeelCustomFunctionProvider> getDmnFeelCustomFunctionProviders() {
return dmnFeelCustomFunctionProviders;
}
public ProcessEngineConfigurationImpl setDmnFeelCustomFunctionProviders(List<FeelCustomFunctionProvider> dmnFeelCustomFunctionProviders) {
this.dmnFeelCustomFunctionProviders = dmnFeelCustomFunctionProviders;
return this;
}
public boolean isDmnFeelEnableLegacyBehavior() {
return dmnFeelEnableLegacyBehavior;
}
public ProcessEngineConfigurationImpl setDmnFeelEnableLegacyBehavior(boolean dmnFeelEnableLegacyBehavior) {
this.dmnFeelEnableLegacyBehavior = dmnFeelEnableLegacyBehavior;
return this;
}
public Boolean isInitializeTelemetry() {
return initializeTelemetry;
}
public ProcessEngineConfigurationImpl setInitializeTelemetry(boolean telemetryInitialized) {
this.initializeTelemetry = telemetryInitialized;
return this;
}
public String getTelemetryEndpoint() {
return telemetryEndpoint;
}
public ProcessEngineConfigurationImpl setTelemetryEndpoint(String telemetryEndpoint) {
this.telemetryEndpoint = telemetryEndpoint;
return this;
}
public int getTelemetryRequestRetries() {
return telemetryRequestRetries;
}
public ProcessEngineConfigurationImpl setTelemetryRequestRetries(int telemetryRequestRetries) {
this.telemetryRequestRetries = telemetryRequestRetries;
return this;
}
public long getTelemetryReportingPeriod() {
return telemetryReportingPeriod;
}
public ProcessEngineConfigurationImpl setTelemetryReportingPeriod(long telemetryReportingPeriod) {
this.telemetryReportingPeriod = telemetryReportingPeriod;
return this;
}
public TelemetryReporter getTelemetryReporter() {
return telemetryReporter;
}
public ProcessEngineConfigurationImpl setTelemetryReporter(TelemetryReporter telemetryReporter) {
this.telemetryReporter = telemetryReporter;
return this;
}
public boolean isTelemetryReporterActivate() {
return isTelemetryReporterActivate;
}
public ProcessEngineConfigurationImpl setTelemetryReporterActivate(boolean isTelemetryReporterActivate) {
this.isTelemetryReporterActivate = isTelemetryReporterActivate;
return this;
}
public Connector<? extends ConnectorRequest<?>> getTelemetryHttpConnector() {
return telemetryHttpConnector;
}
public ProcessEngineConfigurationImpl setTelemetryHttpConnector(Connector<? extends ConnectorRequest<?>> telemetryHttp) {
this.telemetryHttpConnector = telemetryHttp;
return this;
}
public Data getTelemetryData() {
return telemetryData;
}
public ProcessEngineConfigurationImpl setTelemetryData(Data telemetryData) {
this.telemetryData = telemetryData;
return this;
}
public int getTelemetryRequestTimeout() {
return telemetryRequestTimeout;
}
public ProcessEngineConfigurationImpl setTelemetryRequestTimeout(int telemetryRequestTimeout) {
this.telemetryRequestTimeout = telemetryRequestTimeout;
return this;
}
public ProcessEngineConfigurationImpl setCommandRetries(int commandRetries) {
this.commandRetries = commandRetries;
return this;
}
public int getCommandRetries() {
return commandRetries;
}
protected CrdbTransactionRetryInterceptor getCrdbRetryInterceptor() {
return new CrdbTransactionRetryInterceptor(commandRetries);
}
}
| 1 | 11934 |
So it seems that loading a script file from the classpath needs to be enabled explicitly now since this is `false` by default. Is it possible to enable this via the YAML config file in a Camunda Platform Run distribution?
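In Camunda Platform Run, engine flags like this can usually be passed through as generic engine properties in the YAML configuration. A minimal sketch, assuming the flag in question is exposed as an engine property named `enableScriptEngineLoadExternalResources` (the property name is an assumption here and is not visible in this excerpt):

```yaml
# sketch only: property name assumed, adjust to the actual engine property
camunda.bpm:
  generic-properties:
    properties:
      enable-script-engine-load-external-resources: true
```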
|
camunda-camunda-bpm-platform
|
java
|
@@ -5,8 +5,7 @@
const config = require('@purest/providers')
const dropbox = require('./dropbox')
const drive = require('./drive')
-const instagram = require('./instagram')
-const instagramGraph = require('./instagram/graph')
+const instagram = require('./instagram/graph')
const facebook = require('./facebook')
const onedrive = require('./onedrive')
const zoom = require('./zoom')
| 1 |
/**
* @module provider
*/
// @ts-ignore
const config = require('@purest/providers')
const dropbox = require('./dropbox')
const drive = require('./drive')
const instagram = require('./instagram')
const instagramGraph = require('./instagram/graph')
const facebook = require('./facebook')
const onedrive = require('./onedrive')
const zoom = require('./zoom')
const { getURLBuilder } = require('../helpers/utils')
const logger = require('../logger')
// eslint-disable-next-line
const Provider = require('./Provider')
// leave here for now until Purest Providers gets updated with Zoom provider
config.zoom = {
'https://zoom.us/': {
__domain: {
auth: {
auth: { bearer: '[0]' }
}
},
'[version]/{endpoint}': {
__path: {
alias: '__default',
version: 'v2'
}
},
'oauth/revoke': {
__path: {
alias: 'logout',
auth: {
auth: { basic: '[0]' }
}
}
}
}
}
/**
* adds the desired provider module to the request object,
* based on the providerName parameter specified
*
* @param {Object.<string, typeof Provider>} providers
*/
module.exports.getProviderMiddleware = (providers) => {
/**
*
* @param {object} req
* @param {object} res
* @param {function} next
* @param {string} providerName
*/
const middleware = (req, res, next, providerName) => {
if (providers[providerName] && validOptions(req.companion.options)) {
req.companion.provider = new providers[providerName]({ providerName, config })
} else {
logger.warn('invalid provider options detected. Provider will not be loaded', 'provider.middleware.invalid', req.id)
}
next()
}
return middleware
}
/**
* @param {{server: object, providerOptions: object}} companionOptions
* @return {Object.<string, typeof Provider>}
*/
module.exports.getDefaultProviders = (companionOptions) => {
const { providerOptions } = companionOptions || { providerOptions: null }
// @todo: we should rename drive to googledrive or google-drive or google
const providers = { dropbox, drive, facebook, onedrive, zoom }
// Instagram's Graph API key is just numbers, while the old API key is hex
const usesGraphAPI = () => /^\d+$/.test(providerOptions.instagram.key)
if (providerOptions && providerOptions.instagram && usesGraphAPI()) {
providers.instagram = instagramGraph
} else {
providers.instagram = instagram
}
return providers
}
/**
*
* @typedef {{module: typeof Provider, config: object}} CustomProvider
*
* @param {Object.<string, CustomProvider>} customProviders
* @param {Object.<string, typeof Provider>} providers
* @param {object} grantConfig
*/
module.exports.addCustomProviders = (customProviders, providers, grantConfig) => {
Object.keys(customProviders).forEach((providerName) => {
providers[providerName] = customProviders[providerName].module
const providerConfig = Object.assign({}, customProviders[providerName].config)
// todo: consider setting these options from a universal point also used
// by official providers. It'll prevent these from getting left out if the
// requirement changes.
providerConfig.callback = `/${providerName}/callback`
providerConfig.transport = 'session'
grantConfig[providerName] = providerConfig
})
}
/**
*
* @param {{server: object, providerOptions: object}} companionOptions
* @param {object} grantConfig
*/
module.exports.addProviderOptions = (companionOptions, grantConfig) => {
const { server, providerOptions } = companionOptions
if (!validOptions({ server })) {
logger.warn('invalid provider options detected. Providers will not be loaded', 'provider.options.invalid')
return
}
grantConfig.defaults = {
host: server.host,
protocol: server.protocol,
path: server.path
}
const { oauthDomain } = server
const keys = Object.keys(providerOptions).filter((key) => key !== 'server')
keys.forEach((providerName) => {
const authProvider = providerNameToAuthName(providerName, companionOptions)
if (authProvider && grantConfig[authProvider]) {
// explicitly add providerOptions so users don't override other providerOptions.
grantConfig[authProvider].key = providerOptions[providerName].key
grantConfig[authProvider].secret = providerOptions[providerName].secret
const provider = exports.getDefaultProviders(companionOptions)[providerName]
Object.assign(grantConfig[authProvider], provider.getExtraConfig())
// override grant.js redirect uri with companion's custom redirect url
const isExternal = !!server.implicitPath
const redirectPath = `/${providerName}/redirect`
grantConfig[authProvider].redirect_uri = getURLBuilder(companionOptions)(redirectPath, isExternal)
if (oauthDomain) {
const fullRedirectPath = getURLBuilder(companionOptions)(redirectPath, isExternal, true)
grantConfig[authProvider].redirect_uri = `${server.protocol}://${oauthDomain}${fullRedirectPath}`
}
if (server.implicitPath) {
// no url builder is used for this because grant internally adds the path
grantConfig[authProvider].callback = `${server.implicitPath}${grantConfig[authProvider].callback}`
} else if (server.path) {
grantConfig[authProvider].callback = `${server.path}${grantConfig[authProvider].callback}`
}
} else if (providerName !== 's3') {
logger.warn(`skipping one found unsupported provider "${providerName}".`, 'provider.options.skip')
}
})
}
/**
*
* @param {string} name of the provider
* @param {{server: object, providerOptions: object}} options
* @return {string} the authProvider for this provider
*/
const providerNameToAuthName = (name, options) => {
const providers = exports.getDefaultProviders(options)
return (providers[name] || {}).authProvider
}
/**
*
* @param {{server: object}} options
*/
const validOptions = (options) => {
return options.server.host && options.server.protocol
}
| 1 | 13435 |
Can we delete any code in the provider implementation? I imagine the entire thing is unused now?
|
transloadit-uppy
|
js
|
@@ -148,6 +148,7 @@ func wrapErrorWithClusterIssuerStatusCondition(client clientset.ClusterIssuerInt
// WaitForCertificateCondition waits for the status of the named Certificate to contain
// a condition whose type and status matches the supplied one.
+// Deprecated: this function is not used anymore
func WaitForCertificateCondition(client clientset.CertificateInterface, name string, condition v1.CertificateCondition, timeout time.Duration) (*v1.Certificate, error) {
var certificate *v1.Certificate = nil
pollErr := wait.PollImmediate(500*time.Millisecond, timeout,
| 1 |
/*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
// TODO: we should break this file apart into separate more sane/reusable parts
import (
"context"
"crypto"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"net"
"net/url"
"time"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/gateway-api/apis/v1alpha1"
gwapiv1alpha1 "sigs.k8s.io/gateway-api/apis/v1alpha1"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
v1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1"
cmmeta "github.com/jetstack/cert-manager/pkg/apis/meta/v1"
intscheme "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/scheme"
clientset "github.com/jetstack/cert-manager/pkg/client/clientset/versioned/typed/certmanager/v1"
"github.com/jetstack/cert-manager/pkg/util"
"github.com/jetstack/cert-manager/pkg/util/pki"
"github.com/jetstack/cert-manager/test/e2e/framework/log"
)
func CertificateOnlyValidForDomains(cert *x509.Certificate, commonName string, dnsNames ...string) bool {
if commonName != cert.Subject.CommonName || !util.EqualUnsorted(cert.DNSNames, dnsNames) {
return false
}
return true
}
func WaitForIssuerStatusFunc(client clientset.IssuerInterface, name string, fn func(*v1.Issuer) (bool, error)) error {
return wait.PollImmediate(500*time.Millisecond, time.Minute,
func() (bool, error) {
issuer, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting Issuer %q: %v", name, err)
}
return fn(issuer)
})
}
// WaitForIssuerCondition waits for the status of the named issuer to contain
// a condition whose type and status matches the supplied one.
func WaitForIssuerCondition(client clientset.IssuerInterface, name string, condition v1.IssuerCondition) error {
pollErr := wait.PollImmediate(500*time.Millisecond, time.Minute,
func() (bool, error) {
log.Logf("Waiting for issuer %v condition %#v", name, condition)
issuer, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil != err {
return false, fmt.Errorf("error getting Issuer %q: %v", name, err)
}
return apiutil.IssuerHasCondition(issuer, condition), nil
},
)
return wrapErrorWithIssuerStatusCondition(client, pollErr, name, condition.Type)
}
// try to retrieve last condition to help diagnose tests.
func wrapErrorWithIssuerStatusCondition(client clientset.IssuerInterface, pollErr error, name string, conditionType v1.IssuerConditionType) error {
if pollErr == nil {
return nil
}
issuer, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return pollErr
}
for _, cond := range issuer.GetStatus().Conditions {
if cond.Type == conditionType {
return fmt.Errorf("%s: Last Status: '%s' Reason: '%s', Message: '%s'", pollErr.Error(), cond.Status, cond.Reason, cond.Message)
}
}
return pollErr
}
// WaitForClusterIssuerCondition waits for the status of the named issuer to contain
// a condition whose type and status matches the supplied one.
func WaitForClusterIssuerCondition(client clientset.ClusterIssuerInterface, name string, condition v1.IssuerCondition) error {
pollErr := wait.PollImmediate(500*time.Millisecond, time.Minute,
func() (bool, error) {
log.Logf("Waiting for clusterissuer %v condition %#v", name, condition)
issuer, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil != err {
return false, fmt.Errorf("error getting ClusterIssuer %v: %v", name, err)
}
return apiutil.IssuerHasCondition(issuer, condition), nil
},
)
return wrapErrorWithClusterIssuerStatusCondition(client, pollErr, name, condition.Type)
}
// try to retrieve last condition to help diagnose tests.
func wrapErrorWithClusterIssuerStatusCondition(client clientset.ClusterIssuerInterface, pollErr error, name string, conditionType v1.IssuerConditionType) error {
if pollErr == nil {
return nil
}
issuer, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return pollErr
}
for _, cond := range issuer.GetStatus().Conditions {
if cond.Type == conditionType {
return fmt.Errorf("%s: Last Status: '%s' Reason: '%s', Message: '%s'", pollErr.Error(), cond.Status, cond.Reason, cond.Message)
}
}
return pollErr
}
// WaitForCertificateCondition waits for the status of the named Certificate to contain
// a condition whose type and status matches the supplied one.
func WaitForCertificateCondition(client clientset.CertificateInterface, name string, condition v1.CertificateCondition, timeout time.Duration) (*v1.Certificate, error) {
var certificate *v1.Certificate = nil
pollErr := wait.PollImmediate(500*time.Millisecond, timeout,
func() (bool, error) {
log.Logf("Waiting for Certificate %v condition %v=%v", name, condition.Type, condition.Status)
certificate, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil != err {
return false, fmt.Errorf("error getting Certificate %v: %v", name, err)
}
if !apiutil.CertificateHasCondition(certificate, condition) {
log.Logf("Expected Certificate %v condition %v=%v but it has: %v", name, condition.Type, condition.Status, certificate.Status.Conditions)
return false, nil
}
return true, nil
},
)
return certificate, wrapErrorWithCertificateStatusCondition(client, pollErr, name, condition.Type)
}
// WaitForMissingCertificateCondition waits for the status of the named Certificate to NOT contain
// a condition whose type and status matches the supplied one.
func WaitForMissingCertificateCondition(client clientset.CertificateInterface, name string, condition v1.CertificateCondition, timeout time.Duration) (*v1.Certificate, error) {
var certificate *v1.Certificate = nil
pollErr := wait.PollImmediate(500*time.Millisecond, timeout,
func() (bool, error) {
log.Logf("Waiting for Certificate %v condition %v=%v to be missing", name, condition.Type, condition.Status)
certificate, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil != err {
return false, fmt.Errorf("error getting Certificate %v: %v", name, err)
}
if apiutil.CertificateHasCondition(certificate, condition) {
log.Logf("Expected Certificate %v condition %v=%v to be missing but it has: %v", name, condition.Type, condition.Status, certificate.Status.Conditions)
return false, nil
}
return true, nil
},
)
return certificate, wrapErrorWithCertificateStatusCondition(client, pollErr, name, condition.Type)
}
// WaitForCertificateConditionWithObservedGeneration waits for the status of the named Certificate to contain
// a condition whose type and status matches the supplied one.
func WaitForCertificateConditionWithObservedGeneration(client clientset.CertificateInterface, name string, condition v1.CertificateCondition, timeout time.Duration) (*v1.Certificate, error) {
var certificate *v1.Certificate = nil
pollErr := wait.PollImmediate(500*time.Millisecond, timeout,
func() (bool, error) {
log.Logf("Waiting for Certificate %v condition %v=%v", name, condition.Type, condition.Status)
certificate, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil != err {
return false, fmt.Errorf("error getting Certificate %v: %v", name, err)
}
if !apiutil.CertificateHasConditionWithObservedGeneration(certificate, condition) {
log.Logf("Expected Certificate %v condition %v=%v (generation >= %v) but it has: %v", name, condition.Type, condition.Status, condition.ObservedGeneration, certificate.Status.Conditions)
return false, nil
}
return true, nil
},
)
return certificate, wrapErrorWithCertificateStatusCondition(client, pollErr, name, condition.Type)
}
// WaitForCertificateEvent waits for an event on the named Certificate to contain
// an event reason matches the supplied one.
func WaitForCertificateEvent(client kubernetes.Interface, cert *v1.Certificate, reason string, timeout time.Duration) error {
return wait.PollImmediate(500*time.Millisecond, timeout,
func() (bool, error) {
log.Logf("Waiting for Certificate event %v reason %#v", cert.Name, reason)
evts, err := client.CoreV1().Events(cert.Namespace).Search(intscheme.Scheme, cert)
if err != nil {
return false, fmt.Errorf("error getting Certificate %v: %v", cert.Name, err)
}
return hasEvent(evts, reason), nil
},
)
}
func hasEvent(events *corev1.EventList, reason string) bool {
for _, evt := range events.Items {
if evt.Reason == reason {
return true
}
}
return false
}
// try to retrieve last condition to help diagnose tests.
func wrapErrorWithCertificateStatusCondition(client clientset.CertificateInterface, pollErr error, name string, conditionType v1.CertificateConditionType) error {
if pollErr == nil {
return nil
}
certificate, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return pollErr
}
for _, cond := range certificate.Status.Conditions {
if cond.Type == conditionType {
return fmt.Errorf("%s: Last Status: '%s' Reason: '%s', Message: '%s'", pollErr.Error(), cond.Status, cond.Reason, cond.Message)
}
}
return pollErr
}
// WaitForCertificateToExist waits for the named certificate to exist
func WaitForCertificateToExist(client clientset.CertificateInterface, name string, timeout time.Duration) error {
return wait.PollImmediate(500*time.Millisecond, timeout,
func() (bool, error) {
log.Logf("Waiting for Certificate %v to exist", name)
_, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, fmt.Errorf("error getting Certificate %v: %v", name, err)
}
return true, nil
},
)
}
// WaitForCRDToNotExist waits for the CRD with the given name to no
// longer exist.
func WaitForCRDToNotExist(client apiextensionsv1.CustomResourceDefinitionInterface, name string) error {
return wait.PollImmediate(500*time.Millisecond, time.Minute,
func() (bool, error) {
log.Logf("Waiting for CRD %v to not exist", name)
_, err := client.Get(context.TODO(), name, metav1.GetOptions{})
if nil == err {
return false, nil
}
if errors.IsNotFound(err) {
return true, nil
}
return false, nil
},
)
}
// Deprecated: use test/unit/gen/Certificate in future
func NewCertManagerBasicCertificate(name, secretName, issuerName string, issuerKind string, duration, renewBefore *metav1.Duration, dnsNames ...string) *v1.Certificate {
cn := "test.domain.com"
if len(dnsNames) > 0 {
cn = dnsNames[0]
}
return &v1.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.CertificateSpec{
CommonName: cn,
DNSNames: dnsNames,
Subject: &v1.X509Subject{
Organizations: []string{"test-org"},
},
SecretName: secretName,
Duration: duration,
RenewBefore: renewBefore,
PrivateKey: &v1.CertificatePrivateKey{},
IssuerRef: cmmeta.ObjectReference{
Name: issuerName,
Kind: issuerKind,
},
},
}
}
// Deprecated: use test/unit/gen/CertificateRequest in future
func NewCertManagerBasicCertificateRequest(name, issuerName string, issuerKind string, duration *metav1.Duration,
dnsNames []string, ips []net.IP, uris []string, keyAlgorithm x509.PublicKeyAlgorithm) (*v1.CertificateRequest, crypto.Signer, error) {
cn := "test.domain.com"
if len(dnsNames) > 0 {
cn = dnsNames[0]
}
var parsedURIs []*url.URL
for _, uri := range uris {
parsed, err := url.Parse(uri)
if err != nil {
return nil, nil, err
}
parsedURIs = append(parsedURIs, parsed)
}
var sk crypto.Signer
var signatureAlgorithm x509.SignatureAlgorithm
var err error
switch keyAlgorithm {
case x509.RSA:
sk, err = pki.GenerateRSAPrivateKey(2048)
if err != nil {
return nil, nil, err
}
signatureAlgorithm = x509.SHA256WithRSA
case x509.ECDSA:
sk, err = pki.GenerateECPrivateKey(pki.ECCurve256)
if err != nil {
return nil, nil, err
}
signatureAlgorithm = x509.ECDSAWithSHA256
case x509.Ed25519:
sk, err = pki.GenerateEd25519PrivateKey()
if err != nil {
return nil, nil, err
}
signatureAlgorithm = x509.PureEd25519
default:
return nil, nil, fmt.Errorf("unrecognised key algorithm: %s", err)
}
csr := &x509.CertificateRequest{
Version: 3,
SignatureAlgorithm: signatureAlgorithm,
PublicKeyAlgorithm: keyAlgorithm,
PublicKey: sk.Public(),
Subject: pkix.Name{
CommonName: cn,
},
DNSNames: dnsNames,
IPAddresses: ips,
URIs: parsedURIs,
}
csrBytes, err := pki.EncodeCSR(csr, sk)
if err != nil {
return nil, nil, err
}
csrPEM := pem.EncodeToMemory(&pem.Block{
Type: "CERTIFICATE REQUEST", Bytes: csrBytes,
})
return &v1.CertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.CertificateRequestSpec{
Duration: duration,
Request: csrPEM,
IssuerRef: cmmeta.ObjectReference{
Name: issuerName,
Kind: issuerKind,
},
},
}, sk, nil
}
func NewCertManagerVaultCertificate(name, secretName, issuerName string, issuerKind string, duration, renewBefore *metav1.Duration) *v1.Certificate {
return &v1.Certificate{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.CertificateSpec{
CommonName: "test.domain.com",
SecretName: secretName,
Duration: duration,
RenewBefore: renewBefore,
IssuerRef: cmmeta.ObjectReference{
Name: issuerName,
Kind: issuerKind,
},
},
}
}
func NewIngress(name, secretName string, annotations map[string]string, dnsNames ...string) *networkingv1.Ingress {
return &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: annotations,
},
Spec: networkingv1.IngressSpec{
TLS: []networkingv1.IngressTLS{
{
Hosts: dnsNames,
SecretName: secretName,
},
},
Rules: []networkingv1.IngressRule{
{
Host: dnsNames[0],
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: pathTypePrefix(),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "somesvc",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
},
},
},
},
},
}
}
func NewV1Beta1Ingress(name, secretName string, annotations map[string]string, dnsNames ...string) *networkingv1beta1.Ingress {
return &networkingv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: annotations,
},
Spec: networkingv1beta1.IngressSpec{
TLS: []networkingv1beta1.IngressTLS{
{
Hosts: dnsNames,
SecretName: secretName,
},
},
Rules: []networkingv1beta1.IngressRule{
{
Host: dnsNames[0],
IngressRuleValue: networkingv1beta1.IngressRuleValue{
HTTP: &networkingv1beta1.HTTPIngressRuleValue{
Paths: []networkingv1beta1.HTTPIngressPath{
{
Path: "/",
Backend: networkingv1beta1.IngressBackend{
ServiceName: "somesvc",
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
}
}
func pathTypePrefix() *networkingv1.PathType {
p := networkingv1.PathTypePrefix
return &p
}
func NewGateway(gatewayName, ns, secretName string, annotations map[string]string, dnsNames ...string) (*gwapiv1alpha1.Gateway, *gwapiv1alpha1.HTTPRoute) {
var hostnames []gwapiv1alpha1.Hostname
for _, dnsName := range dnsNames {
hostnames = append(hostnames, gwapiv1alpha1.Hostname(dnsName))
}
return &gwapiv1alpha1.Gateway{
ObjectMeta: metav1.ObjectMeta{
Name: gatewayName,
Annotations: annotations,
},
Spec: gwapiv1alpha1.GatewaySpec{
GatewayClassName: "istio",
Listeners: []v1alpha1.Listener{{
Routes: gwapiv1alpha1.RouteBindingSelector{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
"gw": gatewayName,
}},
},
Port: gwapiv1alpha1.PortNumber(80),
Hostname: (*gwapiv1alpha1.Hostname)(&dnsNames[0]),
TLS: &gwapiv1alpha1.GatewayTLSConfig{
CertificateRef: &gwapiv1alpha1.LocalObjectReference{
Name: secretName,
Kind: "Secret",
Group: "core",
},
},
}},
},
},
&gwapiv1alpha1.HTTPRoute{
ObjectMeta: metav1.ObjectMeta{
Name: gatewayName,
Annotations: annotations,
Labels: map[string]string{
"gw": gatewayName,
},
},
Spec: gwapiv1alpha1.HTTPRouteSpec{
Gateways: &gwapiv1alpha1.RouteGateways{
GatewayRefs: []gwapiv1alpha1.GatewayReference{{
Name: gatewayName,
Namespace: ns,
}},
},
Hostnames: hostnames,
Rules: []gwapiv1alpha1.HTTPRouteRule{{
Matches: []gwapiv1alpha1.HTTPRouteMatch{{
Path: &gwapiv1alpha1.HTTPPathMatch{
Type: ptrPathMatch(gwapiv1alpha1.PathMatchExact),
Value: ptrStr("/"),
},
}},
ForwardTo: []gwapiv1alpha1.HTTPRouteForwardTo{{
ServiceName: ptrStr("dummy-service"),
Port: ptrPort(80),
}},
}},
},
}
}
func ptrPathMatch(p gwapiv1alpha1.PathMatchType) *gwapiv1alpha1.PathMatchType {
return &p
}
func ptrStr(s string) *string {
return &s
}
func ptrPort(port int32) *gwapiv1alpha1.PortNumber {
p := gwapiv1alpha1.PortNumber(port)
return &p
}
// HasIngresses lets you know if an API exists in the discovery API.
// Calling this function always performs a request to the API server.
func HasIngresses(d discovery.DiscoveryInterface, GroupVersion string) bool {
resourceList, err := d.ServerResourcesForGroupVersion(GroupVersion)
if err != nil {
return false
}
for _, r := range resourceList.APIResources {
if r.Kind == "Ingress" {
return true
}
}
return false
}
| 1 | 28,828 |
I suggest we remove all these unused functions. I don't expect any external projects to be importing packages from `cert-manager/test/...`
|
jetstack-cert-manager
|
go
|
@@ -150,15 +150,10 @@ void DataManWriter::MetadataThread(const std::string &address)
while (m_Listening)
{
auto request = tpm.ReceiveRequest();
- if (request == nullptr)
+ if (request && request->size() > 0)
{
- continue;
- }
- if (request->size() >= 0)
- {
- m_AggregatedMetadataMutex.lock();
+ std::lock_guard lck(m_AggregatedMetadataMutex);
tpm.SendReply(m_AggregatedMetadata);
- m_AggregatedMetadataMutex.unlock();
}
}
}
| 1 |
/*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
 * DataManWriter.cpp
*
* Created on: Jan 10, 2017
* Author: wfg
*/
#include "DataManWriter.h"
#include "DataManWriter.tcc"
#include <iostream>
#include "adios2/ADIOSMacros.h"
#include "adios2/helper/adiosFunctions.h" //CSVToVector
namespace adios2
{
namespace core
{
namespace engine
{
DataManWriter::DataManWriter(IO &io, const std::string &name, const Mode mode,
MPI_Comm mpiComm)
: DataManCommon("DataManWriter", io, name, mode, mpiComm)
{
m_EndMessage = ", in call to Open DataManWriter\n";
Init();
}
StepStatus DataManWriter::BeginStep(StepMode mode, const float timeout_sec)
{
if (m_Verbosity >= 5)
{
std::cout << "DataManWriter::BeginStep() begin. Last step "
<< m_CurrentStep << std::endl;
}
++m_CurrentStep;
for (size_t i = 0; i < m_Channels; ++i)
{
m_DataManSerializer[i]->New(m_BufferSize);
}
if (m_Verbosity >= 5)
{
std::cout << "DataManWriter::BeginStep() end. Current step "
<< m_CurrentStep << std::endl;
}
return StepStatus::OK;
}
size_t DataManWriter::CurrentStep() const { return m_CurrentStep; }
void DataManWriter::PerformPuts() {}
void DataManWriter::EndStep()
{
for (auto &serializer : m_DataManSerializer)
{
serializer->PutAttributes(m_IO);
}
if (m_CurrentStep == 0)
{
m_DataManSerializer[0]->AggregateMetadata();
m_AggregatedMetadataMutex.lock();
int64_t stepProvided;
m_AggregatedMetadata =
m_DataManSerializer[0]->GetAggregatedMetadataPack(0, stepProvided,
-1);
m_AggregatedMetadataMutex.unlock();
}
if (m_WorkflowMode == "file")
{
const auto buf = m_DataManSerializer[0]->GetLocalPack();
m_FileTransport.Write(buf->data(), buf->size());
}
else if (m_WorkflowMode == "stream")
{
for (size_t i = 0; i < m_Channels; ++i)
{
m_DataManSerializer[i]->AttachAttributes();
const auto buf = m_DataManSerializer[i]->GetLocalPack();
m_BufferSize = buf->size();
m_WANMan->Write(buf, i);
}
}
}
void DataManWriter::Flush(const int transportIndex) {}
// PRIVATE functions below
void DataManWriter::Init()
{
if (m_WorkflowMode == "file")
{
m_FileTransport.Open(m_Name, Mode::Write);
return;
}
// initialize transports
m_WANMan = std::make_shared<transportman::WANMan>(m_MPIComm, m_DebugMode);
m_WANMan->OpenTransports(m_IO.m_TransportsParameters, Mode::Write,
m_WorkflowMode, true);
// initialize serializer
for (size_t i = 0; i < m_Channels; ++i)
{
m_DataManSerializer.push_back(
std::make_shared<format::DataManSerializer>(
m_IsRowMajor, m_ContiguousMajor, m_IsLittleEndian, m_MPIComm));
}
}
#define declare_type(T) \
void DataManWriter::DoPutSync(Variable<T> &variable, const T *values) \
{ \
PutSyncCommon(variable, values); \
} \
void DataManWriter::DoPutDeferred(Variable<T> &variable, const T *values) \
{ \
PutDeferredCommon(variable, values); \
}
ADIOS2_FOREACH_STDTYPE_1ARG(declare_type)
#undef declare_type
void DataManWriter::DoClose(const int transportIndex)
{
if (m_WorkflowMode == "file")
{
m_FileTransport.Close();
return;
}
m_WANMan->Write(format::DataManSerializer::EndSignal(CurrentStep()), 0);
}
void DataManWriter::MetadataThread(const std::string &address)
{
transportman::StagingMan tpm(m_MPIComm, Mode::Write, 0, 1e7);
tpm.OpenTransport(address);
while (m_Listening)
{
auto request = tpm.ReceiveRequest();
if (request == nullptr)
{
continue;
}
if (request->size() >= 0)
{
m_AggregatedMetadataMutex.lock();
tpm.SendReply(m_AggregatedMetadata);
m_AggregatedMetadataMutex.unlock();
}
}
}
} // end namespace engine
} // end namespace core
} // end namespace adios2
| 1 | 12,860 |
Looks like the template argument doesn't resolve by default so you need `std::lock_guard<std::mutex> lck`
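For reference, a minimal standalone sketch (not taken from the ADIOS2 sources) of that point: std::lock_guard only deduces its template argument under C++17 class template argument deduction, so on older standards the explicit form std::lock_guard<std::mutex> is required.

#include <iostream>
#include <mutex>
#include <thread>

std::mutex counterMutex;
int counter = 0;

void incrementExplicit()
{
    // Compiles under C++11 and later: the template argument is spelled out.
    std::lock_guard<std::mutex> lck(counterMutex);
    ++counter;
}

void incrementDeduced()
{
    // Compiles only under C++17 or later, where the argument is deduced.
    std::lock_guard lck(counterMutex);
    ++counter;
}

int main()
{
    std::thread t1(incrementExplicit);
    std::thread t2(incrementDeduced);
    t1.join();
    t2.join();
    std::cout << counter << std::endl; // prints 2
    return 0;
}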
|
ornladios-ADIOS2
|
cpp
|
@@ -128,12 +128,6 @@ public final class ArrayCreationLevel extends Node implements NodeWithAnnotation
return this;
}
- @Override
- @Generated("com.github.javaparser.generator.core.node.GetNodeListsGenerator")
- public List<NodeList<?>> getNodeLists() {
- return Arrays.asList(getAnnotations());
- }
-
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public ArrayCreationLevel removeDimension() {
return setDimension((Expression) null);
| 1 |
/*
* Copyright (C) 2007-2010 Júlio Vilmar Gesser.
* Copyright (C) 2011, 2013-2016 The JavaParser Team.
*
* This file is part of JavaParser.
*
* JavaParser can be used either under the terms of
* a) the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* b) the terms of the Apache License
*
* You should have received a copy of both licenses in LICENCE.LGPL and
* LICENCE.APACHE. Please refer to those files for details.
*
* JavaParser is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*/
package com.github.javaparser.ast;
import com.github.javaparser.ast.expr.AnnotationExpr;
import com.github.javaparser.ast.expr.Expression;
import com.github.javaparser.ast.expr.IntegerLiteralExpr;
import com.github.javaparser.ast.nodeTypes.NodeWithAnnotations;
import com.github.javaparser.ast.observer.ObservableProperty;
import com.github.javaparser.ast.visitor.GenericVisitor;
import com.github.javaparser.ast.visitor.VoidVisitor;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import static com.github.javaparser.utils.Utils.assertNotNull;
import com.github.javaparser.ast.Node;
import com.github.javaparser.ast.visitor.CloneVisitor;
import com.github.javaparser.metamodel.ArrayCreationLevelMetaModel;
import com.github.javaparser.metamodel.JavaParserMetaModel;
import javax.annotation.Generated;
import com.github.javaparser.TokenRange;
/**
* In <code>new int[1][2];</code> there are two ArrayCreationLevel objects,
* the first one contains the expression "1",
* the second the expression "2".
*/
public final class ArrayCreationLevel extends Node implements NodeWithAnnotations<ArrayCreationLevel> {
private Expression dimension;
private NodeList<AnnotationExpr> annotations = new NodeList<>();
public ArrayCreationLevel() {
this(null, null, new NodeList<>());
}
public ArrayCreationLevel(int dimension) {
this(null, new IntegerLiteralExpr("" + dimension), new NodeList<>());
}
public ArrayCreationLevel(Expression dimension) {
this(null, dimension, new NodeList<>());
}
@AllFieldsConstructor
public ArrayCreationLevel(Expression dimension, NodeList<AnnotationExpr> annotations) {
this(null, dimension, annotations);
}
/**This constructor is used by the parser and is considered private.*/
@Generated("com.github.javaparser.generator.core.node.MainConstructorGenerator")
public ArrayCreationLevel(TokenRange tokenRange, Expression dimension, NodeList<AnnotationExpr> annotations) {
super(tokenRange);
setDimension(dimension);
setAnnotations(annotations);
customInitialization();
}
@Override
public <R, A> R accept(final GenericVisitor<R, A> v, final A arg) {
return v.visit(this, arg);
}
@Override
public <A> void accept(final VoidVisitor<A> v, final A arg) {
v.visit(this, arg);
}
/**
* Sets the dimension
*
* @param dimension the dimension, can be null
* @return this, the ArrayCreationLevel
*/
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator")
public ArrayCreationLevel setDimension(final Expression dimension) {
if (dimension == this.dimension) {
return (ArrayCreationLevel) this;
}
notifyPropertyChange(ObservableProperty.DIMENSION, this.dimension, dimension);
if (this.dimension != null)
this.dimension.setParentNode(null);
this.dimension = dimension;
setAsParentNodeOf(dimension);
return this;
}
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator")
public Optional<Expression> getDimension() {
return Optional.ofNullable(dimension);
}
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator")
public NodeList<AnnotationExpr> getAnnotations() {
return annotations;
}
@Generated("com.github.javaparser.generator.core.node.PropertyGenerator")
public ArrayCreationLevel setAnnotations(final NodeList<AnnotationExpr> annotations) {
assertNotNull(annotations);
if (annotations == this.annotations) {
return (ArrayCreationLevel) this;
}
notifyPropertyChange(ObservableProperty.ANNOTATIONS, this.annotations, annotations);
if (this.annotations != null)
this.annotations.setParentNode(null);
this.annotations = annotations;
setAsParentNodeOf(annotations);
return this;
}
@Override
@Generated("com.github.javaparser.generator.core.node.GetNodeListsGenerator")
public List<NodeList<?>> getNodeLists() {
return Arrays.asList(getAnnotations());
}
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public ArrayCreationLevel removeDimension() {
return setDimension((Expression) null);
}
@Override
@Generated("com.github.javaparser.generator.core.node.RemoveMethodGenerator")
public boolean remove(Node node) {
if (node == null)
return false;
for (int i = 0; i < annotations.size(); i++) {
if (annotations.get(i) == node) {
annotations.remove(i);
return true;
}
}
if (dimension != null) {
if (node == dimension) {
removeDimension();
return true;
}
}
return super.remove(node);
}
@Override
@Generated("com.github.javaparser.generator.core.node.CloneGenerator")
public ArrayCreationLevel clone() {
return (ArrayCreationLevel) accept(new CloneVisitor(), null);
}
@Override
@Generated("com.github.javaparser.generator.core.node.GetMetaModelGenerator")
public ArrayCreationLevelMetaModel getMetaModel() {
return JavaParserMetaModel.arrayCreationLevelMetaModel;
}
@Override
@Generated("com.github.javaparser.generator.core.node.ReplaceMethodGenerator")
public boolean replace(Node node, Node replacementNode) {
if (node == null)
return false;
for (int i = 0; i < annotations.size(); i++) {
if (annotations.get(i) == node) {
annotations.set(i, (AnnotationExpr) replacementNode);
return true;
}
}
if (dimension != null) {
if (node == dimension) {
setDimension((Expression) replacementNode);
return true;
}
}
return super.replace(node, replacementNode);
}
}
| 1 | 11,517 |
Are we removing this public method? Could it be useful to someone? Maybe we could implement it once in Node using the metamodel
|
javaparser-javaparser
|
java
|
@@ -47,6 +47,10 @@ class Plan < ActiveRecord::Base
self == self.class.popular
end
+ def includes_exercises?
+ sku != THE_WEEKLY_ITERATION_SKU
+ end
+
def subscription_interval
stripe_plan.interval
end
| 1 |
class Plan < ActiveRecord::Base
PRIME_249_SKU = "prime-249"
PROFESSIONAL_SKU = "professional"
THE_WEEKLY_ITERATION_SKU = "the-weekly-iteration"
has_many :checkouts
has_many :subscriptions, as: :plan
belongs_to :annual_plan, class_name: "Plan"
validates :description, presence: true
validates :price, presence: true
validates :name, presence: true
validates :short_description, presence: true
validates :sku, presence: true
include PlanForPublicListing
def self.individual
where includes_team: false
end
def self.team
where includes_team: true
end
def self.active
where active: true
end
def self.default
individual.active.featured.ordered.first
end
def self.default_team
team.active.featured.ordered.first
end
def self.basic
where(sku: THE_WEEKLY_ITERATION_SKU).first
end
def self.popular
where(sku: PROFESSIONAL_SKU).first
end
def popular?
self == self.class.popular
end
def subscription_interval
stripe_plan.interval
end
def fulfill(checkout, user)
user.create_subscription(
plan: self,
stripe_id: checkout.stripe_subscription_id
)
SubscriptionFulfillment.new(user, self).fulfill
if includes_team?
TeamFulfillment.new(checkout, user).fulfill
end
end
def included_in_plan?(plan)
false
end
def has_annual_plan?
annual_plan.present?
end
def has_feature?(feature)
public_send("includes_#{feature}?")
end
def annualized_payment
12 * price
end
def discounted_annual_payment
annual_plan.price
end
def annual_plan_sku
annual_plan.sku
end
private
def stripe_plan
@stripe_plan ||= Stripe::Plan.retrieve(sku)
end
end
| 1 | 14,034 |
I think this method is unused now?
|
thoughtbot-upcase
|
rb
|
@@ -115,6 +115,11 @@ func run(o *Options) error {
appliedToGroupStore,
networkPolicyStore)
+ var networkPolicyStatusController *networkpolicy.StatusController
+ if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
+ networkPolicyStatusController = networkpolicy.NewStatusController(crdClient, networkPolicyStore, cnpInformer, anpInformer)
+ }
+
endpointQuerier := networkpolicy.NewEndpointQuerier(networkPolicyController)
controllerQuerier := querier.NewControllerQuerier(networkPolicyController, o.config.APIPort)
| 1 |
// Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"net"
"os"
"path"
"time"
genericopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
aggregatorclientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"github.com/vmware-tanzu/antrea/pkg/apiserver"
"github.com/vmware-tanzu/antrea/pkg/apiserver/certificate"
"github.com/vmware-tanzu/antrea/pkg/apiserver/openapi"
"github.com/vmware-tanzu/antrea/pkg/apiserver/storage"
crdinformers "github.com/vmware-tanzu/antrea/pkg/client/informers/externalversions"
"github.com/vmware-tanzu/antrea/pkg/controller/metrics"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy"
"github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy/store"
"github.com/vmware-tanzu/antrea/pkg/controller/querier"
"github.com/vmware-tanzu/antrea/pkg/controller/stats"
"github.com/vmware-tanzu/antrea/pkg/controller/traceflow"
"github.com/vmware-tanzu/antrea/pkg/features"
"github.com/vmware-tanzu/antrea/pkg/k8s"
"github.com/vmware-tanzu/antrea/pkg/log"
"github.com/vmware-tanzu/antrea/pkg/monitor"
"github.com/vmware-tanzu/antrea/pkg/signals"
"github.com/vmware-tanzu/antrea/pkg/version"
)
const (
// informerDefaultResync is the default resync period if a handler doesn't specify one.
// Use the same default value as kube-controller-manager:
// https://github.com/kubernetes/kubernetes/blob/release-1.17/pkg/controller/apis/config/v1alpha1/defaults.go#L120
informerDefaultResync = 12 * time.Hour
// serverMinWatchTimeout determines the timeout allocated to watches from Antrea
// clients. Each watch will be allocated a random timeout between this value and twice this
// value, to help randomly distribute reconnections over time.
// This parameter corresponds to the MinRequestTimeout server config parameter in
// https://godoc.org/k8s.io/apiserver/pkg/server#Config.
// When the Antrea client re-creates a watch, all relevant NetworkPolicy objects need to be
// sent again by the controller. It may be a good idea to use a value which is larger than
// the kube-apiserver default (1800s). The K8s documentation states that clients should be
// able to handle watch timeouts gracefully but recommends using a large value in
// production.
serverMinWatchTimeout = 2 * time.Hour
)
var allowedPaths = []string{
"/healthz",
"/validate/tier",
"/validate/acnp",
"/validate/anp",
}
// run starts Antrea Controller with the given options and waits for termination signal.
func run(o *Options) error {
klog.Infof("Starting Antrea Controller (version %s)", version.GetFullVersion())
// Create K8s Clientset, Aggregator Clientset, CRD Clientset and SharedInformerFactory for the given config.
// Aggregator Clientset is used to update the CABundle of the APIServices backed by antrea-controller so that
// the aggregator can verify its serving certificate.
client, aggregatorClient, crdClient, err := k8s.CreateClients(o.config.ClientConnection)
if err != nil {
return fmt.Errorf("error creating K8s clients: %v", err)
}
informerFactory := informers.NewSharedInformerFactory(client, informerDefaultResync)
crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, informerDefaultResync)
podInformer := informerFactory.Core().V1().Pods()
namespaceInformer := informerFactory.Core().V1().Namespaces()
networkPolicyInformer := informerFactory.Networking().V1().NetworkPolicies()
nodeInformer := informerFactory.Core().V1().Nodes()
cnpInformer := crdInformerFactory.Security().V1alpha1().ClusterNetworkPolicies()
externalEntityInformer := crdInformerFactory.Core().V1alpha2().ExternalEntities()
anpInformer := crdInformerFactory.Security().V1alpha1().NetworkPolicies()
tierInformer := crdInformerFactory.Security().V1alpha1().Tiers()
traceflowInformer := crdInformerFactory.Ops().V1alpha1().Traceflows()
// Create Antrea object storage.
addressGroupStore := store.NewAddressGroupStore()
appliedToGroupStore := store.NewAppliedToGroupStore()
networkPolicyStore := store.NewNetworkPolicyStore()
networkPolicyController := networkpolicy.NewNetworkPolicyController(client,
crdClient,
podInformer,
namespaceInformer,
externalEntityInformer,
networkPolicyInformer,
cnpInformer,
anpInformer,
tierInformer,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore)
endpointQuerier := networkpolicy.NewEndpointQuerier(networkPolicyController)
controllerQuerier := querier.NewControllerQuerier(networkPolicyController, o.config.APIPort)
controllerMonitor := monitor.NewControllerMonitor(crdClient, nodeInformer, controllerQuerier)
var traceflowController *traceflow.Controller
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
traceflowController = traceflow.NewTraceflowController(crdClient, podInformer, traceflowInformer)
}
// statsAggregator takes stats summaries from antrea-agents, aggregates them, and serves the Stats APIs with the
// aggregated data. For now it's only used for NetworkPolicy stats.
var statsAggregator *stats.Aggregator
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
statsAggregator = stats.NewAggregator(networkPolicyInformer, cnpInformer, anpInformer)
}
apiServerConfig, err := createAPIServerConfig(o.config.ClientConnection.Kubeconfig,
client,
aggregatorClient,
o.config.SelfSignedCert,
o.config.APIPort,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore,
controllerQuerier,
endpointQuerier,
networkPolicyController,
statsAggregator,
o.config.EnablePrometheusMetrics)
if err != nil {
return fmt.Errorf("error creating API server config: %v", err)
}
apiServer, err := apiServerConfig.Complete(informerFactory).New()
if err != nil {
return fmt.Errorf("error creating API server: %v", err)
}
err = apiserver.CleanupDeprecatedAPIServices(aggregatorClient)
if err != nil {
return fmt.Errorf("failed to clean up the deprecated APIServices: %v", err)
}
// Set up signal capture: the first SIGTERM / SIGINT signal is handled gracefully and will
// cause the stopCh channel to be closed; if another signal is received before the program
// exits, we will force exit.
stopCh := signals.RegisterSignalHandlers()
log.StartLogFileNumberMonitor(stopCh)
informerFactory.Start(stopCh)
crdInformerFactory.Start(stopCh)
go controllerMonitor.Run(stopCh)
go networkPolicyController.Run(stopCh)
go apiServer.Run(stopCh)
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
go statsAggregator.Run(stopCh)
}
if o.config.EnablePrometheusMetrics {
metrics.InitializePrometheusMetrics()
}
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
go traceflowController.Run(stopCh)
}
<-stopCh
klog.Info("Stopping Antrea controller")
return nil
}
func createAPIServerConfig(kubeconfig string,
client clientset.Interface,
aggregatorClient aggregatorclientset.Interface,
selfSignedCert bool,
bindPort int,
addressGroupStore storage.Interface,
appliedToGroupStore storage.Interface,
networkPolicyStore storage.Interface,
controllerQuerier querier.ControllerQuerier,
endpointQuerier networkpolicy.EndpointQuerier,
npController *networkpolicy.NetworkPolicyController,
statsAggregator *stats.Aggregator,
enableMetrics bool) (*apiserver.Config, error) {
secureServing := genericoptions.NewSecureServingOptions().WithLoopback()
authentication := genericoptions.NewDelegatingAuthenticationOptions()
authorization := genericoptions.NewDelegatingAuthorizationOptions().WithAlwaysAllowPaths(allowedPaths...)
caCertController, err := certificate.ApplyServerCert(selfSignedCert, client, aggregatorClient, secureServing)
if err != nil {
return nil, fmt.Errorf("error applying server cert: %v", err)
}
secureServing.BindPort = bindPort
secureServing.BindAddress = net.ParseIP("0.0.0.0")
// kubeconfig file is useful when antrea-controller isn't running as a pod, like during development.
if len(kubeconfig) > 0 {
authentication.RemoteKubeConfigFile = kubeconfig
authorization.RemoteKubeConfigFile = kubeconfig
}
serverConfig := genericapiserver.NewConfig(apiserver.Codecs)
if err := secureServing.ApplyTo(&serverConfig.SecureServing, &serverConfig.LoopbackClientConfig); err != nil {
return nil, err
}
if err := authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, nil); err != nil {
return nil, err
}
if err := authorization.ApplyTo(&serverConfig.Authorization); err != nil {
return nil, err
}
if err := os.MkdirAll(path.Dir(apiserver.TokenPath), os.ModeDir); err != nil {
return nil, fmt.Errorf("error when creating dirs of token file: %v", err)
}
if err := ioutil.WriteFile(apiserver.TokenPath, []byte(serverConfig.LoopbackClientConfig.BearerToken), 0600); err != nil {
return nil, fmt.Errorf("error when writing loopback access token to file: %v", err)
}
serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(
openapi.GetOpenAPIDefinitions,
genericopenapi.NewDefinitionNamer(apiserver.Scheme))
serverConfig.OpenAPIConfig.Info.Title = "Antrea"
serverConfig.EnableMetrics = enableMetrics
serverConfig.MinRequestTimeout = int(serverMinWatchTimeout.Seconds())
return apiserver.NewConfig(
serverConfig,
addressGroupStore,
appliedToGroupStore,
networkPolicyStore,
caCertController,
statsAggregator,
controllerQuerier,
endpointQuerier,
npController), nil
}
| 1 | 26,985 |
I know we discussed this before and decided to reuse the AntreaPolicy gate. But do you think realization status will introduce much overhead or not?
|
antrea-io-antrea
|
go
|
@@ -0,0 +1,11 @@
+package constants
+
+const (
+ RunController = "cd ${GOPATH}/src/github.com/kubeedge/kubeedge/cloud/; sudo nohup ./edgecontroller > edgecontroller.log 2>&1 &"
+ RunEdgecore = "cd ${GOPATH}/src/github.com/kubeedge/kubeedge/edge/; sudo nohup ./edge_core > edge_core.log 2>&1 &"
+ RunEdgeSite = "cd ${GOPATH}/src/github.com/kubeedge/kubeedge/edgesite/; sudo nohup ./edgesite > edgesite.log 2>&1 &"
+
+ AppHandler = "/api/v1/namespaces/default/pods"
+ NodeHandler = "/api/v1/nodes"
+ DeploymentHandler = "/apis/apps/v1/namespaces/default/deployments"
+)
| 1 | 1 | 11,398 |
please add a blank line at the end of this file. Applicable to similar cases below
|
kubeedge-kubeedge
|
go
|
|
@@ -203,6 +203,9 @@ public class RegistrationRequest {
if (pendingConfiguration.port != null) {
pendingRequest.configuration.port = pendingConfiguration.port;
}
+ if (pendingConfiguration.remoteHost != null) {
+ pendingRequest.configuration.remoteHost = pendingConfiguration.remoteHost;
+ }
// make sure we have a valid host
pendingRequest.configuration.fixUpHost();
| 1 |
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.grid.common;
import org.openqa.grid.common.exception.GridConfigurationException;
import org.openqa.grid.internal.utils.configuration.GridNodeConfiguration;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import java.util.Map;
import java.util.TreeMap;
/**
* Helper to register to the grid. Using JSON to exchange the object between the node and the hub.
*/
public class RegistrationRequest {
// some special param for capability
public static final String MAX_INSTANCES = "maxInstances";
// see enum SeleniumProtocol
public static final String SELENIUM_PROTOCOL = "seleniumProtocol";
public static final String PATH = "path";
private String name;
private String description;
private GridNodeConfiguration configuration;
/**
* Create a new registration request using the default values of a
* {@link GridNodeConfiguration}
*/
public RegistrationRequest() {
this(new GridNodeConfiguration());
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
*/
public RegistrationRequest(GridNodeConfiguration configuration) {
this(configuration, null, null);
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}, and name
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
*/
public RegistrationRequest(GridNodeConfiguration configuration, String name) {
this(configuration, name, null);
}
/**
* Create a new registration request using the supplied {@link GridNodeConfiguration}, name, and
* description
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
* @param description the description for the remote host
*/
public RegistrationRequest(GridNodeConfiguration configuration, String name, String description) {
this.configuration = (configuration == null) ? new GridNodeConfiguration() : configuration;
this.name = name;
this.description = description;
// make sure we have something that looks like a valid host
this.configuration.fixUpHost();
// make sure the capabilities are updated with required fields
this.configuration.fixUpCapabilities();
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
public GridNodeConfiguration getConfiguration() {
return configuration;
}
public Map<String, Object> toJson() {
Map<String, Object> json = new TreeMap<>();
json.put("class", getClass());
json.put("name", getName());
json.put("description", getDescription());
json.put("configuration", getConfiguration());
return json;
}
/**
* Create an object from a registration request formatted as a json string.
*/
public static RegistrationRequest fromJson(Map<String, Object> raw) throws JsonException {
// If we could, we'd just get Json to coerce this for us, but that would lead to endless
// recursion as the first thing it would do would be to call this very method. *sigh*
Json json = new Json();
RegistrationRequest request = new RegistrationRequest();
if (raw.get("name") instanceof String) {
request.name = (String) raw.get("name");
}
if (raw.get("description") instanceof String) {
request.description = (String) raw.get("description");
}
if (raw.get("configuration") instanceof Map) {
// This is nasty. Look away now!
String converted = json.toJson(raw.get("configuration"));
request.configuration = GridConfiguredJson.toType(converted, GridNodeConfiguration.class);
}
return request;
}
/**
* Build a RegistrationRequest.
*/
public static RegistrationRequest build() {
return RegistrationRequest.build(new GridNodeConfiguration(), null, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}. This is different
* than {@code new RegistrationRequest(GridNodeConfiguration)} because it will first load any
* specified {@link GridNodeConfiguration#nodeConfigFile} and then merge the provided
* configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
*/
public static RegistrationRequest build(GridNodeConfiguration configuration) {
return RegistrationRequest.build(configuration, null, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}, use the provided
* name. This is different than {@code new RegistrationRequest(GridNodeConfiguration, String)}
* because it will first load any specified {@link GridNodeConfiguration#nodeConfigFile} and then
* merge the provided configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
*/
public static RegistrationRequest build(GridNodeConfiguration configuration, String name) {
return RegistrationRequest.build(configuration, name, null);
}
/**
* Build a RegistrationRequest from the provided {@link GridNodeConfiguration}, use the provided
* name and description. This is different than {@code new RegistrationRequest(GridNodeConfiguration,
* String, String)} because it will first load any specified {@link
* GridNodeConfiguration#nodeConfigFile} and then merge the provided configuration onto it.
*
* @param configuration the {@link GridNodeConfiguration} to use. Internally calls {@code new
* GridNodeConfiguration()} if a {@code null} value is provided since a
* request without configuration is not valid.
* @param name the name for the remote
* @param description the description for the remote host
*/
public static RegistrationRequest build(GridNodeConfiguration configuration, String name, String description) {
RegistrationRequest pendingRequest = new RegistrationRequest(configuration, name, description);
GridNodeConfiguration pendingConfiguration = pendingRequest.configuration;
if (pendingConfiguration.nodeConfigFile != null) {
pendingRequest.configuration = GridNodeConfiguration.loadFromJSON(pendingConfiguration.nodeConfigFile);
}
pendingRequest.configuration.merge(pendingConfiguration);
//update important merge protected values for the pendingRequest we are building.
if (pendingConfiguration.host != null) {
pendingRequest.configuration.host = pendingConfiguration.host;
}
if (pendingConfiguration.port != null) {
pendingRequest.configuration.port = pendingConfiguration.port;
}
// make sure we have a valid host
pendingRequest.configuration.fixUpHost();
// make sure the capabilities are updated with required fields
pendingRequest.configuration.fixUpCapabilities();
pendingRequest.configuration.dropCapabilitiesThatDoesNotMatchCurrentPlatform();
return pendingRequest;
}
/**
* Validate the current setting and throw a config exception is an invalid setup is detected.
*
* @throws GridConfigurationException grid configuration
*/
public void validate() throws GridConfigurationException {
// validations occur here in the getters called on the configuration.
try {
configuration.getHubHost();
configuration.getHubPort();
} catch (RuntimeException e) {
throw new GridConfigurationException(e.getMessage());
}
}
}
| 1 | 15,979 |
This is not really needed, the line added in `GridNodeConfiguration.java` is what really fixes the `remoteHost` regression.
|
SeleniumHQ-selenium
|
java
|
@@ -588,6 +588,10 @@ class JoinQuery extends Query {
// use the filterCache to get a DocSet
if (toTermsEnum.docFreq() >= minDocFreqTo || resultBits == null) {
// use filter cache
+ if (! toSearcher.getFilterCache().isAsync()) {
+ throw new SolrException(SolrException.ErrorCode.INVALID_STATE,
+ "Using join queries with synchronous filterCache is not supported! Details can be found in Solr Reference Guide under 'query-settings-in-solrconfig'.");
+ }
DocSet toTermSet = toSearcher.getDocSet(toDeState);
resultListDocs += toTermSet.size();
if (resultBits != null) {
| 1 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.search;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiPostingsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.StringHelper;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieField;
import org.apache.solr.search.join.CrossCollectionJoinQParser;
import org.apache.solr.search.join.GraphPointsCollector;
import org.apache.solr.search.join.ScoreJoinQParserPlugin;
import org.apache.solr.util.RTimer;
import org.apache.solr.util.RefCounted;
public class JoinQParserPlugin extends QParserPlugin {
public static final String NAME = "join";
/** Choose the internal algorithm */
private static final String METHOD = "method";
private String routerField;
private Set<String> allowSolrUrls;
private static class JoinParams {
final String fromField;
final String fromCore;
final Query fromQuery;
final long fromCoreOpenTime;
final String toField;
public JoinParams(String fromField, String fromCore, Query fromQuery, long fromCoreOpenTime, String toField) {
this.fromField = fromField;
this.fromCore = fromCore;
this.fromQuery = fromQuery;
this.fromCoreOpenTime = fromCoreOpenTime;
this.toField = toField;
}
}
private enum Method {
index {
@Override
Query makeFilter(QParser qparser, JoinQParserPlugin plugin) throws SyntaxError {
final JoinParams jParams = parseJoin(qparser);
final JoinQuery q = new JoinQuery(jParams.fromField, jParams.toField, jParams.fromCore, jParams.fromQuery);
q.fromCoreOpenTime = jParams.fromCoreOpenTime;
return q;
}
@Override
Query makeJoinDirectFromParams(JoinParams jParams) {
return new JoinQuery(jParams.fromField, jParams.toField, null, jParams.fromQuery);
}
},
dvWithScore {
@Override
Query makeFilter(QParser qparser, JoinQParserPlugin plugin) throws SyntaxError {
return new ScoreJoinQParserPlugin().createParser(qparser.qstr, qparser.localParams, qparser.params, qparser.req).parse();
}
@Override
Query makeJoinDirectFromParams(JoinParams jParams) {
return ScoreJoinQParserPlugin.createJoinQuery(jParams.fromQuery, jParams.fromField, jParams.toField, org.apache.lucene.search.join.ScoreMode.None);
}
},
topLevelDV {
@Override
Query makeFilter(QParser qparser, JoinQParserPlugin plugin) throws SyntaxError {
final JoinParams jParams = parseJoin(qparser);
final JoinQuery q = createTopLevelJoin(jParams);
q.fromCoreOpenTime = jParams.fromCoreOpenTime;
return q;
}
@Override
Query makeJoinDirectFromParams(JoinParams jParams) {
return new TopLevelJoinQuery(jParams.fromField, jParams.toField, null, jParams.fromQuery);
}
private JoinQuery createTopLevelJoin(JoinParams jParams) {
if (isSelfJoin(jParams)) {
return new TopLevelJoinQuery.SelfJoin(jParams.fromField, jParams.fromQuery);
}
return new TopLevelJoinQuery(jParams.fromField, jParams.toField, jParams.fromCore, jParams.fromQuery);
}
private boolean isSelfJoin(JoinParams jparams) {
return jparams.fromCore == null &&
(jparams.fromField != null && jparams.fromField.equals(jparams.toField));
}
},
crossCollection {
@Override
Query makeFilter(QParser qparser, JoinQParserPlugin plugin) throws SyntaxError {
return new CrossCollectionJoinQParser(qparser.qstr, qparser.localParams, qparser.params, qparser.req,
plugin.routerField, plugin.allowSolrUrls).parse();
}
};
abstract Query makeFilter(QParser qparser, JoinQParserPlugin plugin) throws SyntaxError;
Query makeJoinDirectFromParams(JoinParams jParams) {
throw new IllegalStateException("Join method [" + name() + "] doesn't support qparser-less creation");
}
JoinParams parseJoin(QParser qparser) throws SyntaxError {
final String fromField = qparser.getParam("from");
final String fromIndex = qparser.getParam("fromIndex");
final String toField = qparser.getParam("to");
final String v = qparser.localParams.get(QueryParsing.V);
final String coreName;
Query fromQuery;
long fromCoreOpenTime = 0;
if (fromIndex != null && !fromIndex.equals(qparser.req.getCore().getCoreDescriptor().getName()) ) {
CoreContainer container = qparser.req.getCore().getCoreContainer();
// if in SolrCloud mode, fromIndex should be the name of a single-sharded collection
coreName = ScoreJoinQParserPlugin.getCoreName(fromIndex, container);
final SolrCore fromCore = container.getCore(coreName);
if (fromCore == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Cross-core join: no such core " + coreName);
}
RefCounted<SolrIndexSearcher> fromHolder = null;
LocalSolrQueryRequest otherReq = new LocalSolrQueryRequest(fromCore, qparser.params);
try {
QParser parser = QParser.getParser(v, otherReq);
fromQuery = parser.getQuery();
fromHolder = fromCore.getRegisteredSearcher();
if (fromHolder != null) fromCoreOpenTime = fromHolder.get().getOpenNanoTime();
} finally {
otherReq.close();
fromCore.close();
if (fromHolder != null) fromHolder.decref();
}
} else {
coreName = null;
QParser fromQueryParser = qparser.subQuery(v, null);
fromQueryParser.setIsFilter(true);
fromQuery = fromQueryParser.getQuery();
}
final String indexToUse = coreName == null ? fromIndex : coreName;
return new JoinParams(fromField, indexToUse, fromQuery, fromCoreOpenTime, toField);
}
}
@Override
@SuppressWarnings({"unchecked"})
public void init(@SuppressWarnings({"rawtypes"}) NamedList args) {
routerField = (String) args.get("routerField");
if (args.get("allowSolrUrls") != null) {
allowSolrUrls = new HashSet<>();
allowSolrUrls.addAll((List<String>) args.get("allowSolrUrls"));
} else {
allowSolrUrls = null;
}
}
@Override
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
final JoinQParserPlugin plugin = this;
return new QParser(qstr, localParams, params, req) {
@Override
public Query parse() throws SyntaxError {
if (localParams != null && localParams.get(METHOD) != null) {
// TODO Make sure 'method' is valid value here and give users a nice error
final Method explicitMethod = Method.valueOf(localParams.get(METHOD));
return explicitMethod.makeFilter(this, plugin);
}
// Legacy join behavior before introduction of SOLR-13892
if(localParams!=null && localParams.get(ScoreJoinQParserPlugin.SCORE)!=null) {
return new ScoreJoinQParserPlugin().createParser(qstr, localParams, params, req).parse();
} else {
return Method.index.makeFilter(this, plugin);
}
}
};
}
private static final EnumSet<Method> JOIN_METHOD_WHITELIST = EnumSet.of(Method.index, Method.topLevelDV, Method.dvWithScore);
/**
* A helper method for other plugins to create (non-scoring) JoinQueries wrapped around arbitrary queries against the same core.
*
* @param subQuery the query to define the starting set of documents on the "left side" of the join
* @param fromField "left side" field name to use in the join
* @param toField "right side" field name to use in the join
* @param method indicates which implementation should be used to process the join. Currently only 'index',
* 'dvWithScore', and 'topLevelDV' are supported.
*/
public static Query createJoinQuery(Query subQuery, String fromField, String toField, String method) {
// no method defaults to 'index' for back compatibility
if ( method == null ) {
return new JoinQuery(fromField, toField, null, subQuery);
}
final Method joinMethod = parseMethodString(method);
if (! JOIN_METHOD_WHITELIST.contains(joinMethod)) {
// TODO Throw something that the callers here (FacetRequest) can catch and produce a more domain-appropriate error message for?
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Join method " + method + " not supported for non-scoring, same-core joins");
}
final JoinParams jParams = new JoinParams(fromField, null, subQuery, 0L, toField);
return joinMethod.makeJoinDirectFromParams(jParams);
}
private static Method parseMethodString(String method) {
try {
return Method.valueOf(method);
} catch (IllegalArgumentException iae) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Provided join method '" + method + "' not supported");
}
}
}
class JoinQuery extends Query {
String fromField;
String toField;
String fromIndex; // TODO: name is misleading here compared to JoinQParserPlugin usage - here it must be a core name
Query q;
long fromCoreOpenTime;
public JoinQuery(String fromField, String toField, String coreName, Query subQuery) {
assert null != fromField;
assert null != toField;
assert null != subQuery;
this.fromField = fromField;
this.toField = toField;
this.q = subQuery;
this.fromIndex = coreName; // may be null
}
public Query getQuery() { return q; }
@Override
public Query rewrite(IndexReader reader) throws IOException {
// don't rewrite the subQuery
return super.rewrite(reader);
}
@Override
public void visit(QueryVisitor visitor) {
}
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
return new JoinQueryWeight((SolrIndexSearcher) searcher, scoreMode, boost);
}
protected class JoinQueryWeight extends ConstantScoreWeight {
SolrIndexSearcher fromSearcher;
RefCounted<SolrIndexSearcher> fromRef;
SolrIndexSearcher toSearcher;
ResponseBuilder rb;
ScoreMode scoreMode;
public JoinQueryWeight(SolrIndexSearcher searcher, ScoreMode scoreMode, float boost) {
super(JoinQuery.this, boost);
this.scoreMode = scoreMode;
this.fromSearcher = searcher;
SolrRequestInfo info = SolrRequestInfo.getRequestInfo();
if (info != null) {
rb = info.getResponseBuilder();
}
if (fromIndex == null) {
this.fromSearcher = searcher;
} else {
if (info == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join must have SolrRequestInfo");
}
CoreContainer container = searcher.getCore().getCoreContainer();
final SolrCore fromCore = container.getCore(fromIndex);
if (fromCore == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join: no such core " + fromIndex);
}
if (info.getReq().getCore() == fromCore) {
// if this is the same core, use the searcher passed in... otherwise we could be warming and
// get an older searcher from the core.
fromSearcher = searcher;
} else {
// This could block if there is a static warming query with a join in it, and if useColdSearcher is true.
// Deadlock could result if two cores both had useColdSearcher and had joins that used each other.
// This would be very predictable though (should happen every time if misconfigured)
fromRef = fromCore.getSearcher(false, true, null);
// be careful not to do anything with this searcher that requires the thread local
// SolrRequestInfo in a manner that requires the core in the request to match
fromSearcher = fromRef.get();
}
if (fromRef != null) {
final RefCounted<SolrIndexSearcher> ref = fromRef;
info.addCloseHook(ref::decref);
}
info.addCloseHook(fromCore);
}
this.toSearcher = searcher;
}
DocSet resultSet;
Filter filter;
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
if (filter == null) {
boolean debug = rb != null && rb.isDebug();
RTimer timer = (debug ? new RTimer() : null);
resultSet = getDocSet();
if (timer != null) timer.stop();
if (debug) {
SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<>();
dbg.add("time", (long) timer.getTime());
dbg.add("fromSetSize", fromSetSize); // the input
dbg.add("toSetSize", resultSet.size()); // the output
dbg.add("fromTermCount", fromTermCount);
dbg.add("fromTermTotalDf", fromTermTotalDf);
dbg.add("fromTermDirectCount", fromTermDirectCount);
dbg.add("fromTermHits", fromTermHits);
dbg.add("fromTermHitsTotalDf", fromTermHitsTotalDf);
dbg.add("toTermHits", toTermHits);
dbg.add("toTermHitsTotalDf", toTermHitsTotalDf);
dbg.add("toTermDirectCount", toTermDirectCount);
dbg.add("smallSetsDeferred", smallSetsDeferred);
dbg.add("toSetDocsAdded", resultListDocs);
// TODO: perhaps synchronize addDebug in the future...
rb.addDebug(dbg, "join", JoinQuery.this.toString());
}
filter = resultSet.getTopFilter();
}
// Although this set only includes live docs, other filters can be pushed down to queries.
DocIdSet readerSet = filter.getDocIdSet(context, null);
if (readerSet == null) {
return null;
}
DocIdSetIterator readerSetIterator = readerSet.iterator();
if (readerSetIterator == null) {
return null;
}
return new ConstantScoreScorer(this, score(), scoreMode, readerSetIterator);
}
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return false;
}
// most of these statistics are only used for the enum method
int fromSetSize; // number of docs in the fromSet (that match the from query)
long resultListDocs; // total number of docs collected
int fromTermCount;
long fromTermTotalDf;
int fromTermDirectCount; // number of fromTerms that were too small to use the filter cache
int fromTermHits; // number of fromTerms that intersected the from query
long fromTermHitsTotalDf; // sum of the df of the matching terms
int toTermHits; // number of intersecting from terms that match a term in the to field
long toTermHitsTotalDf; // sum of the df for the toTermHits
int toTermDirectCount; // number of toTerms that we set directly on a bitset rather than doing set intersections
int smallSetsDeferred; // number of small sets collected to be used later to intersect w/ bitset or create another small set
public DocSet getDocSet() throws IOException {
SchemaField fromSchemaField = fromSearcher.getSchema().getField(fromField);
SchemaField toSchemaField = toSearcher.getSchema().getField(toField);
boolean usePoints = false;
if (toSchemaField.getType().isPointField()) {
if (!fromSchemaField.hasDocValues()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "join from field " + fromSchemaField + " should have docValues to join with points field " + toSchemaField);
}
usePoints = true;
}
if (!usePoints) {
return getDocSetEnumerate();
}
// point fields
GraphPointsCollector collector = new GraphPointsCollector(fromSchemaField, null, null);
fromSearcher.search(q, collector);
Query resultQ = collector.getResultQuery(toSchemaField, false);
// don't cache the resulting docSet... the query may be very large. Better to cache the results of the join query itself
DocSet result = resultQ==null ? DocSet.EMPTY : toSearcher.getDocSetNC(resultQ, null);
return result;
}
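  // The points path above gathers the docValues of the matching "from" documents with
  // GraphPointsCollector and runs the resulting set query against the "to" field; the exact
  // query form is up to the collector. The enumeration path below instead walks the from-field
  // terms and seeks each intersecting term in the to field.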
public DocSet getDocSetEnumerate() throws IOException {
FixedBitSet resultBits = null;
// minimum docFreq to use the cache
int minDocFreqFrom = Math.max(5, fromSearcher.maxDoc() >> 13);
int minDocFreqTo = Math.max(5, toSearcher.maxDoc() >> 13);
// use a smaller size than normal since we will need to sort and dedup the results
int maxSortedIntSize = Math.max(10, toSearcher.maxDoc() >> 10);
DocSet fromSet = fromSearcher.getDocSet(q);
fromSetSize = fromSet.size();
List<DocSet> resultList = new ArrayList<>(10);
// make sure we have a set that is fast for random access, if we will use it for that
DocSet fastForRandomSet = fromSet;
if (minDocFreqFrom>0 && fromSet instanceof SortedIntDocSet) {
SortedIntDocSet sset = (SortedIntDocSet)fromSet;
fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
}
LeafReader fromReader = fromSearcher.getSlowAtomicReader();
LeafReader toReader = fromSearcher==toSearcher ? fromReader : toSearcher.getSlowAtomicReader();
Terms terms = fromReader.terms(fromField);
Terms toTerms = toReader.terms(toField);
if (terms == null || toTerms==null) return DocSet.EMPTY;
String prefixStr = TrieField.getMainValuePrefix(fromSearcher.getSchema().getFieldType(fromField));
BytesRef prefix = prefixStr == null ? null : new BytesRef(prefixStr);
BytesRef term = null;
TermsEnum termsEnum = terms.iterator();
TermsEnum toTermsEnum = toTerms.iterator();
SolrIndexSearcher.DocsEnumState fromDeState = null;
SolrIndexSearcher.DocsEnumState toDeState = null;
if (prefix == null) {
term = termsEnum.next();
} else {
if (termsEnum.seekCeil(prefix) != TermsEnum.SeekStatus.END) {
term = termsEnum.term();
}
}
Bits fromLiveDocs = fromSearcher.getLiveDocsBits();
Bits toLiveDocs = fromSearcher == toSearcher ? fromLiveDocs : toSearcher.getLiveDocsBits();
fromDeState = new SolrIndexSearcher.DocsEnumState();
fromDeState.fieldName = fromField;
fromDeState.liveDocs = fromLiveDocs;
fromDeState.termsEnum = termsEnum;
fromDeState.postingsEnum = null;
fromDeState.minSetSizeCached = minDocFreqFrom;
toDeState = new SolrIndexSearcher.DocsEnumState();
toDeState.fieldName = toField;
toDeState.liveDocs = toLiveDocs;
toDeState.termsEnum = toTermsEnum;
toDeState.postingsEnum = null;
toDeState.minSetSizeCached = minDocFreqTo;
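    // Main loop over the from-field terms: a term contributes only if its postings intersect the
    // "from" DocSet (checked directly for rare terms, via the filter cache otherwise). For each
    // contributing term, the same bytes are looked up in the "to" field and its postings are
    // added to the result, either as a cached DocSet or directly onto a bitset.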
while (term != null) {
if (prefix != null && !StringHelper.startsWith(term, prefix))
break;
fromTermCount++;
boolean intersects = false;
int freq = termsEnum.docFreq();
fromTermTotalDf++;
if (freq < minDocFreqFrom) {
fromTermDirectCount++;
// OK to skip liveDocs, since we check for intersection with docs matching query
fromDeState.postingsEnum = fromDeState.termsEnum.postings(fromDeState.postingsEnum, PostingsEnum.NONE);
PostingsEnum postingsEnum = fromDeState.postingsEnum;
if (postingsEnum instanceof MultiPostingsEnum) {
MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
outer: for (int subindex = 0; subindex<numSubs; subindex++) {
MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
if (sub.postingsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid+base)) {
intersects = true;
break outer;
}
}
}
} else {
int docid;
while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid)) {
intersects = true;
break;
}
}
}
} else {
// use the filter cache
DocSet fromTermSet = fromSearcher.getDocSet(fromDeState);
intersects = fromSet.intersects(fromTermSet);
}
if (intersects) {
fromTermHits++;
fromTermHitsTotalDf++;
TermsEnum.SeekStatus status = toTermsEnum.seekCeil(term);
if (status == TermsEnum.SeekStatus.END) break;
if (status == TermsEnum.SeekStatus.FOUND) {
toTermHits++;
int df = toTermsEnum.docFreq();
toTermHitsTotalDf += df;
if (resultBits==null && df + resultListDocs > maxSortedIntSize && resultList.size() > 0) {
resultBits = new FixedBitSet(toSearcher.maxDoc());
}
            // use the filterCache to get a DocSet if the term's posting list is large enough to
            // be worth caching, or if we don't yet have a bitset to set bits on directly
if (toTermsEnum.docFreq() >= minDocFreqTo || resultBits == null) {
// use filter cache
DocSet toTermSet = toSearcher.getDocSet(toDeState);
resultListDocs += toTermSet.size();
if (resultBits != null) {
toTermSet.addAllTo(new BitDocSet(resultBits));
} else {
if (toTermSet instanceof BitDocSet) {
resultBits = ((BitDocSet)toTermSet).bits.clone();
} else {
resultList.add(toTermSet);
}
}
} else {
toTermDirectCount++;
// need to use liveDocs here so we don't map to any deleted ones
toDeState.postingsEnum = toDeState.termsEnum.postings(toDeState.postingsEnum, PostingsEnum.NONE);
toDeState.postingsEnum = BitsFilteredPostingsEnum.wrap(toDeState.postingsEnum, toDeState.liveDocs);
PostingsEnum postingsEnum = toDeState.postingsEnum;
if (postingsEnum instanceof MultiPostingsEnum) {
MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
for (int subindex = 0; subindex<numSubs; subindex++) {
MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
if (sub.postingsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
resultListDocs++;
resultBits.set(docid + base);
}
}
} else {
int docid;
while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
resultListDocs++;
resultBits.set(docid);
}
}
}
}
}
term = termsEnum.next();
}
smallSetsDeferred = resultList.size();
if (resultBits != null) {
BitDocSet bitSet = new BitDocSet(resultBits);
for (DocSet set : resultList) {
set.addAllTo(bitSet);
}
return bitSet;
}
if (resultList.size()==0) {
return DocSet.EMPTY;
}
if (resultList.size() == 1) {
return resultList.get(0);
}
int sz = 0;
for (DocSet set : resultList)
sz += set.size();
int[] docs = new int[sz];
int pos = 0;
for (DocSet set : resultList) {
System.arraycopy(((SortedIntDocSet)set).getDocs(), 0, docs, pos, set.size());
pos += set.size();
}
Arrays.sort(docs);
int[] dedup = new int[sz];
pos = 0;
int last = -1;
for (int doc : docs) {
if (doc != last)
dedup[pos++] = doc;
last = doc;
}
if (pos != dedup.length) {
dedup = Arrays.copyOf(dedup, pos);
}
return new SortedIntDocSet(dedup, dedup.length);
}
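  // Worked example of the final merge: deferred small sets {3,7} and {5,7} are concatenated to
  // [3,7,5,7], sorted to [3,5,7,7], and deduplicated to [3,5,7], which is returned as a
  // SortedIntDocSet of length 3.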
}
@Override
public String toString(String field) {
return "{!join from="+fromField+" to="+toField
+ (fromIndex != null ? " fromIndex="+fromIndex : "")
+"}"+q.toString();
}
@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
equalsTo(getClass().cast(other));
}
private boolean equalsTo(JoinQuery other) {
return this.fromField.equals(other.fromField)
&& this.toField.equals(other.toField)
&& this.q.equals(other.q)
&& Objects.equals(fromIndex, other.fromIndex)
&& this.fromCoreOpenTime == other.fromCoreOpenTime;
}
@Override
public int hashCode() {
int h = classHash();
h = h * 31 + fromField.hashCode();
h = h * 31 + toField.hashCode();
h = h * 31 + q.hashCode();
h = h * 31 + Objects.hashCode(fromIndex);
h = h * 31 + (int) fromCoreOpenTime;
return h;
}
}
| 1 | 41,007 | filterCache can be null | apache-lucene-solr | java |