Columns: message (string, length 6 to 474) and diff (string, length 8 to 5.22k).
esp32/modsocket: Convert EADDRINUSE error code from lwip return value.
@@ -155,7 +155,9 @@ void usocket_events_handler(void) { NORETURN static void exception_from_errno(int _errno) { // Here we need to convert from lwip errno values to MicroPython's standard ones - if (_errno == EINPROGRESS) { + if (_errno == EADDRINUSE) { + _errno = MP_EADDRINUSE; + } else if (_errno == EINPROGRESS) { _errno = MP_EINPROGRESS; } mp_raise_OSError(_errno);
examples/trace: update enable script to work with new API endpoint.
set -eu REMOTE_HOST=${REMOTE_HOST:-127.0.0.1} if command -v http; then - http -v $REMOTE_HOST:2020/api/v1/trace input=dummy.0 output=stdout prefix=trace. enable:=true params:='{"format":"json"}' + http -v $REMOTE_HOST:2020/api/v1/trace/dummy.0 output=stdout prefix=trace. params:='{"format":"json"}' elif command -v curl; then - curl --header 'Content-Type: application/json' --data '{"enable":true, "input": "dummy.0", "output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace + curl --header 'Content-Type: application/json' --data '{"output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace/dummy.0 else echo "No curl or httpie installed" apt-get update apt-get install -y curl - curl --header 'Content-Type: application/json' --data '{"enable":true, "input": "dummy.0", "output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace + curl --header 'Content-Type: application/json' --data '{"output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace/dummy fi
doc: add blacklist plugin to plugin README.md
@@ -216,6 +216,7 @@ copied by the `spec` plugin just before) to validate values: - [network](network/) by using network APIs - [path](path/) by checking files on file system - [unit](unit/) validates and normalizes units of memory (e.g. 20KB to 20000 Bytes) +- [blacklist](blacklist/) blacklist and reject values The same but experimental:
adding io configuration
@@ -12,7 +12,6 @@ using namespace CoreML::Specification; void NCatboost::NCoreML::ConfigureTrees(const TFullModel& model, TreeEnsembleParameters* ensemble) { const auto classesCount = static_cast<size_t>(model.ObliviousTrees.ApproxDimension); - CB_ENSURE(!model.HasCategoricalFeatures(), "model with only float features supported"); auto& binFeatures = model.ObliviousTrees.GetBinFeatures(); size_t currentSplitIndex = 0; auto currentTreeFirstLeafPtr = model.ObliviousTrees.LeafValues.data(); @@ -48,8 +47,22 @@ void NCatboost::NCoreML::ConfigureTrees(const TFullModel& model, TreeEnsemblePar for (int layer = treeDepth - 1; layer >= 0; --layer) { const auto& binFeature = binFeatures[model.ObliviousTrees.TreeSplits.at(currentSplitIndex)]; ++currentSplitIndex; - auto featureId = binFeature.FloatFeature.FloatFeature; - auto branchValue = binFeature.FloatFeature.Split; + auto featureType = binFeature.Type; + CB_ENSURE(featureType == ESplitType::FloatFeature || featureType == ESplitType::OneHotFeature, + "model with only float features or one hot encoded features supported"); + + int featureId; + int branchValueCat; + float branchValueFloat; + auto branchParameter = TreeEnsembleParameters::TreeNode::BranchOnValueGreaterThan; + if (featureType == ESplitType::FloatFeature) { + featureId = binFeature.FloatFeature.FloatFeature; + branchValueFloat = binFeature.FloatFeature.Split; + } else { + featureId = binFeature.OneHotFeature.CatFeatureIdx; + branchValueCat = binFeature.OneHotFeature.Value; + branchParameter = TreeEnsembleParameters::TreeNode::BranchOnValueEqual; + } auto nodesInLayerCount = std::pow(2, layer); TVector<TreeEnsembleParameters::TreeNode*> currentLayer(nodesInLayerCount); @@ -61,9 +74,12 @@ void NCatboost::NCoreML::ConfigureTrees(const TFullModel& model, TreeEnsemblePar branchNode->set_nodeid(lastNodeId); ++lastNodeId; - branchNode->set_nodebehavior(TreeEnsembleParameters::TreeNode::BranchOnValueGreaterThan); + branchNode->set_nodebehavior(branchParameter); branchNode->set_branchfeatureindex(featureId); - branchNode->set_branchfeaturevalue(branchValue); + if (featureType == ESplitType::FloatFeature) + branchNode->set_branchfeaturevalue(branchValueFloat); + else + branchNode->set_branchfeaturevalue(branchValueCat); branchNode->set_falsechildnodeid( previousLayer[2 * nodeIdx]->nodeid()); @@ -93,6 +109,19 @@ void NCatboost::NCoreML::ConfigureIO(const TFullModel& model, const NJson::TJson feature->set_allocated_type(featureType); } + for (const auto& oneHotFeature : model.ObliviousTrees.OneHotFeatures) { + for (const auto& oneHotValue : oneHotFeature.StringValues) { + auto feature = description->add_input(); + feature->set_name(oneHotValue); + + auto featureType = new FeatureType(); + featureType->set_isoptional(false); + featureType->set_allocated_int64type(new Int64FeatureType()); + feature->set_allocated_type(featureType); + + } + } + const auto classesCount = static_cast<size_t>(model.ObliviousTrees.ApproxDimension); regressor->mutable_treeensemble()->set_numpredictiondimensions(classesCount); for (size_t outputIdx = 0; outputIdx < classesCount; ++outputIdx) {
Disable most logs by default
// m3log (...) -------------------------------------------------------------------- -# define d_m3LogParse 1 -# define d_m3LogCompile 1 +# define d_m3LogParse 0 +# define d_m3LogCompile 0 # define d_m3LogStack 0 # define d_m3LogEmit 0 # define d_m3LogCodePages 0 # define d_m3LogModule 0 # define d_m3LogRuntime 1 -# define d_m3LogExec 1 +# define d_m3LogExec 0
btc: do not show `bech32` in receive screen It is redundant and possibly confusing, especially since the BitBoxApp has unified accounts (only one Bitcoin account) by default.
@@ -106,27 +106,7 @@ static commander_error_t _btc_pub_address_simple( } if (request->display) { const char* coin = btc_common_coin_name(request->coin); - char title[100] = {0}; - int n_written; - switch (request->output.script_config.config.simple_type) { - case BTCScriptConfig_SimpleType_P2WPKH_P2SH: - n_written = snprintf(title, sizeof(title), "%s", coin); - break; - case BTCScriptConfig_SimpleType_P2WPKH: - n_written = snprintf(title, sizeof(title), "%s bech32", coin); - break; - default: - return COMMANDER_ERR_GENERIC; - } - if (n_written < 0 || n_written >= (int)sizeof(title)) { - /* - * The message was truncated, or an error occurred. - * We don't want to display it: there could - * be some possibility for deceiving the user. - */ - return COMMANDER_ERR_GENERIC; - } - if (!workflow_verify_pub(title, response->pub)) { + if (!workflow_verify_pub(coin, response->pub)) { return COMMANDER_ERR_USER_ABORT; } }
Fixed issue where fisher would return '-nan' or 'nan' in different environments by adding an explicit test for NaN and using '-nan' (as expected by the test script)
@@ -85,8 +85,17 @@ void Fisher::giveFinalReport(RecordOutputMgr *outputMgr) printf("# p-values for fisher's exact test\n"); printf("left\tright\ttwo-tail\tratio\n"); + + /* Some implementations report NAN as negative, some as positive. To ensure + * we get consistent output from each compiler, we should do our own test. + * Since the test script assumes "-nan", let's setle on that. + */ + if(std::isnan(ratio)) { + printf("%.5g\t%.5g\t%.5g\t-nan\n", left, right, two); + } else { printf("%.5g\t%.5g\t%.5g\t%.3f\n", left, right, two, ratio); } +} unsigned long Fisher::getTotalIntersection(RecordKeyVector &recList) {
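How printf renders NaN is implementation-defined ("nan", "-nan", or "nan(...)"), which is what the fix above normalizes. A minimal sketch of the same idea in plain C; the function name is illustrative and not taken from bedtools:

    #include <math.h>
    #include <stdio.h>

    /* Print a ratio, normalizing NaN to the single spelling "-nan" so the
     * output is identical across libc implementations. Illustrative only. */
    static void print_ratio(double ratio)
    {
        if (isnan(ratio))
            printf("-nan\n");          /* canonical spelling expected by the test */
        else
            printf("%.3f\n", ratio);
    }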
Default multi file test to 64
@@ -2059,7 +2059,7 @@ static void demo_test_multi_scenario_free(picoquic_demo_stream_desc_t** scenario } } -size_t picohttp_test_multifile_number = 1999; +size_t picohttp_test_multifile_number = 64; #define MULTI_FILE_CLIENT_BIN "multi_file_client_trace.bin" #define MULTI_FILE_SERVER_BIN "multi_file_server_trace.bin"
toml: Another README restyle
@@ -182,7 +182,7 @@ kdb rm -r user/tests/storage sudo kdb umount user/tests/storage ``` -# Comments/Empty Lines +# Comments and Empty Lines The plugin preserves all comments with only one limitation for arrays. The amount of whitespace in front of a comment is also saved. For this purpose, each tab will get translated to 4 spaces. @@ -236,6 +236,7 @@ sudo kdb umount user/tests/storage ``` ## Comments in Arrays + Any amount of comments can be placed between array elements or between the first element and the opening brackets. However, only one comment - an inline commment - can be placed after the last element and the closing brackets.
Moving all pre-built libraries to LFS.
# Set the default behavior, in case people don't have core.autocrlf set. * text=auto - # Explicitly declare text files you want to always be normalized and converted # to native line endings on checkout. *.c text *.pdsc text *.svd text *.bat text - # Declare files that will always have CRLF line endings on checkout. *.uvproj text eol=crlf *.uvproj text eol=crlf - # Denote all files that are truly binary and should not be modified. *.png binary *.jpg binary -*.a binary -*.lib binary +*.a filter=lfs diff=lfs merge=lfs -text +*.lib filter=lfs diff=lfs merge=lfs -text *.exe binary
Emit HashrateNotification for miner's total hashrate
@@ -197,6 +197,7 @@ namespace Miningcore.Mining await cf.RunTx(async (con, tx) => { stats.Miner = minerHashes.Key; + double minerTotalHashrate = 0; foreach(var item in minerHashes) { @@ -206,6 +207,7 @@ namespace Miningcore.Mining if (windowActual >= MinHashrateCalculationWindow) { var hashrate = pool.HashrateFromShares(item.Sum, windowActual) * HashrateBoostFactor; + minerTotalHashrate += hashrate; // update stats.Hashrate = hashrate; @@ -218,6 +220,8 @@ namespace Miningcore.Mining messageBus.NotifyHashrateUpdated(pool.Config.Id, hashrate, stats.Miner, item.Worker); } } + + messageBus.NotifyHashrateUpdated(pool.Config.Id, minerTotalHashrate, stats.Miner, null); }); } }
Use COMPACT_EXTENDED_LIST when handling allocation lists
#define COMPACT_LARGE_ATOM 10 #define COMPACT_LARGE_YREG 12 +#define COMPACT_EXTENDED_LIST 0x37 #define COMPACT_EXTENDED_LITERAL 0x47 #define COMPACT_LARGE_IMM_MASK 0x18 @@ -424,7 +425,7 @@ typedef union } #define IS_EXTENDED_ALLOCATOR(code_chunk, base_index, off) \ - ((code_chunk[(base_index) + (off)]) & 0xF) == COMPACT_EXTENDED + (code_chunk[(base_index) + (off)]) == COMPACT_EXTENDED_LIST #define DECODE_ALLOCATOR_LIST(need, code_chunk, base_index, off, next_operand_offset) \ if (IS_EXTENDED_ALLOCATOR(code, base_index, off)) { \
test/accel_cal.c: Format with clang-format BRANCH=none TEST=none
@@ -29,12 +29,12 @@ struct accel_cal cal = { static bool accumulate(float x, float y, float z, float temperature) { - return accel_cal_accumulate(&cal, 0, x, y, z, temperature) - || accel_cal_accumulate(&cal, 200 * MSEC, x, y, z, temperature) - || accel_cal_accumulate(&cal, 400 * MSEC, x, y, z, temperature) - || accel_cal_accumulate(&cal, 600 * MSEC, x, y, z, temperature) - || accel_cal_accumulate(&cal, 800 * MSEC, x, y, z, temperature) - || accel_cal_accumulate(&cal, 1000 * MSEC, x, y, z, temperature); + return accel_cal_accumulate(&cal, 0, x, y, z, temperature) || + accel_cal_accumulate(&cal, 200 * MSEC, x, y, z, temperature) || + accel_cal_accumulate(&cal, 400 * MSEC, x, y, z, temperature) || + accel_cal_accumulate(&cal, 600 * MSEC, x, y, z, temperature) || + accel_cal_accumulate(&cal, 800 * MSEC, x, y, z, temperature) || + accel_cal_accumulate(&cal, 1000 * MSEC, x, y, z, temperature); } DECLARE_EC_TEST(test_calibrated_correctly_with_kasa) @@ -66,14 +66,10 @@ DECLARE_EC_TEST(test_calibrated_correctly_with_newton) float kasa_radius; int i; float data[] = { - 1.00290f, 0.09170f, 0.09649f, - 0.95183f, 0.23626f, 0.25853f, - 0.95023f, 0.15387f, 0.31865f, - 0.97374f, 0.01639f, 0.27675f, - 0.88521f, 0.30212f, 0.39558f, - 0.92787f, 0.35157f, 0.21209f, - 0.95162f, 0.33173f, 0.10924f, - 0.98397f, 0.22644f, 0.07737f, + 1.00290f, 0.09170f, 0.09649f, 0.95183f, 0.23626f, 0.25853f, + 0.95023f, 0.15387f, 0.31865f, 0.97374f, 0.01639f, 0.27675f, + 0.88521f, 0.30212f, 0.39558f, 0.92787f, 0.35157f, 0.21209f, + 0.95162f, 0.33173f, 0.10924f, 0.98397f, 0.22644f, 0.07737f, }; kasa_reset(&kasa); @@ -125,7 +121,9 @@ void before_test(void) accel_cal_reset(&cal); } -void after_test(void) {} +void after_test(void) +{ +} TEST_MAIN() {
Update Tools.md I might be wrong with pillow, but for me it kicked off with `ERROR: No matching distribution found for PIL`, which was resolved by installing pillow. In my very limited experience with Python, though, I seem to remember pillow being deprecated...? Either way, this fixed my error.
@@ -11,7 +11,7 @@ Have a look into the script for further details regarding the formats. ### Prerequisites: ``` shell -sudo python3 -m pip install construct bitarray bitstring +sudo python3 -m pip install construct bitarray bitstring pillow ``` ### Usage:
Simplify the appveyor script.
@@ -34,8 +34,8 @@ test_script: - ps: if ($Env:Platform -eq "x64") { cd x64 } - ps: cd "$Env:Configuration" - ps: vstest.console /logger:Appveyor UnitTest1.dll - # Alternative to UnitTest1 (apparently running the same tests): - - ps: .\picoquic_t -n -r -x fuzz_initial + # Alternative to UnitTest1 (running the same tests): + # - ps: .\picoquic_t -n -r -x fuzz_initial deploy: off
don't calc sprite_index unless needed
@@ -700,13 +700,14 @@ void SceneRenderActors_b() { LOG("CHECK FOR REDRAW Actor %u\n", i); - sprite_index = MUL_2(i); redraw = actors[i].redraw; // If just landed on new tile or needs a redraw if (redraw) { + sprite_index = MUL_2(i); + flip = FALSE; frame = actors[i].sprite;
esp_prov: added support for session cookie in SoftAP mode
@@ -39,6 +39,13 @@ class Transport_HTTP(Transport): try: self.conn.request('POST', path, tobytes(data), self.headers) response = self.conn.getresponse() + # While establishing a session, the device sends the Set-Cookie header + # with value 'session=cookie_session_id' in its first response of the session to the tool. + # To maintain the same session, successive requests from the tool should include + # an additional 'Cookie' header with the above recieved value. + for hdr_key, hdr_val in response.getheaders(): + if hdr_key == 'Set-Cookie': + self.headers['Cookie'] = hdr_val if response.status == 200: return response.read().decode('latin-1') except Exception as err:
esp32/modsocket: Make socket.read return when socket closes. socket.read(n) will try to read exactly the requested number of bytes. In blocking mode the read will only stop if the socket is closed, which is indicated by lwip by returning a zero-byte read. This patch makes sure the uPy socket code handles such a case.
@@ -365,7 +365,7 @@ STATIC mp_uint_t socket_stream_read(mp_obj_t self_in, void *buf, mp_uint_t size, // XXX Would be nicer to use RTC to handle timeouts for (int i=0; i<=sock->retries; i++) { int x = lwip_recvfrom_r(sock->fd, buf, size, 0, NULL, NULL); - if (x > 0) return x; + if (x >= 0) return x; if (x < 0 && errno != EWOULDBLOCK) { *errcode = errno; return MP_STREAM_ERROR; } check_for_exceptions(); }
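For context, this follows the usual POSIX convention: a receive call returning 0 means the peer closed the connection, so a blocking read loop must treat 0 as end-of-stream rather than retrying. A hedged sketch with plain BSD sockets, not the MicroPython/lwip code; the helper name is illustrative:

    #include <errno.h>
    #include <sys/types.h>
    #include <sys/socket.h>

    /* Returns bytes read, 0 on orderly close, -1 on error. Sketch only;
     * the lwip call in the diff follows the same 0-means-closed convention. */
    static ssize_t read_some(int fd, void *buf, size_t len)
    {
        ssize_t n = recv(fd, buf, len, 0);
        if (n > 0)
            return n;       /* got data */
        if (n == 0)
            return 0;       /* peer closed the socket: stop reading */
        return -1;          /* error (check errno, e.g. EWOULDBLOCK) */
    }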
py/asmxtensa: Handle function entry/exit when stack use larger than 127.
@@ -69,7 +69,12 @@ void asm_xtensa_entry(asm_xtensa_t *as, int num_locals) { // adjust the stack-pointer to store a0, a12, a13, a14 and locals, 16-byte aligned as->stack_adjust = (((4 + num_locals) * WORD_SIZE) + 15) & ~15; + if (SIGNED_FIT8(-as->stack_adjust)) { asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, -as->stack_adjust); + } else { + asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust); + asm_xtensa_op_sub(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9); + } // save return value (a0) and callee-save registers (a12, a13, a14) asm_xtensa_op_s32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); @@ -86,7 +91,13 @@ void asm_xtensa_exit(asm_xtensa_t *as) { asm_xtensa_op_l32i_n(as, ASM_XTENSA_REG_A0, ASM_XTENSA_REG_A1, 0); // restore stack-pointer and return + if (SIGNED_FIT8(as->stack_adjust)) { asm_xtensa_op_addi(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, as->stack_adjust); + } else { + asm_xtensa_op_movi(as, ASM_XTENSA_REG_A9, as->stack_adjust); + asm_xtensa_op_add(as, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A1, ASM_XTENSA_REG_A9); + } + asm_xtensa_op_ret_n(as); }
Use sockaddr length from fuzzer Commit made major changes and introduced the "HonggfuzzNetDriverServerAddress" function which can be defined by a fuzzer. Unfortunately the connection attempt didn't use the fuzzer-provided length and thus truncated longer socket address structures such as "sockaddr_un".
@@ -288,7 +288,7 @@ static bool netDriver_checkIfServerReady(int argc, char **argv) { socklen_t slen = HonggfuzzNetDriverServerAddress(&addr); if (slen > 0) { /* User provided specific destination address */ - int fd = netDriver_sockConnAddr(addr, sizeof(addr)); + int fd = netDriver_sockConnAddr(addr, slen); if (fd >= 0) { close(fd); hfnd_globals.saddr = addr;
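The underlying point is that sizeof() of a generic address variable is not the address length; AF_UNIX addresses in particular have a significant length that depends on the path. A hedged sketch of passing the exact socklen_t; the path handling and function name are illustrative, not honggfuzz code:

    #include <stddef.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/un.h>

    /* Connect to a UNIX-domain socket using the exact address length
     * rather than sizeof(addr). */
    static int connect_unix(int fd, const char *path)
    {
        struct sockaddr_un sun;
        memset(&sun, 0, sizeof(sun));
        sun.sun_family = AF_UNIX;
        strncpy(sun.sun_path, path, sizeof(sun.sun_path) - 1);
        socklen_t slen = (socklen_t)(offsetof(struct sockaddr_un, sun_path)
                                     + strlen(sun.sun_path) + 1);
        return connect(fd, (const struct sockaddr *)&sun, slen);
    }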
Set default reference to 2.1
@@ -333,7 +333,7 @@ def parse_command_line(): parser.add_argument("--encoder", dest="encoders", default="avx2", choices=coders, help="test encoder variant") - parser.add_argument("--reference", dest="reference", default="ref-2.0-avx2", + parser.add_argument("--reference", dest="reference", default="ref-2.1-avx2", choices=refcoders, help="reference encoder variant") astcProfile = ["ldr", "ldrs", "hdr", "all"]
Post the next recv when we pass maxreqh steps We need to wait for the oldest recv and post the next recv when we pass maxreqh steps (i.e., istep > maxreqh - 1). Without the fix the code will hang when using flow control with handshake
@@ -321,7 +321,7 @@ int pio_swapm(void *sendbuf, int *sendcounts, int *sdispls, MPI_Datatype *sendty /* We did comms in sets of size max_reqs, if istep > maxreqh * then there is a remainder that must be handled. */ - if (istep > maxreqh) + if (istep > maxreqh - 1) { p = istep - maxreqh; if (rcvids[p] != MPI_REQUEST_NULL)
UI docs: Rephrase the UI method function return value description It seems the =item isn't supposed to have pure numbers, or so perldoc tells me.
@@ -85,28 +85,13 @@ by closing the channel to the tty, maybe by destroying a dialog box. =back -All of these functions are expected to return one of these values: - -=over 4 - -=item 0 - -on error. - -=item 1 - -on success. - -=item -1 - -on out-off-band events, for example if some prompting has been -cancelled (by pressing Ctrl-C, for example). -This is only expected to be returned by the flusher or the reader. +All of these functions are expected to return 0 on error, 1 on +success, or -1 on out-off-band events, for example if some prompting +has been cancelled (by pressing Ctrl-C, for example). +Only the flusher or the reader are expected to return -1. If returned by another of the functions, it's treated as if 0 was returned. -=back - Regarding the writer and the reader, don't assume the former should only write and don't assume the latter should only read. This depends on the needs of the method.
doc/news: remove duplicate entries in next release news
@@ -140,10 +140,6 @@ The text below summarizes updates to the [C (and C++)-based libraries](https://w - Removed mentions of VERBOSE and replaced debug prints with the logger _(@mandoway)_ -### Core - -- Removed mentions of VERBOSE and replaced debug prints with the logger _(@mandoway)_ - ### <<Library>> - <<TODO>> @@ -214,10 +210,6 @@ you up-to-date with the multi-language support provided by Elektra. - Deleted occurrences of removed property key.fullname _(@mandoway)_ -### Python - -- Deleted occurrences of removed property key.fullname _(@mandoway)_ - ### CPP - Removed mentions of VERBOSE _(@mandoway)_
release: git add alpine release image
@@ -814,6 +814,8 @@ def buildRelease(stageName, image, packageRevision='1', sh "scripts/release/populate-release-notes.sh ${RELEASE_VERSION} ../${RELEASE_VERSION}/" sh "git add doc/news" sh "git commit -m 'release: add hashsums and statistics to release notes'" + sh "git add scripts/docker/alpine" + sh "git commit -m 'docker: update alpine release image'" } if (bundleRepo) { sh 'git bundle create libelektra.bundle --all'
Fix Quick Start to AOMP development The clone_aomp.sh script is located in the bin directory.
@@ -89,6 +89,7 @@ To build and clone all components using the latest development sources, first cl cd aomp git checkout aomp-dev git pull + cd bin ./clone_aomp.sh ``` The first time you run ./clone_aomp.sh, it could take a long time to clone the repositories.
ip: fix build without vector unit Type: fix
@@ -99,7 +99,9 @@ static inline u32 check_adj_port_range_x1 (const protocol_port_range_dpo_t * ppr_dpo, u16 dst_port, u32 next) { +#ifdef CLIB_HAVE_VEC128 u16x8 key = u16x8_splat (dst_port); +#endif int i; if (NULL == ppr_dpo || dst_port == 0) @@ -107,9 +109,20 @@ check_adj_port_range_x1 (const protocol_port_range_dpo_t * ppr_dpo, for (i = 0; i < ppr_dpo->n_used_blocks; i++) +#ifdef CLIB_HAVE_VEC128 if (!u16x8_is_all_zero ((ppr_dpo->blocks[i].low.as_u16x8 <= key) & (ppr_dpo->blocks[i].hi.as_u16x8 >= key))) return next; +#else + { + for (int j = 0; j < 8; j++) + { + if ((ppr_dpo->blocks[i].low.as_u16[j] <= dst_port) && + (ppr_dpo->blocks[i].hi.as_u16[j] >= dst_port)) + return next; + } + }; +#endif return IP4_SOURCE_AND_PORT_RANGE_CHECK_NEXT_DROP; }
Slightly reduce lock usage Locking the frame_buffer mutex to reference the input frame into the tmp_frame is unnecessary. This also fixes the missing unlock on error.
@@ -51,8 +51,6 @@ swap_frames(AVFrame **lhs, AVFrame **rhs) { bool sc_frame_buffer_push(struct sc_frame_buffer *fb, const AVFrame *frame, bool *previous_frame_skipped) { - sc_mutex_lock(&fb->mutex); - // Use a temporary frame to preserve pending_frame in case of error. // tmp_frame is an empty frame, no need to call av_frame_unref() beforehand. int r = av_frame_ref(fb->tmp_frame, frame); @@ -61,6 +59,8 @@ sc_frame_buffer_push(struct sc_frame_buffer *fb, const AVFrame *frame, return false; } + sc_mutex_lock(&fb->mutex); + // Now that av_frame_ref() succeeded, we can replace the previous // pending_frame swap_frames(&fb->pending_frame, &fb->tmp_frame);
Add a missing break statement when determining compression in exr2aces
@@ -137,6 +137,7 @@ exr2aces (const char inFileName[], case B44_COMPRESSION: case B44A_COMPRESSION: h.compression() = B44A_COMPRESSION; + break; default: h.compression() = PIZ_COMPRESSION;
avx512/abs: add SSE2 implementation of _mm_abs_epi64
@@ -125,6 +125,9 @@ simde__m128i simde_mm_abs_epi64(simde__m128i a) { #if defined(SIMDE_X86_AVX512VL_NATIVE) return _mm_abs_epi64(a); + #elif defined(SIMDE_X86_SSE2_NATIVE) + const __m128i m = _mm_srai_epi32(_mm_shuffle_epi32(a, 0xF5), 31); + return _mm_sub_epi64(_mm_xor_si128(a, m), m); #else simde__m128i_private r_, @@ -132,8 +135,13 @@ simde_mm_abs_epi64(simde__m128i a) { #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_i64 = vabsq_s64(a_.neon_i64); + #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) + const int64x2_t m = vshrq_n_s64(a_.neon_i64, 63); + r_.neon_i64 = vsubq_s64(veorq_s64(a_.neon_i64, m), m); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && !defined(HEDLEY_IBM_VERSION) r_.altivec_i64 = vec_abs(a_.altivec_i64); + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && 0 + r_.wasm_v128 = wasm_i64x2_abs(a_.wasm_v128); #else SIMDE_VECTORIZE for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
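The SSE2 path uses the classic branch-free absolute value: broadcast the sign into a mask m, then compute (x ^ m) - m. The scalar equivalent, as a sketch assuming two's-complement integers and an arithmetic right shift; the function name is illustrative:

    #include <stdint.h>

    /* Branch-free |x| for int64: m is all-ones when x is negative, zero
     * otherwise, so (x ^ m) - m negates negative inputs and passes through
     * non-negative ones. Undefined for INT64_MIN, like llabs(). */
    static int64_t abs64(int64_t x)
    {
        int64_t m = x >> 63;   /* assumes arithmetic shift of negative values */
        return (x ^ m) - m;
    }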
Fix doc regarding aliasing of modulus input to mbedtls_mpi_core_montmul()
@@ -273,8 +273,8 @@ mbedtls_mpi_uint mbedtls_mpi_core_montmul_init( const mbedtls_mpi_uint *N ); * \param[in] N Little-endian presentation of the modulus. * This must be odd, and have exactly the same number * of limbs as \p A. - * It must not alias or otherwise overlap any of the - * other parameters. + * It may alias \p X, but must not alias or otherwise + * overlap any of the other parameters. * \param[in] AN_limbs The number of limbs in \p X, \p A and \p N. * \param mm The Montgomery constant for \p N: -N^-1 mod 2^biL. * This can be calculated by `mbedtls_mpi_core_montmul_init()`.
Add step test-ruby
@@ -531,3 +531,41 @@ jobs: - name: Run Tests shell: bash run: python $GITHUB_WORKSPACE/test/python/tests.py + + test-ruby: + needs: [package] + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + ruby: ['2.3', '2.4', '2.5', '2.6', '2.7', '3.0', '3.1'] + + steps: + - uses: actions/checkout@v2 + - uses: ruby/setup-ruby@v1 + with: + ruby-version: ${{ matrix.ruby }} + + - name: Download Packages + uses: actions/download-artifact@v2 + with: + path: packages + + - name: Install Package + shell: bash + run: | + cd $GITHUB_WORKSPACE/packages/tinyspline + if [ "$RUNNER_OS" == "Linux" ]; then + find . -name "*linux*.gem" -exec gem install {} \; + elif [ "$RUNNER_OS" == "Windows" ]; then + find . -name "*windows*.gem" -exec gem install {} \; + elif [ "$RUNNER_OS" == "macOS" ]; then + find . -name "*darwin*.gem" -exec gem install {} \; + fi + + - name: Run Tests + shell: bash + run: | + cd $GITHUB_WORKSPACE/test/ruby + bundle install + ruby tests.rb
Update Debian version needed
@@ -16,9 +16,8 @@ The complete documentation (for users, admins and developers) is available here Requirements: -- Debian7 or higher Environment 64 bits (a Docker image is available if you are on Windows environment) -Recommended version is Debian 8 -(if you are on Debian 7 you will need to add the testing repo in /etc/apt/sources.list) +- Debian8 or higher Environment 64 bits (a Docker image is available if you are on Windows environment) +Recommended version is Debian 9 - Processor : 1GHZ and RAM : 8GB - Ports 8080, 5432, 9200, 5601 are opened - Debian environment : requires curl, debconf, unzip, sudo, libc6-dev, jq, lsof
change yp_util resource Note: mandatory check (NEED_CHECK) was skipped
}, "yp-util": { "formula": { - "sandbox_id": [401046412], + "sandbox_id": [413413694], "match": "yp_util" }, "executable": {
hv: vlapic: minor fix for update_msr_bitmap_x2apic_apicv Shouldn't trap TPR since we always enable "Use TPR shadow"
@@ -640,10 +640,11 @@ void update_msr_bitmap_x2apic_apicv(const struct acrn_vcpu *vcpu) * writes to them are virtualized with Register Virtualization * Refer to Section 29.1 in Intel SDM Vol. 3 */ - enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_TPR, INTERCEPT_DISABLE); enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_EOI, INTERCEPT_READ); enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_SELF_IPI, INTERCEPT_READ); } + + enable_msr_interception(msr_bitmap, MSR_IA32_EXT_APIC_TPR, INTERCEPT_DISABLE); } /*
Add a few more type casts
@@ -220,7 +220,7 @@ size_t picoquic_decode_transport_param_prefered_address(uint8_t * bytes, size_t byte_index += 2; cnx_id_length = bytes[byte_index++]; if (cnx_id_length > 0 && cnx_id_length <= PICOQUIC_CONNECTION_ID_MAX_SIZE && - byte_index + cnx_id_length + 16 <= bytes_max && + byte_index + (size_t)cnx_id_length + 16 <= bytes_max && cnx_id_length == picoquic_parse_connection_id(bytes + byte_index, cnx_id_length, &prefered_address->connection_id)){ byte_index += cnx_id_length; @@ -613,7 +613,7 @@ int picoquic_receive_transport_extensions_old(picoquic_cnx_t* cnx, int extension ret = picoquic_connection_error(cnx, PICOQUIC_TRANSPORT_PARAMETER_ERROR, 0); } else { - size_t extensions_end = byte_index + extensions_size; + size_t extensions_end = byte_index + (size_t)extensions_size; while (ret == 0 && byte_index < extensions_end) { if (byte_index + 4 > extensions_end) {
hls_intersect: a minor change to getopt list
@@ -197,7 +197,7 @@ int main(int argc, char *argv[]) }; ch = getopt_long(argc, argv, - "C:i:o:m:n:l:t:XVIvh", + "C:i:o:m:n:l:t:VIvh", long_options, &option_index); if (ch == -1) break;
Constants: Add missing variable to MSR test
@@ -36,6 +36,7 @@ kdb ls user/examples/constants #> user/examples/constants/cmake/BUILTIN_EXEC_FOLDER #> user/examples/constants/cmake/BUILTIN_PLUGIN_FOLDER #> user/examples/constants/cmake/CMAKE_INSTALL_PREFIX +#> user/examples/constants/cmake/ENABLE_ASAN #> user/examples/constants/cmake/ENABLE_DEBUG #> user/examples/constants/cmake/ENABLE_LOGGER #> user/examples/constants/cmake/GTEST_ROOT
Add inertia timeout;
@@ -58,6 +58,9 @@ static void enableMouselook(GLFWwindow* window) { static void disableMouselook(GLFWwindow* window) { state.mouselook = false; + if (glfwGetTime() - state.prevMove > .25) { + state.vYaw = state.vPitch = 0; + } } static void window_focus_callback(GLFWwindow* window, int focused) {
VERSION bump to version 2.0.64
@@ -58,7 +58,7 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) # set version of the project set(LIBYANG_MAJOR_VERSION 2) set(LIBYANG_MINOR_VERSION 0) -set(LIBYANG_MICRO_VERSION 63) +set(LIBYANG_MICRO_VERSION 64) set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_MICRO_VERSION}) # set version of the library set(LIBYANG_MAJOR_SOVERSION 2)
Add Windows OpenSSL build dependency
@@ -186,11 +186,24 @@ SOURCES = air_quality.cpp \ win32 { + OPENSSL_PATH = E:/Qt/Tools/OpenSSL/Win_x86 + + exists($$OPENSSL_PATH) { + message(OpenSLL detected $$OPENSSL_PATH) + + LIBS += -L$$OPENSSL_PATH/bin \ + -llibcrypto-1_1 \ + -llibssl-1_1 + INCLUDEPATH += $$OPENSSL_PATH/include + DEFINES += HAS_OPENSSL + } + LIBS += \ -L../.. \ -L$${PWD}/../../../lib/sqlite-dll-win32-x86-3240000 \ -ldeCONZ1 \ -lsqlite3 + INCLUDEPATH += $${PWD}/../../../lib/sqlite-amalgamation-3240000 CONFIG += dll }
Fix pbuf docs
@@ -178,7 +178,8 @@ esp_pbuf_chain(esp_pbuf_p head, esp_pbuf_p tail) { /** * \brief Unchain first pbuf from list and return second one * - * `tot_len` and `len` fields are adjusted to reflect new values and reference counter is as is + * `tot_len` and `len` fields are adjusted to reflect new values and reference counter is `as is` + * * \note After unchain, user must take care of both pbufs (`head` and `new returned one`) * \param[in] head: First pbuf in chain to remove from chain * \return Next pbuf after `head`
add cloc back to run list, and control with EPSDB=1
@@ -23,7 +23,12 @@ echo " A non-zero exit code means a failure occured." echo "************************************************************************************" #Loop over all directories and run the check script -for directory in ./fortran hip openmp/; do +cloc="" +if [ "$EPSDB" != "1" ]; then + CLOC=cloc +fi + +for directory in ./fortran hip openmp $CLOC/; do (cd "$directory" && path=$(pwd) && base=$(basename $path) script=check_$base.sh ./$script @@ -31,7 +36,7 @@ for directory in ./fortran hip openmp/; do ) done echo -e "$ORG"FINAL RESULTS:"$BLK" -for directory in ./*/; do +for directory in fortran hip openmp $CLOC/; do (cd "$directory" && path=$(pwd) && base=$(basename $path) cat check-$base.txt )
speed: Always reset the outlen when calling EVP_PKEY_derive Fixes
@@ -880,11 +880,14 @@ static int FFDH_derive_key_loop(void *args) loopargs_t *tempargs = *(loopargs_t **) args; EVP_PKEY_CTX *ffdh_ctx = tempargs->ffdh_ctx[testnum]; unsigned char *derived_secret = tempargs->secret_ff_a; - size_t outlen = MAX_FFDH_SIZE; int count; - for (count = 0; COND(ffdh_c[testnum][0]); count++) + for (count = 0; COND(ffdh_c[testnum][0]); count++) { + /* outlen can be overwritten with a too small value (no padding used) */ + size_t outlen = MAX_FFDH_SIZE; + EVP_PKEY_derive(ffdh_ctx, derived_secret, &outlen); + } return count; } #endif /* OPENSSL_NO_DH */
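For EVP_PKEY_derive(), outlen is an in/out parameter: on input it is the available buffer size, on output the number of bytes written, which is why a shrunken value carried across loop iterations can truncate later derivations. The usual two-step pattern, shown as a hedged sketch with an illustrative helper name:

    #include <openssl/evp.h>

    /* Query the required length with a NULL output buffer, then derive.
     * outlen is the buffer size on input and the written length on output. */
    static int derive_secret(EVP_PKEY_CTX *ctx, unsigned char *buf, size_t bufsize)
    {
        size_t outlen = 0;
        if (EVP_PKEY_derive(ctx, NULL, &outlen) <= 0)
            return 0;
        if (outlen > bufsize)
            return 0;
        return EVP_PKEY_derive(ctx, buf, &outlen) > 0;
    }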
Docs: avoid confusing use of the word "synchronized" It's misleading to call the data directory the "synchronized data directory" when discussing a crash scenario when using pg_rewind's no-sync option. Here we just remove the word "synchronized" to avoid any possible confusion. Author: Justin Pryzby Discussion: Backpatch-through: 12, where --no-sync was added
@@ -182,8 +182,8 @@ PostgreSQL documentation to be written safely to disk. This option causes <command>pg_rewind</command> to return without waiting, which is faster, but means that a subsequent operating system crash can leave - the synchronized data directory corrupt. Generally, this option is - useful for testing but should not be used when creating a production + the data directory corrupt. Generally, this option is useful for + testing but should not be used when creating a production installation. </para> </listitem>
Use BIT4 instead of BIT3 for PCAT MixMode capability based on spec
@@ -336,8 +336,9 @@ union { UINT8 OneLm : 1; UINT8 Memory : 1; UINT8 AppDirect : 1; + UINT8 Reserved1 : 1; UINT8 MixedMode : 1; - UINT8 Reserved : 4; + UINT8 Reserved2 : 3; } MemoryModesFlags; } SUPPORTED_MEMORY_MODE3;
WIP: Move ++cast-thou to turbo, but it's a no-op in practice.
:: :: $thou + ~& %axon-thou ?+ -.tee !! $ay (ames-gram (slav %p p.tee) got+~ (slav %uv q.tee) |2.sih) $hi (cast-thou q.tee httr+!>(p.sih)) == :: $made + =. our (need hov) :: XX + =| ses/(unit hole) + |- ^+ ..axon :: hack: we must disambiguate between %f and %t %made responses :: ?: ?=([%t %made *] sih) - ?> ?=([%se ^] tee) - ~! sih - (get-made:(dom-vi q.tee) p.tee result:sih) + ?+ tee ~& [%tee tee] !! + {$si $~} (give-turbo-sigh result.sih) + {$se ^} (get-made:(dom-vi q.tee) p.tee result:sih) + == :: ?> ?=([%f %made *] sih) :: ~! sih ?< ?=($tabl -.q.sih) - =. our (need hov) :: XX - =| ses/(unit hole) - |- ^+ ..axon ?- tee $@($~ {?($on $ow) *}) ~|(e+ford+lost+tee !!) {$of @ $~} ~|(e+ford+lost+tee !!) - {$si $~} (give-sigh q.sih) + {$si $~} !! ::(give-sigh q.sih) {$se ^} !! ::(get-made:(dom-vi q.tee) p.tee [p q]:sih) {$ay ^} =/ res/(each (cask) tang) |= {tea/whir mar/mark cay/cage} (execute tea (norm-beak -.top) [%cast mar $+cay]) :: - ++ cast-thou + ++ cast-old-thou |= {mar/mark cay/cage} ?: ?=($httr mar) (give-sigh %& cay) %^ execute si+~ (norm-beak -.top) [%alts [%cast mar $+cay] [%cast %recoverable-error $+cay] ~] :: + ++ cast-thou :: turbo + |= [mar=mark cay=cage] + ?: ?=($httr mar) + ~& [%cast-thou-immediate mar] + (give-sigh %& cay) + ~& [%cast-thou mar] + %^ execute-turbo si+~ live=%.n + :: %^ execute si+~ (norm-beak -.top) + =/ =beak (norm-beak -.top) + [%alts [%cast [p q]:beak mar $+cay] [%cast [p q]:beak %recoverable-error $+cay] ~] + :: ++ del-deps |= {a/@uvH b/(each duct ixor)} ^+ +>.$ ?: =(`@`0 a) +>.$
halo mass func
@@ -101,7 +101,46 @@ double ccl_sigmaR(ccl_cosmology *cosmo, double R); double ccl_sigma8(ccl_cosmology *cosmo); ```` These and other functions for different matter power spectra can be found in file *include/ccl_power.h*. +<<<<<<< b57cc2940e42eed4b11c528eafdbabca66c1460b ### Example code +======= + +### Angular power spectra +CCL can compute angular power spectra for two tracer types: galaxy number counts and galaxy weak lensing. Tracer parameters are defined in structure **CCL_ClTracer**. In general, you can create this object with function **ccl_cl_tracer_new** +````c +CCL_ClTracer *ccl_cl_tracer_new(ccl_cosmology *cosmo,int tracer_type, + int has_rsd,int has_magnification,int has_intrinsic_alignment, + int nz_n,double *z_n,double *n, + int nz_b,double *z_b,double *b, + int nz_s,double *z_s,double *s, + int nz_ba,double *z_ba,double *ba, + int nz_rf,double *z_rf,double *rf); +```` +Exact definition of these parameters are described in file *include/ccl_cls.h*. Usually you can use simplified versions of this function, namely **ccl_cl_tracer_number_counts_new, ccl_cl_tracer_number_counts_simple_new, ccl_cl_tracer_lensing_new** or **ccl_cl_tracer_lensing_simple_new**. Two most simplified versions (one for number counts and one for shear) take parameters: +````c +CCL_ClTracer *ccl_cl_tracer_number_counts_simple_new(ccl_cosmology *cosmo, int nz_n,double *z_n,double *n, int nz_b,double *z_b,double *b); +CCL_ClTracer *ccl_cl_tracer_lensing_simple_new(ccl_cosmology *cosmo, int nz_n,double *z_n,double *n); + +```` +where **nz_n** is dimension of arrays **z_n** and **n**. **z_n** and **n** are arrays for the number count of objects per redshift interval (arbitrary normalization - renormalized inside). **nz_b, z_b** and **b** are the same for the clustering bias. +With initialized tracers you can compute limber power spectrum with **ccl_angular_cl** +````c +double ccl_angular_cl(ccl_cosmology *cosmo,int l,CCL_ClTracer *clt1,CCL_ClTracer *clt2); +```` +After you are done working with tracers, you should free its work space by **ccl_cl_tracer_free** +````c +void ccl_cl_tracer_free(CCL_ClTracer *clt); +```` + +### Halo mass function +The halo mass function *dN/dz* can be obtained by function **ccl_massfunc** +````c +double ccl_massfunc(ccl_cosmology * cosmo, double halo_mass, double redshift) +```` +where **halo_mass** is mass smoothing scale. For more details (or other functions like *sigma_M*) see *include/ccl_massfunc.h* and *src/mass_func.c*. + +## Example code +>>>>>>> halo mass func This code can also be found in *tests/min_code.h* You can run the following example code. For this you will need to compile with: ````c gcc -Wall -Wpedantic -g -O0 -I./include -std=c99 -fPIC tests/min_code.c -o tests/min_code -L./lib -L/usr/local/lib -lgsl -lgslcblas -lm -Lclass -lclass -lccl
[hg] update client for linux and mac Pull-request for branch users/nslus/arcadia/869/client
}, "hg": { "formula": { - "sandbox_id": [302223618, 302223634, 74450064], + "sandbox_id": [335981402, 335981414, 74450064], "match": "Hg" }, "executable": {
Make ctest suitable for multi-core machines.
@@ -68,7 +68,7 @@ sub_build() { # Tests (coverage needs to run the tests) if [ $BUILD_TESTS = 1 ] || [ $BUILD_COVERAGE = 1 ]; then - ctest -VV -C $BUILD_TYPE + ctest -j$(getconf _NPROCESSORS_ONLN) --output-on-failure -C $BUILD_TYPE fi # Coverage
load_key_certs_crls: Avoid reporting any spurious errors When there is other PEM data in between certs, OSSL_STORE_load returns NULL and reports an error. Avoid printing that error unless nothing was read at all. Fixes
@@ -871,9 +871,6 @@ int load_key_certs_crls_suppress(const char *uri, int format, int maybe_stdin, OSSL_PARAM itp[2]; const OSSL_PARAM *params = NULL; - if (suppress_decode_errors) - ERR_set_mark(); - if (ppkey != NULL) { *ppkey = NULL; cnt_expectations++; @@ -971,10 +968,6 @@ int load_key_certs_crls_suppress(const char *uri, int format, int maybe_stdin, * certificate in it. We just retry until eof. */ if (info == NULL) { - if (OSSL_STORE_error(ctx)) { - ERR_print_errors(bio_err); - ERR_clear_error(); - } continue; } @@ -1078,8 +1071,9 @@ int load_key_certs_crls_suppress(const char *uri, int format, int maybe_stdin, BIO_printf(bio_err, "\n"); ERR_print_errors(bio_err); } - if (suppress_decode_errors) - ERR_pop_to_mark(); + if (suppress_decode_errors || failed == NULL) + /* clear any spurious errors */ + ERR_clear_error(); return failed == NULL; }
FIX: -Wformat-security compile error.
@@ -136,7 +136,7 @@ static void do_lqdetect_write(char client_ip[], char *key, if (keylen > 250) { /* long key string */ keylen = snprintf(keybuf, sizeof(keybuf), "%.*s...%.*s", 124, key, 123, (key+(keylen - 123))); } else { /* short key string */ - keylen = snprintf(keybuf, sizeof(keybuf), key); + keylen = snprintf(keybuf, sizeof(keybuf), "%s", key); } gettimeofday(&val, NULL);
update ya tool arc enable stash arc/diff add diff-format selector support arc/action: add root command
}, "arc": { "formula": { - "sandbox_id": [374170017], + "sandbox_id": [376680307], "match": "arc" }, "executable": {
Add Rx filter update before spur cancelation
@@ -1532,7 +1532,8 @@ int LMS7002M::SetFrequencySX(bool tx, float_type freq_Hz, SX_details* output) */ int LMS7002M::SetFrequencySXWithSpurCancelation(bool tx, float_type freq_Hz, float_type BW) { - BW += 2e6; //offset to avoid ref clock on BW edge + const float BWOffset = 2e6; + BW += BWOffset; //offset to avoid ref clock on BW edge bool needCancelation = false; float_type refClk = GetReferenceClk_SX(false); int low = (freq_Hz-BW/2)/refClk; @@ -1545,6 +1546,7 @@ int LMS7002M::SetFrequencySXWithSpurCancelation(bool tx, float_type freq_Hz, flo if(needCancelation) { newFreq = (int)(freq_Hz/refClk+0.5)*refClk; + TuneRxFilter(BW-BWOffset+abs(freq_Hz-newFreq)); status = SetFrequencySX(tx, newFreq); } else
Error out on invalid empty prototypes This way, a function prototype like `glms_mat3_identity()` will not compile, instead you have to change it to the proper `glms_mat3_identity(void)`.
@@ -12,7 +12,8 @@ AM_CFLAGS = -Wall \ -O3 \ -Wstrict-aliasing=2 \ -fstrict-aliasing \ - -pedantic + -pedantic \ + -Werror=strict-prototypes lib_LTLIBRARIES = libcglm.la libcglm_la_LDFLAGS = -no-undefined -version-info 0:1:0
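In C (before C23), an empty parameter list declares a function taking unspecified arguments rather than no arguments; -Werror=strict-prototypes turns such declarations into compile errors. A minimal illustration with hypothetical declaration names:

    /* Rejected under -Werror=strict-prototypes: () means "unspecified
     * arguments" in a C declaration, not "no arguments". */
    int identity_bad();

    /* Accepted: a proper zero-argument prototype. */
    int identity_ok(void);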
kdb: put the string which was set under quotes
@@ -70,7 +70,7 @@ int SetCommand::execute (Cmdline const & cl) key = Key (name, KEY_END); if (!nullValue) { - toprint << " with string " << value << endl; + toprint << " with string \"" << value << '"' << endl; key.setString (value); } else @@ -89,7 +89,7 @@ int SetCommand::execute (Cmdline const & cl) { if (!nullValue) { - toprint << "Set string to " << value << endl; + toprint << "Set string to \"" << value << '"' << endl; key.setString (value); } else
examples:dns_matching: accept args from user Accepts arguments from the user. This change makes it slightly more interactive. Usage is shown with the -h option, so no extra documentation is required for understanding the usage.
@@ -9,6 +9,7 @@ import socket import os import struct import dnslib +import argparse def encode_dns(name): @@ -35,6 +36,15 @@ def add_cache_entry(cache, name): leaf.p = (c_ubyte * 4).from_buffer(bytearray(4)) cache[key] = leaf + +parser = argparse.ArgumentParser(usage='For detailed information about usage,\ + try with -h option') +req_args = parser.add_argument_group("Required arguments") +req_args.add_argument("-i", "--interface", type=str, required=True, help="Interface name") +req_args.add_argument("-d", "--domains", type=str, required=True, + help='List of domain names separated by comma. For example: -d "abc.def, xyz.mno"') +args = parser.parse_args() + # initialize BPF - load source code from http-parse-simple.c bpf = BPF(src_file = "dns_matching.c", debug=0) # print(bpf.dump_func("dns_test")) @@ -45,16 +55,24 @@ bpf = BPF(src_file = "dns_matching.c", debug=0) function_dns_matching = bpf.load_func("dns_matching", BPF.SOCKET_FILTER) -#create raw socket, bind it to eth0 +#create raw socket, bind it to user provided interface #attach bpf program to socket created -BPF.attach_raw_socket(function_dns_matching, "eth1") +BPF.attach_raw_socket(function_dns_matching, args.interface) # Get the table. cache = bpf.get_table("cache") # Add cache entries -add_cache_entry(cache, "foo.bar") -add_cache_entry(cache, "another.sample.domain") +entries = [i.strip() for i in args.domains.split(",")] +for e in entries: + print(">>>> Adding map entry: ", e) + add_cache_entry(cache, e) + +print("\nTry to lookup some domain names using nslookup from another terminal.") +print("For exmaple: nslookup foo.bar") +print("\nBPF program will filter-in DNS packets which match with map entries.") +print("Packets received by user space program will be printed here") +print("\nHit Ctrl+C to end...") socket_fd = function_dns_matching.sock sock = socket.fromfd(socket_fd, socket.PF_PACKET, socket.SOCK_RAW, socket.IPPROTO_IP)
CHANGELOG: mark v9.10.0
@@ -8,11 +8,11 @@ customers cannot upgrade their bootloader, its changes are recorded separately. ### [Unreleased] +### 9.10.0 [2022-03-10] - Bitcoin: add support for BIP-86: receive to taproot addresses and sign transactions with taproot inputs - Ethereum: add support for the Binance Smart Chain, Optimism, Polygon, Fantom Opera and Arbitrum One networks ### 9.9.1 [2022-02-07] - - Cardano: support sending to legacy Byron addresses - Cardano: disallow duplicate token keys in an output
fix: is_running is accessed as an int in curl, which is 4 bytes instead of 1 byte
@@ -384,7 +384,7 @@ _ws_perform(struct websockets *ws) ws->now_tstamp = orka_timestamp_ms(); //update our concept of now pthread_mutex_unlock(&ws->lock); - bool is_running; + int is_running; CURLMcode mcode = curl_multi_perform(ws->mhandle, (int*)&is_running); ASSERT_S(CURLM_OK == mcode, curl_multi_strerror(mcode));
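curl_multi_perform() writes its second argument through an int *, so passing the address of a one-byte bool cast to int * lets the library write past the variable. A hedged sketch of the matching usage; the helper name is illustrative and not from the project:

    #include <stdbool.h>
    #include <curl/curl.h>

    /* The out-parameter must have exactly the type the API writes through. */
    static bool multi_step(CURLM *multi)
    {
        int still_running = 0;   /* int, as curl_multi_perform() expects */
        CURLMcode mc = curl_multi_perform(multi, &still_running);
        /* bool b; curl_multi_perform(multi, (int *)&b);  <- writes past b */
        return mc == CURLM_OK && still_running > 0;
    }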
Explain meaning of lighting checkbox
Common Controls ~~~~~~~~~~~~~~~ -There are a number of attributes of plots that are common to -many, if not all plots. These include such things as **Color table**, -**Foreground** and **Background** colors, **Opacity**, -**Line style** and **Point type**, **Log** or **Linear** scaling, -the **Legend** checkbox and others. These common plot attributes -are described here first using the **Pseudocolor plot** as an example. +There are a number of attributes of plots that are common to many, if not all plots. +These include such things as **Color table**, **Foreground** and **Background** colors, **Opacity**, **Line width** and **Point type**, **Log** or **Linear** scaling, the **Legend** checkbox, the **Lighting** checkbox and others. +These common plot attributes are described here first using the **Pseudocolor plot** as an example. .. _pseudocolorwindow_1: @@ -101,3 +98,10 @@ select a new scalar variable from the **Variable** menu. The value ``default`` must be replaced with the name of another scalar variable if you want VisIt_ to scale the points with a variable other than the one being plotted. +Lighting +"""""""" + +Various plots include a **Lighting** checkbox. +When the box is checked, it means the plot will obey all *active* :ref:`light sources <Lighting>`. +When the box is **not** checked, this does not mean the plot will not be lit at all. +Instead, it means that the plot will be lit by **Ambient** lighting only.
pkg/columns: reset kind and columnType after stringer Symptoms: panic: reflect: call of reflect.Value.Int on string Value
@@ -224,11 +224,16 @@ func (ci *Column[T]) parseTagInfo(tagInfo []string) error { } ci.template = params[1] case "stringer": + if ci.Extractor != nil { + break + } stringer := reflect.TypeOf((*fmt.Stringer)(nil)).Elem() if ci.Type().Implements(stringer) { ci.Extractor = func(t *T) string { return ci.getRawField(reflect.ValueOf(t)).Interface().(fmt.Stringer).String() } + ci.kind = reflect.String + ci.columnType = stringType } else { return fmt.Errorf("column parameter %q set for field %q, but doesn't implement fmt.Stringer", params[0], ci.Name) }
Explicitly add cast to uint32_t before shifting uint8 to left
@@ -135,7 +135,7 @@ static void _tu_fifo_write_to_const_dst_ptr(void * dst, const void * src, uint16 // Pushing full available 32 bit words to FIFO uint16_t full_words = len >> 2; for(uint16_t i = 0; i < full_words; i++){ - *tx_fifo = (src_u8[3] << 24) | (src_u8[2] << 16) | (src_u8[1] << 8) | src_u8[0]; + *tx_fifo = ((uint32_t)(src_u8[3]) << 24) | ((uint32_t)(src_u8[2]) << 16) | ((uint32_t)(src_u8[1]) << 8) | (uint32_t)src_u8[0]; src_u8 += 4; } @@ -145,10 +145,10 @@ static void _tu_fifo_write_to_const_dst_ptr(void * dst, const void * src, uint16 uint32_t tmp_word = 0; tmp_word |= src_u8[0]; if(bytes_rem > 1){ - tmp_word |= src_u8[1] << 8; + tmp_word |= (uint32_t)(src_u8[1]) << 8; } if(bytes_rem > 2){ - tmp_word |= src_u8[2] << 16; + tmp_word |= (uint32_t)(src_u8[2]) << 16; } *tx_fifo = tmp_word; }
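The casts matter because of integer promotion: a uint8_t operand is promoted to (signed) int before <<, so shifting a byte with its top bit set left by 24 lands in the sign bit of int, which is undefined behaviour. A small sketch of the safe pattern, with an illustrative function name:

    #include <stdint.h>

    /* Assemble a little-endian 32-bit word. Each byte is cast to uint32_t
     * first so the shifts happen in an unsigned 32-bit type instead of the
     * (signed) int produced by integer promotion. */
    static uint32_t load_le32(const uint8_t *p)
    {
        return ((uint32_t)p[3] << 24) | ((uint32_t)p[2] << 16) |
               ((uint32_t)p[1] << 8)  |  (uint32_t)p[0];
    }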
Reformat md file
@@ -21,7 +21,8 @@ Information about the syntax: - Can contain multiple keys with different locales (`keyName[en]` and `keyName[de]`) - Cannot contain multiple keys with different metadata (either `keyname[$a]` or `keyname[$b]`). - If a key `keyname[$metavalue]` is parsed, it will be represented as a Key with name `parent/keyname` and meta `kconfig` as `metavalue` -- Group names begin have a `[` symbol at the beginning of a line and every key that follows them is part of this group (until the next group is declared) +- Group names begin have a `[` symbol at the beginning of a line and every key that follows them is part of this group (until the next + group is declared) ## Usage
Fix Coverity & uninitialised value Both are false positives, but better to be rid of them forever than to ignore them and have repeats.
@@ -212,7 +212,7 @@ static int test_explicit_EVP_MD_fetch_by_X509_ALGOR(int idx) int ret = 0; X509_ALGOR *algor = make_algor(NID_sha256); const ASN1_OBJECT *obj; - char id[OSSL_MAX_NAME_SIZE]; + char id[OSSL_MAX_NAME_SIZE] = { 0 }; if (algor == NULL) return 0; @@ -328,7 +328,7 @@ static int test_explicit_EVP_CIPHER_fetch_by_X509_ALGOR(int idx) int ret = 0; X509_ALGOR *algor = make_algor(NID_aes_128_cbc); const ASN1_OBJECT *obj; - char id[OSSL_MAX_NAME_SIZE]; + char id[OSSL_MAX_NAME_SIZE] = { 0 }; if (algor == NULL) return 0;
chip/mec1322/lpc.c: Format with clang-format BRANCH=none TEST=none
@@ -375,8 +375,7 @@ DECLARE_IRQ(MEC1322_IRQ_ACPIEC0_IBF, acpi_0_interrupt, 1); void acpi_1_interrupt(void) { uint8_t st = MEC1322_ACPI_EC_STATUS(1); - if (!(st & EC_LPC_STATUS_FROM_HOST) || - !(st & EC_LPC_STATUS_LAST_CMD)) + if (!(st & EC_LPC_STATUS_FROM_HOST) || !(st & EC_LPC_STATUS_LAST_CMD)) return; /* Set the busy bit */ @@ -515,6 +514,5 @@ static enum ec_status lpc_get_protocol_info(struct host_cmd_handler_args *args) return EC_SUCCESS; } -DECLARE_HOST_COMMAND(EC_CMD_GET_PROTOCOL_INFO, - lpc_get_protocol_info, +DECLARE_HOST_COMMAND(EC_CMD_GET_PROTOCOL_INFO, lpc_get_protocol_info, EC_VER_MASK(0));
mactime: remove unnecessary function declaration Type: fix
#include <vppinfra/time_range.h> #include <vnet/ethernet/ethernet.h> -uword vat_unformat_sw_if_index (unformat_input_t * input, va_list * args); - /* Declare message IDs */ #include <mactime/mactime.api_enum.h> #include <mactime/mactime.api_types.h>
perfmon: added bundle to measure pci bandwidth Added Intel Ice Lake specific bundles to measure pci bandwidth through the Intel IO PMU. The "PCI" bundle measures reads/writes from pci devices. The "CPU" bundle measures reads/writes from cpus to pci devices. Type: improvement
@@ -32,6 +32,7 @@ add_vpp_plugin(perfmon intel/bundle/power_license.c intel/bundle/topdown_metrics.c intel/bundle/topdown_tremont.c + intel/bundle/iio_bw.c COMPONENT vpp-plugin-devtools
Do not copy string in elektraGetString
@@ -75,8 +75,7 @@ const char * elektraGetString (Elektra * elektra, const char * name) { READ_KEY - char * value = elektraMalloc (keyGetValueSize(resultKey)); - strcpy (value, string); + const char * value = string; RETURN_VALUE }
router_readconfig: don't prefix error messages with timestamps Parse errors will be almost instant, and the relay will terminate, so no need to add useless information here.
@@ -1191,13 +1191,13 @@ router_readconfig(router *orig, ret->parser_err.msg = NULL; if (router_yyparse(lptr, ret) != 0) { if (ret->parser_err.msg == NULL) { - logerr("parsing %s failed\n", path); + fprintf(stderr, "parsing %s failed\n", path); } else if (ret->parser_err.line != 0) { char *line; char *p; char *carets; size_t carlen; - logerr("%s:%d:%d: %s\n", path, ret->parser_err.line, + fprintf(stderr, "%s:%zd:%zd: %s\n", path, ret->parser_err.line, ret->parser_err.start, ret->parser_err.msg); /* get some relevant context from buff and put ^^^^ below it * to point out the position of the error */ @@ -1223,7 +1223,7 @@ router_readconfig(router *orig, *p = '\0'; fprintf(stderr, "%s%s\n", line, carets); } else { - logerr("%s: %s\n", path, ret->parser_err.msg); + fprintf(stderr, "%s: %s\n", path, ret->parser_err.msg); } router_yylex_destroy(lptr); router_free(ret);
fixed issue with unit test linking on windows with SSL
@@ -50,7 +50,8 @@ SET_TARGET_PROPERTIES(hiredis PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE VERSION "${HIREDIS_SONAME}") IF(WIN32 OR MINGW) - TARGET_LINK_LIBRARIES(hiredis PRIVATE ws2_32) + TARGET_LINK_LIBRARIES(hiredis PUBLIC ws2_32 crypt32) + TARGET_LINK_LIBRARIES(hiredis_static PUBLIC ws2_32 crypt32) ENDIF() TARGET_INCLUDE_DIRECTORIES(hiredis PUBLIC $<INSTALL_INTERFACE:include> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>) @@ -152,6 +153,7 @@ IF(ENABLE_SSL) TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE ${OPENSSL_LIBRARIES}) IF (WIN32 OR MINGW) TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE hiredis) + TARGET_LINK_LIBRARIES(hiredis_ssl_static PUBLIC hiredis_static) ENDIF() CONFIGURE_FILE(hiredis_ssl.pc.in hiredis_ssl.pc @ONLY)
Build: -e is better than -d let us try that
@@ -484,7 +484,7 @@ clean: echo -e " Error: [make "$@"] failed for action/application in $(ACTION_ROOT)"; exit -1; \ fi \ fi - if [ -d $(PSLSE_ROOT) ]; then \ + if [ -e $(PSLSE_ROOT) ]; then \ for d in afu_driver/src pslse libcxl debug ; do \ $(MAKE) -C $(PSLSE_ROOT)/$$d $@ ; \ done \
version T13.818 for pools: xfer from pool fixed
-/* cheatcoin main, T13.654-T13.816 $DVS:time$ */ +/* cheatcoin main, T13.654-T13.818 $DVS:time$ */ #include <stdio.h> #include <stdlib.h> @@ -126,7 +126,7 @@ static int xfer_callback(void *data, cheatcoin_hash_t hash, cheatcoin_amount_t a cheatcoin_amount_t todo = d->remains; int i; if (!amount) return -1; - if (g_is_pool && cheatcoin_main_time() < (time >> 10) + 2 * CHEATCOIN_POOL_N_CONFIRMATIONS) return 0; + if (g_is_pool && cheatcoin_main_time() < (time >> 16) + 2 * CHEATCOIN_POOL_N_CONFIRMATIONS) return 0; for (i = 0; i < d->nkeys; ++i) if (n_our_key == d->keys[i]) break; if (i == d->nkeys) d->keys[d->nkeys++] = n_our_key; if (d->keys[XFER_MAX_IN] == n_our_key) d->outsig = 0;
Mild renamings.
+$ deco ?($bl $br $un $~) :: text decoration +$ date {{a/? y/@ud} m/@ud t/tarp} :: parsed date +$ knot @ta :: ASCII text -++ noun * :: any noun -++ path (list knot) :: like unix path -++ stud :: standard name - $@ @tas :: auth=urbit ++$ noun * :: any noun ++$ path (list knot) :: like unix path ++$ stud :: standard name + $@ mark=@tas :: auth=urbit $: auth=@tas :: standards authority type=path :: standard label == :: -++ stub (list (pair stye (list @c))) :: styled unicode -++ stye (pair (set deco) (pair tint tint)) :: decos/bg/fg -++ styl :: cascading style ++$ stub (list (pair stye (list @c))) :: styled unicode ++$ stye (pair (set deco) (pair tint tint)) :: decos/bg/fg ++$ styl :: cascading style %+ pair (unit deco) :: (pair (unit tint) (unit tint)) :: :: :: -++ styx (list $@(@t (pair styl styx))) :: styled text -++ tile :: XX: ?@(knot (pair styl knot)) ++$ styx (list $@(@t (pair styl styx))) :: styled text ++$ tile :: XX: ?@(knot (pair styl knot)) :: cord -++ tint ?($r $g $b $c $m $y $k $w $~) :: text color -++ plum :: text output noun ++$ tint ?($r $g $b $c $m $y $k $w $~) :: text color ++$ plum :: text output noun $@ cord $% :: %|: wrappable paragraph without linebreaks :: %&: decorated list +$ foot $% {$dry p/hoon} :: dry arm, geometric {$wet p/hoon} :: wet arm, generic == :: - +$ link :: lexical segment $% [%chat p/term] :: |chapter - [%cont p/aura q/atom] :: %constant + [%cone p/aura q/atom] :: %constant [%frag p/term] :: .leg - [%func p/term] :: +arm - == -+$ body (pair cord (list sect)) :: -+$ help (pair (list link) body) :: documentation + [%funk p/term] :: +arm + == :: ++$ body [summary=cord details=(list sect)] :: ++$ help [links=(list link) =body] :: documentation +$ limb $@ term :: wing element $% {%& p/axis} :: by geometry {%| p/@ud q/(unit term)} :: by name {$0 p/@} :: axis select == :: +$ note :: type annotation - $% {$like p/help} :: documentation + $% {$help help} :: documentation {$know p/stud} :: global standard {$made p/term q/(unit (list wing))} :: structure == :: {$bust *} ~(example ax fab %base p.gen) {$ktcl *} ~(factory ax fab p.gen) {$dbug *} q.gen - {$eror *} ~|(p.gen !!) + {$eror *} ~>(%slog.[0 leaf/p.gen] !!) :: {$knit *} :: :+ %tsbn [%ktts %v %$ 1] :: => v=.
cups-browsed.c: revert silence the compiler warning Revert the commit because it puts back the invalid memory error, and just recast 'uuid' to 'char*'.
@@ -3615,7 +3615,7 @@ new_local_printer (const char *device_uri, { local_printer_t *printer = g_malloc (sizeof (local_printer_t)); printer->device_uri = strdup (device_uri); - printer->uuid = (uuid ? strdup (uuid) : NULL); + printer->uuid = (char*)uuid; printer->cups_browsed_controlled = cups_browsed_controlled; return printer; } @@ -3797,7 +3797,7 @@ get_printer_uuid(http_t *http_printer, if (attr) - uuid = ippGetString(attr, 0, NULL) + 9; + uuid = strdup(ippGetString(attr, 0, NULL) + 9); else { debug_printf ("Printer with URI %s: Cannot read \"printer-uuid\" IPP attribute!\n", raw_uri);
Retry to check if walsender is gone after standby stop. The test stops the standby and checks if the walsender is gone along with other checks, but the walsender may take some time to go away, hence add a retry for the check. This makes the test robust, as failures were sometimes seen in CI.
@@ -127,8 +127,14 @@ class neg_test(StandbyRunMixin, MPPTestCase): # Stop the standby as its of no use anymore self.standby.stop() - # Verify that the wal sender died + # Verify that the wal sender died, max within 1 min + for retry in range(1,30): num_walsender = self.count_walsender() + if num_walsender == 0: + break; + logger.info('Wal sender still exists, retrying ...' + str(retry)) + time.sleep(2); + self.assertEqual(num_walsender, 0, "WAL sender has not gone") logger.info('Wal sender is now dead...')
Changelog note for Merge from FGasper: Support OpenSSLs that lack SSL_get0_alpn_selected.
+22 April 2021: Wouter + - Merge #466 from FGasper: Support OpenSSLs that lack + SSL_get0_alpn_selected. + 13 April 2021: George - Fix documentation comment for files previously residing in checkconf/. - Remove unused functions worker_handle_reply and libworker_handle_reply.
Fixed a small bug in PCM command play (thanks to hsk for the notification)
@@ -2067,7 +2067,7 @@ com_pcm_p0_ch1 ; 51 ' 80 com_pcm_p1_ch1 ; 55 ' 80 LD C, 1 ; C = prio ' 7 | - JP com_pcm_ch0 ; execute PCM com ' 10 | 17 (97) + JP com_pcm_ch1 ; execute PCM com ' 10 | 17 (97) com_pcm_p2_ch1 ; 59 ' 80 LD C, 2 ; C = prio ' 7 |
Improve BufString interface and performance
@@ -22,24 +22,24 @@ constexpr size_t BufStringOverHead = 2; // length + null termintor template <size_t Size> class BufString { - char buf[Size] = { }; + char buf[Size]{ }; static_assert (Size <= (255 + BufStringOverHead), "Size too large"); public: - BufString() + constexpr BufString() { clear(); } - BufString(const char *str) : + constexpr BufString(const char *str) : BufString() { setString(str, strlen(str)); } - bool setString(const char *str) + constexpr bool setString(const char *str) { return setString(str, strlen(str)); } - bool setString(const char *str, const size_t len) + constexpr bool setString(const char *str, const size_t len) { if (len > maxSize()) { @@ -55,21 +55,35 @@ public: return true; } - void clear() + constexpr void clear() { buf[0] = 0; buf[1] = '\0'; } - const char *c_str() const { return &buf[1]; }; - bool empty() const { return size() == 0; } - size_t size() const { return buf[0]; } + constexpr const char *c_str() const { return &buf[1]; }; + constexpr bool empty() const { return size() == 0; } + constexpr size_t size() const { return buf[0]; } constexpr size_t maxSize() const { return Size - BufStringOverHead; } - size_t capacity() const { return maxSize() - size(); } + constexpr size_t capacity() const { return maxSize() - size(); } operator QString () const { return QString::fromUtf8(c_str(), int(size())); } operator QLatin1String () const { return QLatin1String(c_str(), int(size())); } + inline bool operator==(const BufString &rhs) const + { + const auto sz = size() + 1; // first byte is length + for (uint8_t i = 0; i < sz; ++i) + { + if (buf[i] != rhs.buf[i]) + { + return false; + } + } + + return true; + } + bool startsWith(const QLatin1String &str) const { if (str.size() <= int(size()))
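The new `operator==` relies on the length byte stored at `buf[0]`: comparing the length byte plus the payload in one pass short-circuits on unequal lengths. A rough C sketch of that layout and comparison is below; the `bufstr_*` names and sizes are illustrative, not the project's API.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Length-prefixed fixed buffer: buf[0] holds the length, buf[1..] the
 * NUL-terminated payload -- the same layout the diff relies on. */
#define BUFSTR_SIZE 32

typedef struct {
    char buf[BUFSTR_SIZE];
} bufstr;

static void bufstr_set(bufstr *s, const char *str)
{
    size_t len = strlen(str);
    if (len > BUFSTR_SIZE - 2)          /* reserve length byte + NUL */
        len = BUFSTR_SIZE - 2;
    s->buf[0] = (char)len;
    memcpy(&s->buf[1], str, len);
    s->buf[1 + len] = '\0';
}

/* Compare the length byte plus payload in one pass, like the new
 * operator==: unequal lengths already differ at the first byte. */
static bool bufstr_eq(const bufstr *a, const bufstr *b)
{
    size_t n = (uint8_t)a->buf[0] + 1;
    return memcmp(a->buf, b->buf, n) == 0;
}

int main(void)
{
    bufstr a, b;
    bufstr_set(&a, "hello");
    bufstr_set(&b, "hello");
    printf("%d\n", bufstr_eq(&a, &b));  /* prints 1 */
    return 0;
}
```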
acme: load custom cert from base desk, not home Small patch for the ancient workaround.
~& [%failed-order-history fal.hit] this :: - :: install privkey and cert .pem from /=home=/acme, ignores app state + :: install privkey and cert .pem from /=base=/acme, ignores app state ::TODO refactor this out of %acme, see also arvo#1151 :: %install-from-clay - =/ bas=path /(scot %p our.bow)/home/(scot %da now.bow)/acme + =/ bas=path /(scot %p our.bow)/base/(scot %da now.bow)/acme =/ key=wain .^(wain %cx (weld bas /privkey/pem)) =/ cer=wain .^(wain %cx (weld bas /cert/pem)) (emit %pass /install %arvo %e %rule %cert `[key cer])
link-view: in pagination logic, only +lent once Also just use +swag instead of chaining +scag and +slag manually.
:: ++ page-size 25 ++ get-paginated - |* [p=(unit @ud) l=(list)] - ^- [total=@ud pages=@ud page=_l] - :+ (lent l) - %+ add (div (lent l) page-size) - (min 1 (mod (lent l) page-size)) - ?~ p l - %+ scag page-size - %+ slag (mul u.p page-size) - l + |* [page=(unit @ud) list=(list)] + ^- [total=@ud pages=@ud page=_list] + =/ l=@ud (lent list) + :+ l + %+ add (div l page-size) + (min 1 (mod l page-size)) + ?~ page list + %+ swag + [(mul u.page page-size) page-size] + list :: ++ page-to-json =, enjs:format
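The reworked arm computes the list length once, derives the page count by ceiling division, and slices a single page-sized window. A C sketch of the same arithmetic, with illustrative names and the same 25-item page size:

```c
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 25

/* Mirror of the +get-paginated arithmetic: number of pages (ceiling
 * division) and the [start, start + PAGE_SIZE) window for a page. */
static void paginate(size_t total, size_t page,
                     size_t *pages, size_t *start, size_t *count)
{
    *pages = total / PAGE_SIZE + (total % PAGE_SIZE ? 1 : 0);
    *start = page * PAGE_SIZE;
    if (*start >= total) {              /* past the end: empty window */
        *start = total;
        *count = 0;
    } else {
        *count = total - *start;
        if (*count > PAGE_SIZE)
            *count = PAGE_SIZE;
    }
}

int main(void)
{
    size_t pages, start, count;
    paginate(51, 2, &pages, &start, &count);
    printf("pages=%zu start=%zu count=%zu\n", pages, start, count); /* 3 50 1 */
    return 0;
}
```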
filter_lua: rename c_int_key to type_int_key
@@ -98,7 +98,7 @@ struct lua_filter *lua_config_create(struct flb_filter_instance *ins, } lf->l2c_types_num = 0; - tmp = flb_filter_get_property("c_int_key", ins); + tmp = flb_filter_get_property("type_int_key", ins); if (tmp) { split = flb_utils_split(tmp, ' ', L2C_TYPES_NUM_MAX); mk_list_foreach_safe(head, tmp_list, split) {
gpcheckperf: fix python3 errors in multidd
@@ -23,7 +23,7 @@ import sys def usage(exitarg): - print __doc__ + print(__doc__) sys.exit(exitarg) @@ -132,20 +132,20 @@ pfile = [] blocksz = opt['-B'] cnt = int(math.ceil(opt['-S'] / blocksz)) totalBytes = 0 -for i in xrange(len(opt['-i'])): +for i in range(len(opt['-i'])): ifile = opt['-i'][i] ofile = opt['-o'][i] cmd.append('dd if=%s of=%s count=%d bs=%d' % (ifile, ofile, cnt, blocksz)) totalBytes += cnt * blocksz for c in cmd: - print c + print(c) pfile.append(os.popen(c)) for f in pfile: ok = False try: - print f.read() + print(f.read()) ok = not f.close() f = None except: @@ -157,4 +157,4 @@ for f in pfile: sys.exit(1) os.system('sync') -print 'multidd total bytes ', totalBytes +print('multidd total bytes ', totalBytes)
Update the README.md in apps/builtin. Developers can set the priority and stacksize for a builtin app.
@@ -24,7 +24,7 @@ Application Configuration -> Support builtin applications to y 1. Add calling REGISTER at context tab ```bash $(BUILTIN_REGISTRY)$(DELIM)$(APPNAME)_main.bdat: $(DEPCONFIG) Makefile - $(call REGISTER,$(APPNAME),$(FUNCNAME),$(THREADEXEC)) + $(call REGISTER,$(APPNAME),$(FUNCNAME),$(THREADEXEC),$(PRIORITY),$(STACKSIZE)) context: $(BUILTIN_REGISTRY)$(DELIM)$(APPNAME)_main.bdat ``` @@ -39,6 +39,8 @@ include $(APPDIR)/Make.defs APPNAME = FUNCNAME = THREADEXEC = +PRIORITY = +STACKSIZE = ``` For arguments details, please refer **Public APIs** [[tab]](../shell/README.md#public-apis) of TASH README
fs/mtd: fix malformed output of /proc/partitions Output of /proc/partitions is malformed due to incorrect buffer handling. This commit fixes it.
@@ -531,15 +531,8 @@ static ssize_t part_procfs_read(FAR struct file *filep, FAR char *buffer, size_t attr = (FAR struct part_procfs_file_s *)filep->f_priv; DEBUGASSERT(attr); - /* If we are at the end of the list, then return 0 signifying the - * end-of-file. This also handles the special case when there are - * no registered MTD partitions. - */ - - if (attr->nextpart) { /* Output a header before the first entry */ - - if (attr->nextpart == g_pfirstpartition) { + if (filep->f_pos == 0) { #ifdef CONFIG_MTD_PARTITION_NAMES total = snprintf(buffer, buflen, "Name Start Size"); #else @@ -553,6 +546,12 @@ static ssize_t part_procfs_read(FAR struct file *filep, FAR char *buffer, size_t #endif } + /* If we are at the end of the list, then return 0 signifying the + * end-of-file. This also handles the special case when there are + * no registered MTD partitions. + */ + + if (attr->nextpart) { /* Get the geometry of the FLASH device */ ret = attr->nextpart->parent->ioctl(attr->nextpart->parent, MTDIOC_GEOMETRY, (unsigned long)((uintptr_t)&geo)); if (ret < 0) { @@ -607,7 +606,7 @@ static ssize_t part_procfs_read(FAR struct file *filep, FAR char *buffer, size_t } #endif - if (ret + total < buflen) { + if (ret + total + 1 < buflen) { /* It fit in the buffer totally. Advance total and move to * next partition. */ @@ -615,8 +614,6 @@ static ssize_t part_procfs_read(FAR struct file *filep, FAR char *buffer, size_t total += ret; attr->nextpart = attr->nextpart->pnext; } - - buffer[total] = '\0'; } /* Update the file offset */
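The fix writes the header only when `f_pos` is zero and commits an entry only if it fits in the caller's buffer together with the terminator. A C sketch of that accumulate-if-it-fits idea, using made-up field names and widths:

```c
#include <stdio.h>
#include <string.h>

/* Append a formatted entry to a caller-supplied buffer, committing it only
 * if it fits together with the terminating NUL -- the same
 * "ret + total + 1 < buflen" idea as the fix above. */
static size_t append_entry(char *buffer, size_t buflen, size_t total,
                           const char *name, unsigned long start,
                           unsigned long size)
{
    int ret = snprintf(buffer + total, buflen - total,
                       "%-8s %8lu %8lu\n", name, start, size);
    if (ret < 0 || total + (size_t)ret + 1 > buflen) {
        buffer[total] = '\0';           /* truncated: discard the partial entry */
        return total;
    }
    return total + (size_t)ret;
}

int main(void)
{
    char buf[80] = "";
    size_t total = 0;
    total = append_entry(buf, sizeof(buf), total, "mtd0", 0, 1024);
    total = append_entry(buf, sizeof(buf), total, "mtd1", 1024, 4096);
    fputs(buf, stdout);
    return (int)(total == 0);
}
```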
Add scePowerRequestDisplayOn()
@@ -209,6 +209,13 @@ int scePowerRequestStandby(void); */ int scePowerRequestSuspend(void); +/** + * Request display on + * + * @return always 0 + */ +int scePowerRequestDisplayOn(void); + /** * Request display off *
Fix endless reading of sw build id on Ubisys devices
@@ -3530,7 +3530,11 @@ bool DeRestPluginPrivate::processZclAttributes(LightNode *lightNode) } } - if ((processed < 2) && lightNode->mustRead(READ_SWBUILD_ID) && tNow > lightNode->nextReadTime(READ_SWBUILD_ID)) + if (lightNode->manufacturerCode() == VENDOR_UBISYS) + { + lightNode->clearRead(READ_SWBUILD_ID); // Ubisys devices have empty sw build id + } + else if ((processed < 2) && lightNode->mustRead(READ_SWBUILD_ID) && tNow > lightNode->nextReadTime(READ_SWBUILD_ID)) { std::vector<uint16_t> attributes; attributes.push_back(0x4000); // Software build identifier
Update documentation for creating and updating gpdb_main pipelines.
@@ -26,7 +26,7 @@ configure workflow. The following workflow should be followed: * Edit the template file (`templates/gpdb-tpl.yml`). -* Generate the pipeline. During this step, the pipeline release jobs will be validated. +* Generate the pipelines. During this step, the pipeline release jobs will be validated. * Use the Concourse `fly` command to set the pipeline (`gpdb_main-generated.yml`). * Once the pipeline is validated to function properly, commit the updated template and pipeline. @@ -54,10 +54,10 @@ gen_pipeline.py -h|--help ## Examples of usage -### Create Production Pipeline +### Create and Update Production Pipelines The `./gen_pipeline.py -t prod` command will generate the production -pipeline (`gpdb_main-generated.yml`). All supported platforms and +pipeline (`gpdb_main-generated.yml`). Default platform and test sections are included. The pipeline release jobs will be validated. The output of the utility will provide details of the pipeline generated. Following standard conventions, two `fly` @@ -111,6 +111,15 @@ The generated pipeline file `gpdb_main-generated.yml` will be set, validated and ultimately committed (including the updated pipeline template) to the source repository. +Create and update gpdb_main_rhel8, gpdb_main_oel8 pipelines: + +``` +./gen_pipeline.py -t prod -O rhel8 +./gen_pipeline.py -t prod -O oel8 +``` + + + ### Creating Developer pipelines As an example of generating a pipeline with a targeted test subset,
Fix SELinux capabilities. Creating a tap interface with 'tap connect' was returning an error when VPP was launched as a service (tested on CentOS 7.3). Adding 'net_admin' to SELinux capabilities for VPP solves the issue.
@@ -43,9 +43,9 @@ files_tmp_file(vpp_tmp_t) # vpp local policy # -allow vpp_t self:capability { dac_override ipc_lock setgid sys_rawio net_raw sys_admin }; # too benefolent +allow vpp_t self:capability { dac_override ipc_lock setgid sys_rawio net_raw sys_admin net_admin }; # too benevolent dontaudit vpp_t self:capability2 block_suspend; -allow vpp_t self:process { execmem execstack setsched signal }; # too benefolent +allow vpp_t self:process { execmem execstack setsched signal }; # too benevolent allow vpp_t self:packet_socket { bind create setopt ioctl }; allow vpp_t self:tun_socket { create relabelto relabelfrom }; allow vpp_t self:udp_socket { create ioctl };
Enhance computation of first normal in RMF
@@ -1611,9 +1611,6 @@ ts_bspline_compute_rmf(const tsBSpline *spline, frames[0].normal, frames[0].normal); ts_vec_norm(frames[0].normal, 3, frames[0].normal); - ts_vec3_cross(frames[0].tangent, - frames[0].normal, - frames[0].normal); } else { /* Never trust user input! */ ts_vec_norm(frames[0].normal, 3, frames[0].normal);
btc: fix b158 test
:: ++ check-all-match |= v=match-vector + =/ b=hexb (from-cord:hxb (crip blockhash.v)) + =/ inc=(list [address hexb]) (turn inc-spks.v |=(h=hexb [*address h])) + =/ exc=(list [address hexb]) (turn exc-spks.v |=(h=hexb [*address h])) %+ expect-eq - !>(`(set hexb)`(sy [*address inc-spks.v])) - !> ^- (set hexb) - %: all-match - filter.v - blockhash.v - %+ turn (weld inc-spks.v exc-spks.v) - |=(h=hexb [*address a]) - == + !>(`(set [address hexb])`(sy inc)) + !>(`(set [address hexb])`(all-match filter.v b (weld inc exc))) -- --
l10n for tracker fields
"FIELD_LINK_CLOSE": "Close the current link session.", "FIELD_LINK_JOIN": "Join a link session.", "FIELD_LINK_HOST": "Host a link session.", + "FIELD_TEST_INSTRUMENT": "Test Instrument (C5)", "// 7": "Asset Viewer ---------------------------------------------", "ASSET_SEARCH": "Search...", "EVENT_LINK_CLOSE": "Link: Close", "EVENT_LINK_JOIN": "Link: Join", "EVENT_LINK_HOST": "Link: Host", - "EVENT_LINK_CLOSE": "Link: Transfer", + "EVENT_LINK_TRANSFER": "Link: Transfer", "// 10": "Menu -----------------------------------------------------", "MENU_SPRITE_STATE_PASTE": "Paste Animation State", "MENU_SPRITE_STATE_DELETE": "Delete Animation State", "MENU_OPEN_MUSIC_PROCESS_WINDOW": "Open Music Process Window", + "MENU_PATTERN_DELETE": "Delete Pattern", "// 11": "Warnings -----------------------------------------------------",
C comment: clarify why psql's help/exit/quit must be alone Document why no indentation and why no non-whitespace postfix is supported. Backpatch-through: master
@@ -237,7 +237,13 @@ MainLoop(FILE *source) bool found_exit_or_quit = false; bool found_q = false; - /* Search for the words we recognize; must be first word */ + /* + * The assistance words, help/exit/quit, must have no + * whitespace before them, and only whitespace after, with an + * optional semicolon. This prevents indented use of these + * words, perhaps as identifiers, from invoking the assistance + * behavior. + */ if (pg_strncasecmp(first_word, "help", 4) == 0) { rest_of_line = first_word + 4; @@ -249,7 +255,6 @@ MainLoop(FILE *source) rest_of_line = first_word + 4; found_exit_or_quit = true; } - else if (strncmp(first_word, "\\q", 2) == 0) { rest_of_line = first_word + 2;
Enforce proper memory alignment on APDU buffer for response status words
@@ -28,7 +28,9 @@ void handle_eip712_return_code(bool success) { } else if (apdu_response_code == APDU_RESPONSE_OK) { // somehow not set apdu_response_code = APDU_RESPONSE_ERROR_NO_INFO; } - *(uint16_t *) G_io_apdu_buffer = __builtin_bswap16(apdu_response_code); + + G_io_apdu_buffer[0] = (apdu_response_code >> 8) & 0xff; + G_io_apdu_buffer[1] = apdu_response_code & 0xff; // Send back the response, do not restart the event loop io_exchange(CHANNEL_APDU | IO_RETURN_AFTER_TX, 2);
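The point of the change is to serialize the 16-bit status word byte by byte instead of through an unaligned, type-punned `uint16_t` store. A small C sketch of the byte-wise big-endian write; the buffer name and the 0x9000 status word are only for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Writing a 16-bit value into a byte buffer one byte at a time
 * (big-endian) avoids the unaligned store that
 * "*(uint16_t *)buf = __builtin_bswap16(x)" performs. */
static void put_u16_be(uint8_t *buf, uint16_t value)
{
    buf[0] = (value >> 8) & 0xff;
    buf[1] = value & 0xff;
}

int main(void)
{
    uint8_t apdu[2];
    put_u16_be(apdu, 0x9000);                /* ISO 7816 "OK" status word */
    printf("%02x %02x\n", apdu[0], apdu[1]); /* 90 00 */
    return 0;
}
```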
Reset the buffer and add a comment explaining the reason for this action
@@ -407,6 +407,14 @@ send_data(esp_mqtt_client_p client) { ESP_DEBUGF(ESP_CFG_DBG_MQTT_TRACE_WARNING, "[MQTT] Cannot send data with error: %d\r\n", (int)res); } + } else { + /* + * If buffer is empty, reset it to default state (read & write pointers) + * This is to make sure everytime function needs to send data, + * it can do it in single shot rather than in 2 attempts (when read > write pointer). + * Effectively this means faster transmission of MQTT packets and lower latency. + */ + esp_buff_reset(&client->tx_buff); } } @@ -418,8 +426,8 @@ send_data(esp_mqtt_client_p client) { static espr_t mqtt_close(esp_mqtt_client_p client) { espr_t res = espERR; - if (client->conn_state != ESP_MQTT_CONN_DISCONNECTED && - client->conn_state != ESP_MQTT_CONN_DISCONNECTING) { + if (client->conn_state != ESP_MQTT_CONN_DISCONNECTED + && client->conn_state != ESP_MQTT_CONN_DISCONNECTING) { res = esp_conn_close(client->conn, 0); /* Close the connection in non-blocking mode */ if (res == espOK) { @@ -444,7 +452,7 @@ sub_unsub(esp_mqtt_client_p client, const char* topic, esp_mqtt_qos_t qos, void* uint32_t rem_len; esp_mqtt_request_t* request; - if (!(len_topic = ESP_U16(strlen(topic)))) { + if ((len_topic = ESP_U16(strlen(topic))) == 0) { return 0; } @@ -565,7 +573,6 @@ mqtt_process_incoming_message(esp_mqtt_client_p client) { client->evt.evt.publish_recv.dup = dup; client->evt.evt.publish_recv.qos = qos; client->evt_fn(client, &client->evt); - break; } case MQTT_MSG_TYPE_PINGRESP: { /* Respond to PINGREQ received */ @@ -627,7 +634,6 @@ mqtt_process_incoming_message(esp_mqtt_client_p client) { "[MQTT] Protocol violation. Received ACK without sent packet\r\n"); } } - break; } default:
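The added branch resets the transmit buffer whenever it drains, so the next write starts at offset zero and can be flushed in one contiguous chunk. A simplified C sketch of a linear buffer with that reset-when-empty behavior; all names are illustrative, not the library's API.

```c
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Minimal linear buffer; when it drains completely, rewinding the read and
 * write indices keeps the next write contiguous, so it can be sent in a
 * single shot instead of two segments. */
typedef struct {
    unsigned char data[64];
    size_t r, w;                        /* read / write indices, w >= r */
} linbuf;

static size_t linbuf_len(const linbuf *b) { return b->w - b->r; }

static void linbuf_reset(linbuf *b) { b->r = b->w = 0; }

static size_t linbuf_write(linbuf *b, const void *src, size_t len)
{
    size_t space = sizeof(b->data) - b->w;
    if (len > space)
        len = space;
    memcpy(&b->data[b->w], src, len);
    b->w += len;
    return len;
}

static size_t linbuf_read(linbuf *b, void *dst, size_t len)
{
    size_t avail = linbuf_len(b);
    if (len > avail)
        len = avail;
    memcpy(dst, &b->data[b->r], len);
    b->r += len;
    if (linbuf_len(b) == 0)
        linbuf_reset(b);                /* drained: rewind for the next burst */
    return len;
}

int main(void)
{
    linbuf b = { .r = 0, .w = 0 };
    char out[16];
    linbuf_write(&b, "ping", 4);
    linbuf_read(&b, out, sizeof(out));
    printf("r=%zu w=%zu\n", b.r, b.w);  /* 0 0 after the reset */
    return 0;
}
```

Rewinding only when empty keeps the structure trivial compared with a true ring buffer, which matches the latency argument in the added comment.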
common/mock/battery_mock.c: Format with clang-format BRANCH=none TEST=none
@@ -164,7 +164,8 @@ void set_battery_device_name(char *new_value) } #define MAX_DEVICE_CHEMISTRY_LENGTH 40 -static char battery_device_chemistry_value[MAX_DEVICE_CHEMISTRY_LENGTH+1] = "?"; +static char battery_device_chemistry_value[MAX_DEVICE_CHEMISTRY_LENGTH + 1] = + "?"; int battery_device_chemistry(char *dest, int size) { int i;
uv binding for socket_is_closed
@@ -25,6 +25,7 @@ struct st_h2o_uv_socket_t { uv_handle_t *handle; uv_close_cb close_cb; h2o_timer_t write_cb_timer; + int is_closed; union { struct { union { @@ -147,10 +148,17 @@ void do_dispose_socket(h2o_socket_t *_sock) void do_close_socket(h2o_socket_t *_sock) { struct st_h2o_uv_socket_t *sock = (struct st_h2o_uv_socket_t *)_sock; + sock->is_closed = 1; sock->super._cb.write = NULL; /* avoid the write callback getting called when closing the socket (#1249) */ h2o_timer_unlink(&sock->write_cb_timer); } +int socket_is_closed(h2o_socket_t *_sock) +{ + struct st_h2o_uv_socket_t *sock = (struct st_h2o_uv_socket_t *)_sock; + return sock->is_closed; +} + int h2o_socket_get_fd(h2o_socket_t *_sock) { int fd, ret;
in_serial: fix packer
@@ -63,7 +63,8 @@ static inline int process_line(msgpack_packer *mp_pck, char *line, int len, return 0; } -static inline int process_pack(struct flb_in_serial_config *ctx, +static inline int process_pack(msgpack_packer *mp_pck, + struct flb_in_serial_config *ctx, char *pack, size_t size) { size_t off = 0; @@ -75,13 +76,13 @@ static inline int process_pack(struct flb_in_serial_config *ctx, while (msgpack_unpack_next(&result, pack, size, &off)) { entry = result.data; - msgpack_pack_array(&ctx->i_ins->mp_pck, 2); - msgpack_pack_uint64(&ctx->i_ins->mp_pck, time(NULL)); + msgpack_pack_array(mp_pck, 2); + msgpack_pack_uint64(mp_pck, time(NULL)); - msgpack_pack_map(&ctx->i_ins->mp_pck, 1); - msgpack_pack_str(&ctx->i_ins->mp_pck, 3); - msgpack_pack_str_body(&ctx->i_ins->mp_pck, "msg", 3); - msgpack_pack_object(&ctx->i_ins->mp_pck, entry); + msgpack_pack_map(mp_pck, 1); + msgpack_pack_str(mp_pck, 3); + msgpack_pack_str_body(mp_pck, "msg", 3); + msgpack_pack_object(mp_pck, entry); } msgpack_unpacked_destroy(&result); @@ -212,7 +213,7 @@ static int in_serial_collect(struct flb_input_instance *in, * Given the Tokens used for the packaged message, append * the records and then adjust buffer. */ - process_pack(ctx, pack, out_size); + process_pack(&mp_pck, ctx, pack, out_size); flb_free(pack); consume_bytes(ctx->buf_data, ctx->pack_state.last_byte, ctx->buf_len);
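The refactor's point is that the packer is passed in by the caller rather than pulled from the plugin context. A standalone C sketch of building one record with a caller-provided msgpack packer, using the same msgpack-c calls as the diff; the record shape and names are illustrative only.

```c
#include <stddef.h>
#include <stdint.h>
#include <time.h>
#include <msgpack.h>

/* Pack one [timestamp, {"msg": value}] record into whatever packer the
 * caller hands us, instead of reaching into a shared context packer. */
static void pack_record(msgpack_packer *mp_pck, const char *msg, size_t len)
{
    msgpack_pack_array(mp_pck, 2);
    msgpack_pack_uint64(mp_pck, (uint64_t)time(NULL));

    msgpack_pack_map(mp_pck, 1);
    msgpack_pack_str(mp_pck, 3);
    msgpack_pack_str_body(mp_pck, "msg", 3);
    msgpack_pack_str(mp_pck, len);
    msgpack_pack_str_body(mp_pck, msg, len);
}

int main(void)
{
    msgpack_sbuffer sbuf;
    msgpack_packer pck;

    msgpack_sbuffer_init(&sbuf);
    msgpack_packer_init(&pck, &sbuf, msgpack_sbuffer_write);

    pack_record(&pck, "hello", 5);

    msgpack_sbuffer_destroy(&sbuf);
    return 0;
}
```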
Only use the encoding attribute for MathML annotation-xml elements Extremely minor optimization. The encoding attribute only matters when parsing fragments with a context element that is a MathML annotation-xml.
@@ -501,7 +501,8 @@ error: } // Encoding. - if (RSTRING_LEN(tag_name) == 14 + if (ctx_ns == GUMBO_NAMESPACE_MATHML + && RSTRING_LEN(tag_name) == 14 && !st_strcasecmp(ctx_tag, "annotation-xml")) { VALUE enc = rb_funcall(ctx, rb_intern_const("[]"), 1,
Remove recursion in data processing. There is recursion between process_connection() -> connection_desctructor(), which could potentially lead to infinite recursion in some situations. Replace the recursion with simple iteration.
@@ -1073,7 +1073,6 @@ static void process_connection(struct conn *c, int remote_ready, int local_ready) { again: - /* Read from remote end if it is ready */ if (remote_ready && io_space_len(&c->rem.io)) read_stream(&c->rem); @@ -1103,6 +1102,7 @@ again: (c->rem.flags & FLAG_CLOSED) || ((c->loc.flags & FLAG_CLOSED) && !io_data_len(&c->loc.io))) if (connection_desctructor(&c->link)) { + // More data to read and process ... remote_ready = 0; local_ready = 0; goto again;
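The fix replaces the mutual re-entry with a jump back to the top of the function once the ready flags are cleared. A tiny C sketch of that recursion-to-iteration shape, with `pending_work()` as a hypothetical stand-in for the real "more data to process" condition:

```c
#include <stdio.h>

/* Pretend more work remains for the first three passes. */
static int pending_work(int step)
{
    return step < 3;
}

/* Instead of a function that re-enters itself when more data is pending
 * (and can nest arbitrarily deep), loop back to the top with the
 * "ready" flags cleared. */
static void process(int remote_ready, int local_ready)
{
    int step = 0;
again:
    printf("pass %d (remote=%d local=%d)\n", step, remote_ready, local_ready);
    if (pending_work(step++)) {
        remote_ready = 0;
        local_ready = 0;
        goto again;                     /* iterate instead of calling process() again */
    }
}

int main(void)
{
    process(1, 1);
    return 0;
}
```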
features: don't define __wasm_unimplemented_simd128__ This is meant to be defined by the compiler when you pass -munimplemented-simd128. I think previously there were some functions which were implemented in the header (as opposed to calling __builtin_* functions) protected by this macro, so I thought you could just define it in code.
#endif #endif #if defined(SIMDE_WASM_SIMD128_NATIVE) - #if !defined(__wasm_unimplemented_simd128__) - HEDLEY_DIAGNOSTIC_PUSH - SIMDE_DIAGNOSTIC_DISABLE_RESERVED_ID_MACRO_ - #define __wasm_unimplemented_simd128__ - HEDLEY_DIAGNOSTIC_POP - #endif #include <wasm_simd128.h> #endif