Columns: message (string, lengths 6–474) · diff (string, lengths 8–5.22k)
extmod/modlogger: don't use strncat

This is not available on stm32 builds.
@@ -97,19 +97,16 @@ void make_data_row_str(char *row, int32_t *data, uint8_t n) {
     // Set initial row to empty string so we can concat to its
     row[0] = 0;
-    // String representation of one integer
-    char value_str[max_val_strln];
     for (uint8_t v = 0; v < n; v++) {
         // Convert value to string
-        if (snprintf(value_str, max_val_strln, "%" PRId32, data[v]) < 0) {
+        if (snprintf(&row[strlen(row)], max_val_strln, "%" PRId32, data[v]) < 0) {
             pb_assert(PBIO_ERROR_IO);
         }
-        // Concatenate value
-        row = strncat(row, value_str, max_val_strln);
-        // Concatenate line break or comma separator
-        row = strncat(row, v == n-1 ? "\n" : ", ", 1);
+        if (snprintf(&row[strlen(row)], 2, "%s", v == n-1 ? "\n" : ",") < 0) {
+            pb_assert(PBIO_ERROR_IO);
+        }
     }
 }
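A minimal, self-contained sketch of the append-with-snprintf pattern the new code uses: each value is written at `&row[strlen(row)]`, so no `strncat` is needed. Buffer sizes and names here are illustrative assumptions, not the pybricks/pbio definitions.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_VAL_STRLN 12   /* hypothetical: widest int32 plus sign and NUL */
#define ROW_LEN 128        /* hypothetical row buffer size */

/* Build a "10, -20, 30\n" style row by appending at &row[strlen(row)]. */
static int make_row(char *row, const int32_t *data, uint8_t n) {
    row[0] = '\0';
    for (uint8_t v = 0; v < n; v++) {
        if (snprintf(&row[strlen(row)], MAX_VAL_STRLN, "%" PRId32, data[v]) < 0) {
            return -1;
        }
        if (snprintf(&row[strlen(row)], 3, "%s", v == n - 1 ? "\n" : ", ") < 0) {
            return -1;
        }
    }
    return 0;
}

int main(void) {
    char row[ROW_LEN];
    const int32_t data[] = {10, -20, 30};
    if (make_row(row, data, 3) == 0) {
        fputs(row, stdout);   /* prints "10, -20, 30" followed by a newline */
    }
    return 0;
}
```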
fix version; fix typo
@@ -53,6 +53,7 @@ make install BIN_PATH=${RPM_BUILD_ROOT}/usr MAN_PATH=${RPM_BUILD_ROOT}/usr/share %config /etc/oidc-agent/privileges/socket.priv %config /etc/oidc-agent/privileges/time.priv %config /etc/oidc-agent/privileges/write.priv +%config /etc/oidc-agent/pubclients.config %doc /usr/share/man/man1/oidc-add.1.gz %doc /usr/share/man/man1/oidc-agent.1.gz %doc /usr/share/man/man1/oidc-gen.1.gz @@ -62,7 +63,7 @@ make install BIN_PATH=${RPM_BUILD_ROOT}/usr MAN_PATH=${RPM_BUILD_ROOT}/usr/share %doc /usr/share/bash-completion/completions/oidc-gen %doc /usr/share/bash-completion/completions/oidc-token /usr/lib/x86_64-linux-gnu/liboidc-agent.so.2 -/usr/lib/x86_64-linux-gnu/liboidc-agent.so.2.1.3 +/usr/lib/x86_64-linux-gnu/liboidc-agent.so.%{version} %defattr(-,root,root,-) %{_bindir}/* #%doc
check request mark

And put ++scry code back in.
++ burb :: per ship |= who/ship ~(able ~(ex ur urb) who) + :: :: ++scry:of + ++ scry :: read + |= {syd/@tas pax/path} ^- (unit gilt) + =^ mar pax =/(a (flop pax) [-.a (flop t.+.a)]) + ?> ?=(_-:*gilt mar) + =- (biff - (flit |=(a/gilt =(-.a mar)))) + ~ ::TODO :: :: ++call:of ++ call :: invoke |= $: :: hen: event cause tyl/spur == ^- (unit (unit cage)) - ~ + :: XX security + ?. =(lot [%$ %da now]) ~ + %- some + ?. =(%$ ren) ~ + %+ bind (~(scry of [now eny] lex) syd tyl) + =- ~! - - + |=(a/gilt [-.a (slot `@`3 !>(a))]) :: :: ++stay ++ stay :: preserve lex
jlink.sh: pass extra JTAG command line arguments to JLinkExe.
@@ -53,7 +53,7 @@ jlink_load () { # downloading somewhere in the flash. So need to figure out how to tell it # not to do that, or report failure if gdb fails to write this file # - echo "shell sh -c \"trap '' 2; $JLINK_GDB_SERVER -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun &\" " > $GDB_CMD_FILE + echo "shell sh -c \"trap '' 2; $JLINK_GDB_SERVER $EXTRA_JTAG_CMD -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun &\" " > $GDB_CMD_FILE echo "target remote localhost:3333" >> $GDB_CMD_FILE echo "mon reset" >> $GDB_CMD_FILE echo "restore $FILE_NAME binary $FLASH_OFFSET" >> $GDB_CMD_FILE @@ -125,13 +125,13 @@ jlink_debug() { # Launch jlink server in a separate command interpreter, to make # sure it doesn't get killed by Ctrl-C signal from bash. # - $COMSPEC "/C start $COMSPEC /C $JLINK_GDB_SERVER -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun" + $COMSPEC "/C start $COMSPEC /C $JLINK_GDB_SERVER $EXTRA_JTAG_CMD -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun" else # # Block Ctrl-C from getting passed to jlink server. # set -m - $JLINK_GDB_SERVER -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun > /dev/null & + $JLINK_GDB_SERVER $EXTRA_JTAG_CMD -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun > /dev/null & set +m fi @@ -151,7 +151,7 @@ jlink_debug() { rm $GDB_CMD_FILE fi else - $JLINK_GDB_SERVER -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun + $JLINK_GDB_SERVER $EXTRA_JTAG_CMD -device $JLINK_DEV -speed 4000 -if SWD -port 3333 -singlerun fi return 0 }
Fixing function definition for EN_getcurvetype
@@ -3648,7 +3648,7 @@ int DLLEXPORT EN_getpumptype(EN_ProjectHandle ph, int index, int *type) {
   return (0);
 }
-int DLLEXPORT EN_getcurvetype(EN_Project *p, int curveindex, int *type) {
+int DLLEXPORT EN_getcurvetype(EN_ProjectHandle ph, int curveindex, int *type) {
   EN_Network *net = &p->network;
in_node_exporter_metrics: do not enqueue data before the collector starts
static void update_metrics(struct flb_input_instance *ins, struct flb_ne *ctx) { - int ret; - /* Update our metrics */ ne_cpu_update(ctx); ne_cpufreq_update(ctx); @@ -56,12 +54,6 @@ static void update_metrics(struct flb_input_instance *ins, struct flb_ne *ctx) ne_vmstat_update(ctx); ne_netdev_update(ctx); ne_filefd_update(ctx); - - /* Append the updated metrics */ - ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt); - if (ret != 0) { - flb_plg_error(ins, "could not append metrics"); - } } /* @@ -71,9 +63,17 @@ static void update_metrics(struct flb_input_instance *ins, struct flb_ne *ctx) static int cb_ne_collect(struct flb_input_instance *ins, struct flb_config *config, void *in_context) { + int ret; struct flb_ne *ctx = in_context; update_metrics(ins, ctx); + + /* Append the updated metrics */ + ret = flb_input_metrics_append(ins, NULL, 0, ctx->cmt); + if (ret != 0) { + flb_plg_error(ins, "could not append metrics"); + } + return 0; } @@ -118,7 +118,6 @@ static int in_ne_init(struct flb_input_instance *in, ne_netdev_init(ctx); ne_filefd_init(ctx); - update_metrics(in, ctx); return 0; }
Correctly set the curve orders when each curve has a different order
@@ -243,6 +243,15 @@ InputCurveNode::compute(const MPlug& plug, MDataBlock& data) &cvCounts.front(), 0, cvCounts.size() )); + if(curveInfo.order == HAPI_CURVE_ORDER_VARYING) + { + CHECK_HAPI(HAPI_SetCurveOrders( + Util::theHAPISession.get(), + myNodeId, 0, + &orders.front(), + 0, orders.size() + )); + } HAPI_AttributeInfo attrInfo = HAPI_AttributeInfo_Create(); attrInfo.count = partInfo.pointCount;
Fix Marvell CMake file
@@ -177,8 +177,9 @@ set( "${mw320_dir}/sdk/src/wlan/wifidriver/incl" "${AFR_KERNEL_DIR}/portable/GCC/ARM_CM4F" "${mw320_dir}/sdk/src/incl/platform/os/freertos" - "${AFR_MODULES_STANDARD_DIR}/freertos_plus_tcp/include" - "${AFR_MODULES_STANDARD_DIR}/freertos_plus_tcp/source/portable/Compiler/GCC" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/freertos_plus_tcp/include" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/freertos_plus_tcp/source/portable/Compiler/GCC" + "$<IF:${AFR_IS_TESTING},${AFR_TESTS_DIR}/common/include,${AFR_DEMOS_DIR}/include>" ) # Global include directories @@ -199,8 +200,8 @@ afr_mcu_port(freertos_plus_tcp) target_sources( AFR::freertos_plus_tcp::mcu_port INTERFACE - "${AFR_MODULES_STANDARD_DIR}/freertos_plus_tcp/source/portable/BufferManagement/BufferAllocation_2.c" - "${AFR_MODULES_STANDARD_DIR}/freertos_plus_tcp/source/portable/NetworkInterface/mw300_rd/NetworkInterface.c" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/freertos_plus_tcp/source/portable/BufferManagement/BufferAllocation_2.c" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/freertos_plus_tcp/source/portable/NetworkInterface/mw300_rd/NetworkInterface.c" ) # PKCS11 @@ -225,15 +226,15 @@ target_link_libraries( ) target_sources( AFR::secure_sockets::mcu_port - INTERFACE "${AFR_MODULES_PORTS_DIR}/secure_sockets/freertos_plus_tcp/aws_secure_sockets.c" + INTERFACE "${AFR_MODULES_ABSTRACTIONS_DIR}/secure_sockets/freertos_plus_tcp/aws_secure_sockets.c" ) target_include_directories( AFR::secure_sockets::mcu_port INTERFACE - "${AFR_MODULES_PORTS_DIR}/pkcs11/include" + "${AFR_MODULES_ABSTRACTIONS_DIR}/pkcs11/include" "${AFR_3RDPARTY_DIR}/pkcs11" - "${AFR_MODULES_STANDARD_DIR}/crypto/include" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/crypto/include" ) # ------------------------------------------------------------------------------------------------- # Amazon FreeRTOS demos and tests @@ -254,9 +255,9 @@ add_executable( target_include_directories( ${exe_target} PUBLIC - "${AFR_MODULES_PORTS_DIR}/pkcs11/include" + "${AFR_MODULES_ABSTRACTIONS_DIR}/pkcs11/include" "${AFR_3RDPARTY_DIR}/pkcs11" - "${AFR_MODULES_STANDARD_DIR}/crypto/include" + "${AFR_MODULES_FREERTOS_PLUS_DIR}/standard/crypto/include" ) target_link_libraries(
Fix stack resolver in travis build
@@ -38,7 +38,7 @@ matrix:
     # The Stack builds. We can pass in arbitrary Stack arguments via the ARGS
     # variable, such as using --stack-yaml to point to a different file.
-    - env: BUILD=stack ARGS="--resolver lts-7"
+    - env: BUILD=stack ARGS="--resolver lts-8"
      compiler: ": #stack 8.0.1"
      addons:
        apt:
Copy BUILD_ settings to the LAPACK make.inc
@@ -304,6 +304,18 @@ else endif ifeq ($(BUILD_LAPACK_DEPRECATED), 1) -@echo "BUILD_DEPRECATED = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_SINGLE), 1) + -@echo "BUILD_SINGLE = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_DOUBLE), 1) + -@echo "BUILD_DOUBLE = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_COMPLEX), 1) + -@echo "BUILD_COMPLEX = 1" >> $(NETLIB_LAPACK_DIR)/make.inc +endif +ifeq ($(BUILD_COMPLEX16), 1) + -@echo "BUILD_COMPLEX16 = 1" >> $(NETLIB_LAPACK_DIR)/make.inc endif -@echo "LAPACKE_WITH_TMG = 1" >> $(NETLIB_LAPACK_DIR)/make.inc -@cat make.inc >> $(NETLIB_LAPACK_DIR)/make.inc
add Access-Control-Allow-Origin: *
@@ -85,7 +85,8 @@ static void wrap_http(int fd, const char *content)
 	write(fd, "Connection: close\r\n",19);
 	write(fd, lbuf, strlen(lbuf));
 	write(fd, tbuf, strlen(tbuf));
-	write(fd, "Content-Type: application/json\r\n\r\n", 34);
+	write(fd, "Content-Type: application/json\r\n", 32);
+	write(fd, "Access-Control-Allow-Origin: *\r\n\r\n", 34);
 }
 static int send_response(struct xdag_rpc_connection * conn,const char *response)
 {
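A tiny standalone sketch (not the xdag code itself) emitting the same two headers with `strlen()`-derived lengths, which is where the literal 32 and 34 byte counts in the hunk come from.

```c
#include <string.h>
#include <unistd.h>

/* Write one header line, deriving the byte count with strlen() instead of
 * hard-coding it (the hunk above hard-codes 32 and 34). */
static void write_header(int fd, const char *line) {
    if (write(fd, line, strlen(line)) < 0) {
        /* ignored in this sketch */
    }
}

int main(void) {
    write_header(1, "Content-Type: application/json\r\n");       /* 32 bytes */
    write_header(1, "Access-Control-Allow-Origin: *\r\n\r\n");   /* 34 bytes */
    return 0;
}
```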
re-org the tutorial section so it will not break the flow of build/run
@@ -33,10 +33,6 @@ sudo apt-get install -y libcurl4-openssl-dev make echo-bot ``` -## Tutorial on how to make a bot - -Instructions on how to make a ping-pong bot is found [here](/docs/BUILDING_A_BOT.md). - ## Run echo-bot 1. Get your bot token and paste it to `bot.config` to replace `YOUR-BOT-TOKEN`. There are @@ -55,16 +51,9 @@ Type any message in any public channel of the server that the bot is invited. Close the Terminal that echo-bot is running or type "Ctrl-C" to kill it. -## Usage example -```c -void on_message(discord_t *client, discord_user_t *self, discord_message_t *message) -{ - // make sure it doesn't echoes itself - if (strcmp(self->username, message->author->username)){ - discord_send_message(client, message->channel_id, message->content); - } -} -``` +## Tutorial on how to make a bot + +Instructions on how to make a ping-pong bot is found [here](/docs/BUILDING_A_BOT.md). ## Supported Features: - discord gateway rate limiting
fix batch multiclass prediction on pre-quantized data
@@ -378,7 +378,7 @@ namespace NCB::NModelEvaluation {
                     treeEnd,
                     resultPtr
                 );
-                resultPtr += subBlock.GetObjectsCount();
+                resultPtr += subBlock.GetObjectsCount() * ObliviousTrees->ApproxDimension;
             }
         }
Tie return value of mbld to results of specified targets

I'd like to have `mbld test' return non-0 when the tests fail. I think this does it, but I may be over-simplifying.

 mbld/main.myr | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
@@ -15,7 +15,7 @@ use "syssel" const main = {args : byte[:][:] var b, runsrc, objdir, path, cmd - var targname, tags, pid, ok, r + var targname, tags, pid, ok cmd = std.optparse(args, &[ .argdesc = "[inputs...]", @@ -104,15 +104,15 @@ const main = {args : byte[:][:] ok = bld.buildtarg(b, "all") else match cmd.args[0] - | "clean": r = bld.clean(b) - | "install": r = bld.install(b) - | "uninstall": r = bld.uninstall(b) - | "test": r = bld.test(b, cmd.args[1:]) - | "bench": r = bld.bench(b, cmd.args[1:]) - | "list": r = show(b, cmd.args[1:]) + | "clean": ok = bld.clean(b) + | "install": ok = bld.install(b) + | "uninstall": ok = bld.uninstall(b) + | "test": ok = bld.test(b, cmd.args[1:]) + | "bench": ok = bld.bench(b, cmd.args[1:]) + | "list": ok = show(b, cmd.args[1:]) | _: for target : cmd.args - r = ok && bld.buildtarg(b, target) + ok = ok && bld.buildtarg(b, target) ;; ;; ;;
Update vdp_spr.c

Fixed VDP_updateSprites() with the DMA queue operation, to prevent modification of the sprite cache before the DMA actually occurs.
 // important to have a global structure here (consumes 640 bytes of memory)
 VDPSprite vdpSpriteCache[MAX_VDP_SPRITE];
+// copy of the global structure needed for DMA queue as we can modify
+// global structure before DMA actually occurs (consumes 640 bytes of memory)
+VDPSprite vdpSpriteCacheQueue[MAX_VDP_SPRITE];
 // keep trace of last allocated sprite (for special operation as link)
 VDPSprite *lastAllocatedVDPSprite;
 // keep trace of highest index allocated since the last VDP_resetSprites() or VDP_releaseAllSprites.
@@ -223,8 +226,14 @@ void VDP_updateSprites(u16 num, u16 queue)
     // send the sprite cache to VRAM sprite table using DMA queue
     if (queue)
-        DMA_queueDma(DMA_VRAM, (u32) vdpSpriteCache, VDP_SPRITE_TABLE, (sizeof(VDPSprite) / 2) * num, 2);
+    {
+        // copy global structure to queue copy
+        memcpy(vdpSpriteCacheQueue, vdpSpriteCache, sizeof(VDPSprite) * num);
+        // then queue the DMA operation
+        DMA_queueDma(DMA_VRAM, (u32) vdpSpriteCacheQueue, VDP_SPRITE_TABLE, (sizeof(VDPSprite) / 2) * num, 2);
+    }
     else
+        // send the sprite cache to the VRAM with DMA now
         DMA_doDma(DMA_VRAM, (u32) vdpSpriteCache, VDP_SPRITE_TABLE, (sizeof(VDPSprite) / 2) * num, 2);
 }
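The fix is an instance of the snapshot-before-queue pattern: when a transfer is merely queued, the source buffer must stay stable until the transfer actually runs, so the data is copied into a dedicated staging buffer first. A generic hedged sketch follows (names, sizes, and the transfer API are illustrative assumptions, not the SGDK API).

```c
#include <stdio.h>
#include <string.h>

#define N_ENTRIES 80

/* Illustrative 8-byte record standing in for a hardware sprite entry. */
typedef struct { short y, size_link, attribut, x; } sprite_t;

static sprite_t live[N_ENTRIES];     /* mutated freely by game logic */
static sprite_t staging[N_ENTRIES];  /* frozen copy owned by the queued transfer */

/* Placeholder for an asynchronous copy API; here it only reports the request. */
static void queue_transfer(const void *src, unsigned dest, size_t len) {
    (void)src;
    printf("queued %zu bytes to 0x%X\n", len, dest);
}

static void update_sprites(unsigned num) {
    /* Snapshot first, so later edits to 'live' cannot race the deferred copy. */
    memcpy(staging, live, sizeof(sprite_t) * num);
    queue_transfer(staging, 0xF800u, sizeof(sprite_t) * num);
}

int main(void) {
    update_sprites(4);
    live[0].x = 123;   /* safe: the queued transfer reads 'staging', not 'live' */
    return 0;
}
```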
Test that Doctype with an extra ID is rejected
@@ -6722,6 +6722,14 @@ START_TEST(test_short_doctype_3) } END_TEST +START_TEST(test_long_doctype) +{ + const char *text = "<!DOCTYPE doc PUBLIC 'foo' 'bar' 'baz'></doc>"; + expect_failure(text, XML_ERROR_SYNTAX, + "DOCTYPE with extra ID not rejected"); +} +END_TEST + /* * Namespaces tests. */ @@ -12208,6 +12216,7 @@ make_suite(void) tcase_add_test(tc_basic, test_short_doctype); tcase_add_test(tc_basic, test_short_doctype_2); tcase_add_test(tc_basic, test_short_doctype_3); + tcase_add_test(tc_basic, test_long_doctype); suite_add_tcase(s, tc_namespace); tcase_add_checked_fixture(tc_namespace,
Initial support for Quirky Tripper open close sensor made by SerComm
@@ -296,6 +296,7 @@ static const SupportedDevice supportedDevices[] = { { VENDOR_SERCOMM, "SZ-SRN12N", emberMacPrefix }, // Sercomm siren { VENDOR_SERCOMM, "SZ-SRN12N", energyMiMacPrefix }, // Sercomm siren { VENDOR_SERCOMM, "SZ-DWS04", emberMacPrefix }, // Sercomm open/close sensor + { VENDOR_SERCOMM, "Tripper", emberMacPrefix }, // Quirky Tripper (Sercomm) open/close sensor { VENDOR_ALERTME, "MOT003", tiMacPrefix }, // Hive Motion Sensor { VENDOR_SUNRICHER, "4512703", silabs2MacPrefix }, // Namron 4-ch remote controller { VENDOR_SENGLED_OPTOELEC, "E13-", zhejiangMacPrefix }, // Sengled PAR38 Bulbs @@ -6148,7 +6149,8 @@ void DeRestPluginPrivate::updateSensorNode(const deCONZ::NodeEvent &event) i->modelId().startsWith(QLatin1String("3320-L")) || // Centralite contact sensor i->modelId().startsWith(QLatin1String("3323")) || // Centralite contact sensor i->modelId().startsWith(QLatin1String("lumi.sen_ill")) || // Xiaomi ZB3.0 light sensor - i->modelId().startsWith(QLatin1String("SZ-DWS04"))) // Sercomm open/close sensor + i->modelId().startsWith(QLatin1String("SZ-DWS04")) || // Sercomm open/close sensor + i->modelId().startsWith(QLatin1String("Tripper"))) // Quirky Tripper (Sercomm) open/close { } else {
Have hand-written decode_huffman_fast obey limits

This makes it pass "wuffs test -skipgen std/deflate".
@@ -155,24 +155,33 @@ wuffs_base__status c_wuffs_deflate__decoder__decode_huffman_fast( // Load contextual state. Prepare to check that pdst and psrc remain within // a_dst's and a_src's bounds. + uint8_t* pdst = a_dst.private_impl.buf->data.ptr + a_dst.private_impl.buf->meta.wi; uint8_t* qdst = a_dst.private_impl.buf->data.ptr + a_dst.private_impl.buf->data.len; + if (a_dst.private_impl.limit) { + qdst = a_dst.private_impl.limit; + } if ((qdst - pdst) < 258) { return NULL; } else { qdst -= 258; } + uint8_t* psrc = a_src.private_impl.buf->data.ptr + a_src.private_impl.buf->meta.ri; uint8_t* qsrc = a_src.private_impl.buf->data.ptr + a_src.private_impl.buf->meta.wi; + if (a_src.private_impl.limit) { + qsrc = a_src.private_impl.limit; + } if ((qsrc - psrc) < 12) { return NULL; } else { qsrc -= 12; } + #if defined(WUFFS_DEFLATE__HAVE_64_BIT_UNALIGNED_LITTLE_ENDIAN_LOADS) uint64_t bits = self->private_impl.f_bits; #else
Additional Ring Sig Logs
@@ -825,6 +825,9 @@ int generateRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSiz rv = 1; goto End; } + printf("\n RingDbg: secret offset : %d, Ringsize: %d, C%d is :\n", nSecretOffset, nRingSize, nSecretOffset+1); + BN_print_fp(stdout, bnC); + printf("\n"); // c_{j+2} = h(P_1,...,P_n,s_{j+1}*G+c_{j+1}*P_{j+1},s_{j+1}*H(P_{j+1})+c_{j+1}*I_j) for (int k = 0, ib = (nSecretOffset + 1) % nRingSize, i = (nSecretOffset + 2) % nRingSize; @@ -930,6 +933,10 @@ int generateRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSiz && (rv = printf("%s: hash -> bnC failed.", __func__))) goto End; + printf(" \n RingDbg: Iteration %d: C%d follows\n", ib, ib+1); + BN_print_fp(stdout, bnC); + printf("\n"); + if (i == nSecretOffset &&!BN_copy(bnCj, bnC) && (rv = printf("%s: BN_copy failed.\n", __func__))) @@ -949,6 +956,9 @@ int generateRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSiz printf("%s: sigC.resize failed.\n", __func__); rv = 1; goto End; } + printf("\n RingDbg: So E0 = c0 = \n"); + BN_print_fp(stdout, bnC); + printf("\n"); memcpy(&sigC[0], tempData, EC_SECRET_SIZE); } } @@ -1034,6 +1044,10 @@ int verifyRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSize, rv = 1; goto End; } + printf("\n RingDbg: Starting with C0 which is\n"); + BN_print_fp(stdout, bnC1); + printf("\n"); + for (int i = 0; i < nRingSize; ++i) { if (!bnS || !(BN_bin2bn(&pSigS[i * EC_SECRET_SIZE], EC_SECRET_SIZE, bnS))) @@ -1109,6 +1123,9 @@ int verifyRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSize, printf("%s: tmpHash -> bnC failed.\n", __func__); rv = 1; goto End; } + printf("\n RingDbg: Iteration %d, so C%d follows\n", i, i+1); + BN_print_fp(stdout, bnC); + printf("\n"); } // bnT = (bnC - bnC1) % N @@ -1118,6 +1135,13 @@ int verifyRingSignatureAB(data_chunk &keyImage, uint256 &txnHash, int nRingSize, rv = 1; goto End; } + printf("\n RingDbg: End result, bnC:\n"); + BN_print_fp(stdout, bnC); + printf("\n bnC1 old is \n"); + BN_print_fp(stdout, bnC1); + printf("\n So bnT is \n"); + BN_print_fp(stdout, bnT); + printf("\n"); // test bnT == 0 (bnC == bnC1) if (!BN_is_zero(bnT))
allow ENLIVE on the GPU
@@ -373,10 +373,6 @@ __attribute__((optimize("-fno-finite-math-only")))
 static void proj_add(unsigned int D, const long dims[D], const long ostrs[D], complex float* optr,
 		const long v1_strs[D], complex float* v1, const long v2_strs[D], complex float* v2)
 {
-#ifdef USE_CUDA
-	if (cuda_ondevice(v1))
-		error("md_zscalar is far too slow on the GPU, refusing to run...\n");
-#endif
 	float v22 = md_zscalar_real2(D, dims, v2_strs, v2, v2_strs, v2); // since it is real anyway
 	complex float v12 = md_zscalar2(D, dims, v1_strs, v1, v2_strs, v2) / v22;
ack previous on future nack
++ on-hear-message-ack |= [=message-num ok=? lag=@dr] ^+ message-pump + :: future nack implies positive ack on all earlier messages + :: + ?. ok + |- ^+ message-pump + :: base case: current message got nacked; handle same as ack + :: + ?: =(message-num current.state) + ^$(ok %.y) + :: recursive case: future message got nacked + :: + =. message-pump ^$(ok %.y, message-num current.state) + $ :: ignore duplicate and future acks :: ?. (is-message-num-in-range message-num)
build/bsync: extending line width for more compact header with comment apis
@@ -4329,18 +4329,37 @@ void ofdmflexframesync_debug_print(ofdmflexframesync _q, // TC : coefficients data type // TI : input data type #define LIQUID_BSYNC_DEFINE_API(BSYNC,TO,TC,TI) \ + \ +/* Binary P/N synchronizer */ \ typedef struct BSYNC(_s) * BSYNC(); \ \ -BSYNC() BSYNC(_create)(unsigned int _n, TC * _v); \ +/* Create bsync object */ \ +/* _n : sequence length */ \ +/* _v : correlation sequence [size: _n x 1] */ \ +BSYNC() BSYNC(_create)(unsigned int _n, \ + TC * _v); \ \ -/* create binary synchronizer from m-sequence */ \ +/* Create binary synchronizer from m-sequence */ \ /* _g : m-sequence generator polynomial */ \ /* _k : samples/symbol (over-sampling factor) */ \ BSYNC() BSYNC(_create_msequence)(unsigned int _g, \ unsigned int _k); \ -void BSYNC(_destroy)(BSYNC() _fs); \ -void BSYNC(_print)(BSYNC() _fs); \ -void BSYNC(_correlate)(BSYNC() _fs, TI _sym, TO * _y); + \ +/* Destroy binary synchronizer object, freeing all internal memory */ \ +/* _q : bsync object */ \ +void BSYNC(_destroy)(BSYNC() _q); \ + \ +/* Print object internals to stdout */ \ +/* _q : bsync object */ \ +void BSYNC(_print)(BSYNC() _q); \ + \ +/* Correlate input signal against internal sequence */ \ +/* _q : bsync object */ \ +/* _x : input sample */ \ +/* _y : pointer to output sample */ \ +void BSYNC(_correlate)(BSYNC() _q, \ + TI _x, \ + TO * _y); \ LIQUID_BSYNC_DEFINE_API(LIQUID_BSYNC_MANGLE_RRRF, float,
os/tools/check_package.py : Add sys.exit(1) for validation failure

If there is a validation failure, return exit(1).
@@ -41,6 +41,8 @@ LOADING_HIGH = 3 PACKAGE_TYPE_ELF = 1 +VERIFY_SUCCESS = True + target = sys.argv[1] if target == "-h" or target == "--help" : print("Usage :") @@ -113,6 +115,7 @@ elif "app_" in target : print("\tPackage Type : ELF(%d)" %package_type[0]) else : print("\tPackage Type Invalid : %d" %package_type[0]) + VERIFY_SUCCESS = False print("\tMain Priority : " + str(main_prio[0])) if loading_prio[0] == LOADING_LOW : print("\tLoading Priority : LOADING_LOW(%d)" %LOADING_LOW) @@ -122,6 +125,7 @@ elif "app_" in target : print("\tLoading Priority : LOADING_HIGH(%d)" %LOADING_HIGH) else : print("\tLoading Priority : Invalid(%d)" %loading_prio[0]) + VERIFY_SUCCESS = False print("\tFile Size : " + str(file_size[0])) print("\tPackage Name : " + str(package_name[0]) + str(package_name[1]) + str(package_name[2])) print("\tPackage Version : " + str(package_ver[0])) @@ -131,6 +135,7 @@ elif "app_" in target : else : print("!!!Not Supported Package. Please Check the package!!!") + VERIFY_SUCCESS = False sys.exit(1) # Verify the package by calculating the crc and comparing it with the value of the header. @@ -148,6 +153,7 @@ if checksum_header == checksum_calc : print("\t* Checksum verification Done.") else : print("\t* Checksum is invalid, header : " + str(checksum_header) + ", calc : " + str(checksum_calc)); + VERIFY_SUCCESS = False # Verify the package version between header and the file name @@ -157,3 +163,8 @@ if package_ver[0] == int(file_name_ver) : print("\t* Version Matched between filename and header.") else : print("\t* Version NOT MATCHED!!! between filename(" + file_name_ver + ") and header(" + str(package_ver[0]) + ")") + VERIFY_SUCCESS = False + +# Return exit(1) if there is a validation failure. +if VERIFY_SUCCESS == False : + sys.exit(1)
OcAppleKernelLib: Fix variable initialisation
@@ -819,6 +819,7 @@ InternalRelocateRelocation ( Length = Relocation->Size; Type = (UINT8) Relocation->Type; PcRelative = (Relocation->PcRelative != 0); + PairTarget = 0; InvalidPcRel = FALSE; ScatteredRelocation = NULL; @@ -874,7 +875,9 @@ InternalRelocateRelocation ( } } + // // Length == 2 + // if (Length != 3) { CopyMem (&Instruction32, InstructionPtr, sizeof (Instruction32));
filter NVMe error msgs as they are safe to ignore, see Xilinx AR61901
else echo "dont know where I am, leaving $P3 $PWD"; exit 12; fi - if [ "$PARM_FILE" != "" ];then cp ${SIMBASE}/${PARM_FILE} pslse.parms;fi # overwrite with specified parms file +#if [ "$PARM_FILE" != "" ];then cp ${SIMBASE}/${PARM_FILE} pslse.parms;fi # overwrite with specified parms file + if [ "$PARM_FILE" != "" ];then cp $DONUT_HARDWARE_ROOT/sim/${PARM_FILE} pslse.parms;fi # overwrite with specified parms file export LD_LIBRARY_PATH=$PSLSE_ROOT/afu_driver/src # to find libvpi.so if [ "$SIMDIR" == "ies" ];then # SIM_ARG='-gui -tcl' # interactive with simvision # SIM_ARG='-gui -run -tcl' # non_interactive, with simvision # SIM_ARG='-run -tcl' # non_interactive, no exit at end - SIM_ARG='-batch' # -batch overrides -tcl +# SIM_ARG='-batch' # -batch overrides -tcl + SIM_ARG='-batch +model_data+'.'' # model_data for tracing ddr3_dimm # if [ "$CDS_COVERAGE" == "1" ];then # coverage options in ncsim based on envvariable if [ -z ${CDS_COVERAGE+x} ];then : # coverage options in ncsim based on envvariable, works with set -u # echo "env_var CDS_COVERAGE is unset" SIM_ARG='' # -batch overrides -tcl VPI_OPT='libdpi.so' SIM_ARG+=" -t xsrun.tcl" +# SIM_ARG+=" -t xsrun.tcl +model_data+." # syntax errors if [ "$AET" == "1" ];then `sed -i "s/#source xsaet.tcl/ source xsaet.tcl/g" xsrun.tcl` # enable/uncomment AET generation in xsrun.tcl else done fi - echo "**** append msgs from sim.log" |tee -a stim.log; grep -i -E "error=|error:|FIR:|assertion" sim.log |grep -v LOADLB |tee -a stim.log + echo "**** append msgs from sim.log" |tee -a stim.log; + grep -i -E "error=|error:|FIR:" sim.log |grep -v -E "LOADLB|tIS violation|tRCD violation|RST_N goes inactive" |tee -a stim.log + xil_warnings=`grep -i "error:" sim.log |grep -E "tIS violation|tRCD violation|RST_N goes inactive" |wc -l`; + if [[ "$xil_warnings" -ne "0" ]]; then echo "$xil_warnings tIS/tRCD/RST_N violations found in sim.log, ignored according to Xilinx AR#61901" |tee -a stim.log;fi #echo "**** append msgs from $SIMULATOR.log"|tee -a stim.log; grep -i "error" $SIMULATOR.log|grep -v LOADLB |tee -a stim.log # duplicate info to sim.log #cd $DONUT_HARDWARE_ROOT; HWVERS=`git log --pretty=format:"%h %ad" -n1 --date=iso|awk '{print $1 " " $2}'`;cd - >/dev/null #cd $DONUT_SOFTWARE_ROOT; SWVERS=`git log --pretty=format:"%h %ad" -n1 --date=iso|awk '{print $1 " " $2}'`;cd - >/dev/null
Fix: Ignore duplicate data blocks received when sending OTA status update to service
@@ -1543,13 +1543,17 @@ static void prvOTAUpdateTask( void * pvUnused ) } } else - { /* We're actively receiving a file so update the job status as needed. */ + { + if( xResult == eIngest_Result_Accepted_Continue ) + { + /* We're actively receiving a file so update the job status as needed. */ /* First reset the momentum counter since we received a good block. */ C->ulRequestMomentum = 0; prvUpdateJobStatus( C, eJobStatus_InProgress, ( int32_t ) eJobReason_Receiving, ( int32_t ) NULL ); } } } + } else { /* Ignore unknown message types. */
Also free tmp buffer after copy
@@ -454,7 +454,7 @@ pocl_exec_command (_cl_command_node * volatile node)
                          node->command.copy_image.dst_slicepitch,
                          tmp_rowpitch, tmp_slicepitch);
-
+          free (tmp);
         }
       POCL_UPDATE_EVENT_COMPLETE(event);
       POCL_DEBUG_EVENT_TIME(event, "Copy Buffer Rect ");
ip4_rewrite_inline: fix variable error
@@ -2348,7 +2348,7 @@ ip4_rewrite_inline (vlib_main_t * vm,
          adj0->sub_type.midchain.fixup_func
            (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
          adj1->sub_type.midchain.fixup_func
-           (vm, adj1, b[1], adj0->sub_type.midchain.fixup_data);
+           (vm, adj1, b[1], adj1->sub_type.midchain.fixup_data);
        }
      if (is_mcast)
@@ -2360,7 +2360,7 @@ ip4_rewrite_inline (vlib_main_t * vm,
                                      adj0->rewrite_header.dst_mcast_offset,
                                      &ip0->dst_address.as_u32, (u8 *) ip0);
          vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
-                                     adj0->rewrite_header.dst_mcast_offset,
+                                     adj1->rewrite_header.dst_mcast_offset,
                                      &ip1->dst_address.as_u32, (u8 *) ip1);
        }
sidebar: level padding-right
@@ -67,12 +67,12 @@ export function SidebarListHeader(props: { <Icon icon="Plus" color="gray" pr='2'/> </Link> <Link to={`${props.baseUrl}/invites`} - style={{ display: (props.workspace?.type === 'home') ? 'inline-block' : 'none', verticalAlign: 'bottom' }}> + style={{ display: (props.workspace?.type === 'home') ? 'inline-block' : 'none'}}> <Text display='inline-block' - verticalAlign='middle' py='1px' px='3px' + mr='2' backgroundColor='washedBlue' color='blue' borderRadius='1'>
apps/ext_advertiser: Fix instance and sid values

Let's use instance number as sid also, this makes it easier to find those instances in the air. Also fix instance numbers in comments.
@@ -86,7 +86,7 @@ start_ext_max_events(uint8_t pattern, bool configure) params.primary_phy = BLE_HCI_LE_PHY_1M; params.secondary_phy = BLE_HCI_LE_PHY_1M; params.tx_power = 127; - params.sid = pattern % 16; + params.sid = 4; /* allow larger interval, 400 * 0.625ms with 100 events will give up to * ~2.5 seconds for instance @@ -94,7 +94,7 @@ start_ext_max_events(uint8_t pattern, bool configure) params.itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; params.itvl_max = 400; - /* configure instance 0 */ + /* configure instance 4 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, start_ext_max_events_gap_event, NULL); assert (rc == 0); @@ -184,9 +184,9 @@ start_legacy_duration(uint8_t pattern, bool configure) params.primary_phy = BLE_HCI_LE_PHY_1M; params.secondary_phy = BLE_HCI_LE_PHY_1M; params.tx_power = 127; - params.sid = pattern % 16; + params.sid = 3; - /* configure instance 0 */ + /* configure instance 3 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, start_legacy_duration_gap_event, NULL); assert (rc == 0); @@ -251,7 +251,7 @@ start_scannable_legacy_ext(void) params.tx_power = 127; params.sid = 2; - /* configure instance 0 */ + /* configure instance 2 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, NULL, NULL); assert (rc == 0); @@ -335,7 +335,7 @@ start_scannable_ext(void) params.tx_power = 127; params.sid = 1; - /* configure instance 0 */ + /* configure instance 1 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, scannable_ext_gap_event, NULL); assert (rc == 0); @@ -387,7 +387,7 @@ start_non_connectable_ext(void) params.tx_power = 127; params.sid = 0; - /* configure instance */ + /* configure instance 0 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, NULL, NULL); assert (rc == 0); @@ -430,9 +430,9 @@ static void start_periodic(void) params.primary_phy = BLE_HCI_LE_PHY_1M; params.secondary_phy = BLE_HCI_LE_PHY_1M; params.tx_power = 127; - params.sid = 2; + params.sid = 5; - /* configure instance 0 */ + /* configure instance 5 */ rc = ble_gap_ext_adv_configure(instance, &params, NULL, NULL, NULL); assert (rc == 0);
Travis: make a separate job for external tests

Some of the external tests do not run well with 'no-shared'
@@ -71,7 +71,17 @@ matrix: sources: - ubuntu-toolchain-r-test compiler: gcc-5 - env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug --coverage no-asm enable-rc5 enable-md2 enable-ec_nistp_64_gcc_128 enable-ssl3 enable-ssl3-method enable-nextprotoneg enable-weak-ssl-ciphers enable-external-tests no-shared -DPEDANTIC -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION" COVERALLS="yes" BORINGSSL_TESTS="yes" CXX="g++-5" + env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug --coverage no-asm enable-rc5 enable-md2 enable-ec_nistp_64_gcc_128 enable-ssl3 enable-ssl3-method enable-nextprotoneg enable-weak-ssl-ciphers no-shared -DPEDANTIC -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION" COVERALLS="yes" BORINGSSL_TESTS="yes" CXX="g++-5" + - os: linux + addons: + apt: + packages: + - gcc-5 + - g++-5 + sources: + - ubuntu-toolchain-r-test + compiler: gcc-5 + env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug enable-ssl3 enable-ssl3-method enable-weak-ssl-ciphers enable-external-tests" BORINGSSL_TESTS="yes" CXX="g++-5" TESTS=95 - os: linux addons: apt:
acrn-config: modify the key of vuart base

Return correct key of vuart base to webUI for parsing.

Acked-by: Victor Sun
@@ -718,14 +718,14 @@ def avl_vuart_ui_select(scenario_info): vm_type = get_order_type_by_vmid(vm_i) if vm_type == "SOS_VM": - key = "vm={}:vuart=0,base".format(vm_i) + key = "vm={},vuart=0,base".format(vm_i) tmp_vuart[key] = ['SOS_COM1_BASE', 'INVALID_COM_BASE'] - key = "vm={}:vuart=1,base".format(vm_i) + key = "vm={},vuart=1,base".format(vm_i) tmp_vuart[key] = ['SOS_COM2_BASE', 'INVALID_COM_BASE'] else: - key = "vm={}:vuart=0,base".format(vm_i) + key = "vm={},vuart=0,base".format(vm_i) tmp_vuart[key] = ['INVALID_COM_BASE', 'COM1_BASE'] - key = "vm={}:vuart=1,base".format(vm_i) + key = "vm={},vuart=1,base".format(vm_i) tmp_vuart[key] = ['INVALID_COM_BASE', 'COM2_BASE'] #print(tmp_vuart)
quic: refactor connection search fn

Type: refactor
@@ -1715,19 +1715,23 @@ tx_end: static inline int quic_find_packet_ctx (quic_rx_packet_ctx_t * pctx, u32 caller_thread_index) { - quic_ctx_t *ctx_; - quicly_conn_t *conn_; clib_bihash_kv_16_8_t kv; clib_bihash_16_8_t *h; + quic_ctx_t *ctx; + u32 index, thread_id; h = &quic_main.connection_hash; quic_make_connection_key (&kv, &pctx->packet.cid.dest.plaintext); QUIC_DBG (3, "Searching conn with id %lu %lu", kv.key[0], kv.key[1]); - if (clib_bihash_search_16_8 (h, &kv, &kv) == 0) + if (clib_bihash_search_16_8 (h, &kv, &kv)) { - u32 index = kv.value & UINT32_MAX; - u32 thread_id = kv.value >> 32; + QUIC_DBG (3, "connection not found"); + return QUIC_PACKET_TYPE_NONE; + } + + index = kv.value & UINT32_MAX; + thread_id = kv.value >> 32; /* Check if this connection belongs to this thread, otherwise * ask for it to be moved */ if (thread_id != caller_thread_index) @@ -1738,20 +1742,20 @@ quic_find_packet_ctx (quic_rx_packet_ctx_t * pctx, u32 caller_thread_index) pctx->thread_index = thread_id; return QUIC_PACKET_TYPE_MIGRATE; } - ctx_ = quic_ctx_get (index, vlib_get_thread_index ()); - conn_ = ctx_->conn; - if (conn_ - && quicly_is_destination (conn_, NULL, &pctx->sa, &pctx->packet)) + ctx = quic_ctx_get (index, vlib_get_thread_index ()); + if (!ctx->conn) { + QUIC_ERR ("ctx has no conn"); + return QUIC_PACKET_TYPE_NONE; + } + if (!quicly_is_destination (ctx->conn, NULL, &pctx->sa, &pctx->packet)) + return QUIC_PACKET_TYPE_NONE; + QUIC_DBG (3, "Connection found"); pctx->ctx_index = index; pctx->thread_index = thread_id; return QUIC_PACKET_TYPE_RECEIVE; } - } - QUIC_DBG (3, "connection not found"); - return QUIC_PACKET_TYPE_NONE; -} static int quic_accept_connection (u32 ctx_index, quic_rx_packet_ctx_t * pctx)
some injection test improvements
@@ -6479,13 +6479,16 @@ qsort(scanlist, scanlistmax, SCANLIST_SIZE, sort_scanlist_by_hit); for(zeiger = scanlist; zeiger < scanlist +SCANLIST_MAX; zeiger++) { if(zeiger->count == 0) break; - if((zeiger->channel < 36) && (zeiger->hit > 0)) inject24 = true; - if(((zeiger->channel >= 36) && (zeiger->channel < 200)) && (zeiger->hit > 0)) inject5 = true; - if((zeiger->channel >= 200) && (zeiger->hit > 0)) inject6 = true; + if(zeiger->hit > 0) + { + if(zeiger->channel < 36) inject24 = true; + else if((zeiger->channel >= 36) && (zeiger->channel < 200)) inject5 = true; + else if(zeiger->channel >= 200) inject6 = true; injectionhit += zeiger->hit; + networkhit++; + } injectioncount += zeiger->beacon; networkcount++; - if(zeiger->hit > 0) networkhit ++; } if(injectionhit > 0) {
zephyr/test/drivers/src/keyboard_scan.c: Format with clang-format

BRANCH=none
TEST=none
 int emulate_keystate(int row, int col, int pressed)
 {
-	const struct device *dev =
-		DEVICE_DT_GET(DT_NODELABEL(cros_kb_raw));
+	const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(cros_kb_raw));
 	return emul_kb_raw_set_kbstate(dev, row, col, pressed);
 }
Fixes on bsd for os/cpu-count.
@@ -232,7 +232,7 @@ JANET_CORE_FN(os_cpu_count,
     const int name[2] = {CTL_HW, HW_NCPUONLINE};
     int result = 0;
     size_t len = sizeof(int);
-    if (-1 == sysctl(name, 2, &result, &len, sizeof(result), NULL, 0)) {
+    if (-1 == sysctl(name, 2, &result, &len, ,NULL, 0)) {
         return dflt;
     }
     return janet_wrap_integer(result);
@@ -241,7 +241,7 @@ JANET_CORE_FN(os_cpu_count,
     const int name[2] = {CTL_HW, HW_NCPU};
     int result = 0;
     size_t len = sizeof(int);
-    if (-1 == sysctl(name, 2, &result, &len, sizeof(result), NULL, 0)) {
+    if (-1 == sysctl(name, 2, &result, &len, NULL, 0)) {
         return dflt;
     }
     return janet_wrap_integer(result);
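For reference, a self-contained sketch of the six-argument sysctl(3) call that the corrected lines converge on, guarded for platforms that provide CTL_HW/HW_NCPU and falling back to a caller-supplied default elsewhere. This is an illustrative standalone program, not the Janet source.

```c
#include <stdio.h>

#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

/* Return the CPU count, or dflt if it cannot be determined. */
static int cpu_count(int dflt) {
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
    int name[2] = {CTL_HW, HW_NCPU};
    int result = 0;
    size_t len = sizeof(result);
    /* sysctl(name, namelen, oldp, oldlenp, newp, newlen) */
    if (sysctl(name, 2, &result, &len, NULL, 0) == -1) {
        return dflt;
    }
    return result;
#else
    return dflt;
#endif
}

int main(void) {
    printf("cpus: %d\n", cpu_count(1));
    return 0;
}
```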
Minify HTML report app.js upon compiling program.
@@ -93,14 +93,19 @@ endif # Charts.js chartsjs.h: bin2c$(EXEEXT) resources/js/charts.js if HAS_SEDTR - cat resources/js/charts.js | sed -E "s@(;)\s?//..*@\1@g" | sed -E "s@^[ \t]*//..*@@g" | sed "s/^[ \t]*//" | sed "/^$$/d" | tr -d "\r\n" > resources/js/charts.js.tmp + cat resources/js/charts.js | sed -E "s@(,|;)[ \t]*?//..*@\1@g" | sed -E "s@^[ \t]*//..*@@g" | sed "s/^[ \t]*//" | sed "/^$$/d" | tr -d "\r\n" > resources/js/charts.js.tmp ./bin2c resources/js/charts.js.tmp src/chartsjs.h charts_js else ./bin2c resources/js/charts.js src/chartsjs.h charts_js endif # App.js appjs.h: bin2c$(EXEEXT) resources/js/app.js +if HAS_SEDTR + cat resources/js/app.js | sed -E "s@(,|;)[ \t]*?//..*@\1@g" | sed -E "s@^[ \t]*//..*@@g" | sed "s/^[ \t]*//" | sed "/^$$/d" | tr -d "\r\n" > resources/js/app.js.tmp + ./bin2c resources/js/app.js.tmp src/appjs.h app_js +else ./bin2c resources/js/app.js src/appjs.h app_js +endif confdir = $(sysconfdir) dist_conf_DATA = config/goaccess.conf
doc: update release notes for v2.7 on configuration upgrades

This patch updates recommendations to upgrade from a prior ACRN version for v2.7, and updates the what's new summary.
@@ -85,6 +85,24 @@ Update Scenario Names VMs and the Service VM provides resource emulation and sharing for post-launched User VMs, all in the same system configuration. +User-Friendly VM names + Instead of using a UUID as the User VM identifier, we're now using a + user-friendly VM name. + +Extend Use of CAT Cache Tuning to VMs + In previous releases, Cache Allocation Technology (vCAT) was available only + at the hypervisor level and with per-VM granularity. In this v2.7 release, + each VM with exclusive cache resources can partition its cache resources with + per-thread granularity and allocate cache resources to prioritized tasks. + +Expand Passthrough Device Use Cases to Pre-Launched VMs + We now allow pre-launched VMs (in partitioned or hybrid scenarios) to use + graphics device passthrough for improved performance, a feature previously + available to only post-launched VMs. + + We've extended Trusted Platform Module (TPM) 2.0 and its associated resource + passthrough to post-launched VMs. + Upgrading to v2.7 From Previous Releases **************************************** @@ -99,18 +117,26 @@ that is essential to build ACRN. Compared to previous versions, ACRN v2.7 adds the following hardware information to board XMLs to support new features and fixes. - - list features here + - Always initialize ``hw_ignore`` when parsing ``DMAR``. The new board XML can be generated using the ACRN board inspector in the same way as ACRN v2.6. Refer to :ref:`acrn_config_workflow` for a complete list of steps to deploy and run the tool. -Add New Configuration Options -============================= +Update Configuration Options +============================ In v2.7, the following elements are added to scenario XML files. -- list elements here +- :option:`vm.name` (This is a required element. Names must be unique, up to 15 + characters long, and contain no space characters.) +- :option:`hv.CAPACITIES.MAX_VM_NUM` (Default value is ``8``) +- :option:`hv.FEATURES.RDT.VCAT_ENABLED` (Default value is ``n``) + +The following elements were removed. + +- ``KATA_VM`` VM type. +- ``hv.CAPACITIES.MAX_EFI_MMAP_ENTRIES`` Document Updates **************** @@ -144,6 +170,9 @@ Fixed Issues Details .. comment example item - :acrn-issue:`5626` - [CFL][industry] Host Call Trace once detected +- :acrn-issue:`6610` - [ConfigTool]Remove the restriction that SERIAL_CONSOLE needs to be ttys0, ttys1, ttys2 or ttys3 +- :acrn-issue:`6620` - [ConfigTool]pci devices' io-ports passthrough + Known Issues ************
Fixed a bug in Velocyto.
@@ -134,7 +134,7 @@ void Transcriptome::classifyAlign (Transcript **alignG, uint64 nAlignG, ReadAnno
             if (aStatus==AlignVsTranscript::Concordant)
                 aStatus=AlignVsTranscript::Exon;
-            if (!aStatus==AlignVsTranscript::ExonIntronSpan) {
+            if (aStatus!=AlignVsTranscript::ExonIntronSpan) {
                 reAnn.set(AlignVsTranscript::ExonIntronSpan, true);//meaning of this bit is NoExonIntronSpan
                 reAnn.set(aStatus, true);
             };
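The underlying bug is the classic `!x == y` precedence trap: `!` binds tighter than `==`, so the original condition compared a boolean against the enum value. A tiny self-contained illustration (generic enum, not the STAR types):

```c
#include <stdio.h>

enum status { EXON = 1, SPAN = 2 };

int main(void) {
    enum status s = EXON;

    /* `!s == SPAN` parses as `(!s) == SPAN`, i.e. `0 == 2`: false for every
     * non-zero s, which is not the intended "s is not SPAN" test. */
    printf("broken : %d\n", !s == SPAN);
    printf("correct: %d\n", s != SPAN);
    return 0;
}
```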
Fix empty string consumption
@@ -244,6 +244,7 @@ static size_t resp_parse(resp_parser_s *parser, const void *buffer,
         resp_on_start_string(parser, 0);
         resp_on_end_string(parser);
         --parser->obj_countdown;
+        eol += 2; /* consume the extra "\r\n" */
       } else {
         if (resp_on_start_string(parser, i)) {
           pos = eol + 1;
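A toy, self-contained scanner (not the facil.io RESP parser) showing why the empty-string case still has to consume its own trailing CRLF, which is exactly what the added `eol += 2` does:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy scanner for a RESP-style bulk string "$<len>\r\n<payload>\r\n".
 * Returns the number of bytes consumed, or -1 on malformed input.
 * The point: a zero-length payload still owns a trailing "\r\n". */
static int consume_bulk(const char *buf) {
    if (buf[0] != '$')
        return -1;
    long len = strtol(buf + 1, NULL, 10);
    const char *eol = strstr(buf, "\r\n");
    if (eol == NULL || len < 0)
        return -1;
    return (int)((eol - buf) + 2 + len + 2); /* header + CRLF + payload + CRLF */
}

int main(void) {
    printf("%d\n", consume_bulk("$0\r\n\r\n"));      /* 6: empty payload still eats its CRLF */
    printf("%d\n", consume_bulk("$5\r\nhello\r\n")); /* 11 */
    return 0;
}
```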
zephyr: sort CMakeLists sources

Sort a few out-of-order configs in zephyr/CMakeLists.txt. This is done in a separate CL to avoid clutter with the following change.

BRANCH=none
TEST=zmake testall
@@ -159,17 +159,16 @@ zephyr_sources_ifdef(CONFIG_PLATFORM_EC_POWERSEQ_INTEL zephyr_sources_ifdef(CONFIG_PLATFORM_EC_POWERSEQ_HOST_SLEEP "${PLATFORM_EC}/power/host_sleep.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_PANIC "${PLATFORM_EC}/common/panic_output.c") +zephyr_sources_ifdef(CONFIG_PLATFORM_EC_PWM "${PLATFORM_EC}/common/pwm.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_SHA256_SW "${PLATFORM_EC}/common/sha256.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_SWITCH "${PLATFORM_EC}/common/switch.c") - -zephyr_sources_ifdef(CONFIG_PLATFORM_EC_THROTTLE_AP - "${PLATFORM_EC}/common/throttle_ap.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_TABLET_MODE "${PLATFORM_EC}/common/tablet_mode.c") -zephyr_sources_ifdef(CONFIG_PLATFORM_EC_PWM "${PLATFORM_EC}/common/pwm.c") +zephyr_sources_ifdef(CONFIG_PLATFORM_EC_THROTTLE_AP + "${PLATFORM_EC}/common/throttle_ap.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_TIMER "${PLATFORM_EC}/common/timer.c") zephyr_sources_ifdef(CONFIG_PLATFORM_EC_USB_CHARGER
Update help message for flag `--hba_hostnames`
@@ -129,7 +129,7 @@ def parseargs():
                       'if the warm master standby host has failed. This option will '
                       'need to shutdown the GPDB array to be able to complete the request')
     optgrp.add_option('', '--hba_hostnames', action='store_true', dest='hba_hostnames',
-                      help='add fully-qualified domain names to pg_hba.conf')
+                      help='use hostnames instead of CIDR in pg_hba.conf')
     # XXX - This option is added to keep backward compatibility with DCA tools.
     # But this option plays no role in the whole process, its a No-Op
Update some sr.ht configs.
@@ -6,15 +6,15 @@ packages: tasks: - with-epoll: | cd janet - meson setup build --buildtype=release - cd build + meson setup with-epoll --buildtype=release + cd with-epoll meson configure -Depoll=true ninja ninja test - no-epoll: | cd janet - meson setup build --buildtype=release - cd build + meson setup no-epoll --buildtype=release + cd no-epoll meson configure -Depoll=false ninja ninja test
examples/qencoder: fix nxstyle warning and errors
@@ -242,7 +242,7 @@ int main(int argc, FAR char *argv[])
     {
       printf("qe_main: open %s failed: %d\n", g_qeexample.devpath, errno);
       exitval = EXIT_FAILURE;
-      goto errout_with_dev;
+      goto errout;
     }
   /* Reset the count if so requested */
Changed broken link to new address of udev rules

Changed and checked that it should work with the new link. Also changed the number of files to 4 since there are 4 udev rule files in the directory.
@@ -81,7 +81,7 @@ SUBSYSTEMS=="usb", ATTRS{idVendor}=="0483", ATTRS{idProduct}=="374b", \ and the `idVendor` of `0483` and `idProduct` of `374b` matches the vendor id from the `lsusb` output. -Make sure that you have all 3 files from here: https://github.com/stlink-org/stlink/tree/master/etc/udev/rules.d in your `/etc/udev/rules.d` directory. After copying new files or editing excisting files in `/etc/udev/ruled.d` you should run the following: +Make sure that you have all 4 files from here: https://github.com/stlink-org/stlink/tree/master/config/udev/rules.d in your `/etc/udev/rules.d` directory. After copying new files or editing excisting files in `/etc/udev/ruled.d` you should run the following: ``` sudo udevadm control --reload-rules
Memory leaks in encoding BUFR (part 1)
@@ -1354,6 +1354,7 @@ static int encode_new_element(grib_context* c, grib_accessor_bufr_data_array* se
             }
             else {
                 err = encode_string_value(c, buff, pos, bd, self, csval);
+                grib_context_free(c, csval);
             }
         }
         else {
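Hedged sketch of the ownership rule the fix enforces: a string returned by an allocating accessor must be freed on every branch that consumes it, including the success path. The helper names are hypothetical, not the ecCodes API.

```c
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the real accessor/encoder calls. */
static char *get_string_value(void) {
    char *s = malloc(8);
    if (s != NULL)
        strcpy(s, "example");
    return s;
}
static int encode_string_value(const char *s) { return s[0] != '\0' ? 0 : -1; }

static int encode_element(void) {
    char *csval = get_string_value();
    if (csval == NULL)
        return -1;
    int err = encode_string_value(csval);
    free(csval);   /* freed on the success path and the failure path alike */
    return err;
}

int main(void) { return encode_element() == 0 ? 0 : 1; }
```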
Fix typo (`UDB` -> `UDP`)
@@ -5,6 +5,6 @@ We're using Cribl LogStream setup with a few sourceswe can connect to.
 * TCP:10090 is configured as an AppScope source using TLS and the cert/key pair in /tmp
 * TCP:10091 is another AppScope source using plain TCP
 * TCP:10070 is a TCP JSON source
-* UDP:8125 is a UDB metrics source
+* UDP:8125 is a UDP metrics source
 We have a Filesystem destination set to produce files under `/tmp/out` with a subdirectory for each source; `appscope:in_appscope_tls`, `appscope:in_appscope_tcp`, `tcp_json:in_tcp_json`, and `metrics:in_metrics`. The destination is set to close files and move them to where they go after they've been idle for 5 seconds so tests need to wait 5+ seconds before looking for the files they expect to get.
runtime/net: always free rx buffers in icmp
@@ -56,11 +56,8 @@ void net_rx_icmp(struct mbuf *m, struct ip_hdr *iphdr, uint16_t len)
 		break;
 	default:
 		log_err("icmp: type %d not yet supported", icmp_hdr->type);
-		goto drop;
+		break;
 	}
-	return;
-
-drop:
 	mbuf_free(m);
 }
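The shape of the fix, in a generic hedged sketch (toy buffer type, not the runtime's mbuf API): the unsupported-type branch now falls through to the same cleanup as every other case instead of returning early and leaking the buffer.

```c
#include <stdio.h>
#include <stdlib.h>

struct buf { unsigned char *data; };

static void buf_free(struct buf *b) { free(b->data); free(b); }

static void handle_echo(struct buf *b) { (void)b; /* reply would be built here */ }

static void rx_icmp(struct buf *b, int type) {
    switch (type) {
    case 8: /* echo request */
        handle_echo(b);
        break;
    default:
        fprintf(stderr, "icmp: type %d not yet supported\n", type);
        break;      /* no early return: fall through to the shared cleanup */
    }
    buf_free(b);    /* every path releases the buffer exactly once */
}

int main(void) {
    struct buf *b = malloc(sizeof(*b));
    if (b == NULL)
        return 1;
    b->data = malloc(16);
    rx_icmp(b, 42); /* unsupported type is logged and still freed */
    return 0;
}
```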
crypto/provider_core.c: Avoid calling unlock two times
@@ -641,7 +641,7 @@ int ossl_provider_add_to_store(OSSL_PROVIDER *prov, OSSL_PROVIDER **actualprov,
         if (!ossl_provider_up_ref(actualtmp)) {
             ERR_raise(ERR_LIB_CRYPTO, ERR_R_MALLOC_FAILURE);
             actualtmp = NULL;
-            goto err;
+            return 0;
         }
         *actualprov = actualtmp;
     }
@@ -665,8 +665,6 @@ int ossl_provider_add_to_store(OSSL_PROVIDER *prov, OSSL_PROVIDER **actualprov,
  err:
     CRYPTO_THREAD_unlock(store->lock);
-    if (actualprov != NULL)
-        ossl_provider_free(*actualprov);
     return 0;
 }
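A compact generic illustration of the control-flow shape being fixed (hypothetical lock and refcount helpers, not the OpenSSL internals): once the lock has been released, a failure must return directly rather than jump to an error label that unlocks again.

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t store_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical refcount helper that can fail. */
static int obj_up_ref(int *refs) { ++*refs; return 1; }

static int add_to_store(int *refs, int fail_early) {
    pthread_mutex_lock(&store_lock);
    if (fail_early)
        goto err;                          /* still locked: err must unlock */
    pthread_mutex_unlock(&store_lock);     /* lock released here */

    if (!obj_up_ref(refs)) {
        /* The lock is already released; jumping to err would unlock it a
         * second time (undefined behaviour), so return directly instead. */
        return 0;
    }
    return 1;

 err:
    pthread_mutex_unlock(&store_lock);
    return 0;
}

int main(void) {
    int refs = 1;
    printf("%d %d\n", add_to_store(&refs, 0), add_to_store(&refs, 1));
    return 0;
}
```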
Android: update default build.yml
@@ -110,7 +110,10 @@ uwp: android: #manifest_template: 'AndroidManifest.erb' - version: 4.4.2 + version: 7.0 + abis: + - arm + - aarch64 ## Note: in order to simplify debugging only app messages and system channels with priority informative and higher, and any errors are enabled by default logcatFilter: APP:I StrictMode:I DEBUG:I *:E
Change to only build cli by default
@@ -24,12 +24,12 @@ build/debug build/release: | build debug_cli: | build/debug @cc $(basic_flags) $(debug_flags) $(sanitize_flags) $(cli_source_files) -o build/debug/orca $(library_flags) -.PHONY: debug_ui -debug_ui: | build/debug - @cc $(basic_flags) $(debug_flags) $(sanitize_flags) $(tui_source_files) -o build/debug/orca_ui $(library_flags) +.PHONY: debug_tui +debug_tui: | build/debug + @cc $(basic_flags) $(debug_flags) $(sanitize_flags) $(tui_source_files) -o build/debug/orca_tui $(library_flags) .PHONY: debug -debug: debug_cli debug_ui +debug: debug_cli .PHONY: release_cli release_cli: | build/release @@ -40,7 +40,7 @@ release_tui: | build/release @cc $(basic_flags) $(release_flags) $(tui_source_files) -o build/release/orca_tui $(library_flags) .PHONY: release -release: release_cli release_tui +release: release_cli .PHONY: clean clean:
cosmetics and update
@@ -42,6 +42,7 @@ long options: --all : convert all possible hashes instead of only the best one that can lead to much overhead hashes use hcxhashtool to filter hashes + need hashcat --nonce-error-corrections >= 8 --eapoltimeout=<digit> : set EAPOL TIMEOUT (milliseconds) : default: 5000 ms --nonce-error-corrections=<digit> : set nonce error correction @@ -62,9 +63,12 @@ long options: to convert it to gpx, use GPSBabel: gpsbabel -i nmea -f hcxdumptool.nmea -o gpx -F file.gpx to display the track, open file.gpx with viking +--log=<file> : output logfile +--raw=<file> : output frames in HEX ASCII + : format: TIMESTAMP*LINKTYPE*FRAME --pmkid=<file> : output deprecated PMKID file (delimter *) --hccapx=<file> : output deprecated hccapx v4 file ---hccap=<file> : output deprecated hccap file (delimter *) +--hccap=<file> : output deprecated hccap file --john=<file> : output deprecated PMKID/EAPOL (JtR wpapsk-opencl/wpapsk-pmk-opencl) --prefix=<file> : convert everything to lists using this prefix (overrides single options): -o <file.22000> : output PMKID/EAPOL hash file @@ -93,8 +97,9 @@ Do not use hcxpcapngtool in combination with third party cap/pcap/pcapng cleanin It is much better to run gzip to compress the files. Wireshark, tshark and hcxpcapngtool will understand this. + hcxhashtool: new tool -$ hcxhashtool --hel +$ hcxhashtool -h hcxhashtool 6.0.0 (C) 2020 ZeroBeat usage: hcxhashtool <options> @@ -151,6 +156,7 @@ options: : no nonce error corrections --hccapx=<file> : output to deprecated hccapx file --hccap=<file> : output to ancient hccap file +--hccap-single : output to ancient hccap single files (MAC + count) --john=<file> : output to deprecated john file --help : show this help --version : show version
Permanently enable isatty check to ignore SIGWINCH.
@@ -2629,16 +2629,16 @@ option_list = ( 'being started separately using the generated \'apachectl\' ' 'script.'), - optparse.make_option('--isatty', action='store_true', default=False, - help='Flag indicating whether should assume being run in an ' - 'interactive terminal session. In this case Apache will not ' - 'replace this wrapper script, but will be run as a sub process.' - 'Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be ' - 'forwarded onto Apache, but SIGWINCH will be blocked so that ' - 'resizing of a terminal session window will not cause Apache ' - 'to shutdown. This is a separate option at this time rather ' - 'than being determined automatically while the reliability of ' - 'intercepting and forwarding signals is verified.'), + # optparse.make_option('--isatty', action='store_true', default=False, + # help='Flag indicating whether should assume being run in an ' + # 'interactive terminal session. In this case Apache will not ' + # 'replace this wrapper script, but will be run as a sub process.' + # 'Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be ' + # 'forwarded onto Apache, but SIGWINCH will be blocked so that ' + # 'resizing of a terminal session window will not cause Apache ' + # 'to shutdown. This is a separate option at this time rather ' + # 'than being determined automatically while the reliability of ' + # 'intercepting and forwarding signals is verified.'), ) def cmd_setup_server(params): @@ -3429,7 +3429,8 @@ def cmd_start_server(params): executable = os.path.join(config['server_root'], 'apachectl') - if config['isatty'] and sys.stdout.isatty(): + #if config['isatty'] and sys.stdout.isatty(): + if sys.stdout.isatty(): process = None def handler(signum, frame):
Switched to ConcurrentQueue
@@ -21,14 +21,12 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. using System; using System.Collections.Concurrent; using System.Collections.Generic; -using System.Globalization; using System.IO; using System.Net; using System.Reactive; using System.Reactive.Disposables; using System.Reactive.Linq; using System.Text; -using NetUV.Core.Buffers; using NetUV.Core.Handles; using Newtonsoft.Json; using NLog; @@ -59,8 +57,7 @@ namespace MiningCore.JsonRpc private readonly JsonSerializerSettings serializerSettings; private const int MaxRequestLength = 8192; - private object queueLock = new object(); - private Queue<byte[]> sendQueue; + private ConcurrentQueue<byte[]> sendQueue; private Async sendQueueDrainer; #region Implementation of IJsonRpcConnection @@ -74,8 +71,9 @@ namespace MiningCore.JsonRpc RemoteEndPoint = tcp.GetPeerEndPoint(); // initialize send queue - sendQueue = new Queue<byte[]>(); - sendQueueDrainer = loop.CreateAsync(handle => DrainSendQueue(tcp)); + sendQueue = new ConcurrentQueue<byte[]>(); + sendQueueDrainer = loop.CreateAsync(DrainSendQueue); + sendQueueDrainer.UserToken = tcp; var incomingLines = Observable.Create<string>(observer => { @@ -121,11 +119,7 @@ namespace MiningCore.JsonRpc // release handles handle.CloseHandle(); - - lock (queueLock) - { sendQueueDrainer.CloseHandle(); - } }); return Disposable.Create(() => @@ -173,30 +167,31 @@ namespace MiningCore.JsonRpc { Contract.RequiresNonNull(data, nameof(data)); - lock (queueLock) - { - if (sendQueueDrainer.IsValid) + try { sendQueue.Enqueue(data); sendQueueDrainer.Send(); } + + catch (ObjectDisposedException) + { + // ignored } } - private void DrainSendQueue(Tcp tcp) + private void DrainSendQueue(Async handle) { try { byte[] data; - lock (queueLock) - { - while (sendQueue.TryDequeue(out data) && - tcp.IsValid && !tcp.IsClosing && tcp.IsWritable) + var tcp = (Tcp) handle.UserToken; + + while (tcp?.IsValid == true && !tcp.IsClosing && tcp.IsWritable && + sendQueue.TryDequeue(out data)) { tcp.QueueWrite(data); } } - } catch (Exception ex) {
Make the path detection more flexible
@@ -39,7 +39,7 @@ namespace clap { : srcRoot_(computeSrcRoot(pluginPath)), buildRoot_(computeBuildRoot(pluginPath)), pluginName_(pluginName) {} static std::string computeSrcRoot(const std::string &pluginPath) { - static const std::regex r("(/.*)/cmake-builds/.*$", std::regex::optimize); + static const std::regex r("(/.*)/.*build.*/.*$", std::regex::optimize); std::smatch m; if (!std::regex_match(pluginPath, m, r)) @@ -48,7 +48,7 @@ namespace clap { } static std::string computeBuildRoot(const std::string &pluginPath) { - static const std::regex r("(/.*/cmake-builds/.*)/examples/plugins/.*\\.clap$", + static const std::regex r("(/.*/.*build.*(/.*)?)/examples/plugins/.*\\.clap$", std::regex::optimize); std::smatch m;
Improvement in ESP32 GpioPin toggle
@@ -154,15 +154,7 @@ HRESULT Library_win_dev_gpio_native_Windows_Devices_Gpio_GpioPin::Toggle___VOID(
         (driveMode == GpioPinDriveMode_OutputOpenSourcePullDown))
     {
         // ESP32 GPIO API doesn't offer a 'toggle', so need to rely on the last output value field and toggle that one
-        GpioPinValue state = (GpioPinValue)pThis[ FIELD___lastOutputValue ].NumericByRef().s4;
-
-        // ...handle the toggle...
-        GpioPinValue newState = GpioPinValue_Low;
-
-        if(state == GpioPinValue_Low)
-        {
-            newState = GpioPinValue_High;
-        }
+        GpioPinValue newState = (GpioPinValue)(GpioPinValue_High ^ (GpioPinValue)pThis[ FIELD___lastOutputValue ].NumericByRef().s4);
         // ...write back to the GPIO...
         gpio_set_level((gpio_num_t)pinNumber, newState);
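The replacement collapses the read-compare-branch sequence into one XOR, which works because the two logic levels are the integers 0 and 1. A tiny standalone check of that identity (plain ints, not the nanoFramework interop types):

```c
#include <assert.h>
#include <stdio.h>

int main(void) {
    for (int last = 0; last <= 1; last++) {
        int toggled = 1 ^ last;   /* HIGH (1) ^ last flips 0 <-> 1 */
        assert(toggled == !last);
        printf("last=%d -> new=%d\n", last, toggled);
    }
    return 0;
}
```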
BugID:22475679: fixed implicit declaration
#include "linkkit/wrappers/wrappers_defs.h" #include "linkkit/wrappers/wrappers_os.h" #include "ulog/ulog.h" +#include "aos/kernel.h" #ifndef NULL #define NULL ((void *)0) #define DEBUG_PRINTF(fmt, ...) //#define DEBUG_PRINTF printf +extern void aos_get_mac_hex(unsigned char mac[MAC_ADDRESS_SIZE]); +extern void aos_get_chip_code(unsigned char chip_code[CHIP_CODE_SIZE]); + //num is 0~0xF UINT8 tans_num2char( UINT8 num ) {
[core] stricter validation of request-URI begin

check that request-URI begins with '/', "http://", "https://", or is OPTIONS * request, or else reject with 400 Bad Request unless server.http-parseopt-header-strict = "disable" (default is enabled)

x-ref:
@@ -635,9 +635,15 @@ int http_request_parse(server *srv, connection *con) {
            reqline_hostlen = nuri - reqline_host;
            buffer_copy_string_len(con->request.uri, nuri, proto - nuri - 1);
-       } else {
+       } else if (!http_header_strict
+                  || (HTTP_METHOD_OPTIONS == con->request.http_method && uri[0] == '*' && uri[1] == '\0')) {
            /* everything looks good so far */
            buffer_copy_string_len(con->request.uri, uri, proto - uri - 1);
+       } else {
+           con->http_status = 400;
+           con->keep_alive = 0;
+           log_error_write(srv, __FILE__, __LINE__, "ss", "request-URI parse error -> 400 for:", uri);
+           return 0;
        }
        /* check uri for invalid characters */
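A hedged, self-contained approximation of the acceptance rule being added (simplified; the real checks live in lighttpd's http_request_parse and honor the strict-parsing option): a request-target passes only as origin-form, absolute-form with an http(s) scheme, or the literal '*' for OPTIONS.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool uri_is_acceptable(const char *method, const char *uri) {
    if (uri[0] == '/')
        return true;                                             /* origin-form */
    if (strncmp(uri, "http://", 7) == 0 || strncmp(uri, "https://", 8) == 0)
        return true;                                             /* absolute-form */
    if (strcmp(method, "OPTIONS") == 0 && strcmp(uri, "*") == 0)
        return true;                                             /* asterisk-form */
    return false;                                                /* -> 400 Bad Request */
}

int main(void) {
    printf("%d\n", uri_is_acceptable("GET", "/index.html"));     /* 1 */
    printf("%d\n", uri_is_acceptable("OPTIONS", "*"));           /* 1 */
    printf("%d\n", uri_is_acceptable("GET", "index.html"));      /* 0 */
    return 0;
}
```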
Add Go installation instructions.
@@ -27,11 +27,15 @@ Conan (C/C++): https://conan.io/center/tinyspline NuGet (C#): - ```xml <PackageReference Include="tinyspline" version="0.4.0.1" /> ``` +Go: +```bash +go get github.com/tinyspline/[email protected] +``` + Luarocks (Lua): ```bash luarocks install --server=https://tinyspline.github.io/lua tinyspline
website: fix feed urls
@@ -81,7 +81,7 @@ module.exports = function(grunt) {
       feed.item({
         title: post.title,
         description: content,
-        url: self.data.post_url + guid + ".html",
+        url: self.data.feed.post_url + guid + ".html",
         guid: guid,
         date: new Date(Date.parse(post.date)).toUTCString()
       });
Bug fix when sending to the same network prefix
@@ -103,9 +103,9 @@ owerror_t forwarding_send(OpenQueueEntry_t* msg) { msg->l3_sourceAdd.type=ADDR_128B; packetfunctions_ip128bToMac64b(&(msg->l3_destinationAdd),&temp_dest_prefix,&temp_dest_mac64b); - //xv poipoi -- get the src prefix as well - packetfunctions_ip128bToMac64b(&(msg->l3_sourceAdd),&temp_src_prefix,&temp_src_mac64b); - //XV -poipoi we want to check if the source address prefix is the same as destination prefix + // at this point, we still haven't written in the packet the source prefix + // that we will use - it depends whether the destination address is link-local or not + // if we are sending to a link-local address set the source prefix to link-local if (packetfunctions_isLinkLocal(&msg->l3_destinationAdd) || packetfunctions_isAllRoutersMulticast(&msg->l3_destinationAdd) || @@ -117,7 +117,7 @@ owerror_t forwarding_send(OpenQueueEntry_t* msg) { myprefix = &temp_src_prefix; sac = IPHC_SAC_STATELESS; dac = IPHC_DAC_STATELESS; - } else if (packetfunctions_sameAddress(&temp_dest_prefix,&temp_src_prefix)==FALSE && packetfunctions_isBroadcastMulticast(&(msg->l3_destinationAdd))==FALSE) { + } else if (packetfunctions_sameAddress(&temp_dest_prefix,idmanager_getMyID(ADDR_PREFIX))==FALSE && packetfunctions_isBroadcastMulticast(&(msg->l3_destinationAdd))==FALSE) { myprefix = idmanager_getMyID(ADDR_PREFIX); sac = IPHC_SAC_STATELESS; dac = IPHC_DAC_STATELESS; @@ -126,6 +126,7 @@ owerror_t forwarding_send(OpenQueueEntry_t* msg) { sac = IPHC_SAC_STATEFUL; dac = IPHC_DAC_STATEFUL; } + // myprefix now contains the pointer to the correct prefix to use (link-local or global) memcpy(&(msg->l3_sourceAdd.addr_128b[0]),myprefix->prefix,8); memcpy(&(msg->l3_sourceAdd.addr_128b[8]),myadd64->addr_64b,8);
Add storage/remote test for unknown user/group name.
@@ -210,6 +210,11 @@ testRun(void) storagePutP(storageNewWriteP(storagePgWrite, STRDEF("test"), .timeModified = 1555160001), BUFSTRDEF("TESTME")); +#ifdef TEST_CONTAINER_REQUIRED + storagePutP(storageNewWriteP(storagePgWrite, STRDEF("noname"), .timeModified = 1555160002), BUFSTRDEF("NONAME")); + HRN_SYSTEM_FMT("sudo chown 99999:99999 %s", strZ(storagePathP(storagePgWrite, STRDEF("noname")))); +#endif // TEST_CONTAINER_REQUIRED + // Path timestamp must be set after file is created since file creation updates it HRN_STORAGE_TIME(storagePgWrite, NULL, 1555160000); @@ -219,6 +224,9 @@ testRun(void) TEST_RESULT_STR_Z( callbackData.content, ". {path, m=0750, u=" TEST_USER ", g=" TEST_GROUP "}\n" +#ifdef TEST_CONTAINER_REQUIRED + "noname {file, s=6, m=0640, t=1555160002, u=99999, g=99999}\n" +#endif // TEST_CONTAINER_REQUIRED "test {file, s=6, m=0640, t=1555160001, u=" TEST_USER ", g=" TEST_GROUP "}\n", "check content"); }
travis: use default Ruby version unless defined explicitly
@@ -107,7 +107,7 @@ env: - S3_JOB_DIR="$TRAVIS_COMMIT"/"$TRAVIS_OS_NAME" - S3_DEPLOY_DIR="$TRAVIS_REPO_SLUG"/"$TRAVIS_BRANCH" - QTDIR="$HOME/Qt5.13.2/5.13.2/clang_64" - - RHO_RUBY="${RHO_RUBY:=2.7.0}" + - RHO_RUBY="${RHO_RUBY:=RUBY_VERSION}" - ANDROID_ABIS="${ANDROID_ABIS:=arm,aarch64}" #osx_image: xcode9.1
plugins: fix enum link
@@ -78,7 +78,7 @@ sudo kdb umount user/tests/together ``` The approach is not limited to validation via regular expressions, but -any values-validation plugin can be used, e.g. [enum](/src/plugins/enum). +any values-validation plugin can be used, e.g. [type](/src/plugins/type). For a full list refer to the section "Value Validation" in the [list of all plugins](/src/plugins/README.md).
[hardware] Fix hard-coded parameters
@@ -28,7 +28,6 @@ module axi_rab_wrap #( parameter type axi_resp_t = logic, parameter type axi_lite_req_t = logic, parameter type axi_lite_resp_t = logic - ) ( input logic clk_i, input logic rst_ni, @@ -69,8 +68,8 @@ module axi_rab_wrap #( .AXI_DATA_WIDTH (AxiDataWidth ), .AXI_S_ADDR_WIDTH (AxiAddrWidth ), .AXI_M_ADDR_WIDTH (AxiAddrWidth ), - .AXI_LITE_DATA_WIDTH (64/* TODO */ ), - .AXI_LITE_ADDR_WIDTH (32/* TODO */ ), + .AXI_LITE_DATA_WIDTH (AxiDataWidth ), + .AXI_LITE_ADDR_WIDTH (AxiAddrWidth ), .AXI_ID_WIDTH (AxiIdWidth ), .AXI_USER_WIDTH (AxiUserWidth ), .MH_FIFO_DEPTH (MhFifoDepth ) @@ -266,4 +265,3 @@ module axi_rab_wrap #( ); endmodule -
Change to split highlighting prediction based on is_playing
@@ -1418,7 +1418,7 @@ staticni void ged_draw(Ged *a, WINDOW *win, char const *filename, // mark buffer that it produces, then roll back the glyph buffer to where it // was before. This should produce results similar to having specialized UI // code that looks at each glyph and figures out the ports, etc. - if (a->needs_remarking) { + if (a->needs_remarking && !a->is_playing) { field_resize_raw_if_necessary(&a->scratch_field, a->field.height, a->field.width); field_copy(&a->field, &a->scratch_field);
Change the state context extension so it only provides a load method
/// @page state-context extension /// @brief extended state handling /// -/// This extension let the host specify how the plugin state should be saved or loaded -/// by setting a context prior to the save or load operation. +/// This extension let the host specify how the plugin state should be loaded. /// -/// If unspecified, then the context is `CLAP_STATE_CONTEXT_FULL`. -/// -/// Save and Load operations may have a different context. -/// Only the following sequences are specified: -/// -/// | save ctx | load ctx | result | -/// +------------+------------+-----------+ -/// | full | full | full | -/// | full | preset | preset | -/// | full | duplicate | duplicate | -/// | duplicate | duplicate | duplicate | -/// | duplicate | full | duplicate | -/// | preset | full | preset | -/// | preset | preset | preset | +/// Briefly, when loading a preset or duplicating a device, the plugin may want to partially load +/// the state and initialize certain things differently. #ifdef __cplusplus extern "C" { @@ -31,21 +18,18 @@ extern "C" { static CLAP_CONSTEXPR const char CLAP_EXT_STATE_CONTEXT[] = "clap.state-context.draft/1"; enum clap_plugin_state_context_type { - // saves and loads *everything* - CLAP_STATE_CONTEXT_FULL = 1, - // suitable for duplicating a plugin instance - CLAP_STATE_CONTEXT_FOR_DUPLICATE = 2, + CLAP_STATE_CONTEXT_FOR_DUPLICATE = 1, - // suitable for saving and loading a preset state - CLAP_STATE_CONTEXT_FOR_PRESET = 3, + // suitable for loading a state as a preset + CLAP_STATE_CONTEXT_FOR_PRESET = 2, }; typedef struct clap_plugin_state_context { - // Assign the context for subsequent calls to clap_plugin_state->save() or - // clap_plugin_state->load() of the clap_plugin_state extension. + // Loads the plugin state from stream. + // Returns true if the state was correctly restored. // [main-thread] - void (*set)(const clap_plugin_t *plugin, uint32_t context_type); + void (*load)(const clap_plugin_t *plugin, const clap_istream_t *stream, uint32_t context_type); } clap_plugin_state_context_t; #ifdef __cplusplus
[KBJ] Correct check for deleting INFO/END when rlen == len(ref)
@@ -1170,9 +1170,9 @@ cdef inline bcf_sync_end(VariantRecord record): # If INFO/END is not defined in the header, it doesn't exist in the record if end_id >= 0: info = bcf_get_info(hdr, record.ptr, b'END') - if info and not info.vptr: + if info and info.vptr: if bcf_update_info(hdr, record.ptr, b'END', NULL, 0, info.type) < 0: - raise ValueError('Unable to delete INFO') + raise ValueError('Unable to delete END') else: # Create END header, if not present if end_id < 0:
[mod_accesslog] reformat numeric timestamp code
@@ -870,7 +870,6 @@ static int log_access_record (const request_st * const r, buffer * const b, form } else { buffer * const ts_accesslog_str = &parsed_format->ts_accesslog_str; /* cache the generated timestamp (only if ! FORMAT_FLAG_TIME_BEGIN) */ - struct tm *tmptr; time_t t; struct tm tm; @@ -886,24 +885,19 @@ static int log_access_record (const request_st * const r, buffer * const b, form t = r->start_hp.tv_sec; } - #if defined(HAVE_STRUCT_TM_GMTOFF) - tmptr = localtime_r(&t, &tm); - #else /* HAVE_STRUCT_TM_GMTOFF */ - tmptr = gmtime_r(&t, &tm); - #endif /* HAVE_STRUCT_TM_GMTOFF */ - + const char *fmt = buffer_string_is_empty(&f->string) + ? NULL + : f->string.ptr; buffer_clear(ts_accesslog_str); - - if (buffer_string_is_empty(&f->string)) { #if defined(HAVE_STRUCT_TM_GMTOFF) - buffer_append_strftime(ts_accesslog_str, "[%d/%b/%Y:%H:%M:%S %z]", tmptr); - #else - buffer_append_strftime(ts_accesslog_str, "[%d/%b/%Y:%H:%M:%S +0000]", tmptr); + buffer_append_strftime(ts_accesslog_str, + fmt ? fmt : "[%d/%b/%Y:%H:%M:%S %z]", + localtime_r(&t, &tm)); + #else /* HAVE_STRUCT_TM_GMTOFF */ + buffer_append_strftime(ts_accesslog_str, + fmt ? fmt : "[%d/%b/%Y:%H:%M:%S +0000]", + gmtime_r(&t, &tm)); #endif /* HAVE_STRUCT_TM_GMTOFF */ - } else { - buffer_append_strftime(ts_accesslog_str, f->string.ptr, tmptr); - } - buffer_append_string_buffer(b, ts_accesslog_str); } break;
chip/mt_scp/mt818x/registers.h: Format with clang-format BRANCH=none TEST=none
#define SCP_CACHE_ENTRY_BASE(x) (SCP_CACHE_SEL(x) + 0x2000) #define SCP_CACHE_ENTRY(x, reg) REG32(SCP_CACHE_ENTRY_BASE(x) + (reg)*4) #define SCP_CACHE_END_ENTRY_BASE(x) (SCP_CACHE_SEL(x) + 0x2040) -#define SCP_CACHE_END_ENTRY(x, reg) REG32(SCP_CACHE_END_ENTRY_BASE(x) + \ - (reg)*4) +#define SCP_CACHE_END_ENTRY(x, reg) REG32(SCP_CACHE_END_ENTRY_BASE(x) + (reg)*4) #define SCP_CACHE_ENTRY_C BIT(8) #define SCP_CACHE_ENTRY_BASEADDR_MASK (0xfffff << 12)
ta: remove unnecessary variable definition/initialization
@@ -1213,7 +1213,6 @@ static lv_res_t lv_ta_scrollable_signal(lv_obj_t * scrl, lv_signal_t sign, void if(ext->label) { if(lv_obj_get_width(ta) != lv_area_get_width(param) || lv_obj_get_height(ta) != lv_area_get_height(param)) { - lv_obj_t * scrl = lv_page_get_scrl(ta); lv_style_t * style_scrl = lv_obj_get_style(scrl); lv_obj_set_width(ext->label, lv_obj_get_width(scrl) - 2 * style_scrl->body.padding.hor); lv_obj_set_pos(ext->label, style_scrl->body.padding.hor, style_scrl->body.padding.ver);
Fix Windows image builds
@@ -98,7 +98,8 @@ def buildWindowsManagedImage(String os_series, String img_name_suffix, String la def managed_image_name_id = image_id def gallery_image_version = image_version def vm_rg_name = "build-${managed_image_name_id}-${img_name_suffix}-${BUILD_NUMBER}" - def vm_name = "${os_series}-vm" + // Azure VM names must be 15 characters or less + def vm_name = img_name_suffix.drop(7) + "-${BUILD_NUMBER}" def jenkins_rg_name = params.JENKINS_RESOURCE_GROUP def jenkins_vnet_name = params.JENKINS_VNET_NAME def jenkins_subnet_name = params.JENKINS_SUBNET_NAME @@ -154,7 +155,8 @@ def buildWindowsManagedImage(String os_series, String img_name_suffix, String la --admin-username ${SSH_USERNAME} \ --admin-password ${SSH_PASSWORD} \ --image ${AZURE_IMAGE_ID} \ - --public-ip-address "" + --public-ip-address \"\" \ + --nsg-rule NONE ''' } }
iOS doc script fix
@@ -10,10 +10,13 @@ distDir="${baseDir}/dist/ios" rm -rf ${tempDir} mkdir -p ${tempDir} mkdir -p ${tempDir}/ui +mkdir -p ${tempDir}/utils cp -r ${distDir}/CartoMobileSDK.framework/Headers/CartoMobileSDK.h ${tempDir}/CartoMobileSDK.h cp -r ${baseDir}/generated/ios-objc/proxies/* ${tempDir} cp -r ${baseDir}/ios/objc/ui/MapView.h ${tempDir}/ui/MapView.h cp -r ${baseDir}/ios/objc/ui/MapView.mm ${tempDir}/ui/MapView.mm +cp -r ${baseDir}/ios/objc/utils/ExceptionWrapper.h ${tempDir}/utils/ExceptionWrapper.h +cp -r ${baseDir}/ios/objc/utils/ExceptionWrapper.mm ${tempDir}/utils/ExceptionWrapper.mm find ${tempDir} -name "*NTIOSUtils.*" -exec rm {} \; # Execute Jazzy
tests/run-tests: minimal: Exclude recently added subclass_native_init.py. It relies on MICROPY_CPYTHON_COMPAT being defined.
@@ -313,6 +313,7 @@ def run_tests(pyb, tests, args, base_path="."): skip_tests.add('misc/rge_sm.py') # too large elif args.target == 'minimal': skip_tests.add('basics/class_inplace_op.py') # all special methods not supported + skip_tests.add('basics/subclass_native_init.py')# native subclassing corner cases not support skip_tests.add('misc/rge_sm.py') # too large skip_tests.add('micropython/opt_level.py') # don't assume line numbers are stored skip_tests.add('float/float_parse.py') # minor parsing artifacts with 32-bit floats
use more portable variable initialization
@@ -141,6 +141,8 @@ static struct flb_http_client *flb_aws_client_mock_vtable_request( struct flb_aws_client *aws_client, int method, const char *uri, const char *body, size_t body_len, struct flb_aws_header *dynamic_headers, size_t dynamic_headers_len) { + int h; + int i; int ret; /* Get access to mock */ @@ -168,7 +170,7 @@ static struct flb_http_client *flb_aws_client_mock_vtable_request( mk_list_init(&c->headers); /* Response configuration */ - for (int i = 0; i < response->length; ++i) { + for (i = 0; i < response->length; ++i) { struct flb_aws_client_mock_response_config *response_config = &(response->config_parameters[i]); void *val1 = response_config->config_value; @@ -178,7 +180,7 @@ static struct flb_http_client *flb_aws_client_mock_vtable_request( if (response_config->config_parameter == FLB_AWS_CLIENT_MOCK_EXPECT_HEADER) { int header_found = FLB_FALSE; /* Search for header in request */ - for (int h = 0; h < dynamic_headers_len; ++h) { + for (h = 0; h < dynamic_headers_len; ++h) { ret = strncmp(dynamic_headers[h].key, (char *)val1, dynamic_headers[h].key_len); if (ret == 0) {
Use more reliable NMemInfo::GetMemInfo().RSS instead of TRusage::Get().Rss. Also more consistent with other catboost libs.
#include <catboost/libs/logging/logging.h> -#include <util/system/rusage.h> +#include <util/system/mem_info.h> inline void DumpMemUsage(const TString& msg) { - CATBOOST_DEBUG_LOG << "Mem usage: " << msg << ": " << TRusage::Get().Rss << Endl; + CATBOOST_DEBUG_LOG << "Mem usage: " << msg << ": " << NMemInfo::GetMemInfo().RSS << Endl; }
libs/libc/stdio: fix ungetc operation Do not modify errno in case of failure Return EOF if the value of c equals EOF
@@ -43,11 +43,10 @@ int ungetc(int c, FAR FILE *stream) int nungotten; #endif - /* Verify that a non-NULL stream was provided */ + /* Verify that a non-NULL stream was provided and c is not EOF */ - if (!stream) + if (!stream || c == EOF) { - set_errno(EBADF); return EOF; } @@ -55,7 +54,6 @@ int ungetc(int c, FAR FILE *stream) if ((stream->fs_fd < 0) || ((stream->fs_oflags & O_RDOK) == 0)) { - set_errno(EBADF); return EOF; } @@ -70,7 +68,6 @@ int ungetc(int c, FAR FILE *stream) else #endif { - set_errno(ENOMEM); return EOF; } }
zephyr: disable flash storage This code fails to compile (see b/176828988). Disable by default until this code can be compiled. BRANCH=none TEST=less compile errors
@@ -102,9 +102,9 @@ config PLATFORM_EC_EXTPOWER_GPIO project should define a GPIO pin named GPIO_AC_PRESENT, with extpower_interrupt configured as the handler in gpio_map.h. +# TODO(b/176828988): enable by default once the code can compile config PLATFORM_EC_FLASH bool "Enable flash support" - default y help Enables access to the device's flash through a simple API. With this is it possible for the EC to update its flash while running,
make sure verilator builds correctly
@@ -14,7 +14,7 @@ default: $(sim) debug: $(sim_debug) -CXXFLAGS := $(CXXFLAGS) -O1 -std=c++11 -I$(RISCV)/include +CXXFLAGS := $(CXXFLAGS) -O1 -std=c++11 -I$(RISCV)/include -D__STDC_FORMAT_MACROS LDFLAGS := $(LDFLAGS) -L$(RISCV)/lib -Wl,-rpath,$(RISCV)/lib -L$(sim_dir) -lfesvr -lpthread include $(base_dir)/Makefrag
A bad commit
@@ -59,4 +59,4 @@ If this config does not work for you, try the flavor "tap-preferred" and a short If you want to use a tap-hold with a keycode from a different code page, you have to define another behavior with another "bindings" parameter.For example, if you want to use SHIFT and volume up, define the bindings like `bindings = <&kp>, <&cp>;`. Only single-argument behaviors are supported at the moment. #### Note -Astute readers may notice similarities between the possible behaviors in ZMK and other firmware, such as QMK. The hold-preferred flavor works similar to the `HOLD_ON_OTHER_KEY_PRESS` setting. The 'balanced' flavor is similar to the `PERMISSIVE_HOLD` setting, and the `tap-preferred` flavor is similar to `IGNORE_MOD_TAP_INTERRUPT`. +Astute readers may notice similarities between the possible behaviors in ZMK and other firmware, such as QMK. The hold-preferred flavor works similar to the `HOLD_ON_OTHER_KEY_PRESS` setting. The 'balanced' flavor is similar to the `PERMISSIVE_HOLD` setting, and the `tap-preferred` flavor is similar to `IGNORE_MOD_TAP_INTERRUPT`
gta vc update
@@ -673,7 +673,7 @@ void ApplyIniOptions() fCustomRadarPosYIV = 116.0f - 7.5f; pattern = hook::pattern("D9 05 ? ? ? ? D8 CB DA 2C 24 DE C1"); - injector::WriteMemory<float>(*pattern.count(1).get(0).get<uint32_t*>(2), fCustomRadarPosYIV); //0x68FD34 + injector::WriteMemory<float>(*pattern.count(1).get(0).get<uint32_t*>(2), fCustomRadarPosYIV, true); //0x68FD34 fCustomRadarHeightIV = 76.0f + 5.0f; pattern = hook::pattern("D9 05 ? ? ? ? D8 C9 DD DB D9 C1 D8 CB"); injector::WriteMemory<float>(*pattern.count(1).get(0).get<uint32_t*>(2), fCustomRadarHeightIV, true); //0x68FD30
taniks: undefine unnecessary PD config Taniks doesn't support USB4, so this config is not needed. BRANCH=none TEST=make -j BOARD=taniks
#define USB_PORT_COUNT 1 #define CONFIG_USB_PORT_POWER_DUMB -/* USB Type C and USB PD defines */ -#define CONFIG_USB_PD_REQUIRE_AP_MODE_ENTRY - #define CONFIG_IO_EXPANDER #define CONFIG_IO_EXPANDER_NCT38XX #define CONFIG_IO_EXPANDER_PORT_COUNT 1 +/* USB Type C and USB PD defines */ #define CONFIG_USB_PD_TCPM_PS8815 #define CONFIG_USB_PD_TCPM_PS8815_FORCE_DID #define CONFIG_USBC_PPC_SYV682X
mmapstorage: check return value of close()
@@ -827,7 +827,11 @@ int ELEKTRA_PLUGIN_FUNCTION (mmapstorage, get) (Plugin * handle ELEKTRA_UNUSED, if (sbuf.st_size == 0) { // empty mmap file - close (fd); + if (close (fd) != 0) + { + ELEKTRA_LOG_WARNING ("could not close"); + goto error; + } return ELEKTRA_PLUGIN_STATUS_SUCCESS; }
[cmake] ABI -> SO_current 6
@@ -33,7 +33,7 @@ set(SICONOS_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}") # If any interfaces have been removed since the last public release, then set # age to 0. -set(SO_current 5) +set(SO_current 6) set(SO_revision 0) set(SO_age 0)
Sane World View The z=0 plane is now initially aligned to the device's z=0 plane during calibration. (works really well for the tracker, which is oriented with Z in the proper direction when sitting on a horizontal surface)
@@ -895,9 +895,9 @@ void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagni static void WhereIsTheTrackedObjectAxisAngle(FLT *posOut, FLT *rotation, Point lhPoint) { - posOut[0] = lhPoint.x; - posOut[1] = lhPoint.y; - posOut[2] = lhPoint.z; + posOut[0] = -lhPoint.x; + posOut[1] = -lhPoint.y; + posOut[2] = -lhPoint.z; rotatearoundaxis(posOut, posOut, rotation, rotation[3]); @@ -1273,8 +1273,12 @@ static Point SolveForLighthouse(FLT posOut[3], FLT quatOut[4], TrackedObject *ob quatrotatevector(tmpPos, rotQuat, tmpPos); //} + //static int foo = 0; + + //if (0 == foo) if (setLhCalibration) { + //foo = 1; if (so->ctx->bsd[lh].PositionSet) { printf("Warning: resetting base station calibration data"); @@ -1297,20 +1301,22 @@ static Point SolveForLighthouse(FLT posOut[3], FLT quatOut[4], TrackedObject *ob quatrotatevector(wcPos, so->ctx->bsd[lh].Pose.Rot, objPos); - wcPos[0] -= so->ctx->bsd[lh].Pose.Pos[0]; - wcPos[1] -= so->ctx->bsd[lh].Pose.Pos[1]; - wcPos[2] -= so->ctx->bsd[lh].Pose.Pos[2]; + wcPos[0] += so->ctx->bsd[lh].Pose.Pos[0]; + wcPos[1] += so->ctx->bsd[lh].Pose.Pos[1]; + wcPos[2] += so->ctx->bsd[lh].Pose.Pos[2]; - printf(" <% 04.4f, % 04.4f, % 04.4f > ", wcPos[0], wcPos[1], wcPos[2]); + so->OutPose.Pos[0] = wcPos[0]; + so->OutPose.Pos[1] = wcPos[1]; + so->OutPose.Pos[2] = wcPos[2]; - //posOut = + printf(" <% 04.4f, % 04.4f, % 04.4f > ", wcPos[0], wcPos[1], wcPos[2]); if (logFile) { updateHeader(logFile); fclose(logFile); } - //fgetc(stdin); + return refinedEstimateGd; } @@ -1526,8 +1532,8 @@ int PoserTurveyTori( SurviveObject * so, PoserData * poserData ) FLT norm[3] = { so->sensor_normals[i * 3 + 0] , so->sensor_normals[i * 3 + 1] , so->sensor_normals[i * 3 + 2] }; FLT point[3] = { so->sensor_locations[i * 3 + 0] , so->sensor_locations[i * 3 + 1] , so->sensor_locations[i * 3 + 2] }; - quatrotatevector(norm, downQuat, norm); - quatrotatevector(point, downQuat, point); + //quatrotatevector(norm, downQuat, norm); + //quatrotatevector(point, downQuat, point); to->sensor[sensorCount].normal.x = norm[0]; to->sensor[sensorCount].normal.y = norm[1];
Generate ETIMEDOUT when remaining_time is reached and EHOSTDOWN when all addresses are tried
@@ -238,7 +238,10 @@ httpAddrConnect2( } if (!addrlist && nfds == 0) + { + errno = EHOSTDOWN; break; + } /* * See if we can connect to any of the addresses so far... @@ -369,6 +372,9 @@ httpAddrConnect2( remaining -= 250; } + if (remaining <= 0) + errno = ETIMEDOUT; + while (nfds > 0) { nfds --;
use oidc_proto_state_destroy instead of json_decref
@@ -516,7 +516,7 @@ int oidc_proto_authorization_request(request_rec *r, } /* cleanup */ - json_decref(proto_state); + oidc_proto_state_destroy(proto_state); /* see if we need to preserve POST parameters through Javascript/HTML5 storage */ if (oidc_post_preserve_javascript(r, authorization_request, NULL,
plugins BUGFIX set correct validation error code
@@ -489,7 +489,7 @@ lyplg_type_validate_patterns(struct lysc_pattern **patterns, const char *str, si return ly_err_new(err, LY_EVALID, LYVE_DATA, NULL, eapptag, patterns[u]->emsg); } else { const char *inverted = patterns[u]->inverted ? "inverted " : ""; - return ly_err_new(err, LY_EVALID, 0, NULL, eapptag, + return ly_err_new(err, LY_EVALID, LYVE_DATA, NULL, eapptag, LY_ERRMSG_NOPATTERN, (int)str_len, str, inverted, patterns[u]->expr); } }
ssl client: add key_opaque_algs command line option
@@ -115,6 +115,7 @@ int main( void ) #define DFL_USE_SRTP 0 #define DFL_SRTP_FORCE_PROFILE 0 #define DFL_SRTP_MKI "" +#define DFL_KEY_OPAQUE_ALG "none" #define GET_REQUEST "GET %s HTTP/1.0\r\nExtra-header: " #define GET_REQUEST_END "\r\n\r\n" @@ -343,6 +344,13 @@ int main( void ) #define USAGE_SERIALIZATION "" #endif +#define USAGE_KEY_OPAQUE_ALGS \ + " key_opaque_algs=%%s Allowed opaque key algorithms.\n" \ + " coma-separated pair of values among the following:\n" \ + " rsa-sign-pkcs1, rsa-sign-pss, rsa-decrypt,\n" \ + " ecdsa-sign, ecdh, none (only acceptable for\n" \ + " the second value).\n" \ + #if defined(MBEDTLS_SSL_PROTO_TLS1_3) #define USAGE_TLS1_3_KEY_EXCHANGE_MODES \ " tls13_kex_modes=%%s default: all\n" \ @@ -411,6 +419,7 @@ int main( void ) USAGE_CURVES \ USAGE_SIG_ALGS \ USAGE_DHMLEN \ + USAGE_KEY_OPAQUE_ALGS \ "\n" #if defined(MBEDTLS_SSL_PROTO_TLS1_3) @@ -523,6 +532,8 @@ struct options int use_srtp; /* Support SRTP */ int force_srtp_profile; /* SRTP protection profile to use or all */ const char *mki; /* The dtls mki value to use */ + const char *key_opaque_alg1; /* Allowed opaque key alg 1 */ + const char *key_opaque_alg2; /* Allowed Opaque key alg 2 */ } opt; #include "ssl_test_common_source.c" @@ -885,6 +896,8 @@ int main( int argc, char *argv[] ) opt.use_srtp = DFL_USE_SRTP; opt.force_srtp_profile = DFL_SRTP_FORCE_PROFILE; opt.mki = DFL_SRTP_MKI; + opt.key_opaque_alg1 = DFL_KEY_OPAQUE_ALG; + opt.key_opaque_alg2 = DFL_KEY_OPAQUE_ALG; for( i = 1; i < argc; i++ ) { @@ -1308,6 +1321,12 @@ int main( int argc, char *argv[] ) { opt.mki = q; } + else if( strcmp( p, "key_opaque_algs" ) == 0 ) + { + if ( key_opaque_alg_parse( q, &opt.key_opaque_alg1, + &opt.key_opaque_alg2 ) != 0 ) + goto usage; + } else goto usage; }
Fix call to mbedtls_pk_wrap_as_opaque(): use usage variable instead of PSA_KEY_USAGE_SIGN_HASH
@@ -1747,8 +1747,7 @@ int main( int argc, char *argv[] ) } if( ( ret = mbedtls_pk_wrap_as_opaque( &pkey, &key_slot, psa_alg, - PSA_KEY_USAGE_SIGN_HASH, - psa_alg2 ) ) != 0 ) + usage, psa_alg2 ) ) != 0 ) { mbedtls_printf( " failed\n ! " "mbedtls_pk_wrap_as_opaque returned -0x%x\n\n", (unsigned int) -ret );
prevent heap overflow access
@@ -884,7 +884,10 @@ sixel_decode_raw( } *ncolors = image.ncolors + 1; - *palette = (unsigned char *)sixel_allocator_malloc(allocator, (size_t)(*ncolors * 3)); + int alloc_size = *ncolors; + if (alloc_size < 256) // memory access range should be 0 <= 255 (in write_png_to_file) + alloc_size = 256; + *palette = (unsigned char *)sixel_allocator_malloc(allocator, (size_t)(alloc_size * 3)); if (palette == NULL) { sixel_allocator_free(allocator, image.data); sixel_helper_set_additional_message(
Fix typo in comment of flushAppendOnlyFile
@@ -364,7 +364,7 @@ ssize_t aofWrite(int fd, const char *buf, size_t len) { /* Write the append only file buffer on disk. * * Since we are required to write the AOF before replying to the client, - * and the only way the client socket can get a write is entering when the + * and the only way the client socket can get a write is entering when * the event loop, we accumulate all the AOF writes in a memory * buffer and write it on disk using this function just before entering * the event loop again.
api/rootfingerprint: add unit tests
@@ -28,3 +28,37 @@ pub fn process() -> Result<Response, Error> { fingerprint: fingerprint.to_vec(), })) } + +#[cfg(test)] +mod tests { + use super::*; + + use bitbox02::keystore::lock; + use bitbox02::testing::mock_unlocked_using_mnemonic; + + #[test] + fn test_process() { + lock(); + assert_eq!(process(), Err(Error::Generic)); + + mock_unlocked_using_mnemonic( + "purity concert above invest pigeon category peace tuition hazard vivid latin since legal speak nation session onion library travel spell region blast estate stay" + ); + assert_eq!( + process(), + Ok(Response::Fingerprint(pb::RootFingerprintResponse { + fingerprint: vec![0x02, 0x40, 0xe9, 0x2a], + })) + ); + + mock_unlocked_using_mnemonic( + "small agent wife animal marine cloth exit thank stool idea steel frame", + ); + assert_eq!( + process(), + Ok(Response::Fingerprint(pb::RootFingerprintResponse { + fingerprint: vec![0xf4, 0x0b, 0x46, 0x9a], + })) + ); + } +}
Fixing uninitialized ncpu value on unsupported platforms. Thanks to This closes issue on GitHub.
@@ -91,9 +91,12 @@ nxt_lib_start(const char *app, char **argv, char ***envp) #elif (NXT_HPUX) n = mpctl(MPC_GETNUMSPUS, NULL, NULL); +#else + n = 0; + #endif - nxt_debug(&nxt_main_task, "ncpu: %ui", n); + nxt_debug(&nxt_main_task, "ncpu: %d", n); if (n > 1) { nxt_ncpu = n;
tweak install path for use with slurm/MPI stacks
@@ -23,7 +23,7 @@ Source1: OHPC_macros BuildRequires: libevent-devel -%global install_path %{OHPC_LIBS}/%{pname}/%version +%global install_path %{OHPC_LIBS}/%{pname} %description The Process Management Interface (PMI) has been used for quite some time as a @@ -86,8 +86,8 @@ EOF %changelog * Tue Sep 26 2017 Karl W Schulz <[email protected]> - 2.0.1-1 -- add patch to disable c++ checks -- use mcmodel=large for aarch64 +- downgrade to v1.2.3 for slurm support +- tweak install path * Thu Sep 21 2017 Adrian Reber <[email protected]> - 2.0.1-1 - Update to 2.0.1
st.c: Fix copy+paste oops. (fixes
@@ -200,8 +200,8 @@ int lily_hash_take(lily_state *s, lily_hash_val *table, lily_value *boxed_key) ptr->next = ptr->next->next; table->num_entries--; - lily_stack_push_and_destroy(s, ptr->boxed_key); - lily_stack_push_and_destroy(s, ptr->record); + lily_stack_push_and_destroy(s, tmp->boxed_key); + lily_stack_push_and_destroy(s, tmp->record); lily_free(tmp); return 1; }
sidebarlistheader: fixing potential undefined ref
@@ -45,7 +45,7 @@ export function SidebarListHeader(props: { const metadata = associations?.groups?.[groupPath]?.metadata; const memberMetadata = - groupPath ? metadata.vip === 'member-metadata' : false; + groupPath && metadata ? metadata.vip === 'member-metadata' : false; const isAdmin = memberMetadata || (role === 'admin') || (props.workspace?.type === 'home') || (props.workspace?.type === 'messages');
thread: %group-leave-graph now compiles
(pure:m !>(~)) :: :: get graphs associated with group and archive them +;< =associations:met bind:m + %+ scry associations:met + ;: weld + /gx/metadata-store/resource/group + (en-path:res resource.update) + /noun + == =/ graphs=(list path) - %+ turn - %~ tap in - %~ key by - ^- associations:met - %+ scry - %noun - (weld /group (en-path:res resource.update)) + %+ turn ~(tap in ~(key by associations)) |= [g=group-path:met m=md-resource:met] ^- path app-path.m -|- -?~ groups +;< =bowl:spider bind:m get-bowl:strandio +|- ^- form:m +=* loop $ +?~ graphs (pure:m !>(~)) ;< ~ bind:m - %+ poke-our %graph-store - !> ^- update:graph-store - [%archive-graph (de-path:res i.groups)] -$(groups t.groups) + %^ poke-our + %graph-store + %graph-update + !> ^- update:gra + [%0 now.bowl [%archive-graph (de-path:res i.graphs)]] +loop(graphs t.graphs)
tcp: fix skipping RSTs in SYN_SENT state Type: fix
@@ -215,6 +215,7 @@ tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc) break; case TCP_STATE_SYN_SENT: /* Do not program ntf because the connection is half-open */ + tc->rst_state = tc->state; tcp_handle_rst (tc); break; case TCP_STATE_ESTABLISHED:
proc: detach thread from the process before it is zombified
@@ -510,6 +510,7 @@ void proc_threadDestroy(void) int zombie = 0; hal_spinlockSet(&threads_common.spinlock); + thr->process = NULL; if (proc != NULL) { LIST_REMOVE_EX(&proc->threads, thr, procnext, procprev); zombie = (proc->threads == NULL);