Columns: message (string, length 6 to 474); diff (string, length 8 to 5.22k)
zephyr/shim/src/gpio.c: Format with clang-format BRANCH=none TEST=none
@@ -73,8 +73,8 @@ static const struct gpio_config configs[] = { * point directly into the table by exposing the gpio_config struct. */ -#define GPIO_PTRS(id) const struct gpio_dt_spec * const \ - GPIO_DT_NAME(GPIO_SIGNAL(id)) = \ +#define GPIO_PTRS(id) \ + const struct gpio_dt_spec *const GPIO_DT_NAME(GPIO_SIGNAL(id)) = \ &configs[GPIO_SIGNAL(id)].spec; #if DT_NODE_EXISTS(DT_PATH(named_gpios)) @@ -137,8 +137,7 @@ void gpio_set_level(enum gpio_signal signal, int value) return; int rv = gpio_pin_set_raw(configs[signal].spec.port, - configs[signal].spec.pin, - value); + configs[signal].spec.pin, value); if (rv < 0) { LOG_ERR("Cannot write %s (%d)", configs[signal].name, rv); @@ -219,11 +218,11 @@ gpio_flags_t convert_to_zephyr_flags(int ec_flags) } if (ec_flags & GPIO_INT_F_RISING) - zephyr_flags |= GPIO_INT_ENABLE - | GPIO_INT_EDGE | GPIO_INT_HIGH_1; + zephyr_flags |= GPIO_INT_ENABLE | GPIO_INT_EDGE | + GPIO_INT_HIGH_1; if (ec_flags & GPIO_INT_F_FALLING) - zephyr_flags |= GPIO_INT_ENABLE - | GPIO_INT_EDGE | GPIO_INT_LOW_0; + zephyr_flags |= GPIO_INT_ENABLE | GPIO_INT_EDGE | + GPIO_INT_LOW_0; if (ec_flags & GPIO_INT_F_LOW) zephyr_flags |= GPIO_INT_ENABLE | GPIO_INT_LOW_0; if (ec_flags & GPIO_INT_F_HIGH)
BugID:17646749:[hal] fix HAL_Fread/Fwrite() prototype mismatch
@@ -639,11 +639,12 @@ void *HAL_Fopen(const char *path, const char *mode) return (void *)fopen(path, mode); } -size_t HAL_Fread(void *buff, size_t size, size_t count, void *stream) +uint32_t HAL_Fread(void *buff, uint32_t size, uint32_t count, void *stream) { return fread(buff, size, count, (FILE *)stream); } -size_t HAL_Fwrite(const void *ptr, size_t size, size_t count, void *stream) + +uint32_t HAL_Fwrite(const void *ptr, uint32_t size, uint32_t count, void *stream) { return fwrite(ptr, size, count, (FILE *)stream); }
FEC: queue repair symbols depending on the generation order
@@ -74,7 +74,7 @@ static __attribute__((always_inline)) void put_item_at_index(block_fec_framework // adds a repair symbol in the queue waiting for the symbol to be sent static __attribute__((always_inline)) void queue_repair_symbol(picoquic_cnx_t *cnx, block_fec_framework_t *bff, repair_symbol_t *rs, fec_block_t *fb){ - int idx = ((uint32_t) rs->repair_fec_payload_id.source_fpid.raw) % MAX_QUEUED_REPAIR_SYMBOLS; + int idx = ((uint32_t) bff->repair_symbols_queue_head + bff->repair_symbols_queue_length) % MAX_QUEUED_REPAIR_SYMBOLS; if (has_repair_symbol_at_index(bff, idx)) { remove_item_at_index(cnx, bff, idx); if (bff->repair_symbols_queue_length > 1 && bff->repair_symbols_queue_head == idx) {
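The change above replaces an index derived from the repair symbol's payload ID with the classic ring-buffer slot (head + length) % capacity, so queued symbols keep their generation order. A minimal sketch of that indexing scheme, using hypothetical names rather than picoquic's actual types:

    #include <stdio.h>

    #define QUEUE_CAP 8                        /* stands in for MAX_QUEUED_REPAIR_SYMBOLS */

    typedef struct {
        int items[QUEUE_CAP];
        int head;                              /* index of the oldest queued item */
        int length;                            /* number of items currently queued */
    } ring_queue_t;

    /* Enqueue at (head + length) % capacity so items are stored in insertion
     * (i.e. generation) order; when full, the oldest slot is overwritten. */
    static void enqueue(ring_queue_t *q, int item)
    {
        int idx = (q->head + q->length) % QUEUE_CAP;
        q->items[idx] = item;
        if (q->length == QUEUE_CAP)
            q->head = (q->head + 1) % QUEUE_CAP;   /* dropped the oldest item */
        else
            q->length++;
    }

    int main(void)
    {
        ring_queue_t q = {0};
        for (int i = 0; i < 10; i++)
            enqueue(&q, i);
        for (int i = 0; i < q.length; i++)
            printf("%d ", q.items[(q.head + i) % QUEUE_CAP]);
        printf("\n");                          /* prints "2 3 4 5 6 7 8 9": oldest to newest */
        return 0;
    }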
libc/stdio: fix rounding errors for fractional values less than 1
@@ -127,12 +127,13 @@ int __dtoa_engine(double x, FAR struct dtoa_s *dtoa, int max_digits, /* If limiting decimals, then limit the max digits to no more than the * number of digits left of the decimal plus the number of digits right - * of the decimal + * of the decimal. If the integer value is 0, there are only values to + * the right of the decimal point in dtoa->digits. */ if (max_decimals != 0) { - max_digits = MIN(max_digits, max_decimals + MAX(exp + 1, 1)); + max_digits = MIN(max_digits, max_decimals + MAX(exp + 1, 0)); } /* Round nearest by adding 1/2 of the last digit before converting to
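The adjustment above only matters when the value has no digits left of the decimal point (exp + 1 <= 0), which is exactly the case the new comment describes. Assuming max_digits is not already the smaller operand of the MIN, the bound works out as:

    for 0 < x < 1:  exp + 1 <= 0                                  (integer part is 0)
    old bound: max_decimals + MAX(exp + 1, 1) = max_decimals + 1  -> one digit too many kept before "round nearest"
    new bound: max_decimals + MAX(exp + 1, 0) = max_decimals      -> rounding happens at the requested decimal place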
fix(Kconfig): Add LV_USE_GRIDNAV and LV_USE_FRAGMENT to Kconfig
@@ -985,6 +985,14 @@ menu "LVGL configuration" bool "Enable Monkey test" default n + config LV_USE_GRIDNAV + bool "Enable grid navigation" + default n + + config LV_USE_FRAGMENT + bool "Enable lv_obj fragment" + default n + config LV_USE_IMGFONT bool "draw img in label or span obj" default n
sse2: add Hasindu Gamaarachchi and Jeff Daily copyrights to header
* 2015-2017 John W. Ratcliff <[email protected]> * 2015 Brandon Rowlett <[email protected]> * 2015 Ken Fast <[email protected]> + * 2017 Hasindu Gamaarachchi <[email protected]> + * 2018 Jeff Daily <[email protected]> */ #if !defined(SIMDE__SSE2_H)
engine: do not release parsers on shutdown
@@ -713,11 +713,6 @@ int flb_engine_shutdown(struct flb_config *config) /* router */ flb_router_exit(config); -#ifdef FLB_HAVE_PARSER - /* parsers */ - flb_parser_exit(config); -#endif - /* cleanup plugins */ flb_filter_exit(config); flb_input_exit_all(config);
fix(refr): initialize row_cnt to silence a warning
@@ -902,7 +902,7 @@ void refr_obj(lv_draw_ctx_t * draw_ctx, lv_obj_t * obj) return; } - int32_t row_cnt; + int32_t row_cnt = 0; lv_draw_ctx_t * new_draw_ctx = lv_mem_alloc(disp_refr->driver->draw_ctx_size); LV_ASSERT_MALLOC(new_draw_ctx); if(new_draw_ctx == NULL) {
fix version number in pc-config
@@ -3,8 +3,8 @@ libdir=@libdir_for_pc_file@ includedir=@includedir_for_pc_file@ Name: @PROJECT_NAME@ -Description: a compact general purpose allocator with excellent performance -Version: @PROJECT_VERSION@ +Description: A compact general purpose allocator with excellent performance +Version: @PACKAGE_VERSION@ URL: https://github.com/microsoft/mimalloc/ Libs: -L${libdir} -lmimalloc Libs.private: @pc_libraries@
plugins: register out_splunk plugin
@@ -134,8 +134,9 @@ REGISTER_OUT_PLUGIN("out_null") if (NOT CMAKE_SYSTEM_NAME MATCHES "Windows") REGISTER_OUT_PLUGIN("out_plot") endif() -REGISTER_OUT_PLUGIN("out_stdout") REGISTER_OUT_PLUGIN("out_retry") +REGISTER_OUT_PLUGIN("out_splunk") +REGISTER_OUT_PLUGIN("out_stdout") REGISTER_OUT_PLUGIN("out_td") REGISTER_OUT_PLUGIN("out_lib") REGISTER_OUT_PLUGIN("out_flowcounter")
Don't try to clean up RAND from ENGINE. This is especially harmful since OPENSSL_cleanup() has already called the RAND cleanup function.
@@ -166,11 +166,6 @@ void engine_cleanup_int(void) engine_cleanup_cb_free); cleanup_stack = NULL; } - /* - * FIXME: This should be handled (somehow) through RAND, eg. by it - * registering a cleanup callback. - */ - RAND_set_rand_method(NULL); CRYPTO_THREAD_lock_free(global_engine_lock); }
Testing older FreeBSD releases as well.
@@ -218,7 +218,7 @@ matrix: ## env: DOCKER="fedora:rawhide" VARIANT="fedora" TOOL="mock" - ## ###### FreeBSD ######################################################### + # ###### FreeBSD ######################################################### # ====== FreeBSD 12.1-RELEASE ============================================ - name: "FreeBSD 12.1-RELEASE with Clang" @@ -232,29 +232,29 @@ matrix: group: travis_latest env: QEMU="FreeBSD" VARIANT="12.1-RELEASE" TOOL="compile" COMPILER_C="gcc" COMPILER_CXX="g++" - ## ====== FreeBSD 11.3-RELEASE ============================================ - #- name: "FreeBSD 11.3-RELEASE with Clang" - #os: linux - #dist: bionic - #group: travis_latest - #env: QEMU="FreeBSD" VARIANT="11.3-RELEASE" TOOL="compile" COMPILER_C="clang" COMPILER_CXX="clang++" - #- name: "FreeBSD 11.3-RELEASE with GCC" - #os: linux - #dist: bionic - #group: travis_latest - #env: QEMU="FreeBSD" VARIANT="11.3-RELEASE" TOOL="compile" COMPILER_C="gcc" COMPILER_CXX="g++" + # ====== FreeBSD 12.0-RELEASE ============================================ + - name: "FreeBSD 12.0-RELEASE with Clang" + os: linux + dist: bionic + group: travis_latest + env: QEMU="FreeBSD" VARIANT="12.0-RELEASE" TOOL="compile" COMPILER_C="clang" COMPILER_CXX="clang++" + - name: "FreeBSD 12.0-RELEASE with GCC" + os: linux + dist: bionic + group: travis_latest + env: QEMU="FreeBSD" VARIANT="12.0-RELEASE" TOOL="compile" COMPILER_C="gcc" COMPILER_CXX="g++" - ## ====== FreeBSD 11.2-RELEASE ============================================ - #- name: "FreeBSD 11.2-RELEASE with Clang" - #os: linux - #dist: bionic - #group: travis_latest - #env: QEMU="FreeBSD" VARIANT="11.2-RELEASE" TOOL="compile" COMPILER_C="clang" COMPILER_CXX="clang++" - #- name: "FreeBSD 11.2-RELEASE with GCC" - #os: linux - #dist: bionic - #group: travis_latest - #env: QEMU="FreeBSD" VARIANT="11.2-RELEASE" TOOL="compile" COMPILER_C="gcc" COMPILER_CXX="g++" + # ====== FreeBSD 11.3-RELEASE ============================================ + - name: "FreeBSD 11.3-RELEASE with Clang" + os: linux + dist: bionic + group: travis_latest + env: QEMU="FreeBSD" VARIANT="11.3-RELEASE" TOOL="compile" COMPILER_C="clang" COMPILER_CXX="clang++" + - name: "FreeBSD 11.3-RELEASE with GCC" + os: linux + dist: bionic + group: travis_latest + env: QEMU="FreeBSD" VARIANT="11.3-RELEASE" TOOL="compile" COMPILER_C="gcc" COMPILER_CXX="g++" # ###### Other ###########################################################
Fixed rect draw when wider than 360 pixels
@@ -576,10 +576,11 @@ static void draw_rect_grad(lv_obj_t * cpicker, const lv_area_t * mask) /*scale angle (hue/sat/val) to linear coordinate*/ lv_coord_t xi = (i * grad_w) / 360; + lv_coord_t xi2 = ((i+i_step) * grad_w) / 360; rect_area.x1 = LV_MATH_MIN(grad_area.x1 + xi, grad_area.x1 + grad_w - i_step); rect_area.y1 = grad_area.y1; - rect_area.x2 = rect_area.x1 + i_step; + rect_area.x2 = LV_MATH_MIN(grad_area.x1 + xi2, grad_area.x1 + grad_w - i_step); rect_area.y2 = grad_area.y2; lv_draw_rect(&rect_area, mask, &bg_dsc);
admin/docs: bumping version for 2.4
%include %{_sourcedir}/OHPC_macros Name: docs%{PROJ_DELIM} -Version: 2.3.0 +Version: 2.4.0 Release: 1 Summary: OpenHPC documentation License: BSD-3-Clause
Free the DIMM topology structure after the call to get the system topology in NVM management.
@@ -483,14 +483,16 @@ NVM_API int nvm_get_number_of_memory_topology_devices(unsigned int *count) } ReturnCode = gNvmDimmDriverNvmDimmConfig.GetSystemTopology(&gNvmDimmDriverNvmDimmConfig, &pDimmTopology, (UINT16 *)&DdrDimmCnt); + if (EFI_ERROR(ReturnCode)) { NVDIMM_ERR_W(FORMAT_STR_NL, CLI_ERR_INTERNAL_ERROR); + FREE_POOL_SAFE(pDimmTopology); return NVM_ERR_UNKNOWN; } else if (pDimmTopology == NULL) { NVDIMM_ERR("Could not read the system topology.\n"); return NVM_ERR_UNKNOWN; } - + FREE_POOL_SAFE(pDimmTopology); if (NVM_SUCCESS != (nvm_status = nvm_get_number_of_devices(&DpcCnt))) { NVDIMM_ERR("Failed to obtain the number of devices (%d)\n", nvm_status);
crypto/pem/pem_lib.c: Add check for BIO_read. Since BIO_read() can potentially fail, it is better to check its return value and return an error on failure. Also, to reduce duplicated code, use an 'out_free' label.
@@ -971,19 +971,22 @@ int PEM_read_bio_ex(BIO *bp, char **name_out, char **header, headerlen = BIO_get_mem_data(headerB, NULL); *header = pem_malloc(headerlen + 1, flags); *data = pem_malloc(len, flags); - if (*header == NULL || *data == NULL) { - pem_free(*header, flags, 0); - pem_free(*data, flags, 0); - goto end; - } - BIO_read(headerB, *header, headerlen); + if (*header == NULL || *data == NULL) + goto out_free; + if (headerlen != 0 && BIO_read(headerB, *header, headerlen) != headerlen) + goto out_free; (*header)[headerlen] = '\0'; - BIO_read(dataB, *data, len); + if (BIO_read(dataB, *data, len) != len) + goto out_free; *len_out = len; *name_out = name; name = NULL; ret = 1; + goto end; +out_free: + pem_free(*header, flags, 0); + pem_free(*data, flags, 0); end: EVP_ENCODE_CTX_free(ctx); pem_free(name, flags, 0);
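The pattern introduced above is "treat a short read as an error": compare BIO_read()'s return value against the exact number of bytes expected before using the buffer. A standalone illustration of that pattern, assuming OpenSSL's public BIO API (BIO_new_mem_buf is used here only as a convenient readable BIO; link with -lcrypto):

    #include <stdio.h>
    #include <string.h>
    #include <openssl/bio.h>

    int main(void)
    {
        static const char header[] = "Proc-Type: 4,ENCRYPTED\n";
        int headerlen = (int)strlen(header);

        BIO *b = BIO_new_mem_buf(header, headerlen);
        if (b == NULL)
            return 1;

        char buf[64];
        /* A short read (or failure) means the buffer is only partially filled,
         * so bail out instead of consuming it. */
        if (headerlen != 0 && BIO_read(b, buf, headerlen) != headerlen) {
            fprintf(stderr, "short read\n");
            BIO_free(b);
            return 1;
        }
        buf[headerlen] = '\0';
        printf("read: %s", buf);
        BIO_free(b);
        return 0;
    }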
Set debug_query_string in worker_spi. This makes elog.c emit the string, which is good practice for a background worker that executes SQL strings. Discussion:
@@ -119,6 +119,7 @@ initialize_worker_spi(worktable *table) appendStringInfo(&buf, "select count(*) from pg_namespace where nspname = '%s'", table->schema); + debug_query_string = buf.data; ret = SPI_execute(buf.data, true, 0); if (ret != SPI_OK_SELECT) elog(FATAL, "SPI_execute failed: error code %d", ret); @@ -134,6 +135,7 @@ initialize_worker_spi(worktable *table) if (ntup == 0) { + debug_query_string = NULL; resetStringInfo(&buf); appendStringInfo(&buf, "CREATE SCHEMA \"%s\" " @@ -147,15 +149,19 @@ initialize_worker_spi(worktable *table) /* set statement start time */ SetCurrentStatementStartTimestamp(); + debug_query_string = buf.data; ret = SPI_execute(buf.data, false, 0); if (ret != SPI_OK_UTILITY) elog(FATAL, "failed to create my schema"); + + debug_query_string = NULL; /* rest is not statement-specific */ } SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); + debug_query_string = NULL; pgstat_report_activity(STATE_IDLE, NULL); } @@ -262,6 +268,7 @@ worker_spi_main(Datum main_arg) StartTransactionCommand(); SPI_connect(); PushActiveSnapshot(GetTransactionSnapshot()); + debug_query_string = buf.data; pgstat_report_activity(STATE_RUNNING, buf.data); /* We can now execute queries via SPI */ @@ -291,6 +298,7 @@ worker_spi_main(Datum main_arg) SPI_finish(); PopActiveSnapshot(); CommitTransactionCommand(); + debug_query_string = NULL; pgstat_report_stat(false); pgstat_report_activity(STATE_IDLE, NULL); }
Add ssm to switch resource for server_certification_test app
@@ -901,6 +901,9 @@ register_resources(void) oc_resource_set_request_handler(temp_resource, OC_POST, post_temp, NULL); oc_resource_tag_func_desc(temp_resource, OC_ENUM_HEATING); oc_resource_tag_pos_desc(temp_resource, OC_POS_CENTRE); + #ifdef OC_OSCORE + oc_resource_set_secure_mcast(temp_resource, true); +#endif /* OC_OSCORE */ oc_add_resource(temp_resource); PRINT("\tTemperature resource added.\n"); bswitch = oc_new_resource(NULL, "/switch", 1, 0);
lens: clear state on %cancel. If you use herb to send a dojo command that doesn't compile, the lens state doesn't clear and herb no longer works. This just adds a simple way to clear that state.
++ on-poke |= [=mark =vase] ^- (quip card:agent:gall _this) + :: + ?: &(?=(%noun mark) ?=(%cancel q.vase)) + ~& %lens-cancel + [~ this(job.state ~)] + :: ?. ?=(%handle-http-request mark) (on-poke:def mark vase) =+ !<([eyre-id=@ta =inbound-request:eyre] vase)
xfconf-plugin: check for 0 status on dry-run
@@ -18,7 +18,7 @@ static void test_basics (void) KeySet * conf = ksNew (0, KS_END); int statusCode = elektraXfconfInit (parentKey, 1, 1); printf ("xfconf dry open returned: %d\n", statusCode); - if (!statusCode) + if (statusCode != 0) { printf ("WARNING: dry open xfconf failed, is dbus running? skipping tests\n"); return;
Remove React.FC type since CustomEventEditor doesn't have children
@@ -21,7 +21,11 @@ import { CustomEvent } from "store/features/entities/entitiesTypes"; const customEventName = (customEvent: CustomEvent, customEventIndex: number) => customEvent.name ? customEvent.name : `Script ${customEventIndex + 1}`; -const CustomEventEditor: React.FC<{ id: string }> = ({ id }) => { +interface CustomEventEditorProps { + id: string; +} + +const CustomEventEditor = ({ id }: CustomEventEditorProps) => { const customEvents = useSelector((state: RootState) => customEventSelectors.selectAll(state) );
FLS-NB: only set node name from light
@@ -2147,6 +2147,17 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node) { if (i->address().ext() == node->address().ext()) { + // address changed? + if (i->address().nwk() != node->address().nwk()) + { + i->address() = node->address(); + } + + if (i->modelId().startsWith(QLatin1String("FLS-NB"))) + { + continue; // use name from light + } + if (i->node() != node) { i->setNode(const_cast<deCONZ::Node*>(node)); @@ -2164,12 +2175,6 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node) if (!i->swVersion().isEmpty()) { q->nodeUpdated(i->address().ext(), QLatin1String("version"), i->swVersion()); } } - - // address changed? - if (i->address().nwk() != node->address().nwk()) - { - i->address() = node->address(); - } } checkSensorNodeReachable(&*i);
DoxyGen: Added example code for Armv8 MPU.
@@ -10,8 +10,16 @@ The MPU is used to prevent from illegal memory accesses that are typically cause \code void main() { - // Set Region 0 - ARM_MPU_SetRegionEx(0UL, 0x08000000UL, MPU_RASR(0UL, ARM_MPU_AP_FULL, 0UL, 0UL, 1UL, 1UL, 0x00UL, ARM_MPU_REGION_SIZE_1MB)); + // Set Region 0 using Attr 0 + ARM_MPU_SetMemAttr(0UL, ARM_MPU_ATTR( /* Normal memory */ + ARM_MPU_ATTR_MEMORY_(0UL, 1UL, 1UL, 1UL), /* Outer Write-Back transient with read and write allocate */ + ARM_MPU_ATTR_MEMORY_(0UL, 0UL, 1UL, 1UL) /* Inner Write-Through transient with read and write allocate */ + )); + + ARM_MPU_SetRegion(0UL, + ARM_MPU_RBAR(0x08000000UL, ARM_MPU_SH_NON, 0UL, 1UL, 1UL), /* Non-shareable, read/write, non-privileged, execute-never */ + ARM_MPU_RLAR(0x080FFFFFUL, 0UL) /* 1MB memory block using Attr 0 */ + ); ARM_MPU_Enable(0); @@ -197,6 +205,24 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_ * \param rnr First region number to be configured. * \param table Pointer to the MPU configuration table. * \param cnt Amount of regions to be configured. +* +* <b>Example:</b> +* \code +* const ARM_MPU_Region_t mpuTable[1][4] = { +* { +* // BASE SH RO NP XN LIMIT ATTR +* { .RBAR = ARM_MPU_RBAR(0x08000000UL, ARM_MPU_SH_NON, 0UL, 1UL, 0UL), .RLAR = ARM_MPU_RLAR(0x080FFFFFUL, 0UL) }, +* { .RBAR = ARM_MPU_RBAR(0x20000000UL, ARM_MPU_SH_NON, 0UL, 1UL, 1UL), .RLAR = ARM_MPU_RLAR(0x20007FFFUL, 0UL) }, +* { .RBAR = ARM_MPU_RBAR(0x40020000UL, ARM_MPU_SH_NON, 0UL, 1UL, 1UL), .RLAR = ARM_MPU_RLAR(0x40021FFFUL, 1UL) }, +* { .RBAR = ARM_MPU_RBAR(0x40022000UL, ARM_MPU_SH_NON, 0UL, 1UL, 1UL), .RLAR = ARM_MPU_RLAR(0x40022FFFUL, 1UL) } +* } +* }; +* +* void UpdateMpu(uint32_t idx) +* { +* ARM_MPU_Load(0, mpuTable[idx], 4); +* } +* \endcode */ __STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt);
Fix #define in audio_test
@@ -379,7 +379,7 @@ bool tud_audio_tx_done_pre_load_cb(uint8_t rhport, uint8_t itf, uint8_t ep_in, u (void) ep_in; (void) cur_alt_setting; - tud_audio_write ((uint8_t *)test_buffer_audio, CFG_TUD_AUDIO_EPSIZE_IN); + tud_audio_write ((uint8_t *)test_buffer_audio, CFG_TUD_AUDIO_EP_SZ_IN); return true; } @@ -392,7 +392,7 @@ bool tud_audio_tx_done_post_load_cb(uint8_t rhport, uint16_t n_bytes_copied, uin (void) ep_in; (void) cur_alt_setting; - for (size_t cnt = 0; cnt < CFG_TUD_AUDIO_EPSIZE_IN/2; cnt++) + for (size_t cnt = 0; cnt < CFG_TUD_AUDIO_EP_SZ_IN/2; cnt++) { test_buffer_audio[cnt] = startVal++; }
patch for maps android ndk
@@ -24,25 +24,42 @@ IF (OS_LINUX) ELSEIF (OS_ANDROID) # protobuf 3.6.1 DECLARE_EXTERNAL_RESOURCE(MAPKIT_SDK sbr:881642915) + DECLARE_EXTERNAL_RESOURCE(MAPS_NDK_PATCH sbr:1045044111) CFLAGS( GLOBAL "-I$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/include" ) IF (ARCH_ARM7) LDFLAGS_FIXED( + "-L$MAPS_NDK_PATCH/android.armeabi-v7a/lib" "-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/android.armeabi-v7a/lib" ) + CFLAGS( + GLOBAL "-I$MAPS_NDK_PATCH/android.armeabi-v7a/include" + ) ELSEIF (ARCH_ARM64) LDFLAGS_FIXED( + "-L$MAPS_NDK_PATCH/android.arm64-v8a/lib" "-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/android.arm64-v8a/lib" ) + CFLAGS( + GLOBAL "-I$MAPS_NDK_PATCH/android.arm64-v8a/include" + ) ELSEIF(ARCH_I386) LDFLAGS_FIXED( + "-L$MAPS_NDK_PATCH/android.x86/lib" "-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/android.x86/lib" ) + CFLAGS( + GLOBAL "-I$MAPS_NDK_PATCH/android.x86/include" + ) ELSEIF (ARCH_X86_64) LDFLAGS_FIXED( + "-L$MAPS_NDK_PATCH/android.x86_64/lib" "-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/android.x86_64/lib" ) + CFLAGS( + GLOBAL "-I$MAPS_NDK_PATCH/android.x86_64/include" + ) ELSE() MESSAGE(FATAL_ERROR Unsupported platform) ENDIF()
Fix checks for AVX512 and atomics
@@ -254,7 +254,7 @@ if (($architecture eq "x86") || ($architecture eq "x86_64")) { # $tmpf = new File::Temp( UNLINK => 1 ); ($fh,$tmpf) = tempfile( SUFFIX => '.c' , UNLINK => 1 ); $code = '"vbroadcastss -4 * 4(%rsi), %zmm2"'; - print $tmpf "#include <immintrin.h>\n\nint main(void){ __asm__ volatile($code); }\n"; + print $fh "#include <immintrin.h>\n\nint main(void){ __asm__ volatile($code); }\n"; $args = " -march=skylake-avx512 -c -o $tmpf.o $tmpf"; if ($compiler eq "PGI") { $args = " -tp skylake -c -o $tmpf.o $tmpf"; @@ -278,7 +278,7 @@ if ($data =~ /HAVE_C11/) { $c11_atomics = 0; } else { ($fh,$tmpf) = tempfile( SUFFIX => '.c' , UNLINK => 1 ); - print $tmpf "#include <stdatomic.h>\nint main(void){}\n"; + print $fh "#include <stdatomic.h>\nint main(void){}\n"; $args = " -c -o $tmpf.o $tmpf"; my @cmd = ("$compiler_name $flags $args >/dev/null 2>/dev/null"); system(@cmd) == 0;
frame: ack-range also needs to be boundary checked
@@ -93,11 +93,12 @@ int quicly_decode_ack_frame(const uint8_t **src, const uint8_t *end, quicly_ack_ tmp += 1; if (frame->smallest_acknowledged < tmp) goto Error; + if (i < QUICLY_MAX_ACK_RANGE_COUNT) { frame->ack_block_lengths[i + 1] = tmp; frame->smallest_acknowledged -= tmp; - if (i < QUICLY_MAX_ACK_RANGE_COUNT) frame->gaps[i] = curr_gap; } + } if (is_ack_ecn) { /* just skip ECT(0), ECT(1), ECT-CE counters for the time being */
Add cpu identification via mfpvr call for the BSDs fixes
@@ -142,6 +142,52 @@ int detect(void){ return CPUTYPE_PPC970; #endif + +#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) +int id; +id = __asm __volatile("mfpvr %0" : "=r"(id)); +switch ( id >> 16 ) { + case 0x4e: // POWER9 + return return CPUTYPE_POWER8; + break; + case 0x4d: + case 0x4b: // POWER8/8E + return CPUTYPE_POWER8; + break; + case 0x4a: + case 0x3f: // POWER7/7E + return CPUTYPE_POWER6; + break; + case 0x3e: + return CPUTYPE_POWER6; + break; + case 0x3a: + return CPUTYPE_POWER5; + break; + case 0x35: + case 0x38: // POWER4 /4+ + return CPUTYPE_POWER4; + break; + case 0x40: + case 0x41: // POWER3 /3+ + return CPUTYPE_POWER3; + break; + case 0x39: + case 0x3c: + case 0x44: + case 0x45: + return CPUTYPE_PPC970; + break; + case 0x70: + return CPUTYPE_CELL; + break; + case 0x8003: + return CPUTYPE_PPCG4; + break; + default: + return CPUTYPE_UNKNOWN; + } +#endif } void get_architecture(void){
YAMBi: Use same style of text for start and end
@@ -405,7 +405,7 @@ void Lexer::scanValue () if (addIndentation (start.column, Level::Type::MAP)) { location.begin = start; - tokens.insert (tokens.begin () + offset, Symbol (token::MAP_START, location, "MAPPING START")); + tokens.insert (tokens.begin () + offset, Symbol (token::MAP_START, location, "MAP START")); } }
Added initial BSD for simulator
#include <survive.h> #include <survive_reproject.h> -STATIC_CONFIG_ITEM(Simulator_DRIVER_ENABLE, "use-simulator", 'i', "Load a Simulator driver for testing.", 0); +STATIC_CONFIG_ITEM(Simulator_DRIVER_ENABLE, "simulator", 'i', "Load a Simulator driver for testing.", 0); STATIC_CONFIG_ITEM(Simulator_TIME, "simulator-time", 'f', "Seconds to run simulator for.", 0.0); struct SurviveDriverSimulator { @@ -201,6 +201,19 @@ void str_append(char **pString, const char *str) { strcat(*pString, str); } +const BaseStationData simulated_bsd[2] = { + { + .PositionSet = 1, + .BaseStationID = 0, + .Pose = {.Pos = {-3, 0, 1},.Rot = { -0.70710678118, 0, 0.70710678118, 0 } } + }, + { + .PositionSet = 1, + .BaseStationID = 1, + .Pose = { .Pos = { 3, 0, 1 }, .Rot = { 0.70710678118, 0, 0.70710678118, 0 } } + }, +}; + int DriverRegSimulator(SurviveContext *ctx) { SurviveDriverSimulator *sp = calloc(1, sizeof(SurviveDriverSimulator)); sp->ctx = ctx; @@ -243,6 +256,13 @@ int DriverRegSimulator(SurviveContext *ctx) { FLT r = .1; srand(42); + + for (int i = 0; i < ctx->activeLighthouses; i++) { + if (!ctx->bsd[i].PositionSet) { + memcpy(ctx->bsd + i, simulated_bsd + i, sizeof(simulated_bsd[i])); + } + } + for (int i = 0; i < device->sensor_ct; i++) { FLT azi = rand(); FLT pol = rand();
Fixed common hal_gpio for stm32f1
@@ -531,7 +531,9 @@ int hal_gpio_init_out(int pin, int val) #else cfg.Speed = GPIO_SPEED_FREQ_HIGH; #endif +#if !MYNEWT_VAL(MCU_STM32F1) cfg.Alternate = 0; +#endif /* Initialize pin as an output, setting proper mode */ HAL_GPIO_Init(portmap[port], &cfg); @@ -554,6 +556,7 @@ hal_gpio_init_af(int pin, uint8_t af_type, enum hal_gpio_pull pull, uint8_t od) } else { gpio.Mode = GPIO_MODE_AF_OD; } + gpio.Pull = pull; #if (defined GPIO_SPEED_FREQ_VERY_HIGH) gpio.Speed = GPIO_SPEED_FREQ_VERY_HIGH; #elif (defined GPIO_SPEED_HIGH) @@ -561,8 +564,9 @@ hal_gpio_init_af(int pin, uint8_t af_type, enum hal_gpio_pull pull, uint8_t od) #else gpio.Speed = GPIO_SPEED_FREQ_HIGH; #endif - gpio.Pull = pull; +#if !MYNEWT_VAL(MCU_STM32F1) gpio.Alternate = af_type; +#endif return hal_gpio_init_stm(pin, &gpio); }
Update pub/sub include location
@@ -28,7 +28,7 @@ sleep 10 while `websocket-bench broadcast ws://127.0.0.1:3000/ --concurrent 10 \ */ #define WEBSOCKET_SHOOTOUT_PUBSUB_H -#include "unused/pubsub.h" // includes the "http.h" header +#include "pubsub.h" // includes the "http.h" header #include "websockets.h" // includes the "http.h" header #include <errno.h>
Remove duplicate docs link
@@ -17,7 +17,6 @@ LVGL provides everything you need to create embedded GUI with easy-to-use graphi <a href="https://lvgl.io/demos">Live demo</a> &middot; <a href="https://docs.lvgl.io/">Docs</a> &middot; <a href="https://forum.lvgl.io">Forum</a> &middot; -<a href="https://docs.lvgl.io/">Docs</a> &middot; <a href="https://blog.lvgl.io/">Blog</a> </h4>
Add dependency for metrics
@@ -81,9 +81,9 @@ import: version: 1b2967e3c290b7c545b3db0deeda16e9be4f98a2 - package: github.com/davecgh/go-spew version: ~1.1.0 -- package: github.com/zalando/go-keyring - package: github.com/mattn/go-colorable version: ~0.0.9 +- package: github.com/rcrowley/go-metrics testImport: - package: github.com/stretchr/testify version: ~1.2.2
Call SetMBounds on non-expr statements
@@ -218,11 +218,15 @@ func (q *checker) bcheckStatement(n *a.Node) error { switch n.Kind() { case a.KAssert: - return q.bcheckAssert(n.AsAssert()) + if err := q.bcheckAssert(n.AsAssert()); err != nil { + return err + } case a.KAssign: n := n.AsAssign() - return q.bcheckAssignment(n.LHS(), n.Operator(), n.RHS()) + if err := q.bcheckAssignment(n.LHS(), n.Operator(), n.RHS()); err != nil { + return err + } case a.KExpr: n := n.AsExpr() @@ -234,7 +238,6 @@ func (q *checker) bcheckStatement(n *a.Node) error { return err } } - return nil case a.KIOBind: n := n.AsIOBind() @@ -242,10 +245,11 @@ func (q *checker) bcheckStatement(n *a.Node) error { return err } // TODO: invalidate any facts regarding the io_bind expressions. - return nil case a.KIf: - return q.bcheckIf(n.AsIf()) + if err := q.bcheckIf(n.AsIf()); err != nil { + return err + } case a.KIterate: n := n.AsIterate() @@ -273,7 +277,6 @@ func (q *checker) bcheckStatement(n *a.Node) error { } q.facts = q.facts[:0] - return nil case a.KJump: n := n.AsJump() @@ -290,21 +293,27 @@ func (q *checker) bcheckStatement(n *a.Node) error { } } q.facts = q.facts[:0] - return nil case a.KRet: // TODO. case a.KVar: - return q.bcheckVar(n.AsVar(), false) + if err := q.bcheckVar(n.AsVar(), false); err != nil { + return err + } case a.KWhile: - return q.bcheckWhile(n.AsWhile()) + if err := q.bcheckWhile(n.AsWhile()); err != nil { + return err + } default: return fmt.Errorf("check: unrecognized ast.Kind (%s) for bcheckStatement", n.Kind()) } + if b := n.MBounds(); b[0] == nil { + n.SetMBounds(a.Bounds{zero, zero}) + } return nil }
docs - correct gpinitsystem use of HBA_HOSTNAMES parameter.
@@ -486,15 +486,14 @@ declare -a MIRROR_ARRAY=( </plentry> <plentry> <pt>HBA_HOSTNAMES</pt> - <pd><b>Optional.</b> This parameter controls whether Greenplum Database - utilities use IP addresses or host names in the <codeph>pg_hba.conf</codeph> - file when updating the file with addresses that can connect to Greenplum - Database. The default value is <codeph>0</codeph>, the utilities use IP - addresses when updating the file. When initializing a Greenplum Database - system, specify <codeph>HBA_HOSTNAMES=1</codeph> to have the utilities use - host names in the <codeph>pg_hba.conf</codeph> file. This option is set when - the system is initialized and cannot be changed after the system is - initialized. </pd> + <pd><b>Optional.</b> This parameter controls whether + <codeph>gpinitsystem</codeph> uses IP addresses or host names in the + <codeph>pg_hba.conf</codeph> file when updating the file with addresses + that can connect to Greenplum Database. The default value is + <codeph>0</codeph>, the utility use IP addresses when updating the file. + When initializing a Greenplum Database system, specify + <codeph>HBA_HOSTNAMES=1</codeph> to have the utility use host names in + the <codeph>pg_hba.conf</codeph> file. </pd> <pd>For information about how Greenplum Database resolves host names in the <codeph>pg_hba.conf</codeph> file, see <xref href="../../admin_guide/client_auth.xml#topic1"/>.</pd>
t: use something other than 1 for an ack range
@@ -94,26 +94,27 @@ static void test_ack_decode(void) const uint8_t *src = pat; int i, range_sum; quicly_ack_frame_t decoded; - pos = quicly_encodev(pos, 0x84D0); + pos = quicly_encodev(pos, 0xFA00); pos = quicly_encodev(pos, 0); pos = quicly_encodev(pos, QUICLY_ACK_MAX_GAPS + 1); pos = quicly_encodev(pos, 8); for (i = 0; i <= QUICLY_ACK_MAX_GAPS ; ++i) { pos = quicly_encodev(pos, i); // gap - pos = quicly_encodev(pos, 1); // ack-range + pos = quicly_encodev(pos, i % 10); // ack-range } + ok(quicly_decode_ack_frame(&src, pos, &decoded, 0) == 0); - ok(decoded.largest_acknowledged == 0x84D0); + ok(decoded.largest_acknowledged == 0xFA00); ok(decoded.ack_delay == 0); ok(decoded.num_gaps == QUICLY_ACK_MAX_GAPS); ok(decoded.ack_block_lengths[0] == 8 + 1); // first ack-range range_sum = decoded.ack_block_lengths[0]; for (i = 0; i < QUICLY_ACK_MAX_GAPS; ++i) { ok(decoded.gaps[i] == i + 1); - ok(decoded.ack_block_lengths[i + 1] == 1 + 1); + ok(decoded.ack_block_lengths[i + 1] == (i % 10) + 1); range_sum += decoded.gaps[i] + decoded.ack_block_lengths[i + 1]; } - ok(decoded.smallest_acknowledged == 0x84D0 - range_sum + 1); + ok(decoded.smallest_acknowledged == 0xFA00 - range_sum + 1); } subtest("underflow", test_ack_decode_underflow);
BUMP pymongocrypt 1.4.2.dev0
# See the License for the specific language governing permissions and # limitations under the License. -__version__ = '1.4.1' +__version__ = '1.4.2.dev0' _MIN_LIBMONGOCRYPT_VERSION = '1.5.2'
Try to open normal ports 80/8080/443/etc. before just opening an arbitrary port.
@@ -263,7 +263,7 @@ drivers inst who isFake plan shutdownSTM termSys stderr = where (behnBorn, runBehn) = behn inst plan (amesBorn, runAmes) = ames inst who isFake plan stderr - (httpBorn, runHttp) = serv inst plan + (httpBorn, runHttp) = serv inst plan isFake (clayBorn, runClay) = clay inst plan (irisBorn, runIris) = client inst plan (termBorn, runTerm) = Term.term termSys shutdownSTM inst plan
Don't force static crypto dependency in case of a static build
@@ -59,7 +59,11 @@ else() if (BUILD_SHARED_LIBS) set(crypto_LIBRARY ${crypto_SHARED_LIBRARY}) else() + if (crypto_STATIC_LIBRARY) set(crypto_LIBRARY ${crypto_STATIC_LIBRARY}) + else() + set(crypto_LIBRARY ${crypto_SHARED_LIBRARY}) + endif() endif() endif()
Multicast example: exclude nrf52dk, which does not support routing
CONTIKI_PROJECT = root intermediate sink all: $(CONTIKI_PROJECT) +# nrf52dk only supports slave mode, i.e., with no routing +PLATFORMS_EXCLUDE = nrf52dk + CONTIKI = ../.. MODULES += os/net/ipv6/multicast
[rt-smart] fix warnings
@@ -4206,7 +4206,6 @@ int sys_fsync(int fd) mqd_t sys_mq_open(const char *name, int flags, mode_t mode, struct mq_attr *attr) { - int ret = 0; mqd_t mqdes; #ifdef ARCH_MM_MMU char *kname = RT_NULL; @@ -4216,13 +4215,13 @@ mqd_t sys_mq_open(const char *name, int flags, mode_t mode, struct mq_attr *attr lwp_user_strlen(name, &a_err); if (a_err) - return -EFAULT; + return (mqd_t)-EFAULT; len = rt_strlen(name); if (!len) - return -EINVAL; + return (mqd_t)-EINVAL; kname = (char *)kmem_get(len + 1); if (!kname) - return -ENOMEM; + return (mqd_t)-ENOMEM; lwp_get_from_user(&attr_k, (void *)attr, sizeof(struct mq_attr)); lwp_get_from_user(kname, (void *)name, len + 1); @@ -4296,7 +4295,6 @@ int sys_mq_timedreceive(mqd_t mqd, char *restrict msg, size_t len, unsigned *res #ifdef ARCH_MM_MMU char *restrict kmsg = RT_NULL; int a_err = 0; - rt_size_t prio_len = 0; struct timespec at_k;
avf: fix feature arc, take two
@@ -263,11 +263,12 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (ad->per_interface_next_index != ~0)) next_index = ad->per_interface_next_index; - vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next); if (PREDICT_FALSE (vnet_device_input_have_features (ad->sw_if_index))) vnet_feature_start_device_input_x1 (ad->sw_if_index, &next_index, bt); + vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next); + /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and copy needed data from descriptor to rx vector */ bi = to_next;
nucleo-dartmonkey: Build the same tests as dartmonkey. This enables us to run dartmonkey's on-device tests on nucleo-h743zi. BRANCH=none TEST=make -j buildall
@@ -14,12 +14,17 @@ test-list-y=\ aes \ compile_time_macros \ crc \ + flash_physical \ + flash_write_protect \ fpsensor \ + mpu \ mutex \ pingpong \ - rsa \ + rollback \ + rollback_entropy \ rsa3 \ rtc \ + scratchpad \ sha256 \ sha256_unrolled \ stm32f_rtc \
Participants: unify sig handling
@@ -61,11 +61,10 @@ function getParticipants(cs: Contacts, group: Group) { f.toPairs, f.map(([patp, c]: [string, Contact]) => ({ ...c, - patp, + patp: patp.slice(1), pending: false })) )(cs); - console.log(contacts); const members: Participant[] = _.map( Array.from(group.members) .filter(e => group?.policy?.invite?.pending ? !group.policy.invite.pending.has(e) : true), m =>
filter_lua: load scripts using relative path
#include "lua_config.h" -#include <fcntl.h> +#include <sys/types.h> +#include <sys/stat.h> #include <unistd.h> +#include <fcntl.h> struct lua_filter *lua_config_create(struct flb_filter_instance *ins, struct flb_config *config) @@ -35,7 +37,10 @@ struct lua_filter *lua_config_create(struct flb_filter_instance *ins, int ret; char *tmp; char *tmp_key; + char buf[1024]; + char *script = NULL; (void) config; + struct stat st; struct lua_filter *lf; struct mk_list *split = NULL; struct mk_list *head = NULL; @@ -60,15 +65,33 @@ struct lua_filter *lua_config_create(struct flb_filter_instance *ins, return NULL; } - /* Validate path */ - ret = access(tmp, R_OK); - if (ret == -1) { + /* Compose path */ + ret = stat(tmp, &st); + if (ret == -1 && errno == ENOENT) { + if (tmp[0] == '/') { flb_error("[filter_lua] cannot access script '%s'", tmp); flb_free(lf); return NULL; } - lf->script = flb_sds_create(tmp); + if (config->conf_path) { + snprintf(buf, PATH_MAX, "%s%s", config->conf_path, tmp); + script = buf; + } + } + else { + script = tmp; + } + + /* Validate script path */ + ret = access(script, R_OK); + if (ret == -1) { + flb_error("[filter_lua] cannot access script '%s'", script); + flb_free(lf); + return NULL; + } + + lf->script = flb_sds_create(script); if (!lf->script) { flb_error("[filter_lua] could not allocate string"); flb_free(lf);
VTL: Makefile: Don't install pip twice. Pip is already installed via virtualenv, so there is no need to reinstall it.
@@ -98,7 +98,6 @@ $(GET_PIP_SCRIPT): $(PIP_INSTALL_DONE): $(GET_PIP_SCRIPT) @virtualenv $(VENV_PATH) -p $(PYTHON_INTERP) - @bash -c "source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) $(GET_PIP_SCRIPT)" @bash -c "source $(VENV_PATH)/bin/activate && $(PYTHON_INTERP) -m pip install $(PYTHON_DEPENDS)" @touch $@
travis: see if apt-get update helps find lz4
@@ -21,7 +21,8 @@ matrix: script: - | if [ $TRAVIS_OS_NAME = linux ]; then - sudo apt-get install liblz4 liblz4-dev + sudo apt-get -qq update + sudo apt-get install -y liblz4 liblz4-dev else brew install lz4 fi
tweak Changelog item
@@ -19,7 +19,7 @@ Version 1.3.6 (07 November 2018) [General] - * updated RPM packaging defaults to include an ohpc version designation in release string + * updated RPM package naming to include an ohpc version designation in release string (https://github.com/openhpc/ohpc/issues/744) * patch MPICH to allow use of HYDRA_BINDING environment variable for setting processor binding settings (https://github.com/openhpc/ohpc/issues/750)
util/mkdef.pl: Add UNIX as a platform. This allows us to guard Unix-specific functions with #ifndef / #ifdef OPENSSL_SYS_UNIX
@@ -68,6 +68,7 @@ my $do_checkexist = 0; my $VMS=0; my $W32=0; my $NT=0; +my $UNIX=0; my $linux=0; # Set this to make typesafe STACK definitions appear in DEF my $safe_stack_def = 0; @@ -75,7 +76,7 @@ my $safe_stack_def = 0; my @known_platforms = ( "__FreeBSD__", "PERL5", "EXPORT_VAR_AS_FUNCTION", "ZLIB", "_WIN32" ); -my @known_ossl_platforms = ( "VMS", "WIN32", "WINNT", "OS2" ); +my @known_ossl_platforms = ( "UNIX", "VMS", "WIN32", "WINNT", "OS2" ); my @known_algorithms = ( "RC2", "RC4", "RC5", "IDEA", "DES", "BF", "CAST", "MD2", "MD4", "MD5", "SHA", "SHA0", "SHA1", "SHA256", "SHA512", "RMD160", @@ -165,6 +166,7 @@ foreach (@ARGV, split(/ /, $config{options})) } if ($_ eq "linux") { $linux=1; + $UNIX=1; } $VMS=1 if $_ eq "VMS"; if ($_ eq "zlib" || $_ eq "enable-zlib" || $_ eq "zlib-dynamic" @@ -1064,6 +1066,7 @@ sub is_valid if ($platforms) { # platforms + if ($keyword eq "UNIX" && $UNIX) { return 1; } if ($keyword eq "VMS" && $VMS) { return 1; } if ($keyword eq "WIN32" && $W32) { return 1; } if ($keyword eq "_WIN32" && $W32) { return 1; }
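With UNIX registered as a platform keyword, mkdef.pl can keep Unix-only symbols out of other platforms' export lists, and the matching declarations can be guarded in C exactly as the commit message says. A tiny sketch of such a guard (the helper name is hypothetical, not a real OpenSSL symbol):

    #include <stdio.h>
    #include <openssl/e_os2.h>          /* defines OPENSSL_SYS_UNIX on Unix platforms */

    #ifdef OPENSSL_SYS_UNIX
    /* Hypothetical Unix-only helper; non-Unix builds never see it. */
    static int example_unix_only(void) { return 42; }
    #endif

    int main(void)
    {
    #ifdef OPENSSL_SYS_UNIX
        printf("Unix build: %d\n", example_unix_only());
    #else
        printf("non-Unix build\n");
    #endif
        return 0;
    }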
generate-fwts-olog: add support for parsing prerror()
@@ -41,10 +41,10 @@ def create_parser(): re.DOTALL) # Match the following prlog() call - log_call = (Literal("prlog") + - Literal('(').suppress() + + log_call = (((Literal("prerror") + Literal('(').suppress()) | + (Literal("prlog") + Literal('(').suppress() + Word(string.letters + string.digits + '_') + - Literal(',').suppress() + + Literal(',').suppress())) + Combine(OneOrMore(QuotedString('"')), adjacent=False) + (Literal(')') | Literal(',')).suppress() ) @@ -167,11 +167,15 @@ def parse_patterns(parser, fname, tag): i = 1 for result in parser.scanString(data): (token, loc, _) = result + if token[1] == 'prlog': (annotations, logfn, level, msg) = token + else: + (annotations, logfn, msg) = token + level = 'PR_ERR' loc = (fname, lineno(loc, data)) - if logfn != 'prlog': + if logfn != 'prlog' and logfn != 'prerror': warn(loc, "unknown log output function '%s'" % logfn) compare_mode, pattern_str = message_to_pattern(loc, msg)
Testing: shuffle and deflate only available for netCDF-4 output
@@ -100,8 +100,10 @@ fi echo "Test shuffle and deflate ..." # --------------------------------- +if [ $have_netcdf4 -eq 1 ]; then input=${data_dir}/sst_globus0083.grib ${tools_dir}/grib_to_netcdf -s -d9 -k4 -o $tempNetcdf $input +fi echo "Test ECC-1060 ..." # ----------------------
Add HLS Action decoding for verbose mode.
@@ -247,15 +247,11 @@ static int snap_m_init(void *handle) VERBOSE1(" %d 0x%8.8x ", (int)(reg >> 32ll), atype); switch (atype) { - case 0x10140000: - VERBOSE1("IBM Sample Code\n"); - break; - case 0x10141000: - VERBOSE1("HLS Demo Memcopy\n"); - break; - case 0x10141001: - VERBOSE1("HLS Code 2\n"); - break; + case 0x10140000: VERBOSE1("IBM Sample Code\n"); break; + case 0x10141000: VERBOSE1("HLS Demo Memcopy\n"); break; + case 0x10141001: VERBOSE1("HLS sponge\n"); break; + case 0x10141002: VERBOSE1("HLS XXXX\n"); break; + case 0x10141003: VERBOSE1("HLS test search\n"); break; default: VERBOSE1("UNKNOWN Code.....\n"); break;
Missing a newline('\n')
@@ -31,7 +31,7 @@ const char argp_program_doc[] = "USAGE: readahead [--help] [-d DURATION]\n" "\n" "EXAMPLES:\n" -" readahead # summarize on-CPU time as a histogram" +" readahead # summarize on-CPU time as a histogram\n" " readahead -d 10 # trace for 10 seconds only\n"; static const struct argp_option opts[] = {
behn: ignore duplicate %wait requests
:: ?~ timers ~[t] + :: ignore duplicates + :: + ?: =(t i.timers) + timers :: timers at the same date form a fifo queue :: ?: (lth date.t date.i.timers)
Update TP libs for my machine
## -## Setup VISITHOME & VISITARCH variables. +## Setup VISITHOME variables. ## -SET(VISITHOME /home/biagas2/visit/thirdparty/3.0.0) +SET(VISITHOME /home/biagas2/visit/thirdparty/2.13.0) ## Compiler flags. ## @@ -70,7 +70,7 @@ VISIT_OPTION_DEFAULT(VISIT_QWT_DIR ${VISITHOME}/qwt/6.1.2) ## ## BOOST ## -VISIT_OPTION_DEFAULT(VISIT_BOOST_DIR /home/biagas2/visit/boost_minimal_headers/1.57.0) +VISIT_OPTION_DEFAULT(VISIT_BOOST_DIR /home/biagas2/visit/boost_minimal_headers/1.60.0) ## ## MPICH @@ -125,10 +125,17 @@ VISIT_OPTION_DEFAULT(VISIT_HDF5_LIBDEP ${VISIT_SZIP_DIR}/lib sz ${VISIT_ZLIB_DIR VISIT_OPTION_DEFAULT(VISIT_CGNS_DIR ${VISITHOME}/cgns/3.2.1) VISIT_OPTION_DEFAULT(VISIT_CGNS_LIBDEP HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING) +## +## Conduit +## +VISIT_OPTION_DEFAULT(VISIT_CONDUIT_DIR ${VISITHOME}/conduit/0.2.1) +VISIT_OPTION_DEFAULT(VISIT_CONDUIT_LIBDEP HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING) + ## ## FastBit ## -VISIT_OPTION_DEFAULT(VISIT_FASTBIT_DIR ${VISITHOME}/fastbit/1.2.0) +SETUP_APP_VERSION(FASTBIT 2.0.3) +VISIT_OPTION_DEFAULT(VISIT_FASTBIT_DIR ${VISITHOME}/fastbit/${FASTBIT_VERSION}) ## ## GDAL @@ -148,15 +155,21 @@ VISIT_OPTION_DEFAULT(VISIT_HDF4_DIR ${VISITHOME}/hdf4/4.2.5) VISIT_OPTION_DEFAULT(VISIT_HDF4_LIBDEP ${VISIT_SZIP_DIR}/lib sz ${VISIT_VTK_DIR}/lib vtkjpeg-${VTK_MAJOR_VERSION}.${VTK_MINOR_VERSION} TYPE STRING) ## -## NetCDF +## MFEM ## -VISIT_OPTION_DEFAULT(VISIT_NETCDF_DIR ${VISITHOME}/netcdf/4.1.1) -VISIT_OPTION_DEFAULT(VISIT_NETCDF_LIBDEP HDF5_LIBRARY_DIR hdf5_hl HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING) +VISIT_OPTION_DEFAULT(VISIT_MFEM_DIR ${VISITHOME}/mfem/3.3) ## -## MFEM +## Mili ## -VISIT_OPTION_DEFAULT(VISIT_MFEM_DIR ${VISITHOME}/mfem/3.1) +VISIT_OPTION_DEFAULT(VISIT_MILI_DIR ${VISITHOME}/mili/15.1) +## + +## +## NetCDF +## +VISIT_OPTION_DEFAULT(VISIT_NETCDF_DIR ${VISITHOME}/netcdf/4.1.1) +VISIT_OPTION_DEFAULT(VISIT_NETCDF_LIBDEP HDF5_LIBRARY_DIR hdf5_hl HDF5_LIBRARY_DIR hdf5 ${VISIT_HDF5_LIBDEP} TYPE STRING) ## ## Silo
Fix: swap pairs for inverted cv as well.
@@ -95,6 +95,7 @@ void BuildCvPools( if (reverseCv) { learnPool->Docs.Swap(testPool->Docs); + learnPool->Pairs.swap(testPool->Pairs); } MATRIXNET_INFO_LOG << "Learn docs: " << learnPool->Docs.GetDocCount() << ", test docs: " << testPool->Docs.GetDocCount() << Endl;
Treat label=0 as an invalid next-hop-via-label
@@ -794,7 +794,13 @@ add_del_route_t_handler (u8 is_multipath, fib_route_path_t *paths = NULL; fib_entry_flag_t entry_flags = FIB_ENTRY_FLAG_NONE; - if (MPLS_LABEL_INVALID != next_hop_via_label) + /* + * the special INVALID label meams we are not recursing via a + * label. Exp-null value is never a valid via-label so that + * also means it's not a via-label and means clients that set + * it to 0 by default get the expected behaviour + */ + if ((MPLS_LABEL_INVALID != next_hop_via_label) && (0 != next_hop_via_label)) { path.frp_proto = DPO_PROTO_MPLS; path.frp_local_label = next_hop_via_label;
blk_hdr.dat Fix
@@ -3989,12 +3989,15 @@ bool CBlock::SetBestChain(CTxDB& txdb, CBlockIndex* pindexNew) uint256 nBestBlockTrust = pindexBest->nHeight != 0 ? (pindexBest->nChainTrust - pindexBest->pprev->nChainTrust) : pindexBest->nChainTrust; + if(fDebugChain) + { printf("SetBestChain: new best=%s height=%d tx=%lu trust=%s blocktrust=%" PRId64" date=%s\n", hashBestChain.ToString().substr(0,20).c_str(), nBestHeight, (unsigned long)pindexBest->nChainTx, CBigNum(nBestChainTrust).ToString().c_str(), nBestBlockTrust.Get64(), DateTimeStrFormat("%x %H:%M:%S", pindexBest->GetBlockTime()).c_str()); + } // Check the version of the last 100 blocks to see if we need to upgrade: if (!fIsInitialDownload) @@ -5047,9 +5050,9 @@ bool CheckDiskSpace(uint64_t nAdditionalBytes) static unsigned int nCurrentBlockFile = 1; static unsigned int nCurrentBlockThinFile = 1; -static filesystem::path BlockFilePath(unsigned int nFile) +static filesystem::path BlockFilePath(bool fHeaderFile, unsigned int nFile) { - string strBlockFn = strprintf("blk%04u.dat", nFile); + string strBlockFn = strprintf(fHeaderFile ? "blk_hdr%04u.dat": "blk%04u.dat", nFile); return GetDataDir() / strBlockFn; } @@ -5160,10 +5163,10 @@ bool LoadBlockIndex(bool fAllowNew) { block.nNonce = 13278; } - if (false && (block.GetHash() != hashGenesisBlock)) { // This will figure out a valid hash and Nonce if you're // creating a different genesis block: + if (false && (block.GetHash() != hashGenesisBlock)) { uint256 hashTarget = CBigNum().SetCompact(block.nBits).getuint256(); while (block.GetHash() > hashTarget) {
build(cmake): add support for demos
@@ -13,11 +13,14 @@ get_filename_component(LV_CONF_DIR ${LV_CONF_PATH} DIRECTORY) file(GLOB_RECURSE SOURCES ${LVGL_ROOT_DIR}/src/*.c) file(GLOB_RECURSE EXAMPLE_SOURCES ${LVGL_ROOT_DIR}/examples/*.c) +file(GLOB_RECURSE DEMO_SOURCES ${LVGL_ROOT_DIR}/demos/*.c) add_library(lvgl STATIC ${SOURCES}) add_library(lvgl::lvgl ALIAS lvgl) add_library(lvgl_examples STATIC ${EXAMPLE_SOURCES}) add_library(lvgl::examples ALIAS lvgl_examples) +add_library(lvgl_demos STATIC ${DEMO_SOURCES}) +add_library(lvgl::demos ALIAS lvgl_demos) target_compile_definitions( lvgl PUBLIC $<$<BOOL:${LV_LVGL_H_INCLUDE_SIMPLE}>:LV_LVGL_H_INCLUDE_SIMPLE> @@ -29,8 +32,11 @@ target_include_directories(lvgl SYSTEM PUBLIC ${LVGL_ROOT_DIR} ${LV_CONF_DIR}) # Include /examples folder target_include_directories(lvgl_examples SYSTEM PUBLIC ${LVGL_ROOT_DIR}/examples) +target_include_directories(lvgl_demos SYSTEM + PUBLIC ${LVGL_ROOT_DIR}/demos) target_link_libraries(lvgl_examples PUBLIC lvgl) +target_link_libraries(lvgl_demos PUBLIC lvgl) # Lbrary and headers can be installed to system using make install file(GLOB LVGL_PUBLIC_HEADERS "${CMAKE_SOURCE_DIR}/lv_conf.h"
New confidentiality and integrity limits
#include <ngtcp2/ngtcp2_crypto.h> /* Maximum key usage (encryption) limits */ -#define NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM (33554432ULL) +#define NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM (1ULL << 23) #define NGTCP2_CRYPTO_MAX_ENCRYPTION_CHACHA20_POLY1305 (1ULL << 62) -#define NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_CCM (118632831ULL) +#define NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_CCM (2965820ULL) /* Maximum authentication failure (decryption) limits during the lifetime of a connection. */ -#define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM (1ULL << 54) +#define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM (1ULL << 52) #define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305 (1ULL << 36) -#define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_CCM (11863283ULL) +#define NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_CCM (2965820ULL) /** * @function
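For context, these constants look like the per-AEAD usage limits from QUIC-TLS (RFC 9001) rounded to integers; assuming that is the source here, they work out to:

    2^23  = 8 388 608            AES-GCM packets that may be encrypted under one key
    2^52  ~= 4.5 * 10^15         failed AES-GCM decryptions tolerated per connection
    floor(2^21.5) = 2 965 820    AES-CCM limit for both encryption and failed decryption
    2^62 / 2^36                  ChaCha20-Poly1305 encryption / failed-decryption limits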
nct38xx: Use common tcpci set_polarity BRANCH=none TEST=verify TCPCI is still functional
@@ -201,20 +201,6 @@ int tcpci_nct38xx_drp_toggle(int port) return tcpci_tcpc_drp_toggle(port); } -int tcpci_nct38xx_set_polarity(int port, int polarity) -{ - int rv, reg; - - rv = tcpc_read(port, TCPC_REG_TCPC_CTRL, &reg); - if (rv) - return rv; - - reg = polarity ? (reg | TCPC_REG_TCPC_CTRL_SET(1)) : - (reg & ~TCPC_REG_TCPC_CTRL_SET(1)); - - return tcpc_write(port, TCPC_REG_TCPC_CTRL, reg); -} - int tcpci_nct38xx_transmit(int port, enum tcpm_transmit_type type, uint16_t header, const uint32_t *data) { @@ -309,7 +295,7 @@ const struct tcpm_drv nct38xx_tcpm_drv = { #endif .select_rp_value = &tcpci_tcpm_select_rp_value, .set_cc = &tcpci_nct38xx_set_cc, - .set_polarity = &tcpci_nct38xx_set_polarity, + .set_polarity = &tcpci_tcpm_set_polarity, .set_vconn = &tcpci_tcpm_set_vconn, .set_msg_header = &tcpci_tcpm_set_msg_header, .set_rx_enable = &tcpci_tcpm_set_rx_enable,
[cmake] accept OCE 0.16
@@ -23,7 +23,7 @@ if(WITH_OCE) # OCE_ALL_FOUND - set to TRUE if all requested COMPONENTS are specified (see below), false otherwise # OCE_MISSING_TOOLKITS - when OCE_ALL_FOUND is FALSE, contains a list of missing toolkits # OCE_ALL_BUILT_MODULES - the list of source directories compiled (mostly useful when running swig to generate wrappers) - find_package(OCE 0.17 REQUIRED COMPONENTS ${OCE_TOOLKITS}) + find_package(OCE 0.16 REQUIRED COMPONENTS ${OCE_TOOLKITS}) if(OCE_ALL_FOUND) message(STATUS "OCE found.")
os/Kconfig: Add new configs for Common Binary implementation. Add new configs which will be used by the common binary implementation.
@@ -270,6 +270,31 @@ config APPS_RAM_REGION_SHAREABLE shareable region in the MPU. If the system has multiple cores which might access the external applicaiton RAM region, then this config needs to be set to true. +config SUPPORT_COMMON_BINARY + bool "Support Common Binary" + default n + depends on FS_SMARTFS + ---help--- + This config indicates whether we support common binary. The common binary will + contain all the common libraries which will be used by the different loadable + applications. The common binary will be stored in the file system. + +if SUPPORT_COMMON_BINARY + +config COMMON_BINARY_PATH + string "Path where common binary is stored" + default "/mnt/" + ---help--- + Path where common binary is stored. + +config COMMON_BINARY_NAME + string "Name of common binary file" + default "tinyara_common_binary" + ---help--- + Filename of the common binary file. + +endif # SUPPORT_COMMON_BINARY + endif # APP_BINARY_SEPARATION
Configure: Reflect that we don't build loader_attic when dynamic-engine is disabled
@@ -618,6 +618,7 @@ my @disable_cascades = ( "module" => [ "fips", "dso" ], "engine" => [ "dynamic-engine", grep(/eng$/, @disablables) ], + "dynamic-engine" => [ "loadereng" ], "hw" => [ "padlockeng" ], # no-autoalginit is only useful when building non-shared
fix mytoken profile parsing in gen
@@ -10,7 +10,7 @@ int parseAndSetProfile(struct oidc_account* account, char* profile) { return 0; } cJSON* p = stringToJsonDontLogError(profile); - if (!cJSON_IsObject(p) || !cJSON_IsArray(p)) { + if (!(cJSON_IsObject(p) || cJSON_IsArray(p))) { secFreeJson(p); char* quotedProfile = oidc_sprintf("\"%s\"", profile); p = stringToJson(quotedProfile);
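The corrected guard rejects the parsed value only when it is neither a JSON object nor a JSON array; by De Morgan's law

    ¬(IsObject ∨ IsArray)  ≡  ¬IsObject ∧ ¬IsArray

so both predicates must be false before the code falls back to re-quoting the profile as a plain string. Negating each operand separately ("not an object OR not an array") would test a different, much broader condition.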
Update documentation for outbound-msg-retry
@@ -1454,6 +1454,11 @@ This can make ordinary queries complete (if repeatedly queried for), and enter the cache, whilst also mitigating the traffic flow by the factor given. .TP 5 +.B outbound\-msg\-retry: \fI<number> +The number of retries unbound will do in case of a non positive response is +received. If a forward nameserver is used, this is the number of retries per +forward nameserver in case of throwaway response. +.TP 5 .B fast\-server\-permil: \fI<number> Specify how many times out of 1000 to pick from the set of fastest servers. 0 turns the feature off. A value of 900 would pick from the fastest
Fixes: os/execute should return non-zero on signals. Behave more like shells, and catch segfaults.
@@ -401,7 +401,16 @@ static Janet os_execute(int32_t argc, Janet *argv) { } os_execute_cleanup(envp, child_argv); - return janet_wrap_integer(WEXITSTATUS(status)); + /* Use POSIX shell semantics for interpreting signals */ + int ret; + if (WIFEXITED(status)) { + ret = WEXITSTATUS(status); + } else if (WIFSTOPPED(status)) { + ret = WSTOPSIG(status) + 128; + } else { + ret = WTERMSIG(status) + 128; + } + return janet_wrap_integer(ret); #endif }
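The convention adopted above is the shell's: a child that exits normally reports its exit status, while one killed (or stopped) by a signal reports 128 plus the signal number. A standalone POSIX sketch of that decoding (not Janet's actual wrapper):

    #include <stdio.h>
    #include <signal.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Map a wait() status to a shell-style return code. */
    static int shell_style_code(int status)
    {
        if (WIFEXITED(status))
            return WEXITSTATUS(status);
        if (WIFSTOPPED(status))
            return WSTOPSIG(status) + 128;
        return WTERMSIG(status) + 128;
    }

    int main(void)
    {
        pid_t pid = fork();
        if (pid == 0) {
            raise(SIGKILL);                       /* simulate a child killed by a signal */
            _exit(0);
        }
        int status = 0;
        waitpid(pid, &status, 0);
        printf("return code: %d\n", shell_style_code(status));  /* 128 + 9 = 137 */
        return 0;
    }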
Print error message when PNG is not enabled
@@ -742,19 +742,31 @@ static int unpack_double_element_set(grib_accessor* a, const size_t* index_array #else -static int unpack_double(grib_accessor* a, double* val, size_t* len) +static void print_error_feature_not_enabled(grib_context* c) { - grib_context_log(a->context, GRIB_LOG_ERROR, + grib_context_log(c, GRIB_LOG_ERROR, "grib_accessor_data_png_packing: PNG support not enabled. " "Please rebuild with -DENABLE_PNG=ON"); - return GRIB_FUNCTIONALITY_NOT_ENABLED; } +static int unpack_double(grib_accessor* a, double* val, size_t* len) +{ + print_error_feature_not_enabled(a->context); + return GRIB_FUNCTIONALITY_NOT_ENABLED; +} static int pack_double(grib_accessor* a, const double* val, size_t* len) { - grib_context_log(a->context, GRIB_LOG_ERROR, - "grib_accessor_data_png_packing: PNG support not enabled. " - "Please rebuild with -DENABLE_PNG=ON"); + print_error_feature_not_enabled(a->context); + return GRIB_FUNCTIONALITY_NOT_ENABLED; +} +static int unpack_double_element(grib_accessor* a, size_t idx, double* val) +{ + print_error_feature_not_enabled(a->context); + return GRIB_FUNCTIONALITY_NOT_ENABLED; +} +static int unpack_double_element_set(grib_accessor* a, const size_t* index_array, size_t len, double* val_array) +{ + print_error_feature_not_enabled(a->context); return GRIB_FUNCTIONALITY_NOT_ENABLED; }
add PostgreSQL Apt Repository to Dockerfile
FROM ubuntu:bionic +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + ca-certificates \ + gnupg \ + lsb-release + +RUN curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \ + sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' + RUN apt-get update && apt-get install -y --no-install-recommends \ build-essential \ cmake \
codegen: fix infinite loop for wrong check/enum
@@ -292,6 +292,11 @@ kainjow::mustache::list EnumProcessor::getValues (const std::string & prefix, co std::set<std::pair<std::string, std::string>> stringValues; const auto end = key.getMeta<std::string> ("check/enum"); + if (ckdb::elektraArrayValidateBaseNameString (end.c_str ()) < 0) + { + throw CommandAbortException ("The key '" + key.getName () + "' has invalid check/enum metadata: " + end); + } + kdb::long_long_t i = 0; std::string cur = "#0"; while (cur <= end)
Fix pipeline failure: commit '5f7cdc' didn't update the answer file expand_table.out
@@ -914,9 +914,9 @@ CREATE VIEW expand_view AS select * from expand_table1; CREATE rule "_RETURN" AS ON SELECT TO expand_table2 DO INSTEAD SELECT * FROM expand_table1; ALTER TABLE expand_table2 EXPAND TABLE; -ERROR: "expand_table2" is not a table or foreign table +ERROR: "expand_table2" is not a table, materialized view, or foreign table ALTER TABLE expand_view EXPAND TABLE; -ERROR: "expand_view" is not a table or foreign table +ERROR: "expand_view" is not a table, materialized view, or foreign table ALTER TABLE expand_table1 EXPAND TABLE; -- -- Test expanding a table with a domain type as distribution key.
status: fix the durations buffer capacity check
@@ -140,8 +140,8 @@ static h2o_iovec_t durations_status_final(void *priv, h2o_globalconf_t *gconf, h ret.len += sprintf(ret.base + ret.len, ",\n\"evloop-latency-nanosec\": ["); for(int i = 0; i < agg_stats->stats.evloop_latency_nanosec.size; i++) { size_t len = snprintf(NULL, 0, "%s%llu", delim, agg_stats->stats.evloop_latency_nanosec.entries[i]); - /* require that there's enough space for the closing array bracket */ - if (ret.len + len + 1 > BUFSIZE) + /* require that there's enough space for the closing "]\0" */ + if (ret.len + len + 1 >= BUFSIZE) break; ret.len += snprintf(ret.base + ret.len, BUFSIZE - ret.len, "%s%llu", delim, agg_stats->stats.evloop_latency_nanosec.entries[i]);
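The fix above is a textbook off-by-one: after appending a chunk of len bytes at offset ret.len, two more bytes are still needed for the closing ']' and the terminating NUL. A worked example with a hypothetical 16-byte buffer:

    BUFSIZE = 16, ret.len = 11, len = 4
    bytes needed : 11 + 4 (chunk) + 1 (']') + 1 ('\0') = 17  -> one past the end
    old check    : 11 + 4 + 1 = 16 > 16   is false  -> chunk appended, buffer overruns
    new check    : 11 + 4 + 1 = 16 >= 16  is true   -> loop stops in time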
common/battery_v1.c: Format with clang-format BRANCH=none TEST=none
@@ -136,7 +136,8 @@ void update_dynamic_battery_info(void) * Don't report zero charge, as that has special meaning * to Chrome OS powerd. */ - if (curr->batt.remaining_capacity == 0 && !curr->batt_is_charging) + if (curr->batt.remaining_capacity == 0 && + !curr->batt_is_charging) *memmap_cap = 1; else *memmap_cap = curr->batt.remaining_capacity;
esp32/modsocket: Make read/write return None when in non-blocking mode.
@@ -383,13 +383,14 @@ STATIC mp_uint_t socket_stream_read(mp_obj_t self_in, void *buf, mp_uint_t size, // XXX Would be nicer to use RTC to handle timeouts for (int i=0; i<=sock->retries; i++) { MP_THREAD_GIL_EXIT(); - int x = lwip_recvfrom_r(sock->fd, buf, size, 0, NULL, NULL); + int r = lwip_recvfrom_r(sock->fd, buf, size, 0, NULL, NULL); MP_THREAD_GIL_ENTER(); - if (x >= 0) return x; - if (x < 0 && errno != EWOULDBLOCK) { *errcode = errno; return MP_STREAM_ERROR; } + if (r >= 0) return r; + if (r < 0 && errno != EWOULDBLOCK) { *errcode = errno; return MP_STREAM_ERROR; } check_for_exceptions(); } - return 0; // causes a timeout error to be raised. + *errcode = sock->retries == 0 ? MP_EWOULDBLOCK : MP_ETIMEDOUT; + return MP_STREAM_ERROR; } STATIC mp_uint_t socket_stream_write(mp_obj_t self_in, const void *buf, mp_uint_t size, int *errcode) { @@ -402,7 +403,8 @@ STATIC mp_uint_t socket_stream_write(mp_obj_t self_in, const void *buf, mp_uint_ if (r < 0 && errno != EWOULDBLOCK) { *errcode = errno; return MP_STREAM_ERROR; } check_for_exceptions(); } - return 0; + *errcode = sock->retries == 0 ? MP_EWOULDBLOCK : MP_ETIMEDOUT; + return MP_STREAM_ERROR; } STATIC mp_uint_t socket_stream_ioctl(mp_obj_t self_in, mp_uint_t request, uintptr_t arg, int *errcode) {
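The point of the change above is to distinguish "would block" (no retries configured on a non-blocking socket) from "timed out" (retries exhausted) instead of silently returning 0 bytes. A generic POSIX illustration of the same decision, not the MicroPython/lwIP wrapper itself:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Try once per retry; surface EWOULDBLOCK for non-blocking sockets and
     * ETIMEDOUT once the configured retries are exhausted. */
    static ssize_t read_with_retries(int fd, void *buf, size_t len, int retries)
    {
        for (int i = 0; i <= retries; i++) {
            ssize_t r = recv(fd, buf, len, 0);
            if (r >= 0)
                return r;
            if (errno != EWOULDBLOCK && errno != EAGAIN)
                return -1;                       /* real error, errno already set */
        }
        errno = (retries == 0) ? EWOULDBLOCK : ETIMEDOUT;
        return -1;
    }

    int main(void)
    {
        int sv[2];
        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) != 0)
            return 1;
        fcntl(sv[0], F_SETFL, O_NONBLOCK);       /* nothing written yet, so a read would block */
        char buf[16];
        ssize_t r = read_with_retries(sv[0], buf, sizeof buf, 0);
        printf("r = %zd, errno = %s\n", r, strerror(errno));  /* expect -1 / EWOULDBLOCK */
        return 0;
    }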
Add extra compile-time check. Minor change to add an extra compile-time check to prevent a bad config.
#define portTIMSK TIMSK0 #define portTIFR TIFR0 +#else + #error "No Timer defined for scheduler" #endif /*-----------------------------------------------------------*/
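The added #error turns a missing timer selection into a build failure rather than a silently misconfigured scheduler. The general shape of such a compile-time guard, with hypothetical option macros:

    /* Compile with -DUSE_TIMER0 (or -DUSE_TIMER1) to build; with neither
     * defined, the preprocessor stops the build at the #error below. */
    #if defined(USE_TIMER0)
        #define SCHED_TIMER 0
    #elif defined(USE_TIMER1)
        #define SCHED_TIMER 1
    #else
        #error "No Timer defined for scheduler"
    #endif

    int main(void) { return SCHED_TIMER; }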
readme: Update static lib build instructions
@@ -176,10 +176,10 @@ with PUC Lua 5.3 you can run make with: If you want to build luv as a static library run make with: ``` -~/Code/luv> BUILD_MODULE=OFF make +~/Code/luv> BUILD_MODULE=OFF BUILD_STATIC_LIBS=ON make ``` -This will create a static library `libluv.a`. +This will create a static library `libluv_a.a`. #### Build as shared library
docs/fingerprint: Remove unnecessary BOARD env variable. Since we're running the host tests (using the "host" board), we don't need to set the BOARD environment variable. BRANCH=none TEST=view in gitiles
@@ -98,19 +98,19 @@ prevent you from uploading. List available unit tests: ```bash -(chroot) ~/trunk/src/platform/ec $ make BOARD=nocturne_fp print-host-tests +(chroot) ~/trunk/src/platform/ec $ make print-host-tests ``` Build and run a specific unit test: ```bash -(chroot) ~/trunk/src/platform/ec $ make BOARD=nocturne_fp run-fpsensor +(chroot) ~/trunk/src/platform/ec $ make run-fpsensor ``` Build and run all unit tests: ```bash -(chroot) ~/trunk/src/platform/ec $ make BOARD=nocturne_fp runhosttests -j +(chroot) ~/trunk/src/platform/ec $ make runhosttests -j ``` ## Build ectool @@ -122,7 +122,7 @@ Build and run all unit tests: ## Build and run the `host_command` fuzz test ```bash -(chroot) ~/trunk/src/platform/ec $ make BOARD=nocturne_fp run-host_command_fuzz +(chroot) ~/trunk/src/platform/ec $ make run-host_command_fuzz ``` ## Logs
Add support for common font formats to pappl-makeresheader.
@@ -16,7 +16,7 @@ for file in "$@"; do varname="`echo $file | sed -e '1,$s/[ -.]/_/g'`" echo "/* $file */" case $file in - *.icc | *.jpg | *.png) + *.icc | *.jpg | *.otf | *.otc | *.png | *.ttc | *.ttf | *.woff | *.woff2) echo "static unsigned char $varname[] = {" od -t u1 -A n -v $file | awk '{for (i = 1; i <= NF; i ++) printf("%s,", $i); print "";}' echo "};"
Update pulse project names for concourse pipeline
@@ -879,14 +879,14 @@ jobs: input_mapping: *input_mappings params: <<: *pulse_properties - PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore" + PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore_Harmonize" - task: monitor_pulse attempts: 2 tags: ["gpdb5-pulse-worker"] file: gpdb_src/ci/pulse/api/monitor_pulse.yml params: <<: *pulse_properties - PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore" + PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore_Harmonize" - name: MU_backup_43_restore_5 plan: @@ -899,14 +899,14 @@ jobs: input_mapping: *input_mappings params: <<: *pulse_properties - PULSE_PROJECT_NAME: "GPDB-BehaveBackup-43_to_5" + PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore-43_to_5" - task: monitor_pulse attempts: 2 tags: ["gpdb5-pulse-worker"] file: gpdb_src/ci/pulse/api/monitor_pulse.yml params: <<: *pulse_properties - PULSE_PROJECT_NAME: "GPDB-BehaveBackup-43_to_5" + PULSE_PROJECT_NAME: "GPDB-BehaveBackupRestore-43_to_5" - name: MU_gpcheckcat plan:
admin/losf: bump version to v0.56.0
Summary: A Linux operating system framework for managing HPC clusters Name: %{pname}%{PROJ_DELIM} -Version: 0.55.0 +Version: 0.56.0 Release: 1%{?dist} License: GPL-2 Group: %{PROJ_NAME}/admin
oc_tls: fixed ciphersuite selections for OTMs
@@ -186,9 +186,8 @@ static const int anon_ecdh_priority[2] = { }; #endif /* OC_CLIENT */ -static const int jw_otm_priority[3] = { - MBEDTLS_TLS_ECDH_ANON_WITH_AES_128_CBC_SHA256, - MBEDTLS_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, 0 +static const int jw_otm_priority[2] = { + MBEDTLS_TLS_ECDH_ANON_WITH_AES_128_CBC_SHA256, 0 }; static const int pin_otm_priority[2] = { @@ -196,13 +195,11 @@ static const int pin_otm_priority[2] = { }; #ifdef OC_PKI -static const int cert_otm_priority[6] = { +static const int cert_otm_priority[5] = { MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8, MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_128_CCM, MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8, - MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_256_CCM, - MBEDTLS_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, - 0 + MBEDTLS_TLS_ECDHE_ECDSA_WITH_AES_256_CCM, 0 }; #endif /* OC_PKI */ @@ -1095,6 +1092,7 @@ oc_tls_set_ciphersuites(mbedtls_ssl_config *conf, oc_endpoint_t *endpoint) break; #endif /* OC_PKI */ default: + OC_DBG("oc_tls: selected default OTM priority"); ciphers = (int *)default_priority; break; }
metrics_exporter: push metrics into HTTP engine
#include <fluent-bit/flb_config.h> #include <fluent-bit/flb_input.h> #include <fluent-bit/flb_pack.h> +#include <fluent-bit/flb_http_server.h> #include <fluent-bit/flb_metrics.h> #include <fluent-bit/flb_metrics_exporter.h> @@ -68,7 +69,11 @@ static int collect_inputs(struct flb_me *me, struct flb_config *ctx) flb_free(buf); } - flb_pack_print(mp_sbuf.data, mp_sbuf.size); +#ifdef FLB_HAVE_HTTP_SERVER + if (ctx->http_server == FLB_TRUE) { + flb_hs_push_metrics(ctx->http_ctx, mp_sbuf.data, mp_sbuf.size); + } +#endif msgpack_sbuffer_destroy(&mp_sbuf); return 0;
diff BUGFIX redundant check
@@ -1501,7 +1501,7 @@ lyd_diff_merge_r(const struct lyd_node *src_diff, struct lyd_node *diff_parent, } /* remove any redundant nodes */ - if (diff_parent && lyd_diff_is_redundant(diff_parent)) { + if (lyd_diff_is_redundant(diff_parent)) { if (diff_parent == *diff) { *diff = (*diff)->next; }
INCLUDE: clap/ext/params.h: amend request_flush with recent comments by
@@ -278,7 +278,10 @@ typedef struct clap_host_params { void (*clear)(const clap_host_t *host, clap_id param_id, clap_param_clear_flags flags); - // Request a parameter flush. + // Request a parameter flush. Note that this is not useful to call from an + // [audio-thread], because a plugin executing within any [audio-thread] is either: + // 1. within process() (which may include clap_plugin_thread_pool->exec) + // 2. within flush() // // The host will then schedule a call to either: // - clap_plugin.process()
xive: Add basic opal_xive_reset() call and exploitation mode This adds an opal_xive_reset() call that currently does nothing other than store the mode (emulation vs. exploitation), and returns the appropriate error code if emulation mode calls are done while in exploitation mode.
#endif +/* The xive operation mode indicates the active "API" and corresponds + * to the "version" parameter of the opal_xive_reset() call + */ +static enum { + XIVE_MODE_EMU = 0, + XIVE_MODE_EXPL = 1, +} xive_mode; + + /* Each source controller has one of these. There's one embedded * in the XIVE struct for IPIs */ @@ -2018,6 +2027,8 @@ static int64_t opal_xive_eoi(uint32_t xirr) struct xive *src_x; bool special_ipi = false; + if (xive_mode != XIVE_MODE_EMU) + return OPAL_WRONG_STATE; if (!xs) return OPAL_INTERNAL_ERROR; @@ -2120,6 +2131,8 @@ static int64_t opal_xive_get_xirr(uint32_t *out_xirr, bool just_poll) uint16_t ack; uint8_t active, old_cppr; + if (xive_mode != XIVE_MODE_EMU) + return OPAL_WRONG_STATE; if (!xs) return OPAL_INTERNAL_ERROR; if (!out_xirr) @@ -2236,6 +2249,9 @@ static int64_t opal_xive_set_cppr(uint8_t cppr) struct cpu_thread *c = this_cpu(); struct xive_cpu_state *xs = c->xstate; + if (xive_mode != XIVE_MODE_EMU) + return OPAL_WRONG_STATE; + /* Limit supported CPPR values */ cppr = xive_sanitize_cppr(cppr); @@ -2258,6 +2274,8 @@ static int64_t opal_xive_set_mfrr(uint32_t cpu, uint8_t mfrr) struct xive_cpu_state *xs; uint8_t old_mfrr; + if (xive_mode != XIVE_MODE_EMU) + return OPAL_WRONG_STATE; if (!c) return OPAL_PARAMETER; xs = c->xstate; @@ -2275,6 +2293,16 @@ static int64_t opal_xive_set_mfrr(uint32_t cpu, uint8_t mfrr) return OPAL_SUCCESS; } +static int64_t opal_xive_reset(uint64_t version) +{ + if (version > 1) + return OPAL_PARAMETER; + + xive_mode = version; + + return OPAL_SUCCESS; +} + void init_xive(void) { struct dt_node *np; @@ -2307,5 +2335,8 @@ void init_xive(void) opal_register(OPAL_INT_SET_CPPR, opal_xive_set_cppr, 1); opal_register(OPAL_INT_EOI, opal_xive_eoi, 1); opal_register(OPAL_INT_SET_MFRR, opal_xive_set_mfrr, 2); + + /* Register XIVE exploitation calls */ + opal_register(OPAL_XIVE_RESET, opal_xive_reset, 1); }
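The guard added to each legacy entry point is a module-level mode flag set once by the reset call. A stripped-down, self-contained sketch of the same gate pattern; the names and error codes below are placeholders, not the OPAL ABI:

```c
#include <stdio.h>

enum { ERR_OK = 0, ERR_WRONG_STATE = -1, ERR_BAD_PARAM = -2 };

/* Active API "version": 0 = emulation, 1 = exploitation. */
static enum { MODE_EMU = 0, MODE_EXPL = 1 } irq_mode;

static int irq_reset(unsigned version)
{
    if (version > 1)
        return ERR_BAD_PARAM;
    irq_mode = version;
    return ERR_OK;
}

/* Every emulation-only call starts with the same guard. */
static int irq_set_cppr(unsigned char cppr)
{
    if (irq_mode != MODE_EMU)
        return ERR_WRONG_STATE;
    printf("cppr set to %u\n", cppr);
    return ERR_OK;
}

int main(void)
{
    printf("emulation mode: %d\n", irq_set_cppr(5));   /* accepted */
    irq_reset(MODE_EXPL);
    printf("exploitation mode: %d\n", irq_set_cppr(5)); /* rejected */
    return 0;
}
```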
last_modified: fix Windows version
@@ -40,7 +40,7 @@ int last_modified(const char *filepath, time_t *time) 0, NULL ); - if (!hFile) { + if (hFile == INVALID_HANDLE_VALUE) { // TODO: convert GetLastError() to errno // for now let's just assume that file was not found. errno = ENOENT;
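The bug class here is that CreateFile signals failure with INVALID_HANDLE_VALUE ((HANDLE)-1), not NULL, so a `!hFile` test never triggers. A hedged Windows-only sketch of the corrected check; the function name and the errno mapping are placeholders, not the original source:

```c
#ifdef _WIN32
#include <windows.h>
#include <errno.h>

/* Open a file for attribute queries; returns 0 on success, -1 and sets errno
 * on failure. CreateFileA reports failure with INVALID_HANDLE_VALUE, which is
 * (HANDLE)-1 -- a plain NULL check would never fire. */
static int open_for_stat(const char *filepath, HANDLE *out)
{
    HANDLE h = CreateFileA(filepath, GENERIC_READ,
                           FILE_SHARE_READ | FILE_SHARE_WRITE, NULL,
                           OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    if (h == INVALID_HANDLE_VALUE) {
        /* A real implementation would map each GetLastError() case. */
        errno = (GetLastError() == ERROR_FILE_NOT_FOUND) ? ENOENT : EIO;
        return -1;
    }
    *out = h;
    return 0;
}
#endif
```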
doc: How to build ACRN on Fedora 29. Update FAQ about the compilation issue workaround on Fedora 29.
@@ -145,3 +145,39 @@ static memory allocation. This is why ACRN removed all ``malloc()``-type code, and why it needs to pre-identify the size of all buffers and structures used in the Virtual Memory Manager. For this reason, knowing the available RAM size at compile time is necessary to statically allocate memory usage. + + +How to build ACRN on Fedora 29? +******************************* + +There is a known issue when attempting to build ACRN on Fedora 29 +because of how ``gnu-efi`` is packaged in this Fedora release. +(See the `ACRN GitHub issue +<https://github.com/projectacrn/acrn-hypervisor/issues/2457>`_ +for more information.) The following patch to ``/efi-stub/Makefile`` +fixes the problem on Fedora 29 development systems (but should +not be used on other Linux distros):: + + diff --git a/efi-stub/Makefile b/efi-stub/Makefile + index 5b87d49b..dfc64843 100644 + --- a/efi-stub/Makefile + +++ b/efi-stub/Makefile + @@ -52,14 +52,14 @@ endif + # its tools and libraries in different folders. The next couple of + # variables will determine and set the right path for both the + # tools $(GNUEFI_DIR) and libraries $(LIBDIR) + -GNUEFI_DIR := $(shell find $(SYSROOT)/usr/lib* -name elf_$(ARCH)_efi.lds -type f | xargs dirname) + +GNUEFI_DIR := $(shell find $(SYSROOT)/usr/lib* -name elf_x64_efi.lds -type f | xargs dirname) + LIBDIR := $(subst gnuefi,,$(GNUEFI_DIR)) + -CRT0 := $(GNUEFI_DIR)/crt0-efi-$(ARCH).o + -LDSCRIPT := $(GNUEFI_DIR)/elf_$(ARCH)_efi.lds + +CRT0 := $(GNUEFI_DIR)/crt0-efi-x64.o + +LDSCRIPT := $(GNUEFI_DIR)/elf_x64_efi.lds + + INCDIR := $(SYSROOT)/usr/include + + -CFLAGS=-I. -I.. -I../hypervisor/include/arch/x86/guest -I$(INCDIR)/efi -I$(INCDIR)/efi/$(ARCH) \ + +CFLAGS=-I. -I.. -I../hypervisor/include/arch/x86/guest -I$(INCDIR)/efi -I$(INCDIR)/efi/x64 \ + -I../hypervisor/include/public -I../hypervisor/include/lib -I../hypervisor/bsp/include/uefi \ + -DEFI_FUNCTION_WRAPPER -fPIC -fshort-wchar -ffreestanding \ + -Wall -I../fs/ -D$(ARCH) -O2 \
py: replace deprecated FieldError
@@ -14,7 +14,7 @@ Single dispatch of available SBP messages, keyed by msg_type. """ -from construct.core import FieldError +from construct.core import FormatFieldError from . import acquisition as acq from . import bootload as boot from . import file_io as file_io @@ -88,7 +88,7 @@ def dispatch(msg, table=_SBP_TABLE): % (msg.msg_type, msg) warnings.warn(warn, RuntimeWarning) return msg - except FieldError: + except FormatFieldError: warnings.warn("SBP payload deserialization error! 0x%x" % msg.msg_type, RuntimeWarning) return msg
pin docutils version so bullets appear in website
@@ -13,7 +13,8 @@ jobs: uses: actions/setup-python@v2 - name: Install Python dependencies run: | - pip install sphinx sphinx-rtd-theme breathe + # pin docutils here due to bullets not appearing + pip install sphinx sphinx-rtd-theme breathe docutils==0.16 - name: Build docs run: | cd docs/src && make docs && touch ../.nojekyll
fixes miscount in u3_pier_grab, improves printing
@@ -2116,31 +2116,45 @@ c3_w u3_pier_mark(FILE* fil_u) { c3_w len_w = u3K.len_w; - c3_w tot_w = 0; + c3_w tot_w = 0, pir_w = 0; u3_pier* pir_u; while ( 0 < len_w ) { pir_u = u3K.tab_u[--len_w]; + pir_w = 0; + + if ( 1 < u3K.len_w ) { fprintf(fil_u, "pier: %u\r\n", len_w); + } if ( 0 != pir_u->bot_u ) { - tot_w += u3a_maid(fil_u, " boot event", u3a_mark_noun(pir_u->bot_u->ven)); - tot_w += u3a_maid(fil_u, " pill", u3a_mark_noun(pir_u->bot_u->pil)); + pir_w += u3a_maid(fil_u, " boot event", u3a_mark_noun(pir_u->bot_u->ven)); + pir_w += u3a_maid(fil_u, " pill", u3a_mark_noun(pir_u->bot_u->pil)); } { - u3_writ* wit_u = pir_u->ent_u; - c3_w wit_w = 0; + u3_writ* wit_u = pir_u->ext_u; + c3_w len_w = 0, tim_w = 0, job_w = 0, mat_w = 0, act_w =0; while ( 0 != wit_u ) { - wit_w += u3a_mark_noun(wit_u->job); - wit_w += u3a_mark_noun(wit_u->now); - wit_w += u3a_mark_noun(wit_u->mat); - wit_w += u3a_mark_noun(wit_u->act); + tim_w += u3a_mark_noun(wit_u->now); + job_w += u3a_mark_noun(wit_u->job); + mat_w += u3a_mark_noun(wit_u->mat); + act_w += u3a_mark_noun(wit_u->act); + len_w++; wit_u = wit_u->nex_u; } - tot_w += u3a_maid(fil_u, " writs", wit_w); + if ( 0 < len_w ) { + fprintf(fil_u, " marked %u writs\r\n", len_w); + } + + pir_w += u3a_maid(fil_u, " timestamps", tim_w); + pir_w += u3a_maid(fil_u, " events", job_w); + pir_w += u3a_maid(fil_u, " encoded events", mat_w); + pir_w += u3a_maid(fil_u, " pending effects", act_w); + + tot_w += u3a_maid(fil_u, "total pier stuff", pir_w); } }
compile schema BUGFIX removing act/notif memory problems
@@ -1295,6 +1295,8 @@ static LY_ERR lys_compile_unres(struct lysc_ctx *ctx) { struct lysc_node *node; + struct lysc_action **actions; + struct lysc_notif **notifs; struct lysc_type *type, *typeiter; struct lysc_type_leafref *lref; struct lysc_augment *aug; @@ -1331,12 +1333,7 @@ lys_compile_unres(struct lysc_ctx *ctx) } if (node->nodetype & (LYS_RPC | LYS_ACTION | LYS_NOTIF)) { - if (node->nodetype & (LYS_RPC | LYS_ACTION)) { - lysc_action_free(ctx->ctx, (struct lysc_action *)node); - } else { - lysc_notif_free(ctx->ctx, (struct lysc_notif *)node); - } - /* remember all freed RPCs/actions/notifs */ + /* just remember all RPCs/actions/notifs for now */ ly_set_add(&disabled_op, node, 1, NULL); } else { lysc_node_free(ctx->ctx, node, 1); @@ -1349,13 +1346,29 @@ lys_compile_unres(struct lysc_ctx *ctx) --i; node = disabled_op.snodes[i]; if (node->nodetype == LYS_RPC) { - ARRAY_DEL_ITEM(node->module->compiled->rpcs, (struct lysc_action *)node); + actions = &node->module->compiled->rpcs; + assert(actions); + notifs = NULL; } else if (node->nodetype == LYS_ACTION) { - ARRAY_DEL_ITEM(*lysc_node_actions_p(node->parent), (struct lysc_action *)node); + actions = lysc_node_actions_p(node->parent); + assert(actions); + notifs = NULL; } else if (node->parent) { - ARRAY_DEL_ITEM(*lysc_node_notifs_p(node->parent), (struct lysc_notif *)node); + actions = NULL; + notifs = lysc_node_notifs_p(node->parent); + assert(notifs); } else { - ARRAY_DEL_ITEM(node->module->compiled->notifs, (struct lysc_notif *)node); + actions = NULL; + notifs = &node->module->compiled->notifs; + assert(notifs); + } + + if (actions) { + lysc_action_free(ctx->ctx, (struct lysc_action *)node); + ARRAY_DEL_ITEM(*actions, (struct lysc_action *)node); + } else { + lysc_notif_free(ctx->ctx, (struct lysc_notif *)node); + ARRAY_DEL_ITEM(*notifs, (struct lysc_notif *)node); } } ly_set_erase(&disabled_op, NULL);
Don't return NULL on 0 length input A 0-length string is still a valid input and should be treated as such; a NULL return should be reserved for when errors occur during line editing or EOF is reached. Merges
@@ -979,11 +979,11 @@ char *linenoise(const char *prompt) { } else { count = linenoiseDumb(buf, LINENOISE_MAX_LINE, prompt); } - if (count > 0) { + if (count >= 0) { sanitize(buf); count = strlen(buf); } - if (count <= 0) { + if (count < 0) { free(buf); return NULL; }
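After the patch, NULL is reserved for EOF or editing errors, and an empty line comes back as an empty string. A small caller-side sketch assuming that behaviour; it links against linenoise, and the prompt and loop body are illustrative only:

```c
#include <stdio.h>
#include <stdlib.h>

/* Provided by linenoise: returns a heap-allocated line, or NULL only on
 * error/EOF. With the patch above, an empty input yields "" rather than NULL. */
extern char *linenoise(const char *prompt);

int main(void)
{
    char *line;
    while ((line = linenoise("> ")) != NULL) {
        if (line[0] == '\0') {
            /* Empty input is valid: re-prompt instead of exiting. */
            free(line);
            continue;
        }
        printf("you typed: %s\n", line);
        free(line);
    }
    /* NULL means EOF (e.g. Ctrl-D) or an editing error. */
    return 0;
}
```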
i2s: add mclk_div underflow check Closes
@@ -985,7 +985,7 @@ static esp_err_t i2s_calculate_adc_dac_clock(int i2s_num, i2s_hal_clock_cfg_t *c /* Set I2S bit clock */ clk_cfg->bclk = p_i2s[i2s_num]->hal_cfg.sample_rate * I2S_LL_AD_BCK_FACTOR; /* Set I2S bit clock default division */ - clk_cfg->bclk_div = I2S_LL_AD_BCK_FACTOR * 16; + clk_cfg->bclk_div = p_i2s[i2s_num]->hal_cfg.chan_bits; /* If fixed_mclk and use_apll are set, use fixed_mclk as mclk frequency, otherwise calculate by mclk = sample_rate * multiple */ clk_cfg->mclk = (p_i2s[i2s_num]->use_apll && p_i2s[i2s_num]->fixed_mclk) ? p_i2s[i2s_num]->fixed_mclk : clk_cfg->bclk * clk_cfg->bclk_div;
Adding include paths for tests build
@@ -28,7 +28,7 @@ if(MSVC) endif(MSVC) set(Boost_THREAD_FOUND OFF) find_package(Boost COMPONENTS system thread filesystem) -include_directories (${Boost_INCLUDE_DIRS}) +include_directories (${Boost_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/include ${PROJECT_SOURCE_DIR}/tools/epanet-output/include) #I like to keep test files in a separate source directory called test
nissa: Set I2C5_1 pins to be low voltage. Set the sub-board type C I2C bus to be low voltage. This is done for all boards, since the pin is 3.3V tolerant. TEST=zmake build nivviks BRANCH=none
enable_gpio = <&gpio_fan_enable>; }; }; + + /* + * Set I2C pins for type C sub-board to be + * low voltage (I2C5_1). + * We do this for all boards, since the pins are + * 3.3V tolerant, and the only 2 types of sub-boards + * used on nivviks both have type-C ports on them. + */ + def-lvol-io-list { + compatible = "nuvoton,npcx-lvolctrl-def"; + lvol-io-pads = <&lvol_iof5 &lvol_iof4>; + }; }; &thermistor_3V3_51K1_47K_4050B {
Allow composite types in catalog bootstrap When resolving types during catalog bootstrap, try to reload the pg_type contents if a type is not found. That allows catalogs to contain composite types, e.g. row types for other catalogs. Author: Justin Pryzby Discussion:
@@ -934,6 +934,29 @@ gettype(char *type) return app->am_oid; } } + + /* + * The type wasn't known; reload the pg_type contents and check again + * to handle composite types, added since last populating the list. + */ + + list_free_deep(Typ); + Typ = NIL; + populate_typ_list(); + + /* + * Calling gettype would result in infinite recursion for types missing + * in pg_type, so just repeat the lookup. + */ + foreach (lc, Typ) + { + struct typmap *app = lfirst(lc); + if (strncmp(NameStr(app->am_typ.typname), type, NAMEDATALEN) == 0) + { + Ap = app; + return app->am_oid; + } + } } else {
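The shape of the fix is: search the cached list, and on a miss rebuild the cache once and search again without recursing, so a genuinely unknown type still fails cleanly. A simplified, self-contained sketch of that lookup/reload/retry pattern; all names and values are invented, and this is not the bootstrap code itself:

```c
#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64

struct typmap { char name[NAMEDATALEN]; unsigned oid; };

/* Stand-in for the catalog cache and its (re)population. */
static struct typmap cache[8];
static int cache_len;

static void populate_typ_list(void)
{
    /* The second population "discovers" a composite type added after the first. */
    static int pass;
    strcpy(cache[0].name, "int4"); cache[0].oid = 23;
    cache_len = 1;
    if (pass++ > 0) {
        strcpy(cache[1].name, "pg_proc"); cache[1].oid = 81;
        cache_len = 2;
    }
}

static unsigned gettype(const char *type)
{
    for (int attempt = 0; attempt < 2; attempt++) {
        for (int i = 0; i < cache_len; i++)
            if (strncmp(cache[i].name, type, NAMEDATALEN) == 0)
                return cache[i].oid;
        /* Miss: rebuild the cache and retry the plain lookup; never recurse. */
        populate_typ_list();
    }
    return 0; /* still unknown */
}

int main(void)
{
    populate_typ_list();
    printf("int4    -> %u\n", gettype("int4"));
    printf("pg_proc -> %u (found after reload)\n", gettype("pg_proc"));
    printf("nosuch  -> %u\n", gettype("nosuch"));
    return 0;
}
```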
VERSION bump to version 0.10.12
@@ -28,7 +28,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 10) -set(LIBNETCONF2_MICRO_VERSION 11) +set(LIBNETCONF2_MICRO_VERSION 12) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
jna: do not search for java, rely on working maven fix
-find_package (Java) find_program (MAVEN_EXECUTABLE mvn) -if (Java_FOUND) # set by find_package -if (Java_VERSION_MAJOR GREATER 0 AND Java_VERSION_MINOR GREATER 7) if (MAVEN_EXECUTABLE) # set by find_program file ( MAKE_DIRECTORY libelektra4j @@ -52,9 +49,3 @@ if (MAVEN_EXECUTABLE) # set by find_program else (MAVEN_EXECUTABLE) remove_binding (jna "Maven Executable not found, but required to build the jna bindings") endif (MAVEN_EXECUTABLE) -else (Java_VERSION_MAJOR GREATER 0 AND Java_VERSION_MINOR GREATER 7) - remove_binding (jna "Java is not greater version 8, but ${Java_VERSION} was found") -endif (Java_VERSION_MAJOR GREATER 0 AND Java_VERSION_MINOR GREATER 7) -else (Java_FOUND) - remove_binding (jna "Java not found, but required to build the jna bindings") -endif (Java_FOUND)
tneat: 1 flow as default
@@ -37,7 +37,7 @@ static uint16_t config_mode = 0; static uint16_t config_chargen_offset = 0; static uint16_t config_port = 23232; static uint16_t config_log_level = 1; -static uint16_t config_num_flows = 10; +static uint16_t config_num_flows = 1; static uint16_t config_max_flows = 100; static uint16_t config_max_server_runs = 0; static uint32_t config_low_watermark = 0;