message
stringlengths
6
474
diff
stringlengths
8
5.22k
link libpocl-devices-ttasim.so with -pthread
@@ -33,7 +33,7 @@ install(FILES "tta_device_main.c" add_pocl_device_library(pocl-devices-ttasim tce_common.h tce_common.cc ttasim/ttasim.h ttasim/ttasim.cc) if(ENABLE_LOADABLE_DRIVERS) - target_link_libraries(pocl-devices-ttasim PRIVATE ${TCE_LIBS}) + target_link_libraries(pocl-devices-ttasim PRIVATE ${TCE_LIBS} ${PTHREAD_LIBRARY}) endif() if(MSVC)
Clean up uniqmap by key instead of iteratating over the whole table.
@@ -2551,6 +2551,7 @@ static void free_key_by_type (GKHashMetric mtrc, uint32_t key) { khiter_t k; void *list = NULL; + bitmap *bm = NULL; char *value = NULL; switch (mtrc.type) { @@ -2558,6 +2559,14 @@ free_key_by_type (GKHashMetric mtrc, uint32_t key) { k = kh_get (ii32, mtrc.ii32, key); kh_del (ii32, mtrc.ii32, k); break; + case MTRC_TYPE_BTMP: + k = kh_get (btmp, mtrc.btmp, key); + if (k != kh_end (mtrc.btmp) && (bm = kh_val (mtrc.btmp, k))) { + free (bm->bmp); + free (bm); + kh_del (btmp, mtrc.btmp, k); + } + break; case MTRC_TYPE_IS32: k = kh_get (is32, mtrc.is32, key); if (k != kh_end (mtrc.is32) && (value = kh_val (mtrc.is32, k))) { @@ -2622,41 +2631,12 @@ free_record_from_partial_key (GModule module, const char *key) { return 0; } -static int -free_partial_match_uniqmap (GModule module, uint32_t uniq_nkey) { - khiter_t k; - khash_t (si32) * hash = get_hash (module, MTRC_UNIQMAP); - char *key = NULL, *p = NULL; - int len = 0; - - if (!hash) - return -1; - - key = u322str (uniq_nkey, 0); - len = strlen (key); - - for (k = kh_begin (hash); k != kh_end (hash); ++k) { - if (!kh_exist (hash, k) || (!(p = strstr (kh_key (hash, k), key)))) - continue; - p += len; - if (*p != '|') - continue; - free ((char *) kh_key (hash, k)); - kh_del (si32, hash, k); - } - free (key); - - return 0; -} - int clean_partial_match_hashes (int date) { - GModule module; khash_t (si32) * hash = ht_unique_keys; khiter_t k; char *p = NULL, *key = NULL; int len = 0; - size_t idx = 0; if (!hash) return -1; @@ -2670,12 +2650,6 @@ clean_partial_match_hashes (int date) { if (*p != '|') continue; - idx = 0; - FOREACH_MODULE (idx, module_list) { - module = module_list[idx]; - free_partial_match_uniqmap (module, kh_value (hash, k)); - } - free ((char *) kh_key (hash, k)); kh_del (si32, hash, k); }
erc20: add Multi-Collateral DAI and USDC Change Single-Collateral DAI to SAI Add Multi-Collateral DAI Add USDC
@@ -67,6 +67,13 @@ static const app_eth_erc20_params_t _erc20_params[] = { "\xda\xc1\x7f\x95\x8d\x2e\xe5\x23\xa2\x20\x62\x06\x99\x45\x97\xc1\x3d\x83\x1e\xc7", .decimals = 6, }, + { + .coin = ETHCoin_ETH, + .unit = "USDC", + .contract_address = + "\xa0\xb8\x69\x91\xc6\x21\x8b\x36\xc1\xd1\x9d\x4a\x2e\x9e\xb0\xce\x36\x06\xeb\x48", + .decimals = 6, + }, { .coin = ETHCoin_ETH, .unit = "LINK", @@ -97,11 +104,18 @@ static const app_eth_erc20_params_t _erc20_params[] = { }, { .coin = ETHCoin_ETH, - .unit = "DAI", + .unit = "SAI", .contract_address = "\x89\xd2\x4a\x6b\x4c\xcb\x1b\x6f\xaa\x26\x25\xfe\x56\x2b\xdd\x9a\x23\x26\x03\x59", .decimals = 18, }, + { + .coin = ETHCoin_ETH, + .unit = "DAI", + .contract_address = + "\x6b\x17\x54\x74\xe8\x90\x94\xc4\x4d\xa9\x8b\x95\x4e\xed\xea\xc4\x95\x27\x1d\x0f", + .decimals = 18, + }, }; const app_eth_erc20_params_t* app_eth_erc20_params_get(
decision: clarifications for query
@@ -139,6 +139,7 @@ We could just point the plugin to `backendsData->keys` or the internal cache if ### Provide an API within `libelektra-kdb` The API should be useable both by plugins and applications utilizing ELektra. +It does not matter whether the changetracking is implemented as part of `libelektra-kdb` or as a seperate plugin. The API may look something like this: ```c @@ -147,19 +148,30 @@ ChangeTrackingContext * elektraChangeTrackingGetContext (KDB * kdb, Key * parent KeySet * elektraChangeTrackingGetAddedKeys (ChangeTrackingContext * context); KeySet * elektraChangeTrackingGetRemovedKeys (ChangeTrackingContext * context); -KeySet * elektraChangeTrackingGetModifiedKeys (ChangeTrackingContext * context); +KeySet * elektraChangeTrackingGetModifiedKeys (ChangeTrackingContext * context); // Returns old keys (pre-modification) bool elektraChangeTrackingValueChanged (ChangeTrackingContext * context, Key * key); bool elektraChangeTrackingMetaChanged (ChangeTrackingContext * context, Key * key); KeySet * elektraChangeTrackingGetAddedMetaKeys (ChangeTrackingContext * context, Key * key); KeySet * elektraChangeTrackingGetRemovedMetaKeys (ChangeTrackingContext * context, Key * key); -KeySet * elektraChangeTrackingGetModifiedMetaKeys (ChangeTrackingContext * context, Key * key); - -Key * elektraChangeTrackingGetOriginalKey (ChangeTrackingContext * context, Key * key); -const Key * elektraChangeTrackingGetOriginalMetaKey (ChangeTrackingContext * context, Key * key, const char * metaName); +KeySet * elektraChangeTrackingGetModifiedMetaKeys (ChangeTrackingContext * context, Key * key); // Returns old meta keys (pre-modification) ``` +### Provide query methods as part of a seperat plugin + +This solution only makes sense if changetrackig is implemented as part of a seperate plugin. +It will be a bit challenging to use for applications, as it would require that applications have access to the plugin contracts. 
+ +The changetracking plugin needs to export at least functions for the following things in its contract: + +- Get added keys +- Get removed keys +- Get modified keys +- Get added meta keys for a key +- Get removed meta keys for a key +- Get modified meta keys for a key + ## Decision ## Rationale
Homebridge script: Don't write config file if homebridge pin is missing
@@ -428,11 +428,6 @@ function checkHomebridge { fi fi else - # create homebridge dir and config and add Mainuser ownership - mkdir /home/$MAINUSER/.homebridge - touch /home/$MAINUSER/.homebridge/config.json - chown -R $MAINUSER /home/$MAINUSER/.homebridge - RC=1 while [ $RC -ne 0 ]; do HOMEBRIDGE_PIN=$(sqlite3 $ZLLDB "select value from config2 where key='homebridge-pin'") @@ -442,6 +437,18 @@ function checkHomebridge { sleep 2 fi done + + if [ -z "$HOMEBRIDGE_PIN" ]; then + [[ $LOG_DEBUG ]] && echo "${LOG_DEBUG}homebridge-pin is empty. Trying to get new one." + TIMEOUT=2 + return + fi + + # create homebridge dir and config and add Mainuser ownership + mkdir /home/$MAINUSER/.homebridge + touch /home/$MAINUSER/.homebridge/config.json + chown -R $MAINUSER /home/$MAINUSER/.homebridge + local HB_PIN="${HOMEBRIDGE_PIN:0:3}-${HOMEBRIDGE_PIN:3:2}-${HOMEBRIDGE_PIN:5:3}" echo "{ \"bridge\": {
noHDK_Makefile: config call now copy
@@ -107,15 +107,9 @@ copy $(BUILD_DIR)/Checkpoint/b_route_design.dcp: @ln -s $(BUILD_NAME) build; @ln -s $(DONUT_HARDWARE_ROOT)/setup/noHDK_build.tcl $(BUILD_DIR)/noHDK_build.tcl; @ln -s $(DONUT_HARDWARE_ROOT)/setup/fw_xpr_build.tcl $(BUILD_DIR)/fw_xpr_build.tcl; -<<<<<<< HEAD @cp -p $(PSL_DCP) $(BUILD_DIR)/Checkpoint/; @if [ $(FPGACARD) = "KU3" ]; then ln -f -s $(DONUT_HDL_CORE)/psl_fpga_ku3.vhd_source $(DONUT_HDL_CORE)/psl_fpga.vhd_source; fi @if [ $(FPGACARD) = "FGT" ]; then ln -f -s $(DONUT_HDL_CORE)/psl_fpga_fgt.vhd_source $(DONUT_HDL_CORE)/psl_fpga.vhd_source; fi -======= - @cp -ra $(PSL_DCP) $(BUILD_DIR)/Checkpoint/; - @if [ $(FPGACARD) = "KU3" ]; then ln -f -s $(DONUT_HARDWARE_ROOT)/hdl/core/psl_fpga_ku3.vhd_source $(DONUT_HARDWARE_ROOT)/hdl/core/psl_fpga.vhd_source; fi - @if [ $(FPGACARD) = "FGT" ]; then ln -f -s $(DONUT_HARDWARE_ROOT)/hdl/core/psl_fpga_fgt.vhd_source $(DONUT_HARDWARE_ROOT)/hdl/core/psl_fpga.vhd_source; fi ->>>>>>> noHDK_Makefile supports own psl_fpga %.vhd: %.vhd_source @echo "Creating $@"; $(DONUT_HARDWARE_ROOT)/setup/snap_config.sh $@_source $@
commented debug code.
@@ -54,7 +54,7 @@ uint8_t vcp_getch(void) { uint8_t ch = CDC_Itf_Getch(); - drv_uart_write(DRV_UART_NUM_4, ch); + //drv_uart_write(DRV_UART_NUM_4, ch); //for debugging return ch; }
Test more sqrt corner cases
@@ -1095,6 +1095,7 @@ describe("Pallene coder /", function() assert(2.0 == test.square_root(4.0)) assert(3.0 == test.square_root(9.0)) assert(4.0 == test.square_root(16.0)) + assert(math.huge == test.square_root(math.huge)) ]]) end) @@ -1104,6 +1105,13 @@ describe("Pallene coder /", function() assert(x ~= x) ]]) end) + + it("returns NaN on NaN", function() + run_test([[ + local x = test.square_root(0.0 / 0.0) + assert(x ~= x) + ]]) + end) end) describe("value", function()
Add link to C++ API documentation; Add Ubuntu Focal Fossa
@@ -7,7 +7,8 @@ A list of supported Zigbee devices can be found on the [Supported Devices](https To communicate with Zigbee devices the [RaspBee](https://phoscon.de/raspbee?ref=gh) / [RaspBee&nbsp;II](https://phoscon.de/raspbee2?ref=gh) Zigbee shield for Raspberry Pi, or a [ConBee](https://phoscon.de/conbee?ref=gh) / [ConBee&nbsp;II](https://phoscon.de/conbee2?ref=gh) USB dongle is required. -To learn more about the REST-API itself please visit the [REST-API Documentation](http://dresden-elektronik.github.io/deconz-rest-doc/) page. +To learn more about the REST-API itself please visit the [REST-API Documentation](http://dresden-elektronik.github.io/deconz-rest-doc/) page.<br> +The REST-API plugin is implemented in C++ using the [deCONZ C++ API Documentation](https://phoscon.de/deconz-cpp). For community based support with deCONZ or Phoscon, please visit the [deCONZ Discord server](https://discord.gg/QFhTxqN). @@ -30,7 +31,7 @@ Installation ##### Supported platforms * Raspbian Jessie, Stretch and Buster -* Ubuntu Xenial and Bionic (AMD64) +* Ubuntu Xenial, Bionic and Focal Fossa (AMD64) * Windows 7 and 10 ### Install deCONZ
README.md: Use version 2.05.45
@@ -44,11 +44,11 @@ https://github.com/dresden-elektronik/deconz-rest-plugin/releases 1. Download deCONZ package - wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.44-qt5.deb + wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.45-qt5.deb 2. Install deCONZ package - sudo dpkg -i deconz-2.05.44-qt5.deb + sudo dpkg -i deconz-2.05.45-qt5.deb **Important** this step might print some errors *that's ok* and will be fixed in the next step. @@ -63,11 +63,11 @@ The deCONZ package already contains the REST API plugin, the development package 1. Download deCONZ development package - wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.44.deb + wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.45.deb 2. Install deCONZ development package - sudo dpkg -i deconz-dev-2.05.44.deb + sudo dpkg -i deconz-dev-2.05.45.deb 3. Install missing dependencies @@ -82,7 +82,7 @@ The deCONZ package already contains the REST API plugin, the development package 2. Checkout related version tag cd deconz-rest-plugin - git checkout -b mybranch V2_05_44 + git checkout -b mybranch V2_05_45 3. Compile the plugin
Accept line feed to complete input of passphrase
@@ -1840,7 +1840,8 @@ HandleParsingLibDestructor( return 0; } #define MAX_PROMT_INPUT_SZ 1024 -#define RETURN_KEY 13 +#define RETURN_KEY 0xD +#define LINE_FEED 0xA /** Prompted input request @@ -1878,7 +1879,7 @@ PromptedInput( for (PromptIndex = 0; PromptIndex < MAX_PROMT_INPUT_SZ; ++PromptIndex) { buff[PromptIndex] = _getch(); - if (RETURN_KEY == buff[PromptIndex]) + if (RETURN_KEY == buff[PromptIndex] || LINE_FEED == buff[PromptIndex]) break; } VOID * ptr = AllocateZeroPool(MAX_PROMT_INPUT_SZ);
zephyr/include/ap_power/ap_power_interface.h: Format with clang-format BRANCH=none TEST=none
@@ -93,11 +93,11 @@ enum ap_power_state_mask { AP_POWER_STATE_ON = BIT(3), /* On (S0) */ AP_POWER_STATE_STANDBY = BIT(4), /* Standby (S0ix) */ /* Common combinations, any off state */ - AP_POWER_STATE_ANY_OFF = (AP_POWER_STATE_HARD_OFF | - AP_POWER_STATE_SOFT_OFF), + AP_POWER_STATE_ANY_OFF = + (AP_POWER_STATE_HARD_OFF | AP_POWER_STATE_SOFT_OFF), /* This combination covers any kind of suspend i.e. S3 or S0ix. */ - AP_POWER_STATE_ANY_SUSPEND = (AP_POWER_STATE_SUSPEND | - AP_POWER_STATE_STANDBY), + AP_POWER_STATE_ANY_SUSPEND = + (AP_POWER_STATE_SUSPEND | AP_POWER_STATE_STANDBY), }; /**
fix error: variable 'len' is used uninitialized when NDEBUG is defined
@@ -357,6 +357,7 @@ ble_hci_sock_cmdevt_tx(uint8_t *hci_ev, uint8_t h4_type) STATS_INC(hci_sock_stats, oevt); } else { assert(0); + return BLE_ERR_UNKNOWN_HCI_CMD; } iov[1].iov_len = len;
fix build for Android with USE_STL_SYSTEM=yes
@@ -7,8 +7,12 @@ NO_PLATFORM() ADDINCL(GLOBAL contrib/libs/cxxsupp/system_stl/include) IF (NOT OS_IOS AND NOT OS_DARWIN) + IF (NOT OS_ANDROID) LDFLAGS( -lgcc_s + ) + ENDIF() + LDFLAGS( -lstdc++ ) ELSE()
MIT license, 2011 - 2018.
-Copyright (c) 2011-2017 Jesse Ray Adkins +Copyright (c) 2011-2018 Jesse Adkins (FascinatedBox) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
ssl_tls13_generic.c: adapt guards for MBEDTLS_SHAxxx_C
@@ -1388,7 +1388,7 @@ int mbedtls_ssl_reset_transcript_for_hrr( mbedtls_ssl_context *ssl ) if( ciphersuite_info->mac == MBEDTLS_MD_SHA256 ) { -#if defined(MBEDTLS_SHA256_C) +#if defined(MBEDTLS_HAS_ALG_SHA_256_VIA_MD_OR_PSA_BASED_ON_USE_PSA) MBEDTLS_SSL_DEBUG_BUF( 4, "Truncated SHA-256 handshake transcript", hash_transcript, hash_len ); @@ -1398,11 +1398,11 @@ int mbedtls_ssl_reset_transcript_for_hrr( mbedtls_ssl_context *ssl ) #else mbedtls_sha256_starts( &ssl->handshake->fin_sha256, 0 ); #endif -#endif /* MBEDTLS_SHA256_C */ +#endif /* MBEDTLS_HAS_ALG_SHA_256_VIA_MD_OR_PSA_BASED_ON_USE_PSA */ } else if( ciphersuite_info->mac == MBEDTLS_MD_SHA384 ) { -#if defined(MBEDTLS_SHA384_C) +#if defined(MBEDTLS_HAS_ALG_SHA_384_VIA_MD_OR_PSA_BASED_ON_USE_PSA) MBEDTLS_SSL_DEBUG_BUF( 4, "Truncated SHA-384 handshake transcript", hash_transcript, hash_len ); @@ -1412,12 +1412,12 @@ int mbedtls_ssl_reset_transcript_for_hrr( mbedtls_ssl_context *ssl ) #else mbedtls_sha512_starts( &ssl->handshake->fin_sha384, 1 ); #endif -#endif /* MBEDTLS_SHA384_C */ +#endif /* MBEDTLS_HAS_ALG_SHA_384_VIA_MD_OR_PSA_BASED_ON_USE_PSA */ } -#if defined(MBEDTLS_SHA256_C) || defined(MBEDTLS_SHA384_C) +#if defined(MBEDTLS_HAS_ALG_SHA_256_VIA_MD_OR_PSA_BASED_ON_USE_PSA) || defined(MBEDTLS_HAS_ALG_SHA_384_VIA_MD_OR_PSA_BASED_ON_USE_PSA) ssl->handshake->update_checksum( ssl, hash_transcript, hash_len ); -#endif /* MBEDTLS_SHA256_C || MBEDTLS_SHA384_C */ +#endif /* MBEDTLS_HAS_ALG_SHA_256_VIA_MD_OR_PSA_BASED_ON_USE_PSA || MBEDTLS_HAS_ALG_SHA_384_VIA_MD_OR_PSA_BASED_ON_USE_PSA */ return( ret ); }
Remove requirement to wait 4 intervals before closing
@@ -250,7 +250,6 @@ typedef struct st_client_loop_cb_t { int key_update_done; int zero_rtt_available; int is_siduck; - int client_ready_loop; char const* saved_alpn; struct sockaddr_storage server_address; struct sockaddr_storage client_address; @@ -400,9 +399,7 @@ int client_loop_cb(picoquic_quic_t* quic, picoquic_packet_loop_cb_enum cb_mode, } } - cb_ctx->client_ready_loop++; - - if (!cb_ctx->is_siduck && cb_ctx->client_ready_loop > 4 && cb_ctx->demo_callback_ctx->nb_open_streams == 0) { + if (!cb_ctx->is_siduck && cb_ctx->demo_callback_ctx->nb_open_streams == 0) { fprintf(stdout, "All done, Closing the connection.\n"); picoquic_log_app_message(cb_ctx->cnx_client, "%s", "All done, Closing the connection.");
assertions in ++beam-dents-in-dir
^- (set dent) ?> =(%z ren) :: only %z supported :: + =/ folder=dent [%beam bem ren] + :: + ?. (~(has by sup.a) folder) :: state must have bem + ~&(missing-folder+folder !!) + :: + =- ?. (~(has in -) folder) :: result must keep bem + ~&([folder+folder missing-from+-] !!) + - + :: %- silt %+ skim ~(tap in ~(key by sup.a)) |= den=dent :: :: match the dent tag and beak exactly, and match the tops of the spurs - .= [%beam bem ren] :: a dent representing the folder + .= folder :: a dent representing the folder den(s.bem (flop (scag (lent s.bem) (flop s.bem.den)))) -- ::
docs: Fix BPF_HISTGRAM typo in reference guide Fix typo in reference guide, should be BPF_HISTOGRAM.
@@ -869,7 +869,7 @@ Maps are BPF data stores, and are the basis for higher level object types includ Syntax: ```BPF_TABLE(_table_type, _key_type, _leaf_type, _name, _max_entries)``` -Creates a map named ```_name```. Most of the time this will be used via higher-level macros, like BPF_HASH, BPF_ARRAY, BPF_HISTGRAM, etc. +Creates a map named ```_name```. Most of the time this will be used via higher-level macros, like BPF_HASH, BPF_ARRAY, BPF_HISTOGRAM, etc. `BPF_F_TABLE` is a variant that takes a flag in the last parameter. `BPF_TABLE(...)` is actually a wrapper to `BPF_F_TABLE(..., 0 /* flag */)`.
Added brief instructions to the CONTRIBUTE.md file.
@@ -18,7 +18,8 @@ Things to do if you are adding new features to the CCL C lib Makefile.am. 3. When adding a new header file (.h), put it in include. The new file should be listed under include_HEADERS in - include/Makefile.am + include/Makefile.am. It should also have doxygen + compatible documentation. 4. When adding new unit test files, they should be listed under check_ccl_SOURCES in Makefile.am 5. Any other new files that should be included with the @@ -45,6 +46,9 @@ More autotools fun: can be created automatically by typing: $> make dist +To view the doxygen documentation, open any .html file in the html/ +directory. To refresh the docs to reflect new changes, run +`doxygen` in the main directory (assuming you already have it installed). Modifying the Python wrapper ---------------------------------------------------------
Examples: Fix memory leaks and clean up comments
! below might not work directly for other types of TEMP messages than the one used in the ! example. It is advised to use bufr_dump first to understand the structure of these messages. ! -program bufr_read_temp +program bufr_read_tempf use eccodes implicit none integer :: ifile @@ -44,11 +44,11 @@ program bufr_read_temp call codes_open_file(ifile, '../../data/bufr/PraticaTemp.bufr', 'r') - ! the first bufr message is loaded from file - ! ibufr is the bufr id to be used in subsequent calls + ! the first BUFR message is loaded from file + ! ibufr is the BUFR id to be used in subsequent calls call codes_bufr_new_from_file(ifile, ibufr, iret) - ! do while (iret/=CODES_END_OF_FILE) + ! loop through all messages in the file do while (iret /= CODES_END_OF_FILE .AND. status_time == CODES_SUCCESS) ! we need to instruct ecCodes to expand all the descriptors @@ -57,7 +57,6 @@ program bufr_read_temp ! In our BUFR message verticalSoundingSignificance is always followed by ! geopotential, airTemperature, dewpointTemperature, ! windDirection, windSpeed and pressure. - ! count = count + 1 llskip = .False. @@ -157,21 +156,21 @@ program bufr_read_temp wdirVal(i), wspVal(i), INT(vssVal(i)), Note end do - ! free arrays + ! free allocated arrays deallocate (dlatVal, dlonVal, vssVal) deallocate (presVal, zVal, tVal, tdVal, wdirVal, wspVal) + deallocate (lat, lon) END IF IF (ALLOCATED(timeVal)) deallocate (timeVal) - ! release the bufr message + ! release the BUFR message call codes_release(ibufr) - ! load the next bufr message + ! load the next BUFR message call codes_bufr_new_from_file(ifile, ibufr, iret) end do - ! close file call codes_close_file(ifile) -end program bufr_read_temp +end program bufr_read_tempf
kukui_scp: Let the AP setup the UART pinmux correctly This helps make our images compatible between P1 and P2. BRANCH=none TEST=Flash kukui_scp on P1 and P2, UART works in both cases.
#ifndef __CROS_EC_BOARD_H #define __CROS_EC_BOARD_H -/* board revision */ -#define BOARD_REV 2 - -#if BOARD_REV < 1 || BOARD_REV > 2 -#error "Board revision out of range" -#endif - #define CONFIG_FLASH_SIZE 0x40000 /* Image file size: 256KB */ #undef CONFIG_LID_SWITCH #undef CONFIG_FW_INCLUDE_RO * 2 - share with AP UART0 */ #define CONFIG_UART_CONSOLE 0 -#if BOARD_REV <= 1 -#define UART0_PINMUX_11_12 -#undef UART0_PINMUX_110_112 -#else + +/* We let AP setup the correct pinmux. */ #undef UART0_PINMUX_11_12 -#define UART0_PINMUX_110_112 -#endif +#undef UART0_PINMUX_110_112 /* * Allow dangerous commands all the time, since we don't have a write protect
Version: Fix minor spelling mistakes
@@ -24,7 +24,7 @@ and is described in [src/plugins/doc/doc.c](/src/plugins/doc/doc.c). ## Compatibility This section describes under which circumstances API -and ABI incompatiblities may occur. As developer from +and ABI incompatibilities may occur. As developer from Elektra your mission is to avoid that. The tool icheck against the interfaces mentioned above may help you too. @@ -33,7 +33,7 @@ In `0.8.*` the API and ABI must be always forward-compatible, but not backwards-compatible. That means that a program written and compiled against 0.8.0 compiles and links against 0.8.1. But because it is -not necessarily backendwards-compatible a program written +not necessarily backwards-compatible a program written for 0.8.1 may not link or compile against elektra 0.8.0 (but it may do when you use the compatible subset, maybe with #ifdefs).
Added new convert for completeness
@@ -241,6 +241,7 @@ void libxsmm_generator_cvtfp32bf16_avx512_microkernel( libxsmm_generated_code* } /* Downconvert to BF16 */ + if (!((use_m_masking == 1) && (im == m_trips-1) && (m % 32 <= 16))) { libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNE2PS2BF16, @@ -248,6 +249,15 @@ void libxsmm_generator_cvtfp32bf16_avx512_microkernel( libxsmm_generated_code* reg_0, reg_1, reg_0, 0); + } else { + libxsmm_x86_instruction_vec_compute_convert( io_generated_code, + i_micro_kernel_config->instruction_set, + LIBXSMM_X86_INSTR_VCVTNEPS2BF16, + i_micro_kernel_config->vector_name, + reg_0, LIBXSMM_X86_VEC_REG_UNDEF, + reg_0, + 0); + } /* Store the result */ libxsmm_x86_instruction_vec_move( io_generated_code,
test write predictions to streams
@@ -5965,3 +5965,47 @@ def test_train_on_quantized_pool_with_large_grid(): '-i', '10') yatest.common.execute(cmd) + +def test_write_predictions_to_streams(): + output_model_path = yatest.common.test_output_path('model.bin') + output_eval_path = yatest.common.test_output_path('test.eval') + calc_output_eval_path_redirected = yatest.common.test_output_path('calc_test.eval') + + cmd = ( + CATBOOST_PATH, + 'fit', + '-f', data_file('adult', 'train_small'), + '-t', data_file('adult', 'test_small'), + '--eval-file', output_eval_path, + '--column-description', data_file('adult', 'train.cd'), + '-i', '10', + '-m', output_model_path + ) + yatest.common.execute(cmd) + + calc_cmd = ( + CATBOOST_PATH, + 'calc', + '--input-path', data_file('adult', 'test_small'), + '--column-description', data_file('adult', 'train.cd'), + '-m', output_model_path, + '--output-path', 'stream://stdout', + '>', calc_output_eval_path_redirected + ) + yatest.common.execute(calc_cmd) + + assert filecmp.cmp(output_eval_path, calc_output_eval_path_redirected) + + calc_cmd = ( + CATBOOST_PATH, + 'calc', + '--input-path', data_file('adult', 'test_small'), + '--column-description', data_file('adult', 'train.cd'), + '-m', output_model_path, + '--output-path', 'stream://stderr', + '2>', calc_output_eval_path_redirected + ) + yatest.common.execute(calc_cmd) + + assert filecmp.cmp(output_eval_path, calc_output_eval_path_redirected) + \ No newline at end of file
zonemd, fix to harden against failure in pickup zonemd lookups.
@@ -8171,6 +8171,8 @@ void auth_zones_pickup_zonemd_verify(struct auth_zones* az, key.namelen = savezname_len; key.name = savezname; z = (struct auth_zone*)rbtree_search(&az->ztree, &key); + if(!z) + break; } lock_rw_unlock(&az->lock); }
chat-store: immediate handle-read on our %message
?- -.action %create (handle-create action) %delete (handle-delete action) - %message (handle-message action) - %messages (handle-messages action) %read (handle-read action) + %messages (handle-messages action) + %message + ?. =(our.bol author.envelope.action) + (handle-message action) + =^ message-moves state (handle-message action) + =^ read-moves state (handle-read [%read path.action]) + [(weld message-moves read-moves) state] == :: ++ handle-create
fix assertion, add check for page committed before doing reset
@@ -231,6 +231,7 @@ static void mi_segment_protect(mi_segment_t* segment, bool protect, mi_os_tld_t* ----------------------------------------------------------- */ static void mi_page_reset(mi_segment_t* segment, mi_page_t* page, size_t size, mi_segments_tld_t* tld) { + mi_assert_internal(page->is_committed); if (!mi_option_is_enabled(mi_option_page_reset)) return; if (segment->mem_is_fixed || page->segment_in_use || page->is_reset) return; size_t psize; @@ -330,7 +331,7 @@ static void mi_pages_reset_remove_all_in_segment(mi_segment_t* segment, bool for if (segment->mem_is_fixed) return; // never reset in huge OS pages for (size_t i = 0; i < segment->capacity; i++) { mi_page_t* page = &segment->pages[i]; - if (!page->segment_in_use && !page->is_reset) { + if (!page->segment_in_use && page->is_committed && !page->is_reset) { mi_pages_reset_remove(page, tld); if (force_reset) { mi_page_reset(segment, page, 0, tld); @@ -544,9 +545,13 @@ void _mi_segment_thread_collect(mi_segments_tld_t* tld) { } mi_assert_internal(tld->cache_count == 0); mi_assert_internal(tld->cache == NULL); +#if MI_DEBUG>=2 + if (!_mi_is_main_thread()) { mi_assert_internal(tld->pages_reset.first == NULL); mi_assert_internal(tld->pages_reset.last == NULL); } +#endif +} /* -----------------------------------------------------------
nvhw/pgraph: Get rid of one usage of nv01_pgraph_expand_surf.
@@ -225,6 +225,14 @@ struct pgraph_color nv01_pgraph_expand_surf(struct pgraph_state *state, uint32_t return res; } +uint32_t nv01_pgraph_upconvert_r5g5b5(struct pgraph_state *state, uint32_t pixel) { + int factor = extr(state->canvas_config, 20, 1) ? 0x21 : 0x20; + uint16_t r = extr(pixel, 10, 5) * factor; + uint16_t g = extr(pixel, 5, 5) * factor; + uint16_t b = extr(pixel, 0, 5) * factor; + return r << 20 | g << 10 | b; +} + struct pgraph_color nv03_pgraph_expand_surf(int fmt, uint32_t pixel) { struct pgraph_color res; res.i16 = pixel & 0xffff; @@ -622,8 +630,6 @@ uint32_t nv01_pgraph_rop(struct pgraph_state *state, int x, int y, uint32_t pixe int blend_en = op >= 0x18; bool dither = extr(state->canvas_config, 16, 1); int mode; - uint32_t src; - uint32_t dst; if (cpp == 1 || (s.mode == COLOR_MODE_Y8 && !expand_y8 && !blend_en)) { mode = COLOR_MODE_Y8; } else if (s.mode == COLOR_MODE_RGB5 && cpp == 2) { @@ -633,20 +639,24 @@ uint32_t nv01_pgraph_rop(struct pgraph_state *state, int x, int y, uint32_t pixe } else { mode = COLOR_MODE_RGB10; } - struct pgraph_color d = nv01_pgraph_expand_surf(state, pixel); uint32_t mask; + uint32_t src; + uint32_t dst; if (mode == COLOR_MODE_Y8) { - src = s.i; - dst = pixel & 0xff; mask = 0xff; + src = s.i; + dst = pixel & mask; } else if (mode == COLOR_MODE_RGB5) { - src = extr(s.r, 5, 5) << 10 | extr(s.g, 5, 5) << 5 | extr(s.b, 5, 5); - dst = pixel & 0x7fff; mask = 0x7fff; + src = extr(s.r, 5, 5) << 10 | extr(s.g, 5, 5) << 5 | extr(s.b, 5, 5); + dst = pixel & mask; } else { - src = s.r << 20 | s.g << 10 | s.b; - dst = d.r << 20 | d.g << 10 | d.b; mask = 0x3fffffff; + src = s.r << 20 | s.g << 10 | s.b; + if (cpp == 2) + dst = nv01_pgraph_upconvert_r5g5b5(state, pixel); + else + dst = pixel & mask; } if (!s.a) return pixel;
Fix chip/imxrt_lpi2c.c:755:1: error: unused function 'imxrt_lpi2c_sem_waitstop'
@@ -243,8 +243,6 @@ static uint32_t imxrt_lpi2c_toticks(int msgc, struct i2c_msg_s *msgs); static inline int imxrt_lpi2c_sem_waitdone(struct imxrt_lpi2c_priv_s *priv); -static inline void -imxrt_lpi2c_sem_waitstop(struct imxrt_lpi2c_priv_s *priv); #ifdef CONFIG_I2C_TRACE static void imxrt_lpi2c_tracereset(struct imxrt_lpi2c_priv_s *priv); @@ -743,95 +741,6 @@ imxrt_lpi2c_sem_waitdone(struct imxrt_lpi2c_priv_s *priv) } #endif -/**************************************************************************** - * Name: imxrt_lpi2c_sem_waitstop - * - * Description: - * Wait for a STOP to complete - * - ****************************************************************************/ - -static inline void -imxrt_lpi2c_sem_waitstop(struct imxrt_lpi2c_priv_s *priv) -{ - clock_t start; - clock_t elapsed; - clock_t timeout; - uint32_t regval; - - /* Select a timeout */ - -#ifdef CONFIG_IMXRT_LPI2C_DYNTIMEO - timeout = USEC2TICK(CONFIG_IMXRT_LPI2C_DYNTIMEO_STARTSTOP); -#else - timeout = CONFIG_IMXRT_LPI2C_TIMEOTICKS; -#endif - - /* Wait as stop might still be in progress; but stop might also - * be set because of a timeout error: "The [STOP] bit is set and - * cleared by software, cleared by hardware when a Stop condition is - * detected, set by hardware when a timeout error is detected." 
- */ - - start = clock_systime_ticks(); - do - { - /* Calculate the elapsed time */ - - elapsed = clock_systime_ticks() - start; - - /* Check for STOP condition */ - - if (priv->config->mode == LPI2C_MASTER) - { - regval = imxrt_lpi2c_getreg(priv, IMXRT_LPI2C_MSR_OFFSET); - if ((regval & LPI2C_MSR_SDF) == LPI2C_MSR_SDF) - { - return; - } - } - - /* Enable Interrupts when slave mode */ - - else - { - regval = imxrt_lpi2c_getreg(priv, IMXRT_LPI2C_SSR_OFFSET); - if ((regval & LPI2C_SSR_SDF) == LPI2C_SSR_SDF) - { - return; - } - } - - /* Check for NACK error */ - - if (priv->config->mode == LPI2C_MASTER) - { - regval = imxrt_lpi2c_getreg(priv, IMXRT_LPI2C_MSR_OFFSET); - if ((regval & LPI2C_MSR_NDF) == LPI2C_MSR_NDF) - { - return; - } - } - - /* Enable Interrupts when slave mode */ - - else - { -#warning Missing logic for I2C Slave - } - } - - /* Loop until the stop is complete or a timeout occurs. */ - - while (elapsed < timeout); - - /* If we get here then a timeout occurred with the STOP condition - * still pending. - */ - - i2cinfo("Timeout with Status Register: %" PRIx32 "\n", regval); -} - /**************************************************************************** * Name: imxrt_dma_callback *
[mod_fastcgi] fix memleak with FastCGI auth,resp (fixes fix memleak in mod_fastcgi when FastCGI is used for both authentication and response on the same request (thx rschmid) x-ref: "Memory leak if two fcgi calls with one request (authentication and response)"
@@ -515,8 +515,13 @@ static handler_t fcgi_check_extension(server *srv, connection *con, void *p_d, i hctx->opts.pdata = hctx; hctx->stdin_append = fcgi_stdin_append; hctx->create_env = fcgi_create_env; + if (!hctx->rb) { hctx->rb = chunkqueue_init(); } + else { + chunkqueue_reset(hctx->rb); + } + } return HANDLER_GO_ON; }
Sets tls version to 1.2 to avoid use of weaker protocol versions
@@ -125,6 +125,7 @@ static long acvp_curl_http_get (ACVP_CTX *ctx, char *url, void *writefunc) { curl_easy_setopt(hnd, CURLOPT_URL, url); curl_easy_setopt(hnd, CURLOPT_NOPROGRESS, 1L); curl_easy_setopt(hnd, CURLOPT_USERAGENT, "curl/7.27.0"); + curl_easy_setopt(hnd, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); if (slist) { curl_easy_setopt(hnd, CURLOPT_HTTPHEADER, slist); } @@ -231,6 +232,7 @@ static long acvp_curl_http_post (ACVP_CTX *ctx, char *url, char *data, void *wri curl_easy_setopt(hnd, CURLOPT_POST, 1L); curl_easy_setopt(hnd, CURLOPT_POSTFIELDS, data); curl_easy_setopt(hnd, CURLOPT_POSTFIELDSIZE_LARGE, (curl_off_t) strlen(data)); + curl_easy_setopt(hnd, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1_2); //FIXME: we should always to TLS peer auth if (ctx->verify_peer && ctx->cacerts_file) { curl_easy_setopt(hnd, CURLOPT_CAINFO, ctx->cacerts_file);
display: header changes
@@ -150,9 +150,9 @@ static void display_displayLocked(honggfuzz_t* hfuzz) MX_SCOPED_LOCK(logMutexGet()); display_put(ESC_NAV(13, 1) ESC_CLEAR_ABOVE ESC_NAV(1, 1)); - display_put("------------------------------[ " ESC_BOLD "%s v%s" ESC_RESET - " ]-------------------------------\n", - PROG_NAME, PROG_VERSION); + display_put("------------------------- [ " ESC_BOLD "HONGGFUZZ" ESC_RESET " ] - [ " ESC_BOLD + "v%s" ESC_RESET " ] ----------------------------\n", + PROG_VERSION); display_put(" Iterations : " ESC_BOLD "%" _HF_MONETARY_MOD "zu" ESC_RESET, curr_exec_cnt); display_printKMG(curr_exec_cnt); if (hfuzz->mutationsMax) { @@ -270,8 +270,8 @@ static void display_displayLocked(honggfuzz_t* hfuzz) display_put(" #crashes: " ESC_BOLD "%" _HF_MONETARY_MOD PRIu64 ESC_RESET, ATOMIC_GET(hfuzz->sanCovCnts.crashesCnt)); } - display_put("\n-----------------------------------[ " ESC_BOLD "LOGS" ESC_RESET - " ]------------------------------------\n"); + display_put("\n---------------------------------- [ " ESC_BOLD "LOGS" ESC_RESET + " ] -----------------------------------\n"); display_put(ESC_SCROLL(14, 999) ESC_NAV(999, 1)); }
cvsd/autotest: testing algorithm to run in blocks of 8 samples
#include "autotest/autotest.h" #include "liquid.internal.h" -// -// AUTOTEST: check RMSE for CVSD -// -void autotest_cvsd_rmse_sine() { +// check RMS error +void autotest_cvsd_rmse_sine() +{ unsigned int n=256; unsigned int nbits=3; float zeta=1.5f; @@ -35,6 +34,7 @@ void autotest_cvsd_rmse_sine() { // create cvsd codecs cvsd cvsd_encoder = cvsd_create(nbits,zeta,alpha); cvsd cvsd_decoder = cvsd_create(nbits,zeta,alpha); + CONTEND_EQUALITY(cvsd_print(cvsd_encoder), LIQUID_OK); float phi=0.0f; float dphi=0.1f; @@ -61,6 +61,49 @@ void autotest_cvsd_rmse_sine() { cvsd_destroy(cvsd_decoder); } +// check RMS error running in blocks of 8 samples +void autotest_cvsd_rmse_sine8() +{ + unsigned int n=256; + unsigned int nbits=3; + float zeta=1.5f; + float alpha=0.90f; + + // create cvsd codecs + cvsd cvsd_encoder = cvsd_create(nbits,zeta,alpha); + cvsd cvsd_decoder = cvsd_create(nbits,zeta,alpha); + CONTEND_EQUALITY(cvsd_print(cvsd_encoder), LIQUID_OK); + + float phi=0.0f, dphi=0.1f; + float buf_0[8], buf_1[8]; + unsigned int i, j; + unsigned char byte; + float rmse=0.0f; + for (i=0; i<n; i++) { + // generate tone + for (j=0; j<8; j++) { + buf_0[j] = 0.5f*sinf(phi); + phi += dphi; + } + // encode/decode + cvsd_encode8(cvsd_encoder, buf_0, &byte); + cvsd_decode8(cvsd_decoder, byte, buf_1); + + // accumulate RMS error + for (j=0; j<8; j++) + rmse += (buf_0[j]-buf_1[j])*(buf_0[j]-buf_1[j]); + } + + rmse = 10*log10f(rmse/(n*8)); + if (liquid_autotest_verbose) + printf("cvsd rmse : %8.2f dB\n", rmse); + CONTEND_LESS_THAN(rmse, -20.0f); + + // destroy cvsd codecs + cvsd_destroy(cvsd_encoder); + cvsd_destroy(cvsd_decoder); +} + // configuration void autotest_cvsd_invalid_config() {
Fix markdown error.
## C library - Deprecated the `native` non-Limber angular power spectrum method (#506). -- Renamed `ccl\_lsst\_specs.c` to `ccl\_redshifts.c`, deprecated LSST-specific redshift distribution functionality, introduced user-defined true dNdz (changes in call signature of `ccl\_dNdz\_tomog`). (#528). +- Renamed `ccl_lsst_specs.c` to `ccl_redshifts.c`, deprecated LSST-specific redshift distribution functionality, introduced user-defined true dNdz (changes in call signature of `ccl_dNdz_tomog`). (#528). ## Python library -- Renamed `lsst\_specs.py` to `redshifts.py`, deprecated LSST-specific redshift distribution functionality, introduced user-defined true dNdz (changes in call signature of `dNdz\_tomog`). (#528). +- Renamed `lsst_specs.py` to `redshifts.py`, deprecated LSST-specific redshift distribution functionality, introduced user-defined true dNdz (changes in call signature of `dNdz_tomog`). (#528). - Deprecated the `native` non-Limber angular power spectrum method (#506). - Deprecated the `Parameters` object in favor of only the `Cosmology` object (#493). - Renamed the `ClTracer` family of objects (#496).
Add static scope to set_bucket_realloc and set_bucket_realloc_iterator.
@@ -86,7 +86,7 @@ size_t set_size(set s) return 0; } -int set_bucket_realloc_iterator(set s, set_key key, set_value value, set_cb_iterate_args args) +static int set_bucket_realloc_iterator(set s, set_key key, set_value value, set_cb_iterate_args args) { set new_set = (set)args; @@ -113,7 +113,7 @@ int set_bucket_realloc_iterator(set s, set_key key, set_value value, set_cb_iter return 1; } -int set_bucket_realloc(set s) +static int set_bucket_realloc(set s) { struct set_type new_set;
h2olog/quic: support the new_token_receive probe
@@ -316,6 +316,26 @@ int trace_new_token_acked(struct pt_regs *ctx) { return 0; } + +int trace_new_token_receive(struct pt_regs *ctx) { + void *pos = NULL; + struct quic_event_t event = {}; + struct st_quicly_conn_t conn = {}; + sprintf(event.type, "new_token_receive"); + + bpf_usdt_readarg(1, ctx, &pos); + bpf_probe_read(&conn, sizeof(conn), pos); + event.master_conn_id = conn.master_id; + bpf_usdt_readarg(2, ctx, &event.at); + bpf_usdt_readarg(3, ctx, &pos); + bpf_probe_read(&event.token_preview, TOKEN_PREVIEW_LEN, pos); + bpf_usdt_readarg(4, ctx, &event.len); + + if (events.perf_submit(ctx, &event, sizeof(event)) < 0) + bpf_trace_printk("failed to perf_submit\\n"); + + return 0; +} """ def handle_req_line(cpu, data, size): @@ -431,6 +451,7 @@ if sys.argv[1] == "quic": u.enable_probe(probe="packet_lost", fn_name="trace_packet_lost") u.enable_probe(probe="new_token_send", fn_name="trace_new_token_send") u.enable_probe(probe="new_token_acked", fn_name="trace_new_token_acked") + u.enable_probe(probe="new_token_receive", fn_name="trace_new_token_receive") b = BPF(text=quic_bpf, usdt_contexts=[u]) else: u.enable_probe(probe="receive_request", fn_name="trace_receive_req")
Fix octave/LimeSuite.cc compilation. GW version now reports both GW version and revision; separate GW revision info no longer exists.
@@ -55,8 +55,8 @@ void PrintDeviceInfo(lms_device_t* port) { octave_stdout << "Connected to device: " << info->deviceName << " FW: " << info->firmwareVersion << " HW: " << info->hardwareVersion - << " Protocol: " << info->protocolVersion << " GW: " << info->gatewareVersion - << " GW_rev: " << info->gatewareRevision << endl; + << " Protocol: " << info->protocolVersion + << " GW: " << info->gatewareVersion << endl; } }
Make handles steps individually/incrementally
@@ -89,26 +89,45 @@ btc-test-run: btc-test # ######################################################################################## +# Test for DEBUG nature of the build requires inspecting +# the prior `cmake` CMAKE_BUILD_TYPE, presuming cmake has +# been run. Only `Debug` results in the production of +# tests and required test libraries, so in some cases +# force remake with appropriate flags. cmake-walletkit-debug: - if [ ! -d "build" ]; then mkdir build; cd build; cmake -DCMAKE_BUILD_TYPE=Debug ..; fi + @if [ ! -d "build" ]; then \ + mkdir build; \ + cd build; \ + cmake -DCMAKE_BUILD_TYPE=Debug ..; \ + else \ + cur_dbg_setting=($$(cat build/CMakeCache.txt | grep CMAKE_BUILD_TYPE | tr "=" " ")); \ + if [ -z "$${cur_dbg_setting[1]}" ]; then \ + echo "Re-make for debug (tests)"; \ + rm -rf build; \ + mkdir build; \ + cd build; \ + cmake -DCMAKE_BUILD_TYPE=Debug ..; \ + fi; \ + fi; + cmake-walletkit: - if [ ! -d "build" ]; then mkdir build; cd build; cmake ..; fi + @if [ ! -d "build" ]; then mkdir build; cd build; cmake ..; fi cmake-walletkit-verbose: - if [ ! -d "build" ]; then mkdir build; cd build; cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON ..; fi + @if [ ! -d "build" ]; then mkdir build; cd build; cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON ..; fi cmake-installed: @[ -f `which cmake` ] || { echo "cmake is required for WalletKit make"; exit 1; } wallet-kit-libs: cmake-installed cmake-walletkit FORCE - make -C build + @make -C build wallet-kit-libs-verbose: cmake-installed cmake-walletkit-verbose FORCE - make -C build + @make -C build wallet-kit-test: cmake-installed cmake-walletkit-debug FORCE - make -C build + @make -C build wallet-kit-test-run: wallet-kit-test FORCE build/WalletKitCoreTests
cleanup: add comment to CONFIG_BATTERY_REVIVE_DISCONNECT When you define CONFIG_BATTERY_REVIVE_DISCONNECT you also need to define the battery_get_disconnect_state() method. BRANCH=none TEST=none
/* * Check for battery in disconnect state (similar to cut-off state). If this * battery is found to be in disconnect state, take it out of this state by - * force-applying a charge current. + * force-applying a charge current. This option requires + * battery_get_disconnect_state() to be defined. */ #undef CONFIG_BATTERY_REVIVE_DISCONNECT
c++: Change the default value of CXX_STANDARD from c++17 to gnu++17, since much 3rd-party code uses GNU C++ extensions
@@ -87,9 +87,10 @@ endif config CXX_STANDARD string "Language standard" - default "c++17" + default "gnu++17" ---help--- - Possible values: c++98, c++11, c++14, c++17 and c++20 + Possible values: + gnu++98/c++98, gnu++11/c++11, gnu++14/c++14, gnu++17/c++17 and gnu++20/c++20 config CXX_EXCEPTION bool "Enable Exception Support"
[cli] Stop running when an error occurs
@@ -86,6 +86,7 @@ var removeCmd = &cobra.Command{ reply, err := client.ChangeMembership(context.Background(), changeReq) if err != nil { cmd.Printf("Failed to remove member: %s\n", err.Error()) + return } cmd.Printf("removed member from cluster: %s\n", reply.Attr.ToString())
renames +en-host to +join
^- spec:asn1 :- %seq %+ turn hot - |=(h=(list @t) [%con [& 2] (rip 3 (en-host h))]) + |=(h=(list @t) [%con [& 2] (rip 3 (join '.' h))]) :: :: +cert:spec:pkcs10 ++ cert :: cert request info |= csr :: XX rename |= [a=* b=*] ^- ? (fall (bind (both (find ~[a] lit) (find ~[b] lit)) com) |) -:: :: +en-host -++ en-host :: rendor host - |= hot=(list @t) +:: :: +join +++ join :: join cords w/ sep + |= [sep=@t hot=(list @t)] ^- @t =| out=(list @t) ?> ?=(^ hot) |- ^- @t ?~ t.hot (rap 3 [i.hot out]) - $(out ['.' i.hot out], hot t.hot) + $(out [sep i.hot out], hot t.hot) :: :::: acme api response json reparsers :: :- %a %+ turn dom.rod - |=(a=turf [%o (my type+s+'dns' value+s+(en-host a) ~)]) + |=(a=turf [%o (my type+s+'dns' value+s+(join '.' a) ~)]) == :: ++ authorize
py/objtype: Use mp_obj_new_tuple_always() when allocating __slots__ object. Fixes creation of objects with __slots__ = ().
@@ -119,7 +119,7 @@ mp_obj_instance_t *mp_obj_new_instance(const mp_obj_type_t *class, const mp_obj_ const mp_obj_namedtuple_type_t *type = (const mp_obj_namedtuple_type_t *)class; size_t num_fields = type->n_fields; - mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple(num_fields, NULL)); + mp_obj_tuple_t *t = MP_OBJ_TO_PTR(mp_obj_new_tuple_always(num_fields, NULL)); t->base.type = class; return (mp_obj_instance_t *)t; }
comment in dockerfile
+# This docker image was loaded manually, since the latest version of cribl in docker +# hub does not include the changes to the appscope source that are required to parse http1 FROM cribl-dev-c9b1c605 RUN apt update && apt install -y \
Add PhGetUserOrMachineDpi
@@ -161,6 +161,34 @@ VOID PhCenterWindow( } } +// rev from EtwRundown.dll!EtwpLogDPISettingsInfo (dmex) +LONG PhGetUserOrMachineDpi( + VOID + ) +{ + static PH_STRINGREF machineKeyName = PH_STRINGREF_INIT(L"System\\CurrentControlSet\\Hardware Profiles\\Current\\Software\\Fonts"); + static PH_STRINGREF userKeyName = PH_STRINGREF_INIT(L"Control Panel\\Desktop"); + HANDLE keyHandle; + LONG dpi = 0; + + if (NT_SUCCESS(PhOpenKey(&keyHandle, KEY_QUERY_VALUE, PH_KEY_USERS, &userKeyName, 0))) + { + dpi = PhQueryRegistryUlong(keyHandle, L"LogPixels"); + NtClose(keyHandle); + } + + if (dpi == 0) + { + if (NT_SUCCESS(PhOpenKey(&keyHandle, KEY_QUERY_VALUE, PH_KEY_LOCAL_MACHINE, &machineKeyName, 0))) + { + dpi = PhQueryRegistryUlong(keyHandle, L"LogPixels"); + NtClose(keyHandle); + } + } + + return dpi; +} + LONG PhGetDpi( _In_ LONG Number, _In_ LONG DpiValue @@ -180,9 +208,9 @@ LONG PhGetSystemDpi( VOID ) { - UINT dpi; + LONG dpi; - // Try avoid calling GetDpiForSystem since it'll return incorrect DPI + // Note: Avoid calling GetDpiForSystem since it'll return incorrect DPI // when the user changes the display settings. GetDpiForWindow doesn't have // this same limitation and always returns the correct DPI. (dmex) dpi = PhGetTaskbarDpi(); @@ -199,7 +227,7 @@ LONG PhGetTaskbarDpi( VOID ) { - UINT dpi = 0; + LONG dpi = 0; HWND shellWindow; // Note: GetShellWindow is cached in TEB (dmex) @@ -297,7 +325,7 @@ LONG PhGetDpiValue( { if (GetDpiForWindow_I && WindowHandle) // Win10 RS1 { - UINT dpi; + LONG dpi; if (dpi = GetDpiForWindow_I(WindowHandle)) { @@ -308,8 +336,8 @@ LONG PhGetDpiValue( if (GetDpiForMonitor_I) // Win81 { HMONITOR monitor; - UINT dpi_x; - UINT dpi_y; + LONG dpi_x; + LONG dpi_y; if (Rect) monitor = MonitorFromRect(Rect, MONITOR_DEFAULTTONEAREST);
rm bootstrap
@@ -71,6 +71,7 @@ export PATH=${LMOD_DIR}:${PATH} MODULEPATH= python ./bootstrap_eb.py %{buildroot}/%{install_path} +rm bootstrap_eb.py* pushd %{buildroot}%{install_path}/modules/tools/EasyBuild/ rm %version ln -s ../../all/EasyBuild/%version .
examples/bind: faster named.conf
@@ -7,24 +7,38 @@ options { recursive-clients 1000000; max-clients-per-query 100000; max-recursion-queries 100000; - max-cache-size 1; - max-acache-size 1; - acache-enable no; + max-recursion-depth 2; + max-cache-size 1024; + max-acache-size 1024; + acache-enable yes; max-ncache-ttl 1; max-cache-ttl 1; - lame-ttl 1; + lame-ttl 0; reserved-sockets 2048; max-retry-time 1; max-refresh-time 1; check-integrity false; cleaning-interval 1; - notify no; + notify yes; dnssec-enable yes; dnssec-validation yes; dnssec-secure-to-insecure yes; + dnssec-lookaside no; + allow-new-zones yes; interface-interval 0; additional-from-auth yes; additional-from-cache yes; + minimal-responses yes; + prefetch 1; + resolver-query-timeout 1; + auth-nxdomain yes; + empty-server "tesZ"; + disable-empty-zone "tesY"; + zone-statistics yes; + preferred-glue AAAA; + query-source 127.0.0.3; + querylog yes; + allow-query { any; }; @@ -37,10 +51,25 @@ options { allow-transfer { any; }; - forwarders { - 127.0.0.2 port 5353; + allow-update-forwarding { + any; }; + forward only; + + forwarders { + 127.0.0.2 port 53; + }; + + rate-limit { + responses-per-second 0; + all-per-second 0; + window 1; + log-only yes; + exempt-clients { + 127.0.0.0/8; + }; + }; }; logging { @@ -55,7 +84,8 @@ logging { zone "test." { type master; - file "test.zone"; + file "test"; + also-notify { 127.0.0.2; }; }; key "rndc-key" {
hw: mcu: pic32mx470f512h: Reduce number of UART TX interrupts See commit for explanations.
@@ -155,15 +155,16 @@ uart_receive_ready(int port) static void uart_transmit_ready(int port) { + while(!(UxSTA(port) & _U1STA_UTXBF_MASK)) { int c = uarts[port].u_tx_func(uarts[port].u_func_arg); if (c < 0) { uart_disable_tx_int(port); - /* call tx done cb */ if (uarts[port].u_tx_done) { uarts[port].u_tx_done(uarts[port].u_func_arg); } - } else { + break; + } UxTXREG(port) = (uint32_t)c & 0xff; } }
nimble/ll: Remove unused syscfg We do support extended scanner filter policy and it's always enabled, there's no need to make it configurable.
@@ -240,13 +240,6 @@ syscfg.defs: This option is used to enable/disable LL privacy. value: '1' - BLE_LL_CFG_FEAT_EXT_SCAN_FILT: - description: > - This option is used to enable/disable the extended scanner filter - policy feature. Currently, this feature is not supported by the - nimble controller. - value: '0' - BLE_LL_CFG_FEAT_LE_CSA2: description: > This option is used to enable/disable support for LE Channel
hv: fix "Array has no bounds specified" in vmsr.c MISRAC requires that the array size should be declared explicitly. Acked-by: Anthony Xu
@@ -145,7 +145,8 @@ static const uint32_t emulated_msrs[NUM_EMULATED_MSR] = { /* MSR 0xC90 ... 0xD8F, not in this array */ }; -static const uint32_t x2apic_msrs[] = { +#define NUM_X2APIC_MSR 44U +static const uint32_t x2apic_msrs[NUM_X2APIC_MSR] = { MSR_IA32_EXT_XAPICID, MSR_IA32_EXT_APIC_VERSION, MSR_IA32_EXT_APIC_TPR, @@ -240,7 +241,7 @@ static void intercept_x2apic_msrs(uint8_t *msr_bitmap_arg, enum rw_mode mode) uint8_t *msr_bitmap = msr_bitmap_arg; uint32_t i; - for (i = 0U; i < ARRAY_SIZE(x2apic_msrs); i++) { + for (i = 0U; i < NUM_X2APIC_MSR; i++) { enable_msr_interception(msr_bitmap, x2apic_msrs[i], mode); } }
Update pbuf function documentation
@@ -502,7 +502,8 @@ esp_pbuf_set_ip(esp_pbuf_p pbuf, const esp_ip_t* ip, esp_port_t port) { * they are not adjusted in length and total length * * \param[in] pbuf: Pbuf to advance - * \param[in] len: Number of bytes to advance. when negative is used, buffer size is increased by only if it was decreased before + * \param[in] len: Number of bytes to advance. + * when negative is used, buffer size is increased only if it was decreased before * \return `1` on success, `0` otherwise */ uint8_t
gpexpand: need to start and stop the primaries with convertMasterDataDirToSegment. This partially reverts an earlier commit, as primaries need to be started once using convertMasterDataDirToSegment.
@@ -745,6 +745,45 @@ class SegmentTemplate: self.pool.join() self.pool.check_results() + def _start_new_primary_segments(self): + newSegments = self.gparray.getExpansionSegDbList() + for seg in newSegments: + if seg.isSegmentMirror(): + continue + """ Start all the new segments in utilty mode. """ + segStartCmd = SegmentStart( + name="Starting new segment dbid %s on host %s." % (str(seg.getSegmentDbId()), seg.getSegmentHostName()) + , gpdb=seg + , numContentsInCluster=0 # Starting seg on it's own. + , era=None + , mirrormode=MIRROR_MODE_MIRRORLESS + , utilityMode=True + , specialMode='convertMasterDataDirToSegment' + , ctxt=REMOTE + , remoteHost=seg.getSegmentHostName() + , pg_ctl_wait=True + , timeout=SEGMENT_TIMEOUT_DEFAULT) + self.pool.addCommand(segStartCmd) + self.pool.join() + self.pool.check_results() + + def _stop_new_primary_segments(self): + newSegments = self.gparray.getExpansionSegDbList() + for seg in newSegments: + if seg.isSegmentMirror() == True: + continue + segStopCmd = SegmentStop( + name="Stopping new segment dbid %s on host %s." % (str(seg.getSegmentDbId), seg.getSegmentHostName()) + , dataDir=seg.getSegmentDataDirectory() + , mode='smart' + , nowait=False + , ctxt=REMOTE + , remoteHost=seg.getSegmentHostName() + ) + self.pool.addCommand(segStopCmd) + self.pool.join() + self.pool.check_results() + def _configure_new_segments(self): """Configures new segments. This includes modifying the postgresql.conf file and setting up the gp_id table""" @@ -763,6 +802,9 @@ class SegmentTemplate: self.pool.join() self.pool.check_results() + self._start_new_primary_segments() + self._stop_new_primary_segments() + def _fixup_template(self): """Copies postgresql.conf and pg_hba.conf files from a valid segment on the system. Then modifies the template copy of pg_hba.conf"""
stm32/mboot/main: Use correct formula for DFU download address. As per ST's DfuSe specification, and following their example code.
@@ -760,7 +760,8 @@ static int dfu_process_dnload(void) { } } else if (dfu_state.wBlockNum > 1) { // write data to memory - ret = do_write(dfu_state.addr, dfu_state.buf, dfu_state.wLength); + uint32_t addr = (dfu_state.wBlockNum - 2) * DFU_XFER_SIZE + dfu_state.addr; + ret = do_write(addr, dfu_state.buf, dfu_state.wLength); } if (ret == 0) { return DFU_STATUS_DNLOAD_IDLE;
Do not default to first language for new carts Do not default to the first available language for new carts.
@@ -1115,8 +1115,8 @@ static void onNewCommandConfirmed(Console* console) } else { - loadDemo(console, 0); - done = true; + printError(console, "\nerror: choose a language for the new cart."); + printUsage(console, console->desc->command); } if(done) printBack(console, "\nnew cart has been created"); @@ -2549,7 +2549,7 @@ static const char HelpUsage[] = "help [<text>" macro("new", \ NULL, \ "creates a new `Hello World` cartridge.", \ - "new [$LANG_NAMES_PIPE$]", \ + "new <$LANG_NAMES_PIPE$>", \ onNewCommand) \ \ macro("load", \
Fix return value and const declaration from commit This fixes the non-OpenSSL compile case. Reported-by: buildfarm member sifaka Backpatch-through: master
#include "common/cipher.h" -static cipher_failure(void); +static void cipher_failure(void); PgCipherCtx * pg_cipher_ctx_create(int cipher, uint8 *key, int klen, bool enc) @@ -46,12 +46,12 @@ bool pg_cipher_decrypt(PgCipherCtx *ctx, const unsigned char *ciphertext, const int inlen, unsigned char *plaintext, int *outlen, const unsigned char *iv, const int ivlen, - const unsigned char *intag, const int taglen) + unsigned char *intag, const int taglen) { cipher_failure(); } -static +static void cipher_failure(void) { #ifndef FRONTEND
disable appveyor test
@@ -10,10 +10,10 @@ environment: BINPATH: C:\msys64\mingw64\bin OPENBLAS: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip - - platform: x86 - ARCH: 32 - BINPATH: C:\MinGw\bin;C:\msys64\mingw32\bin - OPENBLAS: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip + #- platform: x86 + # ARCH: 32 + # BINPATH: C:\MinGw\bin;C:\msys64\mingw32\bin + # OPENBLAS: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip build_script: - ps: (new-object System.Net.WebClient).DownloadFile($env:OPENBLAS, 'c:\openblas.zip')
zuse: added docstring and warning
-- -- :: +:: +mop: constructs and validates ordered ordered map based on key, +:: val, and comparator gate +:: ++ mop |* [key=mold value=mold] |= ord=$-([key key] ?) :: smallest key can be popped off the head. If $key is `@` and :: .compare is +lte, then the numerically smallest item is the head. :: +:: WARNING: ordered-map will not work properly if two keys can be +:: unequal under noun equality but equal via the compare gate +:: ++ ordered-map |* [key=mold val=mold] => |%
DM: Fix deinit_mmio_devs() conflicting type There is "void deinit_mmio_devs()" in ./devicemodel/hw/mmio/core.c, but "int deinit_mmio_devs()" in ./devicemodel/include/mmio_dev.h Acked-by: Wang, Yu1
@@ -12,6 +12,6 @@ int parse_pt_acpidev(char *arg); int parse_pt_mmiodev(char *arg); int init_mmio_devs(struct vmctx *ctx); -int deinit_mmio_devs(struct vmctx *ctx); +void deinit_mmio_devs(struct vmctx *ctx); #endif /* _MMIO_DEV_H_ */
Add comment about kci_toss
@@ -253,7 +253,7 @@ struct kvs_cursor_impl { u32 kci_peek : 1; u32 kci_toss : 1; u32 kci_reverse : 1; - u32 kci_unused : 14; + u32 kci_unused : 15; u32 kci_pfx_len : 8; u64 kci_pfxhash; @@ -1867,6 +1867,12 @@ ikvs_cursor_seek( tombs->kct_update = false; } + /* This flag is used to prevent tossing the last read key in this seek + * when this seek is followed by update and read. A call to read will + * perform a seek only when it's preceded by an update or create. In other + * words, when read sees this flag set, the order of operations has been + * [seek, update, read] and it should skip tossing the key. + */ cursor->kci_toss = 0; return 0; }
Fix error reported in lv_imgbtn
@@ -81,11 +81,10 @@ lv_obj_t * lv_imgbtn_create(lv_obj_t * par, const lv_obj_t * copy) } /*Copy an existing image button*/ else { + lv_imgbtn_ext_t * copy_ext = lv_obj_get_ext_attr(copy); #if LV_IMGBTN_TILED == 0 memcpy(ext->img_src, copy_ext->img_src, sizeof(ext->img_src)); #else - lv_imgbtn_ext_t * copy_ext = lv_obj_get_ext_attr(copy); - memcpy(ext->img_src_left, copy_ext->img_src_left, sizeof(ext->img_src_left)); memcpy(ext->img_src_mid, copy_ext->img_src_mid, sizeof(ext->img_src_mid)); memcpy(ext->img_src_right, copy_ext->img_src_right, sizeof(ext->img_src_right));
nimble/phy: Allow idle state when starting RX nRF52 radio can ramp-up from idle state so it can be either disabled or idle (for whatever reason - it happens sometimes) when starting RX. Similar problem was already fixed on nRF51 by forcing radio disable since that radio can ramp-up only from disabled state.
@@ -1309,9 +1309,19 @@ ble_phy_init(void) int ble_phy_rx(void) { - /* Check radio state */ + /* + * Check radio state. + * + * In case radio is now disabling we'll wait for it to finish, but if for + * any reason it's just in idle state we proceed with RX as usual since + * nRF52 radio can ramp-up from idle state as well. + * + * Note that TX and RX states values are the same except for 3rd bit so we + * can make a shortcut here when checking for idle state. + */ nrf_wait_disabled(); - if (NRF_RADIO->STATE != RADIO_STATE_STATE_Disabled) { + if ((NRF_RADIO->STATE != RADIO_STATE_STATE_Disabled) && + ((NRF_RADIO->STATE & 0x07) != RADIO_STATE_STATE_RxIdle)) { ble_phy_disable(); STATS_INC(ble_phy_stats, radio_state_errs); return BLE_PHY_ERR_RADIO_STATE;
dm: fix mutex lock issue in tpm_rbc.c In function crb_reg_write() in tpm_rbc.c, 'tpm_vdev->request_mutex' will potentially be kept in locked state after crb_reg_write() returns. Acked-by: Yu Wang
@@ -333,12 +333,10 @@ static void crb_reg_write(struct tpm_crb_vdev *tpm_vdev, uint64_t addr, int size if (pthread_cond_signal(&tpm_vdev->request_cond)) { DPRINTF("ERROR: Failed to wait condition\n"); - break; } if (pthread_mutex_unlock(&tpm_vdev->request_mutex)) { DPRINTF("ERROR: Failed to release mutex lock\n"); - break; } } break;
docs: add pointer to more detailed release notes in Release_Notes.txt
OpenHPC Software Stack -Version 1.2 (12 November 2016) Introduction: - This release provides a variety of common, pre-built ingredients + OpenHPC provides a variety of common, pre-built ingredients required to deploy and manage an HPC Linux cluster including provisioning tools, resource management, I/O clients, development tools, and a variety of scientific libraries. - Please see https://github.com/openhpc/ohpc/releases/tag/v1.2.GA for specific - release notes. + Please see https://github.com/openhpc/ohpc/releases for more detailed + release notes regarding a specific version. Questions, Comments, or Bug Reports?
Add ability to change output archive name
@@ -64,6 +64,7 @@ def generate_dart(unit, as_lib=False): includes = extract_macro_calls(unit, 'DOCSINCLUDESOURCES') data = { + 'DOCS_NAME': unit.name(), 'PATH': module_dir, 'MODULE_TAG': unit.get('MODULE_TAG'), 'DOCSDIR': docs_dir,
remove potential use of default i/o handler Device drivers should register valid i/o handlers in all cases and avoid referencing the default handler. Remove i/o handler NULL-check code, as the handlers shall never be NULL.
@@ -116,9 +116,6 @@ int io_instr_vmexit_handler(struct vcpu *vcpu) } if (direction == 0) { - if (handler->desc.io_write == NULL) - continue; - handler->desc.io_write(handler, vm, port, sz, cur_context->guest_cpu_regs.regs.rax); @@ -127,7 +124,7 @@ int io_instr_vmexit_handler(struct vcpu *vcpu) status = 0; break; - } else if (handler->desc.io_read) { + } else { uint32_t data = handler->desc.io_read(handler, vm, port, sz); @@ -221,21 +218,6 @@ static void deny_guest_io_access(struct vm *vm, uint32_t address, uint32_t nbyte } } -static uint32_t -default_io_read(__unused struct vm_io_handler *hdlr, __unused struct vm *vm, - uint16_t address, size_t width) -{ - uint32_t v = io_read(address, width); - return v; -} - -static void default_io_write(__unused struct vm_io_handler *hdlr, - __unused struct vm *vm, uint16_t addr, - size_t width, uint32_t v) -{ - io_write(v, addr, width); -} - static struct vm_io_handler *create_io_handler(uint32_t port, uint32_t len, io_read_fn_t io_read_fn_ptr, io_write_fn_t io_write_fn_ptr) @@ -280,23 +262,17 @@ void register_io_emulation_handler(struct vm *vm, struct vm_io_range *range, io_write_fn_t io_write_fn_ptr) { struct vm_io_handler *handler = NULL; - io_read_fn_t io_read_fn = &default_io_read; - io_write_fn_t io_write_fn = &default_io_write; - - if (range->flags == IO_ATTR_RW && io_read_fn_ptr && io_write_fn_ptr) { - io_read_fn = io_read_fn_ptr; - io_write_fn = io_write_fn_ptr; - } else if (range->flags == IO_ATTR_R) { - if (io_read_fn_ptr) - io_read_fn = io_read_fn_ptr; - io_write_fn = NULL; + + if (io_read_fn_ptr == NULL || io_write_fn_ptr == NULL) { + pr_err("Invalid IO handler."); + return; } if (is_vm0(vm)) deny_guest_io_access(vm, range->base, range->len); handler = create_io_handler(range->base, - range->len, io_read_fn, io_write_fn); + range->len, io_read_fn_ptr, io_write_fn_ptr); register_io_handler(vm, handler); }
dojo: remove unnecessary debug output The tip already communicates to the user that dojo is busy, so the sigpam output isn't needed anymore.
^+ +>+> =^ dat say (~(transceive sole say) cal) ?: |(?=(^ per) ?=(^ pux) ?=(~ pro)) - ~& %dy-edit-busy =^ lic say (~(transmit sole say) dat) =/ tip=@t 'dojo: busy (press backspace to abort)' (dy-diff %mor [%det lic] [%bel ~] [%tan [tip ~]] ~)
[numerics] suppress compiler's warning
@@ -277,10 +277,8 @@ void SBM_extract_component_3x3(const SparseBlockStructuredMatrix* const restrict - /* Column (block) position of the current block*/ - size_t colNumber; - /* Number of rows/columns of the current block */ - unsigned int nbRows, nbColumns; + /* Number of rows of the current block */ + unsigned int nbRows; /* Loop over all non-null blocks Works whatever the ordering order of the block is, in A->block
ci: increase test_flash_psram timeout
@@ -36,4 +36,4 @@ MSPI_F4R4_configs = [p.name.replace('sdkconfig.ci.', '') for p in pathlib.Path(o @pytest.mark.MSPI_F4R4 @pytest.mark.parametrize('config', MSPI_F4R4_configs, indirect=True) def test_flash4_psram4(dut: Dut) -> None: - dut.expect_exact('flash psram test success') + dut.expect_exact('flash psram test success', timeout=40)
in_dummy: use new flb_pack_json() prototype
@@ -74,6 +74,7 @@ static int configure(struct flb_in_dummy_config *ctx, struct timespec *tm) { char *str = NULL; + int root_type; int ret = -1; long val = 0; @@ -101,7 +102,7 @@ static int configure(struct flb_in_dummy_config *ctx, ret = flb_pack_json(ctx->dummy_message, ctx->dummy_message_len, - &ctx->ref_msgpack, &ctx->ref_msgpack_size); + &ctx->ref_msgpack, &ctx->ref_msgpack_size, &root_type); if (ret != 0) { flb_warn("[in_dummy] Data is incomplete. Use default string."); @@ -111,7 +112,8 @@ static int configure(struct flb_in_dummy_config *ctx, ret = flb_pack_json(ctx->dummy_message, ctx->dummy_message_len, - &ctx->ref_msgpack, &ctx->ref_msgpack_size); + &ctx->ref_msgpack, &ctx->ref_msgpack_size, + &root_type); if (ret != 0) { flb_error("[in_dummy] Unexpected error"); return -1;
Fix markdown syntax in pipeline's README It was displayed strangely in text editors.
# Concourse Pipeline Generation -To facilitate pipeline maintenance, a Python utility 'gen_pipeline.py` +To facilitate pipeline maintenance, a Python utility `gen_pipeline.py` is used to generate the production pipeline. It can also be used to build custom pipelines for developer use.
tup: fully support renaming android package;
@@ -43,7 +43,7 @@ config = { keystore = '/path/to/keystore', keystorepass = 'pass:password', manifest = nil, - package = nil, + package = 'org.lovr.app', project = nil } } @@ -173,6 +173,7 @@ if target == 'android' then flags += config.debug and '-funwind-tables' or '' cflags += '-D_POSIX_C_SOURCE=200809L' cflags += ('-I%s/sources/android/native_app_glue'):format(config.android.ndk) + cflags += '-DLOVR_JAVA_PACKAGE=' .. config.android.package:gsub('%.', '_') lflags += '-shared -landroid' end @@ -487,7 +488,7 @@ if target == 'android' then end java = 'bin/Activity.java' - class = 'org/lovr/app/Activity.class' + class = config.android.package:gsub('%.', '/') .. '/Activity.class' binclass = 'bin/' .. class jar = 'bin/lovr.jar' dex = 'bin/apk/classes.dex' @@ -496,7 +497,7 @@ if target == 'android' then apk = 'bin/lovr.apk' manifest = config.android.manifest or 'etc/AndroidManifest.xml' - package = config.android.package and #config.android.package > 0 and ('--rename-manifest-package ' .. config.android.package) or '' + package = '--rename-manifest-package ' .. config.android.package project = config.android.project and #config.android.project > 0 and ('-A ' .. config.android.project) or '' version = config.android.version @@ -506,7 +507,7 @@ if target == 'android' then tools = config.android.sdk .. '/build-tools/' .. config.android.buildtools copy(manifest, 'bin/AndroidManifest.xml') - copy('etc/Activity.java', java) + tup.rule('etc/Activity.java', 'tup varsed %f %o', java) tup.rule(java, '^ JAVAC %b^ javac -classpath $(androidjar) -d bin %f', binclass) tup.rule(binclass, '^ JAR %b^ jar -cf %o -C bin $(class)', jar) tup.rule(jar, '^ D8 %b^ $(tools)/d8 --min-api $(version) --output bin/apk %f', dex)
Update bucket location for singlecluster
@@ -460,11 +460,11 @@ resources: secret_access_key: {{bucket-secret-access-key}} regexp: deliverables/greenplum-db-(.*)-behave.tar.gz -- name: singlecluster +- name: singlecluster-CDH type: s3 source: - access_key_id: {{pxf-aws-access-key-id}} - secret_access_key: {{pxf-aws-secret-access-key}} + access_key_id: {{bucket-access-key-id}} + secret_access_key: {{bucket-secret-access-key}} bucket: {{pxf-aws-bucket-name}} region_name: {{aws-region}} versioned_file: singlecluster-without-pxf/singlecluster-CDH.tar.gz @@ -943,6 +943,7 @@ jobs: passed: [compile_gpdb_centos6] trigger: true - get: singlecluster + resource: singlecluster-CDH trigger: true - get: pxf_automation_src trigger: true @@ -951,7 +952,7 @@ jobs: file: gpdb_src/concourse/tasks/regression_tests_pxf.yml image: centos-gpdb-dev-6 params: - overwrite_pxf: false + GROUP: gpdb TARGET_OS: centos TARGET_OS_VERSION: 6
Add tests to cover line numbering edge cases with JRuby
@@ -1262,6 +1262,54 @@ module Nokogiri end describe "#line" do + it "properly numbers lines with documents containing XML prolog" do + xml = Nokogiri::XML(<<~eoxml) + <?xml version="1.0" ?> + <a> + <b> + Test + </b> + </root> + eoxml + + set = xml.search("//b") + assert_equal(3, set[0].line) + end + + it "properly numbers lines with documents containing XML comments" do + xml = Nokogiri::XML(<<~eoxml) + <a> + <b> + <!-- This is a comment --> + <c> + Test + </c> + </b> + </a> + eoxml + + set = xml.search("//c") + assert_equal(4, set[0].line) + end + + it "properly numbers lines with documents containing XML multiline comments" do + xml = Nokogiri::XML(<<~eoxml) + <a> + <b> + <!-- + This is a comment + --> + <c> + Test + </c> + </b> + </a> + eoxml + + set = xml.search("//c") + assert_equal(6, set[0].line) + end + it "returns a sensible line number for each node" do xml = Nokogiri::XML(<<~eoxml) <a>
Update thread stack window layout
@@ -162,10 +162,10 @@ STYLE DS_SETFONT | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU | WS_THICKFR CAPTION "Thread Stack" FONT 8, "MS Shell Dlg", 400, 0, 0x1 BEGIN - PUSHBUTTON "Copy",IDC_COPY,111,212,50,14 - PUSHBUTTON "Refresh",IDC_REFRESH,165,212,50,14 - PUSHBUTTON "Close",IDOK,219,212,50,14 - CONTROL "",IDC_TREELIST,"PhTreeNew",WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_TABSTOP | 0x2,2,2,269,209,WS_EX_CLIENTEDGE + PUSHBUTTON "Copy",IDC_COPY,113,212,50,14 + PUSHBUTTON "Refresh",IDC_REFRESH,167,212,50,14 + PUSHBUTTON "Close",IDOK,221,212,50,14 + CONTROL "",IDC_TREELIST,"PhTreeNew",WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_TABSTOP | 0x2,0,0,273,211,WS_EX_CLIENTEDGE PUSHBUTTON "Options",IDC_OPTIONS,2,212,50,14 END @@ -1403,9 +1403,6 @@ BEGIN IDD_THRDSTACK, DIALOG BEGIN - LEFTMARGIN, 2 - RIGHTMARGIN, 271 - TOPMARGIN, 2 BOTTOMMARGIN, 226 END
dbug fe: separate list query term with space Allows searching for multiple parts of the item key separately, returning only items that match all.
@@ -32,7 +32,9 @@ export class SearchableList extends Component { ); let items = props.items.filter(item => { - return (state.query === '') || item.key.includes(state.query); + return state.query.split(' ').reduce((match, query) => { + return match && item.key.includes(query); + }, true); }) if (items.length === 0) { items = 'none';
Enable assert_exhaustion tests
@@ -518,6 +518,8 @@ for fn in jsonFiles: test.expected[0]["value"] = "<Arithmetic NaN>" elif test.type == "assert_trap": test.expected_trap = cmd["text"] + elif test.type == "assert_exhaustion": + test.expected_trap = "stack overflow" else: stats.skipped += 1 warning(f"Skipped {test.source} ({test.type} not implemented)")
Fix slf4j lib in tomcat-mcf
<exclude name="log4j-*.jar"/> <exclude name="jasper-*.jar"/> <exclude name="ecj-*.jar"/> + <exclude name="log4j-*.jar"/> + <exclude name="slf4j-*.jar"/> </fileset> </copy> <copy flatten="true" toDir="${linux.dist.dir}/tomcat-mcf/lib" overwrite="true" force="true"> <fileset dir="${datafari.core.dir}"> <include name="dependency/log4j-*.jar" /> - </fileset> - <fileset dir="${datafari.core.dir}"> - <include name="log4j-*.jar" /> + <include name="dependency/slf4j-*.jar" /> + <include name="dependency/jul-to-slf4j-*.jar" /> </fileset> </copy>
bugfix: malloc retention buffer with MALLOC_CAP_RETENTION caps
@@ -37,8 +37,6 @@ static DRAM_ATTR __attribute__((unused)) sleep_retention_t s_retention; #if SOC_PM_SUPPORT_TAGMEM_PD -#define TAGMEM_PD_MEM_TYPE_CAPS (MALLOC_CAP_DMA | MALLOC_CAP_DEFAULT) - #if CONFIG_PM_POWER_DOWN_TAGMEM_IN_LIGHT_SLEEP static int cache_tagmem_retention_setup(uint32_t code_seg_vaddr, uint32_t code_seg_size, uint32_t data_seg_vaddr, uint32_t data_seg_size) { @@ -123,7 +121,7 @@ static esp_err_t esp_sleep_tagmem_pd_low_init(bool enable) int tagmem_sz = cache_tagmem_retention_setup(code_start, code_size, data_start, data_size); void *buf = heap_caps_aligned_alloc(SOC_RTC_CNTL_TAGMEM_PD_DMA_ADDR_ALIGN, tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE, - TAGMEM_PD_MEM_TYPE_CAPS); + MALLOC_CAP_RETENTION); if (buf) { memset(buf, 0, tagmem_sz + RTC_HAL_DMA_LINK_NODE_SIZE); s_retention.retent.tagmem.link_addr = rtc_cntl_hal_dma_link_init(buf, @@ -157,19 +155,13 @@ static esp_err_t esp_sleep_tagmem_pd_low_init(bool enable) #if SOC_PM_SUPPORT_CPU_PD -#if CONFIG_IDF_TARGET_ESP32C3 || CONFIG_IDF_TARGET_ESP32H2 -#define CPU_PD_MEM_TYPE_CAPS (MALLOC_CAP_RETENTION | MALLOC_CAP_DEFAULT) -#else -#define CPU_PD_MEM_TYPE_CAPS (MALLOC_CAP_DMA | MALLOC_CAP_DEFAULT) -#endif - esp_err_t esp_sleep_cpu_pd_low_init(bool enable) { if (enable) { if (s_retention.retent.cpu_pd_mem == NULL) { void *buf = heap_caps_aligned_alloc(SOC_RTC_CNTL_CPU_PD_DMA_ADDR_ALIGN, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE, - CPU_PD_MEM_TYPE_CAPS); + MALLOC_CAP_RETENTION); if (buf) { memset(buf, 0, SOC_RTC_CNTL_CPU_PD_RETENTION_MEM_SIZE + RTC_HAL_DMA_LINK_NODE_SIZE); s_retention.retent.cpu_pd_mem = rtc_cntl_hal_dma_link_init(buf,
Find `libjwt`: Reformat CMake code
+# ~~~ # Find the libjwt includes and library # # LIBJWT_INCLUDE_DIR - Where to find libjwt include sub-directory. # LIBJWT_LIBRARY - Path to libjwt library. # LIBJWT_FOUND - True if libjwt found. +# ~~~ if (LIBJWT_INCLUDE_DIR) - # Already in cache, be silent. - set (LIBJWT_FIND_QUIETLY TRUE) + set (LIBJWT_FIND_QUIETLY TRUE) # Already in cache, be silent. endif (LIBJWT_INCLUDE_DIR) find_path (LIBJWT_INCLUDE_DIR jwt.h PATHS /usr/include /usr/local/include PATH_SUFFIXES jwt) find_library (LIBJWT_LIBRARY NAMES jwt PATHS /usr/lib /usr/lib64 /usr/local/lib) -# Handle the QUIETLY and REQUIRED arguments and set LIBJWT_FOUND to -# TRUE if all listed variables are TRUE. +# Handle the QUIETLY and REQUIRED arguments and set LIBJWT_FOUND to TRUE if all listed variables are TRUE. include (FindPackageHandleStandardArgs) -find_package_handle_standard_args ( - LibJWT DEFAULT_MSG - LIBJWT_LIBRARY LIBJWT_INCLUDE_DIR -) +find_package_handle_standard_args (LibJWT DEFAULT_MSG LIBJWT_LIBRARY LIBJWT_INCLUDE_DIR) mark_as_advanced (LIBJWT_LIBRARY LIBJWT_INCLUDE_DIR)
libopae: remove extraneous function del_dev()
@@ -340,17 +340,6 @@ out_free: return NULL; } -bool del_dev(struct dev_list *pdev, struct dev_list *parent) -{ - if (!parent || !pdev) - return false; - - parent->next = pdev->next; - free(pdev); - - return true; -} - static fpga_result enum_fme(const char *sysfspath, const char *name, struct dev_list *parent) { @@ -481,10 +470,8 @@ static fpga_result enum_afu(const char *sysfspath, const char *name, /* if we can't read the afu_id, remove device from list */ if (FPGA_OK != result) { FPGA_MSG("Could not read afu_id from '%s', ignoring", spath); - if (!del_dev(pdev, parent)) { - FPGA_ERR("del_dev() failed"); - return FPGA_EXCEPTION; - } + parent->next = pdev->next; + free(pdev); } return FPGA_OK;
baseboard/asurada/usb_pd_policy.c: Format with clang-format BRANCH=none TEST=none
@@ -79,8 +79,7 @@ __override int svdm_dp_attention(int port, uint32_t *payload) if (lvl) gpio_set_level_verbose(CC_USBPD, GPIO_DP_AUX_PATH_SEL, port); - if (chipset_in_state(CHIPSET_STATE_ANY_SUSPEND) && - (irq || lvl)) + if (chipset_in_state(CHIPSET_STATE_ANY_SUSPEND) && (irq || lvl)) /* * Wake up the AP. IRQ or level high indicates a DP sink is now * present.
uses %init for all merges up to and including the first remote sync
=. let ?. ?=($w p.p.u.rot) let ud:((hard cass:clay) q.q.r.u.rot) =/ =wire /kiln/sync/[syd]/(scot %p her)/[sud] =/ =cass .^(cass:clay %cw /(scot %p our)/[syd]/(scot %da now)) - =/ =germ ?:(=(0 ud.cass) %init %mate) + :: + :: If we will be syncing in remote changes, we need all our sync merges + :: up to and including the first remote sync to use the %init germ. + :: Otherwise we won't have a merge-base with our sponsor. + :: + =/ bar=@ud + ?: ?| ?=(?($czar $pawn) (clan:title our)) + !?=(%home syd) + == + 2 + 3 + =/ =germ ?:((gte bar ud.cass) %init %mate) + =< %- spam + ?: =(our her) ~ + [(render "beginning sync" sud her syd) ~] (blab [ost %merg wire our syd her sud ud+let germ] ~) :: ++ mere
SOVERSION bump to version 2.6.3
@@ -63,7 +63,7 @@ set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_ # set version of the library set(LIBYANG_MAJOR_SOVERSION 2) set(LIBYANG_MINOR_SOVERSION 6) -set(LIBYANG_MICRO_SOVERSION 2) +set(LIBYANG_MICRO_SOVERSION 3) set(LIBYANG_SOVERSION_FULL ${LIBYANG_MAJOR_SOVERSION}.${LIBYANG_MINOR_SOVERSION}.${LIBYANG_MICRO_SOVERSION}) set(LIBYANG_SOVERSION ${LIBYANG_MAJOR_SOVERSION})
Read config from env variables
@@ -313,6 +313,36 @@ void survive_init_plugins() { #endif } static bool disable_colorization = false; +static void survive_process_env(SurviveContext *ctx, bool only_print) { + char ** env; +#if defined(WIN) && (_MSC_VER >= 1900) + env = *__p__environ(); +#else + extern char ** environ; + env = environ; +#endif + +#define ENV_PREFIX "SURVIVE_" + for (env; *env; ++env) { + if(strncmp(*env, ENV_PREFIX, strlen(ENV_PREFIX)) == 0) { + const char* entry = *env + strlen(ENV_PREFIX); + char tag[32] = {}; + const char* value = strchr(entry, '=') + 1; + int offset = value - entry - 1; + if(offset > 32) continue; + for(int i = 0; i < offset; i++) tag[i] = tolower(entry[i]); + if(tag) { + if(only_print) { + SV_VERBOSE(100, "\t[ENV]'%s'",*env); + } else { + survive_configs(ctx, tag, SC_OVERRIDE | SC_SET, value); + } + } + } + } + +} + SurviveContext *survive_init_internal(int argc, char *const *argv, void *userData, log_process_func log_func) { int i; @@ -421,6 +451,8 @@ SurviveContext *survive_init_internal(int argc, char *const *argv, void *userDat } } + survive_process_env(ctx, false); + const char *log_file = survive_configs(ctx, "log", SC_GET, 0); int record_to_stdout = survive_configi(ctx, "record-stdout", SC_GET, 0); if (log_file || record_to_stdout) @@ -452,6 +484,7 @@ SurviveContext *survive_init_internal(int argc, char *const *argv, void *userDat for(int i = 0;i < argc;i++) { SV_VERBOSE(100, "\t'%s'",argv[i]); } + survive_process_env(ctx, true); const char *record_config_prefix_fields[] = {"record", "usbmon-record", 0}; if (!user_set_configfile && find_correct_config_file(ctx, record_config_prefix_fields)) {
Update documentation of mbedtls_pk_setup_opaque() The function now accepts a RSA key pair in addition to an ECC key pair.
@@ -330,8 +330,8 @@ int mbedtls_pk_setup( mbedtls_pk_context *ctx, const mbedtls_pk_info_t *info ); * storing and manipulating the key material directly. * * \param ctx The context to initialize. It must be empty (type NONE). - * \param key The PSA key to wrap, which must hold an ECC key pair - * (see notes below). + * \param key The PSA key to wrap, which must hold an ECC or RSA key + * pair (see notes below). * * \note The wrapped key must remain valid as long as the * wrapping PK context is in use, that is at least between @@ -339,8 +339,8 @@ int mbedtls_pk_setup( mbedtls_pk_context *ctx, const mbedtls_pk_info_t *info ); * mbedtls_pk_free() is called on this context. The wrapped * key might then be independently used or destroyed. * - * \note This function is currently only available for ECC key - * pairs (that is, ECC keys containing private key material). + * \note This function is currently only available for ECC or RSA + * key pairs (that is, keys containing private key material). * Support for other key types may be added later. * * \return \c 0 on success.
fix nsh map index error, it will cause vpp appear Segmentation fault when vpp process double packages
@@ -2000,7 +2000,7 @@ nsh_input_map (vlib_main_t * vm, goto trace1; } vnet_buffer (b1)->sw_if_index[VLIB_RX] = - map0->rx_sw_if_index; + map1->rx_sw_if_index; } /* Pop NSH header */
boot: bootutil: Change ec256 so that it can support multiple interfaces Change ec256 interface so that it could support multiple interfaces in the future.
#include "mbedtls/oid.h" #include "mbedtls/asn1.h" +#ifdef MCUBOOT_USE_TINYCRYPT #include "tinycrypt/ecc_dsa.h" +#endif #include "bootutil_priv.h" /* @@ -40,7 +42,7 @@ static const uint8_t ec_secp256r1_oid[] = MBEDTLS_OID_EC_GRP_SECP256R1; * Parse the public key used for signing. */ static int -tinycrypt_import_key(uint8_t **cp, uint8_t *end) +bootutil_import_key(uint8_t **cp, uint8_t *end) { size_t len; mbedtls_asn1_buf alg; @@ -91,7 +93,7 @@ tinycrypt_import_key(uint8_t **cp, uint8_t *end) * Verify the tag, and that the length is 32 bytes. */ static int -tinycrypt_read_bigint(uint8_t i[NUM_ECC_BYTES], uint8_t **cp, uint8_t *end) +bootutil_read_bigint(uint8_t i[NUM_ECC_BYTES], uint8_t **cp, uint8_t *end) { size_t len; @@ -113,7 +115,7 @@ tinycrypt_read_bigint(uint8_t i[NUM_ECC_BYTES], uint8_t **cp, uint8_t *end) * Read in signature. Signature has r and s encoded as integers. */ static int -tinycrypt_decode_sig(uint8_t signature[NUM_ECC_BYTES * 2], uint8_t *cp, uint8_t *end) +bootutil_decode_sig(uint8_t signature[NUM_ECC_BYTES * 2], uint8_t *cp, uint8_t *end) { int rc; size_t len; @@ -127,17 +129,18 @@ tinycrypt_decode_sig(uint8_t signature[NUM_ECC_BYTES * 2], uint8_t *cp, uint8_t return -2; } - rc = tinycrypt_read_bigint(signature, &cp, end); + rc = bootutil_read_bigint(signature, &cp, end); if (rc) { return -3; } - rc = tinycrypt_read_bigint(signature + NUM_ECC_BYTES, &cp, end); + rc = bootutil_read_bigint(signature + NUM_ECC_BYTES, &cp, end); if (rc) { return -4; } return 0; } +#if defined(MCUBOOT_USE_TINYCRYPT) int bootutil_verify_sig(uint8_t *hash, uint32_t hlen, uint8_t *sig, size_t slen, uint8_t key_id) @@ -151,12 +154,12 @@ bootutil_verify_sig(uint8_t *hash, uint32_t hlen, uint8_t *sig, size_t slen, pubkey = (uint8_t *)bootutil_keys[key_id].key; end = pubkey + *bootutil_keys[key_id].len; - rc = tinycrypt_import_key(&pubkey, end); + rc = bootutil_import_key(&pubkey, end); if (rc) { return -1; } - rc = tinycrypt_decode_sig(signature, sig, sig + slen); + rc = 
bootutil_decode_sig(signature, sig, sig + slen); if (rc) { return -1; } @@ -175,4 +178,5 @@ bootutil_verify_sig(uint8_t *hash, uint32_t hlen, uint8_t *sig, size_t slen, return -2; } } +#endif /* MCUBOOT_USE_TINYCRYPT */ #endif /* MCUBOOT_SIGN_EC256 */
Increase the size of the stack buffer to prevent an overflow.
@@ -238,7 +238,7 @@ static int print_bin(BIO *fp, const char *name, const unsigned char *buf, size_t len, int off) { size_t i; - char str[128]; + char str[128 + 1 + 4]; if (buf == NULL) return 1;
Removed 32-bit x86 target
@@ -203,51 +203,6 @@ let }, -- - -- Generic Intel ia32 32-bit x86 core - -- - cpuDriver { - architectures = [ "x86_32" ], - assemblyFiles = [ "arch/x86_32/boot.S", - "arch/x86_32/entry.S", - "arch/x86_32/pic.S", - "../usr/drivers/cpuboot/init_ap_x86_32.S" ], - cFiles = [ "arch/x86_32/debug.c", - "arch/x86_32/gdb_arch.c", - "arch/x86_32/init.c", - "arch/x86_32/irq.c", - "arch/x86_32/startup_arch.c", - "arch/x86_32/dispatch.c", - "arch/x86_32/exec.c", - "arch/x86_32/syscall.c", - "arch/x86_32/paging.c", - "arch/x86_32/page_mappings_arch.c", - "arch/x86/apic.c", - "arch/x86/pic.c", - "arch/x86/cmos.c", - "arch/x86/misc.c", - "arch/x86/serial.c", - "arch/x86/conio.c", - "arch/x86/syscall.c", - "arch/x86/debugregs.c", - "arch/x86/perfmon.c", - "arch/x86/perfmon_intel.c", - "arch/x86/perfmon_amd.c", - "arch/x86/rtc.c", - "arch/x86/timing.c", - "arch/x86/startup_x86.c", - "arch/x86/multiboot.c", - "arch/x86/ipi_notify.c" - ], - mackerelDevices = [ "lpc_pic", - "pc16550d", - "ia32", - "amd64", - "xapic", - "cpuid", - "lpc_rtc" ], - addLibraries = [ "elf_kernel" ] - }, - -- -- Versatile Express Cortex-A15 ARMv7-A, e.g. GEM5 or qemu -- cpuDriver {
fix source file list
@@ -46,6 +46,7 @@ SOURCES_C := $(NP2_PATH)/calendar.c \ $(wildcard $(NP2_PATH)/font/*.c) \ $(wildcard $(NP2_PATH)/generic/*.c) \ $(wildcard $(NP2_PATH)/io/*.c) \ + $(wildcard $(NP2_PATH)/io/pci/*.c) \ $(wildcard $(NP2_PATH)/lio/*.c) \ $(wildcard $(NP2_PATH)/mem/*.c) \ $(wildcard $(NP2_PATH)/sdl2/*.c) \
Hopefully fix Appveyor builds.
version: "{build}" -image: Visual Studio 2017 +image: Visual Studio 2019 +clone_script: + - ps: >- + if (-not $env:APPVEYOR_PULL_REQUEST_NUMBER) { + git clone -q --branch=$env:APPVEYOR_REPO_BRANCH https://github.com/$env:APPVEYOR_REPO_NAME.git $env:APPVEYOR_BUILD_FOLDER + cd $env:APPVEYOR_BUILD_FOLDER + git checkout -qf $env:APPVEYOR_REPO_COMMIT + } else { + git clone -q https://github.com/$env:APPVEYOR_REPO_NAME.git $env:APPVEYOR_BUILD_FOLDER + cd $env:APPVEYOR_BUILD_FOLDER + git fetch -q origin +refs/pull/$env:APPVEYOR_PULL_REQUEST_NUMBER/merge: + git checkout -qf FETCH_HEAD + } + - cmd: git submodule update --init --recursive # Uncomment to debug via RDP # init: @@ -18,6 +31,8 @@ install: # Install zlib dependency - ps: nuget restore vcnet\ippsample.sln + - ps: nuget restore libcups\vcnet\libcups.sln + - ps: nuget restore pdfio\pdfio.sln build_script: - ps: msbuild.exe vcnet\ippsample.sln /p:Configuration=Release /p:Platform=x64 /p:PlatformToolset=v140
oc_discovery: fix bug while filtering eps list
@@ -619,11 +619,12 @@ oc_ri_process_discovery_payload(uint8_t *payload, int len, */ bool ep_ipv6 = (temp_ep.flags & IPV6) ? true : false; bool ep_link_local = - (oc_ipv6_endpoint_is_link_local(&temp_ep)) ? true : false; + (oc_ipv6_endpoint_is_link_local(&temp_ep) == 0) ? true + : false; /* 1) */ if (sender_ipv6 && sender_link_local && ep_ipv6 && ep_link_local) { - if (oc_endpoint_compare(endpoint, &temp_ep) != 0) { + if (oc_endpoint_compare_address(endpoint, &temp_ep) != 0) { goto next_ep; } }
esp32: do not try to load PHY data if fail to open NVS
@@ -446,6 +446,7 @@ esp_err_t esp_phy_load_cal_data_from_nvs(esp_phy_calibration_data_t* out_cal_dat if (err == ESP_ERR_NVS_NOT_INITIALIZED) { ESP_LOGE(TAG, "%s: NVS has not been initialized. " "Call nvs_flash_init before starting WiFi/BT.", __func__); + return err; } else if (err != ESP_OK) { ESP_LOGD(TAG, "%s: failed to open NVS namespace (0x%x)", __func__, err); return err;
Fix a warning which showed up on NetBSD.
@@ -3269,7 +3269,7 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, /* bind a ep to a socket address */ struct sctppcbhead *head; struct sctp_inpcb *inp, *inp_tmp; -#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) || defined(__APPLE__) struct inpcb *ip_inp; #endif int port_reuse_active = 0; @@ -3284,7 +3284,7 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, lport = 0; bindall = 1; inp = (struct sctp_inpcb *)so->so_pcb; -#if defined(INET) || (defined(INET6) && defined(__APPLE__)) || defined(__FreeBSD__) || defined(__APPLE__) +#if defined(__FreeBSD__) || defined(__APPLE__) ip_inp = (struct inpcb *)so->so_pcb; #endif #ifdef SCTP_DEBUG
update flow links
@@ -377,15 +377,17 @@ oidc-agent. ## Response Type The following response types must be registered: -- 'token' when using the Password Flow (see also [flow](oidc-gen.md#flow)) #TODO -- 'code' when using the Authorization Code Flow (see also [flow](oidc-gen.md#flow)) #TODO +- 'token' when using the Password Flow (see also + [flow](oidc-gen.md#password-flow)) +- 'code' when using the Authorization Code Flow (see also [flow](oidc-gen.md#authorization-code-flow)) ## Grant Types The following grant types must be registered: - 'refresh_token' if available -- 'authorization_code' when using the Authorization Code Flow (see also [flow](oidc-gen.md#flow)) #TODO -- 'password' when using the Password Flow (see also [flow](oidc-gen.md#flow)) #TODO -- 'urn:ietf:params:oauth:grant-type:device_code' when using the Device Flow (see also [flow](oidc-gen.md#flow)) #TODO +- 'authorization_code' when using the Authorization Code Flow (see also [flow](oidc-gen.md#authorization-code-flow)) +- 'password' when using the Password Flow (see also + [flow](oidc-gen.md#password-flow)) +- 'urn:ietf:params:oauth:grant-type:device_code' when using the Device Flow (see also [flow](oidc-gen.md#device-flow))
Fix tilemap generation for logo scenes
@@ -8,7 +8,7 @@ import { Trigger, } from "store/features/entities/entitiesTypes"; import { FontData } from "../fonts/fontData"; -import { hexDec } from "../helpers/8bit"; +import { hexDec, wrap8Bit } from "../helpers/8bit"; import { PrecompiledSpriteSheetData } from "./compileSprites"; import { dirEnum } from "./helpers"; @@ -813,7 +813,7 @@ export const compileTilemap = (tilemap: number[], tilemapIndex: number) => DATA_TYPE, tilemapSymbol(tilemapIndex), `// Tilemap ${tilemapIndex}`, - Array.from(tilemap).map(toHex), + Array.from(tilemap).map(wrap8Bit).map(toHex), 16 );
CMakeLists: Add back support for Luarocks 2.x since that doesn't have the LUALIB var See
@@ -88,7 +88,26 @@ if (LUA) SET(INSTALL_LIB_DIR ${LIBDIR}) if (LUA_LIBDIR) + if (NOT LUA_LIBFILE) + # If LIBDIR is set but LIBFILE is not, then that means + # Luarocks 2.x is being used, and we'll need to find the lib + # ourselves + GET_FILENAME_COMPONENT(LUA_EXEC_NAME ${LUA_EXECUTABLE} NAME_WE) + IF(LUA_EXEC_NAME STREQUAL "luajit") + FIND_LIBRARY(LUA_LIBRARIES + NAMES luajit libluajit + PATHS ${LUA_LIBDIR} + NO_DEFAULT_PATH) + ELSEIF(LUA_EXEC_NAME MATCHES "lua.*") + FIND_LIBRARY(LUA_LIBRARIES + NAMES lua lua54 lua53 lua52 lua51 liblua liblua54 liblua53 liblua52 liblua51 + PATHS ${LUA_LIBDIR} + NO_DEFAULT_PATH) + ENDIF() + else() + # Otherwise, we can just use the LIBFILE that Luarocks provides get_filename_component(LUA_LIBRARIES "${LUA_LIBDIR}/${LUA_LIBFILE}" ABSOLUTE) + endif() MESSAGE(STATUS "Lua library: ${LUA_LIBRARIES}") else() MESSAGE(STATUS "Lua library not set, presuming LuaRocks config has link_lua_explicitly set to false")
tests: subscriber ames gets kick
== == :- t7 |. :- %| - :: ames hears ack from gall, sends over the network + :: publisher ames hears ack from gall, sends over the network =^ t8 ames.bud %: ames-check-take:v ames.bud [~1111.1.2 0xbeef.dead *roof] == == == :- t8 |. :- %| - :: ames hears watch-ack packet, gives to gall + :: subscriber ames hears watch-ack packet, gives to gall =^ t9 ames.nec %: ames-check-call:v ames.nec [~1111.1.3 0xdead.beef *roof] :- ~[/ames] [%pass /pump/~nec/1 %b %wait ~1111.1.4..00.00.01] == == - :- t14 |. :- %& ~ + :- t14 |. :- %| + :: subscriber ames receives kick, gives to gall and gives ack to ames + =^ t15 ames.nec + %: ames-check-call:v ames.nec + [~1111.1.4 0xdead.beef *roof] + :- ~[//unix] + :* %hear [%& ~bud] + 0xa1fc.cd35.c730.9a00.07e0.90a2.f87c.3657.935e. + 4ca0.801d.3ddc.d400.0100.0223.bc18.1000 + == + :~ :- ~[/sys/way/~bud/pub /use/sub/0w1.d6Isf/out/~bud/pub/1/sub /init] + [%give %boon %x ~] + :- ~[//unix] + :* %give %send [%& ~nec] + 0xfe.e208.da00.0491.bf7f.9594.2ddc.0948. + 9de0.3906.b678.6e00.0200.0132.e55d.5000 + == == + == + :- t15 |. :- %& ~ --
gitlab: Add lazor build We currently have only volteer building on gitlab. Since trogdor is under active development, add a build for that as well. BRANCH=none TEST=pass at
@@ -41,17 +41,24 @@ before_script: - export ZEPHYR_DIR=/zephyr - export PATH="$PATH:$HOME/.local/bin" -test: +volteer: script: - echo "It works" + - zmake --zephyr-base "${ZEPHYR_BASE}" --modules-dir "${MODULES_DIR}" + -l DEBUG configure -b -B "${BUILD_DIR}/volteer" -t zephyr + zephyr/projects/volteer/volteer + - ls "${BUILD_DIR}/volteer" "${BUILD_DIR}/volteer/output" + artifacts: + paths: + - build/volteer/output/* + expire_in: 1 week -run: +lazor: script: - zmake --zephyr-base "${ZEPHYR_BASE}" --modules-dir "${MODULES_DIR}" - -l DEBUG configure -b -B "${BUILD_DIR}/vol" -t zephyr - zephyr/projects/volteer/volteer - - ls "${BUILD_DIR}/vol" "${BUILD_DIR}/vol/output" + -l DEBUG configure -b -B "${BUILD_DIR}/lazor" -t zephyr + zephyr/projects/trogdor/lazor + - ls "${BUILD_DIR}/lazor" "${BUILD_DIR}/lazor/output" artifacts: paths: - - build/vol/output/* + - build/lazor/output/* expire_in: 1 week