Columns: message (string, length 6 to 474) and diff (string, length 8 to 5.22k).
out_s3: renamed flb_digest to flb_hash
#include <fluent-bit/flb_config_map.h> #include <fluent-bit/flb_aws_util.h> #include <fluent-bit/aws/flb_aws_compress.h> -#include <fluent-bit/flb_digest.h> +#include <fluent-bit/flb_hash.h> #include <fluent-bit/flb_crypto.h> #include <fluent-bit/flb_signv4.h> #include <fluent-bit/flb_scheduler.h> @@ -1468,7 +1468,7 @@ int get_md5_base64(char *buf, size_t buf_size, char *md5_str, size_t md5_str_siz size_t olen; int ret; - ret = flb_digest_simple(FLB_DIGEST_MD5, + ret = flb_hash_simple(FLB_HASH_MD5, (unsigned char *) buf, buf_size, md5_bin, sizeof(md5_bin));
u3: check for overflow in interpreter if guard page not present
@@ -1444,10 +1444,9 @@ _n_push(c3_ys mov, c3_ys off, u3_noun a) { u3R->cap_p += mov; - // XX define symbol to control guard page - // or switch to u3a_push() + // XX switch to u3a_push() // -#if 0 +#ifndef U3_GUARD_PAGE if ( 0 == off ) { if( !(u3R->cap_p > u3R->hat_p) ) { u3m_bail(c3__meme);
Reduce list of kernels in the dynamic arch build to make compilation complete reliably within the 1h limit again
@@ -55,7 +55,7 @@ before_build: - if [%COMPILER%]==[cl] cmake -G "Visual Studio 15 2017 Win64" .. - if [%WITH_FORTRAN%]==[no] cmake -G "Ninja" -DCMAKE_CXX_COMPILER=clang-cl -DCMAKE_C_COMPILER=clang-cl -DMSVC_STATIC_CRT=ON .. - if [%WITH_FORTRAN%]==[yes] cmake -G "Ninja" -DCMAKE_CXX_COMPILER=clang-cl -DCMAKE_C_COMPILER=clang-cl -DCMAKE_Fortran_COMPILER=flang -DBUILD_WITHOUT_LAPACK=no -DNOFORTRAN=0 .. - - if [%DYNAMIC_ARCH%]==[ON] cmake -DDYNAMIC_ARCH=ON .. + - if [%DYNAMIC_ARCH%]==[ON] cmake -DDYNAMIC_ARCH=ON -DDYNAMIC_LIST='CORE2;NEHALEM;SANDYBRIDGE;BULLDOZER;HASWELL' .. build_script: - cmake --build .
converted dchmod markdown to man page
-.\" Automatically generated by Pandoc 1.17.1 +.\" Automatically generated by Pandoc 1.19.1 .\" .TH "DCHMOD" "1" "" "" "" .hy +.SH NAME .PP -NAME +dchmod \- distributed chmod program +.SH SYNOPSIS .PP -dchmod \- distributed change mode program +\f[B]dchmod [OPTION] PATH...\f[] +.SH DESCRIPTION .PP -SYNOPSIS +Parallel MPI application to recurseively walk, and then change +permissions starting from the top level directory. .PP -dchmod ... +dchmod prrovides functionality similar to chmod. +Like chmod, the tool supports the use of octal or symbolic mode to +change the permissions. +But unlinke chmod, there is no need to use a recursive option, as the +permissions will be updated from the top level directory all the way to +the bottom of the tree by default. +.SH OPTIONS +.TP +.B \-g, \-\-group +Change group to specified group name. +.RS +.RE +.TP +.B \-m, \-\-mode +The mode that you want the file or directory to be. +.RS +.RE +.TP +.B \-e, \-\-exclude +Exclude a set of files from command given a regular expression. +.RS +.RE +.TP +.B \-m, \-\-match +Match a set of files from command given a regular expression. +.RS +.RE +.TP +.B \-n, \-\-name +Match or exclude the regular expression based only on file name, and not +the full path. +Should be used in combination with the match and/or exclude options if +you don not want to match/exclude the full path name, but just the file +name. +.RS +.RE +.TP +.B \-h, \-\-help +Print the command usage, and the list of options available. +.RS +.RE +.TP +.B \-v, \-\-verbose +Prints a list of statistics/timing data for the command. +How many files walked, how many levels there are in the tree, and how +many files the command operated on. +This option also prints the files/sec for each of those. +.RS +.RE +.SS Known bugs .PP -DESCRIPTION +N/A +.SH SEE ALSO .PP -OPTIONS +\f[C]dcmp\f[] (1). +\f[C]dcp\f[] (1). +\f[C]dfilemaker\f[] (1). +\f[C]dfind\f[] (1). +\f[C]dgrep\f[] (1). +\f[C]dparallel\f[] (1). +\f[C]drm\f[] (1). +\f[C]dtar\f[] (1). +\f[C]dwalk\f[] (1). .PP -\-h, \-\-help : Print a brief message listing the drm(1) options and -usage. -.PP -\-v, \-\-version : Print version information and exit. -.PP -add the other options -.PP -Known bugs -.PP -SEE ALSO -.PP -dcmp (1). -dcp (1). -dfind:ilemaker (1). -dfind (1). -dgrep (1). -dparallel (1). -drm (1). -dtar (1). -dwalk (1). -.PP -The FileUtils source code and all documentation may be downloaded from -http://fileutils.io +The mpiFileUtils source code and all documentation may be downloaded +from <http://fileutils.io>
psa_tls12_prf_psk_to_ms_set_key(): add support for other secret input
@@ -5299,31 +5299,58 @@ static psa_status_t psa_tls12_prf_psk_to_ms_set_key( size_t data_length ) { psa_status_t status; - uint8_t pms[ 4 + 2 * PSA_TLS12_PSK_TO_MS_PSK_MAX_SIZE ]; - uint8_t *cur = pms; + const size_t pms_len = ( prf->state == PSA_TLS12_PRF_STATE_OTHER_KEY_SET ? + 4 + data_length + prf->other_secret_length : + 4 + 2 * data_length ); if( data_length > PSA_TLS12_PSK_TO_MS_PSK_MAX_SIZE ) return( PSA_ERROR_INVALID_ARGUMENT ); - /* Quoting RFC 4279, Section 2: + uint8_t *pms = mbedtls_calloc( 1, pms_len ); + uint8_t *cur = pms; + + /* pure-PSK: + * Quoting RFC 4279, Section 2: * * The premaster secret is formed as follows: if the PSK is N octets * long, concatenate a uint16 with the value N, N zero octets, a second * uint16 with the value N, and the PSK itself. + * + * mixed-PSK: + * In a DHE-PSK, RSA-PSK, ECDHE-PSK the premaster secret is formed as + * follows: concatenate a uint16 with the length of the other secret, + * the other secret itself, uint16 with the length of PSK, and the + * PSK itself. + * For details please check: + * - RFC 4279, Section 4 for the definition of RSA-PSK, + * - RFC 4279, Section 3 for the definition of DHE-PSK, + * - RFC 5489 for the definition of ECDHE-PSK. */ + if ( prf->state == PSA_TLS12_PRF_STATE_OTHER_KEY_SET ) + { + *cur++ = MBEDTLS_BYTE_1( prf->other_secret_length ); + *cur++ = MBEDTLS_BYTE_0( prf->other_secret_length ); + memcpy( cur, prf->other_secret, prf->other_secret_length ); + cur += prf->other_secret_length; + } + else + { *cur++ = MBEDTLS_BYTE_1( data_length ); *cur++ = MBEDTLS_BYTE_0( data_length ); memset( cur, 0, data_length ); cur += data_length; - *cur++ = pms[0]; - *cur++ = pms[1]; + } + + *cur++ = MBEDTLS_BYTE_1( data_length ); + *cur++ = MBEDTLS_BYTE_0( data_length ); memcpy( cur, data, data_length ); cur += data_length; status = psa_tls12_prf_set_key( prf, pms, cur - pms ); - mbedtls_platform_zeroize( pms, sizeof( pms ) ); + mbedtls_platform_zeroize( pms, pms_len ); + mbedtls_free( pms ); return( status ); }
Correct names: "Linux" -> "GNU+Linux", "Arch Linux" -> "Arch"
@@ -115,8 +115,8 @@ Download, extract and compile GoAccess with: ### Distributions ### -It is easiest to install GoAccess on Linux using the preferred package manager -of your Linux distribution. Please note that not all distributions will have +It is easiest to install GoAccess on GNU+Linux using the preferred package manager +of your GNU+Linux distribution. Please note that not all distributions will have the latest version of GoAccess available. #### Debian/Ubuntu #### @@ -141,7 +141,7 @@ alternative option below. # yum install goaccess -#### Arch Linux #### +#### Arch #### # pacman -S goaccess @@ -185,7 +185,7 @@ log analysis, all in a 4 MB package. If you prefer to go the more tedious route, GoAccess can be used in Windows through Cygwin. See Cygwin's <a href="https://goaccess.io/faq#installation">packages</a>. -Or through the Linux Subsystem on Windows 10. +Or through the GNU+Linux Subsystem on Windows 10. #### Distribution Packages #### @@ -197,7 +197,7 @@ Distro | NCurses | GeoIP (opt) |GeoIP2 (opt) ---------------------- | -----------------|------------------|---------------------- | ------------------- **Ubuntu/Debian** | libncursesw5-dev | libgeoip-dev | libmaxminddb-dev | libssl-dev **RHEL/CentOS** | ncurses-devel | geoip-devel | libmaxminddb-devel | openssl-devel -**Arch Linux** | ncurses | geoip | libmaxminddb | openssl +**Arch** | ncurses | geoip | libmaxminddb | openssl **Gentoo** | sys-libs/ncurses | dev-libs/geoip | dev-libs/libmaxminddb | dev-libs/openssl **Slackware** | ncurses | GeoIP | libmaxminddb | openssl
settings-store: handle missing desk
:: [%desk @ ~] =* desk i.t.pax - ?> (~(has by settings) desk) [~ this] :: [%bucket @ @ ~] :: [%x %desk @ ~] =* desk i.t.t.pax - ?~ desk-settings=(~(get by settings) desk) [~ ~] - ``settings-data+!>(desk+u.desk-settings) + =/ desk-settings (~(gut by settings) desk ~) + ``settings-data+!>(desk+desk-settings) :: [%x %bucket @ @ ~] =* desk i.t.t.pax
SR-MPLS: MPLS table checks
@@ -159,6 +159,10 @@ sr_mpls_policy_add (mpls_label_t bsid, mpls_label_t * segments, if (!sm->sr_policies_index_hash) sm->sr_policies_index_hash = hash_create (0, sizeof (mpls_label_t)); + /* MPLS SR policies cannot be created unless the MPLS table is present */ + if (~0 == fib_table_find (FIB_PROTOCOL_MPLS, MPLS_FIB_DEFAULT_TABLE_ID)) + return (VNET_API_ERROR_NO_SUCH_TABLE); + /* Search for existing keys (BSID) */ p = hash_get (sm->sr_policies_index_hash, bsid); if (p) @@ -169,6 +173,13 @@ sr_mpls_policy_add (mpls_label_t bsid, mpls_label_t * segments, /* Add an SR policy object */ pool_get (sm->sr_policies, sr_policy); memset (sr_policy, 0, sizeof (*sr_policy)); + + /* the first policy needs to lock the MPLS table so it doesn't + * disappear with policies in it */ + if (1 == pool_elts (sm->sr_policies)) + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + MPLS_FIB_DEFAULT_TABLE_ID, + FIB_SOURCE_SR); sr_policy->bsid = bsid; sr_policy->type = behavior; sr_policy->endpoint_type = 0; @@ -261,6 +272,10 @@ sr_mpls_policy_del (mpls_label_t bsid) hash_unset (sm->sr_policies_index_hash, sr_policy->bsid); pool_put (sm->sr_policies, sr_policy); + if (0 == pool_elts (sm->sr_policies)) + fib_table_unlock (MPLS_FIB_DEFAULT_TABLE_ID, + FIB_PROTOCOL_MPLS, FIB_SOURCE_SR); + return 0; } @@ -545,6 +560,8 @@ sr_mpls_policy_command_fn (vlib_main_t * vm, unformat_input_t * input, return clib_error_return (0, "Could not modify the segment list. " "The given SL is not associated with such SR policy."); + case VNET_API_ERROR_NO_SUCH_TABLE: + return clib_error_return (0, "the Default MPLS table is not present"); default: return clib_error_return (0, "BUG: sr policy returns %d", rv); }
BUFR: Fix assert
@@ -767,7 +767,7 @@ static int descriptor_get_min_max(bufr_descriptor* bd, long width, long referenc { /* Maximum value is allowed to be the largest number (all bits 1) which means it's MISSING */ unsigned long max1 = (1UL << width) - 1; /* Highest value for number with 'width' bits */ - DebugAssert(width > 0 && width <= 32); + DebugAssert(width > 0 && width < 64); *maxAllowed = (max1 + reference) * factor; *minAllowed = reference * factor;
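For context, a standalone sketch (not eccodes code) of why the new bound is `width < 64` rather than `<= 64`: on a platform where unsigned long is 64 bits, shifting it by 64 or more is undefined behaviour in C, so 63 is the largest width for which `(1UL << width) - 1` is well defined.

#include <assert.h>
#include <stdio.h>

/* Standalone illustration; assumes a 64-bit unsigned long. */
static unsigned long max_for_width(int width)
{
    /* A shift count equal to or larger than the operand width (64 here)
     * is undefined behaviour, hence the strict upper bound. */
    assert(width > 0 && width < 64);
    return (1UL << width) - 1;
}

int main(void)
{
    printf("%lu\n", max_for_width(10)); /* prints 1023 */
    return 0;
}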
output: retry_limit allows a 'false' value for unlimited retries
@@ -290,7 +290,13 @@ int flb_output_set_property(struct flb_output_instance *out, char *k, char *v) } else if (prop_key_check("retry_limit", k, len) == 0) { if (tmp) { + if (strcmp(tmp, "false") == 0 || strcmp(tmp, "off") == 0) { + /* No limits for retries */ + out->retry_limit = -1; + } + else { out->retry_limit = atoi(tmp); + } flb_free(tmp); } else {
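For reference, a minimal (hypothetical) output section showing how the new value is meant to be used in a classic Fluent Bit configuration file; the plugin name and match pattern are placeholders:

[OUTPUT]
    Name        stdout
    Match       *
    # 'false' (or 'off') now maps to retry_limit = -1, i.e. unlimited retries
    Retry_Limit false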
audio: throw if miniaudio fails to initialize; This prevents a situation where miniaudio fails to initialize but the audio module is still usable.
@@ -215,12 +215,11 @@ static Spatializer* spatializers[] = { bool lovrAudioInit(const char* spatializer) { if (state.initialized) return false; - if (ma_context_init(NULL, 0, NULL, &state.context)) { - return false; - } + ma_result result = ma_context_init(NULL, 0, NULL, &state.context); + lovrAssert(result == MA_SUCCESS, "Failed to initialize miniaudio"); - int mutexStatus = ma_mutex_init(&state.lock); - lovrAssert(mutexStatus == MA_SUCCESS, "Failed to create audio mutex"); + result = ma_mutex_init(&state.lock); + lovrAssert(result == MA_SUCCESS, "Failed to create audio mutex"); for (size_t i = 0; i < sizeof(spatializers) / sizeof(spatializers[0]); i++) { if (spatializer && strcmp(spatializer, spatializers[i]->name)) {
doc: clarify valid array
@@ -24,7 +24,7 @@ an array or not. ## Decision Store length in metadata `array` of key, or keep metadata `array` empty if empty array. -Only children that have `#` syntax are allowed. +Only children that have `#` syntax are allowed in a valid array. The index start with `#0`. Both `keyAddName("#12")` or `keyAddBaseName("#_12")` is allowed to add the 13th index. @@ -80,12 +80,12 @@ user:/myarray/#0 system:/myarray # <- not found in cascading lookup, as user:/myarray exists ``` -Guarantees we want from the spec plugin: +The `spec` plugin should check if it is a valid array, i.e.: -- that the parent key always contain `array`. -- that the correct length is in `array` -- that the array only contains `#` children -- that the children are numbered from 0 to n, without holes +- that the parent key always contain the metadata `array`, +- that the correct length is in `array`, +- that the array only contains `#` children, and +- that the children are numbered from `#0` to `#n`, without holes. ## Rationale
sys: config: Allow LittleFS backend
@@ -22,12 +22,14 @@ syscfg.defs: value: 0 restrictions: - '!CONFIG_NFFS' + - '!CONFIG_LITTLEFS' - '!CONFIG_FCB2' - 'CONFIG_FCB_FLASH_AREA' CONFIG_NFFS: description: 'Config default storage is in NFFS' value: 0 restrictions: + - '!CONFIG_LITTLEFS' - '!CONFIG_FCB' - '!CONFIG_FCB2' CONFIG_FCB2: @@ -36,7 +38,16 @@ syscfg.defs: restrictions: - '!CONFIG_FCB' - '!CONFIG_NFFS' + - '!CONFIG_LITTLEFS' - 'CONFIG_FCB_FLASH_AREA' + CONFIG_LITTLEFS: + description: 'Config default storage is in littefs' + value: 0 + restrictions: + - '!CONFIG_NFFS' + - '!CONFIG_FCB' + - '!CONFIG_FCB2' + CONFIG_MGMT: description: 'SMP access to config' value: 0
ci: fix "can set sleep wake stub from stack in RTC RAM" test case failure "can set sleep wake stub from stack in RTC RAM" would randomly fail on S3 due to stack overflow. Fixed wrong usage of stack size and slightly increased it.
@@ -324,11 +324,12 @@ static void prepare_wake_stub_from_rtc(void) a memory capability (as it's an implementation detail). So to test this we need to allocate the stack statically. */ + #define STACK_SIZE 1500 #if CONFIG_IDF_TARGET_ESP32S3 - uint8_t *sleep_stack = (uint8_t *)heap_caps_malloc(1024, MALLOC_CAP_RTCRAM); + uint8_t *sleep_stack = (uint8_t *)heap_caps_malloc(STACK_SIZE, MALLOC_CAP_RTCRAM); TEST_ASSERT((uint32_t)sleep_stack >= SOC_RTC_DRAM_LOW && (uint32_t)sleep_stack < SOC_RTC_DRAM_HIGH); #else - static RTC_FAST_ATTR uint8_t sleep_stack[1024]; + static RTC_FAST_ATTR uint8_t sleep_stack[STACK_SIZE]; #endif static RTC_FAST_ATTR StaticTask_t sleep_task; @@ -336,7 +337,7 @@ static void prepare_wake_stub_from_rtc(void) * wake from deep sleep. So to ensure unused stack is different if test is re-run without a full reset, * fill with some random bytes */ - esp_fill_random(sleep_stack, sizeof(sleep_stack)); + esp_fill_random(sleep_stack, STACK_SIZE); /* to make things extra sure, start a periodic timer to write to RTC FAST RAM at high frequency */ const esp_timer_create_args_t timer_args = { @@ -350,7 +351,7 @@ static void prepare_wake_stub_from_rtc(void) ESP_ERROR_CHECK( esp_timer_start_periodic(timer, 200) ); printf("Creating test task with stack %p\n", sleep_stack); - TEST_ASSERT_NOT_NULL(xTaskCreateStatic( (void *)prepare_wake_stub, "sleep", sizeof(sleep_stack), NULL, + TEST_ASSERT_NOT_NULL(xTaskCreateStatic( (void *)prepare_wake_stub, "sleep", STACK_SIZE, NULL, UNITY_FREERTOS_PRIORITY, sleep_stack, &sleep_task)); vTaskDelay(1000 / portTICK_PERIOD_MS); TEST_FAIL_MESSAGE("Should be asleep by now");
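The underlying pitfall is worth spelling out with a toy example (plain C, unrelated to the ESP-IDF APIs above): sizeof applied to a pointer yields the size of the pointer itself, not of the buffer it points to, which is why the heap-allocated path needs the explicit STACK_SIZE macro.

#include <stdio.h>
#include <stdlib.h>

#define STACK_SIZE 1500

int main(void)
{
    static unsigned char static_stack[STACK_SIZE];
    unsigned char *heap_stack = malloc(STACK_SIZE);

    /* sizeof(static_stack) is 1500, but sizeof(heap_stack) is only the size
     * of a pointer (typically 4 or 8), so it must not be used as the stack size. */
    printf("%zu vs %zu\n", sizeof(static_stack), sizeof(heap_stack));

    free(heap_stack);
    return 0;
}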
Changed default P/Q values for CGEMM and ZGEMM (Power10 only)
@@ -2471,8 +2471,8 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #define SGEMM_DEFAULT_Q 512 #define DGEMM_DEFAULT_Q 512 -#define CGEMM_DEFAULT_Q 1026 -#define ZGEMM_DEFAULT_Q 1026 +#define CGEMM_DEFAULT_Q 384 +#define ZGEMM_DEFAULT_Q 384 #define SGEMM_DEFAULT_R 4096 #define DGEMM_DEFAULT_R 4096
docs: Mark network related chapters as updated for C6
@@ -46,7 +46,6 @@ api-guides/RF_calibration api-guides/unit-tests api-guides/deep-sleep-stub api-guides/blufi -api-guides/lwip api-guides/coexist api-guides/flash_psram_config api-guides/usb-serial-jtag-console @@ -149,15 +148,12 @@ api-reference/peripherals/rmt api-reference/kconfig api-reference/network api-reference/network/esp_openthread -api-reference/network/esp_eth -api-reference/network/esp_netif_driver api-reference/network/esp_dpp api-reference/network/esp_now api-reference/network/esp-wifi-mesh api-reference/network/esp_smartconfig api-reference/network/esp_wifi api-reference/network/index -api-reference/network/esp_netif api-reference/system api-reference/system/sleep_modes api-reference/system/ota @@ -227,9 +223,7 @@ api-reference/bluetooth/classic_bt api-reference/error-codes api-reference/index api-reference/protocols -api-reference/protocols/icmp_echo api-reference/protocols/esp_serial_slave_link -api-reference/protocols/mqtt api-reference/protocols/mbedtls api-reference/protocols/esp_http_server api-reference/protocols/esp_sdio_slave_protocol @@ -240,9 +234,7 @@ api-reference/protocols/esp_https_server api-reference/protocols/esp_spi_slave_protocol api-reference/protocols/modbus api-reference/protocols/esp_tls -api-reference/protocols/mdns api-reference/protocols/index -api-reference/protocols/asio security security/flash-encryption security/secure-boot-v2
posix: dont overwrite posixsrv's oid in open
@@ -369,6 +369,8 @@ int posix_open(const char *filename, int oflag, char *ustack) open_file_t *f; mode_t mode; + hal_memset(&pipesrv, 0xff, sizeof(oid_t)); + if ((proc_lookup("/dev/posix/pipes", NULL, &pipesrv)) < 0) ; /* that's fine */ @@ -376,7 +378,6 @@ int posix_open(const char *filename, int oflag, char *ustack) return -1; hal_memset(&dev, 0, sizeof(oid_t)); - hal_memset(&pipesrv, -1, sizeof(oid_t)); proc_lockSet(&p->lock);
Try disabling address.wast
@@ -235,10 +235,9 @@ class Wasm3(): self.run() try: if self.loaded: - res = self.load(self.loaded) - print(res) + self.load(self.loaded) except Exception as e: - print(e) + pass break except Exception as e: print(f"wasm3: {e} => retry") @@ -288,8 +287,6 @@ class Wasm3(): else: error = "Timeout" - print("Last output: " + buff) - self.terminate() raise Exception(error) @@ -321,6 +318,7 @@ print("Version: " + wasm3.version()) blacklist = Blacklist([ "float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*", "imports.wast:*", + "address.wast:*", "names.wast:630 *", # name that starts with '\0' ]) @@ -482,10 +480,8 @@ for fn in jsonFiles: try: wasm_fn = os.path.join(pathname(fn), wasm_module) - res = wasm3.load(wasm_fn) - print(res) + wasm3.load(wasm_fn) except Exception as e: - print(e) pass #fatal(str(e)) elif ( test.type == "action" or
Added extern C {} wrapper consistent with other headers in include/sys
@@ -72,10 +72,28 @@ struct utsname char machine[SYS_NAMELEN]; /* Machine hardware */ }; +/**************************************************************************** + * Pre-processor Definitions + ****************************************************************************/ + +#undef EXTERN +#if defined(__cplusplus) +#define EXTERN extern "C" +extern "C" +{ +#else +#define EXTERN extern +#endif + /**************************************************************************** * Public Function Prototypes ****************************************************************************/ int uname(FAR struct utsname *name); +#undef EXTERN +#if defined(__cplusplus) +} +#endif + #endif /* __INCLUDE_SYS_UTSNAME_H */
schemas CHANGE add enum for YANG versions and slightly change struct lysp_module
@@ -500,6 +500,15 @@ struct lysp_notif { uint16_t flags; /**< [schema node flags](@ref snodeflags) - only LYS_STATUS_* values are allowed */ }; +/** + * @brief supported YANG schema version values + */ +typedef enum LYS_VERSION { + LYS_VERSION_UNDEF = 0, /**< no specific version, YANG 1.0 as default */ + LYS_VERSION_1_0 = 1, /**< YANG 1.0 */ + LYS_VERSION_1_1 = 2 /**< YANG 1.1 */ +} LYS_VERSION; + /** * @brief Printable YANG schema tree structure representing YANG module. * @@ -536,17 +545,12 @@ struct lysp_module { struct lysp_deviation *deviations; /**< list of deviations (NULL-terminated) */ struct lysp_ext_instance **exts; /**< list of the extension instances (NULL-terminated) */ - uint8_t type:1; /**< 0 - module, 1 - submodule */ - uint8_t version:3; /**< yang-version (LYS_VERSION): - - 0 = not specified, YANG 1.0 as default, - - 1 = YANG 1.0, - - 2 = YANG 1.1 */ - uint8_t deviated:2; /**< deviated flag: - - 0 = not deviated, - - 1 = the module is deviated by another module */ + uint8_t submodule:1; /**< flag to distinguish main modules and submodules */ + uint8_t deviated:1; /**< flag if the module is deviated by another module */ uint8_t implemented:1; /**< flag if the module is implemented, not just imported */ uint8_t latest_revision:1; /**< flag if the module was loaded without specific revision and is the latest revision found */ + uint8_t version:4; /**< yang-version (LYS_VERSION values) */ }; /**
test(kdb-import): add shell-recorder tests to verify that format and provider are accepted
@@ -82,6 +82,33 @@ To import a configuration stored in the `ini` format in a file called `example.i To restore a backup (stored as `sw.ecf`) of a user's configuration below `system/sw`:<br> `cat sw.ecf | kdb import system/sw` +To import a sample `json` content with the `yajl` plugin: +```sh +# import two keys from a json string +kdb import user/tests/ yajl <<< "{ \"one\": \"one\", \"two\": \"two\" }" + +# get the values and verify they got imported correctly +kdb get user/tests/one +#> one +kdb get user/tests/two +#> two +kdb rm -r user/tests +``` + +To import a sample `json` content via specifying : +```sh +# import two keys from a json string +kdb import user/tests/ json <<< "{ \"one\": \"one\", \"two\": \"two\" }" + +# get the values and verify they got imported correctly +kdb get user/tests/one +#> one +kdb get user/tests/two +#> two + +kdb rm -r user/tests +``` + ## SEE ALSO - [elektra-merge-strategy(7)](elektra-merge-strategy.md)
zuse: include all relevant azimuth addresses
++ azimuth 0x223c.067f.8cf2.8ae1.73ee.5caf.ea60.ca44.c335.fecb :: + ++ ecliptic + 0x6ac0.7b7c.4601.b5ce.11de.8dfe.6335.b871.c7c4.dd4d + :: ++ linear-star-release 0x86cd.9cd0.992f.0423.1751.e376.1de4.5cec.ea5d.1801 :: ++ azimuth 0x308a.b6a6.024c.f198.b57e.008d.0ac9.ad02.1988.6579 :: + ++ ecliptic + 0x8b9f.86a2.8921.d9c7.05b3.113a.755f.b979.e1bd.1bce + :: + ++ linear-star-release + 0x1f8e.dd03.1ee4.1474.0aed.b39b.84fb.8f2f.66ca.422f + :: + ++ conditional-star-release + 0x0 + :: ++ delegated-sending - 0x1000.0000.0000.0000.0000.0000.0000.0000.0000.0000 + 0x3e8c.a510.354b.c2fd.bbd6.1502.52d9.3105.c9c2.7bbe :: ++ launch 4.601.630 + ++ public launch -- :: :: ++ azimuth 0x863d.9c2e.5c4c.1335.96cf.ac29.d552.55f0.d0f8.6381 :: local bridge
doc: update README technical community meeting Add a mention of the weekly Technical Community meeting to the repo's README, with links to the meeting information in the wiki.
@@ -31,6 +31,13 @@ its members and the User Community to get the most out of Project ACRN. Welcome to the project ARCN community! +We're now holding weekly Technical Community Meetings and encourage you +to call in and learn more about the project. Meeting information is on +the `TCM Meeting page`_ in our `ACRN wiki <https://wiki.projectacrn.org/>`_. + +.. _TCM Meeting page: + https://github.com/projectacrn/acrn-hypervisor/wiki/ACRN-Committee-and-Working-Group-Meetings#technical-community-meetings + Resources *********
Set JANET_DIST_DIR on release.
@@ -4,7 +4,7 @@ script: - make test - sudo make install - make test-install -- make build/janet-${TRAVIS_TAG}-${TRAVIS_OS_NAME}.tar.gz +- JANET_DIST_DIR=janet-${TRAVIS_TAG}-${TRAVIS_OS_NAME} make build/janet-${TRAVIS_TAG}-${TRAVIS_OS_NAME}.tar.gz compiler: - clang - gcc
[drivers/usb] Fixed a bug that may cause stack overflow
@@ -162,6 +162,10 @@ rt_err_t rt_usbh_attatch_instance(uinst_t device) /* alloc memory for configuration descriptor */ device->cfg_desc = (ucfg_desc_t)rt_malloc(cfg_desc.wTotalLength); + if(device->cfg_desc == RT_NULL) + { + return RT_ENOMEM; + } rt_memset(device->cfg_desc, 0, cfg_desc.wTotalLength); /* get full configuration descriptor */ @@ -219,6 +223,10 @@ rt_err_t rt_usbh_attatch_instance(uinst_t device) { /* allocate memory for interface device */ device->intf[i] = (struct uhintf*)rt_malloc(sizeof(struct uhintf)); + if(device->intf[i] == RT_NULL) + { + return RT_ENOMEM; + } device->intf[i]->drv = drv; device->intf[i]->device = device; device->intf[i]->intf_desc = intf_desc;
Node.js: checking for exception after running JS code from C++.
@@ -277,7 +277,7 @@ void Unit::request_handler(nxt_unit_request_info_t *req) { Unit *obj; - napi_value socket, request, response, global, server_obj; + napi_value socket, request, response, global, server_obj, except; napi_value emit_events, events_res, async_name, resource_object; napi_status status; napi_async_context async_context; @@ -372,10 +372,27 @@ Unit::request_handler(nxt_unit_request_info_t *req) status = napi_make_callback(obj->env_, async_context, server_obj, emit_events, 3, events_args, &events_res); if (status != napi_ok) { + if (status != napi_pending_exception) { napi_throw_error(obj->env_, NULL, "Failed to make callback"); return; } + status = napi_get_and_clear_last_exception(obj->env_, &except); + if (status != napi_ok) { + napi_throw_error(obj->env_, NULL, + "Failed to get and clear last exception"); + return; + } + + /* Logging a description of the error and call stack. */ + status = napi_fatal_exception(obj->env_, except); + if (status != napi_ok) { + napi_throw_error(obj->env_, NULL, "Failed to call " + "napi_fatal_exception() function"); + return; + } + } + status = napi_close_callback_scope(obj->env_, async_scope); if (status != napi_ok) { napi_throw_error(obj->env_, NULL, "Failed to close callback scope");
gnu7 default in v1.3.1
@@ -102,17 +102,25 @@ update_pkg_if_link () { perl -pi -e 's/project=\'\S+\'/project=\'${branchNew}\'/ ${localdir}/_link || ERROR "unable to update parent in _link for $1" fi + # update to latest gnu compiler family + gnu_family=gnu7 + + echo $1 | grep -q gnu + if [ $? -eq 0 ];then + echo "Updating _link for gnu compiler family" + perl -pi -e "s/compiler_family gnu</compiler_family ${gnu_family}</" ${localdir}/_link + fi + cd - } # end update_pkg_if_link() update_pkg_if_service () { local localdir="${branchNew}/$1" + local version=`echo $branchNew | awk -F : '{print $2}'` echo "${branchNew}" | grep -q ":Update" - local version=`echo $branchNew | awk -F : '{print $2}'` - if [ $? -eq 0 ];then local update_ver=`echo $branchNew | awk -F : '{print $3}' | awk -F 'Update' '{print $2}'` version="$version.$update_ver"
Swapped to simpler obs residual model
@@ -404,23 +404,10 @@ static bool map_obs_data(void *user, const struct CnMat *Z, const struct CnMat * SurviveKalmanTracker *tracker = (SurviveKalmanTracker *)user; if(y) { subnd(cn_as_vector(y), cn_as_const_vector(Z), cn_as_const_vector(x_t), 7); - //quatfind(cn_as_vector(y) + 3, cn_as_const_vector(x_t) + 3, cn_as_const_vector(Z) + 3); - quatfind(cn_as_vector(y) + 3, cn_as_const_vector(Z) + 3, cn_as_const_vector(x_t) + 3); - cn_as_vector(y)[3] = 1 - fabs((cn_as_vector(y) + 3)[0]); } if(H_k) { cn_set_zero(H_k); - for(int i = 0;i < 3;i++) { - cnMatrixSet(H_k, i, i, 1); - } - FLT jac[16]; - gen_quatfind_jac_q1(jac, cn_as_const_vector(x_t) + 3, cn_as_const_vector(Z) + 3); - //gen_quatfind_jac_q2(jac, cn_as_const_vector(Z) + 3, cn_as_const_vector(x_t) + 3); - for(int i = 0;i < 4;i++) { - for(int j = 0;j < 4;j++) { - cnMatrixSet(H_k, i + 3, j + 3, jac[j + i * 4]); - } - } + cn_set_diag_val(H_k, 1); } return true; } @@ -1002,6 +989,7 @@ void survive_kalman_tracker_init(SurviveKalmanTracker *tracker, SurviveObject *s tracker->lightcap_model.term_criteria.max_iterations = 5; cnkalman_meas_model_init(&tracker->model, "obs", &tracker->obs_model, map_obs_data); + tracker->obs_model.term_criteria.max_iterations = 1; tracker->obs_model.adaptive = tracker->adaptive_obs; cnkalman_meas_model_init(&tracker->model, "zvu", &tracker->zvu_model, 0);
apps.c: Fix crash in case uri arg of IS_HTTP or IS_HTTPS is NULL
@@ -469,10 +469,10 @@ CONF *app_load_config_modules(const char *configfile) return conf; } -#define IS_HTTP(uri) \ - (strncmp(uri, OSSL_HTTP_PREFIX, strlen(OSSL_HTTP_PREFIX)) == 0) -#define IS_HTTPS(uri) \ - (strncmp(uri, OSSL_HTTPS_PREFIX, strlen(OSSL_HTTPS_PREFIX)) == 0) +#define IS_HTTP(uri) ((uri) != NULL \ + && strncmp(uri, OSSL_HTTP_PREFIX, strlen(OSSL_HTTP_PREFIX)) == 0) +#define IS_HTTPS(uri) ((uri) != NULL \ + && strncmp(uri, OSSL_HTTPS_PREFIX, strlen(OSSL_HTTPS_PREFIX)) == 0) X509 *load_cert_pass(const char *uri, int maybe_stdin, const char *pass, const char *desc)
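A self-contained sketch of the guarded-macro pattern the patch adopts (the prefix constant and URIs below are invented, not OpenSSL's): the NULL test must come first so the macro can be applied to an optional URI without ever passing a null pointer to strncmp().

#include <stddef.h>
#include <string.h>

#define HTTP_PREFIX "http://"
#define IS_HTTP(uri) ((uri) != NULL \
    && strncmp((uri), HTTP_PREFIX, strlen(HTTP_PREFIX)) == 0)

int main(void)
{
    const char *remote = "http://example.test/cert.pem";
    const char *missing = NULL;

    /* Short-circuit evaluation skips strncmp() entirely when uri is NULL. */
    return (IS_HTTP(remote) && !IS_HTTP(missing)) ? 0 : 1;
}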
kdb-complete: use reference type to prevent copying
@@ -317,7 +317,7 @@ void CompleteCommand::addNamespaces (map<Key, pair<int, int>> & hierarchy, Cmdli { // since ens are numbers, there is no way to get a string representation if not found in that case bool found = false; - for (const string ns : namespaces) + for (const string & ns : namespaces) { found = found || ckdb::keyGetNamespace (Key (ns, KEY_END).getKey ()) == ens; } @@ -328,7 +328,7 @@ void CompleteCommand::addNamespaces (map<Key, pair<int, int>> & hierarchy, Cmdli } } - for (const string ns : namespaces) + for (const string & ns : namespaces) { const Key nsKey (ns, KEY_END); if ((cl.debug || cl.verbose) && ckdb::keyGetNamespace (nsKey.getKey ()) == KEY_NS_EMPTY)
fix warning on MSVC 'function' : different 'const' qualifiers
@@ -528,7 +528,7 @@ static void DeleteConfig(Config* const config) { if (config != NULL) { free(config->args_); if (config->own_argv_) { - free(config->argv_); + free((void*)config->argv_); WebPDataClear(&config->argv_data_); } memset(config, 0, sizeof(*config));
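A small reconstruction of the warning and its fix, with invented type and field names: MSVC warns when a pointer to const is passed straight to free(), so casting to void* first states explicitly that only ownership of the block is being given up.

#include <stdlib.h>

struct config {
    const char **argv_;   /* heap-owned array of pointers to const strings */
};

int main(void)
{
    struct config cfg;
    cfg.argv_ = (const char **)calloc(4, sizeof(char *));

    /* free((void*)...) silences MSVC's "different 'const' qualifiers" warning. */
    free((void *)cfg.argv_);
    return 0;
}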
[Fix] Remove patch large data
@@ -74,14 +74,12 @@ uint16_t data_count = 0; uint16_t data_size = 0; uint16_t crc_val = 0; static int64_t ll_rx_timestamp = 0; -uint16_t large_data_num = 0; /******************************************************************************* * Function ******************************************************************************/ static inline uint8_t Recep_IsAckNeeded(void); static inline uint16_t Recep_CtxIndexFromID(uint16_t id); -_CRITICAL void Recep_ComputeMsgNumber(void); /****************************************************************************** * @brief Reception init. * @param None @@ -147,8 +145,6 @@ _CRITICAL void Recep_GetHeader(volatile uint8_t *data) if (current_msg->header.size > MAX_DATA_MSG_SIZE) { data_size = MAX_DATA_MSG_SIZE; - // store the number of the messages that compose the large message - Recep_ComputeMsgNumber(); } else { @@ -188,8 +184,6 @@ _CRITICAL void Recep_GetHeader(volatile uint8_t *data) ******************************************************************************/ _CRITICAL void Recep_GetData(volatile uint8_t *data) { - static uint16_t last_crc = 0; - MsgAlloc_SetData(*data); if (data_count < data_size) { @@ -231,27 +225,7 @@ _CRITICAL void Recep_GetData(volatile uint8_t *data) } else { - // check if we have already received the same message - if ((crc == last_crc) && (large_data_num > 0)) - { - // if yes remove this message from memory - MsgAlloc_InvalidMsg(); - ctx.rx.callback = Recep_Drop; - return; - } - // if not treat message normally MsgAlloc_EndMsg(); - // reduce the number of remaining messages in case of a large message - if (large_data_num > 0) - { - large_data_num--; - // store the current crc for the next message comparison - last_crc = crc; - } - else - { - last_crc = 0; - } } } else @@ -730,24 +704,3 @@ static inline uint16_t Recep_CtxIndexFromID(uint16_t id) { return (id - ctx.ll_service_table[0].id); } - -/****************************************************************************** - * @brief computes the number of the messages that compose a large data message - * @param None - * @return None - * _CRITICAL function call in IRQ - ******************************************************************************/ -_CRITICAL void Recep_ComputeMsgNumber(void) -{ - LUOS_ASSERT(current_msg->header.size > MAX_DATA_MSG_SIZE); - // check if it is the first msg of large data received - if (large_data_num == 0) - { - // find the number of the messages that belong to the same large message - large_data_num = current_msg->header.size / MAX_DATA_MSG_SIZE; - if (current_msg->header.size % MAX_DATA_MSG_SIZE != 0) - { - large_data_num++; - } - } -}
Trying to fix (again) the Windows issue by moving the arrays inside the InitializeBodyFlare function.
@@ -883,6 +883,7 @@ struct BODY { double *daFFD; double *daLXUVFlare; + // GALHABIT int bGalHabit; /**< Use galhabit module */ double dPeriQ; /**< Pericenter distance */
doc: note that BN_new() initialises the BIGNUM. BN_new() and BN_secure_new() not only allocate memory, but also initialise it to a deterministic value, 0. Document that behaviour to make it explicit.
@@ -36,7 +36,8 @@ If B<a> is NULL, nothing is done. =head1 RETURN VALUES BN_new() and BN_secure_new() -return a pointer to the B<BIGNUM>. If the allocation fails, +return a pointer to the B<BIGNUM> initialised to the value 0. +If the allocation fails, they return B<NULL> and set an error code that can be obtained by L<ERR_get_error(3)>.
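A hedged usage sketch of the documented guarantee (error handling kept minimal): because BN_new() already initialises the value to 0, a fresh BIGNUM can be used immediately, for example as an accumulator.

#include <openssl/bn.h>

int main(void)
{
    BIGNUM *acc = BN_new();
    if (acc == NULL)
        return 1;

    /* No explicit BN_zero() needed: a freshly allocated BIGNUM is 0. */
    int ok = BN_is_zero(acc) && BN_add_word(acc, 42);

    BN_free(acc);
    return ok ? 0 : 1;
}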
CLEANUP: refactored the main function of collection delete thread.
@@ -5664,6 +5664,7 @@ static void *collection_delete_thread(void *arg) { struct default_engine *engine = arg; hash_item *it; + coll_meta_info *info; int current_ssl; uint32_t evict_count; uint32_t bg_evict_count = 0; @@ -5672,9 +5673,28 @@ static void *collection_delete_thread(void *arg) while (engine->initialized) { it = pop_coll_del_queue(); - if (it == NULL) { + if (it != NULL) { + assert(IS_COLL_ITEM(it)); + ENGINE_ITEM_TYPE type = GET_ITEM_TYPE(it); + + LOCK_CACHE(); + info = (coll_meta_info *)item_get_meta(it); + while (info->ccnt > 0) { + do_coll_elem_delete(info, type, 100); + if (info->ccnt > 0) { + UNLOCK_CACHE(); + LOCK_CACHE(); + } + } + do_item_free(it); + UNLOCK_CACHE(); + continue; + } + evict_count = 0; - if (item_evict_to_free && (current_ssl = slabs_space_shortage_level()) >= 10) { + if (item_evict_to_free) { + current_ssl = slabs_space_shortage_level(); + if (current_ssl >= 10) { LOCK_CACHE(); if (item_evict_to_free) { rel_time_t current_time = svcore->get_current_time(); @@ -5682,6 +5702,7 @@ static void *collection_delete_thread(void *arg) } UNLOCK_CACHE(); } + } if (evict_count > 0) { if (bg_evict_start == false) { /***** @@ -5714,24 +5735,6 @@ static void *collection_delete_thread(void *arg) } coll_del_thread_sleep(); } - continue; - } - - assert(IS_COLL_ITEM(it)); - ENGINE_ITEM_TYPE type = GET_ITEM_TYPE(it); - coll_meta_info *info; - - LOCK_CACHE(); - info = (coll_meta_info *)item_get_meta(it); - while (info->ccnt > 0) { - do_coll_elem_delete(info, type, 100); - if (info->ccnt > 0) { - UNLOCK_CACHE(); - LOCK_CACHE(); - } - } - do_item_free(it); - UNLOCK_CACHE(); } return NULL; }
mesh: Fix typo when deleting app key in CDB This commit sets `net_idx` to `BT_MESH_KEY_UNUSED` when deleting app key in cdb. this is port of
@@ -359,7 +359,7 @@ void bt_mesh_cdb_app_key_del(struct bt_mesh_cdb_app_key *key, bool store) bt_mesh_clear_cdb_app_key(key); } - key->net_idx = BT_MESH_ADDR_UNASSIGNED; + key->net_idx = BT_MESH_KEY_UNUSED; memset(key->keys, 0, sizeof(key->keys)); }
Fix crash on startup on Oculus driver
@@ -83,6 +83,8 @@ static ovrInputState *refreshButtons(void) { } static bool oculus_init(float supersample, float offset, uint32_t msaa) { + arr_init(&state.textures, realloc); + ovrResult result = ovr_Initialize(NULL); if (OVR_FAILURE(result)) { return false; @@ -112,6 +114,7 @@ static void oculus_destroy(void) { for (size_t i = 0; i < state.textures.length; i++) { lovrRelease(state.textures.data[i], lovrTextureDestroy); } + arr_free(&state.textures); map_free(&state.textureLookup); if (state.mirror) {
apps/wm_test: remove additional args
#pragma once #define WT_LOG(tag, fmt, args...) \ - printf(tag"[T%d] "fmt"\t%s:%d\n", getpid(), ##args, __FUNCTION__, __LINE__, ##args) + printf(tag "[T%d] "fmt"\t%s:%d\n", getpid(), ##args, __FUNCTION__, __LINE__) #define WT_LOGE(tag, fmt, args...) \ - printf(tag"[ERR][T%d] "fmt"\t%s:%d\n", getpid(), ##args, __FUNCTION__, __LINE__, ##args) + printf(tag "[ERR][T%d] "fmt"\t%s:%d\n", getpid(), ##args, __FUNCTION__, __LINE__) #define WT_LOGP(tag, fmt, args...) \ printf(fmt, ##args)
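To make the original bug easier to see, here is a minimal, self-contained copy of the fixed macro (GCC's named variadic-macro syntax, as in the header): the user arguments are expanded exactly once, before the trailing __FUNCTION__/__LINE__ pair; the removed second `##args` used to append extra arguments that the format string never consumed.

#include <stdio.h>
#include <unistd.h>

#define WT_LOG(tag, fmt, args...) \
	printf(tag "[T%d] " fmt "\t%s:%d\n", getpid(), ##args, __FUNCTION__, __LINE__)

int main(void)
{
	WT_LOG("[WT]", "scan finished, %d APs found", 3);
	return 0;
}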
Directory Value: Use separate contract function
#include <kdbhelper.h> -int elektraDirectoryvalueGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key * parentKey) -{ - if (!elektraStrCmp (keyName (parentKey), "system/elektra/modules/directoryvalue")) +/** + * @brief This function returns a key set containing the contract of this plugin. + * + * @return A contract describing the functionality of this plugin. + */ +static KeySet * directoryValueContract (void) { - KeySet * contract = - ksNew (30, keyNew ("system/elektra/modules/directoryvalue", KEY_VALUE, - "directoryvalue plugin waits for your orders", KEY_END), + return ksNew (30, + keyNew ("system/elektra/modules/directoryvalue", KEY_VALUE, "directoryvalue plugin waits for your orders", KEY_END), keyNew ("system/elektra/modules/directoryvalue/exports", KEY_END), keyNew ("system/elektra/modules/directoryvalue/exports/get", KEY_FUNC, elektraDirectoryvalueGet, KEY_END), keyNew ("system/elektra/modules/directoryvalue/exports/set", KEY_FUNC, elektraDirectoryvalueSet, KEY_END), #include ELEKTRA_README (directoryvalue) keyNew ("system/elektra/modules/directoryvalue/infos/version", KEY_VALUE, PLUGINVERSION, KEY_END), KS_END); +} + +int elektraDirectoryvalueGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key * parentKey) +{ + if (!elektraStrCmp (keyName (parentKey), "system/elektra/modules/directoryvalue")) + { + KeySet * contract = directoryValueContract (); ksAppend (returned, contract); ksDel (contract); return ELEKTRA_PLUGIN_STATUS_SUCCESS; } + return ELEKTRA_PLUGIN_STATUS_NO_UPDATE; }
docs - pxf with oss greenplum requires that the pxf server/agent bits be built and installed separately; refer to the pxf readmes in the greenplum database and apache hawq repos
@@ -52,4 +52,6 @@ the Greenplum hosts. - The deprecated <codeph>gpcheck</codeph> management utility and its replacement <codeph>gpsupport</codeph> are only supported with Pivotal Greenplum Database. +- To use the Greenplum Platform Extension Framework (PXF) with open source Greenplum Database, you must separately build and install the PXF server software. Refer to the build instructions in the PXF README files in the Greenplum Database and Apache HAWQ (incubating) repositories. + - Suggestions to contact Pivotal Technical Support in this documentation are intended only for Pivotal Greenplum Database customers.
armv8-m: Fix pthread_start syscall The 'arg' parameter is in R3, not in R2.
@@ -315,8 +315,9 @@ int arm_svcall(int irq, void *context, void *arg) * At this point, the following values are saved in context: * * R0 = SYS_pthread_start - * R1 = entrypt - * R2 = arg + * R1 = startup (trampoline) + * R2 = entrypt + * R3 = arg */ #if !defined(CONFIG_BUILD_FLAT) && !defined(CONFIG_DISABLE_PTHREAD) @@ -334,7 +335,7 @@ int arm_svcall(int irq, void *context, void *arg) */ regs[REG_R0] = regs[REG_R2]; /* pthread entry */ - regs[REG_R1] = regs[REG_R2]; /* arg */ + regs[REG_R1] = regs[REG_R3]; /* arg */ } break; #endif
more flags for the gcc compiler
@@ -69,17 +69,20 @@ AC_SUBST(LIBAGE) AC_LANG([C]) AC_PROG_CC_C99 AS_IF([test "$GCC" = yes], - [AX_APPEND_COMPILE_FLAGS([-Wall], [CFLAGS]) + [AX_APPEND_COMPILE_FLAGS([-Wall -Wextra], [CFLAGS]) dnl Be careful about adding the -fexceptions option; some versions of dnl GCC don't support it and it causes extra warnings that are only dnl distracting; avoid. AX_APPEND_COMPILE_FLAGS([-fexceptions], [CFLAGS]) - AX_APPEND_COMPILE_FLAGS([-fno-strict-aliasing -Wmissing-prototypes -Wstrict-prototypes], [CFLAGS])]) + AX_APPEND_COMPILE_FLAGS([-fno-strict-aliasing -Wmissing-prototypes -Wstrict-prototypes], [CFLAGS]) + AX_APPEND_COMPILE_FLAGS([-pedantic -Wduplicated-cond -Wduplicated-branches -Wlogical-op], [CFLAGS]) + AX_APPEND_COMPILE_FLAGS([-Wrestrict -Wnull-dereference -Wjump-misses-init -Wdouble-promotion], [CFLAGS]) + AX_APPEND_COMPILE_FLAGS([-Wshadow -Wformat=2 -Wmisleading-indentation], [CFLAGS])]) AC_LANG_PUSH([C++]) AC_PROG_CXX AS_IF([test "$GCC" = yes], - [AX_APPEND_COMPILE_FLAGS([-Wall], [CXXFLAGS]) + [AX_APPEND_COMPILE_FLAGS([-Wall -Wextra], [CXXFLAGS]) dnl Be careful about adding the -fexceptions option; some versions of dnl GCC don't support it and it causes extra warnings that are only dnl distracting; avoid.
test for all RPMs in meta-package
@@ -14,6 +14,10 @@ rm=$RESOURCE_MANAGER @test "[nagios] check for RPM" { run check_if_rpm_installed "nagios${DELIM}" assert_success + run check_if_rpm_installed "nagios-plugins-all${DELIM}" + assert_success + run check_if_rpm_installed "nrpe${DELIM}" + assert_success } @test "[nagios] test check_http" {
copy wrapper at sameplace
@@ -1071,9 +1071,7 @@ const struct operator_s* operator_copy_wrapper_sameplace(unsigned int N, const l const struct operator_s* operator_copy_wrapper(unsigned int N, const long* strs[N], const struct operator_s* op) { - int cpu = 0; - - return operator_copy_wrapper_sameplace(N, strs, op, &cpu); + return operator_copy_wrapper_sameplace(N, strs, op, NULL); }
Add wait for Db to sleep 1 sec before checking for db up.
@@ -557,11 +557,11 @@ class PSQL(Command): down = True results = {'rc':0, 'stdout':'', 'stderr':''} for i in range(60): + time.sleep(1) res = PSQL.run_sql_command('select count(*) from gp_dist_random(\'gp_id\');', results=results) if results['rc'] == 0: down = False break - time.sleep(1) if down: raise PSQLException('database has not come up')
nat: adding support for icmp-error msg Extending tests. Type: test
@@ -914,6 +914,41 @@ class NAT44EDTestCase(VppTestCase): class TestNAT44ED(NAT44EDTestCase): """ NAT44ED Test Case """ + def test_icmp_error(self): + """ NAT44ED test ICMP error message with inner header""" + + payload = "H" * 10 + + self.nat_add_address(self.nat_addr) + self.nat_add_inside_interface(self.pg0) + self.nat_add_outside_interface(self.pg1) + + # in2out (initiate connection) + p1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) / + IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) / + UDP(sport=21, dport=20) / payload) + + self.pg0.add_stream(p1) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + capture = self.pg1.get_capture(1)[0] + + # out2in (send error message) + p2 = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) / + IP(src=self.pg1.remote_ip4, dst=self.nat_addr) / + ICMP(type='dest-unreach', code='port-unreachable') / + capture[IP:]) + + self.pg1.add_stream(p2) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + capture = self.pg0.get_capture(1)[0] + + self.logger.info(ppp("p1 packet:", p1)) + self.logger.info(ppp("p2 packet:", p2)) + self.logger.info(ppp("capture packet:", capture)) + def test_users_dump(self): """ NAT44ED API test - nat44_user_dump """
[mod_extforward] skip after HANDLER_COMEBACK do not re-run mod_extforward uri handler after HANDLER_COMEBACK add some additional comments
@@ -128,6 +128,7 @@ typedef struct { /* connection-level state applied to requests in handle_request_env */ array *env; int ssl_client_verify; + uint32_t request_count; } handler_ctx; @@ -572,6 +573,7 @@ static int mod_extforward_set_addr(request_st * const r, plugin_data *p, const c if (extforward_check_proxy) { http_header_env_set(r, CONST_STR_LEN("_L_EXTFORWARD_ACTUAL_FOR"), CONST_BUF_LEN(con->dst_addr_buf)); } + hctx->request_count = con->request_count; hctx->saved_remote_addr = con->dst_addr; hctx->saved_remote_addr_buf = con->dst_addr_buf; /* patch connection address */ @@ -1047,6 +1049,7 @@ URIHANDLER_FUNC(mod_extforward_uri_handler) { int is_forwarded_header = 0; mod_extforward_patch_config(r, p); + if (NULL == p->conf.forwarder) return HANDLER_GO_ON; if (r->conf.log_request_handling) { log_error(r->conf.errh, __FILE__, __LINE__, @@ -1074,9 +1077,24 @@ URIHANDLER_FUNC(mod_extforward_uri_handler) { } } - if (NULL == p->conf.forwarder) return HANDLER_GO_ON; + /* Note: headers are parsed per-request even when using HAProxy PROXY + * protocol since Forwarded header might provide additional info and + * internal _L_ vars might be set for later use by mod_proxy or others*/ + /*if (p->conf.hap_PROXY) return HANDLER_GO_ON;*/ + if (NULL == p->conf.headers) return HANDLER_GO_ON; - for (uint32_t k = 0; k < p->conf.headers->used && NULL == forwarded; ++k) { + + /* Do not reparse headers for same request, e.g after HANDER_COMEBACK + * from mod_rewrite, mod_magnet MAGNET_RESTART_REQUEST, mod_cgi + * cgi.local-redir, or gw_backend reconnect. This has the implication + * that mod_magnet and mod_cgi with local-redir should not modify + * Forwarded or related headers and expect effects here. */ + handler_ctx *hctx = r->con->plugin_ctx[p->id]; + if (NULL != hctx && NULL != hctx->saved_remote_addr_buf + && hctx->request_count == r->con->request_count) + return HANDLER_GO_ON; + + for (uint32_t k = 0; k < p->conf.headers->used; ++k) { buffer *hdr = &((data_string *)p->conf.headers->data[k])->value; forwarded = http_header_request_get(r, HTTP_HEADER_UNSPECIFIED, CONST_BUF_LEN(hdr)); if (forwarded) {
wifi_prov_mgr : Free memory allocated by cJSON_Print
@@ -296,7 +296,9 @@ static esp_err_t wifi_prov_mgr_start_service(const char *service_name, const cha /* Set version information / capabilities of provisioning service and application */ cJSON *version_json = wifi_prov_get_info_json(); - ret = protocomm_set_version(prov_ctx->pc, "proto-ver", cJSON_Print(version_json)); + char *version_str = cJSON_Print(version_json); + ret = protocomm_set_version(prov_ctx->pc, "proto-ver", version_str); + free(version_str); cJSON_Delete(version_json); if (ret != ESP_OK) { ESP_LOGE(TAG, "Failed to set version endpoint");
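The ownership rule behind this fix, as a tiny standalone sketch (the key and value are arbitrary): cJSON_Print() returns a heap-allocated string that the caller must free; deleting the cJSON tree does not release it.

#include <stdlib.h>
#include "cJSON.h"

int main(void)
{
    cJSON *info = cJSON_CreateObject();
    cJSON_AddStringToObject(info, "ver", "v1.1");

    char *text = cJSON_Print(info);   /* heap-allocated copy of the JSON text */
    if (text != NULL) {
        /* ... use text ... */
        free(text);   /* must be released by the caller, as in the patch */
    }
    cJSON_Delete(info);
    return 0;
}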
[riscv-tests] Adapt Test Virtual Machines to Xpulp toolchain
@@ -227,7 +227,7 @@ reset_vector: \ //----------------------------------------------------------------------- #define RVTEST_CODE_END \ - unimp + csrrw x0, cycle, x0 //----------------------------------------------------------------------- // Pass/Fail Macro
Fix 9front build. Patched locally, forgot to push.
use sys use "alloc" +use "blat" use "cstrconv" use "die" use "extremum" @@ -9,10 +10,12 @@ use "option" use "result" use "slcp" use "sldup" +use "sleq" use "slpush" use "slurp" -use "blat" use "threadhooks" +use "traits" + pkg std = const getenv : (name : byte[:] -> option(byte[:]))
[IQ change] Halve angular_steppings density This change reduces the number of angular steppings used for testing weight quantization levels, which is one of the most significant processing costs in the codec. This reduces image quality by ~0.01 dB, and improves performance by ~10%.
#include <stdio.h> -static const float angular_steppings[] = { - 1.0, 1.125, 1.25, 1.375, 1.5, 1.625, 1.75, 1.875, - - 2.0, 2.25, 2.5, 2.75, - 3.0, 3.25, 3.5, 3.75, - 4.0, 4.25, 4.5, 4.75, - 5.0, 5.25, 5.5, 5.75, - 6.0, 6.25, 6.5, 6.75, - 7.0, 7.25, 7.5, 7.75, - - 8.0, 8.5, - 9.0, 9.5, - 10.0, 10.5, - 11.0, 11.5, - 12.0, 12.5, - 13.0, 13.5, - 14.0, 14.5, - 15.0, 15.5, - 16.0, 16.5, - 17.0, 17.5, - 18.0, 18.5, - 19.0, 19.5, - 20.0, 20.5, - 21.0, 21.5, - 22.0, 22.5, - 23.0, 23.5, - 24.0, 24.5, - 25.0, 25.5, - 26.0, 26.5, - 27.0, 27.5, - 28.0, 28.5, - 29.0, 29.5, - 30.0, 30.5, - 31.0, 31.5, - 32.0, 32.5, - 33.0, 33.5, - 34.0, 34.5, - 35.0, 35.5 +static const float angular_steppings[44] = { + 1.0f, 1.25f, 1.5f, 1.75f, + + 2.0f, 2.5f, 3.0f, 3.5f, + 4.0f, 4.5f, 5.0f, 5.5f, + 6.0f, 6.5f, 7.0f, 7.5f, + + 8.0f, 9.0f, 10.0f, 11.0f, + 12.0f, 13.0f, 14.0f, 15.0f, + 16.0f, 17.0f, 18.0f, 19.0f, + 20.0f, 21.0f, 22.0f, 23.0f, + 24.0f, 25.0f, 26.0f, 27.0f, + 28.0f, 29.0f, 30.0f, 31.0f, + 32.0f, 33.0f, 34.0f, 35.0f }; #define ANGULAR_STEPS ((int)(sizeof(angular_steppings)/sizeof(angular_steppings[0])))
Update createdb.sql Remove the removed colum "coin" from the primary key
@@ -48,7 +48,7 @@ CREATE TABLE balances created TIMESTAMP NOT NULL, updated TIMESTAMP NOT NULL, - primary key(poolid, address, coin) + primary key(poolid, address) ); CREATE TABLE balance_changes
doc: fix wrong register description regarding ethernet SMI
@@ -79,7 +79,7 @@ typedef volatile struct { struct { uint32_t miibusy : 1; /*This bit should read logic 0 before writing to PHY Addr Register and PHY data Register.During a PHY register access the software sets this bit to 1'b1 to indicate that a Read or Write access is in progress. PHY data Register is invalid until this bit is cleared by the MAC. Therefore PHY data Register (MII Data) should be kept valid until the MAC clears this bit during a PHY Write operation. Similarly for a read operation the contents of Register 5 are not valid until this bit is cleared. The subsequent read or write operation should happen only after the previous operation is complete. Because there is no acknowledgment from the PHY to MAC after a read or write operation is completed there is no change in the functionality of this bit even when the PHY is not Present.*/ uint32_t miiwrite : 1; /*When set this bit indicates to the PHY that this is a Write operation using the MII Data register. If this bit is not set it indicates that this is a Read operation that is placing the data in the MII Data register.*/ - uint32_t miicsrclk : 4; /*CSR clock range: 1.0 MHz ~ 2.5 MHz. 4'b0000: When the APB clock frequency is 80 MHz the MDC clock frequency is APB CLK/42 4'b0000: When the APB clock frequency is 40 MHz the MDC clock frequency is APB CLK/26.*/ + uint32_t miicsrclk : 4; /*CSR clock range: 1.0 MHz ~ 2.5 MHz. 4'b0000: When the APB clock frequency is 80 MHz the MDC clock frequency is APB CLK/42 4'b0011: When the APB clock frequency is 40 MHz the MDC clock frequency is APB CLK/26.*/ uint32_t miireg : 5; /*These bits select the desired MII register in the selected PHY device.*/ uint32_t miidev : 5; /*This field indicates which of the 32 possible PHY devices are being accessed.*/ uint32_t reserved16 : 16;
README.md: Use version 2.05.50
@@ -7,8 +7,8 @@ As hardware the [RaspBee](https://www.dresden-elektronik.de/raspbee?L=1&ref=gh) To learn more about the REST API itself please visit the [REST API Documentation](http://dresden-elektronik.github.io/deconz-rest-doc/) page. -### Phoscon App Beta -The *Phoscon App* is the successor of the current WebApp (Wireless Light Control), it's browser based too and in open beta state, for more information and screenshots check out: +### Phoscon App +The *Phoscon App* is the successor of the 2016 WebApp (Wireless Light Control), it's browser based and supports more sensors and switches. For more information and screenshots check out: https://www.dresden-elektronik.de/funktechnik/solutions/wireless-light-control/phoscon-app?L=1 @@ -28,8 +28,6 @@ Installation Raspberry Pi * Raspbian Jessie * Raspbian Stretch -Raspbian Wheezy and Qt4 is no longer maintained. - ##### Supported devices A uncomplete list of supported devices can be found here: @@ -44,11 +42,11 @@ https://github.com/dresden-elektronik/deconz-rest-plugin/releases 1. Download deCONZ package - wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.48-qt5.deb + wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.50-qt5.deb 2. Install deCONZ package - sudo dpkg -i deconz-2.05.48-qt5.deb + sudo dpkg -i deconz-2.05.50-qt5.deb **Important** this step might print some errors *that's ok* and will be fixed in the next step. @@ -63,11 +61,11 @@ The deCONZ package already contains the REST API plugin, the development package 1. Download deCONZ development package - wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.48.deb + wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.50.deb 2. Install deCONZ development package - sudo dpkg -i deconz-dev-2.05.48.deb + sudo dpkg -i deconz-dev-2.05.50.deb 3. Install missing dependencies @@ -82,7 +80,7 @@ The deCONZ package already contains the REST API plugin, the development package 2. Checkout related version tag cd deconz-rest-plugin - git checkout -b mybranch V2_05_48 + git checkout -b mybranch V2_05_50 3. Compile the plugin @@ -99,8 +97,6 @@ Headless support The beta version contains a systemd script, which allows deCONZ to run without a X11 server. -**Note** The service does not yet support deCONZ updates via WebApp, therefore these must be installed manually. A further systemd script will handle updates in future versions. - 1. Enable the service at boot time ```bash
Flatten Jenkins artifact directories
@@ -289,14 +289,10 @@ pipeline { dir('upload') { unstash 'astcenc-linux-x64-hash' unstash 'astcenc-macos-x64-hash' - } - dir('upload/linux-x64') { + unstash 'astcenc-macos-aarch64-hash' + unstash 'astcenc-linux-x64' - } - dir('upload/macos-x64') { unstash 'astcenc-macos-x64' - } - dir('upload/macos-aarch64') { unstash 'astcenc-macos-aarch64' } dir('upload/windows-x64') { @@ -310,7 +306,9 @@ pipeline { passwordVariable: 'PASSWORD')]) { sh 'python3 ./signing/windows-client-wrapper.py ${USERNAME} *.zip' sh 'mv *.zip.sha256 ../' - sh 'rm -rf ./signing' + sh 'mv *.zip ../' + sh 'cd ..' + sh 'rm -rf ./windows-x64' } } dir('upload') {
dprint: add missing doccords
/- *sole ::> a library for printing doccords :: -=/ debug & +=/ debug | => :> types used by doccords |% %dprint-types (print-overview [%header `['compiled against: ' ~] compiled]~ styles) == :: +:> +print-chapter: renders documentation for a single chapter ++ print-chapter |= [name=tape doc=what sut=type con=coil] ^- (list sole-effect) == == == :: -:: +print-overview: prints summaries of several items -:: the (list styl) provides styles for each generation of child -:: items +:> +print-overview: prints summaries of several items +:> the (list styl) provides styles for each generation of child +:> items ++ print-overview |= [=overview styles=(pair styl styl)] ~? >> debug %print-overview [%leaf (trip q.pica)] [%leaf " {(trip q.pica)}"] :: +:> +styled: makes $sole-effects out of $styls and $cords ++ styled |= [in=(list (pair styl cord))] ^- (list sole-effect)
Update README.md Add note about windows binaries (refs
@@ -24,7 +24,7 @@ Two different transport layers are used: ## Installation -Currently there are no binaries for Windows available. +Currently there are no binaries for Windows available (see issue [#166](https://github.com/texane/stlink/issues/166)). It is known to compile and work with MinGW/Cygwin. For Debian Linux based distributions there is also no package available
Fixed text in menu text
@@ -219,7 +219,7 @@ void *GetDriverByConfig(SurviveContext *ctx, const char *name, const char *confi int prefixLen = strlen(name); if (verbose > 1) - SV_INFO("Available %s:", name); + SV_INFO("Available %ss:", name); while ((DriverName = GetDriverNameMatching(name, i++))) { void *p = GetDriver(DriverName); @@ -237,7 +237,7 @@ void *GetDriverByConfig(SurviveContext *ctx, const char *name, const char *confi if (verbose > 1) SV_INFO("Totals %d %ss.", i - 1, name); if (verbose > 0) - SV_INFO("Using %s for %s", name, configname); + SV_INFO("Using '%s' for %s", picked, configname); return func; }
dcmp: improve the usage(). We already support multiple output files with different filters, like the following: dcmp foo_src foo_dst -o EXIST=ONLY_SRC,TYPE=DIFFER,PERM=DIFFER,MTIME=DIFFER:fileA -o MTIME=DIFFER:fileB, but the usage doesn't say so, so add it now.
@@ -51,6 +51,9 @@ static void print_usage(void) printf(" -b, --base - do base comparison\n"); printf(" -o, --output field0=state0@field1=state1,field2=state2:file " "- write list to file\n"); + printf(" multiple output files supports, e.g.\n"); + printf(" -o EXIST=ONLY_SRC,TYPE=DIFFER,PERM=DIFFER,MTIME=DIFFER:" + "fileA -o MTIME=DIFFER:fileB\n"); printf(" -v, --verbose\n"); printf(" -h, --help - print usage\n"); printf("\n");
ble_mesh: fix MESH/NODE/CFG/HBS/BV-01-C related bug
@@ -3121,15 +3121,10 @@ static void hb_sub_send_status(struct bt_mesh_model *model, net_buf_simple_add_u8(&msg, status); net_buf_simple_add_le16(&msg, cfg->hb_sub.src); net_buf_simple_add_le16(&msg, cfg->hb_sub.dst); - if (cfg->hb_sub.src == BLE_MESH_ADDR_UNASSIGNED || - cfg->hb_sub.dst == BLE_MESH_ADDR_UNASSIGNED) { - memset(net_buf_simple_add(&msg, 4), 0, 4); - } else { net_buf_simple_add_u8(&msg, hb_log(period)); net_buf_simple_add_u8(&msg, hb_log(cfg->hb_sub.count)); net_buf_simple_add_u8(&msg, cfg->hb_sub.min_hops); net_buf_simple_add_u8(&msg, cfg->hb_sub.max_hops); - } if (bt_mesh_model_send(model, ctx, &msg, NULL, NULL)) { BT_ERR("%s, Unable to send Config Heartbeat Subscription Status", __func__); @@ -3188,9 +3183,13 @@ static void heartbeat_sub_set(struct bt_mesh_model *model, * trigger clearing of the values according to * MESH/NODE/CFG/HBS/BV-02-C. */ - if (cfg->hb_sub.src != sub_src || cfg->hb_sub.dst != sub_dst) { + if (sub_src == BLE_MESH_ADDR_UNASSIGNED || + sub_dst == BLE_MESH_ADDR_UNASSIGNED) { cfg->hb_sub.src = BLE_MESH_ADDR_UNASSIGNED; cfg->hb_sub.dst = BLE_MESH_ADDR_UNASSIGNED; + cfg->hb_sub.min_hops = BLE_MESH_TTL_MAX; + cfg->hb_sub.max_hops = 0U; + cfg->hb_sub.count = 0U; } period_ms = 0;
Fix for people who are experiencing it.
$% {$diff lime} :: {$poke wire dock pear} :: {$peer wire dock path} :: + {$pull wire dock $~} :: == :: ++ work ::> interface action $% :: circle management :: ^- (quip move _..prep) ?~ old ta-done:ta-init:ta - [~ ..prep(+<+ u.old)] + :_ ..prep(+<+ u.old) + [[ost.bol %pull / server ~] ~] :: ::> || ::> || %utility
Check skipped entity handler
@@ -1756,6 +1756,18 @@ END_TEST static void *handler_data = NULL; /* Count of the number of times the comment handler has been invoked */ static int comment_count = 0; +/* Count of the number of skipped entities */ +static int skip_count = 0; + +static void XMLCALL +param_check_skip_handler(void *userData, + const XML_Char *UNUSED_P(entityName), + int UNUSED_P(is_parameter_entity)) +{ + if (userData != handler_data) + fail("User data (skip) not correctly set"); + skip_count++; +} static void XMLCALL data_check_comment_handler(void *userData, const XML_Char *UNUSED_P(data)) @@ -1806,9 +1818,11 @@ START_TEST(test_user_parameters) "</doc>"; comment_count = 0; + skip_count = 0; XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS); XML_SetExternalEntityRefHandler(parser, external_entity_param_checker); XML_SetCommentHandler(parser, data_check_comment_handler); + XML_SetSkippedEntityHandler(parser, param_check_skip_handler); XML_UseParserAsHandlerArg(parser); XML_SetUserData(parser, (void *)1); handler_data = parser; @@ -1822,6 +1836,8 @@ START_TEST(test_user_parameters) xml_failure(parser); if (comment_count != 3) fail("Comment handler not invoked enough times"); + if (skip_count != 1) + fail("Skip handler not invoked enough times"); } END_TEST
upstream: monitor idle keepalive connections for 'disconnections'
@@ -156,7 +156,7 @@ static struct flb_upstream_conn *create_conn(struct flb_upstream *u) u->n_connections++; if (conn->u->flags & FLB_IO_TCP_KA) { - flb_debug("[upstream] KA connection #%i to %s:%i created", + flb_debug("[upstream] KA connection #%i to %s:%i is connected", conn->fd, u->tcp_host, u->tcp_port); } @@ -252,6 +252,16 @@ struct flb_upstream_conn *flb_upstream_conn_get(struct flb_upstream *u) mk_list_add(&conn->_head, &u->busy_queue); flb_debug("[upstream] KA connection #%i to %s:%i has been assigned (recycled)", conn->fd, u->tcp_host, u->tcp_port); + + /* + * Note: since we are in a keepalive connection, the socket is already being + * monitored for possible disconnections while idle. Upon re-use by the caller + * when it try to send some data, the I/O interface (flb_io.c) will put the + * proper event mask and reuse, there is no need to remove the socket from + * the event loop and re-add it again. + * + * So... just return the connection context. + */ return conn; } @@ -263,9 +273,30 @@ struct flb_upstream_conn *flb_upstream_conn_get(struct flb_upstream *u) return conn; } +/* + * An 'idle' and keepalive might be disconnected, if so, this callback will perform + * the proper connection cleanup. + */ +static int cb_upstream_conn_ka_dropped(void *data) +{ + struct flb_upstream_conn *conn; + + conn = (struct flb_upstream_conn *) data; + + flb_debug("[upstream] KA connection #%i to %s:%i has been disconnected " + "by the remote service", + conn->fd, conn->u->tcp_host, conn->u->tcp_port); + return destroy_conn(conn); +} + int flb_upstream_conn_release(struct flb_upstream_conn *conn) { + int ret; time_t ts; + struct flb_upstream *u; + + /* Upstream context */ + u = conn->u; /* If this is a valid KA connection just recycle */ if (conn->u->flags & FLB_IO_TCP_KA) { @@ -285,6 +316,26 @@ int flb_upstream_conn_release(struct flb_upstream_conn *conn) mk_list_del(&conn->_head); mk_list_add(&conn->_head, &conn->u->av_queue); conn->ts_available = time(NULL); + + /* + * The socket at this point is not longer monitored, so if we want to be + * notified if the 'available keepalive connection' gets disconnected by + * the remote endpoint we need to add it again. + */ + conn->event.handler = cb_upstream_conn_ka_dropped; + conn->event.data = &conn; + + ret = mk_event_add(u->evl, conn->fd, + FLB_ENGINE_EV_CUSTOM, + MK_EVENT_CLOSE, &conn->event); + if (ret == -1) { + /* We failed the registration, for safety just destroy the connection */ + flb_debug("[upstream] KA connection #%i to %s:%i could not be " + "registered, closing.", + conn->fd, conn->u->tcp_host, conn->u->tcp_port); + return destroy_conn(conn); + } + flb_debug("[upstream] KA connection #%i to %s:%i is now available", conn->fd, conn->u->tcp_host, conn->u->tcp_port); return 0;
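The idea in this patch is that an idle keepalive socket must remain visible to the event loop so that a server-side close is noticed before the connection is recycled. As a minimal, generic illustration of detecting that the remote end of an idle socket has gone away (this is not Fluent Bit's mk_event approach, and the helper name is_peer_closed is an assumption made for the sketch), a non-blocking MSG_PEEK read works:

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Check whether the remote end of an idle socket has closed the
 * connection. A non-blocking MSG_PEEK recv() returning 0 means the peer
 * performed an orderly shutdown; EAGAIN/EWOULDBLOCK means the socket is
 * idle but still connected. */
static int is_peer_closed(int fd)
{
    char byte;
    ssize_t n = recv(fd, &byte, 1, MSG_PEEK | MSG_DONTWAIT);

    if (n == 0) {
        return 1;                    /* peer closed the connection */
    }
    if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
        return 0;                    /* idle but still connected */
    }
    return (n > 0) ? 0 : -1;         /* data pending, or a hard error */
}

The patch takes the event-driven route instead: the descriptor stays registered with the event loop while the connection sits in the available queue, so a disconnect shows up as a close event without any polling.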
omniboxResult: scroll into view if necessary
@@ -10,6 +10,18 @@ export class OmniboxResult extends Component { hovered: false }; this.setHover = this.setHover.bind(this); + this.result = React.createRef(); + } + + componentDidUpdate(prevProps) { + const { props, state } = this; + if (prevProps && + !state.hovered && + prevProps.selected !== props.selected && + props.selected === props.link + ) { + this.result.current.scrollIntoView({ block: 'nearest' }); + } } setHover(boolean) { @@ -55,6 +67,7 @@ export class OmniboxResult extends Component { } onClick={navigate} width="100%" + ref={this.result} > {this.state.hovered || selected === link ? ( <>
Removed fake test from Python port interfacing with Ruby.
@@ -36,13 +36,9 @@ class py_port_test(unittest.TestCase): self.assertEqual(metacall.metacall('say_null'), None); - # TODO: Solve None return - #self.assertEqual(metacall.metacall('say_multiply', 3, 4), 12); - self.assertEqual(metacall.metacall('say_multiply', 3, 4), None); + self.assertEqual(metacall.metacall('say_multiply', 3, 4), 12); - # TODO: Solve None return - #self.assertEqual(metacall.metacall('say_hello', 'world'), 'Hello world!'); - self.assertEqual(metacall.metacall('say_hello', 'world'), None); + self.assertEqual(metacall.metacall('say_hello', 'world'), 'Hello world!'); if __name__ == '__main__': unittest.main()
Show test-suite.log after running tests in GitHub workflow.
@@ -33,7 +33,9 @@ jobs: - name: Build run: make clean && make - name: Run tests - run: make check + run: | + make check + cat test-suite.log mingw-build: runs-on: ubuntu-latest
Turning off USM till we have a machine that can test this
#include <iostream> #include <omp.h> -#pragma omp requires unified_shared_memory -#define IS_USM 1 +//#pragma omp requires unified_shared_memory +#define IS_USM 0 int main() { int *a = (int *) malloc(2*sizeof(int));
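With IS_USM set to 0 the test can no longer rely on host heap pointers being directly usable inside a target region, so data has to be mapped explicitly. A small sketch of that explicit-map variant, written against standard OpenMP offload (the array size of 2 mirrors the snippet above; everything else is illustrative), looks like this:

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
    int *a = (int *) malloc(2 * sizeof(int));
    a[0] = 1;
    a[1] = 2;

    /* Without "#pragma omp requires unified_shared_memory" the host
     * pointer is not valid on the device, so the buffer is copied to the
     * device and back with an explicit map clause. */
    #pragma omp target map(tofrom: a[0:2])
    {
        a[0] += 10;
        a[1] += 10;
    }

    printf("%d %d\n", a[0], a[1]);   /* expected output: 11 12 */
    free(a);
    return 0;
}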
use %zu for printf size_t
@@ -1361,7 +1361,7 @@ size_t picoquic_log_qoe_frame(FILE* F, const uint8_t* bytes, size_t bytes_max) byte_index = bytes_max; } else { - fprintf(F, " QOE, path: %" PRIu64 ", length: %" PRIu64 ", v: ", + fprintf(F, " QOE, path: %" PRIu64 ", length: %zu, v: ", path_id, length); for (size_t i = 0; i < 10 && i < length; i++) { fprintf(F, "%02x", bytes[i]);
bugid:22305881: modified set boot according to the new bootloader
@@ -126,6 +126,8 @@ int ota_hal_read(unsigned int *off, char *out_buf, unsigned int out_buf_len) static int hal_ota_switch(uint32_t ota_len, uint16_t ota_crc) { + int ret = 0; + int offset = 0; uint32_t addr = 0; ota_hdr_t ota_hdr = { .dst_adr = 0xA000, @@ -133,7 +135,11 @@ static int hal_ota_switch(uint32_t ota_len, uint16_t ota_crc) .siz = ota_len, .crc = ota_crc, }; + ret = hal_flash_erase(HAL_PARTITION_PARAMETER_1, offset, 4096); + if(ret == 0) { + printf("ota finished, switch to new firmware ... \r\n"); hal_flash_write(HAL_PARTITION_PARAMETER_1, &addr, (uint8_t *)&ota_hdr, sizeof(ota_hdr)); + } return 0; } @@ -142,7 +148,6 @@ int ota_hal_boot(ota_boot_param_t *param) int ret = 0; if (param != NULL) { param->crc = ota_image_crc(param->len - IMAGE_INFORMATION_LEN); - printf("ota finished, switch to new firmware ...\r\n"); hal_ota_switch(param->len - IMAGE_INFORMATION_LEN, param->crc); } return ret;
Improved error message The new error message now points to the location of the missing delimiter, instead of only to the location of the opening token.
@@ -37,19 +37,18 @@ function Parser:init(lexer) self.curr_line = 0 self.curr_indent = 0 self.indent_of_token = {} -- { token => integer } - self.mismatched_openers = {} -- list of token + self.mismatched_indentation = {} -- list of tokens (2i+1 = open, 2i+2 = close) setmetatable(self.indent_of_token, { __mode = "k" }) self:_advance(); self:_advance() end function Parser:_pay_attention_to_suspicious_indentation(open_tok, close_tok) - if open_tok.loc.line ~= close_tok.loc.line then local d1 = assert(self.indent_of_token[open_tok]) local d2 = assert(self.indent_of_token[close_tok]) if d1 > d2 then - table.insert(self.mismatched_openers, open_tok) - end + table.insert(self.mismatched_indentation, open_tok) + table.insert(self.mismatched_indentation, close_tok) end end @@ -1087,27 +1086,40 @@ function Parser:unexpected_token_error(non_terminal) end function Parser:wrong_token_error(expected_name, open_tok) - local loc = self.next.loc + + local next_tok = self.next + local is_stolen_delimiter = false + + if open_tok then + for i = 1, #self.mismatched_indentation, 2 do + local susp_open = self.mismatched_indentation[i] + local susp_close = self.mismatched_indentation[i+1] + if expected_name == susp_close.name and susp_open.loc.pos > open_tok.loc.pos then + open_tok = susp_open + next_tok = susp_close + is_stolen_delimiter = true + break + end + end + end + + local loc = next_tok.loc local what = self:describe_token_name(expected_name) - local where = self:describe_token(self.next) + local where = self:describe_token(next_tok) if not open_tok or loc.line == open_tok.loc.line then self:syntax_error(loc, "expected %s before %s", what, where) else local owhat = self:describe_token_name(open_tok.name) - self:syntax_error(loc, "expected %s before %s, to close the %s at line %d", - what, where, owhat, open_tok.loc.line) - end - - if open_tok then - for _, susp_tok in ipairs(self.mismatched_openers) do - if susp_tok.loc.pos > open_tok.loc.pos then - local susp_what = self:describe_token(susp_tok) - self:syntax_error(susp_tok.loc, - "...possibly because this %s is missing an %s (mismatched indentation)", - susp_what, what) - break - end + local oline = open_tok.loc.line + if is_stolen_delimiter then + self:syntax_error(loc, + "expected %s to close %s at line %d, before this less indented %s", + what, owhat, oline, what) + else + self:syntax_error(loc, + "expected %s before %s, to close the %s at line %d", + what, where, owhat, oline) end end
mm/heap: Skip memset backtrace buffer
tcb = nxsched_get_tcb(tmp->pid); \ if ((heap)->mm_procfs.backtrace || (tcb && tcb->flags & TCB_FLAG_HEAP_DUMP)) \ { \ - memset(tmp->backtrace, 0, sizeof(tmp->backtrace)); \ - backtrace(tmp->backtrace, CONFIG_MM_BACKTRACE); \ + int n = backtrace(tmp->backtrace, CONFIG_MM_BACKTRACE); \ + if (n < CONFIG_MM_BACKTRACE) \ + { \ + tmp->backtrace[n] = 0; \ + } \ } \ else \ { \
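backtrace() reports how many entries it actually wrote, so instead of zeroing the whole array up front it is enough to null-terminate right after the last captured frame. A stand-alone sketch of that pattern using the common backtrace() API, with an illustrative DEPTH constant in place of CONFIG_MM_BACKTRACE, could be:

#include <execinfo.h>
#include <stddef.h>

#define DEPTH 16   /* illustrative stand-in for CONFIG_MM_BACKTRACE */

static void *frames[DEPTH];

static void record_backtrace(void)
{
    /* backtrace() returns the number of frames captured; entries past
     * that index are left untouched. */
    int n = backtrace(frames, DEPTH);

    /* Terminate the list instead of clearing the whole buffer. */
    if (n < DEPTH) {
        frames[n] = NULL;
    }
}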
Update test-conversions.lua
@@ -4,7 +4,7 @@ return require('lib/tap')(function (test) assert(string.format("%x", 29913653248) == "6f6fe2000") assert(string.format("%x", 32207650816) == "77fb9c000") else - print('skiped') + print('skipped') end end) end)
Initialize ip4_reass_lock of map_main to zero
@@ -2241,6 +2241,7 @@ map_init (vlib_main_t * vm) mm->ip4_reass_pool = 0; mm->ip4_reass_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); + *mm->ip4_reass_lock = 0; mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT; mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT; mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT; @@ -2256,6 +2257,7 @@ map_init (vlib_main_t * vm) mm->ip6_reass_pool = 0; mm->ip6_reass_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); + *mm->ip6_reass_lock = 0; mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT; mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT; mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;
mdns: fix memory leak in mdns_free when adding delegated hostnames
@@ -4181,6 +4181,13 @@ static void _mdns_free_action(mdns_action_t * action) pbuf_free(action->data.rx_handle.packet->pb); free(action->data.rx_handle.packet); break; + case ACTION_DELEGATE_HOSTNAME_ADD: + free((char *)action->data.delegate_hostname.hostname); + free_address_list(action->data.delegate_hostname.address_list); + break; + case ACTION_DELEGATE_HOSTNAME_REMOVE: + free((char *)action->data.delegate_hostname.hostname); + break; default: break; }
utils: trace: Remove github.com/sirupsen/logrus. utils.trace is meant to be used from the CLI, so it is better to print to standard output or error than to create a logger on each call.
@@ -28,8 +28,6 @@ import ( "text/tabwriter" "time" - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -120,7 +118,7 @@ func printTraceFeedback(f func(format string, args ...interface{}), m map[string } } -func deleteTraces(contextLogger *log.Entry, traceRestClient *restclient.RESTClient, traceID string) { +func deleteTraces(traceRestClient *restclient.RESTClient, traceID string) { var listTracesOptions = metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", GLOBAL_TRACE_ID, traceID), FieldSelector: fields.Everything().String(), @@ -132,8 +130,8 @@ func deleteTraces(contextLogger *log.Entry, traceRestClient *restclient.RESTClie VersionedParams(&listTracesOptions, scheme.ParameterCodec). Do(context.TODO()). Error() - if contextLogger != nil && err != nil { - contextLogger.Warningf("Error deleting traces: %q", err) + if err != nil { + fmt.Fprintf(os.Stderr, "Error deleting traces: %q", err) } } @@ -203,7 +201,7 @@ func createTraces(trace *gadgetv1alpha1.Trace) error { traceID, present := trace.ObjectMeta.Labels[GLOBAL_TRACE_ID] if present { // Clean before exiting! - deleteTraces(nil, traceRestClient, traceID) + deleteTraces(traceRestClient, traceID) } return fmt.Errorf("Error creating trace on node %q: %w", node.Name, err) @@ -487,7 +485,7 @@ func DeleteTrace(traceID string) error { return err } - deleteTraces(nil, traceRestClient, traceID) + deleteTraces(traceRestClient, traceID) return nil } @@ -643,7 +641,6 @@ func RunTraceAndPrintStatusOutput(config *TraceConfig, customResultsDisplay func } func genericStreamsDisplay( - contextLogger *log.Entry, params *CommonFlags, results *gadgetv1alpha1.TraceList, transformLine func(string) string,
Shell Recorder: Remove useless commands
@@ -67,11 +67,9 @@ execute() cp "${DBFile}" "${DBFile}.1" 2>/dev/null ;; Ini) - rm ./previousState 2>/dev/null "$KDBCOMMAND" export "$Mountpoint" ini > ./previousState 2>/dev/null ;; Dump) - rm ./previousState 2>/dev/null "$KDBCOMMAND" export "$Mountpoint" dump > ./previousState 2>/dev/null ;; esac
BugID:17651887: Fix HAL_TCP_Establish function signature to be consistent with its declaration
@@ -223,7 +223,7 @@ int32_t HAL_TCP_Read(uintptr_t fd, char *buf, uint32_t len, uint32_t timeout_ms) return (0 != len_recv) ? len_recv : err_code; } #else -intptr_t HAL_TCP_Establish(const char *host, uint16_t port) +uintptr_t HAL_TCP_Establish(_IN_ const char *host, _IN_ uint16_t port) { return 0; }
Update Luos version for Release v2.0.1
"name": "Luos", "keywords": "robus,network,microservice,luos,operating system,os,embedded,communication,service,ST", "description": "Luos turns your embedded system into services like microservices architecture does it in software.", - "version": "2.0.0", + "version": "2.0.1", "authors": { "name": "Luos", "url": "https://luos.io"
Export PhImageListGetIconSize
@@ -1144,6 +1144,15 @@ PhImageListGetIcon( _In_ UINT Flags ); +PHLIBAPI +BOOLEAN +NTAPI +PhImageListGetIconSize( + _In_ HIMAGELIST ImageListHandle, + _Out_ PINT cx, + _Out_ PINT cy + ); + PHLIBAPI BOOLEAN NTAPI
task: use task tag reference
@@ -233,7 +233,7 @@ struct flb_task *flb_task_create(uint64_t ref_id, continue; } - if (flb_router_match(tag_buf, tag_len, o_ins->match + if (flb_router_match(task->tag, task->tag_len, o_ins->match #ifdef FLB_HAVE_REGEX , o_ins->match_regex #endif
DDF for Nedis temperature/humidity sensor (_TZ3000_fie1dpkm)
{ "schema": "devcap1.schema.json", - "manufacturername": ["_TZ3000_yd2e749y", "_TZ3000_fllyghyj"], - "modelid": ["TS0201", "TS0201"], + "manufacturername": ["_TZ3000_yd2e749y", "_TZ3000_fllyghyj", "_TZ3000_fie1dpkm"], + "modelid": ["TS0201", "TS0201", "TS0201"], "vendor": "Tuya", "product": "Temperature and humidity sensor", "sleeper": true,
Add target to build clap-tests
"configurePreset": "ninja", "displayName": "Build ninja-release", "description": "Build ninja Release configuration", - "configuration": "RelWithDebInfo" + "configuration": "RelWithDebInfo", + "targets": ["clap-tests"] } ], "testPresets": [
samples: remove extra option for dm remove extra "-l com1,stdio" option for dm
@@ -287,7 +287,6 @@ else fi acrn-dm -A -m $mem_size -c $2$boot_GVT_option"$GVT_args" -s 0:0,hostbridge -s 1:0,lpc -l com1,stdio \ - -l com1,stdio \ -s 9,virtio-net,$tap \ -s 3,virtio-blk$boot_dev_flag,/data/$5/$5.img \ -s 7,passthru,0/15/0 \
Add SceGpuEs4ForDriver nids
@@ -1993,6 +1993,8 @@ modules: kernel: true nid: 0x54802381 functions: + OSAllocMem: 0x68815236 + OSFreeMem: 0x0BB49287 OSGetCurrentProcessIDKM: 0x6C6F2BEA OSMemCopy: 0x771EC61E OSMemSet: 0x50494916
OcConfigurationLib: Fix typos in schema
@@ -79,7 +79,7 @@ mAcpiBlockSchema = OC_SCHEMA_DICT (NULL, mAcpiBlockSchemaEntry); STATIC OC_SCHEMA mAcpiPatchSchemaEntry[] = { - OC_SCHEMA_STRING_IN ("Comment", OC_ACPI_BLOCK_ENTRY, Comment), + OC_SCHEMA_STRING_IN ("Comment", OC_ACPI_PATCH_ENTRY, Comment), OC_SCHEMA_INTEGER_IN ("Count", OC_ACPI_PATCH_ENTRY, Count), OC_SCHEMA_BOOLEAN_IN ("Enabled", OC_ACPI_PATCH_ENTRY, Enabled), OC_SCHEMA_DATA_IN ("Find", OC_ACPI_PATCH_ENTRY, Find), @@ -188,7 +188,7 @@ STATIC OC_SCHEMA mKernelPatchSchemaEntry[] = { OC_SCHEMA_STRING_IN ("Base", OC_KERNEL_PATCH_ENTRY, Base), - OC_SCHEMA_STRING_IN ("Comment", OC_KERNEL_BLOCK_ENTRY, Comment), + OC_SCHEMA_STRING_IN ("Comment", OC_KERNEL_PATCH_ENTRY, Comment), OC_SCHEMA_INTEGER_IN ("Count", OC_KERNEL_PATCH_ENTRY, Count), OC_SCHEMA_BOOLEAN_IN ("Enabled", OC_KERNEL_PATCH_ENTRY, Enabled), OC_SCHEMA_DATA_IN ("Find", OC_KERNEL_PATCH_ENTRY, Find),
Port current PostgreSQL content for ldapsearchfilter in pg_hba.conf
@@ -254,13 +254,18 @@ ldapbindpasswd ldapsearchattribute : Attribute to match against the user name in the search when doing search+bind authentication. -Example: +ldapsearchfilter +: Beginning with Greenplum 6.22, this attribute enables you to provide a search filter to use when doing search+bind authentication. Occurrences of `$username` will be replaced with the user name. This allows for more flexible search filters than `ldapsearchattribute`. + +When using search+bind mode, the search can be performed using a single attribute specified with `ldapsearchattribute`, or using a custom search filter specified with `ldapsearchfilter`. Specifying `ldapsearchattribute=foo` is equivalent to specifying `ldapsearchfilter="(foo=$username)"`. If neither option is specified the default is `ldapsearchattribute=uid`. + +Here is an example for a search+bind configuration that uses `ldapsearchfilter` instead of `ldapsearchattribute` to allow authentication by user ID or email address: ``` -ldapserver=ldap.greenplum.com prefix="cn=" suffix=", dc=greenplum, dc=com" +host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapsearchfilter="(|(uid=$username)(mail=$username))" ``` -Following are sample `pg_hba.conf` file entries for LDAP authentication: +Following are additional sample `pg_hba.conf` file entries for LDAP authentication: ``` host all testuser 0.0.0.0/0 ldap ldap
[chain] Disable tx execution if done already
@@ -312,11 +312,14 @@ type blockExecutor struct { sdb *state.ChainStateDB execTx TxExecFn txs []*types.Tx + commitOnly bool } func newBlockExecutor(cs *ChainService, bState *types.BlockState, block *types.Block) (*blockExecutor, error) { var exec TxExecFn + commitOnly := false + // The DPoS block factory excutes transactions during block generation. In // such a case it send block with block state so that bState != nil. On the // contrary, the block propagated from the network is not half-executed. @@ -333,6 +336,10 @@ func newBlockExecutor(cs *ChainService, bState *types.BlockState, block *types.B exec = NewTxExecutor( cs.sdb, bState, block.GetHeader().GetTimestamp()) + } else { + // In this case (bState != nil), the transactions has already been + // executed by the block factory. + commitOnly = true } txs := block.GetBody().GetTxs() @@ -342,6 +349,7 @@ func newBlockExecutor(cs *ChainService, bState *types.BlockState, block *types.B sdb: cs.sdb, execTx: exec, txs: txs, + commitOnly: commitOnly, }, nil } @@ -356,7 +364,7 @@ func (e *blockExecutor) execute() error { // Receipt must be committed unconditionally. defer e.CommitReceipt() - if e.execTx != nil { + if !e.commitOnly { for _, tx := range e.txs { if err := e.execTx(tx); err != nil { return err @@ -397,7 +405,7 @@ func (cs *ChainService) executeBlock(bstate *types.BlockState, block *types.Bloc cdbTx := cs.cdb.store.NewTx(true) // This is a chain DB update. setChainState(cdbTx, block) - // TODO: What happens if db update is failed in setChainState? The + // TODO: What happens if DB update is failed in setChainState()? Such an // unconditional commit is OK? cdbTx.Commit()
delete serial state
@@ -28,8 +28,8 @@ void setup() { Serial.begin(57600); DEBUG.begin(57600); - while (!Serial) - ; + // while (!Serial) + // ; connectProcessing(); connectRC100();
travis: add variable to enable or disable the ASAN build
@@ -32,6 +32,7 @@ addons: - cmake-data before_install: - $CC --version + - if [ "$TRAVIS_OS_NAME" == "linux" ]; then CMAKE_OPTS=" -DENABLE_ASAN=1" AUTOTOOLS_OPTS=" --enable-asan"; fi - if [ "$TRAVIS_OS_NAME" == "linux" ]; then if [ "$CXX" = "g++" ]; then export CXX="g++-7" CC="gcc-7" EXTRA_LDFLAGS="-fuse-ld=gold"; else export CXX="clang++-5.0" CC="clang-5.0"; fi; fi - $CC --version - cmake --version @@ -42,9 +43,9 @@ before_script: # configure ngtcp2 - if [ "$CI_BUILD" == "autotools" ]; then autoreconf -i; fi - export PKG_CONFIG_PATH=$PWD/../openssl/build/lib/pkgconfig LDFLAGS="$EXTRA_LDFLAGS -Wl,-rpath,$PWD/../openssl/build/lib" - - if [ "$CI_BUILD" == "autotools" ]; then ./configure --enable-werror --enable-asan; fi + - if [ "$CI_BUILD" == "autotools" ]; then ./configure --enable-werror $AUTOTOOLS_OPTS; fi # Set CMAKE_LIBRARY_ARCHITECTURE to workaround failure to parse implicit link information from GCC 5 - - if [ "$CI_BUILD" == "cmake" ]; then cmake -DENABLE_ASAN=1 -DCMAKE_LIBRARY_ARCHITECTURE=x86_64-linux-gnu; fi + - if [ "$CI_BUILD" == "cmake" ]; then cmake $CMAKE_OPTS -DCMAKE_LIBRARY_ARCHITECTURE=x86_64-linux-gnu; fi script: # Now build ngtcp2 examples and test - make
dma: make the error print buffer bigger
@@ -285,8 +285,8 @@ errval_t ioat_dma_channel_init(struct ioat_dma_device *dev, debug_printf(" %x channel error ERROR: %08x\n", dma_chan->id, ioat_dma_chan_err_rd(&chan->channel)); - static char buf[512]; - ioat_dma_chan_err_pr(buf, 512, &chan->channel); + static char buf[1024]; + ioat_dma_chan_err_pr(buf, 1024, &chan->channel); printf("Channel Error::\n%s\n",buf); dma_ring_free(chan->ring); free(chan);
pydiag: fix import of exit codes from os
@@ -28,7 +28,8 @@ import argparse import logging import sys from collections import OrderedDict -from os.path import basename, EX_OK, EX_USAGE, EX_SOFTWARE, EX_UNAVAILABLE +from os import EX_OK, EX_USAGE, EX_SOFTWARE, EX_UNAVAILABLE +from os.path import basename from nlb0 import nlb0 from nlb3 import nlb3 from nlb7 import nlb7
Fix use after free in c0sk_test unit test
@@ -1042,11 +1042,11 @@ retry: destroy_key_generator(kg); - c0snr_set_destroy(css); - err = c0sk_close(mkvdb.ikdb_c0sk); ASSERT_EQ(0, err); + c0snr_set_destroy(css); + destroy_mock_cn(mock_cn); } @@ -1502,11 +1502,11 @@ MTF_DEFINE_UTEST_PREPOST(c0sk_test, parallel_ctxn_put, no_fail_ctxn_pre, no_fail destroy_key_generator(kg); - c0snr_set_destroy(css); - err = c0sk_close(mkvdb.ikdb_c0sk); ASSERT_EQ(0, err); + c0snr_set_destroy(css); + destroy_mock_cn(mock_cn); }
remove hard link to python 3.6 remove hard link to python 3.6
set -ex sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y -sudo add-apt-repository ppa:deadsnakes/ppa -y sudo apt-get update -DEPENDENCIES="unzip make indent kwstyle python3.6 python-pip libssl-dev" +DEPENDENCIES="unzip make indent kwstyle libssl-dev" sudo apt-get install -y ${DEPENDENCIES}
Minor optimization of spi_flash_init.c Delete the stray ";"
@@ -22,7 +22,7 @@ static int rt_hw_spi_flash_init(void) if (RT_NULL == rt_sfud_flash_probe("W25Q64", "spi30")) { return -RT_ERROR; - }; + } return RT_EOK; }
sse2: much faster saturated signed addition functions The improvement obviously varies by CPU, but for -DSIMDE_NO_NATIVE with -msse2 it's about an order of magnitude faster. Compilers can actually vectorize this version.
@@ -453,13 +453,10 @@ simde_mm_adds_epi8 (simde__m128i a, simde__m128i b) { #else SIMDE__VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i8) / sizeof(r_.i8[0])) ; i++) { - if ((((b_.i8[i]) > 0) && ((a_.i8[i]) > (INT8_MAX - (b_.i8[i]))))) { - r_.i8[i] = INT8_MAX; - } else if ((((b_.i8[i]) < 0) && ((a_.i8[i]) < (INT8_MIN - (b_.i8[i]))))) { - r_.i8[i] = INT8_MIN; - } else { - r_.i8[i] = (a_.i8[i]) + (b_.i8[i]); - } + const int32_t tmp = + HEDLEY_STATIC_CAST(int16_t, a_.i8[i]) + + HEDLEY_STATIC_CAST(int16_t, b_.i8[i]); + r_.i8[i] = HEDLEY_STATIC_CAST(int8_t, ((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN) : INT8_MAX)); } #endif @@ -481,18 +478,16 @@ simde_mm_adds_epi16 (simde__m128i a, simde__m128i b) { a_ = simde__m128i_to_private(a), b_ = simde__m128i_to_private(b); + #if defined(SIMDE_SSE2_NEON) r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16); #else SIMDE__VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { - if ((((b_.i16[i]) > 0) && ((a_.i16[i]) > (INT16_MAX - (b_.i16[i]))))) { - r_.i16[i] = INT16_MAX; - } else if ((((b_.i16[i]) < 0) && ((a_.i16[i]) < (INT16_MIN - (b_.i16[i]))))) { - r_.i16[i] = INT16_MIN; - } else { - r_.i16[i] = (a_.i16[i]) + (b_.i16[i]); - } + const int32_t tmp = + HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) + + HEDLEY_STATIC_CAST(int32_t, b_.i16[i]); + r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, ((tmp < INT16_MAX) ? ((tmp > INT16_MIN) ? tmp : INT16_MIN) : INT16_MAX)); } #endif
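The speedup comes from doing the addition in a wider integer type and clamping once, a form most compilers can lower to min/max and auto-vectorize, unlike the old overflow-test branches. A self-contained single-lane sketch of the same widen-and-clamp idea (the function name sat_add_i8 is illustrative and not part of SIMDe) is:

#include <stdint.h>

/* Saturating signed 8-bit addition: widen to 16 bits, add, then clamp
 * the result back into the int8_t range. The clamp maps naturally onto
 * min/max operations, which is what lets loops of these vectorize. */
static inline int8_t sat_add_i8(int8_t a, int8_t b)
{
    int16_t tmp = (int16_t) a + (int16_t) b;

    if (tmp > INT8_MAX) {
        tmp = INT8_MAX;
    } else if (tmp < INT8_MIN) {
        tmp = INT8_MIN;
    }

    return (int8_t) tmp;
}

For example, sat_add_i8(100, 100) yields 127 and sat_add_i8(-100, -100) yields -128, whereas plain int8_t addition would wrap.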
Minor fix when LV_LABEL_TEXT_SEL is 0
@@ -111,7 +111,9 @@ lv_obj_t * lv_ta_create(lv_obj_t * par, const lv_obj_t * copy) ext->cursor.type = LV_CURSOR_LINE; ext->cursor.valid_x = 0; ext->one_line = 0; +#if LV_LABEL_TEXT_SEL ext->text_sel_en = 0; +#endif ext->label = NULL; ext->placeholder = NULL;
cpeng: fix API functions to use correct registers Fix the following API functions to reference the shadow registers/fields: * hps_kernel_verify() * hps_ssbl_verify()
@@ -21,9 +21,9 @@ api: | def ce_fifo2_status() -> int: return CSR_CE2HOST_STATUS.CE_FIFO2_STS def hps_kernel_verify() -> int: - return CSR_HPS2HOST_RSP.KERNEL_VFY + return CSR_HPS2HOST_RSP_SHDW.KERNEL_VFY_SHDW def hps_ssbl_verify() -> int: - return CSR_HPS2HOST_RSP.SSBL_VFY + return CSR_HPS2HOST_RSP_SHDW.SSBL_VFY_SHDW def ce_soft_reset(): CSR_CE_SFTRST.CE_SFTRST = 1 def image_complete():
Use MEL commands to create and assign sets This fixes crash when undoing a sync.
#include <maya/MFnIntArrayData.h> #include <maya/MFnNumericAttribute.h> #include <maya/MFnNurbsCurve.h> -#include <maya/MFnSet.h> #include <maya/MFnSingleIndexedComponent.h> #include <maya/MFnStringArrayData.h> #include <maya/MFnTypedAttribute.h> @@ -1088,27 +1087,15 @@ SyncOutputGeometryPart::createOutputGroups( MFn::Type componentType = (MFn::Type) groupTypePlug.asInt(); MObject setObj = Util::findNodeByName(setName); - MFnSet setFn; - if(!setObj.isNull()) - { - status = setFn.setObject(setObj); - if(MFAIL(status)) - { - setObj = MObject::kNullObj; - } - } - if(setObj.isNull()) { - setObj = setFn.create( - MSelectionList(), - MFnSet::kNone, - &status + status = Util::createNodeByModifierCommand( + myDagModifier, + "select -noExpand `sets " + "-name \"" + setName + "\"`", + setObj ); CHECK_MSTATUS(status); - - status = myDagModifier.renameNode(setObj, setName); - CHECK_MSTATUS(status); } MObject groupMembersObj = groupMembersPlug.asMObject(); @@ -1121,7 +1108,19 @@ SyncOutputGeometryPart::createOutputGroups( MIntArray componentArray = groupMembersDataFn.array(); componentFn.addElements(componentArray); - setFn.addMember(dagPath, componentObj); + MString assignCommand = "sets -e -forceElement " + + MFnDependencyNode(setObj).name(); + + MSelectionList selectionList; + MStringArray selectionStrings; + selectionList.add(dagPath, componentObj); + selectionList.getSelectionStrings(selectionStrings); + for(unsigned int i = 0; i < selectionStrings.length(); i++) + { + assignCommand += " " + selectionStrings[i]; + } + + myDagModifier.commandToExecute(assignCommand); } return MStatus::kSuccess;
ci: redirect stderr into stdout in check.mk (again)
@@ -32,7 +32,7 @@ _check: _do-check: cmake $(CMAKE_ARGS) -H$(SRC_DIR) -B. make all - make check + exec 2>&1 ; make check _fuzz: $(FUZZ_ASAN) CC=clang CXX=clang++ $(MAKE) -f $(CHECK_MK) _check CMAKE_ARGS=-DBUILD_FUZZER=ON
hslua-marshalling: improve docs of Result type
@@ -46,7 +46,7 @@ import qualified HsLua.Core.Utf8 as Utf8 -- from the Lua stack. data Result a = Success a - | Failure ByteString [Name] + | Failure ByteString [Name] -- ^ Error message and stack of contexts deriving (Show, Eq, Functor) instance Applicative Result where