Columns: message (string, length 6-474 chars) · diff (string, length 8-5.22k chars)
integer scaling, not integral
@@ -65,8 +65,8 @@ must be separated by one space. For example: recommended, but fractional values are also supported. If a fractional value are specified, be warned that it is not possible to faithfully represent the contents of your windows - they will be rendered at the next - highest integral scale factor and downscaled. You may be better served by - setting an integral scale factor and adjusting the font size of your + highest integer scale factor and downscaled. You may be better served by + setting an integer scale factor and adjusting the font size of your applications to taste. HiDPI isn't supported with Xwayland clients (windows will blur).
updates FreeRTOS.h to handle new usages of task notify
@@ -637,7 +637,7 @@ hold explicit before calling the code. */ #endif #ifndef traceTASK_NOTIFY_TAKE - #define traceTASK_NOTIFY_TAKE() + #define traceTASK_NOTIFY_TAKE( uxIndexToWait ) #endif #ifndef traceTASK_NOTIFY_WAIT_BLOCK @@ -645,7 +645,7 @@ hold explicit before calling the code. */ #endif #ifndef traceTASK_NOTIFY_WAIT - #define traceTASK_NOTIFY_WAIT() + #define traceTASK_NOTIFY_WAIT( uxIndexToWait ) #endif #ifndef traceTASK_NOTIFY
name the magical logic that checks if a number is a power of 2
@@ -843,10 +843,13 @@ def generate_filter_cflag(candidates): return "-DCHECK_ALLOWED_RES_HEADER_NAME(s,l)=(%s)" % " || ".join(conditions) +def is_power_of_two(n): + return (n > 0) and (n & (n-1) == 0) + def is_valid_page_cnt(s): try: n = int(s) - return (n > 0) and (n & (n-1) == 0) + return is_power_of_two(n) except: return False
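A quick standalone illustration (written in C for consistency with the other sketches here, not part of the patch above) of why the n & (n-1) trick works: subtracting 1 flips the lowest set bit and every bit below it, so the AND is zero exactly when n has a single bit set; the n > 0 guard excludes zero.

#include <assert.h>
#include <stdbool.h>

static bool is_power_of_two(unsigned int n)
{
    /* n - 1 clears the lowest set bit and sets all bits below it,
     * so n & (n - 1) == 0 only when n has exactly one bit set. */
    return (n > 0) && ((n & (n - 1)) == 0);
}

int main(void)
{
    assert(is_power_of_two(1) && is_power_of_two(2) && is_power_of_two(4096));
    assert(!is_power_of_two(0) && !is_power_of_two(3) && !is_power_of_two(48));
    return 0;
}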
nuc-samples: use huge pages as default in launch_uos cmdline
We should use hugetlb as the default UOS memory allocation solution.
@@ -16,7 +16,10 @@ fi #for memsize setting mem_size=1000M -acrn-dm -A -m $mem_size -c $2 -s 0:0,hostbridge -s 1:0,lpc -l com1,stdio \ +# make sure there is enough 2M hugepages in the pool +echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages + +acrn-dm -T -A -m $mem_size -c $2 -s 0:0,hostbridge -s 1:0,lpc -l com1,stdio \ -s 5,virtio-console,@pty:pty_port \ -s 6,virtio-hyper_dmabuf \ -s 3,virtio-blk,/root/clear-21260-kvm.img \
switched to older bash silencing syntax
@@ -6,7 +6,7 @@ set(GSLMD5 dba736f15404807834dc1c7b93e83b92) find_package(GSL 2.4) # If GSL is not installed, lets go ahead and compile it -if(NOT GSL_FOUND ) +if( GSL_FOUND ) message(STATUS "GSL not found, downloading and compiling from source") ExternalProject_Add(GSL PREFIX GSL @@ -17,7 +17,7 @@ if(NOT GSL_FOUND ) --prefix=${CMAKE_BINARY_DIR}/extern --enable-shared=no --with-pic=yes - BUILD_COMMAND make -j8 &> out.log + BUILD_COMMAND make -j8 > out.log 2>&1 INSTALL_COMMAND make install BUILD_IN_SOURCE 1) set(GSL_LIBRARY_DIRS ${CMAKE_BINARY_DIR}/extern/lib/ )
initial implementation of printing logic
bas=fuse-source con=(list [fuse-source germ]) == +:: Request to list current fuses. ~ means "list all" +:: ++$ kiln-fuse-list (unit desk) -- |= [bowl:gall state] ?> =(src our) ?~ +< abet abet:abet:(merge:(work syd) ali sud cas gim) :: +++ poke-fuse-list + => + |% + ++ format-fuse + |= pf=per-fuse + ^- tang + [>bas.kf.pf< (turn con.kf.pf |=(x=[fuse-source germ] >x<))] + -- + |= k=kiln-fuse-list + ^+ abet + %. abet + ?~ k + ?~ fus + (slog [leaf+"no ongoing fuses" ~]) + %- slog + %+ roll + ~(tap by `(map desk per-fuse)`fus) + |= [[syd=desk pf=per-fuse] acc=tang] + ^- tang + [leaf+"{<syd>}" (weld (format-fuse pf) acc)] + =/ pfu=(unit per-fuse) (~(get by fus) u.k) + ?~ pfu + (slog [leaf+"no ongoing fuse for {<u.k>}" ~]) + (slog (format-fuse u.pfu)) +:: ++ poke-fuse |= k=kiln-fuse ?~ k abet %kiln-label =;(f (f !<(_+<.f vase)) poke-label) %kiln-merge =;(f (f !<(_+<.f vase)) poke-merge) %kiln-fuse =;(f (f !<(_+<.f vase)) poke-fuse) + %kiln-fuse-list =;(f (f !<(_+<.f vase)) poke-fuse-list) %kiln-mount =;(f (f !<(_+<.f vase)) poke-mount) %kiln-ota =;(f (f !<(_+<.f vase)) poke:update) %kiln-ota-info =;(f (f !<(_+<.f vase)) poke-ota-info)
Resolve duplicate label
@@ -47,7 +47,7 @@ The first step in attaching the FIR filter as a MMIO peripheral is to create an :start-after: DOC include start: GenericFIRBlock chisel :end-before: DOC include end: GenericFIRBlock chisel -Connecting by TileLink +Connecting DspBlock by TileLink ---------------------- With these classes implemented, you can begin to construct the chain by extending ``GenericFIRBlock`` while using the ``TLDspBlock`` trait via mixin.
Add time since last test started to test output. This makes it easier to see the timing of each step in the test.
@@ -46,6 +46,8 @@ static char testGroupData[64]; static struct HarnessTestLocal { + uint64_t logLastBeginTime; // Store the begin time of the last log for deltas + struct HarnessTestResult { bool running; // Is the test currently running? @@ -208,6 +210,8 @@ testBegin(const char *name) result = true; } + harnessTestLocal.logLastBeginTime = 0; + FUNCTION_HARNESS_RESULT(BOOL, result); } @@ -410,10 +414,25 @@ hrnTestLogPrefix(int lineNo, bool padding) // Add timing if requested if (testTiming) + { + uint64_t currentTime = testTimeMSec(); + + // Print elapsed time size the beginning of the test run + printf( + "%03" PRIu64 ".%03" PRIu64"s", ((currentTime - testTimeMSecBegin()) / 1000), + ((currentTime - testTimeMSecBegin()) % 1000)); + + // Print delta time since the last log message + if (harnessTestLocal.logLastBeginTime != 0) { printf( - "%03u.%03us ", (unsigned int)((testTimeMSec() - testTimeMSecBegin()) / 1000), - (unsigned int)((testTimeMSec() - testTimeMSecBegin()) % 1000)); + " %03" PRIu64 ".%03" PRIu64"s ", ((currentTime - harnessTestLocal.logLastBeginTime) / 1000), + ((currentTime - harnessTestLocal.logLastBeginTime) % 1000)); + } + else + printf(" "); + + harnessTestLocal.logLastBeginTime = currentTime; } // Add number and padding
[ci] Fix gitlab's CI to match the new dependency target
@@ -102,7 +102,7 @@ verilator-model: ln -s $VERILATOR_ROOT/share/verilator/bin/verilator_includer $VERILATOR_ROOT/bin/verilator_includer # Build the verilator model git submodule update --init --recursive -- hardware/deps/* - make patch-hw + make update-deps make -C hardware $ROOT_DIR/hardware/verilator_build/Vmempool_tb_verilator $CI_PROJECT_DIR/scripts/memora_retry.sh insert verilator-model fi @@ -114,7 +114,7 @@ hardware: stage: test script: - git submodule update --init --recursive -- hardware/deps/* - - make patch-hw + - make update-deps - make -C hardware compile needs: [] @@ -153,7 +153,7 @@ check-opcodes: - make COMPILER=${COMPILER} all - cd ../.. - git submodule update --init --recursive -- hardware/deps/* - - make patch-hw + - make update-deps - cd hardware - | for APP in ${APPS}; do @@ -188,7 +188,7 @@ apps-halide: - make COMPILER=${COMPILER} all - cd ../.. - git submodule update --init --recursive -- hardware/deps/* - - make patch-hw + - make update-deps - cd hardware - app=halide-2d_convolution make simc variables: @@ -204,7 +204,7 @@ unit-tests: - $CI_PROJECT_DIR/scripts/memora_retry.sh get riscv-isa-sim - $CI_PROJECT_DIR/scripts/memora_retry.sh get verilator-model - git submodule update --init --recursive -- hardware/deps/* - - make patch-hw + - make update-deps - touch $ROOT_DIR/hardware/src/bootrom.sv - touch $ROOT_DIR/hardware/verilator_build/Vmempool_tb_verilator.mk - touch $ROOT_DIR/hardware/verilator_build/Vmempool_tb_verilator
Fix potential not initialized warning
@@ -291,6 +291,9 @@ void mpi_mod_raw_sub( char * input_A, size_t limbs_N; size_t limbs_res; + mbedtls_mpi_mod_modulus m; + mbedtls_mpi_mod_modulus_init( &m ); + TEST_EQUAL( mbedtls_test_read_mpi_core( &A, &limbs_A, input_A ), 0 ); TEST_EQUAL( mbedtls_test_read_mpi_core( &B, &limbs_B, input_B ), 0 ); TEST_EQUAL( mbedtls_test_read_mpi_core( &N, &limbs_N, input_N ), 0 ); @@ -305,9 +308,6 @@ void mpi_mod_raw_sub( char * input_A, ASSERT_ALLOC( X, limbs ); - mbedtls_mpi_mod_modulus m; - mbedtls_mpi_mod_modulus_init( &m ); - TEST_EQUAL( mbedtls_mpi_mod_modulus_setup( &m, N, limbs, MBEDTLS_MPI_MOD_EXT_REP_BE,
MPU6050: removed extra param comments
@@ -81,7 +81,6 @@ static const struct sensor_driver g_mpu6050_sensor_driver = { * Writes a single byte to the specified register * * @param The sensor interface - * @param The I2C address to use * @param The register address to write to * @param The value to write * @@ -117,7 +116,6 @@ mpu6050_write8(struct sensor_itf *itf, uint8_t reg, uint32_t value) * Reads a single byte from the specified register * * @param The sensor interface - * @param The I2C address to use * @param The register address to read from * @param Pointer to where the register value should be written * @@ -163,7 +161,6 @@ mpu6050_read8(struct sensor_itf *itf, uint8_t reg, uint8_t *value) * Reads a six bytes from the specified register * * @param The sensor interface - * @param The I2C address to use * @param The register address to read from * @param Pointer to where the register value should be written *
Update README.md
Inform about the actual dependencies.
@@ -14,14 +14,18 @@ other UNIX tools. ## Installing RGBDS (UNIX) -RGBDS requires libpng and pkg-config to be installed. +RGBDS requires yacc, flex, libpng and pkg-config to be installed. On Mac OS X, install them with [Homebrew](http://brew.sh/). On other Unixes, -use the built-in package manager. +use the built-in package manager. For example, on Debian or Ubuntu: -You can test if they're installed by running `pkg-config --cflags libpng`: -if the output is a path, then you're good, and if it outputs an error then -you need to install them via a package manager. +```sh +sudo apt-get install byacc flex pkg-config libpng-dev +``` + +You can test if libpng and pkg-config are installed by running +`pkg-config --cflags libpng`: if the output is a path, then you're good, and if +it outputs an error then you need to install them via a package manager. To build the programs on a UNIX or UNIX-like system, just run in your terminal:
doc: v2.0 release notes
@@ -126,6 +126,9 @@ a passthrough device to/from a post-launched VM is shown in the following figure ptdev de-assignment control flow +.. _vtd-posted-interrupt: + + VT-d Interrupt-remapping ************************
Restore ADDRESS_SANITIZER blocks to reduce diff with upstream.
#include "arena.h" + +#ifdef ADDRESS_SANITIZER +#include <sanitizer/asan_interface.h> +#endif + namespace google { namespace protobuf { @@ -129,6 +134,12 @@ Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n, b->pos = kHeaderSize + n; b->size = size; b->owner = me; +#ifdef ADDRESS_SANITIZER + // Poison the rest of the block for ASAN. It was unpoisoned by the underlying + // malloc but it's not yet usable until we return it as part of an allocation. + ASAN_POISON_MEMORY_REGION( + reinterpret_cast<char*>(b) + b->pos, b->size - b->pos); +#endif return b; } @@ -190,6 +201,9 @@ void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) { void* Arena::AllocFromBlock(Block* b, size_t n) { size_t p = b->pos; b->pos = p + n; +#ifdef ADDRESS_SANITIZER + ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n); +#endif return reinterpret_cast<char*>(b) + p; return reinterpret_cast<char*>(b) + p; }
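The idea behind the restored blocks, sketched below as a minimal standalone bump allocator (an illustration only, not the protobuf Arena; ADDRESS_SANITIZER is assumed to be defined by the build, and alignment handling is omitted): memory the arena has not yet handed out is poisoned so ASAN reports any stray access to it, and each allocation unpoisons only the bytes it returns.

#include <stddef.h>
#include <stdlib.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#else
/* No-op fallbacks when building without ASAN. */
#define ASAN_POISON_MEMORY_REGION(addr, size)   ((void)(addr), (void)(size))
#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
#endif

typedef struct { char *base; size_t pos; size_t size; } bump_arena;

static int arena_init(bump_arena *a, size_t size)
{
    a->base = malloc(size);
    if (a->base == NULL) return -1;
    a->pos = 0;
    a->size = size;
    /* Nothing has been handed out yet: poison the whole block so stray
     * reads/writes into it are reported by ASAN. */
    ASAN_POISON_MEMORY_REGION(a->base, a->size);
    return 0;
}

static void *arena_alloc(bump_arena *a, size_t n)
{
    if (a->size - a->pos < n) return NULL;
    void *p = a->base + a->pos;
    a->pos += n;
    /* Only the bytes returned to the caller become addressable again. */
    ASAN_UNPOISON_MEMORY_REGION(p, n);
    return p;
}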
Moved =, in /lib/hall-json to the top level.
:: /- hall /+ old-zuse +=, hall :: |_ bol/bowl:gall ++ en-tape ::> sur to tape - =, hall |% ++ circ ::> circle |= a/circle -- :: ++ de-tape ::> tape to sur (parse) - =, hall |% ++ circ ::> circle ;~((glue fas) ;~(pfix sig fed:ag) urt:ab) -- :: ++ enjs ::> sur to json - =, hall =, enjs:format |% ::TODO these first few should probably make their way -- :: ++ dejs ::> json to sur - =, hall =, dejs-soft:format |% ::TODO these first few should maybe make their way
Update docs/Coding style.md
Fix duplicated word.
@@ -153,7 +153,7 @@ C++ code explicitly expresss lifetime semantics. - If required by a platform (e.g, Microsoft COM), use the appropriate ref-counted type, but do not expose those types to platform agnostic code, - prefer to use use an abstraction. + prefer to use an abstraction. Python code -----------
perf-tools/tau: remove patch debug
CFLAGS = $(TAU_MPI_COMPILE_INCLUDE) $(TAU_INCLUDE) $(TAU_DEFS) $(USER_OPT) $(TAU_INTERNAL_FLAGS) -fPIC -LDFLAGS = $(TAU_MPI_LIB) -+LDFLAGS = -L/tmp/opt/ohpc/pub/libs/gnu7/openmpi3/tau/2.27/lib $(TAU_MPI_LIB) ++LDFLAGS = -L$(TAU_PREFIX_INSTALL_DIR)/lib $(TAU_MPI_LIB) OBJS = libtau_plugin_function_registration_complete.so libtau_plugin_atomic_event_trigger.so libtau_plugin_atomic_event_registration_complete.so libtau_plugin_end_of_execution.so libtau_plugin_interrupt_trigger.so tau_plugin_function_registration_complete.o tau_plugin_atomic_event_trigger.o tau_plugin_atomic_event_registration_complete.o tau_plugin_end_of_execution.o tau_plugin_interrupt_trigger.o CFLAGS = $(TAU_MPI_COMPILE_INCLUDE) $(TAU_INCLUDE) $(TAU_DEFS) $(USER_OPT) $(TAU_INTERNAL_FLAGS) -fPIC -LDFLAGS = $(TAU_MPI_LIB) -+LDFLAGS = -L/tmp/opt/ohpc/pub/libs/gnu7/openmpi3/tau/2.27/lib $(TAU_MPI_LIB) ++LDFLAGS = -L$(TAU_PREFIX_INSTALL_DIR)/lib $(TAU_MPI_LIB) OBJS = libTAU-filter-plugin.so libTAU-mpit-recommend-sharp-usage-plugin.so libTAU-mpit-mvapich-free_unused_vbufs.so tau_plugin_example_disable_instrumentation_runtime.o tau_plugin_example_mpit_recommend_sharp_usage.o tau_plugin_example_free_unused_vbufs.o
cancel remaining subs on [%news
?. (~(has by def.deh.bay) dep) ~&([%wasp-unknown dep] this) :: - |^ ?:(ask start cancel) - ++ start - ^+ this + ?:(ask (wasp-start dep) (wasp-cancel dep)) + :: + ++ wasp-start + |= dep=@uvH ^+ this ?: (~(has by sup.bay) dep) :: already requested this(sup.bay (~(put ju sup.bay) dep hen)) =. sup.bay (~(put ju sup.bay) dep hen) mow :_(mow [hen (pass-warp & i.bes)]) == :: - ++ cancel - ^+ this + ++ wasp-cancel + |= dep=@uvH ^+ this =. sup.bay (~(del ju sup.bay) dep hen) ?: (~(has by sup.bay) dep) :: other listeners exist this =/ bes=(list [beam care:clay]) ~(tap in (dep-beams des)) |- ^+ this ?~ bes this - ?> (~(has in out.bay) i.bes) + ?. (~(has in out.bay) i.bes) $(bes t.bes) :: already cancelled ?: (dent-has-subscribers [%beam i.bes]) :: if any other dep cares about this beam, stay subscribed $(bes t.bes) bes t.bes mow :_(mow [hen (pass-warp | i.bes)]) == - -- + :: ++ dent-has-subscribers :> does the dent or any dent that depends on it have subscribers? |= den/dent =/ dos=(set dent) (downstream-dents (sy den ~)) =. dos (~(put in dos) den) =/ hashes=(list @uvH) - =- ~(tap in out) + =- ~(tap in hashes) %- ~(rep in dos) - |= [den=dent out=(set @uvH)] - (~(uni in out) (~(get ju bak.deh.bay) den)) + |= [den=dent hashes=(set @uvH)] + (~(uni in hashes) (~(get ju bak.deh.bay) den)) :: :: ~& [den=den dos=dos hashes=hashes] :: |- ^+ this ?~ hashes this - %_ $ - hashes t.hashes :: iterate - sup.bay (~(del by sup.bay) i.hashes) :: remove listeners - mow :: send %news moves - %- weld :_ mow + =. this $(hashes t.hashes) :: iterate =/ listeners=(set duct) (~(get ju sup.bay) i.hashes) - :: ~& [hax=i.hashes liz=listeners] + =. mow :: send %news moves + %- weld :_ mow %+ turn ~(tap in listeners) |=(a=duct `move`[a %give %news i.hashes]) - == + :: + (wasp-cancel i.hashes) :: ++ downstream-dents :> obtain all dents that depend on any of the dents in the sample. ?~ cax b ?< ?=($beam -.a) :: + ::XX only for original beak in q.q.u.cax? + =. dep.p.u.cax (move-to bek dep.p.u.cax) :: ~& promo+a =. u.cax ?- -.a $load [%load mar bem]:den == :: + ++ move-to + |= [bek=beak des=(set dent)] ^- (set dent) + %- sy + %+ turn ~(tap in des) + |= den=dent ^+ den + =? den ?=(%boil -.den) den(-.bom bek) + den(-.bem bek) + :: ::+| :: ++ pass
jni-documentation: extend document for macOS
@@ -30,6 +30,7 @@ The java plugin itself needs to have the following methods: ## Installation ## +### Java prerequisites on Linux (Debian) ### Please install java8 as package, e.g. [for debian](http://www.webupd8.org/2014/03/how-to-install-oracle-java-8-in-debian.html) and then let cmake actually find jdk8: @@ -47,6 +48,25 @@ and run: sudo ldconfig +### Java prerequisites on macOS ### + +macOS includes an old apple specific version of java, based on 1.6. However, for the jni plugin version 1.8 of Java is required, so either the openjdk or the oracle jdk has to be installed. + +Please install oracle's jdk8 via their provided installer. After that, you have to set the JAVA_HOME environment variable to the folder where the jdk is installed, usually like + + export JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_112.jdk/Contents/Home/" + +If it should still not find the correct jni version, or says the jni version is not 1.8, then it most likely still searches in the wrong directory for the jni header file. You can try clearing your build folder and configuring it again from scratch. +It has been experienced that if the project has been built already without this environment variable set, the java location is cached and will be resolved wrong in future builds, even though the environment variable is set. + +As macOS handles linked libraries differently, there is no ldconfig command. Instead you can export an environment variable to tell elektra the location of the java dynamic libraries. + + export DYLD_FALLBACK_LIBRARY_PATH="/Library/Java/JavaVirtualMachines/jdk1.8.0_112.jdk/Contents/Home/jre/lib:/Library/Java/JavaVirtualMachines/jdk1.8.0_112.jdk/Contents/Home/jre/lib/server/" + + +Afterwards, the jni plugin should be included in the build and compile successfully. + +### Enabling the plugin ### Then enable the plugin using (`ALL;-EXPERIMENTAL` is default): cmake -DPLUGINS="ALL;-EXPERIMENTAL;jni" /path/to/libelektra
tls: pake: do not destroy key on errors while setting opaque password
@@ -1950,31 +1950,24 @@ int mbedtls_ssl_set_hs_ecjpake_password_opaque( mbedtls_ssl_context *ssl, status = psa_pake_setup( &ssl->handshake->psa_pake_ctx, &cipher_suite ); if( status != PSA_SUCCESS ) - { - psa_destroy_key( ssl->handshake->psa_pake_password ); - return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); - } + goto error; status = psa_pake_set_role( &ssl->handshake->psa_pake_ctx, psa_role ); if( status != PSA_SUCCESS ) - { - psa_destroy_key( ssl->handshake->psa_pake_password ); - psa_pake_abort( &ssl->handshake->psa_pake_ctx ); - return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); - } + goto error; psa_pake_set_password_key( &ssl->handshake->psa_pake_ctx, ssl->handshake->psa_pake_password ); if( status != PSA_SUCCESS ) - { - psa_destroy_key( ssl->handshake->psa_pake_password ); - psa_pake_abort( &ssl->handshake->psa_pake_ctx ); - return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); - } + goto error; ssl->handshake->psa_pake_ctx_is_ok = 1; return( 0 ); + +error: + psa_pake_abort( &ssl->handshake->psa_pake_ctx ); + return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); } #else /* MBEDTLS_USE_PSA_CRYPTO */ int mbedtls_ssl_set_hs_ecjpake_password( mbedtls_ssl_context *ssl,
Ask for review less often
@@ -17,10 +17,10 @@ import StoreKit if minLaunches == nil { minLaunches = 0 } else if (minLaunches as! Int) == 0 { - minLaunches = 3 - } else if launches == 3 { - minLaunches = 5 - } else if launches >= 5 { + minLaunches = 6 + } else if launches == 6 { + minLaunches = 10 + } else if launches >= 10 { minLaunches = 0 }
Remove duplicated debug macro
@@ -1007,30 +1007,6 @@ Once LOGM is approved, each module should have its own index #endif #endif /* CONFIG_CPP_HAVE_VARARGS */ -#ifdef CONFIG_DEBUG_AUDIO_ERROR -#define auddbg dbg -#define audlldbg lldbg -#else -#define auddbg(...) -#define audlldbg(...) -#endif - -#ifdef CONFIG_DEBUG_AUDIO_WARN -#define audwdbg wdbg -#define audllwdbg llwdbg -#else -#define audwdbg(...) -#define audllwdbg(...) -#endif - -#ifdef CONFIG_DEBUG_AUDIO_INFO -#define audvdbg vdbg -#define audllvdbg llvdbg -#else -#define audvdbg(...) -#define audllvdbg(...) -#endif - /* Buffer dumping macros do not depend on varargs */ #ifdef CONFIG_DEBUG
Work CI-CD
Update win32 task to update pip on the fly.
***NO_CI***
@@ -44,10 +44,13 @@ steps: targetType: 'inline' script: | + # update pip (until the build agent image is updated) + python -m pip install --upgrade pip + # install Cloudsmith CLI python -m pip install --upgrade cloudsmith-cli cloudsmith push raw net-nanoframework/$(CLOUDSMITH_REPO) $(Build.ArtifactStagingDirectory)\$(TargetPublishName)\nanoFramework.nanoCLR.exe --name $(TargetPublishName) --version $(PACKAGE_VERSION) -k $(CLOUDSMITH_KEY) errorActionPreference: 'stop' - failOnStderr: 'true' + failOnStderr: 'false'
RNG seed: add get_entropy hook for seeding.
@@ -34,6 +34,8 @@ static OSSL_FUNC_rand_verify_zeroization_fn seed_src_verify_zeroization; static OSSL_FUNC_rand_enable_locking_fn seed_src_enable_locking; static OSSL_FUNC_rand_lock_fn seed_src_lock; static OSSL_FUNC_rand_unlock_fn seed_src_unlock; +static OSSL_FUNC_rand_get_seed_fn seed_get_seed; +static OSSL_FUNC_rand_clear_seed_fn seed_clear_seed; typedef struct { void *provctx; @@ -170,6 +172,47 @@ static int seed_src_verify_zeroization(ossl_unused void *vseed) return 1; } +static size_t seed_get_seed(void *vseed, unsigned char **pout, + int entropy, size_t min_len, size_t max_len, + int prediction_resistance, + const unsigned char *adin, size_t adin_len) +{ + size_t bytes_needed; + unsigned char *p; + + /* + * Figure out how many bytes we need. + * This assumes that the seed sources provide eight bits of entropy + * per byte. For lower quality sources, the formula will need to be + * different. + */ + bytes_needed = entropy >= 0 ? (entropy + 7) / 8 : 0; + if (bytes_needed < min_len) + bytes_needed = min_len; + if (bytes_needed > max_len) { + ERR_raise(ERR_LIB_PROV, PROV_R_ENTROPY_SOURCE_STRENGTH_TOO_WEAK); + return 0; + } + + p = OPENSSL_secure_malloc(bytes_needed); + if (p == NULL) { + ERR_raise(ERR_LIB_PROV, ERR_R_MALLOC_FAILURE); + return 0; + } + *pout = p; + if (seed_src_generate(vseed, p, bytes_needed, 0, prediction_resistance, + adin, adin_len) != 0) + return bytes_needed; + OPENSSL_secure_clear_free(p, bytes_needed); + return 0; +} + +static void seed_clear_seed(ossl_unused void *vdrbg, + unsigned char *out, size_t outlen) +{ + OPENSSL_secure_clear_free(out, outlen); +} + static int seed_src_enable_locking(ossl_unused void *vseed) { return 1; @@ -201,5 +244,7 @@ const OSSL_DISPATCH ossl_seed_src_functions[] = { { OSSL_FUNC_RAND_GET_CTX_PARAMS, (void(*)(void))seed_src_get_ctx_params }, { OSSL_FUNC_RAND_VERIFY_ZEROIZATION, (void(*)(void))seed_src_verify_zeroization }, + { OSSL_FUNC_RAND_GET_SEED, (void(*)(void))seed_get_seed }, + { OSSL_FUNC_RAND_CLEAR_SEED, (void(*)(void))seed_clear_seed }, { 0, NULL } };
Corrected wrong character in some defines
@@ -866,7 +866,7 @@ static struct stlink_chipid_params devices[] = { { // STM32H7A3/7B3 // RM0455 - .chip_id = STLINK_CHIPID_STM32_H7AX, + .chip_id = STLINK_CHIPID_STM32_H7Ax, .description = "H7Ax/H7Bx", .flash_type = STLINK_FLASH_TYPE_H7, .flash_size_reg = 0x08FFF80C, // "Flash size register" (p.2949) @@ -883,7 +883,7 @@ static struct stlink_chipid_params devices[] = { { // STM32H72x/H73x // RM0468 - .chip_id = STLINK_CHIPID_STM32_H72X, + .chip_id = STLINK_CHIPID_STM32_H72x, .description = "H72x/H73x", .flash_type = STLINK_FLASH_TYPE_H7, .flash_size_reg = 0x1FF1E880, // "Flash size register" (p.3286)
genesis: Redefine GPIO BJ_ADP_PRESENT_L
TEST=Built EC for genesis
Tested-by: Neill Corlett
@@ -45,6 +45,7 @@ GPIO_INT(EC_RECOVERY_BTN_ODL, PIN(F, 1), GPIO_INT_BOTH, button_interrupt) * Recovery button input from H1. */ GPIO_INT(H1_EC_RECOVERY_BTN_ODL, PIN(2, 4), GPIO_INT_BOTH, button_interrupt) +GPIO(BJ_ADP_PRESENT_L, PIN(8, 2), GPIO_INT_BOTH | GPIO_PULL_UP) /* Port power control interrupts */ GPIO_INT(HDMI_CONN0_OC_ODL, PIN(0, 7), GPIO_INT_BOTH, port_ocp_interrupt)
[mod_webdav] define _NETBSD_SOURCE on NetBSD
NetBSD dirent.h improperly hides fdopendir() (POSIX.1-2008) declaration which should be visible w/ _XOPEN_SOURCE 700 or _POSIX_C_SOURCE 200809L
#if !defined(_XOPEN_SOURCE) || _XOPEN_SOURCE-0 < 700 #undef _XOPEN_SOURCE #define _XOPEN_SOURCE 700 +/* NetBSD dirent.h improperly hides fdopendir() (POSIX.1-2008) declaration + * which should be visible with _XOPEN_SOURCE 700 or _POSIX_C_SOURCE 200809L */ +#ifdef __NetBSD__ +#define _NETBSD_SOURCE +#endif #endif /* DT_UNKNOWN DTTOIF() */ #ifndef _GNU_SOURCE
[net][at] Fix AT Socket select send data issue.
@@ -200,7 +200,7 @@ static void at_do_event_changes(struct at_socket *sock, at_event_t event, rt_boo { if (is_plus) { - sock->sendevent++; + sock->sendevent = 1; #ifdef SAL_USING_POSIX rt_wqueue_wakeup(&sock->wait_head, (void*) POLLOUT); @@ -209,7 +209,7 @@ static void at_do_event_changes(struct at_socket *sock, at_event_t event, rt_boo } else if (sock->sendevent) { - sock->sendevent --; + sock->sendevent = 0; } break; } @@ -252,6 +252,30 @@ static void at_do_event_changes(struct at_socket *sock, at_event_t event, rt_boo } } +static void at_do_event_clean(struct at_socket *sock, at_event_t event) +{ + switch (event) + { + case AT_EVENT_SEND: + { + sock->sendevent = 0; + break; + } + case AT_EVENT_RECV: + { + sock->rcvevent = 0; + break; + } + case AT_EVENT_ERROR: + { + sock->errevent = 0; + break; + } + default: + LOG_E("Not supported event (%d)", event); + } +} + static struct at_socket *alloc_socket(void) { static rt_mutex_t at_slock = RT_NULL; @@ -565,6 +589,8 @@ __exit: at_do_event_changes(sock, AT_EVENT_ERROR, RT_TRUE); } + at_do_event_changes(sock, AT_EVENT_SEND, RT_TRUE); + return result; } @@ -688,6 +714,10 @@ __exit: { at_do_event_changes(sock, AT_EVENT_RECV, RT_TRUE); } + else + { + at_do_event_clean(sock, AT_EVENT_RECV); + } } else {
Make calc_sao_bands less opaque
@@ -254,8 +254,11 @@ static void calc_sao_bands(const encoder_state_t * const state, const kvz_pixel //Loop pixels and take top 5 bits to classify different bands for (y = 0; y < block_height; ++y) { for (x = 0; x < block_width; ++x) { - sao_bands[0][rec_data[y * block_width + x]>>shift] += orig_data[y * block_width + x] - rec_data[y * block_width + x]; - sao_bands[1][rec_data[y * block_width + x]>>shift]++; + int32_t curr_pos = y * block_width + x; + + kvz_pixel sb_index = rec_data[curr_pos] >> shift; + sao_bands[0][sb_index] += orig_data[curr_pos] - rec_data[curr_pos]; + sao_bands[1][sb_index]++; } } }
Switch to star refinement in TZ search
@@ -614,8 +614,8 @@ static void tz_search(inter_search_info_t *info, vector2d_t extra_mv) const int iRaster = 5; // search distance limit and downsampling factor for step 3 const unsigned step2_type = 0; // search patterns for steps 2 and 4 const unsigned step4_type = 0; - const bool bRasterRefinementEnable = true; // enable step 4 mode 1 - const bool bStarRefinementEnable = false; // enable step 4 mode 2 (only one mode will be executed) + const bool bRasterRefinementEnable = false; // enable step 4 mode 1 + const bool bStarRefinementEnable = true; // enable step 4 mode 2 (only one mode will be executed) int best_dist = 0; info->best_cost = UINT32_MAX;
arch in lib path
@@ -129,7 +129,7 @@ fi -pdt=$PDTOOLKIT_DIR \ -useropt="%optflags -I$MPI_INCLUDE_DIR -I$PWD/include -fno-strict-aliasing" \ -openmp \ - -extrashlibopts="-fPIC -L$MPI_LIB_DIR -lmpi -L/tmp/%{install_path}/lib" + -extrashlibopts="-fPIC -L$MPI_LIB_DIR -lmpi -L/tmp/%{install_path}/lib -L/tmp/%{install_path}/%{machine}/lib" make install make exports
scripts: do not fail if backend was down see
@@ -40,13 +40,13 @@ echo -n "Old PID was: " cat /run/elektra-rest-backend.pid || /bin/true echo -# then restart the backend -kdb stop-rest-backend +# then restart the backend, succeed if it was not started before.. +kdb stop-rest-backend || /bin/true # avoid 'address already in use' while netstat --listen --numeric-ports | grep "LISTEN" | grep "0.0.0.0:8080" do - sleep 1 + sleep 1 # keep waiting (=downtime) short done kdb run-rest-backend @@ -57,6 +57,7 @@ echo # we have to make sure the backend had time to start before we can go on # (necessary because the command `kdb run-rest-backend` runs in the background) +# everything is online already, so no hurry here sleep 60 # and finally re-compile the frontend
sailfish: adding net and net-ssh gems to gemspec
@@ -34,6 +34,8 @@ Gem::Specification.new do |s| s.add_dependency('rdoc', '4.2.2') s.add_dependency('deep_merge','1.1.1') s.add_dependency('nokogiri', '1.8.2') + s.add_dependency('net', '0.3.3') + s.add_dependency('net-ssh', '5.0.2') s.post_install_message = "Thanks for installing Rhodes ! Please visit our website: http://tau-technologies.com" if s.respond_to? :post_install_message= s.executables << 'rhogen' s.executables << 'rhodes'
collector_runner: avoid shadowing variable
@@ -85,11 +85,11 @@ collector_runner(void *s) logout("%s", metric); \ else { \ size_t len = strlen(metric); \ - char *m = malloc(sizeof(char) * len + sizeof(len)); \ - if (m != NULL) { \ - *((size_t *)m) = len; \ - memcpy(m + sizeof(len), metric, len); \ - server_send(submission, m, 1); \ + char *mtrc = malloc(sizeof(char) * len + sizeof(len)); \ + if (mtrc != NULL) { \ + *((size_t *)mtrc) = len; \ + memcpy(mtrc + sizeof(len), metric, len); \ + server_send(submission, mtrc, 1); \ } \ }
do non-case-sensitive VENDOR search
@@ -1991,6 +1991,7 @@ return; static void readoui() { static int len; +static int c; static uid_t uid; static struct passwd *pwd; static struct stat statinfo; @@ -2015,6 +2016,15 @@ else if(stat(ouinameuser, &statinfo) == 0) usedoui = ouinamesystemwide; else return; if((fh_oui = fopen(usedoui, "r")) == NULL) return; zeiger = ouilist; + +if(filtervendorptr != NULL) + { + len = strlen(filtervendorptr); + for(c = 0; c < len; c++) + { + if(islower(filtervendorptr[c])) filtervendorptr[c] = toupper(filtervendorptr[c]); + } + } while(1) { if((len = fgetline(fh_oui, OUI_LINE_LEN, linein)) == -1) break; @@ -2024,6 +2034,10 @@ while(1) if(strstr(&linein[7], "(base 16)") == NULL) continue; if(filtervendorptr != NULL) { + for(c = 7; c < len; c++) + { + if(islower(linein[c])) linein[c] = toupper(linein[c]); + } if(strstr(&linein[7], filtervendorptr) == NULL) continue; } vendorptr = strrchr(&linein[7], '\t');
Update settings.c
Update the AWS Elastic Load Balancing log format
@@ -72,7 +72,7 @@ static const GPreConfLog logs = { "%d %t %^ %m %U %q %^ %^ %h %u %R %s %^ %^ %L", /* W3C */ "%d\\t%t\\t%^\\t%b\\t%h\\t%m\\t%v\\t%U\\t%s\\t%R\\t%u\\t%q\\t%^\\t%C\\t%^\\t%^\\t%^\\t%^\\t%T\\t%^\\t%K\\t%k\\t%^\\t%H\\t%^", /* CloudFront */ "\"%x\",\"%h\",%^,%^,\"%m\",\"%U\",\"%s\",%^,\"%b\",\"%D\",%^,\"%R\",\"%u\"", /* Cloud Storage */ - "%^ %dT%t.%^ %v %h:%^ %^ %T %^ %^ %s %^ %b %^ \"%r\" \"%u\" %^", /* AWS Elastic Load Balancing */ + "%^ %dT%t.%^ %^ %h:%^ %^ %^ %T %^ %s %^ %^ %b \"%r\" \"%u\" %k %K %^ \"%^\" \"%v\"", /* AWS Elastic Load Balancing */ "%^ %^ %^ %v %^: %x.%^ %~%L %h %^/%s %b %m %U", /* Squid Native */ "%^ %v [%d:%t %^] %h %^\"%r\" %s %^ %b %^ %L %^ \"%R\" \"%u\"", /* Amazon S3 */
releases/describe_firmware: remove bitbox02 dependency
As per the releases/README, the file is supposed to be easy to audit. Pulling in a dependency makes audit more difficult. It also makes the script harder to use, if the user does not have the dependency installed. This commit inlines the relevant parse_signed_firmware() function from bootloader.py.
# limitations under the License. """CLI tool to dump infos about a signed firmware binary""" -try: - from bitbox02.bitbox02.bootloader import ( - parse_signed_firmware, - SIGDATA_MAGIC_STANDARD, - SIGDATA_MAGIC_BTCONLY, - SIGNING_PUBKEYS_DATA_LEN, - MAX_FIRMWARE_SIZE, - ) -except ModuleNotFoundError: - print("bitbox02 module not found; please see bitbox02-firmware/py/README.md") - -import sys import hashlib import struct +import sys + +# A signed firmware file consists of MAGIC_LEN (4) bytes of a firmware edition marker, followed by a +# SIGDATA_LEN bytes of a signature, and ending with the actual firmware binary bytes as resulting +# from a reproducible build. + +MAGIC_LEN = 4 +MAGIC_MULTI = struct.pack(">I", 0x653F362B) +MAGIC_BTCONLY = struct.pack(">I", 0x11233B0B) + +MAX_FIRMWARE_SIZE = 884736 +NUM_ROOT_KEYS = 3 +NUM_SIGNING_KEYS = 3 +VERSION_LEN = 4 +SIGNING_PUBKEYS_DATA_LEN = VERSION_LEN + NUM_SIGNING_KEYS * 64 + NUM_ROOT_KEYS * 64 +FIRMWARE_DATA_LEN = VERSION_LEN + NUM_SIGNING_KEYS * 64 +SIGDATA_LEN = SIGNING_PUBKEYS_DATA_LEN + FIRMWARE_DATA_LEN def main() -> int: """Main function""" + try: filename = sys.argv[1] except IndexError: @@ -39,21 +45,22 @@ def main() -> int: return 1 with open(filename, "rb") as fileobj: - binary = fileobj.read() + signed_firmware = fileobj.read() - try: - magic, sigdata, firmware = parse_signed_firmware(binary) - except ValueError as exception: - print(exception) + # Split signed firmware into sigdata and firmware + if len(signed_firmware) < SIGDATA_LEN: + print("firmware too small") return 1 + magic, rest = signed_firmware[:MAGIC_LEN], signed_firmware[MAGIC_LEN:] + sigdata, firmware = rest[:SIGDATA_LEN], rest[SIGDATA_LEN:] print( "The following information assumes the provided binary was signed correctly; " "the signatures are not being verified." ) - if magic == SIGDATA_MAGIC_STANDARD: + if magic == MAGIC_MULTI: print("This is a Multi-edition firmware.") - elif magic == SIGDATA_MAGIC_BTCONLY: + elif magic == MAGIC_BTCONLY: print("This is a Bitcoin-only edition firmware.") else: print("Unrecognized firmware edition; magic =", magic.hex()) @@ -64,7 +71,7 @@ def main() -> int: "The hash of the unsigned firmware binary is (compare with reproducible build):" ) print(hashlib.sha256(firmware).hexdigest()) - version = sigdata[SIGNING_PUBKEYS_DATA_LEN:][:4] + version = sigdata[SIGNING_PUBKEYS_DATA_LEN:][:VERSION_LEN] print("The monotonic firmware version is:", struct.unpack("<I", version)[0]) print("The hash of the firmware as verified/shown by the bootloader is:") print(
fix broken Travis build
@@ -42,7 +42,15 @@ install: protobuf-c thrift - brew outdated libyaml || brew upgrade libyaml - - brew outdated json-c || brew upgrade json-c + - git clone https://github.com/json-c/json-c.git + - pushd json-c + - git reset --hard json-c-0.12.1-20160607 + - sh autogen.sh + - ./configure + - make + - make install + - popd + - rm -rf ./json-c - brew uninstall boost --ignore-dependencies - brew install homebrew/core/[email protected] - brew link [email protected] --force
Fix broken shell command
if (BUILD_TESTING) - add_test (NAME "test_bindings_elixir" COMMAND sh -c "cd ${CMAKE_CURRENT_BINARY_DIR} && mix test") + add_test (NAME "test_bindings_elixir" COMMAND sh -c "cd \"${CMAKE_CURRENT_BINARY_DIR}\" && mix test") endif (BUILD_TESTING)
unix/coverage: Init all pairheap test nodes before using them.
@@ -149,6 +149,9 @@ STATIC int pairheap_lt(mp_pairheap_t *a, mp_pairheap_t *b) { // ops array contain operations: x>=0 means push(x), x<0 means delete(-x) STATIC void pairheap_test(size_t nops, int *ops) { mp_pairheap_t node[8]; + for (size_t i = 0; i < MP_ARRAY_SIZE(node); ++i) { + mp_pairheap_init_node(pairheap_lt, &node[i]); + } mp_pairheap_t *heap = mp_pairheap_new(pairheap_lt); printf("create:"); for (size_t i = 0; i < nops; ++i) {
1. changed pin_type of Legacy Pairing from FIXED to VARIABLE to avoid authentication failure
2. provide compatibility with devices that do not support Secure Simple Pairing
Closes
Closes
@@ -412,8 +412,25 @@ static void bt_gap_event_handler(esp_bt_gap_cb_event_t event, esp_bt_gap_cb_para case ESP_BT_GAP_MODE_CHG_EVT: ESP_LOGI(TAG, "BT GAP MODE_CHG_EVT mode:%d", param->mode_chg.mode); break; + case ESP_BT_GAP_PIN_REQ_EVT: { + ESP_LOGI(TAG, "ESP_BT_GAP_PIN_REQ_EVT min_16_digit:%d", param->pin_req.min_16_digit); + if (param->pin_req.min_16_digit) { + ESP_LOGI(TAG, "Input pin code: 0000 0000 0000 0000"); + esp_bt_pin_code_t pin_code = {0}; + esp_bt_gap_pin_reply(param->pin_req.bda, true, 16, pin_code); + } else { + ESP_LOGI(TAG, "Input pin code: 1234"); + esp_bt_pin_code_t pin_code; + pin_code[0] = '1'; + pin_code[1] = '2'; + pin_code[2] = '3'; + pin_code[3] = '4'; + esp_bt_gap_pin_reply(param->pin_req.bda, true, 4, pin_code); + } + break; + } default: - ESP_LOGV(TAG, "BT GAP EVENT %s", bt_gap_evt_str(event)); + ESP_LOGW(TAG, "BT GAP EVENT %s", bt_gap_evt_str(event)); break; } } @@ -429,15 +446,11 @@ static esp_err_t init_bt_gap(void) #endif /* * Set default parameters for Legacy Pairing - * Use fixed pin code + * Use variable pin, input pin code when pairing */ - esp_bt_pin_type_t pin_type = ESP_BT_PIN_TYPE_FIXED; + esp_bt_pin_type_t pin_type = ESP_BT_PIN_TYPE_VARIABLE; esp_bt_pin_code_t pin_code; - pin_code[0] = '1'; - pin_code[1] = '2'; - pin_code[2] = '3'; - pin_code[3] = '4'; - esp_bt_gap_set_pin(pin_type, 4, pin_code); + esp_bt_gap_set_pin(pin_type, 0, pin_code); if ((ret = esp_bt_gap_register_callback(bt_gap_event_handler)) != ESP_OK) { ESP_LOGE(TAG, "esp_bt_gap_register_callback failed: %d", ret);
process: fix crash when parent process kstack size is reduced
@@ -817,7 +817,7 @@ int proc_copyexec(void) current->kstack = current->execkstack; kstack = current->execkstack + current->kstacksz - sizeof(cpu_context_t); - hal_memcpy(kstack, current->parentkstack + current->kstacksz - sizeof(cpu_context_t), sizeof(cpu_context_t)); + hal_memcpy(kstack, parent->kstack + current->kstacksz - sizeof(cpu_context_t), sizeof(cpu_context_t)); PUTONSTACK(kstack, void *, NULL); PUTONSTACK(kstack, void *, NULL);
modify incorrect comment on socket.h
CONFIG_CUSTOM_SOCKETS -> CONFIG_NET_SOCKET
#ifdef CONFIG_NET_SOCKET #include <net/lwip/sockets.h> #include <net/lwip/api.h> -#endif /* CONFIG_CUSTOM_SOCKETS */ +#endif /* CONFIG_NET_SOCKET */ /**************************************************************************** * Public Function Prototypes
naive: fix rank-filter
:: ++ filter-rank |= [=rank =event-list] - (skim event-list |=(=event =(rank.event event))) + (skim event-list |=(=event =(rank.event rank))) :: ++ filter-owner |= [owner=? =event-list]
fix qpack integer test 255
@@ -45,7 +45,7 @@ static uint8_t h3zero_pref127_valmax[] = { 0xFF, 0x80, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F }; static uint8_t h3zero_pref7_err1[] = { 0x07 }; static uint8_t h3zero_pref7_err2[] = { 0x07, 0xFF, 0xFF, 0x80, 0x80, 0x80 }; -static uint8_t h3zero_pref127_val255[] = { 0x1F, 0xE0, 0x01 }; +static uint8_t h3zero_pref127_val255[] = { 0x7F, 0x80, 0x01 }; typedef struct st_h3zero_test_integer_case_t { uint64_t test_value;
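For context on why 0x7F 0x80 0x01 is the right encoding of 255 with a 7-bit prefix: QPACK reuses the HPACK prefixed-integer scheme of RFC 7541 section 5.1. A value that does not fit in the prefix saturates the prefix (0x7F), and the remainder (255 - 127 = 128) follows in 7-bit groups with a continuation bit (0x80, then 0x01). The C sketch below only illustrates that scheme; it is not the h3zero/picoquic implementation.

#include <stdint.h>
#include <stdio.h>

/* Encode 'value' with an N-bit prefix (RFC 7541, section 5.1). Any
 * instruction/flag bits above the prefix are assumed to be zero here. */
static size_t encode_prefixed_int(uint8_t *out, unsigned prefix_bits, uint64_t value)
{
    uint64_t max_prefix = (1u << prefix_bits) - 1; /* 127 for a 7-bit prefix */
    size_t len = 0;

    if (value < max_prefix) {
        out[len++] = (uint8_t)value;       /* fits entirely in the prefix */
    } else {
        out[len++] = (uint8_t)max_prefix;  /* saturated prefix: 0x7F      */
        value -= max_prefix;               /* 255 - 127 = 128             */
        while (value >= 0x80) {
            out[len++] = (uint8_t)((value & 0x7F) | 0x80); /* continuation bit set */
            value >>= 7;
        }
        out[len++] = (uint8_t)value;       /* final byte: 0x01            */
    }
    return len;
}

int main(void)
{
    uint8_t buf[12];
    size_t n = encode_prefixed_int(buf, 7, 255);
    for (size_t i = 0; i < n; i++) {
        printf("0x%02X ", buf[i]);         /* prints: 0x7F 0x80 0x01 */
    }
    printf("\n");
    return 0;
}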
update patches for Linux 5.10
@@ -492,3 +492,15 @@ diff -rupN old/linux-5.10/include/linux/usb/chipidea.h linux-5.10/include/linux/ enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 +diff -rupN old/linux-5.10/scripts/gcc-plugins/Kconfig linux-5.10/scripts/gcc-plugins/Kconfig +--- old/linux-5.10/scripts/gcc-plugins/Kconfig ++++ linux-5.10/scripts/gcc-plugins/Kconfig +@@ -9,7 +9,7 @@ menuconfig GCC_PLUGINS + bool "GCC plugins" + depends on HAVE_GCC_PLUGINS + depends on CC_IS_GCC +- depends on $(success,test -e $(shell,$(CC) -print-file-name=plugin)/include/plugin-version.h) ++ depends on $(success,test -e $(shell,$(HOSTCC) -print-file-name=plugin)/include/plugin-version.h) + default y + help + GCC plugins are loadable modules that provide extra features to the
Added a test for EFRA
@@ -16,7 +16,9 @@ temp1=temp1.$label.grib temp2=temp2.$label.grib temp3=temp3.$label.grib - +# --------------------------- +# Stream EFAS +# --------------------------- # Create a starting GRIB with a basic local definition for MARS ${tools_dir}/grib_set -s tablesVersion=19,setLocalDefinition=1,stream=efas $ECCODES_SAMPLES_PATH/GRIB2.tmpl $sample @@ -83,6 +85,20 @@ for t in $types; do #${tools_dir}/grib_dump -Da $temp1 | grep mars.step done +# --------------------------- +# Stream EFRA +# --------------------------- +${tools_dir}/grib_set -s tablesVersion=19,setLocalDefinition=1,stream=efra,type=fc $ECCODES_SAMPLES_PATH/GRIB2.tmpl $sample + +# Test a non-ensemble, instantaneous field +${tools_dir}/grib_set -s productDefinitionTemplateNumber=70,typeOfPostProcessing=1 $sample $temp1 +${tools_dir}/grib_set -s setLocalDefinition=1,localDefinitionNumber=41,yearOfReanalysis=2019,monthOfReanalysis=12,dayOfReanalysis=13 $temp1 $temp2 +# ${tools_dir}/grib_ls -m $temp2 +grib_check_key_equals $temp2 mars.hdate,mars.date '20070323 20191213' +# This stream does not have the 'anoffset' key +anoffset=`${tools_dir}/grib_get -f -p mars.anoffset $temp2` +[ "anoffset" = "not_found" ] + # Clean up rm -f $sample $temp1 $temp2 $temp3
nvbios/gpio: 0x5d output is most likely another PWM
@@ -238,6 +238,7 @@ static struct enum_val gpio_spec_out[] = { { 0x5a, "THERMAL_LOAD_STEP_0" }, { 0x5b, "THERMAL_LOAD_STEP_1" }, { 0x5c, "PWM_0" }, + { 0x5d, "PWM_1" }, { 0x80, "SOR0_PANEL_BACKLIGHT_LEVEL" }, { 0x81, "SOR0_PANEL_POWER" }, { 0x82, "SOR0_PANEL_BACKLIGHT_ON" },
Document new native platform defines
#define LOG_MODULE "Native" #define LOG_LEVEL LOG_LEVEL_MAIN +/*---------------------------------------------------------------------------*/ +/** + * \name Native Platform Configuration + * + * @{ + */ + +/* + * Defines the maximum number of file descriptors monitored by the platform + * main loop. + */ #ifdef SELECT_CONF_MAX #define SELECT_MAX SELECT_CONF_MAX #else #define SELECT_MAX 8 #endif +/* + * Defines the timeout (in msec) of the select operation if no monitored file + * descriptors becomes ready. + */ #ifdef SELECT_CONF_TIMEOUT #define SELECT_TIMEOUT SELECT_CONF_TIMEOUT #else #define SELECT_TIMEOUT 1000 #endif +/* + * Adds the STDIN file descriptor to the list of monitored file descriptors. + */ #ifdef SELECT_CONF_STDIN #define SELECT_STDIN SELECT_CONF_STDIN #else #define SELECT_STDIN 1 #endif +/** @} */ +/*---------------------------------------------------------------------------*/ static const struct select_callback *select_callback[SELECT_MAX]; static int select_max = 0;
Fix emenu crash from commit
@@ -3309,10 +3309,11 @@ VOID PhMwpUpdateUsersMenu( escapedMenuText = PhEscapeStringForMenuPrefix(&menuText->sr); PhDereferenceObject(menuText); - PhInsertEMenuItem(UsersMenu, userMenu = PhCreateEMenuItem(0, IDR_USER, escapedMenuText->Buffer, NULL, UlongToPtr(sessions[i].SessionId)), -1); + userMenu = PhCreateEMenuItem(PH_EMENU_TEXT_OWNED, IDR_USER, PhAllocateCopy(escapedMenuText->Buffer, escapedMenuText->Length + sizeof(WCHAR)), NULL, UlongToPtr(sessions[i].SessionId)); PhLoadResourceEMenuItem(userMenu, PhInstanceHandle, MAKEINTRESOURCE(IDR_USER), 0); + PhInsertEMenuItem(UsersMenu, userMenu, -1); - PhAutoDereferenceObject(escapedMenuText); + PhDereferenceObject(escapedMenuText); } WinStationFreeMemory(sessions);
Fix topics decoding to work with zero values
:: tox: list of hex words |* [tox=(lest @ux) tys=(list etyp)] =- (decode-arguments (crip -) tys) - %+ render-hex-bytes (mul 32 (lent tox)) %+ roll `(list @ux)`tox - |= [top=@ux tos=@] - (cat 8 top tos) + |= [top=@ tos=tape] + (weld tos (render-hex-bytes 32 top)) :: ++ decode-results :: rex: string of hex bytes with leading 0x.
wifi: remove esp_phy_enable/disable IRAM_attr
esp_phy_enable and esp_phy_disable were declared both with IRAM attr and with (noflash) in the linker fragment file, causing ldgen to emit warnings. Remove IRAM_attr and let the linker fragment decide placement.
@@ -195,7 +195,7 @@ IRAM_ATTR void esp_phy_common_clock_disable(void) wifi_bt_common_module_disable(); } -IRAM_ATTR void esp_phy_enable(void) +void esp_phy_enable(void) { _lock_acquire(&s_phy_access_lock); @@ -231,7 +231,7 @@ IRAM_ATTR void esp_phy_enable(void) _lock_release(&s_phy_access_lock); } -IRAM_ATTR void esp_phy_disable(void) +void esp_phy_disable(void) { _lock_acquire(&s_phy_access_lock);
fix fdatasync bug
@@ -24400,13 +24400,9 @@ SQLITE_API int sqlite3_fullsync_count = 0; ** then simply compile with -Dfdatasync=fdatasync */ #if !defined(fdatasync) -#include "dfs.h" -#include "dfs_file.h" int fdatasync(fd) { - struct dfs_fd *dfs_fd; - dfs_fd = fd_get(fd); - return dfs_file_flush(dfs_fd); + fsync(fd); } #endif
out_stackdriver: fix metadata not assigned correctly
@@ -415,6 +415,7 @@ static int process_local_resource_id(struct flb_stackdriver *ctx, char *type) { int ret = -1; int first = FLB_TRUE; + int counter = 0; int len_k8s_container; int len_k8s_node; int len_k8s_pod; @@ -451,15 +452,26 @@ static int process_local_resource_id(struct flb_stackdriver *ctx, char *type) } /* Follow the order of fields in local_resource_id */ - if (!ctx->namespace_name) { + if (counter == 0) { + if (ctx->namespace_name) { + flb_sds_destroy(ctx->namespace_name); + } ctx->namespace_name = flb_sds_create(ptr->val); } - else if (!ctx->pod_name) { + else if (counter == 1) { + if (ctx->pod_name) { + flb_sds_destroy(ctx->pod_name); + } ctx->pod_name = flb_sds_create(ptr->val); } - else if (!ctx->container_name) { + else if (counter == 2) { + if (ctx->container_name) { + flb_sds_destroy(ctx->container_name); + } ctx->container_name = flb_sds_create(ptr->val); } + + counter++; } if (!ctx->namespace_name || !ctx->pod_name || !ctx->container_name) { @@ -484,6 +496,9 @@ static int process_local_resource_id(struct flb_stackdriver *ctx, char *type) } if (ptr != NULL) { + if (ctx->node_name) { + flb_sds_destroy(ctx->node_name); + } ctx->node_name = flb_sds_create(ptr->val); } } @@ -510,12 +525,20 @@ static int process_local_resource_id(struct flb_stackdriver *ctx, char *type) } /* Follow the order of fields in local_resource_id */ - if (!ctx->namespace_name) { + if (counter == 0) { + if (ctx->namespace_name) { + flb_sds_destroy(ctx->namespace_name); + } ctx->namespace_name = flb_sds_create(ptr->val); } - else if (!ctx->pod_name) { + else if (counter == 1) { + if (ctx->pod_name) { + flb_sds_destroy(ctx->pod_name); + } ctx->pod_name = flb_sds_create(ptr->val); } + + counter++; } if (!ctx->namespace_name || !ctx->pod_name) {
add data partition to python
@@ -1548,6 +1548,7 @@ class CatBoostClassifier(CatBoost): max_bin=None, scale_pos_weight=None, gpu_cat_features_storage=None, + data_partition=None, **kwargs ): if objective is not None: @@ -1874,6 +1875,7 @@ class CatBoostRegressor(CatBoost): eta=None, max_bin=None, gpu_cat_features_storage=None, + data_partition=None, **kwargs ): if objective is not None:
SOVERSION bump to version 5.1.10
@@ -39,7 +39,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 5) set(SYSREPO_MINOR_SOVERSION 1) -set(SYSREPO_MICRO_SOVERSION 9) +set(SYSREPO_MICRO_SOVERSION 10) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
initrd_fopen properly handles filenames beginning with numbers
@@ -60,14 +60,15 @@ fs_node_t* finddir_fs(fs_node_t* node, char* name) { fd_entry_t _tab[512]; static int _fd_count = 0; FILE* initrd_fopen(const char* filename, char* mode) { - //printf("initrd_fopen(\"%s\")\n", filename); + printf("initrd_fopen(\"%s\")\n", filename); //skip preceding ./ //TODO properly traverse file paths - while (!isalpha(*filename)) { + while (!isalnum(*filename)) { filename++; } fs_node_t* file = finddir_fs(fs_root, (char*)filename); + printf("Found node 0x%08x: %s\n", file, file->name); if (!file) { return NULL; }
Correct the no-dh and no-dsa fix
The condition wasn't quite right
@@ -60,7 +60,7 @@ my %conf_dependent_tests = ( "10-resumption.conf" => !$is_default_tls, "11-dtls_resumption.conf" => !$is_default_dtls, "19-mac-then-encrypt.conf" => !$is_default_tls, - "20-cert-select.conf" => !$is_default_tls && !$no_dh && !$no_dsa, + "20-cert-select.conf" => !$is_default_tls || $no_dh || $no_dsa, ); # Add your test here if it should be skipped for some compile-time
[chainmaker] change the header file function declaration
@@ -262,7 +262,7 @@ void BoatHlchainmakerTxDeInit(BoatHlchainmakerTx *tx_ptr); * @return * Return \c BOAT_SUCCESS if set success, otherwise return a error code. ******************************************************************************/ -BOAT_RESULT BoatHlchainmakerAddTxParam(BoatHlchainmakerTx *tx_ptr, const BCHAR *arg, ...); +BOAT_RESULT BoatHlchainmakerAddTxParam(BoatHlchainmakerTx *tx_ptr, BUINT8 length, const BCHAR *arg, ...); /*!****************************************************************************
hv: bugfix in min() and max() MACROs
These two MACROs shall each be wrapped so they evaluate as a single value, hence brackets should be used.
/** Roundup (x) to (y) aligned **/ #define roundup(x, y) (((x) + ((y) - 1UL)) & (~((y) - 1UL))) -#define min(x, y) ((x) < (y)) ? (x) : (y) +#define min(x, y) (((x) < (y)) ? (x) : (y)) -#define max(x, y) ((x) < (y)) ? (y) : (x) +#define max(x, y) (((x) < (y)) ? (y) : (x)) /** Replaces 'x' by the string "x". */ #define STRINGIFY(x) #x
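A standalone demonstration of the precedence problem the extra brackets fix (illustration only, not taken from the hypervisor source): without the outer parentheses, the low-precedence ternary absorbs whatever operator follows the macro call.

#include <stdio.h>

#define MIN_BAD(x, y)  ((x) < (y)) ? (x) : (y)     /* old, unwrapped form */
#define MIN_GOOD(x, y) (((x) < (y)) ? (x) : (y))   /* fixed, wrapped form */

int main(void)
{
    int a = 3, b = 5;
    /* Expands to ((a) < (b)) ? (a) : ((b) * 2): the "* 2" binds only to
     * the else branch, so this prints 3 instead of the intended 6. */
    printf("bad:  %d\n", MIN_BAD(a, b) * 2);
    /* The fully bracketed macro behaves as a single value: prints 6. */
    printf("good: %d\n", MIN_GOOD(a, b) * 2);
    return 0;
}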
doc: Sentences are short and written in one line.
# Documentation This document gives guidelines for contributors concerning Elektra's documentation. -This document takes preference to the actual situation, so if you see documentation -not according to this document, please [create an issue](https://issue.libelektra.org) -or directly fix it with your next PR. +This document takes preference to the actual situation. +If you see documentation not according to this document, please [create an issue](https://issues.libelektra.org). +Alternatively, you can directly fix it with your next PR. -> Note: It is always allowed to improve the documentation, in every PR, even if -> the documentation fix is completely unrelated. +> Note: It is always allowed to improve the documentation, in every PR, even if the documentation fix is completely unrelated. +> Preferably are several PRs, though. ## Target Groups @@ -27,26 +27,24 @@ There are 3 separate folders for these 3 groups: > Takeaway: > Every document must have a clear target group (user, developer or contributor). -> Sometimes it is clear from the directory sometimes it must be explicitly stated, -> e.g. tutorials. +> Sometimes it is clear from the directory sometimes it must be explicitly stated, e.g. tutorials. ## Orientation Each documentation should clearly be oriented to one of these 3 directions: 1. learning-oriented: first introduction is done via tutorials, it teaches the basics. -2. information-oriented: the README.md and API docs together are the references: they cover everything that someone needs to know about a [module](/doc/help/elektra-glossary.md). -3. understanding-oriented: the [doc/decisions](/doc/decisions) explain the "Why?" something is done as it is done (Rationale). +2. information-oriented: the README.md and API docs together are the references: + they cover everything that someone needs to know about a [module](/doc/help/elektra-glossary.md). +3. understanding-oriented: the [doc/decisions](/doc/decisions) explain the "Why?": + something is done as it is done (Rationale). In literature there is also goal-oriented but we prefer learning-oriented approaches. E.g. of course you might have the goal to write a new plugin. But why not also learn about plugins while creating a new plugin? > Takeaway: -> Don't try to combine different orientations in one document, -> instead split your documentation up in e.g. a README.md (information), -> tutorial (learning), -> decisions (understanding). +> Don't try to combine different orientations in one document, instead split your documentation up in e.g. a README.md (information), tutorial (learning), decisions (understanding). ## Criteria @@ -57,10 +55,12 @@ Elektra's documentation must fulfill: All explanation must be within the repo, e.g., in case the external information goes away. - It is consistent in its [terminology](/doc/help/elektra-glossary.md). - Spelling is American English with [spellings as defined here](/scripts/sed). +- Sentences are short and written in one line. + I.e. lines usually end with `.`, `:` or `;`. + Avoid line breaks in the middle of the sentence. +- Use enumerations or itemizations to keep a clear structure in the text. -> Note: -> Please extend [terminology](/doc/help/elektra-glossary.md) and -> [spellings](/scripts/sed) as needed. +> Note: Please extend [terminology](/doc/help/elektra-glossary.md) and [spellings](/scripts/sed) as needed. ## Completeness
apps/ocf_sample: client was not actually toggling the light value to the opposite value.
@@ -156,7 +156,7 @@ observe_light(oc_client_response_t *rsp) }; len = coap_get_payload(rsp->packet, &m, &data_off); - if (cbor_read_mbuf_attrs(m, data_off, len, attrs)) { + if (!cbor_read_mbuf_attrs(m, data_off, len, attrs)) { printf("OBSERVE_light: %d\n", state); light_state = state; }
BIER coverity fix for uninitialised return value on error
@@ -293,7 +293,7 @@ vl_api_bier_imp_add_t_handler (vl_api_bier_imp_add_t * mp) { vl_api_bier_imp_add_reply_t *rmp; vnet_main_t *vnm; - index_t bii; + index_t bii = ~0; int rv = 0; vnm = vnet_get_main ();
Fix coverity bug, dead code
@@ -2577,10 +2577,6 @@ static ACVP_RESULT acvp_build_kas_ecc_register_cap(ACVP_CTX *ctx, break; case ACVP_KAS_ECC_PARMSET: default: - if (i != ACVP_KAS_ECC_MODE_NONE) { - ACVP_LOG_ERR("Unsupported KAS-ECC kdf %d", kdf); - return ACVP_INVALID_ARG; - } break; } switch (scheme) {
Fix oesign test without quote provider
#include <openenclave/host.h> #include <openenclave/internal/error.h> #include <openenclave/internal/load.h> +#include <openenclave/internal/sgx/tests.h> #include <openenclave/internal/tests.h> #include <stdio.h> #include "../host/sgx/cpuid.h" @@ -93,7 +94,10 @@ int main(int argc, const char* argv[]) } } - if (_is_kss_supported()) + /* check_kss_extended_ids currently assumes the quote provider is available. + * Skip if there is no quote provider for now. + */ + if (_is_kss_supported() && oe_has_sgx_quote_provider()) { result = check_kss_extended_ids( enclave,
OcMemoryLib: Fix casting issues
closes acidanthera/bugtracker#798
@@ -631,7 +631,7 @@ OcSplitMemoryEntryByAttribute ( } NewMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize); - DiffPages = EFI_SIZE_TO_PAGES (MemoryAttribte->PhysicalStart - MemoryMapEntry->PhysicalStart); + DiffPages = (UINTN) EFI_SIZE_TO_PAGES (MemoryAttribte->PhysicalStart - MemoryMapEntry->PhysicalStart); CopyMem ( NewMemoryMapEntry, MemoryMapEntry, @@ -673,7 +673,7 @@ OcSplitMemoryEntryByAttribute ( } NewMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize); - DiffPages = MemoryMapEntry->NumberOfPages - MemoryAttribte->NumberOfPages; + DiffPages = (UINTN) (MemoryMapEntry->NumberOfPages - MemoryAttribte->NumberOfPages); CopyMem ( NewMemoryMapEntry, MemoryMapEntry,
LICENSE: add license for apps/wireless/wapi
add the license for apps/wireless/wapi
@@ -587,4 +587,37 @@ apps/canutils/libcanutils Send feedback to <[email protected]> +apps/wireless/wapi +================== + + Copyright (C) 2011, 2017 Gregory Nutt. All rights reserved. + Author: Gregory Nutt <[email protected]> + + Adapted for NuttX from WAPI: + + Copyright (c) 2010, Volkan YAZICI <[email protected]> + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + - Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + - Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Add PhSetThreadIdealProcessor
@@ -1077,6 +1077,31 @@ PhGetThreadIdealProcessor( ); } +FORCEINLINE +NTSTATUS +PhSetThreadIdealProcessor( + _In_ HANDLE ThreadHandle, + _In_ PPROCESSOR_NUMBER ProcessorNumber, + _Out_opt_ PPROCESSOR_NUMBER PreviousIdealProcessor + ) +{ + NTSTATUS status; + PROCESSOR_NUMBER processorNumber; + + processorNumber = *ProcessorNumber; + status = NtSetInformationThread( + ThreadHandle, + ThreadIdealProcessorEx, + &processorNumber, + sizeof(PROCESSOR_NUMBER) + ); + + if (PreviousIdealProcessor) + *PreviousIdealProcessor = processorNumber; + + return status; +} + FORCEINLINE NTSTATUS PhGetThreadSuspendCount(
[core] dup FILE_CHUNK fd when splitting FILE_CHUNK
@@ -507,6 +507,8 @@ void chunkqueue_steal(chunkqueue * const restrict dest, chunkqueue * const restr case FILE_CHUNK: /* tempfile flag is in "last" chunk after the split */ chunkqueue_append_file(dest, c->mem, c->file.start + c->offset, use); + if (c->file.fd >= 0) + dest->last->file.fd = fdevent_dup_cloexec(c->file.fd); break; } @@ -679,6 +681,8 @@ int chunkqueue_steal_with_tempfiles(chunkqueue * const restrict dest, chunkqueue /* partial chunk with length "use" */ /* tempfile flag is in "last" chunk after the split */ chunkqueue_append_file(dest, c->mem, c->file.start + c->offset, use); + if (c->file.fd >= 0) + dest->last->file.fd = fdevent_dup_cloexec(c->file.fd); c->offset += use; force_assert(0 == len);
Fix input select to prevent case where no button is selected
@@ -90,8 +90,10 @@ const renderButton = (id, value, onChange) => input => ( } onChange={() => { if (Array.isArray(value)) { - if (value.indexOf(input.key) > -1 && value.length > 1) { + if (value.indexOf(input.key) > -1) { + if (value.length > 1) { onChange(value.filter(i => i !== input.key)); + } } else { onChange([].concat(value, input.key)); }
Remove riseattacking member.
@@ -2509,7 +2509,6 @@ typedef struct entity e_update_mark update_mark; // Which updates are completed. ~~ - int riseattacking; // playing rise attack animation int edge; // in edge (unbalanced) int normaldamageflipdir; // used to reset backpain direction int frozen; // Flag to determine if an entity is frozen
moved output completely to hex format
@@ -445,7 +445,7 @@ if((anonceliste != NULL) && (anonceoutname != NULL)) if((memcmp(zeigerold->mac_ap, zeiger->mac_ap, 6) != 0) && (memcmp(zeigerold->mac_sta, zeiger->mac_sta, 6) != 0) && (memcmp(zeigerold->anonce, zeiger->anonce, 32) != 0)) { fwritetimestamphigh(zeiger->tv_sec, fhoutlist); - fprintf(fhoutlist, "%8x:", zeiger->tv_sec); + fprintf(fhoutlist, "%08x:", zeiger->tv_sec); fwriteaddr1addr2(zeiger->mac_sta, zeiger->mac_ap, fhoutlist); fprintf(fhoutlist, "%x:%016llx:", (int)zeiger->keyinfo, (unsigned long long int)zeiger->replaycount); fwritehexbuff(32, zeiger->anonce, fhoutlist); @@ -476,7 +476,7 @@ if((eapolliste != NULL) && (eapoloutname != NULL)) fprintf(fhoutlist, "%08x:", zeiger->tv_sec); fwriteaddr1addr2(zeiger->mac_sta, zeiger->mac_ap, fhoutlist); fprintf(fhoutlist, "%x:%016llx:", (int)zeiger->keyinfo, (unsigned long long int)zeiger->replaycount); - fprintf(fhoutlist, "%03d:", zeiger->authlen -4); + fprintf(fhoutlist, "%02x:", zeiger->authlen -4); fwritehexbuff(zeiger->authlen, zeiger->eapol, fhoutlist); zeiger++; for(c = 1; c < eapolcount; c++) @@ -487,7 +487,7 @@ if((eapolliste != NULL) && (eapoloutname != NULL)) fprintf(fhoutlist, "%08x:", zeiger->tv_sec); fwriteaddr1addr2(zeiger->mac_sta, zeiger->mac_ap, fhoutlist); fprintf(fhoutlist, "%x:%016llx:", (int)zeiger->keyinfo, (unsigned long long int)zeiger->replaycount); - fprintf(fhoutlist, "%03d:", zeiger->authlen -4); + fprintf(fhoutlist, "%02x:", zeiger->authlen -4); fwritehexbuff(zeiger->authlen, zeiger->eapol, fhoutlist); } zeigerold = zeiger;
tools: Produce user friendly error during install when the server is down Closes
@@ -565,11 +565,16 @@ class IDFTool(object): for retry in range(DOWNLOAD_RETRY_COUNT): local_temp_path = local_path + '.tmp' info('Downloading {} to {}'.format(archive_name, local_temp_path)) + try: urlretrieve(url, local_temp_path, report_progress if not global_non_interactive else None) sys.stdout.write("\rDone\n") + except Exception as e: + # urlretrieve could throw different exceptions, e.g. IOError when the server is down + # Errors are ignored because the downloaded file is checked a couple of lines later. + warn('Download failure {}'.format(e)) sys.stdout.flush() - if not self.check_download_file(download_obj, local_temp_path): - warn('Failed to download file {}'.format(local_temp_path)) + if not os.path.isfile(local_temp_path) or not self.check_download_file(download_obj, local_temp_path): + warn('Failed to download {} to {}'.format(url, local_temp_path)) continue rename_with_retry(local_temp_path, local_path) downloaded = True
docs(lv_obj_style) update add_style and remove_style function headers
@@ -67,10 +67,10 @@ void _lv_obj_style_init(void); /** * Add a style to an object. * @param obj pointer to an object - * @param part a part of the object to which the style should be added E.g. `LV_PART_MAIN` or `LV_PART_KNOB` - * @param state a state or combination of states to which the style should be assigned * @param style pointer to a style to add - * @example lv_obj_add_style_no_refresh(slider, LV_PART_KNOB, LV_STATE_PRESSED, &style1); + * @param selector OR-ed value of parts and state to which the style should be added + * @example lv_obj_add_style(btn, &style_btn, 0); //Default button style + * @example lv_obj_add_style(btn, &btn_red, LV_STATE_PRESSED); //Overwrite only some colors to red when pressed */ void lv_obj_add_style(struct _lv_obj_t * obj, lv_style_t * style, lv_style_selector_t selector); @@ -79,9 +79,9 @@ void lv_obj_add_style(struct _lv_obj_t * obj, lv_style_t * style, lv_style_selec * @param obj pointer to an object * @param style pointer to a style to remove. Can be NULL to check only the selector * @param selector OR-ed values of states and a part to remove only styles with matching selectors. LV_STATE_ANY and LV_PART_ANY can be used - * @example lv_obj_remove_style(obj, LV_PART_ANY, LV_STATE_ANY, &style); //Remove a specific style - * @example lv_obj_remove_style(obj, LV_PART_MAIN, LV_STATE_ANY, &style); //Remove all styles from the main part - * @example lv_obj_remove_style(obj, LV_PART_ANY, LV_STATE_ANY, NULL); //Remove all styles + * @example lv_obj_remove_style(obj, &style, LV_PART_ANY | LV_STATE_ANY); //Remove a specific style + * @example lv_obj_remove_style(obj, NULL, LV_PART_MAIN | LV_STATE_ANY); //Remove all styles from the main part + * @example lv_obj_remove_style(obj, NULL, LV_PART_ANY | LV_STATE_ANY); //Remove all styles */ void lv_obj_remove_style(struct _lv_obj_t * obj, lv_style_t * style, lv_style_selector_t selector);
SOVERSION bump to version 6.4.6
@@ -68,7 +68,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 6) set(SYSREPO_MINOR_SOVERSION 4) -set(SYSREPO_MICRO_SOVERSION 5) +set(SYSREPO_MICRO_SOVERSION 6) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
options/linux-headers: Add some missing v4l2 defines
#define V4L2_CAP_RADIO 0x00040000 #define V4L2_CAP_READWRITE 0x01000000 +#define V4L2_CAP_VIDEO_CAPTURE_MPLANE 0x00001000 +#define V4L2_CAP_VIDEO_OUTPUT_MPLANE 0x00002000 + +#define V4L2_CAP_DEVICE_CAPS 0x80000000 + #ifdef __cplusplus extern "C" { #endif
build docs separately.
@@ -39,7 +39,8 @@ main = shakeMain $ do -- fake "." pats "sanity" $ const $ do need [ "tests-error" ] - need [ "docs", "lint", "weed" ] + need [ "docs" ] + need [ "lint", "weed" ] -- | Default things to run. --
admin/meta-packages: typo
@@ -332,7 +332,7 @@ Requires: scalasca-gnu%{gnu_major_ver}-openmpi%{openmpi_major_ver}%{PROJ_DELIM} Requires: scorep-gnu%{gnu_major_ver}-openmpi%{openmpi_major_ver}%{PROJ_DELIM} %ifnarch aarch64 Requires: wxparaver-gnu%{gnu_major_ver}-openmpi%{openmpi_major_ver}%{PROJ_DELIM} -$endif +%endif Requires: papi%{PROJ_DELIM} %description -n %{PROJ_NAME}-gnu%{gnu_major_ver}-openmpi%{openmpi_major_ver}-perf-tools Collection of performance tool builds for use with GNU compiler toolchain and the OpenMPI runtime
docs: fix FIH example command in design.md
@@ -1468,8 +1468,8 @@ are issued from the MCUboot source directory): ```sh $ mkdir docker $ ./ci/fih-tests_install.sh -$ FIH_LEVEL=MCUBOOT_FIH_PROFILE_MEDIUM BUILD_TYPE=RELEASE SKIP_SIZE=2 \ - DAMAGE_TYPE=SIGNATURE ./ci/fih-tests_run.sh +$ FIH_LEVEL=MEDIUM BUILD_TYPE=RELEASE SKIP_SIZE=2 DAMAGE_TYPE=SIGNATURE \ + ./ci/fih-tests_run.sh ``` On the travis CI the environment variables in the last command are set based on the configs provided in the `.travis.yaml`
chip/stm32/usb_isochronous.c: Format with clang-format BRANCH=none TEST=none
#include "usb_hw.h" #include "usb_isochronous.h" - /* Console output macro */ #define CPRINTF(format, args...) cprintf(CC_USB, format, ##args) #define CPRINTS(format, args...) cprints(CC_USB, format, ##args) @@ -65,8 +64,7 @@ static usb_uint *get_app_addr(struct usb_isochronous_config const *config, * Sets number of bytes written to application buffer. */ static void set_app_count(struct usb_isochronous_config const *config, - int dtog_value, - usb_uint count) + int dtog_value, usb_uint count) { if (dtog_value) btable_ep[config->endpoint].tx_count = count; @@ -74,13 +72,9 @@ static void set_app_count(struct usb_isochronous_config const *config, btable_ep[config->endpoint].rx_count = count; } -int usb_isochronous_write_buffer( - struct usb_isochronous_config const *config, - const uint8_t *src, - size_t n, - size_t dst_offset, - int *buffer_id, - int commit) +int usb_isochronous_write_buffer(struct usb_isochronous_config const *config, + const uint8_t *src, size_t n, + size_t dst_offset, int *buffer_id, int commit) { int dtog_value = get_tx_dtog(config); usb_uint *buffer = get_app_addr(config, dtog_value); @@ -142,14 +136,12 @@ void usb_isochronous_tx(struct usb_isochronous_config const *config) } int usb_isochronous_iface_handler(struct usb_isochronous_config const *config, - usb_uint *ep0_buf_rx, - usb_uint *ep0_buf_tx) + usb_uint *ep0_buf_rx, usb_uint *ep0_buf_tx) { int ret = -1; - if (ep0_buf_rx[0] == (USB_DIR_OUT | - USB_TYPE_STANDARD | - USB_RECIP_INTERFACE | + if (ep0_buf_rx[0] == + (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_INTERFACE | USB_REQ_SET_INTERFACE << 8)) { ret = config->set_interface(ep0_buf_rx[1], ep0_buf_rx[2]);
fix bug in septrafo (broke cdf97 wavelets)
@@ -1322,10 +1322,11 @@ static void md_septrafo_r(unsigned int D, unsigned int R, long dimensions[D], un void* nptr[1] = { ptr }; const long* nstrides[1] = { strides }; - dimensions[R] = 1; // we made a copy in md_septrafo2 long dimsR = dimensions[R]; long strsR = strides[R]; // because of clang + dimensions[R] = 1; // we made a copy in md_septrafo2 + NESTED(void, nary_septrafo, (void* ptr[])) { fun(dimsR, strsR, ptr[0]);
Fix setting of normal vector.
@@ -1515,9 +1515,9 @@ ts_bspline_compute_rmf(const tsBSpline *spline, /* Compute vector s_{i+1} of U_{i+1}. */ ts_vec3_cross(xn, xc, frames[i+1].binormal); - /* Set vector t_{i+1} and r_{i+1} of U_{i+1}. */ + /* Set vectors t_{i+1} and r_{i+1} of U_{i+1}. */ ts_vec3_set(frames[i+1].tangent, xn, 3); - ts_vec3_set(xc, frames[i+1].normal, 3); + ts_vec3_set(frames[i+1].normal, xc, 3); } TS_CATCH(err) ts_bspline_free(&deriv);
reSync asset after baking so there is a fresh copy of the output immediately available for feedback.
@@ -49,8 +49,12 @@ houdiniEngine_bakeAsset(string $assetNode) else delete $child; } - // don't disconnect output transforms, because they connect back to the asset - // and that connection is determinded by the asset options, and not the existence ot + // don't disconnect output transforms, since they connect back to the asset + // connection is determinded by the asset options + // and not a particular output + + // now sync it again - so there is a new output to provide feedback + houdiniEngine_syncAssetOutput $assetNode; return 1; }
Read latest build tools version id from buildTools folder
@@ -7,10 +7,10 @@ import getTmp from "../helpers/getTmp"; const rmdir = promisify(rimraf); -const EXPECTED_TOOLS_VERSION = "2.0.0"; - const ensureBuildTools = async () => { const buildToolsPath = `${buildToolsRoot}/${process.platform}-${process.arch}`; + const expectedBuildToolsVersionPath = `${buildToolsPath}/tools_version`; + const expectedToolsVersion = await fs.readFile(expectedBuildToolsVersionPath, "utf8"); const tmpPath = getTmp(); const tmpBuildToolsPath = `${tmpPath}/_gbstools`; @@ -20,7 +20,7 @@ const ensureBuildTools = async () => { // GBDKDIR doesn't work if path has spaces :-( try { const toolsVersion = await fs.readFile(tmpBuildToolsVersionPath, "utf8"); - if(toolsVersion !== EXPECTED_TOOLS_VERSION) { + if(toolsVersion !== expectedToolsVersion) { throw new Error("Incorrect tools version found"); } } catch (e) {
u3: updates u3qe_en_base16() to use slab api
@@ -9,21 +9,18 @@ const c3_y hex_y[16] = { '0', '1', '2', '3', '4', '5', '6', '7', u3_noun u3qe_en_base16(u3_atom len, u3_atom dat) { - c3_w len_w, byt_w; - if ( c3n == u3a_is_cat(len) ) { return u3m_bail(c3__fail); } else { - len_w = (c3_w)len; - byt_w = len_w * 2; + c3_w len_w = (c3_w)len; + u3i_slab sab_u; - if ( (byt_w / 2) != len_w ) { - return u3m_bail(c3__fail); - } - else { - c3_y* buf_y = u3a_malloc(byt_w); - c3_y* dat_y = buf_y; + u3i_slab_bare(&sab_u, 4, len_w); + sab_u.buf_w[sab_u.len_w - 1] = 0; + + { + c3_y* buf_y = sab_u.buf_y; c3_y inp_y; while ( len_w-- ) { @@ -32,13 +29,9 @@ u3qe_en_base16(u3_atom len, u3_atom dat) *buf_y++ = hex_y[inp_y >> 4]; *buf_y++ = hex_y[inp_y & 0xf]; } - - { - u3_noun pro = u3i_bytes(byt_w, dat_y); - u3a_free(dat_y); - return pro; - } } + + return u3i_slab_moot_bytes(&sab_u); } }
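Independent of the u3 slab allocator, the nibble-lookup base16 encoding the patch keeps can be sketched as a small standalone function (hypothetical helper, illustration only):

#include <stddef.h>

/* Standalone sketch of nibble-lookup base16 encoding; the caller must provide
 * an output buffer of at least 2 * len + 1 bytes. */
static void en_base16(const unsigned char *dat, size_t len, char *out)
{
    static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
                                  '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
    size_t i;

    for (i = 0; i < len; i++) {
        out[2 * i]     = hex[dat[i] >> 4];  /* high nibble first */
        out[2 * i + 1] = hex[dat[i] & 0xf]; /* then low nibble */
    }
    out[2 * len] = '\0';
}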
netkvm: log when device RSS feature is present
@@ -408,6 +408,7 @@ static void DumpVirtIOFeatures(PPARANDIS_ADAPTER pContext) {VIRTIO_F_RING_PACKED, "VIRTIO_F_RING_PACKED"}, {VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS" }, {VIRTIO_NET_F_RSC_EXT, "VIRTIO_NET_F_RSC_EXT" }, + {VIRTIO_NET_F_RSS, "VIRTIO_NET_F_RSS" }, }; UINT i; for (i = 0; i < sizeof(Features)/sizeof(Features[0]); ++i)
Filling cmsghdr with 0 to pass Go 1.11 message validation.
@@ -2167,9 +2167,12 @@ nxt_unit_send_mmap(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *port_id, int fd) msg.mf = 0; msg.tracking = 0; -#if (NXT_VALGRIND) + /* + * Fill all padding fields with 0. + * Code in Go 1.11 validate cmsghdr using padding field as part of len. + * See Cmsghdr definition and socketControlMessageHeaderAndData function. + */ memset(&cmsg, 0, sizeof(cmsg)); -#endif cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); cmsg.cm.cmsg_level = SOL_SOCKET; @@ -3001,9 +3004,7 @@ nxt_unit_send_port(nxt_unit_ctx_t *ctx, nxt_unit_port_id_t *dst, m.new_port.max_size = 16 * 1024; m.new_port.max_share = 64 * 1024; -#if (NXT_VALGRIND) memset(&cmsg, 0, sizeof(cmsg)); -#endif cmsg.cm.cmsg_len = CMSG_LEN(sizeof(int)); cmsg.cm.cmsg_level = SOL_SOCKET;
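The patch above depends on the whole control buffer being zero-filled so that cmsghdr padding bytes are deterministic. A minimal self-contained sketch of the same fd-passing pattern, assuming a connected UNIX-domain socket (hypothetical helper send_fd, not part of the Unit sources):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send one file descriptor as SCM_RIGHTS ancillary data; the control buffer is
 * zeroed first so padding bytes survive strict parsers such as Go 1.11's. */
static int send_fd(int sock, int fd)
{
    char dummy = 0;
    struct iovec iov = { &dummy, 1 };
    union {
        struct cmsghdr cm;
        char buf[CMSG_SPACE(sizeof(int))];
    } control;
    struct msghdr msg;
    struct cmsghdr *cmsg;

    memset(&control, 0, sizeof(control)); /* zero data and padding */
    memset(&msg, 0, sizeof(msg));

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = control.buf;
    msg.msg_controllen = sizeof(control.buf);

    cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

    return (int)sendmsg(sock, &msg, 0);
}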
correct tests for jenkins
@@ -96,7 +96,7 @@ rm -f snap_scatter_gather.log touch snap_scatter_gather.log if [ $duration = "SHORT" ]; then - tests=5 + tests=2 elif [ $duration = "NORMAL" ]; then tests=20 else @@ -104,20 +104,20 @@ else fi echo "Run $tests tests for each testpoint. (num = $num, scatter_size = $scatter_size)" -args=("-K1" "-K4" "-K16" "-K64" "-K256" "-K1024" "-K4096" "-K16384" \ - "-m0K1" "-m0K4" "-m0K16" "-m0K64" "-m0K256" "-m0K1024" "-m0K4096" "-m0K16384" \ - "-RK1" "-RK4" "-RK16" "-RK64" "-RK256" "-RK1024" "-RK4096" "-RK16384" \ - "-m0RK1" "-m0RK4" "-m0RK16" "-m0RK64" "-m0RK256" "-m0RK1024" "-m0RK4096" "-m0RK16384") -#args=("-W" " " "-RK4" "-RK16" "-RK64" "-RK256" "-RK2048" "-RK8192") +Kargs=("-K1" "-K4" "-K16" "-K64" "-K256" "-K1024" "-K4096" "-K16384") +Rargs=(" " "-R") +Margs=("-m2" "-m3") -for arg in ${args[*]} ; do - echo "Testpoint: -n$num -s$scatter_size $arg" >> snap_scatter_gather.log +for Karg in ${Kargs[*]} ; do +for Rarg in ${Rargs[*]} ; do +for Marg in ${Margs[*]} ; do + echo "Testpoint: -n$num -s$scatter_size $Karg $Rarg $Marg" >> snap_scatter_gather.log for i in $(seq 1 ${tests}) ; do if [ $verbose -eq 1 ]; then echo -n "Run $i: " fi #eval "echo 3 > /proc/sys/vm/drop_caches" - cmd="snap_scatter_gather -C${snap_card} -n$num -s$scatter_size -t250 $arg \ + cmd="snap_scatter_gather -C${snap_card} -n$num -s$scatter_size -t350 $Karg $Rarg $Marg \ >> snap_scatter_gather.log 2>&1" if [ $verbose -eq 1 ]; then echo -n "$cmd ..." @@ -138,13 +138,17 @@ for arg in ${args[*]} ; do fi done done +done +done +if [ $duration != "SHORT" ]; then cmd="process.awk snap_scatter_gather.log" eval ${cmd} if [ $? -ne 0 ]; then echo "failed" exit 1 fi +fi rm -f *.txt *.out echo "Test OK" exit 0
Fixes some issues in the pubsub zmq admin
@@ -515,8 +515,8 @@ static celix_status_t pubsub_zmqAdmin_connectEndpointToReceiver(pubsub_zmq_admin const char *url = celix_properties_get(endpoint, PUBSUB_ZMQ_URL_KEY, NULL); if (url == NULL) { - const char *admin = celix_properties_get(endpoint, PUBSUB_ENDPOINT_ADMIN_TYPE); - const char *type = celix_properties_get(endpoint, PUBSUB_ENDPOINT_TYPE); + const char *admin = celix_properties_get(endpoint, PUBSUB_ENDPOINT_ADMIN_TYPE, NULL); + const char *type = celix_properties_get(endpoint, PUBSUB_ENDPOINT_TYPE, NULL); L_WARN("[PSA ZMQ] Error got endpoint without a zmq url (admin: %s, type: %s)", admin , type); status = CELIX_BUNDLE_EXCEPTION; } else {
Updated lua.c to fix telnet issue
@@ -335,7 +335,8 @@ static int l_read_stdin (lua_State *L) { return 1; /* return false if pipe empty */ if (b[l-1] != '\n') { /* likewise if not CR terminated, then unread and ditto */ - lua_getfield(L, 1, "unread"); + lua_insert(L, 1); /* insert false return above the pipe */ + lua_getfield(L, 2, "unread"); lua_insert(L, 1); /* insert pipe.unread above the pipe */ lua_call(L, 2, 0); /* pobj:unread(line) */ return 1; /* return false */
Remove reference to legacy aliases for MAC and KDF
@@ -68,14 +68,10 @@ information on what parameters each implementation supports. =item B<-kdf-algorithms> Display a list of key derivation function algorithms. -If a line is of the form C<foo =E<gt> bar> then B<foo> is an alias for the -official algorithm name, B<bar>. =item B<-mac-algorithms> Display a list of message authentication code algorithms. -If a line is of the form C<foo =E<gt> bar> then B<foo> is an alias for the -official algorithm name, B<bar>. =item B<-cipher-commands>
update ya tool yfm 0.24.6
}, "yfm": { "formula": { - "sandbox_id": [407397469, 407398241, 407398546], + "sandbox_id": 440317482, "match": "YFM" },
travis: Add check for duplicated NID names between multiple libs resolve
@@ -49,19 +49,29 @@ def read_def_groups(): return definitions def read_nids(): - nids = dict() + errors = [] + user_nids = dict() + kernel_nids = dict() + nids = None with open(DB_FILE_PATH, 'r') as d: SECTION = None - for line in d.xreadlines(): + for line_no, line in enumerate(d.xreadlines()): line = line.strip() - k, v = line.split(':')[:2] + k, v = line.split(':')[:3] if not v.strip(): SECTION = k continue + if k.strip() == 'kernel': + if v.strip() == 'true': + nids = kernel_nids + else: + nids = user_nids if SECTION != 'functions': continue + if nids.get(k): + errors.append('%s: NID conflict %s' % (line_no + 1, k)) nids[k] = 1 - return nids + return dict(user_nids, **kernel_nids), errors def check_header_groups(definitions): errors = [] @@ -120,8 +130,9 @@ def check_function_nids(nids): return errors if __name__ == '__main__': - errors = check_header_groups(read_def_groups()) \ - + check_function_nids(read_nids()) + nids, errors = read_nids() + errors += check_header_groups(read_def_groups()) \ + + check_function_nids(nids) if len(errors): for e in errors: print e
Don't negotiate TLSv1.3 with the ossl_shim The ossl_shim doesn't know about TLSv1.3 so we should disable that protocol version for all tests for now. This fixes the current Travis failures. [extended tests]
@@ -533,6 +533,12 @@ static bssl::UniquePtr<SSL_CTX> SetupCtx(const TestConfig *config) { !SSL_CTX_set_max_proto_version(ssl_ctx.get(), TLS1_3_VERSION)) { return nullptr; } +#else + /* Ensure we don't negotiate TLSv1.3 until we can handle it */ + if (!config->is_dtls && + !SSL_CTX_set_max_proto_version(ssl_ctx.get(), TLS1_2_VERSION)) { + return nullptr; + } #endif std::string cipher_list = "ALL";
Fix DDF JavaScript numeric value conversion for Attr.val For some data types a string instead of a number was returned. If the expression was evaluated, weird results appeared, e.g.: "5" + 1 = "51" This fixes DDF state/humidity values.
@@ -154,16 +154,24 @@ QVariant JsZclAttribute::value() const case deCONZ::Zcl56BitBitMap: case deCONZ::Zcl56BitData: case deCONZ::Zcl56BitUint: + return QVariant::fromValue(quint64(attr->numericValue().u64)); + case deCONZ::Zcl64BitBitMap: case deCONZ::Zcl64BitUint: case deCONZ::Zcl64BitData: case deCONZ::ZclIeeeAddress: - return QString::number(attr->numericValue().u64); + return QString::number(quint64(attr->numericValue().u64)); + case deCONZ::Zcl8BitInt: + case deCONZ::Zcl16BitInt: + case deCONZ::Zcl24BitInt: + case deCONZ::Zcl32BitInt: case deCONZ::Zcl48BitInt: case deCONZ::Zcl56BitInt: + return QVariant::fromValue(qint64(attr->numericValue().s64)); + case deCONZ::Zcl64BitInt: - return QString::number(attr->numericValue().s64); + return QString::number(qint64(attr->numericValue().u64)); case deCONZ::ZclSingleFloat: return attr->numericValue().real;
Use local max_idle_timeout if remote one is 0
@@ -9265,6 +9265,7 @@ ngtcp2_tstamp ngtcp2_conn_get_idle_expiry(ngtcp2_conn *conn) { completion. */ if (!(conn->flags & NGTCP2_CONN_FLAG_HANDSHAKE_COMPLETED) || + conn->remote.transport_params.max_idle_timeout == 0 || (conn->local.settings.transport_params.max_idle_timeout && conn->local.settings.transport_params.max_idle_timeout < conn->remote.transport_params.max_idle_timeout)) {
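The rule the patch enforces, treating a zero max_idle_timeout as "no limit" and falling back to the other side's value, can be written as a small helper (hypothetical name and plain integers, not the ngtcp2 API):

#include <stdint.h>

/* A zero timeout on either side means that side imposes no limit; otherwise
 * the smaller of the two values wins. */
static uint64_t effective_idle_timeout(uint64_t local, uint64_t remote)
{
    if (remote == 0)
        return local;                       /* peer imposes no limit */
    if (local == 0)
        return remote;                      /* we impose no limit */
    return local < remote ? local : remote; /* smaller timeout wins */
}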
admin/docs: bump to v2.0
%include %{_sourcedir}/OHPC_macros Name: docs%{PROJ_DELIM} -Version: 1.3.9 +Version: 2.0.0 Release: 1 Summary: OpenHPC documentation License: BSD-3-Clause
Make dotnet module optional
@@ -3,7 +3,6 @@ MODULES = modules/tests.c MODULES += modules/pe.c MODULES += modules/elf.c MODULES += modules/math.c -MODULES += modules/dotnet.c if CUCKOO_MODULE MODULES += modules/cuckoo.c @@ -17,6 +16,10 @@ if HASH_MODULE MODULES += modules/hash.c endif +if DOTNET_MODULE +MODULES += modules/dotnet.c +endif + # This isn't really a module, but needs to be compiled with them. MODULES += modules/pe_utils.c
Correct debug output for padding options.
@@ -358,9 +358,14 @@ dio_input(void) goto discard; } - LOG_DBG("Incoming DIO (option, length) = (%u, %u)\n", subopt_type, len - 2); + LOG_DBG("Incoming DIO (option, length) = (%u, %u)\n", + subopt_type, len); switch(subopt_type) { + case RPL_OPTION_PAD1: + case RPL_OPTION_PADN: + LOG_DBG("PAD %u bytes\n", len); + break; case RPL_OPTION_DAG_METRIC_CONTAINER: if(len < 6) { LOG_WARN("Invalid DAG MC, len = %d\n", len);
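Pad1 and PadN get their own case because Pad1 carries no length byte at all, while PadN does. A generic sketch of walking such TLV options (hypothetical layout, not the Contiki-NG parser itself):

#include <stddef.h>

/* Walk a buffer of TLV options where type 0x00 (Pad1) is a single byte with no
 * length field and every other option is type, length, then `length` payload
 * bytes. */
static void walk_options(const unsigned char *opt, size_t total)
{
    size_t i = 0;

    while (i < total) {
        if (opt[i] == 0x00) { /* Pad1 */
            i += 1;
            continue;
        }
        if (i + 1 >= total)   /* truncated option, stop */
            break;
        i += 2 + opt[i + 1];  /* skip type, length and the option payload */
    }
}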
add fallback when accelerate fails, closes
#include "scs_blas.h" #include "util.h" -/* This file uses acceleration to improve the convergence of the - * ADMM iteration z^+ = \phi(z). At each iteration we need to solve a (small) - * linear system, we do this using LAPACK, first forming the normal equations - * and using ?posv (fastest, but bad numerical stability), if that fails we - * switch to using ?gels, which uses a QR factorization (slower, but better - * numerically). If this fails then we just don't do any acceleration this - * iteration, however we could fall back further to ?gelsy or other more - * robust methods if we wanted to. +/* This file uses acceleration to improve the convergence of the ADMM iteration + * z^+ = \phi(z). At each iteration we need to solve a (small) linear system, we + * do this using LAPACK ?gesv. If this fails then we just don't do any + * acceleration this iteration, however we could fall back further to ?gelsy or + * other more robust methods if we wanted to. */ struct SCS_ACCEL_WORK { @@ -204,11 +201,16 @@ scs_int accelerate(ScsWork *w, scs_int iter) { } /* solve linear system, new point stored in sol */ info = solve_with_gesv(w->accel, MIN(iter, k)); + /* check that info == 0 and fallback otherwise */ + if (info != 0) { + scs_printf("Call to accelerate failed with code %i, falling back to using " + "no acceleration\n", info); + RETURN 0; + } /* set [u;v] = sol */ memcpy(w->u, w->accel->sol, sizeof(scs_float) * l); memcpy(w->v, &(w->accel->sol[l]), sizeof(scs_float) * l); w->accel->total_accel_time += tocq(&accel_timer); - /* add check that info == 0 and fallback */ RETURN info; }