bit_writer_utils,Flush: quiet implicit conversion warnings. No change in object code. From clang-7 -fsanitize=implicit-integer-truncation: implicit conversion from type 'int32_t' (aka 'int') of value 287 (32-bit, signed) to type 'uint8_t' (aka 'unsigned char') changed the value to 31 (8-bit, unsigned)
@@ -70,7 +70,7 @@ static void Flush(VP8BitWriter* const bw) { const int value = (bits & 0x100) ? 0x00 : 0xff; for (; bw->run_ > 0; --bw->run_) bw->buf_[pos++] = value; } - bw->buf_[pos++] = bits; + bw->buf_[pos++] = bits & 0xff; bw->pos_ = pos; } else { bw->run_++; // delay writing of bytes 0xff, pending eventual carry.
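A minimal, standalone C sketch (not taken from the patched file) of what the sanitizer reports: 287 is 0x11F, so narrowing it to a uint8_t keeps only the low byte, 0x1F = 31. Masking with & 0xff yields the same value and the same object code, but makes the truncation explicit:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int32_t bits = 287;                  /* 0x0000011F */
    uint8_t narrowed = bits;             /* implicit truncation: what
                                            -fsanitize=implicit-integer-truncation flags */
    uint8_t masked = bits & 0xff;        /* same value (31), intent made explicit */
    printf("%u %u\n", narrowed, masked); /* prints "31 31" */
    return 0;
}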
tree data BUGFIX minor improvements
@@ -264,7 +264,10 @@ lyd_is_default(const struct lyd_node *node) const struct lyd_node_term *term; LY_ARRAY_COUNT_TYPE u; - assert(node->schema->nodetype & LYD_NODE_TERM); + if (!(node->schema->nodetype & LYD_NODE_TERM)) { + return 0; + } + term = (const struct lyd_node_term *)node; if (node->schema->nodetype == LYS_LEAF) { @@ -274,8 +277,8 @@ lyd_is_default(const struct lyd_node *node) } /* compare with the default value */ - if (leaf->type->plugin->compare(&term->value, leaf->dflt)) { - return 0; + if (!leaf->type->plugin->compare(&term->value, leaf->dflt)) { + return 1; } } else { llist = (const struct lysc_node_leaflist *)node->schema; @@ -285,13 +288,13 @@ lyd_is_default(const struct lyd_node *node) LY_ARRAY_FOR(llist->dflts, u) { /* compare with each possible default value */ - if (llist->type->plugin->compare(&term->value, llist->dflts[u])) { - return 0; + if (!llist->type->plugin->compare(&term->value, llist->dflts[u])) { + return 1; } } } - return 1; + return 0; } static LYD_FORMAT
Add some performance notes about early data. In particular, add information about the effect of Nagle's algorithm on early data. Fixes
@@ -168,6 +168,30 @@ In the event that the current maximum early data setting for the server is different to that originally specified in a session that a client is resuming with then the lower of the two values will apply. +=head1 NOTES + +The whole purpose of early data is to enable a client to start sending data to +the server before a full round trip of network traffic has occurred. Application +developers should ensure they consider optimisation of the underlying TCP socket +to obtain a performant solution. For example Nagle's algorithm is commonly used +by operating systems in an attempt to avoid lots of small TCP packets. In many +scenarios this is beneficial for performance, but it does not work well with the +early data solution as implemented in OpenSSL. In Nagle's algorithm the OS will +buffer outgoing TCP data if a TCP packet has already been sent which we have not +yet received an ACK for from the peer. The buffered data will only be +transmitted if enough data to fill an entire TCP packet is accumulated, or if +the ACK is received from the peer. The initial ClientHello will be sent as the +first TCP packet, causing the early application data from calls to +SSL_write_early_data() to be buffered by the OS and not sent until an ACK is +received for the ClientHello packet. This means the early data is not actually +sent until a complete round trip with the server has occurred which defeats the +objective of early data. + +In many operating systems the TCP_NODELAY socket option is available to disable +Nagle's algorithm. If an application opts to disable Nagle's algorithm +consideration should be given to turning it back on again after the handshake is +complete if appropriate. + =head1 RETURN VALUES SSL_write_early_data() returns 1 for success or 0 for failure. In the event of a
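A minimal sketch (assuming a plain, already-connected BSD socket fd; not code from the OpenSSL documentation itself) of turning Nagle's algorithm off with TCP_NODELAY before writing early data, and back on afterwards:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Toggle Nagle's algorithm on an already-connected TCP socket. */
static int set_tcp_nodelay(int fd, int on)
{
    return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
}

/* Usage sketch:
 *   set_tcp_nodelay(fd, 1);   // before SSL_write_early_data(), so the early
 *                             // data is not held back waiting for the ACK of
 *                             // the ClientHello
 *   ...handshake completes...
 *   set_tcp_nodelay(fd, 0);   // optionally re-enable Nagle afterwards
 */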
Specify python version in Makefile
@@ -310,10 +310,10 @@ SDK_SOURCE_PATH += lib_blewbxx lib_blewbxx_impl endif load: all - python -m ledgerblue.loadApp $(APP_LOAD_PARAMS) + python3 -m ledgerblue.loadApp $(APP_LOAD_PARAMS) delete: - python -m ledgerblue.deleteApp $(COMMON_DELETE_PARAMS) + python3 -m ledgerblue.deleteApp $(COMMON_DELETE_PARAMS) # import generic rules from the sdk include $(BOLOS_SDK)/Makefile.rules
website: bump npm package versions
}, "devDependencies": { "@iamadamjowett/angular-logger-max": "^1.2.3", - "angular": "^1.5.8", - "angular-animate": "^1.5.8", - "angular-breadcrumb": "^0.4.1", - "angular-clipboard": "^1.5.0", - "angular-file-saver": "^1.1.2", + "angular": "^1.8.2", + "angular-animate": "^1.8.2", + "angular-breadcrumb": "^0.5.0", + "angular-clipboard": "^1.7.0", + "angular-file-saver": "^1.1.3", "angular-marked": "^1.2.2", - "angular-messages": "^1.5.8", - "angular-sanitize": "^1.5.8", + "angular-messages": "^1.8.2", + "angular-sanitize": "^1.8.2", "angular-slugify": "^1.0.3", - "angular-translate": "^2.12.1", - "angular-translate-loader-static-files": "^2.12.1", + "angular-translate": "^2.18.4", + "angular-translate-loader-static-files": "^2.18.4", "angular-typewriter": "0.0.15", - "angular-ui-bootstrap": "^2.2.0", + "angular-ui-bootstrap": "^2.5.6", "angular-ui-notification": "^0.3.6", - "angular-ui-router": "^0.4.2", + "@uirouter/angularjs": "^0.4.2", "bootstrap": "^3.3.7", "connect-modrewrite": "^0.10.2", "docsearch.js": "latest", "fs-extra": "^0.30.0", - "grunt": "^1.1.0", - "grunt-browserify": "^5.0.0", + "grunt": "^1.3.0", + "grunt-browserify": "^6.0.0", "grunt-cli": "latest", "grunt-contrib-concat": "^1.0.1", - "grunt-contrib-connect": "^1.0.2", + "grunt-contrib-connect": "^3.0.0", "grunt-contrib-copy": "^1.0.0", "grunt-contrib-cssmin": "latest", "grunt-contrib-jshint": "latest", "grunt-contrib-uglify": "latest", "grunt-contrib-watch": "latest", "grunt-preprocess": "latest", - "highlight.js": "^10.4.1", - "jquery": "^3.3.1", + "highlight.js": "^10.7.1", + "jquery": "^3.6.0", "jshint-stylish": "latest", - "ng-tags-input": "^3.1.1", + "ng-tags-input": "^3.2.0", "pace-progress": "^1.0.2", - "rss": "^1.2.1", + "rss": "^1.2.2", "satellizer": "^0.15.5", - "slugify": "^1.0.2", - "sync-request": "^3.0.1", - "underscore": "^1.8.3", - "xmlbuilder": "^8.2.2" + "slugify": "^1.5.0", + "sync-request": "^6.1.0", + "underscore": "^1.12.1", + "xmlbuilder": "^15.1.1" }, "scripts": { "postinstall": "./node_modules/grunt-cli/bin/grunt install"
net/lora Change public/private network default setting to private.
@@ -92,4 +92,4 @@ syscfg.defs: description: > Sets public or private lora network. A value of 1 means the network is public; private otherwise - value: 1 + value: 0
Recursively look for property fields
@@ -482,7 +482,7 @@ const isPropertyField = (cmd, fieldName, args) => { const events = require("../events").default; const event = events[cmd]; if (!event) return false; - const field = event.fields.find((f) => f.key === fieldName); + const field = getField(cmd, fieldName, args); const fieldValue = args[fieldName]; return ( field &&
Fixes a clang error
@@ -100,7 +100,7 @@ int etcd_get(const char* key, char** value, int* modifiedIndex) { reply.header = NULL; /* will be grown as needed by the realloc above */ reply.headerSize = 0; /* no data at this point */ - int retVal; + int retVal = ETCDLIB_RC_ERROR; char *url; asprintf(&url, "http://%s:%d/v2/keys/%s", etcd_server, etcd_port, key); res = performRequest(url, GET, NULL, (void *) &reply);
extmod/btstack/btstack_config.h: Fix stm32 build error due to macro redefine.
#define HAVE_EMBEDDED_TIME_MS // Some USB dongles take longer to respond to HCI reset (e.g. BCM20702A). -#define HCI_RESET_RESEND_TIMEOUT_MS 1000 +//#define HCI_RESET_RESEND_TIMEOUT_MS 1000 #endif // MICROPY_INCLUDED_EXTMOD_BTSTACK_BTSTACK_CONFIG_H
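For context, the usual way to make such a define tolerant of an earlier definition is an #ifndef guard; whether that or simply commenting the line out (as the patch above does) is appropriate depends on which definition should win. A sketch of the guard pattern:

/* Only provide a default if the port/board configuration has not already
 * defined one; avoids "macro redefined" build errors. */
#ifndef HCI_RESET_RESEND_TIMEOUT_MS
#define HCI_RESET_RESEND_TIMEOUT_MS 1000
#endif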
update set-prerelease-version script
#!/bin/bash +DEVSTRING="pr" VERSION_FILE=VERSION +while [[ $# -gt 0 ]]; do + case $1 in + --devstring) + DEVSTRING="$2" + shift # past argument + shift # past value + ;; + --version_file) + VERSION_FILE="$2" + shift # past argument + shift # past value + ;; + -*|--*) + echo "Unknown option $1" + exit 1 + ;; + esac +done -[ "$CI" == "true" ] && { +[ "x${CI}" == "xtrue" ] && { git config --global --add safe.directory "$PWD" } @@ -16,44 +35,39 @@ for R in $REMOTES; do | sed -n '/HEAD branch/s/.*: //p') MASTER_BRANCH="refs/remotes/${R}/${MASTER}" #echo "Master-branch: ${MASTER_BRANCH}" - [ "$R" == "origin" ] && break + [ "x${R}" == "xorigin" ] && break done PREREL=$(git rev-list --count HEAD ^"$MASTER_BRANCH") -# if we use a version file, things are easy: -[ -e $VERSION_FILE ] && { +# use version file: VERSION=$(cat $VERSION_FILE) - PR_VERSION="${VERSION}.dev${PREREL}" +PR_VERSION="${VERSION}-${DEVSTRING}${PREREL}" echo "$PR_VERSION" > $VERSION_FILE echo "$PR_VERSION" -} + +TILDE_VERSION="$(echo $PR_VERSION | sed 's/-/~/g')" # if we store the version in debian changelog: [ -e debian/changelog ] && { # get the latest version - CHANGELOG_VERSION=$(cat debian/changelog \ + DEBIAN_VERSION=$(cat debian/changelog \ | grep "(.*) " \ | head -n 1 \ | cut -d\( -f 2 \ | cut -d\) -f 1) - DEBIAN_VERSION=$(echo "$CHANGELOG_VERSION" | cut -d- -f 1) - DEBIAN_RELEASE=$(echo "$CHANGELOG_VERSION" | cut -d- -f 2) - PR_VERSION="${DEBIAN_VERSION}-pr${PREREL}-${DEBIAN_RELEASE}" - sed "s/${CHANGELOG_VERSION}/${PR_VERSION}/" -i debian/changelog - echo "$PR_VERSION" + NEW_DEB_VERSION="${TILDE_VERSION}-1" + sed s%${DEBIAN_VERSION}%${NEW_DEB_VERSION}% -i debian/changelog } # lets see if RPM also needs a version to be set SPEC_FILES=$(ls rpm/*spec) -[ -z "$SPEC_FILES" ] || { - [ -z "$VERSION" ] || { - PR_VERSION="${VERSION}~pr${PREREL}" +[ -z "${SPEC_FILES}" ] || { + [ -z "${VERSION}" ] || { for SPEC_FILE in $SPEC_FILES; do grep -q "$VERSION" "$SPEC_FILE" && { # version found, needs update - sed "s/${VERSION}/${PR_VERSION}/" -i "$SPEC_FILE" + sed "s/${VERSION}/${TILDE_VERSION}/" -i "$SPEC_FILE" } done - echo "$PR_VERSION" } }
use opt* in janet_sync
@@ -860,13 +860,10 @@ static Janet janet_music(int32_t argc, Janet* argv) static Janet janet_sync(int32_t argc, Janet* argv) { janet_arity(argc, 0, 3); - u32 mask = 0; - s32 bank = 0; - bool toCart = false; - if (argc >= 1) mask = janet_getinteger(argv, 0); - if (argc >= 2) bank = janet_getinteger(argv, 1); - if (argc >= 3) toCart = janet_getboolean(argv, 2); + u32 mask = janet_optinteger(argv, argc, 0, 0); + s32 bank = janet_optinteger(argv, argc, 1, 0); + bool toCart = janet_optboolean(argv, argc, 2, false); tic_mem* memory = (tic_mem*)getJanetMachine(); tic_api_sync(memory, mask, bank, toCart);
fix a 2nd case where species names have null entries
@@ -4046,7 +4046,7 @@ avtSiloFileFormat::ReadSpecies(DBfile *dbfile, { oss.str(""); oss << (k+1); - if(spec->specnames != NULL) + if(spec->specnames && spec->specnames[spec_name_idx]) { // //add spec name if it exists @@ -4054,8 +4054,8 @@ avtSiloFileFormat::ReadSpecies(DBfile *dbfile, oss << " (" << string(spec->specnames[spec_name_idx]) << ")"; - spec_name_idx++; } + spec_name_idx++; tmp_string_vector.push_back(oss.str()); } speciesNames.push_back(tmp_string_vector);
more big endian fixes (pineapple) in pcapng options
@@ -3992,6 +3992,10 @@ while(1) { return; } + #ifdef BIG_ENDIAN_HOST + opthdr.option_code = byte_swap_16(opthdr.option_code); + opthdr.option_length = byte_swap_16(opthdr.option_length); + #endif if(endianess == 1) { opthdr.option_code = byte_swap_16(opthdr.option_code); @@ -4075,8 +4079,22 @@ while(1) { return; } - myaktreplaycount = filereplaycound[0x00] & 0xff; - myaktreplaycount += (filereplaycound[0x01] & 0xff) << 8; + myaktreplaycount = 0; + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x07] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x06] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x05] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x04] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x03] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x02] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x01] & 0xff); + myaktreplaycount = (myaktreplaycount << 8) + (filereplaycound[0x00] & 0xff); + #ifdef BIG_ENDIAN_HOST + myaktreplaycount = byte_swap_64(myaktreplaycount); + #endif + if(endianess == 1) + { + myaktreplaycount = byte_swap_64(myaktreplaycount); + } } else if(opthdr.option_code == OPTIONCODE_ANONCE) {
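The byte-by-byte assembly in the hunk above reads the 8-byte field as little-endian regardless of the host's byte order. A standalone sketch of the same idea as a helper (the name read_le64 is hypothetical, not from the patched file):

#include <stddef.h>
#include <stdint.h>

/* Assemble a 64-bit value from an 8-byte little-endian field,
 * independent of host byte order. */
static uint64_t read_le64(const uint8_t *p)
{
    uint64_t v = 0;
    for (size_t i = 8; i-- > 0; )
        v = (v << 8) | p[i];   /* p[7] ends up most significant, p[0] least */
    return v;
}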
Separate make clean and make lib in check_names
@@ -569,8 +569,15 @@ class CodeParser(): ) my_environment = os.environ.copy() my_environment["CFLAGS"] = "-fno-asynchronous-unwind-tables" + # Run make clean separately to lib to prevent unwanted behavior when + # make is invoked with parallelism. subprocess.run( - ["make", "clean", "lib"], + ["make", "clean"], + universal_newlines=True, + check=True + ) + subprocess.run( + ["make", "lib"], env=my_environment, universal_newlines=True, stdout=subprocess.PIPE,
noise.c: Check length of packets beforehand. Simplify the code by checking that incoming packet lengths are valid beforehand, instead of performing the checks within the code that processes the messages' content.
@@ -215,6 +215,23 @@ void noise_rand_bytes(void* bytes, size_t size) random_32_bytes_mcu(bytes); } +/** + * Checks that a packet's length is valid for the message type. + * + * @param[in] in_packet Packet to process. + * @return Whether the packet's length is valid. + */ +static bool _check_message_length(const Packet* in_packet) +{ + switch (in_packet->data_addr[0]) { + case OP_I_CAN_HAS_HANDSHAKE: + case OP_I_CAN_HAS_PAIRIN_VERIFICASHUN: + return in_packet->len == 1; + default: + return in_packet->len >= 1; + } +} + // processes client messages. The first two messages are handshake messages // (see XX in https://noiseprotocol.org/noise.html#interactive-handshake-patterns-fundamental). // After, all incoming messages are decrypted and outgoing messages encrypted. @@ -226,8 +243,13 @@ bool bb_noise_process_msg( const size_t max_out_len, bb_noise_process_msg_callback process_msg) { + if (!_check_message_length(in_packet)) { + out_packet->len = 1; + out_packet->data_addr[0] = OP_STATUS_FAILURE; + return false; + } // If this is a handshake init message, start the handshake. - if (in_packet->len == 1 && in_packet->data_addr[0] == OP_I_CAN_HAS_HANDSHAKE) { + if (in_packet->data_addr[0] == OP_I_CAN_HAS_HANDSHAKE) { if (!_setup_and_init_handshake()) { return false; } @@ -249,7 +271,7 @@ bool bb_noise_process_msg( } { // After the handshake we can perform the out of band pairing verification, if required by the // device or requested by the host app. - if (in_packet->len == 1 && in_packet->data_addr[0] == OP_I_CAN_HAS_PAIRIN_VERIFICASHUN) { + if (in_packet->data_addr[0] == OP_I_CAN_HAS_PAIRIN_VERIFICASHUN) { #if PLATFORM_BITBOX02 == 1 bool result = workflow_pairing_create(_handshake_hash); #elif PLATFORM_BITBOXBASE == 1 @@ -283,7 +305,7 @@ bool bb_noise_process_msg( return true; } } - if (in_packet->len >= 1 && in_packet->data_addr[0] == OP_NOISE_MSG) { + if (in_packet->data_addr[0] == OP_NOISE_MSG) { // Otherwise decrypt, process, encrypt. NoiseBuffer noise_buffer; #pragma GCC diagnostic push
apps/examples/elf: Fix build error. This patch fixes the build error caused by a previous distclean failure.
@@ -63,7 +63,7 @@ $(1)_$(2): endef all: build install -.PHONY: all build clean install +.PHONY: all build clean install distclean $(foreach DIR, $(BUILD_SUBDIRS), $(eval $(call DIR_template,$(DIR),build, all))) $(foreach DIR, $(BUILD_SUBDIRS), $(eval $(call DIR_template,$(DIR),clean,clean))) @@ -87,6 +87,10 @@ depend: clean: $(foreach DIR, $(BUILD_SUBDIRS), $(DIR)_clean) $(Q) rm -rf $(SYSTEM_BIN_DIR) +distclean: clean + $(call DELFILE, Make.dep) + $(call DELFILE, .depend) + -include Make.dep .PHONY: preconfig preconfig:
Fixed memory leaks in xfpga error tests.
@@ -76,6 +76,12 @@ class error_c_mock_p } virtual void TearDown() override { + if (fake_fme_token_.errors) { + free_error_list(fake_fme_token_.errors); + } + if (fake_port_token_.errors) { + free_error_list(fake_port_token_.errors); + } if (filter_) { EXPECT_EQ(fpgaDestroyProperties(&filter_), FPGA_OK); filter_ = nullptr; @@ -84,6 +90,14 @@ class error_c_mock_p system_->finalize(); } + void free_error_list(struct error_list *p) { + while (p) { + struct error_list *q = p->next; + free(p); + p = q; + } + } + fpga_properties filter_; std::string tmpsysfs_; test_platform platform_; @@ -431,6 +445,8 @@ TEST_P(error_c_mock_p, error_06) { } } + free_error_list(fake_fme_token_.errors); + // set error list to null fake_fme_token_.errors = nullptr; EXPECT_EQ(FPGA_NOT_FOUND, xfpga_fpgaClearError(t, 0));
Add script access to binding overriding.
@@ -28,6 +28,7 @@ int mapstrings_binding(ScriptVariant **varlist, int paramCount) "direction", "matching", "offset", + "overriding", "positioning", "sort_id", "tag", @@ -121,6 +122,13 @@ HRESULT openbor_get_binding_property(ScriptVariant **varlist , ScriptVariant **p break; + case _BINDING_OVERRIDING: + + ScriptVariant_ChangeType(*pretvar, VT_INTEGER); + (*pretvar)->lVal = (LONG)handle->overriding; + + break; + case _BINDING_POSITIONING: ScriptVariant_ChangeType(*pretvar, VT_PTR); @@ -247,6 +255,15 @@ HRESULT openbor_set_binding_property(ScriptVariant **varlist, ScriptVariant **pr break; + case _BINDING_OVERRIDING: + + if (SUCCEEDED(ScriptVariant_IntegerValue(varlist[ARG_VALUE], &temp_int))) + { + handle->overriding = temp_int; + } + + break; + case _BINDING_POSITIONING: // Read only.
Automation change only: move to CMake 3.24 in Dockerfile. The upcoming move to NRFConnect 2/Zephyr 3 requires a later version of CMake than the one that ships with Ubuntu. CMake 3.24 is now added specifically in the [Ubuntu] Dockerfile which builds the Docker image we use in the test automation.
@@ -89,8 +89,19 @@ RUN apt-get update && apt-get install -y wget unzip tar && # Cleanup apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -WORKDIR /workdir +# Install a later version of CMake (required by Zephyr 3) +RUN \ + if [ "$(arch)" != "aarch64" ]; then \ + wget -q -P /tmp https://cmake.org/files/v3.24/cmake-3.24.1-linux-x86_64.tar.gz && \ + tar --strip-components=1 -xf /tmp/cmake-3.24.1-linux-x86_64.tar.gz -C /usr/local && \ + rm -rf /tmp/*; \ + else \ + wget -q -P /tmp https://cmake.org/files/v3.24/cmake-3.24.1-linux-aarch64.tar.gz && \ + tar --strip-components=1 -xf /tmp/cmake-3.24.1-linux-aarch64.tar.gz -C /usr/local && \ + rm -rf /tmp/*; \ + fi +WORKDIR /workdir #*************************************************** # Add missing Python modules and apk packages here
linux/trace: save backtrace for the report
@@ -1023,6 +1023,7 @@ static void arch_traceExitSaveData(run_t* run, pid_t pid) { } } + uintptr_t savedBacktrace = run->backtrace; int fd = TEMP_FAILURE_RETRY(open(run->crashFileName, O_WRONLY | O_EXCL | O_CREAT, 0600)); if (fd == -1 && errno == EEXIST) { LOG_I("It seems that '%s' already exists, skipping", run->crashFileName); @@ -1061,7 +1062,7 @@ static void arch_traceExitSaveData(run_t* run, pid_t pid) { util_ssnprintf(run->report, sizeof(run->report), "DESCRIPTION: %s\n", description); if (funcCnt > 0) { util_ssnprintf( - run->report, sizeof(run->report), "STACK HASH: %016" PRIx64 "\n", run->backtrace); + run->report, sizeof(run->report), "STACK HASH: %016" PRIx64 "\n", savedBacktrace); util_ssnprintf(run->report, sizeof(run->report), "STACK:\n"); for (int i = 0; i < funcCnt; i++) { util_ssnprintf(run->report, sizeof(run->report), " <" REG_PD REG_PM "> ",
build: allow defined but empty FLB_NIGHTLY_BUILD
@@ -210,7 +210,7 @@ option(FLB_FILTER_RECORD_MODIFIER "Enable record_modifier filter" Yes) option(FLB_FILTER_TENSORFLOW "Enable tensorflow filter" No) option(FLB_FILTER_GEOIP2 "Enable geoip2 filter" Yes) -if(DEFINED FLB_NIGHTLY_BUILD) +if(DEFINED FLB_NIGHTLY_BUILD AND NOT "${FLB_NIGHTLY_BUILD}" STREQUAL "") FLB_DEFINITION_VAL(FLB_NIGHTLY_BUILD ${FLB_NIGHTLY_BUILD}) endif()
ensure unique build names for C++ azure pipeline jobs
@@ -64,7 +64,7 @@ jobs: Debug++: CC: gcc CXX: g++ - BuildType: debug + BuildType: debug-cxx cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON Debug Clang: CC: clang @@ -84,7 +84,7 @@ jobs: Debug++ Clang: CC: clang CXX: clang++ - BuildType: debug-clang + BuildType: debug-clang-cxx cmakeExtraArgs: -DCMAKE_BUILD_TYPE=Debug -DMI_DEBUG_FULL=ON -DMI_USE_CXX=ON steps: - task: CMake@1
dprint: name core containing types
:: A library for printing doccords =/ debug | => - |% + |% %dprint-types :> an overview of all named things in the type. :> :> each element in the overview list is either a documentation for a sublist
Add workspace property to files.
context.compile(fcfg) fcfg.project = prj + fcfg.workspace = prj.workspace fcfg.configs = {} fcfg.abspath = fname function fileconfig.addconfig(fcfg, cfg) local prj = cfg.project + local wks = cfg.workspace -- Create a new context object for this configuration-file pairing. -- The context has the ability to pull out configuration settings fsub.vpath = fcfg.vpath fsub.config = cfg fsub.project = prj + fsub.workspace = wks -- Set the context's base directory to the project's file system -- location. Any path tokens which are expanded in non-path fields
add comment to empty brackets
@@ -55,6 +55,7 @@ int od_target_module_add(od_logger_t *logger, od_module_t *modules, module_exists: if (logger == NULL) { + /* most probably its logger is not ready yet */ } else { od_log(logger, "od_load_module", NULL, NULL, "od_load_module: skip load module %s: was already loaded!", @@ -66,8 +67,7 @@ error_close_handle: od_dlclose(handle); error: err = od_dlerror(); - if (logger == NULL) { - } else { + if (logger) { od_log(logger, "od_load_module", NULL, NULL, "od_load_module: failed to load module %s", err); }
Add unit test for L2REQ_IINVALIDATE
@@ -557,7 +557,40 @@ module test_l2_cache(input clk, input reset); end end + ////////////////////////////////////////////////////////////// + // Send an L2REQ_IINVALIDATE. This is just a pass through + // that gets broadcasted to all cores. + ////////////////////////////////////////////////////////////// 30: + begin + assert(!l2_response_valid); + l2i_request_valid <= 1; + l2i_request[0].id <= 1; + l2i_request[0].packet_type = L2REQ_IINVALIDATE; + l2i_request[0].cache_type = CT_ICACHE; + l2i_request[0].address = ADDR0; + state <= state + 1; + end + + // Check response + 31: + begin + assert(!axi_bus.m_arvalid); + assert(!axi_bus.m_awvalid); + assert(!axi_bus.m_wvalid); + + if (l2_response_valid) + begin + assert(l2_response.core == 0); + assert(l2_response.id == 1); + assert(l2_response.packet_type == L2RSP_IINVALIDATE_ACK); + assert(l2_response.cache_type == CT_ICACHE); + // XXX the address isn't set. + state <= state + 1; + end + end + + 32: begin $display("PASS"); $finish;
Added additional information about the existing metrics exporter for the Prometheus monitoring system based on libipmctl to the README.md file
@@ -19,6 +19,8 @@ ipmctl refers to the following interface components: * libipmctl: An Application Programming Interface (API) library for managing PMems. * ipmctl: A Command Line Interface (CLI) application for configuring and managing PMems from the command line. +Also, metrics exporter for [Prometheus](https://prometheus.io/docs/introduction/overview/) based on libipmctl was provided. For more details take a look [here](https://github.com/intel/ipmctl-exporter) + ## Workarounds ### Slow Firmware Updates
pkg/gadgets: Use CO-RE tracer and switch back to bcc for biolatency.
@@ -17,9 +17,12 @@ package biolatency import ( "fmt" + log "github.com/sirupsen/logrus" + gadgetv1alpha1 "github.com/kinvolk/inspektor-gadget/pkg/apis/gadget/v1alpha1" "github.com/kinvolk/inspektor-gadget/pkg/gadgets" "github.com/kinvolk/inspektor-gadget/pkg/gadgets/biolatency/tracer" + coretracer "github.com/kinvolk/inspektor-gadget/pkg/gadgets/biolatency/tracer/core" standardtracer "github.com/kinvolk/inspektor-gadget/pkg/gadgets/biolatency/tracer/standard" ) @@ -92,11 +95,20 @@ func (t *Trace) Start(trace *gadgetv1alpha1.Trace) { } var err error + t.tracer, err = coretracer.NewTracer(trace.Spec.Node) + if err != nil { + trace.Status.OperationWarning = fmt.Sprint("failed to create core tracer. Falling back to standard one") + + // fallback to standard tracer + log.Infof("Gadget %s: falling back to standard tracer. CO-RE tracer failed: %s", + trace.Spec.Gadget, err) + t.tracer, err = standardtracer.NewTracer(trace.Spec.Node) if err != nil { - trace.Status.OperationError = fmt.Sprintf("Failed to start: %s", err) + trace.Status.OperationError = fmt.Sprintf("failed to create tracer: %s", err) return } + } t.started = true trace.Status.Output = ""
rename pk_psa_rsa_sign_ext param
@@ -197,7 +197,7 @@ static int rsa_verify_wrap( void *ctx, mbedtls_md_type_t md_alg, } #if defined(MBEDTLS_PSA_CRYPTO_C) -int mbedtls_pk_psa_rsa_sign_ext( psa_algorithm_t psa_alg_md, void *pk_ctx, +int mbedtls_pk_psa_rsa_sign_ext( psa_algorithm_t alg, void *pk_ctx, const unsigned char *hash, size_t hash_len, unsigned char *sig, size_t sig_size, size_t *sig_len ) @@ -224,7 +224,7 @@ int mbedtls_pk_psa_rsa_sign_ext( psa_algorithm_t psa_alg_md, void *pk_ctx, if( key_len <= 0 ) return( MBEDTLS_ERR_PK_BAD_INPUT_DATA ); psa_set_key_usage_flags( &attributes, PSA_KEY_USAGE_SIGN_HASH ); - psa_set_key_algorithm( &attributes, psa_alg_md ); + psa_set_key_algorithm( &attributes, alg ); psa_set_key_type( &attributes, PSA_KEY_TYPE_RSA_KEY_PAIR ); status = psa_import_key( &attributes, @@ -235,7 +235,7 @@ int mbedtls_pk_psa_rsa_sign_ext( psa_algorithm_t psa_alg_md, void *pk_ctx, ret = mbedtls_pk_error_from_psa( status ); goto cleanup; } - status = psa_sign_hash( key_id, psa_alg_md, hash, hash_len, + status = psa_sign_hash( key_id, alg, hash, hash_len, sig, sig_size, sig_len ); if( status != PSA_SUCCESS ) {
check heads first in sing and friends
@@ -606,8 +606,8 @@ _sang_x(u3_noun a, u3_noun b) _eq_no; } else { - _eq_push(a_u->hed, b_u->hed); _eq_push(a_u->tel, b_u->tel); + _eq_push(a_u->hed, b_u->hed); fam->returning = 1; } } @@ -775,8 +775,8 @@ _sung_x(u3_noun a, u3_noun b) _eq_no; } else { - _eq_push(a_u->hed, b_u->hed); _eq_push(a_u->tel, b_u->tel); + _eq_push(a_u->hed, b_u->hed); fam->returning = 1; } } @@ -864,8 +864,8 @@ _sing_x(u3_noun a, _eq_no; } else { - _eq_push(a_u->hed, b_u->hed); _eq_push(a_u->tel, b_u->tel); + _eq_push(a_u->hed, b_u->hed); fam->returning = 1; } }
Run $(SENSNIFF) directly
@@ -16,7 +16,6 @@ MAKE_NET=MAKE_NET_NULLNET # use a custom MAC driver: sensniff_mac_driver MAKE_MAC = MAKE_MAC_OTHER -PYTHON ?= python SENSNIFF = $(CONTIKI)/tools/sensniff/sensniff.py ifeq ($(BAUDRATE),) @@ -35,5 +34,5 @@ sniff: ifeq ($(wildcard $(SENSNIFF)), ) $(error Could not find the sensniff script. Did you run 'git submodule update --init' ?") else - $(PYTHON) $(SENSNIFF) $(SENSNIFF_FLAGS) + $(SENSNIFF) $(SENSNIFF_FLAGS) endif
Fix gamecore distribution
@@ -68,7 +68,7 @@ foreach ($Build in $AllBuilds) { $Headers = @(Join-Path $HeaderDir "msquic.h") - if ($Platform -eq "windows" -or $Platform -eq "uwp") { + if ($Platform -eq "windows" -or $Platform -eq "uwp" -or $Platform -eq "gamecore_console") { $Headers += Join-Path $HeaderDir "msquic_winuser.h" } else { $Headers += Join-Path $HeaderDir "msquic_posix.h" @@ -79,7 +79,7 @@ foreach ($Build in $AllBuilds) { $Binaries = @() - if ($Platform -eq "windows" -or $Platform -eq "uwp") { + if ($Platform -eq "windows" -or $Platform -eq "uwp" -or $Platform -eq "gamecore_console") { $Binaries += Join-Path $ArtifactsDir "msquic.dll" $Binaries += Join-Path $ArtifactsDir "msquic.pdb" } elseif ($Platform -eq "linux") { @@ -95,7 +95,7 @@ foreach ($Build in $AllBuilds) { $Libraries = @() - if ($Platform -eq "windows" -or $Platform -eq "uwp") { + if ($Platform -eq "windows" -or $Platform -eq "uwp" -or $Platform -eq "gamecore_console") { $Libraries += Join-Path $ArtifactsDir "msquic.lib" }
fix empty getDeepOlderThan
@@ -472,7 +472,7 @@ export const getDeepOlderThan = ( ) => ({ app: 'graph-store', path: `/graph/${ship}/${name}/node/siblings` + - `/${start.length > 0 ? 'older' : 'oldest'}` + + `/${start.length > 0 ? 'older' : 'newest'}` + `/kith/${count}${encodeIndex(start)}` });
OcMachoLib: Consider that the indirect Symbol Table might be separate from the Symbol Table.
@@ -35,8 +35,11 @@ InternalSymbolIsSane ( ASSERT (Context->SymbolTable != NULL); ASSERT (Context->Symtab->NumSymbols > 0); - ASSERT ((Symbol >= &Context->SymbolTable[0]) - && (Symbol < &Context->SymbolTable[Context->Symtab->NumSymbols])); + ASSERT (((Symbol >= &Context->SymbolTable[0]) + && (Symbol < &Context->SymbolTable[Context->Symtab->NumSymbols])) + || ((Context->DySymtab != NULL) + && (Symbol >= &Context->IndirectSymbolTable[0]) + && (Symbol < &Context->IndirectSymbolTable[Context->DySymtab->NumIndirectSymbols]))); // // Symbol->Section is implicitly verified by MachoGetSectionByIndex64() when // passed to it.
Only check sensor error for failure/success
@@ -510,7 +510,8 @@ static FLT handle_optimizer_results(survive_optimizer *mpfitctx, int res, const return -1; } bool solvedLHPoses = false; - FLT norm_error = result->bestnorm * d->sensor_variance * d->sensor_variance; + FLT sensor_error = sqrtf(mpfitctx->stats.sensor_error / mpfitctx->stats.sensor_error_cnt); + FLT norm_error = sensor_error; // result->bestnorm * d->sensor_variance * d->sensor_variance; bool error_failure = !general_optimizer_data_record_success(&d->opt, norm_error, soLocation, canPossiblySolveLHS); if (!status_failure && !error_failure) { quatnormalize(soLocation->Rot, soLocation->Rot);
fix ubsan warnings on model load
@@ -475,7 +475,10 @@ void TModelTrees::FBDeserialize(const NCatBoostFbs::TModelTrees* fbObj) { } if (fbObj->LeafValues()) { - LeafValues.assign(fbObj->LeafValues()->begin(), fbObj->LeafValues()->end()); + LeafValues.assign( + fbObj->LeafValues()->data(), + fbObj->LeafValues()->data() + fbObj->LeafValues()->size() + ); } if (fbObj->NonSymmetricStepNodes()) { NonSymmetricStepNodes.resize(fbObj->NonSymmetricStepNodes()->size()); @@ -506,7 +509,10 @@ void TModelTrees::FBDeserialize(const NCatBoostFbs::TModelTrees* fbObj) { FBS_ARRAY_DESERIALIZER(CtrFeatures) #undef FBS_ARRAY_DESERIALIZER if (fbObj->LeafWeights() && fbObj->LeafWeights()->size() > 0) { - LeafWeights.assign(fbObj->LeafWeights()->begin(), fbObj->LeafWeights()->end()); + LeafWeights.assign( + fbObj->LeafWeights()->data(), + fbObj->LeafWeights()->data() + fbObj->LeafWeights()->size() + ); } SetScaleAndBias({fbObj->Scale(), fbObj->Bias()}); }
Fix node.input() not working
@@ -324,7 +324,7 @@ int lua_main (int argc, char **argv) { } int lua_put_line(const char *s, size_t l) { - if (s == NULL || ++l < LUA_MAXINPUT || gLoad.line_position > 0) + if (s == NULL || ++l > LUA_MAXINPUT || gLoad.line_position > 0) return 0; c_memcpy(gLoad.line, s, l); gLoad.line[l] = '\0';
Trace new instructions
@@ -5002,6 +5002,9 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f DECODE_DEST_REGISTER(dreg, dreg_type, code, i, next_off, next_off); next_off++; // skip extended list tag + int size_args; + DECODE_INTEGER(size_args, code, i, next_off, next_off); + TRACE("make_fun3/3, fun_index=%i dreg=%c%i arity=%i\n", fun_index, T_DEST_REG(dreg_type, dreg), size_args); #ifdef IMPL_EXECUTE_LOOP uint32_t n_freeze = module_get_fun_freeze(mod, fun_index); @@ -5017,8 +5020,6 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f boxed_func[2] = term_from_int(fun_index); #endif - int size_args; - DECODE_INTEGER(size_args, code, i, next_off, next_off); for (int j = 0; j < size_args; j++) { term arg; DECODE_COMPACT_TERM(arg, code, i, next_off, next_off); @@ -5059,6 +5060,7 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f dreg_t reg_b; dreg_type_t reg_b_type; DECODE_DEST_REGISTER(reg_b, reg_b_type, code, i, next_off, next_off); + TRACE("recv_marker_bind/2: reg1=%c%i reg2=%c%i\n", T_DEST_REG(reg_a_type, reg_a), T_DEST_REG(reg_b_type, reg_b)); NEXT_INSTRUCTION(next_off); break; } @@ -5068,6 +5070,7 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f dreg_t reg_a; dreg_type_t reg_a_type; DECODE_DEST_REGISTER(reg_a, reg_a_type, code, i, next_off, next_off); + TRACE("recv_marker_clean/1: reg1=%c%i\n", T_DEST_REG(reg_a_type, reg_a)); NEXT_INSTRUCTION(next_off); break; } @@ -5077,6 +5080,7 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f dreg_t reg_a; dreg_type_t reg_a_type; DECODE_DEST_REGISTER(reg_a, reg_a_type, code, i, next_off, next_off); + TRACE("recv_marker_reserve/1: reg1=%c%i\n", T_DEST_REG(reg_a_type, reg_a)); NEXT_INSTRUCTION(next_off); break; } @@ -5086,6 +5090,7 @@ static bool maybe_call_native(Context *ctx, AtomString module_name, AtomString f dreg_t reg_a; dreg_type_t reg_a_type; DECODE_DEST_REGISTER(reg_a, reg_a_type, code, i, next_off, next_off); + TRACE("recv_marker_use/1: reg1=%c%i\n", T_DEST_REG(reg_a_type, reg_a)); NEXT_INSTRUCTION(next_off); break; }
Fix cmake error because ninja is missing. Needed to add ninja to the list of dependencies.
@@ -31,7 +31,7 @@ This requires Ubuntu 16 (Xenial Xeres) or later to get the proper package versions. It should work for other distributions, but you will probably need to change some package names. From a terminal, execute the following: - sudo apt-get -y install autoconf cmake make gcc g++ bison flex python \ + sudo apt-get -y install autoconf cmake make ninja gcc g++ bison flex python \ python3 perl emacs openjdk-8-jdk swig zlib1g-dev python-dev \ libxml2-dev libedit-dev libncurses5-dev libsdl2-dev gtkwave python3-pip pip3 install pillow
[libgui] Fix invalid pointer
@@ -488,7 +488,7 @@ void gui_run_event_loop_pass(bool prevent_blocking, bool* did_exit) { if (prevent_blocking) { should_block = false; } - _process_amc_messages(_g_application, should_block, &did_exit); + _process_amc_messages(_g_application, should_block, did_exit); // Dispatch any ready timers gui_dispatch_ready_timers(_g_application); // Redraw any dirty elements
Re-assign current thread on file read complete
@@ -661,6 +661,7 @@ closure_function(7, 1, void, file_read_complete, status, s) { thread_log(bound(t), "%s: status %v", __func__, s); + current_cpu()->current_thread = (nanos_thread)bound(t); sysreturn rv; if (is_ok(s)) { file f = bound(f);
ipsec: rewind missing from dual loop Type: fix Fixes:
@@ -149,6 +149,7 @@ ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, esp_header_t *esp0, *esp1; u32 len0, len1; u16 buf_adv0, buf_adv1; + u16 buf_rewind0, buf_rewind1; u32 tid0, tid1; ipsec_tunnel_if_t *t0, *t1; ipsec4_tunnel_key_t key40, key41; @@ -185,11 +186,12 @@ ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40) + sizeof (udp_header_t)); buf_adv0 = 0; + buf_rewind0 = ip4_header_bytes (ip40) + sizeof (udp_header_t); } else { esp0 = (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40)); - buf_adv0 = ip4_header_bytes (ip40); + buf_rewind0 = buf_adv0 = ip4_header_bytes (ip40); } /* NAT UDP port 4500 case, don't advance any more */ if (ip41->protocol == IP_PROTOCOL_UDP) @@ -198,11 +200,12 @@ ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, (esp_header_t *) ((u8 *) ip41 + ip4_header_bytes (ip41) + sizeof (udp_header_t)); buf_adv1 = 0; + buf_rewind1 = ip4_header_bytes (ip40) + sizeof (udp_header_t); } else { esp1 = (esp_header_t *) ((u8 *) ip41 + ip4_header_bytes (ip41)); - buf_adv1 = ip4_header_bytes (ip41); + buf_rewind1 = buf_adv1 = ip4_header_bytes (ip41); } } @@ -262,7 +265,8 @@ ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, else { next[0] = - ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40, buf_adv0); + ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40, + buf_rewind0); n_no_tunnel++; goto pkt1; } @@ -358,7 +362,8 @@ ipsec_if_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, else { next[1] = - ipsec_ip4_if_no_tunnel (node, b[1], esp1, ip41, buf_adv1); + ipsec_ip4_if_no_tunnel (node, b[1], esp1, ip41, + buf_rewind1); n_no_tunnel++; goto trace1; }
Fixed maxflag change
@@ -1459,7 +1459,7 @@ HistogramAttributes::ChangesRequireRecalculation(const HistogramAttributes &obj) { if (minFlag && (min != obj.GetMin())) return true; - if (minFlag && (max != obj.GetMax())) + if (maxFlag && (max != obj.GetMax())) return true; if (useBinWidths != obj.GetUseBinWidths()) return true;
don't initialize hdlcLastRxByte as HDLC_FLAG.
@@ -81,7 +81,6 @@ void openserial_init(void) { openserial_vars.debugPrintCounter = 0; // input - openserial_vars.hdlcLastRxByte = HDLC_FLAG; openserial_vars.hdlcBusyReceiving = FALSE; openserial_vars.hdlcInputEscaping = FALSE; openserial_vars.inputBufFillLevel = 0;
board: standardized how health voltage is reported between revisions.
@@ -80,7 +80,27 @@ int get_health_pkt(void *dat) { uint8_t started_alt; } *health = dat; - health->voltage = adc_get(ADCCHAN_VOLTAGE); + //Voltage will be measured in mv. 5000 = 5V + uint32_t voltage = adc_get(ADCCHAN_VOLTAGE); + if (revision == PANDA_REV_AB) { + //REVB has a 100, 27 (27/127) voltage divider + //Here is the calculation for the scale + //ADCV = VIN_S * (27/127) * (4095/3.3) + //RETVAL = ADCV * s = VIN_S*1000 + //s = 1000/((4095/3.3)*(27/127)) = 3.79053046 + + //Avoid needing floating point math + health->voltage = (voltage * 3791) / 1000; + } else { + //REVC has a 10, 1 (1/11) voltage divider + //Here is the calculation for the scale (s) + //ADCV = VIN_S * (1/11) * (4095/3.3) + //RETVAL = ADCV * s = VIN_S*1000 + //s = 1000/((4095/3.3)*(1/11)) = 8.8623046875 + + //Avoid needing floating point math + health->voltage = (voltage * 8862) / 1000; + } #ifdef PANDA health->current = adc_get(ADCCHAN_CURRENT);
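A small standalone sketch of the same fixed-point conversion, with the divider ratios and the 12-bit/3.3 V ADC assumptions taken from the comments in the hunk above (the function name is hypothetical):

#include <stdint.h>

/* Convert a raw 12-bit ADC reading (0..4095, 3.3 V full scale) to input
 * millivolts without floating point, using the per-revision scale factors
 * derived in the hunk above:
 *   rev A/B: 27/127 divider -> ~3.791 mV per count -> * 3791 / 1000
 *   rev C:   1/11   divider -> ~8.862 mV per count -> * 8862 / 1000
 */
static uint32_t adc_to_millivolts(uint32_t adc_counts, int is_rev_ab)
{
    return is_rev_ab ? (adc_counts * 3791u) / 1000u
                     : (adc_counts * 8862u) / 1000u;
}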
Updated check_var() to check if var.exp.var is an 'ast.Var.Name'.
@@ -625,7 +625,9 @@ function FunChecker:check_var(var) end elseif tag == "ast.Var.Dot" then - if var.exp._tag == "ast.Exp.Var" and builtins.modules[var.exp.var.name] then + if var.exp._tag == "ast.Exp.Var" and + var.exp.var._tag == "ast.Var.Name" and + builtins.modules[var.exp.var.name] then local module_name = var.exp.var.name local function_name = var.name local internal_name = module_name .. "." .. function_name
extend midas examples timeout in ci
@@ -341,6 +341,7 @@ jobs: - run: name: Run midasexamples tests command: .circleci/run-midasexamples-tests.sh + no_output_timeout: 20m chipyard-ariane-run-tests: executor: main-env steps:
Add Arduino fixes.
@@ -81,9 +81,17 @@ static mp_obj_t py_audio_init(uint n_args, const mp_obj_t *args, mp_map_t *kw_ar .interrupt_priority = PDM_IRQ_PRIORITY, }; + // Enable high frequency oscillator if not already enabled + if (NRF_CLOCK->EVENTS_HFCLKSTARTED == 0) { + NRF_CLOCK->TASKS_HFCLKSTART = 1; + while (NRF_CLOCK->EVENTS_HFCLKSTARTED == 0) { + } + } + // configure the sample rate and channels switch (frequency) { case 16000: + NRF_PDM->RATIO = ((PDM_RATIO_RATIO_Ratio80 << PDM_RATIO_RATIO_Pos) & PDM_RATIO_RATIO_Msk); nrfx_pdm_config.clock_freq = NRF_PDM_FREQ_1280K; break; case 41667:
Tests: added test for QUERY_STRING variable.
@@ -80,6 +80,40 @@ def application(environ, start_response): }, 'headers') self.assertEqual(r.content, str.encode(body), 'body') + def test_python_application_query_string(self): + code, name = """ + +def application(environ, start_response): + + start_response('200 OK', [ + ('Content-Length', '0'), + ('Request-Method', environ.get('REQUEST_METHOD')), + ('Request-Uri', environ.get('REQUEST_URI')), + ('Path-Info', environ.get('PATH_INFO')), + ('Query-String', environ.get('QUERY_STRING')) + ]) + return [] + +""", 'py_app' + + self.python_application(name, code) + self.put('/', self.conf % (self.testdir + '/' + name)) + + r = unit.TestUnitHTTP.get(uri='/?var1=val1&var2=val2', headers={ + 'Host': 'localhost' + }) + + self.assertEqual(r.status_code, 200, 'status') + headers = dict(r.headers) + headers.pop('Server') + self.assertDictEqual(headers, { + 'Content-Length': '0', + 'Request-Method': 'GET', + 'Query-String': 'var1=val1&var2=val2', + 'Request-Uri': '/?var1=val1&var2=val2', + 'Path-Info': '/' + }, 'headers') + @unittest.expectedFailure def test_python_application_server_port(self): code, name = """
neon/cvt: disable some code on 32-bit x86 which uses _mm_cvttsd_si64. This function is only available in 64-bit mode. Unfortunately there really isn't a good way to emulate it, at least as far as I know. Fixes
@@ -502,7 +502,7 @@ simde_vcvtq_s64_f64(simde_float64x2_t a) { simde_float64x2_private a_ = simde_float64x2_to_private(a); simde_int64x2_private r_; - #if defined(SIMDE_X86_SSE2_NATIVE) + #if defined(SIMDE_X86_SSE2_NATIVE) && (defined(SIMDE_ARCH_AMD64) || (defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE))) #if !defined(SIMDE_FAST_CONVERSION_RANGE) const __m128i i64_max_mask = _mm_castpd_si128(_mm_cmpge_pd(a_.m128d, _mm_set1_pd(HEDLEY_STATIC_CAST(simde_float64, INT64_MAX)))); const __m128d clamped_low = _mm_max_pd(a_.m128d, _mm_set1_pd(HEDLEY_STATIC_CAST(simde_float64, INT64_MIN))); @@ -590,7 +590,7 @@ simde_vcvtq_u64_f64(simde_float64x2_t a) { #if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE) SIMDE_CONVERT_VECTOR_(r_.values, a_.values); - #elif defined(SIMDE_X86_SSE2_NATIVE) + #elif defined(SIMDE_X86_SSE2_NATIVE) && (defined(SIMDE_ARCH_AMD64) || (defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE))) #if defined(SIMDE_X86_AVX512DQ_NATIVE) && defined(SIMDE_X86_AVX512VL_NATIVE) r_.m128i = _mm_cvttpd_epu64(a_.m128d); #else
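Where the intrinsic is unavailable, the portable path effectively truncates each lane in scalar C; a minimal sketch of that kind of fallback (assuming in-range inputs, and not SIMDe's actual code):

#include <stdint.h>

/* Scalar fallback sketch: truncate two doubles toward zero, as cvtt does.
 * Out-of-range inputs are undefined here, just as the hardware instruction's
 * result is indefinite for them. */
static void cvtt_f64x2_to_i64x2(const double a[2], int64_t r[2])
{
    for (int i = 0; i < 2; i++)
        r[i] = (int64_t) a[i];
}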
Don't die with the other "intcallback" callbacks
@@ -630,12 +630,18 @@ static void callJanetBoot(tic_mem* tic) } } +/* + * Find a function with the given name and execute it with the given value. + * If we can't find it, then it's not a problem. + */ static void callJanetIntCallback(tic_mem* tic, s32 value, void* data, const char* name) { tic_core* core = (tic_core*)tic; Janet pre_fn; - janet_dostring(core->currentVM, name, __func__, &pre_fn); + if (janet_dostring(core->currentVM, name, __func__, &pre_fn)) { + return; + } JanetFunction *fn = janet_unwrap_function(pre_fn); Janet result;
bip32: adds network type option for xpub/priv
?> =(0 x) :: sanity check %. [d i p] =< set-metadata - =+ v=(scag 4 t) - ?: =("xprv" v) (from-private k c) - ?: =("xpub" v) (from-public k c) + =+ v=(swag [1 3] t) + ?: =("prv" v) (from-private k c) + ?: =("pub" v) (from-public k c) !! :: ++ set-metadata ++ fingerprint (cut 3 [16 4] identity) :: ++ prv-extended - %+ en-b58c-bip32 0x488.ade4 + |= network=?(%main %regtest %testnet) + %+ en-b58c-bip32 (version-bytes network %prv %.y) (build-extended private-key) :: ++ pub-extended - %+ en-b58c-bip32 0x488.b21e + |= network=?(%main %regtest %testnet) + %+ en-b58c-bip32 (version-bytes network %pub %.y) (build-extended public-key) :: ++ build-extended :: ++ en-b58c-bip32 |= [v=@ k=@] + %- en-base58:mimes:html (en-base58check [4 v] [74 k]) :: :: base58check :: v: version bytes :: d: data |= [v=byts d=byts] - %- en-base58:mimes:html =+ p=[(add wid.v wid.d) (can 3 ~[d v])] =- (can 3 ~[4^- p]) %^ rsh 3 28 ++ hash160 |= d=@ (ripemd-160:ripemd:crypto 32 (sha-256:sha d)) +:: +++ version-bytes + |= [network=?(%main %regtest %testnet) type=?(%pub %prv) bip32=?] + ^- @ux + |^ + ?- type + %pub ?:(bip32 xpub-key pay-to-pubkey) + %prv ?:(bip32 xprv-key private-key) + == + :: + ++ pay-to-pubkey ?:(=(network %main) 0x0 0x6f) + ++ private-key ?:(=(network %main) 0x80 0xef) + ++ xpub-key ?:(=(network %main) 0x488.b21e 0x435.87cf) + ++ xprv-key ?:(=(network %main) 0x488.ade4 0x435.8394) + -- --
Run clang without optimization for SAW proofs. Optimized code sometimes leads to constructs that SAW doesn't handle, and unoptimized code should be semantically equivalent to optimized code.
@@ -61,7 +61,7 @@ ifeq ($(S2N_UNSAFE_FUZZING_MODE),1) endif -CFLAGS_LLVM = ${DEFAULT_CFLAGS} -fno-inline -emit-llvm -c +CFLAGS_LLVM = ${DEFAULT_CFLAGS} -fno-inline -emit-llvm -c -O0 $(BITCODE_DIR)%.bc: %.c clang $(CFLAGS_LLVM) -o $@ $<
Update appveyor.yml. Put the Quail board first in the develop matrix.
secure: WOqlCsnwTzfDPJFoNV/h8mEESIpG/9uFn1u6oE8hGZtXwIQQlsY+NyyLt9Y5xoFn matrix: + - BOARD_NAME: 'MBN_QUAIL' + BUILD_OPTIONS: '-DTARGET_SERIES=STM32F4xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=OFF -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=ON -DAPI_Windows.Devices.Spi=ON -DAPI_Windows.Devices.I2c=ON -DAPI_Windows.Devices.Pwm=ON -DAPI_Windows.Devices.SerialCommunication=ON' + NEEDS_DFU: 'True' - BOARD_NAME: 'ST_STM32F4_DISCOVERY' BUILD_OPTIONS: '-DTARGET_SERIES=STM32F4xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=ON -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=ON -DAPI_Windows.Devices.Spi=ON -DAPI_Windows.Devices.I2c=ON -DAPI_Windows.Devices.Pwm=ON' - BOARD_NAME: 'ST_STM32F429I_DISCOVERY' BUILD_OPTIONS: '-DTARGET_SERIES=STM32F0xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=OFF -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=OFF' - BOARD_NAME: 'ST_NUCLEO144_F746ZG' BUILD_OPTIONS: '-DTARGET_SERIES=STM32F7xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=ON -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=ON -DAPI_Windows.Devices.Spi=ON -DAPI_Windows.Devices.I2c=ON -DAPI_Windows.Devices.Pwm=ON' - - BOARD_NAME: 'MBN_QUAIL' - BUILD_OPTIONS: '-DTARGET_SERIES=STM32F4xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=OFF -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=ON -DAPI_Windows.Devices.Spi=ON -DAPI_Windows.Devices.I2c=ON -DAPI_Windows.Devices.Pwm=ON -DAPI_Windows.Devices.SerialCommunication=ON' - NEEDS_DFU: 'True' - BOARD_NAME: 'ST_STM32F769I_DISCOVERY' BUILD_OPTIONS: '-DTARGET_SERIES=STM32F7xx -DUSE_FPU=TRUE -DNF_FEATURE_DEBUGGER=TRUE -DSWO_OUTPUT=ON -DNF_FEATURE_RTC=ON -DAPI_Windows.Devices.Gpio=ON -DAPI_Windows.Devices.Spi=ON -DAPI_Windows.Devices.I2c=ON -DAPI_Windows.Devices.Pwm=ON -DAPI_Windows.Devices.SerialCommunication=ON' - BOARD_NAME: 'NETDUINO3_WIFI'
CI refactor: remove unused CLI_2 tests. Now that the previous commits have placed these tests in CLI_TESTS, we can remove references to this symbol.
'gpcheckcat', 'gpinitstandby', 'gpactivatestandby'] %} -{% set CLI_2_suites = [] %} {% set gppkg_additional_task = " - task: setup_gppkg_second_install @@ -120,7 +119,7 @@ groups: {% if "CLI" in test_sections %} ## -------------------------------------------------------------------- - gate_cli_start -{% for test_name in CLI_1_suites + CLI_2_suites %} +{% for test_name in CLI_1_suites %} - [[ test_name ]] {% endfor %} {% for test in CLI_TESTS %} @@ -299,7 +298,7 @@ groups: - name: CLI jobs: - gate_cli_start -{% for test_name in CLI_1_suites + CLI_2_suites %} +{% for test_name in CLI_1_suites %} - [[ test_name ]] {% endfor %} {% for test in CLI_TESTS %} @@ -1871,27 +1870,6 @@ jobs: {% endfor %} -{% for test_name in CLI_2_suites %} -- name: [[ test_name ]] - plan: - - aggregate: - - get: gpdb_src - params: - submodules: - - gpMgmt/bin/pythonSrc/ext - passed: [gate_cli_start] - - get: bin_gpdb - resource: bin_gpdb_centos6 - passed: [gate_cli_start] - trigger: [[ test_trigger ]] - - get: gpdb6-centos6-test - - task: [[ test_name ]] - file: gpdb_src/concourse/tasks/behave_gpdb.yml - image: gpdb6-centos6-test - params: - BEHAVE_TAGS: [[ test_name ]] - -{% endfor %} - name: check_centos plan: - aggregate: @@ -2278,8 +2256,7 @@ jobs: - gpperfmon_centos7 - gpperfmon_sles12 ## - resource_group_sles12 -{% for test_name in CLI_1_suites + - CLI_2_suites %} +{% for test_name in CLI_1_suites %} - [[ test_name ]] {% endfor %} {% for test in CLI_TESTS %} @@ -2305,8 +2282,7 @@ jobs: - icw_extensions_gpcloud_centos6 ## - client_loader_remote_test_aix - resource_group_centos6 -{% for test_name in CLI_1_suites + - CLI_2_suites %} +{% for test_name in CLI_1_suites %} - [[ test_name ]] {% endfor %} {% for test in CLI_TESTS %}
zeromqrecv: listen only on localhost for unit tests
@@ -48,7 +48,7 @@ static void * createTestSocket (void) usleep (TIME_HOLDOFF); void * pubSocket = zmq_socket (context, ZMQ_PUB); - int result = zmq_bind (pubSocket, "tcp://*:6001"); + int result = zmq_bind (pubSocket, "tcp://127.0.0.1:6001"); if (result != 0) { yield_error ("zmq_bind failed");
Add note about Visual Studio 2015 runtimes being a requirement, and how to know if you need them, to the readme.
@@ -25,6 +25,8 @@ Two builds are currently provided for each release: * 32 bit (Win32, SDL2 only) binaries also including source code. * 64 bit (x64, SDL2 only) binaries also including source code. +**Warning:** Run `samples.exe` as your first action, if you download these builds. If you get an error about `vcruntime140.dll` being missing, you need to install the two [Visual Studio 2015 runtimes](https://www.microsoft.com/en-us/download/details.aspx?id=53587). Make sure you install the 32-bit runtime. And make sure you also install the 64-bit runtime. + ### Compiling from source ### These are the recommended places to obtain the source code from:
Travis CI failed to install pytest, and stated that we need to install "python-pytest", so I'm trying that.
@@ -9,7 +9,7 @@ if ! command -v conda > /dev/null; then conda create --yes -n test python=$PYTHON_VERSION conda activate test conda install tectonic; - conda install -c conda-forge numpy=$NUMPY_VERSION scipy matplotlib setuptools pytest pytest-cov pip; + conda install -c conda-forge numpy=$NUMPY_VERSION scipy matplotlib setuptools python-pytest pytest-cov pip; fi # Display some info
build: add an option to build INF files for Win10 as DIFX compatible. INF files for Win10 have 10.0 OS ornamentation that makes them not installable with DIFX. Add a possibility to build INF files for Win10 with 6.3 ornamentation by setting the environment variable WIN10_INF_DIFX_COMPAT=1.
@@ -21,6 +21,7 @@ Common property definitions used by all drivers: <DDKINSTALLROOT Condition="'$(DDKINSTALLROOT)' == ''">C:\WINDDK\</DDKINSTALLROOT> <DDKVER Condition="'$(DDKVER)' == ''">7600.16385.1</DDKVER> <LegacyDDKDir>$(DDKINSTALLROOT)$(DDKVER)</LegacyDDKDir> + <INF_ARCH_FOR_WIN10>6.3</INF_ARCH_FOR_WIN10> </PropertyGroup> <PropertyGroup Label="Globals"> @@ -38,6 +39,10 @@ Common property definitions used by all drivers: <TargetArch>ARM64</TargetArch> </PropertyGroup> + <PropertyGroup Condition="'$(WIN10_INF_DIFX_COMPAT)'=='' OR '$(WIN10_INF_DIFX_COMPAT)'=='0'"> + <INF_ARCH_FOR_WIN10>10.0</INF_ARCH_FOR_WIN10> + </PropertyGroup> + <!-- _NT_TARGET_MAJ is one of the components of driver version (always reflects target OS version) --> <!-- $(TargetOS) --> <!-- InfArch turns into the TargetOS inf file directive --> @@ -45,7 +50,7 @@ Common property definitions used by all drivers: <_NT_TARGET_MAJ>100</_NT_TARGET_MAJ> <TargetOS>Win10</TargetOS> <DriverTargetPlatform>Universal</DriverTargetPlatform> - <InfArch>$(TargetArch).10.0</InfArch> + <InfArch>$(TargetArch).$(INF_ARCH_FOR_WIN10)</InfArch> </PropertyGroup> <PropertyGroup Condition="'$(Configuration)'=='Win8.1 Release' OR '$(Configuration)'=='Win8.1 Debug'"> <_NT_TARGET_MAJ>63</_NT_TARGET_MAJ>
update board.c and radio.c
@@ -43,21 +43,20 @@ static void button_init(void); extern int mote_main(void); -int main(void) -{ +int main(void) { return mote_main(); } //=========================== public ========================================== -void board_init(void) -{ +void board_init(void) { + // start low-frequency clock (LFCLK) nrf_drv_clock_init(); NRF_CLOCK->EVENTS_LFCLKSTARTED= 0; ///< part of workaround for 3.1 [20] RTC: Register values are invalid from http://infocenter.nordicsemi.com/pdf/nRF52840_Rev_1_Errata_v1.1.pdf nrf_drv_clock_lfclk_request(NULL); - while (!nrf_drv_clock_lfclk_is_running()) { } + while (!nrf_drv_clock_lfclk_is_running()); NRF_RTC0->TASKS_STOP= 0; ///< part of workaround for 3.1 [20] RTC: Register values are invalid from http://infocenter.nordicsemi.com/pdf/nRF52840_Rev_1_Errata_v1.1.pdf nrfx_systick_init(); @@ -96,8 +95,7 @@ void board_init(void) /** * Puts the board to sleep */ -void board_sleep(void) -{ +void board_sleep(void) { nrf_pwr_mgmt_run(); /* @@ -118,8 +116,7 @@ void board_sleep(void) /** * Resets the board */ -void board_reset(void) -{ +void board_reset(void) { NVIC_SystemReset(); }
Allow partial runas dialog executable paths, fix runas crash
@@ -386,10 +386,23 @@ INT_PTR CALLBACK PhpRunAsDlgProc( if (PhIsNullOrEmptyString(program)) break; + if (RtlDoesFileExists_U(program->Buffer)) + { // Escape the path. (dmex: poor man's PathQuoteSpaces) if (!PhStartsWithString2(program, L"\"", FALSE) && PhFindCharInString(program, 0, L' ') != -1) - { programEscaped = PhaConcatStrings(3, L"\"", PhGetString(program), L"\""); + else + programEscaped = program; + } + else + { + WCHAR buffer[MAX_PATH]; + + // The user typed a name without a path so attempt to locate the executable. + if (PhSearchFilePath(program->Buffer, L".exe", buffer)) + programEscaped = PhaConcatStrings(3, L"\"", buffer, L"\""); + else + programEscaped = NULL; } // Fix up the user name if it doesn't have a domain. @@ -441,7 +454,7 @@ INT_PTR CALLBACK PhpRunAsDlgProc( PhpSplitUserName(userName->Buffer, &domainPart, &userPart); memset(&createInfo, 0, sizeof(PH_CREATE_PROCESS_AS_USER_INFO)); - createInfo.CommandLine = programEscaped->Buffer; + createInfo.CommandLine = PhGetString(programEscaped); createInfo.UserName = PhGetString(userPart); createInfo.DomainName = PhGetString(domainPart); createInfo.Password = PhGetStringOrEmpty(password); @@ -467,7 +480,7 @@ INT_PTR CALLBACK PhpRunAsDlgProc( { status = PhExecuteRunAsCommand2( hwndDlg, - programEscaped->Buffer, + PhGetString(programEscaped), userName->Buffer, PhGetStringOrEmpty(password), logonType,
xeonphi: use the model to allocate memory
@@ -1232,6 +1232,8 @@ errval_t interphi_init_xphi(uint8_t xphi, return SYS_ERR_OK; } +#include <driverkit/hwmodel.h> +#include <driverkit/iommu.h> /** @@ -1258,7 +1260,21 @@ errval_t interphi_init(struct xeon_phi *phi, size_t frame_size; if (capref_is_null(frame)) { +#ifdef __k1om__ err = frame_alloc(&mi->frame, XEON_PHI_INTERPHI_FRAME_SIZE, &frame_size); +#else + int32_t nodes[3]; + nodes[0] = driverkit_iommu_get_nodeid(phi->iommu_client); + nodes[1] = driverkit_hwmodel_get_my_node_id(); + nodes[2] = 0; + int32_t dest_nodeid = driverkit_hwmodel_lookup_dram_node_id(); + + err = driverkit_hwmodel_frame_alloc(&mi->frame, XEON_PHI_INTERPHI_FRAME_SIZE, + dest_nodeid, nodes); + + frame_size =XEON_PHI_INTERPHI_FRAME_SIZE; +#endif + //err = frame_alloc(&mi->frame, XEON_PHI_INTERPHI_FRAME_SIZE, &frame_size); if (err_is_fail(err)) { return err; }
rmt: fix bad config initializer Merges
@@ -149,9 +149,7 @@ typedef struct { rmt_carrier_level_t carrier_level; /*!< Level of the RMT output, when the carrier is applied */ rmt_idle_level_t idle_level; /*!< RMT idle level */ uint8_t carrier_duty_percent; /*!< RMT carrier duty (%) */ -#if SOC_RMT_SUPPORT_TX_LOOP_COUNT - uint32_t loop_count; /*!< Maximum loop count */ -#endif + uint32_t loop_count; /*!< Maximum loop count, only take effect for chips that is capable of `SOC_RMT_SUPPORT_TX_LOOP_COUNT` */ bool carrier_en; /*!< RMT carrier enable */ bool loop_en; /*!< Enable sending RMT items in a loop */ bool idle_output_en; /*!< RMT idle level output enable */ @@ -205,6 +203,7 @@ typedef struct { .carrier_level = RMT_CARRIER_LEVEL_HIGH, \ .idle_level = RMT_IDLE_LEVEL_LOW, \ .carrier_duty_percent = 33, \ + .loop_count = 0, \ .carrier_en = false, \ .loop_en = false, \ .idle_output_en = true, \
Define cache line size for x86 32-bit
*/ #ifndef CLIB_LOG2_CACHE_LINE_BYTES -#if defined(__x86_64__) || defined(__ARM_ARCH_7A__) +#if defined(__x86_64__) || defined(__ARM_ARCH_7A__) || defined(__i386__) #define CLIB_LOG2_CACHE_LINE_BYTES 6 #endif
More sanity checks when setting the OSRAM manufacturer name
@@ -1551,7 +1551,8 @@ void DeRestPluginPrivate::addLightNode(const deCONZ::Node *node) lightNode.setNeedSaveDatabase(true); } - if (checkMacVendor(node->address(), VENDOR_OSRAM)) + if (checkMacVendor(node->address(), VENDOR_OSRAM) && + (node->nodeDescriptor().manufacturerCode() == VENDOR_OSRAM || node->nodeDescriptor().manufacturerCode() == VENDOR_OSRAM_STACK)) { if (lightNode.manufacturer() != QLatin1String("OSRAM")) {
openwsman: fix configure without openssl. Add openssl to the list of ifdefs so the build will not fail on an empty #if.
@@ -389,7 +389,7 @@ SET( CRAY_STACKSEG_END 0 ) # The code below ensures that "HAVE_xxx" is set to "0" or "1" # -SET (FUNCS_TO_TEST "bcopy" "crypt" "daemon" "fnmatch" "getaddrinfo" "getnameinfo" "getpid" "gettimeofday" "gmtime_r" "inet_aton" "inet_ntop" "inet_pton" "sleep" "srandom" "strsep" "strtok_r" "syslog" "timegm" "memmove" "unlink" "va_copy" ) +SET (FUNCS_TO_TEST "bcopy" "crypt" "daemon" "fnmatch" "getaddrinfo" "getnameinfo" "getpid" "gettimeofday" "gmtime_r" "inet_aton" "inet_ntop" "inet_pton" "sleep" "srandom" "ssl" "strsep" "strtok_r" "syslog" "timegm" "memmove" "unlink" "va_copy" ) FOREACH( FUNC ${FUNCS_TO_TEST}) STRING(TOUPPER ${FUNC} UPNAME) SET(HAVENAME "HAVE_${UPNAME}")
apps/posix_spawn: Eliminate a warning.
@@ -300,7 +300,7 @@ int spawn_main(int argc, char *argv[]) /* Make sure that we are using our symbol tablee */ - symdesc.symtab = exports; + symdesc.symtab = (FAR struct symtab_s *)exports; /* Discard 'const' */ symdesc.nsymbols = nexports; (void)boardctl(BOARDIOC_APP_SYMTAB, (uintptr_t)&symdesc); @@ -321,6 +321,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawn_file_actions_init failed: %d\n", ret); } + posix_spawn_file_actions_dump(&file_actions); ret = posix_spawnattr_init(&attr); @@ -328,6 +329,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawnattr_init failed: %d\n", ret); } + posix_spawnattr_dump(&attr); mm_update(&g_mmstep, "after file_action/attr init"); @@ -358,13 +360,14 @@ int spawn_main(int argc, char *argv[]) sleep(4); mm_update(&g_mmstep, "after posix_spawn"); - /* Free attibutes and file actions */ + /* Free attributes and file actions */ ret = posix_spawn_file_actions_destroy(&file_actions); if (ret != 0) { errmsg("ERROR: posix_spawn_file_actions_destroy failed: %d\n", ret); } + posix_spawn_file_actions_dump(&file_actions); ret = posix_spawnattr_destroy(&attr); @@ -372,6 +375,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawnattr_destroy failed: %d\n", ret); } + posix_spawnattr_dump(&attr); mm_update(&g_mmstep, "after file_action/attr destruction"); @@ -393,6 +397,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawn_file_actions_init failed: %d\n", ret); } + posix_spawn_file_actions_dump(&file_actions); ret = posix_spawnattr_init(&attr); @@ -400,6 +405,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawnattr_init failed: %d\n", ret); } + posix_spawnattr_dump(&attr); mm_update(&g_mmstep, "after file_action/attr init"); @@ -411,6 +417,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawn_file_actions_addclose failed: %d\n", ret); } + posix_spawn_file_actions_dump(&file_actions); snprintf(fullpath, 128, "%s/%s", MOUNTPT, g_data); @@ -419,6 +426,7 @@ int spawn_main(int argc, char *argv[]) { errmsg("ERROR: posix_spawn_file_actions_addopen failed: %d\n", ret); } + posix_spawn_file_actions_dump(&file_actions); mm_update(&g_mmstep, "after adding file_actions");
[kernel] move updateInteractions() to TimeStepping::update()
@@ -360,6 +360,9 @@ void TimeStepping::update(unsigned int levelInput) // need this until mechanics' BulletTimeStepping class is removed updateWorldFromDS(); + // Update interactions if a manager was provided + updateInteractions(); + // 3 - compute output ( x ... -> y) if (!_allNSProblems->empty()) { @@ -452,9 +455,6 @@ void TimeStepping::advanceToEvent() { DEBUG_PRINTF("TimeStepping::advanceToEvent(). Time =%f\n",getTkp1()); - // Update interactions if a manager was provided - updateInteractions(); - // Initialize lambdas of all interactions. SP::InteractionsGraph indexSet0 = _nsds-> topology()->indexSet(0);
Minor fuzzer improvements
@@ -101,7 +101,10 @@ ZyanUSize ZydisLibFuzzerRead(void* ctx, ZyanU8* buf, ZyanUSize max_len) ZyanUSize len = ZYAN_MIN(c->buf_len - c->read_offs, max_len); // printf("buf_len: %ld, read_offs: %ld, len: %ld, max_len: %ld, ptr: %p\n", // c->buf_len, c->read_offs, len, max_len, c->buf + c->read_offs); - if (!len) return 0; + if (!len) + { + return 0; + } ZYAN_MEMCPY(buf, c->buf + c->read_offs, len); c->read_offs += len; return len; @@ -122,14 +125,14 @@ static int ZydisFuzzIteration(ZydisStreamRead read_fn, void* stream_ctx) #ifdef ZYAN_WINDOWS // The `stdin` pipe uses text-mode on Windows platforms by default. We need it to be opened in // binary mode - _setmode(_fileno(ZYAN_STDIN), _O_BINARY); + (void)_setmode(_fileno(ZYAN_STDIN), _O_BINARY); #endif if (read_fn( stream_ctx, (ZyanU8*)&control_block, sizeof(control_block)) != sizeof(control_block)) { ZYDIS_MAYBE_FPUTS("Not enough bytes to fuzz\n", ZYAN_STDERR); - return EXIT_FAILURE; + return EXIT_SUCCESS; } control_block.string[ZYAN_ARRAY_LENGTH(control_block.string) - 1] = 0; @@ -327,5 +330,3 @@ int main(void) #endif // ZYDIS_LIBFUZZER /* ============================================================================================== */ - -
Update URL to Luarocks repository
@@ -34,7 +34,7 @@ NuGet (C#): Luarocks (Lua): ```bash -luarocks install --server=https://msteinbeck.github.io/tinyspline/luarocks tinyspline +luarocks install --server=https://tinyspline.github.io/lua tinyspline ``` Maven (Java):
hslua-aeson: Cleanup pushValue
@@ -72,18 +72,14 @@ peekScientific idx = fromFloatDigits <$!> peekRealFloat @Double idx -- | Hslua StackValue instance for the Aeson Value data type. pushValue :: LuaError e => Pusher e Aeson.Value -pushValue = \case +pushValue val = do + checkstack' 1 "HsLua.Aeson.pushValue" + case val of Aeson.Object o -> pushKeyMap pushValue o - Aeson.Number n -> checkstack 1 >>= \case - True -> pushScientific n - False -> failLua "stack overflow" - Aeson.String s -> checkstack 1 >>= \case - True -> pushText s - False -> failLua "stack overflow" + Aeson.Number n -> pushScientific n + Aeson.String s -> pushText s Aeson.Array a -> pushVector pushValue a - Aeson.Bool b -> checkstack 1 >>= \case - True -> pushBool b - False -> failLua "stack overflow" + Aeson.Bool b -> pushBool b Aeson.Null -> pushNull peekValue :: LuaError e => Peeker e Aeson.Value
scp: do not NUL-terminate the command for remote exec It breaks SCP download/upload from/to certain server implementations. The bug does not manifest with OpenSSH, which silently drops the NUL byte (along with any garbage that follows it) before executing the command.
@@ -303,8 +303,8 @@ scp_recv(LIBSSH2_SESSION * session, const char *path, libssh2_struct_stat * sb) &session->scpRecv_command[cmd_len], session->scpRecv_command_len - cmd_len); - session->scpRecv_command[cmd_len] = '\0'; - session->scpRecv_command_len = cmd_len + 1; + /* the command to exec should _not_ be NUL-terminated */ + session->scpRecv_command_len = cmd_len; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "Opening channel for SCP receive"); @@ -845,8 +845,8 @@ scp_send(LIBSSH2_SESSION * session, const char *path, int mode, &session->scpSend_command[cmd_len], session->scpSend_command_len - cmd_len); - session->scpSend_command[cmd_len] = '\0'; - session->scpSend_command_len = cmd_len + 1; + /* the command to exec should _not_ be NUL-terminated */ + session->scpSend_command_len = cmd_len; _libssh2_debug(session, LIBSSH2_TRACE_SCP, "Opening channel for SCP send");
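The length bookkeeping behind this change, restated as a minimal sketch (hypothetical helper, not libssh2's code): the byte count handed to the remote-exec request should cover only the visible characters of the command, never the terminating NUL.

```c
#include <stdio.h>

/* Hypothetical helper: format an scp receive command and return the number
 * of bytes to hand to the transport.  snprintf's return value already
 * excludes the trailing NUL, so using it directly avoids sending a stray
 * 0x00 byte to the remote shell. */
static size_t scp_command_len(char *buf, size_t buflen, const char *path)
{
    int n = snprintf(buf, buflen, "scp -f %s", path);
    if (n < 0 || (size_t)n >= buflen)
        return 0;          /* formatting error or truncation */
    return (size_t)n;      /* length without the NUL terminator */
}
```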
docs: Fix typos in write protection doc BRANCH=none TEST=view in gitiles
[TOC] This is a somewhat tricky topic since write protection implementations can -differ between chips and the hardware write protection has changed over time, so -please edit or open a bug if something is not clear. +differ between chips, and the hardware write protection has changed over time, +so please edit or open a bug if something is not clear. ## Terminology @@ -13,9 +13,9 @@ please edit or open a bug if something is not clear. MCUs running the EC code have read-only (RO) and read-write (RW) firmware. Coming out of reset, the MCU boots into its RO firmware. -In the case of the EC, the RO firmware boots the host and asks it verify a hash -of the RW firmware (software sync). If the RW firmware is invalid, it is updated -from a copy in the host's RW firmware. +In the case of the EC, the RO firmware boots the host and asks it to verify a +hash of the RW firmware (software sync). If the RW firmware is invalid, it is +updated from a copy in the host's RW firmware. In the case of the FPMCU, the RO firmware uses the public key embedded in it to validate the signature of the RW firmware. If the RW firmware is invalid it does @@ -108,7 +108,7 @@ enabled or disabled (note that some implementations require an EC reset to disable software write protect). The underlying mechanism implementing software write protect may differ between -EC chips. However the common requirements are that software write protect can +EC chips. However, the common requirements are that software write protect can only be disabled when hardware write protect is off and that the RO firmware must be protected before jumping to RW firmware if protection is enabled. @@ -184,9 +184,9 @@ Clear `ro_at_boot` flag. This can only be cleared if the EC booted without hardware write protect enabled. Note that you must reset the EC to clear write protect after removing the screw. -If the `ro_at_boot` flag set and the EC resets with the HW gpio disabled, the EC -will leave the flash unprotected (`ro_now` and `all_now` flags are not set) but -leave `ro_at_boot` flag set. +If the `ro_at_boot` flag set, and the EC resets with the HW gpio disabled, the +EC will leave the flash unprotected (`ro_now` and `all_now` flags are not set) +but leave `ro_at_boot` flag set. ### Changing Software Write Protection with flashrom @@ -240,7 +240,7 @@ FAILED ``` Reboot with [hardware write protection](#hw_wp) disabled. Note that protection -is still enabled but the protection range is zero. +is still enabled, but the protection range is zero. ```bash (chroot) $ flashrom -p ec --wp-status @@ -264,7 +264,7 @@ SUCCESS ## system_is_locked() The [`system_is_locked()`] function in the EC code returns false if the HW write -protect GPIO is disabled or the read-only firmware is not protected. +protect GPIO is disabled, or the read-only firmware is not protected. One way this is used in the FPMCU source is to compile test or debug functionality into the firmware. Guarding the test functionality with
fixed status output if no records were written to the file
@@ -302,7 +302,7 @@ for (index = optind; index < argc; index++) printf("%ld record(s) read from %s\n", wkpcount, argv[index]); } } - +if(hcxcount > 0) printf("%ld record(s) written to %s\n", hcxcount, hcxoutname); return EXIT_SUCCESS;
libsodium: fix source directory names to address build issues with Make
@@ -16,7 +16,8 @@ COMPONENT_SRCDIRS += \ $(LSRC)/crypto_auth/hmacsha512256 \ $(LSRC)/crypto_box \ $(LSRC)/crypto_box/curve25519xsalsa20poly1305 \ - $(LSRC)/crypto_core/curve25519/ref10 \ + $(LSRC)/crypto_core/ed25519 \ + $(LSRC)/crypto_core/ed25519/ref10 \ $(LSRC)/crypto_core/hchacha20 \ $(LSRC)/crypto_core/hsalsa20/ref2 \ $(LSRC)/crypto_core/hsalsa20 \ @@ -40,6 +41,9 @@ COMPONENT_SRCDIRS += \ $(LSRC)/crypto_scalarmult \ $(LSRC)/crypto_scalarmult/curve25519 \ $(LSRC)/crypto_scalarmult/curve25519/ref10 \ + $(LSRC)/crypto_scalarmult/curve25519/sandy2x \ + $(LSRC)/crypto_scalarmult/ed25519/ref10 \ + $(LSRC)/crypto_scalarmult/ristretto255/ref10 \ $(LSRC)/crypto_secretbox \ $(LSRC)/crypto_secretbox/xsalsa20poly1305 \ $(LSRC)/crypto_shorthash \
bt: fix incorrect comments for error codes Closes
@@ -85,23 +85,23 @@ typedef UINT8 tSMP_EVT; #define SMP_MAX_FAIL_RSN_PER_SPEC SMP_XTRANS_DERIVE_NOT_ALLOW /* self defined error code */ -#define SMP_PAIR_INTERNAL_ERR (SMP_MAX_FAIL_RSN_PER_SPEC + 0x01) /* 0x0E */ +#define SMP_PAIR_INTERNAL_ERR (SMP_MAX_FAIL_RSN_PER_SPEC + 0x01) /* 0x0F */ /* 0x0F unknown IO capability, unable to decide association model */ -#define SMP_UNKNOWN_IO_CAP (SMP_MAX_FAIL_RSN_PER_SPEC + 0x02) /* 0x0F */ +#define SMP_UNKNOWN_IO_CAP (SMP_MAX_FAIL_RSN_PER_SPEC + 0x02) /* 0x10 */ -#define SMP_INIT_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x03) /* 0x10 */ -#define SMP_CONFIRM_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x04) /* 0x11 */ -#define SMP_BUSY (SMP_MAX_FAIL_RSN_PER_SPEC + 0x05) /* 0x12 */ -#define SMP_ENC_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x06) /* 0x13 */ -#define SMP_STARTED (SMP_MAX_FAIL_RSN_PER_SPEC + 0x07) /* 0x14 */ -#define SMP_RSP_TIMEOUT (SMP_MAX_FAIL_RSN_PER_SPEC + 0x08) /* 0x15 */ -#define SMP_DIV_NOT_AVAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x09) /* 0x16 */ +#define SMP_INIT_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x03) /* 0x11 */ +#define SMP_CONFIRM_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x04) /* 0x12 */ +#define SMP_BUSY (SMP_MAX_FAIL_RSN_PER_SPEC + 0x05) /* 0x13 */ +#define SMP_ENC_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x06) /* 0x14 */ +#define SMP_STARTED (SMP_MAX_FAIL_RSN_PER_SPEC + 0x07) /* 0x15 */ +#define SMP_RSP_TIMEOUT (SMP_MAX_FAIL_RSN_PER_SPEC + 0x08) /* 0x16 */ +#define SMP_DIV_NOT_AVAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x09) /* 0x17 */ /* 0x17 unspecified failed reason */ -#define SMP_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x0A) /* 0x17 */ +#define SMP_FAIL (SMP_MAX_FAIL_RSN_PER_SPEC + 0x0A) /* 0x18 */ -#define SMP_CONN_TOUT (SMP_MAX_FAIL_RSN_PER_SPEC + 0x0B) +#define SMP_CONN_TOUT (SMP_MAX_FAIL_RSN_PER_SPEC + 0x0B) /* 0x19 */ #define SMP_SUCCESS 0 typedef UINT8 tSMP_STATUS;
go: fix link
@@ -30,7 +30,7 @@ Experimental bindings (included in `cmake -DBINDINGS=EXPERIMENTAL`): External bindings (in a separate repo): -- [go](https://go.libelektra.org/) Go bindings (experimental) +- [go](https://github.com/ElektraInitiative/go-elektra) Go bindings (experimental) # I/O Bindings
nimble/ll: Allow reporting ext advertising without aux data This should fix LL/DDI/SCN/BV-19
@@ -1781,7 +1781,7 @@ ble_ll_scan_parse_ext_hdr(struct os_mbuf *om, struct ble_mbuf_hdr *ble_hdr, ble_ll_get_addr_type(rxbuf[0] & BLE_ADV_PDU_HDR_TXADD_MASK); i += BLE_LL_EXT_ADV_ADVA_SIZE; } else { - if (aux_data->flags & BLE_LL_AUX_HAS_ADDRA) { + if (aux_data && (aux_data->flags & BLE_LL_AUX_HAS_ADDRA)) { /* Have address in aux_data */ memcpy(out_evt->addr, aux_data->addr, 6); out_evt->addr_type = aux_data->addr_type; @@ -2290,11 +2290,6 @@ ble_ll_hci_send_ext_adv_report(uint8_t ptype, struct os_mbuf *om, return; } - if (!aux_data) { - BLE_LL_ASSERT(0); - return; - } - evt = ble_ll_scan_init_ext_adv(NULL); if (!evt) { return; @@ -2331,7 +2326,7 @@ ble_ll_hci_send_ext_adv_report(uint8_t ptype, struct os_mbuf *om, } else { evt->evt_type |= (BLE_HCI_ADV_DATA_STATUS_TRUNCATED); } - } else { + } else if (aux_data) { if (BLE_LL_CHECK_AUX_FLAG(aux_data, BLE_LL_AUX_INCOMPLETE_BIT)) { evt->evt_type |= (BLE_HCI_ADV_DATA_STATUS_INCOMPLETE); } else if (BLE_LL_CHECK_AUX_FLAG(aux_data, BLE_LL_AUX_INCOMPLETE_ERR_BIT)) {
esp_event: fix minor memory leak when overwriting an already registered handler
@@ -180,6 +180,7 @@ static esp_err_t handler_instances_add(esp_event_handler_instances_t* handlers, if (handler == it->handler) { it->arg = handler_arg; ESP_LOGW(TAG, "handler already registered, overwriting"); + free(handler_instance); return ESP_OK; } last = it;
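A stripped-down sketch of the pattern being fixed (hypothetical names, not the actual esp_event implementation): the instance allocated up front for the insert path has to be released on the overwrite branch before the early return.

```c
#include <stdlib.h>

typedef void (*handler_fn)(void *arg);

typedef struct handler_node {
    handler_fn handler;
    void *arg;
    struct handler_node *next;
} handler_node_t;

/* Hypothetical register function: allocate first, then scan the list. */
static int handler_register(handler_node_t **head, handler_fn handler, void *arg)
{
    handler_node_t *node = calloc(1, sizeof(*node));
    if (node == NULL)
        return -1;

    for (handler_node_t *it = *head; it != NULL; it = it->next) {
        if (it->handler == handler) {
            it->arg = arg;     /* overwrite the existing registration */
            free(node);        /* the fix: drop the unused allocation */
            return 0;
        }
    }

    node->handler = handler;
    node->arg = arg;
    node->next = *head;
    *head = node;
    return 0;
}
```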
Inform the user if a metric is not calculated on the train dataset by default
@@ -4118,6 +4118,16 @@ static TVector<TVector<T>> ConstructSquareMatrix(const TString& matrixString) { return result; } +static bool HintedToEvalOnTrain(const TMap<TString, TString>& params) { + const bool hasHints = params.contains("hints"); + const auto& hints = hasHints ? ParseHintsDescription(params.at("hints")) : TMap<TString, TString>(); + return hasHints && hints.contains("skip_train") && hints.at("skip_train") == "false"; +} + +static bool HintedToEvalOnTrain(const NCatboostOptions::TLossDescription& metricDescription) { + return HintedToEvalOnTrain(metricDescription.GetLossParams()); +} + static TVector<THolder<IMetric>> CreateMetric(ELossFunction metric, const TMap<TString, TString>& params, int approxDimension) { const double binaryClassPredictionBorder = NCatboostOptions::GetPredictionBorderFromLossParams(params).GetOrElse( GetDefaultPredictionBorder()); @@ -4468,6 +4478,9 @@ static TVector<THolder<IMetric>> CreateMetric(ELossFunction metric, const TMap<T for (THolder<IMetric>& metricHolder : result) { metricHolder->AddHint("skip_train", "true"); } + if (!HintedToEvalOnTrain(params)) { + CATBOOST_INFO_LOG << "Metric " << metric << " is not calculated on train by default. To calculate this metric on train, add hints=skip_train~false to metric parameters." << Endl; + } } if (params.contains("hints")) { // TODO(smirnovpavel): hints shouldn't be added for each metric @@ -4543,13 +4556,6 @@ static void SetHintToCalcMetricOnTrain(const THashSet<TString>& metricsToCalcOnT } } -static bool HintedToEvalOnTrain(const NCatboostOptions::TLossDescription& metricDescription) { - const auto& params = metricDescription.GetLossParams(); - const bool hasHints = params.contains("hints"); - const auto& hints = hasHints ? ParseHintsDescription(params.at("hints")) : TMap<TString, TString>(); - return hasHints && hints.contains("skip_train") && hints.at("skip_train") == "false"; -} - void InitializeEvalMetricIfNotSet( const NCatboostOptions::TOption<NCatboostOptions::TLossDescription>& objectiveMetric, NCatboostOptions::TOption<NCatboostOptions::TLossDescription>* evalMetric){
OpenCorePkg/Library/OcRtcLib: fix possible loss of data OpenCorePkg\Library\OcRtcLib\AppleRtcRam.c(151): error C2220: the following warning is treated as an error OpenCorePkg\Library\OcRtcLib\AppleRtcRam.c(151): warning C4244: 'function': conversion from 'UINTN' to 'UINT8', possible loss of data
@@ -148,7 +148,7 @@ AppleRtcRamReadData ( return Status; } - Status = SyncRtcRead (Address, Buffer); + Status = SyncRtcRead ((UINT8) Address, Buffer); if (EFI_ERROR (Status)) { return Status; }
[cmake] SERVER_SRC variable
@@ -882,7 +882,7 @@ add_executable(lighttpd-angel lighttpd-angel.c) set(L_INSTALL_TARGETS ${L_INSTALL_TARGETS} lighttpd-angel) add_target_properties(lighttpd-angel COMPILE_FLAGS "-DSBIN_DIR=\\\\\"${CMAKE_INSTALL_FULL_SBINDIR}\\\\\"") -add_executable(lighttpd +set(SERVER_SRC server.c response.c connections.c @@ -899,9 +899,9 @@ add_executable(lighttpd data_config.c configfile.c configparser.c - ${COMMON_SRC} - ${BUILTIN_MODS} ) + +add_executable(lighttpd ${SERVER_SRC} ${COMMON_SRC} ${BUILTIN_MODS}) set(L_INSTALL_TARGETS ${L_INSTALL_TARGETS} lighttpd) add_and_install_library(mod_access mod_access.c)
[test] Skip test on travis
@@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "os" "strconv" "strings" "testing" @@ -5115,6 +5116,11 @@ abi.register(testall) } func TestTimeoutCnt(t *testing.T) { + timeout := 250 + if os.Getenv("TRAVIS") == "true" { + //timeout = 1000 + return + } src := ` function ecverify(n) for i = 1, n do @@ -5129,7 +5135,7 @@ abi.register(ecverify) ` bc, err := LoadDummyChain( func(d *DummyChain) { - d.timeout = 250 // milliseconds + d.timeout = timeout // milliseconds }, ) if err != nil {
Fixed Rx filter calibration C_CTL_LPFL_RBB value restore
@@ -459,7 +459,7 @@ uint8_t TuneRxFilter(const float_type rx_lpf_freq_RF) uint8_t ccomp_tia_rfe = Get_SPI_Reg_bits(CCOMP_TIA_RFE); uint8_t rcomp_tia_rfe = Get_SPI_Reg_bits(RCOMP_TIA_RFE); uint16_t rcc_ctl_lpfl_rbb = Get_SPI_Reg_bits(RCC_CTL_LPFL_RBB); - uint8_t c_ctl_lpfl_rbb = Get_SPI_Reg_bits(C_CTL_LPFL_RBB); + uint16_t c_ctl_lpfl_rbb = Get_SPI_Reg_bits(C_CTL_LPFL_RBB); uint8_t c_ctl_pga_rbb = Get_SPI_Reg_bits(C_CTL_PGA_RBB); uint8_t rcc_ctl_pga_rbb = Get_SPI_Reg_bits(RCC_CTL_PGA_RBB); uint8_t rcc_ctl_lpfh_rbb = Get_SPI_Reg_bits(RCC_CTL_LPFH_RBB);
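A small illustration of why the widened local matters (made-up value; the real field layout lives in the chip's datasheet): when a register field is wider than 8 bits, staging it in a uint8_t silently truncates the value that is later restored.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t field = 0x2A5;          /* example value of a >8-bit register field */
    uint8_t narrow = (uint8_t)field; /* old code: truncated to 0xA5 */
    uint16_t wide = field;           /* fixed code: value survives intact */

    printf("restored (narrow) = 0x%03X\n", narrow); /* 0x0A5 - wrong */
    printf("restored (wide)   = 0x%03X\n", wide);   /* 0x2A5 - correct */
    return 0;
}
```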
Fix missing disable_active_migration tp logging
@@ -632,6 +632,9 @@ void ngtcp2_log_remote_tp(ngtcp2_log *log, uint8_t exttype, log->log_printf(log->user_data, (NGTCP2_LOG_TP " active_connection_id_limit=%" PRIu64), NGTCP2_LOG_TP_HD_FIELDS, params->active_connection_id_limit); + log->log_printf(log->user_data, + (NGTCP2_LOG_TP " disable_active_migration=%d"), + NGTCP2_LOG_TP_HD_FIELDS, params->disable_active_migration); } void ngtcp2_log_pkt_lost(ngtcp2_log *log, int64_t pkt_num, uint8_t type,
website: fix deployment URL
@@ -43,11 +43,11 @@ RUN mkdir build \ && ldconfig ARG BACKEND=https://restapi.libelektra.org/ -ARG URL=https://libelektra.org/ +ARG URL=https://www.libelektra.org/ RUN kdb global-mount \ && kdb mount-website-frontend-config \ - && kdb set -N system /sw/elektra/restfrontend/#0/current/backend/root "${BACKEND}" \ - && kdb set -N system /sw/elektra/restfrontend/#0/current/website/url "${URL}" \ + && kdb set -N system /sw/elektra/websitefrontend/#0/current/backend/root "${BACKEND}" \ + && kdb set -N system /sw/elektra/websitefrontend/#0/current/website/url "${URL}" \ && kdb build-website-frontend # FROM nginx:alpine
Redrix: Lower LED task priority BRANCH=none TEST=make BOARD=redrix
#define CONFIG_TASK_LIST \ TASK_ALWAYS(HOOKS, hook_task, NULL, LARGER_TASK_STACK_SIZE) \ + TASK_ALWAYS(LED, led_task, NULL, TASK_STACK_SIZE) \ TASK_ALWAYS(CHG_RAMP, chg_ramp_task, NULL, TASK_STACK_SIZE) \ TASK_ALWAYS(USB_CHG_P0, usb_charger_task, 0, TASK_STACK_SIZE) \ TASK_ALWAYS(USB_CHG_P1, usb_charger_task, 0, TASK_STACK_SIZE) \ TASK_ALWAYS(PD_C0, pd_task, NULL, VENTI_TASK_STACK_SIZE) \ TASK_ALWAYS(PD_C1, pd_task, NULL, VENTI_TASK_STACK_SIZE) \ TASK_ALWAYS(PD_INT_C0, pd_interrupt_handler_task, 0, TASK_STACK_SIZE) \ - TASK_ALWAYS(PD_INT_C1, pd_interrupt_handler_task, 1, TASK_STACK_SIZE) \ - TASK_ALWAYS(LED, led_task, NULL, TASK_STACK_SIZE) + TASK_ALWAYS(PD_INT_C1, pd_interrupt_handler_task, 1, TASK_STACK_SIZE)
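The reordering works because, in this task-list style, priority appears to follow declaration order, with entries nearer the top running at lower priority; no explicit priority number is involved. A hypothetical two-entry list to show the convention:

```c
/* Hypothetical minimal list: with priority following declaration order,
 * swapping these two entries swaps which task can preempt the other. */
#define CONFIG_TASK_LIST_EXAMPLE                               \
	TASK_ALWAYS(LOWER_PRIO,  lower_prio_task,  NULL, 512)  \
	TASK_ALWAYS(HIGHER_PRIO, higher_prio_task, NULL, 512)
```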
Test: add missing assert
@@ -162,6 +162,7 @@ TEST( UTIL_Platform_Threads, IotThreads_CreateDetachedThread ) } printf( "Expected Pri = 7, actual = %d\r\n", ( int ) attrData ); + TEST_ASSERT_EQUAL( 7, attrData ); } #endif /* if ( INCLUDE_uxTaskPriorityGet == 1 ) */
Fix xcode12 build and add OSX/OpenMP
@@ -224,7 +224,16 @@ matrix: before_script: - COMMON_FLAGS="DYNAMIC_ARCH=1 NUM_THREADS=32" - brew update - - brew install gcc@10 + script: + - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE + env: + - BTYPE="TARGET=HASWELL USE_OPENMP=1 BINARY=64 INTERFACE64=1 FC=gfortran-10" + + - <<: *test-macos + osx_image: xcode12 + before_script: + - COMMON_FLAGS="DYNAMIC_ARCH=1 NUM_THREADS=32" + - brew update script: - travis_wait 45 make QUIET_MAKE=1 $COMMON_FLAGS $BTYPE env:
Update release notes with 1.0.0 release
# RELEASE NOTES +04 June 2018 - Apache NimBLE v1.0.0 + +For full release notes, please visit the +[Apache Mynewt Wiki](https://cwiki.apache.org/confluence/display/MYNEWT/Release+Notes). + +Apache NimBLE is an open-source Bluetooth 5.0 stack (both Host & Controller) that completely +replaces the proprietary SoftDevice on Nordic chipsets. + +New features in this version of NimBLE include: + +* Initial version after moving to separate repo - https://github.com/apache/mynewt-nimble +* removal of deprecated bletiny application (superseded by btshell) +* Added NimBLE Porting Layer (NPL) which abstracts OS specific details to improve portability +* Initial ports for FreeRTOS and RIOT OS +* Support for advertising up to 1650 bytes of data with Extended Advertising +* Support for host flow control +* Support for Direct Test Mode (DTM) via HCI interface +* Support for Device Information Service +* Bugfixes for issues found on UPF59 and during BT certification testing +* Lots of other bugfixes, minor enhancements and optimisations +* Mesh improvements + +If working on next-generation RTOS and Bluetooth protocotol stack +sounds exciting to you, get in touch, by sending a mail to the Apache Mynewt +Developer's list, [email protected].
alpine: disable kdb testing Used commands are not POSIX and thus fail.
@@ -450,7 +450,8 @@ def generateFullBuildStages() { DOCKER_IMAGES.alpine, CMAKE_FLAGS_BUILD_ALL + [ 'BUILD_STATIC': 'ON', - 'PLUGINS': 'ALL;-date;-passwd' + 'PLUGINS': 'ALL;-date;-passwd', + 'ENABLE_KDB_TESTING': 'OFF' ], [TEST.ALL] )
Support passing std::vector<std::string> into Util::convert
@@ -358,6 +358,14 @@ T convert(const U &src) return src; } +// std::string owns the returned char* +template<> +inline const char* +convert(const std::string &src) +{ + return src.c_str(); +} + // MString owns the returned char* template<> inline const char*
Escape $1 in awk command
@@ -173,7 +173,7 @@ harness_macro_temp: $(HARNESS_SMEMS_CONF) | top_macro_temp # remove duplicate files and headers in list of simulation file inputs ######################################################################################## $(sim_common_files): $(sim_files) $(sim_top_blackboxes) $(sim_harness_blackboxes) - awk '{print $1;}' $^ | sort -u | grep -v '.*\.\(svh\|h\)$$' > $@ + awk '{print $$1;}' $^ | sort -u | grep -v '.*\.\(svh\|h\)$$' > $@ ######################################################################################### # helper rule to just make verilog files
data BUGFIX set correct return code in lyd_dup_recursive()
@@ -1798,7 +1798,13 @@ lyd_dup_recursive(const struct lyd_node *node, struct lyd_node *parent, struct l case LYD_ANYDATA_DATATREE: if (orig->value.tree) { any->value.tree = lyd_dup(orig->value.tree, NULL, LYD_DUP_RECURSIVE | LYD_DUP_WITH_SIBLINGS); - LY_CHECK_GOTO(!any->value.tree, error); + if (!any->value.tree) { + /* get the last error's error code recorded by lyd_dup */ + struct ly_err_item *ei = ly_err_first(LYD_NODE_CTX(node)); + ret = ei ? ei->prev->no : LY_EOTHER; + goto error; + } + LY_CHECK_ERR_GOTO(!any->value.tree, ret = 0 ,error); } break; case LYD_ANYDATA_STRING:
tools: Fix website redirect loop
@@ -82,11 +82,15 @@ module.exports = [ $timeout(function () { if ($stateParams.file === null) { - $state.go("main.news", { + $state.go( + "main.news", + { file: files.filter(function (elem) { return elem.type === "file"; })[0].slug, - }); + }, + { location: "replace" } + ); deferred.reject(); } else { var filtered = files.filter(function (elem) { @@ -95,7 +99,11 @@ module.exports = [ ); }); if (filtered.length === 0) { - $state.go("main.news", { file: files[0].slug }); + $state.go( + "main.news", + { file: files[0].slug }, + { location: "replace" } + ); deferred.reject(); } else { WebsiteService.loadFile(filtered[0].file).then(function ( @@ -171,11 +179,15 @@ module.exports = [ $timeout(function () { if ($stateParams.file === null) { - $state.go("main.dyn." + entry.ref, { + $state.go( + "main.dyn." + entry.ref, + { file: files.filter(function (elem) { return elem.type === "file"; })[0].slug, - }); + }, + { location: "replace" } + ); deferred.reject(); } else { var filtered = files.filter(function (elem) { @@ -185,11 +197,15 @@ module.exports = [ ); }); if (filtered.length === 0) { - $state.go("main.dyn." + entry.ref, { + $state.go( + "main.dyn." + entry.ref, + { file: files.filter(function (elem) { return elem.type === "file"; })[0].slug, - }); + }, + { location: "replace" } + ); deferred.reject(); } else { WebsiteService.loadFile(filtered[0].options.path).then(
fix(spinbox): remove redundant check
@@ -105,7 +105,6 @@ void lv_spinbox_set_digit_format(lv_obj_t * obj, uint8_t digit_count, uint8_t se if(digit_count > LV_SPINBOX_MAX_DIGIT_COUNT) digit_count = LV_SPINBOX_MAX_DIGIT_COUNT; if(separator_position >= digit_count) separator_position = 0; - if(separator_position > LV_SPINBOX_MAX_DIGIT_COUNT) separator_position = LV_SPINBOX_MAX_DIGIT_COUNT; if(digit_count < LV_SPINBOX_MAX_DIGIT_COUNT) { int64_t max_val = lv_pow(10, digit_count);
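The removed comparison was unreachable: after the two clamps above it, separator_position is either reset to zero or strictly below digit_count, which itself is capped at the maximum. A compact restatement of the invariant with a stand-in constant:

```c
#include <assert.h>

#define MAX_DIGIT_COUNT 10U   /* stand-in for LV_SPINBOX_MAX_DIGIT_COUNT */

static void clamp_format(unsigned *digit_count, unsigned *separator_position)
{
    if (*digit_count > MAX_DIGIT_COUNT) *digit_count = MAX_DIGIT_COUNT;
    if (*separator_position >= *digit_count) *separator_position = 0;
    /* Here separator_position is either 0 or strictly below digit_count, and
     * digit_count never exceeds MAX_DIGIT_COUNT, so a further
     * "separator_position > MAX_DIGIT_COUNT" test can never be true. */
    assert(*separator_position <= MAX_DIGIT_COUNT);
}
```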
rowan: enable SPI and GPIO console commands Enable more hardware-related console commands to help hardware validation. TEST=manual; build and load onto Rowan, then check the gpioget and spixfer console commands BRANCH=none
/* * Allow dangerous commands. - * TODO: Remove this config engineering velidation. + * TODO: Remove this config engineering validation. */ #define CONFIG_SYSTEM_UNLOCKED +#define CONFIG_CMD_SPI_XFER +#define CONFIG_CMD_GPIO_EXTENDED /* Accelero meter and gyro sensor */ #define CONFIG_ACCEL_KX022
[MQTT5] Fix includes
#include <stdlib.h> #include <stdio.h> #include <string.h> +#include <stdarg.h> /*---------------------------------------------------------------------------*/ /* Protocol constants */ #define MQTT_PROTOCOL_VERSION_3_1 3
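The include is presumably needed because the file handles variadic arguments; a minimal sketch of that dependency (hypothetical function, not the module's actual code):

```c
#include <stdarg.h>
#include <stdio.h>

/* Hypothetical logging shim: va_list/va_start/va_end come from <stdarg.h>,
 * so the header must be included directly rather than inherited by luck. */
static void log_fmt(const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    (void)vprintf(fmt, ap);
    va_end(ap);
}
```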
Fix create_vlan_subif API using sw_if_index as hw_if_index Also added a check for bond slave interfaces.
@@ -492,7 +492,7 @@ vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp) { vl_api_create_vlan_subif_reply_t *rmp; vnet_main_t *vnm = vnet_get_main (); - u32 hw_if_index, sw_if_index = (u32) ~ 0; + u32 sw_if_index = (u32) ~ 0; vnet_hw_interface_t *hi; int rv = 0; u32 id; @@ -506,8 +506,13 @@ vl_api_create_vlan_subif_t_handler (vl_api_create_vlan_subif_t * mp) VALIDATE_SW_IF_INDEX (mp); - hw_if_index = ntohl (mp->sw_if_index); - hi = vnet_get_hw_interface (vnm, hw_if_index); + hi = vnet_get_sup_hw_interface (vnm, ntohl (mp->sw_if_index)); + + if (hi->bond_info == VNET_HW_INTERFACE_BOND_INFO_SLAVE) + { + rv = VNET_API_ERROR_BOND_SLAVE_NOT_ALLOWED; + goto out; + } id = ntohl (mp->vlan_id); if (id == 0 || id > 4095)
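The root cause restated with made-up structures (not the VPP types): software and hardware interface indices live in separate tables, so the software index has to be translated through its interface entry rather than used to index the hardware table directly.

```c
/* Made-up tables for illustration only. */
typedef struct { unsigned hw_if_index; } sw_interface_t;
typedef struct { int is_bond_slave; } hw_interface_t;

static hw_interface_t *sup_hw_interface(hw_interface_t *hw_table,
                                        const sw_interface_t *sw_table,
                                        unsigned sw_if_index)
{
    /* Correct: translate the software index through its interface entry. */
    return &hw_table[sw_table[sw_if_index].hw_if_index];
    /* Buggy variant: return &hw_table[sw_if_index]; only works when the
     * two index spaces happen to coincide. */
}
```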
[bt] fix cleanup when allocation fails If osi_malloc fails for work_queues or osi_work_queue_create fails, osi_work_queue_delete in _err may release unallocated memory.
@@ -214,17 +214,17 @@ osi_thread_t *osi_thread_create(const char *name, size_t stack_size, int priorit return NULL; } - osi_thread_t *thread = (osi_thread_t *)osi_malloc(sizeof(osi_thread_t)); + osi_thread_t *thread = (osi_thread_t *)osi_calloc(sizeof(osi_thread_t)); if (thread == NULL) { goto _err; } thread->stop = false; - thread->work_queue_num = work_queue_num; - thread->work_queues = (struct work_queue **)osi_malloc(sizeof(struct work_queue *) * work_queue_num); + thread->work_queues = (struct work_queue **)osi_calloc(sizeof(struct work_queue *) * work_queue_num); if (thread->work_queues == NULL) { goto _err; } + thread->work_queue_num = work_queue_num; for (int i = 0; i < thread->work_queue_num; i++) { size_t queue_len = work_queue_len[i] ? work_queue_len[i] : DEFAULT_WORK_QUEUE_CAPACITY;
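A condensed sketch of the ordering the fix enforces (hypothetical structures, not the real osi_thread code): zero-initialise the context with calloc and record the queue count only after the queue array exists, so the shared error path frees exactly what was set up.

```c
#include <stdlib.h>

typedef struct { void **queues; int queue_num; } thread_ctx_t;

static void thread_ctx_free(thread_ctx_t *ctx)
{
    if (ctx == NULL)
        return;
    /* queue_num stays 0 until the array is allocated, so this loop never
     * touches uninitialised pointers on the error path. */
    for (int i = 0; i < ctx->queue_num; i++)
        free(ctx->queues[i]);
    free(ctx->queues);
    free(ctx);
}

static thread_ctx_t *thread_ctx_create(int queue_num)
{
    thread_ctx_t *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL)
        goto err;

    ctx->queues = calloc((size_t)queue_num, sizeof(*ctx->queues));
    if (ctx->queues == NULL)
        goto err;
    ctx->queue_num = queue_num;        /* only after the array is in place */

    for (int i = 0; i < queue_num; i++) {
        ctx->queues[i] = malloc(64);   /* stand-in for queue creation */
        if (ctx->queues[i] == NULL)
            goto err;
    }
    return ctx;

err:
    thread_ctx_free(ctx);
    return NULL;
}
```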