message
stringlengths
6
474
diff
stringlengths
8
5.22k
[FIX] fix build problem in drv_crypto.c
* 2020-10-14 Dozingfiretruck Porting for stm32wbxx * 2020-11-26 thread-liu add hash * 2020-11-26 thread-liu add cryp + * 2020-12-11 WKJay fix build problem */ #include <rtthread.h> @@ -628,6 +629,7 @@ static void _crypto_reset(struct rt_hwcrypto_ctx *ctx) } } +#if defined(HASH2_IN_DMA_INSTANCE) void HASH2_DMA_IN_IRQHandler(void) { extern DMA_HandleTypeDef hdma_hash_in; @@ -640,7 +642,9 @@ void HASH2_DMA_IN_IRQHandler(void) /* leave interrupt */ rt_interrupt_leave(); } +#endif +#if defined(CRYP2_IN_DMA_INSTANCE) void CRYP2_DMA_IN_IRQHandler(void) { extern DMA_HandleTypeDef hdma_cryp_in; @@ -653,7 +657,9 @@ void CRYP2_DMA_IN_IRQHandler(void) /* leave interrupt */ rt_interrupt_leave(); } +#endif +#if defined (CRYP2_OUT_DMA_INSTANCE) void CRYP2_DMA_OUT_IRQHandler(void) { extern DMA_HandleTypeDef hdma_cryp_out; @@ -666,6 +672,7 @@ void CRYP2_DMA_OUT_IRQHandler(void) /* leave interrupt */ rt_interrupt_leave(); } +#endif static const struct rt_hwcrypto_ops _ops = {
Remove redundant select
@@ -196,7 +196,6 @@ static void compute_lowest_and_highest_weight( // Accumulate on min hit mask = idxv == minidx; - minidx = select(minidx, idxv, mask); vfloat accum = cut_low_weight_err + wt - vfloat(2.0f) * dwt; cut_low_weight_err = select(cut_low_weight_err, accum, mask);
[scripts] Remove warnings in `gen_trace` script
# and computes various performance metrics up to each mcycle CSR read. # Author: Paul Scheffler <[email protected]> +# Samuel Riedel <[email protected]> # TODO: OPER_TYPES and FPU_OPER_TYPES could break: optimization might alter # enum mapping @@ -17,11 +18,9 @@ import math import numpy as np import argparse import csv -from csv import DictWriter from ctypes import c_int32, c_uint32 from collections import deque, defaultdict import warnings -from itertools import compress EXTRA_WB_WARN = 'WARNING: {} transactions still in flight for {}.' @@ -269,7 +268,7 @@ def read_annotations(dict_str: str) -> dict: return { key: int( val, 16) for key, val in re.findall( - r"'([^']+)'\s*:\s*([^\s,]+)", dict_str)} + r"'([^']+)'\s*:\s*(0x[0-9a-fA-F]+)", dict_str)} def annotate_snitch( @@ -803,7 +802,7 @@ def main(): sec['core'] = core_id # Write metrics to CSV if csv_file is not None: - if os.path.split(csv_file)[0] is '': + if os.path.split(csv_file)[0] == '': csv_file = os.path.join(path, csv_file) perf_metrics_to_csv(perf_metrics, csv_file) # Emit metrics
Check buffer upper boundary.
@@ -142,7 +142,8 @@ uart_thread(void *param) { size_t read_bytes = 0; for(;;) { read_bytes += read(uart_fd, &data_buffer[read_bytes], 1); - if(read_bytes > 0 && data_buffer[read_bytes - 1] == '\n') { + /* If a newline is received or receive buffer full, pass data to the library */ + if((read_bytes >= sizeof(data_buffer) - 1) || (read_bytes > 0 && data_buffer[read_bytes - 1] == '\n')) { data_buffer[read_bytes] = '\0'; fprintf(stderr, "[AT <]: \e[32m%s\e[0m", data_buffer); /* Send received data to input processing module */
put tls-use-sni: on a line of its own Sorry about this, this must have happened when I added the entries for padding queries and responses.
@@ -583,6 +583,7 @@ Default is yes. .B pad\-queries\-block\-size: \fI<number> The block size with which to pad queries sent over TLS upstreams. Default is 128. +.TP .B tls\-use\-sni: \fI<yes or no> Enable or disable sending the SNI extension on TLS connections. Default is yes.
Remove redundant python wheel step
@@ -48,13 +48,6 @@ jobs: brew install freeglut lapack libusb openblas brew info libusb brew ls libusb - - name: Build/Test Python Bindings - if: matrix.build_type == 'Release' - run: | - python --version - python -m pip install wheel setuptools - python setup.py install bdist_wheel -v ${{ steps.flags.outputs.SETUP_PY_FLAGS }} - - name: Create Build Environment # Some projects don't allow in-source building, so create a separate build directory # We'll use this as our working directory for all subsequent commands
soapy lms - support using "CALIBRATE" for writeSetting The Tx/Rx named variants were redundant since there is a direction parameter Also clear the channels to cal list since they were just calibrated.
@@ -798,20 +798,22 @@ void SoapyLMS7::writeSetting(const int direction, const size_t channel, const st lms7Device->SetTestSignal(isTx, channel, LMS_TESTSIG_DC, ampl, 0); } - else if (key == "CALIBRATE_TX") + else if (key == "CALIBRATE_TX" or (isTx and key == "CALIBRATE")) { double bw = std::stof(value); SoapySDR::logf(SOAPY_SDR_INFO, "Calibrate Tx %f", bw); if (lms7Device->Calibrate(true, channel, bw, 0)!=0) throw std::runtime_error(lime::GetLastErrorMessage()); + _channelsToCal.erase(std::make_pair(direction, channel)); } - else if (key == "CALIBRATE_RX") + else if (key == "CALIBRATE_RX" or (not isTx and key == "CALIBRATE")) { double bw = std::stof(value); SoapySDR::logf(SOAPY_SDR_INFO, "CalibrateRx %f", bw); if (lms7Device->Calibrate(false, channel, bw, 0)!=0) throw std::runtime_error(lime::GetLastErrorMessage()); + _channelsToCal.erase(std::make_pair(direction, channel)); } else if (key == "ENABLE_GFIR_LPF")
VERSION bump to version 0.11.6
@@ -29,7 +29,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 11) -set(LIBNETCONF2_MICRO_VERSION 5) +set(LIBNETCONF2_MICRO_VERSION 6) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
doc: properly handle last_updated info for new docs Update the last_updated extension to correctly handle when new docs are added and aren't yet commited into git vs. when the list of git folders isn't correct. Previous PR incorrectly handled these as the same.
@@ -80,14 +80,20 @@ def _get_last_updated_from_git(file_path, git_repo, doc_root): # folder on the list continue else: + # we found the .rst file in one of the git_repo paths, either + # use the date of the last commit (from git) or today's date if + # there is no git history for this file. try: last_updated = datetime.strptime(output.decode('utf-8'), time_format).date() return last_updated except: - continue + return date.today() else: + # can't find a .rst file on that git_repo path, try the next continue + # falling out of the loop means we can't find that file in any of the + # git_repo paths return None @@ -134,9 +140,9 @@ def on_html_page_context(app, pagename, templatename, context, doctree): git_repo=app.config.last_updated_git_path, doc_root=app.srcdir) if last_updated_value is None: - #app.logger.warning(f'Could not get the last updated value from git for the following file:\ - # \n {rst_file_path}\n Ensure that you specified the correct folder in last_updated_git_path.') - #context['last_updated'] = None + app.logger.warning(f'Could not get the last updated value from git for the following file:\ + \n {rst_file_path}\n Ensure that you specified the correct folder(s) in last_updated_git_path:\ + \n {app.config.last_updated_git_path}\n') context['last_updated'] = context['last_published'] else: context['last_updated'] = last_updated_value.strftime(date_fmt)
mangoapp: hide fsr sharp when off
@@ -770,11 +770,13 @@ void HudElements::gamescope_fsr(){ ImGui::TableNextColumn(); right_aligned_text(FSR_COLOR, HUDElements.ralign_width, "%s", FSR_TEXT.c_str()); ImGui::TableNextColumn(); + if (g_fsrUpscale){ right_aligned_text(HUDElements.colors.text, HUDElements.ralign_width, "%i", g_fsrSharpness); ImGui::SameLine(0,1.0f); ImGui::PushFont(HUDElements.sw_stats->font1); ImGui::Text("Sharp"); ImGui::PopFont(); + } #endif }
input: fix variable definition if metrics are disabled
@@ -327,7 +327,9 @@ int flb_input_instance_init(struct flb_input_instance *in, struct flb_config *config) { int ret; +#ifdef FLB_HAVE_METRICS const char *name; +#endif struct flb_input_plugin *p = in->p; /* Skip pseudo input plugins */
Support CMS decrypt without a certificate for all key types
@@ -583,19 +583,17 @@ static int cms_kari_set1_pkey(CMS_ContentInfo *cms, CMS_RecipientInfo *ri, STACK_OF(CMS_RecipientEncryptedKey) *reks; CMS_RecipientEncryptedKey *rek; reks = CMS_RecipientInfo_kari_get0_reks(ri); - if (!cert) - return 0; for (i = 0; i < sk_CMS_RecipientEncryptedKey_num(reks); i++) { int rv; rek = sk_CMS_RecipientEncryptedKey_value(reks, i); - if (CMS_RecipientEncryptedKey_cert_cmp(rek, cert)) + if (cert != NULL && CMS_RecipientEncryptedKey_cert_cmp(rek, cert)) continue; CMS_RecipientInfo_kari_set0_pkey(ri, pk); rv = CMS_RecipientInfo_kari_decrypt(cms, ri, rek); CMS_RecipientInfo_kari_set0_pkey(ri, NULL); if (rv > 0) return 1; - return -1; + return cert == NULL ? 0 : -1; } return 0; } @@ -659,8 +657,8 @@ int CMS_decrypt_set1_pkey(CMS_ContentInfo *cms, EVP_PKEY *pk, X509 *cert) return 1; } } - /* If no cert and not debugging always return success */ - if (match_ri && !cert && !debug) { + /* If no cert, key transport and not debugging always return success */ + if (cert == NULL && ri_type == CMS_RECIPINFO_TRANS && match_ri && !debug) { ERR_clear_error(); return 1; }
fixes infinite loop in high-embed collections
@@ -3,7 +3,8 @@ import React, { useCallback, useEffect, useMemo, - useState + useState, + useRef } from 'react'; import styled from 'styled-components'; import UnstyledEmbedContainer from 'react-oembed-container'; @@ -333,8 +334,9 @@ export const RemoteContentOembed = React.forwardRef< RemoteContentOembedProps >((props, ref) => { const { url, renderUrl = false, thumbnail = false, ...rest } = props; - const oembed = useEmbed(url); - const embed = oembed.read(); + + const oembed = useRef(useEmbed(url)) + const embed = oembed.current.read() const fallbackError = new Error('fallback'); const [aspect, width, height] = useMemo(() => {
lwip: add source ip based route for unicast packet Add source IP based route for unicast packets forwarding.
@@ -133,16 +133,14 @@ bool ip4_netif_exist(const ip4_addr_t *src, const ip4_addr_t *dest) return false; } /** - * Source based IPv4 routing hook function. This function works only - * when destination IP is broadcast IP. + * Source based IPv4 routing hook function. */ struct netif * ESP_IRAM_ATTR ip4_route_src_hook(const ip4_addr_t *dest, const ip4_addr_t *src) { struct netif *netif = NULL; - /* destination IP is broadcast IP? */ - if ((src != NULL) && (dest->addr == IPADDR_BROADCAST)) { + if ((src != NULL) && !ip4_addr_isany(src)) { /* iterate through netifs */ for (netif = netif_list; netif != NULL; netif = netif->next) { /* is the netif up, does it have a link and a valid address? */
Use scheduler name rather than tid in log pause/resume message...
@@ -694,7 +694,7 @@ void sts_pause(struct sts *self) { if (csched_rp_dbg_mon(self->rp)) - log_info("sts/mon %d pausing", gettid()); + log_info("sts/mon %s pausing", self->name); m_lock(self); atomic_set(&self->state, SS_PAUSE); @@ -706,7 +706,7 @@ void sts_resume(struct sts *self) { if (csched_rp_dbg_mon(self->rp)) - log_info("sts/mon %d resuming", gettid()); + log_info("sts/mon %s resuming", self->name); m_lock(self); atomic_set(&self->state, SS_RUN);
avf: don't switch process if already running process node Type: fix
@@ -1165,11 +1165,31 @@ error: vlib_log_err (avf_log.class, "%U", format_clib_error, ad->error); } +static void +avf_process_handle_request (vlib_main_t * vm, avf_process_req_t * req) +{ + avf_device_t *ad = avf_get_device (req->dev_instance); + + if (req->type == AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR) + req->error = avf_op_add_del_eth_addr (vm, ad, 1, req->eth_addr, + req->is_add); + else if (req->type == AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE) + req->error = avf_op_config_promisc_mode (vm, ad, req->is_enable); + else + clib_panic ("BUG: unknown avf proceess request type"); + + if (req->calling_process_index != avf_process_node.index) + vlib_process_signal_event (vm, req->calling_process_index, 0, 0); +} + static clib_error_t * avf_process_request (vlib_main_t * vm, avf_process_req_t * req) { uword *event_data = 0; req->calling_process_index = vlib_get_current_process_node_index (vm); + + if (req->calling_process_index != avf_process_node.index) + { vlib_process_signal_event_pointer (vm, avf_process_node.index, AVF_PROCESS_EVENT_REQ, req); @@ -1178,24 +1198,11 @@ avf_process_request (vlib_main_t * vm, avf_process_req_t * req) if (vlib_process_get_events (vm, &event_data) != 0) clib_panic ("avf process node failed to reply in 5 seconds"); vec_free (event_data); - - return req->error; } - -static void -avf_process_handle_request (vlib_main_t * vm, avf_process_req_t * req) -{ - avf_device_t *ad = avf_get_device (req->dev_instance); - - if (req->type == AVF_PROCESS_REQ_ADD_DEL_ETH_ADDR) - req->error = avf_op_add_del_eth_addr (vm, ad, 1, req->eth_addr, - req->is_add); - else if (req->type == AVF_PROCESS_REQ_CONFIG_PROMISC_MDDE) - req->error = avf_op_config_promisc_mode (vm, ad, req->is_enable); else - clib_panic ("BUG: unknown avf proceess request type"); + avf_process_handle_request (vm, req); - vlib_process_signal_event (vm, req->calling_process_index, 0, 0); + return req->error; } static u32
Implemented NM:FM warning when it goes outside of 1:4 to 1:16 ratio for 2LM
@@ -175,6 +175,14 @@ CheckAndConfirmAlignments( UINT32 Index = 0; BOOLEAN CapacityReducedForSKU = FALSE; CHAR16 *pSingleStatusCodeMessage = NULL; + UINT64 TwoLM_NmFmRatioLower = 4; + UINT64 TwoLM_NmFmRatioUpper = 16; + UINT64 TwoLM_FmLowerLimit = 0; + UINT64 TwoLM_FmUpperLimit = 0; + UINT64 TwoLM_NMTotal = 0; + UINT64 TwoLM_FMTotal = 0; + TOPOLOGY_DIMM_INFO *pTopologyDimms = NULL; + UINT16 TopologyDimmsNumber = 0; NVDIMM_ENTRY(); @@ -220,6 +228,37 @@ CheckAndConfirmAlignments( goto Finish; } + ReturnCode = pNvmDimmConfigProtocol->GetSystemTopology(pNvmDimmConfigProtocol, &pTopologyDimms, &TopologyDimmsNumber); + if (EFI_ERROR(ReturnCode)) { + PRINTER_SET_MSG(pCmd->pPrintCtx, ReturnCode, CLI_ERR_INTERNAL_ERROR); + goto Finish; + } + + //sum up the near memory + for (Index = 0; Index < TopologyDimmsNumber; Index++) + { + if (pTopologyDimms[Index].MemoryType == MEMORYTYPE_DDR4) { + TwoLM_NMTotal += pTopologyDimms[Index].VolatileCapacity; + } + } + + //sum up the near memory + for (Index = 0; Index < RegionConfigsCount; Index++) + { + TwoLM_FMTotal += RegionConfigsInfo[Index].VolatileSize; + } + + if (TwoLM_FMTotal > 0) { + TwoLM_FmLowerLimit = TwoLM_NMTotal * TwoLM_NmFmRatioLower; + TwoLM_FmUpperLimit = TwoLM_NMTotal * TwoLM_NmFmRatioUpper; + if (TwoLM_FMTotal > TwoLM_FmUpperLimit) { + PRINTER_PROMPT_MSG(pCmd->pPrintCtx, ReturnCode, L"WARNING! The requested 2LM goal is above the recommended NM:FM limit of 1:%d", TwoLM_NmFmRatioUpper); + } + else if (TwoLM_FMTotal < TwoLM_FmLowerLimit) { + PRINTER_PROMPT_MSG(pCmd->pPrintCtx, ReturnCode, L"WARNING! The requested 2LM goal is below the recommended NM:FM limit of 1:%d", TwoLM_NmFmRatioLower); + } + } + if (VolatilePercent >= VolatilePercentAligned) { PercentDiff = VolatilePercent - VolatilePercentAligned; } else { @@ -277,6 +316,7 @@ CheckAndConfirmAlignments( Finish: FreeCommandStatus(&pCommandStatus); NVDIMM_EXIT_I64(ReturnCode); + FREE_POOL_SAFE(pTopologyDimms); return ReturnCode; }
fix the syslog message ident
@@ -104,30 +104,6 @@ void tcmu_set_log_level(int level) tcmu_log_level = to_syslog_level(level); } -static void open_syslog(const char *ident, int option, int facility) -{ -#define ID_MAX_LEN 16 - char id[ID_MAX_LEN + 1] = {0}, path[128]; - int fd, len = -1; - - if (!ident) { - sprintf(path, "/proc/%d/comm", getpid()); - fd = open(path, O_RDONLY); - if (fd < 0) - return; - len = read(fd, id, ID_MAX_LEN); - if (len < 0) { - close(fd); - return; - } - close(fd); - } else { - strncpy(id, ident, ID_MAX_LEN); - } - - openlog(id, option, facility); -} - static inline uint8_t rb_get_pri(struct log_buf *logbuf, unsigned int cur) { return logbuf->buf[cur][0]; @@ -304,7 +280,7 @@ static void close_fd(void *data) static int create_syslog_output(int pri, const char *ident) { - open_syslog(ident, 0 ,0); + openlog(ident, 0 ,0); if (append_output(output_to_syslog, close_syslog, NULL, pri, TCMU_LOG_TO_SYSLOG, ident) < 0) { closelog();
peview: fix unicode string parameters
@@ -1158,6 +1158,7 @@ BOOLEAN PvpLoadDbgHelp( UNICODE_STRING symbolPathUs = { .Buffer = buffer, + .Length = sizeof(buffer) - sizeof(UNICODE_NULL), .MaximumLength = sizeof(buffer) };
make warnings consistent by switching to fprintf everywhere
@@ -139,7 +139,7 @@ int main(int argc, char **argv) buf = bpf_buffer__new(obj->maps.events, obj->maps.heap); if (!buf) { err = -errno; - warn("failed to create ring/perf buffer: %d\n", err); + fprintf(stderr, "failed to create ring/perf buffer: %d\n", err); goto cleanup; }
validation BUGFIX add meta for userord diff nodes from validation
* * https://opensource.org/licenses/BSD-3-Clause */ +#define _XOPEN_SOURCE 500 /* strdup */ + #include "validation.h" #include <assert.h> @@ -40,16 +42,50 @@ lyd_val_diff_add(const struct lyd_node *node, enum lyd_diff_op op, struct lyd_no { LY_ERR ret = LY_SUCCESS; struct lyd_node *new_diff = NULL; + const struct lyd_node *prev_inst; + char *key = NULL, *value = NULL; + size_t buflen = 0, bufused = 0; assert((op == LYD_DIFF_OP_DELETE) || (op == LYD_DIFF_OP_CREATE)); + if ((op == LYD_DIFF_OP_CREATE) && lysc_is_userordered(node->schema)) { + if (node->prev->next && (node->prev->schema == node->schema)) { + prev_inst = node->prev; + } else { + /* first instance */ + prev_inst = NULL; + } + + if (node->schema->nodetype == LYS_LIST) { + /* generate key meta */ + if (prev_inst) { + LY_CHECK_GOTO(ret = lyd_path_list_predicate(prev_inst, &key, &buflen, &bufused, 0), cleanup); + } else { + key = strdup(""); + LY_CHECK_ERR_GOTO(!key, LOGMEM(LYD_CTX(node)); ret = LY_EMEM, cleanup); + } + } else { + /* generate value meta */ + if (prev_inst) { + value = strdup(LYD_CANON_VALUE(prev_inst)); + LY_CHECK_ERR_GOTO(!value, LOGMEM(LYD_CTX(node)); ret = LY_EMEM, cleanup); + } else { + value = strdup(""); + LY_CHECK_ERR_GOTO(!value, LOGMEM(LYD_CTX(node)); ret = LY_EMEM, cleanup); + } + } + } + /* create new diff tree */ - LY_CHECK_RET(lyd_diff_add(node, op, NULL, NULL, NULL, NULL, NULL, &new_diff)); + LY_CHECK_GOTO(ret = lyd_diff_add(node, op, NULL, NULL, key, value, NULL, &new_diff), cleanup); /* merge into existing diff */ ret = lyd_diff_merge_all(diff, new_diff, 0); +cleanup: lyd_free_tree(new_diff); + free(key); + free(value); return ret; }
Group record instance creation with other functions
@@ -653,6 +653,22 @@ local function rec_create_metatable(rec, ctx) return cstats, mt.name end +local function rec_create_instance(rec, typ, ctx) + local udata = ctx:new_tvar(typ) + local mt_slot = upvalues_slot(rec._upvalue_index, ctx) + local cstats = util.render([[ + ${UDATA_DECL} = luaS_newudata(L, ${MEM_SIZE}, ${UV_SIZE}); + ${UDATA}->metatable = ${GET_MT_SLOT}; + ]], { + UDATA_DECL = c_declaration(udata), + MEM_SIZE = rec_mem_size(rec), + UV_SIZE = rec_gc_size(rec), + UDATA = udata.name, + GET_MT_SLOT = get_slot(rec_metatable_type(), mt_slot), + }) + return cstats, udata.name +end + -- -- code generation -- @@ -1877,32 +1893,18 @@ generate_exp = function(exp, ctx) table.insert(body, gc_cond_gc(ctx)) local rec = exp._type.type_decl - local udata = ctx:new_tvar(exp._type) - table.insert(body, util.render([[ - ${UDATA_DECL} = luaS_newudata(L, ${MEM_SIZE}, ${UV_SIZE}); - ]], { - UDATA_DECL = c_declaration(udata), - MEM_SIZE = rec_mem_size(rec), - UV_SIZE = rec_gc_size(rec), - })) - - local mt_slot = upvalues_slot(rec._upvalue_index, ctx) - table.insert(body, util.render([[ - ${UDATA}->metatable = ${GET_MT_SLOT}; - ]], { - UDATA = udata.name, - GET_MT_SLOT = get_slot(rec_metatable_type(), mt_slot), - })) + local cstats, udata = rec_create_instance(rec, exp._type, ctx) + table.insert(body, cstats) for _, field in ipairs(exp.fields) do local field_cstats, field_cvalue = generate_exp(field.exp, ctx) local set_field = rec_set_field( - rec, udata.name, field.name, field_cvalue, ctx) + rec, udata, field.name, field_cvalue, ctx) table.insert(body, field_cstats) table.insert(body, set_field) end - return table.concat(body, "\n"), udata.name + return table.concat(body, "\n"), udata else error("impossible")
feat: bot-log.c subscribes to every possible event
#include <stdio.h> #include <stdlib.h> #include <inttypes.h> +#include <limits.h> #include <string.h> #include <assert.h> @@ -56,6 +57,7 @@ int main(int argc, char *argv[]) struct discord *client = discord_config_init(config_file); assert(NULL != client); + discord_add_intents(client, 32767); discord_set_on_ready(client, &on_ready); discord_set_on_guild_member_add(client, &on_guild_member_add); discord_set_on_guild_member_update(client, &on_guild_member_update);
doc: Install bison to support kconfig Current kconfig-frontends is not supporting byacc anymore. Observed issue using byacc (20140715-1): kconfig-frontends/kconfig-frontends/libs/parser/yconf.y", syntax error %destructor { Problem has been solved on Ubuntu 16.04.2 (x86_64) using bison-2:3.0.4.dfsg-1 instead.
@@ -78,9 +78,9 @@ Please keep in mind that we are actively working on board configurations, and wi ## APPENDIX ### Kconfig-frontends Installation -1. The *byacc*, *flex*, *gperf* and *libncurses5-dev* packages should be installed. +1. The *bison* (or byacc if supported), *flex*, *gperf* and *libncurses5-dev* packages should be installed: ```bash -sudo apt-get install byacc flex gperf libncurses5-dev +sudo apt-get install bison flex gperf libncurses5-dev ``` 2. Download and untar *kconfig-frontends* package.
RTOS2/RTX5: Enhanced ThreadTerminate to not allow termination of idle or timer threads.
@@ -1194,6 +1194,13 @@ osStatus_t svcRtxThreadTerminate (osThreadId_t thread_id) { return osErrorParameter; } + // Don't allow termination of timer or idle thread + if ((thread == osRtxInfo.timer.thread) || + (thread == osRtxInfo.thread.idle)) { + EvrRtxThreadError(thread, osErrorParameter); + return osErrorResource; + } + // Check object state switch (thread->state & osRtxThreadStateMask) { case osRtxThreadRunning:
Add missing log param and assert.
@@ -53,6 +53,7 @@ storageDriverPosixFileWriteNew( bool syncPath, bool atomic) { FUNCTION_LOG_BEGIN(logLevelTrace); + FUNCTION_LOG_PARAM(STORAGE_DRIVER_POSIX, storage); FUNCTION_LOG_PARAM(STRING, name); FUNCTION_LOG_PARAM(MODE, modeFile); FUNCTION_LOG_PARAM(MODE, modePath); @@ -62,6 +63,7 @@ storageDriverPosixFileWriteNew( FUNCTION_LOG_PARAM(BOOL, atomic); FUNCTION_LOG_END(); + ASSERT(storage != NULL); ASSERT(name != NULL); ASSERT(modeFile != 0); ASSERT(modePath != 0);
Update emscripten build.
@@ -111,11 +111,14 @@ EMCFLAGS=-std=c99 -Wall -Wextra -Isrc/include -O2 \ JANET_EMTARGET=build/janet.js JANET_WEB_SOURCES=$(JANET_CORE_SOURCES) $(JANET_WEBCLIENT_SOURCES) JANET_EMOBJECTS=$(patsubst src/%.c,build/%.bc,$(JANET_WEB_SOURCES)) \ - build/webinit.gen.bc build/core.gen.bc + build/webinit.gen.bc build/core_image.bc %.gen.bc: %.gen.c $(EMCC) $(EMCFLAGS) -o $@ -c $< +build/core_image.bc: build/core_image.c $(JANET_HEADERS) $(JANET_LOCAL_HEADERS) + $(EMCC) $(EMCFLAGS) -o $@ -c $< + build/%.bc: src/%.c $(JANET_HEADERS) $(JANET_LOCAL_HEADERS) $(EMCC) $(EMCFLAGS) -o $@ -c $<
bugID:18002625:[breeze] Add awss dependence on auth.
@@ -27,15 +27,19 @@ endif $(NAME)_SOURCES-y += api/breeze_export.c +bz_en_auth ?= 1 +ifeq ($(bz_en_auth), 1) +GLOBAL_DEFINES-y += EN_AUTH +$(NAME)_SOURCES-y += core/auth.c +endif + bz_en_awss ?= 1 ifeq ($(bz_en_awss), 1) +ifeq ($(bz_en_auth), 0) +$(error awss need authentication, please set "bz_en_auth = 1") +endif GLOBAL_DEFINES-y += EN_COMBO_NET $(NAME)_SOURCES-y += core/extcmd.c $(NAME)_SOURCES-y += api/breeze_awss_export.c endif -bz_en_auth ?= 1 -ifeq ($(bz_en_auth), 1) -GLOBAL_DEFINES-y += EN_AUTH -$(NAME)_SOURCES-y += core/auth.c -endif
add warewulf upgrade info
@@ -9,6 +9,11 @@ Version 1.3.6 (07 November 2018) installed system, administrators can opt-in to enable the gnu8 variant. This procedure is detailed in Appendix B of the OpenHPC Install Guide(s). + * Warewulf now packages iPXE in an archetecture-specific RPM. Upgrading from + previous versions will require installing + `warewulf-provision-server-ipxe-aarch64-ohpc` or + `warewulf-provision-server-ipxe-x86_64-ohpc` + * There are significant changes included in the warewulf-httpd.conf file that ships with the warewulf-provision-server-ohpc package. If upgrading from a version prior to OpenHPC v1.3, the updated config file will be saved as /etc/httpd/conf.d/warewulf-httpd.conf.rpmnew locally. You will need to copy this new version to the
OpenCanopy: Fix boot label scroll for UIScale 2
@@ -109,7 +109,7 @@ InternalBootPickerAnimateLabel ( // // If the second drawn label reaches the front, switch back to the first. // - if (Entry->LabelOffset == -(INT16) (Entry->Label.Width + BOOT_LABEL_WRAPAROUND_PADDING)) { + if (Entry->LabelOffset <= -(INT16) (Entry->Label.Width + BOOT_LABEL_WRAPAROUND_PADDING * DrawContext->Scale)) { Entry->LabelOffset = 0; mBootPickerLabelScrollHoldTime = 0; } @@ -518,7 +518,7 @@ InternalBootPickerEntryDraw ( DrawContext, BaseX, BaseY, - (INT64) Entry->LabelOffset + Entry->Label.Width + BOOT_LABEL_WRAPAROUND_PADDING, + (INT64) Entry->LabelOffset + Entry->Label.Width + BOOT_LABEL_WRAPAROUND_PADDING * DrawContext->Scale, This->Height - Label->Height, OffsetX, OffsetY,
YAML Smith: Fix output for nested keys
@@ -72,11 +72,11 @@ NameIterator relativeKeyIterator (CppKey const & key, CppKey const & parent) return keyIterator; } -bool sameLevel (CppKey const & key1, CppKey const & key2) +bool sameLevelOrBelow (CppKey const & key1, CppKey const & key2) { if (!key1 || !key2) return false; - return key1.getFullName ().substr (0, key1.getFullNameSize () - key1.getBaseNameSize ()) == + return key2.isBelow (key1) || key1.getFullName ().substr (0, key1.getFullNameSize () - key1.getBaseNameSize ()) == key2.getFullName ().substr (0, key2.getFullNameSize () - key2.getBaseNameSize ()); } @@ -163,13 +163,13 @@ int elektraYamlsmithSet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key for (CppKey last = nullptr; keys.next (); last = keys.current ()) { string indent; - bool sameLevelAsLast = sameLevel (keys.current (), last); + bool sameOrBelowLast = sameLevelOrBelow (last, keys.current ()); auto relative = relativeKeyIterator (keys.current (), parent); auto baseName = keys.current ().rbegin (); while (*relative != *baseName) { - if (!sameLevelAsLast) file << indent << *relative << ":" << endl; + if (!sameOrBelowLast) file << indent << *relative << ":" << endl; relative++; indent += " "; }
fixes bug with va_list
@@ -214,10 +214,11 @@ char* ipc_read(int _sock) { * @return 0 on success; -1 on failure */ int ipc_write(int _sock, char* fmt, ...) { - va_list args; + va_list args, original; + va_start(original, fmt); va_start(args, fmt); char* msg = calloc(sizeof(char), vsnprintf(NULL, 0, fmt, args)+1); - vsprintf(msg, fmt, args); + vsprintf(msg, fmt, original); syslog(LOG_AUTHPRIV|LOG_DEBUG, "ipc writing to socket %d\n",_sock); syslog(LOG_AUTHPRIV|LOG_DEBUG, "ipc write %s\n",msg); if (write(_sock, msg, strlen(msg)+1) < 0) {
Create go package via CMake.
@@ -1228,6 +1228,29 @@ if(${TINYSPLINE_BINDING_REQUESTED}) ${CMAKE_COMMAND} -E copy "${TINYSPLINE_GO_SOURCE_DIRECTORY}/${TINYSPLINE_GO_CMAKE_TARGET}.go" "${TINYSPLINE_GO_INTERFACE_FILE}") + add_custom_command( + TARGET ${TINYSPLINE_GO_CMAKE_TARGET} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E make_directory + "${TINYSPLINE_BINARY_DIR}/go/${TINYSPLINE_PLATFORM}" + COMMAND + ${CMAKE_COMMAND} -E copy + "${TINYSPLINE_GO_INTERFACE_FILE}" + "${TINYSPLINE_BINARY_DIR}/go/" + COMMAND + ${CMAKE_COMMAND} -E copy + "${TINYSPLINE_OUTPUT_DIRECTORY}/$<TARGET_FILE_NAME:${TINYSPLINE_GO_CMAKE_TARGET}>" + "${TINYSPLINE_BINARY_DIR}/go/${TINYSPLINE_PLATFORM}") + add_custom_command( + TARGET ${TINYSPLINE_GO_CMAKE_TARGET} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E tar "cvf" + "${TINYSPLINE_BINARY_DIR}/tinyspline-go.zip" + --format=zip + -- . + WORKING_DIRECTORY "${TINYSPLINE_BINARY_DIR}/go") endif() # Java
Implement resource-ids for Create-Job, Print-Job, and Print-URI for 'template-job' resources (Issue
@@ -8664,8 +8664,11 @@ valid_job_attributes( int i, /* Looping var */ count, /* Number of values */ valid = 1; /* Valid attributes? */ - ipp_attribute_t *attr, /* Current attribute */ - *supported; /* xxx-supported attribute */ + ipp_attribute_t *attr, /* Request attribute */ + *resource_ids, /* resources-ids attribute */ + *supported; /* Supported attribute */ + int resource_id; /* Resource ID value */ + server_resource_t *resource; /* Resource */ ipp_op_t op = ippGetOperation(client->request); /* Current operation */ @@ -8678,6 +8681,60 @@ valid_job_attributes( supported = ippFindAttribute(client->printer->pinfo.attrs, "job-creation-attributes-suppored", IPP_TAG_KEYWORD); + if ((resource_ids = ippFindAttribute(client->request, "resource-ids", IPP_TAG_INTEGER)) != NULL) + { + if (ippGetGroupTag(resource_ids) != IPP_TAG_OPERATION) + { + serverRespondIPP(client, IPP_STATUS_ERROR_BAD_REQUEST, "The 'resource-ids' attribute is not in the operation group."); + serverRespondUnsupported(client, resource_ids); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + else if ((count = ippGetCount(resource_ids)) > SERVER_RESOURCES_MAX) + { + serverRespondIPP(client, IPP_STATUS_ERROR_ATTRIBUTES_OR_VALUES, "Too many resources (%d) specified.", count); + serverRespondUnsupported(client, resource_ids); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + + for (i = 0; i < count; i ++) + { + resource_id = ippGetInteger(resource_ids, i); + + if ((resource = serverFindResourceById(resource_id)) == NULL) + { + serverRespondIPP(client, IPP_STATUS_ERROR_ATTRIBUTES_OR_VALUES, "Resource #%d not found.", resource_id); + serverRespondUnsupported(client, resource_ids); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + else if (resource->state != IPP_RSTATE_INSTALLED) + { + serverRespondIPP(client, IPP_STATUS_ERROR_ATTRIBUTES_OR_VALUES, "Resource #%d is not installed (%s).", resource_id, ippEnumString("resource-state", (int)resource->state)); + 
serverRespondUnsupported(client, resource_ids); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + else if (!strcmp(resource->type, "template-job")) + { + if (!apply_template_attributes(client->request, IPP_TAG_JOB, resource, supported, sizeof(job_values) / sizeof(job_values[0]), job_values)) + { + serverRespondIPP(client, IPP_STATUS_ERROR_INTERNAL, "Unable to apply template-job resource #%d: %s", resource_id, cupsLastErrorString()); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + } + else + { + serverRespondIPP(client, IPP_STATUS_ERROR_ATTRIBUTES_OR_VALUES, "Resource #%d is the wrong type (%s).", resource_id, resource->type); + serverRespondUnsupported(client, resource_ids); + _cupsRWUnlock(&client->printer->rwlock); + return (0); + } + } + } + if (!valid_values(client, IPP_TAG_JOB, supported, sizeof(job_values) / sizeof(job_values[0]), job_values)) { _cupsRWUnlock(&client->printer->rwlock);
fcb/test: Assert on flash_area_erase error
@@ -260,10 +260,12 @@ void config_wipe_srcs(void) void config_wipe_fcb(struct flash_area *fa, int cnt) { + int rc; int i; for (i = 0; i < cnt; i++) { - flash_area_erase(&fa[i], 0, fa[i].fa_size); + rc = flash_area_erase(&fa[i], 0, fa[i].fa_size); + TEST_ASSERT(rc == 0); } }
Fix AXI4 example. I accidentally stumbled into a working AXI4 configuration by multiplying pbus.beatBytes by 8, but it was fragile. This is the "right way" to add an AXI4 peripheral.
@@ -105,9 +105,15 @@ trait HasPeripheryPWMAXI4 { this: BaseSubsystem => private val portName = "pwm" val pwm = LazyModule(new PWMAXI4( - PWMParams(address, 8 * pbus.beatBytes))(p)) + PWMParams(address, pbus.beatBytes))(p)) - pbus.toFixedWidthSlave(Some(portName)) { pwm.node := TLToAXI4() } + pbus.toSlave(Some(portName)) { + pwm.node := + AXI4Buffer () := + TLToAXI4() := + // toVariableWidthSlave doesn't use holdFirstDeny, which TLToAXI4() needs + TLFragmenter(pbus.beatBytes, pbus.blockBytes, holdFirstDeny = true) + } } trait HasPeripheryPWMAXI4ModuleImp extends LazyModuleImp {
Tests: vpp_pg_interface. Don't rewrite Dot1AD ethertype. Type: Refactor
@@ -423,10 +423,6 @@ class VppPGInterface(VppInterface): pg_interface.name) return arp_reply = captured_packet.copy() # keep original for exception - # Make Dot1AD packet content recognizable to scapy - if arp_reply.type == 0x88a8: - arp_reply.type = 0x8100 - arp_reply = Ether(scapy.compat.raw(arp_reply)) try: if arp_reply[ARP].op == ARP.is_at: self.test.logger.info("VPP %s MAC address is %s " % @@ -470,13 +466,6 @@ class VppPGInterface(VppInterface): "Timeout while waiting for NDP response") raise ndp_reply = captured_packet.copy() # keep original for exception - # Make Dot1AD packet content recognizable to scapy - if ndp_reply.type == 0x88a8: - self._test.logger.info( - "Replacing EtherType: 0x88a8 with " - "0x8100 and regenerating Ethernet header. ") - ndp_reply.type = 0x8100 - ndp_reply = Ether(scapy.compat.raw(ndp_reply)) try: ndp_na = ndp_reply[ICMPv6ND_NA] opt = ndp_na[ICMPv6NDOptDstLLAddr]
Do some re-styling in pe.c
@@ -486,7 +486,8 @@ void pe_parse_version_info( { PVERSION_INFO version_info; - int64_t version_info_offset = pe_rva_to_offset(pe, yr_le32toh(rsrc_data->OffsetToData)); + int64_t version_info_offset = pe_rva_to_offset( + pe, yr_le32toh(rsrc_data->OffsetToData)); if (version_info_offset < 0) return; @@ -582,7 +583,10 @@ int pe_collect_resources( int64_t offset = pe_rva_to_offset(pe, yr_le32toh(rsrc_data->OffsetToData)); - if (offset < 0 || !fits_in_pe(pe, pe->data + offset, yr_le32toh(rsrc_data->Size))) + if (offset < 0) + return RESOURCE_CALLBACK_CONTINUE; + + if (!fits_in_pe(pe, pe->data + offset, yr_le32toh(rsrc_data->Size))) return RESOURCE_CALLBACK_CONTINUE; set_integer( @@ -688,7 +692,8 @@ IMPORT_EXPORT_FUNCTION* pe_parse_import_descriptor( PIMAGE_THUNK_DATA64 thunks64 = (PIMAGE_THUNK_DATA64)(pe->data + offset); while (struct_fits_in_pe(pe, thunks64, IMAGE_THUNK_DATA64) && - yr_le64toh(thunks64->u1.Ordinal) != 0 && num_functions < MAX_PE_IMPORTS) + yr_le64toh(thunks64->u1.Ordinal) != 0 && + num_functions < MAX_PE_IMPORTS) { char* name = NULL; uint16_t ordinal = 0; @@ -942,13 +947,14 @@ IMPORT_EXPORT_FUNCTION* pe_parse_exports( PIMAGE_DATA_DIRECTORY directory; PIMAGE_EXPORT_DIRECTORY exports; + DWORD* names; WORD* ordinals; int64_t offset; uint32_t i; size_t remaining; - uint8_t* eos; + int num_exports = 0; // If not a PE file, return UNDEFINED @@ -967,8 +973,7 @@ IMPORT_EXPORT_FUNCTION* pe_parse_exports( if (offset < 0) return NULL; - exports = (PIMAGE_EXPORT_DIRECTORY) \ - (pe->data + offset); + exports = (PIMAGE_EXPORT_DIRECTORY) (pe->data + offset); if (!struct_fits_in_pe(pe, exports, IMAGE_EXPORT_DIRECTORY)) return NULL; @@ -993,6 +998,7 @@ IMPORT_EXPORT_FUNCTION* pe_parse_exports( // Walk the number of functions, not the number of names as each exported // symbol has an ordinal value, but names are optional. + for (i = 0; i < yr_le32toh(exports->NumberOfFunctions); i++) { char* name;
fix wrong arch report for `os.versioninfo()` xmake on Windows built for x86_64 arch will report i386 calling `os.versioninfo()`. This may be a history problem that in the past there's no 64-bit xmake on Windows and some other places should also do this modify
@@ -249,7 +249,7 @@ tb_int_t xm_os_versioninfo(lua_State* lua) lua_pushstring(lua, "arch"); // init architecture -#if defined(TB_ARCH_x86) || defined(TB_CONFIG_OS_WINDOWS) +#if defined(TB_ARCH_x86) lua_pushstring(lua, "i386"); #elif defined(TB_ARCH_x64) lua_pushstring(lua, "x86_64");
avoid waking up for bad reasons
@@ -267,7 +267,7 @@ protoop_arg_t set_nxt_wake_time(picoquic_cnx_t *cnx) if (pd != NULL && pd->state != 2) { continue; } - for (picoquic_packet_context_enum pc = 0; pc < picoquic_nb_packet_context; pc++) { + for (picoquic_packet_context_enum pc = 0; pc < picoquic_nb_packet_context && (i == 0 || pc == picoquic_packet_context_application); pc++) { pkt_ctx = (picoquic_packet_context_t *) get_path(path_x, AK_PATH_PKT_CTX, pc); picoquic_packet_t* p = (picoquic_packet_t *) get_pkt_ctx(pkt_ctx, AK_PKTCTX_RETRANSMIT_OLDEST); @@ -309,7 +309,7 @@ protoop_arg_t set_nxt_wake_time(picoquic_cnx_t *cnx) uint64_t pacing_margin_micros_x = (uint64_t) get_path(path_x, AK_PATH_PACING_MARGIN_MICROS, 0); if (next_pacing_time_x < current_time + pacing_margin_micros_x) { #endif - PROTOOP_PRINTF(cnx, "Not blocked because should max data %d tls ready %d cnx_state %d stream %p has_cc %d\n", should_send_max_data, is_tls_stream_ready, cnx_state, stream, has_cc_to_send); + PROTOOP_PRINTF(cnx, "Not blocked because path %p has should max data %d tls ready %d cnx_state %d stream %p has_cc %d cwin %d BIF %d\n", path_x, should_send_max_data, is_tls_stream_ready, cnx_state, stream, has_cc_to_send, cwin_x, bytes_in_transit_x); blocked = 0; #ifdef PACING } @@ -402,7 +402,7 @@ protoop_arg_t set_nxt_wake_time(picoquic_cnx_t *cnx) uint64_t keep_alive_interval = (uint64_t) get_cnx(cnx, AK_CNX_KEEP_ALIVE_INTERVAL, 0); if (keep_alive_interval != 0 && next_time > (latest_progress_time + keep_alive_interval)) { next_time = latest_progress_time + keep_alive_interval; - PROTOOP_PRINTF(cnx, "Keep alive for path %p is %lu\n", path_x, next_time); + //PROTOOP_PRINTF(cnx, "Keep alive for path %p is %lu\n", path_x, next_time); } } }
VERSION bump to version 1.4.57
@@ -37,7 +37,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 1) set(SYSREPO_MINOR_VERSION 4) -set(SYSREPO_MICRO_VERSION 56) +set(SYSREPO_MICRO_VERSION 57) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
Replace configs_enabled check with query_compile_time_config
@@ -45,6 +45,7 @@ fi : ${P_SRV:=../programs/ssl/ssl_server2} : ${P_CLI:=../programs/ssl/ssl_client2} : ${P_PXY:=../programs/test/udp_proxy} +: ${P_QUERY:=../programs/test/query_compile_time_config} : ${OPENSSL_CMD:=openssl} # OPENSSL would conflict with the build system : ${GNUTLS_CLI:=gnutls-cli} : ${GNUTLS_SERV:=gnutls-serv} @@ -194,10 +195,7 @@ esac # testing. Skip non-boolean options (with something other than spaces # and a comment after "#define SYMBOL"). The variable contains a # space-separated list of symbols. -CONFIGS_ENABLED=" $(<"$CONFIG_H" \ - sed -n 's!^ *#define *\([A-Za-z][0-9A-Z_a-z]*\) *\(/*\)*!\1!p' | - tr '\n' ' ')" - +CONFIGS_ENABLED="$($P_QUERY -l)" # Skip next test; use this macro to skip tests which are legitimate # in theory and expected to be re-introduced at some point, but # aren't expected to succeed at the moment due to problems outside @@ -210,6 +208,7 @@ skip_next_test() { requires_config_enabled() { case $CONFIGS_ENABLED in *" $1 "*) :;; + *" $1="*) :;; *) SKIP_NEXT="YES";; esac } @@ -218,6 +217,7 @@ requires_config_enabled() { requires_config_disabled() { case $CONFIGS_ENABLED in *" $1 "*) SKIP_NEXT="YES";; + *" $1="*) SKIP_NEXT="YES";; esac }
fix: add BoatIotSdkInit(); in test_001CreateWallet_0004CreateLoadWalletSuccess
@@ -157,6 +157,7 @@ START_TEST(test_001CreateWallet_0004CreateLoadWalletSuccess) { BSINT32 rtnVal; BoatPlatONWallet *g_platon_wallet_ptr = NULL; + BoatIotSdkInit(); extern BoatIotSdkContext g_boat_iot_sdk_context; /* 1. execute unit test */
stm32: Add LSI config option.
@@ -243,6 +243,10 @@ void SystemClock_Config(void) RCC_OscInitStruct.HSEState = OMV_OSC_HSE_STATE; RCC_OscInitStruct.OscillatorType |= RCC_OSCILLATORTYPE_HSE; #endif + #if defined(OMV_OSC_LSI_STATE) + RCC_OscInitStruct.LSIState = OMV_OSC_LSI_STATE; + RCC_OscInitStruct.OscillatorType |= RCC_OSCILLATORTYPE_LSI; + #endif #if defined(OMV_OSC_HSI_STATE) RCC_OscInitStruct.HSIState = OMV_OSC_HSI_STATE; RCC_OscInitStruct.OscillatorType |= RCC_OSCILLATORTYPE_HSI;
Remove duplicate variables
@@ -429,7 +429,6 @@ NTSTATUS PhpUpdateMemoryRegionTypes( PVOID processHeapsPtr; PVOID *processHeaps; PVOID apiSetMap; - ULONG i; #ifdef _WIN64 PVOID peb32; ULONG processHeapsPtr32; @@ -691,8 +690,8 @@ NTSTATUS PhpUpdateMemoryRegionTypes( if (cfgBitmapAddress && (cfgBitmapMemoryItem = PhLookupMemoryItemList(List, cfgBitmapAddress))) { - PLIST_ENTRY listEntry = &cfgBitmapMemoryItem->ListEntry; - PPH_MEMORY_ITEM memoryItem = CONTAINING_RECORD(listEntry, PH_MEMORY_ITEM, ListEntry); + listEntry = &cfgBitmapMemoryItem->ListEntry; + memoryItem = CONTAINING_RECORD(listEntry, PH_MEMORY_ITEM, ListEntry); while (memoryItem->AllocationBaseItem == cfgBitmapMemoryItem) { @@ -709,8 +708,8 @@ NTSTATUS PhpUpdateMemoryRegionTypes( // Note: Wow64 processes on 64bit also have CfgBitmap regions. if (isWow64 && cfgBitmapWow64Address && (cfgBitmapMemoryItem = PhLookupMemoryItemList(List, cfgBitmapWow64Address))) { - PLIST_ENTRY listEntry = &cfgBitmapMemoryItem->ListEntry; - PPH_MEMORY_ITEM memoryItem = CONTAINING_RECORD(listEntry, PH_MEMORY_ITEM, ListEntry); + listEntry = &cfgBitmapMemoryItem->ListEntry; + memoryItem = CONTAINING_RECORD(listEntry, PH_MEMORY_ITEM, ListEntry); while (memoryItem->AllocationBaseItem == cfgBitmapMemoryItem) {
Download PyPi index in background
@@ -54,8 +54,7 @@ import TrueTime /// Updates the PyPi index cache. func updatePyPiCache() { - - + UIApplication.shared.beginBackgroundTask(expirationHandler: nil) URLSession.shared.downloadTask(with: URL(string: "https://pypi.org/simple")!) { (fileURL, _, error) in if let error = error {
Don't pass a negative value to __redisAsyncCommand if redisFormatSdsCommandArgv fails
@@ -676,6 +676,8 @@ int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *priv int len; int status; len = redisFormatSdsCommandArgv(&cmd,argc,argv,argvlen); + if (len < 0) + return REDIS_ERR; status = __redisAsyncCommand(ac,fn,privdata,cmd,len); sdsfree(cmd); return status;
cherry: update USB PID PID allocated in cl/370096781 TEST=make BRANCH=none Tested-by: Ting Shen
#define CONFIG_USB_PD_TCPMV2 #define CONFIG_USB_PD_TRY_SRC #define CONFIG_USB_PD_VBUS_DETECT_PPC -#define CONFIG_USB_PID 0x5566 /* TODO: update PID */ +#define CONFIG_USB_PID 0x5054 #define CONFIG_USB_POWER_DELIVERY #define PD_MAX_CURRENT_MA 3000
wait before sending HID report
+#include <zephyr.h> #include <device.h> #include <init.h> @@ -15,6 +16,18 @@ static enum usb_dc_status_code usb_status; static struct device *hid_dev; +static K_SEM_DEFINE(hid_sem, 1, 1); + +static void in_ready_cb(void) +{ + k_sem_give(&hid_sem); +} + +static const struct hid_ops ops = +{ + .int_in_ready = in_ready_cb, +}; + int zmk_usb_hid_send_report(const u8_t *report, size_t len) { if (usb_status == USB_DC_SUSPEND) @@ -22,6 +35,7 @@ int zmk_usb_hid_send_report(const u8_t *report, size_t len) return usb_wakeup_request(); } + k_sem_take(&hid_sem, K_FOREVER); return hid_int_ep_write(hid_dev, report, len, NULL); } @@ -43,7 +57,7 @@ static int zmk_usb_hid_init(struct device *_arg) usb_hid_register_device(hid_dev, zmk_hid_report_desc, sizeof(zmk_hid_report_desc), - NULL); + &ops); usb_hid_init(hid_dev);
Tests: ctests: CMakeList.txt: no continue()
file (GLOB TESTS test_*.c) foreach (file ${TESTS}) get_filename_component (name ${file} NAME_WE) - if (NOT ENABLE_OPTIMIZATIONS AND ${name} MATCHES "opmphm") - continue() - endif(NOT ENABLE_OPTIMIZATIONS AND ${name} MATCHES "opmphm") + if (ENABLE_OPTIMIZATIONS OR NOT ${name} MATCHES "opmphm") do_test (${name}) target_link_elektra(${name} elektra-kdb) + endif(ENABLE_OPTIMIZATIONS OR NOT ${name} MATCHES "opmphm") endforeach (file ${TESTS}) target_link_elektra(test_array elektra-ease)
Bazel support: Bump OpenEXR version to 3.2
@@ -11,7 +11,7 @@ config_setting( generate_header( name = "IexConfig.h", substitutions = { - "@IEX_INTERNAL_NAMESPACE@": "Iex_3_1", + "@IEX_INTERNAL_NAMESPACE@": "Iex_3_2", "@IEX_NAMESPACE_CUSTOM@": "0", "@IEX_NAMESPACE@": "Iex", }, @@ -31,7 +31,7 @@ generate_header( generate_header( name = "IlmThreadConfig.h", substitutions = { - "@ILMTHREAD_INTERNAL_NAMESPACE@": "IlmThread_3_1", + "@ILMTHREAD_INTERNAL_NAMESPACE@": "IlmThread_3_2", "@ILMTHREAD_NAMESPACE_CUSTOM@": "0", "@ILMTHREAD_NAMESPACE@": "IlmThread", "#cmakedefine01 ILMTHREAD_HAVE_POSIX_SEMAPHORES": "#define ILMTHREAD_HAVE_POSIX_SEMAPHORES 0", @@ -44,15 +44,15 @@ generate_header( name = "OpenEXRConfig.h", substitutions = { "@OPENEXR_IMF_NAMESPACE@": "Imf", - "@OPENEXR_INTERNAL_IMF_NAMESPACE@": "Imf_3_1", - "@OPENEXR_LIB_VERSION@": "3.1.0", - "@OPENEXR_NAMESPACE_CUSTOM@": "3.1.0", - "@OPENEXR_PACKAGE_NAME@": "OpenEXR 3.1.0", + "@OPENEXR_INTERNAL_IMF_NAMESPACE@": "Imf_3_2", + "@OPENEXR_LIB_VERSION@": "3.2.0", + "@OPENEXR_NAMESPACE_CUSTOM@": "3.2.0", + "@OPENEXR_PACKAGE_NAME@": "OpenEXR 3.2.0", "@OPENEXR_VERSION_EXTRA@": "", "@OPENEXR_VERSION_MAJOR@": "3", - "@OPENEXR_VERSION_MINOR@": "1", + "@OPENEXR_VERSION_MINOR@": "2", "@OPENEXR_VERSION_PATCH@": "0", - "@OPENEXR_VERSION@": "3.1.0", + "@OPENEXR_VERSION@": "3.2.0", "#cmakedefine OPENEXR_ENABLE_API_VISIBILITY": "#define OPENEXR_ENABLE_API_VISIBILITY", "#cmakedefine OPENEXR_HAVE_LARGE_STACK 1": "/* #undef OPENEXR_HAVE_LARGE_STACK */", },
Compile: Update style of link with `prettier`
@@ -505,7 +505,7 @@ This value specifies the root directory of a local copy of the [Google Test][] f It can be provided as CMake or environment variable. If both options are provided the value passed via CMake takes precedence. -[Google Test]: https://github.com/google/googletest +[google test]: https://github.com/google/googletest It is recommended that you browse through all of the options using `ccmake`. Afterwards press `c` again (maybe multiple times until all variables are
uart: Add missing critical section wrappers around rx_buffered_len The missing barriers caused uart_get_buffered_data_len() to (very rarely) return a garbage value. When used in MicroPython, though, this caused select() to return and a subsequent read() to stall indefinitely until a char was actually available. Closes Merges
@@ -1295,7 +1295,9 @@ esp_err_t uart_get_buffered_data_len(uart_port_t uart_num, size_t *size) { ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error"); ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error"); + UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock)); *size = p_uart_obj[uart_num]->rx_buffered_len; + UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock)); return ESP_OK; } @@ -1336,12 +1338,15 @@ esp_err_t uart_flush_input(uart_port_t uart_num) } data = (uint8_t*) xRingbufferReceive(p_uart->rx_ring_buf, &size, (portTickType) 0); if(data == NULL) { + UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock)); if( p_uart_obj[uart_num]->rx_buffered_len != 0 ) { - ESP_LOGE(UART_TAG, "rx_buffered_len error"); p_uart_obj[uart_num]->rx_buffered_len = 0; + UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock)); + // this must be called outside the critical section + ESP_LOGE(UART_TAG, "rx_buffered_len error"); + UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock)); } //We also need to clear the `rx_buffer_full_flg` here. - UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock)); p_uart_obj[uart_num]->rx_buffer_full_flg = false; UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock)); break;
Rework the audio_ports_config_info documentation
/// extension where all busses can be retrieved in the same way as in the audio-port extension. static CLAP_CONSTEXPR const char CLAP_EXT_AUDIO_PORTS_CONFIG[] = "clap.audio-ports-config"; -static CLAP_CONSTEXPR const char CLAP_EXT_AUDIO_PORTS_CONFIG_INFO[] = "clap.audio-ports-config-info"; +static CLAP_CONSTEXPR const char CLAP_EXT_AUDIO_PORTS_CONFIG_INFO[] = + "clap.audio-ports-config-info/draft-0"; #ifdef __cplusplus extern "C" { @@ -70,15 +71,17 @@ typedef struct clap_plugin_audio_ports_config { bool(CLAP_ABI *select)(const clap_plugin_t *plugin, clap_id config_id); } clap_plugin_audio_ports_config_t; +// Extended config info typedef struct clap_plugin_audio_ports_config_info { - // get info about about an audio port. - // gets the clap_id of the currently selected config - // clap_id might now match the index, can be used to call - // clap_plugin_audio_ports_config::select() + // Gets the id of the currently selected config, or CLAP_INVALID_ID if the current port + // layout isn't part of the config list. + // // [main-thread] clap_id(CLAP_ABI *current_config)(const clap_plugin_t *plugin); + // Get info about about an audio port, for a given config_id. + // This is analogous to clap_plugin_audio_ports.get(). // [main-thread] bool(CLAP_ABI *get)(const clap_plugin_t *plugin, clap_id config_id,
Attempted to scale the extra_mvs
@@ -1136,6 +1136,35 @@ static void search_frac(inter_search_info_t *info) } +static INLINE int16_t get_scaled_mv(int16_t mv, int scale) +{ + int32_t scaled = scale * mv; + return CLIP(-32768, 32767, (scaled + 127 + (scaled < 0)) >> 8); +} + +static void apply_mv_scaling(int32_t current_poc, + int32_t current_ref_poc, + int32_t neighbor_poc, + int32_t neighbor_ref_poc, + vector2d_t* mv_cand) +{ + int32_t diff_current = current_poc - current_ref_poc; + int32_t diff_neighbor = neighbor_poc - neighbor_ref_poc; + + if (diff_current == diff_neighbor) return; + if (diff_neighbor == 0) return; + + diff_current = CLIP(-128, 127, diff_current); + diff_neighbor = CLIP(-128, 127, diff_neighbor); + + int scale = CLIP(-4096, 4095, + (diff_current * ((0x4000 + (abs(diff_neighbor) >> 1)) / diff_neighbor) + 32) >> 6); + + mv_cand->x = get_scaled_mv(mv_cand->x, scale); + mv_cand->y = get_scaled_mv(mv_cand->y, scale); +} + + /** * \brief Perform inter search for a single reference frame. */ @@ -1196,7 +1225,7 @@ static void search_pu_inter_ref(inter_search_info_t *info, cur_cu->inter.mv_ref[ref_list] = temp_ref_idx; vector2d_t mv = { 0, 0 }; - { + // Take starting point for MV search from previous frame. // When temporal motion vector candidates are added, there is probably // no point to this anymore, but for now it helps. 
@@ -1208,10 +1237,21 @@ static void search_pu_inter_ref(inter_search_info_t *info, if (ref_cu->inter.mv_dir & 1) { mv.x = ref_cu->inter.mv[0][0]; mv.y = ref_cu->inter.mv[0][1]; - } else { + } + else { mv.x = ref_cu->inter.mv[1][0]; mv.y = ref_cu->inter.mv[1][1]; } + if (true) { + apply_mv_scaling( + info->state->frame->poc, + info->state->frame->ref->pocs[temp_ref_idx], + info->state->frame->ref->pocs[info->state->frame->ref_LX[ref_list][LX_idx]], + info->state->frame->ref->images[info->state->frame->ref_LX[ref_list][LX_idx]]->ref_pocs[ + info->state->frame->ref->ref_LXs[info->state->frame->ref_LX[ref_list][LX_idx]] + [ref_list][ref_cu->inter.mv_ref[ref_list]]], + &mv + ); } }
armv7m7-imxrt: fixed bug in logical operation.
@@ -578,7 +578,7 @@ static u32 _imxrt_ccmGetPeriphClkFreq(void) freq /= ((*(imxrt_common.ccm + ccm_cbcdr) >> 27) & 0x7) + 1; } else { /* Pre_Periph_clk ---> Periph_clk */ - switch ((*(imxrt_common.ccm + ccm_cbcmr) >> 18) * 0x3) { + switch ((*(imxrt_common.ccm + ccm_cbcmr) >> 18) & 0x3) { /* PLL2 ---> Pre_Periph_clk ---> Periph_clk */ case 0x0: freq = _imxrt_ccmGetPllFreq(clk_pll_sys);
esp-lwip: Added check in esp_netif_new() to restrict configuring DHCP server and client together.
@@ -677,6 +677,16 @@ esp_netif_t *esp_netif_new(const esp_netif_config_t *esp_netif_config) return NULL; } +#if ESP_DHCPS + // DHCP server and client cannot be configured together + if((esp_netif_config->base->flags & ESP_NETIF_DHCP_SERVER) && + (esp_netif_config->base->flags & ESP_NETIF_DHCP_CLIENT)) { + ESP_LOGE(TAG, "%s: Failed to configure netif with config=%p (DHCP server and client cannot be configured together)", + __func__, esp_netif_config); + return NULL; + } +#endif + // Create parent esp-netif object esp_netif_t *esp_netif = calloc(1, sizeof(struct esp_netif_obj)); if (!esp_netif) {
meep: correct battery discharging parameter Correct batteery discharging parameter to follow datasheet. BRANCH=octopus TEST=make buildall
@@ -56,7 +56,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, }, @@ -85,7 +85,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, }, @@ -114,7 +114,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, }, @@ -143,7 +143,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, }, @@ -172,7 +172,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, }, @@ -201,7 +201,7 @@ const struct board_batt_params board_battery_info[] = { .start_charging_max_c = 45, .charging_min_c = 0, .charging_max_c = 45, - .discharging_min_c = 0, + .discharging_min_c = -10, .discharging_max_c = 60, }, },
coap:send last block2 response/block1 ACK as CON
@@ -316,10 +316,13 @@ coap_receive(oc_message_t *msg) goto send_message; } else { OC_DBG("received all blocks for payload"); + coap_udp_init_message(response, COAP_TYPE_CON, CONTENT_2_05, + response->mid); coap_set_header_block1(response, block1_num, block1_more, block1_size); request_buffer->payload_size = request_buffer->next_block_offset; + request_buffer->ref_count = 0; goto request_handler; } } @@ -355,12 +358,17 @@ coap_receive(oc_message_t *msg) response_buffer->payload_size) ? 1 : 0; + if (more == 0) { + coap_udp_init_message(response, COAP_TYPE_CON, CONTENT_2_05, + response->mid); + } coap_set_payload(response, payload, payload_size); coap_set_header_block2(response, block2_num, more, block2_size); oc_blockwise_response_state_t *response_state = (oc_blockwise_response_state_t *)response_buffer; coap_set_header_etag(response, response_state->etag, COAP_ETAG_LEN); + response_buffer->ref_count = more; goto send_message; } else { OC_ERR("could not dispatch block");
Add link to 18.01 test framework documentation.
@@ -3,6 +3,7 @@ Test Framework Documentation {#test_framework_doc} PyDoc generated documentation for the "make test" framework is available for the following releases +- [Test framework documentation for VPP 18.01](https://docs.fd.io/vpp/18.01/vpp_make_test/html) - [Test framework documentation for VPP 17.10](https://docs.fd.io/vpp/17.10/vpp_make_test/html) - [Test framework documentation for VPP 17.04](https://docs.fd.io/vpp/17.04/vpp_make_test/html) - [Test framework documentation for VPP 17.01](https://docs.fd.io/vpp/17.01/vpp_make_test/html)
Fix urls in the example The urls in the docs/03.API-EXAMPLE.md was pointing to the old samsung.github.io pages. JerryScript-DCO-1.0-Signed-off-by: Kristof Kosztyo
@@ -512,4 +512,4 @@ Value of x is 17 ## Further steps -For further API description, please visit [API Reference page](https://samsung.github.io/jerryscript/api-reference/) on [JerryScript home page](https://samsung.github.io/jerryscript/). +For further API description, please visit [API Reference page](https://jerryscript-project.github.io/jerryscript/api-reference/) on [JerryScript home page](https://jerryscript-project.github.io/jerryscript/).
fixed some typos in status display
@@ -2060,26 +2060,26 @@ else if(hcxwritecount > 1) printf("\x1B[32mtotal %ld usefull wpa handshakes:\x1B[0m\n", hcxwritecount); if(wpakv1c > 0) - printf("\x1B[32mfound %ld wpa1 RC4 Cipher, HMAC-MD5\x1B[0m\n", wpakv1c); + printf("\x1B[32mfound %ld WPA1 RC4 Cipher, HMAC-MD5\x1B[0m\n", wpakv1c); if(wpakv2c > 0) - printf("\x1B[32mfound %ld wpa2 AES Cipher, HMAC-SHA1\x1B[0m\n", wpakv2c); + printf("\x1B[32mfound %ld WPA2 AES Cipher, HMAC-SHA1\x1B[0m\n", wpakv2c); if(wpakv3c > 0) - printf("\x1B[32mfound %ld wpa2 AES Cipher, AES-128-CMAC\x1B[0m\n", wpakv3c); + printf("\x1B[32mfound %ld WPA2 AES Cipher, AES-128-CMAC\x1B[0m\n", wpakv3c); if(wpakv4c > 0) printf("\x1B[32mfound %ld Groupkeys\x1B[0m\n", wpakv4c); if(hcxwritewldcount == 1) { - printf("\x1B[32mfound %ld valid wpa handshake (by wlandump-ng/wlanresponse)\x1B[0m\n", hcxwritewldcount); + printf("\x1B[32mfound %ld valid WPA handshake (by wlandump-ng/wlanresponse)\x1B[0m\n", hcxwritewldcount); if(wdfhcxoutname != NULL) printf("\x1B[32myou can use hashcat --nonce-error-corrections=0 on %s\x1B[0m\n", wdfhcxoutname); } else if(hcxwritewldcount > 1) { - printf("\x1B[32mfound %ld valid wpa handshakes (by wlandump-ng/wlanresponse)\x1B[0m\n", hcxwritewldcount); + printf("\x1B[32mfound %ld valid WPA handshakes (by wlandump-ng/wlanresponse)\x1B[0m\n", hcxwritewldcount); if(wdfhcxoutname != NULL) printf("\x1B[32myou can use hashcat --nonce-error-corrections=0 on %s\x1B[0m\n", wdfhcxoutname); }
jenkinsfile: build debian bullseye with debug
@@ -553,7 +553,8 @@ def generateFullBuildStages() { tasks << buildAndTest( "debian-bullseye-full", DOCKER_IMAGES.bullseye, - CMAKE_FLAGS_BUILD_ALL, + CMAKE_FLAGS_BUILD_ALL+ + CMAKE_FLAGS_DEBUG, [TEST.ALL, TEST.MEM, TEST.INSTALL] )
graph-store: if null timestamp, replace with now
^- (quip card _state) |^ ?> ?=(%0 -.update) + =? p.update =(p.update *time) now.bowl ?- -.q.update %add-graph (add-graph +.q.update) %remove-graph (remove-graph +.q.update)
Fix help message for pg_basebackup -R The recovery.conf file that's generated is specifically for replication, and not needed (or wanted) for regular backup restore, so indicate that in the message.
@@ -237,7 +237,7 @@ usage(void) printf(_(" -r, --max-rate=RATE maximum transfer rate to transfer data directory\n" " (in kB/s, or use suffix \"k\" or \"M\")\n")); printf(_(" -R, --write-recovery-conf\n" - " write recovery.conf after backup\n")); + " write recovery.conf for replication\n")); printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n" " relocate tablespace in OLDDIR to NEWDIR\n")); printf(_(" -x, --xlog include required WAL files in backup (fetch mode)\n"));
[mod_extforward] fix typos in Forwarded handling also: add commented-out code for potentially creating X-Forwarded-For from Forwarded, if X-Forwarded-For is not present.
@@ -801,7 +801,7 @@ static handler_t mod_extforward_Forwarded (server *srv, connection *con, plugin_ #if 0 case 2: if (0 == buffer_caseless_compare(s+offsets[j],2,"by",2)) - oproto = j; + oby = j; break; #endif #if 0 @@ -912,8 +912,8 @@ static handler_t mod_extforward_Forwarded (server *srv, connection *con, plugin_ if (-1 != oremote_user) { /* ???: should we also support param for auth_type ??? */ /* remove trailing spaces/tabs, and double-quotes from remote_user*/ - v = offsets[oproto+2]; - vlen = v + offsets[oproto+3]; + v = offsets[oremote_user+2]; + vlen = v + offsets[oremote_user+3]; while (vlen > v && (s[vlen-1] == ' ' || s[vlen-1] == '\t')) --vlen; if (vlen > v+1 && s[v] == '"' && s[vlen-1] == '"') { data_string *dsuser; @@ -938,6 +938,51 @@ static handler_t mod_extforward_Forwarded (server *srv, connection *con, plugin_ } } + #if 0 + if ((p->conf.opts & PROXY_FORWARDED_CREATE_XFF) + && NULL == array_get_element(con->request.headers, "X-Forwarded-For")) { + /* create X-Forwarded-For if not present + * (and at least original connecting IP is a trusted proxy) */ + buffer *xff; + data_string *dsxff = (data_string *) + array_get_unused_element(con->request.headers, TYPE_STRING); + if (NULL == dsxff) dsxff = data_string_init(); + buffer_copy_string_len(dsxff->key, CONST_STR_LEN("X-Forwarded-For")); + array_insert_unique(con->request.headers, (data_unset *)dsxff); + xff = dsxff->value; + for (j = 0; j < used; ) { + if (-1 == offsets[j]) { ++j; continue; } + if (3 == offsets[j+1] + && 0 == buffer_caseless_compare(s+offsets[j], 3, "for", 3)) { + if (!buffer_string_is_empty(xff)) + buffer_append_string_len(xff, CONST_STR_LEN(", ")); + /* quoted-string, IPv6 brackets, and :port already removed */ + v = offsets[j+2]; + vlen = offsets[j+3]; + buffer_append_string_len(xff, s+v, vlen); + if (s[v-1] != '=') { /*(must have been quoted-string)*/ + char *x = + memchr(xff->ptr+buffer_string_length(xff)-vlen,'\\',vlen); + if (NULL != x) { /* backslash unescape in-place */ + 
for (v = 0; x[v]; ++x) { + if (x[v] == '\\' && x[++v] == '\0') + break; /*(invalid trailing backslash)*/ + *x = x[v]; + } + buffer_string_set_length(xff, x - xff->ptr); + } + } + /* skip to next group; take first "for=..." in group + * (should be 0 or 1 "for=..." per group, but not trusted) */ + do { j += 4; } while (-1 != offsets[j]); + ++j; + continue; + } + j += 4; /*(k, klen, v, vlen come in sets of 4)*/ + } + } + #endif + return HANDLER_GO_ON; }
untested pls work
@@ -83,7 +83,7 @@ for x in range(len(my_info)): start_index = int(my_info[x].time/1000 * rate) - if x < len(beatmap_info) and "slider" in beatmap_info[x]["type"]: + if x < len(beatmap_info) and "slider" in beatmap_info[x]["type"] and beatmap_info[x]["repeated"] > 1: spinSpeedup = 6 arrow_time_list = [] for a in range(beatmap_info[x]["repeated"]): @@ -91,10 +91,11 @@ for x in range(len(my_info)): if my_info[x].time < beatmap_info[x]["time"] + beatmap_info[x]["duration"] * beatmap_info[x]["repeated"]: for abc in arrow_time_list: + print(beatmap_info[x]["time"]) start_index2 = int(abc/1000 * rate) z[start_index2:start_index2 + len(y)] += y * 0.5 - if not type(my_info[x].more).__name__ == "Spinner": + elif type(my_info[x].more).__name__ != "Spinner": spinSpeedup = 6 if my_info[x].hitresult == None: continue
Configurations/unix-Makefile.tmpl: fix HP-UX build. HP-UX make doesn't recognize $< in explict target rules, only in inference ones such as .c.o.
@@ -856,7 +856,7 @@ EOF } return <<"EOF"; $args{src}: $args{generator}->[0] $deps - \$(CC) $incs \$(CFLAGS) -E \$< | \\ + \$(CC) $incs \$(CFLAGS) -E $args{generator}->[0] | \\ \$(PERL) -ne '/^#(line)?\\s*[0-9]+/ or print' > \$@ EOF }
virtio: vhost gso checksum error when both indirect and mrg_rxbuf are off Turn on gso, turn off both indirect and mrg_rxbuf caused traffic received and sent with checksum error. The problem is we are not mapping the hdr correctly in the shared memory address. Type: fix
@@ -544,13 +544,11 @@ vhost_user_if_input (vlib_main_t * vm, /* This depends on the setup but is very consistent * So I think the CPU branch predictor will make a pretty good job * at optimizing the decision. */ - u8 indirect = 0; if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT) { desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr, &map_hint); desc_current = 0; - indirect = 1; if (PREDICT_FALSE (desc_table == 0)) { vlib_error_count (vm, node->node_index, @@ -565,36 +563,37 @@ vhost_user_if_input (vlib_main_t * vm, { virtio_net_hdr_mrg_rxbuf_t *hdr; u8 *b_data; - u16 current = desc_current; - u32 data_offset = desc_data_offset; + u16 current; - if ((data_offset == desc_table[current].len) && - (desc_table[current].flags & VIRTQ_DESC_F_NEXT)) - { - current = desc_table[current].next; - data_offset = 0; - } - hdr = map_guest_mem (vui, desc_table[current].addr, &map_hint); + hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint); if (PREDICT_FALSE (hdr == 0)) { vlib_error_count (vm, node->node_index, VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1); goto out; } - b_data = (u8 *) hdr + data_offset; - if (indirect) + if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) + { + if ((desc_data_offset == desc_table[desc_current].len) && + (desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)) { - hdr = map_guest_mem (vui, desc_table[desc_current].addr, + current = desc_table[desc_current].next; + b_data = map_guest_mem (vui, desc_table[current].addr, &map_hint); - if (PREDICT_FALSE (hdr == 0)) + if (PREDICT_FALSE (b_data == 0)) { vlib_error_count (vm, node->node_index, - VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1); + VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, + 1); goto out; } } + else + b_data = (u8 *) hdr + desc_data_offset; + vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr); } + } while (1) {
QUIC RSTREAM: Allow pointer to be NULL when calling free
@@ -33,6 +33,9 @@ QUIC_RSTREAM *ossl_quic_rstream_new(QUIC_RXFC *rxfc, void ossl_quic_rstream_free(QUIC_RSTREAM *qrs) { + if (qrs == NULL) + return; + ossl_sframe_list_destroy(&qrs->fl); OPENSSL_free(qrs); }
A few minor tweaks to unit test. [ci skip]
+// +// Copyright 2017 Jeff Bush +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// - -module testbench(input clk, input reset); +module test_rr_arbiter(input clk, input reset); localparam NUM_REQUESTERS = 4;
KDB Find: Require `sync` plugin for MSR test
@@ -14,7 +14,7 @@ if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) message (STATUS "Excluding Markdown Shell Recorder test for `kdb find`, since GCC ${CMAKE_CXX_COMPILER_VERSION} " "does not support regular expressions properly") else (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) - add_msr_test (kdb_find "${CMAKE_SOURCE_DIR}/doc/help/kdb-find.md") + add_msr_test (kdb_find "${CMAKE_SOURCE_DIR}/doc/help/kdb-find.md" REQUIRED_PLUGINS sync) endif (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) if (ENABLE_ASAN)
fix warning: unused variable 'ctx'
@@ -133,7 +133,6 @@ static int luv_pipe_chmod(lua_State* L) { #if LUV_UV_VERSION_GEQ(1,41,0) static int luv_pipe(lua_State* L) { - luv_ctx_t* ctx = luv_context(L); int read_flags = 0, write_flags = 0; uv_file fds[2]; int ret;
Make Codecov less sensitive
ignore: - "test/stream_expectations.c" # Function pointers are not resolved correctly - "examples" +coverage: + status: + project: + default: + target: auto + threshold: 1% # Don't fail CI on trivial changes and Codecov flakiness
appveyor: bundler installation seems to freeze the build, disabling
@@ -104,7 +104,7 @@ install: - gem install rest-client --no-document - gem install listen --no-document - gem install zip --no-document - - gem install bundler --quiet --no-ri --no-rdoc +# - gem install bundler --quiet --no-ri --no-rdoc - bundler --version before_test:
Core: OPMPHM: Predictor: set final config
#include <stdlib.h> #include <string.h> - -const uint16_t opmphmPredictorHistoryMask = 0x7FF; // 11 bit history -//~ const uint16_t opmphmPredictorHistoryMask = 0x1FF; // 9 bit history -//~ const uint16_t opmphmPredictorHistoryMask = 0x7F; // 7 bit history -//~ const uint16_t opmphmPredictorHistoryMask = 0x1F; // 5 bit history - - -const size_t opmphmPredictorActionLimit = 1; +/** + * The benchmarked and evaluated values of the predictors configuration + */ +const uint16_t opmphmPredictorHistoryMask = 0x1FF; // 9 bit history +const size_t opmphmPredictorActionLimit = 599; /** * Prediction Automata A2
vlib: make runtime_data handling thread-local
@@ -750,7 +750,6 @@ vnet_register_interface (vnet_main_t * vnm, if (vec_len (im->deleted_hw_interface_nodes) > 0) { vnet_hw_interface_nodes_t *hn; - vnet_interface_output_runtime_t *rt; vlib_node_t *node; vlib_node_runtime_t *nrt; @@ -762,17 +761,23 @@ vnet_register_interface (vnet_main_t * vnm, vlib_node_rename (vm, hw->tx_node_index, "%v", tx_node_name); vlib_node_rename (vm, hw->output_node_index, "%v", output_node_name); - rt = vlib_node_get_runtime_data (vm, hw->output_node_index); + /* *INDENT-OFF* */ + foreach_vlib_main ({ + vnet_interface_output_runtime_t *rt; + + rt = vlib_node_get_runtime_data (this_vlib_main, hw->output_node_index); ASSERT (rt->is_deleted == 1); rt->is_deleted = 0; rt->hw_if_index = hw_index; rt->sw_if_index = hw->sw_if_index; rt->dev_instance = hw->dev_instance; - rt = vlib_node_get_runtime_data (vm, hw->tx_node_index); + rt = vlib_node_get_runtime_data (this_vlib_main, hw->tx_node_index); rt->hw_if_index = hw_index; rt->sw_if_index = hw->sw_if_index; rt->dev_instance = hw->dev_instance; + }); + /* *INDENT-ON* */ /* The new class may differ from the old one. * Functions have to be updated. */ @@ -790,7 +795,6 @@ vnet_register_interface (vnet_main_t * vnm, nrt = vlib_node_get_runtime (vm, hw->tx_node_index); nrt->function = node->function; - vlib_worker_thread_node_runtime_update (); _vec_len (im->deleted_hw_interface_nodes) -= 1; } else @@ -909,11 +913,17 @@ vnet_delete_hw_interface (vnet_main_t * vnm, u32 hw_if_index) { vnet_hw_interface_nodes_t *dn; + + /* *INDENT-OFF* */ + foreach_vlib_main ({ vnet_interface_output_runtime_t *rt = - vlib_node_get_runtime_data (vm, hw->output_node_index); + vlib_node_get_runtime_data (this_vlib_main, hw->output_node_index); - /* Mark node runtime as deleted so output node (if called) will drop packets. */ + /* Mark node runtime as deleted so output node (if called) + * will drop packets. 
*/ rt->is_deleted = 1; + }); + /* *INDENT-ON* */ vlib_node_rename (vm, hw->output_node_index, "interface-%d-output-deleted", hw_if_index);
Have rac.Writer check for too much input
@@ -163,11 +163,19 @@ func (w *Writer) write(data []byte) error { ioWriter = w.TempFile } + if uint64(len(data)) > MaxSize { + w.err = errors.New("rac: too much input") + return w.err + } if _, err := ioWriter.Write(data); err != nil { w.err = err return err } w.dataSize += uint64(len(data)) + if w.dataSize > MaxSize { + w.err = errors.New("rac: too much input") + return w.err + } return nil } @@ -183,7 +191,7 @@ func (w *Writer) AddResource(resource []byte) (OptResource, error) { } if len(w.resourcesCOffsets) >= (1 << 30) { - w.err = errors.New("rac: too many shared resources") + w.err = errors.New("rac: too many resources") return 0, w.err } if len(w.resourcesCOffsets) == 0 { @@ -208,18 +216,22 @@ func (w *Writer) AddResource(resource []byte) (OptResource, error) { // // The caller may modify primary's contents after this method returns. func (w *Writer) AddChunk(dRangeSize uint64, primary []byte, secondary OptResource, tertiary OptResource) error { - if err := w.initialize(); err != nil { - return err - } - if dRangeSize == 0 { - w.err = errors.New("rac: dRangeSize is too small") - return w.err + return nil } if (dRangeSize > MaxSize) || ((w.dFileSize + dRangeSize) > MaxSize) { - w.err = errors.New("rac: dRangeSize is too large") + w.err = errors.New("rac: too much input") return w.err } + if len(w.leafNodes) >= (1 << 30) { + w.err = errors.New("rac: too many chunks") + return w.err + } + + if err := w.initialize(); err != nil { + return err + } + cLength := calcCLength(len(primary)) w.dFileSize += dRangeSize w.leafNodes = append(w.leafNodes, node{ @@ -258,6 +270,10 @@ func (w *Writer) Close() error { } indexSize := rootNode.calcEncodedSize(0) + if (indexSize + w.dataSize) > MaxSize { + w.err = errors.New("rac: too much input") + return w.err + } nw := &nodeWriter{ w: w.Writer, resourcesCOffsets: w.resourcesCOffsets,
Add bigger timeout for PING command
@@ -58,7 +58,7 @@ esp_ping(const char* host, uint32_t* time, ESP_MSG_VAR_REF(msg).msg.tcpip_ping.host = host; ESP_MSG_VAR_REF(msg).msg.tcpip_ping.time_out = time; - return espi_send_msg_to_producer_mbox(&ESP_MSG_VAR_REF(msg), espi_initiate_cmd, 10000); + return espi_send_msg_to_producer_mbox(&ESP_MSG_VAR_REF(msg), espi_initiate_cmd, 30000); } #endif /* ESP_CFG_PING || __DOXYGEN__ */
libc: Keep printf("%pS", p) behavior consistent When ALLSYMS is not turned on, %pS will be treated as %p
@@ -1145,10 +1145,10 @@ str_lpad: continue; } -#ifdef CONFIG_ALLSYMS case 'S': case 's': { +#ifdef CONFIG_ALLSYMS FAR const struct symtab_s *symbol; FAR void *addr = (FAR void *)(uintptr_t)x; size_t symbolsize; @@ -1171,10 +1171,9 @@ str_lpad: continue; } - +#endif break; } -#endif default: fmt_ungetc(fmt);
Add trusty to the test matrix
language: cpp +cache: ccache + compiler: - gcc @@ -8,7 +10,12 @@ addons: - 127.0.0.1.xip.io - alternate.127.0.0.1.xip.io -before_install: +matrix: + include: + - os: linux + sudo: required + dist: precise + before_install: &bi # upgrade g++ and libstdc++ to build nghttp2 - sudo add-apt-repository --yes ppa:ubuntu-toolchain-r/test - sudo apt-add-repository --yes ppa:smspillaz/cmake-2.8.12 @@ -18,8 +25,7 @@ before_install: - $CXX --version # for speed, pre-install deps installed in `before_script` section as ubuntu packages - sudo apt-get install -qq cpanminus libipc-signal-perl liblist-moreutils-perl libwww-perl libio-socket-ssl-perl zlib1g-dev - -before_script: + before_script: &bs # install libuv >= 1.0.0 (optionally required for building / testing libh2o) - curl -L https://github.com/libuv/libuv/archive/v1.0.0.tar.gz | tar xzf - - (cd libuv-1.0.0 && ./autogen.sh && ./configure --prefix=/usr && make && sudo make install) @@ -41,9 +47,14 @@ before_script: - (cd nghttp2-1.4.0 && ./configure --prefix=/usr --disable-threads --enable-app && make && sudo make install) - curl -L https://curl.haxx.se/download/curl-7.50.0.tar.gz | tar xzf - - (cd curl-7.50.0 && ./configure --prefix=/usr --with-nghttp2 --disable-shared && make && sudo make install) - -script: + script: &s - cmake -DWITH_MRUBY=ON . - make all - make check - sudo make check-as-root + - os: linux + sudo: required + dist: trusty + before_install: *bi + before_script: *bs + script: *s
needed backticks
@@ -10,7 +10,7 @@ title: Known Issues As of this AppScope pre-release, known issues include: -- [#677](https://github.com/criblio/appscope/issues/677) HTTP/1.1 events are parsed incorrectly, populating http_method and http_target fields with junk. +- [#677](https://github.com/criblio/appscope/issues/677) HTTP/1.1 events are parsed incorrectly, populating the `http_method` and `http_target` fields with junk. ## AppScope 0.7
eyre: convert facts to json manually By doing a %watch instead of %watch-as %json for channel subscriptions, we can hopefully make better use of noun deduplication, when storing events in a channel's event queue until they get acked.
events =^ head queue ~(get to queue) =, p.head - =/ sign=(unit sign:agent:gall) - (channel-event-to-sign channel-event) - ?~ sign $ - $(events [(event-json-to-wall id (sign-to-json request-id u.sign)) events]) + ?~ sign=(channel-event-to-sign channel-event) $ + ?~ json=(sign-to-json request-id u.sign) $ + $(events [(event-json-to-wall id u.json) events]) :: send the start event to the client :: =^ http-moves state :^ duct %pass (subscription-wire channel-id request-id ship app) :* %g %deal [our ship] app - `task:agent:gall`[%watch-as %json path] + `task:agent:gall`[%watch path] == :: =. session.channel-state.state :: =? moves ?=([%| *] state.u.channel) ^- (list move) + ?~ json=(sign-to-json request-id sign) + moves :_ moves :+ p.state.u.channel %give ^- gift:able :: ^= data %- wall-to-octs - (event-json-to-wall event-id (sign-to-json request-id sign)) + (event-json-to-wall event-id u.json) :: complete=%.n == =/ res (mule |.((vale:dais noun.event))) ?: ?=(%| -.res) ((slog leaf+"eyre: stale fact of mark {(trip have)}" ~) ~) - =* vase p.res + `[%fact have p.res] + :: +sign-to-json: render sign from request-id as json channel event + :: + ++ sign-to-json + |= [request-id=@ud =sign:agent:gall] + ^- (unit json) + :: for facts, we try to convert the result to json + :: + =/ jsyn=(unit sign:agent:gall) + ?. ?=(%fact -.sign) `sign + ?: ?=(%json p.cage.sign) `sign :: find and use tube from fact mark to json - ::TODO move into sign-to-json :: + =* have=mark p.cage.sign =* desc=tape "from {(trip have)} to json" =/ tube=(unit tube:clay) - ?: =(have %json) `(bake same ^vase) =/ tuc=(unit (unit cage)) (scry [%141 %noun] ~ %cc [our %home da+now] (flop /[have]/json)) ?. 
?=([~ ~ *] tuc) ~ ?~ tube ((slog leaf+"eyre: no tube {desc}" ~) ~) :: - =/ res (mule |.((u.tube vase))) + =/ res (mule |.((u.tube q.cage.sign))) ?: ?=(%& -.res) `[%fact %json p.res] ((slog leaf+"eyre: failed tube {desc}" ~) ~) - :: +sign-to-json: render sign from request-id as json channel event :: - ++ sign-to-json - |= [request-id=@ud =sign:agent:gall] - ^- json + ?~ jsyn ~ + %- some + =* sign u.jsyn =, enjs:format %- pairs ^- (list [@t json]) :~ ['response' [%s 'diff']] :: :- 'json' - ::TODO do mark conversion here + ~| [%unexpected-fact-mark p.cage.sign] ?> =(%json p.cage.sign) ;;(json q.q.cage.sign) ==
Fix definition of snprintf for MSVC MS _snprintf_s takes an additional argument for the size of the buffer, so is not a direct replacement (utest/ctest.h from which I copied was wrong)
@@ -37,7 +37,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #if defined(_WIN32) && defined(_MSC_VER) #if _MSC_VER < 1900 -#define snprintf _snprintf_s +#define snprintf _snprintf #endif #endif
Fix Nucleo-F767ZI clocks This updates the BSP to be clocked at 168MHz, enables PCLK2 for TRNG, and enables the instruction cache.
*/ #include "stm32f7xx.h" +#include "stm32f7xx_hal_gpio_ex.h" +#include "stm32f7xx_hal_flash.h" +#include "stm32f7xx_hal_rcc.h" +#include "stm32f7xx_hal_pwr.h" +#include "stm32f7xx_hal_pwr_ex.h" #include "mcu/cmsis_nvic.h" #if !defined (HSE_VALUE) /** @addtogroup STM32F7xx_System_Private_FunctionPrototypes * @{ */ +static void SystemClock_Config(void); /** * @} @@ -175,6 +181,9 @@ void SystemInit(void) /* Disable all interrupts */ RCC->CIR = 0x00000000; + /* Configure System Clock */ + SystemClock_Config(); + /* Relocate the vector table */ NVIC_Relocate(); } @@ -263,6 +272,56 @@ void SystemCoreClockUpdate(void) SystemCoreClock >>= tmp; } +/** System Clock Configuration. + * + * Configures CPU to run at 168 MHz. + * + * ((16 MHz / 10) * 210) / 2 = 168 MHz + */ +void SystemClock_Config(void) +{ + + RCC_OscInitTypeDef RCC_OscInitStruct; + RCC_ClkInitTypeDef RCC_ClkInitStruct; + + SCB_EnableICache(); + + /**Configure the main internal regulator output voltage + */ + __HAL_RCC_PWR_CLK_ENABLE(); + + __HAL_PWR_VOLTAGESCALING_CONFIG(PWR_REGULATOR_VOLTAGE_SCALE2); + + /* Initializes the CPU, AHB and APB busses clocks */ + RCC_OscInitStruct.OscillatorType = RCC_OSCILLATORTYPE_HSI; + RCC_OscInitStruct.HSIState = RCC_HSI_ON; + RCC_OscInitStruct.HSICalibrationValue = 16; + RCC_OscInitStruct.PLL.PLLState = RCC_PLL_ON; + RCC_OscInitStruct.PLL.PLLSource = RCC_PLLSOURCE_HSI; + RCC_OscInitStruct.PLL.PLLM = 10; + RCC_OscInitStruct.PLL.PLLN = 210; + RCC_OscInitStruct.PLL.PLLP = RCC_PLLP_DIV2; + RCC_OscInitStruct.PLL.PLLQ = 2; + if (HAL_RCC_OscConfig(&RCC_OscInitStruct) != HAL_OK) + { + /* TODO: Throw error */ + } + + /* Initializes the CPU, AHB and APB busses clocks */ + RCC_ClkInitStruct.ClockType = \ + RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_PCLK1 | \ + RCC_CLOCKTYPE_PCLK2; + RCC_ClkInitStruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK; + RCC_ClkInitStruct.AHBCLKDivider = RCC_SYSCLK_DIV1; + RCC_ClkInitStruct.APB1CLKDivider = RCC_HCLK_DIV4; + 
RCC_ClkInitStruct.APB2CLKDivider = RCC_HCLK_DIV2; + + if (HAL_RCC_ClockConfig(&RCC_ClkInitStruct, FLASH_LATENCY_5) != HAL_OK) + { + /* TODO: Throw error */ + } +} + /** * @} */
pydiag: Add implementation of cool_cpu_cache opt.
import nlb from diagtest import diagtest from opae.utils import cl_align, CACHELINE_BYTES + # pylint: disable=E0611 from opae import fpga + COOL_CACHE_LINES = 1024 COOL_CACHE_SIZE = CACHELINE_BYTES*COOL_CACHE_LINES +MAX_CPU_CACHE_SIZE = 100 * 1024 * 1024 class nlb3(diagtest): guid = "F7DF405C-BD7A-CF72-22F1-44B0B93ACD18" + _cpu_cache_buffer = '' def add_arguments(self, parser): super(nlb3, self).add_arguments(parser) @@ -72,12 +76,16 @@ class nlb3(diagtest): return self.args.end*self.args.strided_access*CACHELINE_BYTES def setup_buffers(self, handle, dsm, src, dst): + src.fill32(0xc0cac01a) + dst.fill(0) if self.args.warm_fpga_cache: self.warm_fpga_cache(handle, dsm, src, dst) elif self.args.cool_fpga_cache: self.cool_fpga_cache(handle, dsm) - else: - src.fill(0xcafe) + + if self.args.cool_cpu_cache: + with open("/dev/urandom", "rb") as rbytes: + self._cpu_cache_buffer = rbytes.read(MAX_CPU_CACHE_SIZE) def warm_fpga_cache(self, handle, dsm, src, dst): dsm.fill(0)
crypto/bio/build.info: split the source files in categories
LIBS=../../libcrypto + +# Base library SOURCE[../../libcrypto]=\ bio_lib.c bio_cb.c bio_err.c \ - bss_mem.c bss_null.c bss_fd.c \ - bss_file.c bss_sock.c bss_conn.c \ - bf_null.c bf_buff.c b_print.c b_dump.c b_addr.c \ - b_sock.c b_sock2.c bss_acpt.c bf_nbio.c bss_log.c bss_bio.c \ - bss_dgram.c bio_meth.c bf_lbuf.c + b_print.c b_dump.c b_addr.c \ + b_sock.c b_sock2.c \ + bio_meth.c + +# Source / sink implementations +SOURCE[../../libcrypto]=\ + bss_null.c bss_mem.c bss_bio.c bss_fd.c bss_file.c \ + bss_sock.c bss_conn.c bss_acpt.c bss_dgram.c \ + bss_log.c + +# Filters +SOURCE[../../libcrypto]=\ + bf_null.c bf_buff.c bf_lbuf.c bf_nbio.c
Protect `fiobj_free` from NULL
@@ -139,6 +139,8 @@ static int fiobj_free_or_mark(fiobj_s *o, void *arg) { */ void fiobj_free(fiobj_s *o) { #if DEBUG + if (!o) + return; if (OBJ2HEAD(o)->ref == 0) { fprintf(stderr, "ERROR: attempting to free an object that isn't a fiobj or already " @@ -147,7 +149,7 @@ void fiobj_free(fiobj_s *o) { kill(0, SIGABRT); } #endif - if (OBJREF_REM(o)) + if (!o || OBJREF_REM(o)) return; /* handle wrapping */
build: make FLB_RECORD_ACCESSOR dependent of FLB_REGEX
@@ -201,6 +201,15 @@ endif() # Record Accessor +# --------------- +# Make sure it dependency is enabled +if(FLB_RECORD_ACCESSOR AND NOT FLB_REGEX) + message(FATAL_ERROR + "FLB_RECORD_ACCESSOR depends on FLB_REGEX, " + "enable it with: -DFLB_REGEX=ON") +endif() + +# Build record accessor files if(FLB_RECORD_ACCESSOR) set(src ${src}
Support deprecated `publish` format
@@ -808,6 +808,9 @@ VALUE iodine_publish(int argc, VALUE *argv, VALUE self) { /* single argument must be a Hash */ Check_Type(argv[0], T_HASH); rb_ch = rb_hash_aref(argv[0], to_sym_id); + if (rb_ch == Qnil || rb_ch == Qfalse) { + rb_ch = rb_hash_aref(argv[0], channel_sym_id); + } rb_msg = rb_hash_aref(argv[0], message_sym_id); rb_engine = rb_hash_aref(argv[0], engine_varid); } break;
add rt_memory_info() for memheap.c
@@ -712,6 +712,20 @@ void *rt_calloc(rt_size_t count, rt_size_t size) } RTM_EXPORT(rt_calloc); +void rt_memory_info(rt_uint32_t *total, + rt_uint32_t *used, + rt_uint32_t *max_used) +{ + if (total != RT_NULL) + *total = _heap.pool_size; + + if (used != RT_NULL) + *used = _heap.pool_size - _heap.available_size; + + if (max_used != RT_NULL) + *max_used = _heap.max_used_size; +} + #endif #endif
output_calyptia: add comment for variable used by chunk traces.
@@ -849,6 +849,7 @@ static void cb_calyptia_flush(struct flb_event_chunk *event_chunk, size_t off = 0; size_t out_size = 0; char *out_buf = NULL; +/* used to create records for reporting traces to the cloud. */ #ifdef FLB_HAVE_CHUNK_TRACE flb_sds_t json; #endif /* FLB_HAVE_CHUNK_TRACE */
Zephyr port: explicitly convert port# between host<->network byte order This change adds byte order conversions on receive and send. It was previously assumed that the network stack does this conversion on both sides, but it was determined that it wasn't the case.
@@ -96,7 +96,7 @@ oc_network_receive(struct net_context *context, struct net_buf *buf, int status, message->endpoint.flags = IPV6; memcpy(message->endpoint.addr.ipv6.address, &NET_IPV6_BUF(buf)->src, 16); message->endpoint.addr.ipv6.scope = 0; - message->endpoint.addr.ipv6.port = NET_UDP_BUF(buf)->src_port; + message->endpoint.addr.ipv6.port = ntohs(NET_UDP_BUF(buf)->src_port); PRINT("oc_network_receive: received %d bytes\n", message->length); PRINT("oc_network_receive: incoming message: "); @@ -130,7 +130,7 @@ oc_send_buffer(oc_message_t *message) memcpy(peer_addr.sin6_addr.in6_u.u6_addr8, message->endpoint.addr.ipv6.address, 16); peer_addr.sin6_family = AF_INET6; - peer_addr.sin6_port = message->endpoint.addr.ipv6.port; + peer_addr.sin6_port = htons(message->endpoint.addr.ipv6.port); /* Network buffer to hold data to be sent */ struct net_buf *send_buf;
board/damu/led.c: Format with clang-format BRANCH=none TEST=none
@@ -19,32 +19,39 @@ __override const int led_charge_lvl_2 = 95; __override struct led_descriptor led_bat_state_table[LED_NUM_STATES][LED_NUM_PHASES] = { - [STATE_CHARGING_LVL_1] = {{EC_LED_COLOR_AMBER, LED_INDEFINITE} }, - [STATE_CHARGING_LVL_2] = {{EC_LED_COLOR_AMBER, LED_INDEFINITE} }, - [STATE_CHARGING_FULL_CHARGE] = {{EC_LED_COLOR_WHITE, LED_INDEFINITE} }, + [STATE_CHARGING_LVL_1] = { { EC_LED_COLOR_AMBER, + LED_INDEFINITE } }, + [STATE_CHARGING_LVL_2] = { { EC_LED_COLOR_AMBER, + LED_INDEFINITE } }, + [STATE_CHARGING_FULL_CHARGE] = { { EC_LED_COLOR_WHITE, + LED_INDEFINITE } }, [STATE_DISCHARGE_S0] = { { LED_OFF, LED_INDEFINITE } }, [STATE_DISCHARGE_S3] = { { LED_OFF, LED_INDEFINITE } }, [STATE_DISCHARGE_S5] = { { LED_OFF, LED_INDEFINITE } }, - [STATE_BATTERY_ERROR] = {{EC_LED_COLOR_AMBER, 1 * LED_ONE_SEC}, + [STATE_BATTERY_ERROR] = { { EC_LED_COLOR_AMBER, + 1 * LED_ONE_SEC }, { LED_OFF, 1 * LED_ONE_SEC } }, - [STATE_FACTORY_TEST] = {{EC_LED_COLOR_WHITE, 2 * LED_ONE_SEC}, - {EC_LED_COLOR_AMBER, 2 * LED_ONE_SEC} }, + [STATE_FACTORY_TEST] = { { EC_LED_COLOR_WHITE, + 2 * LED_ONE_SEC }, + { EC_LED_COLOR_AMBER, + 2 * LED_ONE_SEC } }, }; __override const struct led_descriptor led_pwr_state_table[PWR_LED_NUM_STATES][LED_NUM_PHASES] = { [PWR_LED_STATE_ON] = { { EC_LED_COLOR_WHITE, LED_INDEFINITE } }, - [PWR_LED_STATE_SUSPEND_AC] = {{EC_LED_COLOR_WHITE, 1 * LED_ONE_SEC}, - {LED_OFF, 3 * LED_ONE_SEC} }, - [PWR_LED_STATE_SUSPEND_NO_AC] = {{EC_LED_COLOR_WHITE, 1 * LED_ONE_SEC}, + [PWR_LED_STATE_SUSPEND_AC] = { { EC_LED_COLOR_WHITE, + 1 * LED_ONE_SEC }, { LED_OFF, 3 * LED_ONE_SEC } }, + [PWR_LED_STATE_SUSPEND_NO_AC] = { { EC_LED_COLOR_WHITE, + 1 * LED_ONE_SEC }, + { LED_OFF, + 3 * LED_ONE_SEC } }, [PWR_LED_STATE_OFF] = { { LED_OFF, LED_INDEFINITE } }, }; -const enum ec_led_id supported_led_ids[] = { - EC_LED_ID_POWER_LED, - EC_LED_ID_BATTERY_LED -}; +const enum ec_led_id supported_led_ids[] = { EC_LED_ID_POWER_LED, + EC_LED_ID_BATTERY_LED }; const int supported_led_ids_count 
= ARRAY_SIZE(supported_led_ids); __override void led_set_color_battery(enum ec_led_colors color)
Explicit braces
@@ -1270,7 +1270,9 @@ float compress_symbolic_block( prepare_block_statistics(bsd->texel_count, blk, ewb, &is_normal_map, &lowest_correl); if (is_normal_map && lowest_correl < 0.99f) + { lowest_correl = 0.99f; + } // next, test the four possible 1-partition, 2-planes modes for (int i = 0; i < 4; i++)
files: Use memfd_create when possible
#include <sys/types.h> #include <unistd.h> +#if defined(_HF_ARCH_LINUX) +#include <sys/syscall.h> +#endif /* defined(_HF_ARCH_LINUX) */ + #include "log.h" #include "util.h" @@ -573,6 +577,10 @@ uint8_t *files_mapFileShared(char *fileName, off_t * fileSz, int *fd) void *files_mapSharedMem(size_t sz, int *fd, const char *dir) { +#if defined(_HF_ARCH_LINUX) && defined(__NR_memfd_create) + *fd = syscall(__NR_memfd_create, "honggfuzz", 0); +#endif /* defined(_HF_ARCH_LINUX) && defined(__NR_memfd_create) */ + if (*fd == -1) { char template[PATH_MAX]; snprintf(template, sizeof(template), "%s/hfuzz.XXXXXX", dir); if ((*fd = mkstemp(template)) == -1) { @@ -580,6 +588,7 @@ void *files_mapSharedMem(size_t sz, int *fd, const char *dir) return MAP_FAILED; } unlink(template); + } if (ftruncate(*fd, sz) == -1) { PLOG_W("ftruncate(%d, %zu)", *fd, sz); close(*fd);
Improve gtest dependency.
@@ -12,8 +12,15 @@ if("${CMAKE_VERSION}" VERSION_LESS "3.11" AND POLICY CMP0037) set_policy(CMP0037 OLD) endif() +set(GTEST_VERSION 1.8.1) + +find_package(GTest ${GTEST_VERSION}) + +if(NOT GTEST_FOUND) include(ExternalProject) + find_package(Threads REQUIRED) + if(MINGW) set(GTEST_DISABLE_PTHREADS ON) else() @@ -23,7 +30,7 @@ endif() # Import Google Test Framework ExternalProject_Add(googletest GIT_REPOSITORY https://github.com/google/googletest.git - GIT_TAG release-1.8.1 + GIT_TAG release-${GTEST_VERSION} CMAKE_ARGS -Dgmock_build_tests=OFF -Dgtest_build_samples=OFF -Dgtest_build_tests=OFF @@ -33,8 +40,9 @@ ExternalProject_Add(googletest -DINSTALL_GTEST=OFF -DBUILD_GMOCK=ON PREFIX "${CMAKE_CURRENT_BINARY_DIR}" - UPDATE_COMMAND "" # Disable update step - INSTALL_COMMAND "" # Disable install step + UPDATE_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" ) # Google Test include and binary directories @@ -54,20 +62,27 @@ else() set(GTEST_LIBS_SUFFIX "a") endif() -set(GTEST_LIBS + # Define Paths + set(GTEST_INCLUDE_DIRS + "${GTEST_INCLUDE_DIR}" + "${GMOCK_INCLUDE_DIR}" + ) + + set(GTEST_LIBRARIES "${GTEST_LIBS_DIR}/libgtest.${GTEST_LIBS_SUFFIX}" "${GMOCK_LIBS_DIR}/libgmock.${GTEST_LIBS_SUFFIX}" + "${CMAKE_THREAD_LIBS_INIT}" ) +endif() target_include_directories(gtest SYSTEM INTERFACE - ${GTEST_INCLUDE_DIR} - ${GMOCK_INCLUDE_DIR} + ${GTEST_INCLUDE_DIRS} ) target_link_libraries(gtest INTERFACE - ${GTEST_LIBS} + ${GTEST_LIBRARIES} ) add_dependencies(gtest googletest)
compiler-families/intel-compilers-devel: spelling fix
@@ -73,7 +73,7 @@ versions_all=`rpm -qal | grep ${icc_subpath}` if [ $? -eq 1 ];then echo "" echo "Error: Unable to detect local Parallel Studio installation. The toolchain" - echo " providing ${icc_subpath} must be installed prior to this compatability package" + echo " providing ${icc_subpath} must be installed prior to this compatibility package" echo " " exit 1 fi @@ -94,7 +94,7 @@ for file in ${versions_all}; do done if [ -z "${versions}" ]; then echo "" - echo "Error: local PSXE compatability support is for versions > ${min_ver}" + echo "Error: local PSXE compatibility support is for versions > ${min_ver}" echo " " exit 1 fi @@ -109,7 +109,7 @@ versions=`rpm -qal | grep ${icc_subpath}$` if [ $? -eq 1 ];then echo "" echo "Error: Unable to detect local Parallel Studio installation. The toolchain" - echo " providing ${icc_subpath} must be installed prior to this compatability package" + echo " providing ${icc_subpath} must be installed prior to this compatibility package" exit 1 fi
test_suite_x509parse.function improvement as suggested in review; also removed two no longer necessary void casts
@@ -306,8 +306,6 @@ int parse_crt_ext_cb( void *p_ctx, mbedtls_x509_crt const *crt, mbedtls_x509_buf int critical, const unsigned char *cp, const unsigned char *end ) { ( void ) crt; - ( void ) cp; - ( void ) end; ( void ) critical; mbedtls_x509_buf *new_oid = (mbedtls_x509_buf *)p_ctx; if( oid->tag == MBEDTLS_ASN1_OID && @@ -352,6 +350,9 @@ int parse_crt_ext_cb( void *p_ctx, mbedtls_x509_crt const *crt, mbedtls_x509_buf MBEDTLS_ASN1_OID ) ) != 0 ) return( MBEDTLS_ERR_X509_INVALID_EXTENSIONS + ret ); + /* + * Recognize exclusively the policy with OID 1 + */ if( len != 1 || *p[0] != 1 ) parse_ret = MBEDTLS_ERR_X509_FEATURE_UNAVAILABLE;
cmdline: cast time_t to long
@@ -677,7 +677,7 @@ bool cmdlineParse(int argc, char* argv[], honggfuzz_t* hfuzz) { cmdlineYesNo(hfuzz->exe.fuzzStdin), cmdlineYesNo(hfuzz->io.saveUnique), hfuzz->mutationsPerRun, hfuzz->exe.externalCommand == NULL ? "NULL" : hfuzz->exe.externalCommand, - (int)hfuzz->timing.runEndTime, hfuzz->timing.tmOut, hfuzz->mutationsMax, + (int)hfuzz->timing.runEndTime, (long)hfuzz->timing.tmOut, hfuzz->mutationsMax, hfuzz->threads.threadsMax, hfuzz->io.fileExtn, hfuzz->exe.asLimit, hfuzz->exe.rssLimit, hfuzz->exe.dataLimit, hfuzz->exe.cmdline[0], hfuzz->linux.pid, cmdlineYesNo(hfuzz->monitorSIGABRT));