Clarify the status of bundled external perl modules
@@ -3,8 +3,9 @@ Intro If we find a useful Perl module that isn't one of the core Perl modules, we may choose to bundle it with the OpenSSL source. +They remain unmodified and retain their copyright and license. -Here, we simply list those modules and where we downloaded them from. +Here, we simply list those modules and where we got them from. Downloaded and bundled Perl modules -----------------------------------
[CUDA] Fix build for LLVM 7.0 The `addPassesToEmitFile` function has an additional argument.
@@ -135,6 +135,9 @@ int pocl_ptx_gen(const char *BitcodeFilename, const char *PTXFilename, llvm::SmallVector<char, 4096> Data; llvm::raw_svector_ostream PTXStream(Data); if (Machine->addPassesToEmitFile(Passes, PTXStream, +#if ! LLVM_OLDER_THAN_7_0 + nullptr, +#endif llvm::TargetMachine::CGFT_AssemblyFile)) { POCL_MSG_ERR("[CUDA] ptx-gen: failed to add passes\n"); return 1;
Makefile: remove newline in link rule The newline makes it difficult to cut and paste the link command in a terminal when debugging link issues. Make it a long line instead.
@@ -482,8 +482,7 @@ TARGET_LIBEXTRAS = $(LD_START_GROUP) $(TARGET_LIBFILES) $(LD_END_GROUP) $(BUILD_DIR_BOARD)/%.$(TARGET): LIBNAME ?= $@ $(BUILD_DIR_BOARD)/%.$(TARGET): $(OBJECTDIR)/%.o $(LDSCRIPT) $(PROJECT_OBJECTFILES) $(PROJECT_LIBRARIES) $(CONTIKI_OBJECTFILES) $(TRACE_LD) - $(Q)$(LD) $(LDFLAGS) $(TARGET_STARTFILES) $(sort ${filter-out $(LDSCRIPT) %.a,$^}) \ - ${filter %.a,$^} $(TARGET_LIBEXTRAS) -o $(LIBNAME) + $(Q)$(LD) $(LDFLAGS) $(TARGET_STARTFILES) $(sort ${filter-out $(LDSCRIPT) %.a,$^}) ${filter %.a,$^} $(TARGET_LIBEXTRAS) -o $(LIBNAME) ifdef BINARY_SIZE_LOGFILE $(Q)$(SIZE) $(LIBNAME) | grep $(BUILD_DIR_BOARD) >> $(BINARY_SIZE_LOGFILE) endif
adc: skip linking static functions when not COMPILER_OPTIMIZATION_DEFAULT
@@ -30,6 +30,7 @@ entries: lcd_hal: lcd_hal_cal_pclk_freq (noflash) if ADC_ONESHOT_CTRL_FUNC_IN_IRAM = y: adc_oneshot_hal (noflash) + if COMPILER_OPTIMIZATION_DEFAULT = y: adc_hal_common: get_controller (noflash) adc_hal_common: adc_hal_set_controller (noflash) if SOC_ADC_ARBITER_SUPPORTED = y: @@ -40,6 +41,7 @@ entries: if ADC_CONTINUOUS_ISR_IRAM_SAFE = y: adc_hal: adc_hal_get_reading_result (noflash) adc_hal: adc_hal_digi_start (noflash) + if COMPILER_OPTIMIZATION_DEFAULT = y: adc_hal: adc_hal_digi_dma_link_descriptors (noflash) adc_hal: adc_hal_digi_stop (noflash) if IDF_TARGET_ESP32 = y || IDF_TARGET_ESP32S2 = y:
bn/bn_exp.c: harmonize BN_mod_exp_mont_consttime with negative input. All exponentiation subroutines except BN_mod_exp_mont_consttime produce a non-negative result for negative input, which is confusing for the fuzzer.
@@ -651,6 +651,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, } #ifdef RSAZ_ENABLED + if (!a->neg) { /* * If the size of the operands allow it, perform the optimized * RSAZ exponentiation. For further information see @@ -677,6 +678,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, ret = 1; goto err; } + } #endif /* Get the window size to use with size of p. */ @@ -748,7 +750,7 @@ int BN_mod_exp_mont_consttime(BIGNUM *rr, const BIGNUM *a, const BIGNUM *p, /* prepare a^1 in Montgomery domain */ if (a->neg || BN_ucmp(a, m) >= 0) { - if (!BN_mod(&am, a, m, ctx)) + if (!BN_nnmod(&am, a, m, ctx)) goto err; if (!BN_to_montgomery(&am, &am, mont, ctx)) goto err;
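For context, BN_mod() returns a remainder carrying the sign of the dividend, while BN_nnmod() always returns a value in [0, m), which is what the Montgomery conversion in the changed code expects. A minimal standalone sketch using only the public BIGNUM API (illustrative, not taken from the commit; return-value checks omitted for brevity):

#include <stdio.h>
#include <openssl/bn.h>
#include <openssl/crypto.h>

int main(void)
{
    BN_CTX *ctx = BN_CTX_new();
    BIGNUM *a = BN_new(), *m = BN_new(), *r = BN_new();
    char *s;

    BN_set_word(a, 7);
    BN_set_negative(a, 1);          /* a = -7 */
    BN_set_word(m, 5);              /* m = 5  */

    BN_mod(r, a, m, ctx);           /* truncated remainder, sign of a */
    s = BN_bn2dec(r);
    printf("BN_mod:   %s\n", s);    /* prints -2 */
    OPENSSL_free(s);

    BN_nnmod(r, a, m, ctx);         /* non-negative remainder */
    s = BN_bn2dec(r);
    printf("BN_nnmod: %s\n", s);    /* prints 3 */
    OPENSSL_free(s);

    BN_free(a); BN_free(m); BN_free(r); BN_CTX_free(ctx);
    return 0;
}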
doc: fix vhm_request doxygen comment Table contents weren't rendered correctly in developer-guides/hld/hv-io-emulation.html, so fix the doxygen comments.
@@ -196,46 +196,46 @@ union vhm_io_request { * +-----------------------+-------------------------+----------------------+ * | SOS vCPU 0 | SOS vCPU x | UOS vCPU y | * +=======================+=========================+======================+ - * | | | **Hypervisor**: | + * | | | Hypervisor: | * | | | | * | | | - Fill in type, | * | | | addr, etc. | * | | | - Pause UOS vCPU y | * | | | - Set state to | - * | | | PENDING **(a)** | + * | | | PENDING (a) | * | | | - Fire upcall to | * | | | SOS vCPU 0 | * | | | | * +-----------------------+-------------------------+----------------------+ - * | **VHM**: | | | + * | VHM: | | | * | | | | * | - Scan for pending | | | * | requests | | | * | - Set state to | | | - * | PROCESSING **(b)** | | | + * | PROCESSING (b) | | | * | - Assign requests to | | | - * | clients **(c)** | | | + * | clients (c) | | | * | | | | * +-----------------------+-------------------------+----------------------+ - * | | **Client**: | | + * | | Client: | | * | | | | * | | - Scan for assigned | | * | | requests | | * | | - Handle the | | - * | | requests **(d)** | | + * | | requests (d) | | * | | - Set state to COMPLETE | | * | | - Notify the hypervisor | | * | | | | * +-----------------------+-------------------------+----------------------+ - * | | **Hypervisor**: | | + * | | Hypervisor: | | * | | | | * | | - resume UOS vCPU y | | - * | | **(e)** | | + * | | (e) | | * | | | | * +-----------------------+-------------------------+----------------------+ - * | | | **Hypervisor**: | + * | | | Hypervisor: | * | | | | - * | | | - Post-work **(f)** | + * | | | - Post-work (f) | * | | | - set state to FREE | * | | | | * +-----------------------+-------------------------+----------------------+
fix test/syscall/fs stat and fstat for fd_file_system
@@ -131,12 +131,12 @@ class oe_fd_file_system return oe_rmdir(pathname); } - int stat(const char* pathname, struct oe_stat_t* buf) + int stat(const char* pathname, stat_type* buf) { return oe_stat(pathname, buf); } - int fstat(file_handle file, struct oe_stat_t* buf) + int fstat(file_handle file, stat_type* buf) { return oe_fstat(file, buf); } @@ -192,7 +192,7 @@ class fd_file_system public: typedef int file_handle; typedef DIR* dir_handle; - typedef struct oe_stat_t stat_type; + typedef struct stat stat_type; typedef struct dirent dirent_type; static constexpr file_handle invalid_file_handle = -1; @@ -296,12 +296,12 @@ class fd_file_system return ::rmdir(pathname); } - int stat(const char* pathname, struct oe_stat_t* buf) + int stat(const char* pathname, stat_type* buf) { return ::stat(pathname, (struct stat*)buf); } - int fstat(file_handle file, struct oe_stat_t* buf) + int fstat(file_handle file, stat_type* buf) { return ::fstat(file, (struct stat*)buf); } @@ -579,12 +579,12 @@ class stream_file_system return ::rmdir(pathname); } - int stat(const char* pathname, struct stat* buf) + int stat(const char* pathname, stat_type* buf) { return ::stat(pathname, buf); } - int fstat(file_handle file, struct stat* buf) + int fstat(file_handle file, stat_type* buf) { return ::fstat(fileno(file), buf); }
stm32/main: Fix passing state.reset_mode to init_flash_fs. state.reset_mode is updated by `MICROPY_BOARD_BEFORE_SOFT_RESET_LOOP` but not passed to `init_flash_fs`, and so factory reset is not executed on boards that do not have a bootloader. This bug was introduced by Fixes
@@ -531,7 +531,7 @@ soft_reset: // Create it if needed, mount in on /flash, and set it as current dir. bool mounted_flash = false; #if MICROPY_HW_FLASH_MOUNT_AT_BOOT - mounted_flash = init_flash_fs(reset_mode); + mounted_flash = init_flash_fs(state.reset_mode); #endif bool mounted_sdcard = false;
Fixed building without debug.
@@ -779,7 +779,7 @@ nxt_controller_conf_apply(nxt_task_t *task, nxt_controller_request_t *req) void nxt_port_controller_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) { - size_t size, dump_size; + size_t size; nxt_buf_t *b; nxt_controller_request_t *req; nxt_controller_response_t resp; @@ -787,9 +787,7 @@ nxt_port_controller_data_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg) b = msg->buf; size = b->mem.free - b->mem.pos; - dump_size = size > 300 ? 300 : size; - - nxt_debug(task, "contoller data: %*s ...", dump_size, b->mem.pos); + nxt_debug(task, "contoller data: %*s ...", size, b->mem.pos); nxt_memzero(&resp, sizeof(nxt_controller_response_t));
fix bug in getMacAddr
@@ -317,40 +317,44 @@ getMacAddr(char *string) DIR *d; struct dirent *dir; struct stat buf; - char path_buf[256]; char mac_buf[MAC_ADDR_LEN]; - char *physical_if_name; + char dir_path[256]; + char link_path[256]; + char addr_path[256]; + bool found = FALSE; - d = scope_opendir("/sys/class/net"); + d = scope_opendir("/sys/class/net/"); if (!d) return 1; - // Check if interface eth0 exists + // Check if interface eth exists // Otherwise find an interface that does not contain "virtual" in the soft link while ((dir = scope_readdir(d)) != NULL) { - if (scope_strcmp(dir->d_name, "eth0") == 0) { - physical_if_name = dir->d_name; + scope_sprintf(dir_path, "/sys/class/net/%s", dir->d_name); + + if (scope_strcmp(dir->d_name, "eth") == 0) { + found = TRUE; break; } - if (scope_lstat(dir->d_name, &buf) != 0) { + + if (scope_lstat(dir_path, &buf) != 0) { break; } if (S_ISLNK(buf.st_mode)) { - (void)scope_readlink(dir->d_name, path_buf, sizeof(path_buf)); - if (scope_strstr(path_buf, "virtual") == NULL) { - physical_if_name = dir->d_name; + (void)scope_readlink(dir_path, link_path, sizeof(link_path)); + if (scope_strstr(link_path, "virtual") == NULL) { + found = TRUE; break; } } } scope_closedir(d); - if (!physical_if_name) { + if (!found) { scopeLogError("Error: getMacAddr: No physical interface found"); return 1; } - char addr_path[256]; - scope_sprintf(addr_path, "/sys/class/net/%s/address", physical_if_name); + scope_sprintf(addr_path, "%s/address", dir_path); FILE *fp; if ((fp = scope_fopen(addr_path, "r")) == NULL) {
Fix H09 header test.
@@ -960,6 +960,7 @@ int picoquic_h09_server_process_data_header( } else { /* Too much data */ + stream_ctx->method = -1; ret = -1; break; }
magolor: add thermal table Add setup_thermal for setting thermal table BRANCH=master TEST=thermal team verify value on AP Tested-by: Henry Sun
@@ -187,10 +187,23 @@ BUILD_ASSERT(ARRAY_SIZE(temp_sensors) == TEMP_SENSOR_COUNT); const static struct ec_thermal_config thermal_a = { + .temp_host = { + [EC_TEMP_THRESH_WARN] = 0, + [EC_TEMP_THRESH_HIGH] = C_TO_K(70), + [EC_TEMP_THRESH_HALT] = C_TO_K(85), + }, + .temp_host_release = { + [EC_TEMP_THRESH_WARN] = 0, + [EC_TEMP_THRESH_HIGH] = C_TO_K(65), + [EC_TEMP_THRESH_HALT] = 0, + }, +}; + +const static struct ec_thermal_config thermal_b = { .temp_host = { [EC_TEMP_THRESH_WARN] = 0, [EC_TEMP_THRESH_HIGH] = C_TO_K(73), - [EC_TEMP_THRESH_HALT] = C_TO_K(80), + [EC_TEMP_THRESH_HALT] = C_TO_K(85), }, .temp_host_release = { [EC_TEMP_THRESH_WARN] = 0, @@ -204,7 +217,7 @@ struct ec_thermal_config thermal_params[TEMP_SENSOR_COUNT]; static void setup_thermal(void) { thermal_params[TEMP_SENSOR_1] = thermal_a; - thermal_params[TEMP_SENSOR_2] = thermal_a; + thermal_params[TEMP_SENSOR_2] = thermal_b; } void board_init(void)
STORE: Stop the flood of errors The old 'file:' loader was recently changed to stop the flood of repeated nested ASN.1 errors when trying to decode a DER blob in diverse ways. That is now reproduced in ossl_store_handle_load_result()
@@ -83,6 +83,23 @@ static int try_crl(struct extracted_param_data_st *, OSSL_STORE_INFO **, static int try_pkcs12(struct extracted_param_data_st *, OSSL_STORE_INFO **, OSSL_STORE_CTX *, OPENSSL_CTX *, const char *); +#define SET_ERR_MARK() ERR_set_mark() +#define CLEAR_ERR_MARK() \ + do { \ + int err = ERR_peek_last_error(); \ + \ + if (ERR_GET_LIB(err) == ERR_LIB_ASN1 \ + && ERR_GET_REASON(err) == ERR_R_NESTED_ASN1_ERROR) \ + ERR_pop_to_mark(); \ + else \ + ERR_clear_last_mark(); \ + } while(0) +#define RESET_ERR_MARK() \ + do { \ + CLEAR_ERR_MARK(); \ + SET_ERR_MARK(); \ + } while(0) + int ossl_store_handle_load_result(const OSSL_PARAM params[], void *arg) { struct ossl_load_result_data_st *cbdata = arg; @@ -123,14 +140,26 @@ int ossl_store_handle_load_result(const OSSL_PARAM params[], void *arg) * The helper functions return 0 on actual errors, otherwise 1, even if * they didn't fill out |*v|. */ - if (!try_name(&helper_data, v) - || !try_key(&helper_data, v, ctx, provider, libctx, propq) - || !try_cert(&helper_data, v, libctx, propq) - || !try_crl(&helper_data, v, libctx, propq) - || !try_pkcs12(&helper_data, v, ctx, libctx, propq)) - return 0; + SET_ERR_MARK(); + if (!try_name(&helper_data, v)) + goto err; + RESET_ERR_MARK(); + if (!try_key(&helper_data, v, ctx, provider, libctx, propq)) + goto err; + RESET_ERR_MARK(); + if (!try_cert(&helper_data, v, libctx, propq)) + goto err; + RESET_ERR_MARK(); + if (!try_crl(&helper_data, v, libctx, propq)) + goto err; + RESET_ERR_MARK(); + if (!try_pkcs12(&helper_data, v, ctx, libctx, propq)) + goto err; + CLEAR_ERR_MARK(); return (*v != NULL); + err: + return 0; } static int try_name(struct extracted_param_data_st *data, OSSL_STORE_INFO **v)
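For context, the SET/RESET/CLEAR macros above wrap OpenSSL's error-mark API. A minimal sketch of the underlying pattern, with a hypothetical try_parse() standing in for the try_key()/try_cert()/... helpers: errors pushed during a failed speculative attempt are discarded, while a successful attempt keeps the queue intact.

#include <openssl/err.h>

/* Hypothetical stand-in for try_key()/try_cert()/...; a real helper would
 * push nested ASN.1 errors onto the queue before reporting failure. */
static int try_parse(void)
{
    return 0;                   /* pretend this decode attempt failed */
}

int parse_speculatively(void)
{
    ERR_set_mark();             /* remember the current error-queue position */
    if (try_parse()) {
        ERR_clear_last_mark();  /* success: keep any errors, just drop the mark */
        return 1;
    }
    ERR_pop_to_mark();          /* failure: discard errors from this attempt */
    return 0;
}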
Eliminate usage of rand().
@@ -4697,10 +4697,10 @@ process_job(ipp3d_job_t *job) /* I - Job */ else { /* - * Sleep for a random amount of time to simulate job processing. + * Sleep for a semi-random amount of time to simulate job processing. */ - sleep((unsigned)(5 + (rand() % 11))); + sleep((unsigned)(5 + (time(NULL) % 11))); } if (job->cancel)
hv: Revise sanitized page size. The size of sanitized_page is currently 32KB; revise it to 4KB. Acked-by: Anthony Xu
#include <reloc.h> static void *ppt_mmu_pml4_addr; -static void *sanitized_page[CPU_PAGE_SIZE]; +static uint8_t sanitized_page[PAGE_SIZE] __aligned(PAGE_SIZE); static struct vmx_capability { uint32_t ept;
Prevent doxygen generation
#include "esp/esp_mem.h" #include "esp/esp_input.h" +#if !__DOXYGEN__ + static uint8_t initialized = 0; DWORD thread_id; @@ -226,3 +228,5 @@ esp_ll_deinit(esp_ll_t* ll) { initialized = 0; /* Clear initialized flag */ return espOK; } + +#endif /* !__DOXYGEN__ */
Updating package version in requirements-dev.txt
@@ -7,7 +7,7 @@ crcmod<=1.7 fasteners<=0.15 gcs-oauth2-boto-plugin<=2.5 google_apitools<=0.5.30 -httplib2<=0.16.0 +httplib2<=0.19.0 google_reauth<=0.1.0 mock<=2.0.0 monotonic<=1.5 @@ -25,7 +25,7 @@ boto<=2.49.0 pyu2f<=0.1.4 funcsigs<=1.0.2 pbr<=5.4.4 -rsa<=4.0 +rsa<=4.7 pyasn1<=0.4.8 pyasn1_modules<=0.2.8 cryptography<=2.8
Re-enable :source argument to dofile. Allows for some more interesting usage of loaders.
[path &keys {:exit exit :env env + :source src :expander expander :evaluator evaluator}] (def f (if (= (type path) :core/file) (def path-is-file (= f path)) (default env (make-env)) (def spath (string path)) - (put env :current-file (if-not path-is-file spath)) - (put env :source (if-not path-is-file spath path)) + (put env :current-file (or src (if-not path-is-file spath))) + (put env :source (or src (if-not path-is-file spath path))) (defn chunks [buf _] (file/read f 2048 buf)) (defn bp [&opt x y] (def ret (bad-parse x y)) (if exit (os/exit 1) (eflush)))) :evaluator evaluator :expander expander - :source (if path-is-file "<anonymous>" spath)})) + :source (or src (if path-is-file "<anonymous>" spath))})) (if-not path-is-file (file/close f)) nenv)
Array: Fix minor spelling mistakes
@@ -127,7 +127,7 @@ int elektraReadArrayNumber (const char * baseName, kdb_long_long_t * oldIndex) * * @param key which base name will be incremented * - * @retval -1 on error (e.g. too large array, not validated array) + * @retval -1 on error (e.g. array too large, non-valid array) * @retval 0 on success */ int elektraArrayIncName (Key * key) @@ -175,7 +175,7 @@ int elektraArrayIncName (Key * key) * * Returns true (1) for all keys that are part of the array * identified by the supplied array parent. Only the array - * elements themself, but no subkeys of them will be filtered + * elements themselves, but no subkeys of them will be filtered * * @pre The supplied argument has to be of type (const Key *) * and is the parent of the array to be extracted. For example
honggfuzz: increase size of a report
#define _HF_VERIFIER_ITER 5 /* Size (in bytes) for report data to be stored in stack before written to file */ -#define _HF_REPORT_SIZE 8192 +#define _HF_REPORT_SIZE 32768 /* Perf bitmap size */ #define _HF_PERF_BITMAP_SIZE_16M (1024U * 1024U * 16U)
interface: fix new channel redirect
@@ -8,7 +8,7 @@ import { addTag, createManagedGraph, createUnmanagedGraph } from '@urbit/api'; import { Form, Formik } from 'formik'; import _ from 'lodash'; import React, { ReactElement } from 'react'; -import { useHistory } from 'react-router-dom'; +import { useHistory, useRouteMatch } from 'react-router-dom'; import * as Yup from 'yup'; import { resourceFromPath } from '~/logic/lib/group'; import { useWaitForProps } from '~/logic/lib/useWaitForProps'; @@ -49,6 +49,7 @@ type NewChannelProps = { export function NewChannel(props: NewChannelProps): ReactElement { const history = useHistory(); + const match = useRouteMatch(); const { group, workspace, existingMembers, ...rest } = props; const groups = useGroupState(state => state.groups); const waiter = useWaitForProps({ groups }, 5000); @@ -121,9 +122,9 @@ export function NewChannel(props: NewChannelProps): ReactElement { ); } actions.setStatus({ success: null }); - const resourceUrl = location.pathname.includes('/messages') + const resourceUrl = match.url.includes('/messages') ? '/~landscape/messages' - : parentPath(location.pathname); + : parentPath(match.path); history.push( `${resourceUrl}/resource/${moduleType}/ship/~${window.ship}/${resId}` );
error: fix example
@@ -57,7 +57,7 @@ if (FOUND_NAME GREATER -1) target_link_elektra (${EXAMPLE} elektra-kdb elektra-notification) - # TODO resolve issues.libelektra.org/2007 + # TODO resolve https://issues.libelektra.org/2007 check_binding_was_added ("io_uv" IS_INCLUDED) if (IS_INCLUDED) @@ -85,6 +85,7 @@ if (FOUND_NAME GREATER -1) else () target_compile_definitions (${EXAMPLE} PRIVATE "HAVE_LIBUV1") endif () + target_include_directories (${EXAMPLE} PRIVATE ${libuv_INCLUDE_DIRS}) endif ()
path REFACTOR error handling
@@ -414,22 +414,23 @@ ly_path_compile_prefix(const struct ly_ctx *ctx, const struct lysc_node *cur_nod /* find next node module */ if (pref) { - ret = LY_EVALID; - LOG_LOCSET(cur_node, NULL, NULL, NULL); *mod = ly_resolve_prefix(ctx, pref, len, format, prefix_data); if (!*mod) { LOGVAL(ctx, LYVE_XPATH, "No module connected with the prefix \"%.*s\" found (prefix format %s).", (int)len, pref, ly_format2str(format)); + ret = LY_EVALID; goto error; } else if (!(*mod)->implemented) { if (lref == LY_PATH_LREF_FALSE) { LOGVAL(ctx, LYVE_XPATH, "Not implemented module \"%s\" in path.", (*mod)->name); + ret = LY_EVALID; goto error; } + assert(unres); - LY_CHECK_GOTO(ret = lys_set_implemented_r((struct lys_module *)*mod, NULL, unres), error); + LY_CHECK_GOTO(lys_set_implemented_r((struct lys_module *)*mod, NULL, unres), error); } LOG_LOCBACK(cur_node ? 1 : 0, 0, 0, 0); @@ -469,7 +470,7 @@ ly_path_compile_prefix(const struct ly_ctx *ctx, const struct lysc_node *cur_nod error: LOG_LOCBACK(cur_node ? 1 : 0, 0, 0, 0); - return LY_EVALID; + return ret; } LY_ERR
docs: fix bihash doc bugs Change hash -> hash_table in the pro forma main_t structure. Remove trailing whitespace. Type: docs
@@ -123,9 +123,9 @@ Add an instance of the selected bihash data structure to e.g. a typedef struct { ... - BVT (clib_bihash) hash; + BVT (clib_bihash) hash_table; or - clib_bihash_8_8_t hash; + clib_bihash_8_8_t hash_table; ... } my_main_t; ```
Fix regression. With changes introduced in issue came back.
@@ -442,6 +442,9 @@ uint32_t load_encoded_field( int64_t name_idx = dex_get_integer( dex->object, "field_ids[%i].name_idx", *previous_field_idx); + if (name_idx == UNDEFINED) + return 0; + SIZED_STRING* field_name = dex_get_string( dex->object, "string_ids[%i].value", name_idx); @@ -579,6 +582,9 @@ uint32_t load_encoded_method( int64_t name_idx = dex_get_integer( dex->object, "method_ids[%i].name_idx", *previous_method_idx); + if (name_idx == UNDEFINED) + return 0; + #ifdef DEBUG_DEX_MODULE printf("[DEX]\tNAME_IDX 0x%x\n", name_idx); #endif
misc: update extras/scripts/lsnet Type: improvement
#!/bin/bash -echo "PCI Address MAC address Device Name Driver State Speed Port Type" -echo "============ ================= ============== ========== ======== ========== ====================" +declare -A IDS +IDS["8086:10fb"]="82599ES PF" +IDS["8086:1583"]="XL710 PF" +IDS["8086:158b"]="XXV710 PF" +IDS["8086:154c"]="XXV710 VF" +IDS["8086:37d1"]="X722 PF" +IDS["8086:37cd"]="X722 VF" +IDS["8086:1889"]="AVF" +IDS["8086:1593"]="E810 PF" +IDS["15b3:1015"]="CX-4 Lx PF" +IDS["15b3:1017"]="CX-5 PF" +IDS["15b3:1019"]="CX-5 Ex PF" -for f in /sys/class/net/*; do - dev=$(basename ${f}) - if [ -e $f/device ] ; then - dev=$(basename ${f}) - pci_addr=$(basename $(readlink $f/device)) - mac=$(cat $f/address) - driver=$(basename $(readlink $f/device/driver)) - oper=$(cat $f/operstate) - speed=$(sudo ethtool $dev | grep Speed | cut -d" " -f2) - port=$(ethtool $dev 2> /dev/null | sed -ne 's/.*Port: \(.*\)/\1/p') - printf "%-12s %-14s %-14s %-10s %-8s %-10s %-20s\n" $pci_addr $mac $dev $driver $oper $speed "$port" - # ethtool $dev | grep Port: +echo "PCI Address N PCI-ID Driver MAC address Device Name State Speed Port Type" +echo "============ = =========== ========== ================= ============== ===== ========== ====================" + +for d in /sys/bus/pci/devices/*; do + class=$(cat $d/class) + [ "${class}" == "0x020000" ] || continue + pci_addr=$(basename $d) + numa=$(cat $d/numa_node) + vid=$(cat $d/vendor | sed -e s/0x//g) + did=$(cat $d/device| sed -e s/0x//g) + pci_id=${IDS[$vid:$did]} + + [ "$pci_id" == "" ] && pci_id="$vid:$did" + + if [ -e $d/driver ] ; then + driver=$(basename $(readlink $d/driver)) + else + driver=" " fi + if [ -e $d/net ] ; then + dev=$(basename $(ls $d/net | head -1)) + n=/sys/class/net/$dev + mac=$(cat $n/address) + oper=$(cat $n/operstate) +# speed=$(sudo ethtool $dev | grep Speed | cut -d" " -f2) +##port=$(ethtool $dev 2> /dev/null | sed -ne 's/.*Port: \(.*\)/\1/p') + else + dev=" " + mac=" " + oper=" " + speed=" " + port=" " + fi + printf "%-12s %s %-11s %-10s %-17s %-14s %-5s %-10s %s\n" \ + $pci_addr $numa "$pci_id" $driver $mac $dev $oper $speed "$port" done -
DSA_generate_parameters_ex: use the old method for all small keys Fixes
@@ -58,7 +58,7 @@ int DSA_generate_parameters_ex(DSA *dsa, int bits, return 0; /* The old code used FIPS 186-2 DSA Parameter generation */ - if (bits <= 1024 && seed_len == 20) { + if (bits < 2048 && seed_len <= 20) { if (!ossl_dsa_generate_ffc_parameters(dsa, DSA_PARAMGEN_TYPE_FIPS_186_2, bits, 160, cb)) return 0;
arm/tlsr82: Fix warning: "IC_TAG_CACHE_ADDR_EQU_EN" is not defined
#define FLL_STK_EN 1 #define ZERO_IC_TAG_EN 1 -#define IC_TAG_CACEH_ADDR_EQU_EN 0 +#define IC_TAG_CACHE_ADDR_EQU_EN 0 #define FLASH_WAKEUP_EN 1 #define COPY_DATA_EN 1 #define MULTI_ADDRESS_START 1
Mirror fix for IPP attribute validation.
@@ -580,10 +580,7 @@ ipp_create_job(server_client_t *client) /* I - Client */ */ if (!valid_job_attributes(client)) - { - httpFlush(client->http); return; - } /* * Do we have a file to print? @@ -1626,10 +1623,7 @@ ipp_print_job(server_client_t *client) /* I - Client */ */ if (!valid_job_attributes(client)) - { - httpFlush(client->http); return; - } /* * Do we have a file to print?
tracef: ensure output hits the screen. This slows things down, but when tracing, performance isn't the primary objective; seeing everything is.
extern unsigned char mode; #ifdef ENABLE_TRACE -#define tracef(...) if (mode & MODE_TRACE) fprintf(stdout, __VA_ARGS__) +#define tracef(...) if (mode & MODE_TRACE) { fprintf(stdout, __VA_ARGS__); fflush(stdout); } #else #define tracef(...) /* noop */ #endif
parallel-libs/petsc: Revert "parallel-libs/petsc: latest petsc build not finding MPI, specify wrappers directly" This reverts commit
@@ -108,16 +108,9 @@ unset FCFLAGS --with-f77=mpiifort \ %else %if "%{compiler_family}" == "gnu" - --with-cc=mpicc \ - --with-cxx=mpicxx \ - --with-fc=mpif90 \ --FFLAGS=-I$I_MPI_ROOT/include64/gfortran/4.9.0/ \ %endif %endif -%else - --with-cc=mpicc \ - --with-cxx=mpicxx \ - --with-fc=mpif90 \ %endif %if 0%{?OHPC_BUILD} --with-make-np=3 \
Generate a descriptive error message in case cc26xxware/cc13xxware does not exist
CPU_ABS_PATH = arch/cpu/cc26xx-cc13xx TI_XXWARE = $(CONTIKI_CPU)/$(TI_XXWARE_PATH) +ifeq (,$(wildcard $(TI_XXWARE))) + $(warning $(TI_XXWARE) does not exist.) + $(warning Did you run 'git submodule update --init' ?) + $(error "") +endif + ### cc26xxware sources under driverlib will be added to the MODULES list TI_XXWARE_SRC = $(CPU_ABS_PATH)/$(TI_XXWARE_PATH)/driverlib
fix compilation with cuda 9.2 nvcc on Windows
@@ -1920,13 +1920,8 @@ std::ostream& operator<<(std::ostream&, const TString&); namespace NPrivate { template <class Char> struct TCharToString { - class TDerivedString : public TBasicString<TDerivedString, Char, TCharTraits<Char>> { - using TBase = TBasicString<TDerivedString, Char, TCharTraits<Char>>; - public: - using TBase::TBase; - }; - - using type = TDerivedString; + // TODO: switch to TBaseString derived type when compilation with nvcc on windows will succeed + using type = TFixedString<Char>; }; template <>
Tests: added test for "procfs" option.
@@ -88,3 +88,26 @@ class TestPythonIsolation(TestApplicationPython): self.conf({"listeners": {}, "applications": {}}) assert waitforunmount(temp_dir), 'language_deps unmount' + + def test_python_isolation_procfs(self, is_su, temp_dir): + isolation_features = option.available['features']['isolation'].keys() + + if not is_su: + pytest.skip('requires root') + + isolation = {'rootfs': temp_dir, 'automount': {'procfs': False}} + + self.load('ns_inspect', isolation=isolation) + + assert ( + self.getjson(url='/?path=/proc/self')['body']['FileExists'] + == False + ), 'no /proc/self' + + isolation['automount']['procfs'] = True + + self.load('ns_inspect', isolation=isolation) + + assert ( + self.getjson(url='/?path=/proc/self')['body']['FileExists'] == True + ), '/proc/self'
linux/bfd: guard #include <diagnostics.h> with __has_include, because the header only appeared in 2018
#include "linux/bfd.h" #include <bfd.h> +#if defined __has_include +#if __has_include(<diagnostics.h>) #include <diagnostics.h> +#endif /* __has_include(<diagnostics.h>) */ +#endif /* defined __has_include */ #include <dis-asm.h> #include <inttypes.h> #include <pthread.h>
part_strategy does not need its very own keyword classification. This should be plain old ColId. Making it so makes the grammar less complicated, and makes the compiled tables a kilobyte or so smaller (likely because they don't have to deal with a keyword classification that's not used anyplace else).
@@ -595,7 +595,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type <boolean> opt_if_not_exists %type <ival> generated_when override_kind %type <partspec> PartitionSpec OptPartitionSpec -%type <str> part_strategy %type <partelem> part_elem %type <list> part_params %type <partboundspec> PartitionBoundSpec @@ -3894,7 +3893,7 @@ OptPartitionSpec: PartitionSpec { $$ = $1; } | /*EMPTY*/ { $$ = NULL; } ; -PartitionSpec: PARTITION BY part_strategy '(' part_params ')' +PartitionSpec: PARTITION BY ColId '(' part_params ')' { PartitionSpec *n = makeNode(PartitionSpec); @@ -3906,10 +3905,6 @@ PartitionSpec: PARTITION BY part_strategy '(' part_params ')' } ; -part_strategy: IDENT { $$ = $1; } - | unreserved_keyword { $$ = pstrdup($1); } - ; - part_params: part_elem { $$ = list_make1($1); } | part_params ',' part_elem { $$ = lappend($1, $3); } ;
publish: remove scrolling overflow from skeleton
@@ -33,7 +33,7 @@ export class Skeleton extends Component { path={props.path} invites={props.invites} /> - <div className={"h-100 w-100 overflow-container " + rightPanelHide} style={{ + <div className={"h-100 w-100 " + rightPanelHide} style={{ flexGrow: 1, }}> {props.children}
[core] default chunk size 8k (was 4k)
#define DEFAULT_TEMPFILE_SIZE (1 * 1024 * 1024) #define MAX_TEMPFILE_SIZE (128 * 1024 * 1024) -static size_t chunk_buf_sz = 4096; +static size_t chunk_buf_sz = 8192; static chunk *chunks, *chunks_oversized; static chunk *chunk_buffers; static const array *chunkqueue_default_tempdirs = NULL; @@ -33,7 +33,7 @@ static off_t chunkqueue_default_tempfile_size = DEFAULT_TEMPFILE_SIZE; void chunkqueue_set_chunk_size (size_t sz) { - chunk_buf_sz = sz > 0 ? ((sz + 1023) & ~1023uL) : 4096; + chunk_buf_sz = sz > 0 ? ((sz + 1023) & ~1023uL) : 8192; } void chunkqueue_set_tempdirs_default_reset (void)
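Aside from the default bump, the setter keeps its existing rounding behavior: (sz + 1023) & ~1023 rounds a requested size up to the next multiple of 1 KiB. A minimal standalone sketch of that idiom (not from the commit; expected results noted in comments):

#include <stdio.h>

/* Round sz up to the next multiple of 1024, as chunkqueue_set_chunk_size()
 * does for non-zero requests; the mask works because 1024 is a power of two. */
static size_t round_up_1k(size_t sz)
{
    return (sz + 1023) & ~(size_t)1023;
}

int main(void)
{
    printf("%zu\n", round_up_1k(1));    /* 1024 */
    printf("%zu\n", round_up_1k(4096)); /* 4096 */
    printf("%zu\n", round_up_1k(5000)); /* 5120 */
    return 0;
}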
Implement CodeLite workspace folders
-- -- Header -- - _p('<?xml version="1.0" encoding="UTF-8"?>') + p.w('<?xml version="1.0" encoding="UTF-8"?>') local tagsdb = "" -- local tagsdb = "./" .. wks.name .. ".tags" - _p('<CodeLite_Workspace Name="%s" Database="%s" SWTLW="No">', wks.name, tagsdb) + p.push('<CodeLite_Workspace Name="%s" Database="%s" SWTLW="No">', wks.name, tagsdb) -- -- Project list prjpath = path.getrelative(prj.workspace.location, prjpath) if (prj.name == wks.startproject) then - _x(1, '<Project Name="%s" Path="%s" Active="Yes"/>', prj.name, prjpath) + p.w('<Project Name="%s" Path="%s" Active="Yes"/>', prj.name, prjpath) else - _x(1, '<Project Name="%s" Path="%s"/>', prj.name, prjpath) + p.w('<Project Name="%s" Path="%s"/>', prj.name, prjpath) end end, - onbranch = function(n) - -- TODO: not sure what situation this appears...? - -- premake5.lua emit's one of these for 'contrib', which is a top-level folder with the zip projects + onbranchenter = function(n) + p.push('<VirtualDirectory Name="%s">', n.name) + end, + + onbranchexit = function(n) + p.pop('</VirtualDirectory>') end, }) end -- for each workspace config - _p(1, '<BuildMatrix>') + p.push('<BuildMatrix>') for cfg in workspace.eachconfig(wks) do local cfgname = codelite.cfgname(cfg) - _p(2, '<WorkspaceConfiguration Name="%s" Selected="yes">', cfgname) + p.push('<WorkspaceConfiguration Name="%s" Selected="yes">', cfgname) local tr = workspace.grouptree(wks) tree.traverse(tr, { onleaf = function(n) local prj = n.project - _p(3, '<Project Name="%s" ConfigName="%s"/>', prj.name, cfgname) + p.w('<Project Name="%s" ConfigName="%s"/>', prj.name, cfgname) end }) - _p(2, '</WorkspaceConfiguration>') + p.pop('</WorkspaceConfiguration>') end - _p(1, '</BuildMatrix>') + p.pop('</BuildMatrix>') - _p('</CodeLite_Workspace>') + p.pop('</CodeLite_Workspace>') end
Fix old defines in uac2_headset
@@ -72,8 +72,8 @@ static uint32_t blink_interval_ms = BLINK_NOT_MOUNTED; // Audio controls // Current states -int8_t mute[CFG_TUD_AUDIO_N_CHANNELS_TX + 1]; // +1 for master channel 0 -int16_t volume[CFG_TUD_AUDIO_N_CHANNELS_TX + 1]; // +1 for master channel 0 +int8_t mute[CFG_TUD_AUDIO_FUNC_1_N_CHANNELS_TX + 1]; // +1 for master channel 0 +int16_t volume[CFG_TUD_AUDIO_FUNC_1_N_CHANNELS_TX + 1]; // +1 for master channel 0 // Buffer for microphone data int16_t mic_buf[1000];
fixes missing full authentication for device_authorization endpoint
@@ -35,8 +35,9 @@ struct oidc_device_code* initDeviceFlow(struct oidc_account* account) { return NULL; } syslog(LOG_AUTHPRIV | LOG_DEBUG, "Data to send: %s", data); - char* res = sendPostDataWithoutBasicAuth(device_authorization_endpoint, data, - account_getCertPath(*account)); + char* res = sendPostDataWithBasicAuth( + device_authorization_endpoint, data, account_getCertPath(*account), + account_getClientId(*account), account_getClientSecret(*account)); secFree(data); if (res == NULL) { return NULL;
tests: fix test-checkstyle-diff if no .py changed Type: fix
@@ -271,8 +271,8 @@ checkstyle-diff: $(PIP_INSTALL_DONE) @bash -c "source $(VENV_PATH)/bin/activate &&\ python3 -m pip install pycodestyle" @bash -c "source $(VENV_PATH)/bin/activate &&\ - cd $(WS_ROOT) && git diff --name-only --no-color --relative HEAD~1 ':!*.patch' | grep '.py$$' | xargs -n 1 \ - pycodestyle --show-source --ignore=W504,E126,E241,E226,E305,E704,E741,E722 -v ||\ + cd $(WS_ROOT) && git diff --name-only --no-color --relative HEAD~1 ':!*.patch' | grep '.py$$' | xargs -I XXX \ + pycodestyle --show-source --ignore=W504,E126,E241,E226,E305,E704,E741,E722 -v XXX ||\ (echo \"*********************************************************************\" &&\ echo \"* Test framework PEP8 compliance check FAILED (checked changed files)\" &&\ echo \"*********************************************************************\" &&\
Stop enabling codegen in PR pipeline
@@ -47,8 +47,7 @@ function build_gpdb() { pushd gpdb_src source /opt/gcc_env.sh CC=$(which gcc) CXX=$(which g++) ./configure --enable-mapreduce --with-perl --with-libxml \ - --disable-orca --with-python --disable-gpfdist --prefix=${GREENPLUM_INSTALL_DIR} \ - --enable-codegen --with-codegen-prefix=/opt/llvm-3.7.1 ${CONFIGURE_FLAGS} + --disable-orca --with-python --disable-gpfdist --prefix=${GREENPLUM_INSTALL_DIR} # Use -j4 to speed up the build. (Doesn't seem worth trying to guess a better # value based on number of CPUs or anything like that. Going above -j4 wouldn't # make it much faster, and -j4 is small enough to not hurt too badly even on
board/helios/board.h: Format with clang-format BRANCH=none TEST=none
@@ -117,11 +117,7 @@ enum sensor_id { SENSOR_COUNT, }; -enum pwm_channel { - PWM_CH_KBLIGHT, - PWM_CH_FAN, - PWM_CH_COUNT -}; +enum pwm_channel { PWM_CH_KBLIGHT, PWM_CH_FAN, PWM_CH_COUNT }; enum fan_channel { FAN_CH_0 = 0,
const ngtcp2_path *path must not be NULL
@@ -1533,11 +1533,11 @@ NGTCP2_EXTERN int ngtcp2_accept(ngtcp2_pkt_hd *dest, const uint8_t *pkt, * initializes it as client. |dcid| is randomized destination * connection ID. |scid| is source connection ID. |version| is a * QUIC version to use. |path| is the network path where this QUIC - * connection is being established. |callbacks|, and |settings| must - * not be NULL, and the function make a copy of each of them. - * |user_data| is the arbitrary pointer which is passed to the - * user-defined callback functions. If |mem| is NULL, the memory - * allocator returned by `ngtcp2_mem_default()` is used. + * connection is being established and must not be NULL. |callbacks|, + * and |settings| must not be NULL, and the function make a copy of + * each of them. |user_data| is the arbitrary pointer which is passed + * to the user-defined callback functions. If |mem| is NULL, the + * memory allocator returned by `ngtcp2_mem_default()` is used. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1558,12 +1558,12 @@ ngtcp2_conn_client_new(ngtcp2_conn **pconn, const ngtcp2_cid *dcid, * `ngtcp2_conn_server_new` creates new :type:`ngtcp2_conn`, and * initializes it as server. |dcid| is a destination connection ID. * |scid| is a source connection ID. |path| is the network path where - * this QUIC connection is being established. |version| is a QUIC - * version to use. |callbacks|, and |settings| must not be NULL, and - * the function make a copy of each of them. |user_data| is the - * arbitrary pointer which is passed to the user-defined callback - * functions. If |mem| is NULL, the memory allocator returned by - * `ngtcp2_mem_default()` is used. + * this QUIC connection is being established and must not be NULL. + * |version| is a QUIC version to use. |callbacks|, and |settings| + * must not be NULL, and the function make a copy of each of them. + * |user_data| is the arbitrary pointer which is passed to the + * user-defined callback functions. If |mem| is NULL, the memory + * allocator returned by `ngtcp2_mem_default()` is used. * * This function returns 0 if it succeeds, or one of the following * negative error codes: @@ -1591,8 +1591,8 @@ NGTCP2_EXTERN void ngtcp2_conn_del(ngtcp2_conn *conn); * * `ngtcp2_conn_read_pkt` decrypts QUIC packet given in |pkt| of * length |pktlen| and processes it. |path| is the network path the - * packet is delivered. This function performs QUIC handshake as - * well. + * packet is delivered and must not be NULL. This function performs + * QUIC handshake as well. * * This function must not be called from inside the callback * functions. @@ -2491,9 +2491,9 @@ NGTCP2_EXTERN const ngtcp2_addr *ngtcp2_conn_get_remote_addr(ngtcp2_conn *conn); * @function * * `ngtcp2_conn_initiate_migration` starts connection migration to the - * given |path|. Only client can initiate migration. This function - * does immediate migration; it does not probe peer reachability from - * a new local address. + * given |path| which must not be NULL. Only client can initiate + * migration. This function does immediate migration; it does not + * probe peer reachability from a new local address. * * This function returns 0 if it succeeds, or one of the following * negative error codes:
feat: delete file_path in .ignore_format.yml
# If you need to exclude an entire folder, add the folder path in dir_path; # If you need to exclude a file, add the path to the file in file_path. -file_path: - dir_path: - Libraries/N32_Std_Driver
Travis: Use POSIX Shell syntax for `if`-statements
@@ -210,11 +210,11 @@ matrix: before_install: - | - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then + if [ "$TRAVIS_OS_NAME" = 'osx' ]; then rvm install 2.5.1 rvm use 2.5.1 gem install test-unit --no-document - if [[ "$CC" == "gcc" ]]; then + if [ "$CC" = 'gcc' ]; then brew upgrade gcc@9 export CC=gcc-9 export CXX=g++-9 @@ -224,9 +224,9 @@ before_install: brew services start dbus fi - | - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then - [[ -n "$CC_COMPILER" ]] && export CC="$CC_COMPILER" - [[ -n "$CXX_COMPILER" ]] && export CXX="$CXX_COMPILER" + if [ "$TRAVIS_OS_NAME" = 'linux' ]; then + [ -n "$CC_COMPILER" ] && export CC="$CC_COMPILER" + [ -n "$CXX_COMPILER" ] && export CXX="$CXX_COMPILER" sudo apt-get -qq update sudo apt-get install ninja-build sudo apt-get install devscripts # contains `checkbashisms` @@ -248,7 +248,7 @@ before_install: before_script: - | cd $TRAVIS_BUILD_DIR - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then + if [ "$TRAVIS_OS_NAME" = 'linux' ]; then CMAKE_OPT+=(-DCMAKE_INSTALL_PREFIX="$PWD/install") export PATH=$PATH:"$PWD/install/bin" export LD_LIBRARY_PATH="$PWD/install/lib"
Fix an issue found with constant folding
@@ -545,7 +545,7 @@ static void binary(Compiler *compiler, Token previousToken, bool canAssign) { TokenType currentToken = compiler->parser->previous.type; // Attempt constant fold. - if ((previousToken.type == TOKEN_NUMBER || previousToken.type == TOKEN_RIGHT_PAREN) && + if ((previousToken.type == TOKEN_NUMBER) && (currentToken == TOKEN_NUMBER || currentToken == TOKEN_LEFT_PAREN) && foldBinary(compiler, operatorType) ) {
Improve FLS-PP3 DDF multi endpoint handling
{ "schema": "devcap1.schema.json", - "manufacturername": "dresden elektronik", - "modelid": "FLS-PP3", + "manufacturername": ["dresden elektronik", "dresden elektronik"], + "modelid": ["FLS-PP3", "FLS-PP3 White"], "product": "FLS-PP lp", "sleeper": false, - "status": "Draft", - "path": "/devices/dresden_elektronik/fls_pp3.json", + "status": "Gold", "subdevices": [ { "type": "$TYPE_EXTENDED_COLOR_LIGHT", "name": "attr/manufacturername" }, { - "name": "attr/modelid" + "name": "attr/modelid", + "static": "FLS-PP3", + "parse": {"fn": "zcl", "ep": 10, "cl": "0x0000", "at": "0x0005", "eval": "Item.val = Attr.val"} }, { "name": "attr/name" "name": "attr/manufacturername" }, { - "name": "attr/modelid" + "name": "attr/modelid", + "static": "FLS-PP3 White", + "parse": {"fn": "zcl", "ep": 11, "cl": "0x0000", "at": "0x0005", "eval": "Item.val = Attr.val"} }, { "name": "attr/name"
added weak Fibertel candidates
@@ -32,6 +32,7 @@ static bool wpsflag = false; static bool eudateflag = false; static bool usdateflag = false; static bool ngflag = false; +static bool ftflag = false; static FILE *fhpsk; @@ -128,6 +129,21 @@ if(fileflag == true) return; } /*===========================================================================*/ +static void keywriteft(void) +{ +int weakft; +for(weakft = 0; weakft < 10000000; weakft++) + { + snprintf(pskstring, 64, "004%07d", weakft); + writepsk(pskstring); + snprintf(pskstring, 64, "010%07d", weakft); + writepsk(pskstring); + snprintf(pskstring, 64, "014%07d", weakft); + writepsk(pskstring); + } +return; +} +/*===========================================================================*/ static void keywriteweakpass(void) { size_t w; @@ -1039,7 +1055,8 @@ printf("%s %s (C) %s ZeroBeat\n" "-W : include complete wps keys\n" "-D : include complete european dates\n" "-d : include complete american dates\n" - "-N : include complete NETGEARxx candidates\n" + "-N : include complete NETGEARxx weak candidates\n" + "-F : include complete Fibertel weak candidates\n" "-h : this help\n" "-v : version\n" "\n", eigenname, VERSION, VERSION_JAHR, eigenname, eigenname); @@ -1060,7 +1077,7 @@ eigenpfadname = strdupa(argv[0]); eigenname = basename(eigenpfadname); setbuf(stdout, NULL); -while ((auswahl = getopt(argc, argv, "i:o:swWDdNhv")) != -1) +while ((auswahl = getopt(argc, argv, "i:o:swWDdNFhv")) != -1) { switch (auswahl) { @@ -1097,6 +1114,10 @@ while ((auswahl = getopt(argc, argv, "i:o:swWDdNhv")) != -1) ngflag = true; break; + case 'F': + ftflag = true; + break; + default: usage(eigenname); } @@ -1147,6 +1168,8 @@ if((stdoutflag == true) || (fileflag == true)) keywriteyearyear(); if(ngflag == true) keywriteng(); + if(ftflag == true) + keywriteft(); } if(hcxdata != NULL)
Clipping doesn't work with tri() api
@@ -939,8 +939,9 @@ static void api_tri(tic_mem* memory, s32 x1, s32 y1, s32 x2, s32 y2, s32 x3, s32 ticLine(memory, x3, y3, x1, y1, color, triPixelFunc); u8 final_color = mapColor(&machine->memory, color); - s32 yt = max(0, min(y1, min(y2, y3))); - s32 yb = min(TIC80_HEIGHT, max(y1, max(y2, y3)) + 1); + s32 yt = max(machine->state.clip.t, min(y1, min(y2, y3))); + s32 yb = min(machine->state.clip.b, max(y1, max(y2, y3)) + 1); + for(s32 y = yt; y < yb; y++) { s32 xl = max(SidesBuffer.Left[y], machine->state.clip.l); s32 xr = min(SidesBuffer.Right[y]+1, machine->state.clip.r);
changed printf to xdag_fatal; finished.
@@ -77,7 +77,7 @@ int xdag_mem_init(size_t size) size_t wrote = snprintf(g_tmpfile_to_remove, TMPFILE_PATH_LEN + TMPFILE_TEMPLATE_LEN,"%s%s", g_tmpfile_path, TMPFILE_TEMPLATE); if (wrote >= TMPFILE_PATH_LEN + TMPFILE_TEMPLATE_LEN){ - printf("Error: Temporary file path exceed the max length that is 1024 characters"); + xdag_fatal("Error: Temporary file path exceed the max length that is 1024 characters"); return -1; } g_fd = mkstemp(g_tmpfile_to_remove);
tneat: number of flows via command-line argument
@@ -102,6 +102,7 @@ print_usage() printf("tneat [OPTIONS] [HOST]\n"); printf("\t- c \tpath to server certificate (%s)\n", cert_file); + printf("\t- c \tnumber of outgoing flows (%d)\n", config_num_flows); printf("\t- k \tpath to server key (%s)\n", key_file); printf("\t- l \tsize for each message in byte (%d)\n", config_snd_buffer_size); printf("\t- L \tloop mode - tneat talking to itself\n"); @@ -391,7 +392,7 @@ main(int argc, char *argv[]) memset(&ops_client, 0, sizeof(ops_client)); memset(&op_server, 0, sizeof(op_server)); - while ((arg = getopt(argc, argv, "c:k:l:Ln:p:P:R:T:v:w:")) != -1) { + while ((arg = getopt(argc, argv, "c:f:k:l:Ln:p:P:R:T:v:w:")) != -1) { switch(arg) { case 'c': cert_file = optarg; @@ -399,6 +400,12 @@ main(int argc, char *argv[]) printf("option - server certificate file: %s\n", cert_file); } break; + case 'f': + config_num_flows = atoi(optarg); + if (config_log_level >= 1) { + printf("option - number of flows: %d\n", config_num_flows); + } + break; case 'k': key_file = optarg; if (config_log_level >= 1) {
Initialize file_size function as NULL for process scans.
@@ -54,6 +54,9 @@ YR_API int yr_process_open_iterator(int pid, YR_MEMORY_BLOCK_ITERATOR* iterator) iterator->context = context; iterator->first = yr_process_get_first_memory_block; iterator->next = yr_process_get_next_memory_block; + // In a process scan file size is undefined, when the file_size function is + // set to NULL the value returned by the filesize keyword is YR_UNDEFINED. + iterator->file_size = NULL; context->buffer = NULL; context->buffer_size = 0;
test: fix: remove invalid comment
@@ -393,7 +393,6 @@ void x509_crt_check(char *subject_key_file, char *subject_pwd, } #if defined(MBEDTLS_TEST_DEPRECATED) && defined(MBEDTLS_BIGNUM_C) - /* Try to set an invalid */ TEST_ASSERT(mbedtls_mpi_read_binary(&serial_mpi, serial_arg->x, serial_arg->len) == 0); TEST_ASSERT(mbedtls_x509write_crt_set_serial(&crt, &serial_mpi) == 0);
Flesh out specs for Image#chop
RSpec.describe Magick::Image, "#chop" do - it "works" do - image = described_class.new(20, 20) + def build_gray_image + image = Magick::Image.new(3, 3) + pixels = [ + [gray(1), gray(2), gray(3)], + [gray(4), gray(5), gray(6)], + [gray(7), gray(8), gray(9)] + ] + image.import_pixels(0, 0, 3, 3, "RGB", pixels.flatten) + end + + it "removes a cross from the middle of the image" do + image = build_gray_image + + new_image = image.chop(1, 1, 1, 1) + + expected_pixels = [ + [gray(1), gray(3)], + [gray(7), gray(9)] + ] + expect(new_image).to match_pixels(expected_pixels) + end + + it "removes an L-shape from the bottom left of the image" do + image = build_gray_image + + new_image = image.chop(0, 2, 1, 1) + + expected_pixels = [ + [gray(2), gray(3)], + [gray(5), gray(6)] + ] + expect(new_image).to match_pixels(expected_pixels) + end + + it "removes 1 column from the middle of the image" do + image = build_gray_image + + new_image = image.chop(1, 1, 1, 0) + + expected_pixels = [ + [gray(1), gray(3)], + [gray(4), gray(6)], + [gray(7), gray(9)] + ] + expect(new_image).to match_pixels(expected_pixels) + end + + it "removes 1 row from the middle of the image" do + image = build_gray_image + + new_image = image.chop(1, 1, 0, 1) + + expected_pixels = [ + [gray(1), gray(2), gray(3)], + [gray(7), gray(8), gray(9)] + ] + expect(new_image).to match_pixels(expected_pixels) + end + + it "removes 2 rows and 2 columns from the image" do + image = build_gray_image + + new_image = image.chop(0, 0, 2, 2) + + expect(new_image).to match_pixels([gray(9)]) + end + + it "raises an error when x is out of bounds" do + image = build_gray_image + + expect { image.chop(5, 1, 1, 1) }.to raise_error(RuntimeError) + end + + it "raises an error when y is out of bounds" do + image = build_gray_image + + expect { image.chop(1, 5, 1, 1) }.to raise_error(RuntimeError) + end + + it "does not raise an error when width or height are out of bounds" do + image = build_gray_image + + expect { image.chop(1, 1, 5, 5) }.not_to raise_error + end + + it "raises an error when the argument is the wrong type" do + image = build_gray_image - result = image.chop(10, 10, 10, 10) - expect(result).to be_instance_of(described_class) + expect { image.chop("hello", 1, 1, 1) }.to raise_error(TypeError) end end
Update install_dependencies.sh
@@ -149,7 +149,7 @@ cd .. if [[ "$OS_NAME" == "linux" ]] then echo "installing boost" - wget https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.gz -O ./boost.tar.gz + wget https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.gz -O ./boost.tar.gz tar zxf ./boost.tar.gz mv boost_1_72_0 boost && cd boost
output_thread: added extra argument to flb_sched_timer_cb_create call
@@ -214,7 +214,7 @@ static void output_thread(void *data) */ ret = flb_sched_timer_cb_create(sched, FLB_SCHED_TIMER_CB_PERM, - 1500, cb_thread_sched_timer, ins); + 1500, cb_thread_sched_timer, ins, NULL); if (ret == -1) { flb_plg_error(ins, "could not schedule permanent callback"); return;
[core] perf: simpler buffer_string_space() (fixed)
@@ -194,7 +194,7 @@ static inline size_t buffer_string_length(const buffer *b) { } static inline size_t buffer_string_space(const buffer *b) { - return NULL != b ? b->size - (0 != b->used) : 0; + return NULL != b ? b->size - b->used - (0 == b->used) : 0; } static inline void buffer_append_string_buffer(buffer *b, const buffer *src) {
s32k3xx:LPSPI register usage cleanup
@@ -517,32 +517,6 @@ static struct s32k3xx_lpspidev_s g_lpspi5dev = * Private Functions ****************************************************************************/ -/**************************************************************************** - * Name: spi_modifyreg - * - * Description: - * Atomic modification of the 32-bit contents of the SPI register at offset - * - * Input Parameters: - * priv - private SPI device structure - * offset - offset to the register of interest - * clearbits - bits to clear - * clearbits - bits to set - * - * Returned Value: - * None. - * - ****************************************************************************/ - -#ifdef CONFIG_S32K3XX_LPSPI_DMA -static inline void spi_modifyreg(struct s32k3xx_lpspidev_s *priv, - uint8_t offset, uint32_t clearbits, - uint32_t setbits) -{ - modifyreg32(priv->spibase + offset, clearbits, setbits); -} -#endif - /**************************************************************************** * Name: s32k3xx_lpspi_getreg8 * @@ -746,7 +720,7 @@ static inline uint16_t } /**************************************************************************** - * Name: s32k3xx_lpspi_modifyreg + * Name: s32k3xx_lpspi_modifyreg32 * * Description: * Clear and set bits in register @@ -1104,9 +1078,9 @@ static uint32_t s32k3xx_lpspi_setfrequency(struct spi_dev_s *dev, /* Write the best values in the CCR register */ - regval &= ~LPSPI_CCR_SCKDIV_MASK; - regval |= LPSPI_CCR_SCKDIV(best_scaler); - s32k3xx_lpspi_putreg32(priv, S32K3XX_LPSPI_CCR_OFFSET, regval); + s32k3xx_lpspi_modifyreg32(priv, S32K3XX_LPSPI_CCR_OFFSET, + LPSPI_CCR_SCKDIV_MASK, + LPSPI_CCR_SCKDIV(best_scaler)); /* Re-enable LPSPI if it was enabled previously */ @@ -1764,7 +1738,7 @@ static void s32k3xx_lpspi_exchange(struct spi_dev_s *dev, regval = s32k3xx_lpspi_getreg32(priv, S32K3XX_LPSPI_CFGR1_OFFSET); - spi_modifyreg(priv, S32K3XX_LPSPI_CR_OFFSET, + s32k3xx_lpspi_modifyreg32(priv, S32K3XX_LPSPI_CR_OFFSET, LPSPI_CR_RTF | LPSPI_CR_RRF, LPSPI_CR_RTF | LPSPI_CR_RRF); @@ -1821,7 +1795,7 @@ static void s32k3xx_lpspi_exchange(struct spi_dev_s *dev, /* Invoke SPI DMA */ - spi_modifyreg(priv, S32K3XX_LPSPI_DER_OFFSET, + s32k3xx_lpspi_modifyreg32(priv, S32K3XX_LPSPI_DER_OFFSET, 0, LPSPI_DER_TDDE | LPSPI_DER_RDDE); /* Then wait for each to complete */
doc: update code to "losetup -r"
@@ -126,7 +126,7 @@ Start the User OS (UOS) .. code-block:: none - # losetup -f -P --show ~/uos.img + # losetup -r -f -P --show ~/uos.img # mount /dev/loop0p3 /mnt # ls -l /mnt/usr/lib/kernel/
Fix bug in configure 'pcap drop trace on file xx.cap' command
@@ -1392,8 +1392,8 @@ pcap_drop_trace_command_fn (vlib_main_t * vm, if (im->pcap_filename) vec_free (im->pcap_filename); - vec_add1 (filename, 0); im->pcap_filename = chroot_filename; + im->pcap_main.file_name = (char *) im->pcap_filename; matched = 1; } else if (unformat (input, "status"))
options/posix: implement sem_timedwait as sem_wait
@@ -48,9 +48,9 @@ int sem_wait(sem_t *sem) { } } -int sem_timedwait(sem_t *, const struct timespec *) { - __ensure(!"sem_timedwait() is unimplemented"); - __builtin_unreachable(); +int sem_timedwait(sem_t *sem, const struct timespec *) { + mlibc::infoLogger() << "\e[31mmlibc: sem_timedwait is implemented as sem_wait\e[0m" << frg::endlog; + return sem_wait(sem); } int sem_post(sem_t *sem) {
Fix path for in-tree builds [ci skip]
}, "type" : "shell", "command" : "make", - "options" : { - "cwd" : "${workspaceRoot}/build" - }, + // Uncomment for out-of-tree build + //"options" : { + // "cwd" : "${workspaceRoot}/build" + //}, "problemMatcher": [ // Clang {
Corrected revision history. Moved the configuration file update from general RTOS2 rev. hist. to RTX 5 specific one.
@@ -80,9 +80,6 @@ File/Folder | Content <tr> <td>V2.1.0</td> <td> - Updated configuration files: RTX_Config.h for the configuration settings and RTX_config.c for - implementing the \ref rtx5_specific. - Support for critical and uncritical sections (nesting safe): - updated: \ref osKernelLock, \ref osKernelUnlock - added: \ref osKernelRestoreLock @@ -158,7 +155,7 @@ File/Folder | Content - Based on CMSIS-RTOS API V2.1. - Added support for Event recording. - Added support for IAR compiler. - - Configuration file split into .h and .c file. + - Updated configuration files: RTX_Config.h for the configuration settings and RTX_config.c for implementing the \ref rtx5_specific. - osRtx name-space for RTX specific symbols. </td> </tr>
Support Xiaomi Aqara wall switch QBKG03LM (experimental) Issue
@@ -114,6 +114,7 @@ static const SupportedDevice supportedDevices[] = { { VENDOR_JENNIC, "lumi.sensor_cube", jennicMacPrefix }, { VENDOR_JENNIC, "lumi.sensor_86sw1", jennicMacPrefix }, { VENDOR_JENNIC, "lumi.sensor_86sw2", jennicMacPrefix }, + { VENDOR_JENNIC, "lumi.ctrl_neural2", jennicMacPrefix }, { VENDOR_UBISYS, "D1", ubisysMacPrefix }, { VENDOR_NONE, "Z716A", netvoxMacPrefix }, { 0, 0, 0 } @@ -2568,7 +2569,8 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node) fpOpenCloseSensor.inClusters.push_back(ci->id()); } else if (modelId == QLatin1String("lumi.sensor_86sw1") || - modelId == QLatin1String("lumi.sensor_86sw2")) + modelId == QLatin1String("lumi.sensor_86sw2") || + modelId == QLatin1String("lumi.ctrl_neural2")) { if (i->endpoint() == 0x01) // create sensor only for first endpoint { @@ -3409,7 +3411,8 @@ void DeRestPluginPrivate::updateSensorNode(const deCONZ::NodeEvent &event) if ((event.node()->address().ext() & macPrefixMask) == jennicMacPrefix) { if (i->modelId() == QLatin1String("lumi.sensor_86sw1") || - i->modelId() == QLatin1String("lumi.sensor_86sw2")) + i->modelId() == QLatin1String("lumi.sensor_86sw2") || + i->modelId() == QLatin1String("lumi.ctrl_neural2")) { // 3 endpoints: 1 sensor } else @@ -3898,7 +3901,8 @@ void DeRestPluginPrivate::updateSensorNode(const deCONZ::NodeEvent &event) quint32 button; if (i->modelId() == QLatin1String("lumi.sensor_86sw1") || - i->modelId() == QLatin1String("lumi.sensor_86sw2")) + i->modelId() == QLatin1String("lumi.sensor_86sw2") || + i->modelId() == QLatin1String("lumi.ctrl_neural2")) { button = (S_BUTTON_1 * event.endpoint()) + S_BUTTON_ACTION_SHORT_RELEASED; }
Add tests for empty and default ChordLengths
@@ -146,6 +146,34 @@ chordlengths_default_ctor(CuTest *tc) delete defaultCtor; } +void +chordlengths_empty_map(CuTest *tc) +{ + // Given + BSpline spline; + ChordLengths empty = spline.chordLenghts({}); + + // When/Then + try { + empty.tToKnot((real) 0.0); + CuFail(tc, "expected exception"); + } catch(std::exception &exc) {} +} + +void +chordlengths_default_map(CuTest *tc) +{ + // Given + ChordLengths *defaultCtor = new ChordLengths(); + + // When/Then + try { + defaultCtor->tToKnot((real) 0.0); + CuFail(tc, "expected exception"); + } catch(std::exception &exc) {} + delete defaultCtor; +} + CuSuite * get_chordlengths_suite() { @@ -155,5 +183,7 @@ get_chordlengths_suite() SUITE_ADD_TEST(suite, chordlengths_copy_assign); SUITE_ADD_TEST(suite, chordlengths_move_assign); SUITE_ADD_TEST(suite, chordlengths_default_ctor); + SUITE_ADD_TEST(suite, chordlengths_empty_map); + SUITE_ADD_TEST(suite, chordlengths_default_map); return suite; }
BugID:18444986:[utils memstatus]fix coredump:add mutex init before use
@@ -355,7 +355,9 @@ void *LITE_malloc_internal(const char *f, const int l, int size, ...) if (!ptr) { return NULL; } - + if (mutex_mem_stats == NULL) { + mutex_mem_stats = HAL_MutexCreate(); + } HAL_MutexLock(mutex_mem_stats); iterations_allocated += 1; @@ -450,6 +452,9 @@ void LITE_free_internal(void *ptr) return; } + if (mutex_mem_stats == NULL) { + mutex_mem_stats = HAL_MutexCreate(); + } HAL_MutexLock(mutex_mem_stats); pos = NULL;
snmp: remove double typedef This is already typedefed in snmp-mib.h.
* This group contains all the functions that can be used outside the OS level. */ -/** - * @brief The MIB Resource struct - */ -typedef struct snmp_mib_resource_s snmp_mib_resource_t; - /** * @brief Initializes statically an oid with the "null" terminator *
VERSION bump to version 1.1.25
@@ -42,7 +42,7 @@ set(CMAKE_C_FLAGS_PACKAGE "-g -O2 -DNDEBUG") # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(LIBNETCONF2_MAJOR_VERSION 1) set(LIBNETCONF2_MINOR_VERSION 1) -set(LIBNETCONF2_MICRO_VERSION 24) +set(LIBNETCONF2_MICRO_VERSION 25) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) # Version of the library
YAML CPP: Add limitation about metadata to ReadMe
@@ -224,5 +224,6 @@ level 1: - Adding and removing keys does remove **comments** inside the configuration file - The plugin currently lacks proper **type support** for scalars +- The plugin does not support **metadata** [yaml-cpp]: https://github.com/jbeder/yaml-cpp
apps/s_server.c: Avoid unused variable due to 'no-dtls' Fixes
@@ -2189,9 +2189,7 @@ static int sv_body(int s, int stype, int prot, unsigned char *context) SSL *con = NULL; BIO *sbio; struct timeval timeout; -#if defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS) - struct timeval tv; -#else +#if !(defined(OPENSSL_SYS_WINDOWS) || defined(OPENSSL_SYS_MSDOS)) struct timeval *timeoutp; #endif #ifndef OPENSSL_NO_DTLS @@ -2392,9 +2390,9 @@ static int sv_body(int s, int stype, int prot, unsigned char *context) * second and check for any keypress. In a proper Windows * application we wouldn't do this because it is inefficient. */ - tv.tv_sec = 1; - tv.tv_usec = 0; - i = select(width, (void *)&readfds, NULL, NULL, &tv); + timeout.tv_sec = 1; + timeout.tv_usec = 0; + i = select(width, (void *)&readfds, NULL, NULL, &timeout); if (has_stdin_waiting()) read_from_terminal = 1; if ((i < 0) || (!i && !read_from_terminal))
Update Sky130-OpenROAD-Tutorial.rst
@@ -72,18 +72,6 @@ Pull the Hammer environment into the shell: export HAMMER_HOME=$PWD/hammer source $HAMMER_HOME/sourceme.sh -Building the Design --------------------- -To elaborate the ``TinyRocketConfig`` and set up all prerequisites for the build system to push the design and SRAM macros through the flow: - -.. code-block:: shell - - make buildfile tech_name=sky130 CONFIG=TinyRocketConfig - -The ``CONFIG=TinyRocketConfig`` selects the target generator config in the same manner as the rest of the Chipyard framework. This elaborates a stripped-down Rocket Chip in the interest of minimizing tool runtime. - -For the curious, ``make buildfile`` generates a set of Make targets in ``build/hammer.d``. It needs to be re-run if environment variables are changed. It is recommended that you edit these variables directly in the Makefile rather than exporting them to your shell environment. - Running the VLSI Flow --------------------- @@ -101,8 +89,46 @@ First, set ``technology.sky130.<sky130A, sky130_nda, openram_lib>`` to the absol for details about the PDK setup. +Generating SRAMs +---------------- +To map the generic memory macros in the generarted Verilog to the SRAMs in your technology process, run the following command: + +.. code-block:: shell + + make srams tech_name=sky130 CONFIG=TinyRocketConfig + +Generating Verilog +------------------ +To elaborate the ``TinyRocketConfig`` from Chisel to Verilog, run: + +.. code-block:: shell + + make verilog tech_name=sky130 CONFIG=TinyRocketConfig + +The ``CONFIG=TinyRocketConfig`` selects the target generator config in the same manner as the rest of the Chipyard framework. This elaborates a stripped-down Rocket Chip in the interest of minimizing tool runtime. The resulting verilog is located in ``./generated-src/chipyard.TestHarness.TinyRocketConfig/chipyard.TestHarness.TinyRocketConfig.top.v``. + +Note that in the generated Verilog, there are generic memory macros for the various memory components (dcache, icache, tag array, PTW). +This is the same Verilog that is generated for RTL simulations in the ``~chipyard/sims/verilator`` directory, see ` :ref:`Simulation/Software-RTL-Simulation:sw-rtl-sim-intro` for directions on how to run these simulations. + +Building the Design +^^^^^^^^^^^^^^^^^^^ +To set up all prerequisites for the build system to push the design and SRAM macros through the flow: + +.. code-block:: shell + + make buildfile tech_name=sky130 CONFIG=TinyRocketConfig + +For the curious, ``make buildfile`` generates a set of Make targets in ``build/hammer.d``. It needs to be re-run if environment variables are changed. It is recommended that you edit these variables directly in the Makefile rather than exporting them to your shell environment. + + +example-openroad.yml +^^^^^^^^^^^^^^^^^^^^ +This contains the Hammer configuration for the OpenROAD tool flow. It selects tools for the SRAM "compiler" (already specified in ``example-sky130.yml``), synthesis (Yosys), place and route (OpenROAD), DRC (Magic), and LVS (NetGen). For the remaining commands, we will need to specify this file as the tool configuration to hammer via the ``TOOLS_CONF`` Makefile variable. + + Synthesis ^^^^^^^^^ + .. code-block:: shell make syn tech_name=sky130 TOOLS_CONF=example-openroad.yml CONFIG=TinyRocketConfig
build: consistent use of CMAKE_INSTALL_LIBDIR Set the RPATH based on CMAKE_INSTALL_LIBDIR so that libraries are correctly found. Type: make
@@ -152,7 +152,7 @@ endif() ############################################################################## option(VPP_SET_RPATH "Set rpath for resulting binaries and libraries." ON) if(VPP_SET_RPATH) - set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib") + set(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}") endif() set(CMAKE_INSTALL_MESSAGE NEVER)
fix: nanosp sdk version in conftest
@@ -10,7 +10,7 @@ from ethereum_client.ethereum_cmd import EthereumCommand SCRIPT_DIR = Path(__file__).absolute().parent API_URL = "http://127.0.0.1:5000" -VERSION = {"nanos": "2.1", "nanox": "2.0.2", "nanosp": "1.0"} +VERSION = {"nanos": "2.1", "nanox": "2.0.2", "nanosp": "1.0.3"} def pytest_addoption(parser):
ci: fix redundant key `variables` under `.deploy_docs_template`
@@ -152,13 +152,12 @@ build_docs_pdf: image: $ESP_IDF_DOC_ENV_IMAGE variables: PYTHON_VER: 3.6.13 + DOCS_BUILD_DIR: "${IDF_PATH}/docs/_build/" + PYTHONUNBUFFERED: 1 stage: test_deploy tags: - deploy - shiny - variables: - DOCS_BUILD_DIR: "${IDF_PATH}/docs/_build/" - PYTHONUNBUFFERED: 1 dependencies: [] script: - add_doc_server_ssh_keys $DOCS_DEPLOY_PRIVATEKEY $DOCS_DEPLOY_SERVER $DOCS_DEPLOY_SERVER_USER
Fix lc823450_i2s.c:277:7: error: variable 'n' is used uninitialized whenever switch default is taken
@@ -257,10 +257,10 @@ extern unsigned int XT1OSC_CLK; static void _setup_audio_pll(uint32_t freq) { - DEBUGASSERT(24000000 == XT1OSC_CLK); + uint32_t m = 0; + uint32_t n = 0; - uint32_t m; - uint32_t n; + DEBUGASSERT(24000000 == XT1OSC_CLK); switch (freq) {
nat: fix deleting nat ei out interface feature Type: fix Set is_add function argument to 0 when deleting interface role.
@@ -1064,8 +1064,8 @@ nat44_ei_del_output_interface (u32 sw_if_index) } } - nat44_ei_add_del_addr_to_fib_foreach_addr (sw_if_index, 1); - nat44_ei_add_del_addr_to_fib_foreach_addr_only_sm (sw_if_index, 1); + nat44_ei_add_del_addr_to_fib_foreach_addr (sw_if_index, 0); + nat44_ei_add_del_addr_to_fib_foreach_addr_only_sm (sw_if_index, 0); return 0; }
common: improve memcpy/memset code The initial goal here was just to prefer __builtin_memcpy/memset if they're available, but I ended up making some additional changes which I think make the code a bit cleaner, too.
@@ -645,23 +645,28 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; #endif /* Try to deal with environments without a standard library. */ -#if !defined(simde_memcpy) || !defined(simde_memset) - #if !defined(SIMDE_NO_STRING_H) && defined(__has_include) - #if __has_include(<string.h>) - #include <string.h> #if !defined(simde_memcpy) - #define simde_memcpy(dest, src, n) memcpy(dest, src, n) + #if HEDLEY_HAS_BUILTIN(__builtin_memcpy) + #define simde_memcpy(dest, src, n) __builtin_memcpy(dest, src, n) + #endif #endif #if !defined(simde_memset) - #define simde_memset(s, c, n) memset(s, c, n) + #if HEDLEY_HAS_BUILTIN(__builtin_memset) + #define simde_memset(s, c, n) __builtin_memset(s, c, n) #endif - #else +#endif +#if !defined(simde_memcpy) || !defined(simde_memset) + #if !defined(SIMDE_NO_STRING_H) + #if defined(__has_include) + #if !__has_include(<string.h>) #define SIMDE_NO_STRING_H #endif + #elif (SIMDE_STDC_HOSTED == 0) + #define SIMDE_NO_STRING_H #endif #endif -#if !defined(simde_memcpy) || !defined(simde_memset) - #if !defined(SIMDE_NO_STRING_H) && (SIMDE_STDC_HOSTED == 1) + + #if !defined(SIMDE_NO_STRING_H) #include <string.h> #if !defined(simde_memcpy) #define simde_memcpy(dest, src, n) memcpy(dest, src, n) @@ -669,13 +674,6 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; #if !defined(simde_memset) #define simde_memset(s, c, n) memset(s, c, n) #endif - #elif (HEDLEY_HAS_BUILTIN(__builtin_memcpy) && HEDLEY_HAS_BUILTIN(__builtin_memset)) || HEDLEY_GCC_VERSION_CHECK(4,2,0) - #if !defined(simde_memcpy) - #define simde_memcpy(dest, src, n) __builtin_memcpy(dest, src, n) - #endif - #if !defined(simde_memset) - #define simde_memset(s, c, n) __builtin_memset(s, c, n) - #endif #else /* These are meant to be portable, not fast. If you're hitting them you * should think about providing your own (by defining the simde_memcpy @@ -708,8 +706,8 @@ typedef SIMDE_FLOAT64_TYPE simde_float64; } #define simde_memset(s, c, n) simde_memset_(s, c, n) #endif - #endif /* !defined(SIMDE_NO_STRING_H) && (SIMDE_STDC_HOSTED == 1) */ -#endif /* !defined(simde_memcpy) || !defined(simde_memset) */ + #endif +#endif #if defined(FE_ALL_EXCEPT) #define SIMDE_HAVE_FENV_H
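A minimal C sketch of the same fallback idea described in this commit, for readers skimming the diff (the my_memcpy macro and its guards are illustrative placeholders, not SIMDe's actual definitions): prefer the compiler builtin when __has_builtin reports it, otherwise defer to the hosted <string.h> implementation.

    /* Prefer the builtin: usable even without a standard library. */
    #if defined(__has_builtin)
    #  if __has_builtin(__builtin_memcpy)
    #    define my_memcpy(dst, src, n) __builtin_memcpy(dst, src, n)
    #  endif
    #endif

    /* Hosted fallback: defer to <string.h> memcpy. */
    #if !defined(my_memcpy)
    #  include <string.h>
    #  define my_memcpy(dst, src, n) memcpy(dst, src, n)
    #endif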
Job management change
@@ -661,12 +661,18 @@ namespace MiningCore.Blockchain.Bitcoin lock (jobLock) { + if(isNew) + validJobs.Clear(); + validJobs.Add(job); + if (!isNew) + { // trim active jobs while(validJobs.Count > maxActiveJobs) validJobs.RemoveAt(0); } + } currentJob = job; }
sysdeps/managarm: Implement FIONREAD ioctl
@@ -1771,6 +1771,41 @@ int sys_ioctl(int fd, unsigned long request, void *arg, int *result) { } return 0; } + case FIONREAD: { + auto argp = reinterpret_cast<int *>(arg); + + auto handle = getHandleForFd(fd); + if (!handle) + return EBADF; + + if(!argp) + return EINVAL; + + managarm::fs::CntRequest<MemoryAllocator> req(getSysdepsAllocator()); + req.set_req_type(managarm::fs::CntReqType::PT_IOCTL); + req.set_command(FIONREAD); + + auto [offer, send_req, recv_resp] = + exchangeMsgsSync( + handle, + helix_ng::offer( + helix_ng::sendBragiHeadOnly(req, getSysdepsAllocator()), + helix_ng::recvInline() + ) + ); + + HEL_CHECK(offer.error()); + HEL_CHECK(send_req.error()); + HEL_CHECK(recv_resp.error()); + + managarm::fs::SvrResponse<MemoryAllocator> resp(getSysdepsAllocator()); + resp.ParseFromArray(recv_resp.data(), recv_resp.length()); + __ensure(resp.error() == managarm::fs::Errors::SUCCESS); + + *argp = resp.fionread_count(); + + return 0; + } case FIOCLEX: { managarm::posix::IoctlFioclexRequest<MemoryAllocator> req(getSysdepsAllocator()); req.set_fd(fd);
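For context, a minimal sketch of how a program consumes this ioctl once the sysdep supports it; this is plain POSIX usage and does not depend on any managarm-specific headers.

    #include <sys/ioctl.h>

    /* Returns how many bytes can be read from fd without blocking,
     * or -1 on failure (errno is set by ioctl). */
    int bytes_available(int fd) {
        int avail = 0;
        if (ioctl(fd, FIONREAD, &avail) < 0)
            return -1;
        return avail;
    }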
Fix incorrect parameter name in mbedtls_mpi_core_add_if() doc comment
@@ -256,7 +256,7 @@ mbedtls_mpi_uint mbedtls_mpi_core_sub( mbedtls_mpi_uint *X, * \param cond Condition bit dictating whether addition should * happen or not. This must be \c 0 or \c 1. * - * \warning If \p assign is neither 0 nor 1, the result of this function + * \warning If \p cond is neither 0 nor 1, the result of this function * is unspecified, and the resulting value in \p A might be * neither its original value nor \p A + \p B. *
py/mkrules.cmake: Add MICROPY_QSTRDEFS_PORT to qstr build process. This allows a port to specify a custom qstrdefsport.h file, the same as the QSTR_DEFS variable in a Makefile.
@@ -95,8 +95,10 @@ add_custom_command( add_custom_command( OUTPUT ${MICROPY_QSTRDEFS_PREPROCESSED} - COMMAND cat ${MICROPY_QSTRDEFS_PY} ${MICROPY_QSTRDEFS_COLLECTED} | sed "s/^Q(.*)/\"&\"/" | ${CMAKE_C_COMPILER} -E ${MICROPY_CPP_FLAGS} - | sed "s/^\\\"\\(Q(.*)\\)\\\"/\\1/" > ${MICROPY_QSTRDEFS_PREPROCESSED} - DEPENDS ${MICROPY_QSTRDEFS_COLLECTED} + COMMAND cat ${MICROPY_QSTRDEFS_PY} ${MICROPY_QSTRDEFS_PORT} ${MICROPY_QSTRDEFS_COLLECTED} | sed "s/^Q(.*)/\"&\"/" | ${CMAKE_C_COMPILER} -E ${MICROPY_CPP_FLAGS} - | sed "s/^\\\"\\(Q(.*)\\)\\\"/\\1/" > ${MICROPY_QSTRDEFS_PREPROCESSED} + DEPENDS ${MICROPY_QSTRDEFS_PY} + ${MICROPY_QSTRDEFS_PORT} + ${MICROPY_QSTRDEFS_COLLECTED} VERBATIM COMMAND_EXPAND_LISTS )
sa: translate lut level back for v2.1
@@ -91,7 +91,7 @@ vtx_detect_status_t vtx_smart_audio_update(vtx_settings_t *actual) { actual->channel = channel_index % VTX_CHANNEL_MAX; } - if (smart_audio_settings.version >= 2) { + if (smart_audio_settings.version == 2) { actual->power_level = smart_audio_settings.power; } else { actual->power_level = smart_audio_dac_power_level_index(smart_audio_settings.power);
Updated a comment for __register_atfork().
@@ -4692,7 +4692,16 @@ __register_atfork(void (*prepare) (void), void (*parent) (void), void (*child) ( return g_fn.__register_atfork(prepare, parent, child, __dso_handle); } - // what do we do if we can't resolve a symbol for __register_atfork? - // glibc returns ENOMEM on error. + /* + * What do we do if we can't resolve a symbol for __register_atfork? + * glibc returns ENOMEM on error. + * + * Note: __register_atfork() is defined to implement the + * functionality of pthread_atfork(); Therefore, it would seem + * reasonable to call pthread_atfork() here if the symbol for + * __register_atfork() is not resolved. However, glibc implements + * pthread_atfork() by calling __register_atfork() which causes + * a tight loop here and we would crash. + */ return ENOMEM; }
add ubisys J1 { "bri_inc": 0 } translates to stop command.
@@ -934,6 +934,28 @@ int DeRestPluginPrivate::setLightState(const ApiRequest &req, ApiResponse &rsp) { rsp.list.append(errorToMap(ERR_PARAMETER_NOT_AVAILABLE, QString("/lights/%1").arg(id), QString("parameter, /lights/%1/bri_inc, is not available.").arg(id))); } + //FIXME workaround ubisys J1 + else if (taskRef.lightNode->modelId().startsWith(QLatin1String("J1"))) + { + if (ok && (map["bri_inc"].type() == QVariant::Double) && (briIinc == 0)) + { + TaskItem task; + copyTaskReq(taskRef, task); + if (addTaskWindowCovering(task, 0x02 /*stop motion*/, 0, 0)) + { + QVariantMap rspItem; + QVariantMap rspItemState; + rspItemState[QString("/lights/%1/state/bri").arg(id)] = item->toNumber(); + rspItem["success"] = rspItemState; + rsp.list.append(rspItem); + taskToLocalData(task); + } + else + { + rsp.list.append(errorToMap(ERR_INTERNAL_ERROR, QString("/lights/%1").arg(id), QString("Internal error, %1").arg(ERR_BRIDGE_BUSY))); + } + } + } // FIXME end workaround ubisys J1 else if (!isOn) { rsp.list.append(errorToMap(ERR_DEVICE_OFF, QString("/lights/%1").arg(id), QString("parameter, /lights/%1/bri, is not modifiable. Device is set to off.").arg(id)));
Update appveyor.yml Adding conditions to post-test tasks
@@ -81,15 +81,21 @@ test_script: - cd %BUILD_HOME%\tests - ctest -C %BUILD_CONFIG% --output-on-failure # run regression tests + - cd %EPANET_HOME% - IF "%BUILD_CONFIG%" == "Release" ( - cd %EPANET_HOME% & tools\run-nrtest.cmd %REF_BUILD_ID% %SUT_BUILD_ID% + tools\run-nrtest.cmd %REF_BUILD_ID% %SUT_BUILD_ID% ) on_success: - cd %TEST_HOME%\benchmark - - appveyor PushArtifact receipt.json + - IF "%BUILD_CONFIG%" == "Release" ( + appveyor PushArtifact receipt.json + ) on_failure: - cd %TEST_HOME%\benchmark # zip up the SUT benchmarks - - 7z a benchmark-%PLATFORM%-%SUT_BUILD_ID%.zip .\epanet-%SUT_BUILD_ID% - - appveyor PushArtifact benchmark-%PLATFORM%-%SUT_BUILD_ID%.zip + - IF "%BUILD_CONFIG%" == "Release" ( + 7z a benchmark-%PLATFORM%-%SUT_BUILD_ID%.zip .\epanet-%SUT_BUILD_ID% & + appveyor PushArtifact benchmark-%PLATFORM%-%SUT_BUILD_ID%.zip + ) +
Update requires for MBEDTLS_PSA_CRYPTO_DRIVERS In order to test various PSA crypto settings the Requires section needed updating to require MBEDTLS_PSA_CRYPTO_C or MBEDTLS_PSA_CRYPTO_CONFIG.
* * Enable support for the experimental PSA crypto driver interface. * - * Requires: MBEDTLS_PSA_CRYPTO_C. + * Requires: MBEDTLS_PSA_CRYPTO_C or MBEDTLS_PSA_CRYPTO_CONFIG * * \warning This interface is experimental and may change or be removed * without notice.
Fix construction of pattern variable bindings Previously, inserting the variable bindings would make the Oasn nodes appear before the decl nodes in a block.
@@ -158,19 +158,22 @@ structmemb(Node *n, Node *name, Type *ty) static Node * addcapture(Node *n, Node **cap, size_t ncap) { - Node **blk; - size_t nblk, i; - - nblk = 0; - blk = NULL; - - for (i = 0; i < ncap; i++) - lappend(&blk, &nblk, cap[i]); - for (i = 0; i < n->block.nstmts; i++) - lappend(&blk, &nblk, n->block.stmts[i]); - lfree(&n->block.stmts, &n->block.nstmts); - n->block.stmts = blk; - n->block.nstmts = nblk; + size_t i, j; + + for (i = 0; i < n->block.nstmts; i++) { + if (n->block.stmts[i]->type != Ndecl) + continue; + for (j = 0; j < ncap; j++) { + assert(cap[j]->expr.op == Oasn); + assert(cap[j]->expr.args[0]->expr.op == Ovar); + assert(cap[j]->expr.args[0]->expr.isconst == 0); + if (n->block.stmts[i]->decl.did != cap[j]->expr.args[0]->expr.did) + continue; + assert(n->block.stmts[i]->decl.init == NULL); + assert(cap[j]->expr.args[0]->expr.op == Ovar); + n->block.stmts[i]->decl.init = cap[j]->expr.args[1]; + } + } return n; }
tests: runtime: in_tail: remove unused var
@@ -287,7 +287,6 @@ void wait_with_timeout(uint32_t timeout_ms, struct tail_test_result *result, int struct flb_time end_time; struct flb_time diff_time; uint64_t elapsed_time_flb = 0; - int64_t ret = 0; flb_time_get(&start_time);
doc: tn13: more updates to ch6
@@ -1521,14 +1521,18 @@ delete_transferred_cap_reply(struct data *data) { \note{Caching NYI} \subsection{Two phase commit} -\note{NYI} -Use 2pc. +Use two-phase commit to synchronize operations that would otherwise conflict. +We do not discuss this solution in detail, as we incorporate two-phase commit +into the delete and revoke operations for the hybrid solution discussed in +section~\ref{sec:sol:hybrid}. \subsection{Sequencer} -\note{NYI} -Use a sequencer. This will order whole operations. +Use a sequencer. This will order whole operations. We do not discuss this +solution in depth, as the sequencer idea is incorporated into the hybrid +solution in section~\ref{sec:sol:hybrid}. \subsection{Hybrid} +\label{sec:sol:hybrid} We can, of course, combine facets of each of the previously discussed approaches to build a hybrid approach which combines some (or all) of them. @@ -1559,13 +1563,27 @@ Delete and revoke employ a form of two-phase commit, which is implemented as a Other operations treat capabilities that have been marked for deletion as already deleted, which avoids many otherwise conflicting operation sequences. +\subsubsection{Caching} +Caching for this solution is implemented using a bitfield which has one bit +each indicating the presence of remote copies, descendants, and ancestors +respectively. + +\subsection{Multicast vs Broadcast} +To implement two-phase commit, this solution could use multicast messages to +all cores that have remote copies/descendants/ancestors or simply use +broadcast and have cores that do not have any copies reply with a success +reply for the mark phase. + \subsection{Comparison} -Compare the approaches +\note{TODO: Compare the approaches} \subsection{Implementation} Currently, the capability operations are implemented using the hybrid technique outlined above. +The implementation uses broadcasts to implement 2PC, because the this way, the +implementation does not have to keep, and update, a list of remote cores that +have copies, descendants or ancestors for each capability. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
rtl8721csm: fix g_scan_list initialization
@@ -393,7 +393,7 @@ trwifi_result_e wifi_netmgr_utils_deinit(struct netdev *dev) trwifi_result_e wifi_netmgr_utils_scan_ap(struct netdev *dev, trwifi_scan_config_s *config) { g_scan_num = 0; - g_scan_num = NULL; + g_scan_list = NULL; if (config) { if (config->ssid_length > 0) { int scan_buf_len = 500;
SLW: Configure self-restore for HRMOR Make a stop API call using libpore to restore the HRMOR register. HRMOR needs to be cleared so that when threads exit stop, they arrive at the Linux system_reset vector (0x100).
@@ -1254,12 +1254,41 @@ static void slw_patch_regs(struct proc_chip *chip) static void slw_init_chip_p9(struct proc_chip *chip) { struct cpu_thread *c; + int rc; prlog(PR_DEBUG, "SLW: Init chip 0x%x\n", chip->id); /* At power ON setup inits for power-mgt */ for_each_available_core_in_chip(c, chip->id) slw_set_overrides_p9(chip, c); + + if (!chip->homer_base) { + log_simple_error(&e_info(OPAL_RC_SLW_REG), + "SLW: HOMER base not set %x\n", + chip->id); + return; + } + + prlog(PR_NOTICE, "SLW: Configuring self-restore for HRMOR\n"); + + /* Should this be for_each_present_cpu() ? */ + for_each_available_cpu(c) { + if (c->chip_id != chip->id) + continue; + + /* + * Clear HRMOR. Need to update only for thread + * 0 of each core. Doing it anyway for all threads + */ + rc = p9_stop_save_cpureg((void *)chip->homer_base, + P9_STOP_SPR_HRMOR, 0, + c->pir); + if (rc) { + log_simple_error(&e_info(OPAL_RC_SLW_REG), + "SLW: Failed to set HRMOR for CPU %x,RC=0x%x\n", + c->pir, rc); + } + } } static void slw_init_chip(struct proc_chip *chip) {
readme: +crashes
@@ -94,6 +94,7 @@ Honggfuzz has been used to find a few interesting security problems in major sof * panic() in sleep-parser [#1](https://github.com/datrs/sleep-parser/issues/3) * panic() in lewton [#1](https://github.com/RustAudio/lewton/issues/27) * panic()/DoS in Ethereum-Parity [#1](https://srlabs.de/bites/ethereum_dos/) + * crashes in rust-bitcoin/rust-lightning [#1](https://github.com/rust-bitcoin/rust-lightning/commit/a9aa3c37fe182dd266e0faebc788e0c9ee724783) * ... and more ## Projects utilizing Honggfuzz
Fix write-after-free when printing to files.
@@ -243,6 +243,13 @@ JANET_CORE_FN(cfun_io_fwrite, return argv[0]; } +static void io_assert_writeable(JanetFile *iof) { + if (iof->flags & JANET_FILE_CLOSED) + janet_panic("file is closed"); + if (!(iof->flags & (JANET_FILE_WRITE | JANET_FILE_APPEND | JANET_FILE_UPDATE))) + janet_panic("file is not writeable"); +} + /* Flush the bytes in the file */ JANET_CORE_FN(cfun_io_fflush, "(file/flush f)", @@ -250,10 +257,7 @@ JANET_CORE_FN(cfun_io_fflush, "buffered for efficiency reasons. Returns the file handle.") { janet_fixarity(argc, 1); JanetFile *iof = janet_getabstract(argv, 0, &janet_file_type); - if (iof->flags & JANET_FILE_CLOSED) - janet_panic("file is closed"); - if (!(iof->flags & (JANET_FILE_WRITE | JANET_FILE_APPEND | JANET_FILE_UPDATE))) - janet_panic("file is not writeable"); + io_assert_writeable(iof); if (fflush(iof->file)) janet_panic("could not flush file"); return argv[0]; @@ -269,6 +273,7 @@ int janet_file_close(JanetFile *file) { if (!(file->flags & (JANET_FILE_NOT_CLOSEABLE | JANET_FILE_CLOSED))) { ret = fclose(file->file); file->flags |= JANET_FILE_CLOSED; + file->file = NULL; /* NULL derefence is easier to debug then other problems */ return ret; } return 0; @@ -449,6 +454,7 @@ static Janet cfun_io_print_impl_x(int32_t argc, Janet *argv, int newline, if (janet_abstract_type(abstract) != &janet_file_type) return janet_wrap_nil(); JanetFile *iofile = abstract; + io_assert_writeable(iofile); f = iofile->file; break; } @@ -564,6 +570,10 @@ static Janet cfun_io_printf_impl_x(int32_t argc, Janet *argv, int newline, if (janet_abstract_type(abstract) != &janet_file_type) return janet_wrap_nil(); JanetFile *iofile = abstract; + if (iofile->flags & JANET_FILE_CLOSED) { + janet_panic("cannot print to closed file"); + } + io_assert_writeable(iofile); f = iofile->file; break; } @@ -688,6 +698,7 @@ void janet_dynprintf(const char *name, FILE *dflt_file, const char *format, ...) if (janet_abstract_type(abstract) != &janet_file_type) break; JanetFile *iofile = abstract; + io_assert_writeable(iofile); f = iofile->file; } fwrite(buffer.data, buffer.count, 1, f);
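The defensive pattern behind this fix — clear the FILE pointer as soon as the wrapper is closed so a later write trips on NULL instead of a freed stream — can be sketched in plain C; wrapped_file and wf_close are illustrative names, not Janet's actual API.

    #include <stdio.h>

    struct wrapped_file {
        FILE *fp;
        int   closed;
    };

    static int wf_close(struct wrapped_file *w) {
        int rc = 0;
        if (!w->closed) {
            rc = fclose(w->fp);
            w->fp = NULL;   /* a NULL dereference is easier to debug than a use-after-free */
            w->closed = 1;
        }
        return rc;
    }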
record in ChangeLog
+05/28/2018 +- send Basic header in OAuth www-authenticate response if that's the only accepted method; thanks @puiterwijk + 05/28/2018 - refactor Redis cache backend to solve issues on AUTH errors: a) memory leak and b) redisGetReply lagging behind - adjust copyright year/org
docs: dark-mode friendly logo
<div align="center"> <br /> <p> - <a href="https://cee-studio.github.io/orca"><img src="https://raw.githubusercontent.com/cee-studio/orca-docs/079bbbc5f2a27f457c324b1334b3644095db31ff/docs/source/images/logo-light.svg" width="546" alt="orca" style="background-color:red;" /></a> + <a href="https://cee-studio.github.io/orca"><img src="https://raw.githubusercontent.com/cee-studio/orca-docs/1f3aee92c6d29ba8a51651cebc524c037ce9ae7d/docs/source/images/logo.svg" width="546" alt="orca" style="background-color:red;" /></a> </p> <br /> <p>
Fix: Time-zone is not reflected
@@ -40,12 +40,10 @@ RUN autoreconf -fiv RUN CC="clang" CFLAGS="-O3 -static" LIBS="$(pkg-config --libs openssl)" ./configure --prefix="" --enable-utf8 --with-openssl --enable-geoip=mmdb RUN make && make DESTDIR=/dist install -# Time Zone -RUN tar Jcf /dist/tzdata.tar.xz -C /usr/share/zoneinfo/right . - # Container FROM busybox:musl COPY --from=builds /dist / +COPY --from=builds /usr/share/zoneinfo /usr/share/zoneinfo VOLUME /var/www/goaccess EXPOSE 7890 ENTRYPOINT ["/bin/goaccess"]
Update site URLs in README.md
The Dagger (XDAG) cryptocurrency ================================ -- Official site: http://xdag.me +- Community site: https://xdag.io - Developer's site: http://xdag.me - Main net is launched January 5, 2018 at 22:45 GMT. @@ -10,7 +10,7 @@ Principles: - Directed acyclic graph (DAG), not blockchain - Block = transaction = address -- Original main chain idea +- Original idea and implementation - Mineable, no premine, no ICO - Mining new money every 64 seconds
cmake - Add tools [ci skip]
@@ -390,3 +390,22 @@ macro(get_subdirectories result current_dir) endforeach() set(${result} ${dirs}) endmacro() + + +# Replacement for list(FILTER ...) (see cmake doc) +# when current cmake version < 3.6) +# Usage : +# list_filter(<listname> <matching expr>) +# example: +# set(mylist name src plugin) +# list_filter(mylist src) +# ==> mylist contains only name and plugin +function(list_filter inout_list_name regex) + foreach(name IN LISTS ${inout_list_name}) + string(FIND ${name} ${regex} result) + if(${result} GREATER -1) + list(REMOVE_ITEM ${inout_list_name} ${name}) + endif() + endforeach() + set(${inout_list_name} ${${inout_list_name}} PARENT_SCOPE) +endfunction()