message
stringlengths
6
474
diff
stringlengths
8
5.22k
more doxygen comments
@@ -316,6 +316,13 @@ void oc_ri_add_timed_event_callback_ticks(void *cb_data, oc_trigger_t event_callback, oc_clock_time_t ticks); +/** + * @brief add timed event callback in seconds + * * + * @param cb_data the timed event callback info + * @param event_callback the callback + * @param seconds time in seconds + */ #define oc_ri_add_timed_event_callback_seconds(cb_data, event_callback, \ seconds) \ do { \
appveyor: Turn this off until the problems can be diagnosed. I don't use Windows, so I'm not sure why appveyor is failing to build the precommit tests and use covlib. PR's gladly accepted if someone knows what's going on. Otherwise, I'll fix it after tackling other lower-numbered issues.
@@ -3,13 +3,14 @@ platform: - x64 build_script: - - cmd: mkdir build - - cmd: cd build - - cmd: cmake ../ -G "Visual Studio 14 2015 Win64" - - cmd: cmake --build . --config Release - - cmd: cp Release/lily.exe ../lily - - cmd: cp Release/pre-commit-tests.exe ../pre-commit-tests.exe - - cmd: cd .. + #- cmd: mkdir build + #- cmd: cd build + #- cmd: cmake ../ -G "Visual Studio 14 2015 Win64" + #- cmd: cmake --build . --config Release + #- cmd: cp Release/lily.exe ../lily + #- cmd: cp Release/pre-commit-tests.exe ../pre-commit-tests.exe + #- cmd: cd .. + # todo: Find out why AppVeyor is failing and fix it. - cmd: dir - # todo: Find out why covlib tests are failing. - - cmd: pre-commit-tests.exe --skip-covlib + ## todo: Find out why covlib tests are failing. + #- cmd: pre-commit-tests.exe --skip-covlib
`defer_thread_wait ` - make sure there's a valid pool object
@@ -305,7 +305,8 @@ void defer_thread_throttle(unsigned long microsec) { return; } */ #pragma weak defer_thread_wait void defer_thread_wait(pool_pt pool, void *p_thr) { - size_t throttle = (pool->count) * DEFER_THROTTLE; + size_t throttle = + pool ? ((pool->count) * DEFER_THROTTLE) : DEFER_THROTTLE_LIMIT; if (!throttle || throttle > DEFER_THROTTLE_LIMIT) throttle = DEFER_THROTTLE_LIMIT; if (throttle == DEFER_THROTTLE)
add recommendation to use docker #no_auto_pr
@@ -87,12 +87,21 @@ Some thoughts to consider when adding a new message: # Releasing New Versions of the Library -Oh boy, so you've decided to release a new version of libsbp. It's recommended -this process is performed on a Mac, as it has been known to go wrong on Linux. +## Using Docker + +It's highly recommended to use the docker container to run the release process, +the docker container can be pulled from DockerHub and launched via this command: + +``docker run -v $PWD:/mnt/workspace -i -t swiftnav/libsbp-build:2020.09.15`` -The v1.2.5 release for Python on PyPi failed to install due to missing -`requirements.txt` file in the uploaded archive after a deployment was done on -Ubuntu 16.04. +Check this [link](https://hub.docker.com/r/swiftnav/libsbp-build/tags) for newer tags. + +## The Process + +Oh boy, so you've decided to release a new version of libsbp. It's recommended +this process is performed using the above docker container. You'll likely want +to run the git commands outside of the container and the `make ...` commands +inside the container (so you don't have to setup git inside the docker container). 0. Branch and tag a new release. Tag the release version:
docker-jenkins-buildnode: adjust cabal update configuration
@@ -42,8 +42,7 @@ RUN apt-get -y install \ maven \ git \ libcurl4-gnutls-dev -RUN cabal update && \ - apt-get clean && \ +RUN apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # TODO use elektra for the configuration steps below @@ -65,6 +64,11 @@ RUN echo "\n\n\n\n\nY" | adduser --quiet --disabled-password jenkins && \ mkdir /home/jenkins/libelektra && \ echo "[user]\nname = Jenkins Buildbot\nemail = [email protected]" >> /home/jenkins/.gitconfig +# setup cabal for the jenkins user, then go back to root +USER jenkins +RUN cabal update +USER root + # setup the run- utilities COPY run-make /usr/local/bin/run-make COPY run-make-env /usr/local/bin/run-make-env
Improve error messages around REPLICATION and BYPASSRLS properties. Clarify wording as per suggestion from Wolfgang Walther. No back-patch; this doesn't seem worth thrashing translatable strings in the back branches. Tom Lane and Stephen Frost Discussion:
@@ -305,7 +305,7 @@ CreateRole(ParseState *pstate, CreateRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change bypassrls attribute"))); + errmsg("must be superuser to create bypassrls users"))); } else { @@ -719,14 +719,14 @@ AlterRole(AlterRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to alter superusers"))); + errmsg("must be superuser to alter superuser roles or change superuser attribute"))); } else if (authform->rolreplication || isreplication >= 0) { if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to alter replication users"))); + errmsg("must be superuser to alter replication roles or change replication attribute"))); } else if (bypassrls >= 0) {
fix linop_stack
@@ -672,7 +672,7 @@ struct linop_s* linop_stack(int D, int E, const struct linop_s* a, const struct PTR_ALLOC(struct linop_s, c); c->forward = operator_stack(D, E, a->forward, b->forward); - c->adjoint = operator_stack(E, D, b->adjoint, a->adjoint); + c->adjoint = operator_stack(E, D, a->adjoint, b->adjoint); const struct operator_s* an = a->normal;
Increment version to 4.6.5.
#define MOD_WSGI_MAJORVERSION_NUMBER 4 #define MOD_WSGI_MINORVERSION_NUMBER 6 -#define MOD_WSGI_MICROVERSION_NUMBER 4 -#define MOD_WSGI_VERSION_STRING "4.6.4" +#define MOD_WSGI_MICROVERSION_NUMBER 5 +#define MOD_WSGI_VERSION_STRING "4.6.5" /* ------------------------------------------------------------------------- */
Update trigger script tabs for pointnclick scenes Point & Click scenes don't support on enter or on leave scripts. Instead the trigger script is activated when the interact button is pressed. The IDE now reflects that by changing the default script tab name to `On Interact` and not displaying `On Leave`
@@ -49,6 +49,10 @@ const scriptTabs = { leave: l10n("SIDEBAR_ON_LEAVE"), } as const; +const pointNClickScriptTabs = { + trigger: l10n("SIDEBAR_ON_INTERACT"), +} as const; + const getScriptKey = (tab: keyof typeof scriptTabs): TriggerScriptKey => { if (tab === "trigger") { return "script"; @@ -285,6 +289,17 @@ export const TriggerEditor = ({ )} <SidebarColumn> <StickyTabs> + {scene.type === "POINTNCLICK" ? ( + <TabBar + values={pointNClickScriptTabs} + buttons={ + <> + {lockButton} + {scriptButton} + </> + } + /> + ) : ( <TabBar value={scriptMode} values={scriptTabs} @@ -296,6 +311,7 @@ export const TriggerEditor = ({ </> } /> + )} </StickyTabs> <ScriptEditor value={trigger[scriptKey]}
Set USDT argument constraint for all architectures This sets the USDT argument constraint for all architectures rather than just restricting it to powerpc and x86 variants. However, the other architectures might still need additional argument parsing support.
#ifndef FOLLY_SDT_ARG_CONSTRAINT #if defined(__powerpc64__) || defined(__powerpc__) #define FOLLY_SDT_ARG_CONSTRAINT "nQr" -#elif defined(__x86_64__) || defined(__i386__) +#else #define FOLLY_SDT_ARG_CONSTRAINT "nor" #endif #endif
Add missing new line in error message
@@ -45,7 +45,7 @@ int ocf_ctx_register_volume_type(ocf_ctx_t ctx, uint8_t type_id, return 0; err: - ocf_log(ctx, log_err, "Failed to register volume operations '%s'", + ocf_log(ctx, log_err, "Failed to register volume operations '%s'\n", properties->name); return result; }
add back fd close
@@ -21,6 +21,7 @@ int osGetNumThreads(pid_t pid) } if (g_fn.read(fd, buf, sizeof(buf)) == -1) { + g_fn.close(fd); return -1; } @@ -28,6 +29,7 @@ int osGetNumThreads(pid_t pid) for (i = 1; i < 20; i++) { entry = strtok_r(NULL, delim, &last); } + g_fn.close(fd); if ((result = strtol(entry, NULL, 0)) == (long)0) { return -1;
Added error response logging. Every internal server error response should have a clear description in log.
@@ -3722,6 +3722,7 @@ static void nxt_router_response_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, void *data) { + size_t b_size, count; nxt_int_t ret; nxt_app_t *app; nxt_buf_t *b, *next; @@ -3787,15 +3788,20 @@ nxt_router_response_ready_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg, nxt_http_request_send_body(task, r, NULL); } else { - size_t b_size = nxt_buf_is_mem(b) ? nxt_buf_mem_used_size(&b->mem) : 0; + b_size = nxt_buf_is_mem(b) ? nxt_buf_mem_used_size(&b->mem) : 0; - if (nxt_slow_path(b_size < sizeof(*resp))) { + if (nxt_slow_path(b_size < sizeof(nxt_unit_response_t))) { + nxt_alert(task, "response buffer too small: %z", b_size); goto fail; } resp = (void *) b->mem.pos; - if (nxt_slow_path(b_size < sizeof(*resp) - + resp->fields_count * sizeof(nxt_unit_field_t))) { + count = (b_size - sizeof(nxt_unit_response_t)) + / sizeof(nxt_unit_field_t); + + if (nxt_slow_path(count < resp->fields_count)) { + nxt_alert(task, "response buffer too small for fields count: %D", + resp->fields_count); goto fail; }
[CLI] Change max message size change size to 10MB to support large block.
@@ -22,6 +22,8 @@ import ( const aergosystem = "aergo.system" +const MaxRPCMessageSize = 1024 * 1024 * 10 // 10MB + var ( // Used for test. test bool @@ -131,8 +133,9 @@ func connectAergo(cmd *cobra.Command, args []string) { } serverAddr := GetServerAddress() opts := []grpc.DialOption{ - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(1024 * 1024 * 256)), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxRPCMessageSize), grpc.MaxCallSendMsgSize(MaxRPCMessageSize)), } + if rootConfig.TLS.ClientCert != "" || rootConfig.TLS.ClientKey != "" { certificate, err := tls.LoadX509KeyPair(rootConfig.TLS.ClientCert, rootConfig.TLS.ClientKey) if err != nil {
blocklevel: smart_write: Rename size variable for clarity We're writing in chunks, so lets make it clear that size is relative to the chunk that we're writing.
@@ -562,19 +562,20 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const voi while (len > 0) { uint32_t erase_block = pos & ~(erase_size - 1); uint32_t block_offset = pos & (erase_size - 1); - uint32_t size = erase_size > len ? len : erase_size; + uint32_t chunk_size = erase_size > len ? len : erase_size; int cmp; /* Write crosses an erase boundary, shrink the write to the boundary */ - if (erase_size < block_offset + size) { - size = erase_size - block_offset; + if (erase_size < block_offset + chunk_size) { + chunk_size = erase_size - block_offset; } rc = bl->read(bl, erase_block, erase_buf, erase_size); if (rc) goto out; - cmp = blocklevel_flashcmp(erase_buf + block_offset, write_buf, size); + cmp = blocklevel_flashcmp(erase_buf + block_offset, write_buf, + chunk_size); FL_DBG("%s: region 0x%08x..0x%08x ", __func__, erase_block, erase_size); if (cmp != 0) { @@ -584,16 +585,16 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const voi bl->erase(bl, erase_block, erase_size); } FL_DBG("write\n"); - memcpy(erase_buf + block_offset, write_buf, size); + memcpy(erase_buf + block_offset, write_buf, chunk_size); rc = bl->write(bl, erase_block, erase_buf, erase_size); if (rc) goto out; } else { FL_DBG("clean\n"); } - len -= size; - pos += size; - write_buf += size; + len -= chunk_size; + pos += chunk_size; + write_buf += chunk_size; } out:
Add 8px and 10px montserrat fonts to build
CSRCS += lv_font.c CSRCS += lv_font_fmt_txt.c CSRCS += lv_font_loader.c +CSRCS += lv_font_montserrat_8.c +CSRCS += lv_font_montserrat_10.c CSRCS += lv_font_montserrat_12.c CSRCS += lv_font_montserrat_14.c CSRCS += lv_font_montserrat_16.c
external/wakaama : fixes memory leakage in tcp api This patch fixes memory leakage issue in tcp api. Without this commit, 32 bytes are not released when client is registered over tcp.
@@ -87,7 +87,8 @@ static int prv_getRegistrationQuery(lwm2m_context_t * contextP, index += res; } - if (contextP->protocol == COAP_TCP) + if (contextP->protocol == COAP_TCP + || contextP->protocol == COAP_TCP_TLS) { /* * We need to append the token to the parameters list @@ -100,6 +101,7 @@ static int prv_getRegistrationQuery(lwm2m_context_t * contextP, { int size = 1; lwm2m_data_t * dataP = lwm2m_data_new(size); + if (dataP == NULL) return 0; dataP->id = LWM2M_SECURITY_SECRET_KEY_ID; obj->readFunc(0, &size, &dataP, obj); @@ -129,6 +131,7 @@ static int prv_getRegistrationQuery(lwm2m_context_t * contextP, index += res; lwm2m_free(secret); } + lwm2m_data_free(size, dataP); } }
BugID:17291083:fix memory corruption caused by iotx_cm_close
@@ -188,6 +188,14 @@ int iotx_cm_close(int fd) return -1; } + if (--inited_conn_num == 0) { +#if (CONFIG_SDK_THREAD_COST == 1) + while (!yield_task_leave) { + HAL_SleepMs(10); + } +#endif + } + iotx_cm_close_fp close_func; HAL_MutexLock(fd_lock); close_func = _cm_fd[fd]->close_func; @@ -199,12 +207,7 @@ int iotx_cm_close(int fd) return -1; } - if (--inited_conn_num == 0) { -#if (CONFIG_SDK_THREAD_COST == 1) - while(!yield_task_leave) { - HAL_SleepMs(10); - } -#endif + if (inited_conn_num == 0) { if (fd_lock != NULL) { HAL_MutexDestroy(fd_lock); fd_lock = NULL;
revise processing of vendor
@@ -8,7 +8,6 @@ import tempfile arcadia_project_prefix = 'a.yandex-team.ru/' contrib_go_std_src_prefix = 'contrib/go/_std/src/' -contrib_go_prefix = 'vendor/' vendor_prefix = 'vendor/' @@ -16,15 +15,26 @@ def copy_args(args): return copy.copy(args) +def get_vendor_index(import_path): + index = import_path.rfind('/' + vendor_prefix) + if index < 0: + index = 0 if import_path.startswith(vendor_prefix) else index + else: + index = index + 1 + return index + + def get_import_path(module_path): assert len(module_path) > 0 import_path = module_path.replace('\\', '/') is_std_module = import_path.startswith(contrib_go_std_src_prefix) if is_std_module: import_path = import_path[len(contrib_go_std_src_prefix):] - elif import_path.startswith(contrib_go_prefix): - import_path = import_path[len(contrib_go_prefix):] - else: + index = get_vendor_index(import_path) + if index >= 0: + index += len(vendor_prefix) + import_path = import_path[index:] + elif not is_std_module: import_path = arcadia_project_prefix + import_path assert len(import_path) > 0 return import_path, is_std_module @@ -44,14 +54,14 @@ def classify_srcs(srcs, args): args.packages = list(filter(lambda x: x.endswith('.a'), srcs)) -def create_import_config(peers, remap_vendor, import_map={}, module_map={}): +def create_import_config(peers, import_map={}, module_map={}): content = '' for key, value in import_map.items(): content += 'importmap {}={}\n'.format(key, value) for peer in peers: peer_import_path, _ = get_import_path(os.path.dirname(peer)) - index = peer_import_path.find(vendor_prefix) if remap_vendor else -1 - if index == 0 or index > 0 and peer_import_path[index-1] == '/': + index = get_vendor_index(peer_import_path) + if index >= 0: index += len(vendor_prefix) content += 'importmap {}={}\n'.format(peer_import_path[index:], peer_import_path) content += 'packagefile {}={}\n'.format(peer_import_path, os.path.join(args.build_root, peer)) @@ -72,7 +82,7 @@ def do_compile_go(args): cmd.append('-std') if 
import_path == 'runtime': cmd.append('-+') - import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map) + import_config_name = create_import_config(args.peers, args.import_map, args.module_map) if import_config_name: cmd += ['-importcfg', import_config_name] else: @@ -117,7 +127,7 @@ def do_link_exe(args): compile_args.output = os.path.join(args.output_root, 'main.a') do_link_lib(compile_args) cmd = [args.go_link, '-o', args.output] - import_config_name = create_import_config(args.peers, False, args.import_map, args.module_map) + import_config_name = create_import_config(args.peers, args.import_map, args.module_map) if import_config_name: cmd += ['-importcfg', import_config_name] cmd += ['-buildmode=exe', '-extld=gcc', compile_args.output]
board/kukui/led.c: Format with clang-format BRANCH=none TEST=none
@@ -35,8 +35,7 @@ static void kukui_led_set_battery(void) chstate = charge_get_state(); - if (prv_chstate == chstate && - chstate != PWR_STATE_DISCHARGE) + if (prv_chstate == chstate && chstate != PWR_STATE_DISCHARGE) return; prv_chstate = chstate; @@ -62,8 +61,7 @@ static void kukui_led_set_battery(void) return; } - if (prv_r == br[EC_LED_COLOR_RED] && - prv_g == br[EC_LED_COLOR_GREEN] && + if (prv_r == br[EC_LED_COLOR_RED] && prv_g == br[EC_LED_COLOR_GREEN] && prv_b == br[EC_LED_COLOR_BLUE]) return;
fix compilation added enum and fixed re-declaration
@@ -69,7 +69,7 @@ typedef enum SceHttpErrorCode { SCE_HTTP_ERROR_RESOLVER_ENORECORD = 0x8043600a } SceHttpErrorCode; -typedef SceHttpsErrorCode { +typedef enum SceHttpsErrorCode { SCE_HTTPS_ERROR_CERT = 0x80435060, SCE_HTTPS_ERROR_HANDSHAKE = 0x80435061, SCE_HTTPS_ERROR_IO = 0x80435062, @@ -77,14 +77,14 @@ typedef SceHttpsErrorCode { SCE_HTTPS_ERROR_PROXY = 0x80435064 } SceHttpsErrorCode; -typedef SceHttpsSslErrorCode { +typedef enum SceHttpsSslErrorCode { SCE_HTTPS_ERROR_SSL_INTERNAL = (0x01U), SCE_HTTPS_ERROR_SSL_INVALID_CERT = (0x02U), SCE_HTTPS_ERROR_SSL_CN_CHECK = (0x04U), SCE_HTTPS_ERROR_SSL_NOT_AFTER_CHECK = (0x08U), SCE_HTTPS_ERROR_SSL_NOT_BEFORE_CHECK = (0x10U), SCE_HTTPS_ERROR_SSL_UNKNOWN_CA = (0x20U) -} SceHttpsErrorCode; +} SceHttpsSslErrorCode; #define SCE_HTTP_ENABLE (1) #define SCE_HTTP_DISABLE (0) @@ -92,7 +92,7 @@ typedef SceHttpsSslErrorCode { #define SCE_HTTP_USERNAME_MAX_SIZE 256 #define SCE_HTTP_PASSWORD_MAX_SIZE 256 -typedef SceHttpStatusCode { +typedef enum SceHttpStatusCode { SCE_HTTP_STATUS_CODE_CONTINUE = 100, SCE_HTTP_STATUS_CODE_SWITCHING_PROTOCOLS = 101, SCE_HTTP_STATUS_CODE_PROCESSING = 102, @@ -142,7 +142,7 @@ typedef SceHttpStatusCode { SCE_HTTP_STATUS_CODE_INSUFFICIENT_STORAGE = 507 } SceHttpStatuscode; -typedef SceHttpUriBuildType { +typedef enum SceHttpUriBuildType { SCE_HTTP_URI_BUILD_WITH_SCHEME = 0x01, SCE_HTTP_URI_BUILD_WITH_HOSTNAME = 0x02, SCE_HTTP_URI_BUILD_WITH_PORT = 0x04, @@ -154,7 +154,7 @@ typedef SceHttpUriBuildType { SCE_HTTP_URI_BUILD_WITH_ALL = 0xFFFF } SceHttpUriBuildType; -typedef SceHttpsFlag { +typedef enum SceHttpsFlag { SCE_HTTPS_FLAG_SERVER_VERIFY = (0x01U), SCE_HTTPS_FLAG_CLIENT_VERIFY = (0x02U), SCE_HTTPS_FLAG_CN_CHECK = (0x04U),
Fix minor documentation issue Merges
@@ -62,7 +62,7 @@ esp_err_t sdmmc_write_sectors(sdmmc_card_t* card, const void* src, size_t start_sector, size_t sector_count); /** - * Write given number of sectors to SD/MMC card + * Read given number of sectors from the SD/MMC card * * @param card pointer to card information structure previously initialized * using sdmmc_card_init
AP font name changed
* Opts: ******************************************************************************/ -#ifndef PERSIAN_FONT -#define PERSIAN_FONT 1 +#ifndef LV_FONT_AP_18 +#define LV_FONT_AP_18 1 #endif -#if PERSIAN_FONT +#if LV_FONT_AP_18 /*----------------- * BITMAPS @@ -5487,7 +5487,7 @@ static lv_font_fmt_txt_dsc_t font_dsc = { *----------------*/ /*Initialize a public general font descriptor*/ -lv_font_t persian_font = { +lv_font_t lv_font_ap_18 = { .get_glyph_dsc = lv_font_get_glyph_dsc_fmt_txt, /*Function pointer to get glyph's data*/ .get_glyph_bitmap = lv_font_get_bitmap_fmt_txt, /*Function pointer to get glyph's bitmap*/ .line_height = 26, /*The maximum line height required by the font*/ @@ -5498,5 +5498,4 @@ lv_font_t persian_font = { .dsc = &font_dsc /*The custom font data. Will be accessed by `get_glyph_bitmap/dsc` */ }; -#endif /*#if PERSIAN_FONT*/ - +#endif /*#if LV_FONT_AP_18*/
BugID:23251508: lwip_itoa: fix implicit conversion warning commit Author: Simon Goldschmidt Date: Mon Jun 18 12:15:37 2018 +0200
@@ -235,6 +235,6 @@ lwip_itoa(char* result, size_t bufsize, int number) return; } /* move from temporary buffer to output buffer (sign is not moved) */ - memmove(res, tmp, (result + bufsize) - tmp); + memmove(res, tmp, (size_t)((result + bufsize) - tmp)); } #endif
update ya tool arc no -ononempty on darwin
}, "arc": { "formula": { - "sandbox_id": [397664968], + "sandbox_id": [398100724], "match": "arc" }, "executable": {
VERSION bump to version 1.2.1
@@ -27,7 +27,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 1) set(SYSREPO_MINOR_VERSION 2) -set(SYSREPO_MICRO_VERSION 0) +set(SYSREPO_MICRO_VERSION 1) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
apps/blemesh: rename bleprph functions
@@ -185,13 +185,13 @@ static const struct bt_mesh_prov prov = { }; static void -bleprph_on_reset(int reason) +blemesh_on_reset(int reason) { BLE_HS_LOG(ERROR, "Resetting state; reason=%d\n", reason); } static void -bleprph_on_sync(void) +blemesh_on_sync(void) { int err; @@ -218,8 +218,8 @@ main(void) /* Initialize the NimBLE host configuration. */ log_register("ble_hs", &ble_hs_log, &log_console_handler, NULL, LOG_SYSLEVEL); - ble_hs_cfg.reset_cb = bleprph_on_reset; - ble_hs_cfg.sync_cb = bleprph_on_sync; + ble_hs_cfg.reset_cb = blemesh_on_reset; + ble_hs_cfg.sync_cb = blemesh_on_sync; ble_hs_cfg.store_status_cb = ble_store_util_status_rr; hal_gpio_init_out(LED_2, 0);
doc: fix doc error filter patterns Changes to hypercall.h altered the error pattern match used to decide if doxygen errors are "expected". Update the pattern match.
^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^[ \t]* ^[ \t]*\^ -^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] +# +^(?P<filename>[-._/\w]+/api/hypercall_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] ^.*vhm_request.reqs ^[- \t]*\^ -^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] +# +^(?P<filename>[-._/\w]+/api/hypercall_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^.*union hc_ptdev_irq::\@1 hc_ptdev_irq::is ^[- \t]*\^ ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^[ \t]* ^[ \t]*\^ -^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] +# +^(?P<filename>[-._/\w]+/api/hypercall_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] ^.*hc_ptdev_irq.is ^[- \t]*\^ ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^[ \t]* ^[ \t]*\^ -^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] +^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] +^[ \t]* +^[ \t]*\^ +^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] +^[ \t]* +^[ \t]*\^ +^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. 
\[error at [0-9]+] +^[ \t]* +^[ \t]*\^ +# +^(?P<filename>[-._/\w]+/api/hypercall_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] ^.*hc_ptdev_irq.is.intx ^[- \t]*\^ ^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected identifier in nested name. \[error at [0-9]+] ^[ \t]* ^[ \t]*\^ -^(?P=filename):(?P=lineno): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] +# +^(?P<filename>[-._/\w]+/api/hypercall_api.rst):(?P<lineno>[0-9]+): WARNING: Invalid definition: Expected end of definition. \[error at [0-9]+] ^.*hc_ptdev_irq.is.msix ^[- \t]*\^ #
Add support for engine.json fields for plugins
import EventEmitter from "events"; import Path from "path"; -import { readJSON } from "fs-extra"; +import { readJSON, pathExists } from "fs-extra"; import { EngineFieldSchema } from "store/features/engine/engineState"; import { engineRoot } from "../../consts"; import l10n from "lib/helpers/l10n"; -import { clampToCType, is16BitCType } from "lib/helpers/engineFields"; +import { clampToCType } from "lib/helpers/engineFields"; import { setDefault } from "lib/helpers/setDefault"; import { ScriptEventFieldSchema } from "store/features/entities/entitiesTypes"; +import glob from "glob"; interface EngineData { fields?: EngineFieldSchema[]; @@ -77,6 +78,7 @@ export const initEngineFields = async (projectRoot: string) => { "engine", "engine.json" ); + const pluginsPath = Path.join(projectRoot, "plugins"); let defaultEngine: EngineData = {}; let localEngine: EngineData = {}; @@ -95,6 +97,19 @@ export const initEngineFields = async (projectRoot: string) => { fields = defaultEngine.fields; } + const enginePlugins = glob.sync(`${pluginsPath}/*/engine`); + for (const enginePluginPath of enginePlugins) { + const englinePluginJsonPath = Path.join(enginePluginPath, "engine.json"); + if (await pathExists(englinePluginJsonPath)) { + try { + const pluginEngine = await readJSON(englinePluginJsonPath); + fields = fields.concat(pluginEngine.fields); + } catch(e) { + console.warn(e); + } + } + } + engineFieldsEmitter.emit("sync", { fields, schemaLookup: getEngineFieldSchemas(fields),
Fixed a compilation error when not on ARM
@@ -124,7 +124,7 @@ EXPORT int my_snprintf(x86emu_t* emu, void* buff, uint32_t s, void * fmt, void * void* f = vsnprintf; return ((iFpupp_t)f)(buff, s, fmt, emu->scratch); #else - return vsnprintf((char*)buff, s, (char*)f, V); + return vsnprintf((char*)buff, s, (char*)vsnprintf, V); #endif } EXPORT int my___sprintf_chk(x86emu_t* emu, void* buff, uint32_t s, void * fmt, void * b, va_list V) __attribute__((alias("my_snprintf"))); @@ -135,7 +135,7 @@ EXPORT int my_sprintf(x86emu_t* emu, void* buff, void * fmt, void * b, va_list V void* f = vsprintf; return ((iFppp_t)f)(buff, fmt, emu->scratch); #else - return vsprintf((char*)buff, (char*)f, V); + return vsprintf((char*)buff, (char*)vsprintf, V); #endif }
improve version handler
@@ -2,8 +2,6 @@ package main import ( "net/http" - "strconv" - "strings" ) type versionResult struct { @@ -11,13 +9,6 @@ type versionResult struct { Elektra elektraVersion `json:"elektra"` } -type elektraVersion struct { - Version string `json:"version"` - Major int `json:"major"` - Minor int `json:"minor"` - Micro int `json:"micro"` -} - // getVersionHandler returns the current version of Elektra via // JSON (see the `versionResult` struct). // @@ -26,35 +17,10 @@ type elektraVersion struct { // // Example: `curl localhost:33333/version` func getVersionHandler(w http.ResponseWriter, r *http.Request) { - kdb := getHandle(r) - - version, _ := kdb.Version() - - major, minor, micro := parseSemVer(version) - response := versionResult{ API: 1, - Elektra: elektraVersion{ - Version: version, - Major: major, - Minor: minor, - Micro: micro, - }, + Elektra: version, } writeResponse(w, response) } - -func parseSemVer(version string) (major, minor, micro int) { - parts := strings.SplitN(version, ".", 3) - - if len(parts) != 3 { - return - } - - major, _ = strconv.Atoi(parts[0]) - minor, _ = strconv.Atoi(parts[1]) - micro, _ = strconv.Atoi(parts[2]) - - return -}
Update Vagrantfile to Ubuntu 20.04.
@@ -14,8 +14,8 @@ Vagrant.configure(2) do |config| # Full development and test environment which should be used by default. #------------------------------------------------------------------------------------------------------------------------------- config.vm.define "default", primary: true do |default| - default.vm.box = "ubuntu/bionic64" - default.vm.box_version = "20210928.0.0" + default.vm.box = "ubuntu/focal64" + default.vm.box_version = "20211007.0.0" default.vm.provider :virtualbox do |vb| vb.name = "pgbackrest-test" @@ -152,7 +152,7 @@ Vagrant.configure(2) do |config| # # Basic environment to build/test pgBackRest using homebrew installed in the local user account. #------------------------------------------------------------------------------------------------------------------------------- - # mkdir ~/homebrew && curl -L https://github.com/Homebrew/brew/tarball/master | tar xz --strip 1 -C ~/homebrew + # git clone --depth=1 https://github.com/Homebrew/brew homebrew # ~/homebrew/bin/brew install -q libpq libxml2 libyaml cpanm lcov # ~/homebrew/bin/cpanm --force --local-lib=~/homebrew/perl5 install YAML::XS XML::Checker::Parser #
Fixed broken wifi.sta.{dis,}connect() with event mon enabled.
@@ -979,6 +979,7 @@ static int wifi_station_connect4lua( lua_State* L ) if(lua_isfunction(L, 1)){ lua_pushnumber(L, EVENT_STAMODE_CONNECTED); lua_pushvalue(L, 1); + lua_remove(L, 1); wifi_event_monitor_register(L); } #endif @@ -993,6 +994,7 @@ static int wifi_station_disconnect4lua( lua_State* L ) if(lua_isfunction(L, 1)){ lua_pushnumber(L, EVENT_STAMODE_DISCONNECTED); lua_pushvalue(L, 1); + lua_remove(L, 1); wifi_event_monitor_register(L); } #endif
Further fake mode cleanups.
@@ -49,11 +49,10 @@ okayFakeAddr = \case ADGala _ _ -> True ADIpv4 _ p (Ipv4 a) -> a == localhost -destSockAddr :: NetworkMode -> AmesDest -> SockAddr -destSockAddr m = \case - -- As mentioned previously, "localhost" is wrong. - ADGala _ g -> SockAddrInet (galaxyPort m g) localhost - ADIpv4 _ p a -> SockAddrInet (fromIntegral p) (unIpv4 a) +fakeSockAddr :: AmesDest -> SockAddr +fakeSockAddr = \case + ADGala _ g -> SockAddrInet (galaxyPort Fake g) localhost + ADIpv4 _ p a -> SockAddrInet (fromIntegral p) localhost barnEv :: KingId -> Ev barnEv inst = @@ -171,7 +170,7 @@ ames inst who isFake mPort enqueueEv = sendPacket AmesDrv{..} Fake dest bs = do when (okayFakeAddr dest) $ do - atomically $ writeTQueue aSendingQueue ((destSockAddr Fake dest), bs) + atomically $ writeTQueue aSendingQueue ((fakeSockAddr dest), bs) sendPacket AmesDrv{..} Real (ADGala wen galaxy) bs = do galaxies <- readIORef aGalaxies
Change Ruuvi sample rate for LIS2DH12
@@ -236,7 +236,7 @@ config_lis2dh12_sensor(void) memset(&cfg, 0, sizeof(cfg)); cfg.lc_s_mask = SENSOR_TYPE_ACCELEROMETER; - cfg.lc_rate = LIS2DH12_DATA_RATE_1HZ; + cfg.lc_rate = LIS2DH12_DATA_RATE_HN_1344HZ_L_5376HZ; cfg.lc_fs = LIS2DH12_FS_2G; rc = lis2dh12_config((struct lis2dh12 *)dev, &cfg);
fix kinematics
@@ -145,22 +145,40 @@ class OMLinkKinematics OMLinkKinematics(){}; ~OMLinkKinematics(){}; - void forward(Manipulator* manipulator, Name from, bool* error = false) + void forward(Manipulator* manipulator, bool* error = false) { + Pose pose_to_wolrd; + Pose link_relative_pose; + Eigen::Matrix3f rodrigues_rotation_matrix; + Pose result_pose; - Name component_name = from; - - method_.solveKinematicsSinglePoint(manipulator, component_name, error); + parent_pose = getComponentPoseToWorld(manipulator, getComponentParentName(manipulator, component_name, error), error); + link_relative_pose = getComponentRelativePoseToParent(manipulator, component_name, error); + rodrigues_rotation_matrix = math_.rodriguesRotationMatrix(getComponentJointAxis(manipulator, component_name, error), getComponentJointAngle(manipulator, component_name, error)); + result_pose.poosition = parent_pose.position + parent_pose.orientation * link_relative_pose.relative_position; + result_pose.orientation = parent_pose.orientation * link_relative_pose.relative_orientation * rodrigues_rotation_matrix; + setComponentPoseToWorld(manipulator, component_name, result_pose, error); + method_.solveKinematicsSinglePoint(manipulator, component_name, error); for(int i = 0; i > getComponentChildName(manipulator, component_name, error).size(); i++) { - method_.solveKinematicsSinglePoint(manipulator, getComponentChildName(manipulator, component_name, error).at(i), error); + forward(manipulator, getComponentChildName(manipulator, component_name, error).at(i), error); + } } + void forward(Manipulator* manipulator, Name from, bool* error = false) + { + Name component_name = from; + method_.solveKinematicsSinglePoint(manipulator, component_name, error); + for(int i = 0; i > getComponentChildName(manipulator, component_name, error).size(); i++) + { + forward(manipulator, getComponentChildName(manipulator, component_name, error).at(i), error); + } + // method_.setBasePose(omlink.base_, base_position, base_orientation); // 
method_.getBaseJointPose(omlink,0);
Add dependency for generated test cases
@@ -59,6 +59,7 @@ class BignumModRawFixQuasiReduction(bignum_common.ModOperationCommon, test_name = "mbedtls_mpi_mod_raw_fix_quasi_reduction" input_style = "fixed" arity = 1 + dependencies = ["MBEDTLS_TEST_HOOKS"] # Extend the default values with n < x < 2n input_values = bignum_common.ModOperationCommon.input_values + [
firdespm/autotest: adding callback function test (V pattern)
@@ -165,6 +165,48 @@ void autotest_firdespm_lowpass() liquid_autotest_verbose ? "autotest_firdespm_lowpass.m" : NULL); } +// user-defined callback function defining response and weights +int callback_firdespm_autotest(double _frequency, + void * _userdata, + double * _desired, + double * _weight) +{ + *_desired = _frequency < 0.39 ? exp(20*fabs(_frequency)) : 0; + *_weight = _frequency < 0.39 ? exp(-10*_frequency) : 1; + return 0; +} + +void autotest_firdespm_callback() +{ + // filter design parameters + unsigned int h_len = 81; // inverse sinc filter length + liquid_firdespm_btype btype = LIQUID_FIRDESPM_BANDPASS; + unsigned int num_bands = 2; + float bands[4] = {0.0, 0.35, 0.4, 0.5}; + + // design filter + float h[h_len]; + firdespm q = firdespm_create_callback(h_len,num_bands,bands,btype, + callback_firdespm_autotest,NULL); + firdespm_execute(q,h); + firdespm_destroy(q); + + // verify resulting spectrum + autotest_psd_s regions[] = { + {.fmin=-0.50, .fmax=-0.40, .pmin= 0, .pmax=-20, .test_lo=0, .test_hi=1}, + {.fmin=-0.36, .fmax=-0.30, .pmin=52, .pmax= 62, .test_lo=1, .test_hi=1}, + {.fmin=-0.30, .fmax=-0.20, .pmin=34, .pmax= 53, .test_lo=1, .test_hi=1}, + {.fmin=-0.20, .fmax=-0.10, .pmin=15, .pmax= 36, .test_lo=1, .test_hi=1}, + {.fmin=-0.10, .fmax=+0.10, .pmin= 0, .pmax= 19, .test_lo=1, .test_hi=1}, + {.fmin= 0.10, .fmax= 0.20, .pmin=15, .pmax= 36, .test_lo=1, .test_hi=1}, + {.fmin= 0.20, .fmax= 0.30, .pmin=34, .pmax= 53, .test_lo=1, .test_hi=1}, + {.fmin= 0.30, .fmax= 0.36, .pmin=52, .pmax= 62, .test_lo=1, .test_hi=1}, + {.fmin= 0.40, .fmax= 0.50, .pmin= 0, .pmax=-20, .test_lo=0, .test_hi=1}, + }; + liquid_autotest_validate_psd_signalf(h, h_len, regions, 9, + liquid_autotest_verbose ? "autotest_firdespm_callback.m" : NULL); +} + void autotest_firdespm_config() { #if LIQUID_STRICT_EXIT
webterm: add glob url to desk.docket
:~ title+'Web Terminal' info+'A web interface for dill, through herm.' color+0xff.ffff - glob-http+'https://bootstrap.urbit.org/glob-XX.glob' + glob-http+'https://bootstrap.urbit.org/glob-0v4.8ui32.ui10d.t0v4d.n9g1s.1ftua.glob' base+'webterm' version+[0 0 1] website+'https://tlon.io'
OpenCanopy: Fix double-click regression introduced in 0.6.5 closes acidanthera/bugtracker#1386
@@ -531,7 +531,8 @@ InternalBootPickerEntryPtrEvent ( if (SameIter) { SameIter = FALSE; } else { - Context->BootEntry = Entry->Context; + Context->ReadyToBoot = TRUE; + ASSERT (Context->BootEntry == Entry->Context); } } //
Enable show smbios command on uefi release build Show smbios command uses function getdimmsmbiostable. In this function there were sections compiled only for debug build. In this change these compile time checks for debug build are removed so that the command provides similar output for release build.
@@ -3201,17 +3201,14 @@ GetDimmSmbiosTable( { EFI_STATUS ReturnCode = EFI_INVALID_PARAMETER; #ifndef OS_BUILD -#ifndef MDEPKG_NDEBUG + SMBIOS_STRUCTURE_POINTER DmiPhysicalDev; SMBIOS_STRUCTURE_POINTER DmiDeviceMappedAddr; SMBIOS_VERSION SmbiosVersion; DIMM *pDimm = NULL; -#endif + NVDIMM_ENTRY(); -#ifdef MDEPKG_NDEBUG - ReturnCode = EFI_UNSUPPORTED; -#else ZeroMem(&DmiPhysicalDev, sizeof(DmiPhysicalDev)); ZeroMem(&DmiDeviceMappedAddr, sizeof(DmiDeviceMappedAddr)); ZeroMem(&SmbiosVersion, sizeof(SmbiosVersion)); @@ -3262,7 +3259,6 @@ GetDimmSmbiosTable( } ReturnCode = EFI_SUCCESS; Finish: -#endif NVDIMM_EXIT_I64(ReturnCode); #endif return ReturnCode;
[bsp/stm32] fix the bug of 'can' being stucked when short cricuit the canH and canL, change some wrong annotations
@@ -343,7 +343,7 @@ static rt_err_t _can_control(struct rt_can_device *can, int cmd, void *arg) } /** * ID | CAN_FxR1[31:24] | CAN_FxR1[23:16] | CAN_FxR1[15:8] | CAN_FxR1[7:0] | - * MASK | CAN_FxR2[31:24] | CAN_FxR1[23:16] | CAN_FxR1[15:8] | CAN_FxR1[7:0] | + * MASK | CAN_FxR2[31:24] | CAN_FxR2[23:16] | CAN_FxR2[15:8] | CAN_FxR2[7:0] | * STD ID | STID[10:3] | STDID[2:0] |<- 21bit ->| * EXT ID | EXTID[28:21] | EXTID[20:13] | EXTID[12:5] | EXTID[4:0] IDE RTR 0| * @note the 32bit STD ID must << 21 to fill CAN_FxR1[31:21] and EXT ID must << 3, @@ -482,7 +482,7 @@ static int _can_sendmsg(struct rt_can_device *can, const void *buf, rt_uint32_t if (HAL_IS_BIT_SET(hcan->Instance->TSR, CAN_TSR_TME0) != SET) { /* Change CAN state */ - hcan->State = HAL_CAN_STATE_ERROR; + // hcan->State = HAL_CAN_STATE_ERROR; /* Return function status */ return -RT_ERROR; } @@ -491,7 +491,7 @@ static int _can_sendmsg(struct rt_can_device *can, const void *buf, rt_uint32_t if (HAL_IS_BIT_SET(hcan->Instance->TSR, CAN_TSR_TME1) != SET) { /* Change CAN state */ - hcan->State = HAL_CAN_STATE_ERROR; + // hcan->State = HAL_CAN_STATE_ERROR; /* Return function status */ return -RT_ERROR; } @@ -500,7 +500,7 @@ static int _can_sendmsg(struct rt_can_device *can, const void *buf, rt_uint32_t if (HAL_IS_BIT_SET(hcan->Instance->TSR, CAN_TSR_TME2) != SET) { /* Change CAN state */ - hcan->State = HAL_CAN_STATE_ERROR; + // hcan->State = HAL_CAN_STATE_ERROR; /* Return function status */ return -RT_ERROR; } @@ -732,6 +732,10 @@ void CAN1_TX_IRQHandler(void) /* Write 0 to Clear transmission status flag RQCPx */ SET_BIT(hcan->Instance->TSR, CAN_TSR_RQCP2); } + else + { + rt_hw_can_isr(&drv_can1.device, RT_CAN_EVENT_TX_FAIL | 0 << 8); + } rt_interrupt_leave(); } @@ -852,6 +856,10 @@ void CAN2_TX_IRQHandler(void) /* Write 0 to Clear transmission status flag RQCPx */ SET_BIT(hcan->Instance->TSR, CAN_TSR_RQCP2); } + else + { + rt_hw_can_isr(&drv_can2.device, RT_CAN_EVENT_TX_FAIL | 0 << 8); + } rt_interrupt_leave(); }
optimize _maketable
@@ -101,12 +101,6 @@ function serialize._maketable(object, opt, level, path, reftab) isarr = false end - -- make indent - local indent = "" - if opt.indent then - indent = string.rep(opt.indent, level) - end - -- make body local bodystrs = {} if isarr then @@ -114,38 +108,40 @@ function serialize._maketable(object, opt, level, path, reftab) bodystrs[i] = serialized[i] or "nil" end else - local con = opt.indent and " = " or "=" + local dformat = opt.indent and "%s = %s" or "%s=%s" + local sformat = opt.indent and "[%q] = %s" or "[%q]=%s" + local nformat = opt.indent and "[%s] = %s" or "[%s]=%s" for k, v in pairs(serialized) do + local format -- serialize key if type(k) == "string" then if keywords:has(k) or not k:match("^[%a_][%w_]*$") then - k = string.format("[%q]", k) + format = sformat + else + format = dformat end else -- type(k) == "number" - local nval, err = serialize._makedefault(k, opt) - if err ~= nil then - return nil, err - end - k = string.format("[%s]", nval) + format = nformat end -- concat k = v - table.insert(bodystrs, k .. con .. v) + table.insert(bodystrs, string.format(format, k, v)) end end - -- make head - local headstr = opt.indent and ("{\n" .. indent .. opt.indent) or "{" - - -- make tail - local tailstr + -- make head and tail + local headstr, bodysep, tailstr if opt.indent then - tailstr = "\n" .. indent .. "}" + local indent = "\n" .. string.rep(opt.indent, level) + tailstr = indent .. "}" + indent = indent .. opt.indent + headstr = "{" .. indent + bodysep = "," .. indent else - tailstr = "}" + headstr, bodysep, tailstr = "{", ",", "}" end -- concat together - return headstr .. table.concat(bodystrs, opt.indent and (",\n" .. indent .. opt.indent) or ",") .. tailstr + return headstr .. table.concat(bodystrs, bodysep) .. tailstr end function serialize._makefunction(func, opt)
Don't install npm It is no longer necessary since we were only using it for the node-based coap client
@@ -29,7 +29,6 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E03280 mosquitto \ mosquitto-clients \ net-tools \ - npm \ openjdk-8-jdk \ python-pip \ python-serial \
Simplify tcheckExpr
@@ -373,23 +373,13 @@ func (q *checker) tcheckExpr(n *a.Expr, depth uint32) error { switch op := n.Operator(); { case op.IsXUnaryOp(): - if err := q.tcheckExprUnaryOp(n, depth); err != nil { - return err - } + return q.tcheckExprUnaryOp(n, depth) case op.IsXBinaryOp(): - if err := q.tcheckExprBinaryOp(n, depth); err != nil { - return err - } + return q.tcheckExprBinaryOp(n, depth) case op.IsXAssociativeOp(): - if err := q.tcheckExprAssociativeOp(n, depth); err != nil { - return err + return q.tcheckExprAssociativeOp(n, depth) } - default: - if err := q.tcheckExprOther(n, depth); err != nil { - return err - } - } - return nil + return q.tcheckExprOther(n, depth) } func (q *checker) tcheckExprOther(n *a.Expr, depth uint32) error {
Add RtlGetFileMUIPath
@@ -4775,6 +4775,19 @@ RtlFormatMessageEx( _Out_opt_ PPARSE_MESSAGE_CONTEXT ParseContext ); +NTSYSAPI +NTSTATUS +NTAPI +RtlGetFileMUIPath( + _In_ ULONG Flags, + _In_ PCWSTR FilePath, + _Inout_opt_ PWSTR Language, + _Inout_ PULONG LanguageLength, + _Out_opt_ PWSTR FileMUIPath, + _Inout_ PULONG FileMUIPathLength, + _Inout_ PULONGLONG Enumerator + ); + // Errors NTSYSAPI
mercator support
@@ -158,7 +158,21 @@ static int unpack_string(grib_accessor* a, char* v, size_t* len) if (err) return err; if (strcmp(grid_type, "mercator") == 0) { - sprintf(v, "%s", "mercator proj string"); + double earthMajorAxisInMetres = 0, earthMinorAxisInMetres = 0, radius = 0, LaDInDegrees = 0; + if (grib_is_earth_oblate(h)) { + if ((err = grib_get_double_internal(h, "earthMinorAxisInMetres", &earthMinorAxisInMetres)) != GRIB_SUCCESS) return err; + if ((err = grib_get_double_internal(h, "earthMajorAxisInMetres", &earthMajorAxisInMetres)) != GRIB_SUCCESS) return err; + } else { + if ((err = grib_get_double_internal(h, "radius", &radius)) != GRIB_SUCCESS) return err; + earthMinorAxisInMetres = earthMajorAxisInMetres = radius; + } + if ((err = grib_get_double_internal(h, "LaDInDegrees", &LaDInDegrees)) != GRIB_SUCCESS) + return err; + sprintf(v,"+proj=merc +lat_ts=%lf +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +a=%lf +b=%lf", + LaDInDegrees, earthMajorAxisInMetres, earthMinorAxisInMetres); + } + else if (strcmp(grid_type, "polar_stereographic") == 0) { + } else { grib_context_log(a->context, GRIB_LOG_ERROR, "proj string for grid '%s' not implemented", grid_type);
[rtmapreduce] yolint: fix migrations.
@@ -441,8 +441,6 @@ migrations: - a.yandex-team.ru/quasar/iot/steelix/server_test - a.yandex-team.ru/rtc/janus/janus - a.yandex-team.ru/rtc/janus/janus_test - - a.yandex-team.ru/rtmapreduce/tools/sli - - a.yandex-team.ru/rtmapreduce/tools/sli_test - a.yandex-team.ru/transfer_manager/go/cmd/cdc_server - a.yandex-team.ru/transfer_manager/go/cmd/pg_to_ch - a.yandex-team.ru/transfer_manager/go/internal/config
Skip key-value pairs in compresed KTX files
@@ -1079,12 +1079,13 @@ bool load_ktx_compressed_image( size_t actual = fread(&hdr, 1, sizeof(hdr), f); if (actual != sizeof(hdr)) { - printf("Failed to read header of KTX file %s\n", filename); + printf("Failed to read header from %s\n", filename); fclose(f); return true; } - if (memcmp(hdr.magic, ktx_magic, 12) != 0 || (hdr.endianness != 0x04030201 && hdr.endianness != 0x01020304)) + if (memcmp(hdr.magic, ktx_magic, 12) != 0 || + (hdr.endianness != 0x04030201 && hdr.endianness != 0x01020304)) { printf("File %s does not have a valid KTX header\n", filename); fclose(f); @@ -1098,7 +1099,8 @@ bool load_ktx_compressed_image( ktx_header_switch_endianness(&hdr); } - if (hdr.gl_type != 0 || hdr.gl_format != 0 || hdr.gl_type_size != 1 || hdr.gl_base_internal_format != GL_RGBA) + if (hdr.gl_type != 0 || hdr.gl_format != 0 || hdr.gl_type_size != 1 || + hdr.gl_base_internal_format != GL_RGBA) { printf("File %s is not a compressed ASTC file\n", filename); fclose(f); @@ -1113,29 +1115,41 @@ bool load_ktx_compressed_image( return true; } + // Skip over any key-value pairs + int seekerr; + seekerr = fseek(f, hdr.bytes_of_key_value_data, SEEK_CUR); + if (seekerr) + { + printf("Failed to skip key-value pairs in %s\n", filename); + fclose(f); + } + + // Read the length of the data and endianess convert unsigned int data_len; actual = fread(&data_len, 1, sizeof(data_len), f); if (actual != sizeof(data_len)) { - printf("Failed to read data size of KTX file %s\n", filename); + printf("Failed to read mip 0 size from %s\n", filename); fclose(f); return true; } + if (switch_endianness) + { + data_len = u32_byterev(data_len); + } + + // Read the data unsigned char* data = new unsigned char[data_len]; actual = fread(data, 1, data_len, f); if (actual != data_len) { - printf("Failed to data from KTX file %s\n", filename); + printf("Failed to read mip 0 data from %s\n", filename); fclose(f); + delete[] data; return true; } - if (switch_endianness) - { - data_len = u32_byterev(data_len); - } 
- img.block_x = fmt->x; img.block_y = fmt->y; img.block_z = fmt->z == 0 ? 1 : fmt->z;
TRIVIAL fix a typo in usage hint
@@ -48,7 +48,7 @@ def get_compiler_info(compiler): def main(): if len(sys.argv) != 4: - print >>sys.stderr, "Usage: svn_version_gen.py <output file> <CXX compiler> <CXX flags>" + print >>sys.stderr, "Usage: build_info_gen.py <output file> <CXX compiler> <CXX flags>" sys.exit(1) cxx_compiler = sys.argv[2] cxx_flags = sys.argv[3]
modified self.max_entries to be available from all the MAP types This commit introduces the self.max_entries attribute both into Queue/Stack maps and to all those whwqo extend TableBase
@@ -314,6 +314,8 @@ class TableBase(MutableMapping): self.flags = lib.bpf_table_flags_id(self.bpf.module, self.map_id) self._cbs = {} self._name = name + self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module, + self.map_id)) def get_fd(self): return self.map_fd @@ -670,8 +672,6 @@ class TableBase(MutableMapping): class HashTable(TableBase): def __init__(self, *args, **kwargs): super(HashTable, self).__init__(*args, **kwargs) - self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module, - self.map_id)) def __len__(self): i = 0 @@ -685,8 +685,6 @@ class LruHash(HashTable): class ArrayBase(TableBase): def __init__(self, *args, **kwargs): super(ArrayBase, self).__init__(*args, **kwargs) - self.max_entries = int(lib.bpf_table_max_entries_id(self.bpf.module, - self.map_id)) def _normalize_key(self, key): if isinstance(key, int):
fix not writing frame
@@ -280,10 +280,10 @@ def draw_frame(shared, lock, beatmap, skin, skin_path, replay_event, resultinfo, print("setup done") while frame_info.osr_index < end_index: # len(replay_event) - 3: - render_draw(beatmap, component, cursor_event, frame_info, img, np_img, pbuffer, + status = render_draw(beatmap, component, cursor_event, frame_info, img, np_img, pbuffer, preempt_followpoint, replay_event, start_index, time_preempt, updater) - if img.size[0] != 1: + if status: lock.value = 1 while lock.value == 1:
Don't call `h2o_http2_conn_request_write` from `emit_writereq_of_openref` This fixes an issue seen when 'server-timing: enforced' is set in the configuration: proceed would set the write callback, and we'd hit the assert in `do_emit_writereq()` checking that `buf_in_flight` is `NULL`: ``` h2o: lib/http2/connection.c:1229: do_emit_writereq: Assertion `conn->_write.buf_in_flight == NULL' failed. ```
@@ -1216,7 +1216,6 @@ static int emit_writereq_of_openref(h2o_http2_scheduler_openref_t *ref, int *sti } h2o_hpack_flatten_trailers(&conn->_write.buf, &conn->_output_header_table, stream->stream_id, conn->peer_settings.max_frame_size, trailers, num_trailers); - h2o_http2_conn_request_write(conn); } h2o_linklist_insert(&conn->_write.streams_to_proceed, &stream->_refs.link); }
fix several bugs in the source routing table
@@ -115,9 +115,11 @@ uip_sr_expire_parent(void *graph, const uip_ipaddr_t *child, const uip_ipaddr_t uip_sr_node_t *l = uip_sr_get_node(graph, child); /* Check if parent matches */ if(l != NULL && node_matches_address(graph, l->parent, parent)) { + if(l->lifetime > UIP_SR_REMOVAL_DELAY) { l->lifetime = UIP_SR_REMOVAL_DELAY; } } +} /*---------------------------------------------------------------------------*/ uip_sr_node_t * uip_sr_update_node(void *graph, const uip_ipaddr_t *child, const uip_ipaddr_t *parent, uint32_t lifetime) @@ -213,11 +215,15 @@ uip_sr_periodic(unsigned seconds) next = list_item_next(l); if(l->lifetime == 0) { uip_sr_node_t *l2; + int can_be_removed = 1; for(l2 = list_head(nodelist); l2 != NULL; l2 = list_item_next(l2)) { if(l2->parent == l) { + can_be_removed = 0; break; } } + if(can_be_removed) { + /* No child found, deallocate node */ if(LOG_INFO_ENABLED) { uip_ipaddr_t node_addr; NETSTACK_ROUTING.get_sr_node_ipaddr(&node_addr, l); @@ -225,10 +231,10 @@ uip_sr_periodic(unsigned seconds) LOG_INFO_6ADDR(&node_addr); LOG_INFO_("\n"); } - /* No child found, deallocate node */ list_remove(nodelist, l); memb_free(&nodememb, l); num_nodes--; + } } else if(l->lifetime != UIP_SR_INFINITE_LIFETIME) { l->lifetime = l->lifetime > seconds ? l->lifetime - seconds : 0; }
fix: use vc->udp_service.ssrc instead of vc->ssrc
@@ -426,7 +426,7 @@ discord_send_speaking(struct discord_voice *vc, enum discord_voice_speaking_flag "}", &flag, &delay, - &vc->ssrc); + &vc->udp_service.ssrc); ASSERT_S(ret < sizeof(payload), "Out of bounds write attempt"); log_info("Sending VOICE_SPEAKING(%d bytes)", ret);
fix: fix test_002InitWallet_0007SetNodeUrlFailureErrorNodeUrlFormat issue
@@ -724,6 +724,8 @@ START_TEST(test_002InitWallet_0007SetNodeUrlFailureErrorNodeUrlFormat) BoatEthWallet *wallet_ptr = BoatMalloc(sizeof(BoatEthWallet)); BoatEthWalletConfig wallet; + ck_assert_ptr_ne(wallet_ptr, NULL); + /* 1. execute unit test */ strncpy(wallet.node_url_str, "abcd", strlen("abcd")); wallet_ptr->network_info.node_url_ptr = NULL;
Version String Macro
@@ -13,6 +13,15 @@ Feel free to copy, use and enjoy according to the license provided. #define FACIL_VERSION_MINOR 7 #define FACIL_VERSION_PATCH 0 +/* Automatically convert version data to a string constant*/ +#define FACIL_VERSION_STR_FROM_MACRO_STEP2(major, minor, patch) \ +#major "." #minor "." #patch +#define FACIL_VERSION_STR_FROM_MACRO_STEP1(major, minor, patch) \ + FACIL_VERSION_STR_FROM_MACRO_STEP2(major, minor, patch) +#define FACIL_VERSION_STRING \ + FACIL_VERSION_STR_FROM_MACRO_STEP1(FACIL_VERSION_MAJOR, FACIL_VERSION_MINOR, \ + FACIL_VERSION_PATCH) + #ifndef FACIL_PRINT_STATE /** * When FACIL_PRINT_STATE is set to 1, facil.io will print out common messages
Skyrim: Remove CONFIG_SYSTEM_UNLOCKED Skyrim is at a stage where this CONFIG can be removed. BRANCH=None TEST=zmake build skyrim; boot up on a D4 skyrim
# Skyrim reference-board-specific Kconfig settings. CONFIG_BOARD_SKYRIM=y -# TODO(b/215404321): Remove later in board development +# CBI WP pin present CONFIG_PLATFORM_EC_EEPROM_CBI_WP=y -CONFIG_PLATFORM_EC_SYSTEM_UNLOCKED=y # LED CONFIG_PLATFORM_EC_LED_DT=y
Log more info on ZMK config dir usage.
@@ -45,9 +45,11 @@ set(CACHED_ZMK_CONFIG ${ZMK_CONFIG} CACHE STRING "Selected user ZMK config") if (ZMK_CONFIG) if(EXISTS "${ZMK_CONFIG}/boards") + message(STATUS "Adding ZMK config directory as board root: ${ZMK_CONFIG}") list(APPEND BOARD_ROOT "${ZMK_CONFIG}") endif() if(EXISTS "${ZMK_CONFIG}/dts") + message(STATUS "Adding ZMK config directory as DTS root: ${ZMK_CONFIG}") list(APPEND DTS_ROOT "${ZMK_CONFIG}") endif() endif()
Minor comment improvements for instrumentation.h Remove a duplicated word. Add "of" or "# of" in a couple places for clarity and consistency. Start comments with a lower case letter as we do elsewhere in this file. Rafia Sabih
@@ -48,20 +48,20 @@ typedef struct Instrumentation bool need_bufusage; /* true if we need buffer usage data */ /* Info about current plan cycle: */ bool running; /* true if we've completed first tuple */ - instr_time starttime; /* Start time of current iteration of node */ - instr_time counter; /* Accumulated runtime for this node */ - double firsttuple; /* Time for first tuple of this cycle */ - double tuplecount; /* Tuples emitted so far this cycle */ - BufferUsage bufusage_start; /* Buffer usage at start */ + instr_time starttime; /* start time of current iteration of node */ + instr_time counter; /* accumulated runtime for this node */ + double firsttuple; /* time for first tuple of this cycle */ + double tuplecount; /* # of tuples emitted so far this cycle */ + BufferUsage bufusage_start; /* buffer usage at start */ /* Accumulated statistics across all completed cycles: */ - double startup; /* Total startup time (in seconds) */ - double total; /* Total total time (in seconds) */ - double ntuples; /* Total tuples produced */ - double ntuples2; /* Secondary node-specific tuple counter */ + double startup; /* total startup time (in seconds) */ + double total; /* total time (in seconds) */ + double ntuples; /* total tuples produced */ + double ntuples2; /* secondary node-specific tuple counter */ double nloops; /* # of run cycles for this node */ - double nfiltered1; /* # tuples removed by scanqual or joinqual */ - double nfiltered2; /* # tuples removed by "other" quals */ - BufferUsage bufusage; /* Total buffer usage */ + double nfiltered1; /* # of tuples removed by scanqual or joinqual */ + double nfiltered2; /* # of tuples removed by "other" quals */ + BufferUsage bufusage; /* total buffer usage */ } Instrumentation; typedef struct WorkerInstrumentation
board/dood/board.h: Format with clang-format BRANCH=none TEST=none
#define CONFIG_ACCELGYRO_BMI160_INT_EVENT \ TASK_EVENT_MOTION_SENSOR_INTERRUPT(BASE_ACCEL) -#define CONFIG_SYNC_INT_EVENT \ - TASK_EVENT_MOTION_SENSOR_INTERRUPT(VSYNC) +#define CONFIG_SYNC_INT_EVENT TASK_EVENT_MOTION_SENSOR_INTERRUPT(VSYNC) #define CONFIG_LID_ANGLE #define CONFIG_LID_ANGLE_UPDATE /* prevent pd reset when battery soc under 2% */ #define CONFIG_USB_PD_RESET_MIN_BATT_SOC 2 - #ifndef __ASSEMBLER__ /* support factory keyboard test */ @@ -85,12 +83,7 @@ enum temp_sensor_id { }; /* Motion sensors */ -enum sensor_id { - LID_ACCEL, - BASE_ACCEL, - BASE_GYRO, - SENSOR_COUNT -}; +enum sensor_id { LID_ACCEL, BASE_ACCEL, BASE_GYRO, SENSOR_COUNT }; /* List of possible batteries */ enum battery_type {
more AtkEvent types
namespace FFXIVClientStructs.FFXIV.Component.GUI { - // there's 70+ of these + // max known: 79 + // seems to have generic events followed by component-specific events public enum AtkEventType { MouseDown = 3, MouseUp = 4, MouseMove = 5, - MouseOver = 6, // used for changing the cursor state when you mouseover stuff + MouseOver = 6, MouseOut = 7, + MouseClick = 9, InputReceived = 12, - StateChanged = 25, // sent by AtkButtonComponent (and others?) when button state changes + // AtkComponentButt on & children + ButtonPress = 23, // sent on MouseDown on button + ButtonRelease = 24, // sent on MouseUp and MouseOut + ButtonClick = 25, // sent on MouseUp and MouseClick on button + // AtkComponentDragDrop + DragDropRollOver = 52, + DragDropRollOut = 53, + DragDropUnk54 = 54, + DragDropUnk55 = 55, + // AtkComponentIconText + IconTextRollOver = 56, + IconTextRollOut = 57, + IconTextClick = 58 } [StructLayout(LayoutKind.Explicit, Size=0x30)]
change "unsigned char list" to "byte list"
@@ -15,7 +15,7 @@ Blockly.Blocks['lists_create_with'] = { init: function() { this.setColour(Blockly.Blocks.lists.HUE); this.appendDummyInput("") - .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'],[Blockly.LANG_MATH_BYTE, 'unsigned char']]), "TYPE") + .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'],[Blockly.LANG_MATH_BYTE, 'byte']]), "TYPE") .appendField(' ') .appendField(new Blockly.FieldTextInput('mylist'), 'VAR') .appendField('[') @@ -142,7 +142,7 @@ Blockly.Blocks['lists_create_with_text'] = { init: function() { this.setColour(Blockly.Blocks.lists.HUE); this.appendDummyInput("") - .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'], [Blockly.LANG_MATH_BYTE, 'unsigned char']]), "TYPE") + .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'], [Blockly.LANG_MATH_BYTE, 'byte']]), "TYPE") .appendField(' ') .appendField(new Blockly.FieldTextInput('mylist'), 'VAR') .appendField('[') @@ -175,7 +175,7 @@ Blockly.Blocks['lists_create_with2'] = { init: function() { this.setColour(Blockly.Blocks.lists.HUE); this.appendDummyInput("") - .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'], [Blockly.LANG_MATH_BYTE, 'unsigned char']]), "TYPE") + .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'], [Blockly.LANG_MATH_BYTE, 'byte']]), "TYPE") .appendField(' ') .appendField(new Blockly.FieldTextInput('mylist'), 'VAR') .appendField('[') @@ -302,7 +302,7 @@ Blockly.Blocks['lists_create_with_text2'] = { init: function() { 
this.setColour(Blockly.Blocks.lists.HUE); this.appendDummyInput("") - .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'],[Blockly.LANG_MATH_BYTE, 'unsigned char']]), "TYPE") + .appendField(new Blockly.FieldDropdown([[Blockly.LANG_MATH_INT, 'long'],[Blockly.LANG_MATH_FLOAT, 'float'],[Blockly.LANG_MATH_CHAR, 'char'],[Blockly.LANG_MATH_BYTE, 'byte']]), "TYPE") .appendField(' ') .appendField(new Blockly.FieldTextInput('mylist'), 'VAR') .appendField('[')
Flash algo fix sectors for stm32f4xx
@@ -40,6 +40,9 @@ static const sector_info_t sectors_info[] = { { 0x08000000, 0x00004000 }, // 4 x 16KB { 0x08010000, 0x00010000 }, // 1 x 64KB { 0x08020000, 0x00020000 }, // 7 x 128KB + { 0x08100000, 0x00004000 }, + { 0x08110000, 0x00010000 }, + { 0x08120000, 0x00020000 }, }; static const program_target_t flash = {
Fix std_stream_create initialiser
@@ -59,6 +59,7 @@ char std_stream_popchar(std_stream_t* stream) { std_stream_t* std_stream_create() { std_stream_t* st = kmalloc(sizeof(std_stream_t)); memset(st, 0, sizeof(std_stream_t)); + st->buf = calloc(1, sizeof(circular_buffer)); cb_init(st->buf, 256, sizeof(char)); return st; }
Improve adding sensors and only allow doing so in active find sensor state
@@ -1115,6 +1115,11 @@ void DeRestPluginPrivate::gpDataIndication(const deCONZ::GpDataIndication &ind) if (!sensor) { + if (findSensorsState != FindSensorsActive) + { + return; + } + // create new sensor Sensor sensorNode; @@ -2240,6 +2245,11 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node) return; } + if (findSensorsState != FindSensorsActive) + { + return; + } + { // check existing sensors std::vector<Sensor>::iterator i = sensors.begin(); std::vector<Sensor>::iterator end = sensors.end(); @@ -6519,10 +6529,9 @@ void DeRestPluginPrivate::handleCommissioningClusterIndication(TaskItem &task, c */ void DeRestPluginPrivate::handleDeviceAnnceIndication(const deCONZ::ApsDataIndication &ind) { - std::vector<LightNode>::iterator i = nodes.begin(); // TODO + std::vector<LightNode>::iterator i = nodes.begin(); std::vector<LightNode>::iterator end = nodes.end(); - // TODO use actual zdp payload for ext and nwk address quint16 nwk; quint64 ext; quint8 macCapabilities; @@ -6617,35 +6626,40 @@ void DeRestPluginPrivate::handleDeviceAnnceIndication(const deCONZ::ApsDataIndic } } + int found = 0; std::vector<Sensor>::iterator si = sensors.begin(); std::vector<Sensor>::iterator send = sensors.end(); for (; si != send; ++si) { - if ((ind.srcAddress().hasExt() && si->address().ext() == ind.srcAddress().ext()) || - (ind.srcAddress().hasNwk() && si->address().nwk() == ind.srcAddress().nwk())) + if ((si->address().ext() == ext) || (si->address().nwk() == nwk)) { + found++; DBG_Printf(DBG_INFO, "DeviceAnnce of SensorNode: %s\n", qPrintable(si->address().toStringExt())); checkSensorNodeReachable(&(*si)); - /* - if (si->deletedState() == Sensor::StateDeleted) + } + } + + if (findSensorsState == FindSensorsActive) + { + if (!found && apsCtrl) { - si->setIsAvailable(true); - si->setNextReadTime(QTime::currentTime().addMSecs(ReadAttributesLongDelay)); - si->enableRead(READ_BINDING_TABLE | READ_GROUP_IDENTIFIERS | READ_MODEL_ID | READ_SWBUILD_ID); - 
si->setLastRead(idleTotalCounter); - si->setDeletedState(Sensor::StateNormal); + int i = 0; + const deCONZ::Node *node; - updateEtag(si->etag); - updateEtag(gwConfigEtag); - queSaveDb(DB_SENSORS, DB_SHORT_SAVE_DELAY); + // try to add sensor nodes even if they existed in deCONZ bevor and therefore + // no node added event will be triggert in this phase + while (apsCtrl->getNode(i, &node) == 0) + { + if (ext == node->address().ext()) + { + addSensorNode(node); + break; } - */ + i++; } } - if (findSensorsState == FindSensorsActive) - { deCONZ::ZclFrame zclFrame; // dummy handleIndicationFindSensors(ind, zclFrame); }
vatmeta: fixing potential null ref
@@ -8,8 +8,8 @@ export function VatMeta(props: { vat: Vat }) { const { vat } = props; const { desk, arak, cass, hash } = vat; - const { desk: foreignDesk, ship, next } = arak.rail!; - const pluralUpdates = next.length !== 1; + const { desk: foreignDesk, ship, next } = arak.rail || {}; + const pluralUpdates = next?.length !== 1; return ( <div className="mt-5 sm:mt-8 space-y-5 sm:space-y-8"> <Attribute title="Developer Desk" attr="desk"> @@ -24,7 +24,7 @@ export function VatMeta(props: { vat: Vat }) { <Attribute title="Installed into" attr="local-desk"> %{desk} </Attribute> - {next.length > 0 ? ( + {next && next.length > 0 ? ( <Attribute attr="next" title="Pending Updates"> {next.length} update{pluralUpdates ? 's are' : ' is'} pending a System Update </Attribute>
[core] cold func http_response_omit_header()
#include <string.h> #include <time.h> +__attribute_cold__ +static int http_response_omit_header(connection *con, const data_string * const ds) { + const size_t klen = buffer_string_length(ds->key); + if (klen == sizeof("X-Sendfile")-1 + && buffer_eq_icase_ssn(ds->key->ptr,CONST_STR_LEN("X-Sendfile"))) + return 1; + if (klen >= sizeof("X-LIGHTTPD-")-1 + && buffer_eq_icase_ssn(ds->key->ptr,CONST_STR_LEN("X-LIGHTTPD-"))) { + if (klen == sizeof("X-LIGHTTPD-KBytes-per-second")-1 + && buffer_eq_icase_ssn(ds->key->ptr+sizeof("X-LIGHTTPD-")-1, + CONST_STR_LEN("KBytes-per-second"))) { + /* "X-LIGHTTPD-KBytes-per-second" */ + long limit = strtol(ds->value->ptr, NULL, 10); + if (limit > 0 + && (limit < con->conf.kbytes_per_second + || 0 == con->conf.kbytes_per_second)) { + if (limit > USHRT_MAX) limit= USHRT_MAX; + con->conf.kbytes_per_second = limit; + } + } + return 1; + } + return 0; +} + int http_response_write_header(server *srv, connection *con) { buffer * const b = chunkqueue_prepend_buffer_open(con->write_queue); @@ -60,23 +85,10 @@ int http_response_write_header(server *srv, connection *con) { for (size_t i = 0; i < con->response.headers->used; ++i) { const data_string * const ds = (data_string *)con->response.headers->data[i]; - if (buffer_string_is_empty(ds->value) || buffer_string_is_empty(ds->key)) continue; - if ((ds->key->ptr[0] & 0xdf) == 'X') { - if (0 == strncasecmp(ds->key->ptr, CONST_STR_LEN("X-Sendfile"))) continue; - if (0 == strncasecmp(ds->key->ptr, CONST_STR_LEN("X-LIGHTTPD-"))) { - if (0 == strncasecmp(ds->key->ptr+sizeof("X-LIGHTTPD-")-1, CONST_STR_LEN("KBytes-per-second"))) { - /* "X-LIGHTTPD-KBytes-per-second" */ - long limit = strtol(ds->value->ptr, NULL, 10); - if (limit > 0 - && (limit < con->conf.kbytes_per_second - || 0 == con->conf.kbytes_per_second)) { - if (limit > USHRT_MAX) limit= USHRT_MAX; - con->conf.kbytes_per_second = limit; - } - } + if (buffer_string_is_empty(ds->value)) continue; + if (buffer_string_is_empty(ds->key)) continue; + 
if ((ds->key->ptr[0] & 0xdf)=='X' && http_response_omit_header(con, ds)) continue; - } - } buffer_append_string_len(b, CONST_STR_LEN("\r\n")); buffer_append_string_buffer(b, ds->key);
store: Avoid spurious error from decoding at EOF Fixes
@@ -518,6 +518,7 @@ static int file_load_file(struct file_ctx_st *ctx, OSSL_PASSPHRASE_CALLBACK *pw_cb, void *pw_cbarg) { struct file_load_data_st data; + int ret, err; /* Setup the decoders (one time shot per session */ @@ -533,7 +534,16 @@ static int file_load_file(struct file_ctx_st *ctx, /* Launch */ - return OSSL_DECODER_from_bio(ctx->_.file.decoderctx, ctx->_.file.file); + ERR_set_mark(); + ret = OSSL_DECODER_from_bio(ctx->_.file.decoderctx, ctx->_.file.file); + if (BIO_eof(ctx->_.file.file) + && ((err = ERR_peek_last_error()) != 0) + && ERR_GET_LIB(err) == ERR_LIB_OSSL_DECODER + && ERR_GET_REASON(err) == ERR_R_UNSUPPORTED) + ERR_pop_to_mark(); + else + ERR_clear_last_mark(); + return ret; } /*-
Better way to recognise mingw64 in config script
@@ -320,6 +320,15 @@ case "${SYSTEM}:${RELEASE}:${VERSION}:${MACHINE}" in echo "${MACHINE}-v11-${SYSTEM}"; exit 0; ;; + # The following combinations are supported + # MINGW64* on x86_64 => mingw64 + # MINGW32* on x86_64 => mingw + # MINGW32* on i?86 => mingw + # + # MINGW64* on i?86 isn't expected to work... + MINGW64*:*:*:x86_64) + echo "${MACHINE}-whatever-mingw64"; exit 0; + ;; MINGW*) echo "${MACHINE}-whatever-mingw"; exit 0; ;; @@ -801,8 +810,6 @@ case "$GUESSOS" in options="$options no-asm" fi ;; - i[3456]86-*-mingw) OUT="mingw" ;; - x86_64-*-mingw) OUT="mingw64" ;; # these are all covered by the catchall below i[3456]86-*-cygwin) OUT="Cygwin-x86" ;; *-*-cygwin) OUT="Cygwin-${MACHINE}" ;;
bluetooth: fix missing braces and indentation
@@ -578,7 +578,7 @@ void bta_gattc_clear_notif_registration(tBTA_GATTC_SERV *p_srcb, UINT16 conn_id, for (i = 0 ; i < BTA_GATTC_NOTIF_REG_MAX; i ++) { if (p_clrcb->notif_reg[i].in_use && !bdcmp(p_clrcb->notif_reg[i].remote_bda, remote_bda)) - + { /* It's enough to get service or characteristic handle, as * clear boundaries are always around service. */ @@ -587,6 +587,7 @@ void bta_gattc_clear_notif_registration(tBTA_GATTC_SERV *p_srcb, UINT16 conn_id, memset(&p_clrcb->notif_reg[i], 0, sizeof(tBTA_GATTC_NOTIF_REG)); } } + } } else { APPL_TRACE_ERROR("can not clear indication/notif registration for unknown app"); }
Samples: Host Exerciser enhancement. Fixing conflicts.
@@ -34,8 +34,6 @@ BuildRequires: python3-jsonschema BuildRequires: python3-pip BuildRequires: python3-virtualenv BuildRequires: systemd-devel -BuildRequires: libcap-devel -BuildRequires: libudev-devel %description Open Programmable Acceleration Engine (OPAE) is a software framework
Make `peek()` tail recursive instead of using `goto`. Compilation is identical with `gcc` or `clang` at `-O3` or `-O2`.
@@ -900,10 +900,7 @@ static char const *readInterpolation(size_t depth); static int peek(void) { - int c; - -restart: - c = peekInternal(0); + int c = peekInternal(0); if (lexerState->macroArgScanDistance > 0) return c; @@ -924,7 +921,7 @@ restart: * expanded, so skip it and keep peeking. */ if (!str || !str[0]) - goto restart; + return peek(); beginExpansion(str, c == '#', NULL); @@ -945,7 +942,7 @@ restart: if (str && str[0]) beginExpansion(str, false, str); - goto restart; + return peek(); } return c;
HV: trace leaf and subleaf of cpuid We care more about leaf and subleaf of cpuid than vcpu_id. So, this patch changes the cpuid trace-entry to trace the leaf and subleaf of this cpuid vmexit.
@@ -274,6 +274,7 @@ int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu) rbx = vcpu_get_gpreg(vcpu, CPU_REG_RBX); rcx = vcpu_get_gpreg(vcpu, CPU_REG_RCX); rdx = vcpu_get_gpreg(vcpu, CPU_REG_RDX); + TRACE_2L(TRACE_VMEXIT_CPUID, rax, rcx); guest_cpuid(vcpu, (uint32_t *)&rax, (uint32_t *)&rbx, (uint32_t *)&rcx, (uint32_t *)&rdx); vcpu_set_gpreg(vcpu, CPU_REG_RAX, rax); @@ -281,8 +282,6 @@ int32_t cpuid_vmexit_handler(struct acrn_vcpu *vcpu) vcpu_set_gpreg(vcpu, CPU_REG_RCX, rcx); vcpu_set_gpreg(vcpu, CPU_REG_RDX, rdx); - TRACE_2L(TRACE_VMEXIT_CPUID, (uint64_t)vcpu->vcpu_id, 0UL); - return 0; }
kdbtypes.h: use PRI-macros for C99
@@ -64,25 +64,12 @@ typedef uint16_t kdb_unsigned_short_t; typedef uint32_t kdb_unsigned_long_t; typedef uint64_t kdb_unsigned_long_long_t; -#if SIZEOF_LONG == 4 -#define ELEKTRA_LONG_F "%ld" -#define ELEKTRA_UNSIGNED_LONG_F "%lu" -#elif SIZEOF_INT == 4 -#define ELEKTRA_LONG_F "%d" -#define ELEKTRA_UNSIGNED_LONG_F "%u" -#endif - -#if SIZEOF_LONG == 8 && !defined(__APPLE__) -#define ELEKTRA_LONG_LONG_F "%ld" -#define ELEKTRA_LONG_LONG_S strtol -#define ELEKTRA_UNSIGNED_LONG_LONG_F "%lu" -#define ELEKTRA_UNSIGNED_LONG_LONG_S strtoul -#elif defined(HAVE_SIZEOF_LONG_LONG) && (SIZEOF_LONG_LONG == 8) -#define ELEKTRA_LONG_LONG_F "%lld" +#define ELEKTRA_LONG_F "%" PRIi32 +#define ELEKTRA_UNSIGNED_LONG_F "%" PRIu32 +#define ELEKTRA_LONG_LONG_F "%" PRIi64 #define ELEKTRA_LONG_LONG_S strtoll -#define ELEKTRA_UNSIGNED_LONG_LONG_F "%llu" +#define ELEKTRA_UNSIGNED_LONG_LONG_F "%" PRIu64 #define ELEKTRA_UNSIGNED_LONG_LONG_S strtoull -#endif #else // for C89 typedef unsigned char kdb_boolean_t;
Make Key::setString accept a (const) reference This avoids an unnecessary copy. The underlying key copies the data anyway.
@@ -175,7 +175,7 @@ public: inline void set (T x); inline std::string getString () const; - inline void setString (std::string newString); + inline void setString (const std::string & newString); inline ssize_t getStringSize () const; typedef void (*func_t) (); @@ -1205,7 +1205,7 @@ inline void Key::setCallback (callback_t fct) /** * @copydoc keySetString */ -inline void Key::setString (std::string newString) +inline void Key::setString (const std::string & newString) { ckdb::keySetString (getKey (), newString.c_str ()); }
doc: describe how to run single tests
@@ -50,6 +50,12 @@ The alternative to `make run_nokdbtests`: ctest -T Test --output-on-failure -LE kdbtests -j 6 ``` +To only run tests whose names match a regular expression, you can use: + +```sh +ctest -V -R <regex> +``` + ## Required Environment To run the tests successfully, the environment
[software] Use DMA for memcpy
@@ -39,6 +39,7 @@ int volatile error __attribute__((section(".l1"))); dump(start, 2); dump(end, 3); +dump(dma, 7); void *mempool_memcpy(void *destination, const void *source, size_t num) { if ((((size_t)destination | (size_t)source | num) & @@ -145,17 +146,27 @@ int main() { } int32_t volatile *const src_a = (int32_t *)0x80002000; - int32_t volatile *const src_b = (int32_t *)0x80004000; + int32_t volatile *const src_b = (int32_t *)0x000C0000; int32_t volatile *const src_c = (int32_t *)0x80008000; int32_t volatile *const src_d = (int32_t *)0x8000C000; + // Init + for (int i = core_id; i < SIZE; i+=num_cores) { + src_a[i] = i; + } + + mempool_barrier(num_cores); + // Benchmark mempool_start_benchmark(); uint32_t time = mempool_get_timer(); dump_start(time); - if (core_id == 0) - { - dma_memcpy((void *)src_b, (const void *)src_a, SIZE * 4); + if (core_id == 0) { + dma_memcpy((void *)src_b, (const void *)src_a, 16 * 4 * 4); + while (*((volatile uint32_t *)(DMA_ADDRESS + MEMPOOL_DMA_FRONTEND_STATUS_REG_OFFSET)) != 0); + for (int i = 0; i < SIZE; ++i) { + dump_dma(src_b[i]); + } } time = mempool_get_timer() - time; dump_end(time);
hdata/i2c: log unknown i2c devices An i2c device is unknown if either the i2c device list is outdated or the device is marked as unknown (0xFF) in the hdat. This logs both cases.
@@ -165,6 +165,7 @@ int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index, uint32_t i2c_addr; uint32_t version; uint32_t size; + uint32_t purpose; int i, count; /* @@ -226,11 +227,9 @@ int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index, */ i2c_addr = dev->i2c_addr >> 1; - prlog(PR_TRACE, "HDAT I2C: found e%dp%d - %x\n", - dev->i2cm_engine, dev->i2cm_port, i2c_addr); - + purpose = be32_to_cpu(dev->purpose); type = map_type(dev->type); - label = map_label(be32_to_cpu(dev->purpose)); + label = map_label(purpose); if (type) { compat = type->compat; name = type->name; @@ -239,6 +238,20 @@ int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index, compat = NULL; } + /* + * An i2c device is unknown if either the i2c device list is + * outdated or the device is marked as unknown (0xFF) in the + * hdat. Log both cases to see what/where/why. + */ + if (!type || dev->type == 0xFF) + prlog(PR_WARNING, "HDAT I2C: found e%dp%d - %s@%x (%#x:%s)\n", + dev->i2cm_engine, dev->i2cm_port, name, i2c_addr, + purpose, label); + else + prlog(PR_TRACE, "HDAT I2C: found e%dp%d - %s@%x (%#x:%s)\n", + dev->i2cm_engine, dev->i2cm_port, name, i2c_addr, + purpose, label); + node = dt_new_addr(bus, name, i2c_addr); if (!node) continue;
One instance of kernel_4x1 is used even on SKX
@@ -172,6 +172,7 @@ static void sgemv_kernel_4x2( BLASLONG n, FLOAT **ap, FLOAT *x, FLOAT *y, FLOAT } +#endif #endif #ifndef HAVE_KERNEL_4x1 @@ -248,8 +249,6 @@ static void sgemv_kernel_4x1(BLASLONG n, FLOAT *ap, FLOAT *x, FLOAT *y, FLOAT *a #endif -#endif - static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest) __attribute__ ((noinline)); static void add_y(BLASLONG n, FLOAT *src, FLOAT *dest, BLASLONG inc_dest)
docs: update description about deploy contracts
@@ -84,7 +84,7 @@ See blockchain official website for details: #### Deploy the smart contract The smart contracts used in the demo locate in ./contract. -Follow blockchain official website for details on how to compile and deploy the contract. +Follow the blockchain's official website for details on how to compile and deploy the contract. #### Modify the demo code Modify the demo code in ./demo:
Fixed a bug introduced in the changeset
@@ -347,7 +347,7 @@ nxt_master_start_worker_process(nxt_task_t *task, nxt_runtime_t *rt, group = (char *) last; nxt_memcpy(group, app_conf->group.start, app_conf->group.length); - end = nxt_pointer_to(group, app_conf->group.length); + last = nxt_pointer_to(group, app_conf->group.length); *last++ = '\0'; } else {
tests: internal: fuzzer: fix missing data type declaration
#define GET_MOD_EQ(max, idx) (data[0] % max) == idx #define MOVE_INPUT(offset) data += offset; size -= offset; -char *get_null_terminated(size_t size, char **data, *total_data_size) { +char *get_null_terminated(size_t size, char **data, size_t *total_data_size) +{ char *tmp = flb_malloc(size+1); memcpy(tmp, *data, size); tmp[size] = '\0';
nimble/ll: Check if truncated on CRC error
@@ -3199,7 +3199,7 @@ scan_continue: #if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV) if (aux_data) { - if (evt_possibly_truncated) { + if (evt_possibly_truncated || !BLE_MBUF_HDR_CRC_OK(hdr)) { ble_ll_scan_end_adv_evt(aux_data); }
ya tool: add flexible I/O tester Note: mandatory check (NEED_CHECK) was skipped
"vmexec": { "description": "VMEXEC run script inside qemu-vm" }, "yfm": { "description": "YFM-extended markdown processor for Cloud deploy" }, "aws": { "description": "Run aws" }, - "sedem": { "description": "SEDEM tool - Service management tool for Maps services" } + "sedem": { "description": "SEDEM tool - Service management tool for Maps services" }, + "fio": { "description": "flexible I/O tester" }, }, "toolchain": { "msvc2017-i686": { {"host": {"os": "LINUX"}, "default": true} ] }, + "fio": { + "tools": { + "fio": { "bottle" : "fio", "executable": "fio" }, + }, + "platforms": [ + {"host": {"os": "LINUX"}, "default": true} + ] + }, "vmexec": { "tools": { "vmexec": { "bottle" : "vmexec", "executable": "vmexec" }, "qemu-nbd": ["qemu", "bin", "qemu-nbd"] } }, + "fio": { + "formula": { + "sandbox_id": [498288960], + "match": "infra/kernel/tools/fio/build/fio-static.tar.gz" + }, + "executable": { + "fio": ["fio", "bin", "fio"], + } + }, "vmexec": { "formula": { "sandbox_id": [496883223],
sim: remove deprecated usage of sync::ONCE_INIT Switch to `Once::new()`.
//! the tests. use env_logger; -use std::sync::{Once, ONCE_INIT}; +use std::sync::Once; -static INIT: Once = ONCE_INIT; +static INIT: Once = Once::new(); /// Setup the logging system. Intended to be called at the beginning of each test. pub fn setup() {
vapi: break if parsing progress cannot be made
@@ -393,6 +393,7 @@ class JsonParser(object): if progress <= last_progress: # cannot make forward progress self.exceptions.extend(exceptions) + break exceptions = [] last_progress = progress progress = 0
BCH address validation
@@ -651,9 +651,17 @@ namespace MiningCore.Blockchain.Bitcoin isPoS = difficultyResponse.Values().Any(x => x.Path == "proof-of-stake"); // Create pool address script from response - poolAddressDestination = !isPoS ? - AddressToDestination(validateAddressResponse.Address) : - new PubKey(validateAddressResponse.PubKey); + if (!isPoS) + { + // bitcoincashd returns a different address than what was passed in + if(!validateAddressResponse.Address.StartsWith("bitcoincash:")) + poolAddressDestination = AddressToDestination(validateAddressResponse.Address); + else + poolAddressDestination = AddressToDestination(poolConfig.Address); + } + + else + poolAddressDestination = new PubKey(validateAddressResponse.PubKey); // chain detection if (!hasLegacyDaemon)
preset optimization level for apple clang
-//#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3 -//#pragma clang optimize off -//#endif +#if defined(__apple_build_version__) && __clang_major__ == 11 && __clang_minor__ == 0 && __clang_patchlevel__ == 3 +#pragma clang optimize "O2" +#endif /* %0 = "+r"(a_pointer), %1 = "+r"(b_pointer), %2 = "+r"(c_pointer), %3 = "+r"(ldc_in_bytes), %4 for k_count, %5 for c_store */
Add coreclr to .NET modules
@@ -5448,6 +5448,7 @@ BOOLEAN NTAPI PhpIsDotNetEnumProcessModulesCallback( ) { static PH_STRINGREF clrString = PH_STRINGREF_INIT(L"clr.dll"); + static PH_STRINGREF clrcoreString = PH_STRINGREF_INIT(L"coreclr.dll"); static PH_STRINGREF mscorwksString = PH_STRINGREF_INIT(L"mscorwks.dll"); static PH_STRINGREF mscorsvrString = PH_STRINGREF_INIT(L"mscorsvr.dll"); static PH_STRINGREF mscorlibString = PH_STRINGREF_INIT(L"mscorlib.dll"); @@ -5530,6 +5531,10 @@ BOOLEAN NTAPI PhpIsDotNetEnumProcessModulesCallback( { *(PULONG)Context |= PH_CLR_JIT_PRESENT; } + else if (PhEqualStringRef(&baseDllName, &clrcoreString, TRUE)) + { + *(PULONG)Context |= PH_CLR_JIT_PRESENT; + } return TRUE; }
synthetic: one TCP read per incoming request
@@ -18,7 +18,7 @@ extern crate test; use std::collections::BTreeMap; use std::f32::INFINITY; use std::io; -use std::io::{ErrorKind, Write}; +use std::io::{ErrorKind, Read, Write}; use std::net::{Ipv4Addr, SocketAddrV4}; use std::slice; use std::str::FromStr; @@ -185,10 +185,11 @@ fn run_linux_udp_server(backend: Backend, addr: SocketAddrV4, nthreads: usize, w } fn socket_worker(socket: &mut Connection, worker: FakeWorker) { - let mut v = vec![0; 4096]; + let mut v = vec![0; 16]; let mut r = || { + socket.read_exact(&mut v[..16])?; + let payload = Payload::deserialize(&mut &v[..16])?; v.clear(); - let payload = Payload::deserialize(socket)?; worker.work(payload.work_iterations); payload.serialize_into(&mut v)?; Ok(socket.write_all(&v[..])?)
Packages: added epel-release to unit-go dependencies on CentOS 6.
@@ -12,7 +12,12 @@ MODULE_INSTARGS_go= go-install MODULE_SOURCES_go= unit.example-go-app \ unit.example-go-config +ifeq ($(OSVER), centos6) +BUILD_DEPENDS_go= epel-release golang +else BUILD_DEPENDS_go= golang +endif + BUILD_DEPENDS+= $(BUILD_DEPENDS_go) define MODULE_DEFINITIONS_go
CI/CD: fix a typo
@@ -124,7 +124,7 @@ jobs: tar xzf sgx_rpm_local_repo.tgz; yum-config-manager --add-repo sgx_rpm_local_repo; yum makecache; - yum install --nogpgcheck -y libsgx-dcap-quote-verify-${{ env.DCAP_CENTOS_VERSION }} libsgx-dcap-default-qply-${{ env.DCAP_CENTOS_VERSION }}; + yum install --nogpgcheck -y libsgx-dcap-quote-verify-${{ env.DCAP_CENTOS_VERSION }} libsgx-dcap-default-qpl-${{ env.DCAP_CENTOS_VERSION }}; rm -f sgx_rpm_local_repo.tgz; cd /root/inclavare-containers/${{ matrix.tag }}; rpm -ivh *.rpm'
sse: don't use _mm_undefined_* on MSVC
@@ -3191,7 +3191,7 @@ simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) { # if __has_builtin(__builtin_ia32_undef128) # define SIMDE__HAVE_UNDEFINED128 # endif -# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) +# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER) # define SIMDE__HAVE_UNDEFINED128 # endif #endif
include/switch.h: Format with clang-format BRANCH=none TEST=none
*/ void switch_interrupt(enum gpio_signal signal); #else -static inline void switch_interrupt(enum gpio_signal signal) { } +static inline void switch_interrupt(enum gpio_signal signal) +{ +} #endif /* !CONFIG_SWITCH */ #endif /* __CROS_EC_SWITCH_H */
docker: update container documentation Add docker container usage examples and document the newly included +code scripts.
@@ -29,6 +29,51 @@ The image includes `EXPOSE` directives for TCP port 80 and UDP port 34343. Port You can either pass the `-P` flag to docker to map ports directly to the corresponding ports on the host, or map them individually with `-p` flags. For local testing the latter is often convenient, for instance to remap port 80 to an unprivileged port. +You should be able to use port mapping for most purposes but you can force Ames to use a custom port. +`--port=$AMES_PORT` can be passed as an argument to the `docker start` command. Passing `--port=13436` for example, would use port 13436. + +### Examples +Creating a volume for ~sampel=palnet: +``` +docker volume create sampel-palnet +``` + +Copying key to sampel-palnet's volume (assumes default docker location) +``` +sudo cp ~/sampel-palnet.key /var/lib/docker/volumes/sampel-palnet/_data/sampel-palnet.key +``` + +Using that volume and launching ~sampel-palnet on host port 8080 with Ames talking on host port 27000: +``` +docker run -d -p 8080:80 -p 27000:34343/udp --name sampel-palnet \ + --mount type=volume,source=sampel-palnet,destination=/urbit \ + tloncorp/urbit +``` + +Using host port 8088 with Ames talking on host port 23232 while forcing Ames to start internally on port 13436: +``` +docker run -d -p 8088:80 -p 23232:13436/udp --name sampel-palnet \ + --mount type=volume,source=sampel-palnet,destination=/urbit \ + tloncorp/urbit --port=13436 +``` + +### Getting and resetting the Landscape +code +This docker image includes tools for retrieving and resetting the Landscape login code belonging to the planet, for programmatic use so the container does not need a tty. These scripts can be called using `docker container exec`. 
+ +Getting the code: +``` +$ docker container exec sampel-palnet /bin/get-urbit-code +sampel-sampel-sampel-sampel +``` + +Resetting the code: +``` +$ docker container exec sampel-palnet /bin/reset-urbit-code +OK +``` + +Once the code has been reset the new code can be obtained from `/bin/get-urbit-code`. + ## Extending You likely do not want to extend this image. External applications which interact with Urbit do so primarily via an HTTP API, which should be exposed as described above. For containerized applications using Urbit, it is more appropriate to use a container orchestration service such as Docker Compose or Kubernetes to run Urbit alongside other containers which will interface with its API.
config tool: hide PTM in schema change acrn:view attribute to ""
@@ -400,7 +400,7 @@ argument and memory.</xs:documentation> </xs:annotation> </xs:element> <xs:element name="PTM" type="Boolean" default="y" minOccurs="0"> - <xs:annotation acrn:title="Precision Time Measurement" acrn:applicable-vms="pre-launched, post-launched" acrn:views="advanced"> + <xs:annotation acrn:title="Precision Time Measurement" acrn:applicable-vms="pre-launched, post-launched" acrn:views=""> <xs:documentation>Enable virtualization of PCIe Precision Time Measurement (PTM) mechanism for devices with PTM capability and for real-time application. The hypervisor provides PCIe root port emulation instead of host bridge emulation for the VM. PTM coordinates timing between the device and root port with the device's local timebases without relying on software.</xs:documentation> </xs:annotation> </xs:element>
DOCS: Mention that libcrypto has helper functions for OSSL_PARAMs Fixes
@@ -71,6 +71,12 @@ is NULL. The usual full terminating template is: This can also be specified using L<OSSL_PARAM_END(3)>. +=head2 Functional support + +Libcrypto offers a limited set of helper functions to handle +B<OSSL_PARAM> items and arrays, please see L<OSSL_PARAM_get_int(3)>. +Developers are free to extend or replace those as they see fit. + =head2 B<OSSL_PARAM> fields =over 4
fix bugs in optlen output when the supplied size is not big enough for timeout events
@@ -191,11 +191,16 @@ int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen) int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen) { int ret = 0; if ((level == SOL_SOCKET) && ((optname == SO_RCVTIMEO) || (optname == SO_SNDTIMEO))) { + if (*optlen >= sizeof (struct timeval)) { struct timeval *tv = (struct timeval *)optval; - DWORD timeout = 0; socklen_t dwlen = 0; + DWORD timeout = 0; + socklen_t dwlen = 0; ret = getsockopt(sockfd, level, optname, (char *)&timeout, &dwlen); tv->tv_sec = timeout / 1000; tv->tv_usec = timeout * 1000; + } else { + ret = WSAEFAULT; + } *optlen = sizeof (struct timeval); } else { ret = getsockopt(sockfd, level, optname, (char*)optval, optlen);
small changes to text
@@ -491,7 +491,7 @@ The \citet{Tinker2010} model parameterizes the halo mass function and the halo b f(\nu) &=& \alpha[1+(\beta\nu)^{-2\phi}]\nu^{2\eta}e(-\gamma\nu^2/2). \end{eqnarray} -Again, while high {\em numerical} accuracy has been verified, there is a remaining uncertainty. \citet{Tinker2010} found a $\sim6\%$ scatter when determining the halo bias due to differences in simulations alone. In addition, this parameterization does not include a careful exploration of any impact due to changes in the dark energy equation of state. As with the halo mass function, studies will be required to reach accuracy at the percent level for any cosmological predictions. \elisa{I think we need some further references here of scale-dependen bias, assembly bias, etc.} +Again, while high {\em numerical} accuracy has been verified, there is a remaining uncertainty to the {\em physical accuracy}. \citet{Tinker2010} found a $\sim6\%$ scatter when determining the halo bias due to differences in simulations alone. In addition, this parameterization does not include a careful exploration of any impact due to changes in the dark energy equation of state. As with the halo mass function, studies will be required to reach accuracy at the percent level for any cosmological predictions. \elisa{I think we need some further references here of scale-dependen bias, assembly bias, etc.} We note that many observables are tuned to utilizing a linear halo bias (e.g. scale independent). While the current functionality of \ccl is to return the \citet{Tinker2010} model for halo bias, it is entirely possible to generate simpler models of halo bias and pass this into further functions. 
\elisa{I think this is a bit confusing: from this I understand that \ccl can predict $b(k)$ but cannot pass anything but linear bias to the angular power spectra.} @@ -706,7 +706,7 @@ Also note that for $\sigma(M)$, it is important to set the desired precision lev Derivatives are calculated utilizing a spline that is built off of the previously described $\sigma(M)$ spline. As such, these splines cover the range from $10^6$ to $10^{17} M_\odot$. For each value of $\log(M)$ in our spline evaluation, we calculate the value of $\sigma(M)$ half a step in either direction. We use the difference compared to the mass spacing to calculate an approximate derivative, which is then used in the spline interpolation. This has been tested to meet our necessary precision for the halo mass function within the mass range explored by \citet{Tinker2010}. We note that there the accuracy is reduced at the edges of these splines and exploring extreme mass ranges may require changes in the parameters to initialize these splines. -In order to accomodate a wide range of values of the overdensity parameter $\Delta$, we have generated a spline interpolation between best fit values as defined by \citet{Tinker2008} and \citet{Tinker2010}. This covers a dynamic range from $\Delta=200$ to $3200$. Within this range, we interpolate in the space of the fit parameter and $\log\Delta$ using the Akima interpolation built from piecewise third order polynomials. We have chosen this rather than the fitting formulas utilized in \citet{Tinker2010} in order to assure high precision match to the Tinker halo mass function when choosing a value of $\Delta$ directly from the paper. +In order to accomodate a wide range of values of the overdensity parameter $\Delta$, we have generated a spline interpolation between best fit values as defined by \citet{Tinker2008} and \citet{Tinker2010}. This covers a dynamic range from $\Delta=200$ to $3200$, with respect to the mean density. 
Within this range, we interpolate in the space of the fit parameter and $\log\Delta$ using the Akima interpolation built from piecewise third order polynomials. We have chosen this rather than the fitting formulas utilized in \citet{Tinker2010} in order to assure high precision match to the Tinker halo mass function when choosing a value of $\Delta$ directly from the paper. %-------------------------------------------------------------------------------