message
stringlengths
6
474
diff
stringlengths
8
5.22k
Decisions: Reformat text about internal cache
@@ -41,10 +41,14 @@ internal caches lead to duplication of memory consumption in some cases caches cannot be avoided? -> filesys, databases? -cache discussion: -+ not more keys than needed -+ kdbGet avoids IO even if done somewhere else -+ KDB handles could be more locally +### Cache Discussion + +**Pros:** +- not more keys than needed +- kdbGet avoids IO even if done somewhere else +- KDB handles could be more locally + +**Cons:** - not possible to access cache with current architecture, KDB high level API - implementation overhead - where should the caches be
Check argc>0 before strcasecmp. Should shortcircuit
@@ -921,19 +921,13 @@ struct cmd_results *cmd_move(int argc, char **argv) { --argc; ++argv; } - if (strcasecmp(argv[0], "window") == 0 || - strcasecmp(argv[0], "container") == 0) { - --argc; - if (argc > 0) { - ++argv; - } + if (argc > 0 && (strcasecmp(argv[0], "window") == 0 || + strcasecmp(argv[0], "container") == 0)) { + --argc; ++argv; } - if (strcasecmp(argv[0], "to") == 0) { - --argc; - if (argc > 0) { - ++argv; - } + if (argc > 0 && strcasecmp(argv[0], "to") == 0) { + --argc; ++argv; } if (!argc) {
CMake: Update next release notes
@@ -419,7 +419,7 @@ This section keeps you up-to-date with the multi-language support provided by El - Fix warning for CMP0115 _(0x6178656c)_ - <<TODO>> -- <<TODO>> +- Fix developer warning for package DISCOUNT. _(Dennis Toth @dtdirect)_ - Pass `--stacktrace` to gradle for the JNA builds. _(Maximilian Irlinger @atmaxinger)_ - <<TODO>> - <<TODO>>
test_psa_compliance.py: checkout fix-pr-5139 tag
@@ -47,7 +47,7 @@ EXPECTED_FAILURES = { # # Web URL: https://github.com/bensze01/psa-arch-tests/tree/fixes-for-mbedtls-3 PSA_ARCH_TESTS_REPO = 'https://github.com/bensze01/psa-arch-tests.git' -PSA_ARCH_TESTS_REF = 'fix-pr-5272' +PSA_ARCH_TESTS_REF = 'fix-pr-5139-2' #pylint: disable=too-many-branches,too-many-statements def main():
fix(discord.h): remove dead comment
@@ -797,13 +797,6 @@ void* discord_set_data(struct discord *client, void *data); */ void* discord_get_data(struct discord *client); -/** - * @brief Replace the Client presence with a struct discord_presence_status - * - * @note discord_set_presence() is a more comprehensible alternative - * @see discord_set_presence() - */ - /** * @brief Set the Client presence state *
Add payload marker within opencoap_send instead of in the app.
@@ -485,6 +485,11 @@ owerror_t opencoap_send( tokenPos+=2; } + if (msg->length > 0 ) { // contains payload, add payload marker + packetfunctions_reserveHeaderSize(msg,1); + msg->payload[0] = COAP_PAYLOAD_MARKER; + } + // fake run of opencoap_options_encode in order to get the necessary length packetfunctions_reserveHeaderSize(msg, opencoap_options_encode(NULL, options, optionsLen, TRUE));
vtx: fix pitmode write
@@ -270,7 +270,7 @@ static void tramp_set_power_level(vtx_power_level_t power) { } static void tramp_set_pit_mode(vtx_pit_mode_t pit_mode) { - serial_tramp_send_payload('I', pit_mode == VTX_PIT_MODE_ON ? 1 : 0); + serial_tramp_send_payload('I', pit_mode == VTX_PIT_MODE_ON ? 0 : 1); tramp_settings.frequency = 0; }
show weak candidate on start
@@ -3188,6 +3188,7 @@ snprintf(servermsg, SERVERMSG_MAX, "\e[?25l\nstart capturing (stop with ctrl+c)\ "FILTERLIST ACCESS POINT.: %d entries\n" "FILTERLIST CLIENT.......: %d entries\n" "FILTERMODE..............: %d\n" + "WEAK CANDIDATE..........: %s\n" "PREDEFINED ACCESS POINT.: %d entries\n" "MAC ACCESS POINT........: %02x%02x%02x%02x%02x%02x (incremented on every new client)\n" "MAC CLIENT..............: %02x%02x%02x%02x%02x%02x\n" @@ -3197,7 +3198,7 @@ snprintf(servermsg, SERVERMSG_MAX, "\e[?25l\nstart capturing (stop with ctrl+c)\ "\n", nmeasentence, interfacename, mac_orig[0], mac_orig[1], mac_orig[2], mac_orig[3], mac_orig[4], mac_orig[5], drivername, driverversion, driverfwversion, - maxerrorcount, filteraplistentries, filterclientlistentries, filtermode, + maxerrorcount, filteraplistentries, filterclientlistentries, filtermode, weakcandidate, beaconextlistlen, mac_myap[0], mac_myap[1], mac_myap[2], mac_myap[3], mac_myap[4], mac_myap[5], mac_myclient[0], mac_myclient[1], mac_myclient[2], mac_myclient[3], mac_myclient[4], mac_myclient[5],
Check OpenGL error code before texture allocation. This will make sure that the MacOS error I've been having is actually caused by this code.
@@ -73,6 +73,8 @@ static int upload_gl_tile(struct TCOD_TilesetAtlasOpenGL* atlas, int tile_id) { */ TCOD_NODISCARD static int prepare_gl_atlas(struct TCOD_TilesetAtlasOpenGL* atlas) { + GLenum gl_error = glGetError(); + if (gl_error) return TCOD_set_errorvf("Unexpected OpenGL error before texture allocation: %u", gl_error); int new_size = atlas->texture_size ? atlas->texture_size : 64; int max_size; glGetIntegerv(GL_MAX_TEXTURE_SIZE, &max_size); @@ -103,7 +105,7 @@ static int prepare_gl_atlas(struct TCOD_TilesetAtlasOpenGL* atlas) { glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, atlas->texture_size, atlas->texture_size, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL); glBindTexture(GL_TEXTURE_2D, 0); - GLenum gl_error = glGetError(); + gl_error = glGetError(); switch (gl_error) { case GL_NO_ERROR: break;
examples/jpg: increase maximum img size
@@ -36,7 +36,7 @@ static const char* const cdjpeg_message_table[] = { NULL }; -static uint64_t max_total_pixels = 100000000ULL; /* 100M */ +static uint64_t max_total_pixels = 1000000000ULL; /* 1G */ int LLVMFuzzerInitialize(int* argc, char*** argv) { null_fd = open("/dev/null", O_WRONLY);
Make FPGA targets build properly out-of-tree [ci skip]
project(de2_115) +# The working directory for these is the source dir, which means these will +# put output files in the source hierarchy even if this is an out-of-tree +# build. This is because the Quartus project file specifies relative paths. +# XXX it might be cleaner to have this generate the .qsf file. + add_custom_target(synthesize COMMAND quartus_map de2_115 COMMAND quartus_fit de2_115 @@ -23,10 +28,12 @@ add_custom_target(synthesize COMMAND quartus_sta de2_115 COMMAND ./print_summary.py COMMENT "Synthesizing" - DEPENDS bootrom) + DEPENDS bootrom + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) add_custom_target(program - COMMAND quartus_pgm -m jtag -o \"P\;output_files/de2_115.sof\") + COMMAND quartus_pgm -m jtag -o \"P\;output_files/de2_115.sof\" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
adding highlights section to top of ChangeLog
-Version 1.3.1 (June 2017) +# -*- mode: sh; fill-column: 120; -*- + +Version 1.3.1 (16 June 2017) + +[Important Highlights/Notices] + + * A new compiler variant (gnu7) is introduced with this release. In the case of a fresh install, OpenHPC recipes + default to installing the new variant along with matching runtimes and libraries. However, if upgrading a previously + installed system, administrators can opt-in to enable the gnu7 variant . This procedure is detailed in Appendix B of + the OpenHPC Install Guide(s). + + * There are significant changes included in the warewulf-httpd.conf file that ships with the + warewulf-provision-server-ohpc package. If upgrading from a version prior to 1.3, the updated config file will be + saved as /etc/httpd/conf.d/warewulf-httpd.conf.rpmnew locally. You will need to copy this new version to the + production file and restart the web server to ensure correct provisioning behavior. As an example for CentOS: + + [sms]# cp /etc/httpd/conf.d/warewulf-httpd.conf.rpmnew /etc/httpd/conf.d/warewulf-httpd.conf + [sms]# systemctl restart httpd [General] * addition of example xCAT based recipe (https://github.com/openhpc/ohpc/issues/323) * inclusion of optional BeeGFS client enablement for installation recipes (https://github.com/openhpc/ohpc/issues/340) - * switch to use of convenience meta packages versus groups/patterns used in previous releases (https://github.com/openhpc/ohpc/issues/443) + * switch to use of convenience meta packages versus groups/patterns used in previous releases. A list of provided + meta-packages can be found in Appendix E of the OpenHPC Install Guide(s). 
+ (https://github.com/openhpc/ohpc/issues/443) + * 'R' language package name changed from R_base-ohpc to R-gnu7-ohpc, and the lmod module changed from R_base to R + (https://github.com/openhpc/ohpc/issues/472) * fix missing dependency for phdf5 (https://github.com/openhpc/ohpc/issues/436) + * documentation fix for installation template paths (https://github.com/openhpc/ohpc/issues/423) * split pdsh packaging to facilitate optional SLURM add-on (https://github.com/openhpc/ohpc/issues/435) * updated package groups for consistency of several administrative packages * Lmod packaging updated to conflict SLES Modules package (https://github.com/openhpc/ohpc/issues/440) @@ -13,7 +35,6 @@ Version 1.3.1 (June 2017) * updates to OHPC_macros to support builds outside of OBS (https://github.com/openhpc/ohpc/pull/459) * variety of component version updates and other additions highlighted below - [Component Additions] * hwloc-ohpc (v1.11.6)
lyd mods BUFGIX recursive implemented dependency check Fixes
@@ -1188,6 +1188,37 @@ cleanup: return err_info; } +/** + * @brief Check data dependencies of a module and all its implemented imports, recursively. + * + * @param[in] ly_mod Libyang module to check. + * @param[in] sr_mods Sysrepo module data. + * @param[out] fail Whether any dependant module was not implemented. + * @return err_info, NULL on success. + */ +static sr_error_info_t * +sr_lydmods_check_data_deps_r(const struct lys_module *ly_mod, const struct lyd_node *sr_mods, int *fail) +{ + sr_error_info_t *err_info = NULL; + uint32_t i; + + /* check data deps of this module */ + if ((err_info = sr_lydmods_check_data_deps(ly_mod, sr_mods, fail)) || *fail) { + return err_info; + } + + /* check data deps of all the implemented dependencies, recursively */ + for (i = 0; i < ly_mod->imp_size; ++i) { + if (ly_mod->imp[i].module->implemented) { + if ((err_info = sr_lydmods_check_data_deps_r(ly_mod->imp[i].module, sr_mods, fail)) || *fail) { + return err_info; + } + } + } + + return NULL; +} + /** * @brief Load new installed modules into context from sysrepo module data. * @@ -1240,7 +1271,7 @@ sr_lydmods_sched_ctx_install_modules(const struct lyd_node *sr_mods, struct ly_c } /* check that all the dependant modules are implemented */ - if ((err_info = sr_lydmods_check_data_deps(ly_mod, sr_mods, fail)) || *fail) { + if ((err_info = sr_lydmods_check_data_deps_r(ly_mod, sr_mods, fail)) || *fail) { goto cleanup; }
Disable pcap to file by default: accidentally enabled in
@@ -352,7 +352,8 @@ PCI_BUS= pci.0 endif QEMU_TAP= -netdev tap,id=n0,ifname=tap0,script=no,downscript=no QEMU_NET= -device $(NETWORK)$(NETWORK_BUS),mac=7e:b8:7e:87:4a:ea,netdev=n0 $(QEMU_TAP) -QEMU_USERNET= -device $(NETWORK)$(NETWORK_BUS),netdev=n0 -netdev user,id=n0,hostfwd=tcp::8080-:8080,hostfwd=tcp::9090-:9090,hostfwd=udp::5309-:5309 -object filter-dump,id=filter0,netdev=n0,file=/tmp/nanos.pcap +QEMU_USERNET= -device $(NETWORK)$(NETWORK_BUS),netdev=n0 -netdev user,id=n0,hostfwd=tcp::8080-:8080,hostfwd=tcp::9090-:9090,hostfwd=udp::5309-:5309 +#QEMU_USERNET+= -object filter-dump,id=filter0,netdev=n0,file=/tmp/nanos.pcap QEMU_FLAGS= #QEMU_FLAGS+= -smp 4 #QEMU_FLAGS+= -d int -D int.log
Fix SDL2_net.framework installation on Mac
@@ -179,6 +179,7 @@ function(blit_executable NAME SOURCES) # install the SDL frameworks install(DIRECTORY ${SDL2_LIBRARIES} DESTINATION "bin/$<TARGET_FILE_NAME:${NAME}>.app/Contents/Frameworks") install(DIRECTORY ${SDL2_IMAGE_LIBRARY} DESTINATION "bin/$<TARGET_FILE_NAME:${NAME}>.app/Contents/Frameworks") + install(DIRECTORY ${SDL2_NET_LIBRARY} DESTINATION "bin/$<TARGET_FILE_NAME:${NAME}>.app/Contents/Frameworks") endif() endfunction()
in_cpu: parenthesis to protect division by zero
@@ -106,13 +106,7 @@ static inline double CPU_METRIC_SYS_AVERAGE(unsigned long pre, unsigned long now } diff = ULL_ABS(now, pre); - - if (ctx->interval_sec > 0) { - total = ((diff / ctx->cpu_ticks) * 100) / ctx->n_processors / ctx->interval_sec; - } - else { - total = ((diff / ctx->cpu_ticks) * 100) / ctx->n_processors; - } + total = (((diff / ctx->cpu_ticks) * 100) / ctx->n_processors) / ctx->interval_sec; return total; } @@ -129,14 +123,7 @@ static inline double CPU_METRIC_USAGE(unsigned long pre, unsigned long now, } diff = ULL_ABS(now, pre); - - if (ctx->interval_sec > 0) { - total = (diff * 100) / ctx->cpu_ticks / ctx->interval_sec; - } - else { - total = (diff * 100) / ctx->cpu_ticks; - } - + total = ((diff * 100) / ctx->cpu_ticks) / ctx->interval_sec; return total; }
dawn: fix /lib/vere compilation errors
~&([%czar-take-dawn %invalid-json] ~) :- ~ %+ roll u.res - |= $: [id=@t deet=[rift life =pass]] + |= $: [id=@t deet=[=rift =life =pass]] kyz=(map ship [=rift =life =pass]) == ^+ kyz =/ jon=(unit json) (de-json:html q.rep) ?~ jon ~&([%point-take-dawn %invalid-json] ~) - =; ^= res - %- unit - $: [spawn=@ own=[@ @ @ @]] - [=rift =life =pass sponsor=[? ship]] - == - ?~ res + =- ?~ res ~&([%point-take-dawn %incomplete-json] ~) =, u.res %- some `[life pass rift sponsor ~] ::NOTE escape unknown ::TODO could be! ?. (gth who 0xffff) ~ `[spawn ~] ::NOTE spawned unknown + ^- $= res + %- unit + $: [spawn=@ own=[@ @ @ @]] + [=rift =life =pass sponsor=[? ship]] + == %. u.jon =, dejs-soft:format =- (ot result+- ~)
Fix typo in comment The peek result is now called 'err', not just 'e'.
@@ -169,7 +169,7 @@ int pkey_main(int argc, char **argv) while ((err = ERR_peek_error()) != 0) { BIO_printf(out, "Detailed error: %s\n", ERR_reason_error_string(err)); - ERR_get_error(); /* remove e from error stack */ + ERR_get_error(); /* remove err from error stack */ } } EVP_PKEY_CTX_free(ctx);
CI/CD: add "no-epm" option testing
@@ -69,5 +69,14 @@ jobs: sed -i '145 c \"enclave.runtime.path\": \"/var/run/rune/liberpal-skeleton-v3.so\",' config.json; rune --debug run ra" + - name: Run skeleton v3 with no epm + if: always() + run: docker exec $rune_test bash -c "docker run -i --rm --runtime=rune -e ENCLAVE_TYPE=intelSgx -e ENCLAVE_RUNTIME_PATH=/usr/lib/liberpal-skeleton-v3.so -e ENCLAVE_RUNTIME_ARGS=debug,no-epm -e ENCLAVE_RUNTIME_LOGLEVEL="info" skeleton-enclave" + + - name: Run skeleton v3 bundle with no epm + run: | + docker exec $rune_test bash -c "sed -i '143 c \"enclave.runtime.args\": \"debug,no-epm\",' config.json; + rune --debug run ra" + - name: Kill the container run: docker stop $rune_test
stm32/sdcard: Fix H7 build when using SDMMC2. Changes are: Fix missing IRQ handler when SDMMC2 is used instead of SDMMC1 with H7 MCUs. Removed outdated H7 series compatibility macros. Defined common IRQ handler macro for F4 series.
#if defined(MICROPY_HW_SDMMC2_CK) #define SDIO SDMMC2 +#define SDMMC_IRQHandler SDMMC2_IRQHandler #define SDMMC_CLK_ENABLE() __HAL_RCC_SDMMC2_CLK_ENABLE() #define SDMMC_CLK_DISABLE() __HAL_RCC_SDMMC2_CLK_DISABLE() #define SDMMC_IRQn SDMMC2_IRQn #define SDMMC_DMA dma_SDMMC_2 #else #define SDIO SDMMC1 +#define SDMMC_IRQHandler SDMMC1_IRQHandler #define SDMMC_CLK_ENABLE() __HAL_RCC_SDMMC1_CLK_ENABLE() #define SDMMC_CLK_DISABLE() __HAL_RCC_SDMMC1_CLK_DISABLE() #define SDMMC_IRQn SDMMC1_IRQn #define SDIO_HARDWARE_FLOW_CONTROL_ENABLE SDMMC_HARDWARE_FLOW_CONTROL_ENABLE #if defined(STM32H7) -#define GPIO_AF12_SDIO GPIO_AF12_SDIO1 -#define SDIO_IRQHandler SDMMC1_IRQHandler #define SDIO_TRANSFER_CLK_DIV SDMMC_NSpeed_CLK_DIV #define SDIO_USE_GPDMA 0 #else #define SDMMC_CLK_ENABLE() __SDIO_CLK_ENABLE() #define SDMMC_CLK_DISABLE() __SDIO_CLK_DISABLE() #define SDMMC_IRQn SDIO_IRQn +#define SDMMC_IRQHandler SDIO_IRQHandler #define SDMMC_DMA dma_SDIO_0 #define SDIO_USE_GPDMA 1 #define STATIC_AF_SDMMC_CK STATIC_AF_SDIO_CK @@ -398,21 +399,11 @@ STATIC void sdmmc_irq_handler(void) { } } -#if !defined(MICROPY_HW_SDMMC2_CK) -void SDIO_IRQHandler(void) { - IRQ_ENTER(SDIO_IRQn); +void SDMMC_IRQHandler(void) { + IRQ_ENTER(SDMMC_IRQn); sdmmc_irq_handler(); - IRQ_EXIT(SDIO_IRQn); + IRQ_EXIT(SDMMC_IRQn); } -#endif - -#if defined(STM32F7) -void SDMMC2_IRQHandler(void) { - IRQ_ENTER(SDMMC2_IRQn); - sdmmc_irq_handler(); - IRQ_EXIT(SDMMC2_IRQn); -} -#endif STATIC void sdcard_reset_periph(void) { // Fully reset the SDMMC peripheral before calling HAL SD DMA functions.
sudo for macOS to cp xmake
@@ -26,7 +26,7 @@ script: - echo 'require("luacov")' > tmp - cat xmake/core/_xmake_main.lua >> tmp - mv tmp xmake/core/_xmake_main.lua - - cp core/build/xmake $(which xmake) + - cp core/build/xmake $(which xmake) || sudo cp core/build/xmake $(which xmake) - tests/tests after_success:
Work CD-CI Fix prepare release task.
@@ -94,17 +94,9 @@ jobs: git checkout develop - cd source - # prepare release and capture output $release = nbgv prepare-release - # get commit message for the merge - $commitMessage = git log -1 --pretty=%B - - # amend commit message to skip build - git commit --amend -m "$commitMessage" -m "***NO_CI***" > $null - # push all changes to github git -c http.extraheader="AUTHORIZATION: $auth" push --all origin
add ctime_r() and re-implement ctime()
* 2021-02-05 Meco Man add timegm() * 2021-02-07 Meco Man fixed gettimeofday() * 2021-02-08 Meco Man add settimeofday() stime() + * 2021-02-10 Meco Man add ctime_r() and re-implement ctime() */ #include <sys/time.h> @@ -149,9 +150,15 @@ char* asctime(const struct tm *timeptr) return asctime_r(timeptr, buf); } -char* ctime(const time_t *timep) +char *ctime_r (const time_t * tim_p, char * result) { - return asctime(localtime(timep)); + struct tm tm; + return asctime_r (localtime_r (tim_p, &tm), result); +} + +char* ctime(const time_t *tim_p) +{ + return asctime (localtime (tim_p)); } /**
Update the Mynewt slack join URL
@@ -109,7 +109,7 @@ want to talk to a human about what you're working on, you can contact us via the [developers mailing list](mailto:[email protected]). Although not a formal channel, you can also find a number of core developers -on the #mynewt channel on Freenode IRC or #general channel on [Mynewt Slack](https://join.slack.com/mynewt/shared_invite/MTkwMTg1ODM1NTg5LTE0OTYxNzQ4NzQtZTU1YmNhYjhkMg) +on the #mynewt channel on Freenode IRC or #general channel on [Mynewt Slack](https://mynewt.slack.com/join/shared_invite/enQtNjA1MTg0NzgyNzg3LTcyMmZiOGQzOGMxM2U4ODFmMTIwNjNmYTE5Y2UwYjQwZWIxNTE0MTUzY2JmMTEzOWFjYWZkNGM0YmM4MzAxNWQ) Also, be sure to checkout the [Frequently Asked Questions](https://mynewt.apache.org/faq/answers) for some help troubleshooting first.
Don't try to force TERM to a fixed value in tab-completion test. Right at the moment, this is making things worse not better in the buildfarm. I'm not happy with anything about the current state, but let's at least try to have a green buildfarm report while further investigation continues. Discussion:
@@ -41,9 +41,6 @@ $ENV{PSQL_HISTORY} = $historyfile; # Debug investigation note "TERM is set to '" . ($ENV{TERM} || "<undef>") . "'"; -# Ensure that readline/libedit puts out xterm escapes, not something else. -$ENV{TERM} = 'xterm'; - # regexp to match one xterm escape sequence (CSI style only, for now) my $escseq = "(\e\\[[0-9;]*[A-Za-z])";
Support demangling lambdas with auto params.
@@ -144,6 +144,9 @@ void display(__node* x, int indent = 0) #endif +__node __t_lambda_node; +__node* __t_lambda = &__t_lambda_node; + class __vtable : public __node { @@ -6923,7 +6926,14 @@ __demangle_tree::__parse_unnamed_type_name(const char* first, const char* last) } else { + // Demangle all T_ within lambda type signature as "auto" + // because "a <template-param> in a <lambda-sig> can only + // ever refer to a template parameter of a generic lambda". + __node** prev_t_begin = __t_begin_; + __t_begin_ = &__t_lambda; first = __parse_bare_function_type(first, last); + __t_begin_ = prev_t_begin; + if (first[0] == 'E') ++first; else @@ -8188,7 +8198,17 @@ __demangle_tree::__parse_template_param(const char* first, const char* last) { if (*first == 'T') { - if (first[1] == '_') + if (__t_begin_ == &__t_lambda) + { + const char* t = first + 1; + while (t != last && isdigit(*t)) + ++t; + if (t == last || *t != '_') + return first; + if (__make<__auto>()) + first = t + 1; + } + else if (first[1] == '_') { if (__t_begin_ != __t_end_) {
Fix segfault when running foomatic-rip by hand and env PRINTER is missing
@@ -94,7 +94,6 @@ void init_cups(list_t *arglist, dstr_t *filelist, jobparams_t *job) CUPS puts the print queue name into the PRINTER environment variable when calling filters. */ strncpy(job->printer, getenv("PRINTER"), 256); - if (strlen(getenv("PRINTER")) > 255) job->printer[255] = '\0'; free(cups_options);
ip: fix the punt redirect for ip4 Type: fix
@@ -334,10 +334,10 @@ ip4_punt_redirect_cmd (vlib_main_t * vm, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; - fib_route_path_t *rpaths = NULL, rpath; - dpo_proto_t payload_proto; + ip46_address_t nh = { 0 }; clib_error_t *error = 0; u32 rx_sw_if_index = ~0; + u32 tx_sw_if_index = ~0; vnet_main_t *vnm; u8 is_add; @@ -358,9 +358,13 @@ ip4_punt_redirect_cmd (vlib_main_t * vm, else if (unformat (line_input, "rx %U", unformat_vnet_sw_interface, vnm, &rx_sw_if_index)) ; + else if (unformat (line_input, "via %U %U", + unformat_ip4_address, unformat_vnet_sw_interface, + &nh.ip4, vnm, &tx_sw_if_index)) + ; else if (unformat (line_input, "via %U", - unformat_fib_route_path, &rpath, &payload_proto)) - vec_add1 (rpaths, rpath); + unformat_vnet_sw_interface, vnm, &tx_sw_if_index)) + ; else { error = unformat_parse_error (line_input); @@ -376,8 +380,7 @@ ip4_punt_redirect_cmd (vlib_main_t * vm, if (is_add) { - if (vec_len (rpaths)) - ip4_punt_redirect_add_paths (rx_sw_if_index, rpaths); + ip4_punt_redirect_add (rx_sw_if_index, tx_sw_if_index, &nh); } else {
better description. - harden-algo-downgrade: no also makes unbound more lenient about digest algorithms in DS records.
10 April 2017: Wouter - - harden algo downgrade also makes unbound more lenient about digest - algorithms in DS records. + - harden-algo-downgrade: no also makes unbound more lenient about + digest algorithms in DS records. 10 April 2017: Ralph - Remove ECS option after REFUSED answer.
provisioning/warewulf-vnfs: include correct firmware patch
---- a/etc/bootstrap.conf 2018-07-12 10:18:54.000000000 -0700 -+++ b/etc/bootstrap.conf 2018-07-12 10:33:29.000000000 -0700 -@@ -32,6 +32,10 @@ - modprobe += xhci-hcd, sl811-hcd, sd_mod - # modprobe += ib_ipoib +--- a/etc/bootstrap.conf.orig 2020-04-01 15:40:31.760000000 +0000 ++++ b/etc/bootstrap.conf 2020-04-01 15:40:56.070000000 +0000 +@@ -25,6 +25,7 @@ + # What Firmware images should be included in the bootstrap image. + firmware += 3com/*, acenic/*, bnx2*, cxgb3/*, e100/*, myricom/*, ti_* + firmware += tigon/* ++firmware += qed/* -+# support USB and SoftIron network devices -+modprobe += xhci_pci, usbcore, libphy, mii, usbnet, asix -+modprobe += amd-xgbe -+ - #OpenHPC additions for SLES12 - drivers += af_packet, dns_resolver, auth_rpcgss, lockd, sunrpc - drivers += dm_mod, dcdbas, dell_rbu, ipmi_msghandler, shpchp, ehci_pci + # Modules that should be called directly by /sbin/modprobe (including + # options if any exist). Modules will be loaded in the order given, before
Convert .align to .p2align for OSX compatibility
@@ -99,7 +99,7 @@ static void dscal_kernel_inc_8(BLASLONG n, FLOAT *alpha, FLOAT *x, BLASLONG inc_ "leaq (%1,%4,4), %2 \n\t" - ".align 16 \n\t" + ".p2align 4 \n\t" "1: \n\t" "movsd (%1) , %%xmm4 \n\t"
fix cv with cat_features param
@@ -2934,6 +2934,12 @@ def cv(pool=None, params=None, dtrain=None, iterations=None, num_boost_round=Non else: assert nfold is None or nfold == fold_count + if 'cat_features' in params: + if set(pool.get_cat_feature_indices()) != set(params['cat_features']): + raise CatboostError("categorical features in params are different from ones in pool " + str(params['cat_features']) + + " vs " + str(pool.get_cat_feature_indices())) + del params['cat_features'] + with log_fixup(), plot_wrapper(plot, params): return _cv(params, pool, fold_count, inverted, partition_random_seed, shuffle, stratified, as_pandas, max_time_spent_on_fixed_cost_ratio, dev_max_iterations_batch_size)
Free memory in test
@@ -193,6 +193,8 @@ static void test_bytestring_add_chunk(void **state) { ((struct cbor_indefinite_string_data *)bytestring->data) ->chunk_capacity, 0); + cbor_decref(&chunk); + cbor_decref(&bytestring); }, 5, MALLOC, MALLOC, MALLOC, MALLOC, REALLOC_FAIL); }
Corrected offsets in OS Configuration Structure.
<member name="robin_timeout" type="uint32_t" offset="8" info="Round Robim timeout tick"/> <member name="isr_queue_data" type="uint32_t" offset="12" info="ISR post processing queue (value is void **)"/> - <member name="isr_queue_max" type="uint16_t" offset="14" info="Maximum data"/> - <member name="isr_queue_padding" type="uint16_t" offset="16" info="Padding bytes"/> + <member name="isr_queue_max" type="uint16_t" offset="16" info="Maximum data"/> + <member name="isr_queue_padding" type="uint16_t" offset="18" info="Padding bytes"/> <member name="mem_stack_addr" type="uint32_t" offset="20" info="Stack memory address"/> <member name="mem_stack_size" type="uint32_t" offset="24" info="Stack memory size"/>
[core] reload c after chunkqueue_compact_mem() reload c = cq->first after calling chunkqueue_compact_mem()
@@ -616,8 +616,10 @@ static int connection_handle_read_state(connection * const con) { if (NULL == c) continue; clen = buffer_string_length(c->mem) - c->offset; if (0 == clen) continue; - if (c->offset > USHRT_MAX) /*(highly unlikely)*/ + if (c->offset > USHRT_MAX) { /*(highly unlikely)*/ chunkqueue_compact_mem(cq, clen); + c = cq->first; /*(reload c after chunkqueue_compact_mem())*/ + } hoff[0] = 1; /* number of lines */ hoff[1] = (unsigned short)c->offset; /* base offset for all lines */
Parameter: Update process noise model
@@ -59,12 +59,12 @@ STRUCT_CONFIG_SECTION(SurviveKalmanTracker) STRUCT_CONFIG_ITEM("lightcap-rampin-length", "Number of lightcap measures to ramp in variance", 5000, t->light_rampin_length) - STRUCT_CONFIG_ITEM("process-weight-acc", "Acc variance per second", 1e-1, t->params.process_weight_acc) - STRUCT_CONFIG_ITEM("process-weight-ang-vel", "Angular velocity variance per second", 1e-2, + STRUCT_CONFIG_ITEM("process-weight-acc", "Acc variance per second", 97, t->params.process_weight_acc) + STRUCT_CONFIG_ITEM("process-weight-ang-vel", "Angular velocity variance per second", 60, t->params.process_weight_ang_velocity) - STRUCT_CONFIG_ITEM("process-weight-vel", "Velocity variance per second", 1e-2, t->params.process_weight_vel) - STRUCT_CONFIG_ITEM("process-weight-pos", "Position variance per second", 0, t->params.process_weight_pos) - STRUCT_CONFIG_ITEM("process-weight-rot", "Rotation variance per second", 0, t->params.process_weight_rotation) + STRUCT_CONFIG_ITEM("process-weight-vel", "Velocity variance per second", 4.34302956e-05, t->params.process_weight_vel) + STRUCT_CONFIG_ITEM("process-weight-pos", "Position variance per second", 1.19271301e-03, t->params.process_weight_pos) + STRUCT_CONFIG_ITEM("process-weight-rot", "Rotation variance per second", 8.10001976e-06, t->params.process_weight_rotation) STRUCT_CONFIG_ITEM("process-weight-acc-bias", "Acc bias variance per second", 0, t->params.process_weight_acc_bias) STRUCT_CONFIG_ITEM("process-weight-gyro-bias", "Gyro bias variance per seconid", 0, t->params.process_weight_gyro_bias) STRUCT_CONFIG_ITEM("kalman-minimize-state-space", "Minimize the state space", 1, t->minimize_state_space) @@ -76,7 +76,7 @@ STRUCT_CONFIG_SECTION(SurviveKalmanTracker) STRUCT_CONFIG_ITEM("kalman-zvu-stationary", "", 1e-2, t->zvu_stationary_var) STRUCT_CONFIG_ITEM("kalman-zvu-no-light", "", 1e-4, t->zvu_no_light_var) - STRUCT_CONFIG_ITEM("kalman-noise-model", "0 is jerk acceleration model, 1 is simple model", 0, t->noise_model) + 
STRUCT_CONFIG_ITEM("kalman-noise-model", "0 is jerk acceleration model, 1 is simple model", 1, t->noise_model) STRUCT_CONFIG_ITEM("imu-acc-norm-penalty", "", 0, t->acc_norm_penalty) STRUCT_CONFIG_ITEM("imu-acc-variance", "Variance of accelerometer", 1e-3, t->acc_var)
fskmodem/example: cleaning up memory like a good developer
@@ -112,6 +112,10 @@ int main(int argc, char*argv[]) spgramcf_write(periodogram, buf_rx, k); } + // destroy modulator/demodulator pair + fskmod_destroy(mod); + fskdem_destroy(dem); + printf("symbol errors: %u / %u\n", num_symbol_errors, num_symbols); // compute power spectral density of received signal
test(ethereum): Fix an ambiguous judgment
@@ -264,7 +264,7 @@ START_TEST(test_001CreateWallet_0009CreateWalletWithInternalGeneration) rtnVal = BoatWalletCreate(BOAT_PROTOCOL_ETHEREUM, NULL, &wallet, sizeof(BoatEthWalletConfig)); /* 2. verify test result */ /* 2-1. verify the return value */ - ck_assert_int_eq(rtnVal, BOAT_SUCCESS); + ck_assert_int_eq(rtnVal, 0); /* 2-2. verify the global variables that be affected */ ck_assert(g_boat_iot_sdk_context.wallet_list[0].is_used == true);
sdl/surface: fix sdl.Surface.Set() on PIXELFORMAT_ARGB8888 surface displaying wrong color
@@ -569,9 +569,9 @@ func (surface *Surface) Set(x, y int, c color.Color) { switch surface.Format.Format { case PIXELFORMAT_ARGB8888: col := surface.ColorModel().Convert(c).(color.RGBA) - pix[i+0] = col.R + pix[i+0] = col.B pix[i+1] = col.G - pix[i+2] = col.B + pix[i+2] = col.R pix[i+3] = col.A case PIXELFORMAT_ABGR8888: col := surface.ColorModel().Convert(c).(color.RGBA)
ip: indent format typo fix Type: style
@@ -245,14 +245,12 @@ ip6_link_delegate_flush (ip6_link_t * il) { ip6_link_delegate_t *ild; - /* *INDET-OFF* */ - FOREACH_IP6_LINK_DELEGATE (ild, il, ( - { - il_delegate_vfts[ild-> - ild_type].ildv_disable - (ild->ild_index); + /* *INDENT-OFF* */ + FOREACH_IP6_LINK_DELEGATE (ild, il, + ({ + il_delegate_vfts[ild->ild_type].ildv_disable(ild->ild_index); })); - /* *INDET-ON* */ + /* *INDENT-ON* */ vec_free (il->il_delegates); il->il_delegates = NULL;
use relaxed load for region count as that is monotonic
@@ -240,7 +240,7 @@ static bool mi_region_is_suitable(const mem_region_t* region, int numa_node, boo static bool mi_region_try_claim(int numa_node, size_t blocks, bool allow_large, mem_region_t** region, mi_bitmap_index_t* bit_idx, mi_os_tld_t* tld) { // try all regions for a free slot - const size_t count = mi_atomic_load_acquire(&regions_count); + const size_t count = mi_atomic_load_relaxed(&regions_count); // monotonic, so ok to be relaxed size_t idx = tld->region_idx; // Or start at 0 to reuse low addresses? Starting at 0 seems to increase latency though for (size_t visited = 0; visited < count; visited++, idx++) { if (idx >= count) idx = 0; // wrap around
admin/clustershell: update to v1.8.2
%define pname clustershell Name: clustershell%{PROJ_DELIM} -Version: 1.8.1 +Version: 1.8.2 Release: 1%{?dist} Summary: Python framework for efficient cluster administration
Update py_import_test. Note: mandatory check (NEED_CHECK) was skipped
from __future__ import print_function -import itertools import os import re import sys @@ -24,6 +23,7 @@ def check_imports(no_check=(), extra=(), skip_func=None): exceptions = list(no_check) for key, _ in __res.iter_keys(b'py/no_check_imports/'): exceptions += str_(__res.find(key)).split() + if exceptions: exceptions.sort() print('NO_CHECK_IMPORTS', ' '.join(exceptions))
py/dynruntime: Implement uint new/get, mp_obj_len and mp_obj_subscr.
@@ -97,6 +97,7 @@ static inline void *m_realloc_dyn(void *ptr, size_t new_num_bytes) { #define mp_obj_new_bool(b) ((b) ? (mp_obj_t)mp_fun_table.const_true : (mp_obj_t)mp_fun_table.const_false) #define mp_obj_new_int(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_INT)) +#define mp_obj_new_int_from_uint(i) (mp_fun_table.native_to_obj(i, MP_NATIVE_TYPE_UINT)) #define mp_obj_new_str(data, len) (mp_fun_table.obj_new_str((data), (len))) #define mp_obj_new_str_of_type(t, d, l) (mp_obj_new_str_of_type_dyn((t), (d), (l))) #define mp_obj_new_bytes(data, len) (mp_fun_table.obj_new_bytes((data), (len))) @@ -106,11 +107,14 @@ static inline void *m_realloc_dyn(void *ptr, size_t new_num_bytes) { #define mp_obj_get_type(o) (mp_fun_table.obj_get_type((o))) #define mp_obj_get_int(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_INT)) +#define mp_obj_get_int_truncated(o) (mp_fun_table.native_from_obj(o, MP_NATIVE_TYPE_UINT)) #define mp_obj_str_get_str(s) ((void*)mp_fun_table.native_from_obj(s, MP_NATIVE_TYPE_PTR)) #define mp_obj_str_get_data(o, len) (mp_obj_str_get_data_dyn((o), (len))) #define mp_get_buffer_raise(o, bufinfo, fl) (mp_fun_table.get_buffer_raise((o), (bufinfo), (fl))) #define mp_get_stream_raise(s, flags) (mp_fun_table.get_stream_raise((s), (flags))) +#define mp_obj_len(o) (mp_obj_len_dyn(o)) +#define mp_obj_subscr(base, index, val) (mp_fun_table.obj_subscr((base), (index), (val))) #define mp_obj_list_append(list, item) (mp_fun_table.list_append((list), (item))) static inline mp_obj_t mp_obj_new_str_of_type_dyn(const mp_obj_type_t *type, const byte* data, size_t len) { @@ -128,6 +132,11 @@ static inline void *mp_obj_str_get_data_dyn(mp_obj_t o, size_t *l) { return bufinfo.buf; } +static inline mp_obj_t mp_obj_len_dyn(mp_obj_t o) { + // If bytes implemented MP_UNARY_OP_LEN could use: mp_unary_op(MP_UNARY_OP_LEN, o) + return mp_fun_table.call_function_n_kw(mp_fun_table.load_name(MP_QSTR_len), 1, &o); +} + 
/******************************************************************************/ // General runtime functions
Reorder Logger() constructor args Currently the order varies across the struct and constructor. As a nice reminder GCC will throw a lovely warning at us. Fix the ordering.
@@ -150,9 +150,9 @@ void logging(){ } Logger::Logger(overlay_params* in_params) - : m_logging_on(false), - m_values_valid(false), - m_params(in_params) + : m_params(in_params), + m_logging_on(false), + m_values_valid(false) { m_log_end = Clock::now() - 15s; SPDLOG_DEBUG("Logger constructed!");
api: set the UNMAP fields when needed
@@ -211,6 +211,7 @@ void tcmu_zero_iovec(struct iovec *iovec, size_t iov_cnt) iov_cnt--; } } + /* * Copy data into an iovec, and consume the space in the iovec. * @@ -621,6 +622,7 @@ finish_page83: /* Optimal xfer length */ memcpy(&data[12], &val32, 4); + if (rhandler->unmap) { /* MAXIMUM UNMAP LBA COUNT */ val32 = htobe32(max_xfer_length); memcpy(&data[20], &val32, 4); @@ -632,6 +634,7 @@ finish_page83: /* OPTIMAL UNMAP GRANULARITY */ val32 = htobe32(max_xfer_length); memcpy(&data[28], &val32, 4); + } /* MAXIMUM WRITE SAME LENGTH */ val64 = htobe64(VPD_MAX_WRITE_SAME_LENGTH);
dev-tools/scipy: enable multi-build macros
@@ -41,6 +41,12 @@ Requires: openblas-%{compiler_family}%{PROJ_DELIM} %define pname scipy %define PNAME %(echo %{pname} | tr [a-z] [A-Z]) +%if 0%{?sles_version} || 0%{?suse_version} +%define python_module() python-%{**} python3-%{**} +%else +%define python_module() python-%{**} python34-%{**} +%endif + Name: python-%{pname}-%{compiler_family}-%{mpi_family}%{PROJ_DELIM} Version: 1.0.0 Release: 1%{?dist} @@ -54,13 +60,14 @@ Source1: OHPC_macros BuildRequires: fdupes %endif BuildRequires: fftw-%{compiler_family}-%{mpi_family}%{PROJ_DELIM} -BuildRequires: python-devel -BuildRequires: python-setuptools -BuildRequires: python-Cython%{PROJ_DELIM} -BuildRequires: python-numpy-%{compiler_family}%{PROJ_DELIM} +BuildRequires: %{python_module devel} +BuildRequires: %{python_module setuptools} +BuildRequires: %{python_module Cython}%{PROJ_DELIM} +BuildRequires: %{python_module numpy}-%{compiler_family}%{PROJ_DELIM} BuildRequires: swig Requires: lmod%{PROJ_DELIM} >= 7.6.1 -Requires: python-numpy-%{compiler_family}%{PROJ_DELIM} +Requires: %{python_module numpy}-%{compiler_family}%{PROJ_DELIM} +%python_subpackages # Default library install path %define install_path %{OHPC_LIBS}/%{compiler_family}/%{mpi_family}/%{pname}/%version @@ -113,9 +120,9 @@ EOF CFLAGS="%{optflags} -fno-strict-aliasing" \ %if "%{compiler_family}" == "intel" LDSHARED="icc -shared" \ -python setup.py config --compiler=intelm --fcompiler=intelem build_clib --compiler=intelem --fcompiler=intelem build_ext --compiler=intelem --fcompiler=intelem build +%python_exec setup.py config --compiler=intelm --fcompiler=intelem build_clib --compiler=intelem --fcompiler=intelem build_ext --compiler=intelem --fcompiler=intelem build %else -python setup.py config_fc --fcompiler=gnu95 --noarch build +%python_exec setup.py config_fc --fcompiler=gnu95 --noarch build %endif %install @@ -127,7 +134,7 @@ module load openblas %endif module load numpy -python setup.py install --prefix=%{install_path} --root=%{buildroot} +%python_exec 
setup.py install --prefix=%{install_path} --root=%{buildroot} find %{buildroot}%{install_path}/lib64/python2.7/site-packages/scipy -type d -name tests | xargs rm -rf # Don't ship tests # Don't ship weave examples, they're marked as documentation: find %{buildroot}%{install_path}/lib64/python2.7/site-packages/scipy/weave -type d -name examples | xargs rm -rf @@ -179,7 +186,7 @@ EOF %{__mkdir_p} ${RPM_BUILD_ROOT}/%{_docdir} -%files +%files %{python_Files} %defattr(-,root,root,-) %{OHPC_PUB} %doc THANKS.txt
Add test case for comment inside hex string. Also move all comments-related tests to the test_comments function.
@@ -626,26 +626,6 @@ static void test_hex_strings() condition: $a }", "1234567890"); - assert_true_rule( - "rule test { \ - strings: $a = { 31 32 [-] // Inline comment\n\r \ - 38 39 } \ - condition: $a }", - "1234567890"); - - assert_true_rule( - "rule test { \ - strings: $a = { 31 32 /* Inline comment */ [-] 38 39 } \ - condition: $a }", - "1234567890"); - - assert_true_rule( - "rule test { \ - strings: $a = { 31 32 /* Inline multi-line\n\r \ - comment */ [-] 38 39 } \ - condition: $a }", - "1234567890"); - assert_true_rule( "rule test { \ strings: $a = {\n 31 32 [-] 38 39 \n\r} \ @@ -1477,6 +1457,32 @@ static void test_comments() true\n\ }", NULL); + + assert_true_rule( + "rule test { \ + strings: $a = { 31 32 [-] // Inline comment\n\r \ + 38 39 } \ + condition: $a }", + "1234567890"); + + assert_true_rule( + "rule test { \ + strings: $a = { 31 32 /* Inline comment */ [-] 38 39 } \ + condition: $a }", + "1234567890"); + + assert_true_rule( + "rule test { \ + strings: $a = { 31 32 /* Inline multi-line\n\r \ + comment */ [-] 38 39 } \ + condition: $a }", + "1234567890"); + + assert_true_rule( + "rule test { \ + strings: $a = { 31 /* A */ 32 /*B*/ 33 34 35 36 /* C */} \ + condition: $a }", + "1234567890"); } static void test_matches_operator()
use static_cast in error test
@@ -571,7 +571,7 @@ MultiPartInputFile::Data::chunkOffsetReconstruction(OPENEXR_IMF_INTERNAL_NAMESPA - if(partNumber<0 || partNumber>int(parts.size())) + if(partNumber<0 || partNumber> static_cast<int>(parts.size())) { throw IEX_NAMESPACE::IoExc("part number out of range"); }
Fix segfault from unsafe NULL pointer
@@ -247,6 +247,7 @@ const char* findMatchedDelim(Code* code, const char* current) code->syntax[current - start] == SyntaxTypeString) continue; if(*current == seeking) return current; if(*current == initial) current = findMatchedDelim(code, current); + if(!current) break; } return NULL;
add patients dataset generation
@@ -103,6 +103,27 @@ def generate_concatenated_random_labeled_dataset(nrows, nvals, labels, seed=2018 feature = prng.random_sample([nrows, nvals]) return np.concatenate([label, feature], axis=1) +def generate_patients_datasets(train_path, test_path): + samples = 237 + + for samples, path in zip([237, 154], [train_path, test_path]): + data = DataFrame() + data['age'] = np.random.randint(20, 71, size=samples) + data['gender'] = np.where(np.random.binomial(1, 0.7, samples) == 1, 'male', 'female') + data['diet'] = np.where(np.random.binomial(1, 0.1, samples) == 1, 'yes', 'no') + data['glucose'] = np.random.uniform(4, 12, size=samples) + data['platelets'] = np.random.randint(100, 500, size=samples) + data['cholesterol'] = np.random.uniform(4.5, 6.5, size=samples) + data['survival_in_days'] = np.random.randint(30, 500, size=samples) + data['outcome'] = np.where(np.random.binomial(1, 0.8, size=samples) == 1, 'dead', 'alive') + data['target'] = np.where(data['outcome'] == 'dead', data['survival_in_days'], - data['survival_in_days']) + data = data.drop(['outcome', 'survival_in_days'], axis=1) + data.to_csv( + path, + header=False, + index=False, + sep='\t' + ) # returns (features : numpy.ndarray, labels : list) tuple def generate_random_labeled_dataset(
Add command to configure model in userland driver
@@ -31,6 +31,7 @@ int ltc294x_configure(ltc294x_model_e model, interrupt_pin_conf_e int_pin, uint16_t prescaler, vbat_alert_adc_mode_e vbat) { + int rc; uint8_t M = 0; if (model == LTC2941 || model == LTC2942) { // ltc2941/2 expects log_2 of prescaler value @@ -52,6 +53,10 @@ int ltc294x_configure(ltc294x_model_e model, default: M = 4; break; } } + + rc = command(DRIVER_NUM_LTC294X, 10, model); + if (rc != TOCK_SUCCESS) return rc; + uint8_t cmd = (int_pin & 0x03) | ((M & 0x07) << 2) | ((vbat & 0x03) << 5); return command(DRIVER_NUM_LTC294X, 2, cmd); }
fix rule for $(OUT)/run_from_file_gpu_indirect
@@ -75,7 +75,7 @@ $(OUT)/run_from_file_direct: test/run_from_file.c $(OUT)/libscsdir.a $(OUT)/run_from_file_indirect: test/run_from_file.c $(OUT)/libscsindir.a $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) -$(OUT)/run_from_file_gpu: test/run_from_file.c $(OUT)/libscsgpuindir.a +$(OUT)/run_from_file_gpu_indirect: test/run_from_file.c $(OUT)/libscsgpuindir.a $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) $(CULDFLAGS) # basic testing
tree data UPDATE allow insert top-level ext siblings
@@ -759,7 +759,6 @@ lyd_insert_sibling(struct lyd_node *sibling, struct lyd_node *node, struct lyd_n struct lyd_node *iter; LY_CHECK_ARG_RET(NULL, node, LY_EINVAL); - LY_CHECK_CTX_EQUAL_RET(sibling ? LYD_CTX(sibling) : NULL, LYD_CTX(node), LY_EINVAL); if (sibling) { LY_CHECK_RET(lyd_insert_check_schema(NULL, sibling->schema, node->schema));
Updated Travis token. See advisory:
@@ -9,7 +9,7 @@ notifications: on_success: change on_failure: always env: - - secure: "0kkcYHaW88jKB9/nvZKJIsmDv4KALyy4DrcWs3wncRa4rAp0Wk4Ge/2o5GgubcowswiX9EZFawVRoM3h8nOf+awPu/gO6OkZx1LhYwQa/5lo7/LYq5Bzf2NNM+1xmNSgRKP+84/gi8pm/Ytv81+4hDmlN2APSMMq9u6UMzQCkVM=" + - secure: "BbB1KVY0Yb6DJwxdfFDF1PJwSx9euNfNX94oDKftiH8LE0nEzfS6xZc2sBkWTWOThHml9ttBkDIx/NhxEThOjyVcX6uv4kibP6moV5EqxqC+kLoZSEZnVuAdTJfGRKBdzmRp66R5a/GiMzzz/F3+smdVFMb6XR06sPQa5TQZjEc=" before_install: - sudo apt-add-repository -y ppa:libreoffice/libreoffice-4-2 - sudo apt-get update -q
VERSION bump to version 0.12.23
@@ -32,7 +32,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 12) -set(LIBNETCONF2_MICRO_VERSION 22) +set(LIBNETCONF2_MICRO_VERSION 23) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
[FRDMKL27 IAR]Fix IAR project linker file issue Shall use relative path, not absolute path for linker file.
</option> <option> <name>IlinkIcfFile</name> - <state>D:\code\alios-things\platform\mcu\mkl27z644\iar\MKL27Z64xxx4_flash.icf</state> + <state>$PROJ_DIR$\..\..\..\platform\mcu\mkl27z644\iar\MKL27Z64xxx4_flash.icf</state> </option> <option> <name>IlinkIcfFileSlave</name> </option> <option> <name>IlinkIcfFile</name> - <state>D:\code\alios-things\platform\mcu\mkl27z644\iar\MKL27Z64xxx4_flash.icf</state> + <state>$PROJ_DIR$\..\..\..\platform\mcu\mkl27z644\iar\MKL27Z64xxx4_flash.icf</state> </option> <option> <name>IlinkIcfFileSlave</name>
Updated wait command to use update fn
@@ -154,6 +154,14 @@ UBYTE ScriptUpdate_AwaitFade() { return !IsFading(); } UBYTE ScriptUpdate_AwaitUIClosed() { return UIIsClosed(); } +UBYTE ScriptUpdate_Wait() { + if (wait_time == 0) { + return TRUE; + } + wait_time--; + return FALSE; +} + /* * Command: Noop * ---------------------------- @@ -373,7 +381,7 @@ void Script_CameraLock_b() { void Script_Wait_b() { wait_time = script_cmd_args[0]; script_ptr += 1 + script_cmd_args_len; - script_action_complete = FALSE; + script_update_fn = ScriptUpdate_Wait; } /*
os/os_trace: remove os_trace_task_* defs
@@ -45,36 +45,6 @@ os_trace_isr_exit(void) { } -static inline void -os_trace_task_info(const struct os_task *t) -{ -} - -static inline void -os_trace_task_create(const struct os_task *t) -{ -} - -static inline void -os_trace_task_start_exec(const struct os_task *t) -{ -} - -static inline void -os_trace_task_stop_exec(void) -{ -} - -static inline void -os_trace_task_start_ready(const struct os_task *t) -{ -} - -static inline void -os_trace_task_stop_ready(const struct os_task *t, unsigned reason) -{ -} - static inline void os_trace_idle(void) {
regex: fix counter of matching captures
@@ -128,7 +128,7 @@ ssize_t flb_regex_do(struct flb_regex *r, unsigned char *str, size_t slen, result->region = region; result->str = str; - return region->num_regs; + return (region->num_regs - 1); } int flb_regex_parse(struct flb_regex *r, struct flb_regex_search *result,
sctp: sctp_output.c failed to compile when VLIB_BUFFER_TRACE_TRAJECTORY is enabled Fixed a typo in sctp_push_header(). It was inherited from tcp_output.c
@@ -1381,7 +1381,7 @@ sctp_push_header (transport_connection_t * trans_conn, vlib_buffer_t * b) sctp_push_hdr_i (sctp_conn, b, SCTP_STATE_ESTABLISHED); - sctp_trajectory_add_start (b0, 3); + sctp_trajectory_add_start (b, 3); return 0; }
chip/npcx/espi.c: Format with clang-format BRANCH=none TEST=none
@@ -164,8 +164,7 @@ static void espi_vw_config_in(const struct vwevms_config_t *config) if (index == config->idx) { /* Get Wire field */ val = NPCX_VWEVMS(i) & 0x0F; - val |= VWEVMS_FIELD(config->idx, - config->idx_en, + val |= VWEVMS_FIELD(config->idx, config->idx_en, config->pltrst_en, config->int_en, config->espirst_en); @@ -196,8 +195,7 @@ static void espi_vw_config_out(const struct vwevsm_config_t *config) if (index == config->idx) { /* Preserve WIRE(3-0) and HW_WIRE (27-24). */ val = NPCX_VWEVSM(i) & 0x0F00000F; - val |= VWEVSM_FIELD(config->idx, - config->idx_en, + val |= VWEVSM_FIELD(config->idx, config->idx_en, config->valid, config->pltrst_en, config->cdrst_en); @@ -625,9 +623,8 @@ static void espi_interrupt(void) */ if (boot_load_done == 0 && IS_PERIPHERAL_CHAN_ENABLE(NPCX_ESPI_CH_VW)) { - - espi_vw_set_wire( - VW_PERIPHERAL_BTLD_STATUS_DONE, 1); + espi_vw_set_wire(VW_PERIPHERAL_BTLD_STATUS_DONE, + 1); boot_load_done = 1; } } @@ -733,6 +730,5 @@ static int command_espi(int argc, char **argv) } return EC_SUCCESS; } -DECLARE_CONSOLE_COMMAND(espi, command_espi, - "cfg/vms/vsm/en/dis [channel]", +DECLARE_CONSOLE_COMMAND(espi, command_espi, "cfg/vms/vsm/en/dis [channel]", "eSPI configurations");
[core] config mallopt(M_ARENA_MAX, 2) Preemptively cap the max number of arenas that might be created by glibc Each thread attempts to use the previously-used arena. lighttpd is single-threaded, so in general, only one arena is used. x-ref: "Memory fragmentation with HTTP/2 enabled"
@@ -65,6 +65,15 @@ static const buffer default_server_tag = { CONST_STR_LEN(PACKAGE_DESC)+1, 0 }; # include <sys/prctl.h> #endif +#ifdef HAVE_MALLOC_H +#ifndef LIGHTTPD_STATIC +#ifdef HAVE_DLFCN_H +#include <dlfcn.h> +#endif +#endif +#include <malloc.h> +#endif + #include "sys-crypto.h" #if defined(USE_OPENSSL_CRYPTO) \ || defined(USE_MBEDTLS_CRYPTO) \ @@ -1978,6 +1987,18 @@ int main (int argc, char **argv) { } #endif + #if defined(HAVE_MALLOPT) && defined(M_ARENA_MAX) + #ifdef LIGHTTPD_STATIC + mallopt(M_ARENA_MAX, 2); /*(ignore error, if any)*/ + #else + { + int (*mallopt_fn)(int, int); + mallopt_fn = (int (*)(int, int))(intptr_t)dlsym(RTLD_DEFAULT,"mallopt"); + if (mallopt_fn) mallopt_fn(M_ARENA_MAX, 2); /*(ignore error, if any)*/ + } + #endif + #endif + /* for nice %b handling in strftime() */ setlocale(LC_TIME, "C"); tzset();
VERSION bump to version 0.7.43
@@ -38,7 +38,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 7) -set(LIBNETCONF2_MICRO_VERSION 42) +set(LIBNETCONF2_MICRO_VERSION 43) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
Forcing UI SPI transfers to be synchronous
@@ -185,7 +185,11 @@ void InternalSendBytes(CLR_UINT8 *data, CLR_UINT32 length, bool sendAsync) SPI_WRITE_READ_SETTINGS wrc; wrc.Bits16ReadWrite = false; - wrc.callback = sendAsync ? spi_callback : 0; + + // setting this to 0 forces the transfer to be synchronous + // reverting to this for the time being until a definitive solution is found + wrc.callback = 0; + wrc.fullDuplex = false; wrc.readOffset = 0;
min dragging distance on slop
@@ -2369,7 +2369,8 @@ void drawwindow(const Arg *arg) { c = selmon->sel; - if (width > 50 && height > 50 && x > -40 && y > -40 && width < selmon->mw + 40 && height < selmon->mh + 40) { + if (width > 50 && height > 50 && x > -40 && y > -40 && width < selmon->mw + 40 && height < selmon->mh + 40 && + (abs(c->w - width) > 20 || abs(c->h - height) > 20 || abs(c->x - x) > 20 || abs(c->y - y) > 20)) { if ((m = recttomon(x, y, width, height)) != selmon) { sendmon(c, m); selmon = m;
Use a slightly clearer example of the `doc` fun in README
@@ -58,9 +58,9 @@ Documentation is also available locally in the REPL. Use the `(doc symbol-name)` macro to get API documentation for symbols in the core library. For example, ``` -(doc doc) +(doc apply) ``` -Shows documentation for the doc macro. +Shows documentation for the `apply` function. To get a list of all bindings in the default environment, use the `(all-bindings)` function. You
doc: explain how to use perf with skb programs explain how to use perf with skb programs
@@ -521,6 +521,8 @@ Return: 0 on success A method of a BPF_PERF_OUTPUT table, for submitting custom event data to user space. See the BPF_PERF_OUTPUT entry. (This ultimately calls bpf_perf_event_output().) +The ```ctx``` parameter is provided in [kprobes](#1-kprobes) or [kretprobes](#2-kretprobes). For ```SCHED_CLS``` or ```SOCKET_FILTER``` programs, the ```struct __sk_buff *skb``` must be used instead. + Examples in situ: [search /examples](https://github.com/iovisor/bcc/search?q=perf_submit+path%3Aexamples&type=Code), [search /tools](https://github.com/iovisor/bcc/search?q=perf_submit+path%3Atools&type=Code)
Add device model_numbers
@@ -282,7 +282,8 @@ struct key_value model_number_subtypes[] = { {"Knuckles Left", SURVIVE_OBJECT_SUBTYPE_KNUCKLES_L}, {"Knuckles EV3.0 Left", SURVIVE_OBJECT_SUBTYPE_KNUCKLES_L}, {"Utah MP", SURVIVE_OBJECT_SUBTYPE_INDEX_HMD}, {"Vive Controller MV", SURVIVE_OBJECT_SUBTYPE_WAND}, {"Vive. Controller MV", SURVIVE_OBJECT_SUBTYPE_WAND}, {"VIVE Tracker Pro MV", SURVIVE_OBJECT_SUBTYPE_TRACKER_GEN2}, - {"Vive Tracker MV", SURVIVE_OBJECT_SUBTYPE_TRACKER}, {"Vive MV", SURVIVE_OBJECT_SUBTYPE_VIVE_HMD}}; + {"Vive. Tracker MV", SURVIVE_OBJECT_SUBTYPE_TRACKER}, {"Vive Tracker MV", SURVIVE_OBJECT_SUBTYPE_TRACKER}, + {"Vive MV", SURVIVE_OBJECT_SUBTYPE_VIVE_HMD}, {"Vive. MV", SURVIVE_OBJECT_SUBTYPE_VIVE_HMD}}; static int process_jsontok(scratch_space_t *scratch, char *d, stack_entry_t *stack, jsmntok_t *t, int count) { int i, j, k; @@ -322,7 +323,8 @@ static int process_jsontok(scratch_space_t *scratch, char *d, stack_entry_t *sta if (scratch->so->object_subtype == SURVIVE_OBJECT_SUBTYPE_GENERIC) { SurviveContext *ctx = scratch->so->ctx; - SV_WARN("Unknown model_number %.*s. Please submit an issue with this value describing your device " + SV_WARN( + "Unknown model_number '%.*s'. Please submit an issue with this value describing your device " "so it can be added to the known list.", (int)len, str); }
sdio_slave: fix the recv crash when trans finish and load at the same time
@@ -1143,9 +1143,12 @@ static void sdio_intr_recv(void* arg) // This may cause the ``cur_ret`` pointer to be NULL, indicating the list is empty, // in this case the ``tx_done`` should happen no longer until new desc is appended. // The app is responsible to place the pointer to the right place again when appending new desc. + critical_enter_recv(); context.recv_cur_ret = STAILQ_NEXT(context.recv_cur_ret, qe); + critical_exit_recv(); ESP_EARLY_LOGV(TAG, "intr_recv: Give"); xSemaphoreGiveFromISR(context.recv_event, &yield); + SLC.slc0_int_clr.tx_done = 1; }; } if (yield) portYIELD_FROM_ISR(); @@ -1166,19 +1169,9 @@ esp_err_t sdio_slave_recv_load_buf(sdio_slave_buf_handle_t handle) buf_desc_t *const tail = STAILQ_LAST(queue, buf_desc_s, qe); STAILQ_INSERT_TAIL(queue, desc, qe); - if (tail == NULL || (tail->owner == 0)) { - //in this case we have to set the ret pointer - if (tail != NULL) { - /* if the owner of the tail is returned to the software, the ISR is - * expect to write this pointer to NULL in a short time, wait until - * that and set new value for this pointer - */ - while (context.recv_cur_ret != NULL) {} - } - assert(context.recv_cur_ret == NULL); + if (context.recv_cur_ret == NULL) { context.recv_cur_ret = desc; } - assert(context.recv_cur_ret != NULL); if (tail == NULL) { //no one in the ll, start new ll operation.
[mod_auth] include unistd.h for crypt() on Mac OS
#elif defined(__linux__) /* linux needs _XOPEN_SOURCE */ # define _XOPEN_SOURCE +#elif defined(__APPLE__) && defined(__MACH__) +#include <unistd.h> #endif #if defined(HAVE_LIBCRYPT) && !defined(HAVE_CRYPT)
fix issue when SSL_read/SSL_write are not funchooked when attaching to a running process (i.e node js process)
@@ -1048,20 +1048,6 @@ load_func(const char *module, const char *func) return addr; } -static int -findLibscopePath(struct dl_phdr_info *info, size_t size, void *data) -{ - int len = strlen(info->dlpi_name); - int libscope_so_len = 11; - - if(len > libscope_so_len && !strcmp(info->dlpi_name + len - libscope_so_len, "libscope.so")) { - *(char **)data = (char *) info->dlpi_name; - return 1; - } - return 0; -} - - typedef struct { const char *library; // Input: e.g. libpthread.so @@ -1127,6 +1113,19 @@ findInjected(struct dl_phdr_info *info, size_t size, void *data) return 0; } +static int +findLibscopePath(struct dl_phdr_info *info, size_t size, void *data) +{ + int len = strlen(info->dlpi_name); + int libscope_so_len = 11; + + if(len > libscope_so_len && !strcmp(info->dlpi_name + len - libscope_so_len, "libscope.so")) { + *(char **)data = (char *) info->dlpi_name; + return 1; + } + return findInjected(info, size, data); +} + /** * Detects whether the libscope library has been injected * using the `ldscope --attach PID command` and if yes, performs GOT hooking.
Adds link to blocking issue to enable upload to coveralls
@@ -41,6 +41,7 @@ jobs: make coverage lcx="lcov --output-file=coverage.info " && for i in `find . -name "*.info.cleaned"`; do lcx+=" --add-tracefile=$i"; done && $lcx # NOTE For now disabled, need to be able to configure coveralls thresholds +# See https://github.com/lemurheavy/coveralls-public/issues/1431 # - name: Coveralls # uses: coverallsapp/github-action@master # with:
fix: reset discord_gateway states after complete shutdown
@@ -1123,6 +1123,10 @@ discord_gateway_run(struct discord_gateway *gw) ++gw->reconnect.attempt; log_info("Reconnect attempt #%d", gw->reconnect.attempt); } + // reset if set + gw->is_resumable = false; + gw->reconnect.enable = false; + gw->reconnect.attempt = 0; log_error("Could not reconnect to Discord Gateway after %d tries", gw->reconnect.threshold); }
extmod/moducryptolib: Use "static" not "STATIC" for inline functions.
@@ -89,7 +89,7 @@ typedef struct _mp_obj_aes_t { uint8_t key_type: 2; } mp_obj_aes_t; -STATIC inline bool is_ctr_mode(int block_mode) { +static inline bool is_ctr_mode(int block_mode) { #if MICROPY_PY_UCRYPTOLIB_CTR return block_mode == UCRYPTOLIB_MODE_CTR; #else @@ -97,7 +97,7 @@ STATIC inline bool is_ctr_mode(int block_mode) { #endif } -STATIC inline struct ctr_params *ctr_params_from_aes(mp_obj_aes_t *o) { +static inline struct ctr_params *ctr_params_from_aes(mp_obj_aes_t *o) { // ctr_params follows aes object struct return (struct ctr_params*)&o[1]; }
ci: Fix build error
@@ -69,7 +69,7 @@ jobs: ./ci/build_nghttp3.sh - name: Setup environment variables run: | - PKG_CONFIG_PATH="$PWD/openssl/build/lib/pkgconfig:$PWD/nghttp3/build/lib/pkgconfig:$PWD/gnutls-3.7.2/build/lib/pkgconfig:$PWD/nettle-3.6/build/lib64/pkgconfig" + PKG_CONFIG_PATH="$PWD/openssl/build/lib/pkgconfig:$PWD/openssl/build/lib64/pkgconfig:$PWD/nghttp3/build/lib/pkgconfig:$PWD/gnutls-3.7.2/build/lib/pkgconfig:$PWD/nettle-3.6/build/lib64/pkgconfig" LDFLAGS="$EXTRA_LDFLAGS -Wl,-rpath,$PWD/openssl/build/lib -Wl,-rpath,$PWD/openssl/build/lib64 -Wl,-rpath,$PWD/nettle-3.6/build/lib64" BORINGSSL_CFLAGS="-I$PWD/boringssl/include/" BORINGSSL_LIBS="-L$PWD/boringssl/build/ssl -lssl -L$PWD/boringssl/build/crypto -lcrypto"
[Kernel] amc allows the Rust file manager to launch files
@@ -148,7 +148,12 @@ static void _trampoline(const char* program_name, void* buf, uint32_t buf_size) static void _amc_core_file_manager_exec_buffer(const char* source_service, void* buf, uint32_t buf_size) { // Only file_manager is allowed to invoke this code! - assert(!strncmp(source_service, "com.axle.file_manager", AMC_MAX_SERVICE_NAME_LEN), "Only File Manager may use this syscall"); + printf("Sourec service %s\n", source_service); + assert( + !strncmp(source_service, "com.axle.file_manager", AMC_MAX_SERVICE_NAME_LEN) + || !strncmp(source_service, "com.axle.file_manager2", AMC_MAX_SERVICE_NAME_LEN), + "Only File Manager may use this syscall" + ); amc_exec_buffer_cmd_t* cmd = (amc_exec_buffer_cmd_t*)buf; printf("exec buffer(program_name: %s, buffer_addr: 0x%p, buffer_size: %p)\n", cmd->program_name, cmd->buffer_addr, cmd->buffer_size);
SOVERSION bump to version 4.3.14
@@ -39,7 +39,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 4) set(SYSREPO_MINOR_SOVERSION 3) -set(SYSREPO_MICRO_SOVERSION 13) +set(SYSREPO_MICRO_SOVERSION 14) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
hv: serializing: use mfence to ensure trampoline code was updated Using the MFENCE to make sure trampoline code has been updated (clflush) into memory before starting APs.
@@ -322,6 +322,10 @@ static void start_pcpu(uint16_t pcpu_id) write_trampoline_stack_sym(pcpu_id); clac(); + /* Using the MFENCE to make sure trampoline code + * has been updated (clflush) into memory beforing start APs. + */ + cpu_memory_barrier(); send_startup_ipi(pcpu_id, startup_paddr); /* Wait until the pcpu with pcpu_id is running and set the active bitmap or
Ensure all node identifers are compiled to Prolog atoms
@@ -35,7 +35,7 @@ instance PrologGenerator AST.NetSpec where in predicate "net" [atom, node] ++ "." instance PrologGenerator AST.NodeId where - generate (AST.NodeId id) = map toLower id + generate (AST.NodeId id) = quotes $ map toLower id instance PrologGenerator AST.NodeSpec where generate nodeSpec = predicate "node" [nodeType, accept, translate, overlay] @@ -78,3 +78,6 @@ parens = enclose "(" ")" brackets :: String -> String brackets = enclose "[" "]" + +quotes :: String -> String +quotes = enclose "'" "'" \ No newline at end of file
bonding: fix packet trace in bond-input
@@ -331,15 +331,11 @@ VLIB_NODE_FN (bond_input_node) (vlib_main_t * vm, sw_if_index = sw_if_indices; next = nexts; bond_packet_trace_t *t0; - uword n_trace = vlib_get_trace_count (vm, node); - while (n_left && n_trace) + while (n_left) { if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) { - vlib_trace_buffer (vm, node, next[0], b[0], - 0 /* follow_chain */ ); - vlib_set_trace_count (vm, node, --n_trace); t0 = vlib_add_trace (vm, node, b[0], sizeof (*t0)); t0->sw_if_index = sw_if_index[0]; clib_memcpy (&t0->ethernet, vlib_buffer_get_current (b[0]),
nimble/gap: Fix error code on closing already closed periodic sync
@@ -3423,10 +3423,8 @@ ble_gap_periodic_adv_terminate_sync(uint16_t sync_handle) * the app wants to terminate that sync handle */ psync = ble_hs_periodic_sync_find(sync_handle); if (!psync) { - /* Sync already terminated. TODO: Not sure what error code - * describes this scnario the best */ - rc = BLE_HS_EALREADY; - goto done; + /* Sync already terminated.*/ + return BLE_HS_ENOTCONN; } /* Remove the handle from the list */ @@ -3446,7 +3444,6 @@ ble_gap_periodic_adv_terminate_sync(uint16_t sync_handle) rc = ble_hs_hci_cmd_tx_empty_ack(opcode, buf, sizeof(buf)); -done: return rc; }
ci/integration-tests: Install fixed versions of SPOv0.4.2 and CertManagerv1.7.2 Use the latest fixed releases and do not patch the security-profiles-operator-webhook; the webhook patch step was removed.
@@ -92,9 +92,9 @@ var deploySPO *command = &command{ // security-profiles-operator-webhook to be started, hence the long // timeout cmd: ` - kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.yaml + kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.7.2/cert-manager.yaml kubectl --namespace cert-manager wait --for condition=ready pod -l app.kubernetes.io/instance=cert-manager - curl https://raw.githubusercontent.com/kubernetes-sigs/security-profiles-operator/main/deploy/operator.yaml | \ + curl https://raw.githubusercontent.com/kubernetes-sigs/security-profiles-operator/v0.4.2/deploy/operator.yaml | \ sed 's/replicas: 3/replicas: 1/'|grep -v cpu: | \ kubectl apply -f - for i in $(seq 1 120); do @@ -103,8 +103,6 @@ var deploySPO *command = &command{ fi sleep 1 done - kubectl patch deploy -n security-profiles-operator security-profiles-operator-webhook --type=json \ - -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}]' kubectl patch ds -n security-profiles-operator spod --type=json \ -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/resources"}, {"op": "remove", "path": "/spec/template/spec/containers/1/resources"}, {"op": "remove", "path": "/spec/template/spec/initContainers/0/resources"}]' kubectl --namespace security-profiles-operator wait --for condition=ready pod -l app=security-profiles-operator || (kubectl get pod -n security-profiles-operator ; kubectl get events -n security-profiles-operator ; false) @@ -129,8 +127,8 @@ var cleanupSPO *command = &command{ name: "Remove Security Profiles Operator (SPO)", cmd: ` kubectl delete seccompprofile -n security-profiles-operator --all - kubectl delete -f https://raw.githubusercontent.com/kubernetes-sigs/security-profiles-operator/main/deploy/operator.yaml - kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.7.1/cert-manager.yaml + kubectl delete -f 
https://raw.githubusercontent.com/kubernetes-sigs/security-profiles-operator/v0.4.2/deploy/operator.yaml + kubectl delete -f https://github.com/jetstack/cert-manager/releases/download/v1.7.2/cert-manager.yaml `, cleanup: true, }
Avoid flushing amid packet on sudden connection termination
@@ -384,7 +384,7 @@ od_relay_flush(od_relay_t *relay) if (relay->dst == NULL) return OD_OK; - if (! machine_iov_pending(relay->iov)) + if (! machine_iov_pending(relay->iov) && (relay->packet == 0)) return OD_OK; int rc; @@ -392,7 +392,7 @@ od_relay_flush(od_relay_t *relay) if (rc != OD_OK) return rc; - if (! machine_iov_pending(relay->iov)) + if (! machine_iov_pending(relay->iov) && (relay->packet == 0)) return OD_OK; rc = machine_write_start(relay->dst->io, relay->dst->on_write); @@ -401,7 +401,7 @@ od_relay_flush(od_relay_t *relay) for (;;) { - if (! machine_iov_pending(relay->iov)) + if (! machine_iov_pending(relay->iov) && (relay->packet == 0)) break; machine_cond_wait(relay->dst->on_write, UINT32_MAX);
Make auxiliary function static
@@ -467,7 +467,7 @@ exit: return( status ); } -void aead_multipart_decrypt_internal( int key_type_arg, data_t *key_data, +static void aead_multipart_decrypt_internal( int key_type_arg, data_t *key_data, int alg_arg, data_t *nonce, data_t *additional_data,
Move special cases of config to separate function
#include "recon.h" +static void set_bloch_conf(enum mdb_t mode, struct mdb_irgnm_l1_conf* conf2, struct moba_conf_s* data) +{ + + // T2 estimation turned off for IR FLASH Simulation + + if (MDB_BLOCH == mode) { + + assert(NULL != data); + + if (SEQ_IRFLASH == data->sim.seq.seq_type) { + + conf2->constrained_maps = 1; // only R1 map: bitmask (1 0 0 0) = 1 + conf2->not_wav_maps = 2; // no wavelet for T2 and B1 map + } + else if (SEQ_IRBSSFP == data->sim.seq.seq_type) { + + conf2->constrained_maps = 5; // only T1 and T2: bitmask(1 0 1 0) = 5 + conf2->not_wav_maps = 1; // no wavelet for B1 map + } + } + + // No Wavelet penalty on flip angle map + if (MDB_T1_PHY == mode) { + + conf2->constrained_maps = 2; // only R1 map: bitmask (0 1 0) = 2 + conf2->not_wav_maps = 1; + } +} + static struct mobamod exp_create(const long dims[DIMS], const complex float* mask, const complex float* TE, const complex float* psf, const struct noir_model_conf_s* conf) @@ -183,30 +212,7 @@ static void recon(const struct moba_conf* conf, struct moba_conf_s* data, .not_wav_maps = 0 }; - // T2 estimation turned off for IR FLASH Simulation - - if (MDB_BLOCH == conf->mode) { - - assert(NULL != data); - - if (SEQ_IRFLASH == data->sim.seq.seq_type) { - - conf2.constrained_maps = 1; // only R1 map: bitmask (1 0 0 0) = 1 - conf2.not_wav_maps = 2; // no wavelet for T2 and B1 map - } - else if (SEQ_IRBSSFP == data->sim.seq.seq_type) { - - conf2.constrained_maps = 5; // only T1 and T2: bitmask(1 0 1 0) = 5 - conf2.not_wav_maps = 1; // no wavelet for B1 map - } - } - - // No Wavelet penalty on flip angle map - if (MDB_T1_PHY == conf->mode) { - - conf2.constrained_maps = 2; // only R1 map: bitmask (0 1 0) = 2 - conf2.not_wav_maps = 1; - } + set_bloch_conf(conf->mode, &conf2, data); long irgnm_conf_dims[DIMS]; md_select_dims(DIMS, fft_flags|MAPS_FLAG|COEFF_FLAG|TIME2_FLAG, irgnm_conf_dims, imgs_dims);
Fix assertion failure in ngtcp2_conn_loss_detection_timer Don't reset cstat->last_tx_pkt_ts which makes assert(earliest_pktns) fail.
@@ -514,16 +514,22 @@ static int ts_retired_less(const ngtcp2_pq_entry *lhs, } static void conn_reset_conn_stat(ngtcp2_conn *conn, ngtcp2_conn_stat *cstat) { - memset(cstat, 0, sizeof(*cstat)); + cstat->latest_rtt = 0; + cstat->min_rtt = UINT64_MAX; cstat->smoothed_rtt = NGTCP2_DEFAULT_INITIAL_RTT; cstat->rttvar = NGTCP2_DEFAULT_INITIAL_RTT / 2; - cstat->min_rtt = UINT64_MAX; + cstat->pto_count = 0; + cstat->loss_detection_timer = 0; // Initializes them with UINT64_MAX. - memset(cstat->last_tx_pkt_ts, 0xff, sizeof(cstat->last_tx_pkt_ts)); memset(cstat->loss_time, 0xff, sizeof(cstat->loss_time)); - cstat->max_udp_payload_size = conn->local.settings.max_udp_payload_size; - cstat->cwnd = ngtcp2_cc_compute_initcwnd(cstat->max_udp_payload_size); + cstat->cwnd = + ngtcp2_cc_compute_initcwnd(conn->local.settings.max_udp_payload_size); cstat->ssthresh = UINT64_MAX; + cstat->congestion_recovery_start_ts = 0; + cstat->bytes_in_flight = 0; + cstat->max_udp_payload_size = conn->local.settings.max_udp_payload_size; + cstat->delivery_rate_sec = 0; + cstat->recv_rate_sec = 0; } static void conn_reset_rx_rate(ngtcp2_conn *conn) {
Compile triggers
@@ -111,17 +111,6 @@ const compile = async ( // Add scene data const scenePtrs = precompiled.sceneData.map((scene, sceneIndex) => { - // console.log("SCENE: " + sceneIndex, { - // imageIndex: scene.imageIndex, - // hi: hi(scene.imageIndex), - // lo: lo(scene.imageIndex), - // spriteLen: scene.sprites.length, - // sprties: scene.sprites, - // actors: compileActors(scene.actors, { - // eventPtrs: eventPtrs[sceneIndex].actors, - // sprites: precompiled.usedSprites - // }) - // }); return banked.push( [].concat( hi(scene.imageIndex), @@ -134,32 +123,10 @@ const compile = async ( sprites: precompiled.usedSprites }), scene.triggers.length, - // flatten( - // scene.actors.map( - // compileActor({ eventPtrs: eventPtrs[sceneIndex].actors }) - // ) - // ), - scene.collisions - // scene.actors.reduce( - // (memo, actor, actorIndex) => - // [].concat( - // memo, - // compileActor(actor, { - // eventsPtr: eventPtrs[sceneIndex].actors[actorIndex] - // }) - // ), - // [] - // ), - // scene.triggers.reduce( - // (memo, trigger, triggerIndex) => - // [].concat( - // memo, - // compileTrigger(trigger, { - // eventsPtr: eventPtrs[sceneIndex].triggers[triggerIndex] - // }) - // ), - // [] - // ) + compileTriggers(scene.triggers, { + eventPtrs: eventPtrs[sceneIndex].triggers + }), + scene.collisions.slice(0, (32 * 32) / 8) ) ); }); @@ -476,6 +443,7 @@ export const compileActors = (actors, { eventPtrs, sprites }) => { ); }; +/* export const compileTrigger = (trigger, { eventsPtr }) => { // console.log("TRIGGER", trigger, eventsPtr); return [ @@ -489,6 +457,24 @@ export const compileTrigger = (trigger, { eventsPtr }) => { lo(eventsPtr.offset) ]; }; +*/ + +export const compileTriggers = (triggers, { eventPtrs }) => { + return flatten( + triggers.map((trigger, triggerIndex) => { + return [ + trigger.x, + trigger.y, + trigger.width, + trigger.height, + trigger.trigger === "action" ? 
1 : 0, + eventPtrs[triggerIndex].bank, // Event bank ptr + hi(eventPtrs[triggerIndex].offset), // Event offset ptr + lo(eventPtrs[triggerIndex].offset) + ]; + }) + ); +}; //#endregion
Update PORT_LIBRARY.md
@@ -66,21 +66,7 @@ When the extension compiled, see the content of the `build/lib[..]`. You should Now, if you have many `.so` files, this will be hard. `.so` files cannot be directly embedded on the app bundle because the App Store will automatically reject that. We have to make frameworks from those binaries. -`cd` into the Pyto repo and create a folder named as the library you compiled with a capital. Then, create a `.framework` folder with an `Info.plist` inside it for each `.so` file you have. - - $ cd Pyto - $ mkdir NumPy - $ cd NumPy - $ mkdir _umath_linalg.framework fftpack_lite.framework lapack_lite.framework mtrand.framework _multiarray_umath.framework - $ for FRAMEWORK in *.framework - do - touch $FRAMEWORK/Info.plist - done - $ - -Also, create an `Info.plist` file inside the folder containing frameworks. The content can be anything. It's just for the Xcode project. - - $ touch Info.plist +Make a framework for each `.so` file. Now, copy every `.so` file into its corresponding framework. @@ -121,11 +107,7 @@ NOTE: Here, by library name, I mean the name of the `.so` file and not the entir Replace `[FILE NAME]` by the `.so` contained on the framework file name (include the extension), replace `[BUNDLE NAME]` by the name of the library, without extensions. And replace `[BUNDLE IDENTIFIER]` by the bundle identifier. For example: "com.yourcompany.libraryname". It cannot contain underscores. -Add the folder you created to the Xcode project (as group and NOT folder reference!). Then, add each `Info.plist` file contained in frameworks on the just created group. Make sure to UNCHECK "Copy Items if Needed" . - -Then, select Xcode project on sidebar and select "Pyto" target. Go to Build Phases and add all frameworks you added on "Embed Frameworks" if they are not already added. - -For each framework, add a Copy Files Phase. Set "Frameworks" as destination. Drag a `.so` file and the corresponding `Info.plist`. 
Type the corresponding framework file name with extension on "Subpath". Make sure "Code Sign On Copy" is checked for the `.so` file. +Now you can embed the frameworks, without linking. Run on device to check it works.
s390x assembly pack: fix x448 handling of non-canonical values The s390x x448 implementation does not correctly reduce non-canonical values i.e., u-coordinates >= p = 2^448 - 2^224 - 1.
@@ -907,11 +907,9 @@ static void s390x_x448_mod_p(unsigned char u[56]) c >>= 8; } - if (u_red[0] & 0x80) { - u_red[0] &= 0x7f; + if (c) memcpy(u, u_red, sizeof(u_red)); } -} static int s390x_x25519_mul(unsigned char u_dst[32], const unsigned char u_src[32], @@ -966,7 +964,7 @@ static int s390x_x448_mul(unsigned char u_dst[56], memcpy(param.x448.d_src, d_src, 56); s390x_flip_endian64(param.x448.u_src, param.x448.u_src); - s390x_x448_mod_p(param.x448.u_src); + s390x_x448_mod_p(param.x448.u_src + 8); s390x_flip_endian64(param.x448.d_src, param.x448.d_src); param.x448.d_src[63] &= 252;
constrain replace
@@ -25,7 +25,7 @@ import("core.project.project") function _get_builtinvars(target, installdir) return {TARGETNAME = target:name(), PROJECTNAME = project.name() or target:name(), - TARGETFILENAME = path.filename(target:targetfile()):replace("dll", "lib"), + TARGETFILENAME = is_plat("windows", "mingw") and path.filename(target:targetfile()):gsub(".dll$", ".lib") or path.filename(target:targetfile()), TARGETKIND = target:is_shared() and "SHARED" or "STATIC", PACKAGE_VERSION = target:get("version") or "1.0.0", TARGET_PTRBYTES = target:is_arch("x86", "i386") and "4" or "8"}
add unit test for YAML anchors
@@ -2402,6 +2402,68 @@ cfgReadCustomOrder(void **state) cfgDestroy(&config); } +static void +cfgReadCustomAnchor(void **state) +{ + const char *yamlText = + // define a config using an anchor + "disable-metrics: &disable-metrics\n" + " metric:\n" + " enable: false\n" + "custom:\n" + " eg1:\n" + " filter:\n" + " procname: test\n" + // use that config as a custom config + " config: *disable-metrics\n" + "# EOF\n"; + const char *yamlFilename = "/tmp/eg-scope.yml"; + writeFile(yamlFilename, yamlText); + initProc("test", "test --with args", "myhost"); + config_t* config = cfgRead(yamlFilename); + deleteFile(yamlFilename); + assert_non_null(config); + + assert_int_equal(cfgMtcEnable(config), FALSE); + + cfgDestroy(&config); +} + +static void +cfgReadCustomAnchorExtend(void **state) +{ + // We're skipping this test right now because the << extend syntax is not + // supported OOB by libyaml. We would need to add support for this ourself. + skip(); + + const char *yamlText = + // define a config using an anchor + "disable-metrics: &disable-metrics\n" + " metric:\n" + " enable: false\n" + "custom:\n" + " eg1:\n" + " filter:\n" + " procname: test\n" + " config:\n" + // include that config and extend it in a custom config + " <<: *disable-metrics\n" + " payload:\n" + " enable: true\n" + "# EOF\n"; + const char *yamlFilename = "/tmp/eg-scope.yml"; + writeFile(yamlFilename, yamlText); + initProc("test", "test --with args", "myhost"); + config_t* config = cfgRead(yamlFilename); + deleteFile(yamlFilename); + assert_non_null(config); + + assert_int_equal(cfgMtcEnable(config), FALSE); + assert_int_equal(cfgPayEnable(config), TRUE); + + cfgDestroy(&config); +} + // Defined in src/cfgutils.c // This is not a proper test, it just exists to make valgrind output // more readable when analyzing this test, by deallocating the compiled @@ -2491,6 +2553,8 @@ main(int argc, char* argv[]) cmocka_unit_test(cfgReadCustomMultipleFilters), cmocka_unit_test(cfgReadCustomOverride), 
cmocka_unit_test(cfgReadCustomOrder), + cmocka_unit_test(cfgReadCustomAnchor), + cmocka_unit_test(cfgReadCustomAnchorExtend), cmocka_unit_test(envRegexFree), }; return cmocka_run_group_tests(tests, groupSetup, groupTeardown);
[mod_webdav] ignore PROPFIND Depth for files (thx meeb5) ignore PROPFIND "Depth" request header for files (non-collections) RFC4918 10.2. Depth Header "If a resource does not have internal members, then the Depth header MUST be ignored." x-ref: "Webdav + rclone backup"
@@ -4014,9 +4014,8 @@ mod_webdav_propfind (request_st * const r, const plugin_config * const pconf) http_status_set_error(r, 403); return HANDLER_FINISHED; } - else if (0 != pb.depth) { - http_status_set_error(r, 403); - return HANDLER_FINISHED; + else { + pb.depth = 0; } pb.proplist.ptr = NULL;
Enable CRT_SHADER_SUPPORT and SDLGPU in wasm build
@@ -78,6 +78,10 @@ if(NOT BUILD_SDL) set(BUILD_SDLGPU OFF) endif() +if (EMSCRIPTEN) + set(BUILD_SDLGPU ON) +endif() + message("BUILD_SDLGPU: ${BUILD_SDLGPU}") message("BUILD_TOUCH_INPUT: ${BUILD_TOUCH_INPUT}") @@ -746,6 +750,10 @@ if(BUILD_SDLGPU) target_compile_definitions(tic80studio PUBLIC CRT_SHADER_SUPPORT) endif() +if(EMSCRIPTEN) + target_compile_definitions(tic80studio PUBLIC CRT_SHADER_SUPPORT) +endif() + ################################ # SDL GPU ################################
Style fixed for keyboard.c
* Update the shortcut model state in response to new input */ static void update_shortcut_model(struct sway_shortcut_state *state, - struct wlr_event_keyboard_key * event, - uint32_t new_key, + struct wlr_event_keyboard_key *event, uint32_t new_key, bool last_key_was_a_modifier) { if (event->state == WLR_KEY_PRESSED) { if (last_key_was_a_modifier && state->last_key_index >= 0) { @@ -26,7 +25,7 @@ static void update_shortcut_model(struct sway_shortcut_state* state, } // Add current key to set; there may be duplicates - for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; i++) { + for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; ++i) { if (!state->pressed_keys[i]) { state->pressed_keys[i] = new_key; state->pressed_keycodes[i] = event->keycode; @@ -35,7 +34,7 @@ static void update_shortcut_model(struct sway_shortcut_state* state, } } } else { - for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; i++) { + for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; ++i) { // The same keycode may match multiple keysyms. 
if (state->pressed_keycodes[i] == event->keycode) { state->pressed_keys[i] = 0; @@ -54,7 +53,7 @@ static struct sway_binding* check_shortcut_model( struct sway_shortcut_state *state, list_t *bindings, uint32_t modifiers, bool locked) { int npressed_keys = 0; - for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; i++) { + for (size_t i = 0; i < SWAY_KEYBOARD_PRESSED_KEYS_CAP; ++i) { if (state->pressed_keys[i]) { ++npressed_keys; } @@ -73,7 +72,7 @@ static struct sway_binding* check_shortcut_model( uint32_t key = *(uint32_t *)binding->keys->items[j]; bool key_found = false; - for (int k = 0; k < SWAY_KEYBOARD_PRESSED_KEYS_CAP; k++) { + for (int k = 0; k < SWAY_KEYBOARD_PRESSED_KEYS_CAP; ++k) { if (state->pressed_keys[k] == key) { key_found = true; break; @@ -217,12 +216,12 @@ static void handle_keyboard_key(struct wl_listener *listener, void *data) { // Update shortcut models update_shortcut_model(&keyboard->state_keycodes, event, (uint32_t)keycode, last_key_was_a_modifier); - for (size_t i=0;i<translated_keysyms_len;i++) { + for (size_t i = 0; i < translated_keysyms_len; ++i) { update_shortcut_model(&keyboard->state_keysyms_translated, event, (uint32_t)translated_keysyms[i], last_key_was_a_modifier); } - for (size_t i=0;i<raw_keysyms_len;i++) { + for (size_t i = 0; i < raw_keysyms_len; ++i) { update_shortcut_model(&keyboard->state_keysyms_raw, event, (uint32_t)raw_keysyms[i], last_key_was_a_modifier);
Bundle step for source
@@ -34,6 +34,10 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Bundle + working-directory: ${{runner.workspace}}/libsurvive + run: 7z a ${{runner.workspace}}/${{ steps.bundle.outputs.BUNDLE_FILE_NAME }} ${{runner.workspace}}/libsurvive + - uses: actions/upload-artifact@v2 name: Upload with:
BugID:17784971:move kernel/hal to osal 5
@@ -27,7 +27,7 @@ $(NAME)_SOURCES-y += interfaces/netmgr_net.c GLOBAL_INCLUDES-y += ../include/hal/ endif -$(NAME)_COMPONENTS-y += kernel.fs.kv yloop kernel.hal +$(NAME)_COMPONENTS-y += kernel.fs.kv yloop GLOBAL_INCLUDES-y += include ../../middleware/alink/protocol/os/platform/
symbols: Add libelektra_1.0 as new version
@@ -9,6 +9,9 @@ libelektra_0.8 { libelektra_0.9 { } libelektra_0.8; +libelektra_1.0 { +} libelektra_0.9; + # This is used for private symbols, which shall only be used by Elektra itself. # In future this may be used for ABI-checking tools. #
tests: runtime: filter_parser: fix test case, use new double timestamp
@@ -101,7 +101,7 @@ void flb_test_filter_parser_extract_fields() TEST_CHECK_(output != NULL, "Expected output to not be NULL"); if (output != NULL) { /* check timestamp */ - expected = "[\"\\x56\\x54\\xffffffe1\\xffffff8c\\x00\\x00\\x00\\x00\", {"; + expected = "[1448403340.000000, {"; TEST_CHECK_(strstr(output, expected) != NULL, "Expected output to contain '%s', got '%s'", expected, output); /* check fields were extracted */ expected = "\"INT\":\"100\", \"FLOAT\":\"0.5\", \"BOOL\":\"true\", \"STRING\":\"This is an example\""; @@ -262,7 +262,7 @@ void flb_test_filter_parser_handle_time_key() if (output != NULL) { /* check the timestamp field was updated correctly */ /* this is in fluent-bits extended timestamp format */ - expected = "[\"\\x59\\xfffffffa\\x49\\xffffffd1\\x26\\xffffff9f\\xffffffb2\\x00\", {"; + expected = "[1509575121.648000, {"; TEST_CHECK_(strstr(output, expected) != NULL, "Expected output to contain '%s', got '%s'", expected, output); /* check additional field is preserved */ expected = "\"message\":\"This is an example\""; @@ -378,7 +378,9 @@ void flb_test_filter_parser_ignore_malformed_time() NULL); /* Parser */ - parser = flb_parser_create("timestamp", "regex", "^(?<time>.*)$", "%Y-%m-%dT%H:%M:%S.%L", "time", NULL, MK_FALSE, + parser = flb_parser_create("timestamp", "regex", + "^(?<time>.*)$", "%Y-%m-%dT%H:%M:%S.%L", "time", + NULL, FLB_FALSE, NULL, 0, NULL, ctx->config); TEST_CHECK(parser != NULL); @@ -416,7 +418,7 @@ void flb_test_filter_parser_ignore_malformed_time() TEST_CHECK_(output != NULL, "Expected output to not be NULL"); if (output != NULL) { /* check the timestamp field was ignored and we received everything else */ - expected = "[\"\\x56\\x54\\xffffffe1\\xffffff8c\\x00\\x00\\x00\\x00\", {\"@timestamp\":\"2017_$!^-11-01T22:25:21.648\", \"log\":\"An example\"}]"; + expected = "[1448403340.000000, {\"@timestamp\":\"2017_$!^-11-01T22:25:21.648\", \"log\":\"An example\"}]"; TEST_CHECK_(strcmp(output, expected) == 0, "Expected output to 
be '%s', got '%s'", expected, output); free(output); }
HyperV storage driver: use LUN as volume attachment point The Azure API allows specifying a LUN (Logical Unit Number) when attaching a volume to an instance, thus the LUN value can be used in the disk driver to identify the volume attachment point.
@@ -732,7 +732,7 @@ closure_function(5, 0, void, storvsc_read_capacity_done, block_io in = closure(s->general, storvsc_read, s); block_io out = closure(s->general, storvsc_write, s); - apply(bound(a), storage_init_req_handler(&s->req_handler, in, out), s->capacity, -1); + apply(bound(a), storage_init_req_handler(&s->req_handler, in, out), s->capacity, lun); out: closure_finish(); }