message
stringlengths
6
474
diff
stringlengths
8
5.22k
mangle: function name change
@@ -405,7 +405,7 @@ static void mangle_Magic(run_t* run, bool printable) { } } -static void mangle_CmpFeedback(run_t* run, bool printable) { +static void mangle_ConstCmpFeedback(run_t* run, bool printable) { if (!run->global->feedback.cmpFeedback) { return mangle_Magic(run, printable); } @@ -644,7 +644,7 @@ void mangle_mangleContent(run_t* run) { mangle_Bit, mangle_Bytes, mangle_Magic, - mangle_CmpFeedback, + mangle_ConstCmpFeedback, mangle_IncByte, mangle_DecByte, mangle_NegByte,
Updated documentation to use cmake
-cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.2) # Adding customized cmake module list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules/") @@ -8,7 +8,18 @@ project(ccl) # Uses system libraries or downloads and build if necessary include(BuildFFTW) include(BuildGSL) + + # If a CLASS installation folder is provided, use it, + # otherwise download and compile CLASS + if(EXTERNAL_CLASS_PATH) + message("Using external install of CLASS") + set(CLASS_LIBRARY_DIRS ${EXTERNAL_CLASS_PATH}) + set(CLASS_INCLUDE_DIRS ${EXTERNAL_CLASS_PATH}/include) + set(CLASS_LIBRARIES -lclass) + set(CLASS_EXTERNAL True) + else(EXTERNAL_CLASS_PATH) include(BuildCLASS) + endif(EXTERNAL_CLASS_PATH) # Compilation flags if ("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang") @@ -38,8 +49,10 @@ project(ccl) # Builds the main CCL library add_library(ccl ${CCL_SRC}) - add_dependencies(ccl CLASS) target_link_libraries(ccl ${GSL_LIBRARIES} ${FFTW_LIBRARIES} ${CLASS_LIBRARIES} m) + if(NOT CLASS_EXTERNAL) + add_dependencies(ccl CLASS) + endif() if(NOT GSL_FOUND) add_dependencies(ccl GSL) endif() @@ -66,7 +79,7 @@ project(ccl) #find_package(PythonInterp) find_package(PythonLibsNew) - find_package(NumPy REQUIRED) + find_package(NumPy) include_directories(BEFORE ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIRS}) # Builds swig python module in place
ensure has queries check
@@ -34,6 +34,11 @@ namespace NCatboostCuda { return stat; } + template<class TObjective> + void EnsureHasQueries(const TObjective& objective) { + CB_ENSURE(objective.GetSamplesGrouping().GetQueryCount() < objective.GetTarget().GetSamplesMapping().GetObjectsSlice().Size(), "Error: no queries or all query sizes are 1"); + } + /* * Target is objective function for samples subset from one dataset * Indices are index of samples in dataSet @@ -188,6 +193,7 @@ namespace NCatboostCuda { const TSlice& slice) : TTargetFunc<TMapping>(dataSet, random, slice) { + EnsureHasQueries(*this); } //for template costructs are generated on use. So will fail in compile time with wrong types :) @@ -195,6 +201,7 @@ namespace NCatboostCuda { TQuerywiseTarget(const TDataSet& dataSet, TGpuAwareRandom& random) : TTargetFunc<TMapping>(dataSet, random) { + EnsureHasQueries(*this); } //to make stripe target from mirror one @@ -202,15 +209,18 @@ namespace NCatboostCuda { TTarget<TMapping>&& target) : TTargetFunc<TMapping>(basedOn, std::move(target)) { + EnsureHasQueries(*this); } TQuerywiseTarget(const TQuerywiseTarget& target, const TSlice& slice) : TTargetFunc<TMapping>(target, slice) { + EnsureHasQueries(*this); } TQuerywiseTarget(const TQuerywiseTarget& target) : TTargetFunc<TMapping>(target) { + EnsureHasQueries(*this); } TQuerywiseTarget(TQuerywiseTarget&& other) = default; @@ -234,6 +244,7 @@ namespace NCatboostCuda { TGpuAwareRandom& random) : TTargetFunc<TMapping>(dataSet, random) { + EnsureHasQueries(*this); } TNonDiagQuerywiseTarget(TNonDiagQuerywiseTarget&& other) = default;
ci: improve docs pattern from Kconfig to Kconfig* for example: Kconfig.spiram.common will now be included
- ".gitlab/ci/docs.yml" - "docs/**/*" - "components/**/*.h" - - "components/**/Kconfig" + - "components/**/Kconfig*" - "components/**/CMakeList.txt" - "components/**/sdkconfig*" - "tools/kconfig_new/**/*"
tell about loose tests
@@ -89,7 +89,13 @@ int main_nrmse(int argc, char* argv[argc]) unmap_cfl(DIMS, ref_dims, ref); unmap_cfl(DIMS, in_dims, in); - return ((test == -1.) || (err <= test)) ? 0 : 1; + if (test == -1.) + test = err; + + if (err < test * 0.1) + debug_printf(DP_DEBUG2, "Loose test: %f <= %f x %f\n", err, 0.1, test); + + return (err <= test) ? 0 : 1; }
Add info to button events
@@ -1014,7 +1014,10 @@ static ButtonQueueEntry *incrementAndPostButtonQueue(SurviveObject *so) { return 0; ButtonQueueEntry *entry = &(ctx->buttonQueue.entry[ctx->buttonQueue.nextWriteIndex]); - SV_VERBOSE(100, "%s Button event %d %d", survive_colorize_codename(so), entry->eventType, entry->buttonId); + SV_VERBOSE(100, "%s Button event %s %d %s %f", survive_colorize_codename(so), + SurviveInputEventStr(entry->eventType), entry->buttonId, + SurviveAxisStr(so->object_subtype, entry->ids[0]), entry->axisValues[0]); + for (int i = 0; i < (sizeof(entry->ids) / sizeof(entry->ids[0])) && entry->ids[i] != 255; i++) so->axis[entry->ids[i]] = entry->axisValues[i];
NODE: real v0.3.0 with proper prebuild-install command
"lib": "lib" }, "scripts": { - "install": "prebuild-install || node-gyp rebuild", + "install": "prebuild-install --tag-prefix node-v || node-gyp rebuild", "format-cxx": "git-clang-format", "format-js": "prettier --print-width 100 --tab-width 2 --single-quote --write index.js 'test/**/*.js' 'lib/**/*.js'", "lint": "eslint lib test",
out_tcp: use new upstream prototype for tls handling
@@ -72,7 +72,7 @@ struct flb_out_tcp *flb_tcp_conf_create(struct flb_output_instance *ins, upstream = flb_upstream_create(config, ins->host.name, ins->host.port, - io_flags, (void *) &ins->tls); + io_flags, ins->tls); if (!upstream) { flb_plg_error(ctx->ins, "could not create upstream context"); flb_free(ctx);
libhfuzz/memcmp: no need to save haystacks
@@ -103,7 +103,6 @@ static inline char* HF_strstr(const char* haystack, const char* needle, uintptr_ return (char*)haystack; } - instrumentAddConstStr(haystack); instrumentAddConstStr(needle); const char* h = haystack; @@ -122,7 +121,6 @@ static inline char* HF_strcasestr(const char* haystack, const char* needle, uint return (char*)haystack; } - instrumentAddConstStr(haystack); instrumentAddConstStr(needle); for (size_t i = 0; haystack[i]; i++) { @@ -169,7 +167,6 @@ static inline void* HF_memmem(const void* haystack, size_t haystacklen, const vo return (void*)haystack; } - instrumentAddConstMem(haystack, haystacklen, /* check_if_ro= */ true); instrumentAddConstMem(needle, needlelen, /* check_if_ro= */ true); const char* h = haystack;
deprecated must be error, not warning Note: mandatory check (NEED_CHECK) was skipped
@@ -1374,10 +1374,6 @@ class GnuCompiler(Compiler): if self.tc.is_clang: self.sfdl_flags.append('-Qunused-arguments') - self.c_warnings += [ - '-Wno-error=deprecated', - ] - self.cxx_warnings += [ '-Wimport-preprocessor-directive-pedantic', '-Wno-c++17-extensions',
tls: Modify tls optimize both mbedtls and wolfssl
@@ -309,18 +309,19 @@ static int base_destroy(esp_transport_handle_t t) return 0; } - void esp_transport_ssl_enable_global_ca_store(esp_transport_handle_t t) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.use_global_ca_store = true; } +#ifdef CONFIG_ESP_TLS_PSK_VERIFICATION void esp_transport_ssl_set_psk_key_hint(esp_transport_handle_t t, const psk_hint_key_t* psk_hint_key) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.psk_hint_key = psk_hint_key; } +#endif void esp_transport_ssl_set_cert_data(esp_transport_handle_t t, const char *data, int len) { @@ -371,11 +372,13 @@ void esp_transport_ssl_set_client_key_data_der(esp_transport_handle_t t, const c ssl->cfg.clientkey_bytes = len; } +#if defined(CONFIG_MBEDTLS_SSL_ALPN) || defined(CONFIG_WOLFSSL_HAVE_ALPN) void esp_transport_ssl_set_alpn_protocol(esp_transport_handle_t t, const char **alpn_protos) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.alpn_protos = alpn_protos; } +#endif void esp_transport_ssl_skip_common_name_check(esp_transport_handle_t t) { @@ -383,17 +386,21 @@ void esp_transport_ssl_skip_common_name_check(esp_transport_handle_t t) ssl->cfg.skip_common_name = true; } +#ifdef CONFIG_ESP_TLS_USE_SECURE_ELEMENT void esp_transport_ssl_use_secure_element(esp_transport_handle_t t) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.use_secure_element = true; } +#endif +#ifdef CONFIG_MBEDTLS_CERTIFICATE_BUNDLE void esp_transport_ssl_crt_bundle_attach(esp_transport_handle_t t, esp_err_t ((*crt_bundle_attach)(void *conf))) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.crt_bundle_attach = crt_bundle_attach; } +#endif static int base_get_socket(esp_transport_handle_t t) { @@ -404,11 +411,13 @@ static int base_get_socket(esp_transport_handle_t t) return INVALID_SOCKET; } +#ifdef CONFIG_ESP_TLS_USE_DS_PERIPHERAL void esp_transport_ssl_set_ds_data(esp_transport_handle_t t, void *ds_data) { GET_SSL_FROM_TRANSPORT_OR_RETURN(ssl, t); ssl->cfg.ds_data = ds_data; } +#endif void 
esp_transport_ssl_set_keep_alive(esp_transport_handle_t t, esp_transport_keep_alive_t *keep_alive_cfg) {
Fix cli integration test closes
@@ -153,8 +153,8 @@ starttest "Scope ps" # Scope ps run scope ps -outputs "ID PID USER COMMAND SCOPED -1 ${sleep_pid} root sleep true" +outputs "ID PID USER COMMAND +1 ${sleep_pid} root sleep 1000" returns 0
test: keep lineno in conf because it's used in some tests amending
@@ -226,7 +226,14 @@ sub spawn_h2o { $max_ssl_version = $conf->{max_ssl_version} || undef; $conf = $conf->{conf}; } + my $user; + if ($conf =~ /^user:/) { # do not override the specified user + $user = ""; + } else { + $user = $< == 0 ? "user: root" : ""; + } $conf = <<"EOT"; +$conf listen: host: 0.0.0.0 port: $port @@ -237,8 +244,7 @@ listen: key-file: examples/h2o/server.key certificate-file: examples/h2o/server.crt @{[$max_ssl_version ? "max-version: $max_ssl_version" : ""]} -@{[$< == 0 ? "user: root" : ""]} -$conf +$user EOT my $ret = spawn_h2o_raw($conf, [$port, $tls_port], \@opts);
baseboard/intelrvp/adlrvp.c: Format with clang-format BRANCH=none TEST=none
@@ -253,8 +253,8 @@ void board_overcurrent_event(int port, int is_overcurrented) { /* Port 0 & 1 and 2 & 3 share same line for over current indication */ #if defined(HAS_TASK_PD_C2) - enum ioex_signal oc_signal = port < TYPE_C_PORT_2 ? - IOEX_USB_C0_C1_OC : IOEX_USB_C2_C3_OC; + enum ioex_signal oc_signal = port < TYPE_C_PORT_2 ? IOEX_USB_C0_C1_OC : + IOEX_USB_C2_C3_OC; #else enum ioex_signal oc_signal = IOEX_USB_C0_C1_OC; #endif @@ -340,11 +340,11 @@ void set_charger_system_voltage(void) * on AC or AC+battery */ if (extpower_is_present() && battery_is_present()) { - bq25710_set_min_system_voltage(CHARGER_SOLO, - battery_get_info()->voltage_min); + bq25710_set_min_system_voltage( + CHARGER_SOLO, battery_get_info()->voltage_min); } else { - bq25710_set_min_system_voltage(CHARGER_SOLO, - battery_get_info()->voltage_max); + bq25710_set_min_system_voltage( + CHARGER_SOLO, battery_get_info()->voltage_max); } break; @@ -353,8 +353,7 @@ void set_charger_system_voltage(void) break; } } -DECLARE_HOOK(HOOK_AC_CHANGE, set_charger_system_voltage, - HOOK_PRIO_DEFAULT); +DECLARE_HOOK(HOOK_AC_CHANGE, set_charger_system_voltage, HOOK_PRIO_DEFAULT); static void configure_charger(void) { @@ -381,8 +380,7 @@ static void configure_retimer_usbmux(void) /* enable TUSB1044RNQR redriver on Port0 */ usb_muxes[TYPE_C_PORT_0].i2c_addr_flags = TUSB1064_I2C_ADDR14_FLAGS; - usb_muxes[TYPE_C_PORT_0].driver = - &tusb1064_usb_mux_driver; + usb_muxes[TYPE_C_PORT_0].driver = &tusb1064_usb_mux_driver; usb_muxes[TYPE_C_PORT_0].hpd_update = tusb1044_hpd_update; #if defined(HAS_TASK_PD_C1) @@ -404,11 +402,11 @@ static void configure_retimer_usbmux(void) * Change the default usb mux config on runtime to support * dual retimer topology. 
*/ - usb_muxes[TYPE_C_PORT_0].next_mux - = &soc_side_bb_retimer0_usb_mux; + usb_muxes[TYPE_C_PORT_0].next_mux = + &soc_side_bb_retimer0_usb_mux; #if defined(HAS_TASK_PD_C1) - usb_muxes[TYPE_C_PORT_1].next_mux - = &soc_side_bb_retimer1_usb_mux; + usb_muxes[TYPE_C_PORT_1].next_mux = + &soc_side_bb_retimer1_usb_mux; #endif break;
Put all the functions added for libmusl together in wrap.c
@@ -236,24 +236,6 @@ typedef struct // extern void *_dl_sym(void *, const char *, void *); -EXPORTWEAK int -__vfprintf_chk(FILE *fp, int flag, const char *format, va_list ap) -{ - return vfprintf(fp, format, ap); -} - -EXPORTWEAK int -__vsnprintf_chk(char *s, size_t maxlen, int flag, size_t slen, const char *format, va_list args) -{ - return vsnprintf(s, slen, format, args); -} - -EXPORTWEAK void * -_dl_sym(void *handle, const char *name, void *who) -{ - return dlsym(handle, name); -} - static int findSymbol(struct dl_phdr_info *info, size_t size, void *data) { @@ -4705,3 +4687,23 @@ __register_atfork(void (*prepare) (void), void (*parent) (void), void (*child) ( */ return ENOMEM; } + +EXPORTWEAK int +__vfprintf_chk(FILE *fp, int flag, const char *format, va_list ap) +{ + return vfprintf(fp, format, ap); +} + +EXPORTWEAK int +__vsnprintf_chk(char *s, size_t maxlen, int flag, size_t slen, const char *format, va_list args) +{ + return vsnprintf(s, slen, format, args); +} + +EXPORTWEAK void * +_dl_sym(void *handle, const char *name, void *who) +{ + return dlsym(handle, name); +} + +
[bsp][bluetrum] Compatible romfs
#include <rtthread.h> -#ifdef BSP_USING_SDIO +#if defined (BSP_USING_SDCARD) #include <dfs_elm.h> #include <dfs_fs.h> @@ -58,4 +58,23 @@ int ab32_sdcard_mount(void) return RT_EOK; } INIT_APP_EXPORT(ab32_sdcard_mount); +#elif defined (RT_USING_DFS_ROMFS) + +#include <dfs_fs.h> +#include "dfs_romfs.h" + +int ab32_romfs_mount(void) +{ + if (dfs_mount(RT_NULL, "/", "rom", 0, &(romfs_root)) == 0) + { + rt_kprintf("ROM file system initializated!\n"); + } + else + { + rt_kprintf("ROM file system initializate failed!\n"); + } + + return 0; +} +INIT_ENV_EXPORT(ab32_romfs_mount); #endif
ci: Add dtc dependencies for rawhide Both F28 and Rawhide build their own dtc version. Rawhide was missing the required build deps.
FROM fedora:rawhide RUN dnf -y install wget curl xterm gcc git xz make diffutils findutils expect valgrind valgrind-devel ccache dtc openssl-devel RUN dnf -y install gcc-powerpc64-linux-gnu +# below packages are for building dtc +RUN dnf -y install flex bison RUN if [ `arch` = "x86_64" ]; then dnf -y install http://public.dhe.ibm.com/software/server/powerfuncsim/p9/packages/v1.1-0/systemsim-p9-1.1-0.f22.x86_64.rpm; fi COPY . /build/ WORKDIR /build
Readme: Sorted sites alphabetically.
@@ -7,11 +7,11 @@ OpenCore bootloader front end. ## Discussion -- [PCbeta.com](http://bbs.pcbeta.com/viewthread-1815623-1-1.html) in Chinese -- [InsanelyMac](https://www.insanelymac.com/forum/topic/338527-opencore-development/) in English +- [AppleLife.ru](https://applelife.ru/threads/razrabotka-opencore.2943955) in Russian - [Hackintosh-Forum.de](https://www.hackintosh-forum.de/forum/thread/42353-opencore-bootloader) in German +- [InsanelyMac](https://www.insanelymac.com/forum/topic/338527-opencore-development/) in English - [macOS86.it](https://www.macos86.it/viewtopic.php?p=32103) in Italian -- [AppleLife.ru](https://applelife.ru/threads/razrabotka-opencore.2943955) in Russian +- [PCbeta.com](http://bbs.pcbeta.com/viewthread-1815623-1-1.html) in Chinese ## Credits
i2c: the user is now allowed to call i2c_master_write even for sending a single byte
@@ -1147,6 +1147,12 @@ esp_err_t i2c_master_write(i2c_cmd_handle_t cmd_handle, const uint8_t *data, siz I2C_CHECK((data != NULL), I2C_ADDR_ERROR_STR, ESP_ERR_INVALID_ARG); I2C_CHECK(cmd_handle != NULL, I2C_CMD_LINK_INIT_ERR_STR, ESP_ERR_INVALID_ARG); + if (data_len == 1) { + /* If data_len if 1, i2c_master_write_byte should have been called, + * correct this here. */ + return i2c_master_write_byte(cmd_handle, *data, ack_en); + } + i2c_cmd_t cmd = { .hw_cmd = { .ack_en = ack_en,
refactor(shield): Formatting tweaks for QAZ.
}; num_sym { bindings = < - &kp NUM_1 &kp NUM_2 &kp NUM_3 &kp NUM_4 &kp NUM_5 &kp NUM_6 &kp NUM_7 &kp NUM_8 &kp NUM_9 &kp NUM_0 + &kp N1 &kp N2 &kp N3 &kp N4 &kp N5 &kp N6 &kp N7 &kp N8 &kp N9 &kp N0 &trans &trans &trans &trans &trans &trans &trans &trans &kp EQL &kp MINUS &kp DEL &none &none &none &none &none &none &none &kp DOT &bootloader &reset &none &trans &trans &kp RET &trans &kp FSLH
doc: acrn_configuration_tool add one more scenario xml element description
@@ -109,6 +109,8 @@ Additional scenario XML elements: ``guest_flags``: Select all applicable flags for the VM. +``vcpu_affinity``: vCPU affinity map; Each vCPU will pin to the selected pCPU ID and different vCPU should not pin to same pCPU. + ``size`` under parent of ``epc_section``: SGX EPC section base; must be page aligned. ``base`` under parent of ``epc_section``: SGX EPC section size in Bytes; must be page aligned.
Make dojo not produce incorrect beams when they start with '/'.
;~(pose sym (easy dp-default-app)) == ++ dp-beam :: ++beam - %+ cook |=(a/path =+((de-beam a) ?^(- u [he-beak (flop a)]))) + %+ cook + |= a/path + :: hack: fixup paths that come out of the hoon parser + :: + :: We currently invoke the hoon parser to read relative paths from + :: the command line, and this parser will produce leading ~ path + :: components with paths that start with a `/`. + :: + :: This entire path is nuts and we shouldn't be representing paths + :: as arbitrary hoons. + :: + =? a &(?=(^ a) =('' i.a)) + t.a + =+((de-beam a) ?^(- u [he-beak (flop a)])) =+ vez=(vang | dp-path) (sear plex:vez (stag %clsg poor:vez)) ::
rework fast_fprint
@@ -295,47 +295,47 @@ uint8_t lastvm0 = 0x55; //TODO ... should we monitor lastvm0 and handle any unexpected changes using check_osd() ... not sure if/when an osd chip becomes unstable due to voltage or some other reason //stuffs a float into a char array. parameters are array length and precision. only pads spaces for 0's up to the thousands place. + +uint8_t count_digits(uint32_t value) { + uint8_t count = 0; + while (value > 0) { + value /= 10; + count++; + } + return count; +} + void fast_fprint(uint8_t *str, uint8_t length, float v, uint8_t precision) { - uint32_t value = v * (ipow(10, precision)); - uint8_t digitsinfrontofdecimal = length - (precision + 1); - static uint32_t last_cast = 0; - for (uint8_t i = 0; i < digitsinfrontofdecimal; i++) { - uint32_t cast_value = value / ipow(10, (digitsinfrontofdecimal + (precision - 1) - i)); - str[i] = ((cast_value) - (10 * last_cast)) + 48; - last_cast = cast_value; - } - - for (uint8_t i = digitsinfrontofdecimal; i < length; i++) { - if (i == digitsinfrontofdecimal) { - if (precision > 0) - str[i] = 46; - else - str[i] = ' '; - } else { - uint32_t cast_value = value / ipow(10, (digitsinfrontofdecimal + precision - i)); - str[i] = ((cast_value) - (10 * last_cast)) + 48; - last_cast = cast_value; - } - } - last_cast = 0; - - if (digitsinfrontofdecimal > 3) { - if ((str[0] == 48) && (str[1] == 48) && (str[2] == 48)) - str[2] = ' '; - if ((str[0] == 48) && (str[1] == 48)) - str[1] = ' '; - if (str[0] == 48) - str[0] = ' '; - } - if (digitsinfrontofdecimal > 2) { - if ((str[0] == 48) && (str[1] == 48)) - str[1] = ' '; - if (str[0] == 48) - str[0] = ' '; - } - if (digitsinfrontofdecimal > 1) { - if (str[0] == 48) - str[0] = ' '; + + // make sure our string is empty + memset(str, 0, length); + + // calculate what we want to multiply by + const uint32_t multiplier = ipow(10, precision); + const uint8_t digits = count_digits(v); + + // move our decimal point + uint32_t value = v * multiplier; + uint32_t divider = ipow(10, 
digits + precision - 1); + + for (uint32_t i = 0; i < length; ++i) { + if (value <= 0) { + if ((i - digits) <= precision) { + str[i] = '0'; + continue; + } + break; + } + + if (i == digits) { + str[i] = '.'; + continue; + } + + uint8_t digit = value / divider; + str[i] = '0' + digit; + value = value % divider; + divider /= 10; } }
need to create a folder for cee-utils
@@ -138,6 +138,7 @@ test: cee_utils common discord slack github reddit $(TEST_EXES) mkdir : mkdir -p $(OBJDIR)/cee-utils + mkdir -p $(ACTOR_OBJDIR)/cee-utils mkdir -p $(ACTOR_OBJDIR)/common/third-party $(ACTOR_OBJDIR)/specs mkdir -p $(OBJDIR)/common/third-party $(LIBDIR) mkdir -p $(addprefix $(SPECSDIR)/, $(SPECS_SUBDIR)) $(addprefix $(OBJDIR)/$(SPECSDIR)/, $(SPECS_SUBDIR))
update acx_nlnetlabs with ws2_32 link.
@@ -688,8 +688,8 @@ AC_DEFUN([ACX_SSL_CHECKS], [ # check if -lwsock32 or -lgdi32 are needed. BAKLIBS="$LIBS" BAKSSLLIBS="$LIBSSL_LIBS" - LIBS="$LIBS -lgdi32" - LIBSSL_LIBS="$LIBSSL_LIBS -lgdi32" + LIBS="$LIBS -lgdi32 -lws2_32" + LIBSSL_LIBS="$LIBSSL_LIBS -lgdi32 -lws2_32" AC_MSG_CHECKING([if -lcrypto needs -lgdi32]) AC_TRY_LINK([], [ int HMAC_Update(void); @@ -839,8 +839,12 @@ dnl see if on windows if test "$ac_cv_header_windows_h" = "yes"; then AC_DEFINE(USE_WINSOCK, 1, [Whether the windows socket API is used]) USE_WINSOCK="1" + if echo $LIBS | grep 'lws2_32' >/dev/null; then + : + else LIBS="$LIBS -lws2_32" fi +fi ], dnl no quick getaddrinfo, try mingw32 and winsock2 library. ORIGLIBS="$LIBS"
BugID:17918368: Update KV Config.in descriptor
menuconfig AOS_COMP_KV - bool "Key-value Style Storage" + bool "Key-value Storage" select AOS_COMP_MBEDTLS if KV_CONFIG_SECURE_CRYPT_IMPL = 1 select MBEDTLS_CONFIG_CRYPTO if KV_CONFIG_SECURE_CRYPT_IMPL = 1 select MBEDTLS_CONFIG_CRYPTO_AES if KV_CONFIG_SECURE_CRYPT_IMPL = 1 select MBEDTLS_CONFIG_CRYPTO_MODE_OFB if KV_CONFIG_SECURE_CRYPT_IMPL = 1 default n help - lightweight key-value style storage module + lightweight key-value storage module if AOS_COMP_KV # Configurations for component kv
refactor windows config paths
#include "defines/msys.h" #ifdef ANY_MSYS #include "utils/file_io/fileUtils.h" +#include "utils/logger.h" #include "utils/string/stringUtils.h" #ifndef CONFIG_PATH "$ProgramData/oidc-agent" // The full path has two / oidc-agent the second // one is appended later #endif -#define CERT_PATH CONFIG_PATH "/oidc-agent/ca-bundle.crt" -char* _cert_file = NULL; char* _config_path = NULL; +char* _cert_file = NULL; char* _etc_issuer_config_file = NULL; char* _etc_pubclients_config_file = NULL; +const char* config_path() { + if (_config_path == NULL) { + _config_path = fillEnvVarsInPath(CONFIG_PATH); + } + return _config_path; +} + const char* CERT_FILE() { if (_cert_file == NULL) { - _cert_file = fillEnvVarsInPath(CERT_PATH); + _cert_file = oidc_pathcat(config_path(), "oidc-agent/ca-bundle.crt"); } return _cert_file; } const char* ETC_ISSUER_CONFIG_FILE() { - if (_config_path == NULL) { - _config_path = fillEnvVarsInPath(CONFIG_PATH); - } if (_etc_issuer_config_file == NULL) { _etc_issuer_config_file = - oidc_pathcat(_config_path, "oidc-agent/" ISSUER_CONFIG_FILENAME); + oidc_pathcat(config_path(), "oidc-agent/" ISSUER_CONFIG_FILENAME); } return _etc_issuer_config_file; } const char* ETC_PUBCLIENTS_CONFIG_FILE() { - if (_config_path == NULL) { - _config_path = fillEnvVarsInPath(CONFIG_PATH); - } if (_etc_pubclients_config_file == NULL) { _etc_pubclients_config_file = - oidc_pathcat(_config_path, "oidc-agent/" PUBCLIENTS_FILENAME); + oidc_pathcat(config_path(), "oidc-agent/" PUBCLIENTS_FILENAME); } return _etc_pubclients_config_file; }
VERSION bump to version 1.4.71
@@ -37,7 +37,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 1) set(SYSREPO_MINOR_VERSION 4) -set(SYSREPO_MICRO_VERSION 70) +set(SYSREPO_MICRO_VERSION 71) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
Fix compilation error for old decoding API Commits and updated the code only for the new decoding API.
@@ -61,7 +61,7 @@ decoder_push(struct decoder *decoder, const AVPacket *packet) { #else int got_picture; int len = avcodec_decode_video2(decoder->codec_ctx, - decoder->video_buffer->decoding_frame, + decoder->video_buffer->producer_frame, &got_picture, packet); if (len < 0) { @@ -69,7 +69,7 @@ decoder_push(struct decoder *decoder, const AVPacket *packet) { return false; } if (got_picture) { - push_frame(decoder); + video_buffer_producer_offer_frame(decoder->video_buffer); } #endif return true;
Update 'F' for new behavior
@@ -554,7 +554,8 @@ BEGIN_DUAL_PHASE_1(if) STOP_IF_DUAL_INACTIVE; Glyph g0 = PEEK(0, 1); Glyph g1 = PEEK(0, 2); - POKE(1, 0, g0 == g1 ? '1' : '0'); + POKE(1, 0, g0 == g1 ? '*' : '.'); + STUN(1, 0); END_PHASE BEGIN_DUAL_PHASE_0(generator)
Added check if max-merge is out of bounds
@@ -1228,8 +1228,14 @@ int kvz_config_parse(kvz_config *cfg, const char *name, const char *value) } else if (OPT("fast-residual-cost")) cfg->fast_residual_cost_limit = atoi(value); - else if (OPT("max-merge")) - cfg->max_merge = atoi(value); + else if (OPT("max-merge")) { + int max_merge = atoi(value); + if (max_merge < 1 || max_merge > 5) { + fprintf(stderr, "max-merge needs to be between 1 and 5\n"); + return 0; + } + cfg->max_merge = (uint8_t)max_merge; + } else { return 0; }
fix funny unsigned overflow bug while counting needed threads
@@ -60,8 +60,9 @@ void GenerateBorders(const TPool& pool, TLearnContext* ctx, TVector<TFloatFeatur const size_t bytesGenerateBorders = sizeof(float) * samplesToBuildBorders; const size_t bytesRequiredPerThread = bytesThreadStack + bytesGenerateBorders + bytesBestSplit; const auto usedRamLimit = ctx->Params.SystemOptions->CpuUsedRamLimit; - const size_t threadCount = Min(reasonCount, (usedRamLimit - bytesUsed) / bytesRequiredPerThread); - if (!(usedRamLimit >= bytesUsed && threadCount > 0)) { + const i64 availableMemory = (i64)usedRamLimit - bytesUsed; + const size_t threadCount = availableMemory > 0 ? Min(reasonCount, (ui64)availableMemory / bytesRequiredPerThread) : 1; + if (!(usedRamLimit >= bytesUsed)) { MATRIXNET_WARNING_LOG << "CatBoost needs " << (bytesUsed + bytesRequiredPerThread) / bytes1M + 1 << " Mb of memory to generate borders" << Endl; } TAtomic taskFailedBecauseOfNans = 0; @@ -107,9 +108,13 @@ void GenerateBorders(const TPool& pool, TLearnContext* ctx, TVector<TFloatFeatur floatFeature.Borders.swap(bordersBlock); }; size_t nReason = 0; + if (threadCount > 1) { for (; nReason + threadCount <= reasonCount; nReason += threadCount) { - ctx->LocalExecutor.ExecRange(calcOneFeatureBorder, nReason, nReason + threadCount, NPar::TLocalExecutor::WAIT_COMPLETE); - CB_ENSURE(taskFailedBecauseOfNans == 0, "There are nan factors and nan values for float features are not allowed. Set nan_mode != Forbidden."); + ctx->LocalExecutor.ExecRange(calcOneFeatureBorder, nReason, nReason + threadCount, + NPar::TLocalExecutor::WAIT_COMPLETE); + CB_ENSURE(taskFailedBecauseOfNans == 0, + "There are nan factors and nan values for float features are not allowed. Set nan_mode != Forbidden."); + } } for (; nReason < reasonCount; ++nReason) { calcOneFeatureBorder(nReason);
docs: gnu9 -> gnu12 for mpi stacks
@@ -41,12 +41,12 @@ MVAPICH2 (psm2) & & % ohpc_command if [[ ${enable_mpi_defaults} -eq 1 && ${enable_pmix} -eq 0 ]];then % ohpc_indent 5 \begin{lstlisting}[language=bash] -[sms](*\#*) (*\install*) openmpi4-gnu9-ohpc mpich-ofi-gnu9-ohpc +[sms](*\#*) (*\install*) openmpi4-gnu12-ohpc mpich-ofi-gnu12-ohpc \end{lstlisting} % ohpc_indent 0 % ohpc_command elif [[ ${enable_mpi_defaults} -eq 1 && ${enable_pmix} -eq 1 ]];then % ohpc_indent 5 -% ohpc_command (*\install*) openmpi4-pmix-slurm-gnu9-ohpc mpich-ofi-gnu9-ohpc +% ohpc_command (*\install*) openmpi4-pmix-slurm-gnu12-ohpc mpich-ofi-gnu12-ohpc % ohpc_indent 0 % ohpc_command fi % end_ohpc_run @@ -65,7 +65,7 @@ semantics. Alternatively, a site can choose to install the {\texttt ucx} variant instead as a drop-in MPICH replacement: \begin{lstlisting}[language=bash] -[sms](*\#*) (*\install*) mpich-ucx-gnu9-ohpc +[sms](*\#*) (*\install*) mpich-ucx-gnu12-ohpc \end{lstlisting} In the case where both MPICH variants are installed, two modules will be @@ -75,7 +75,7 @@ highlighted is below. \begin{lstlisting}[language=bash] [sms](*\#*) module avail mpich --------------------- /opt/ohpc/pub/moduledeps/gnu9 --------------------- +-------------------- /opt/ohpc/pub/moduledeps/gnu12--------------------- mpich/3.3.2-ofi mpich/3.3.2-ucx (D) \end{lstlisting} @@ -88,7 +88,7 @@ MVAPICH2 family is available for use: % ohpc_command if [[ ${enable_ib} -eq 1 ]];then % ohpc_indent 5 \begin{lstlisting}[language=bash] -[sms](*\#*) (*\install*) mvapich2-gnu9-ohpc +[sms](*\#*) (*\install*) mvapich2-gnu12-ohpc \end{lstlisting} % ohpc_indent 0 % ohpc_command fi @@ -101,7 +101,7 @@ variant of MVAPICH2 instead: % ohpc_command if [[ ${enable_opa} -eq 1 ]];then % ohpc_indent 5 \begin{lstlisting}[language=bash] -[sms](*\#*) (*\install*) mvapich2-psm2-gnu9-ohpc +[sms](*\#*) (*\install*) mvapich2-psm2-gnu12-ohpc \end{lstlisting} % ohpc_indent 0 % ohpc_command fi
Use generic kernels for complex (I)AMAX to support softfp
@@ -2,13 +2,13 @@ include $(KERNELDIR)/KERNEL.ARMV5 SAMAXKERNEL = amax_vfp.S DAMAXKERNEL = amax_vfp.S -CAMAXKERNEL = amax_vfp.S -ZAMAXKERNEL = amax_vfp.S +#CAMAXKERNEL = amax_vfp.S +#ZAMAXKERNEL = amax_vfp.S SAMINKERNEL = amax_vfp.S DAMINKERNEL = amax_vfp.S -CAMINKERNEL = amax_vfp.S -ZAMINKERNEL = amax_vfp.S +#CAMINKERNEL = amax_vfp.S +#ZAMINKERNEL = amax_vfp.S SMAXKERNEL = amax_vfp.S DMAXKERNEL = amax_vfp.S @@ -18,13 +18,13 @@ DMINKERNEL = amax_vfp.S ISAMAXKERNEL = iamax_vfp.S IDAMAXKERNEL = iamax_vfp.S -ICAMAXKERNEL = iamax_vfp.S -IZAMAXKERNEL = iamax_vfp.S +#ICAMAXKERNEL = iamax_vfp.S +#IZAMAXKERNEL = iamax_vfp.S ISAMINKERNEL = iamax_vfp.S IDAMINKERNEL = iamax_vfp.S -ICAMINKERNEL = iamax_vfp.S -IZAMINKERNEL = iamax_vfp.S +#ICAMINKERNEL = iamax_vfp.S +#IZAMINKERNEL = iamax_vfp.S ISMAXKERNEL = iamax_vfp.S IDMAXKERNEL = iamax_vfp.S
Fix WindowsPhone AppIdentifier detection - use PackageId, instead of Store Id (which is not available for non-public apps)
@@ -61,10 +61,11 @@ namespace carto { std::string PlatformUtils::GetAppIdentifier() { std::wstring wid; try { - wid = Windows::ApplicationModel::Store::CurrentApp::AppId.ToString()->Data(); + Windows::ApplicationModel::PackageId^ packageId = Windows::ApplicationModel::Package::Current->Id; + wid = packageId->Name->Data(); } catch (Platform::Exception^ e) { - Log::Error("PlatformUtils::GetAppIdentifier: Exception while reading AppId"); + Log::Error("PlatformUtils::GetAppIdentifier: Exception while reading PackageId"); } std::string id; utf8::utf16to8(wid.begin(), wid.end(), std::back_inserter(id));
ESP32 platform change only: update UART clock source for esp32s3.
@@ -276,6 +276,10 @@ int32_t uPortUartOpen(int32_t uart, int32_t baudRate, config.parity = UART_PARITY_DISABLE; #if SOC_UART_SUPPORT_REF_TICK config.source_clk = UART_SCLK_REF_TICK; +#elif SOC_UART_SUPPORT_XTAL_CLK + config.source_clk = UART_SCLK_XTAL; +#else + config.source_clk = UART_SCLK_APB; #endif // Set the baud rate
Fixed inverse logic mistakes when converting
@@ -164,7 +164,7 @@ std::string Type::FunctionDescriptor(RED4ext::CBaseFunction* apFunc, bool aWithH { auto* param = apFunc->params[i]; - if (param->flags.isOut) + if (!param->flags.isOut) { // ignone non-out params cause we've dealt with them above continue; @@ -284,7 +284,7 @@ sol::variadic_results Type::Execute(RED4ext::CClassFunction* apFunc, const std:: args[i].value = nullptr; } - if (!args[i].value && apFunc->params[i]->flags.isOptional) + if (!args[i].value && !apFunc->params[i]->flags.isOptional) { auto* pType = apFunc->params[i]->type; @@ -333,7 +333,7 @@ sol::variadic_results Type::Execute(RED4ext::CClassFunction* apFunc, const std:: for (auto i = 0; i < apFunc->params.size; ++i) { - if (apFunc->params[i]->flags.isOut) + if (!apFunc->params[i]->flags.isOut) continue; results.push_back(Scripting::ToLua(m_lua, args[i]));
lwip_base: Don't attempt to use ND6 if disabled Neighbor discovery was made configurable in commit That commit was incomplete; it missed one use of nd6 in ethip6.c. This commit ensures ND6 is only used if it is enabled.
@@ -99,12 +99,17 @@ ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) /* We have a unicast destination IP address */ /* @todo anycast? */ +#if !LWIP_ND6 + /* Neighbor discovery disabled; indicate no router. */ + return ERR_RTE; +#else /* Ask ND6 what to do with the packet. */ result = nd6_get_next_hop_addr_or_queue(netif, q, ip6addr, &hwaddr); if (result != ERR_OK) { return result; } + /* If no hardware address is returned, nd6 has queued the packet for later. */ if (hwaddr == NULL) { return ERR_OK; @@ -113,6 +118,7 @@ ethip6_output(struct netif *netif, struct pbuf *q, const ip6_addr_t *ip6addr) /* Send out the packet using the returned hardware address. */ SMEMCPY(dest.addr, hwaddr, 6); return ethernet_output(netif, q, (const struct eth_addr*)(netif->hwaddr), &dest, ETHTYPE_IPV6); +#endif } #endif /* LWIP_IPV6 && LWIP_ETHERNET */
Fixed CartoVectorTileDecoder layer ordering issues
@@ -209,20 +209,24 @@ namespace carto { if (layerInvisibleSet.count(it->first) > 0) { continue; } - mvt::MBVTTileReader reader(it->second, *layerSymbolizerContexts[it->first], decoder); - reader.setLayerNameOverride(it->first); - if (std::shared_ptr<vt::Tile> tile = reader.readTile(targetTile)) { + std::size_t index = std::distance(_layerIds.begin(), std::find(_layerIds.begin(), _layerIds.end(), it->first)); - if (index < tiles.size()) { - tiles.push_back(tile); - } + if (index >= tiles.size()) { + continue; } + + mvt::MBVTTileReader reader(it->second, *layerSymbolizerContexts[it->first], decoder); + reader.setLayerNameOverride(it->first); + tiles[index] = reader.readTile(targetTile); } std::vector<std::shared_ptr<vt::TileLayer> > tileLayers; - for (const std::shared_ptr<vt::Tile>& tile : tiles) { - if (tile) { - tileLayers.insert(tileLayers.end(), tile->getLayers().begin(), tile->getLayers().end()); + for (std::size_t i = 0; i < tiles.size(); i++) { + if (std::shared_ptr<vt::Tile> tile = tiles[i]) { + for (const std::shared_ptr<vt::TileLayer>& tileLayer : tile->getLayers()) { + int layerIdx = static_cast<int>(i * 65536) + tileLayer->getLayerIndex(); + tileLayers.push_back(std::make_shared<vt::TileLayer>(layerIdx, tileLayer->getOpacity(), tileLayer->getCompOp(), tileLayer->getBitmaps(), tileLayer->getGeometries(), tileLayer->getLabels())); + } } }
rowan: config SPI pins Configure B12/B13/B14/B15 as SPI pins. BRANCH=master TEST=EC_FIRMWARE=rowan emerge-rowan chromeos-ec Commit-Ready: Patrick Berny Tested-by: Patrick Berny
@@ -421,10 +421,8 @@ static void board_chipset_pre_init(void) gpio_config_module(MODULE_SPI_MASTER, 1); /* Set all four SPI pins to high speed */ - /* pins D0/D1/D3/D4 */ - STM32_GPIO_OSPEEDR(GPIO_D) |= 0x000003cf; - /* pins F6 */ - STM32_GPIO_OSPEEDR(GPIO_F) |= 0x00003000; + /* pins B12/B13/B14/B15 */ + STM32_GPIO_OSPEEDR(GPIO_B) |= 0xff000000; /* Enable clocks to SPI2 module */ STM32_RCC_APB1ENR |= STM32_RCC_PB1_SPI2; @@ -454,8 +452,7 @@ static void board_chipset_shutdown(void) * Calling gpio_config_module sets disabled alternate function pins to * GPIO_INPUT. But to prevent leakage we want to set GPIO_OUT_LOW */ - gpio_set_flags_by_mask(GPIO_D, 0x1a, GPIO_OUT_LOW); - gpio_set_level(GPIO_SPI2_NSS, 0); + gpio_set_flags_by_mask(GPIO_B, 0xf000, GPIO_OUT_LOW); } DECLARE_HOOK(HOOK_CHIPSET_SHUTDOWN, board_chipset_shutdown, HOOK_PRIO_DEFAULT);
OcDeviceMiscLib: Fix multiple memory corruptions rebar code
@@ -680,6 +680,8 @@ ResizeGpuBarsPciIo ( } } + FreePool (HandleBuffer); + if (HasSuccess) { return EFI_SUCCESS; } @@ -752,6 +754,8 @@ ResizeGpuBarsRbIo ( ASSERT (Size < PciBarTotal); + HasSuccess = FALSE; + Status = gBS->LocateHandleBuffer ( ByProtocol, &gEfiPciRootBridgeIoProtocolGuid, @@ -772,12 +776,12 @@ ResizeGpuBarsRbIo ( (VOID **)&PciRootBridgeIo ); if (EFI_ERROR (Status)) { - goto free; + continue; } - PciRootBridgeIo->Configuration (PciRootBridgeIo, (VOID **)&Descriptors); + Status = PciRootBridgeIo->Configuration (PciRootBridgeIo, (VOID **)&Descriptors); if (EFI_ERROR (Status)) { - goto free; + continue; } // @@ -835,11 +839,10 @@ ResizeGpuBarsRbIo ( } } } - -free: - FreePool (HandleBuffer[Index]); } + FreePool (HandleBuffer); + if (HasSuccess) { return EFI_SUCCESS; }
Fix memory leaks in tests Tests were failing when run with ASAN enabled.
@@ -11,6 +11,7 @@ static void test_get_ip_single_line() { char *ip = sc_adb_parse_device_ip_from_output(ip_route, sizeof(ip_route)); assert(ip); assert(!strcmp(ip, "192.168.12.34")); + free(ip); } static void test_get_ip_single_line_without_eol() { @@ -20,6 +21,7 @@ static void test_get_ip_single_line_without_eol() { char *ip = sc_adb_parse_device_ip_from_output(ip_route, sizeof(ip_route)); assert(ip); assert(!strcmp(ip, "192.168.12.34")); + free(ip); } static void test_get_ip_single_line_with_trailing_space() { @@ -29,6 +31,7 @@ static void test_get_ip_single_line_with_trailing_space() { char *ip = sc_adb_parse_device_ip_from_output(ip_route, sizeof(ip_route)); assert(ip); assert(!strcmp(ip, "192.168.12.34")); + free(ip); } static void test_get_ip_multiline_first_ok() { @@ -40,6 +43,7 @@ static void test_get_ip_multiline_first_ok() { char *ip = sc_adb_parse_device_ip_from_output(ip_route, sizeof(ip_route)); assert(ip); assert(!strcmp(ip, "192.168.1.2")); + free(ip); } static void test_get_ip_multiline_second_ok() { @@ -51,6 +55,7 @@ static void test_get_ip_multiline_second_ok() { char *ip = sc_adb_parse_device_ip_from_output(ip_route, sizeof(ip_route)); assert(ip); assert(!strcmp(ip, "192.168.1.3")); + free(ip); } static void test_get_ip_no_wlan() {
add section to README - see question at
@@ -225,15 +225,48 @@ Iodine::Rack.run My_Broadcast Of course, if you still want to use Rack's `hijack` API, Iodine will support you - but be aware that you will need to implement your own reactor and thread pool for any sockets you hijack, as well as a socket buffer for non-blocking `write` operations (why do that when you can write a protocol object and have the main reactor manage the socket?). +### Performance oriented design - but safety first + +Iodine is an evened server, similar in it's architecture to `nginx` and `puma`. It's different than the simple "thread-per-client" design that is often taught when we begin to learn about network programming. + +By leveraging `epoll` (on Linux) and `kqueue` (on BSD), iodine can listen to multiple network events on multiple sockets using a single thread. + +All these events go into a task queue, together with the application events and any user generated tasks, such as ones scheduled by [`Iodine.run`](http://www.rubydoc.info/github/boazsegev/iodine/Iodine#run-class_method). + +In pseudo-code, this might look like this + +```ruby +QUEUE = Queue.new + +def server_cycle + QUEUE << get_next_32_socket_events # these events schedule the proper user code to run + QUEUE << [server] +end + +def run_server + while ((event = QUEUE.pop)) + event.shift.call(*event) + end +end +``` + +In pure Ruby (without using C extensions or Java), it's possible to do the same by using `select`... and although `select` has some issues, it works well for smaller concurrency levels. + +The server events are fairly fast and fragmented (longer code is fragmented across multiple events), so one thread is enough to run the server including it's static file service and everything... but single threaded mode should probably be avoided. + +The thread pool is there to help slow user code. It's very common that the application's code will run slower and require external resources (i.e., databases, a pub/sub service, etc'). 
This slow code could "starve" the server (that is patiently waiting to run it's tasks on the same thread) - which is why a thread pool is often necessary. + +The slower your application code, the more threads you will need to keep the server running smoothly. + ### How does it compare to other servers? -Personally, after looking around, the only comparable servers are Puma and Passenger (the open source version), which Iodine significantly outperformed on my tests. +Personally, after looking around, the only comparable servers are Puma and Passenger, which Iodine significantly outperformed on my tests (I didn't test Passenger's enterprise version). Since the HTTP and Websocket parsers are written in C (with no RegExp), they're fairly fast. Also, Iodine's core and parsers are running outside of Ruby's global lock, meaning that they enjoy true concurrency before entering the Ruby layer (your application) - this offers Iodine a big advantage over other Ruby servers. -Another assumption Iodine makes is that it is behind a load balancer / proxy (which is the normal way Ruby applications are deployed) - this allows Iodine to disregard header validity checks (we're not checking for invalid characters) which speeds up the parsing process even further. +Another assumption Iodine makes is that it is behind a load balancer / proxy (which is the normal way Ruby applications are deployed) - this allows Iodine to disregard header validity checks (we're not checking for invalid characters) and focus it's resources on other security and performance concerns. I recommend benchmarking the performance for yourself using `wrk` or `ab`:
Fixed an invalid assert when ASTCENC_DECOMPRESS_ONLY is used
@@ -950,10 +950,10 @@ static void construct_block_size_descriptor_2d( bsd.always_block_mode_count = always_block_mode_count; bsd.always_decimation_mode_count = always_decimation_mode_count; +#if !defined(ASTCENC_DECOMPRESS_ONLY) assert(bsd.always_block_mode_count > 0); assert(bsd.always_decimation_mode_count > 0); -#if !defined(ASTCENC_DECOMPRESS_ONLY) delete[] percentiles; #endif
Init{RGB,YUV}Rescaler: fix a few more int overflows promote out_width to size_t before multiplying src/dec/io_dec.c:301:30: runtime error: signed integer overflow: 2 * cannot be represented in type 'int' 0x55fd9e8de2bd in InitYUVRescaler src/dec/io_dec.c:301:30 0x55fd9e8de2bd in CustomSetup src/dec/io_dec.c:571:54
@@ -298,7 +298,8 @@ static int InitYUVRescaler(const VP8Io* const io, WebPDecParams* const p) { const int uv_out_height = (out_height + 1) >> 1; const int uv_in_width = (io->mb_w + 1) >> 1; const int uv_in_height = (io->mb_h + 1) >> 1; - const size_t work_size = 2 * out_width; // scratch memory for luma rescaler + // scratch memory for luma rescaler + const size_t work_size = 2 * (size_t)out_width; const size_t uv_work_size = 2 * uv_out_width; // and for each u/v ones uint64_t total_size; size_t rescaler_size; @@ -486,7 +487,8 @@ static int InitRGBRescaler(const VP8Io* const io, WebPDecParams* const p) { const int out_height = io->scaled_height; const int uv_in_width = (io->mb_w + 1) >> 1; const int uv_in_height = (io->mb_h + 1) >> 1; - const size_t work_size = 2 * out_width; // scratch memory for one rescaler + // scratch memory for one rescaler + const size_t work_size = 2 * (size_t)out_width; rescaler_t* work; // rescalers work area uint8_t* tmp; // tmp storage for scaled YUV444 samples before RGB conversion uint64_t tmp_size1, tmp_size2, total_size;
YAML Benchmark: Support multiple input files
BUILD_DIRECTORY="@CMAKE_BINARY_DIR@" SOURCE_DIRECTORY="@CMAKE_SOURCE_DIR@" PLUGINS=(yamlcpp yanlr yambi yawn yaypeg) -DATA_DIRECTORY="benchmarks/data" +DATA_DIRECTORY="benchmarks/data/temporary" BENCHMARK_TOOL="$BUILD_DIRECTORY/bin/benchmark_plugingetset" +INPUT_FILES=( + benchmarks/data/test.yaml +) trap cleanup EXIT INT QUIT TERM cleanup() { - for PLUGIN in "${PLUGINS[@]}"; do - rm -f "$DATA_DIRECTORY/test.$PLUGIN.in" - done + rm -rf "$DATA_DIRECTORY" } cd "$SOURCE_DIRECTORY" || { @@ -29,13 +30,21 @@ command -v hyperfine > /dev/null 2>&1 || { exit 1 } +mkdir "$DATA_DIRECTORY" +for FILEPATH in "${INPUT_FILES[@]}"; do + DIRECTORY="$DATA_DIRECTORY/$(basename "$FILEPATH" | cut -f1 -d'.')" + mkdir "$DIRECTORY" for PLUGIN in "${PLUGINS[@]}"; do - cp "$DATA_DIRECTORY/test.yaml" "$DATA_DIRECTORY/test.$PLUGIN.in" + cp "$FILEPATH" "$DIRECTORY/test.$PLUGIN.in" + done done +for DIRECTORY in $(find $DATA_DIRECTORY -mindepth 1 -maxdepth 1 -type d); do hyperfine --warmup 3 \ - "\"$BENCHMARK_TOOL\" \"$DATA_DIRECTORY\" user ${PLUGINS[0]} get" \ - "\"$BENCHMARK_TOOL\" \"$DATA_DIRECTORY\" user ${PLUGINS[1]} get" \ - "\"$BENCHMARK_TOOL\" \"$DATA_DIRECTORY\" user ${PLUGINS[2]} get" \ - "\"$BENCHMARK_TOOL\" \"$DATA_DIRECTORY\" user ${PLUGINS[3]} get" \ - "\"$BENCHMARK_TOOL\" \"$DATA_DIRECTORY\" user ${PLUGINS[4]} get" | sed -e "s~$BUILD_DIRECTORY/bin/~~g" + "\"$BENCHMARK_TOOL\" \"$DIRECTORY\" user ${PLUGINS[0]} get" \ + "\"$BENCHMARK_TOOL\" \"$DIRECTORY\" user ${PLUGINS[1]} get" \ + "\"$BENCHMARK_TOOL\" \"$DIRECTORY\" user ${PLUGINS[2]} get" \ + "\"$BENCHMARK_TOOL\" \"$DIRECTORY\" user ${PLUGINS[3]} get" \ + "\"$BENCHMARK_TOOL\" \"$DIRECTORY\" user ${PLUGINS[4]} get" | + sed -e "s~$BUILD_DIRECTORY/bin/~~g" -e "s~$DATA_DIRECTORY/bin/~~g" +done
add desktop help binding
@@ -159,6 +159,7 @@ static Key dkeys[] = { {0, XK_n, spawn, {.v = nautiluscmd } }, {0, XK_space, spawn, {.v = panther} }, {0, XK_f, spawn, {.v = firefoxcmd} }, + {0, XK_F1, spawn, {.v = helpcmd} }, {0, XK_m, spawn, {.v = spoticli} }, {0, XK_Return, spawn, {.v = termcmd} }, {0, XK_plus, spawn, {.v = upvol} },
Refactored and simplified vector_get
@@ -2,12 +2,11 @@ typedef buffer vector; static inline void *vector_get(vector v, int offset) { - bytes base = v->start + offset * sizeof(void *); - if ((offset < 0) || ((base + sizeof(void *)) > v->end)) - // should be INVALID_VIRTUAL (? ) + bytes boffset = offset * sizeof(void *); + if (offset < 0 || boffset + sizeof(void *) > buffer_length(v)) return 0; - return ((void **)(v->contents + v->start))[offset]; + return *(void **)buffer_ref(v, boffset); } static inline boolean vector_set(vector v, int offset, void *value)
boot.lua uses headset clip distance;
@@ -139,9 +139,11 @@ function lovr.run() local headset = lovr.headset and lovr.headset.getTexture() if headset then local pass = lovr.graphics.getPass('render', headset) + local near, far = lovr.headset.getClipDistance() for i = 1, lovr.headset.getViewCount() do pass:setViewPose(i, lovr.headset.getViewPose(i)) - pass:setProjection(i, lovr.headset.getViewAngles(i)) + local left, right, up, down = lovr.headset.getViewAngles(i) + pass:setProjection(i, left, right, up, down, near, far) end local skip = lovr.draw and lovr.draw(pass) if not skip then lovr.graphics.submit(pass) end @@ -195,9 +197,11 @@ function lovr.errhand(message, traceback) local texture = lovr.headset.getTexture() if texture then local pass = lovr.graphics.getPass('render', texture) + local near, far = lovr.headset.getClipDistance() for i = 1, lovr.headset.getViewCount() do pass:setViewPose(i, lovr.headset.getViewPose(i)) - pass:setProjection(i, lovr.headset.getViewAngles(i)) + local left, right, up, down = lovr.headset.getViewAngles(i) + pass:setProjection(i, left, right, up, down, near, far) end render(pass) passes[#passes + 1] = pass
improve mrbc_raise, mrbc_raisef functions.
@@ -140,9 +140,14 @@ void mrbc_exception_delete(mrbc_value *value) */ void mrbc_raise( struct VM *vm, struct RClass *exc_cls, const char *msg ) { + if( vm ) { vm->exception = mrbc_exception_new( vm, exc_cls ? exc_cls : MRBC_CLASS(RuntimeError), msg, 0 ); vm->flag_preemption = 2; + + } else { + mrbc_printf("Exception : %s (%s)\n", msg ? msg : mrbc_symid_to_str(exc_cls->sym_id), mrbc_symid_to_str(exc_cls->sym_id)); + } } @@ -158,16 +163,22 @@ void mrbc_raisef( struct VM *vm, struct RClass *exc_cls, const char *fstr, ... ) va_list ap; va_start( ap, fstr ); + if( vm ) { char *buf = mrbc_alloc( vm, 32 ); if( !buf ) return; // ENOMEM mrbc_vsprintf( buf, 32, fstr, ap ); - va_end( ap ); vm->exception = mrbc_exception_new_alloc( vm, exc_cls ? exc_cls : MRBC_CLASS(RuntimeError), buf, strlen(buf) ); vm->flag_preemption = 2; + } else { + + mrbc_vprintf( fstr, ap ); + } + + va_end( ap ); }
Fix fake controller positioning;
@@ -172,9 +172,10 @@ static ControllerHand fakeControllerGetHand(Controller* controller) { static void fakeControllerGetPose(Controller* controller, float* x, float* y, float* z, float* angle, float* ax, float* ay, float* az) { *x = 0; - *y = state.offset; - *z = -1; + *y = 0; + *z = -.75; mat4_transform(state.transform, x, y, z); + *y += state.offset; float q[4]; quat_fromMat4(q, state.transform);
fix for next_powerof2 (thanks to Nguyen Damien)
#include <assert.h> #include <complex.h> #include <strings.h> +#include <stdint.h> #include "num/multind.h" #include "num/flpmath.h" @@ -58,7 +59,9 @@ static unsigned int next_powerof2(unsigned int x) { x--; - for (unsigned int i = 0, n = 1; i < 6; i++, n *= 2) + assert(x <= (UINT32_MAX >> 1)); + + for (unsigned int i = 0, n = 1; i < 5; i++, n *= 2) x = (x >> n) | x; return x + 1;
use PROJECT_BINARY_DIR for sys/ compat headers
@@ -95,7 +95,7 @@ endif() if (NOT HAVE_SYS_TREE) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/compat/sys/tree.h.in - ${CMAKE_CURRENT_BINARY_DIR}/compat/sys/tree.h) + ${PROJECT_BINARY_DIR}/compat/sys/tree.h) include_directories(${PROJECT_BINARY_DIR}/compat) @@ -103,7 +103,9 @@ endif(NOT HAVE_SYS_TREE) if (NOT HAVE_SYS_QUEUE) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/compat/sys/queue.h.in - ${CMAKE_CURRENT_BINARY_DIR}/compat/sys/queue.h) + ${PROJECT_BINARY_DIR}/compat/sys/queue.h) + + include_directories(${PROJECT_BINARY_DIR}/compat) endif(NOT HAVE_SYS_QUEUE) if (NOT HAVE_STRNDUP)
HW: Fixing format in hardware/README.md
@@ -36,7 +36,7 @@ The environment variable `DIMMTEST` needs to point to the directory containing t The path to the set of actions that shall be included is defined via the environment variable `ACTION_ROOT`. **Currently it has to point to a directory within** - `$DONUT_HARDWARE_ROOT/action_examples`. + $DONUT_HARDWARE_ROOT/action_examples This directory needs to contain an action_wrapper entity as interface between the actions and the SNAP framework.
Fix build error if assert is defined in apriltag.c
@@ -55,6 +55,7 @@ either expressed or implied, of the Regents of The University of Michigan. #define malloc(size) ({ void *_r = umm_malloc(size); if(!_r) umm_alloc_fail(); _r; }) #define realloc(ptr, size) ({ void *_r = umm_realloc((ptr), (size)); if(!_r) umm_alloc_fail(); _r; }) #define calloc(num, item_size) ({ void *_r = umm_calloc((num), (item_size)); if(!_r) umm_alloc_fail(); _r; }) +#undef assert #define assert(expression) #define sqrt(x) fast_sqrtf(x) #define sqrtf(x) fast_sqrtf(x)
move enqueue under lock, remove old comment
@@ -32,8 +32,7 @@ typedef struct qbuf { buffer b; } *qbuf; -/* return true if sendq empty */ -static void direct_conn_send_internal(direct_conn dc) +static void direct_conn_send_internal(direct_conn dc, qbuf q) { direct_debug("dc %p\n", dc); list next; @@ -43,6 +42,8 @@ static void direct_conn_send_internal(direct_conn dc) tcp_write or tcp_output, this will need to be revised to avoid deadlock. */ spin_lock(&dc->send_lock); + if (q) + list_insert_before(&dc->sendq_head, &q->l); while ((next = list_get_next(&dc->sendq_head))) { qbuf q = struct_from_list(next, qbuf, l); if (!q->b) { @@ -89,7 +90,7 @@ static void direct_conn_send_internal(direct_conn dc) static err_t direct_conn_sent(void *arg, struct tcp_pcb *pcb, u16 len) { assert(arg); - direct_conn_send_internal((direct_conn)arg); + direct_conn_send_internal((direct_conn)arg, 0); return ERR_OK; } @@ -108,10 +109,7 @@ closure_function(1, 1, status, direct_conn_send, } else { /* queue even if b == 0 (acts as close connection command) */ q->b = b; - u64 flags = irq_disable_save(); - list_insert_before(&dc->sendq_head, &q->l); /* really need CAS version */ - irq_restore(flags); - direct_conn_send_internal(dc); + direct_conn_send_internal(dc, q); } return s; }
file_write: refactor by calling helper functions This commit adds the begin_file_write() and file_write_complete_internal() helper functions, which are now called by file_write() and file_write_complete(), respectively, and will be reused in the next commit.
@@ -734,27 +734,41 @@ closure_function(2, 6, sysreturn, file_sg_read, return bh ? SYSRETURN_CONTINUE_BLOCKING : file_op_maybe_sleep(t); } -closure_function(6, 1, void, file_write_complete, - thread, t, file, f, sg_list, sg, u64, length, boolean, is_file_offset, io_completion, completion, - status, s) +static void begin_file_write(thread t, file f, u64 len) +{ + if (len > 0) + filesystem_update_mtime(t->p->fs, file_get_meta(f)); + file_op_begin(t); +} + +static void file_write_complete_internal(thread t, file f, u64 len, + boolean is_file_offset, + io_completion completion, status s) { - thread_log(bound(t), "%s: f %p, sg, %p, completion %F, status %v", - __func__, bound(f), bound(sg), bound(completion), s); sysreturn rv; - sg_list_release(bound(sg)); - deallocate_sg_list(bound(sg)); if (is_ok(s)) { - file f = bound(f); /* if regular file, update length */ if (f->fsf) f->length = fsfile_get_length(f->fsf); - if (bound(is_file_offset)) /* vs specified offset (pread) */ - f->offset += bound(length); - rv = bound(length); + if (is_file_offset) + f->offset += len; + rv = len; } else { rv = sysreturn_from_fs_status_value(s); } - apply(bound(completion), bound(t), rv); + apply(completion, t, rv); +} + +closure_function(6, 1, void, file_write_complete, + thread, t, file, f, sg_list, sg, u64, length, boolean, is_file_offset, io_completion, completion, + status, s) +{ + thread_log(bound(t), "%s: f %p, sg, %p, completion %F, status %v", + __func__, bound(f), bound(sg), bound(completion), s); + sg_list_release(bound(sg)); + deallocate_sg_list(bound(sg)); + file_write_complete_internal(bound(t), bound(f), bound(length), + bound(is_file_offset), bound(completion), s); closure_finish(); } @@ -785,10 +799,7 @@ closure_function(2, 6, sysreturn, file_write, sgb->offset = 0; sgb->refcount = 0; - if (length > 0) { - filesystem_update_mtime(t->p->fs, file_get_meta(f)); - } - file_op_begin(t); + begin_file_write(t, f, length); apply(f->fs_write, sg, irangel(offset, length), closure(h, 
file_write_complete, t, f, sg, length, is_file_offset, completion)); /* possible direct return in top half */
Update CHANGES for X448 and Ed448
Changes between 1.1.0g and 1.1.1 [xx XXX xxxx] + *) Added support for X448 and Ed448. Currently this is only supported in + libcrypto (not libssl). Heavily based on original work by Mike Hamburg. + [Matt Caswell] + *) Extend OSSL_STORE with capabilities to search and to narrow the set of objects loaded. This adds the functions OSSL_STORE_expect() and OSSL_STORE_find() as well as needed tools to construct searches and
CMSIS-DSP: Improved doxygen comments.
- The states of the vectorized filters are different from their scalar versions (which may break some algorithms which are working on the state of a filter like Goertzel) + - For performance reasons, some vectorized algorithms may read one vector + after the end of buffer (but the data read is not used). + If the buffer is close to the end of a memory zone, it is advised + to leave some free memory area between the end of the buffer and the end + of the memory zone. </td> </tr>
Avoid passing NULL to strcmp()
@@ -1149,7 +1149,7 @@ parse_headers (WSHeaders * headers) { free (tmp); line = next ? (next + 2) : NULL; - if (strcmp (next, "\r\n\r\n") == 0) + if (next && strcmp (next, "\r\n\r\n") == 0) break; }
Add tests for server paths
test.isequal("%HOME%/user", path.getabsolute("%HOME%/user")) end + function suite.getabsolute_onServerPath() + test.isequal("//Server/Volume", path.getabsolute("//Server/Volume")) + end + function suite.getabsolute_onMultipleEnvVar() test.isequal("$(HOME)/$(USER)", path.getabsolute("$(HOME)/$(USER)")) end test.isequal("obj/debug", path.getrelative("C:/Code/Premake4", "C:/Code/Premake4/obj/debug")) end + function suite.getrelative_ReturnsChildPath_OnServerPath() + test.isequal("../Volume", path.getrelative("//Server/Shared", "//Server/Volume")) + end + function suite.getrelative_ReturnsAbsPath_OnDifferentDriveLetters() test.isequal("D:/Files", path.getrelative("C:/Code/Premake4", "D:/Files")) end test.isequal("/opt/include", path.getrelative("/home/me/src/project", "/opt/include")) end + function suite.getrelative_ReturnsAbsPath_OnServerPath() + test.isequal("//Server/Volume", path.getrelative("C:/Files", "//Server/Volume")) + end + + function suite.getrelative_ReturnsAbsPath_OnDifferentServers() + test.isequal("//Server/Volume", path.getrelative("//Computer/Users", "//Server/Volume")) + end + function suite.getrelative_ignoresExtraSlashes2() test.isequal("..", path.getrelative("/a//b/c","/a/b")) end
tests: fix calyptia custom test for macos.
@@ -41,12 +41,12 @@ void flb_custom_calyptia_pipeline_config_get_test() flb_custom_set_property(calyptia, "add_label", "pipeline_id 7DDD2941-3ED6-4B8C-9F84-DD04C4A018A4"); flb_custom_set_property(calyptia, "calyptia_host", "cloud-api.calyptia.com"); flb_custom_set_property(calyptia, "calyptia_port", "443"); - flb_custom_set_property(calyptia, "tls", "on"); - flb_custom_set_property(calyptia, "tls_verify", "on"); cfg = custom_calyptia_pipeline_config_get(ctx->config); TEST_CHECK(strcmp(cfg, cfg_str) == 0); flb_sds_destroy(cfg); + flb_start(ctx); + flb_stop(ctx); flb_destroy(ctx); }
Fix obsolete FSM remarks in nbtree README. The free space map has used a dedicated relation fork rather than shared memory segments for over a decade.
@@ -347,14 +347,11 @@ guaranteed to be "visible to everyone". As collateral damage, this implementation also waits for running XIDs with no snapshots and for snapshots taken until the next transaction to allocate an XID commits. -Reclaiming a page doesn't actually change its state on disk --- we simply -record it in the shared-memory free space map, from which it will be -handed out the next time a new page is needed for a page split. The -deleted page's contents will be overwritten by the split operation. -(Note: if we find a deleted page with an extremely old transaction -number, it'd be worthwhile to re-mark it with FrozenTransactionId so that -a later xid wraparound can't cause us to think the page is unreclaimable. -But in more normal situations this would be a waste of a disk write.) +Reclaiming a page doesn't actually change the state of the page --- we +simply record it in the free space map, from which it will be handed out +the next time a new page is needed for a page split. The deleted page's +contents will be overwritten by the split operation (it will become the +new right page). Because we never delete the rightmost page of any level (and in particular never delete the root), it's impossible for the height of the tree to
porting/npl/nuttx: fix callout_handler implement callout_thread is working for all timeout callout, it need an endless loop to catch all message.
@@ -44,6 +44,7 @@ callout_handler(pthread_addr_t arg) { struct ble_npl_callout *c; + while (true) { pthread_mutex_lock(&callout_mutex); while (!pending_callout) { pthread_cond_wait(&callout_cond, &callout_mutex); @@ -60,6 +61,7 @@ callout_handler(pthread_addr_t arg) } else { c->c_ev.ev_cb(&c->c_ev); } + } return NULL; }
kdb: call procgetstorage on cache hits, fix missing pregetstorage in debug mode
@@ -809,11 +809,16 @@ static int elektraCacheLoadSplit (KDB * handle, Split * split, KeySet * ks, KeyS if (debugGlobalPositions) { + keySetName (parentKey, keyName (initialParent)); + elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, INIT); + elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, MAXONCE); + elektraGlobalGet (handle, ks, parentKey, PREGETSTORAGE, DEINIT); + } + keySetName (parentKey, keyName (initialParent)); elektraGlobalGet (handle, *cache, parentKey, PROCGETSTORAGE, INIT); elektraGlobalGet (handle, *cache, parentKey, PROCGETSTORAGE, MAXONCE); elektraGlobalGet (handle, *cache, parentKey, PROCGETSTORAGE, DEINIT); - } ksRewind (*cache); if (ks->size == 0)
Fix profile test I modified profile to align values, which added extra whitespace. I was using the version of split that took a delimiter (space), which will not consolidate whitespace. Switch to the version without a parameter, which does consolidate.
@@ -51,7 +51,7 @@ def profile(*unused): ] profile_output = subprocess.check_output(' '.join(profile_args), shell=True) profile_lines = profile_output.decode().split('\n') - profile_tuples = [line.split(' ') for line in profile_lines if line] + profile_tuples = [line.split() for line in profile_lines if line] profile_map = {func: int(count) for count, _, func in profile_tuples} # These tests don't end up being exactly 2x the number of samples. Because
Updated README.md for upcoming version.
@@ -97,9 +97,9 @@ GoAccess can be compiled and used on *nix systems. Download, extract and compile GoAccess with: - $ wget https://tar.goaccess.io/goaccess-1.4.6.tar.gz - $ tar -xzvf goaccess-1.4.6.tar.gz - $ cd goaccess-1.4.6/ + $ wget https://tar.goaccess.io/goaccess-1.5.tar.gz + $ tar -xzvf goaccess-1.5.tar.gz + $ cd goaccess-1.5/ $ ./configure --enable-utf8 --enable-geoip=mmdb $ make # make install
brya: LED comments improvement Fix typo, add functional description BRANCH=none TEST=none (changed comments only)
* found in the LICENSE file. */ -/* Waddledoo specific PWM LED settings. */ +/* Brya specific PWM LED settings: there are 2 LEDs on each side of the board, + * each one can be controlled separately. The LED colors are white or amber, + * and the default behavior is tied to the charging process: both sides are + * amber while charging the battery and white when the battery is charged. + */ #include "common.h" #include "ec_commands.h" @@ -19,7 +23,7 @@ const enum ec_led_id supported_led_ids[] = { const int supported_led_ids_count = ARRAY_SIZE(supported_led_ids); /* - * We only have a white and an amber LED, so setting any other colour results in + * We only have a white and an amber LED, so setting any other color results in * both LEDs being off. */ struct pwm_led led_color_map[EC_LED_COLOR_COUNT] = {
latest obs package config
@@ -62,8 +62,8 @@ compiler_families=["gnu8","intel"] mpi_families=["openmpi3","mpich","mvapich2","impi"] -standalone = ["charliecloud","clustershell","cmake","docs","easybuild","lmod", - "meta-packages","papi","slurm","test-suite","valgrind"] +standalone = ["!charliecloud","clustershell","cmake","docs","easybuild","lmod", + "meta-packages","papi","singularity","slurm","test-suite","valgrind"] # define (compiler dependent) packages compiler_dependent = ["hdf5","openmpi","mvapich2","likwid","!plasma","R"] @@ -72,4 +72,4 @@ compiler_dependent = ["hdf5","openmpi","mvapich2","likwid","!plasma","R"] R_compiler=["gnu8"] opencoarrays_compiler=["gnu8"] -mpi_dependent = ["boost","extrae","geopm","mumps","opencoarrays","omb","petsc","phdf5","scorep","slepc"] +mpi_dependent = ["boost","dimemas","extrae","geopm","mumps","opencoarrays","omb","petsc","phdf5","scorep","slepc"]
doc: cosmetic fixup of reference to stale header
@@ -24,7 +24,7 @@ Secure and Trusted Boot Library (LibSTB) Documentation In order to support Secure and Trusted Boot, the flash driver calls libSTB to verify and measure the code it fetches from PNOR. -LibSTB is initialized by calling *stb_init()*, see ``libstb/stb.h``. +LibSTB is initialized by calling *stb_init()*, see ``libstb/secureboot.h``. Secure Boot -----------
Allow relative paths for jpm commands (deps) Also default headerpath, libpath, and binpath of of (dyn :syspath) instead of $JANET_MODPATH. This allows setting $JANET_MODPATH without needing to mess with the other settings.
(def JANET_MODPATH (or (os/getenv "JANET_MODPATH") (dyn :syspath))) (def JANET_HEADERPATH (or (os/getenv "JANET_HEADERPATH") - (if-let [j JANET_MODPATH] + (if-let [j (dyn :syspath)] (string j "/../../include/janet")))) (def JANET_BINPATH (or (os/getenv "JANET_BINPATH") - (if-let [j JANET_MODPATH] + (if-let [j (dyn :syspath)] (string j "/../../bin")))) (def JANET_LIBPATH (or (os/getenv "JANET_LIBPATH") - (if-let [j JANET_MODPATH] + (if-let [j (dyn :syspath)] (string j "/..")))) # @@ -512,7 +512,9 @@ int main(int argc, const char **argv) { "Create an absolute path. Does not resolve . and .. (useful for generating entries in install manifest file)." [path] - (if (string/has-prefix? absprefix) + (if (if is-win + (peg/match '(+ "\\" (* (range "AZ" "az") ":\\")) path) + (string/has-prefix? "/" path)) path (string (os/cwd) sep path))) @@ -581,9 +583,13 @@ int main(int argc, const char **argv) { (set fresh true) (os/execute ["git" "clone" repo module-dir] :p)) (def olddir (os/cwd)) - (os/cd module-dir) (try - (with-dyns [:rules @{}] + (with-dyns [:rules @{} + :modpath (abspath (dyn :modpath JANET_MODPATH)) + :headerpath (abspath (dyn :headerpath JANET_HEADERPATH)) + :libpath (abspath (dyn :libpath JANET_LIBPATH)) + :binpath (abspath (dyn :binpath JANET_BINPATH))] + (os/cd module-dir) (unless fresh (os/execute ["git" "pull" "origin" "master"] :p)) (when tag
Yardoc: Add missing document in Magick module
@@ -29,6 +29,29 @@ module Magick IMAGEMAGICK_VERSION = Magick::Magick_version.split[1].split('-').first class << self + # Describes the image formats supported by ImageMagick. + # If the optional block is present, calls the block once for each image format. + # The first argument, +k+, is the format name. The second argument, +v+, is the + # properties string described below. + # + # - +B+ is "*" if the format has native blob support, or " " otherwise. + # - +R+ is "r" if ImageMagick can read that format, or "-" otherwise. + # - +W+ is "w" if ImageMagick can write that format, or "-" otherwise. + # - +A+ is "+" if the format supports multi-image files, or "-" otherwise. + # + # @overload formats + # @return [Hash] the formats hash + # + # @overload formats + # @yield [k, v] + # @yieldparam k [String] the format name + # @yieldparam v [String] the properties string + # @return [Magick] + # + # @example + # p Magick.formats + # => {"3FR"=>" r-+", "3G2"=>" r-+", "3GP"=>" r-+", "A"=>"*rw+", + # ... def formats @formats ||= init_formats @@ -40,7 +63,26 @@ module Magick end end - # remove reference to the proc at exit + # If the Magick module attribute +trace_proc+ is set to a Proc object, + # RMagick calls the proc whenever an image is created or destroyed. + # + # You can use this proc to keep track of which images your program has created + # and which have been destroyed. + # + # @param p [Proc] The proc object. + # The following value will be passed into the proc object. + # - +which+ - A symbol that indicates which operation the proc is being called for. + # If the proc is called for an image creation, the value is +:c+. + # If called for an image destruction, the value is +:d+. + # - +description+ - A string describing the image. This is the same string that + # would be returned by calling the image's inspect method. + # - +id+ - A unique identifier for the image. This identifier is not the same as the object's +object_id+. 
+ # - +method+ - The name of the method responsible for creating or destroying the image. + # + # @example + # Magick.trace_proc = proc do |which, description, id, method| + # ... + # end def trace_proc=(p) m = Mutex.new m.synchronize do
fuzz: mk_check: cleanup service code
@@ -19,6 +19,7 @@ void cb_main(mk_request_t *request, void *data) mk_http_status(request, 200); mk_http_header(request, "X-Monkey", 8, "OK", 2); + mk_http_send(request, ":)\n", 3, NULL); mk_http_done(request); } @@ -77,32 +78,9 @@ static void signal_init() signal(SIGTERM, &signal_handler); } -static void cb_queue_message(mk_mq_t *queue, void *data, size_t size, void *ctx) -{ - size_t i; - char *buf; - (void) ctx; - (void) queue; - - printf("=== cb queue message === \n"); - printf(" => %lu bytes\n", size); - printf(" => "); - - buf = data; - for (i = 0; i < size; i++) { - printf("%c", buf[i]); - } - printf("\n\n"); -} - - int main() { - int i = 0; - int len; int vid; - int qid; - char msg[800000]; signal_init(); @@ -111,9 +89,6 @@ int main() return -1; } - /* Create a message queue and a callback for each message */ - qid = mk_mq_create(ctx, "/data", cb_queue_message, NULL); - mk_config_set(ctx, "Listen", API_PORT, NULL); @@ -129,11 +104,6 @@ int main() mk_info("Service: http://%s:%s/test_chunks", API_ADDR, API_PORT); mk_start(ctx); - for (i = 0; i < 5; i++) { - len = snprintf(msg, sizeof(msg) - 1, "[...] message ID: %i\n", i); - mk_mq_send(ctx, qid, &msg, len); - } - sleep(3600); mk_stop(ctx);
fix setting google zone permissions in nightly job
@@ -38,7 +38,7 @@ jobs: - run: echo $GCLOUD_SERVICE_KEY | gcloud auth activate-service-account --key-file=- - run: sudo gcloud config set project ${GOOGLE_PROJECT_ID} - - run: gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE} + - run: sudo gcloud --quiet config set compute/zone ${GOOGLE_COMPUTE_ZONE} - run: mkdir temp && cp output/platform/pc/bin/kernel.img temp/ && cp output/platform/pc/boot/boot.img temp/ && cp output/tools/bin/mkfs temp/ && cp output/tools/bin/dump temp/ && mkdir temp/klibs && cp output/klib/bin/* temp/klibs - run: cd temp && tar cvzf nanos-nightly-linux.tar.gz * && gsutil cp nanos-nightly-linux.tar.gz gs://nanos/release/nightly
Bugfix gui slider mouse translation handling
@@ -36,8 +36,12 @@ static void _gui_slider_handle_mouse_moved(gui_slider_t* s, Point mouse_pos) { static void _gui_slider_handle_mouse_dragged(gui_slider_t* s, Point mouse_pos) { mouse_pos = point_make(mouse_pos.x - s->superview->frame.origin.x, mouse_pos.y - s->superview->frame.origin.y); if (s->slider_percent_updated_cb && s->in_left_click) { - float percent = (mouse_pos.x - s->slider_origin_x) / (float)s->slidable_width; + float percent = 0.0; + if (mouse_pos.x >= (int32_t)s->slider_origin_x) { + percent = (max(mouse_pos.x, s->slider_origin_x) - s->slider_origin_x) / (float)s->slidable_width; + } percent = max(min(percent, 1.0), 0.0); + s->slider_percent = percent; s->slider_percent_updated_cb(s, percent); _set_needs_display(s);
PROV: Adapt the DSA keymgmt implementation to no ex_fields
@@ -135,9 +135,8 @@ static int key_to_params(DSA *dsa, OSSL_PARAM_BLD *tmpl) static void *dsa_importdomparams(void *provctx, const OSSL_PARAM params[]) { DSA *dsa; - OPENSSL_CTX *libctx = PROV_LIBRARY_CONTEXT_OF(provctx); - if ((dsa = dsa_new(libctx)) == NULL + if ((dsa = DSA_new()) == NULL || !params_to_domparams(dsa, params)) { DSA_free(dsa); dsa = NULL; @@ -166,9 +165,8 @@ static int dsa_exportdomparams(void *domparams, static void *dsa_importkey(void *provctx, const OSSL_PARAM params[]) { DSA *dsa; - OPENSSL_CTX *libctx = PROV_LIBRARY_CONTEXT_OF(provctx); - if ((dsa = dsa_new(libctx)) == NULL + if ((dsa = DSA_new()) == NULL || !params_to_key(dsa, params)) { DSA_free(dsa); dsa = NULL;
Ensure docker images are using latest versions of base OS packages.
@@ -38,7 +38,7 @@ default: @echo "valid targets: all build dockerfiles push tag export clean" dockerfiles: $(addprefix Dockerfile., $(MODULES)) -build: $(addprefix build-,$(MODULES)) +build: refresh-base $(addprefix build-,$(MODULES)) tag: $(addprefix tag-,$(MODULES)) push: $(addprefix push-,$(MODULES)) latest export: $(addsuffix .tar.gz,$(addprefix $(EXPORT_DIR)/nginx-unit-$(VERSION)-,$(MODULES))) $(addsuffix .tar.gz.sha512, $(addprefix $(EXPORT_DIR)/nginx-unit-$(VERSION)-,$(MODULES))) @@ -51,7 +51,7 @@ Dockerfile.%: ../../version > $@ build-%: Dockerfile.% - docker build -t unit:$(VERSION)-$* -f Dockerfile.$* . + docker build --no-cache -t unit:$(VERSION)-$* -f Dockerfile.$* . tag-%: build-% docker tag unit:$(VERSION)-$* nginx/unit:$(VERSION)-$* @@ -63,6 +63,9 @@ latest: docker tag nginx/unit:$(VERSION)-full nginx/unit:latest docker push nginx/unit:latest +refresh-base: + docker pull $(shell head -n 1 Dockerfile.tmpl | cut -d' ' -f 2) + $(EXPORT_DIR): mkdir -p $@ @@ -78,4 +81,4 @@ clean: rm -f $(addprefix Dockerfile., $(MODULES)) rm -rf $(EXPORT_DIR) -.PHONY: default all build dockerfiles latest push tag export clean +.PHONY: default all build dockerfiles latest push tag export clean refresh-base
support 8K5 in sim/README
@@ -102,15 +102,15 @@ This means, that PSLSE&sim only runs, as long as the app/list/xterm is avail. If you start an app in the xterm and cntl-C it without exiting from the xterm, the simulation keeps running. ## card and action settings -Currently supported are AlphaData KU3, Nallatech 250S and 250S+, Semptian NSA121B +Currently supported are AlphaData KU3 and 8K5, Nallatech 250S and 250S+, Semptian NSA121B Regression tests are in place for the following cards: ``` -card ADKU3 N250S N250SP S121B # set with -system POWER8 POWER8 POWER9 POWER8 # -memory_avail DDR3/BRAM DDR4/BRAM DDR4/BRAM DDR4/BRAM # SDRAM_USED, BRAM_USED -NVMe no yes no no # NVME_USED=TRUE -cloud access yes partial no no # not avail yet for NVMe +card ADKU3 AD8K5 N250S N250SP S121B # set with +system POWER8 POWER8 POWER8 POWER9 POWER8 # +memory_avail DDR3/BRAM DDR4/BRAM DDR4/BRAM DDR4/BRAM DDR4/BRAM # SDRAM_USED, BRAM_USED +NVMe no no yes no no # NVME_USED=TRUE +cloud access yes no partial no no # not avail yet for NVMe ``` Depending on the used memory, the card and framework can be combined with the following actions (one action only): ```
Fix `Rugged::Tree.diff` to allow the first given tree to be `nil`.
@@ -391,6 +391,8 @@ static VALUE rb_git_diff_tree_to_tree(VALUE self, VALUE rb_repo, VALUE rb_tree, struct nogvl_diff_args args; Data_Get_Struct(rb_repo, git_repository, repo); + + if(RTEST(rb_tree)) Data_Get_Struct(rb_tree, git_tree, tree); if(RTEST(rb_other_tree))
Windows CI: explicitly use windows-2019 instead of using windows-latest
@@ -15,7 +15,7 @@ jobs: strategy: matrix: os: - - windows-latest + - windows-2019 - windows-2022 platform: - arch: win64 @@ -56,7 +56,7 @@ jobs: strategy: matrix: os: - - windows-latest + - windows-2019 - windows-2022 runs-on: ${{matrix.os}} steps: @@ -80,7 +80,7 @@ jobs: strategy: matrix: os: - - windows-latest + - windows-2019 - windows-2022 runs-on: ${{matrix.os}} steps:
external/trace: Fix endianness detection in Makefile The Makefile for the dump_trace tool does not correctly determine endianness on Power. Instead Big Endian is always used on Power. Fix so Little Endian will be detected.
-HOSTEND=$(shell uname -m | sed -e 's/^i.*86$$/LITTLE/' -e 's/^x86.*/LITTLE/' -e 's/^ppc.*/BIG/') +HOSTEND=$(shell uname -m | sed -e 's/^i.*86$$/LITTLE/' -e 's/^x86.*/LITTLE/' -e 's/^ppc64le/LITTLE/' -e 's/^ppc.*/BIG/') CFLAGS=-g -Wall -DHAVE_$(HOSTEND)_ENDIAN -I../../include -I../../ dump_trace: dump_trace.c
dev-tools/numpy: downgrade to v1.15.4 (last version that supports python3.4 which is what is available on SLE 12 SP4)
@@ -22,7 +22,7 @@ Requires: openblas-%{compiler_family}%{PROJ_DELIM} %define pname numpy Name: %{python_prefix}-%{pname}-%{compiler_family}%{PROJ_DELIM} -Version: 1.17.2 +Version: 1.15.4 Release: 1%{?dist} Url: https://github.com/numpy/numpy Summary: NumPy array processing for numbers, strings, records and objects
Handle tabs in fonts;
@@ -106,6 +106,14 @@ void lovrFontRender(Font* font, const char* str, float wrap, HorizontalAlign hal continue; } + // Tabs + if (codepoint == '\t') { + Glyph* space = lovrFontGetGlyph(font, ' '); + cx += space->advance * 4; + str += bytes; + continue; + } + // Kerning cx += lovrFontGetKerning(font, previous, codepoint); previous = codepoint; @@ -180,6 +188,14 @@ float lovrFontGetWidth(Font* font, const char* str, float wrap) { continue; } + // Tabs + if (codepoint == '\t') { + Glyph* space = lovrFontGetGlyph(font, ' '); + x += space->advance * 4; + str += bytes; + continue; + } + Glyph* glyph = lovrFontGetGlyph(font, codepoint); x += glyph->advance + lovrFontGetKerning(font, previous, codepoint); previous = codepoint;
[run_rocm_test] - Remove 5.0 passes that throw assertions. Assertions are disabled for ROCm releases. These tests were not ready to be in expected passes.
-lsms_triangular_packing.cpp -test_allocate_allocator.c test_allocate.c test_declare_target_parallel_for.c test_loop_lastprivate_device.F90 @@ -38,6 +36,4 @@ test_target_teams_distribute_reduction_eqv.F90 test_target_teams_distribute_reduction_multiply.F90 test_target_teams_distribute_reduction_neqv.F90 test_target_teams_distribute_reduction_sub.F90 -test_target_uses_allocators_cgroup.c -test_target_uses_allocators_large_cap.c test_target_uses_allocators_pteam.c
[bsp][simulator] Fix compilation errors
@@ -254,6 +254,7 @@ off_t lseek(int fd, off_t offset, int whence) } RTM_EXPORT(lseek); +#ifndef _WIN32 /* we can not implement these functions */ /** * this function is a POSIX compliant version, which will rename old file name * to new file name. @@ -280,6 +281,7 @@ int rename(const char *old_file, const char *new_file) return 0; } RTM_EXPORT(rename); +#endif /** * this function is a POSIX compliant version, which will unlink (remove) a @@ -305,7 +307,6 @@ int unlink(const char *pathname) } RTM_EXPORT(unlink); -#ifndef _WIN32 /* we can not implement these functions */ /** * this function is a POSIX compliant version, which will get file information. * @@ -370,7 +371,6 @@ int fstat(int fildes, struct stat *buf) return RT_EOK; } RTM_EXPORT(fstat); -#endif /** * this function is a POSIX compliant version, which shall request that all data
update the docs for running rune containers in kubernetes cluster
.PHONY: all install clean uninstall package export INCLAVARE_CONTAINERS_VERSION := $(shell cat ./VERSION) -components := rune shim sgx-tools +components := rune shim epm sgx-tools all: for name in $(components); do \
npu2: Fix argument order to npu2_scom_write in BAR setup The arguments to npu2_scom_write() in npu2_write_bar() were in the wrong order, resulting in incorrect BAR setup in some circumstances. This patch swaps the arguments so they are correct.
@@ -196,7 +196,7 @@ static void npu2_write_bar(struct npu2 *p, if (p) npu2_write(p, reg, val); else - npu2_scom_write(gcid, scom, reg, val, NPU2_MISC_DA_LEN_8B); + npu2_scom_write(gcid, scom, reg, NPU2_MISC_DA_LEN_8B, val); } }
tests: internal: config_format: allow 'service' section to be set twice
@@ -44,9 +44,9 @@ void test_api() kv = flb_cf_property_add(cf, &service->properties, " ", 3, "", 0); TEST_CHECK(kv == NULL); - /* try to add another 'SERVICE' section, it should fail */ + /* try to add another 'SERVICE' section, it should return the same one */ s_tmp = flb_cf_section_create(cf, "SERVICE", 7); - TEST_CHECK(s_tmp == NULL); + TEST_CHECK(s_tmp == service); /* add a valid section */ s_tmp = flb_cf_section_create(cf, "INPUT", 5);
Make error sheet two-sided as needed.
@@ -1517,8 +1517,10 @@ generate_job_error_sheet( pdfio_stream_t *st; // Page stream pdfio_obj_t *courier; // Courier font pdfio_dict_t *dict; // Page dictionary + size_t i, // Looping var + count; // Number of pages const char *msg; // Current message - size_t count; // Number of messages + size_t mcount; // Number of messages // Create a page dictionary with the Courier font... @@ -1527,6 +1529,15 @@ generate_job_error_sheet( pdfioPageDictAddFont(dict, "F1", courier); + // Figure out how many impressions to produce... + if (!strcmp(p->options->sides, "one-sided")) + count = 1; + else + count = 2; + + // Create pages... + for (i = 0; i < count; i ++) + { // Create the error sheet... st = pdfio_start_page(p, dict); @@ -1548,16 +1559,16 @@ generate_job_error_sheet( pdfioContentSetTextFont(st, "F1", XFORM_TEXT_SIZE); pdfioContentSetTextLeading(st, XFORM_TEXT_HEIGHT); - for (msg = (const char *)cupsArrayGetFirst(p->errors), count = 0; msg; msg = (const char *)cupsArrayGetNext(p->errors)) + for (msg = (const char *)cupsArrayGetFirst(p->errors), mcount = 0; msg; msg = (const char *)cupsArrayGetNext(p->errors)) { if (*msg == 'E') { pdfioContentTextShowf(st, false, " %s\n", msg + 1); - count ++; + mcount ++; } } - if (count == 0) + if (mcount == 0) pdfioContentTextShow(st, false, " No Errors\n"); pdfioContentSetTextFont(st, "F1", 2.0 * XFORM_TEXT_SIZE); @@ -1568,20 +1579,21 @@ generate_job_error_sheet( pdfioContentSetTextFont(st, "F1", XFORM_TEXT_SIZE); pdfioContentSetTextLeading(st, XFORM_TEXT_HEIGHT); - for (msg = (const char *)cupsArrayGetFirst(p->errors), count = 0; msg; msg = (const char *)cupsArrayGetNext(p->errors)) + for (msg = (const char *)cupsArrayGetFirst(p->errors), mcount = 0; msg; msg = (const char *)cupsArrayGetNext(p->errors)) { if (*msg == 'I') { pdfioContentTextShowf(st, false, " %s\n", msg + 1); - count ++; + mcount ++; } } - if (count == 0) + if (mcount == 0) pdfioContentTextShow(st, false, " No Warnings\n"); pdfioContentTextEnd(st); pdfio_end_page(p, 
st); + } return (true); }
Force query of incomplete simple descriptors
@@ -140,7 +140,7 @@ void DEV_SimpleDescriptorStateHandler(Device *device, const Event &event) for (const auto ep : device->node()->endpoints()) { deCONZ::SimpleDescriptor sd; - if (device->node()->copySimpleDescriptor(ep, &sd) != 0) + if (device->node()->copySimpleDescriptor(ep, &sd) != 0 || sd.deviceId() == 0xffff) { needFetchEp = ep; break;
zuse: boundary assertions for schnorr Just assert on mis-sized values.
~/ %sosi |= [sk=@I m=@I a=@I] ^- @J + ?> (gte 32 (met 3 sk)) + ?> (gte 32 (met 3 m)) + ?> (gte 32 (met 3 a)) =/ c curve ?< |(=(0 sk) (gte sk n.domain.c)) =/ pp ~/ %sove |= [pk=@I m=@I sig=@J] ^- ? + ?> (gte 32 (met 3 pk)) + ?> (gte 32 (met 3 m)) + ?> (gte 64 (met 3 sig)) =/ c curve =/ pup (lift-x pk) ?~ pup
sysrepoctl BUGFIX leading space in submodules list
@@ -242,7 +242,9 @@ srctl_list_collect(sr_conn_ctx_t *conn, struct lyd_node *sr_data, const struct l for (i = 0; i < ly_mod->inc_size; ++i) { str = ly_mod->inc[i].submodule->name; cur_item->submodules = realloc(cur_item->submodules, strlen(cur_item->submodules) + 1 + strlen(str) + 1); + if (i) { strcat(cur_item->submodules, " "); + } strcat(cur_item->submodules, str); }
Doc: fix typos. "PGcon" should be "PGconn". Noted by D. Frey. Discussion:
@@ -449,7 +449,7 @@ pg_int64 lo_tell64(PGconn *conn, int fd); <indexterm><primary>lo_truncate</primary></indexterm> To truncate a large object to a given length, call <synopsis> -int lo_truncate(PGcon *conn, int fd, size_t len); +int lo_truncate(PGconn *conn, int fd, size_t len); </synopsis> This function truncates the large object descriptor <parameter>fd</parameter> to length <parameter>len</parameter>. The @@ -477,7 +477,7 @@ int lo_truncate(PGcon *conn, int fd, size_t len); When dealing with large objects that might exceed 2GB in size, instead use <synopsis> -int lo_truncate64(PGcon *conn, int fd, pg_int64 len); +int lo_truncate64(PGconn *conn, int fd, pg_int64 len); </synopsis> This function has the same behavior as <function>lo_truncate</function>, but it can accept a
Fix missing parentheses around parameter names in macros.
@@ -78,8 +78,8 @@ static float lattice( perlin_data_t *data, int ix, float fx, int iy, float fy, i #define DELTA 1e-6f #define SWAP(a, b, t) t = a; a = b; b = t -#define FLOOR(a) ((a)> 0 ? ((int)a) : (((int)a)-1) ) -#define CUBIC(a) ( a * a * (3 - 2*a) ) +#define FLOOR(a) ((a) > 0 ? (int)(a) : ((int)(a) - 1) ) +#define CUBIC(a) ((a) * (a) * (3 - 2 * (a))) static void normalize(perlin_data_t *data, float *f) {
Promote DDF status to gold
"manufacturername": "$MF_PHILIPS", "modelid": "SML003", "product": "Motion Sensor 2. Gen (SML003)", - "status": "Silver", + "status": "Gold", "sleeper": false, "md:known_issues": [ ], "subdevices": [
Increased MAXARRY in vplanet.h to 128 to allow for more than 64 output options.
#define NAMELEN 100 #define MAXFILES 24 /* Maximum number of input files */ -#define MAXARRAY 64 /* Maximum number of options in +#define MAXARRAY 128 /* Maximum number of options in * an option array */ #define NUMOPT 1000 /* Number of options that could be * in MODULE */
BugID:18438196: Remove .gdbinit from .PHONY
@@ -119,7 +119,7 @@ COMPONENT_DEPENDENCY_SCRIPT := $(SCRIPTS_PATH)/component_dependencies.py COMPONENT_DEPENDENCY = $(PYTHON) $(COMPONENT_DEPENDENCY_SCRIPT) export COMPONENT_DEPENDENCY -.PHONY: $(BUILD_STRING) main_app bootloader clean Help download total run download_bootloader .gdbinit .gdbinit$(MBINSTYPE_LOWER) export_linkkit_sdk restore_linkkit_sdk +.PHONY: $(BUILD_STRING) main_app bootloader clean Help download total run download_bootloader export_linkkit_sdk restore_linkkit_sdk Help: $(TOOLCHAIN_HOOK_TARGETS)
Update release notes with window-save size fixes for Windows OS.
@@ -95,6 +95,7 @@ enhancements and bug-fixes that were added to this release.</p> <li>Fixed a type related compile error in vtkMergeTree that broke builds with gcc 4.9 on Raspberry Pis.</li> <li>Corrected an issue with newer versions of the Mac operating system (10.13, High Sierra), where VisIt couldn't connect to remote instances of VisIt.</li> <li>Corrected bug constructing material plots from database plugins that serve up dense volume fractions.</li> + <li>Fixed the problem saving images greater than screen resolution using non-screen capture on Windows.</li> </ul> <a name="Configuration_changes"></a>
common/host_command_pd.c: Format with clang-format BRANCH=none TEST=none
@@ -126,8 +126,8 @@ static void pd_check_chg_status(struct ec_response_pd_status *pd_status) #endif /* Set input current limit */ - rv = charge_set_input_current_limit(MAX(pd_status->curr_lim_ma, - CONFIG_CHARGER_INPUT_CURRENT), 0); + rv = charge_set_input_current_limit( + MAX(pd_status->curr_lim_ma, CONFIG_CHARGER_INPUT_CURRENT), 0); if (rv < 0) CPRINTS("Failed to set input curr limit from PD MCU"); }
vscode: Remove duplicated RTT terminal for evkninab3[Zephyr]
"interface": "swd", "servertype": "jlink", "runToMain": true, - "rttConfig": { - "enabled": true, - "address": "auto", - "decoders": [ - { - "label": "", - "port": 0, - "type": "console" - } - ] - }, "armToolchainPath": "${input:getArmToolchainPath}", "preLaunchTask": "Predebug evkninab3 runner [Zephyr]" },