message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
Change exceptional case for running negative tests
Update maybe_requires_ciphersuite_enabled so that it will not skip the
ciphersuite requirement when running a test case where the test expects
a ciphersuite mismatch | @@ -257,8 +257,7 @@ requires_ciphersuite_enabled() {
# maybe_requires_ciphersuite_enabled CMD [RUN_TEST_OPTION...]
# If CMD (call to a TLS client or server program) requires a specific
# ciphersuite, arrange to only run the test case if this ciphersuite is
-# enabled. As an exception, do run the test case if it expects a ciphersuite
-# mismatch.
+# enabled.
maybe_requires_ciphersuite_enabled() {
case "$1" in
*\ force_ciphersuite=*) :;;
@@ -268,15 +267,7 @@ maybe_requires_ciphersuite_enabled() {
ciphersuite="${ciphersuite%%[!-0-9A-Z_a-z]*}"
shift
- case "$*" in
- *"-s SSL - The handshake negotiation failed"*)
- # This test case expects a ciphersuite mismatch, so it doesn't
- # require the ciphersuite to be enabled.
- ;;
- *)
requires_ciphersuite_enabled "$ciphersuite"
- ;;
- esac
unset ciphersuite
}
|
Fix fpermissive | @@ -45,6 +45,12 @@ if((CMAKE_C_COMPILER_ID MATCHES "GNU") OR
add_definitions(-DM3_COMPILED_AS_CPP=1)
endif()
+
+set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DDEBUG=1 -ggdb")
+if (CMAKE_C_COMPILER_ID MATCHES "GNU")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive") # TODO: remove
+endif()
+
if(WIN32)
add_definitions(-D_CRT_SECURE_NO_WARNINGS -D_MBCS) # -DM3_RUN_LOOPS=10
@@ -59,9 +65,9 @@ if(WIN32)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}") # /GR- /GX-
else()
if (CMAKE_C_COMPILER_ID MATCHES "GNU")
- set(OPT_FLAGS "-O3 -fpermissive")
+ set(OPT_FLAGS "-O3")
else()
- set(OPT_FLAGS "-Oz")
+ set(OPT_FLAGS "-Oz") # -Ofast
endif()
set(CMAKE_C_FLAGS_RELEASE "${OPT_FLAGS} -fomit-frame-pointer -march=native -w -Wfatal-errors") #-fno-inline
|
Increase the timeout to 20 seconds
This will make the test more reliable, because extra waiting in the relcache
initialization caused some case the segment took longer to recover. | @@ -431,21 +431,21 @@ jobs:
image: centos-gpdb-dev-6
params:
MAKE_TEST_COMMAND: fts_transitions_part01
- BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=10 gp_fts_probe_interval=20
+ BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=20 gp_fts_probe_interval=20
TEST_OS: centos
- task: fts_transitions_part02
file: gpdb_src/concourse/tasks/tinc_gpdb.yml
image: centos-gpdb-dev-6
params:
MAKE_TEST_COMMAND: fts_transitions_part02
- BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=10 gp_fts_probe_interval=20
+ BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=20 gp_fts_probe_interval=20
TEST_OS: centos
- task: fts_transitions_part03
file: gpdb_src/concourse/tasks/tinc_gpdb.yml
image: centos-gpdb-dev-6
params:
MAKE_TEST_COMMAND: fts_transitions_part03
- BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=10 gp_fts_probe_interval=20
+ BLDWRAP_POSTGRES_CONF_ADDONS: gp_segment_connect_timeout=20 gp_fts_probe_interval=20
TEST_OS: centos
- name: storage
|
[Kernel] Use indirect trampoline for launching fs_server | @@ -69,7 +69,7 @@ static void _kernel_bootstrap_part2(void) {
//
// Launch the file server, which will load the ramdisk and launch all the
// specified startup programs
- task_spawn__with_args("launch_fs_server", launch_fs_server, 0, 0, 0);
+ task_spawn__with_args("launch_fs_server", FS_SERVER_EXEC_TRAMPOLINE_NAME, 0, 0, 0);
// Bootstrapping complete - kill this process
printf("[t = %d] Bootstrap task [PID %d] will exit\n", time(), getpid());
|
Fixed small issues with README.rst. | @@ -8,7 +8,7 @@ Overview
=================== ============
**Date** 10/03/18
**Author** Peter Driscoll
-**Modules** `radheat, thermint <../src/radheat.html, ../src/thermint.html>`_
+**Modules** radheat thermint
**Approx. runtime** <1 second
**Source code** `GitHub <https://github.com/VirtualPlanetaryLaboratory/vplanet-private/tree/master/examples/EarthInterior>`_
=================== ============
|
Enable websecurity in production play window | @@ -209,7 +209,7 @@ const createPlay = async (url: string) => {
autoHideMenuBar: true,
webPreferences: {
nodeIntegration: false,
- webSecurity: false
+ webSecurity: process.env.NODE_ENV !== 'development'
}
});
} else {
|
Fix showing console at bottom | @@ -81,7 +81,7 @@ import SavannaKit
splitVC.ratio = 0
}
- if traitCollection.horizontalSizeClass == .compact {
+ if traitCollection.horizontalSizeClass == .compact && !EditorSplitViewController.shouldShowConsoleAtBottom {
splitVC.ratio = 1
}
|
oc_cloud:notify cloudconf obsvrs after register | @@ -195,6 +195,8 @@ _register_handler(oc_cloud_context_t *ctx, oc_client_response_t *data)
ctx->store.status |= OC_CLOUD_REGISTERED;
ctx->store.cps = OC_CPS_REGISTERED;
+ oc_notify_observers(ctx->cloud_conf);
+
return 0;
error:
|
Fix win config | @@ -189,16 +189,11 @@ jobs:
resource_class: 'windows.medium'
machine:
image: 'windows-server-2022-gui:current'
- shell: 'powershell.exe -ExecutionPolicy Bypass'
+ shell: 'bash.exe'
steps:
- checkout
- - run:
- command: cmake -H. -Bbuild
- shell: bash.exe
- - run:
- command: cmake --build build
- shell: bash.exe
-
+ - run: cmake -H. -Bbuild
+ - run: cmake --build build
workflows:
build-and-test:
|
Added get(index) method | @@ -73,7 +73,7 @@ public class Tileset extends Resource
tiles = new ArrayList<>();
- // important to always use the same loop order when building Tileset and Tilemap object
+ // important to always use the same loop order when building Tileset and Tilemap/Map object
for (int j = 0; j < heightTile; j++)
{
for (int i = 0; i < widthTile; i++)
@@ -153,6 +153,11 @@ public class Tileset extends Resource
return tiles.size();
}
+ public Tile get(int index)
+ {
+ return tiles.get(index);
+ }
+
public int getTileIndex(Tile tile, TileOptimization opt)
{
// no optimization allowed --> need to duplicate tile
@@ -224,5 +229,4 @@ public class Tileset extends Resource
outS.append(" dc.l " + bin.id + "\n");
outS.append("\n");
}
-
}
|
Validate getcpu pointers only if not null | @@ -2335,8 +2335,8 @@ sysreturn umask(int mask)
sysreturn getcpu(unsigned int *cpu, unsigned int *node, void *tcache)
{
cpuinfo ci = current_cpu();
- if (!validate_user_memory(cpu, sizeof *cpu, true) ||
- !validate_user_memory(node, sizeof *node, true))
+ if ((cpu != 0 && !validate_user_memory(cpu, sizeof *cpu, true)) ||
+ (node != 0 && !validate_user_memory(node, sizeof *node, true)))
return -EFAULT;
if (cpu)
*cpu = ci->id;
|
fixes memory leak on proxy reverse connection failure | @@ -1779,8 +1779,10 @@ _proxy_reverse_connect(u3_proxy_client* cli_u)
if ( 0 != (sas_i = uv_tcp_connect(upc_u, &con_u->don_u,
(const struct sockaddr*)&add_u,
_proxy_reverse_connect_cb)) ) {
- uL(fprintf(uH, "proxy: reverse: %s\n", uv_strerror(sas_i)));
+ uL(fprintf(uH, "proxy: reverse connect: %s\n", uv_strerror(sas_i)));
_proxy_conn_close(con_u);
+ _proxy_client_free(cli_u);
+ free(upc_u);
}
}
@@ -1801,7 +1803,6 @@ _proxy_reverse_client_resolve_cb(uv_getaddrinfo_t* adr_u,
// XX traverse struct a la _ames_czar_cb
cli_u->ipf_w = ntohl(((struct sockaddr_in *)aif_u->ai_addr)->sin_addr.s_addr);
_proxy_reverse_connect(cli_u);
- // xx free cli_u where?
}
free(adr_u);
|
doc: update doxygen configuration for API change
Some functions that were in arch/x86/irq.h were moved
into common/irq.h and arch/x86/guest/virq.h. | @@ -800,6 +800,7 @@ INPUT = custom-doxygen/mainpage.md \
../hypervisor/include/arch/x86/pgtable.h \
../hypervisor/include/arch/x86/vtd.h \
../hypervisor/include/arch/x86/irq.h \
+ ../hypervisor/include/arch/x86/guest/virq.h \
../hypervisor/include/arch/x86/guest/vmtrr.h \
../hypervisor/include/arch/x86/guest/vlapic.h \
../hypervisor/include/dm/vioapic.h \
@@ -808,6 +809,7 @@ INPUT = custom-doxygen/mainpage.md \
../hypervisor/include/arch/x86/guest/vmx_io.h \
../hypervisor/include/arch/x86/guest/assign.h \
../hypervisor/include/common/hypercall.h \
+ ../hypervisor/include/common/irq.h \
../hypervisor/include/common/ptdev.h \
../hypervisor/include/public/acrn_common.h \
../hypervisor/include/public/acrn_hv_defs.h \
|
Protect msg tasks modifications from any concurrent modification in IT | @@ -588,8 +588,11 @@ static inline void MsgAlloc_ClearMsgTask(void)
LuosHAL_SetIrqState(false);
msg_tasks[rm] = msg_tasks[rm + 1];
}
+ if (msg_tasks_stack_id != 0)
+ {
msg_tasks_stack_id--;
msg_tasks[msg_tasks_stack_id] = 0;
+ }
LuosHAL_SetIrqState(true);
MsgAlloc_FindNewOldestMsg();
}
|
Jenkins: Remove outdated ToDo item | @@ -523,7 +523,6 @@ def generateFullBuildStages() {
tasks << buildAndTestMingwW64()
// Build Elektra on alpine
- // TODO: Remove bash after #2077 is resolved
// TODO: Add more deps to test them with musl
// TODO: add date when their issues are resolved
// TODO: reenable kdb testing once it is POSIX conformant
|
netlify: add Godbolt redirects for NEON | [[redirects]]
from = "/godbolt/demo"
- to = "https://godbolt.org/z/v9R-cP"
+ to = "https://godbolt.org/z/keBtF9"
+ status = 302
+ force = true
+
+[[redirects]]
+ from = "/godbolt/arm/neon/simple"
+ to = "https://godbolt.org/z/6rdp4L"
+ status = 302
+ force = true
+
+[[redirects]]
+ from = "/godbolt/arm/neon/demo"
+ to = "https://godbolt.org/z/sHVz49"
status = 302
force = true
|
[update] change default lwip stack to lwip2.0.3 | @@ -114,7 +114,7 @@ config RT_USING_LWIP
if RT_USING_LWIP
choice
prompt "lwIP version"
- default RT_USING_LWIP212
+ default RT_USING_LWIP203
help
Select the lwIP version
|
Fix 80-test_ssl_old.t: only count the ciphers if there are any. | @@ -434,9 +434,9 @@ sub testssl {
if ($ciphersstatus) {
$ciphersuites{$protocol} = [ map { s|\R||; split(/:/, $_) }
@ciphers ];
- }
$protocolciphersuitecount += scalar @{$ciphersuites{$protocol}};
}
+ }
plan skip_all => "None of the ciphersuites to test are available in this OpenSSL build"
if $protocolciphersuitecount + scalar(keys %ciphersuites) == 0;
|
Update drv_wdt.c | @@ -25,7 +25,7 @@ static rt_err_t wdt_init(rt_watchdog_t *wdt)
hiwdg.Instance = IWDG;
hiwdg.Init.Prescaler = IWDG_PRESCALER_32;
- hiwdg.Init.Reload = 0x00000FFE;
+ hiwdg.Init.Reload = 0x00000FFF;
#if defined(SOC_SERIES_STM32F0) || defined(SOC_SERIES_STM32L4) || defined(SOC_SERIES_STM32F7)
hiwdg.Init.Window = 0x00000FFF;
#endif
|
Remove /usr/local/include from native Makefile
As referenced in issue Including this directory at such
a build stage means that the dir ends up being the first in the
search path, which can lead to conflicts with local files with
the same name. | @@ -17,7 +17,7 @@ STRIP ?= strip
ifeq ($(WERROR),1)
CFLAGSWERROR=-Werror
endif
-CFLAGSNO = -Wall -g -I/usr/local/include $(CFLAGSWERROR)
+CFLAGSNO = -Wall -g $(CFLAGSWERROR)
CFLAGS += $(CFLAGSNO)
### Are we building with code size optimisations?
|
build: improve calling libtoolize | cd $(dirname "$0")
-if [ "$(uname)" = "Darwin" ]; then
-libtoolBin=$(which glibtoolize)
-libtoolBinDir=$(dirname "${libtoolBin}")
+autoheader
-if [ ! -f "${libtoolBinDir}/libtoolize" ]; then
-ln -s $libtoolBin "${libtoolBinDir}/libtoolize"
-fi
+if [ "$(uname)" = "Darwin" ]; then
+ glibtoolize
+else
+ libtoolize
fi
-autoheader
-libtoolize
aclocal -I m4
autoconf
automake --add-missing --copy
|
Update testContainer/splunk/Dockerfile from 7.3.0 to 8.0.0. | -FROM cribl/splunk:7.3.0
+FROM cribl/splunk:8.0.0
ARG CRIBL_DISTRO=cribl-splunk-app-linux-x64
RUN sh -c 'echo dash dash/sh boolean false | debconf-set-selections' && \
sh -c 'DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash' && \
- echo "deb [check-valid-until=no] http://archive.debian.org/debian jessie-backports main" > /etc/apt/sources.list.d/jessie-backports.list && \
- sed -i '/deb http:\/\/deb.debian.org\/debian jessie-updates main/d' /etc/apt/sources.list && \
apt-get -o Acquire::Check-Valid-Until=false update && \
apt-get install -y vim nano curl ca-certificates jq
-RUN apt-get install -y gcc make \
+ARG DEBIAN_FRONTEND=noninteractive
+RUN apt-get --no-install-recommends install -y gcc make \
build-essential checkinstall libreadline-gplv2-dev libncursesw5-dev libssl-dev \
libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev libffi-dev zlib1g-dev
|
[io] remove unnecessary cmake output from | @@ -71,11 +71,8 @@ if(HAVE_SICONOS_MECHANICS)
# swig modules must be built before vview and vexport can generate manpages
foreach(_SWIG_MODULE ${${COMPONENT}_PYTHON_MODULES})
- message(STATUS _SWIG_MODULE=${_SWIG_MODULE})
get_filename_component(_SWIG_MODULE_NAME ${_SWIG_MODULE} NAME)
- message(STATUS _SWIG_MODULE_NAME=${_SWIG_MODULE_NAME})
list(APPEND _OTHER_COMPONENTS ${_SWIG_MODULE_NAME}_swig_compilation _${_SWIG_MODULE_NAME})
- message(STATUS _OTHER_COMPONENTS=${_OTHER_COMPONENTS})
endforeach()
set(MANPAGES_FOR_TOOLS)
|
libhfuzz: more informative log/fatal messages | @@ -46,7 +46,8 @@ __attribute__((constructor)) static void mapBB(void) {
my_thread_no = atoi(my_thread_no_str);
if (my_thread_no >= _HF_THREAD_MAX) {
- LOG_F("my_thread_no > _HF_THREAD_MAX (%" PRIu32 " > %d)\n", my_thread_no, _HF_THREAD_MAX);
+ LOG_F("Received (via envvar) my_thread_no > _HF_THREAD_MAX (%" PRIu32 " > %d)\n",
+ my_thread_no, _HF_THREAD_MAX);
}
struct stat st;
if (fstat(_HF_BITMAP_FD, &st) == -1) {
@@ -54,11 +55,12 @@ __attribute__((constructor)) static void mapBB(void) {
}
if (st.st_size != sizeof(feedback_t)) {
LOG_F(
- "st.size != sizeof(feedback_t) (%zu != %zu)\n", (size_t)st.st_size, sizeof(feedback_t));
+ "size of the feedback structure mismatch: st.size != sizeof(feedback_t) (%zu != %zu)\n",
+ (size_t)st.st_size, sizeof(feedback_t));
}
if ((feedback = mmap(NULL, sizeof(feedback_t), PROT_READ | PROT_WRITE, MAP_SHARED,
_HF_BITMAP_FD, 0)) == MAP_FAILED) {
- PLOG_F("mmap");
+ PLOG_F("mmap of the feedback structure");
}
feedback->pidFeedbackPc[my_thread_no] = 0U;
feedback->pidFeedbackEdge[my_thread_no] = 0U;
|
examples: Update missing DJOYSTICK macro to new INPUT template | /* Configuration ************************************************************/
-#ifndef CONFIG_DJOYSTICK
-# error "CONFIG_DJOYSTICK is not defined in the configuration"
+#ifndef CONFIG_INPUT_DJOYSTICK
+# error "CONFIG_INPUT_DJOYSTICK is not defined in the configuration"
#endif
#ifndef CONFIG_EXAMPLES_DJOYSTICK_DEVNAME
|
pflash: Increase stack frame size warning threshold
pflash is a userspace tool, stack space isn't really a constraint that
we care about. | @@ -50,6 +50,7 @@ $(LIBFLASH_OBJS): libflash-%.o : libflash/%.c | links
$(CCAN_OBJS): ccan-list-%.o: ccan/list/%.c | links
$(Q_CC)$(CC) $(CFLAGS) -c $< -o $@
+$(EXE): CFLAGS += -Wframe-larger-than=2048
$(EXE): $(OBJS)
$(Q_CC)$(CC) $(LDFLAGS) $(CFLAGS) $^ -lrt -o $@
|
Toyota safety: block LTA message in TSS2 cars | @@ -206,8 +206,9 @@ static int toyota_fwd_hook(int bus_num, CAN_FIFOMailBox_TypeDef *to_fwd) {
if (bus_num == 2) {
int addr = GET_ADDR(to_fwd);
// block stock lkas messages and stock acc messages (if OP is doing ACC)
- int is_lkas_msg = ((addr == 0x2E4) || (addr == 0x412));
- // in TSSP 2.0 the camera does ACC as well, so filter 0x343
+ // in TSS2, 0.191 is LTA which we need to block to avoid controls collision
+ int is_lkas_msg = ((addr == 0x2E4) || (addr == 0x412) || (addr == 0x191));
+ // in TSS2 the camera does ACC as well, so filter 0x343
int is_acc_msg = (addr == 0x343);
int block_msg = is_lkas_msg || (is_acc_msg && long_controls_allowed);
if (!block_msg) {
|
respect jobs for cmake/msvc | @@ -518,10 +518,12 @@ end
-- do build for msvc
function _build_for_msvc(package, configs, opt)
+ local njob = opt.jobs or option.get("jobs") or nil
local slnfile = assert(find_file("*.sln", os.curdir()), "*.sln file not found!")
local runenvs = _get_msvc_runenvs(package)
local msbuild = find_tool("msbuild", {envs = runenvs})
- os.vrunv(msbuild.program, {slnfile, "-nologo", "-t:Rebuild", "-m",
+ os.vrunv(msbuild.program, {slnfile, "-nologo", "-t:Rebuild",
+ (njob ~= nil and format("-m:%d", njob) or "-m"),
"-p:Configuration=" .. (package:is_debug() and "Debug" or "Release"),
"-p:Platform=" .. (package:is_arch("x64") and "x64" or "Win32")}, {envs = runenvs})
end
@@ -558,10 +560,12 @@ end
-- do install for msvc
function _install_for_msvc(package, configs, opt)
+ local njob = opt.jobs or option.get("jobs") or nil
local slnfile = assert(find_file("*.sln", os.curdir()), "*.sln file not found!")
local runenvs = _get_msvc_runenvs(package)
local msbuild = assert(find_tool("msbuild", {envs = runenvs}), "msbuild not found!")
- os.vrunv(msbuild.program, {slnfile, "-nologo", "-t:Rebuild", "-m",
+ os.vrunv(msbuild.program, {slnfile, "-nologo", "-t:Rebuild",
+ (njob ~= nil and format("-m:%d", njob) or "-m"),
"-p:Configuration=" .. (package:is_debug() and "Debug" or "Release"),
"-p:Platform=" .. (package:is_arch("x64") and "x64" or "Win32")}, {envs = runenvs})
local projfile = os.isfile("INSTALL.vcxproj") and "INSTALL.vcxproj" or "INSTALL.vcproj"
|
linux/arch: adjust oom score for fuzzed tasks | @@ -122,6 +122,13 @@ bool arch_launchChild(run_t* run) {
return false;
}
+ /* Increase our OOM score, so fuzzed processes die faster */
+ static const char score100[] = "+500";
+ if (!files_writeBufToFile(
+ "/proc/self/oom_score_adj", (uint8_t*)score100, strlen(score100), O_WRONLY)) {
+ LOG_W("Couldn't increase our oom_score");
+ }
+
/*
* Disable ASLR:
* This might fail in Docker, as Docker blocks __NR_personality. Consequently
|
Update ya-binAuthor: hereticRevision: r3862339Task ID: | @@ -4,8 +4,8 @@ import sys
import platform
import json
-URLS = ["https://storage.mds.yandex.net/get-devtools-opensource/250854/867fbc990c88a2d3921f0afb6cba107c"]
-MD5 = "867fbc990c88a2d3921f0afb6cba107c"
+URLS = ["https://storage.mds.yandex.net/get-devtools-opensource/373962/e315957bc0932bb931183caa2db56ddd"]
+MD5 = "e315957bc0932bb931183caa2db56ddd"
RETRIES = 5
HASH_PREFIX = 10
|
find_apps.py: app with no supported targets will be skipped correctly
fix deprecated logging.warn() | @@ -146,9 +146,9 @@ def find_apps(build_system_class, path, recursive, exclude_list, target):
logging.debug("Looking for {} apps in {}{}".format(build_system_name, path, " recursively" if recursive else ""))
if not recursive:
if exclude_list:
- logging.warn("--exclude option is ignored when used without --recursive")
+ logging.warning("--exclude option is ignored when used without --recursive")
if not build_system_class.is_app(path):
- logging.warn("Path {} specified without --recursive flag, but no {} app found there".format(
+ logging.warning("Path {} specified without --recursive flag, but no {} app found there".format(
path, build_system_name))
return []
return [path]
@@ -168,12 +168,15 @@ def find_apps(build_system_class, path, recursive, exclude_list, target):
del dirs[:]
supported_targets = build_system_class.supported_targets(root)
- if supported_targets and target not in supported_targets:
+ if supported_targets and (target in supported_targets):
+ apps_found.append(root)
+ else:
+ if supported_targets:
logging.debug("Skipping, app only supports targets: " + ", ".join(supported_targets))
+ else:
+ logging.debug("Skipping, app has no supported targets")
continue
- apps_found.append(root)
-
return apps_found
|
rtc_tmpsensor: remove redundant semaphore on c3 | @@ -58,8 +58,6 @@ static const tsens_dac_offset_t dac_offset[TSENS_DAC_MAX] = {
{TSENS_DAC_L4, 2, 10, -40, 20, 3},
};
-static SemaphoreHandle_t s_rtc_tsens_mux = NULL;
-
esp_err_t temp_sensor_set_config(temp_sensor_config_t tsens)
{
REG_SET_BIT(SYSTEM_PERIP_CLK_EN1_REG, SYSTEM_TSENS_CLK_EN);
@@ -94,10 +92,6 @@ esp_err_t temp_sensor_get_config(temp_sensor_config_t *tsens)
esp_err_t temp_sensor_start(void)
{
- if (s_rtc_tsens_mux == NULL) {
- s_rtc_tsens_mux = xSemaphoreCreateMutex();
- }
- TSENS_CHECK(s_rtc_tsens_mux != NULL, ESP_ERR_NO_MEM);
REG_SET_BIT(SYSTEM_PERIP_CLK_EN1_REG, SYSTEM_TSENS_CLK_EN);
APB_SARADC.apb_tsens_ctrl2.tsens_clk_sel = 1;
APB_SARADC.apb_tsens_ctrl.tsens_pu = 1;
@@ -108,20 +102,13 @@ esp_err_t temp_sensor_stop(void)
{
APB_SARADC.apb_tsens_ctrl.tsens_pu = 0;
APB_SARADC.apb_tsens_ctrl2.tsens_clk_sel = 0;
- if (s_rtc_tsens_mux != NULL) {
- vSemaphoreDelete(s_rtc_tsens_mux);
- s_rtc_tsens_mux = NULL;
- }
return ESP_OK;
}
esp_err_t temp_sensor_read_raw(uint32_t *tsens_out)
{
TSENS_CHECK(tsens_out != NULL, ESP_ERR_INVALID_ARG);
- TSENS_CHECK(s_rtc_tsens_mux != NULL, ESP_ERR_INVALID_STATE);
- xSemaphoreTake(s_rtc_tsens_mux, portMAX_DELAY);
*tsens_out = APB_SARADC.apb_tsens_ctrl.tsens_out;
- xSemaphoreGive(s_rtc_tsens_mux);
return ESP_OK;
}
|
vfs: fdopen: add missing file stream flags clearing.
Clear file stream structure regardless of config options.
Structure clearing is needed as previous use of stream
list entry might leave fs_flags set.
Thia patch comes from nuttx community commit id: | @@ -228,11 +228,7 @@ FAR struct file_struct *fs_fdopen(int fd, int oflags, FAR struct tcb_s *tcb)
if (stream->fs_fd < 0) {
/* Zero the structure */
-#if CONFIG_STDIO_BUFFER_SIZE > 0
memset(stream, 0, sizeof(FILE));
-#elif CONFIG_NUNGET_CHARS > 0
- stream->fs_nungotten = 0;
-#endif
#if CONFIG_STDIO_BUFFER_SIZE > 0
/* Initialize the semaphore the manages access to the buffer */
|
travis FEATURE add test job with ASAN and UBSAN | @@ -88,6 +88,20 @@ jobs:
- mkdir build && cd build && cmake .. && make -j2 && ctest --output-on-failure && cd -
after_success:
- bash <(curl -s https://codecov.io/bash)
+ - stage: Test
+ name: Linux with CLang ASAN and UBSAN
+ os: linux
+ compiler: clang
+ before_install:
+ - sudo apt-get update -qq && sudo apt-get install -y valgrind
+ - wget https://cmocka.org/files/1.1/cmocka-1.1.2.tar.xz
+ - tar -xf cmocka-1.1.2.tar.xz
+ - cd cmocka-1.1.2 && mkdir build && cd build && cmake .. && make -j2 && sudo make install && cd ../..
+ - wget https://ftp.pcre.org/pub/pcre/pcre2-10.30.tar.gz
+ - tar -xzf pcre2-10.30.tar.gz
+ - cd pcre2-10.30 && ./configure && make -j2 && sudo -i -- sh -c 'cd /home/travis/build/CESNET/libyang/pcre2-10.30/ && make install' && cd ..
+ script:
+ - mkdir build && cd build && cmake -DCMAKE_C_FLAGS="-fsanitize=address,undefined" -DENABLE_VALGRIND_TESTS=OFF .. && make -j2 && ctest --output-on-failure && cd -
- stage: Test
name: ABI check
os: linux
|
stm32/adc: Add ADC auto-calibration for L4 MCUs.
This increases the precision of the ADC. | @@ -263,6 +263,9 @@ STATIC void adcx_init_periph(ADC_HandleTypeDef *adch, uint32_t resolution) {
#if defined(STM32H7)
HAL_ADCEx_Calibration_Start(adch, ADC_CALIB_OFFSET, ADC_SINGLE_ENDED);
#endif
+ #if defined(STM32L4)
+ HAL_ADCEx_Calibration_Start(adch, ADC_SINGLE_ENDED);
+ #endif
}
STATIC void adc_init_single(pyb_obj_adc_t *adc_obj) {
|
can/driver: Add module reset before enabling | @@ -677,6 +677,7 @@ esp_err_t can_driver_install(const can_general_config_t *g_config, const can_tim
ret = ESP_ERR_INVALID_STATE;
goto err;
}
+ periph_module_reset(PERIPH_CAN_MODULE);
periph_module_enable(PERIPH_CAN_MODULE); //Enable APB CLK to CAN peripheral
configASSERT(can_enter_reset_mode() == ESP_OK); //Must enter reset mode to write to config registers
can_config_pelican(); //Use PeliCAN addresses
|
Use macros to convert between radians and fixed-point 65536ths | #define fx2double(i) ((double)((i) / 65536.0))
#define double2fx(d) ((int32_t)round((d) * 65536.0))
+// pi radians == 32768 fixed-point "degrees"
+#define fdeg2rad(f) ((f) * (M_PI / 32768.0))
+#define rad2fdeg(r) ((r) * (32768.0 / M_PI))
+
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
@@ -58,7 +62,7 @@ void math_Print(int32_t i)
*/
int32_t math_Sin(int32_t i)
{
- return double2fx(sin(fx2double(i) * 2 * M_PI / 65536));
+ return double2fx(sin(fdeg2rad(fx2double(i))));
}
/*
@@ -66,7 +70,7 @@ int32_t math_Sin(int32_t i)
*/
int32_t math_Cos(int32_t i)
{
- return double2fx(cos(fx2double(i) * 2 * M_PI / 65536));
+ return double2fx(cos(fdeg2rad(fx2double(i))));
}
/*
@@ -74,7 +78,7 @@ int32_t math_Cos(int32_t i)
*/
int32_t math_Tan(int32_t i)
{
- return double2fx(tan(fx2double(i) * 2 * M_PI / 65536));
+ return double2fx(tan(fdeg2rad(fx2double(i))));
}
/*
@@ -82,7 +86,7 @@ int32_t math_Tan(int32_t i)
*/
int32_t math_ASin(int32_t i)
{
- return double2fx(asin(fx2double(i)) / 2 / M_PI * 65536);
+ return double2fx(rad2fdeg(asin(fx2double(i))));
}
/*
@@ -90,7 +94,7 @@ int32_t math_ASin(int32_t i)
*/
int32_t math_ACos(int32_t i)
{
- return double2fx(acos(fx2double(i)) / 2 / M_PI * 65536);
+ return double2fx(rad2fdeg(acos(fx2double(i))));
}
/*
@@ -98,7 +102,7 @@ int32_t math_ACos(int32_t i)
*/
int32_t math_ATan(int32_t i)
{
- return double2fx(atan(fx2double(i)) / 2 / M_PI * 65536);
+ return double2fx(rad2fdeg(atan(fx2double(i))));
}
/*
@@ -106,7 +110,7 @@ int32_t math_ATan(int32_t i)
*/
int32_t math_ATan2(int32_t i, int32_t j)
{
- return double2fx(atan2(fx2double(i), fx2double(j)) / 2 / M_PI * 65536);
+ return double2fx(rad2fdeg(atan2(fx2double(i), fx2double(j))));
}
/*
|
Restore former Ergo network hashrate reporting | @@ -142,7 +142,7 @@ public class ErgoJobManager : JobManagerBase<ErgoJob>
BlockchainStats.BlockHeight = job.Height;
BlockchainStats.ConnectedPeers = info.PeersCount;
BlockchainStats.NetworkDifficulty = (double) info.Difficulty;
- BlockchainStats.NetworkHashrate = (BlockchainStats.NetworkDifficulty / Math.Pow(2, 32)) / blockTimeAvg;
+ BlockchainStats.NetworkHashrate = BlockchainStats.NetworkDifficulty / blockTimeAvg;
}
else
|
configs/artik05x: fix wrong path of rsa_private.key on download script
The rsa_private.key is at build/configs/artik05x/tools/condesigner folder.
Not in each machine folder.
This commit fixs | @@ -238,7 +238,7 @@ signing() {
exit 1
fi
- ${CODESIGNER_PATH}/${CODESIGNER_TOOL} ${CODESIGNER_PATH}/rsa_private.key $TIZENRT_BIN
+ ${CODESIGNER_PATH}/${CODESIGNER_TOOL} ${CODESIGNER_DIR_PATH}/rsa_private.key $TIZENRT_BIN
TIZENRT_BIN=${TIZENRT_BIN}-signed
}
|
Do not perform probe processing if no probes are queued. | @@ -3050,16 +3050,17 @@ int picoquic_prepare_packet(picoquic_cnx_t* cnx,
picoquic_delete_abandoned_paths(cnx, current_time, &next_wake_time);
}
- /* Remove failed probes */
- picoquic_delete_failed_probes(cnx);
-
/* Check whether to insert a hole in the sequence of packets */
picoquic_insert_hole_in_send_sequence_if_needed(cnx, current_time);
+ if (cnx->probe_first != NULL) {
+ /* Remove failed probes */
+ picoquic_delete_failed_probes(cnx);
/* If probes are in waiting, send the first one */
ret = picoquic_prepare_probe(cnx, current_time, send_buffer, send_buffer_max, send_length,
p_addr_to, to_len, p_addr_from, from_len, &addr_to_log, &next_wake_time);
+ }
/* If alternate challenges are waiting, send them */
if (ret == 0 && *send_length == 0 && cnx->alt_path_challenge_needed) {
|
add experimental task_small_t definition | #include <kernel/util/ipc/ipc.h>
#include <gfx/lib/surface.h>
-#define KERNEL_STACK_SIZE 2048 //use 2kb kernel stack
+#define KERNEL_STACK_SIZE 4096 //use 4kb kernel stack
#define FD_MAX 64
//if a task has PROC_MASTER_PERMISSION set,
@@ -38,6 +38,22 @@ typedef enum mlfq_option {
PRIORITIZE_INTERACTIVE, //use more queues, allowing interactive tasks to dominate
} mlfq_option;
+typedef struct task_context {
+ vmm_pdir_t* page_dir; //address space state of process
+
+ uint32_t esp; //stack pointer
+ uint32_t ebp; //base pointer
+ uint32_t eip; //instruction pointer
+ uint32_t kernel_stack;
+} task_context_t;
+
+typedef struct task_small {
+ char* name;
+ int id;
+ task_state state; // TODO(pt) change to task_wait_state?
+ task_context_t context;
+} task_small_t;
+
struct fd_entry;
typedef struct task {
char* name; //user-printable process name
|
Zip the documentation
Because Apple rejected fonts for some reason | import UIKit
import WebKit
import SafariServices
+import Zip
/// A View controller showing offline documentation.
class DocumentationViewController: UIViewController, WKNavigationDelegate {
@@ -68,12 +69,27 @@ class DocumentationViewController: UIViewController, WKNavigationDelegate {
}
}
+ private var docsURL: URL {
+ let url = FileManager.default.urls(for: .libraryDirectory, in: .allDomainsMask)[0].appendingPathComponent("docs_build")
+
+ if /*!FileManager.default.fileExists(atPath: url.path),*/ let bundledDocs = Bundle.main.url(forResource: "docs", withExtension: "zip") {
+
+ do {
+ try Zip.unzipFile(bundledDocs, destination: url.deletingLastPathComponent(), overwrite: true, password: nil)
+ } catch {
+ print(error.localizedDescription)
+ }
+ }
+
+ return url
+ }
+
// MARK: - View controller
override func viewDidLoad() {
super.viewDidLoad()
- title = "Documentation" // TODO: Localize
+ title = Localizable.Help.documentation
edgesForExtendedLayout = []
@@ -83,8 +99,11 @@ class DocumentationViewController: UIViewController, WKNavigationDelegate {
webView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
view.addSubview(webView)
- if let url = Bundle.main.url(forResource: "docs_build/html", withExtension: "") {
- self.webView.loadFileURL(url.appendingPathComponent("index.html"), allowingReadAccessTo: url)
+ DispatchQueue.global().async {
+ let url = self.docsURL
+ DispatchQueue.main.async {
+ self.webView.loadFileURL(url.appendingPathComponent("html/index.html"), allowingReadAccessTo: url)
+ }
}
goBackButton = UIBarButtonItem(image: UIImage(named: "back"), style: .plain, target: self, action: #selector(goBack))
@@ -92,7 +111,7 @@ class DocumentationViewController: UIViewController, WKNavigationDelegate {
toolbarItems = [goBackButton, UIBarButtonItem(barButtonSystemItem: .flexibleSpace, target: nil, action: nil), goForwardButton]
- navigationItem.leftBarButtonItem = UIBarButtonItem(barButtonSystemItem: .stop, target: self, action: #selector(close))
+ navigationItem.leftBarButtonItem = UIBarButtonItem(barButtonSystemItem: .done, target: self, action: #selector(close))
if UIDevice.current.userInterfaceIdiom == .pad, #available(iOS 13.0, *) {
navigationItem.rightBarButtonItem = UIBarButtonItem(image: UIImage(systemName: "chevron.down.square.fill"), style: .plain, target: self, action: #selector(openInNewWindow))
@@ -106,7 +125,7 @@ class DocumentationViewController: UIViewController, WKNavigationDelegate {
navigationController?.setToolbarHidden(false, animated: true)
if #available(iOS 13.0, *) {
- view.window?.windowScene?.title = "Documentation" // TODO: Localize
+ view.window?.windowScene?.title = Localizable.Help.documentation
}
}
|
Create a real path instead of faking it | @@ -482,7 +482,9 @@ int parse_frame_test()
pc = picoquic_context_from_epoch(test_skip_list[i].epoch);
cnx->pkt_ctx[0].send_sequence = 0x0102030406;
- cnx->path_sequence_next = 2;
+
+ /* create a path which can be retired with a connection_id_retire frame */
+ picoquic_create_path(cnx, cnx->start_time, &cnx->path[0]->local_addr, NULL);
t_ret = picoquic_decode_frames(cnx, cnx->path[0], buffer, byte_max, test_skip_list[i].epoch, NULL, NULL, simulated_time);
|
Remove the StringUtils class, since there's no state (static or instance), thus it's not really adding value. | namespace MAT_NS_BEGIN
{
- class StringUtils
+ namespace StringUtils
{
- public:
-
- static void SplitString(const std::string& s, const char separator, std::vector<std::string>& parts);
- static bool AreAllCharactersWhitelisted(const std::string& stringToTest, const std::string& whitelist);
-
- private:
-
- StringUtils(const StringUtils&) = delete;
- StringUtils& operator=(const StringUtils&) = delete;
- };
+ void SplitString(const std::string& s, const char separator, std::vector<std::string>& parts);
+ bool AreAllCharactersWhitelisted(const std::string& stringToTest, const std::string& whitelist);
+ }
std::string toString(char const* value);
std::string toString(bool value);
|
Fix a segment fault issue in TLS
The root cause is it uses a dangling reference after memory move
Need to call session_alloc first, then use index to get the app
listener point | @@ -156,14 +156,15 @@ tls_notify_app_accept (tls_ctx_t * ctx)
app = application_get (ctx->parent_app_index);
lctx = tls_listener_ctx_get (ctx->listener_ctx_index);
- app_listener = listen_session_get_from_handle (lctx->app_session_handle);
- sm = application_get_listen_segment_manager (app, app_listener);
app_session = session_alloc (vlib_get_thread_index ());
app_session->app_index = ctx->parent_app_index;
app_session->connection_index = ctx->tls_ctx_handle;
+
+ app_listener = listen_session_get_from_handle (lctx->app_session_handle);
app_session->session_type = app_listener->session_type;
app_session->listener_index = app_listener->session_index;
+ sm = application_get_listen_segment_manager (app, app_listener);
if ((rv = session_alloc_fifos (sm, app_session)))
{
TLS_DBG (1, "failed to allocate fifos");
|
Avoid NULL deref is the there's no request body yet | @@ -1392,7 +1392,7 @@ void tunnel_write(struct st_h2o_http3_server_stream_t *stream)
assert(!stream->tunnel->up.is_inflight);
- if ((bytes_to_send = stream->req_body->size) == 0)
+ if (stream->req_body == NULL || (bytes_to_send = stream->req_body->size) == 0)
return;
/* move chunk of data into stream->tunnel.up.buf */
|
bugfix of balance from Daniel | @@ -223,6 +223,7 @@ static uint64_t unapply_block(struct block_internal *bi)
}
bi->flags &= ~BI_MAIN_REF;
+ bi->ref = 0;
for (i = 0; i < bi->nlinks; ++i) {
if (bi->link[i]->ref == bi && bi->link[i]->flags & BI_MAIN_REF) {
@@ -260,6 +261,7 @@ static void set_main(struct block_internal *m)
}
accept_amount(m, apply_block(m));
+ m->ref = m;
log_block((m->flags & BI_OURS ? "MAIN +" : "MAIN "), m->hash, m->time, m->storage_pos);
}
@@ -623,6 +625,7 @@ static int add_block_nolock(struct xdag_block *newBlock, xdag_time_t limit)
noref_last = blockRef0;
}
+ blockRef->ref = 0;
tmpNodeBlock.link[i]->flags |= BI_REF;
g_xdag_extstats.nnoref--;
}
|
Change variant to uint when building option lists.
Enums are uints on most platforms so this works without casting.
Found on MacOS M1. | @@ -336,7 +336,7 @@ helpRender(void)
section = strNew("command");
}
- kvAdd(optionKv, VARSTR(section), VARINT((int)optionId));
+ kvAdd(optionKv, VARSTR(section), VARUINT(optionId));
if (strlen(cfgParseOptionName(optionId)) > optionSizeMax)
optionSizeMax = strlen(cfgParseOptionName(optionId));
@@ -357,7 +357,7 @@ helpRender(void)
for (unsigned int optionIdx = 0; optionIdx < varLstSize(optionList); optionIdx++)
{
- ConfigOption optionId = varInt(varLstGet(optionList, optionIdx));
+ ConfigOption optionId = varUInt(varLstGet(optionList, optionIdx));
// Get option summary
String *summary = strFirstLower(
|
Document option unicast. | @@ -411,6 +411,10 @@ for tunnel interfaces, and
.B false
otherwise.
.TP
+.BR unicast " {" true | false }
+Send multiple copies of TLVs (except Hellos and IHUs) to all neighbours
+rather than sending a single multicast packet. The default is false.
+.TP
.BR rfc6126\-compatible " {" true | false }
Only send TLVs that are defined by RFC 6126, the older version of Babel.
The default is
|
Update Docker Build for CLOG Dependencies | @@ -5,7 +5,6 @@ RUN apt-get update -y \
build-essential \
cmake \
liblttng-ust-dev \
- lttng-tools \
&& apt-get clean
COPY . /src
@@ -14,7 +13,7 @@ WORKDIR /src/Debug
RUN chmod +x /src/scripts/install-powershell-docker.sh
RUN /src/scripts/install-powershell-docker.sh
ENV PATH="/root/.dotnet/tools:${PATH}"
-RUN cmake -DQUIC_BUILD_TEST=OFF ..
+RUN cmake -DQUIC_BUILD_TEST=OFF -DQUIC_BUILD_PERF=OFF ..
RUN cmake --build .
RUN openssl ecparam -out server.eckey -noout -name prime256v1 -genkey
RUN openssl pkcs8 -topk8 -inform pem -in server.eckey -nocrypt \
@@ -26,10 +25,13 @@ FROM martenseemann/quic-network-simulator-endpoint
RUN apt-get update -y \
&& apt-get install -y \
libatomic1 \
+ liblttng-ust-dev \
+ lttng-tools \
&& apt-get clean
COPY --from=build /src/Debug/bin/Release /bin
COPY --from=build /src/Debug/bin/Release/*.so /lib/x86_64-linux-gnu/
COPY --from=source /src/scripts/run_endpoint.sh /run_endpoint.sh
+COPY --from=source /src/src/manifest/clog.sidecar /clog.sidecar
COPY --from=build /src/Debug/server.* /
RUN chmod +x /run_endpoint.sh
ENTRYPOINT [ "/run_endpoint.sh" ]
|
group-store: fix flag declaration | `[flag members.u.group u.assoc graph]
::
++ import-flags
- |= [=^groups =associations:met =network:gra]
+ |= [our=ship =^groups =associations:met =network:gra]
|= =mark
^- (set flag:i)
- ~(key by ((import-for-mark ~ groups associations network) mark))
+ %- ~(gas in *(set flag:i))
+ %+ skim
+ ~(tap in ~(key by ((import-for-mark ~ groups associations network) mark)))
+ |= =flag:i
+ !=(our p.flag)
::
++ import-for-mark
|= [her=(unit ship) =^groups =associations:met =network:gra]
~
`[flag u.assoc chans roles group]
=/ dms (~(get by graphs:network) [our.bowl %dm-inbox])
- =/ flag-importer (import-flags groups associations network)
+ =/ flag-importer (import-flags our.bowl groups associations network)
=+ :* chat-flags=(flag-importer %graph-validator-chat)
heap-flags=(flag-importer %graph-validator-link)
diary-flags=(flag-importer %graph-validator-publish)
|
wireless: gs2200m: Fix sendto_request() in gs2200m_main.c | @@ -802,7 +802,9 @@ static int sendto_request(int fd, FAR struct gs2200m_s *priv,
goto prepare;
}
- ret = req->buflen;
+ /* return length which gs2200m sent */
+
+ ret = smsg.len;
}
prepare:
|
arm: fix type castings for ARM Neon | @@ -34,17 +34,19 @@ glmm_hadd(float32x4_t v) {
static inline
float
glmm_hmin(float32x4_t v) {
- v = vpmin_f32(vget_low_f32(v), vget_high_f32(v));
- v = vpmin_f32(v, v);
- return vget_lane_f32(v, 0);
+ float32x2_t t;
+ t = vpmin_f32(vget_low_f32(v), vget_high_f32(v));
+ t = vpmin_f32(t, t);
+ return vget_lane_f32(t, 0);
}
static inline
float
glmm_hmax(float32x4_t v) {
- v = vpmax_f32(vget_low_f32(v), vget_high_f32(v));
- v = vpmax_f32(v, v);
- return vget_lane_f32(v, 0);
+ float32x2_t t;
+ t = vpmax_f32(vget_low_f32(v), vget_high_f32(v));
+ t = vpmax_f32(t, t);
+ return vget_lane_f32(t, 0);
}
static inline
|
vppinfra: fix the vector funcs test for march variants
Type: fix | test_registration_t *test_registrations[CLIB_MARCH_TYPE_N_VARIANTS] = {};
+int
+test_march_supported (clib_march_variant_type_t type)
+{
+#define _(s, n) \
+ if (CLIB_MARCH_VARIANT_TYPE_##s == type) \
+ return clib_cpu_march_priority_##s ();
+ foreach_march_variant
+#undef _
+ return 0;
+}
+
int
main (int argc, char *argv[])
{
@@ -16,7 +27,7 @@ main (int argc, char *argv[])
{
test_registration_t *r = test_registrations[i];
- if (r == 0)
+ if (r == 0 || test_march_supported (i) < 0)
continue;
fformat (stdout, "\nMultiarch Variant: %U\n", format_march_variant, i);
|
grib_to_netcdf assertion: 'handle_to_request(r,h) == 0' | @@ -610,6 +610,8 @@ static err handle_to_request(request *r, grib_handle* g)
strcpy(name, "stepUnits");
if((e = grib_get_string(g, name, value, &len)) == GRIB_SUCCESS) {
set_value(r, name, "%s", value);
+ } else {
+ grib_context_log(ctx, GRIB_LOG_ERROR, "Cannot get %s as string (%s)", name, grib_get_error_message(e));
}
/*
@@ -4257,7 +4259,9 @@ int grib_tool_new_filename_action(grib_runtime_options* options, const char* fil
g = read_field(file, h->offset, length);
r = empty_request("");
- Assert(handle_to_request(r,h) == 0);
+ if (handle_to_request(r,h) != GRIB_SUCCESS) {
+ grib_context_log(ctx, GRIB_LOG_ERROR, "Failed to convert GRIB handle to a request");
+ }
/* Keep full MARS description */
/* copy = clone_one_request(r); */
|
cc: Fix SEGV when there is no build-id section | @@ -241,7 +241,7 @@ static int find_debuglink(Elf *e, char **debug_file, unsigned int *crc) {
static int find_buildid(Elf *e, char *buildid) {
Elf_Data *data = get_section_elf_data(e, ".note.gnu.build-id");
- if (data->d_size <= 16 || strcmp((char *)data->d_buf + 12, "GNU"))
+ if (!data || data->d_size <= 16 || strcmp((char *)data->d_buf + 12, "GNU"))
return 0;
char *buf = (char *)data->d_buf + 16;
|
add explicit petsc linking to tests | @@ -19,7 +19,6 @@ fi
# set compilers to use MPI toolchain
CC=mpicc
-CXX=mpicxx
F77=mpif77
FC=mpif90
@@ -27,21 +26,20 @@ FC=mpif90
AC_PROG_CC
AC_PROG_F77
AC_PROG_FC
-AC_PROG_CXX
# Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
if test "x$LMOD_FAMILY_COMPILER" = "xintel"; then
CFLAGS="-I ${SLEPC_INC} -I ${MKLROOT}/include ${CFLAGS}"
- CXXFLAGS="-I ${SLEPC_INC} -I ${MKLROOT}/include ${CXXFLAGS} -fopenmp"
FFLAGS="-I ${SLEPC_INC} -I ${MKLROOT}/include ${FFLAGS}"
- LDFLAGS="-L${SLEPC_LIB} -lSLEPC ${LDFLAGS}"
+ FCFLAGS="-I ${SLEPC_INC} -I ${MKLROOT}/include ${FFLAGS}"
+ LDFLAGS="-L${SLEPC_LIB} -lslepc ${LDFLAGS}"
else
CFLAGS="-I ${SLEPC_INC} ${CFLAGS}"
- CXXFLAGS="-I ${SLEPC_INC} ${CXXFLAGS} -fopenmp"
- FFLAGS="-I ${SLEPC_INC} ${FFLAGS}"
- LDFLAGS="-L${SLEPC_LIB} -lSLEPC ${LDFLAGS}"
+ FFLAGS="-I ${SLEPC_INC} -I ${PETSC_INC} ${FFLAGS}"
+ FCFLAGS="-I ${SLEPC_INC} -I ${PETSC_INC} ${FFLAGS}"
+ LDFLAGS="-L${SLEPC_LIB} -lslepc -L${PETSC_LIB} -lpetsc ${LDFLAGS}"
fi
@@ -59,11 +57,11 @@ echo OHPC compiler toolchain........ : $LMOD_FAMILY_COMPILER
echo OHPC MPI toolchain............. : $LMOD_FAMILY_MPI
echo
echo C compiler.................... : `which $CC`
-echo C++ compiler.................. : `which $CXX`
-echo Fortran compiler ............. : `which $F77`
+echo F77 compiler ................. : `which $F77`
+echo F90 compiler ................. : `which $F77`
echo
echo C compiler flags.............. : $CFLAGS
-echo C++ compiler flags............ : $CXXFLAGS
-echo Fortran compiler flags........ : $FFLAGS
+echo F77 compiler flags............ : $FFLAGS
+echo F90 compiler flags............ : $FCFLAGS
echo
echo '-------------------------------------------------------------------------------------------------------------'
|
harness: increasing timeout for the mt_waitset test, to avoid timeout on armv8 machines | @@ -20,8 +20,8 @@ class MultithreadedWaitsetTest(TestCommon):
def setup(self, build, machine, testdir):
super(MultithreadedWaitsetTest, self).setup(build, machine, testdir)
- self.test_timeout_delta *= 2
- debug.verbose("%s: increasing test timeout delta by factor 2: new = %s" %
+ self.test_timeout_delta *= 3
+ debug.verbose("%s: increasing test timeout delta by factor 3: new = %s" %
(self.name, self.test_timeout_delta))
def get_modules(self, build, machine):
|
docs/README: Remove references to MICROPY_PORT when building docs.
The docs are now built as one for all ports. | @@ -21,18 +21,16 @@ preferably in a virtualenv:
In `micropython/docs`, build the docs:
- make MICROPY_PORT=<port_name> html
+ make html
-Where `<port_name>` can be `unix`, `pyboard`, `wipy` or `esp8266`.
-
-You'll find the index page at `micropython/docs/build/<port_name>/html/index.html`.
+You'll find the index page at `micropython/docs/build/html/index.html`.
PDF manual generation
---------------------
This can be achieved with:
- make MICROPY_PORT=<port_name> latexpdf
+ make latexpdf
but require rather complete install of LaTeX with various extensions. On
Debian/Ubuntu, try (500MB+ download):
|
rune: add the banner info about rune | +# rune announcement
+`rune` is a CLI tool for spawning and running enclaves in containers according to the OCI specification. The codebase of `rune` is a fork of [runc](https://github.com/opencontainers/runc), so `rune` can be used as `runc` if enclave is not configured or available.
+
# runc
[](https://travis-ci.org/opencontainers/runc)
|
show warning if user tries to run hcxdumptool on a monitor interface | @@ -268,6 +268,21 @@ if(fp)
return;
}
/*===========================================================================*/
+static inline bool checkmonitorinterface(char *checkinterfacename)
+{
+static char *monstr = "mon";
+
+if(checkinterfacename == NULL)
+ {
+ return true;
+ }
+if(strstr(checkinterfacename, monstr) == NULL)
+ {
+ return false;
+ }
+return true;
+}
+/*===========================================================================*/
static inline void checkallunwanted()
{
static char *networkmanager = "pidof NetworkManager";
@@ -4087,6 +4102,10 @@ static struct sockaddr_ll ll;
static struct ethtool_perm_addr *epmaddr;
checkallunwanted();
+if(checkmonitorinterface(interfacename) == true)
+ {
+ printf("warning: %s is probably monitor interface\n", interfacename);
+ }
fd_socket = 0;
if((fd_socket = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) < 0)
{
@@ -4322,8 +4341,15 @@ else
{
printf("%02x", (permaddr[p]));
}
+ if(checkmonitorinterface(ifa->ifa_name) == false)
+ {
printf(" %s (%s)\n", ifa->ifa_name, drivername);
}
+ else
+ {
+ printf(" %s (%s) warning: probably monitor interface!\n", ifa->ifa_name, drivername);
+ }
+ }
}
}
freeifaddrs(ifaddr);
|
mmx: use 64-bit vector for __m64, not 128-bit
C&P error from SSE; oops. | #else
typedef union {
- char i8 __attribute__((__vector_size__(16)));
- short i16 __attribute__((__vector_size__(16)));
- int i32 __attribute__((__vector_size__(16)));
+ char i8 __attribute__((__vector_size__(8)));
+ short i16 __attribute__((__vector_size__(8)));
+ int i32 __attribute__((__vector_size__(8)));
} SIMDE__TYPE(m64);
#if defined(SIMDE__EMULATE_NATIVE)
# if defined(__m64)
|
Update mangohud-setup.sh | @@ -10,10 +10,10 @@ install() {
uninstall() {
[ "$UID" -eq 0 ] || exec sudo bash "$0" uninstall
rm -rfv "/usr/lib/mangohud"
- rm -fv "/usr/share/vulkan/implicit_layer.d/mangohud.json"
- rm -fv "/etc/ld.so.conf.d/libmangohud.conf"
- rm -fv "/etc/ld.so.conf.d/lib32-libmangohud.conf"
+ rm -fv "/usr/share/vulkan/implicit_layer.d/MangoHud.x86.json"
+ rm -fv "/usr/share/vulkan/implicit_layer.d/MangoHud.x86_64.json"
rm -fv "/usr/bin/mangohud"
+ rm -fv "/usr/bin/mangohud.x86"
}
for a in $@; do
|
plugins: added `warningf` explanation | @@ -360,7 +360,8 @@ be called and the mounted file will be updated.
We haven't discussed `ELEKTRA_SET_<CONCRETE>_ERROR` yet. Because Elektra is a library, printing errors to stderr wouldn't be a good idea. Instead, errors
and warnings can be appended to a key in the form of metadata. This is what `ELEKTRA_SET_<CONCRETE>_ERROR` does. The `<CONCRETE>` in the
-text means the concrete error type such as `RESOURCE`, `INSTALLATION`, etc.
+text means the concrete error type such as `RESOURCE`, `INSTALLATION`, etc. Note that you also have a varargs macro with `...ERRORF`
+that allows you to insert a string and substitute parts with variables.
You can see all available error types as well as their categorization guidelines [here](/doc/dev/error-categorization.md).
Because the parentKey always exists
even if a critical error occurs, we write the error to the parentKey. The error does not necessarily have to be in a configuration.
@@ -423,7 +424,8 @@ keyNew ("system/elektra/modules/" ELEKTRA_PLUGIN_NAME "/exports/checkconf", KEY_
Within the `checkconf` function all of the plugin configuration values should be validated.
Errors should be reported via Elektra's error handling mechanism (see section [ELEKTRA*SET*<CONCRETE>\_ERROR](#elektra_set_concrete_error) for further details).
If `checkconf` encounters a configuration value, that is not strictly invalid but can not be parsed by the plugin (e.g. a parameter which is not part of the plugin configuration),
-then a warning should be appended to `errorKey`, using `ELEKTRA_ADD_<CONCRETE>_WARNING`.
+then a warning should be appended to `errorKey`, using `ELEKTRA_ADD_<CONCRETE>_WARNING`. You also have a `...WARNINGF` vararg macro that
+allows you to substitute parts of the message with variables.
### `ELEKTRA_PLUGIN_EXPORT`
|
honggfuzz: use size_t and %zu for printing rlimit values, which can be bigger than int | @@ -87,12 +87,12 @@ static void setupRLimits(void) {
return;
}
if (rlim.rlim_max < 1024) {
- LOG_E("RLIMIT_NOFILE max limit < 1024 (%u). Expect troubles!", (unsigned int)rlim.rlim_max);
+ LOG_E("RLIMIT_NOFILE max limit < 1024 (%zu). Expect troubles!", (size_t)rlim.rlim_max);
return;
}
rlim.rlim_cur = MIN(1024, rlim.rlim_max); // we don't need more
if (setrlimit(RLIMIT_NOFILE, &rlim) == -1) {
- PLOG_E("Couldn't setrlimit(RLIMIT_NOFILE, cur=max=%u)", (unsigned int)rlim.rlim_max);
+ PLOG_E("Couldn't setrlimit(RLIMIT_NOFILE, cur=max=%zu)", (size_t)rlim.rlim_max);
}
}
|
Update (some of) BattleChara.cs | @@ -17,7 +17,7 @@ namespace FFXIVClientStructs.FFXIV.Client.Game.Character
{
[FieldOffset(0x0)] public Character Character;
- [FieldOffset(0x19F0)] public StatusManager StatusManager;
+ [FieldOffset(0x1A48)] public StatusManager StatusManager;
[FieldOffset(0x1B80)] public CastInfo SpellCastInfo;
|
Retry macos cross-compile | @@ -20,6 +20,8 @@ cmake_minimum_required(VERSION 3.15)
cmake_policy(SET CMP0069 NEW) # LTO support
cmake_policy(SET CMP0091 NEW) # MSVC runtime support
+project(astcenc VERSION 2.2.0)
+
# Command line configuration
function(printopt optName optVal optArch tgtArch)
if(${optVal})
@@ -77,12 +79,6 @@ else()
message(" -- No SIMD backend - OFF")
endif()
-if(${ARCH} MATCHES "aarch64")
- set(CMAKE_OSX_ARCHITECTURES arm64)
-else()
- set(CMAKE_OSX_ARCHITECTURES x86_64)
-endif()
-
option(ISA_INVARIANCE "Enable builds for ISA invariance")
if(${ISA_INVARIANCE})
message(" -- ISA invariant backend - ON")
@@ -102,8 +98,14 @@ if(NOT ${ANY_ISA})
endif()
# Project configuration
-# Must be done after setting CMAKE_OSX_ARCHITECTURES
-project(astcenc VERSION 2.2.0)
+
+# Must be done after project() but before compiler settings
+# or it gets overriden (this contradicts the official docs)
+if(${ARCH} MATCHES "aarch64")
+ set(CMAKE_OSX_ARCHITECTURES "arm64")
+else()
+ set(CMAKE_OSX_ARCHITECTURES "x86_64")
+endif()
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
test: fix comparing floats in bezier tests | @@ -51,8 +51,8 @@ test_bezier(void **state) {
Bs_plain = test_bezier_plain(s, p0, c0, c1, p1);
assert_true(glm_eq(Bs, Bs_plain));
- assert_true(glm_eq(smc, Bs_plain));
- assert_true(glm_eq(Bs, smc));
+ test_assert_eqf(smc, Bs_plain);
+ test_assert_eqf(Bs, smc);
/* test cubic hermite */
smc = glm_smc(s, GLM_HERMITE_MAT, (vec4){p0, p1, c0, c1});
|
IssuID:1626:Comparing an array to null is not useful "dp->vfs_dirent.d_name != NULL" since the test will always evaluate as true
[Detail]
Remove null check "dp->vfs_dirent.d_name != NULL" for "dp->vfs_dirent.d_name" is an array.
[Verified Cases]
Build Pass: <solution list>
Test Pass: <test case list> | @@ -330,14 +330,10 @@ static vfs_dirent_t *ramfs_vfs_readdir(vfs_file_t *fp, vfs_dir_t *dir)
int32_t res = -1;
dp = fp->f_arg;
-
- if (dp->vfs_dirent.d_name != NULL) {
res = ramfs_readdir(&(dp->ramfs_dir), dp->vfs_dirent.d_name);
-
if (res == 0) {
ret = &dp->vfs_dirent;
}
- }
return ret;
}
|
iokernel: clarify scheduler policy descriptions | @@ -133,9 +133,9 @@ void dataplane_loop(void)
static void print_usage(void)
{
printf("usage: POLICY [noht/core_list/nobw/mutualpair]\n");
- printf("\tsimple: the standard, basic scheduler policy\n");
- printf("\tias: a policy aware of CPU interference\n");
- printf("\tnuma: a policy aware of NUMA architectures\n");
+ printf("\tsimple: a simplified scheduler policy intended for testing\n");
+ printf("\tias: the Caladan scheduler policy (manages CPU interference)\n");
+ printf("\tnuma: an incomplete and experimental policy for NUMA architectures\n");
}
int main(int argc, char *argv[])
|
heap: fix build failure due to missing header
Closes | @@ -63,6 +63,7 @@ inline static void multi_heap_assert(bool condition, const char *format, int lin
__LINE__, (intptr_t)(ADDRESS))
#ifdef CONFIG_HEAP_TASK_TRACKING
+#include <freertos/task.h>
#define MULTI_HEAP_BLOCK_OWNER TaskHandle_t task;
#define MULTI_HEAP_SET_BLOCK_OWNER(HEAD) (HEAD)->task = xTaskGetCurrentTaskHandle()
#define MULTI_HEAP_GET_BLOCK_OWNER(HEAD) ((HEAD)->task)
|
links: key linkItems by date | @@ -71,10 +71,11 @@ export function LinkResource(props: LinkResourceProps) {
<Row width="100%" flexShrink='0'>
<LinkSubmit s3={s3} name={name} ship={ship.slice(1)} api={api} />
</Row>
- {Array.from(graph.values()).map((node: GraphNode) => {
+ {Array.from(graph).map(([date, node]) => {
const contact = contactDetails[node.post.author];
return (
<LinkItem
+ key={date}
resource={resourcePath}
node={node}
nickname={contact?.nickname}
|
[numerics] gfc3d_admm : missing free on transpose pointer | @@ -312,6 +312,7 @@ void gfc3d_ADMM(GlobalFrictionContactProblem* restrict problem, double* restrict
q = rescaled_problem->q;
b = rescaled_problem->b;
NM_clear(Htrans);
+ free(Htrans);
Htrans = NM_transpose(H);
DEBUG_EXPR
(double norm_q = cblas_dnrm2(n , problem->q , 1);
|
out_stackdriver: fix leak on exception (CID 316623) | @@ -280,6 +280,9 @@ static flb_sds_t get_google_token(struct flb_stackdriver *ctx)
if (pthread_mutex_unlock(&ctx->token_mutex)){
flb_plg_error(ctx->ins, "error unlocking mutex");
+ if (output) {
+ flb_sds_destroy(output);
+ }
return NULL;
}
|
Allow NULL path in HRN_STORAGE_MODE() macro. | @@ -314,7 +314,7 @@ hrnStorageMode(const Storage *const storage, const char *const path, HrnStorageM
ASSERT(storage != NULL);
- const char *const pathFull = strZ(storagePathP(storage, STR(path)));
+ const char *const pathFull = strZ(storagePathP(storage, path == NULL ? NULL : STR(path)));
// If no mode specified then default the mode based on the file type
if (param.mode == 0)
|
Fix non-trivial designated initializers compile error | @@ -204,13 +204,11 @@ void hcd_event_xfer_complete(uint8_t dev_addr, uint8_t ep_addr, uint32_t xferred
.rhport = 0, // TODO correct rhport
.event_id = HCD_EVENT_XFER_COMPLETE,
.dev_addr = dev_addr,
- .xfer_complete =
- {
- .ep_addr = ep_addr,
- .result = result,
- .len = xferred_bytes
- }
};
+ event.xfer_complete.ep_addr = ep_addr;
+ event.xfer_complete.result = result;
+ event.xfer_complete.len = xferred_bytes;
+
hcd_event_handler(&event, in_isr);
}
|
get RSSI from device | @@ -304,6 +304,7 @@ static const uint8_t fakenonce2[] =
0x75, 0x1f, 0x53, 0xcc, 0xb5, 0x81, 0xd1, 0x52, 0x3b, 0xb4, 0xba, 0xad, 0x23, 0xab, 0x01, 0x07
};
+static char rssi;
static uint8_t myaktap[6];
static uint8_t myaktclient[6];
static uint8_t myaktanonce[32];
@@ -3793,17 +3794,35 @@ else if(loba->family == LOBA_IPV630) processipv6(timestamp, caplen -LOBA_SIZE, p
return;
}
/*===========================================================================*/
-static void getradiotapfield(uint16_t rthlen, uint8_t *capptr)
+static void getradiotapfield(uint16_t rthlen, uint32_t caplen, uint8_t *capptr)
{
-uint16_t p;
+static int i;
+static uint16_t pf;
static rth_t *rth;
+static uint32_t *pp;
-p = RTH_SIZE;
rth = (rth_t*)capptr;
+pf = RTH_SIZE;
if(((rth->it_present >> IEEE80211_RADIOTAP_EXT) & 1) == 1)
{
- if(p > rthlen) return;
+ pf += 4;
+ pp = (uint32_t*)capptr;
+ for(i = 2; i < rthlen /4; i++)
+ {
+ if(((pp[i] >> IEEE80211_RADIOTAP_EXT) & 1) == 0) break;
+ pf += 4;
+ }
}
+if((pf %8) != 0) pf +=4;
+if(((rth->it_present >> IEEE80211_RADIOTAP_TSFT) & 1) == 1) pf += 8;
+if(((rth->it_present >> IEEE80211_RADIOTAP_FLAGS) & 1) == 1) pf += 1;
+if(((rth->it_present >> IEEE80211_RADIOTAP_RATE) & 1) == 1) pf += 1;
+if(((rth->it_present >> IEEE80211_RADIOTAP_CHANNEL) & 1) == 1) pf += 4;
+if(((rth->it_present >> IEEE80211_RADIOTAP_FHSS) & 1) == 1) pf += 2;
+rssi = 0;
+if(((rth->it_present >> IEEE80211_RADIOTAP_DBM_ANTSIGNAL) & 1) == 0) return;
+if(pf > caplen) return;
+rssi = capptr[pf];
return;
}
/*===========================================================================*/
@@ -3885,7 +3904,7 @@ if(linktype == DLT_IEEE802_11_RADIO)
if(fh_log != NULL) fprintf(fh_log, "unsupported radiotap header version: %ld\n", rawpacketcount);
return;
}
- getradiotapfield(rth->it_len, capptr);
+ getradiotapfield(rth->it_len, caplen, capptr);
packetlen = caplen -rth->it_len;
packetptr = capptr +rth->it_len;
}
|
Explicitly set invalid value for the end of the signature algorithm set
uint16_t *set = ssl->handshake->received_sig_algs;
const uint16_t sig_algs[] = {
MBEDTLS_SSL_SIG_ALG_SET( MBEDTLS_SSL_HASH_SHA1 )
- MBEDTLS_TLS_SIG_NONE
};
+ const uint16_t invalid_sig_alg = MBEDTLS_TLS_SIG_NONE;
size_t count = sizeof( sig_algs ) / sizeof( sig_algs[0] );
- if( count <= MBEDTLS_RECEIVED_SIG_ALGS_SIZE )
+ if( count < MBEDTLS_RECEIVED_SIG_ALGS_SIZE )
{
memcpy( set, sig_algs, sizeof( sig_algs ) );
+ memcpy( &set[count], &invalid_sig_alg, sizeof( sig_algs[0] ) );
}
else
{
@@ -1647,7 +1648,7 @@ read_record_header:
memcpy( set, sig_algs, size );
memcpy( &set[MBEDTLS_RECEIVED_SIG_ALGS_SIZE - 1],
- &sig_algs[count - 1], sizeof( sig_algs[0] ) );
+ &invalid_sig_alg, sizeof( sig_algs[0] ) );
}
}
|
bricks/ev3dev: drop unused enum generators | @@ -73,32 +73,7 @@ def get_sensor_path(port, driver_name):
return full_dir
raise OSError('No such sensor on Port S' + chr(port))
-#
-# This version would create an enum making files accessible as Sound.color.red
-#
-# def tree_as_enum(folder):
-# """Create an enum type represention of a file tree with one subfolder."""
-# categorydict = {}
-# for category in listdir(folder):
-# categorypath = path.join(folder, category)
-# filedict = {
-# filename[0:len(filename)-4]: path.join(categorypath, filename) for filename in listdir(categorypath)
-# }
-# categorydict[category] = type(category, (object,), filedict)
-# return type('Enum', (type(category),), categorydict)
-
-# # This version would create an enum making files accessible as Sound.color_red
-# def tree_as_enum(folder):
-# """Create a flat enum type represention of a file tree with one subfolder."""
-# treedict = {}
-# for category in listdir(folder):
-# categorypath = path.join(folder, category)
-# for filename in listdir(categorypath):
-# name = category + '_' + filename[0:len(filename)-4]
-# treedict[name] = path.join(categorypath, filename)
-# return type('Enum', (type(object),), treedict)
-
-# This version would create an enum making files accessible as Sound.red
+
def tree_as_enum(folder):
"""Create a flat enum type represention of a file tree with one subfolder."""
treedict = {}
|
External API -> Internal API for DM_Connectivity | @@ -136,11 +136,13 @@ int dm_conn_unregister_linkup_cb(conn_cb cb);
*/
int dm_conn_unregister_linkdown_cb(conn_cb cb);
+// @cond
/**
* @brief Perform a WiFi scan over all channels
*
* @param[in] None.
* @return On success, 0 is returned. On failure, a negative value is returned.
+ * @internal
*/
int dm_conn_wifi_scan(void);
@@ -149,6 +151,7 @@ int dm_conn_wifi_scan(void);
*
* @param[out] pointer to WiFi scan structure to hold result.
* @return On success, 0 is returned. On failure, a negative value is returned.
+ * @internal
*/
int dm_conn_get_scan_result(dm_scan_info_t **result);
@@ -157,6 +160,7 @@ int dm_conn_get_scan_result(dm_scan_info_t **result);
*
* @param[out] pointer to WiFi scan structure that holds result.
* @return On completion, 0 is returned.
+ * @internal
*/
int dm_conn_free_scan_result(dm_scan_info_t **result);
@@ -165,6 +169,7 @@ int dm_conn_free_scan_result(dm_scan_info_t **result);
*
* @param[in] callback functions to handle WiFi link being up and down.
* @return On completion, 0 is returned. On failure, a negative value is returned.
+ * @internal
*/
int dm_conn_wifi_connect(conn_cb linkUpEvent, conn_cb linkDownEvent);
@@ -173,6 +178,7 @@ int dm_conn_wifi_connect(conn_cb linkUpEvent, conn_cb linkDownEvent);
*
* @param None
* @return On completion, 0 is returned. On failure, a negative value is returned.
+ * @internal
*/
int dm_conn_dhcp_init(void);
@@ -181,9 +187,10 @@ int dm_conn_dhcp_init(void);
*
* @param None
* @return On completion, 0 is returned. On failure, a negative value is returned.
+ * @internal
*/
int dm_conn_wifi_disconnect(void);
-
+// @endcond
#ifdef __cplusplus
}
#endif
|
Add flake8 to CONTRIBUTING | @@ -21,13 +21,14 @@ Reviewing a pull request (PR) on github
directory successfully).
5. Make sure the python tests pass (i.e. run `python setup.py test` from the
root directory successfully).
- 6. Look at the code (see "Files changed" on the top of the GitHub pull request
+ 6. Make sure flake8 is not generating any warnings on pyccl (i.e., run `flake8 pyccl`).
+ 7. Look at the code (see "Files changed" on the top of the GitHub pull request
page) and check that the changes make sense to you.
- 7. If new science has been implemented, and if possible, try to compare the
+ 8. If new science has been implemented, and if possible, try to compare the
output of the code against your own predictions. Ask the developer to
implement appropriate unit tests for it.
- 8. Make sure that the unit tests pass on Travis-CI.
- 9. Make sure that the changes come with documentation, e.g. internally in the
+ 9. Make sure that the unit tests pass on Travis-CI.
+ 10. Make sure that the changes come with documentation, e.g. internally in the
C code and through Python docstrings, and that the doxygen documentation
has been regenerated. Make sure that example code in the `examples/`
directory has been updated appropriately, and that the CCL note has been
@@ -132,6 +133,8 @@ This may take some time to run in its entirety. If you changed the API, you may
have to modify the tests to account for this. You should also add your own
tests for any new functions or behaviors that were added.
+The python syntax can be checked with `flake8 pyccl`. You should run this command as part of the tests while developing `pyccl` features. More information on the capabilities of `flake8` can be found in http://flake8.pycqa.org/en/latest/manpage.html.
+
Occasionally, modifications made correctly as described above will still not
function properly. This might be due to multiple `pyccl` installation files not being
properly overwritten, and the interpreter is getting confused which version to
@@ -242,7 +245,7 @@ libraries with your new changes and run the unit tests. You can check the
status of your build here: https://travis-ci.org/LSSTDESC/CCL/builds. If you
click in your build you will find more information about its status and a log
describing the process. If your build errors or fails, you can scroll through
-the log to find out what went wrong. If your additions require new dependencies
+the log to find out what went wrong. Warnings from flake8 will result in tests not passing. If your additions require new dependencies
make sure that you include them in `.travis.yml`.
|
pbio/logger: fix counter
Number of samples previously sampled should be read before incrementing call count.
Fixes
Also be more consistent when logging at divisor intervals, by always logging at end. | @@ -63,15 +63,15 @@ pbio_error_t pbio_logger_update(pbio_log_t *log, int32_t *buf) {
return PBIO_SUCCESS;
}
+ // Number of samples already logged previously
+ int32_t sampled = log->calls / log->sample_div;
+
// Skip logging unless we are at a multiple of subsample_div
// and increment call counter.
- if (log->calls++ % log->sample_div != 0) {
+ if (log->calls++ % log->sample_div != log->sample_div - 1) {
return PBIO_SUCCESS;
}
- // Number of samples already logged so far
- int32_t sampled = log->calls / log->sample_div;
-
// Raise error if log is full, which should not happen
if (sampled > log->len) {
log->active = false;
|
Fix for broken asserts in ServoCluster | @@ -307,22 +307,22 @@ namespace servo {
}
float ServoCluster::min_value(uint8_t servo) const {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
return states[servo].get_min_value();
}
float ServoCluster::mid_value(uint8_t servo) const {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
return states[servo].get_mid_value();
}
float ServoCluster::max_value(uint8_t servo) const {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
return states[servo].get_max_value();
}
void ServoCluster::to_min(uint8_t servo, bool load) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
float new_pulse = states[servo].to_min_with_return();
apply_pulse(servo, new_pulse, load);
}
@@ -354,7 +354,7 @@ namespace servo {
}
void ServoCluster::to_mid(uint8_t servo, bool load) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
float new_pulse = states[servo].to_mid_with_return();
apply_pulse(servo, new_pulse, load);
}
@@ -386,7 +386,7 @@ namespace servo {
}
void ServoCluster::to_max(uint8_t servo, bool load) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
float new_pulse = states[servo].to_max_with_return();
apply_pulse(servo, new_pulse, load);
}
@@ -418,7 +418,7 @@ namespace servo {
}
void ServoCluster::to_percent(uint8_t servo, float in, float in_min, float in_max, bool load) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
float new_pulse = states[servo].to_percent_with_return(in, in_min, in_max);
apply_pulse(servo, new_pulse, load);
}
@@ -450,7 +450,7 @@ namespace servo {
}
void ServoCluster::to_percent(uint8_t servo, float in, float in_min, float in_max, float value_min, float value_max, bool load) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
float new_pulse = states[servo].to_percent_with_return(in, in_min, in_max, value_min, value_max);
apply_pulse(servo, new_pulse, load);
}
@@ -482,12 +482,12 @@ namespace servo {
}
Calibration& ServoCluster::calibration(uint8_t servo) {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
return states[servo].calibration();
}
const Calibration& ServoCluster::calibration(uint8_t servo) const {
- assert(is_assigned(servo));
+ assert(servo < pwms.get_chan_count());
return states[servo].calibration();
}
|
common/button.c: Format with clang-format
BRANCH=none
TEST=none | @@ -76,10 +76,10 @@ static int raw_button_pressed(const struct button_config *button)
if (!(button->flags & BUTTON_FLAG_DISABLED)) {
if (IS_ENABLED(CONFIG_ADC_BUTTONS) &&
button_is_adc_detected(button->gpio)) {
- physical_value =
- adc_to_physical_value(button->gpio);
+ physical_value = adc_to_physical_value(button->gpio);
} else {
- physical_value = (!!gpio_get_level(button->gpio) ==
+ physical_value =
+ (!!gpio_get_level(button->gpio) ==
!!(button->flags & BUTTON_FLAG_ACTIVE_HIGH));
}
#ifdef CONFIG_SIMULATED_BUTTON
@@ -260,7 +260,6 @@ int button_disable_gpio(enum button button_type)
}
#endif
-
/*
* Handle debounced button changing state.
*/
@@ -308,9 +307,8 @@ static void button_change_deferred(void)
hook_call_deferred(
&debug_mode_handle_data, 0);
#endif
- CPRINTS("Button '%s' was %s",
- buttons[i].name, new_pressed ?
- "pressed" : "released");
+ CPRINTS("Button '%s' was %s", buttons[i].name,
+ new_pressed ? "pressed" : "released");
if (IS_ENABLED(CONFIG_MKBP_INPUT_DEVICES)) {
mkbp_button_update(buttons[i].type,
new_pressed);
@@ -327,7 +325,8 @@ static void button_change_deferred(void)
* Make sure the next deferred call happens on or before
* each button needs it.
*/
- soonest_debounce_time = (soonest_debounce_time == 0) ?
+ soonest_debounce_time =
+ (soonest_debounce_time == 0) ?
state[i].debounce_time :
MIN(soonest_debounce_time,
state[i].debounce_time);
@@ -465,8 +464,7 @@ static int console_command_button(int argc, char **argv)
return EC_SUCCESS;
}
-DECLARE_CONSOLE_COMMAND(button, console_command_button,
- "vup|vdown|rec msec",
+DECLARE_CONSOLE_COMMAND(button, console_command_button, "vup|vdown|rec msec",
"Simulate button press");
#endif /* CONFIG_CMD_BUTTON */
@@ -494,7 +492,6 @@ DECLARE_HOST_COMMAND(EC_CMD_BUTTON, host_command_button, EC_VER_MASK(0));
#endif /* CONFIG_HOSTCMD_BUTTON */
-
#ifdef CONFIG_EMULATED_SYSRQ
#ifdef CONFIG_DEDICATED_RECOVERY_BUTTON
@@ -721,7 +718,8 @@ static void debug_mode_handle(void)
* Schedule a deferred call in case timeout hasn't
* occurred yet.
*/
- hook_call_deferred(&debug_mode_handle_data,
+ hook_call_deferred(
+ &debug_mode_handle_data,
(debug_state_deadline.val - now.val));
}
|
sysdeps/managarm: Handle sigaction when action is null | @@ -45,10 +45,10 @@ int sys_sigaction(int number, const struct sigaction *__restrict action,
managarm::posix::CntRequest<MemoryAllocator> req(getSysdepsAllocator());
req.set_request_type(managarm::posix::CntReqType::SIG_ACTION);
+ req.set_sig_number(number);
if(action) {
req.set_mode(1);
req.set_flags(action->sa_flags);
- req.set_sig_number(number);
req.set_sig_mask(action->sa_mask);
if(action->sa_flags & SA_SIGINFO) {
req.set_sig_handler(reinterpret_cast<uintptr_t>(action->sa_sigaction));
@@ -56,6 +56,8 @@ int sys_sigaction(int number, const struct sigaction *__restrict action,
req.set_sig_handler(reinterpret_cast<uintptr_t>(action->sa_handler));
}
req.set_sig_restorer(reinterpret_cast<uintptr_t>(&__mlibc_signal_restore));
+ } else {
+ req.set_mode(0);
}
frg::string<MemoryAllocator> ser(getSysdepsAllocator());
|
Try to go to Trusty distro on Travis
Change sudo to true and add a travis wait command. | -sudo: false
+sudo: true
install: true
addons:
apt:
packages:
- sshpass
language: java
-dist: precise
+dist: trusty
jdk:
- oraclejdk8
@@ -15,7 +15,7 @@ cache:
- $HOME/.m2
script:
-- bash debian7/bin/deployment_script.sh
+- travis_wait 30 bash debian7/bin/deployment_script.sh
notifications:
email:
|
Use unique client id for Wifi mqtt tests | uMqttClientContext_t *mqttClientCtx;
+char uniqueClientId[U_SHORT_RANGE_SERIAL_NUMBER_LENGTH];
+
const uMqttClientConnection_t mqttUnsecuredConnection = {
.pBrokerNameStr = "ubxlib.it-sgn.u-blox.com",
.pUserNameStr = "test_user",
.pPasswordStr = "test_passwd",
- .pClientIdStr = "test_client_id",
+ .pClientIdStr = uniqueClientId,
.localPort = 1883
};
@@ -113,7 +115,7 @@ const uMqttClientConnection_t mqttSecuredConnection = {
.pBrokerNameStr = "ubxlib.it-sgn.u-blox.com",
.pUserNameStr = "test_user",
.pPasswordStr = "test_passwd",
- .pClientIdStr = "test_client_id",
+ .pClientIdStr = uniqueClientId,
.localPort = 8883,
.keepAlive = true
};
@@ -177,6 +179,22 @@ static uShortRangeUartConfig_t uart = { .uartPort = U_CFG_APP_SHORT_RANGE_UART,
* STATIC FUNCTIONS
* -------------------------------------------------------------- */
+static void setUniqueClientId(uDeviceHandle_t devHandle)
+{
+ int32_t len = uShortRangeGetSerialNumber(devHandle, uniqueClientId);
+ if (len > 2) {
+ if (uniqueClientId[0] == '"') {
+ // Remove the quote characters
+ memmove(uniqueClientId, uniqueClientId + 1, len - 1);
+ uniqueClientId[len - 2] = 0;
+ }
+
+ } else {
+ // Failed to get serial number, use a random number
+ snprintf(uniqueClientId, sizeof(uniqueClientId), "%d", rand());
+ }
+}
+
static void wifiConnectionCallback(uDeviceHandle_t devHandle,
int32_t connId,
int32_t status,
@@ -302,6 +320,8 @@ static int32_t wifiMqttUnsubscribeTest(bool isSecuredConnection)
int32_t topicId1;
int32_t count;
+ setUniqueClientId(gHandles.devHandle);
+
// Malloc space to read messages and topics into
pTopicOut1 = (char *) malloc(U_MQTT_CLIENT_TEST_READ_TOPIC_MAX_LENGTH_BYTES);
U_PORT_TEST_ASSERT(pTopicOut1 != NULL);
@@ -447,6 +467,8 @@ static int32_t wifiMqttPublishSubscribeTest(bool isSecuredConnection)
int32_t topicId2;
int32_t count;
+ setUniqueClientId(gHandles.devHandle);
+
// Malloc space to read messages and topics into
pTopicOut1 = (char *) malloc(U_MQTT_CLIENT_TEST_READ_TOPIC_MAX_LENGTH_BYTES);
U_PORT_TEST_ASSERT(pTopicOut1 != NULL);
|
Update USAGE.md
update Options | @@ -77,8 +77,8 @@ Options:
Disable ANSI console; use simple log output
--verifier|-V
Enable crashes verifier
- --debug_level|-d VALUE
- Debug level (0 - FATAL ... 4 - DEBUG), (default: '3' [INFO])
+ --debug|-d
+ Show debug messages (level >= 4)
--extension|-e VALUE
Input file extension (e.g. 'swf'), (default: 'fuzz')
--workspace|-W VALUE
|
component/bt: fix remove bond list failed | @@ -38,14 +38,9 @@ static void _btc_storage_save(void)
//store the next iter, if remove section, then will not loss the point
const char *section = btc_config_section_name(iter);
- if (!string_is_bdaddr(section) ||
- !btc_config_get_int(section, BTC_BLE_STORAGE_DEV_TYPE_STR, (int *)&device_type) ||
- ((device_type & BT_DEVICE_TYPE_BLE) != BT_DEVICE_TYPE_BLE)) {
- iter = btc_config_section_next(iter);
- continue;
- }
- if (!btc_config_exist(section, BTC_BLE_STORAGE_DEV_TYPE_STR) &&
+ if (string_is_bdaddr(section) &&
+ !btc_config_exist(section, BTC_BLE_STORAGE_DEV_TYPE_STR) &&
!btc_config_exist(section, BTC_BLE_STORAGE_ADDR_TYPE_STR) &&
!btc_config_exist(section, BTC_BLE_STORAGE_LINK_KEY_STR) &&
!btc_config_exist(section, BTC_BLE_STORAGE_LE_KEY_PENC_STR) &&
@@ -57,6 +52,14 @@ static void _btc_storage_save(void)
btc_config_remove_section(section);
continue;
}
+
+ if (!string_is_bdaddr(section) ||
+ !btc_config_get_int(section, BTC_BLE_STORAGE_DEV_TYPE_STR, (int *)&device_type) ||
+ ((device_type & BT_DEVICE_TYPE_BLE) != BT_DEVICE_TYPE_BLE)) {
+ iter = btc_config_section_next(iter);
+ continue;
+ }
+
if(addr_section_count == BONED_DEVICES_MAX_COUNT) {
need_remove_iter = iter;
}
@@ -239,6 +242,9 @@ static bt_status_t _btc_storage_remove_ble_bonding_keys(bt_bdaddr_t *remote_bd_a
if (btc_config_exist(bdstr, BTC_BLE_STORAGE_LE_KEY_LCSRK_STR)) {
ret |= btc_config_remove(bdstr, BTC_BLE_STORAGE_LE_KEY_LCSRK_STR);
}
+ if (btc_config_exist(bdstr, BTC_BLE_STORAGE_LE_KEY_LID_STR)) {
+ ret |= btc_config_remove(bdstr, BTC_BLE_STORAGE_LE_KEY_LID_STR);
+ }
//here don't remove section, because config_save will check it
_btc_storage_save();
return ret ? BT_STATUS_SUCCESS : BT_STATUS_FAIL;
@@ -761,7 +767,6 @@ bt_status_t btc_storage_load_bonded_ble_devices(void)
bt_status_t btc_storage_get_bonded_ble_devices_list(esp_ble_bond_dev_t *bond_dev, int dev_num)
{
bt_bdaddr_t bd_addr;
- uint32_t device_type = 0;
char buffer[sizeof(tBTM_LE_KEY_VALUE)] = {0};
btc_config_lock();
@@ -771,12 +776,13 @@ bt_status_t btc_storage_get_bonded_ble_devices_list(esp_ble_bond_dev_t *bond_dev
if (dev_num-- <= 0) {
break;
}
-
+ uint32_t device_type = 0;
const char *name = btc_config_section_name(iter);
if (!string_is_bdaddr(name) ||
!btc_config_get_int(name, BTC_BLE_STORAGE_DEV_TYPE_STR, (int *)&device_type) ||
!(device_type & BT_DEVICE_TYPE_BLE)) {
+ dev_num ++;
continue;
}
|
Dispose, not just CloseHandle upstream | @@ -37,7 +37,6 @@ namespace MiningCore.JsonRpc
this.upstream = upstream;
- // convert input into sequence of chars
var incomingLines = Observable.Create<string>(observer =>
{
upstream.OnRead((handle, buffer) =>
@@ -110,7 +109,7 @@ namespace MiningCore.JsonRpc
public void Close()
{
- upstream.CloseHandle();
+ upstream?.Dispose();
}
#endregion
|
Modify worldgen to use Timer. | */
#include "main.hpp"
+#include <libtcod/timer.h>
+
#include <algorithm>
static constexpr auto WIDTH = 80;
@@ -115,9 +117,8 @@ void render() {
int main(int argc, char* argv[]) {
// initialize the game window
- TCODConsole::initRoot(WIDTH, HEIGHT, "World generator", false, TCOD_RENDERER_SDL);
- TCODSystem::setFps(25);
- TCODMouse::showCursor(true);
+ TCODConsole::initRoot(WIDTH, HEIGHT, "World generator", false, TCOD_RENDERER_OPENGL2);
+ int desired_fps = 25;
TCOD_key_t k = {};
TCOD_mouse_t mouse = {};
@@ -129,6 +130,8 @@ int main(int argc, char* argv[]) {
static float lightDir[3] = {1.0f, 1.0f, 0.0f};
worldGen.computeSunLight(lightDir);
+ auto timer = tcod::Timer();
+
while (!TCODConsole::isWindowClosed()) {
// read keyboard
// TCOD_key_t k=TCODConsole::checkForKeypress(TCOD_KEY_PRESSED|TCOD_KEY_RELEASED);
@@ -146,7 +149,7 @@ int main(int argc, char* argv[]) {
k.vk = TCODK_NONE;
}
// update the game
- update(TCODSystem::getLastFrameLength(), k, mouse);
+ update(timer.sync(desired_fps), k, mouse);
// render the game screen
render();
|
quickdump: disable xxd test again | @@ -101,11 +101,11 @@ kdb set user/tests/quickdump/otherkey "other value"
#> Create a new key user/tests/quickdump/otherkey with string "other value"
# Show resulting file (not part of test, because xxd is not available everywhere)
-xxd $(kdb file user/tests/quickdump/key)
-#> 00000000: 454b 4442 0000 0003 076b 6579 730b 7661 EKDB.....keys.va
-#> 00000010: 6c75 656d 096d 6574 6113 6d65 7461 7661 luem.meta.metava
-#> 00000020: 6c75 6500 116f 7468 6572 6b65 7973 176f lue..otherkeys.o
-#> 00000030: 7468 6572 2076 616c 7565 00 ther value.
+# xxd $(kdb file user/tests/quickdump/key)
+# 00000000: 454b 4442 0000 0003 076b 6579 730b 7661 EKDB.....keys.va
+# 00000010: 6c75 656d 096d 6574 6113 6d65 7461 7661 luem.meta.metava
+# 00000020: 6c75 6500 116f 7468 6572 6b65 7973 176f lue..otherkeys.o
+# 00000030: 7468 6572 2076 616c 7565 00 ther value.
# Change mounted file (in a very stupid way to enable shell-recorder testing):
|
sdl: error: Fix documentation | @@ -14,7 +14,7 @@ const (
LASTERROR = C.SDL_LASTERROR // the highest numbered predefined error
)
-// ErrorCode is an SDL error code for common errors.
+// ErrorCode is an error code used in SDL error messages.
type ErrorCode uint32
type cErrorCode C.SDL_errorcode
@@ -41,7 +41,7 @@ func ClearError() {
C.SDL_ClearError()
}
-// Error sets the SDL error message to the predefined string specified by code.
+// Error sets the SDL error message to the specified error code.
func Error(code ErrorCode) {
C.SDL_Error(code.c())
}
|
Fix late opening of output file
For 'openssl dhparams', the output file was opened after calculations
were made, which is a waste of cycles and time if the output file
turns out not to be writable.
Fixes | @@ -153,6 +153,11 @@ int dhparam_main(int argc, char **argv)
goto end;
}
# endif
+
+ out = bio_open_default(outfile, 'w', outformat);
+ if (out == NULL)
+ goto end;
+
/* DH parameters */
if (num && !g)
g = 2;
@@ -260,10 +265,6 @@ int dhparam_main(int argc, char **argv)
/* dh != NULL */
}
- out = bio_open_default(outfile, 'w', outformat);
- if (out == NULL)
- goto end;
-
if (text) {
DHparams_print(out, dh);
}
|
Disable reading of scenes until refactor (wip)
Implementation is problematic in larger networks where queues fill up
rapidly. The refactoring should use the poll manager and only request one item
at a time. | @@ -4671,6 +4671,8 @@ bool DeRestPluginPrivate::processZclAttributes(LightNode *lightNode)
}
}
+#if 0 // TODO add this to poll manager
+ // this is very problematic and causes queues to fill up extremely
if ((processed < 2) && lightNode->mustRead(READ_SCENES) && !lightNode->groups().empty()&& tNow > lightNode->nextReadTime(READ_SCENES))
{
std::vector<GroupInfo>::iterator i = lightNode->groups().begin();
@@ -4758,6 +4760,7 @@ bool DeRestPluginPrivate::processZclAttributes(LightNode *lightNode)
}
}
+#endif
return (processed > 0);
}
|
eccodes should build with openjpeg 2.3 | # granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
-# - Try to find the OpenJPEG includes and library (version 1.5.x or 2.1.x)
+# - Try to find the OpenJPEG includes and library (version 1.5.x, 2.1.x, 2.2.x, 2.3.x)
# This module defines
#
# OPENJPEG_FOUND - System has OpenJPEG
# Note: OpenJPEG has a version-specific subdirectory in the include
# e.g. include/openjpeg-2.0 or include/openjpeg-2.1.
-# Only version 1.5.x and 2.1.x are supported.
+# Only version 1.5.x and 2.[123].x are supported.
# The library name is different for 1.x (libopenjpeg) and 2.x (libopenjp2).
-set( _suff include include/openjpeg include/openjpeg-1.5 include/openjpeg-2.1 )
+set( _suff include include/openjpeg include/openjpeg-1.5 include/openjpeg-2.1 include/openjpeg-2.2 include/openjpeg-2.3)
find_path( OPENJPEG_INCLUDE_DIR openjpeg.h
PATHS ${OPENJPEG_PATH} ENV OPENJPEG_PATH
${OPENJPEG_DIR} ENV OPENJPEG_DIR
|
Add python setup.py install step to travis.yml to see if it fixes build failures | @@ -35,6 +35,7 @@ install:
- python --version
script:
- python setup.py build
+- python setup.py install
- nosetests tests/run_tests.py --all --debug --detailed-errors --verbose --process-restartworker
--with-coverage --cover-package=pyccl
- make -Cbuild && sudo make -Cbuild install
|
[swig] fix some SWIG warnings for mechanics | %include MechanicsBase.i
+// Teach SWIG about the SiconosContactorSet base class (std::vector<SiconosContactor>)
+class SiconosContactor;
+%shared_ptr(std::vector< std11::shared_ptr< SiconosContactor > >);
+%template(VectorOfSPSiconosContactor) std::vector< std11::shared_ptr< SiconosContactor > >;
+
+// Ignore some shadowed (redundant for Python) functions
+%ignore SiconosShape::setDimensions(SP::SiconosVector dim);
+
PY_REGISTER_WITHOUT_HEADER(SiconosSphere);
PY_REGISTER_WITHOUT_HEADER(SiconosPlane);
PY_REGISTER_WITHOUT_HEADER(SiconosBox);
|
Workaround in docker compose tests for logs. | #
# Enable BuildKit whenever possible
+export COMPOSE_DOCKER_CLI_BUILD=1
export DOCKER_BUILDKIT=1
export BUILDKIT_PROGRESS=plain
@@ -72,16 +73,8 @@ sub_rebuild() {
# Build MetaCall Docker Compose with Sanitizer for testing (link manually dockerignore files)
sub_test() {
- # In order to get rid of the log limit:
- # [output clipped, log limit 1MiB reached]
- # Use this command:
- # docker buildx create --use --name larger_log --driver-opt env.BUILDKIT_STEP_LOG_MAX_SIZE=50000000
- # export BUILDX_BUILDER=larger_log
- # Or in GitHub Workflow:
- # - name: Set up Docker Buildx
- # uses: docker/setup-buildx-action@v1
- # with:
- # driver-opts: env.BUILDKIT_STEP_LOG_MAX_SIZE=50000000
+ # Disable BuildKit as workaround due to log limits (TODO: https://github.com/docker/buildx/issues/484)
+ export DOCKER_BUILDKIT=0
ln -sf tools/deps/.dockerignore .dockerignore
docker-compose -f docker-compose.yml -f docker-compose.test.yml build --force-rm deps
|
admin/docs: bump docs package to v1.3.4 | %{!?PROJ_DELIM: %global PROJ_DELIM -ohpc}
Name: docs%{PROJ_DELIM}
-Version: 1.3.3
+Version: 1.3.4
Release: 1
Summary: OpenHPC documentation
License: BSD-3-Clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.