message | diff
---|---|
Solve bug in cobol loader. | #include <log/log.h>
-#include <stdlib.h>
+#include <cstring>
#include <libcob.h>
@@ -65,9 +65,9 @@ function_return function_cob_interface_invoke(function func, function_impl impl,
}
else
{
- void **cob_args = static_cast<void **>(malloc(sizeof(void *) * size));
+ void **cob_args = new void *[size];
- if (cob_args == NULL)
+ if (cob_args == nullptr)
{
return NULL;
}
@@ -79,7 +79,7 @@ function_return function_cob_interface_invoke(function func, function_impl impl,
int result = cobcall(name, size, cob_args);
- free(cob_args);
+ delete[] cob_args;
return value_create_int(result);
}
|
Fix bytestring test broken on 32b systems | @@ -159,27 +159,17 @@ static void test_serialize_indefinite_bytestring(void **_CBOR_UNUSED(_state)) {
cbor_decref(&item);
}
-static void test_serialize_4b_bytestring(void **_CBOR_UNUSED(_state)) {
- cbor_item_t *item = cbor_new_definite_bytestring();
-
- // Fake having a huge chunk of data
- unsigned char *data = malloc(1);
- cbor_bytestring_set_handle(item, data, UINT32_MAX);
-
- assert_size_equal(cbor_serialize(item, buffer, 512), 0);
- assert_size_equal(cbor_serialized_size(item), (uint64_t)UINT32_MAX + 5);
- cbor_decref(&item);
-}
-
-static void test_serialize_8b_bytestring(void **_CBOR_UNUSED(_state)) {
+static void test_serialize_bytestring_size_overflow(
+ void **_CBOR_UNUSED(_state)) {
cbor_item_t *item = cbor_new_definite_bytestring();
// Fake having a huge chunk of data
unsigned char *data = malloc(1);
- cbor_bytestring_set_handle(item, data, (uint64_t)UINT32_MAX + 1);
+ cbor_bytestring_set_handle(item, data, SIZE_MAX);
+ // Would require 1 + 8 + SIZE_MAX bytes, which overflows size_t
assert_size_equal(cbor_serialize(item, buffer, 512), 0);
- assert_size_equal(cbor_serialized_size(item), (uint64_t)UINT32_MAX + 1 + 9);
+ assert_size_equal(cbor_serialized_size(item), 0);
cbor_decref(&item);
}
|
website: fix regression with new Markdown style
fix | "path": "src/plugins/README.md",
"target_file": ["README.md", "README", "readme.md", "readme"],
"parsing": {
- "start_regex": "# Plugins #",
- "stop_regex": "####### UNUSED ######",
- "section_regex": "### ([^#]+) ###",
+ "start_regex": "## Plugins",
+ "stop_regex": "####### UNUSED",
+ "section_regex": "### ([^#]+)",
"entry_regex": "^\\- \\[(.+)\\]\\(([^\\)]+)\\)(.*)"
},
"name": {
"path": "doc/decisions/README.md",
"target_file": [],
"parsing": {
- "stop_regex": "####### UNUSED ######",
+ "stop_regex": "####### UNUSED",
"section_regex": "## ([^#]+)",
"entry_regex": "^\\- \\[(.+)\\]\\(([^\\)]+)\\)(.*)"
},
|
Fix insertRndPrintable | @@ -974,7 +974,7 @@ static void mangle_InsertRndPrintable(run_t* run) {
size_t off = util_rndGet(0, run->dynamicFileSz - 1);
size_t len = util_rndGet(1, run->dynamicFileSz - off);
- mangle_Inflate(run, off, len);
+ mangle_InflatePrintable(run, off, len);
mangle_Move(run, off, off + len, run->dynamicFileSz);
util_rndBuf(&run->dynamicFile[off], len);
util_turnToPrintable(&run->dynamicFile[off], len);
|
[build_aomp.sh]-Small fix to check if arguments were passed in for partial build options. Before, if no arguments were given a syntax error would show and abort the build on some systems. | @@ -90,7 +90,9 @@ else
components="roct rocr project libdevice comgr rocminfo hip extras atmi openmp pgmath flang flang_runtime"
fi
-#Start build from given one component (./build_aomp.sh continue openmp)
+#Partial build options. Check if argument was given.
+if [ -n "$1" ] ; then
+#Start build from given component (./build_aomp.sh continue openmp)
if [ $1 == 'continue' ] ; then
for COMPONENT in $components ; do
if [ $COMPONENT == $2 ] ; then
@@ -118,6 +120,7 @@ elif [ $1 == 'select' ] ; then
#Remove arguments so they are not passed to build_aomp_component
set --
fi
+fi
for COMPONENT in $components ; do
echo
|
Node.js: tune package.json for fresh express.js | {
"name": "rhonodejsapplication",
"version": "0.0.1",
- "engines": {
- "node": "0.10.x",
- "npm": "1.3.x"
- },
"private": true,
- "scripts": {
- "start": "node app.js"
- },
"dependencies": {
- "express": "3.3.7",
- "jade": "*"
+ "express": "*"
}
}
|
Add part numbers for A715 and X3 aliased to A710/X2 | @@ -202,10 +202,14 @@ int detect(void)
return CPU_CORTEXA510;
else if (strstr(cpu_part, "0xd47"))
return CPU_CORTEXA710;
+ else if (strstr(cpu_part, "0xd4d")) //A715
+ return CPU_CORTEXA710;
else if (strstr(cpu_part, "0xd44"))
return CPU_CORTEXX1;
else if (strstr(cpu_part, "0xd4c"))
return CPU_CORTEXX2;
+ else if (strstr(cpu_part, "0xd4e")) //X3
+ return CPU_CORTEXX2;
}
// Qualcomm
else if (strstr(cpu_implementer, "0x51") && strstr(cpu_part, "0xc00"))
|
Fix typo, Add LdrQueryImageFileExecutionOption | @@ -284,6 +284,21 @@ LdrGetProcedureAddressEx(
);
#endif
+#if (PHNT_VERSION >= PHNT_THRESHOLD)
+// rev
+NTSYSAPI
+NTSTATUS
+NTAPI
+LdrGetProcedureAddressForCaller(
+ _In_ PVOID DllHandle,
+ _In_opt_ PANSI_STRING ProcedureName,
+ _In_opt_ ULONG ProcedureNumber,
+ _Out_ PVOID *ProcedureAddress,
+ _In_ ULONG Flags,
+ _In_ PVOID *Callback
+ );
+#endif
+
#define LDR_LOCK_LOADER_LOCK_FLAG_RAISE_ON_ERRORS 0x00000001
#define LDR_LOCK_LOADER_LOCK_FLAG_TRY_ONLY 0x00000002
@@ -750,7 +765,20 @@ LdrQueryImageFileExecutionOptions(
_In_ ULONG ValueSize,
_Out_ PVOID Buffer,
_In_ ULONG BufferSize,
- _Out_opt_ PULONG RetunedLength
+ _Out_opt_ PULONG ReturnedLength
+ );
+
+NTSYSAPI
+NTSTATUS
+NTAPI
+LdrQueryImageFileExecutionOptionsEx(
+ _In_ PUNICODE_STRING SubKey,
+ _In_ PCWSTR ValueName,
+ _In_ ULONG Type,
+ _Out_ PVOID Buffer,
+ _In_ ULONG BufferSize,
+ _Out_opt_ PULONG ReturnedLength,
+ _In_ BOOLEAN Wow64
);
#endif // (PHNT_MODE != PHNT_MODE_KERNEL)
|
Add more options to asan. | @@ -56,7 +56,9 @@ if(OPTION_BUILD_SANITIZER AND (CMAKE_BUILD_TYPE STREQUAL "Debug" OR CMAKE_BUILD_
set(DEFAULT_LIBRARIES -lasan -lubsan)
set(TESTS_SANITIZER_ENVIRONMENT_VARIABLES
"LSAN_OPTIONS=verbosity=1:log_threads=1:print_suppressions=false:suppressions=${CMAKE_SOURCE_DIR}/source/tests/sanitizer/lsan.supp"
- "ASAN_OPTIONS=strict_string_checks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1"
+ # Specify use_sigaltstack=0 as coreclr uses own alternate stack for signal handlers (https://github.com/swgillespie/coreclr/commit/bec020aa466d08e49e007d0011b0e79f8f7c7a62)
+ "ASAN_OPTIONS=symbolize=1:alloc_dealloc_mismatch=0:use_sigaltstack=0:strict_string_checks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1"
+ "UBSAN_OPTIONS=print_stacktrace=1"
)
set(SANITIZER_COMPILE_DEFINITIONS
"__ADDRESS_SANITIZER__=1"
|
gpgme: reformat FindLibGpgme.cmake | @@ -17,8 +17,7 @@ set (LIBGPGME_CFLAGS)
# if gpgme-config has been found
if (GPGME_EXECUTABLE)
- # workaround for MinGW/MSYS
- # CMake can't starts shell scripts on windows so it need to use sh.exe
+ # workaround for MinGW/MSYS CMake can't starts shell scripts on windows so it needs to use sh.exe
execute_process (COMMAND sh ${GPGME_EXECUTABLE} --libs
RESULT_VARIABLE _return_VALUE
OUTPUT_VARIABLE LIBGPGME_LIBRARIES
@@ -45,9 +44,13 @@ if (LIBGPGME_FOUND)
endif (NOT LibGpgme_FIND_QUIETLY)
# parse include directory from C-Flags
- string (LENGTH "${LIBGPGME_CFLAGS}" LIBGPGME_CFLAGS_LEN)
+ string (LENGTH "${LIBGPGME_CFLAGS}"
+ LIBGPGME_CFLAGS_LEN)
if (${LIBGPGME_CFLAGS_LEN} GREATER 1)
- string (REPLACE "-I" "" LIBGPGME_INCLUDE_DIR "${LIBGPGME_CFLAGS}")
+ string (REPLACE "-I"
+ ""
+ LIBGPGME_INCLUDE_DIR
+ "${LIBGPGME_CFLAGS}")
endif ()
unset (LIBGPGME_CFLAGS_LEN)
|
added freeing of old array list data | @@ -1373,16 +1373,17 @@ array_list_insert(
/* case we need to expand array */
if (bucket_idx >= array_list->current_size) {
int old_size = array_list->current_size;
+
array_list->current_size = array_list->current_size * 2;
+
ion_byte_t *bucket_map_cache = alloca(old_size * sizeof(ion_fpos_t));
+
memcpy(bucket_map_cache, array_list->data, old_size * sizeof(ion_fpos_t));
+ free(array_list->data);
array_list->data = NULL;
array_list->data = malloc(2 * old_size * sizeof(ion_fpos_t));
memset(array_list->data, 0, array_list->current_size * sizeof(ion_fpos_t));
memcpy(array_list->data, bucket_map_cache, old_size * sizeof(ion_fpos_t));
-//
-// array_list->data = (ion_fpos_t *) realloc(array_list->data, array_list->current_size * sizeof(ion_fpos_t));
-// memset(array_list->data + old_size * sizeof(ion_fpos_t), 0, sizeof(ion_fpos_t) * old_size);
if (NULL == array_list->data) {
free(array_list->data);
|
vere: add ames counters for forwarded packets
Track the number of packets we forward, and the number we drop due to
forward queue pressure. Once every 1000 packets, printf the total. | c3_o fit_o; // filtering active
c3_y ver_y; // protocol version
c3_d foq_d; // forward queue size
+ c3_d fow_d; // forwarded count
+ c3_d fod_d; // forwards dropped count
} u3_ames;
/* u3_head: ames packet header
@@ -599,6 +601,11 @@ _ames_forward(u3_panc* pac_u, u3_noun lan)
u3z(bod);
}
+ pac_u->sam_u->fow_d++;
+ if ( 0 == (pac_u->sam_u->fow_d % 1000) ) {
+ u3l_log("ames: forwarded %" PRIu64 " total\n", pac_u->sam_u->fow_d);
+ }
+
pac_u->sam_u->foq_d--;
_ames_ef_send(pac_u->sam_u, lan, _ames_serialize_packet(pac_u));
_ames_panc_free(pac_u);
@@ -718,6 +725,11 @@ _ames_recv_cb(uv_udp_t* wax_u,
if ( (1000 < sam_u->foq_d)
&& !((c3y == u3a_is_cat(rec)) && (256 > rec)) )
{
+ sam_u->fod_d++;
+ if ( 0 == (sam_u->fod_d % 1000) ) {
+ u3l_log("ames: dropped %" PRIu64 " forwards total\n", sam_u->fod_d);
+ }
+
u3z(sen); u3z(rec);
}
// otherwise, proceed with forwarding
|
tools: fix log message | @@ -1168,7 +1168,6 @@ class fpgaotsu_n3000(fpgaotsu):
fpgaotsu_n3000.cfm2_erase_start ,
(fpgaotsu_n3000.cfm2_erase_end - fpgaotsu_n3000.cfm2_erase_start )+1)
-
retval = mtd.update_verify(first_mtd_dev,
self._fpga_cfg_data.max10_factory.file,
self._fpga_cfg_data.max10_factory.start,
@@ -1180,7 +1179,6 @@ class fpgaotsu_n3000(fpgaotsu):
LOG.exception('Failed update & verify Updates MAX10 factory')
raise Exception("Failed update & verify Updates MAX10 factory")
-
# MAX10 User
LOG.info('Updates MAX10 CFM1/User %s from 0x%08x to 0x%08x ',self._fpga_cfg_data.max10_user.file,
self._fpga_cfg_data.max10_user.start, self._fpga_cfg_data.max10_user.end)
@@ -1400,11 +1398,11 @@ def fpga_update(path, rsu,rsu_only):
d5005 = fpgaotsu_d5005(o,fpga_cfg_instance)
retval = d5005.d5005_fpga_update()
if retval == 0:
- LOG.info('Successfully updated with RoT')
+ LOG.info('One time udpate successfully updated to RoT')
else:
- LOG.error('Failed to update D5005 FPGA')
+ LOG.error('One time udpate failed update RoT')
except Exception as e:
- LOG.exception('Failed to update D5005 FPGA')
+ LOG.exception('One time udpate failed update RoT')
LOG.exception(e.message,e.args)
retval = -1
@@ -1416,13 +1414,13 @@ def fpga_update(path, rsu,rsu_only):
n3000 = fpgaotsu_n3000(o,fpga_cfg_instance)
retval = n3000.n3000_fpga_update()
if retval == 0:
- LOG.info('Successfully update & verified N3000 FPGA')
+ LOG.info('One time udpate successfully updated to RoT')
else:
- LOG.error('Failed to update N3000 FPGA ')
+ LOG.error('One time udpate failed update RoT')
continue
except Exception as e:
- LOG.exception('Failed to update N3000 FPGA')
+ LOG.exception('One time udpate failed update RoT')
LOG.exception(e.message,e.args)
retval = -1
continue
@@ -1431,12 +1429,12 @@ def fpga_update(path, rsu,rsu_only):
try:
retval = n3000.do_rsu_only(o.pci_node.pci_address)
if retval == 0:
- LOG.info('Successfully Done RSU')
+ LOG.info('One time udpate successfully updated to RoT')
else:
- LOG.error('Failed to do RSU ')
+ LOG.error('One time udpate failed update RoT')
except Exception as e:
- LOG.exception('Failed to do RSU')
+ LOG.exception('One time udpate failed update RoT')
LOG.exception(e.message,e.args)
retval = -1
@@ -1449,10 +1447,8 @@ def sig_handler(signum, frame):
LOG.error('fpgaotsu update interrupted')
def parse_args():
-
"""Parses fpgaotsu command line arguments
"""
-
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
|
rnndb/falcon: Add missing Falcon register INTR_MODE
This MMIO register was already documented in docs/hw/falcon/io.rst | <reg32 offset="0x000" name="INTR_TRIGGER" access="w" type="falcon_intr"/>
<reg32 offset="0x004" name="INTR_ACK" access="w" type="falcon_intr"/>
<reg32 offset="0x008" name="INTR" access="r" type="falcon_intr"/>
+ <reg32 offset="0x00c" name="INTR_MODE" variants="GT215-"/> <!-- v3+ -->
<reg32 offset="0x010" name="INTR_EN_SET" access="w" type="falcon_intr"/>
<reg32 offset="0x014" name="INTR_EN_CLR" access="w" type="falcon_intr"/>
<reg32 offset="0x018" name="INTR_EN" access="r" type="falcon_intr"/>
|
discard: Added missing io_put in case of error | @@ -77,7 +77,7 @@ static int _ocf_discard_core(struct ocf_request *req)
ocf_io_set_cmpl(io, req, NULL, _ocf_discard_core_complete);
err = ocf_io_set_data(io, req->data, 0);
if (err) {
- _ocf_discard_complete_req(req, err);
+ _ocf_discard_core_complete(io, err);
return err;
}
|
Update Video codec list.txt | @@ -9,6 +9,7 @@ avrp: Avid 1:1 10-bit RGB Packer
avui: Avid Meridien Uncompressed
ayuv: Uncompressed packed MS 4:4:4:4
bmp: BMP (Windows and OS/2 bitmap)
+cfhd: Cineform HD
cinepak: Cinepak
cljr: Cirrus Logic AccuPak
dnxhd: VC3/DNxHD
@@ -78,7 +79,6 @@ zlib: LCL (LossLess Codec Library) ZLIB
zmbv: Zip Motion Blocks Video
libaom-av1: libaom AV1
libopenjpeg: OpenJPEG JPEG 2000
-librav1e: librav1e AV1
libtheora: libtheora Theora
libvpx: libvpx VP8
libvpx-vp9: libvpx VP9
@@ -88,6 +88,17 @@ libx264: libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10
libx264rgb: libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 RGB
libx265: libx265 H.265 / HEVC
libxvid: libxvidcore MPEG-4 part 2
-h264_videotoolbox: VideoToolbox H.264 Encoder
-hevc_videotoolbox: VideoToolbox H.265 Encoder
-h264_nvenc: NVidia hardware assisted H.264 video encoder
+h264_amf: AMD AMF H.264 Encoder
+h264_mf: H264 via MediaFoundation
+h264_nvenc: NVIDIA NVENC H.264 encoder
+h264_qsv: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (Intel Quick Sync Video acceleration)
+nvenc: NVIDIA NVENC H.264 encoder
+nvenc_h264: NVIDIA NVENC H.264 encoder
+nvenc_hevc: NVIDIA NVENC hevc encoder
+hevc_amf: AMD AMF HEVC encoder
+hevc_mf: HEVC via MediaFoundation
+hevc_nvenc: NVIDIA NVENC hevc encoder
+hevc_qsv: HEVC (Intel Quick Sync Video acceleration)
+mjpeg_qsv: MJPEG video (Intel Quick Sync Video acceleration)
+mpeg2_qsv: MPEG-2 video (Intel Quick Sync Video acceleration)
+vp9_qsv: VP9 video (Intel Quick Sync Video acceleration)
|
improves u3r_mug_bytes() efficiency (a little) | @@ -1495,15 +1495,19 @@ u3r_mug_bytes(const c3_y *buf_y,
c3_w syd_w = 0xcafebabe;
c3_w ham_w = 0;
- while ( 0 == ham_w ) {
+ while ( 1 ) {
c3_w haz_w;
MurmurHash3_x86_32(buf_y, len_w, syd_w, &haz_w);
ham_w = (haz_w >> 31) ^ (haz_w & 0x7fffffff);
+
+ if ( 0 == ham_w ) {
syd_w++;
}
-
+ else {
return ham_w;
}
+ }
+}
/* u3r_mug_chub(): Compute the mug of `num`, LSW first.
*/
|
ci: Update OpenBSD's MEM_PER_CONNECTION, based on error message | #ifdef __FreeBSD__
#define MEM_PER_CONNECTION 57
#elif defined(__OpenBSD__)
- #define MEM_PER_CONNECTION 71
+ #define MEM_PER_CONNECTION 75
#else
#define MEM_PER_CONNECTION 49
#endif
|
init bang for train | @@ -18,7 +18,7 @@ typedef struct _train
t_inlet *x_width_inlet;
t_inlet *x_offset_inlet;
double x_phase;
- double x_rec_sr_khz;
+ t_float x_sr;
t_outlet *x_bangout;
t_clock *x_clock;
} t_train;
@@ -38,13 +38,13 @@ static t_int *train_perform(t_int *w)
t_float *in2 = (t_float *)(w[4]);
t_float *in3 = (t_float *)(w[5]);
t_float *out = (t_float *)(w[6]);
- double rec_sr_khz = x->x_rec_sr_khz;
+ t_float sr = x->x_sr;
double phase = x->x_phase;
double phase_step, wrap_low, wrap_high;
t_float period, phase_offset, width;
while (nblock--) {
period = *in1++;
- phase_step = period > 0 ? rec_sr_khz / (double)period : 0.5;
+ phase_step = period > 0 ? (double)1000. / ((double)sr * (double)period) : 0.5;
if (phase_step > 0.5)
phase_step = 0.5; // smallest period corresponds to nyquist
width = *in2++;
@@ -62,9 +62,10 @@ static t_int *train_perform(t_int *w)
if (phase < wrap_low)
*out++ = 0.;
else if (phase >= wrap_low && x->x_start)
- {
- x->x_start = 0;
+ { // force 1st bang & 1st sample = 1 (even when width = 0)
*out++ = 1.;
+ clock_delay(x->x_clock, 0);
+ x->x_start = 0;
}
else if (phase >= wrap_high) {
*out++ = 1.; // 1st always = 1
@@ -85,7 +86,7 @@ static t_int *train_perform(t_int *w)
static void train_dsp(t_train *x, t_signal **sp)
{
- x->x_rec_sr_khz = 1000. / (double)sp[0]->s_sr; // reciprocal sample rate in Khz
+ x->x_sr = sp[0]->s_sr; // sample rate
dsp_add(train_perform, 6, x, sp[0]->s_n,
sp[0]->s_vec, sp[1]->s_vec, sp[2]->s_vec, sp[3]->s_vec);
}
|
Avoid a warning on Luos_assert(__FILE__, __LINE__) by explicitly casting file into char* | #if defined(LUOS_ASSERTION)
#define LUOS_ASSERT(expr) \
if (!(expr)) \
- Luos_assert(__FILE__, __LINE__)
+ Luos_assert((char *)__FILE__, __LINE__)
#else
#define LUOS_ASSERT(expr) ()
#endif
|
build BUGFIX apple does not require rt library | @@ -188,12 +188,17 @@ add_library(sysrepo SHARED $<TARGET_OBJECTS:srobj>)
set_target_properties(sysrepo PROPERTIES VERSION ${SYSREPO_SOVERSION_FULL} SOVERSION ${SYSREPO_SOVERSION})
# dependencies
+# librt (shm_open, shm_unlink, not required on OSX)
+if(NOT APPLE)
target_link_libraries(sysrepo rt)
+endif()
+# libyang
find_package(YANG REQUIRED)
target_link_libraries(sysrepo ${LIBYANG_LIBRARIES})
include_directories(${LIBYANG_INCLUDE_DIRS})
+# pthread
set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
find_package(Threads REQUIRED)
target_link_libraries(sysrepo ${CMAKE_THREAD_LIBS_INIT})
|
msp: ignore flash save and reboot when armed | #include "util/util.h"
#include "vtx.h"
+enum {
+ MSP_REBOOT_FIRMWARE = 0,
+ MSP_REBOOT_BOOTLOADER_ROM,
+ MSP_REBOOT_MSC,
+ MSP_REBOOT_MSC_UTC,
+ MSP_REBOOT_BOOTLOADER_FLASH,
+ MSP_REBOOT_COUNT,
+};
+
extern uint8_t msp_vtx_detected;
extern vtx_settings_t msp_vtx_settings;
extern const uint16_t frequency_table[VTX_BAND_MAX][VTX_CHANNEL_MAX];
@@ -439,14 +448,31 @@ static void msp_process_serial_cmd(msp_t *msp, msp_magic_t magic, uint16_t cmd,
case MSP_EEPROM_WRITE: {
msp_vtx_detected = 1;
+ if (!flags.arm_switch) {
flash_save();
+ }
msp_send_reply(msp, magic, cmd, NULL, 0);
break;
}
case MSP_REBOOT: {
+ if (flags.arm_switch) {
+ break;
+ }
+
+ switch (payload[0]) {
+ case MSP_REBOOT_FIRMWARE:
system_reset();
break;
+
+ case MSP_REBOOT_BOOTLOADER_ROM:
+ system_reset_to_bootloader();
+ break;
+
+ default:
+ break;
+ }
+ break;
}
default:
|
Type: Disable container overflow detection | +# ~~~
+# The unit test for the plugin fails on an ASAN enabled macOS build, if we do not disable container overflow detection.
+# Reference: https://travis-ci.org/sanssecours/elektra/jobs/418554628
+# ~~~
add_plugin (type
ADD_TEST
CPP_TEST
@@ -7,4 +11,5 @@ add_plugin (type
types.hpp
types.cpp
type_checker.hpp
- TEST_README)
+ TEST_README
+ TEST_ENVIRONMENT "ASAN_OPTIONS=detect_container_overflow=0")
|
** Not done ** moving the code between VMs | @@ -188,15 +188,15 @@ struct FuncArgs{
va_end(__args); \
}while(0)
-extern void *_dl_sym(void *, const char *, void *);
#define WRAP_CHECK_ONLY(func) \
if (g_fn.func == NULL ) { \
- if ((g_fn.func = _dl_sym(RTLD_NEXT, #func, func)) == NULL) { \
+ if ((g_fn.func = dlsym(RTLD_NEXT, #func)) == NULL) { \
scopeLog("ERROR: "#func":NULL\n", -1, CFG_LOG_ERROR); \
return -1; \
} \
}
-
+#ifdef __LINUX__
+extern void *_dl_sym(void *, const char *, void *);
#define WRAP_CHECK(func) \
if (g_fn.func == NULL ) { \
if ((g_fn.func = _dl_sym(RTLD_NEXT, #func, func)) == NULL) { \
@@ -204,6 +204,15 @@ extern void *_dl_sym(void *, const char *, void *);
return -1; \
} \
}
+#else
+#define WRAP_CHECK(func) \
+ if (g_fn.func == NULL ) { \
+ if ((g_fn.func = dlsym(RTLD_NEXT, #func)) == NULL) { \
+ scopeLog("ERROR: "#func":NULL\n", -1, CFG_LOG_ERROR); \
+ return -1; \
+ } \
+ }
+#endif // __LINUX__
#define WRAP_CHECK_VOID(func) \
if (g_fn.func == NULL ) { \
|
Redundant (channel is the `obj` part of the key) | @@ -130,16 +130,14 @@ static inline void client_test4free(client_s *cl) {
free(cl);
}
-static inline uint64_t client_compute_hash(client_s client,
- uint64_t channel_hash) {
- return (channel_hash ^
- (((((uint64_t)(client.on_message) *
+static inline uint64_t client_compute_hash(client_s client) {
+ return (((((uint64_t)(client.on_message) *
((uint64_t)client.udata1 ^ 0x736f6d6570736575ULL)) >>
5) |
(((uint64_t)(client.on_unsubscribe) *
((uint64_t)client.udata1 ^ 0x736f6d6570736575ULL))
<< 47)) ^
- ((uint64_t)client.udata2 ^ 0x646f72616e646f6dULL)));
+ ((uint64_t)client.udata2 ^ 0x646f72616e646f6dULL));
}
static client_s *pubsub_client_new(client_s client, channel_s channel) {
@@ -153,7 +151,7 @@ static client_s *pubsub_client_new(client_s client, channel_s channel) {
return NULL;
}
uint64_t channel_hash = fiobj_sym_id(channel.name);
- uint64_t client_hash = client_compute_hash(client, channel_hash);
+ uint64_t client_hash = client_compute_hash(client);
spn_lock(&lock);
/* ignore if client exists. */
client_s *cl = fio_hash_find(
@@ -211,7 +209,7 @@ static int pubsub_client_destroy(client_s *client) {
fio_hash_s *ch_hashmap = (ch->use_pattern ? &patterns : &channels);
uint64_t channel_hash = fiobj_sym_id(ch->name);
- uint64_t client_hash = client_compute_hash(*client, channel_hash);
+ uint64_t client_hash = client_compute_hash(*client);
uint8_t is_ch_any;
spn_lock(&lock);
if ((client->sub_count -= 1)) {
@@ -252,8 +250,7 @@ static inline client_s *pubsub_client_find(client_s client, channel_s channel) {
if (!client.on_message || !channel.name) {
return NULL;
}
- uint64_t channel_hash = fiobj_sym_id(channel.name);
- uint64_t client_hash = client_compute_hash(client, channel_hash);
+ uint64_t client_hash = client_compute_hash(client);
spn_lock(&lock);
client_s *cl = fio_hash_find(
&clients, (fio_hash_key_s){.hash = client_hash, .obj = channel.name});
|
Makefile: simplify darwin build | @@ -108,7 +108,7 @@ else ifeq ($(OS),Darwin)
CC := $(shell xcrun --sdk $(SDK_NAME) --find cc)
LD := $(shell xcrun --sdk $(SDK_NAME) --find cc)
- ARCH_CFLAGS += -arch x86_64 -isysroot $(SDK) \
+ ARCH_CFLAGS += -isysroot $(SDK) \
-x objective-c -pedantic -fblocks \
-Wimplicit -Wunused -Wcomment -Wchar-subscripts -Wuninitialized \
-Wreturn-type -Wpointer-arith -Wno-gnu-case-range -Wno-gnu-designator \
@@ -122,11 +122,6 @@ else ifeq ($(OS),Darwin)
-framework CommerceKit $(CRASH_REPORT)
XCODE_VER := $(shell xcodebuild -version | grep $(GREP_COLOR) "^Xcode" | cut -d " " -f2)
- ifeq "8.3" "$(word 1, $(sort 8.3 $(XCODE_VER)))"
- ARCH_LDFLAGS += -F/Applications/Xcode.app/Contents/SharedFrameworks \
- -framework CoreSymbolicationDT \
- -Wl,-rpath,/Applications/Xcode.app/Contents/SharedFrameworks
- endif
MIG_RET := $(shell mig -header mac/mach_exc.h -user mac/mach_excUser.c -sheader mac/mach_excServer.h \
-server mac/mach_excServer.c $(SDK)/usr/include/mach/mach_exc.defs &>/dev/null; echo $$?)
|
task: remove unused variable | @@ -83,7 +83,6 @@ void flb_task_retry_destroy(struct flb_task_retry *retry)
struct flb_task_retry *flb_task_retry_create(struct flb_task *task,
void *data)
{
- int ret;
struct mk_list *head;
struct mk_list *tmp;
struct flb_task_retry *retry = NULL;
|
zephyr: base32: Fix unit tests
This change fixes unit test breakage caused by zephyr already
declaring tolower.
BRANCH=none
TEST=zmake testall | @@ -104,6 +104,7 @@ int isspace(int c);
int isalpha(int c);
int isupper(int c);
int isprint(int c);
+int tolower(int c);
#endif
int memcmp(const void *s1, const void *s2, size_t len);
@@ -154,8 +155,6 @@ char *strzcpy(char *dest, const char *src, int len);
* Other strings return 0 and leave *dest unchanged.
*/
int parse_bool(const char *s, int *dest);
-
-int tolower(int c);
#endif /* !HIDE_EC_STDLIB */
/**
|
removed all int int tests to isolate string key problems | @@ -30,5 +30,5 @@ void
runalltests_behaviour_linear_hash(
void
) {
- bhdct_run_tests(linear_hash_dict_init, 15, ION_BHDCT_ALL_TESTS);
+ bhdct_run_tests(linear_hash_dict_init, 15, ION_BHDCT_ALL_TESTS & ~ION_BHDCT_INT_INT);
}
|
android: reverting usage of requestLegacyExternalStorage flag in manifest | <supports-screens android:smallScreens='true' android:normalScreens='true' android:largeScreens='true' android:xlargeScreens='true'/>
<application android:name='com.rhomobile.rhodes.RhodesApplication'
- android:requestLegacyExternalStorage="true"
<% if canRenderNetworkSecurityConfig %>
android:networkSecurityConfig="@xml/network_security_config"
<% end %>
|
Tests: ability to run unitd with specified "--user" option. | @@ -48,6 +48,11 @@ def pytest_addoption(parser):
action="store_true",
help="Run unsafe tests",
)
+ parser.addoption(
+ "--user",
+ type=str,
+ help="Default user for non-privileged processes of unitd",
+ )
unit_instance = {}
@@ -60,6 +65,7 @@ def pytest_configure(config):
option.print_log = config.option.print_log
option.save_log = config.option.save_log
option.unsafe = config.option.unsafe
+ option.user = config.option.user
option.generated_tests = {}
option.current_dir = os.path.abspath(
@@ -283,9 +289,7 @@ def unit_run():
os.mkdir(temp_dir + '/state')
- with open(temp_dir + '/unit.log', 'w') as log:
- unit_instance['process'] = subprocess.Popen(
- [
+ unitd_args = [
unitd,
'--no-daemon',
'--modules',
@@ -300,9 +304,13 @@ def unit_run():
'unix:' + temp_dir + '/control.unit.sock',
'--tmp',
temp_dir,
- ],
- stderr=log,
- )
+ ]
+
+ if option.user:
+ unitd_args.extend(['--user', option.user])
+
+ with open(temp_dir + '/unit.log', 'w') as log:
+ unit_instance['process'] = subprocess.Popen(unitd_args, stderr=log)
if not waitforfiles(temp_dir + '/control.unit.sock'):
_print_log()
|
chat: code uses fontsize 0 | @@ -16,7 +16,6 @@ export default class CodeContent extends Component {
p='1'
my='0'
borderRadius='1'
- fontSize='14px'
overflow='auto'
maxHeight='10em'
maxWidth='100%'
@@ -35,7 +34,6 @@ export default class CodeContent extends Component {
my='0'
p='1'
borderRadius='1'
- fontSize='14px'
overflow='auto'
maxHeight='10em'
maxWidth='100%'
|
Bootstrap: Set 16 MB limit to booted loader | @@ -47,7 +47,7 @@ LoadOpenCore (
ASSERT (ImageHandle != NULL);
BufferSize = 0;
- Buffer = ReadFile (FileSystem, OPEN_CORE_IMAGE_PATH, &BufferSize);
+ Buffer = ReadFile (FileSystem, OPEN_CORE_IMAGE_PATH, &BufferSize, BASE_16MB);
if (Buffer == NULL) {
DEBUG ((DEBUG_ERROR, "BS: Failed to locate valid OpenCore image - %p!\n", Buffer));
return EFI_NOT_FOUND;
|
Hide replace tile button (not implemented yet) when multiple tiles are selected | @@ -293,12 +293,17 @@ export const SpriteEditor = ({
/>
</FormField>
</FormRow>
+
+ {selectedTileIds.length === 1 && (
+ <>
<FormDivider />
<FormRow>
<Button>Replace Tile</Button>
</FormRow>
</>
)}
+ </>
+ )}
{!metaspriteTile && (
<>
|
Fix torque resolution issue | @@ -122,7 +122,8 @@ namespace carto {
std::shared_ptr<mvt::SymbolizerContext> symbolizerContext;
{
std::lock_guard<std::mutex> lock(_mutex);
- resolution = _resolution;
+ int settingsResolution = (_map ? _map->getTorqueSettings().resolution : -1);
+ resolution = _resolution / (settingsResolution > 0 ? settingsResolution : 1);
map = _map;
symbolizerContext = _symbolizerContext;
}
@@ -175,7 +176,7 @@ namespace carto {
_map = map;
_mapSettings = std::make_shared<mvt::Map::Settings>(map->getSettings());
- _mapSettings->backgroundColor = std::static_pointer_cast<mvt::TorqueMap>(_map)->getTorqueSettings().clearColor;
+ _mapSettings->backgroundColor = _map->getTorqueSettings().clearColor;
_symbolizerContext = symbolizerContext;
_styleSet = styleSet;
}
|
Workaround for pip install on Windows into a virtual environment. | @@ -424,6 +424,8 @@ PYTHON_VERSION = get_python_config('VERSION')
if os.name == 'nt':
if hasattr(sys, 'real_prefix'):
PYTHON_LIBDIR = sys.real_prefix
+ elif hasattr(sys, 'base_prefix'):
+ PYTHON_LIBDIR = sys.base_prefix
else:
PYTHON_LIBDIR = get_python_config('BINDIR')
|
Fix PortManager when it builds with musl | @@ -220,7 +220,7 @@ def get_ephemeral_range():
filename = "/proc/sys/net/ipv4/ip_local_port_range"
if os.path.exists(filename):
with open(filename) as afile:
- data = afile.read()
+ data = afile.read(1024) # fix for musl
port_range = tuple(map(int, data.strip().split()))
if len(port_range) == 2:
return port_range
|
nissa/nereid: enable keyboard
With eSPI enabled, we can now enable the keyboard for Nereid because
keyboard code depends on eSPI.
TEST=kblight, kbpress and ksstate commands now work on Nereid console
BRANCH=none | @@ -13,10 +13,6 @@ CONFIG_PLATFORM_EC_ACCELGYRO_BMI_COMM_I2C=y
CONFIG_PLATFORM_EC_ACCELGYRO_BMI3XX=y
CONFIG_PLATFORM_EC_ACCEL_BMA4XX=y
-# Keyboard - disabled until host interface is available (b:211772002)
-CONFIG_CROS_KB_RAW_ITE=n
-CONFIG_PLATFORM_EC_KEYBOARD=n
-
# TCPC+PPC: ITE on-chip for C0, PS8745 for optional C1
CONFIG_PLATFORM_EC_USB_PD_TCPM_ITE_ON_CHIP=y
CONFIG_PLATFORM_EC_USB_PD_TCPM_DRIVER_IT8XXX2=y
|
Virtqueue allocation should be initialized to zero | @@ -241,7 +241,7 @@ status virtqueue_alloc(vtdev dev,
queue sched_queue)
{
u64 vq_alloc_size = sizeof(struct virtqueue) + size * sizeof(vqmsg);
- virtqueue vq = allocate(dev->general, vq_alloc_size);
+ virtqueue vq = allocate_zero(dev->general, vq_alloc_size);
vq->avail_offset = size * sizeof(struct vring_desc);
vq->used_offset = pad(vq->avail_offset + sizeof(*vq->avail) + sizeof(vq->avail->ring[0]) * size, align);
bytes alloc = vq->used_offset + pad(sizeof(*vq->used) + sizeof(vq->used->ring[0]) * size, align);
|
chat: fixes scroll keys
fixes | @@ -58,6 +58,7 @@ export default class VirtualScroller extends PureComponent<VirtualScrollerProps,
this.heightOf = this.heightOf.bind(this);
this.setScrollTop = this.setScrollTop.bind(this);
this.scrollToData = this.scrollToData.bind(this);
+ this.scrollKeyMap = this.scrollKeyMap.bind(this);
this.loadRows = _.memoize(this.loadRows).bind(this);
}
@@ -164,7 +165,7 @@ export default class VirtualScroller extends PureComponent<VirtualScrollerProps,
this.loadRows(firstNeededKey, firstVisibleKey - 1);
}
const lastVisibleKey = Array.from(visibleItems.keys())[visibleItems.size - 1] ?? this.estimateIndexFromScrollTop(scrollTop + windowHeight);
- const lastNeededKey = Math.min(lastVisibleKey + this.OVERSCAN_SIZE, totalSize)
+ const lastNeededKey = Math.min(lastVisibleKey + this.OVERSCAN_SIZE, totalSize);
if (!data.has(lastNeededKey - 1)) {
this.loadRows(lastVisibleKey + 1, lastNeededKey);
}
@@ -198,25 +199,46 @@ export default class VirtualScroller extends PureComponent<VirtualScrollerProps,
};
}
+ scrollKeyMap(): Map<string, number> {
+ return new Map([
+ ['ArrowUp', this.state.averageHeight],
+ ['ArrowDown', this.state.averageHeight * -1],
+ ['PageUp', this.window.offsetHeight],
+ ['PageDown', this.window.offsetHeight * -1],
+ ['Home', this.window.scrollHeight],
+ ['End', this.window.scrollHeight * -1],
+ ['Space', this.window.offsetHeight * -1]
+ ]);
+ }
+
invertedKeyHandler(event): void | false {
- if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
+ const map = this.scrollKeyMap();
+ if (map.has(event.code) && document.body.isSameNode(document.activeElement)) {
event.preventDefault();
event.stopImmediatePropagation();
- if (event.code === 'ArrowUp') {
- this.window.scrollBy(0, 30);
- } else if (event.code === 'ArrowDown') {
- this.window.scrollBy(0, -30);
+ let distance = map.get(event.code);
+ if (event.code === 'Space' && event.shiftKey) {
+ distance = distance * -1;
}
+ this.window.scrollBy(0, distance);
return false;
}
}
componentWillUnmount() {
- window.removeEventListener('keydown', this.invertedKeyHandler, true);
+ window.removeEventListener('keydown', this.invertedKeyHandler);
}
setWindow(element) {
- if (this.window) return;
+ if (!element) return;
+ if (this.window) {
+ if (this.window.isSameNode(element)) {
+ return;
+ } else {
+ window.removeEventListener('keydown', this.invertedKeyHandler);
+ }
+ }
+
this.window = element;
if (this.props.origin === 'bottom') {
element.addEventListener('wheel', (event) => {
|
automation: Fix issue with Jenkins pipeline not picking up commit message
The "Test: <instance>" commit message feature didn't work due to git command being
echoed. | @@ -273,7 +273,7 @@ node('TEST-gb-cmb-dt-022')
// Use git to get the last commit message, @echo off to avoid capturing the command
// output as well as the returned text
- git_commit_text = bat(script: "git log -1 --pretty=%%B", returnStdout: true)
+ git_commit_text = bat(script: "@echo off & git log -1 --pretty=%%B", returnStdout: true)
if (git_commit_text) {
// Convert newlines to "/n" and quotation marks to back-ticks to prevent them
|
api/ethereum/signmsg: document signing format | @@ -46,6 +46,9 @@ pub async fn process(request: &pb::EthSignMessageRequest) -> Result<Response, Er
verify_message::verify(&request.msg).await?;
+ // Construct message to be signed. There is no standard for this. We match what MyEtherWallet,
+ // Trezor, etc. do, e.g.:
+ // https://github.com/ethereumjs/ethereumjs-util/blob/dd2882d790c1d3b50b75bee6f88031433cbd5bef/src/signature.ts#L140
let mut msg: Vec<u8> = Vec::new();
msg.extend(b"\x19Ethereum Signed Message:\n");
msg.extend(format!("{}", request.msg.len()).as_bytes());
|
fix README to show correct branch | @@ -4,6 +4,8 @@ To get started with f18 development using the amd-fir-dev branch run these comma
mkdir -p $HOME/git/f18
cd $HOME/git/f18
git clone https://github.com/rocm-developer-tools/aomp
+cd $HOME/git/f18/aomp
+git checkout amd-stg-openmp
cd $HOME/git/f18/aomp/f18bin
./clone_f18.sh
./build_f18.sh
|
zephyr: Implement gpio_or_ioex_[gs]et_level
This implements gpio_or_ioex_set_level and gpio_or_ioex_get_level in the
shim layer to match cros ec functionality.
BRANCH=none
TEST=zmake build for volteer and brya pass | #include "gpio.h"
#include "gpio/gpio.h"
+#include "ioexpander.h"
#include "sysjump.h"
#include "cros_version.h"
@@ -148,6 +149,22 @@ void gpio_set_level_verbose(enum console_channel channel,
gpio_set_level(signal, value);
}
+void gpio_or_ioex_set_level(int signal, int value)
+{
+ if (IS_ENABLED(CONFIG_PLATFORM_EC_IOEX) && signal_is_ioex(signal))
+ ioex_set_level(signal, value);
+ else
+ gpio_set_level(signal, value);
+}
+
+int gpio_or_ioex_get_level(int signal, int *value)
+{
+ if (IS_ENABLED(CONFIG_PLATFORM_EC_IOEX) && signal_is_ioex(signal))
+ return ioex_get_level(signal, value);
+ *value = gpio_get_level(signal);
+ return EC_SUCCESS;
+}
+
/* GPIO flags which are the same in Zephyr and this codebase */
#define GPIO_CONVERSION_SAME_BITS \
(GPIO_OPEN_DRAIN | GPIO_PULL_UP | GPIO_PULL_DOWN | GPIO_INPUT | \
|
Updated LICENSE.txt copyright year | -Copyright (c) 2003-2019 Jason Perkins and individual contributors.
+Copyright (c) 2003-2022 Jason Perkins and individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
|
Add renaming of ompModule.so, introduced with the Python3 build of ompd. | @@ -261,6 +261,18 @@ if [ "$1" == "install" ] ; then
echo "ERROR make install failed "
exit 1
fi
+
+ # Rename ompd module library. This was introduced with the Python3 build. The include
+ # of ompdModule would fail otherwise.
+ if [ -f "$AOMP_INSTALL_DIR/lib-debug/ompd/ompdModule.cpython-36m-x86_64-linux-gnu.so" ]; then
+ if [ -f "$AOMP_INSTALL_DIR/lib-debug/ompd/ompdModule.so" ]; then
+ echo "==> Removing old ompdModule.so"
+ rm "$AOMP_INSTALL_DIR/lib-debug/ompd/ompdModule.so"
+ fi
+ echo "==> Renaming ompdModule.cpython-36m-x86_64-linux-gnu.so to ompdModule.so"
+ mv $AOMP_INSTALL_DIR/lib-debug/ompd/ompdModule.cpython-36m-x86_64-linux-gnu.so $AOMP_INSTALL_DIR/lib-debug/ompd/ompdModule.so
+ fi
+
# we do not yet have OMPD in llvm 12, disable this for now.
if [ "$AOMP_VERSION" != "12.0" ] ; then
# Copy selected debugable runtime sources into the installation lib-debug/src directory
|
Fixed string length. | @@ -3434,7 +3434,7 @@ sigterm_handler(int sig) /* I - Signal */
* Flag that the job should be canceled...
*/
- write(2, "DEBUG: sigterm_handler: job_canceled = 1.\n", 25);
+ write(2, "DEBUG: sigterm_handler: job_canceled = 1.\n", 42);
job_canceled = 1;
return;
|
lora: include mic failures with join in statistics. | @@ -1404,6 +1404,8 @@ lora_mac_process_radio_rx(struct os_event *ev)
LoRaMacParams.ChannelsDatarate = LoRaMacParamsDefaults.ChannelsDatarate;
lora_mac_send_join_confirm(LORAMAC_EVENT_INFO_STATUS_OK,
JoinRequestTrials);
+ } else {
+ STATS_INC(lora_mac_stats, rx_mic_failures);
}
break;
case FRAME_TYPE_DATA_CONFIRMED_DOWN:
|
Windows makefile: Don't quote generator arguments
Rely on the build.info constructor to do the right thing.
Fixes | @@ -406,7 +406,8 @@ reconfigure reconf:
sub generatesrc {
my %args = @_;
(my $target = $args{src}) =~ s/\.[sS]$/.asm/;
- my $generator = '"'.join('" "', @{$args{generator}}).'"';
+ my ($gen0, @gens) = @{$args{generator}};
+ my $generator = '"'.$gen0.'"'.join('', map { " $_" } @gens);
my $generator_incs = join("", map { " -I \"$_\"" } @{$args{generator_incs}});
my $incs = join("", map { " /I \"$_\"" } @{$args{incs}});
my $deps = @{$args{deps}} ?
|
app-prefs: fixing logic around enabled | @@ -10,7 +10,7 @@ export const AppPrefs = ({ match }: RouteComponentProps<{ desk: string }>) => {
const charge = useCharge(desk);
const vat = useVat(desk);
const tracking = !!vat?.arak.rail;
- const otasEnabled = vat?.arak.rail?.paused;
+ const otasEnabled = !vat?.arak.rail?.paused;
const otaSource = vat?.arak.rail?.ship;
const toggleOTAs = useKilnState((s) => s.toggleOTAs);
@@ -21,7 +21,7 @@ export const AppPrefs = ({ match }: RouteComponentProps<{ desk: string }>) => {
<h2 className="h3 mb-7">{charge?.title} Settings</h2>
<div className="space-y-3">
{tracking ? (
- <Setting on={!!otasEnabled} toggle={toggleUpdates} name="Automatic Updates">
+ <Setting on={otasEnabled} toggle={toggleUpdates} name="Automatic Updates">
<p>Automatically download and apply updates to keep {charge?.title} up to date.</p>
{otaSource && (
<p>
|
mdns: fix missing bye packet if services removed with mdns_service_remove_all() or mdns_free()
Closes | @@ -1676,9 +1676,7 @@ static void _mdns_send_final_bye(bool include_ip)
size_t srv_count = 0;
mdns_srv_item_t * a = _mdns_server->services;
while (a) {
- if (!a->service->instance) {
srv_count++;
- }
a = a->next;
}
if (!srv_count) {
@@ -1688,9 +1686,7 @@ static void _mdns_send_final_bye(bool include_ip)
size_t i = 0;
a = _mdns_server->services;
while (a) {
- if (!a->service->instance) {
services[i++] = a;
- }
a = a->next;
}
_mdns_send_bye(services, srv_count, include_ip);
@@ -1699,7 +1695,7 @@ static void _mdns_send_final_bye(bool include_ip)
/**
* @brief Stop the responder on all services without instance
*/
-static void _mdns_send_bye_all_pcbs_no_instance(void)
+static void _mdns_send_bye_all_pcbs_no_instance(bool include_ip)
{
size_t srv_count = 0;
mdns_srv_item_t * a = _mdns_server->services;
@@ -1721,7 +1717,7 @@ static void _mdns_send_bye_all_pcbs_no_instance(void)
}
a = a->next;
}
- _mdns_send_bye(services, srv_count, false);
+ _mdns_send_bye(services, srv_count, include_ip);
}
/**
@@ -3728,14 +3724,14 @@ static void _mdns_execute_action(mdns_action_t * action)
action->data.sys_event.event_id, action->data.sys_event.interface);
break;
case ACTION_HOSTNAME_SET:
- _mdns_send_final_bye(true);
+ _mdns_send_bye_all_pcbs_no_instance(true);
free((char*)_mdns_server->hostname);
_mdns_server->hostname = action->data.hostname;
_mdns_restart_all_pcbs();
break;
case ACTION_INSTANCE_SET:
- _mdns_send_bye_all_pcbs_no_instance();
+ _mdns_send_bye_all_pcbs_no_instance(false);
free((char*)_mdns_server->instance);
_mdns_server->instance = action->data.instance;
_mdns_restart_all_pcbs_no_instance();
|
ci: Add extra_default_build_targets logic to check_build_test_rules.py | @@ -209,8 +209,10 @@ def check_test_scripts(
paths: List[str],
exclude_dirs: Optional[List[str]] = None,
bypass_check_test_targets: Optional[List[str]] = None,
+ extra_default_build_targets: Optional[List[str]] = None,
) -> None:
from idf_build_apps import App, find_apps
+ from idf_build_apps.constants import SUPPORTED_TARGETS
# takes long time, run only in CI
# dict:
@@ -323,6 +325,7 @@ def check_test_scripts(
manifest_files=[
str(p) for p in Path(IDF_PATH).glob('**/.build-test-rules.yml')
],
+ default_build_targets=SUPPORTED_TARGETS + extra_default_build_targets,
)
)
exit_code = 0
@@ -467,17 +470,17 @@ if __name__ == '__main__':
else:
_exclude_dirs = []
- extra_default_build_targets: List[str] = []
- bypass_check_test_targets: List[str] = []
+ extra_default_build_targets_list: List[str] = []
+ bypass_check_test_targets_list: List[str] = []
if arg.config:
with open(arg.config) as fr:
configs = yaml.safe_load(fr)
if configs:
- extra_default_build_targets = (
+ extra_default_build_targets_list = (
configs.get('extra_default_build_targets') or []
)
- bypass_check_test_targets = (
+ bypass_check_test_targets_list = (
configs.get('bypass_check_test_targets') or []
)
@@ -485,11 +488,12 @@ if __name__ == '__main__':
check_readme(
list(check_dirs),
exclude_dirs=_exclude_dirs,
- extra_default_build_targets=extra_default_build_targets,
+ extra_default_build_targets=extra_default_build_targets_list,
)
elif arg.action == 'check-test-scripts':
check_test_scripts(
list(check_dirs),
exclude_dirs=_exclude_dirs,
- bypass_check_test_targets=bypass_check_test_targets,
+ bypass_check_test_targets=bypass_check_test_targets_list,
+ extra_default_build_targets=extra_default_build_targets_list,
)
|
dbus: removed todo for flags | @@ -37,8 +37,6 @@ static int dbusToFlags (int dbus)
{
flags |= ELEKTRA_IO_WRITABLE;
}
- // from docs: "all watches implicitly include a watch for hangups, errors, and other exceptional conditions."
- // TODO add DBUS_WATCH_ERROR and DBUS_WATCH_ERROR to flags
return flags;
}
|
OcLog.c: more conversion mismatches fixed | @@ -111,7 +111,7 @@ OcLogAddEntry (
UINT32 LineLength;
PLATFORM_DATA_HEADER *Entry;
UINTN KeySize;
- UINTN DataSize;
+ UINT32 DataSize;
UINT32 TotalSize;
Private = OC_LOG_PRIVATE_DATA_FROM_OC_LOG_THIS (OcLog);
@@ -184,7 +184,7 @@ OcLogAddEntry (
if (Private->DataHub != NULL) {
KeySize = (L_STR_LEN (OC_LOG_VARIABLE_NAME) + 6) * sizeof (CHAR16);
DataSize = TimingLength + LineLength + 1;
- TotalSize = sizeof (*Entry) + KeySize + DataSize;
+ TotalSize = KeySize + DataSize + sizeof (*Entry);
Entry = AllocatePool (TotalSize);
|
removed INT64_C define
No longer used. | @@ -146,11 +146,6 @@ typedef unsigned long long libssh2_uint64_t;
typedef long long libssh2_int64_t;
#endif
-#ifndef INT64_C
-#define INT64_C(x) x ## I64
-#endif
-
-
#ifdef WIN32
typedef SOCKET libssh2_socket_t;
#define LIBSSH2_INVALID_SOCKET INVALID_SOCKET
|
Return SOTER_INVALID_PARAMETER when the caller tries to reuse a hash context after finalising
Although it did not crash for OpenSSL backend before, this is to bring it in
line with BoringSSL and to make error codes consistent. | @@ -64,6 +64,11 @@ soter_status_t soter_hash_update(soter_hash_ctx_t *hash_ctx, const void *data, s
return SOTER_INVALID_PARAMETER;
}
+ if (!EVP_MD_CTX_md(hash_ctx->evp_md_ctx))
+ {
+ return SOTER_INVALID_PARAMETER;
+ }
+
if (EVP_DigestUpdate(hash_ctx->evp_md_ctx, data, length))
{
return SOTER_SUCCESS;
@@ -83,6 +88,11 @@ soter_status_t soter_hash_final(soter_hash_ctx_t *hash_ctx, uint8_t* hash_value,
return SOTER_INVALID_PARAMETER;
}
+ if (!EVP_MD_CTX_md(hash_ctx->evp_md_ctx))
+ {
+ return SOTER_INVALID_PARAMETER;
+ }
+
md_length = (size_t)EVP_MD_CTX_size(hash_ctx->evp_md_ctx);
if (!hash_value || (md_length > *hash_length))
|
Resolve merge conflict in aws_dev_mode_key_provisioning.c | @@ -1155,56 +1155,7 @@ CK_RV xProvisionDevice( CK_SESSION_HANDLE xSession,
( CK_TRUE == xKeyPairGenerationMode ) ) &&
( CK_FALSE == xImportedPrivateKey ) )
{
-<<<<<<< HEAD
- configPRINTF( ( "Warning: no client certificate is available. Please see https://aws.amazon.com/freertos/getting-started/.\r\n" ) );
- prvWriteHexBytesToConsole( xSession,
- "Device public key",
- xProvisionedState.pucDerPublicKey,
- xProvisionedState.ulDerPublicKeyLength );
-
- /* Delay since the downstream demo code is likely to fail quickly if
- * provisioning isn't complete, and device certificate creation in the
- * lab may depend on the developer obtaining the public key. */
- vTaskDelay( pdMS_TO_TICKS( 100 ) );
- }
-
- /* Free memory. */
- if( NULL != xProvisionedState.pucDerPublicKey )
- {
- vPortFree( xProvisionedState.pucDerPublicKey );
- }
-
- return xResult;
-}
-
-/*-----------------------------------------------------------*/
-
-/* Perform common token initialization as per the PKCS #11 standard. For
- * compatibility reasons, this may include authentication with a static PIN. */
-CK_RV xInitializePkcs11Token()
-{
- CK_RV xResult;
-
- CK_FUNCTION_LIST_PTR pxFunctionList;
- CK_SLOT_ID * pxSlotId = NULL;
- CK_ULONG xSlotCount;
- CK_FLAGS xTokenFlags = 0;
- CK_TOKEN_INFO_PTR pxTokenInfo = NULL;
-
- xResult = C_GetFunctionList( &pxFunctionList );
-
- if( xResult == CKR_OK )
- {
- xResult = xInitializePKCS11();
- }
-
- if( ( xResult == CKR_OK ) || ( xResult == CKR_CRYPTOKI_ALREADY_INITIALIZED ) )
- {
- xResult = xGetSlotList( &pxSlotId, &xSlotCount );
- }
-=======
configPRINTF( ( "Warning: the client certificate should be updated. Please see https://aws.amazon.com/freertos/getting-started/.\r\n" ) );
->>>>>>> release
if( NULL != xProvisionedState.pcIdentifier )
{
|
network: make async dns query use TCP socket instead of UDP | @@ -508,7 +508,6 @@ static struct addrinfo *flb_net_translate_ares_addrinfo(struct ares_addrinfo *in
if (1 == failure_detected) {
if (NULL != output) {
flb_net_free_translated_addrinfo(output);
-
output = NULL;
}
}
@@ -562,6 +561,7 @@ static int flb_net_ares_sock_create_callback(ares_socket_t socket_fd,
int type,
void *userdata)
{
+ int ret;
struct flb_dns_lookup_context *context;
int event_mask;
@@ -577,7 +577,8 @@ static int flb_net_ares_sock_create_callback(ares_socket_t socket_fd,
event_mask = MK_EVENT_READ;
- /* c-ares doesn't use a macro for the socket type so :
+ /*
+ * c-ares doesn't use a macro for the socket type so :
* 1 means it's a TCP socket
* 2 means it's a UDP socket
*
@@ -589,16 +590,22 @@ static int flb_net_ares_sock_create_callback(ares_socket_t socket_fd,
event_mask |= MK_EVENT_WRITE;
}
- mk_event_add(context->event_loop, socket_fd, FLB_ENGINE_EV_CUSTOM,
+ ret = mk_event_add(context->event_loop, socket_fd, FLB_ENGINE_EV_CUSTOM,
event_mask, &context->response_event);
+ if (ret != 0) {
+ return -1;
+ }
return ARES_SUCCESS;
}
-struct flb_dns_lookup_context *flb_net_dns_lookup_context_create(struct mk_event_loop *event_loop, struct flb_coro *coroutine)
+struct flb_dns_lookup_context *flb_net_dns_lookup_context_create(struct mk_event_loop *evl,
+ struct flb_coro *coroutine)
{
- struct flb_dns_lookup_context *context;
int result;
+ int optmask = 0;
+ struct ares_options opts = {0};
+ struct flb_dns_lookup_context *context;
/* The initialization order here is important since it makes it easier to handle
* failures
@@ -609,7 +616,13 @@ struct flb_dns_lookup_context *flb_net_dns_lookup_context_create(struct mk_event
return NULL;
}
- result = ares_init((ares_channel *)&context->ares_channel);
+ /* c-ares options: make sure it uses TCP and limit number of tries to 2 */
+ optmask = ARES_OPT_FLAGS;
+ opts.flags = ARES_FLAG_USEVC;
+ opts.tries = 2;
+
+ result = ares_init_options((ares_channel *) &context->ares_channel,
+ &opts, optmask);
if (ARES_SUCCESS != result) {
flb_free(context);
@@ -617,23 +630,20 @@ struct flb_dns_lookup_context *flb_net_dns_lookup_context_create(struct mk_event
}
context->ares_socket_created = 0;
- context->event_loop = event_loop;
+ context->event_loop = evl;
context->coroutine = coroutine;
context->finished = 0;
ares_set_socket_callback(context->ares_channel,
flb_net_ares_sock_create_callback,
context);
-
return context;
}
void flb_net_dns_lookup_context_destroy(struct flb_dns_lookup_context *context)
{
mk_event_del(context->event_loop, &context->response_event);
-
ares_destroy(context->ares_channel);
-
flb_free(context);
}
|
UName: Remove `nodoc` from `info/status` | - infos/provides = storage/info
- infos/needs =
- infos/placements = getstorage setstorage
-- infos/status = maintained unittest shelltest nodep readonly limited nodoc concept
+- infos/status = maintained unittest shelltest nodep readonly limited concept
- infos/description = Includes uname information into the key database.
## Introduction
|
Update lv_cb.c
fix bug: if a press starts outside the checkbox and is then moved onto it, the checkbox's state will change. | @@ -83,6 +83,7 @@ lv_obj_t * lv_cb_create(lv_obj_t * par, const lv_obj_t * copy)
lv_btn_set_layout(new_cb, LV_LAYOUT_ROW_M);
lv_btn_set_fit(new_cb, true, true);
lv_btn_set_toggle(new_cb, true);
+ lv_obj_set_protect(new_cb, LV_PROTECT_PRESS_LOST);
/*Set the default styles*/
lv_theme_t * th = lv_theme_get_current();
|
turn off failing unit tests on Mac | @@ -187,7 +187,7 @@ matrix:
compiler: gcc-4.8
script:
- make test
- - make utest
+ # make utest
before_install:
- brew update
- brew install fftw gcc48 homebrew/science/openblas
|
imageinput: respect canUpload
fixes urbit/landscape#893 | @@ -14,8 +14,20 @@ export type ImageInputProps = Parameters<typeof Box>[0] & {
placeholder?: string;
};
-const prompt = (field, focus, uploading, meta, clickUploadButton) => {
- if (!focus && !field.value && !uploading && meta.error === undefined) {
+const prompt = (
+ field,
+ focus,
+ uploading,
+ meta,
+ clickUploadButton,
+ canUpload
+) => {
+ if (
+ !focus &&
+ !field.value &&
+ !uploading &&
+ meta.error === undefined
+ ) {
return (
<Text
color='black'
@@ -26,17 +38,22 @@ const prompt = (field, focus, uploading, meta, clickUploadButton) => {
style={{ pointerEvents: 'none' }}
onSelect={e => e.preventDefault}
>
- Paste a link here, or{' '}
+ Paste a link here
+ {canUpload ? (
+ <>
+ , or{' '}
<Text
- fontWeight='500'
- cursor='pointer'
- color='blue'
+ fontWeight="500"
+ cursor="pointer"
+ color="blue"
style={{ pointerEvents: 'all' }}
onClick={clickUploadButton}
>
upload
</Text>{' '}
a file
+ </>
+ ) : null}
</Text>
);
}
@@ -157,7 +174,7 @@ export function ImageInput(props: ImageInputProps): ReactElement {
</Label>
) : null}
<Row mt={2} alignItems="flex-end" position='relative' width='100%'>
- {prompt(field, focus, uploading, meta, clickUploadButton)}
+ {prompt(field, focus, uploading, meta, clickUploadButton, canUpload)}
{clearButton(field, uploading, clearEvt)}
{uploadingStatus(uploading, meta)}
{errorRetry(meta, focus, uploading, clickUploadButton)}
|
Update uid/gid and remove cron job | @@ -70,16 +70,8 @@ make
%install
make DESTDIR=%{buildroot} install
-%{__mkdir_p} %{buildroot}%{_sysconfdir}/cron.daily
%{__mkdir_p} %{buildroot}%{_localstatedir}/log/%{pname}
cp %{SOURCE1} %{buildroot}%{_sysconfdir}
-%{__cat} << EOF > %{buildroot}%{_sysconfdir}/cron.daily/%{pname}
-#!/bin/sh
-
-# Daily cleanup script for %{pname}
-%{install_path}/sbin/ws_expirer -c > %{_localstatedir}/log/%{pname}/expirer-`date +%y.%m.%d`
-find %{_localstatedir}/log/%{pname} -type f -ctime +90 -exec rm {} \;
-EOF
%{__mkdir} -p %{buildroot}/%{OHPC_MODULES}/%{pname}
%{__cat} << EOF > %{buildroot}/%{OHPC_MODULES}/%{pname}/%{version}
@@ -118,7 +110,6 @@ EOF
%dir %{_sysconfdir}/cron.daily
%{OHPC_ADMIN}/%{pname}
%{OHPC_MODULES}/%{pname}
-%{_sysconfdir}/cron.daily/%{pname}
%attr(4755, root, root) %{install_path}/bin/ws_allocate
%attr(4755, root, root) %{install_path}/bin/ws_release
%attr(4755, root, root) %{install_path}/bin/ws_restore
@@ -127,8 +118,8 @@ EOF
%pre
# provide specific uid/gid to ensure that it is the same across the cluster
/usr/bin/getent group hpcws >/dev/null 2>&1 || \
- /usr/sbin/groupadd -r hpcws -g 85
+ /usr/sbin/groupadd -r hpcws -g 203
/usr/bin/getent passwd hpcws >/dev/null 2>&1 || \
/usr/sbin/useradd -c "HPC Workspace manager" \
- -d %{_sysconfdir} -g hpcws -s /sbin/nologin -r hpcws -u 85
+ -d %{_sysconfdir} -g hpcws -s /sbin/nologin -r hpcws -u 203
exit 0
|
RP2: Update NINA bsp. | @@ -34,21 +34,30 @@ int nina_bsp_init()
gpio_init(WIFI_GPIO0_PIN);
gpio_set_dir(WIFI_GPIO0_PIN, GPIO_OUT);
- gpio_put(WIFI_GPIO0_PIN, 1);
+ gpio_init(WIFI_SCLK_PIN);
+ gpio_set_function(WIFI_SCLK_PIN, GPIO_FUNC_SPI);
+
+ gpio_init(WIFI_MOSI_PIN);
+ gpio_set_function(WIFI_MOSI_PIN, GPIO_FUNC_SPI);
+
+ gpio_init(WIFI_MISO_PIN);
+ gpio_set_function(WIFI_MISO_PIN, GPIO_FUNC_SPI);
+
+ // Reset module in WiFi mode
gpio_put(WIFI_CS_PIN, 1);
+ gpio_put(WIFI_GPIO0_PIN, 1);
gpio_put(WIFI_RST_PIN, 0);
- mp_hal_delay_ms(10);
+ mp_hal_delay_ms(100);
+
gpio_put(WIFI_RST_PIN, 1);
mp_hal_delay_ms(750);
- gpio_put(WIFI_GPIO0_PIN, 0);
+ gpio_put(WIFI_GPIO0_PIN, 1);
+ // Initialize SPI.
spi_init(WIFI_SPI, 8 * 1000 * 1000);
-
- gpio_set_function(WIFI_SCLK_PIN, GPIO_FUNC_SPI);
- gpio_set_function(WIFI_MOSI_PIN, GPIO_FUNC_SPI);
-
+ spi_set_format(WIFI_SPI, 8, SPI_CPOL_0, SPI_CPHA_0, SPI_MSB_FIRST);
return 0;
}
@@ -60,7 +69,7 @@ int nina_bsp_reset()
int nina_bsp_spi_slave_select(uint32_t timeout)
{
// Wait for ACK to go low.
- for (mp_uint_t start = mp_hal_ticks_ms(); gpio_get(WIFI_ACK_PIN) != 0; mp_hal_delay_ms(1)) {
+ for (mp_uint_t start = mp_hal_ticks_ms(); gpio_get(WIFI_ACK_PIN) == 1; mp_hal_delay_ms(1)) {
if ((mp_hal_ticks_ms() - start) >= timeout) {
return -1;
}
@@ -88,15 +97,17 @@ int nina_bsp_spi_slave_deselect()
int nina_bsp_spi_transfer(const uint8_t *tx_buf, uint8_t *rx_buf, uint32_t size)
{
+ int rsize = 0;
+
gpio_put(WIFI_CS_PIN, 0);
if (tx_buf && rx_buf) {
- spi_write_read_blocking(WIFI_SPI, tx_buf, rx_buf, size);
+ rsize = spi_write_read_blocking(WIFI_SPI, tx_buf, rx_buf, size);
} else if (tx_buf) {
- spi_write_blocking(WIFI_SPI, tx_buf, size);
+ rsize = spi_write_blocking(WIFI_SPI, tx_buf, size);
} else if (rx_buf) {
- spi_read_blocking(WIFI_SPI, 0x00, rx_buf, size);
+ rsize = spi_read_blocking(WIFI_SPI, 0xFF, rx_buf, size);
}
gpio_put(WIFI_CS_PIN, 1);
- return 0;
+ return ((rsize == size) ? 0 : -1);
}
#endif //MICROPY_PY_NINAW10
|
Remove some obsolete basic tests | @@ -143,38 +143,6 @@ const char* test_basic_status_is_error() {
return NULL;
}
-const char* test_basic_status_strings() {
- CHECK_FOCUS(__func__);
- const char* s1 = wuffs_base__error__bad_wuffs_version;
- const char* t1 = "#base: bad wuffs version";
- if (strcmp(s1, t1)) {
- RETURN_FAIL("got \"%s\", want \"%s\"", s1, t1);
- }
- const char* s2 = wuffs_base__suspension__short_write;
- const char* t2 = "$base: short write";
- if (strcmp(s2, t2)) {
- RETURN_FAIL("got \"%s\", want \"%s\"", s2, t2);
- }
- const char* s3 = wuffs_gif__error__bad_header;
- const char* t3 = "#gif: bad header";
- if (strcmp(s3, t3)) {
- RETURN_FAIL("got \"%s\", want \"%s\"", s3, t3);
- }
- return NULL;
-}
-
-const char* test_basic_status_used_package() {
- CHECK_FOCUS(__func__);
- // The function call here is from "std/gif" but the argument is from
- // "std/lzw". The former package depends on the latter.
- const char* s0 = wuffs_lzw__error__bad_code;
- const char* t0 = "#lzw: bad code";
- if (strcmp(s0, t0)) {
- RETURN_FAIL("got \"%s\", want \"%s\"", s0, t0);
- }
- return NULL;
-}
-
const char* test_basic_sub_struct_initializer() {
CHECK_FOCUS(__func__);
wuffs_gif__decoder dec;
@@ -2269,8 +2237,6 @@ proc tests[] = {
test_basic_bad_wuffs_version, //
test_basic_initialize_not_called, //
test_basic_status_is_error, //
- test_basic_status_strings, //
- test_basic_status_used_package, //
test_basic_sub_struct_initializer, //
test_wuffs_gif_call_interleaved, //
|
Rename join_path to join | @@ -21,7 +21,7 @@ module Foreign.Lua.Module.Paths (
, has_extension
, is_absolute
, is_relative
- , join_path
+ , join
, normalise
, split_directories
, take_directory
@@ -83,7 +83,7 @@ functions =
, ("has_extension", has_extension)
, ("is_absolute", is_absolute)
, ("is_relative", is_relative)
- , ("join_path", join_path)
+ , ("join", join)
, ("normalise", normalise)
, ("split_directories", split_directories)
, ("take_directory", take_directory)
@@ -124,8 +124,8 @@ is_relative = toHsFnPrecursor Path.isRelative
#? "Checks whether a path is relative or fixed to a root."
-- | See @System.FilePath.joinPath@
-join_path :: HaskellFunction
-join_path = toHsFnPrecursor Path.joinPath
+join :: HaskellFunction
+join = toHsFnPrecursor Path.joinPath
<#> Parameter
{ parameterPeeker = peekList peekFilePath
, parameterDoc = ParameterDoc
|
garden: Use 'latest' version by default; chop any "lily-" prefix.
Many packages have a valid name for import if "lily-" is removed as a
prefix. It's also brand recognition.
Garden also now claims to always be drawing the latest version,
instead of version -1. | @@ -28,14 +28,17 @@ from docopt import docopt
fields = ["Author", "Description"]
-def lily_github(repo, operator="=", version="-1"):
+def lily_github(repo, operator="=", version="latest"):
'''Fetches a given repository from GitHub with a given version you
can control your versioning with the operator the version should
match the version in the Github release'''
print("Fetching Repository: " + repo + " - Version " + version)
cwd = os.getcwd()
- repo_dir = "packages/" + repo.split("/")[1]
+ repo_basename = repo.split("/")[1]
+ if repo_basename.startswith("lily-"):
+ repo_basename = repo_basename[5:]
+ repo_dir = "packages/{0}".format(repo_basename)
repo_name = "git://github.com/{0}".format(repo)
command = ["git", "clone", "--depth", "1", repo_name, repo_dir]
subprocess.call(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -109,7 +112,7 @@ def perform_install(args):
lily_parse_file(relfile)
else:
source = args.get("<source>")
- version = args.get("<version>") or "-1"
+ version = args.get("<version>") or "latest"
operator = args.get("<operator>") or "="
if provider == "github":
|
sway.5: make formatting more consistent | @@ -179,8 +179,8 @@ set|plus|minus <amount>
*layout* toggle [split|all]
Cycles the layout mode of the focused container though a preset list of
layouts. If no argument is given, then it cycles through stacking, tabbed
- and the last split layout. If "split" is given, then it cycles through
- splith and splitv. If "all" is given, then it cycles through every layout.
+ and the last split layout. If _split_ is given, then it cycles through
+ splith and splitv. If _all_ is given, then it cycles through every layout.
*layout* toggle [split|tabbed|stacking|splitv|splith] [split|tabbed|stacking|splitv|splith]...
Cycles the layout mode of the focused container through a list of layouts.
@@ -230,7 +230,7 @@ set|plus|minus <amount>
Moves the focused container to the specified mark.
*move* [--no-auto-back-and-forth] [container|window] [to] workspace [number] <name>
- Moves the focused container to the specified workspace. The string "number"
+ Moves the focused container to the specified workspace. The string _number_
is optional and is used to match a workspace with the same number, even if
it has a different name.
@@ -826,7 +826,7 @@ The default colors are:
*workspace_auto_back_and_forth* yes|no
When _yes_, repeating a workspace switch command will switch back to the
prior workspace. For example, if you are currently on workspace 1,
- switch to workspace 2, then invoke the "workspace 2" command again, you
+ switch to workspace 2, then invoke the *workspace 2* command again, you
will be returned to workspace 1. Default is _no_.
# CRITERIA
@@ -911,8 +911,8 @@ The following attributes may be matched with:
currently focused window.
*urgent*
- Compares the urgent state of the window. Can be "first", "last", "latest",
- "newest", "oldest" or "recent".
+ Compares the urgent state of the window. Can be _first_, _last_, _latest_,
+ _newest_, _oldest_ or _recent_.
*window_role*
Compare against the window role (WM_WINDOW_ROLE). Can be a regular
|
docs - remove persistent table information. | @@ -299,15 +299,6 @@ FROM gp_master_mirroring;</codeblock></p>
database:<codeblock>gpcheckcat -O</codeblock></entry>
<entry>Run repair scripts for any issues detected.</entry>
</row>
- <row>
- <entry>Run a persistent table catalog check.<p>Recommended
- frequency: monthly</p><p>Severity: CRITICAL</p></entry>
- <entry>During a downtime, with no users on the system, run the
- Greenplum
- <codeph>gpcheckcat</codeph> utility in each
- database:<codeblock>gpcheckcat -R persistent</codeblock></entry>
- <entry>Run repair scripts for any issues detected.</entry>
- </row>
<row>
<entry>Check for <codeph>pg_class</codeph> entries that have no
corresponding pg_<codeph>attribute</codeph> entry.<p>Recommended
|
add an example to the maya scripting docs
of how to assign custom unique names to output meshes | @@ -300,7 +300,51 @@ global proc postSync(string $assetNode, int $syncOnlyAttributes, int $syncOnlyOu
}
@endverbatim
<br>
+@verbatim
+// This proc shows how to give a unique custom name to all your output meshes
+global proc postSyncRename(string $assetNode, int $syncOnlyAttributes, int $syncOnlyOutputs)
+{
+ // Here's an example that looks for output meshes and renames the mesh and its parent transform
+ // The # in the name will force it to resolve to a unique node name
+
+ if($syncOnlyAttributes)
+ return;
+
+ string $baseName = "customName#";
+
+ string $outputObjCon[] = `listConnections -p on ($assetNode + ".outputObjects")`;
+ for($con in $outputObjCon) {
+ string $src = `connectionInfo -sfd $con`;
+ // if a mesh output has auxiliary nodes or downstream history
+ // need a temporary mesh shape to backstop the history
+ if(endsWith($src, "outputPartMeshData")) {
+ string $outNode = plugNode($con);
+
+ if(nodeType($outNode) == "mesh") {
+ // renaming the Xform will rename the shape as well;
+ string $outXforms[] = `listRelatives -parent -path $outNode`;
+ rename $outXforms[0] $baseName;
+ } else {
+ // if there were groups on the output mesh
+ // there will be groupParts nodes in between the hda and the output mesh
+ // list connections returns the name of a non-dag node
+ // and the shortest unique substring of the path of dag node
+ // since we didn't specify shape, we get the transform of the mesh
+ // which is what we wanted to rename anyway
+ while(nodeType($outNode) == "groupParts") {
+ string $cons[] = `listConnections ($outNode + ".outputGeometry")`;
+ $outNode = $cons[0];
+ }
+ if(nodeType($outNode) == "transform") {
+ rename $outNode $baseName;
+ }
+ }
+ }
+ }
+}
+@endverbatim
+<br>
@section Maya_Scripting_Python Python
import maya.cmds as cmds
|
[mod_authn_dbi] copy strings before escaping
dbi_conn_escape_string_copy() requires a '\0'-terminated string.
While that is currently the case for strings in http_auth_info_t,
that will soon change, so consumers must use ai->username with ai->ulen,
and ai->realm with ai->rlen | @@ -416,6 +416,7 @@ mod_authn_dbi_password_cmp (const char *userpw, unsigned long userpwlen, http_au
static buffer *
mod_authn_dbi_query_build (buffer * const sqlquery, dbi_config * const dbconf, http_auth_info_t * const ai)
{
+ char buf[1024];
buffer_clear(sqlquery);
int qcount = 0;
for (char *b = dbconf->sqlquery->ptr, *d; *b; b = d+1) {
@@ -427,10 +428,22 @@ mod_authn_dbi_query_build (buffer * const sqlquery, dbi_config * const dbconf, h
const char *v;
switch (++qcount) {
case 1:
- v = ai->username;
+ if (ai->ulen < sizeof(buf)) {
+ memcpy(buf, ai->username, ai->ulen);
+ buf[ai->ulen] = '\0';
+ v = buf;
+ }
+ else
+ return NULL;
break;
case 2:
- v = ai->realm;
+ if (ai->rlen < sizeof(buf)) {
+ memcpy(buf, ai->realm, ai->rlen);
+ buf[ai->rlen] = '\0';
+ v = buf;
+ }
+ else
+ return NULL;
break;
case 3:
if (ai->dalgo & HTTP_AUTH_DIGEST_SHA256)
|
Add while loop netconn client to test manual TCP receive
esp_ping("majerle.eu", &ping_time, NULL, NULL, 1);
printf("Ping time: %d\r\n", (int)ping_time);
-
/*
* Check if device has set IP address
*
@@ -262,8 +261,8 @@ main_thread(void* arg) {
// printf("Device IP: %d.%d.%d.%d; is DHCP: %d\r\n", (int)ip.ip[0], (int)ip.ip[1], (int)ip.ip[2], (int)ip.ip[3], (int)is_dhcp);
//}
- esp_sta_setip(&dev_ip, NULL, NULL, NULL, NULL, 1);
- esp_dhcp_configure(1, 0, 1, NULL, NULL, 1);
+ //esp_sta_setip(&dev_ip, NULL, NULL, NULL, NULL, 1);
+ //esp_dhcp_configure(1, 0, 1, NULL, NULL, 1);
/* Start server on port 80 */
//http_server_start();
@@ -283,6 +282,37 @@ main_thread(void* arg) {
/* Notify user */
esp_sys_thread_create(NULL, "input", (esp_sys_thread_fn)input_thread, NULL, 0, ESP_SYS_THREAD_PRIO);
+ {
+ espr_t res;
+ esp_pbuf_p pbuf;
+ esp_netconn_p client;
+
+ client = esp_netconn_new(ESP_NETCONN_TYPE_TCP);
+ if (client != NULL) {
+ while (1) {
+ res = esp_netconn_connect(client, "10.57.218.181", 123);
+ if (res == espOK) { /* Are we successfully connected? */
+ printf("Connected to host\r\n");
+ do {
+ res = esp_netconn_receive(client, &pbuf);
+ if (res == espCLOSED) { /* Was the connection closed? This can be checked by return status of receive function */
+ printf("Connection closed by remote side...\r\n");
+ break;
+ }
+ if (res == espOK && pbuf != NULL) {
+ printf("Received new data packet of %d bytes\r\n", (int)esp_pbuf_length(pbuf, 1));
+ esp_pbuf_free(pbuf);
+ pbuf = NULL;
+ }
+ } while (1);
+ } else {
+ printf("Cannot connect to remote host!\r\n");
+ }
+ }
+ }
+ esp_netconn_delete(client); /* Delete netconn structure */
+ }
+
/* Terminate thread */
esp_sys_thread_terminate(NULL);
}
|
sdl/surface: implement Set() for ARGB4444 | @@ -76,10 +76,12 @@ static inline SDL_Surface* SDL_CreateRGBSurfaceWithFormatFrom(void* pixels, int
#endif
*/
import "C"
-import "unsafe"
-import "reflect"
-import "image"
-import "image/color"
+import (
+ "image"
+ "image/color"
+ "reflect"
+ "unsafe"
+)
// Surface flags (internal use)
const (
@@ -543,6 +545,8 @@ func (surface *Surface) ColorModel() color.Model {
return BGR555Model
case PIXELFORMAT_BGR565:
return BGR565Model
+ case PIXELFORMAT_ARGB4444:
+ return ARGB4444Model
default:
panic("Not implemented yet")
}
@@ -643,6 +647,14 @@ func (surface *Surface) Set(x, y int, c color.Color) {
g := uint32(col.G) >> 3 & 0xFF
b := uint32(col.B) >> 3 & 0xFF
*buf = b<<10 | g<<5 | r
+ case PIXELFORMAT_ARGB4444:
+ col := surface.ColorModel().Convert(c).(color.RGBA)
+ buf := (*uint32)(unsafe.Pointer(&pix[i]))
+ a := uint32(col.A) >> 4 & 0x0F
+ r := uint32(col.R) >> 4 & 0x0F
+ g := uint32(col.G) >> 4 & 0x0F
+ b := uint32(col.B) >> 4 & 0x0F
+ *buf = a<<12 | r<<8 | g<<4 | b
default:
panic("Unknown pixel format!")
}
|
[SDL2] fix mouse locate 2 | @@ -131,7 +131,7 @@ void taskmng_rol(void) {
case SDL_BUTTON_LEFT:
if (menuvram != NULL)
{
- menubase_moving(e.button.x, e.button.y, 1);
+ menubase_moving(lmx, lmy, 1);
} else {
mousemng_buttonevent(&e.button);
}
|
provide more hints about how open source can work | <!--
-NOTE WELL
-A new issue should be about a bug or a feature!
+NOTE WELL:
+A new issue should be about a bug verified with a minimized example or about a new feature request!
-A question should go to the mailinglist at:
+Randomly opened "bug" or "feature" reports to debug your setup will be closed as "invalid".
+
+Questions should go to the mailinglist at:
[email protected]
The corresponding forum/archive is at:
https://groups.google.com/forum/#!forum/mod_auth_openidc
-->
-### Expected behaviour
+###### Environment
+
+- mod_auth_openidc version (e.g. 2.1.5)
+- Apache version (e.g. 2.4.8)
+- platform/distro (e.g. Ubuntu Xenial or Centos 7)
+
+###### Expected behaviour
+
+###### Actual behaviour
-### Actual behaviour
+###### Minimized example
+*Minimal, complete configuration that reproduces the behavior. Use the mailing list or get commercial support to discuss your own (full) setup.*
-### Minimized example that reproduces the behaviour
+###### Configuration and Log files
+*For the minimized example, possibly provided as attachments.*
\ No newline at end of file
|
Error out if getcwd fails. | #include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
+#include <errno.h>
#include "util.h"
#include "parse.h"
@@ -471,7 +472,8 @@ gengas(Node *file, FILE *fd)
}
popstab();
- getcwd(dir, sizeof dir);
+ if (!getcwd(dir, sizeof dir))
+ die("could not get cwd: %s\n", strerror(errno));
for (i = 0; i < file->file.nfiles; i++) {
path = file->file.files[i];
fprintf(fd, ".file %zd \"%s/%s\"\n", i + 1, dir, path);
|
Send protected ACK early from server once 1-RTT key is available | @@ -1966,7 +1966,15 @@ ssize_t ngtcp2_conn_write_pkt(ngtcp2_conn *conn, uint8_t *dest, size_t destlen,
conn->state = NGTCP2_CS_SERVER_WAIT_HANDSHAKE;
return nwrite;
case NGTCP2_CS_SERVER_WAIT_HANDSHAKE:
- return conn_write_server_handshake(conn, dest, destlen, 0, ts);
+ nwrite = conn_write_server_handshake(conn, dest, destlen, 0, ts);
+ if (nwrite != 0) {
+ return nwrite;
+ }
+ assert(conn->tx_ckm);
+ // We have 1-RTT key in this state, and sent all handshake data.
+ // Usually, we don't have any data to send here. So just send
+ // acks.
+ return conn_write_protected_ack_pkt(conn, dest, destlen, ts);
case NGTCP2_CS_SERVER_TLS_HANDSHAKE_FAILED:
return conn_write_server_handshake(conn, dest, destlen,
conn->strm0->tx_offset == 0, ts);
@@ -1995,10 +2003,12 @@ ssize_t ngtcp2_conn_write_ack_pkt(ngtcp2_conn *conn, uint8_t *dest,
case NGTCP2_CS_CLIENT_WAIT_HANDSHAKE:
case NGTCP2_CS_CLIENT_HANDSHAKE_ALMOST_FINISHED:
case NGTCP2_CS_SERVER_INITIAL:
- case NGTCP2_CS_SERVER_WAIT_HANDSHAKE:
nwrite = conn_write_handshake_ack_pkt(conn, dest, destlen,
NGTCP2_PKT_HANDSHAKE, ts);
break;
+ case NGTCP2_CS_SERVER_WAIT_HANDSHAKE:
+ assert(conn->tx_ckm);
+ // We have 1-RTT key in this state.
case NGTCP2_CS_POST_HANDSHAKE:
nwrite = conn_write_protected_ack_pkt(conn, dest, destlen, ts);
break;
|
record the network start time. | @@ -33,6 +33,9 @@ else:
network_start_time = time.time()
print "Network starts at {0}\n".format(time.ctime(network_start_time))
+ with open('network_start_time.txt','a') as f:
+ f.write(str(network_start_time))
+
# open socket
socket_handler = socket.socket(socket.AF_INET6,socket.SOCK_DGRAM)
socket_handler.bind(('',61617))
@@ -130,7 +133,7 @@ for mote,data in mote_counter_asn_cellusage.items():
# pdr
fig, ax = plt.subplots()
ax.bar(num_node_list,node_e2e_reliability)
-ax.set_xlabel('nodes')
+# ax.set_xlabel('nodes')
ax.set_xticks(num_node_list)
ax.set_xticklabels(node_list_label, rotation=90)
ax.set_ylabel('end-to-end reliability')
@@ -140,7 +143,7 @@ plt.clf()
# latency
fig, ax = plt.subplots()
ax.bar(num_node_list,node_e2e_avg_latency)
-ax.set_xlabel('nodes')
+# ax.set_xlabel('nodes')
ax.set_xticks(num_node_list)
ax.set_xticklabels(node_list_label, rotation=90)
ax.set_ylabel('end-to-end latency')
@@ -155,7 +158,7 @@ plt.text(len(num_node_list)/2, 0.77, 'LIM_NUMCELLSUSED_HIGH', color='red')
ax.plot(num_node_list,node_avg_cell_usage_LOW,'r-')
plt.text(len(num_node_list)/2, 0.20, 'LIM_NUMCELLSUSED_LOW', color='red')
ax.plot(num_node_list,node_avg_cell_usage,'b-^')
-ax.set_xlabel('nodes')
+# ax.set_xlabel('nodes')
ax.set_xticks(num_node_list)
ax.set_xticklabels(node_list_label, rotation=90)
ax.set_ylabel('node cell usage')
|
input: lower the first chunk of the file to be tested from 8kB to 1kB | @@ -447,7 +447,7 @@ static bool input_shouldReadNewFile(run_t* run) {
if (!run->staticFileTryMore) {
run->staticFileTryMore = true;
/* Start with a 8kB beginning of a file, increase the size in following iterations */
- input_setSize(run, HF_MIN(8192U, run->global->mutate.maxInputSz));
+ input_setSize(run, HF_MIN(1024U, run->global->mutate.maxInputSz));
return true;
}
|
esp32/machine_pwm: On deinit stop routing PWM signal to the pin.
Fixes issue | @@ -234,6 +234,7 @@ STATIC mp_obj_t esp32_pwm_deinit(mp_obj_t self_in) {
ledc_stop(PWMODE, chan, 0);
self->active = 0;
self->channel = -1;
+ gpio_matrix_out(self->pin, SIG_GPIO_OUT_IDX, false, false);
}
return mp_const_none;
}
|
fix(bar): stop animation when set with LV_ANIM_OFF | @@ -584,6 +584,11 @@ static void lv_bar_set_value_with_anim(lv_obj_t * obj, int32_t new_value, int32_
if(en == LV_ANIM_OFF) {
*value_ptr = new_value;
lv_obj_invalidate((lv_obj_t *)obj);
+
+ /*Stop the previous animation if it exists*/
+ lv_anim_del(anim_info, NULL);
+ /*Reset animation state*/
+ lv_bar_init_anim(obj, anim_info);
}
else {
/*No animation in progress -> simply set the values*/
|
Remove changes entry for RIPEMD160 in 3.2
It is already in 3.0.7. | @@ -24,10 +24,6 @@ OpenSSL 3.2
### Changes between 3.0 and 3.2 [xx XXX xxxx]
- * Added RIPEMD160 to the default provider.
-
- *Paul Dale*
-
* Add support for certificate compression (RFC8879), including
library support for Brotli and Zstandard compression.
|
Temporarily put GOP hack into DataHubTest
STATIC UINT32 FirmwareFeatures = 0xE00FE137;
STATIC UINT32 FirmwareFeaturesMask = 0xFF1FFF3F;
STATIC UINT32 CsrActiveConfig = 0;
+ STATIC CHAR8 SecurityMode[] = "full";
gRT->SetVariable (L"MLB", &gAppleVendorVariableGuid, Attributes, AsciiStrLen (Mlb), Mlb);
gRT->SetVariable (L"ROM", &gAppleVendorVariableGuid, Attributes, sizeof (Rom), Rom);
gRT->SetVariable (L"FirmwareFeatures", &gAppleVendorVariableGuid, Attributes, sizeof (FirmwareFeatures), &FirmwareFeatures);
gRT->SetVariable (L"FirmwareFeaturesMask", &gAppleVendorVariableGuid, Attributes, sizeof (FirmwareFeaturesMask), &FirmwareFeaturesMask);
gRT->SetVariable (L"csr-active-config", &gAppleBootVariableGuid, Attributes, sizeof (CsrActiveConfig), &CsrActiveConfig);
+ gRT->SetVariable (L"security-mode", &gAppleBootVariableGuid, Attributes, sizeof (SecurityMode), SecurityMode);
+ }
+ //TODO: this is done by AMF or ConSplitter normally, here it temporarily exists for legacy.
+ {
+ EFI_STATUS Status;
+ VOID *Gop;
+
+ Gop = NULL;
+ Status = gBS->HandleProtocol (gST->ConsoleOutHandle, &gEfiGraphicsOutputProtocolGuid, &Gop);
+
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_WARN, "Missing GOP on ConsoleOutHandle - %r\n", Status));
+ Status = gBS->LocateProtocol (&gEfiGraphicsOutputProtocolGuid, NULL, &Gop);
+
+ if (!EFI_ERROR (Status)) {
+ Status = gBS->InstallMultipleProtocolInterfaces (
+ &gST->ConsoleOutHandle,
+ &gEfiGraphicsOutputProtocolGuid,
+ Gop,
+ NULL
+ );
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_WARN, "Failed to install GOP on ConsoleOutHandle - %r\n", Status));
+ }
+ } else {
+ DEBUG ((DEBUG_WARN, "Missing GOP entirely - %r\n", Status));
+ }
+ }
}
return EFI_SUCCESS;
}
|
autotools: use PKG_CHECK_MODULES
Given that a `pkg-config` file is distributed by Criterion, it is better
to make use of it. | @@ -2,9 +2,9 @@ AC_INIT([Criterion Autotools Tests], [1.0], [[email protected]])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([foreign -Wall -Werror])
-AC_CHECK_LIB([criterion], [criterion_initialize], [], [
+PKG_CHECK_MODULES([CRITERION], [criterion], [], [
AC_MSG_ERROR([unable to find Criterion])
-], [])
+])
AC_PROG_AWK
AC_PROG_CC
|
Fixed project debug settings so you can connect to the board | <NotGenerated>0</NotGenerated>
<InvalidFlash>1</InvalidFlash>
</TargetStatus>
- <OutputDirectory>out\Objects\</OutputDirectory>
+ <OutputDirectory>.\out\Objects\</OutputDirectory>
<OutputName>aws_demos</OutputName>
<CreateExecutable>1</CreateExecutable>
<CreateLib>0</CreateLib>
<UseTargetDll>1</UseTargetDll>
<UseExternalTool>0</UseExternalTool>
<RunIndependent>0</RunIndependent>
- <UpdateFlashBeforeDebugging>1</UpdateFlashBeforeDebugging>
+ <UpdateFlashBeforeDebugging>0</UpdateFlashBeforeDebugging>
<Capability>1</Capability>
- <DriverSelection>4096</DriverSelection>
+ <DriverSelection>4099</DriverSelection>
</Flash1>
- <bUseTDR>1</bUseTDR>
- <Flash2>BIN\UL2CM3.DLL</Flash2>
+ <bUseTDR>0</bUseTDR>
+ <Flash2>BIN\CMSIS_AGDI.dll</Flash2>
<Flash3>"" ()</Flash3>
<Flash4>.\flash_e3.ini</Flash4>
<pFcarmOut></pFcarmOut>
- <pFcarmGrp></pFcarmGrp>
+ <pFcarmGrp>mediatek/sdk/driver/chip/mt7687</pFcarmGrp>
<pFcArmRoot></pFcArmRoot>
<FcArmLst>0</FcArmLst>
</Utilities>
|
schema compile BUGFIX unhandled error when compiling type in leaf
Fixes | @@ -3670,6 +3670,8 @@ lys_compile_node_leaf(struct lysc_ctx *ctx, struct lysp_node *node_p, struct lys
/* the dflt member is just filled to avoid getting the default value from the type */
leaf->dflt = (void*)leaf_p->dflt;
ret = lys_compile_node_type(ctx, node_p, &leaf_p->type, leaf);
+ leaf->dflt = NULL;
+ LY_CHECK_RET(ret);
if (leaf_p->dflt) {
struct ly_err_item *err = NULL;
@@ -3688,12 +3690,12 @@ lys_compile_node_leaf(struct lysc_ctx *ctx, struct lysp_node *node_p, struct lys
}
if (ret == LY_EINCOMPLETE) {
/* postpone default compilation when the tree is complete */
- LY_CHECK_GOTO(lysc_incomplete_dflts_add(ctx, node, leaf->dflt, leaf->dflt_mod), done);
+ LY_CHECK_RET(lysc_incomplete_dflts_add(ctx, node, leaf->dflt, leaf->dflt_mod));
/* but in general result is so far ok */
ret = LY_SUCCESS;
}
- LY_CHECK_GOTO(ret, done);
+ LY_CHECK_RET(ret);
leaf->flags |= LYS_SET_DFLT;
}
|
apollo2 timer - Simplify stop of first timer.
No need to remove and re-add the timer. Instead, just re-set the OCMP. | @@ -779,8 +779,7 @@ hal_timer_stop(struct hal_timer *timer)
if (reset_ocmp) {
timer = TAILQ_FIRST(&bsp_timer->hal_timer_q);
if (timer != NULL) {
- TAILQ_REMOVE(&bsp_timer->hal_timer_q, timer, link);
- hal_timer_start_at(timer, timer->expiry);
+ apollo2_timer_set_ocmp_at(bsp_timer, timer->expiry);
} else {
apollo2_timer_clear_ocmp(bsp_timer);
}
|
[core] save ptr to avoid static analyzer realloc warn | @@ -18,7 +18,8 @@ void *vector_resize(void *data, size_t elem_size, size_t *size, size_t used, siz
ck_assert(*size <= SIZE_MAX / elem_size);
const size_t total_size = elem_size * *size;
const size_t used_size = elem_size * used;
- data = realloc(data, total_size);
+ void *odata = data; /*(save ptr to avoid static analyzer realloc warn)*/
+ data = realloc(odata, total_size);
ck_assert(NULL != data);
/* clear new memory */
|
[CI] Reenable OS build | @@ -48,10 +48,10 @@ jobs:
python3 scripts/build_os_toolchain.py
echo "Toolchain built successfully"
- #- name: Build OS image
- # shell: bash
- # run: |
- # git submodule update --init --recursive
- # # TODO(PT): For nasm - Install deps can be removed once toolchain cache is rebuilt
- # python3 scripts/install_dependencies.py
- # python3 scripts/build_kernel.py
+ - name: Build OS image
+ shell: bash
+ run: |
+ git submodule update --init --recursive
+ # TODO(PT): For nasm - Install deps can be removed once toolchain cache is rebuilt
+ python3 scripts/install_dependencies.py
+ python3 scripts/build_kernel.py
|
Fix app limit cc test | @@ -9200,7 +9200,7 @@ int app_limit_cc_test_one(
uint64_t picoseq_per_byte_1 = (1000000ull * 8) / 1;
picoquic_test_tls_api_ctx_t* test_ctx = NULL;
picoquic_tp_t client_parameters;
- uint64_t cwin_limit = 100000;
+ uint64_t cwin_limit = 120000;
picoquic_connection_id_t initial_cid = { {0xac, 0xc1, 2, 3, 4, 5, 6, 7}, 8 };
int ret = 0;
@@ -9316,7 +9316,7 @@ int app_limit_cc_test()
23500000,
21000000,
21000000,
- 23500000 };
+ 25000000 };
int ret = 0;
for (size_t i = 0; i < sizeof(ccalgos) / sizeof(picoquic_congestion_algorithm_t*); i++) {
|
config.h: remove unused CONFIG_USB_PORT_POWER_IN_S3
CONFIG_USB_PORT_POWER_IN_S3 is nowhere to be found in the EC source.
BRANCH=none
TEST=verified there are no references to CONFIG_USB_PORT_POWER_IN_S3 | /* Support simple control of power to the device's USB ports */
#undef CONFIG_USB_PORT_POWER_DUMB
-/*
- * Support supplying USB power in S3, if the host leaves the port enabled when
- * entering S3.
- */
-#undef CONFIG_USB_PORT_POWER_IN_S3
-
/*
* Support smart power control to the device's USB ports, using
* dedicated power control chips. This potentially enables automatic
|
libbarrelfish: pass the fault type in the first argument
registers_set_initial(&thread->regs, thread,
(lvaddr_t)exception_handler_wrapper,
- stack_top, (lvaddr_t)cpuframe, 0,
- hack_arg, (lvaddr_t)addr);
+ stack_top, (lvaddr_t)cpuframe,
+ hack_arg, (lvaddr_t)addr, 0);
disp_resume(handle, &thread->regs);
}
|
[gpdemo] delete clusterConfigPostgresAddonsFile when cleaning demo cluster
`clusterConfigPostgresAddonsFile` should be removed when destroying
the demo cluster.
echo "Deleting clusterConfigFile"
rm -f clusterConfigFile
fi
+ if [ -f clusterConfigPostgresAddonsFile ]; then
+ echo "Deleting clusterConfigPostgresAddonsFile"
+ rm -f clusterConfigPostgresAddonsFile
+ fi
if [ -d ${DATADIRS} ]; then
echo "Deleting ${DATADIRS}"
rm -rf ${DATADIRS}
|
Exit with error code if jpm install fails. | @@ -761,7 +761,7 @@ int main(int argc, const char **argv) {
(unless no-deps (do-rule "install-deps"))
(do-rule "build")
(do-rule "install"))
- ([err] (print "Error building git repository dependency: " err)))
+ ([err f] (print "Error building git repository dependency: " err) (propagate err f)))
(os/cd olddir))
(defn install-rule
|
docker: fixup spelling in release notes | @@ -175,8 +175,8 @@ These notes are of interest for people developing Elektra:
- `clang-5.0` is now used for clang tests by the build system *(Lukas Winkler)*
- An additional build job on Ubuntu:xenial has been added *(Lukas Winkler)*
- Several improvments to the build system have been implemented *(Lukas Winkler)*:
- - Better Docker image handling
- - abort of previously queued but unfinished runs on new commits
+ - Better Docker image handling.
+ - Abort of previously queued but unfinished runs on new commits.
- Document how to locally replicate the Docker environment used for tests.
- <<TODO>>
- <<TODO>>
|
add new variable for RPATH without new-dtags | @@ -43,7 +43,8 @@ AOMP_CHECK_GIT_BRANCH=${AOMP_CHECK_GIT_BRANCH:-1}
AOMP_APPLY_ROCM_PATCHES=${AOMP_APPLY_ROCM_PATCHES:-1}
#Set common rpath for build scripts
-AOMP_ORIGIN_RPATH="-DCMAKE_SHARED_LINKER_FLAGS='-Wl,--disable-new-dtags' -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_RPATH=\$ORIGIN:\$ORIGIN/../lib:\$ORIGIN/../hsa/lib:\$ORIGIN/../../lib64:\$ORIGIN/../../hsa/lib:$AOMP_INSTALL_DIR/lib:$AOMP_INSTALL_DIR/hsa/lib"
+AOMP_ORIGIN_RPATH_NO_DTAGS="-DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_RPATH=\$ORIGIN:\$ORIGIN/../lib:\$ORIGIN/../hsa/lib:\$ORIGIN/../../lib64:\$ORIGIN/../../hsa/lib:$AOMP_INSTALL_DIR/lib:$AOMP_INSTALL_DIR/hsa/lib"
+AOMP_ORIGIN_RPATH="-DCMAKE_SHARED_LINKER_FLAGS='-Wl,--disable-new-dtags' $AOMP_ORIGIN_RPATH_NO_DTAGS"
CUDA=${CUDA:-/usr/local/cuda}
CUDAT=${CUDAT:-$CUDA/targets}
|
Fix add_tag after refactoring
Tested-by: Build Bot | @@ -78,7 +78,7 @@ void lcbtrace_span_add_tag_str_nocopy(lcbtrace_SPAN *span, const char *name, con
if (!span || name == NULL || value == NULL) {
return;
}
- span->add_tag(name, 0, value);
+ span->add_tag(name, 0, value, 0);
}
LIBCOUCHBASE_API
@@ -87,7 +87,7 @@ void lcbtrace_span_add_tag_str(lcbtrace_SPAN *span, const char *name, const char
if (!span || name == NULL || value == NULL) {
return;
}
- span->add_tag(name, 1, value);
+ span->add_tag(name, 1, value, 1);
}
LIBCOUCHBASE_API
|
Scale outgoing ACK Delay field | @@ -422,7 +422,6 @@ static int conn_ensure_ack_blks(ngtcp2_conn *conn, ngtcp2_frame **pfr,
static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr,
ngtcp2_tstamp ts) {
uint64_t first_pkt_num;
- ngtcp2_tstamp ack_delay;
uint64_t last_pkt_num;
ngtcp2_ack_blk *blk;
int initial = 1;
@@ -457,9 +456,11 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr,
ack = &fr->ack;
first_pkt_num = last_pkt_num = (*prpkt)->pkt_num;
- ack_delay = ts - (*prpkt)->tstamp;
ack->type = NGTCP2_FRAME_ACK;
+ ack->largest_ack = first_pkt_num;
+ ack->ack_delay =
+ (ts - (*prpkt)->tstamp) >> conn->local_settings.ack_delay_exponent;
ack->num_blks = 0;
prpkt = &(*prpkt)->next;
@@ -472,8 +473,6 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr,
if (initial) {
initial = 0;
- ack->largest_ack = first_pkt_num;
- ack->ack_delay = ack_delay;
ack->first_ack_blklen = first_pkt_num - last_pkt_num;
} else {
blk_idx = ack->num_blks++;
@@ -497,8 +496,6 @@ static int conn_create_ack_frame(ngtcp2_conn *conn, ngtcp2_frame **pfr,
}
if (initial) {
- ack->largest_ack = first_pkt_num;
- ack->ack_delay = ack_delay;
ack->first_ack_blklen = first_pkt_num - last_pkt_num;
} else if (first_pkt_num != last_pkt_num) {
blk_idx = ack->num_blks++;
|
Help kiln transition by inlining new types.
These should be removed after an update has happened. | {$dirk wire @tas} ::
{$ogre wire $@(@tas beam)} ::
{$merg wire @p @tas @p @tas case germ} ::
- {$perm wire ship desk path rite:clay} ::
+ {$perm wire ship desk path rite} ::
{$poke wire dock pear} ::
{$wipe wire @p $~} ::
{$wait wire @da} ::
q/path
r/cage
==
+ ++ rite ::tmp
+ $% {$r red/(unit rule)}
+ {$w wit/(unit rule)}
+ {$rw red/(unit rule) wit/(unit rule)}
+ ==
+ ++ rule {mod/?($black $white) who/(set whom)} ::tmp
+ ++ whom (each ship @ta) ::tmp
--
|_ moz/(list move)
++ abet :: resolve
|
console: Ignore var clobbered warning for argtable
+#pragma GCC diagnostic ignored "-Wclobbered"
+
/*******************************************************************************
* This file is part of the argtable3 library.
*
|