message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
sse2: work around GCC bug
Fixes | @@ -4546,7 +4546,10 @@ simde_mm_srl_epi64 (simde__m128i a, simde__m128i count) {
return simde_mm_setzero_si128();
const int s = (int) (count_.u64[0]);
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94488 */
+ #if !defined(HEDLEY_GCC_VERSION) || !defined(SIMDE_ARCH_AARCH64)
SIMDE__VECTORIZE
+ #endif
for (size_t i = 0 ; i < (sizeof(r_.u64) / sizeof(r_.u64[0])) ; i++) {
r_.u64[i] = a_.u64[i] >> s;
}
|
Performance remove emote_timer != 0 when checking script_ptr
I don't know how an emote could run and not be a part of script_ptr. | @@ -720,7 +720,7 @@ void SceneUpdateTimer_b()
}
// Don't update timer while script is running
- if (script_ptr != 0 || emote_timer != 0 || fade_running)
+ if (script_ptr != 0 || fade_running)
{
return;
}
@@ -845,7 +845,8 @@ static void SceneHandleInput()
}
// Can't move while script is running
- if (script_ptr != 0 || emote_timer != 0 || fade_running)
+ // (removed "emote_timer != 0" as it should only happen if script was set)
+ if (script_ptr != 0 || fade_running)
{
actors[0].moving = FALSE;
return;
|
fix overlay on wrong monitor | @@ -399,7 +399,12 @@ showoverlay() {
}
Client *c = selmon->overlay;
+
+ detach(c);
+ detachstack(c);
c->mon = selmon;
+ attach(c);
+ attachstack(c);
if (c->islocked) {
switch (selmon->overlaymode) {
@@ -2307,9 +2312,7 @@ movemouse(const Arg *arg)
occ = 0;
tagx = 0;
colorclient = 0;
- if (!(c = selmon->sel))
- return;
- if (c->isfullscreen && !c->isfakefullscreen) /* no support moving fullscreen windows by mouse */
+ if (!(c = selmon->sel) || (c->isfullscreen && !c->isfakefullscreen) || c == selmon->overlay)
return;
if (c == selmon->fullscreen) {
|
test-suite: reduce boost MPI tests by one | @@ -49,14 +49,14 @@ PKG="Boost/MPI"
assert_success
}
-@test "[$PKG] broadcast_test under resource manager ($rm/$LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
- test=broadcast_test
- if [ ! -x $test ];then
- flunk "$test does not exist"
- fi
- run_mpi_binary ./$test atest 2 16
- assert_success
-}
+## @test "[$PKG] broadcast_test under resource manager ($rm/$LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
+## test=broadcast_test
+## if [ ! -x $test ];then
+## flunk "$test does not exist"
+## fi
+## run_mpi_binary ./$test atest 2 16
+## assert_success
+## }
@test "[$PKG] ring_test under resource manager ($rm/$LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
test=ring_test
|
board/spherion/battery.c: Format with clang-format
BRANCH=none
TEST=none | @@ -88,10 +88,11 @@ int charger_profile_override(struct charge_state_data *curr)
temp_sensor_read(TEMP_SENSOR_CHARGER, &charger_temp);
charger_temp_c = K_TO_C(charger_temp);
if (charger_temp_c > 52)
- curr->requested_current = MIN(curr->requested_current,
- 2200);
+ curr->requested_current =
+ MIN(curr->requested_current, 2200);
else if (charger_temp_c > 48)
- curr->requested_current = MIN(curr->requested_current,
+ curr->requested_current =
+ MIN(curr->requested_current,
CONFIG_CHARGER_MAX_INPUT_CURRENT);
}
|
Fix a comment typo from the previous commit | @@ -295,7 +295,7 @@ outer_loop:
{
uint32_t n = (table_entry >> 4) & 0x0F;
#if !defined(WUFFS_DEFLATE__HAVE_64_BIT_UNALIGNED_LITTLE_ENDIAN_LOADS)
- // Ensure that we have at least 15 bits of input.
+ // Ensure that we have at least n bits of input.
if (n_bits < n) {
bits |= ((uint32_t)(*psrc++)) << n_bits;
n_bits += 8;
|
prevent overflow in hufUncompress if nBits is large | @@ -1093,7 +1093,9 @@ hufUncompress (const char compressed[],
const char *ptr = compressed + 20;
- if ( ptr + (nBits+7 )/8 > compressed+nCompressed)
+ uint64_t nBytes = (static_cast<uint64_t>(nBits)+7) / 8 ;
+
+ if ( ptr + nBytes > compressed+nCompressed)
{
notEnoughData();
return;
|
media: Check audioType first at Decoder::create
If audioType is AUDIO_TYPE_UNKNOWN, just return nullptr. | @@ -25,7 +25,10 @@ namespace media {
Decoder::Decoder(audio_type_t audioType, unsigned short channels, unsigned int sampleRate)
#ifdef CONFIG_AUDIO_CODEC
- : mAudioType(audioType), mChannels(channels), mSampleRate(sampleRate)
+ :
+ mAudioType(audioType),
+ mChannels(channels),
+ mSampleRate(sampleRate)
#endif
{
#ifdef CONFIG_AUDIO_CODEC
@@ -47,15 +50,19 @@ std::shared_ptr<Decoder> Decoder::create(audio_type_t audioType, unsigned short
#ifdef CONFIG_AUDIO_CODEC
medvdbg("(%d,%d,%d)\n", audioType, channels, sampleRate);
- std::shared_ptr<Decoder> instance(new Decoder(audioType, channels, sampleRate));
- if (instance) {
- if (instance->init()) {
- return instance;
- }
+ if (audioType == AUDIO_TYPE_UNKNOWN) {
+ meddbg("%s[line : %d] Fail : audio type is unknown\n", __func__, __LINE__);
+ return nullptr;
}
- meddbg("Error! (%d,%d,%d), failed!\n", audioType, channels, sampleRate);
+ auto instance = std::make_shared<Decoder>(audioType, channels, sampleRate);
+ if (instance && instance->init()) {
+ return instance;
+ } else {
+ meddbg("%s[line : %d] Fail : init is failed\n", __func__, __LINE__);
+ meddbg("audioType : %d, channels : %d, sampleRate : %d\n", audioType, channels, sampleRate);
return nullptr;
+ }
#else
return nullptr;
#endif
@@ -64,19 +71,14 @@ std::shared_ptr<Decoder> Decoder::create(audio_type_t audioType, unsigned short
bool Decoder::init(void)
{
#ifdef CONFIG_AUDIO_CODEC
- if (mAudioType == AUDIO_TYPE_UNKNOWN) {
- meddbg("Error! unknown audio type!\n");
- return false;
- }
-
if (audio_decoder_init(&mDecoder, CONFIG_AUDIO_CODEC_RINGBUFFER_SIZE) != AUDIO_DECODER_OK) {
- meddbg("Error! audio_decoder_init failed!\n");
+ meddbg("%s[line : %d] Fail : audio_decoder_init is failed\n", __func__, __LINE__);
return false;
}
mDecoder.audio_type = mAudioType;
if (!mConfig(mDecoder.audio_type)) {
- meddbg("Error! mConfig() failed!\n");
+ meddbg("%s[line : %d] Fail : mConfig is failed\n", __func__, __LINE__);
return false;
}
|
Replace SDL renderer with the SDL2 renderer in software mode.
This officially removes the old SDL rendering code as a requirement. | @@ -108,14 +108,14 @@ TCOD_Error TCOD_console_init_root_(
strncpy(TCOD_ctx.window_title, title ? title : "",
sizeof(TCOD_ctx.window_title) - 1);
TCOD_ctx.fullscreen = fullscreen;
- switch (renderer) {
- case TCOD_RENDERER_OPENGL2:
- case TCOD_RENDERER_SDL2: {
+ struct TCOD_Tileset* tileset = ensure_tileset();
+ if (!tileset) { return TCOD_E_ERROR; }
int renderer_flags = SDL_RENDERER_PRESENTVSYNC * vsync;
int window_flags = (SDL_WINDOW_RESIZABLE |
(fullscreen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0));
- struct TCOD_Tileset* tileset = ensure_tileset();
- if (!tileset) { return TCOD_E_ERROR; }
+ switch (renderer) {
+ case TCOD_RENDERER_SDL:
+ renderer_flags |= SDL_RENDERER_SOFTWARE;
TCOD_ctx.engine = TCOD_renderer_init_sdl2(
w * tileset->tile_width, h * tileset->tile_height,
title, window_flags, renderer_flags, tileset);
@@ -123,7 +123,15 @@ TCOD_Error TCOD_console_init_root_(
return TCOD_E_ERROR;
}
break;
+ case TCOD_RENDERER_OPENGL2:
+ case TCOD_RENDERER_SDL2:
+ TCOD_ctx.engine = TCOD_renderer_init_sdl2(
+ w * tileset->tile_width, h * tileset->tile_height,
+ title, window_flags, renderer_flags, tileset);
+ if (!TCOD_ctx.engine) {
+ return TCOD_E_ERROR;
}
+ break;
default:
return TCOD_console_init(TCOD_ctx.root, title, fullscreen);
}
|
reset tx table in IRQ on reset command | @@ -498,6 +498,8 @@ void MsgAlloc_Reset(void)
{
// We will need to reset
reset_needed = true;
+ tx_tasks_stack_id = 0;
+ memset((void *)tx_tasks, 0, sizeof(tx_tasks));
}
error_return_t MsgAlloc_IsReseted(void)
|
fix(Makefile): fix target ordering to allow for parallelism with specs_gen | @@ -180,10 +180,11 @@ echo:
@ echo SPECS_APIS: $(SPECS_APIS)
@ echo DISCORD_OBJS: $(DISCORD_OBJS)
-specs_gen: cee_utils | $(SPECSDEPS_OBJS)
- @ $(MAKE) clean specsdeps_clean specs_clean specs_code
+specs_gen: $(SPECSDEPS_OBJS) | $(CEE_UTILS_DIR)
+ @ $(MAKE) clean specsdeps_clean specs_clean
+ @ $(MAKE) specs_code
-specs_code: specs-gen.exe
+specs_code: | specs-gen.exe
@ rm -rf $(SPECS_WDIR)/*/one-specs.h
# Generate header files (specs-code/%/*.h)
$(foreach var, $(SPECS_JSON), \
|
changed http-server to eyre | [%diff diff]
[%build wire ? schematic:ford]
[%kill wire ~]
- [%connect wire binding:http-server term]
+ [%connect wire binding:eyre term]
[%http-response http-event:http]
- [%disconnect binding:http-server]
+ [%disconnect binding:eyre]
==
::
+$ poke
==
::
++ bound
- |= [wir=wire success=? binding=binding:http-server]
+ |= [wir=wire success=? binding=binding:eyre]
^- (quip move _this)
[~ this]
::
::
++ poke-handle-http-request
%- (require-authorization:app ost.bol move this)
- |= =inbound-request:http-server
+ |= =inbound-request:eyre
^- (quip move _this)
::
=/ request-line (parse-request-line url.request.inbound-request)
:: +poke-handle-http-cancel: received when a connection was killed
::
++ poke-handle-http-cancel
- |= =inbound-request:http-server
+ |= =inbound-request:eyre
^- (quip move _this)
[~ this]
::
|
Add Visual Studio projects for ippdoclint, ippproxy, and ipptransform. | <ProjectReference Include="dnssdstub.vcxproj">
<Project>{c0899b3a-43e7-4bc3-a785-659e1fd2ea83}</Project>
</ProjectReference>
- <ProjectReference Include="libcups2.vcxproj">
+ <ProjectReference Include="libcups3.vcxproj">
<Project>{cb4aa6f2-3e84-45be-b505-95cd375e8be3}</Project>
<ReferenceOutputAssembly>false</ReferenceOutputAssembly>
</ProjectReference>
|
Updated README.md why section. | @@ -81,7 +81,7 @@ needing to use your browser (_great if you want to do a quick analysis of your
access log via SSH, or if you simply love working in the terminal_).
While the terminal output is the default output, it has the capability to
-generate a complete self-contained real-time [**`HTML`**](https://rt.goaccess.io/?src=gh)
+generate a complete, self-contained, real-time [**`HTML`**](https://rt.goaccess.io/?src=gh)
report, as well as a [**`JSON`**](https://goaccess.io/json?src=gh), and
[**`CSV`**](https://goaccess.io/goaccess_csv_report.csv?src=gh) report.
|
README: correct cmake invocation note
the default invocation given generates makefiles, not a configure script | @@ -123,7 +123,7 @@ minimal build:
$ sudo apt-get install build-essential cmake
When building from git sources, you will need to run cmake to generate the
-configure script.
+makefiles.
mkdir build && cd build && cmake ../
make
|
Update Docs to state what the requirements for Client Auth API's are | @@ -570,7 +570,11 @@ The callback can return 0 to continue handshake in s2n or it can return negative
value to make s2n terminate handshake early with fatal handshake failure alert.
## Client Auth Related calls
+Client Auth Related API's are not recommended for normal users. Use of these API's is discouraged.
+1. Using these API's requires users to: Complete full x509 parsing and hostname validation in the application layer
+2. Application knowledge of TLS code points for certificate types
+3. Application dependency on libcrypto to give a libcrypto RSA struct back to s2n
### s2n\_config\_set\_client\_auth\_type and s2n\_connection\_set\_client\_auth\_type
```c
|
Add Emscripten section to README.md | @@ -114,6 +114,15 @@ or [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-down
3. Run `build_win` to compile janet.
4. Run `build_win test` to make sure everything is working.
+### Emscripten
+
+To build janet for the web via [Emscripten](https://kripken.github.io/emscripten-site/), make sure you
+have `emcc` installed and on your path. On a linux or macOS system, use `make janet.js` to build
+`janet.js` and `janet.wasm` - both are needed to run janet in a browser or in node.
+The JavaScript build is what runs the repl on the main website,
+but really serves mainly as a proof of concept. Janet will run much slower in a browser.
+Building with emscripten on windows is currently unsupported.
+
## Examples
See the examples directory for some example janet code.
|
Simplify loop. | @@ -221,9 +221,9 @@ static void drawHLineDma(tic_mem* memory, s32 xl, s32 xr, s32 y, u8 color)
tic_tool_poke4(&memory->ram.vram.screen.data, y * TIC80_WIDTH + xl, color);
xl++;
}
- for(s32 x = xl; x + 1 < xr; x+=2) {
- memory->ram.vram.screen.data[(y * TIC80_WIDTH + x) >> 1] = color;
- }
+ s32 count = (xr - xl) >> 1;
+ u8 *screen = memory->ram.vram.screen.data + ((y * TIC80_WIDTH + xl) >> 1);
+ for(s32 i = 0; i < count; i++) *screen++ = color;
if (xr & 1) {
tic_tool_poke4(&memory->ram.vram.screen.data, y * TIC80_WIDTH + xr - 1, color);
}
|
BugID:16859273:awss can build with iar | @@ -8,11 +8,7 @@ $(NAME)_COMPONENTS += network/netmgr \
middleware/uagent/uota \
utility/cjson
-ifeq ($(COMPILER),iar)
-$(NAME)_COMPONENTS += feature.linkkit-gateway-noawss
-else
$(NAME)_COMPONENTS += feature.linkkit-gateway
-endif
GLOBAL_CFLAGS += -DCONFIG_DM_DEVTYPE_GATEWAY \
-DMQTT_DIRECT \
|
ImfAcesFile.cpp: Remove redundant equality check.
Since fileChr.white is 'fileNeutral' and acesChr.white is 'acesNeutral'
always. | @@ -407,8 +407,7 @@ AcesInputFile::Data::initColorConversion ()
if (fileChr.red == acesChr.red &&
fileChr.green == acesChr.green &&
fileChr.blue == acesChr.blue &&
- fileChr.white == acesChr.white &&
- fileNeutral == acesNeutral)
+ fileChr.white == acesChr.white)
{
//
// The file already contains ACES data,
|
uflow: convert bytes to str
Python 3 fix, similar to commit and commit for ucalls.
Closes | @@ -196,7 +196,9 @@ def print_event(cpu, data, size):
direction = "<- " if event.depth & (1 << 63) else "-> "
print("%-3d %-6d %-6d %-8.3f %-40s" % (cpu, event.pid >> 32,
event.pid & 0xFFFFFFFF, time.time() - start_ts,
- (" " * (depth - 1)) + direction + event.clazz + "." + event.method))
+ (" " * (depth - 1)) + direction + \
+ event.clazz.decode('utf-8', 'replace') + "." + \
+ event.method.decode('utf-8', 'replace')))
bpf["calls"].open_perf_buffer(print_event)
while 1:
|
gpexpand.status_detail should be distributed by "table_oid".
The table gpexpand.status_detail used by gpexpand should be
distributed by "table_oid", to avoid the updating workload burden of
tens thounsand tables in a segment, concentrated on only one single
segment with the prior distribution policy (distributed by dbname).
Authored-by: Peng Han | @@ -299,7 +299,7 @@ status_detail_table_sql = """CREATE TABLE gpexpand.status_detail
status text,
expansion_started timestamp,
expansion_finished timestamp,
- source_bytes numeric ) """
+ source_bytes numeric ) distributed by (table_oid)"""
# gpexpand views
progress_view_simple_sql = """CREATE VIEW gpexpand.expansion_progress AS
SELECT
|
Remove gpfaulinject related patterns
Author: Xin Zhang
Author: Asim R P | --- start_matchignore
-# Mask out timestamp and whoami message for gpfaultinjector
-m/^\d+.*gpfaultinjector.*-\[INFO\]:-/
-s/^\d+.*gpfaultinjector.*-\[INFO\]:-//
--- end_matchignore
-
-- start_matchsubs
# entry db matches
m/\s+\(entry db(.*)+\spid=\d+\)/
|
apply HOSTDRV | @@ -524,6 +524,8 @@ static const INITBL iniitem[] = {
{"bmap_Num", INITYPE_UINT32, &bmpfilenumber, 0},
{"fontfile", INITYPE_STR, np2cfg.fontfile, MAX_PATH},
{"biospath", INIRO_STR, np2cfg.biospath, MAX_PATH},
+ {"hdrvroot", INIRO_STR, np2cfg.hdrvroot, MAX_PATH},
+ {"hdrv_acc", INIRO_UINT8, &np2cfg.hdrvacc, 0},
{"pc_model", INITYPE_STR, np2cfg.model, sizeof(np2cfg.model)},
|
py/makeversionhdr: Honor SOURCE_DATE_EPOCH if present.
This environment variable, if defined during the build process,
indicates a fixed time that should be used in place of "now" when
such a time is explicitely referenced.
This allows for reproducible builds of micropython.
See | @@ -80,6 +80,12 @@ def make_version_header(filename):
git_tag, git_hash = info
+ build_date = datetime.date.today()
+ if "SOURCE_DATE_EPOCH" in os.environ:
+ build_date = datetime.datetime.utcfromtimestamp(
+ int(os.environ["SOURCE_DATE_EPOCH"])
+ ).date()
+
# Generate the file with the git and version info
file_data = """\
// This file was generated by py/makeversionhdr.py
@@ -89,7 +95,7 @@ def make_version_header(filename):
""" % (
git_tag,
git_hash,
- datetime.date.today().strftime("%Y-%m-%d"),
+ build_date.strftime("%Y-%m-%d"),
)
# Check if the file contents changed from last time
|
king: TODO.md cleanup. | -# New IPC
+# New IPC Protocol
Stubbed out:
@@ -19,7 +19,7 @@ Stubbed out:
King-Haskell specific features:
- [x] Re-implement `collectFX` flow in Serf/Pier.
-- [ ] Hook up `collectFX` to CLI.
+- [x] Hook up `collectFX` to CLI.
- [ ] Test new `collectFX` flow
Performance:
@@ -31,18 +31,19 @@ Polish:
- [x] Cleanup batching flow.
- [x] Think through how to shutdown the serf on exception.
+- [ ] King should shutdown promptly on ^C. Always takes 2s in practice.
- [ ] Logging for new IPC flow.
- [ ] Logging for boot sequence.
- [ ] Bring back progress bars.
# Misc Bugs
+- [ ] `king run --collect-fx` flag does nothing. Remove or implement.
- [ ] Handle ^C in connected terminals. It should interrupt current
event (send SIGINT to serf, which will cause the current event to
fail promptly).
- [ ] The terminal driver seems to have a race condition when spinner
changed too quickly.
-- [ ] King should shutdown promptly on ^C. Always takes 2s in practice.
# Cleanup
|
use md_notriv_dims | @@ -171,10 +171,9 @@ int main_pics(int argc, char* argv[])
complex float* kspace = load_cfl(argv[1], DIMS, ksp_dims);
complex float* maps = load_cfl(argv[2], DIMS, map_dims);
- unsigned int map_flags = FFT_FLAGS | SENS_FLAGS;
- for (unsigned int d = 0; d < DIMS; d++)
- if (map_dims[d] > 1)
- map_flags = MD_SET(map_flags, d);
+ unsigned int map_flags = md_nontriv_dims(DIMS, map_dims);
+
+ map_flags |= FFT_FLAGS | SENS_FLAGS;
complex float* traj = NULL;
|
VERSION bump to version 1.4.97 | @@ -46,7 +46,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 4)
-set(SYSREPO_MICRO_VERSION 96)
+set(SYSREPO_MICRO_VERSION 97)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
imxrt1050-evk/loadable_elf_apps/defconfig: Enable fault manager
This patch enables the support of fault manager for imxrt1050
Fault manager is used to handle any kind of fault in the system.
NOTE: For now only assert cases are being handled. | @@ -270,7 +270,7 @@ CONFIG_ARCH_BOARD="imxrt1050-evk"
# Common Board Options
#
CONFIG_ARCH_HAVE_LEDS=y
-CONFIG_ARCH_LEDS=y
+# CONFIG_ARCH_LEDS is not set
CONFIG_ARCH_HAVE_BUTTONS=y
# CONFIG_ARCH_BUTTONS is not set
CONFIG_ARCH_HAVE_IRQBUTTONS=y
@@ -410,7 +410,7 @@ CONFIG_SCHED_HPWORKSTACKSIZE=2048
#
# fault Manager
#
-# CONFIG_FAULT_MGR is not set
+CONFIG_FAULT_MGR=y
#
# Stack size information
|
stm32/Makefile: Use -Wno-attributes for ll_usb.c HAL source file.
A recent version of arm-none-eabi-gcc (8.2.0) will warn about unused packed
attributes in USB_WritePacket and USB_ReadPacket. This patch suppresses
such warnings for this file only. | @@ -303,6 +303,7 @@ SRC_HAL = $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_,\
)
ifeq ($(MCU_SERIES),$(filter $(MCU_SERIES),f4 f7 h7 l4))
+$(BUILD)/$(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_ll_usb.o: CFLAGS += -Wno-attributes
SRC_HAL += $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_,\
hal_sd.c \
ll_sdmmc.c \
|
bq25710: Replace CONFIG_BQ* with CONFIG_CHARGER_BQ*
This fixes a series of copy-paste errors where CONFIG_BQ25710_* was used
instead of CONFIG_CHARGER_BQ25710_*.
BRANCH=none
TEST=buildall passes | #define CONFIG_CHARGER_BQ25710_VSYS_MIN_VOLTAGE_MV 0
#endif
-#ifndef CONFIG_BQ25720_VSYS_UVP_CUSTOM
-#define CONFIG_BQ25720_VSYS_UVP 0
+#ifndef CONFIG_CHARGER_BQ25720_VSYS_UVP_CUSTOM
+#define CONFIG_CHARGER_BQ25720_VSYS_UVP 0
#endif
-#ifndef CONFIG_BQ25720_IDCHG_DEG2_CUSTOM
-#define CONFIG_BQ25720_IDCHG_DEG2 1
+#ifndef CONFIG_CHARGER_BQ25720_IDCHG_DEG2_CUSTOM
+#define CONFIG_CHARGER_BQ25720_IDCHG_DEG2 1
#endif
/*
@@ -431,19 +431,20 @@ static int bq257x0_init_charge_option_4(int chgnum)
if (!IS_ENABLED(CONFIG_CHARGER_BQ25720))
return EC_SUCCESS;
- if (!IS_ENABLED(CONFIG_BQ25720_VSYS_UVP_CUSTOM) &&
- !IS_ENABLED(CONFIG_BQ25720_IDCHG_DEG2_CUSTOM))
+ if (!IS_ENABLED(CONFIG_CHARGER_BQ25720_VSYS_UVP_CUSTOM) &&
+ !IS_ENABLED(CONFIG_CHARGER_BQ25720_IDCHG_DEG2_CUSTOM))
return EC_SUCCESS;
rv = raw_read16(chgnum, BQ25720_REG_CHARGE_OPTION_4, ®);
if (rv)
return rv;
- if (IS_ENABLED(CONFIG_BQ25720_VSYS_UVP_CUSTOM))
- reg = SET_CO4(VSYS_UVP, CONFIG_BQ25720_VSYS_UVP, reg);
+ if (IS_ENABLED(CONFIG_CHARGER_BQ25720_VSYS_UVP_CUSTOM))
+ reg = SET_CO4(VSYS_UVP, CONFIG_CHARGER_BQ25720_VSYS_UVP, reg);
- if (IS_ENABLED(CONFIG_BQ25720_IDCHG_DEG2_CUSTOM))
- reg = SET_CO4(IDCHG_DEG2, CONFIG_BQ25720_IDCHG_DEG2, reg);
+ if (IS_ENABLED(CONFIG_CHARGER_BQ25720_IDCHG_DEG2_CUSTOM))
+ reg = SET_CO4(IDCHG_DEG2, CONFIG_CHARGER_BQ25720_IDCHG_DEG2,
+ reg);
return raw_write16(chgnum, BQ25720_REG_CHARGE_OPTION_4, reg);
}
|
Implemented VmaBlockMetadata_Linear::Clear
A step towards enabling virtual allocator with linear algorithm. | @@ -10524,7 +10524,14 @@ void VmaBlockMetadata_Linear::GetAllocationInfo(VkDeviceSize offset, VmaVirtualA
void VmaBlockMetadata_Linear::Clear()
{
- VMA_ASSERT(0 && "TODO implement");
+ m_SumFreeSize = GetSize();
+ m_Suballocations0.clear();
+ m_Suballocations1.clear();
+ m_1stVectorIndex = 0;
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ m_2ndNullItemsCount = 0;
}
void VmaBlockMetadata_Linear::SetAllocationUserData(VkDeviceSize offset, void* userData)
|
edit diff BUGFIX double free | @@ -2549,11 +2549,10 @@ sr_edit_add_check_same_node_op(sr_session_ctx_t *session, const char *xpath, con
return err_info;
} else if (set->number == 1) {
node = set->set.d[0];
- ly_set_free(set);
-
op = sr_edit_find_oper(node, 1, NULL);
if (!strcmp(op, operation)) {
/* same node with same operation, silently ignore and clear the error */
+ ly_set_free(set);
ly_err_clean(session->conn->ly_ctx, NULL);
return NULL;
} /* else node has a different operation, error */
|
[chainmaker][#547]modify return value | @@ -190,10 +190,37 @@ BoatHlchainmakerWallet *BoatHlchainmakerWalletInit(const BoatHlchainmakerWalletC
wallet_ptr->node_info.chain_id_info = NULL;
wallet_ptr->node_info.org_id_info = NULL;
- BoatChainmakerWalletSetNodeUrl(wallet_ptr, config_ptr->node_url_cfg);
- BoatChainmakerWalletSetHostName(wallet_ptr, config_ptr->host_name_cfg);
- BoatChainmakerWalletSetChainId(wallet_ptr, config_ptr->chain_id_cfg);
- BoatChainmakerWalletSetOrgId(wallet_ptr, config_ptr->org_id_cfg);
+ result = BoatChainmakerWalletSetNodeUrl(wallet_ptr, config_ptr->node_url_cfg);
+ if (result != BOAT_SUCCESS)
+ {
+ BoatLog(BOAT_LOG_CRITICAL, "BoatChainmakerWalletSetNodeUrl failed");
+ BoatFree(wallet_ptr);
+ return NULL;
+ }
+
+ result = BoatChainmakerWalletSetHostName(wallet_ptr, config_ptr->host_name_cfg);
+ if (result != BOAT_SUCCESS)
+ {
+ BoatLog(BOAT_LOG_CRITICAL, "BoatChainmakerWalletSetHostName failed");
+ BoatFree(wallet_ptr);
+ return NULL;
+ }
+
+ result = BoatChainmakerWalletSetChainId(wallet_ptr, config_ptr->chain_id_cfg);
+ if (result != BOAT_SUCCESS)
+ {
+ BoatLog(BOAT_LOG_CRITICAL, "BoatChainmakerWalletSetChainId failed");
+ BoatFree(wallet_ptr);
+ return NULL;
+ }
+
+ result = BoatChainmakerWalletSetOrgId(wallet_ptr, config_ptr->org_id_cfg);
+ if (result != BOAT_SUCCESS)
+ {
+ BoatLog(BOAT_LOG_CRITICAL, "BoatChainmakerWalletSetOrgId failed");
+ BoatFree(wallet_ptr);
+ return NULL;
+ }
/* assignment */
#if (BOAT_CHAINMAKER_TLS_SUPPORT == 1)
@@ -203,6 +230,7 @@ BoatHlchainmakerWallet *BoatHlchainmakerWalletInit(const BoatHlchainmakerWalletC
if (wallet_ptr->tls_ca_cert_info.field_ptr == NULL)
{
BoatLog(BOAT_LOG_CRITICAL, "Failed to malloc tls_ca_cert_info memory.");
+ BoatFree(wallet_ptr);
return NULL;
}
@@ -217,6 +245,7 @@ BoatHlchainmakerWallet *BoatHlchainmakerWalletInit(const BoatHlchainmakerWalletC
if (result != BOAT_SUCCESS)
{
BoatLog(BOAT_LOG_CRITICAL, "Failed to set accountInfo|TlsUInfo|networkInfo.");
+ BoatFree(wallet_ptr);
return NULL;
}
return wallet_ptr;
@@ -584,6 +613,8 @@ BOAT_RESULT BoatChainmakerWalletSetNodeUrl(BoatHlchainmakerWallet *wallet_ptr, c
return BOAT_ERROR_COMMON_INVALID_ARGUMENT;
}
+ printf("data is %s\n", node_url_ptr);
+
if (strchr(node_url_ptr, ':') == NULL)
{
BoatLog(BOAT_LOG_CRITICAL, "node URL has a invalid format: %s.", node_url_ptr);
|
Block 900k New Protocol Mandatory | @@ -3634,8 +3634,8 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (pfrom->nVersion < MIN_PEER_PROTO_VERSION)
oldVersion = true;
- // Disconnect nodes that are over block height 800k and have an old peer version
- if (nBestHeight >= 800000 && pfrom->nVersion < PROTOCOL_VERSION)
+ // Disconnect nodes that are over block height 900k and have an old peer version
+ if (nBestHeight >= 900000 && pfrom->nVersion < PROTOCOL_VERSION)
oldVersion = true;
if (oldVersion == true)
|
TestsUser/Base.h: Declare EFI_IMAGE_UNLOAD | @@ -1783,6 +1783,12 @@ STATIC EFI_RUNTIME_SERVICES gNilRT = {
STATIC EFI_RUNTIME_SERVICES *gRT = &gNilRT;
+typedef
+EFI_STATUS
+(EFIAPI* EFI_IMAGE_UNLOAD)(
+ IN EFI_HANDLE ImageHandle
+ );
+
//
// Specific
//
|
linux-cp: fix issue of possibly closing negative fd
Type: fix
Primarily fix an issue reported by Coverity in
lcp_nl_open_sync_socket() that close() could possibly be run with
negative fd. Also, add more checks and error logging there. | @@ -940,19 +940,28 @@ lcp_nl_open_sync_socket (nl_sock_type_t sock_type)
nm->sk_route_sync[sock_type] = sk_route = nl_socket_alloc ();
dest_ns_fd = lcp_get_default_ns_fd ();
- if (dest_ns_fd)
+ if (dest_ns_fd > 0)
{
curr_ns_fd = clib_netns_open (NULL /* self */);
- clib_setns (dest_ns_fd);
+ if (clib_setns (dest_ns_fd) == -1)
+ NL_ERROR ("Cannot set destination ns");
}
nl_connect (sk_route, NETLINK_ROUTE);
- if (dest_ns_fd)
+ if (dest_ns_fd > 0)
{
- clib_setns (curr_ns_fd);
+ if (curr_ns_fd == -1)
+ {
+ NL_ERROR ("No previous ns to set");
+ }
+ else
+ {
+ if (clib_setns (curr_ns_fd) == -1)
+ NL_ERROR ("Cannot set previous ns");
close (curr_ns_fd);
}
+ }
NL_INFO ("Opened netlink synchronization socket %d of type %d",
nl_socket_get_fd (sk_route), sock_type);
|
Join contrib/libs/swig with contrib/tools/swig.
swig is not needed as a library. This simplifies its future updates. | @@ -5,7 +5,7 @@ import _import_wrapper as iw
import _common as common
-_SWIG_LIB_PATH = 'contrib/libs/swig/Lib'
+_SWIG_LIB_PATH = 'contrib/tools/swig/Lib'
_PREDEFINED_INCLUDES = [
os.path.join(_SWIG_LIB_PATH, 'python', 'python.swg'),
|
removes or clarifies %ames todo comments | ?: =(our seg)
sen:gus
cluy:(myx:gus seg)
- :: XX should anything else be signed?
- ::
?& =(lyf.yig life.wed)
?=(^ oath.wed)
.= (need (sure:as:cub.yig u.oath.wed))
::
%turf
=. tuf.fox turf.sih
- :: XX give %turf if already recvd %bar?
[~ +>.$]
::
%pubs
%init
:_ fox [[%sake p.kyz] [%brew ~] ~]
::
- :: XX confirm unused, remove
+ :: XX this is unused, but they only way to set
+ :: entropy for symmetric keys. Review.
::
%junk
[~ fox(any.ton (shax (mix any.ton.fox p.kyz)))]
|
Fix bug that wrong tx fifo drop in buffer chains flow of dgram mode
Not drop session dgram hdr when to drop processed udp message from tx fifo | @@ -307,7 +307,10 @@ session_tx_fifo_chain_tail (vlib_main_t * vm, session_tx_context_t * ctx,
hdr->data_offset += n_bytes_read;
if (hdr->data_offset == hdr->data_length)
- svm_fifo_dequeue_drop (f, hdr->data_length);
+ {
+ u32 offset = hdr->data_length + SESSION_CONN_HDR_LEN;
+ svm_fifo_dequeue_drop (f, offset);
+ }
}
else
n_bytes_read = svm_fifo_dequeue_nowait (ctx->s->server_tx_fifo,
|
misc/opts: check success of parse_cfl | @@ -523,8 +523,7 @@ bool opt_float(void* ptr, char c, const char* optarg)
bool opt_cfl(void* ptr, char c, const char* optarg)
{
UNUSED(c);
- parse_cfl((complex float*) ptr, optarg);
- return false;
+ return 0 != parse_cfl((complex float*) ptr, optarg);
}
bool opt_string(void* ptr, char c, const char* optarg)
|
[Readme] Add engine_version | @@ -154,6 +154,7 @@ A partial list of parameters are below. See the config file for a complete list.
| `vulkan_driver` | Displays used vulkan driver, radv/amdgpu-pro/amdvlk |
| `gpu_name` | Displays GPU name from pci.ids |
| `gpu_power` | Display GPU draw in watts |
+| `engine_version` | Display OpenGL or vulkan and vulkan-based render engine's version |
Example: `MANGOHUD_CONFIG=cpu_temp,gpu_temp,position=top-right,height=500,font_size=32`
|
docs: mark bulk delete checkbox | - [x] [Delete All Reactions for Emoji](https://discord.com/developers/docs/resources/channel#delete-all-reactions-for-emoji)
- [x] [Edit Message](https://discord.com/developers/docs/resources/channel#edit-message)
- [X] [Delete Message](https://discord.com/developers/docs/resources/channel#delete-message)
-- [ ] [Bulk Delete Messages](https://discord.com/developers/docs/resources/channel#bulk-delete-messages)
+- [X] [Bulk Delete Messages](https://discord.com/developers/docs/resources/channel#bulk-delete-messages)
- [ ] [Edit Channel Permissions](https://discord.com/developers/docs/resources/channel#edit-channel-permissions)
- [ ] [Get Channel Invites](https://discord.com/developers/docs/resources/channel#get-channel-invites)
- [ ] [Create Channel Invite](https://discord.com/developers/docs/resources/channel#create-channel-invite)
|
Update fails in globals [STILL NOT READY] | @@ -17,15 +17,15 @@ globals:
0x141F5FBC8: g_CharacterManager_CompanionMemoryPtr
0x141F5FBF8: g_GameObjectManager_ObjectList
0x141F60948: g_GameObjectManager_ObjectListEnd
-#fail 0x141EA7388: g_Client::Game::UI::Buddy.Pet # not a pointer
-#fail 0x141EA7398: g_Client::Game::UI::Buddy.Pet.BuffList # not a pointer
-#fail 0x141EA8048: g_Client::Game::UI::Buddy.CompanionStats # not a pointer. Member of UIState
-#fail 0x141EA8050: g_Client::Game::UI::Buddy.CompanionStats.TimeRemaining # not a pointer
-#fail 0x141EA80A0: g_Client::Game::UI::Buddy.BattleBuddyListPtr # Member of UIState
-#fail 0x141EA81E8: g_Client::Game::UI::RelicNote # not a pointer
-#fail 0x141EB43F0: g_TitleController
-#fail 0x141EB43F8: g_TitleList
-#fail 0x141ECBA88: g_FateTablePtr
+ 0x141F695B8: g_Client::Game::UI::Buddy.Pet # not a pointer
+ 0x141F695C8: g_Client::Game::UI::Buddy.Pet.BuffList # not a pointer
+ 0x141F6A278: g_Client::Game::UI::Buddy.CompanionStats # not a pointer. Member of UIState
+ 0x141F6A280: g_Client::Game::UI::Buddy.CompanionStats.TimeRemaining # not a pointer
+ 0x141F6A2D0: g_Client::Game::UI::Buddy.BattleBuddyListPtr # Member of UIState
+ 0x141F6A430: g_Client::Game::UI::RelicNote # not a pointer
+ 0x141F76720: g_TitleController
+ 0x141F76728: g_TitleList
+ 0x141F88FA8: g_FateTablePtr
0x141F88C40: g_ClientObjectManager
0x141F19EC0: g_SomeOtherRenderingState
0x141F2C194: g_InvSqrt3
|
doc: fix some typos+some clarifications | @@ -53,7 +53,7 @@ Otherwise, you can visit the [the API documentation](http://doc.libelektra.org/a
### Resolver
Before configuration is actually written, the file name needs to be
-determined (will be automatically added by kdb mount):
+determined (resolvers will be automatically added by kdb mount):
- [resolver](resolver/) uses advanced POSIX APIs to handle conflicts gracefully
- [wresolver](wresolver/) minimalistic resolver for non-POSIX systems
@@ -65,7 +65,7 @@ harddisc (recommended to add at every kdb mount):
- [blockresolver](blockresolver/) resolves tagged blocks inside config files
- [multifile](multifile/)
-- [sync](sync/) uses POSIX APIs to sync configuration file with harddisc
+- [sync](sync/) uses POSIX APIs to sync configuration files with the hard disk
### Storage
@@ -161,7 +161,7 @@ Doing other stuff:
- [crypto](crypto/) encrypts / decrypts confidential values
- [fcrypt](fcrypt/) encrypts / decrypts entire backend files
-- [iconv](iconv/) make sure the configuration will have correct
+- [iconv](iconv/) makes sure the configuration will have correct
character encoding
- [hidden](hidden/) hides keys whose names start with a `.`.
- [null](null/) takes care of null values and other binary specialities
@@ -170,29 +170,31 @@ Doing other stuff:
Log/Send out all changes to configuration to:
-- [dbus](dbus/)
-- [journald](journald/)
-- [syslog](syslog/)
+- [dbus](dbus/) sends notifications for every change via dbus
+- [syslog](syslog/) logs key database changes to syslog
+- [journald](journald/) logs key database changes to journald
- [logchange](logchange/) prints the change of every key on the console
### Debug
Trace everything that happens within KDB:
-- [timeofday](timeofday/) print timestamps
-- [tracer](tracer/)
-- [counter](counter/) count and print how often plugin is used
+- [timeofday](timeofday/) prints timestamps
+- [tracer](tracer/) traces all calls
+- [counter](counter/) count and print how often a plugin is used
### Checker
Copies metadata to keys:
-- [glob](glob/) using globbing techniques
+- [spec](spec/) copies metadata from spec namespace (the
+ standard way)
+- [glob](glob/) using globbing techniques (needed by some plugins)
- [struct](struct/) using a defined structure (may also reject
configuration not conforming to that structure)
-- [spec](spec/) copies metadata from spec namespace
+
Plugins that check if values are valid based on metadata (typically
-copied by another plugin just before):
+copied by the `spec` plugin just before):
**Value Validation**
|
os/board/rtl8721csm/src/Makefile: To use arch headers from the board, add CFLAGS
There is a build error due to a missing header include path. To fix this, add CFLAGS for the arch include directories.
rtl8721csm_boot.c:76:24: fatal error: amebad_i2c.h: No such file or directory
#include "amebad_i2c.h"
^ | @@ -197,9 +197,12 @@ ifeq ($(WINTOOL),y)
CFLAGS += -I "${shell cygpath -w $(BOARD_SRCDIR)/common}"
CFLAGS += -I "${shell cygpath -w $(BOARD_SRCDIR)/$(CONFIG_ARCH_BOARD)/src}"
else
+ CFLAGS += -I$(ARCH_SRCDIR)/chip
CFLAGS += -I$(ARCH_SRCDIR)/common
+ CFLAGS += -I$(COMPONENT_DIR)/soc/realtek/amebad/cmsis
CFLAGS += -I$(BOARD_SRCDIR)/common
CFLAGS += -I$(ARCH_SRCDIR)/armv8-m
+ CFLAGS += -I$(BOARD_SRCDIR)/$(CONFIG_ARCH_BOARD)/src
endif
all: libboard$(LIBEXT)
|
Fix last week's regression in restricted panning zoom handling
}
MapRange zoomRange = options.getZoomRange();
- float zoom = GeneralUtils::Clamp(getZoom(), getMinZoom(), zoomRange.getMax());
+ float zoom = GeneralUtils::Clamp(_zoom, getMinZoom(), zoomRange.getMax());
if (zoom != getZoom()) {
+ MapVec cameraVec = _cameraPos - _focusPos;
+ double length = cameraVec.length();
+ double newLength = _zoom0Distance / std::pow(2.0f, zoom);
+ MapPos cameraPos = _focusPos + cameraVec * (newLength / length);
+
setZoom(zoom);
+ setCameraPos(cameraPos);
cameraChanged();
}
@@ -387,9 +393,7 @@ namespace carto {
MapVec cameraVec = _cameraPos - _focusPos;
double length = cameraVec.length();
double newLength = _zoom0Distance / std::pow(2.0f, _zoom);
- cameraVec *= newLength / length;
- _cameraPos = _focusPos;
- _cameraPos += cameraVec;
+ _cameraPos = _focusPos + cameraVec * (newLength / length);
_cameraChanged = true;
}
|
Update CMake version. | -cmake_minimum_required(VERSION 2.6)
-cmake_policy(SET CMP0001 NEW) # don't use MAKE_BACKWARDS_COMPATIBILITY but policies instead
-# CMP0042 is an apple specific option
-if (${APPLE})
- cmake_policy(SET CMP0042 NEW) # use MACOSX_RPATH
-endif ()
+cmake_minimum_required(VERSION 3.7)
project(chipmunk)
|
Update ChangeLog.d/systematically_store_bit_size_3740.txt | Changes
- * PSA persistent storage format is updated to always store the key bits
+ * The PSA persistent storage format is updated to always store the key bits
attribute. No automatic upgrade path is provided. Previously stored keys
must be erased, or manually upgraded based on the key storage format
- specification (mbed-crypto-storage-specification.md). The storage format
- for the dynamic Secure Element driver has also been updated. Previously
- stored keys must be erased, or manually upgraded. #3740
+ specification (docs/architecture/mbed-crypto-storage-specification.md). #3740
|
Remove non-existing nid entry for SceCpu | @@ -8968,8 +8968,6 @@ modules:
ksceKernelCpuIcacheInvalidateAllU: 0xAEE0B489
ksceKernelCpuIcacheInvalidateRange: 0xF4C7F578
ksceKernelCpuPreloadEngineKill: 0xD0D85FF8
- ksceKernelCpuRestoreContext: 0x0A4F0FB9
- ksceKernelCpuSaveContext: 0x211B89DA
ksceKernelCpuUnrestrictedMemcpy: 0x8C683DEC
ksceKernelCpuUpdateSCTLR: 0x04008CF7
ksceKernelMMUVAtoPAWithMode: 0x67343A07
|
tcmu-runner: fix the exception return value for function tcmu_get_alua_grps()
struct dirent **namelist;
char path[PATH_MAX];
int i, n;
+ int ret = 0;
snprintf(path, sizeof(path), CFGFS_CORE"/%s/%s/alua",
dev->tcm_hba_name, dev->tcm_dev_name);
@@ -294,10 +295,14 @@ int tcmu_get_alua_grps(struct tcmu_device *dev,
continue;
group = tcmu_get_alua_grp(dev, namelist[i]->d_name);
- if (!group)
+ if (!group) {
+ tcmu_dev_err(dev, "Could not get alua group %s.\n", namelist[i]->d_name);
+ ret = -1;
goto free_groups;
+ }
list_add_tail(group_list, &group->entry);
}
+ ret = 0;
goto free_names;
free_groups:
@@ -306,7 +311,7 @@ free_names:
for (i = 0; i < n; i++)
free(namelist[i]);
free(namelist);
- return 0;
+ return ret;
}
/*
|
Update: Cycle.c: adjustment for the case that a different usage durability value is set in a different configuration | @@ -316,6 +316,8 @@ void Cycle_ProcessInputGoalEvents(long currentTime)
Decision_Execute(&decision);
//reset cycling goal events after execution to avoid "residue actions"
PriorityQueue_RESET(&cycling_goal_events, cycling_goal_events.items, cycling_goal_events.maxElements);
+ //also don't re-add the selected goal:
+ goalsSelectedCnt = 0;
}
}
|
tools/echo and cleanup fix in export scripts
Closes | # This script should be sourced, not executed.
-realpath_int() {
+__realpath() {
wdir="$PWD"; [ "$PWD" = "/" ] && wdir=""
arg=$1
case "$arg" in
@@ -12,7 +12,12 @@ realpath_int() {
}
-idf_export_main() {
+__verbose() {
+ [[ -n ${IDF_QUIET} ]] && return
+ echo "$@"
+}
+
+__main() {
# The file doesn't have executable permissions, so this shouldn't really happen.
# Doing this in case someone tries to chmod +x it and execute...
@@ -82,15 +87,15 @@ idf_export_main() {
echo "Detecting the Python interpreter"
. "${IDF_PATH}/tools/detect_python.sh"
- echo "Adding ESP-IDF tools to PATH..."
+ __verbose "Adding ESP-IDF tools to PATH..."
# Call idf_tools.py to export tool paths
export IDF_TOOLS_EXPORT_CMD=${IDF_PATH}/export.sh
export IDF_TOOLS_INSTALL_CMD=${IDF_PATH}/install.sh
idf_exports=$("$ESP_PYTHON" "${IDF_PATH}/tools/idf_tools.py" export) || return 1
eval "${idf_exports}"
- echo "Using Python interpreter in $(which python)"
- echo "Checking if Python packages are up to date..."
+ __verbose "Using Python interpreter in $(which python)"
+ __verbose "Checking if Python packages are up to date..."
python "${IDF_PATH}/tools/check_python_dependencies.py" || return 1
@@ -108,17 +113,17 @@ idf_export_main() {
# shellcheck disable=SC2169,SC2039 # unreachable with 'dash'
paths="${path_prefix//:/ }"
if [ -n "${paths}" ]; then
- echo "Added the following directories to PATH:"
+ __verbose "Added the following directories to PATH:"
else
- echo "All paths are already set."
+ __verbose "All paths are already set."
fi
for path_entry in ${paths}
do
- echo " ${path_entry}"
+ __verbose " ${path_entry}"
done
else
- echo "Updated PATH variable:"
- echo " ${PATH}"
+ __verbose "Updated PATH variable:"
+ __verbose " ${PATH}"
fi
# Clean up
@@ -133,11 +138,11 @@ idf_export_main() {
# Not unsetting IDF_PYTHON_ENV_PATH, it can be used by IDF build system
# to check whether we are using a private Python environment
- echo "Done! You can now compile ESP-IDF projects."
- echo "Go to the project directory and run:"
- echo ""
- echo " idf.py build"
- echo ""
+ __verbose "Done! You can now compile ESP-IDF projects."
+ __verbose "Go to the project directory and run:"
+ __verbose ""
+ __verbose " idf.py build"
+ __verbose ""
}
enable_autocomplete() {
@@ -164,9 +169,10 @@ enable_autocomplete() {
}
-idf_export_main
+__main
enable_autocomplete
-unset realpath_int
-unset idf_export_main
+unset __realpath
+unset __main
+unset __verbose
unset enable_autocomplete
|
Increased max clients per net to 50
-#define MAXPCAPOUT 40
+#define MAXPCAPOUT 50
#define HANDSHAKELEVEL 50
#define HANDSHAKEART1 1
#define HANDSHAKEART2 2
@@ -449,7 +449,7 @@ printf("%s %s (C) %s ZeroBeat\n"
"options:\n"
"-i <file> : input hccapx file\n"
"-o <file> : output cap file\n"
- "-m <digit> : output maximum clients per net (default 40)\n"
+ "-m <digit> : output maximum clients per net (default 50)\n"
"\n", eigenname, VERSION, VERSION_JAHR, eigenname);
exit(EXIT_FAILURE);
}
@@ -482,6 +482,14 @@ while ((auswahl = getopt(argc, argv, "i:o:m:hv")) != -1)
maxcapout = strtoul(optarg, NULL, 10);
break;
+ case 'h':
+ usage(eigenname);
+ break;
+
+ case 'v':
+ usage(eigenname);
+ break;
+
default:
usage(eigenname);
break;
|
bricks/movehub: disable floating point support
This will be disabled for the foreseeable future. This saves 20K to allow continued development across all hubs. | // Set to MICROPY_FLOAT_IMPL_FLOAT to enable floating point support in user code or
// set to MICROPY_FLOAT_IMPL_NONE to disable floating point support in user code
// Requires about 20.1K (20620) of flash
-#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_FLOAT)
+#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_NONE)
#include "../stm32/configport.h"
|
testing: getprime: Fix the thread number info
Summary:
I noticed that getprime sometimes shows a wrong thread number
This commit fixes this issue
Impact:
None
Testing:
Tested with sabre-6quad:netnsh | @@ -114,6 +114,7 @@ static void get_prime_in_parallel(int n)
pthread_t thread[MAX_THREADS];
pthread_attr_t attr;
pthread_addr_t result;
+ int arg[MAX_THREADS];
int status;
int i;
@@ -141,9 +142,10 @@ static void get_prime_in_parallel(int n)
for (i = 0; i < n; i++)
{
+ arg[i] = i;
printf("Start thread #%d\n", i);
status = pthread_create(&thread[i], &attr,
- thread_func, (FAR void *)&i);
+ thread_func, (FAR void *)&arg[i]);
ASSERT(status == OK);
}
|
Reduce unused data files from Emscripten preloads. | @@ -128,12 +128,25 @@ endif()
if (EMSCRIPTEN)
# Attach data to Emscripten builds.
- foreach(project_it samples_c samples_cpp frost hmtool navier rad weather worldgen)
+ foreach(project_it samples_c samples_cpp)
target_link_options(
${project_it} PRIVATE
- --preload-file "${CMAKE_CURRENT_SOURCE_DIR}/../data@data/"
+ "SHELL:--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/../data/fonts/dejavu10x10_gs_tc.png@/data/fonts/"
+ "SHELL:--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/../data/img@/data/img/"
+ "SHELL:--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/../data/namegen@/data/namegen/"
)
endforeach()
+ target_link_options(
+ frost PRIVATE
+ "SHELL:--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/../data/fonts/terminal8x8_gs_tc.png@/data/fonts/"
+ )
+ foreach(project_it hmtool navier rad weather worldgen)
+ target_link_options(
+ ${project_it} PRIVATE
+ "SHELL:--preload-file ${CMAKE_CURRENT_SOURCE_DIR}/../data@/data/"
+ )
+ endforeach()
+
# Set output to html to generate preview pages, otherwise you'll have to make your own html.
set_target_properties(
samples_c samples_cpp frost hmtool navier rad weather worldgen
|
Fix sdsConfigRewrite() to prevent freeing invalid memory (#10598)
static void sdsConfigRewrite(standardConfig *config, const char *name, struct rewriteConfigState *state) {
sds val = config->flags & MODULE_CONFIG ? getModuleStringConfig(config->privdata) : *config->data.sds.config;
rewriteConfigSdsOption(state, name, val, config->data.sds.default_value);
- if (val) sdsfree(val);
+ if ((val) && (config->flags & MODULE_CONFIG)) sdsfree(val);
}
|
Remove cycle count | @@ -118,8 +118,6 @@ uint32_t blit_update_dac(FIL *audio_file) {
void blit_tick() {
if(needs_render) {
blit::render(blit::now());
- blit::fb.pen(rgba(255, 255, 255));
- blit::fb.text(std::to_string(flip_cycle_count), &minimal_font[0][0], point(0, 0));
HAL_LTDC_ProgramLineEvent(&hltdc, 252);
needs_render = false;
@@ -574,7 +572,7 @@ void blit_process_input() {
}
blit_last_buttons = blit::buttons;
- flip_cycle_count = DWT->CYCCNT - scc;
+ //flip_cycle_count = DWT->CYCCNT - scc;
}
char *get_fr_err_text(FRESULT err){
|
move creation of merge from constructor to compute
so that we're able to check attribute values, especially on file open,
so that we can tell if the node is frozen, and then not create the merge. | @@ -59,20 +59,6 @@ InputMergeNode::initialize()
InputMergeNode::InputMergeNode() :
myGeometryNodeId(-1)
{
- Util::PythonInterpreterLock pythonInterpreterLock;
-
- CHECK_HAPI(HAPI_CreateNode(
- Util::theHAPISession.get(),
- -1,
- "Sop/merge",
- NULL,
- false,
- &myGeometryNodeId
- ));
- if(!Util::statusCheckLoop())
- {
- DISPLAY_ERROR(MString("Unexpected error when creating merge node."));
- }
}
InputMergeNode::~InputMergeNode()
@@ -100,6 +86,34 @@ InputMergeNode::compute(
MDataBlock &dataBlock
)
{
+ if(myGeometryNodeId == -1) {
+ Util::PythonInterpreterLock pythonInterpreterLock;
+
+ MFnDependencyNode mergeNodeFn(thisMObject());
+ MPlug frozenPlug = mergeNodeFn.findPlug("frozen", true);
+ bool frozen = frozenPlug.asBool();
+
+ if(!frozen) {
+ fprintf(stderr, "not frozen\n");
+ CHECK_HAPI(HAPI_CreateNode(
+ Util::theHAPISession.get(),
+ -1,
+ "Sop/merge",
+ NULL,
+ false,
+ &myGeometryNodeId
+ ));
+ if(!Util::statusCheckLoop())
+ {
+ DISPLAY_ERROR(MString("Unexpected error when creating merge node."));
+ }
+ } else {
+ // if the node has been frozen ever since file open
+ // we don't create a merge or compute
+ return MStatus::kSuccess;
+ }
+ }
+
if(plug == InputMergeNode::outputNodeId)
{
MArrayDataHandle inputNodeArrayHandle =
|
[kservice] Update show version year info to 2018. | @@ -542,7 +542,7 @@ void rt_show_version(void)
rt_kprintf("- RT - Thread Operating System\n");
rt_kprintf(" / | \\ %d.%d.%d build %s\n",
RT_VERSION, RT_SUBVERSION, RT_REVISION, __DATE__);
- rt_kprintf(" 2006 - 2017 Copyright by rt-thread team\n");
+ rt_kprintf(" 2006 - 2018 Copyright by rt-thread team\n");
}
RTM_EXPORT(rt_show_version);
|
ethernet: fix ARP feature arc definition
This patch gets rid of an ugly warning during vpp startup:
"vnet_feature_init:143: WARNING: arp arc: last node is arp-disabled,
but expected error-drop!"
Type: fix
Fixes: | @@ -1711,7 +1711,7 @@ VNET_FEATURE_ARC_INIT (arp_feat, static) =
{
.arc_name = "arp",
.start_nodes = VNET_FEATURES ("arp-input"),
- .last_in_arc = "error-drop",
+ .last_in_arc = "arp-disabled",
.arc_index_ptr = ðernet_arp_main.feature_arc_index,
};
@@ -1730,10 +1730,17 @@ VNET_FEATURE_INIT (arp_proxy_feat_node, static) =
.runs_before = VNET_FEATURES ("arp-disabled"),
};
-VNET_FEATURE_INIT (arp_drop_feat_node, static) =
+VNET_FEATURE_INIT (arp_disabled_feat_node, static) =
{
.arc_name = "arp",
.node_name = "arp-disabled",
+ .runs_before = VNET_FEATURES ("error-drop"),
+};
+
+VNET_FEATURE_INIT (arp_drop_feat_node, static) =
+{
+ .arc_name = "arp",
+ .node_name = "error-drop",
.runs_before = 0, /* last feature */
};
|
config_tools: update condition for bootargs error check
add bootargs error check only when kernel type is KERNEL_BZIMAGE. | @@ -446,9 +446,9 @@ def main(args):
PASSTHROUGH_PTCT = False
kern_args = common.get_leaf_tag_map(scenario, "os_config", "bootargs")
- vm_type = common.get_leaf_tag_map(scenario, "vm_type")
+ kern_type = common.get_leaf_tag_map(scenario, "os_config", "kern_type")
for vm_id, passthru_devices in dict_passthru_devices.items():
- if kern_args[int(vm_id)].find('reboot=acpi') == -1 and vm_type[int(vm_id)] not in ['SAFETY_VM']:
+ if kern_args[int(vm_id)].find('reboot=acpi') == -1 and kern_type[int(vm_id)] in ['KERNEL_BZIMAGE']:
emsg = "you need to specify 'reboot=acpi' in scenario file's bootargs for VM{}".format(vm_id)
print(emsg)
err_dic['vm,bootargs'] = emsg
|
Fix Apple Clang issue, and small fragment retransmit | @@ -2067,7 +2067,7 @@ int picoquic_prepare_packet_client_init(picoquic_cnx_t* cnx, picoquic_path_t * p
* server is performing anti-dos mitigation and the client has nothing to repeat */
if ((packet->ptype == picoquic_packet_initial && cnx->crypto_context[picoquic_epoch_handshake].aead_encrypt == NULL &&
cnx->pkt_ctx[picoquic_packet_context_initial].retransmit_newest == NULL &&
- cnx->pkt_ctx[picoquic_packet_context_initial].first_sack_item.end_of_sack_range >= 0) ||
+ cnx->pkt_ctx[picoquic_packet_context_initial].first_sack_item.end_of_sack_range != UINT64_MAX) ||
(packet->ptype == picoquic_packet_handshake &&
cnx->pkt_ctx[picoquic_packet_context_handshake].retransmit_newest == NULL &&
cnx->pkt_ctx[picoquic_packet_context_handshake].first_sack_item.end_of_sack_range == UINT64_MAX &&
@@ -3069,7 +3069,12 @@ int picoquic_prepare_packet_ready(picoquic_cnx_t* cnx, picoquic_path_t* path_x,
packet->pc = pc;
- if ((length = picoquic_retransmit_needed(cnx, pc, path_x, current_time, next_wake_time, packet,
+ /* The first action is normally to retransmit lost packets. But if retransmit follows an
+ * MTU drop, the stream frame will be fragmented and a fragment will be queued as a
+ * misc frame. These fragments should have chance to go out before more retransmit is
+ * permitted, hence the test here for the misc-frame */
+ if (cnx->first_misc_frame == NULL &&
+ (length = picoquic_retransmit_needed(cnx, pc, path_x, current_time, next_wake_time, packet,
send_buffer_min_max, &header_length)) > 0) {
/* Check whether it makes sense to add an ACK at the end of the retransmission */
/* Don't do that if it risks mixing clear text and encrypted ack */
|
sse4.1: fix AArch64 implementation of simde_x_mm_blendv_epi64
Fixes | @@ -321,7 +321,7 @@ simde_x_mm_blendv_epi64 (simde__m128i a, simde__m128i b, simde__m128i mask) {
mask_ = simde__m128i_to_private(mask);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
- mask_.u64 = vcltq_s64(mask_.i64, vdupq_n_s64(UINT64_C(0)));
+ mask_.neon_u64 = vcltq_s64(mask_.neon_i64, vdupq_n_s64(UINT64_C(0)));
r_.neon_i64 = vbslq_s64(mask_.neon_u64, b_.neon_i64, a_.neon_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
v128_t m = wasm_i64x2_shr(mask_.wasm_v128, 63);
|
More formatting attempts. | @@ -15,11 +15,11 @@ Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
- * ``Demonstrating empathy and kindness toward other people``
- * ``respectful of differing opinions, viewpoints, and experiences``
- * ``Giving and gracefully accepting constructive feedback``
- * ``Accepting responsibility, apologizing to those affected by our mistakes, and learning from the experience``
- * ``Focusing on what is best not just for us as individuals, but for the overall community``
+ - Demonstrating empathy and kindness toward other people
+ - Being respectful of differing opinions, viewpoints, and experiences
+ - Giving and gracefully accepting constructive feedback
+ - Accepting responsibility, apologizing to those affected by our mistakes, and learning from the experience
+ - Focusing on what is best not just for us as individuals, but for the overall community
The VPLanet community strives to:
|
add Guake to start as floating | @@ -296,6 +296,7 @@ static const Rule rules[] = {
{"Welcome.py", NULL, NULL, 0, 1, -1},
{"Pamac-installer", NULL, NULL, 0, 1, -1},
{"xpad", NULL, NULL, 0, 1, -1},
+ {"Guake", NULL, NULL, 0, 1, -1},
{"instantfloat", NULL, NULL, 0, 2, -1},
{scratchpadname, NULL, NULL, 0, 4, -1},
{"kdeconnect.daemon", NULL, NULL, 0, 3, -1},
|
Add more paths to node port. | @@ -4,9 +4,11 @@ const Path = require('path');
const addon = (() => {
const paths = [
+ __dirname,
Path.join(__dirname, 'build'),
process.cwd(),
process.env.LOADER_LIBRARY_PATH,
+ Path.join(process.env.LOADER_LIBRARY_PATH, 'build'),
];
const names = [
|
Documentation fix on "set interface ip[6] table" | @@ -761,7 +761,7 @@ ip6_table_bind_cmd (vlib_main_t * vm,
/*?
* Place the indicated interface into the supplied IPv4 FIB table (also known
- * as a VRF). If the FIB table does not exist, this command creates it. To
+ * as a VRF). The FIB table must be created using "ip table add" already. To
* display the current IPv4 FIB table, use the command '<em>show ip fib</em>'.
* FIB table will only be displayed if a route has been added to the table, or
* an IP Address is assigned to an interface in the table (which adds a route
@@ -789,7 +789,7 @@ VLIB_CLI_COMMAND (set_interface_ip_table_command, static) =
/*?
* Place the indicated interface into the supplied IPv6 FIB table (also known
- * as a VRF). If the FIB table does not exist, this command creates it. To
+ * as a VRF). The FIB table must be created using "ip6 table add" already. To
* display the current IPv6 FIB table, use the command '<em>show ip6 fib</em>'.
* FIB table will only be displayed if a route has been added to the table, or
* an IP Address is assigned to an interface in the table (which adds a route
|
Change unhelpful error messages in cpp key bindings
}
catch (kdb::KeyInvalidName const &)
{
- succeed_if (test.getName () == "/", "name not unchanged");
+ succeed_if (test.getName () == "/", "name not unchanged after trying to set invalid name to key");
}
test.setName ("user:/name");
@@ -349,7 +349,7 @@ TEST (key, exceptions)
}
catch (kdb::KeyInvalidName const &)
{
- succeed_if (test.getName () == "user:/name", "name not unchanged");
+ succeed_if (test.getName () == "user:/name", "name not unchanged after trying to set invalid name to key");
}
try
@@ -358,7 +358,7 @@ TEST (key, exceptions)
}
catch (kdb::KeyInvalidName const &)
{
- succeed_if (test.getName () == "user:/name", "name not unchanged");
+ succeed_if (test.getName () == "user:/name", "name not unchanged after trying to add invalid name to key");
}
Key test1;
|
tutorial-improvements: small fixes and unifications for application-integration | @@ -69,8 +69,8 @@ or cross-cutting concerns.
To sum up, if a developer wants to **elektrify** software, he or she can
do that without any need for changes to the outside world regarding the
format and semantics of the configuration. In the interconnected
-world it's a matter of time until other software also wants to
-access the configuration, and with elektrified software it's possible
+world it is a matter of time until other software also wants to
+access the configuration, and with elektrified software it is possible
for every application to do so.
@@ -107,7 +107,7 @@ Key *parentKey = keyNew("/sw/org/myapp/#0/current", KEY_END);
- `current` is the [profile](/src/plugins/profile/README.md)
to use. Administrators need it
if they want to start up applications with different configurations.
-- `KEY_END` is required by C needs a proper termination of variable
+- `KEY_END` as C needs a proper termination of variable
length arguments.
The key name is standardized to make it easier to locate configuration.
@@ -146,7 +146,7 @@ Now we have everything ready to fetch the latest configuration:
kdbGet(repo, conf, parentKey);
```
-Note it's important for applications that the parentKey starts with a slash `/`.
+Note it is important for applications that the parentKey starts with a slash `/`.
This ensures pulling in all keys of the so-called [namespace](/doc/help/elektra-namespaces.md).
Such a name cannot physically exist in configuration files, but they are
the most important key names to actually work with configuration within
@@ -170,7 +170,7 @@ in configuration files, but are "representatives", "proxies" or
"logical placeholders" for keys from any other [namespace](/doc/help/elektra-namespaces.md).
So that every tool has a consistent view to the key database
-it's vital that every application does a `ksLookup` for every
+it is vital that every application does a `ksLookup` for every
key it uses. So even if your application iterates over keys,
always remember to do a [cascading](cascading.md) lookup for every single key!
@@ -180,7 +180,7 @@ Thus we are interested in the value we use:
char *val = keyString(k);
```
-Now we need to convert the configuration value to the data-type we
+We need to convert the configuration value to the data type we
need.
To do this manually has severe drawbacks:
@@ -206,7 +206,7 @@ But, we did not discuss how we can actually achieve application integration,
the goal of Elektra.
Elektra 0.8.11 introduces the so called specification for the
-application's configuration. It's located below its own [namespace](/doc/help/elektra-namespaces.md)
+application's configuration, located below its own [namespace](/doc/help/elektra-namespaces.md)
`spec` (next to user and system).
Keys in `spec` allow us to specify which keys the application reads,
|
hoon: use +dvr instead of +div/+mod in |co helpers | ^- tape
?: &(=(0 hol) =(0 min))
rep
- =+ [rad=(mod hol bas) dar=(div hol bas)]
+ =/ [dar=@ rad=@] (dvr hol bas)
%= $
min ?:(=(0 min) 0 (dec min))
hol dar
^- {tape @}
?: &(=(0 hol) =(0 min))
[rep cou]
- =+ [rad=(mod hol bas) dar=(div hol bas)]
+ =/ [dar=@ rad=@] (dvr hol bas)
%= $
min ?:(=(0 min) 0 (dec min))
hol dar
|
mm/mempool/mempool_multiple.c: Remove void * arithmetic
Cast subtraction arguments to FAR char *, which gives the same result as the
gcc extension on the original void * arithmetic.
col = index - (row << mpool->dict_col_num_log2);
if (mpool->dict[row] == NULL ||
mpool->dict[row][col].addr != addr ||
- blk - addr >= mpool->dict[row][col].size)
+ (FAR char *)blk - (FAR char *)addr >= mpool->dict[row][col].size)
{
return NULL;
}
|
document clang issues | @@ -49,6 +49,9 @@ Updates and further information can be found here:
GCC compiler, the FFTW library, and optionally CUDA.
(see recon/Makefile to turn options on or off)
+It should be possible to use the clang compiler, but older
+version before version 4.0 are known to cause problems.
+
The software can be used in combination with Matlab or octave.
|
doc: release notes
see | @@ -93,6 +93,7 @@ We added:
- the private headerfiles `kdbnotificationinternal.h`, `kdbioplugin.h`.
- `kdb get`, `kdb mv` and `kdb cp` use error code `11` if keys are not found
+- the constant `ENABLE_ASAN` in the constants plugin
We removed:
|
Try to find related nodes for sensors on-the-fly | @@ -2710,6 +2710,7 @@ void DeRestPluginPrivate::checkUpdatedFingerPrint(const deCONZ::Node *node, quin
fp.endpoint = sd.endpoint();
fp.profileId = sd.profileId();
+ updateSensorEtag(&*i);
i->setUniqueId(generateUniqueId(i->address().ext(), fp.endpoint, clusterId));
i->setNeedSaveDatabase(true);
queSaveDb(DB_SENSORS, DB_SHORT_SAVE_DELAY);
@@ -8471,6 +8472,16 @@ void DeRestPlugin::idleTimerFired()
Sensor *sensorNode = &d->sensors[d->sensorIter];
d->sensorIter++;
+ if (!sensorNode->node())
+ {
+ deCONZ::Node *node = d->getNodeForAddress(sensorNode->address().ext());
+ if (node)
+ {
+ sensorNode->setNode(node);
+ sensorNode->fingerPrint().checkCounter = SENSOR_CHECK_COUNTER_INIT; // force check
+ }
+ }
+
if (sensorNode->node())
{
sensorNode->fingerPrint().checkCounter++;
@@ -8481,6 +8492,7 @@ void DeRestPlugin::idleTimerFired()
{
d->checkUpdatedFingerPrint(sensorNode->node(), ep, sensorNode);
}
+ d->checkSensorNodeReachable(sensorNode);
}
}
|
fix call to do_isfile | @@ -66,7 +66,7 @@ int os_touchfile(lua_State* L)
const char* dst = luaL_checkstring(L, 1);
// if destination exist, mark the file as modified
- if (do_isfile(dst))
+ if (do_isfile(L, dst))
{
if (truncate_file(dst))
{
|
No jira ticket: fixed compilation error if the only timer
allowed by syscfg was the RTC (TIMER_3). | @@ -365,7 +365,7 @@ hal_timer_chk_queue(struct nrf51_hal_timer *bsptimer)
* This is the global timer interrupt routine.
*
*/
-#if NRF51_TIMER_DEFINED
+#if (MYNEWT_VAL(TIMER_0) || MYNEWT_VAL(TIMER_1) || MYNEWT_VAL(TIMER_2))
static void
hal_timer_irq_handler(struct nrf51_hal_timer *bsptimer)
{
|
serial-libs/R: pcre -> pcre2 | @@ -41,7 +41,7 @@ BuildRequires: xdg-utils
BuildRequires: pango-devel
BuildRequires: tcl-devel
BuildRequires: xz-devel
-BuildRequires: pcre-devel
+BuildRequires: pcre2-devel
BuildRequires: libcurl-devel
BuildRequires: tk-devel
%if 0%{?rhel}
|
Swap in `scope.yml` file's updated contents | @@ -69,16 +69,16 @@ event:
# Creates events from data written to files.
# Designed for monitoring log files, but capable of capturing
# any file writes.
-# - type: file
-# name: .*log.* # whitelist ex regex describing log file names
-# value: .* # whitelist ex regex describing field values
+ - type: file
+ name: .*log.* # whitelist ex regex describing log file names
+ value: .* # whitelist ex regex describing field values
# Creates events from data written to stdout, stderr, or both.
# May be most useful for capturing debug output from processes
# running in containerized environments.
-# - type: console
-# name: stdout # (stdout|stderr)
-# value: .*
+ - type: console
+ name: stdout # (stdout|stderr)
+ value: .*
# Creates events from libscope's metric (statsd) data. May be
# most useful for data which could overwhelm metric aggregation
@@ -90,28 +90,29 @@ event:
# value: .*
# Enable extraction of HTTP headers
-# - type: http
-# name: .* # (http-resp)|(http-metrics)
-# field: .* # whitelist regex describing field names
-# value: .*
+ - type: http
+ name: .* # (http-resp)|(http-metrics)
+ field: .* # whitelist regex describing field names
+ value: .*
+ headers: # (?i)X-myHeader.*
# Creates events describing network connectivity
-# - type: net
-# name: .* #
-# field: .* # whitelist regex describing field names
-# value: .*
+ - type: net
+ name: .* #
+ field: .* # whitelist regex describing field names
+ value: .*
# Creates events describing file connectivity
-# - type: fs
-# name: .* #
-# field: .* # whitelist regex describing field names
-# value: .*
+ - type: fs
+ name: .* #
+ field: .* # whitelist regex describing field names
+ value: .*
# Creates events describing dns activity
-# - type: dns
-# name: .* #
-# field: .* # whitelist regex describing field names
-# value: .*
+ - type: dns
+ name: .* #
+ field: .* # whitelist regex describing field names
+ value: .*
payload:
enable: false # true, false
@@ -123,7 +124,7 @@ libscope:
commanddir : '/tmp'
# commanddir supports changes to configuration settings of running
# processes. At every summary period the library looks in commanddir
- # to see if a file named scope.<pid> exists. (where pid is the process ID
+ # to see if a file named scope.<pid> exists. (Where pid is the process ID
# of the process running with scope.) If it exists, it processes every
# line for environment variable-style commands:
# SCOPE_METRIC_VERBOSITY=9
@@ -131,7 +132,7 @@ libscope:
# scope.<pid> file when it's complete.
log:
- level: error # debug, info, warning, error, none
+ level: warning # debug, info, warning, error, none
transport:
type: file
path: '/tmp/scope.log'
|
explicitly define dlamch
%endif
-FLAGS="%optflags -fPIC -Dhypre_dgesvd=dgesvd_"
+FLAGS="%optflags -fPIC -Dhypre_dgesvd=dgesvd_ -Dhypre_dlamch=dlamch_ "
cd src
./configure \
--prefix=%{install_path} \
|
groups: fix metadata editing for groups | @@ -20,6 +20,9 @@ import { StatelessAsyncButton } from "~/views/components/StatelessAsyncButton";
import { ColorInput } from "~/views/components/ColorInput";
import { useHistory } from "react-router-dom";
+import { uxToHex } from '~/logic/lib/util';
+
+
interface FormSchema {
title: string;
description: string;
@@ -57,10 +60,11 @@ export function GroupSettings(props: GroupSettingsProps) {
) => {
try {
const { title, description, color, isPrivate } = values;
+ const uxColor = uxToHex(color);
await props.api.metadata.update(props.association, {
title,
description,
- color,
+ color: uxColor
});
if (isPrivate !== currentPrivate) {
const resource = resourceFromPath(props.association["group-path"]);
|
framework/st_things: Fix null pointer dereference issue
Passing a null pointer to strlen() would cause a crash.
json_str = get_json_string_from_file(filename);
#endif
-
if (json_str == NULL) {
THINGS_LOG_V(TAG, "cloud file initialization.");
#ifdef CONFIG_ST_THINGS_SECURESTORAGE
@@ -924,8 +923,7 @@ int parse_things_cloud_json(const char *filename)
return 0;
}
#endif
- }
-
+ } else {
if (strlen(json_str) > 0) {
cJSON *root = cJSON_Parse((const char *)json_str);
if (root != NULL) {
@@ -943,6 +941,8 @@ int parse_things_cloud_json(const char *filename)
}
}
things_free(json_str);
+ }
+
THINGS_LOG_D(TAG, THINGS_FUNC_EXIT);
return ret;
}
|
not easily supported on VMS command line yet
Some tests are designed to be run on the command line.
We simply disable those on VMS. | @@ -40,7 +40,7 @@ if (eval { require Win32::API; 1; }) {
}
} elsif ($^O eq "MSWin32") {
plan skip_all => "Win32::API unavailable";
-} else {
+} elsif ($^O ne "VMS") {
# Running MinGW tests transparently under Wine apparently requires
# UTF-8 locale...
@@ -63,11 +63,16 @@ ok(run(test(["pkcs12_format_test", "-legacy"])), "test pkcs12 formats using lega
# Test with a non-default library context (and no loaded providers in the default context)
ok(run(test(["pkcs12_format_test", "-context"])), "test pkcs12 formats using a non-default library context");
+SKIP: {
+ skip "VMS doesn't have command line UTF-8 support yet in DCL", 1
+ if $^O eq "VMS";
+
# just see that we can read shibboleth.pfx protected with $pass
ok(run(app(["openssl", "pkcs12", "-noout",
"-password", "pass:$pass",
"-in", srctop_file("test", "shibboleth.pfx")])),
"test_load_cert_pkcs12");
+}
my @path = qw(test certs);
my $outfile1 = "out1.p12";
|
MinGW 64: Add required plugins to configure script | @@ -18,7 +18,7 @@ cmake -DENABLE_TESTING=OFF \
-DBUILD_STATIC=ON \
-DBUILD_FULL=ON \
-DBUILD_SHARED=OFF \
- -DPLUGINS="wresolver;dump" \
+ -DPLUGINS="wresolver;dump;list;spec" \
-DKDB_DEFAULT_RESOLVER=wresolver \
-DKDB_DEFAULT_STORAGE=dump \
-DKDB_DB_SYSTEM=kdb -DKDB_DB_SPEC=spec \
|
Test ASCII-like unknown encoding correctly rejects invalid characters | @@ -5511,6 +5511,18 @@ START_TEST(test_unknown_ascii_encoding_ok)
}
END_TEST
+START_TEST(test_unknown_ascii_encoding_fail)
+{
+ const char *text =
+ "<?xml version='1.0' encoding='experimental'?>\n"
+ "<doc>Hello, \x80 world</doc>";
+
+ XML_SetUnknownEncodingHandler(parser, AsciiAsUnknownEncodingHandler, NULL);
+ expect_failure(text, XML_ERROR_INVALID_TOKEN,
+ "Invalid character not faulted");
+}
+END_TEST
+
/*
* Namespaces tests.
@@ -10777,6 +10789,7 @@ make_suite(void)
tcase_add_test(tc_basic, test_unknown_encoding_long_name_2);
tcase_add_test(tc_basic, test_invalid_unknown_encoding);
tcase_add_test(tc_basic, test_unknown_ascii_encoding_ok);
+ tcase_add_test(tc_basic, test_unknown_ascii_encoding_fail);
suite_add_tcase(s, tc_namespace);
tcase_add_checked_fixture(tc_namespace,
|
explain how to read the certificate. | @@ -1473,6 +1473,10 @@ To use a nondefault port for DNS communication append '@' with the port number.
If tls is enabled, then you can append a '#' and a name, then it'll check
the tls authentication certificates with that name. If you combine
the '@' and '#', the '@' comes first.
+.IP
+At high verbosity it logs the TLS certificate, with TLS enabled.
+If you leave out the '#' and auth name from the forward\-addr, any
+name is accepted. The cert must also match a CA from the tls\-cert\-bundle.
.TP
.B forward\-first: \fI<yes or no>
If enabled, a query is attempted without the forward clause if it fails.
|
Updating test_decomps: pass correct arguments to PIOc_readmap() and fix memory leaks. | @@ -54,9 +54,9 @@ int main(int argc, char **argv)
int bad_slice_dimlen[2]; /* Invalid values. */
int ioid; /* The decomposition ID. */
int ndims;
- int gdims[NDIM];
+ int *gdims = NULL;
PIO_Offset fmaplen;
- PIO_Offset map[16];
+ PIO_Offset *map = NULL;
int ret; /* Return code. */
/* Initialize test. */
@@ -125,23 +125,23 @@ int main(int argc, char **argv)
return ret;
/* These should not work. */
- if (PIOc_readmap(NULL, &ndims, (int **)&gdims, &fmaplen, (PIO_Offset **)&map,
+ if (PIOc_readmap(NULL, &ndims, &gdims, &fmaplen, &map,
test_comm) != PIO_EINVAL)
return ERR_WRONG;
- if (PIOc_readmap(DECOMP_FILE, NULL, (int **)&gdims, &fmaplen, (PIO_Offset **)&map,
+ if (PIOc_readmap(DECOMP_FILE, NULL, &gdims, &fmaplen, &map,
test_comm) != PIO_EINVAL)
return ERR_WRONG;
- if (PIOc_readmap(DECOMP_FILE, &ndims, NULL, &fmaplen, (PIO_Offset **)&map,
+ if (PIOc_readmap(DECOMP_FILE, &ndims, NULL, &fmaplen, &map,
test_comm) != PIO_EINVAL)
return ERR_WRONG;
- if (PIOc_readmap(DECOMP_FILE, &ndims, (int **)&gdims, NULL, (PIO_Offset **)&map,
+ if (PIOc_readmap(DECOMP_FILE, &ndims, &gdims, NULL, &map,
test_comm) != PIO_EINVAL)
return ERR_WRONG;
- if (PIOc_readmap(DECOMP_FILE, &ndims, (int **)&gdims, &fmaplen, NULL, test_comm) != PIO_EINVAL)
+ if (PIOc_readmap(DECOMP_FILE, &ndims, &gdims, &fmaplen, NULL, test_comm) != PIO_EINVAL)
return ERR_WRONG;
/* Read the decomp file and check results. */
- if ((ret = PIOc_readmap(DECOMP_FILE, &ndims, (int **)&gdims, &fmaplen, (PIO_Offset **)&map,
+ if ((ret = PIOc_readmap(DECOMP_FILE, &ndims, &gdims, &fmaplen, &map,
test_comm)))
return ret;
printf("ndims = %d fmaplen = %lld\n", ndims, fmaplen);
@@ -156,6 +156,10 @@ int main(int argc, char **argv)
printf("map[%d] = %lld\n", m, map[m]);
}
+ /* Free memory allocated inside PIOc_readmap() */
+ free(gdims);
+ free(map);
+
/* Free the PIO decomposition. */
printf("%d Freeing PIO decomposition...\n", my_rank);
if ((ret = PIOc_freedecomp(iosysid, ioid)))
|
opae.admin: update spi path for dfl drivers
The glob to find the location of the spi path in sysfs has changed
to dfl-fme.*.*/subdev_spi_altera.*.auto/... Update opae.admin to use
the new glob pattern for dfl drivers. | @@ -207,7 +207,9 @@ class fme(region):
@property
def spi_bus(self):
if os.path.basename(self.sysfs_path).startswith('dfl'):
- return self.find_one('dfl-fme.*.*/spi*/spi_master/spi*/spi*')
+ return self.find_one('dfl-fme.*.*/'
+ '*spi*/'
+ 'spi_master/spi*/spi*')
return self.find_one('spi*/spi_master/spi*/spi*')
@property
|
CMSIS-NN: Modified cmake for Reshape folders to find include files. | @@ -10,3 +10,8 @@ add_library(CMSISNNReshape STATIC ${SRC})
configLib(CMSISNNReshape ${ROOT})
configDsp(CMSISNNReshape ${ROOT})
+
+### Includes
+target_include_directories(CMSISNNReshape PUBLIC "${NN}/Include")
+target_include_directories(CMSISNNReshape PUBLIC "${ROOT}/CMSIS/DSP/Include")
+
|
Changelog for 4.4.3RC2 | +CARTO Mobile SDK 4.4.3RC2
+-------------------
+
+### Changes, fixes:
+
+* Fixed 'TileLayer' not properly recalculating tiles when visibility changes, causing layer to remain hidden.
+* Fixed deadlock in 'ClusteredVectorLayer' when its data source is non-empty with all elements being hidden
+* Fixed stale tiles remaining in caches when offline packages were removed
+* Fixed subtle synchronization issues in 'PackageManager'
+* Added support for parallel requests to 'ValhallaOfflineRoutingService'
+* Added javadoc to published Android artifacts to Maven central
+* Minor fixes to iOS build script
+* Updated internal libjpeg-turbo, harfbuzz libraries to latest stable versions
+
+
CARTO Mobile SDK 4.4.3RC1
-------------------
|
Update scrollbar position processing. | @@ -216,7 +216,6 @@ static void StartInertialScrolling( LCUI_Widget w )
static void Slider_OnMouseMove( LCUI_Widget slider,
LCUI_WidgetEvent e, void *arg )
{
- LCUI_Pos pos;
LCUI_Widget layer;
LCUI_Widget w = e->data;
LCUI_ScrollBar scrollbar;
@@ -227,11 +226,10 @@ static void Slider_OnMouseMove( LCUI_Widget slider,
return;
}
layer = scrollbar->layer;
- LCUICursor_GetPos( &pos );
if( scrollbar->direction == SBD_HORIZONTAL ) {
y = 0;
x = scrollbar->slider_x;
- x += pos.x - scrollbar->mouse_x;
+ x += e->motion.x - scrollbar->mouse_x;
if( scrollbar->box ) {
box_size = scrollbar->box->box.content.width;
} else {
@@ -256,7 +254,7 @@ static void Slider_OnMouseMove( LCUI_Widget slider,
} else {
x = 0;
y = scrollbar->slider_y;
- y += pos.y - scrollbar->mouse_y;
+ y += e->motion.y - scrollbar->mouse_y;
if( scrollbar->box ) {
box_size = scrollbar->box->box.content.height;
} else {
|
fix reporting of fd | @@ -20,6 +20,7 @@ int osGetNumThreads(pid_t pid)
}
if (read(fd, buf, sizeof(buf)) == -1) {
+ close(fd);
return -1;
}
@@ -27,7 +28,7 @@ int osGetNumThreads(pid_t pid)
for (i = 1; i < 20; i++) {
entry = strtok(NULL, delim);
}
-
+ close(fd);
return atoi((const char *)entry);
}
@@ -50,7 +51,7 @@ int osGetNumFds(pid_t pid)
}
closedir(dirp);
- return nfile;
+ return nfile - 1; // we opened one fd to read /fd :)
}
int osGetNumChildProcs(pid_t pid)
|
[awm] Closing a window informs the dock | @@ -54,9 +54,14 @@ static void _awm_animation_close_window_step(awm_animation_close_window_t* anim,
}
static void _awm_animation_close_window_finish(awm_animation_close_window_t* anim) {
- windows_invalidate_drawable_regions_in_rect(anim->destination_frame);
- compositor_queue_rect_to_redraw(anim->destination_frame);
+ //compositor_queue_rect_to_redraw(anim->destination_frame);
+ //windows_invalidate_drawable_regions_in_rect(anim->destination_frame);
user_window_t* window = anim->window;
+ awm_dock_window_closed_event_t msg = {
+ .event = AWM_DOCK_WINDOW_CLOSED,
+ .window_id = window->window_id
+ };
+ amc_message_send(AWM_DOCK_SERVICE_NAME, &msg, sizeof(msg));
window_destroy(window);
}
|
Add a simple test for the z modifier | #include <stdio.h>
#include <string.h>
#include <openssl/bio.h>
+#include "internal/numbers.h"
+
+#define nelem(x) (sizeof(x)/sizeof((x)[0]))
static int justprint = 0;
@@ -138,10 +141,42 @@ static void dofptest(int test, double val, char *width, int prec, int *fail)
}
}
+struct z_data_st {
+ size_t value;
+ const char *format;
+ const char *expected;
+};
+static struct z_data_st zu_data[] = {
+ { SIZE_MAX, "%zu", (sizeof(size_t) == 4 ? "4294967295"
+ : sizeof(size_t) == 8 ? "18446744073709551615"
+ : "") },
+ /*
+ * in 2-complement, the unsigned number divided by two plus one becomes the
+ * smallest possible negative signed number of the corresponding type
+ */
+ { SIZE_MAX / 2 + 1, "%zi", (sizeof(size_t) == 4 ? "-2147483648"
+ : sizeof(size_t) == 8 ? "-9223372036854775808"
+ : "") },
+ { 0, "%zu", "0" },
+ { 0, "%zi", "0" },
+};
+
+static void dozutest(int test, const struct z_data_st *data, int *fail)
+{
+ char bio_buf[80];
+
+ BIO_snprintf(bio_buf, sizeof(bio_buf) - 1, data->format, data->value);
+ if (strcmp(bio_buf, data->expected) != 0) {
+ printf("Test %d failed. Expected \"%s\". Got \"%s\". "
+ "Format \"%s\"\n", test, data->expected, bio_buf, data->format);
+ *fail = 1;
+ }
+}
+
int main(int argc, char **argv)
{
int test = 0;
- int i;
+ size_t i;
int fail = 0;
int prec = -1;
char *width = "";
@@ -207,6 +242,16 @@ int main(int argc, char **argv)
fail = 1;
}
+ for (i = 0; i < nelem(zu_data); i++) {
+ dozutest(test++, &zu_data[i], &fail);
+ }
+
+#if 0
+ for (i = 0; i < nelem(zi_data); i++) {
+ dozitest(test++, &zu_data[i], &fail);
+ }
+#endif
+
#ifndef OPENSSL_NO_CRYPTO_MDEBUG
if (CRYPTO_mem_leaks_fp(stderr) <= 0)
return 1;
|
Minor consistency update | @@ -126,9 +126,9 @@ void CPAbtHandler(uint32_t IFSR, uint32_t IFAR, uint32_t LR) {
//this will be 2 when we have performed some maintenance and want to retry the instruction in Thumb (state == 2)
//this will be 4 when we have performed some maintenance and want to retry the instruction in ARM (state == 4)
uint32_t CUndefHandler(uint32_t opcode, uint32_t state, uint32_t LR) {
- (void)LR;
const uint32_t THUMB = 2U;
const uint32_t ARM = 4U;
+ (void)LR;
//Lazy VFP/NEON initialisation and switching
// (ARM ARM section A7.5) VFP data processing instruction?
|
Include extra PhDeleteDirectory checks | @@ -8566,7 +8566,7 @@ NTSTATUS PhDeleteDirectory(
status = PhCreateFileWin32(
&directoryHandle,
PhGetString(DirectoryPath),
- FILE_GENERIC_READ | DELETE,
+ FILE_LIST_DIRECTORY | DELETE | SYNCHRONIZE,
FILE_ATTRIBUTE_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_DELETE,
FILE_OPEN,
@@ -8575,7 +8575,7 @@ NTSTATUS PhDeleteDirectory(
if (NT_SUCCESS(status))
{
- // Remove any files or folders inside the directory.
+ // Remove any files or folders inside the directory. (dmex)
status = PhEnumDirectoryFile(
directoryHandle,
NULL,
@@ -8583,8 +8583,11 @@ NTSTATUS PhDeleteDirectory(
DirectoryPath
);
- // Remove the directory.
- PhDeleteFile(directoryHandle);
+ if (NT_SUCCESS(status))
+ {
+ // Remove the directory. (dmex)
+ status = PhDeleteFile(directoryHandle);
+ }
NtClose(directoryHandle);
}
|
improved prev/next playermodeln() logic | @@ -5505,7 +5505,7 @@ s_model *nextplayermodeln(s_model *current, int p)
{
++used_player_count;
// all busy players? return the next natural
- if (used_player_count >= player_count) return nextplayermodel(current);
+ if (used_player_count >= player_count) return model;
}
}
@@ -5588,7 +5588,7 @@ s_model *prevplayermodeln(s_model *current, int p)
{
++used_player_count;
// all busy players? return the prev natural
- if (used_player_count >= player_count) return prevplayermodel(current);
+ if (used_player_count >= player_count) return model;
}
}
|
perf-tools/scorep: new url for source download | @@ -23,7 +23,7 @@ Release: 1%{?dist}
License: BSD
Group: %{PROJ_NAME}/perf-tools
URL: http://www.vi-hps.org/projects/score-p/
-Source0: http://www.vi-hps.org/upload/packages/scorep/scorep-%{version}.tar.gz
+Source0: http://www.vi-hps.org/cms/upload/packages/scorep/scorep-%{version}.tar.gz
%if 0%{?sles_version} || 0%{?suse_version}
BuildRequires: fdupes
|
apps/examples/userfs: Fix check for open with write+append mode. | @@ -232,7 +232,7 @@ static int ufstest_open(FAR void *volinfo, FAR const char *relpath,
return -ENOMEM;
}
- if ((oflags && O_APPEND) != 0)
+ if ((oflags & (O_WROK | O_APPEND)) == (O_WROK | O_APPEND))
{
opriv->pos = file->inuse;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.