message (stringlengths 6-474) | diff (stringlengths 8-5.22k)
---|---|
drop NEHALEM from the DYNLIST for Windows/mingw to save time | @@ -81,7 +81,7 @@ jobs:
vmImage: 'windows-latest'
steps:
- script: |
- mingw32-make CC=gcc FC=gfortran DYNAMIC_ARCH=1 DYNAMIC_LIST="NEHALEM SANDYBRIDGE HASWELL"
+ mingw32-make CC=gcc FC=gfortran DYNAMIC_ARCH=1 DYNAMIC_LIST="SANDYBRIDGE HASWELL"
- job: Windows_clang_cmake
pool:
|
viofs-svc: SetBasicInfo now supports FileAttributes modifications.
This is a test solution which allows setting a read-only flag
(FILE_ATTRIBUTE_READONLY) on a file or directory. | @@ -1145,8 +1145,6 @@ static NTSTATUS SetBasicInfo(FSP_FILE_SYSTEM *FileSystem, PVOID FileContext0,
FUSE_SETATTR_IN setattr_in;
FUSE_SETATTR_OUT setattr_out;
- UNREFERENCED_PARAMETER(FileAttributes);
-
DBG("fh: %Iu nodeid: %Iu", FileContext->FileHandle, FileContext->NodeId);
FUSE_HEADER_INIT(&setattr_in.hdr, FUSE_SETATTR, FileContext->NodeId,
@@ -1154,12 +1152,29 @@ static NTSTATUS SetBasicInfo(FSP_FILE_SYSTEM *FileSystem, PVOID FileContext0,
ZeroMemory(&setattr_in.setattr, sizeof(setattr_in.setattr));
- if (FileContext->FileHandle != INVALID_FILE_HANDLE)
+ if ((FileContext->IsDirectory == FALSE) &&
+ (FileContext->FileHandle != INVALID_FILE_HANDLE))
{
setattr_in.setattr.valid |= FATTR_FH;
setattr_in.setattr.fh = FileContext->FileHandle;
}
+ if (FileAttributes != INVALID_FILE_ATTRIBUTES)
+ {
+ setattr_in.setattr.valid |= FATTR_MODE;
+ setattr_in.setattr.mode = 0664 /* -rw-rw-r-- */;
+
+ if (!!(FileAttributes & FILE_ATTRIBUTE_READONLY) == TRUE)
+ {
+ setattr_in.setattr.mode &= ~0222;
+ }
+
+ if (!!(FileAttributes & FILE_ATTRIBUTE_DIRECTORY) == TRUE)
+ {
+ setattr_in.setattr.mode |= 040111;
+ }
+ }
+
if (LastAccessTime != 0)
{
setattr_in.setattr.valid |= FATTR_ATIME;
|
Small renaming in docs | @@ -20,7 +20,7 @@ from the DUT.
Requests for a harness clock is done by the ``HarnessClockInstantiator`` class in ``generators/chipyard/src/main/scala/TestHarness.scala``.
This class is accessed in harness components by referencing the Rocket Chip parameters key ``p(HarnessClockInstantiatorKey)``.
-Then you can request a clock and syncronized reset at a particular frequency by invoking the ``getClockBundle`` function.
+Then you can request a clock and syncronized reset at a particular frequency by invoking the ``requestClockBundle`` function.
Take the following example:
.. literalinclude:: ../../generators/chipyard/src/main/scala/HarnessBinders.scala
|
Updates TSG.cmd for SMB over QUIC
Add details about collecting SMB over QUIC traces. | @@ -19,6 +19,7 @@ This document is meant to be a step-by-step guide for trouble shooting any issue
3. [No application (stream) data seems to be flowing.](#why-isnt-application-data-flowing)
4. [Why is this API failing?](#why-is-this-api-failing)
5. [An MsQuic API is hanging.](#why-is-the-api-hanging-or-deadlocking)
+6. [I am having problems with SMB over QUIC.](#trouble-shooting-smb-over-quic-issues)
## Understanding Error Codes
@@ -183,6 +184,26 @@ As you can see, the last event/log on the MsQuic worker thread was an indication
The solution here is that the app **must not** hold the lock when it calls into the blocking API, if that lock may also be acquired on the MsQuic thread.
+## Trouble Shooting SMB over QUIC issues
+
+To troubleshoot any SMB over QUIC issues on windows platforms, the best way is to collect SMB and QUIC traces and sharing it with SMB developers. Following are the steps:
+
+```
+Copy msquic/scripts/t.cmd to a local folder.
+
+For SMB Client (a.k.a. RDR) WPP traces
+t.cmd clion
+// repro and get the relevant error.
+t.cmd off
+
+For SMB Server WPP traces
+t.cmd srvon
+// repro and get the relevant error.
+t.cmd off
+
+Share the generated cab file with SMB developers.
+```
+
# Trouble Shooting a Performance Issue
1. [Is it a problem with just a single (or very few) connection?](#why-in-performance-bad-for-my-connection)
@@ -321,3 +342,4 @@ IndirectionTable: [Group:Number] : 0:0 0:2 0:4 0:6
```
The output above indicates RSS is configured with 8 queues, so there should be spreading of the incoming flows to 8 different CPUs (and then passed to 8 different workers) instead of just the 1 that we are seeing. So, finally, in cases where everything seems to be configured correctly, but things **still** aren't working, that usually indicates a problem with the network card driver. Make sure the driver is up to date with the latest version available. If that still doesn't fix the problem, you will likely need to contact support from the network card vendor.
+
|
Make sure that keyboard side-effects trigger. | @@ -809,7 +809,8 @@ TCOD_event_t TCOD_sys_handle_mouse_event(
*/
TCOD_event_t TCOD_sys_handle_key_event(const SDL_Event* ev, TCOD_key_t* key)
{
- if (!ev || !key) { return TCOD_EVENT_NONE; }
+ if (!ev) { return TCOD_EVENT_NONE; }
+ if (!key) { key = &TCOD_ctx.key_state; }
switch(ev->type) {
case SDL_KEYDOWN:
*key = TCOD_sys_SDLtoTCOD(ev, TCOD_KEY_PRESSED);
@@ -884,7 +885,6 @@ static TCOD_event_t TCOD_sys_handle_event(
static TCOD_event_t TCOD_sys_check_for_event_(
SDL_Event *ev, int eventMask, TCOD_key_t *key, TCOD_mouse_t *mouse) {
TCOD_event_t retMask = TCOD_EVENT_NONE;
- if (eventMask == 0) { return TCOD_EVENT_NONE; }
tcod_mouse.lbutton_pressed = 0;
tcod_mouse.rbutton_pressed = 0;
tcod_mouse.mbutton_pressed = 0;
@@ -929,7 +929,7 @@ TCOD_event_t TCOD_sys_wait_for_event(int eventMask, TCOD_key_t *key,
TCOD_event_t retMask = TCOD_EVENT_NONE;
if (eventMask == 0) { return TCOD_EVENT_NONE; }
if (flush) {
- while (SDL_PollEvent(&ev)) {
+ while (SDL_PollEvent(NULL)) {
TCOD_sys_check_for_event_(&ev, 0, NULL, NULL);
}
}
|
add warning for dirty git versions | /* Copyright 2015. The Regents of the University of California.
- * Copyright 2015-2018. Martin Uecker.
+ * Copyright 2015-2021. Martin Uecker.
+ Copyright 2018. Damien Nguyen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
- * 2014-2016 Martin Uecker <[email protected]>
+ * 2014-2021 Martin Uecker <[email protected]>
*/
#include <stdlib.h>
#include "misc/io.h"
#include "misc/misc.h"
+#include "misc/version.h"
#include "misc/debug.h"
#include "misc/cppmap.h"
@@ -135,11 +136,18 @@ int main_bart(int argc, char* argv[argc])
return main_bart(argc - 1, argv + 1);
}
+ unsigned int v[5];
+ version_parse(v, bart_version);
+
+ if (0 != v[4])
+ debug_printf(DP_WARN, "BART version is not reproducible.\n");
+
for (int i = 0; NULL != dispatch_table[i].name; i++)
if (0 == strcmp(bn, dispatch_table[i].name))
return dispatch_table[i].main_fun(argc, argv);
fprintf(stderr, "Unknown bart command: \"%s\".\n", bn);
+
return -1;
}
|
soto: tombstone | +:: soto [tombstone]: former dojo relay for urbit's landscape interface
::
-:: soto [landscape]: A Dojo relay for Urbit's Landscape interface
-::
-:: Relays sole-effects to subscribers and forwards sole-action pokes
-::
-/- sole
-/+ *soto, default-agent
-|%
-+$ card card:agent:gall
-::
-+$ versioned-state
- $@ state-null
- state-zero
-::
-+$ state-null ~
-::
-+$ state-zero [%0 ~]
---
-=| state-zero
-=* state -
-^- agent:gall
-|_ bol=bowl:gall
-+* this .
- soto-core +>
- sc ~(. soto-core bol)
- def ~(. (default-agent this %|) bol)
-::
-++ on-init
- :_ this
- :_ ~
- :* %pass /srv %agent [our.bol %file-server]
- %poke %file-server-action
- !>([%serve-dir /'~dojo' /app/landscape %.n %.y])
- ==
-++ on-save !>(state)
-::
-++ on-load
- |= old-vase=vase
- =/ old
- !<(versioned-state old-vase)
- ?^ old
- [~ this(state old)]
- :_ this(state [%0 ~])
- :~ [%pass /bind/soto %arvo %e %disconnect [~ /'~dojo']]
- :* %pass /srv %agent [our.bol %file-server]
- %poke %file-server-action
- !>([%serve-dir /'~dojo' /app/landscape %.n %.y])
- ==
- ==
-::
-++ on-poke on-poke:def
-++ on-watch
- |= pax=path
- ^- (quip card _this)
- ?+ pax (on-watch:def pax)
- [%sototile ~]
- :_ this
- [%give %fact ~ %json !>(~)]~
- ==
-::
-++ on-agent on-agent:def
-::
-++ on-arvo
- |= [wir=wire sin=sign-arvo]
- ^- (quip card _this)
- ?: ?=(%bound +<.sin)
- [~ this]
- (on-arvo:def wir sin)
-::
-++ on-fail on-fail:def
-++ on-leave on-leave:def
-++ on-peek on-peek:def
-::
---
+/+ default-agent
+(default-agent *agent:gall %|)
|
doc: add english wikipedia to todo/documentation | @@ -31,3 +31,4 @@ Websites writing about Elektra or linking to Elektra
- https://nl.wikipedia.org/wiki/Configuratiebestand
- https://fr.wikipedia.org/wiki/Elektra_%28registre_linux%29
- https://de.wikipedia.org/wiki/Elektra_%28Software%29
+- https://en.wikipedia.org/wiki/Draft:Elektra_(Software)
\ No newline at end of file
|
fix when stdout is not a tty | #include "prefix.h"
#ifdef TB_CONFIG_OS_LINUX
#include <sys/ioctl.h>
-#include <termios.h>
-#include <unistd.h>
+#include <errno.h> // for errno
+#include <unistd.h> // for STDOUT_FILENO
#endif
/* //////////////////////////////////////////////////////////////////////////////////////
@@ -53,9 +53,14 @@ tb_int_t xm_os_getwinsize(lua_State* lua)
// get winsize
# ifdef TB_CONFIG_OS_LINUX
struct winsize size;
- ioctl(STDOUT_FILENO, TIOCGWINSZ, &size);
+ if(ioctl(STDOUT_FILENO, TIOCGWINSZ, &size)==0){
w=size.ws_col;
h=size.ws_row;
+ }else if(errno == ENOTTY)
+ w=h=-1; // set to INF if stdout is not a tty
+ //
+ // if stdout is a file there is no
+ // need to consider winsize limit
# endif
// done os.getwinsize()
|
Add serial to flywoof411rx | {
"name": "flywoof411rx",
"configurations": [
+ {
+ "name": "brushless.serial",
+ "defines": {
+ "BRUSHLESS_TARGET": "",
+ "RX_UNIFIED_SERIAL": ""
+ }
+ },
{
"name": "brushless.frsky",
"defines": {
|
VERSION bump to version 1.4.54 | @@ -37,7 +37,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 4)
-set(SYSREPO_MICRO_VERSION 53)
+set(SYSREPO_MICRO_VERSION 54)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
eve: Swap volume up and down GPIO
The button behavior is inverted if we follow the schematic, so swap the
GPIO on these inputs so they match the expected behavior.
BRANCH=none
TEST=manual test of side volume button behavior | @@ -18,8 +18,9 @@ GPIO_INT(PMIC_DPWROK, PIN(9, 7), GPIO_INT_BOTH, power_signal_interrupt)
GPIO_INT(POWER_BUTTON_L, PIN(0, 4), GPIO_INT_BOTH | GPIO_PULL_UP, power_button_interrupt)
GPIO_INT(LID_OPEN, PIN(6, 7), GPIO_INT_BOTH, lid_interrupt)
GPIO_INT(TABLET_MODE_L, PIN(3, 6), GPIO_INT_BOTH, tablet_mode_interrupt)
-GPIO_INT(VOLUME_DOWN_L, PIN(8, 3), GPIO_INT_BOTH | GPIO_PULL_UP, button_interrupt)
-GPIO_INT(VOLUME_UP_L, PIN(8, 2), GPIO_INT_BOTH | GPIO_PULL_UP, button_interrupt)
+/* Volume buttons are swapped in the schematic */
+GPIO_INT(VOLUME_DOWN_L, PIN(8, 2), GPIO_INT_BOTH | GPIO_PULL_UP, button_interrupt)
+GPIO_INT(VOLUME_UP_L, PIN(8, 3), GPIO_INT_BOTH | GPIO_PULL_UP, button_interrupt)
GPIO_INT(WP_L, PIN(4, 0), GPIO_INT_BOTH, switch_interrupt)
GPIO_INT(AC_PRESENT, PIN(C, 1), GPIO_INT_BOTH, extpower_interrupt)
GPIO_INT(ACCELGYRO3_INT_L, PIN(9, 3), GPIO_INT_FALLING, bmi160_interrupt)
|
Print function from which panic() is called. | @@ -96,6 +96,7 @@ terminate_non_graceful(void) {
#define panic(...) \
do { \
+ SCTP_PRINTF("%s(): ", __FUNCTION__);\
SCTP_PRINTF(__VA_ARGS__); \
SCTP_PRINTF("\n"); \
terminate_non_graceful(); \
|
remove ffi gem dependency | @@ -35,7 +35,6 @@ Gem::Specification.new do |s|
s.add_dependency('simctl', '1.6.8')
s.add_dependency('listen', '3.0.6')
s.add_dependency('rubyzip', '1.3.0')
- s.add_dependency('ffi', '1.12.2')
s.add_dependency('rdoc', '4.2.2')
s.add_dependency('deep_merge','1.1.1')
s.add_dependency('nokogiri', '1.10.10')
|
fix null pointer ref in logger for bin update cmd
reported by viennadd on github. (SourceBrella Inc) | @@ -2287,7 +2287,8 @@ static void process_bin_update(conn *c) {
}
/* FIXME: losing c->cmd since it's translated below. refactor? */
LOGGER_LOG(c->thread->l, LOG_MUTATIONS, LOGGER_ITEM_STORE,
- NULL, status, 0, key, nkey, it->exptime, ITEM_clsid(it));
+ NULL, status, 0, key, nkey, req->message.body.expiration,
+ ITEM_clsid(it));
/* Avoid stale data persisting in cache because we failed alloc.
* Unacceptable for SET. Anywhere else too? */
|
Add test for timestamp format
valid against the following format YYYY-MM-DDThh:mm:ss.sTZD
(e.g.: "1997-07-16T19:20:30.452+01:00")
ref: | @@ -6,6 +6,7 @@ FAILED_TEST_LIST=""
FAILED_TEST_COUNT=0
EVT_FILE="/opt/test-runner/logs/events.log"
+SCOPE_LOG_FILE="/opt/test-runner/logs/scope.log"
starttest(){
CURRENT_TEST=$1
@@ -39,6 +40,7 @@ endtest(){
fi
rm -f $EVT_FILE
+ rm -f $SCOPE_LOG_FILE
}
#
@@ -104,6 +106,22 @@ fi
endtest
+#
+# verify timestamp
+#
+starttest timestamp_verify
+
+SCOPE_LOG_LEVEL=debug ldscope ls >/dev/null
+
+timestamp=$(grep 'Constructor' $SCOPE_LOG_FILE | grep -Po "(?<=\[).*?(?=\])")
+if [[ $timestamp =~ [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{3}(Z|\+[0-9]{4})$ ]]; then
+ echo "Timeststamp $timestamp is in expect format"
+else
+ ERR+=1
+fi
+
+endtest
+
if (( $FAILED_TEST_COUNT == 0 )); then
echo ""
echo ""
|
hslua-marshalling: use unsafe next when peeking lists/key-val pairs | @@ -73,6 +73,7 @@ import qualified Data.ByteString.Lazy as BL
import qualified Data.Map as Map
import qualified Data.Set as Set
import qualified Data.Text as T
+import qualified HsLua.Core.Unsafe as Unsafe
import qualified HsLua.Core.Utf8 as Utf8
-- | Record to keep track of failure contexts while retrieving objects
@@ -344,10 +345,12 @@ peekKeyValuePairs keyPeeker valuePeeker =
-- key to be on the top of the stack and the table at the given
-- index @idx@. The next key, if it exists, is left at the top of
-- the stack.
-nextPair :: LuaError e
- => Peeker e a -> Peeker e b -> Peeker e (Maybe (a, b))
+--
+-- The key must be either nil or must exist in the table, or this
+-- function will crash with an unrecoverable error.
+nextPair :: Peeker e a -> Peeker e b -> Peeker e (Maybe (a, b))
nextPair keyPeeker valuePeeker idx = retrieving "key-value pair" $ do
- hasNext <- next idx
+ hasNext <- Unsafe.next idx
if not hasNext
then return $ Success Nothing
else do
|
Update README.md
Grammar error correction (line 29) | @@ -26,7 +26,7 @@ you have the latest version
- **[major change]** by starting v0.5.1, built-in alignment is removed from **vec3** and **mat3** types
#### Note for C++ developers:
-If you don't aware about original GLM library yet, you may also want to look at:
+If you are not aware of the original GLM library yet, you may also want to look at:
https://github.com/g-truc/glm
#### Note for new comers (Important):
|
mpich: disable pmix for default build | BuildRequires: slurm-devel%{PROJ_DELIM} slurm%{PROJ_DELIM}
%endif
-%{!?with_pmix: %define with_pmix 1}
+%{!?with_pmix: %define with_pmix 0}
%if 0%{with_pmix}
BuildRequires: pmix%{PROJ_DELIM}
BuildRequires: libevent-devel
@@ -61,6 +61,7 @@ Message Passing Interface (MPI) standard.
%ohpc_setup_compiler
%if 0%{with_pmix}
module load pmix
+export CPATH=${PMIX_INC}
%endif
./configure --prefix=%{install_path} \
@@ -68,7 +69,7 @@ module load pmix
--with-pm=no --with-pmi=slurm \
%endif
%if 0%{with_pmix}
- CFLAGS="-I${PMIX_INC}" LIBS="-L%{OHPC_ADMIN}/pmix/pmix/lib -lpmix" --with-pm=none --with-pmi=slurm \
+ LIBS="-L%{OHPC_ADMIN}/pmix/pmix/lib -lpmix" --with-pm=none --with-pmi=slurm \
%endif
|| { cat config.log && exit 1; }
|
test-suite: move test enabled summary for extrae to be in performance tools group | @@ -625,12 +625,6 @@ echo Dev Tools:
echo ' 'EasyBuild................. : disabled
fi
- if test "x$enable_extrae" = "xyes"; then
- echo ' 'Extrae.................... : enabled
- else
- echo ' 'Extrae.................... : disabled
- fi
-
if test "x$enable_hwloc" = "xyes"; then
echo ' 'hwloc..................... : enabled
else
@@ -815,6 +809,12 @@ echo Libraries:
echo Performance Tools:
+ if test "x$enable_extrae" = "xyes"; then
+ echo ' 'Extrae.................... : enabled
+ else
+ echo ' 'Extrae.................... : disabled
+ fi
+
if test "x$enable_imb" = "xyes"; then
echo ' 'IMB....................... : enabled
else
|
Add timestamps for idf_monitor.py
Closes | @@ -93,7 +93,8 @@ class Monitor(object):
decode_coredumps=COREDUMP_DECODE_INFO,
decode_panic=PANIC_DECODE_DISABLE,
target='esp32',
- websocket_client=None, enable_address_decoding=True):
+ websocket_client=None, enable_address_decoding=True,
+ timestamps=False):
# type: (serial.Serial, str, str, str, bool, str, str, str, str, str, WebSocketClient, bool) -> None
super(Monitor, self).__init__()
self.event_queue = queue.Queue() # type: queue.Queue
@@ -142,6 +143,7 @@ class Monitor(object):
self._panic_buffer = b''
self.gdb_exit = False
self.start_cmd_sent = False
+ self._timestamps = timestamps
def invoke_processing_last_line(self):
# type: () -> None
@@ -591,6 +593,13 @@ class Monitor(object):
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
+ if self._timestamps:
+ t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
+ if isinstance(string, type(u'')):
+ console_printer(t + string)
+ else:
+ console_printer(t.encode('ascii') + string)
+ else:
console_printer(string)
if self._log_file:
try:
@@ -729,6 +738,12 @@ def main(): # type: () -> None
help='WebSocket URL for communicating with IDE tools for debugging purposes'
)
+ parser.add_argument(
+ '--timestamps',
+ help='Add timestamp for each line',
+ default=False,
+ action='store_true')
+
args = parser.parse_args()
# GDB uses CreateFile to open COM port, which requires the COM name to be r'\\.\COMx' if the COM
@@ -771,7 +786,8 @@ def main(): # type: () -> None
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.encrypted,
args.toolchain_prefix, args.eol,
args.decode_coredumps, args.decode_panic, args.target,
- ws, enable_address_decoding=not args.disable_address_decoding)
+ ws, enable_address_decoding=not args.disable_address_decoding,
+ timestamps=args.timestamps)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
|
dev-tools/cmake: update to v3.13.4 | %define pname cmake
-%define major_version 3.12
-%define minor_version 3
+%define major_version 3.13
+%define minor_version 4
Summary: CMake is an open-source, cross-platform family of tools designed to build, test and package software.
Name: %{pname}%{PROJ_DELIM}
|
WIP Event autolabel custom event actor names | @@ -35,6 +35,14 @@ const Wrapper = styled.div`
max-width: 100%;
`;
+const customEventActorsLookup = keyBy(
+ Array.from(Array(10).keys()).map((i) => ({
+ id: String(i),
+ name: `Actor ${String.fromCharCode("A".charCodeAt(0) + i)}`,
+ })),
+ "id"
+);
+
const ScriptEventTitle = ({ command, args = {} }: ScriptEventTitleProps) => {
const isComment = command === EVENT_COMMENT;
const localisedCommand = l10n(command);
@@ -100,6 +108,15 @@ const ScriptEventTitle = ({ command, args = {} }: ScriptEventTitleProps) => {
return arg;
};
const actorNameForId = (value: unknown) => {
+ if (
+ editorType === "customEvent" &&
+ customEventActorsLookup[value as string]
+ ) {
+ return customEventActorsLookup[value as string].name.replace(
+ / /g,
+ ""
+ );
+ }
if (value === "$self$" && editorType === "actor") {
return l10n("FIELD_SELF");
} else if (value === "$self$" || value === "player") {
@@ -164,7 +181,7 @@ const ScriptEventTitle = ({ command, args = {} }: ScriptEventTitleProps) => {
return variableNameForId(value);
} else if (isPropertyField(command, key, arg)) {
const propertyParts = String(value).split(":");
- return `${actorNameForId(propertyParts[0])} ${propertyNameForId(
+ return `${actorNameForId(propertyParts[0])}.${propertyNameForId(
propertyParts[1]
)}`;
} else if (fieldType === "matharea") {
|
Remove macos update_dyld_shared_cache
This just doesn't work well for a non-global install.
It is better for packages that need this to run it themselves. | @@ -42,13 +42,13 @@ CFLAGS=-std=c99 -Wall -Wextra -Isrc/include -Isrc/conf -fpic -O2 -fvisibility=hi
LDFLAGS=-rdynamic
# For installation
-LDCONFIG:=ldconfig
+LDCONFIG:=ldconfig "$(LIBDIR)"
# Check OS
UNAME:=$(shell uname -s)
ifeq ($(UNAME), Darwin)
CLIBS:=$(CLIBS) -ldl
- LDCONFIG:=update_dyld_shared_cache
+ LDCONFIG:=
else ifeq ($(UNAME), Linux)
CLIBS:=$(CLIBS) -lrt -ldl
endif
@@ -311,7 +311,7 @@ install: $(JANET_TARGET) build/janet.pc
cp jpm.1 '$(MANPATH)'
mkdir -p '$(PKG_CONFIG_PATH)'
cp build/janet.pc '$(PKG_CONFIG_PATH)/janet.pc'
- -$(LDCONFIG) $(LIBDIR)
+ -$(LDCONFIG)
uninstall:
-rm '$(BINDIR)/janet'
|
libbpf-tools/opensnoop: disable open on aarch64
aarch64 has no open syscall, do not attempt to trace it.
Fixes | @@ -242,6 +242,16 @@ int main(int argc, char **argv)
obj->rodata->targ_uid = env.uid;
obj->rodata->targ_failed = env.failed;
+#ifdef __aarch64__
+ /* aarch64 has no open syscall, only openat variants.
+ * Disable associated tracepoints that do not exist. See #3344.
+ */
+ bpf_program__set_autoload(
+ obj->progs.tracepoint__syscalls__sys_enter_open, false);
+ bpf_program__set_autoload(
+ obj->progs.tracepoint__syscalls__sys_exit_open, false);
+#endif
+
err = opensnoop_bpf__load(obj);
if (err) {
fprintf(stderr, "failed to load BPF object: %d\n", err);
|
[readme] No more MANGOHUD_OUTPUT | @@ -181,7 +181,7 @@ All vulkan vsync options might not be supported on your device, you can check wh
## MangoHud FPS logging
-When you toggle logging (using the keybind `F2`), a file is created with your chosen name (using `MANGOHUD_OUTPUT`) plus a date & timestamp.
+When you toggle logging (using the keybind `F2`), a file is created with your chosen name (using `output_file`) plus a date & timestamp.
This file can be uploaded to [Flightlessmango.com](https://flightlessmango.com/games/user_benchmarks) to create graphs automatically.
you can share the created page with others, just link it.
|
rsa: remove the limit on the maximum key strength | #define RSA_FIPS1864_MIN_KEYGEN_KEYSIZE 2048
#define RSA_FIPS1864_MIN_KEYGEN_STRENGTH 112
-#define RSA_FIPS1864_MAX_KEYGEN_STRENGTH 256
/*
* Generate probable primes 'p' & 'q'. See FIPS 186-4 Section B.3.6
@@ -174,8 +173,7 @@ int ossl_rsa_sp800_56b_validate_strength(int nbits, int strength)
int s = (int)ossl_ifc_ffc_compute_security_bits(nbits);
#ifdef FIPS_MODULE
- if (s < RSA_FIPS1864_MIN_KEYGEN_STRENGTH
- || s > RSA_FIPS1864_MAX_KEYGEN_STRENGTH) {
+ if (s < RSA_FIPS1864_MIN_KEYGEN_STRENGTH) {
ERR_raise(ERR_LIB_RSA, RSA_R_INVALID_MODULUS);
return 0;
}
|
Add extra wintrust typedefs | @@ -54,6 +54,16 @@ typedef BOOL (WINAPI *_CryptCATAdminCalcHashFromFileHandle2)(
_In_ ULONG dwFlags
);
+#define CRYPTCATADMIN_CALCHASH_FLAG_NONCONFORMANT_FILES_FALLBACK_FLAT 0x1
+
+typedef BOOL (WINAPI *_CryptCATAdminCalcHashFromFileHandle3)(
+ _In_ HCATADMIN hCatAdmin,
+ _In_ HANDLE hFile,
+ _Out_ PULONG pcbHash,
+ _Out_ PBYTE pbHash,
+ _In_ ULONG dwFlags
+ );
+
typedef BOOL (WINAPI *_CryptCATAdminAcquireContext)(
_Out_ HCATADMIN *phCatAdmin,
_In_ PGUID pgSubsystem,
@@ -104,6 +114,21 @@ typedef PCRYPT_PROVIDER_SGNR (WINAPI *_WTHelperGetProvSignerFromChain)(
_In_ ULONG idxCounterSigner
);
+typedef BOOL (WINAPI *_WTHelperIsChainedToMicrosoftFromStateData)(
+ _In_ CRYPT_PROVIDER_DATA *pProvData
+ );
+
+typedef BOOL (WINAPI* _WTHelperIsChainedToMicrosoft)(
+ _In_ PCCERT_CONTEXT pCertContext,
+ _In_ HCERTSTORE hSiblingStore,
+ _In_ BOOL IncludeMicrosoftTestRootCerts
+ );
+
+typedef BOOL (WINAPI* _WTHelperCheckCertUsage)(
+ _In_ PCCERT_CONTEXT pCertContext,
+ _In_ PCSTR pszOID
+ );
+
typedef LONG (WINAPI *_WinVerifyTrust)(
_In_ HWND hWnd,
_In_ GUID *pgActionID,
|
Add info on using for_window to man 5 | @@ -770,17 +770,20 @@ Mark all Firefox windows with "Browser":
[class="Firefox"] mark Browser
```
+You may like to use swaymsg -t get_tree for finding the values of these
+properties in practice for your applications.
+
The following attributes may be matched with:
*app_id*
Compare value against the app id. Can be a regular expression. If value is
\_\_focused\_\_, then the app id must be the same as that of the currently
- focused window.
+ focused window. _app_id_ are specific to Wayland applications.
*class*
Compare value against the window class. Can be a regular expression. If
value is \_\_focused\_\_, then the window class must be the same as that of
- the currently focused window.
+ the currently focused window. _class_ are specific to X11 applications.
*con_id*
Compare against the internal container ID, which you can find via IPC. If
|
tool: webd fix supported version | @@ -31,14 +31,12 @@ getVersions()
error(`are you sure you have libelektra and kdb installed?`);
process.exit(1);
} else {
- const { major, minor, micro } = versions.elektra;
- const versionSupported = major >= 0 && minor >= 8 && micro >= 23;
+ const { major, minor, micro } = versions.elektra
+ const versionSupported = major >= 0 && minor >= 9 && micro >= 0
if (!versionSupported) {
- error(
- `you are running an old libelektra version, which is not supported`
- );
- error(`please upgrade to libelektra 0.8.23 or higher`);
- process.exit(1);
+ error(`you are running an old libelektra version, which is not supported`)
+ error(`please upgrade to libelektra 0.9.0 or higher`)
+ process.exit(1)
}
return getInstances() // make sure yajl is installed
.then(() => {
|
chat-hook: clarify backlog wire handling logic | [%pass chat-history %agent [ship %chat-hook] %watch chat-history]~
::
[%backlog @ @ *]
- =/ pax `path`(oust [(dec (lent t.wir)) 1] `(list @ta)`t.wir)
- ?. (~(has by synced) pax) [~ state]
+ =/ chat=path (oust [(dec (lent t.wir)) 1] `(list @ta)`t.wir)
+ ?. (~(has by synced) chat) [~ state]
=/ =ship
?: =('~' i.t.wir)
(slav %p i.t.t.wir)
(slav %p i.t.wir)
- =. pax ?~((chat-scry pax) wir [%mailbox pax])
+ =/ =path ?~((chat-scry chat) wir [%mailbox chat])
:_ state
- [%pass pax %agent [ship %chat-hook] %watch pax]~
+ [%pass path %agent [ship %chat-hook] %watch path]~
==
::
++ watch-ack
(poke-chat-hook-action %remove t.wir)
::
[%backlog @ @ @ *]
- =/ pax `path`(oust [(dec (lent t.wir)) 1] `(list @ta)`t.wir)
- %. (poke-chat-hook-action %remove pax)
+ =/ chat=path (oust [(dec (lent t.wir)) 1] `(list @ta)`t.wir)
+ %. (poke-chat-hook-action %remove chat)
%- slog
- :* leaf+"chat-hook failed subscribe on {(spud pax)}"
+ :* leaf+"chat-hook failed subscribe on {(spud chat)}"
leaf+"stack trace:"
u.saw
==
|
Do not bundle shared library with Maven. | <arg value="--build"/>
<arg value="."/>
<arg value="--target"/>
- <arg value="tinysplinejar"/>
+ <arg value="tinysplinejava"/>
</exec>
</target>
</configuration>
</goals>
<configuration>
<target>
- <echo>Replacing Maven generated jar with CMake generated jar</echo>
- <!-- delete original jar -->
- <delete file="${project.build.directory}/${project.artifactId}-${project.version}.jar" />
- <!-- create new jar that contains the shared library -->
- <unzip src="${basedir}/lib/tinyspline.jar" dest="${project.build.directory}/tmp-java-dir" />
- <copy todir="${project.build.directory}/tmp-java-dir">
- <fileset dir="${basedir}/lib">
- <include name="*tinysplinejava*" />
- </fileset>
+ <echo>Copying CMake generated jar to target</echo>
+ <copy file="${basedir}/lib/tinyspline.jar"
+ tofile="${project.build.directory}/${project.artifactId}-${project.version}.jar">
</copy>
- <jar destfile="${project.build.directory}/${project.artifactId}-${project.version}.jar" basedir="${project.build.directory}/tmp-java-dir" />
- <!-- cleanup -->
- <delete dir="${project.build.directory}/tmp-java-dir" />
- <delete file="${project.build.directory}/tinyspline.jar" />
</target>
</configuration>
</execution>
|
sub shm BUGFIX parse oper parent as data not config only
Also, print LY error if parent fails to be parsed.
Refs | @@ -2194,8 +2194,12 @@ sr_shmsub_oper_listen_process_module_events(struct modsub_oper_s *oper_subs, sr_
/* parse data parent */
ly_errno = 0;
parent = lyd_parse_mem(conn->ly_ctx, oper_sub->sub_shm.addr + sizeof(sr_sub_shm_t) + sr_strshmlen(request_xpath),
- LYD_LYB, LYD_OPT_CONFIG | LYD_OPT_STRICT);
- SR_CHECK_INT_GOTO(ly_errno, err_info, error_rdunlock);
+ LYD_LYB, LYD_OPT_DATA | LYD_OPT_STRICT);
+ if (ly_errno) {
+ sr_errinfo_new_ly(&err_info, conn->ly_ctx);
+ SR_ERRINFO_INT(&err_info);
+ goto error_rdunlock;
+ }
/* go to the actual parent, not the root */
if ((err_info = sr_ly_find_last_parent(&parent, 0))) {
goto error_rdunlock;
|
core/minute-ia/interrupts.h: Format with clang-format
BRANCH=none
TEST=none | @@ -21,10 +21,8 @@ typedef struct {
#define INTR_DESC(__irq, __vector, __trig) \
{ \
- .irq = __irq, \
- .trigger = __trig, \
- .polarity = IOAPIC_REDTBL_INTPOL_HIGH, \
- .vector = __vector \
+ .irq = __irq, .trigger = __trig, \
+ .polarity = IOAPIC_REDTBL_INTPOL_HIGH, .vector = __vector \
}
#define LEVEL_INTR(__irq, __vector) \
|
[DFS] clean up unnecessary checking | @@ -129,7 +129,7 @@ const char *dfs_filesystem_get_mounted_path(struct rt_device *device)
for (iter = &filesystem_table[0];
iter < &filesystem_table[DFS_FILESYSTEMS_MAX]; iter++)
{
- /* fint the mounted device */
+ /* find the mounted device */
if (iter->ops == NULL) continue;
else if (iter->dev_id == device)
{
@@ -321,7 +321,7 @@ int dfs_mount(const char *device_name,
if (rt_device_open(fs->dev_id,
RT_DEVICE_OFLAG_RDWR) != RT_EOK)
{
- /* The underlaying device has error, clear the entry. */
+ /* The underlying device has error, clear the entry. */
dfs_lock();
memset(fs, 0, sizeof(struct dfs_filesystem));
@@ -568,8 +568,7 @@ int dfs_unmount_device(rt_device_t dev)
iter < &filesystem_table[DFS_FILESYSTEMS_MAX]; iter++)
{
/* check if the PATH is mounted */
- if ((iter->dev_id->parent.name != NULL)
- && (strcmp(iter->dev_id->parent.name, dev->parent.name) == 0))
+ if (strcmp(iter->dev_id->parent.name, dev->parent.name) == 0)
{
fs = iter;
break;
|
Update HKDF label for handshake packets | @@ -78,7 +78,7 @@ int derive_cleartext_secret(uint8_t *dest, size_t destlen, uint64_t secret,
int derive_client_cleartext_secret(uint8_t *dest, size_t destlen,
const uint8_t *secret, size_t secretlen) {
- static constexpr uint8_t LABEL[] = "QUIC client cleartext Secret";
+ static constexpr uint8_t LABEL[] = "QUIC client handshake Secret";
Context ctx;
prf_sha256(ctx);
return crypto::hkdf_expand_label(dest, destlen, secret, secretlen, LABEL,
@@ -87,7 +87,7 @@ int derive_client_cleartext_secret(uint8_t *dest, size_t destlen,
int derive_server_cleartext_secret(uint8_t *dest, size_t destlen,
const uint8_t *secret, size_t secretlen) {
- static constexpr uint8_t LABEL[] = "QUIC server cleartext Secret";
+ static constexpr uint8_t LABEL[] = "QUIC server handshake Secret";
Context ctx;
prf_sha256(ctx);
return crypto::hkdf_expand_label(dest, destlen, secret, secretlen, LABEL,
|
fix bug when executing undo with if_else_block | @@ -221,6 +221,15 @@ Blockly.Blocks['controls_if'] = {
* @this Blockly.Block
*/
domToMutation: function(xmlElement) {
+ // Delete everything.
+ if (this.elseCount_) {
+ this.removeInput('ELSE');
+ }
+ for (var i = this.elseifCount_; i > 0; i--) {
+ this.removeInput('IF' + i);
+ this.removeInput('DO' + i);
+ }
+ // Rebuild block.
this.elseifCount_ = parseInt(xmlElement.getAttribute('elseif'), 10);
this.elseCount_ = parseInt(xmlElement.getAttribute('else'), 10);
for (var i = 1; i <= this.elseifCount_; i++) {
|
esp_netif: Fix dhcps state transitions
When the DHCP server is stopped before starting the netif,
it should remain stopped -- as per compatibility with previous tcpip_adapter behavior | @@ -726,7 +726,7 @@ static esp_err_t esp_netif_start_api(esp_netif_api_msg_t *msg)
}
if (esp_netif->flags & ESP_NETIF_DHCP_SERVER) {
#if ESP_DHCPS
- if (esp_netif->dhcps_status != ESP_NETIF_DHCP_STARTED) {
+ if (esp_netif->dhcps_status == ESP_NETIF_DHCP_INIT) {
if (p_netif != NULL && netif_is_up(p_netif)) {
esp_netif_ip_info_t *default_ip = esp_netif->ip_info;
ip4_addr_t lwip_ip;
@@ -745,9 +745,11 @@ static esp_err_t esp_netif_start_api(esp_netif_api_msg_t *msg)
esp_netif->dhcps_status = ESP_NETIF_DHCP_INIT;
return ESP_OK;
}
- }
+ } else if (esp_netif->dhcps_status == ESP_NETIF_DHCP_STARTED) {
ESP_LOGD(TAG, "DHCP server already started");
return ESP_ERR_ESP_NETIF_DHCP_ALREADY_STARTED;
+ }
+ return ESP_OK;
#else
LOG_NETIF_DISABLED_AND_DO("DHCP Server", return ESP_ERR_NOT_SUPPORTED);
#endif
|
Fix coverity issue: CID - Resource leak in apps/pkcs12.c | @@ -541,13 +541,15 @@ int pkcs12_main(int argc, char **argv)
X509_STORE_free(store);
if (vret == X509_V_OK) {
+ int add_certs;
/* Remove from chain2 the first (end entity) certificate */
X509_free(sk_X509_shift(chain2));
/* Add the remaining certs (except for duplicates) */
- if (!X509_add_certs(certs, chain2, X509_ADD_FLAG_UP_REF
- | X509_ADD_FLAG_NO_DUP))
- goto export_end;
+ add_certs = X509_add_certs(certs, chain2, X509_ADD_FLAG_UP_REF
+ | X509_ADD_FLAG_NO_DUP);
sk_X509_pop_free(chain2, X509_free);
+ if (!add_certs)
+ goto export_end;
} else {
if (vret != X509_V_ERR_UNSPECIFIED)
BIO_printf(bio_err, "Error getting chain: %s\n",
|
Fix xGEMMT argument lists | @@ -566,7 +566,8 @@ void LAPACK(sgemmt)(
const float *B, const blasint *ldB,
const float *beta, float *C, const blasint *ldC
) {
- RELAPACK_sgemmt(uplo, n, A, ldA, info);
+ blasint info;
+ RELAPACK_sgemmt(uplo, transA, transB, n, k, alpha, A, ldA, B, ldB, beta, C, info);
}
#endif
@@ -578,7 +579,8 @@ void LAPACK(dgemmt)(
const double *B, const blasint *ldB,
const double *beta, double *C, const blasint *ldC
) {
- RELAPACK_dgemmt(uplo, n, A, ldA, info);
+ blasint info;
+ RELAPACK_dgemmt(uplo, transA, transB, n, k, alpha, A, ldA, B, ldB, beta, C, info);
}
#endif
@@ -590,7 +592,8 @@ void LAPACK(cgemmt)(
const float *B, const blasint *ldB,
const float *beta, float *C, const blasint *ldC
) {
- RELAPACK_cgemmt(uplo, n, A, ldA, info);
+ blasint info;
+ RELAPACK_cgemmt(uplo, transA, transB, n, k, alpha, A, ldA, B, ldB, beta, C, info);
}
#endif
@@ -602,6 +605,7 @@ void LAPACK(zgemmt)(
const double *B, const blasint *ldB,
const double *beta, double *C, const blasint *ldC
) {
- RELAPACK_zgemmt(uplo, n, A, ldA, info);
+ blasint info;
+ RELAPACK_zgemmt(uplo, transA, transB, n, k, alpha, A, ldA, B, ldB, beta, C, info);
}
#endif
|
Don't Use v4 Local Addr CMsg | @@ -191,7 +191,6 @@ typedef struct CXPLAT_SEND_DATA {
typedef struct CXPLAT_RECV_MSG_CONTROL_BUFFER {
char Data[CMSG_SPACE(sizeof(struct in6_pktinfo)) +
- CMSG_SPACE(sizeof(struct in_pktinfo)) +
2 * CMSG_SPACE(sizeof(int))];
} CXPLAT_RECV_MSG_CONTROL_BUFFER;
@@ -1143,25 +1142,6 @@ CxPlatSocketContextInitialize(
goto Exit;
}
- Option = TRUE;
- Result =
- setsockopt(
- SocketContext->SocketFd,
- IPPROTO_IP,
- IP_PKTINFO,
- (const void*)&Option,
- sizeof(Option));
- if (Result == SOCKET_ERROR) {
- Status = errno;
- QuicTraceEvent(
- DatapathErrorStatus,
- "[data][%p] ERROR, %u, %s.",
- Binding,
- Status,
- "setsockopt(IP_PKTINFO) failed");
- goto Exit;
- }
-
//
// Set socket option to receive TOS (= DSCP + ECN) information from the
// incoming packet.
@@ -1819,14 +1799,7 @@ CxPlatSocketContextRecvComplete(
CXPLAT_DBG_ASSERT(FALSE);
}
} else if (CMsg->cmsg_level == IPPROTO_IP) {
- if (CMsg->cmsg_type == IP_PKTINFO) {
- struct in_pktinfo* PktInfo = (struct in_pktinfo*)CMSG_DATA(CMsg);
- LocalAddr->Ip.sa_family = QUIC_ADDRESS_FAMILY_INET;
- LocalAddr->Ipv4.sin_addr = PktInfo->ipi_addr;
- LocalAddr->Ipv4.sin_port = SocketContext->Binding->LocalAddress.Ipv6.sin6_port;
- LocalAddr->Ipv6.sin6_scope_id = PktInfo->ipi_ifindex;
- FoundLocalAddr = TRUE;
- } else if (CMsg->cmsg_type == IP_TOS) {
+ if (CMsg->cmsg_type == IP_TOS) {
CXPLAT_DBG_ASSERT_CMSG(CMsg, uint8_t);
TOS = *(uint8_t*)CMSG_DATA(CMsg);
FoundTOS = TRUE;
|
when not building for CUDA on a system with CUDA installed, one must fake it by setting CUDA_TOOLKIT_ROOT_DIR to OFF | @@ -114,6 +114,10 @@ if [ "$AOMP_BUILD_CUDA" == 1 ] ; then
-DLIBOMPTARGET_NVPTX_CUDA_COMPILER=$AOMP/bin/clang++
-DLIBOMPTARGET_NVPTX_BC_LINKER=$AOMP/bin/llvm-link
-DLIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES=$NVPTXGPUS"
+else
+# Need to force CUDA off this way in case cuda is installed in this system
+ COMMON_CMAKE_OPTS="$COMMON_CMAKE_OPTS
+-DCUDA_TOOLKIT_ROOT_DIR=OFF"
fi
# This is how we tell the hsa plugin where to find hsa
|
Add option for using tags in jpm deps. | @@ -569,17 +569,25 @@ int main(int argc, const char **argv) {
(defn install-git
"Install a bundle from git. If the bundle is already installed, the bundle
is reinistalled (but not rebuilt if artifacts are cached)."
- [repo]
+ [repotab]
+ (def repo (if (string? repotab) repotab (repotab :repo)))
+ (def tag (unless (string? repotab) (repotab :tag)))
(def cache (find-cache))
(os/mkdir cache)
(def id (filepath-replace repo))
(def module-dir (string cache sep id))
+ (var fresh false)
(when (os/mkdir module-dir)
+ (set fresh true)
(os/execute ["git" "clone" repo module-dir] :p))
(def olddir (os/cwd))
(os/cd module-dir)
(try
(with-dyns [:rules @{}]
+ (unless fresh
+ (os/execute ["git" "pull" "origin" "master"] :p))
+ (when tag
+ (os/execute ["git" "reset" "--hard" tag] :p))
(os/execute ["git" "submodule" "update" "--init" "--recursive"] :p)
(import-rules "./project.janet")
(do-rule "install-deps")
|
packaging: add parallel option | @@ -6,6 +6,7 @@ ELEKTRA_PLUGINS='ALL;mozprefs;multifile;gitresolver;jni;ruby;yamlcpp;toml'
ELEKTRA_TOOLS='ALL'
ELEKTRA_BINDINGS='cpp;lua;python;ruby;jna;glib;IO;INTERCEPT'
+PARALLEL=${PARALLEL:-1}
PACKAGE_REVISION=${1:-1}
DIST_NAME=${2:-$(grep "^NAME=" /etc/os-release | awk -F= {' print $2'} | sed 's/\"//g')}
@@ -68,4 +69,4 @@ else
fi
cmake $CMAKE_ARGS_BASE $CMAKE_ARGS_SPECIFIC ..
-LD_LIBRARY_PATH=$(pwd)/lib:${LD_LIBRARY_PATH} make package
+LD_LIBRARY_PATH=$(pwd)/lib:${LD_LIBRARY_PATH} make -j ${PARALLEL} package
|
More changes for the docker docs. | -# Docker Support for IPP Sample Code
+Docker Support for IPP Sample Code
+==================================
This repository includes a sample Dockerfile for compiling and running `ippserver` in a Docker container.
-**To run IPP sample code on Docker:**
-1. From a shell prompt in the directory (on Windows 10|2016, macOS, or Linux) containing this docker file run:
+Building and Running on Docker
+------------------------------
+
+From a shell prompt in the directory (on Windows 10|2016, macOS, or Linux)
+containing this docker file run:
```
docker build -t ippsample .
```
+
You now can run the container with a bash terminal and go to the `/root/ippsample` folder manually.
```
@@ -17,40 +22,46 @@ This repository includes a sample Dockerfile for compiling and running `ippserve
You can also run one of the IPP binaries instead of the bash terminal.
-**To start the IPP server:**
-1. Run the IPP server with all its arguments:
+Starting the IPP Server
+-----------------------
+
+Run the IPP server with all its arguments:
```
docker run -it ippsample ippserver -M byMyself -l rightHere -m coolPrinter -n myHost -p 631 -s 72 -vvvv myPrintService
```
- OR to run the server in debug mode using `gdb`:
+*or* to run the server in debug mode using `gdb`:
```
docker run -it ippsample gdb ippserver
run -M byMyself -l rightHere -m coolPrinter -n myHost -p 631 -s 72 -vvvv myPrintService
```
-**Run the IPP Client:**
-1. First find all IPP printers from other Docker containers:
+Running the IPP Client
+----------------------
+
+First find all IPP printers from other Docker containers:
```
docker run --rm ippsample ippfind
```
- (Note the URL returned, e.g., `ipp://f8a365cfc7ec.local:631/ipp/print`)
+> Note the URL returned, e.g., `ipp://f8a365cfc7ec.local:631/ipp/print`.
-2. Now run the IPP tool in a new container in the `/root/ippsample/examples` directory with the IPP Server running, run:
+Next run the IPP tool in a new container in the `/root/ippsample/examples` directory with the IPP Server running, run:
```
docker run --rm -it -w /root/ippsample/examples ippsample ipptool [URL returned] identify-printer-display.test
```
- (Note the `IDENTIFY from 172.17.0.4: Hello, World!` message in stdout on the `ippserver` container)
+> Note the `IDENTIFY from 172.17.0.4: Hello, World!` message in stdout on the
+> `ippserver` container.
-2. To run the IPP everywhere tests on the IPP Client using setup from step #1, run:
+To run the IPP everywhere tests on the IPP Client using setup from the previous
+commands, run:
```
docker run --rm -it -w /root/ippsample/examples ippsample ipptool -V 2.0 -tf document-letter.pdf [URL returned] ipp-everywhere.test
|
pybricks.common.System: Shutdown placeholder.
Reset will not be part of the end-user API. We will only keep it for backwards-compatibility for system updates.
There will be a graceful shutdown instead. This adds the placeholder and moves the corresponding TODO note. | @@ -23,10 +23,6 @@ STATIC MP_DEFINE_CONST_FUN_OBJ_0(pb_type_System_name_obj, pb_type_System_name);
#if PBDRV_CONFIG_RESET
-// REVISIT: there should be a pbio_reset() instead of pbdrv_reset() to gracefully
-// shut down the hub (e.g. if the power button is pressed or USB is plugged in,
-// some hubs will not actually shut down).
-
#include <pbdrv/reset.h>
STATIC mp_obj_t pb_type_System_reset(mp_obj_t action_in) {
@@ -48,6 +44,17 @@ STATIC mp_obj_t pb_type_System_reset_reason(void) {
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(pb_type_System_reset_reason_obj, pb_type_System_reset_reason);
+STATIC mp_obj_t pb_type_System_shutdown(void) {
+
+ // TODO: there should be a pbio function instead of pbdrv function to gracefully
+ // shut down the hub (e.g. if the power button is pressed or USB is plugged in,
+ // some hubs will not actually power off).
+
+ mp_raise_NotImplementedError(NULL);
+ return mp_const_none;
+}
+STATIC MP_DEFINE_CONST_FUN_OBJ_0(pb_type_System_shutdown_obj, pb_type_System_shutdown);
+
#endif // PBDRV_CONFIG_RESET
#if PBIO_CONFIG_ENABLE_SYS
@@ -97,6 +104,7 @@ STATIC const mp_rom_map_elem_t common_System_locals_dict_table[] = {
#if PBDRV_CONFIG_RESET
{ MP_ROM_QSTR(MP_QSTR_reset), MP_ROM_PTR(&pb_type_System_reset_obj) },
{ MP_ROM_QSTR(MP_QSTR_reset_reason), MP_ROM_PTR(&pb_type_System_reset_reason_obj) },
+ { MP_ROM_QSTR(MP_QSTR_shutdown), MP_ROM_PTR(&pb_type_System_shutdown_obj) },
#endif // PBDRV_CONFIG_RESET
#if PBIO_CONFIG_ENABLE_SYS
{ MP_ROM_QSTR(MP_QSTR_set_stop_button), MP_ROM_PTR(&pb_type_System_set_stop_button_obj) },
|
Add more iterators to TArrayRef
([arc::pullid] d3c08a74-dcddbdfe-a919b2cf-d06d60c4) | #include <algorithm>
#include <initializer_list>
+#include <iterator>
/**
* `TArrayRef` works pretty much like `std::span` with dynamic extent, presenting
@@ -31,6 +32,8 @@ public:
using reference = T&;
using const_reference = const T&;
using value_type = T;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
constexpr inline TArrayRef() noexcept
: T_(nullptr)
@@ -95,6 +98,30 @@ public:
return (T_ + S_);
}
+ inline const_iterator cbegin() const noexcept {
+ return T_;
+ }
+
+ inline const_iterator cend() const noexcept {
+ return (T_ + S_);
+ }
+
+ inline reverse_iterator rbegin() const noexcept {
+ return reverse_iterator(T_ + S_);
+ }
+
+ inline reverse_iterator rend() const noexcept {
+ return reverse_iterator(T_);
+ }
+
+ inline const_reverse_iterator crbegin() const noexcept {
+ return const_reverse_iterator(T_ + S_);
+ }
+
+ inline const_reverse_iterator crend() const noexcept {
+ return const_reverse_iterator(T_);
+ }
+
inline reference front() const noexcept {
return *T_;
}
|
README.md: Replace note about gopkg.in with future Go versioning system | @@ -122,7 +122,7 @@ To get the bindings, type:\
or type this if you use Bash terminal:\
`go get -v github.com/veandco/go-sdl2/{sdl,img,mix,ttf}`
-Due to `go-sdl2` being under active development, a lot of breaking changes are going to happen during v0.x. Therefore if you want to stay with the latest stable version, you should replace `github.com/veandco/go-sdl2` with `gopkg.in/veandco/go-sdl2.v0` so it will refer to the latest stable version e.g. `gopkg.in/veandco/go-sdl2.v0/sdl`.
+Due to `go-sdl2` being under active development, a lot of breaking changes are going to happen during v0.x. With [versioning system](https://github.com/golang/proposal/blob/master/design/24301-versioned-go.md) coming to Go soon, we'll make use of semantic versioning to ensure stability in the future.
# Cross-compiling
|
Remove redundant GETNAME in client help command message
probably a copy-paste error. | @@ -2868,8 +2868,6 @@ void clientCommand(client *c) {
" Control the replies sent to the current connection.",
"SETNAME <name>",
" Assign the name <name> to the current connection.",
-"GETNAME",
-" Get the name of the current connection.",
"UNBLOCK <clientid> [TIMEOUT|ERROR]",
" Unblock the specified blocked client.",
"TRACKING (ON|OFF) [REDIRECT <id>] [BCAST] [PREFIX <prefix> [...]]",
|
Updated README.md Docker section (SSL). | @@ -217,7 +217,8 @@ example, for Apache's *combined* log format:
output /srv/report/index.html
real-time-html true
-If you want a secure connection, a TLS/SSL certificate and a key files must be configured as well:
+If you want a secure connection, a TLS/SSL certificate and a key files must be
+configured as well in your config file:
ssl-cert /srv/data/domain.crt
ssl-key /srv/data/domain.key
|
show system host: handle unrecognized display values | @@ -32,6 +32,14 @@ struct Command ShowHostServerCommand = {
ShowHostServer
};
+CHAR16 *mppAllowedShowHostServerDisplayValues[] = {
+ DISPLAYED_NAME_STR,
+ DISPLAYED_OS_NAME_STR,
+ DISPLAYED_OS_VERSION_STR,
+ DISPLAYED_MIXED_SKU_STR,
+ DISPLAYED_SKU_VIOLATION_STR
+};
+
/**
Execute the show host server command
@@ -92,6 +100,16 @@ ShowHostServer(
goto Finish;
}
+ /** check that the display parameters are correct (if display option is set) **/
+ if (DisplayOptionSet) {
+ ReturnCode = CheckDisplayList(pDisplayValues, mppAllowedShowHostServerDisplayValues,
+ ALLOWED_DISP_VALUES_COUNT(mppAllowedShowHostServerDisplayValues));
+ if (EFI_ERROR(ReturnCode)) {
+ Print(FORMAT_STR_NL, CLI_ERR_INCORRECT_VALUE_OPTION_DISPLAY);
+ goto Finish;
+ }
+ }
+
ShowAll = (!AllOptionSet && !DisplayOptionSet) || AllOptionSet;
ReturnCode = ReadRunTimeCliDisplayPreferences(&DisplayPreferences);
|
fix third attempt | @@ -80,8 +80,8 @@ uint8_t const * tud_descriptor_device_cb(void)
TUD_USBTMC_BULK_DESCRIPTORS(/* OUT = */0x01, /* IN = */ 0x81, /* packet size = */USBTMCD_MAX_PACKET_SIZE)
#if CFG_TUD_USBTMC_ENABLE_INT_EP
-// USBTMC Interrupt xfer always has length of 2, but we use epMaxSize=8 here for compatibility
-// with microcontrollers that only allow 8, 16, 32 or 64 for FS endpoints
+// USBTMC Interrupt xfer always has length of 2, but we use epMaxSize=8 for
+// compatibility with mcus that only allow 8, 16, 32 or 64 for FS endpoints
# define TUD_USBTMC_DESC(_itfnum) \
TUD_USBTMC_DESC_MAIN(_itfnum, /* _epCount = */ 3), \
TUD_USBTMC_INT_DESCRIPTOR(/* INT ep # */ 0x82, /* epMaxSize = */ 8, /* bInterval = */16u )
|
gall: suspend agents on upgrade | [%g %wiping-eyre-subs]
|- ^+ mo-core
?~ apps mo-core
+ ?. =(%base q.beak.egg.i.apps)
+ ~> %slog.[0 leaf+"gall: suspending {<dap.i.apps>}"]
+ =. old-state.egg.i.apps
+ =/ old old-state.egg.i.apps
+ |/?-(-.old %| p.old, %& p.old)
+ =/ ap-core (ap-abut:ap:mo-core i.apps)
+ $(apps t.apps, mo-core ap-abet:ap-core)
~> %slog.[0 leaf+"gall: upgrading {<dap.i.apps>}"]
=/ ap-core (ap-abut:ap:mo-core i.apps)
=? ap-core ?=(%& -.old-state.egg.i.apps)
=^ maybe-tang ap-core (ap-upgrade-state old-agent-state)
::
=. agent-config
- =/ =term ?~(old-agent-state %boot %bump)
- =/ possibly-suss
- ?~ maybe-tang
- =/ =suss [agent-name term now]
- [%.y suss]
- [%.n u.maybe-tang]
- [possibly-suss agent-config]
+ :_ agent-config
+ ^- (each suss tang)
+ ?^ maybe-tang
+ |/u.maybe-tang
+ &/[agent-name ?~(old-agent-state %boot %bump) now]
::
[maybe-tang ap-core]
:: +ap-upgrade-state: low-level install.
|
Reformat CMake: Do not execute commands in strings | @@ -11,12 +11,12 @@ SCRIPTS_DIR=$(dirname "$0")
CMAKE_FORMAT=$(which cmake-format)
if [ -z "${CMAKE_FORMAT}" ]; then
- echo "Please install `cmake-format`"
+ echo 'Please install `cmake-format`'
exit 0
fi
if [ -z "$(which sponge)" ]; then
- echo "Please install `sponge`"
+ echo 'Please install `sponge`'
exit 0
fi
|
unicode_gif.h: fix -Wdeclaration-after-statement
when building for windows with _UNICODE defined
unicode_gif.h|49 col 3| warning: ISO C90 forbids mixed declarations and
code [-Wdeclaration-after-statement] | @@ -45,7 +45,7 @@ static GifFileType* DGifOpenFileUnicode(const W_CHAR* file_name, int* error) {
}
#if defined(_WIN32) && defined(_UNICODE)
-
+ {
int file_handle = _wopen(file_name, _O_RDONLY | _O_BINARY);
if (file_handle == -1) {
if (error != NULL) *error = D_GIF_ERR_OPEN_FAILED;
@@ -57,6 +57,7 @@ static GifFileType* DGifOpenFileUnicode(const W_CHAR* file_name, int* error) {
#else
return DGifOpenFileHandle(file_handle);
#endif
+ }
#else
|
parser: remove unused variable | @@ -115,7 +115,6 @@ int flb_parser_logfmt_do(struct flb_parser *parser,
*/
static void flb_interim_parser_destroy(struct flb_parser *parser)
{
- int i = 0;
if (parser->type == FLB_PARSER_REGEX) {
flb_regex_destroy(parser->regex);
flb_free(parser->p_regex);
|
fix(Makefile): missing mujs dependency for test executables | @@ -127,7 +127,7 @@ $(OBJDIR)/%.c.o : %.c
$(CC) $(CFLAGS) $(LIBS_CFLAGS) -c -o $@ $<
$(BOTS_DIR)/%.exe: $(BOTS_DIR)/%.c
$(CC) $(CFLAGS) $(LIBS_CFLAGS) -o $@ $< $(LIBDISCORD_LDFLAGS) $(LIBREDDIT_LDFLAGS) $(LIBGITHUB_LDFLAGS) $(LIBS_LDFLAGS)
-%.exe: %.c all_api_libs
+%.exe: %.c mujs all_api_libs
$(CC) $(CFLAGS) $(LIBS_CFLAGS) -o $@ $< $(LIBDISCORD_LDFLAGS) $(LIBREDDIT_LDFLAGS) $(LIBGITHUB_LDFLAGS) -lmujs -lsqlite3 $(LIBS_LDFLAGS)
%.bx: %.c mujs all_api_libs
$(CC) $(CFLAGS) $(LIBS_CFLAGS) -o $@ $< $(LIBDISCORD_LDFLAGS) -lmujs -lsqlite3 $(LIBS_LDFLAGS)
|
Copy `port` data, since we're printing it after the fact and it might be dynamically allocated. | @@ -284,7 +284,7 @@ struct ListenerProtocol {
sock_rw_hook_s *(*set_rw_hooks)(intptr_t uuid, void *udata);
void (*on_start)(void *udata);
void (*on_finish)(void *udata);
- const char *port;
+ char port[16];
};
static sock_rw_hook_s *listener_set_rw_hooks(intptr_t uuid, void *udata) {
@@ -378,8 +378,9 @@ listener_alloc(struct facil_listen_args settings) {
.on_finish = settings.on_finish,
.set_rw_hooks = settings.set_rw_hooks,
.rw_udata = settings.rw_udata,
- .port = settings.port,
};
+ size_t tmp = strlen(settings.port);
+ memcpy(listener->port, settings.port, tmp + 1);
return listener;
}
return NULL;
|
driver/tcpm/fusb307.c: Format with clang-format
BRANCH=none
TEST=none | @@ -44,25 +44,29 @@ int fusb307_tcpm_set_polarity(int port, enum tcpc_cc_polarity polarity)
tcpm_get_cc(port, &cc1, &cc2);
if (cc1) {
if (pd_get_power_role(port) == PD_ROLE_SINK) {
- int role = TCPC_REG_ROLE_CTRL_SET(0,
- tcpci_get_cached_rp(port), TYPEC_CC_RD, TYPEC_CC_OPEN);
+ int role = TCPC_REG_ROLE_CTRL_SET(
+ 0, tcpci_get_cached_rp(port), TYPEC_CC_RD,
+ TYPEC_CC_OPEN);
tcpc_write(port, TCPC_REG_ROLE_CTRL, role);
} else {
- int role = TCPC_REG_ROLE_CTRL_SET(0,
- tcpci_get_cached_rp(port), TYPEC_CC_RP, TYPEC_CC_OPEN);
+ int role = TCPC_REG_ROLE_CTRL_SET(
+ 0, tcpci_get_cached_rp(port), TYPEC_CC_RP,
+ TYPEC_CC_OPEN);
tcpc_write(port, TCPC_REG_ROLE_CTRL, role);
}
} else if (cc2) {
if (pd_get_power_role(port) == PD_ROLE_SINK) {
- int role = TCPC_REG_ROLE_CTRL_SET(0,
- tcpci_get_cached_rp(port), TYPEC_CC_OPEN, TYPEC_CC_RD);
+ int role = TCPC_REG_ROLE_CTRL_SET(
+ 0, tcpci_get_cached_rp(port), TYPEC_CC_OPEN,
+ TYPEC_CC_RD);
tcpc_write(port, TCPC_REG_ROLE_CTRL, role);
} else {
- int role = TCPC_REG_ROLE_CTRL_SET(0,
- tcpci_get_cached_rp(port), TYPEC_CC_OPEN, TYPEC_CC_RP);
+ int role = TCPC_REG_ROLE_CTRL_SET(
+ 0, tcpci_get_cached_rp(port), TYPEC_CC_OPEN,
+ TYPEC_CC_RP);
tcpc_write(port, TCPC_REG_ROLE_CTRL, role);
}
|
dpdk: failsafe PMD initialization code
Added code to initialize failsafe PMD
This is part of initial effort to enable vpp running over
dpdk on failsafe PMD in Microsoft Azure(4/4). | @@ -503,6 +503,11 @@ dpdk_lib_init (dpdk_main_t * dm)
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
break;
+ case VNET_DPDK_PMD_FAILSAFE:
+ xd->port_type = VNET_DPDK_PORT_TYPE_FAILSAFE;
+ xd->port_conf.intr_conf.lsc = 1;
+ break;
+
default:
xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN;
}
@@ -632,6 +637,26 @@ dpdk_lib_init (dpdk_main_t * dm)
}
}
}
+
+ if (xd->pmd == VNET_DPDK_PMD_FAILSAFE)
+ {
+ /* failsafe device numerables are reported with active device only,
+ * need to query the mtu for current device setup to overwrite
+ * reported value.
+ */
+ uint16_t dev_mtu;
+ if (!rte_eth_dev_get_mtu (i, &dev_mtu))
+ {
+ mtu = dev_mtu;
+ max_rx_frame = mtu + sizeof (ethernet_header_t);
+
+ if (xd->port_conf.rxmode.hw_strip_crc)
+ {
+ max_rx_frame += 4;
+ }
+ }
+ }
+
/*Set port rxmode config */
xd->port_conf.rxmode.max_rx_pkt_len = max_rx_frame;
|
fix cv test | @@ -550,7 +550,7 @@ def test_copy_model():
def test_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
- results = cv({"iterations": 5, "random_seed": 0, "loss_function": "Logloss"}, pool)
+ results = cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss"})
assert isinstance(results, dict)
assert "Logloss_train_avg" in results
@@ -642,4 +642,4 @@ def test_full_history():
def test_bad_params_in_cv():
pool = Pool(TRAIN_FILE, column_description=CD_FILE)
with pytest.warns(UserWarning):
- cv({"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "use_best_model": True}, pool)
+ cv(pool, {"iterations": 5, "random_seed": 0, "loss_function": "Logloss", "use_best_model": True})
|
VERSION bump to version 0.12.55 | @@ -34,7 +34,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0")
# set version
set(LIBNETCONF2_MAJOR_VERSION 0)
set(LIBNETCONF2_MINOR_VERSION 12)
-set(LIBNETCONF2_MICRO_VERSION 54)
+set(LIBNETCONF2_MICRO_VERSION 55)
set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION})
set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
|
Allow getting and printing of cookie options | @@ -398,6 +398,9 @@ config_print_zone(nsd_options_type* opt, const char* k, int s, const char *o,
SERV_GET_STR(tls_service_ocsp, o);
SERV_GET_STR(tls_service_pem, o);
SERV_GET_STR(tls_port, o);
+ SERV_GET_STR(cookie_secret, o);
+ SERV_GET_STR(cookie_secret_file, o);
+ SERV_GET_BIN(answer_cookie, o);
/* int */
SERV_GET_INT(server_count, o);
SERV_GET_INT(tcp_count, o);
@@ -609,6 +612,9 @@ config_test_print_server(nsd_options_type* opt)
printf("\tanswer-cookie: %s\n", opt->answer_cookie?"yes":"no");
if (opt->cookie_secret)
print_string_var("cookie-secret:", opt->cookie_secret);
+ if (opt->cookie_secret_file)
+ print_string_var("cookie-secret-file:", opt->cookie_secret_file);
+
#ifdef USE_DNSTAP
printf("\ndnstap:\n");
|
Fix cast warning on 64 bit VS | @@ -38,7 +38,7 @@ static const uint8_t* picoquic_log_fixed_skip(const uint8_t* bytes, const uint8_
static const uint8_t* picoquic_log_varint_skip(const uint8_t* bytes, const uint8_t* bytes_max)
{
- return bytes == NULL ? NULL : (bytes < bytes_max ? picoquic_log_fixed_skip(bytes, bytes_max, VARINT_LEN(bytes)) : NULL);
+ return bytes == NULL ? NULL : (bytes < bytes_max ? picoquic_log_fixed_skip(bytes, bytes_max, (uint64_t) VARINT_LEN(bytes)) : NULL);
}
static const uint8_t* picoquic_log_varint(const uint8_t* bytes, const uint8_t* bytes_max, uint64_t* n64)
|
Fix regression when connecting with Unix sockets | @@ -472,7 +472,7 @@ int redisContextConnectBindTcp(redisContext *c, const char *addr, int port,
int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout) {
int blocking = (c->flags & REDIS_BLOCK);
- struct sockaddr_un sa;
+ struct sockaddr_un *sa;
long timeout_msec = -1;
if (redisCreateSocket(c,AF_UNIX) < 0)
@@ -499,9 +499,10 @@ int redisContextConnectUnix(redisContext *c, const char *path, const struct time
if (redisContextTimeoutMsec(c,&timeout_msec) != REDIS_OK)
return REDIS_ERR;
- sa.sun_family = AF_UNIX;
- strncpy(sa.sun_path,path,sizeof(sa.sun_path)-1);
- if (connect(c->fd, (struct sockaddr*)&sa, sizeof(sa)) == -1) {
+ sa = (struct sockaddr_un*)(c->saddr = malloc(sizeof(struct sockaddr_un)));
+ sa->sun_family = AF_UNIX;
+ strncpy(sa->sun_path,path,sizeof(sa->sun_path)-1);
+ if (connect(c->fd, (struct sockaddr*)sa, sizeof(*sa)) == -1) {
if (errno == EINPROGRESS && !blocking) {
/* This is ok. */
} else {
|
updates %eyre state adaptation in +load | -- ::
|% :: models
++ bolo :: eyre state
- $: $0 :: version
+ $: $1 :: version
dom/(set (list @t)) :: domain names
fig/http-config :: config
por/{clr/@ud sek/(unit @ud)} :: live ports
~
::
++ load :: take previous state
- ::|= * %. (bolo +<)
- |= old/?(bolo) ^+ ..^$
+ => |%
+ ++ bolo-old (cork bolo |=(bolo [%0 |5.+<]))
+ --
+ |= old/?(bolo-old bolo)
+ ^+ ..^$
?- -.old
- $0 ..^$(+>- old)
+ $0 $(old [%1 ~ *http-config [8.080 ~] [~ ~] +.old])
+ $1 ..^$(+>- old)
==
::
++ scry
|
main: Added option (-a) to set the User-Agent string. | @@ -73,6 +73,7 @@ static void usage(void)
#if HAVE_INET6
"\t-6 Force IPv6 only\n"
#endif
+ "\t-a <software> Specify SIP User-Agent string\n"
"\t-d Daemon\n"
"\t-e <commands> Execute commands (repeat)\n"
"\t-f <path> Config path\n"
@@ -92,6 +93,8 @@ int main(int argc, char *argv[])
{
int af = AF_UNSPEC, run_daemon = false;
const char *ua_eprm = NULL;
+ const char *software =
+ "baresip v" BARESIP_VERSION " (" ARCH "/" OS ")";
const char *execmdv[16];
const char *net_interface = NULL;
const char *audio_path = NULL;
@@ -124,7 +127,7 @@ int main(int argc, char *argv[])
#ifdef HAVE_GETOPT
for (;;) {
- const int c = getopt(argc, argv, "46de:f:p:hu:n:vst:m:");
+ const int c = getopt(argc, argv, "46a:de:f:p:hu:n:vst:m:");
if (0 > c)
break;
@@ -135,6 +138,10 @@ int main(int argc, char *argv[])
usage();
return -2;
+ case 'a':
+ software = optarg;
+ break;
+
case '4':
af = AF_INET;
break;
@@ -263,8 +270,7 @@ int main(int argc, char *argv[])
}
/* Initialise User Agents */
- err = ua_init("baresip v" BARESIP_VERSION " (" ARCH "/" OS ")",
- true, true, true);
+ err = ua_init(software, true, true, true);
if (err)
goto out;
|
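The pattern behind this change is a compiled-in default string that a command-line option can override; a minimal, self-contained sketch of that idea (a hypothetical program, not baresip itself):

    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
        const char *software = "myapp v1.0";    /* compiled-in default */
        int c;

        while ((c = getopt(argc, argv, "a:")) != -1) {
            if (c == 'a')
                software = optarg;              /* override from the command line */
        }
        printf("User-Agent: %s\n", software);
        return 0;
    }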
Make test_alloc_parse_xdecl() robust vs allocation pattern changes | @@ -7846,25 +7846,21 @@ START_TEST(test_alloc_parse_xdecl)
"<?xml version='1.0' encoding='utf-8'?>\n"
"<doc>Hello, world</doc>";
int i;
- int repeat = 0;
-#define MAX_ALLOC_COUNT 10
+#define MAX_ALLOC_COUNT 15
for (i = 0; i < MAX_ALLOC_COUNT; i++) {
- /* Repeat some (most) counts to defeat cached allocations */
- if (i == 2 && repeat != 1) {
- i--;
- repeat++;
- }
- else if (i == 3) {
- i -= 2;
- repeat++;
- }
allocation_count = i;
XML_SetXmlDeclHandler(parser, dummy_xdecl_handler);
if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text),
XML_TRUE) != XML_STATUS_ERROR)
break;
- XML_ParserReset(parser, NULL);
+ /* Resetting the parser is insufficient, because some memory
+ * allocations are cached within the parser. Instead we use
+ * the teardown and setup routines to ensure that we have the
+ * right sort of parser back in our hands.
+ */
+ alloc_teardown();
+ alloc_setup();
}
if (i == 0)
fail("Parse succeeded despite failing allocator");
|
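The test drives a counting allocator that succeeds for the first N requests and then fails, sweeping N upwards until the parse finally succeeds; a rough sketch of such a fault-injection allocator (hypothetical names, not Expat's actual test harness):

    #include <stdlib.h>

    /* Counting allocator: succeeds while the budget lasts, then returns
     * NULL so out-of-memory error paths can be exercised. */
    static long allocation_count = -1;   /* -1 means "never fail" */

    static void *failing_malloc(size_t size)
    {
        if (allocation_count == 0)
            return NULL;                 /* budget exhausted: simulate OOM */
        if (allocation_count > 0)
            allocation_count--;
        return malloc(size);
    }

    /* A test loop raises the budget one step at a time and rebuilds all
     * cached state between attempts, exactly because resetting alone is
     * not enough once the parser caches allocations internally. */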
updates :dns to depend on %ford's implicit =~ | ::
:: moves and state
::
-=> |%
+|%
+$ move (pair bone card)
+$ poke
$% [%dns-command command]
==
--
::
-:: helpers
+:: oauth2 implementation, and helpers
::
-=> |%
+|%
:: +join: join list of cords with separator
::
:: XX move to zuse?
--
--
::
-=> |%
+:: app/provider helpers
+::
+|%
:: +name: fully-qualified domain name for :ship
::
++ name
::
:: service providers
::
-=> |%
+|%
:: +provider: initialize provider-specific core
::
++ provider
|
VCL: Fix socket_test.sh to build test apps.
Replace autoconf-based test app build detection
with the appropriate cmake-based incantation.
Use the dpdk_devbind.py that is in the VPP tree. | @@ -9,7 +9,7 @@ vpp_shm_dir="/dev/shm/"
vpp_run_dir="/run/vpp"
lib_dir="$WS_ROOT/build-root/install-vpp-native/vpp/lib/"
lib_debug_dir="$WS_ROOT/build-root/install-vpp_debug-native/vpp/lib/"
-dpdk_devbind="/usr/share/dpdk/usertools/dpdk-devbind.py"
+dpdk_devbind="$WS_ROOT/extras/vpp_config/scripts/dpdk-devbind.py"
docker_vpp_dir="/vpp/"
docker_app_dir="/vpp/"
docker_lib_dir="/vpp-lib/"
@@ -270,10 +270,13 @@ if [ -z "$WS_ROOT" ] ; then
exit 1
fi
-if [[ "$(grep bin_PROGRAMS $WS_ROOT/src/vcl.am)" = "" ]] ; then
- $WS_ROOT/extras/vagrant/vcl_test.sh $WS_ROOT $USER
+if [ ! -d $vpp_dir ] ; then
+ if [ -z "$title_dbg" ] ; then
+ (cd $WS_ROOT; make build-release)
+ else
(cd $WS_ROOT; make build)
fi
+fi
if [ ! -d $vpp_dir ] ; then
echo "ERROR: Missing VPP$title_dbg bin directory!" >&2
|
NULL protection for pool stop | @@ -337,6 +337,8 @@ static void *defer_worker_thread(void *pool_) {
/** Signals a running thread pool to stop. Returns immediately. */
void defer_pool_stop(pool_pt pool) {
+ if (!pool)
+ return;
pool->flag = 0;
for (size_t i = 0; i < pool->count; ++i) {
defer_thread_signal();
|
Export last_ast_pass
This makes the driver.test_ast function more useful | @@ -22,7 +22,7 @@ local ast_passes = {
{ name = "checker", f = checker.check },
{ name = "upvalues", f = upvalues.analyze },
}
-local last_ast_pass = ast_passes[#ast_passes].name
+driver.last_ast_pass = ast_passes[#ast_passes].name
local function compile_pallene_to_ast(pallene_filename, stop_after)
local err, errs
@@ -66,7 +66,7 @@ local function compile_pallene_to_c(pallene_filename, c_filename, modname)
local ok, errs
local ast
- ast, errs = compile_pallene_to_ast(pallene_filename, last_ast_pass)
+ ast, errs = compile_pallene_to_ast(pallene_filename, driver.last_ast_pass)
if not ast then return false, errs end
ok, errs = compile_ast_to_c(ast, c_filename, modname)
|
Fix idle in transaction session timeout pgoption
Fix a typo. Advanced PGOPTIONS parsing was added between
Odyssey versions 1.1 and 1.2, including idle_in_transaction_session_timeout
parameter passing. That means Odyssey will look up startup parameter
key/value pairs and parse all hardcoded pgoptions, but this requires a full match
between the hardcoded pgoption name and the startup key | @@ -22,7 +22,7 @@ typedef enum {
KIWI_VAR_SEARCH_PATH,
KIWI_VAR_STATEMENT_TIMEOUT,
KIWI_VAR_LOCK_TIMEOUT,
- KIWI_VAR_IDLE_IN_TRANSACTION_TIMEOUT,
+ KIWI_VAR_IDLE_IN_TRANSACTION_SESSION_TIMEOUT,
KIWI_VAR_DEFAULT_TABLE_ACCESS_METHOD,
KIWI_VAR_DEFAULT_TOAST_COMPRESSION,
KIWI_VAR_CHECK_FUNCTION_BODIES,
@@ -116,9 +116,9 @@ static inline void kiwi_vars_init(kiwi_vars_t *vars)
"statement_timeout", sizeof("statement_timeout"));
kiwi_var_init(&vars->vars[KIWI_VAR_LOCK_TIMEOUT], "lock_timeout",
sizeof("lock_timeout"));
- kiwi_var_init(&vars->vars[KIWI_VAR_IDLE_IN_TRANSACTION_TIMEOUT],
- "idle_in_transaction_timeout",
- sizeof("idle_in_transaction_timeout"));
+ kiwi_var_init(&vars->vars[KIWI_VAR_IDLE_IN_TRANSACTION_SESSION_TIMEOUT],
+ "idle_in_transaction_session_timeout",
+ sizeof("idle_in_transaction_session_timeout"));
kiwi_var_init(&vars->vars[KIWI_VAR_DEFAULT_TABLE_ACCESS_METHOD],
"default_table_access_method",
sizeof("default_table_access_method"));
|
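The underlying lookup only succeeds when the stored option name is byte-for-byte identical to the startup parameter name, which is why the truncated spelling never matched; a simplified sketch of that exact-match table (a made-up struct, not Odyssey's kiwi types):

    #include <string.h>

    /* Hypothetical option table: each entry stores the exact GUC name. */
    struct opt { const char *name; const char *value; };

    static struct opt options[] = {
        { "statement_timeout", NULL },
        { "idle_in_transaction_session_timeout", NULL },  /* must match exactly */
    };

    static struct opt *opt_lookup(const char *key)
    {
        for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++)
            if (strcmp(options[i].name, key) == 0)   /* full-name comparison */
                return &options[i];
        return NULL;                                  /* misspelled entries never match */
    }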
Pushing back overwritten changes vol2 | @@ -59,6 +59,7 @@ typedef struct testcase_s {
#define UNKNOWN_PKT(src, dst, ...) {FAKE_PKT, 0, 0, ETH(src, dst, ##__VA_ARGS__), WAIT_CONTROLPLANE_REPLY, 0, ETH(src, dst, ##__VA_ARGS__)}
#define LEARNED_PKT(port, src, dst, ...) {FAKE_PKT, 0, 0, ETH(src, dst, ##__VA_ARGS__), 0, port, ETH(src, dst, ##__VA_ARGS__)}
+#define NO_OUTPUT -1
#define IPPROTOCOL_ICMP "01"
#define IPPROTOCOL_IPv4 "04"
|
Travis: Improve cmake format install command | @@ -336,7 +336,7 @@ before_install:
sudo apt-get install libzmq3-dev
sudo apt-get install libxerces-c-dev
sudo apt-get install moreutils # contains `sponge` required by `reformat-cmake`
- sudo pip install cmake-format==0.4.5 pyyaml
+ sudo pip install cmake-format[yaml]==0.4.5
git clone --branch 2.7.1 --depth 1 https://github.com/taocpp/PEGTL.git && sudo cp -R PEGTL/include/tao /usr/local/include
mkdir -p "$HOME/bin" && cd "$HOME/bin" && \
curl -L "https://github.com/mvdan/sh/releases/download/v2.6.3/shfmt_v2.6.3_linux_amd64" -o shfmt && \
|
oc_discovery: Minor fixes | @@ -586,33 +586,12 @@ oc_ri_process_discovery_payload(uint8_t *payload, int len,
rep = rep->next;
}
- oc_rep_t *link = links->value.object;
-
- while (link != NULL) {
- switch (link->type) {
- case OC_REP_STRING: {
- if (oc_string_len(link->name) == 6 &&
- memcmp(oc_string(link->name), "anchor", 6) == 0) {
- anchor = &link->value.string;
- }
- } break;
- default:
- break;
- }
- if (anchor) {
- break;
- }
- link = link->next;
- }
-
- oc_uuid_t di;
- oc_str_to_uuid(oc_string(*anchor) + 6, &di);
-
while (links != NULL) {
/* Reset bm in every round as this can be omitted if 0. */
+ oc_uuid_t di;
oc_resource_properties_t bm = 0;
oc_endpoint_t *eps_list = NULL;
- link = links->value.object;
+ oc_rep_t *link = links->value.object;
while (link != NULL) {
switch (link->type) {
@@ -620,6 +599,7 @@ oc_ri_process_discovery_payload(uint8_t *payload, int len,
if (oc_string_len(link->name) == 6 &&
memcmp(oc_string(link->name), "anchor", 6) == 0) {
anchor = &link->value.string;
+ oc_str_to_uuid(oc_string(*anchor) + 6, &di);
} else if (oc_string_len(link->name) == 4 &&
memcmp(oc_string(link->name), "href", 4) == 0) {
uri = &link->value.string;
@@ -653,8 +633,9 @@ oc_ri_process_discovery_payload(uint8_t *payload, int len,
memcmp(oc_string(ep->name), "ep", 2) == 0) {
if (oc_string_to_endpoint(&ep->value.string, &temp_ep, NULL) ==
0) {
- if (((endpoint->flags & IPV4) && (temp_ep.flags & IPV6)) ||
- ((endpoint->flags & IPV6) && (temp_ep.flags & IPV4))) {
+ if (!(temp_ep.flags & TCP) &&
+ (((endpoint->flags & IPV4) && (temp_ep.flags & IPV6)) ||
+ ((endpoint->flags & IPV6) && (temp_ep.flags & IPV4)))) {
goto next_ep;
}
if (eps_cur) {
|
Synchronize clipboard on Ctrl+v
Pressing Ctrl+v on the device will typically paste the clipboard
content.
Before sending the key event, synchronize the computer clipboard to the
device clipboard to allow seamless copy-paste. | @@ -326,13 +326,15 @@ input_manager_process_key(struct input_manager *im,
struct controller *controller = im->controller;
- // The shortcut modifier is pressed
- if (smod) {
SDL_Keycode keycode = event->keysym.sym;
bool down = event->type == SDL_KEYDOWN;
- int action = down ? ACTION_DOWN : ACTION_UP;
+ bool ctrl = event->keysym.mod & KMOD_CTRL;
+ bool shift = event->keysym.mod & KMOD_SHIFT;
bool repeat = event->repeat;
- bool shift = event->keysym.mod & (KMOD_LSHIFT | KMOD_RSHIFT);
+
+ // The shortcut modifier is pressed
+ if (smod) {
+ int action = down ? ACTION_DOWN : ACTION_UP;
switch (keycode) {
case SDLK_h:
if (control && !shift && !repeat) {
@@ -457,6 +459,12 @@ input_manager_process_key(struct input_manager *im,
im->repeat = 0;
}
+ if (ctrl && !shift && keycode == SDLK_v && down && !repeat) {
+ // Synchronize the computer clipboard to the device clipboard before
+ // sending Ctrl+v, to allow seamless copy-paste.
+ set_device_clipboard(controller, false);
+ }
+
struct control_msg msg;
if (convert_input_key(event, &msg, im->prefer_text, im->repeat)) {
if (!controller_push_msg(controller, &msg)) {
|
Do not upload integration test asset
This file doesn't exist anymore | @@ -665,16 +665,6 @@ jobs:
pattern: "*-gadget-*-*-tar-gz/*-gadget-*-*.tar.gz"
github-token: ${{ secrets.GITHUB_TOKEN }}
release-url: ${{ steps.create_release.outputs.upload_url }}
- - name: Upload Testing Asset
- id: upload-release-asset-testing
- uses: actions/[email protected]
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- upload_url: ${{ steps.create_release.outputs.upload_url }}
- asset_path: integration-asset/gadget-integration-tests-job.yaml
- asset_name: gadget-integration-tests-job.yaml
- asset_content_type: application/x-yaml
- name: Update new version in krew-index
if: github.repository == 'kinvolk/inspektor-gadget'
uses: rajatjindal/[email protected]
|
programs: cert_write: fixed bug in parsing dec serial | @@ -277,7 +277,7 @@ int parse_serial_decimal_format(unsigned char *obuf, size_t obufmax,
val = (dec >> ((remaining_bytes - 1) * 8)) & 0xFF;
/* Skip leading zeros */
- if ((val) != 0) {
+ if ((val != 0) || (*len != 0)) {
*p = val;
(*len)++;
p++;
|
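The rule the fix enforces is that zero bytes may only be dropped before the first significant byte of the serial; a small self-contained sketch of that behaviour (simplified to a 64-bit value, not the mbed TLS implementation, and the all-zero case is left out for brevity):

    #include <stddef.h>
    #include <stdint.h>

    /* Serialize a decimal serial number into big-endian bytes.
     * Zero bytes are skipped only while nothing has been written yet,
     * so 0x00010000 becomes 01 00 00 rather than just 01. */
    static size_t serial_to_bytes(uint64_t dec, unsigned char *out)
    {
        size_t len = 0;
        for (int shift = 56; shift >= 0; shift -= 8) {
            unsigned char val = (dec >> shift) & 0xFF;
            if (val != 0 || len != 0)   /* keep interior zeros */
                out[len++] = val;
        }
        return len;
    }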
Update doc/getting-started/building-from-source.rst | @@ -182,7 +182,7 @@ You can also build ACRN with your customized scenario:
.. code-block:: none
- $ make BOARD=$PWD/misc/config_tools/data/nuc6cayh/nuc6cayh.xml SCENARIO=/path/to/scenario.xml
+ $ make BOARD=nuc6cayh SCENARIO=/path/to/scenario.xml
* Build with your own board and scenario configuration, assuming the board and
scenario XML files are ``/path/to/board.xml`` and ``/path/to/scenario.xml``:
|
x509: add missing X509 dup functions | @@ -246,10 +246,13 @@ X509_ALGOR_new,
X509_ATTRIBUTE_dup,
X509_ATTRIBUTE_free,
X509_ATTRIBUTE_new,
+X509_CERT_AUX_dup,
X509_CERT_AUX_free,
X509_CERT_AUX_new,
+X509_CINF_dup,
X509_CINF_free,
X509_CINF_new,
+X509_CRL_INFO_dup,
X509_CRL_INFO_free,
X509_CRL_INFO_new,
X509_CRL_dup,
@@ -264,6 +267,7 @@ X509_NAME_ENTRY_new,
X509_NAME_dup,
X509_NAME_free,
X509_NAME_new,
+X509_REQ_INFO_dup,
X509_REQ_INFO_free,
X509_REQ_INFO_new,
X509_REQ_dup,
@@ -272,6 +276,7 @@ X509_REQ_new,
X509_REVOKED_dup,
X509_REVOKED_free,
X509_REVOKED_new,
+X509_SIG_dup,
X509_SIG_free,
X509_SIG_new,
X509_VAL_free,
|
Fix typo that causes find-doc-nits failure | @@ -34,7 +34,7 @@ OCSP_single_get0_status, OCSP_check_validity
const STACK_OF(X509) *OCSP_resp_get0_certs(const OCSP_BASICRESP *bs);
- int OCSP_resp get0_signer(OCSP_BASICRESP *bs, X509 **signer,
+ int OCSP_resp_get0_signer(OCSP_BASICRESP *bs, X509 **signer,
STACK_OF(X509) *extra_certs);
int OCSP_resp_get0_id(const OCSP_BASICRESP *bs,
|
Tests: Changed tests to accept variables in "location". | @@ -83,7 +83,7 @@ Connection: close
assert resp['body'] == ''
def test_return_location(self):
- reserved = ":/?#[]@!$&'()*+,;="
+ reserved = ":/?#[]@!&'()*+,;="
unreserved = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
"0123456789-._~"
@@ -107,15 +107,15 @@ Connection: close
check_location(reserved)
# After first "?" all other "?" encoded.
- check_location("/?" + reserved, "/?:/%3F#[]@!$&'()*+,;=")
+ check_location("/?" + reserved, "/?:/%3F#[]@!&'()*+,;=")
check_location("???", "?%3F%3F")
# After first "#" all other "?" or "#" encoded.
- check_location("/#" + reserved, "/#:/%3F%23[]@!$&'()*+,;=")
+ check_location("/#" + reserved, "/#:/%3F%23[]@!&'()*+,;=")
check_location("##?#?", "#%23%3F%23%3F")
# After first "?" next "#" not encoded.
- check_location("/?#" + reserved, "/?#:/%3F%23[]@!$&'()*+,;=")
+ check_location("/?#" + reserved, "/?#:/%3F%23[]@!&'()*+,;=")
check_location("??##", "?%3F#%23")
check_location("/?##?", "/?#%23%3F")
|
Keep track of attached drivers so that pci_discover() can be called more than once. | @@ -40,6 +40,11 @@ static heap pages;
// assume the single bus layout
static u32 *msi_map[PCI_SLOTMAX];
+struct pci_driver {
+ pci_probe probe;
+ boolean attached;
+};
+
/* enable configuration space accesses and return data port address */
static int pci_cfgenable(pci_dev dev, int reg, int bytes)
{
@@ -166,9 +171,12 @@ void pci_setup_msix(pci_dev dev, int msi_slot, thunk h)
msi_map[dev->slot][msi_slot*4 + 3] = vector_control;
}
-void register_pci_driver(pci_probe p)
+void register_pci_driver(pci_probe probe)
{
- vector_push(drivers, p);
+ struct pci_driver *d = allocate(drivers->h, sizeof(struct pci_driver));
+ d->probe = probe;
+ d->attached = false;
+ vector_push(drivers, d);
}
void pci_discover()
@@ -181,9 +189,11 @@ void pci_discover()
if (pci_get_vendor(dev) == 0xffff && pci_get_device(dev) == 0xffff)
continue;
- pci_probe p;
- vector_foreach(drivers, p) {
- apply(p, dev);
+ struct pci_driver *d;
+ vector_foreach(drivers, d) {
+ if (!d->attached && apply(d->probe, dev)) {
+ d->attached = true;
+ }
}
}
}
|
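The idempotent-discovery idea generalizes: record per driver whether its probe already claimed a device, and skip it on later scans. A stripped-down sketch of that registry (hypothetical types, not the actual pci_dev/vector API):

    #include <stdbool.h>
    #include <stddef.h>

    struct device;                                 /* opaque bus device */
    typedef bool (*probe_fn)(struct device *dev);  /* true if the driver claims it */

    struct driver {
        probe_fn probe;
        bool attached;      /* set once, so rescans do not re-probe */
    };

    static void bus_discover(struct driver *drivers, size_t ndrivers,
                             struct device **devs, size_t ndevs)
    {
        for (size_t d = 0; d < ndevs; d++)
            for (size_t i = 0; i < ndrivers; i++)
                if (!drivers[i].attached && drivers[i].probe(devs[d]))
                    drivers[i].attached = true;
    }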
py/objdeque: Use m_new0 when allocating items to avoid need to clear.
Saves a few bytes of code space, and is more efficient because with
MICROPY_GC_CONSERVATIVE_CLEAR enabled by default all memory is already
cleared when allocated. | @@ -60,8 +60,7 @@ STATIC mp_obj_t deque_make_new(const mp_obj_type_t *type, size_t n_args, size_t
o->base.type = type;
o->alloc = maxlen + 1;
o->i_get = o->i_put = 0;
- o->items = m_new(mp_obj_t, o->alloc);
- mp_seq_clear(o->items, 0, o->alloc, sizeof(*o->items));
+ o->items = m_new0(mp_obj_t, o->alloc);
if (n_args > 2) {
o->flags = mp_obj_get_int(args[2]);
|
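In plain C the same trade-off is malloc-plus-memset versus calloc; a hedged illustration of why asking the allocator for already-cleared memory removes the explicit clear (m_new0 is MicroPython's zero-initialising allocation macro):

    #include <stdlib.h>
    #include <string.h>

    /* Two ways to get a zeroed array of n pointers. The second skips the
     * separate clearing pass, which is the saving m_new0 gives over
     * m_new + mp_seq_clear when the allocator already returns cleared memory. */
    void **alloc_cleared_slow(size_t n)
    {
        void **items = malloc(n * sizeof(void *));
        if (items)
            memset(items, 0, n * sizeof(void *));
        return items;
    }

    void **alloc_cleared_fast(size_t n)
    {
        return calloc(n, sizeof(void *));   /* zeroed by the allocator */
    }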
Dirty solution for NodeJS event loop problem, but it seems to work at least... | @@ -5,7 +5,6 @@ const Module = require('module');
const path = require('path');
const util = require('util');
const fs = require('fs');
-const async_hooks = require('async_hooks');
/* Require the JavaScript parser */
const cherow = require(path.join(__dirname, 'node_modules', 'cherow'));
@@ -383,29 +382,51 @@ module.exports = ((impl, ptr) => {
'destroy': node_loader_trampoline_destroy,
});
- function node_loader_trampoline_destroy() {
- /* Initialize async hooks for keeping track the amount of async handles that are in the event queue */
- const asyncHook = async_hooks.createHook({
- destroy: node_loader_trampoline_async_hook_destroy,
+ const activeTimers = new Map();
+
+ /* Timers are not included after Node.js 10 */
+ const NODE_VERSION = process.version.slice(1).split('.').map((v) => parseInt(v, 10));
+
+ const disableHooks = (NODE_VERSION[0] > 10) ? (() => {
+ /* Track them with the hooks */
+ const hook = asyncHooks.createHook({
+ init (asyncId, type, triggerAsyncId, resource) {
+ /* Track only timers */
+ if (type === 'Timeout') {
+ activeTimers.set(asyncId, {type, triggerAsyncId, resource});
+ }
+ },
+ destroy (asyncId) {
+ activeTimers.delete(asyncId);
+ },
});
- /* We need to use this cleanup variable because asyncHook.disable() seems not to work */
- let cleanup = false;
+ hook.enable();
- /* Enable async hooks for tracking all destroyed (and garbage collected) async resources */
- asyncHook.enable();
+ return () => { hook.disable() };
+ })() : () => {};
- function node_loader_trampoline_async_hook_destroy() {
+ function node_loader_trampoline_destroy() {
+ /* Initialize a timer for keeping track the amount of async handles that are in the event queue,
+ * before this async hooks have been used, but due to V8 GC, some destroy events were never triggered
+ * leaving the event loop always open, the only alternative I have found is this one, or refactoring
+ * the node loader, dropping support for Node 10 and 12, and implementing ourselves the event loop
+ * instead of using node::Start. This solution is dirty but at least works, lets keep it until
+ * we found something better, at least it makes the node loader stable */
+ const handle = setInterval(() => {
/* At this point we may have reached an "empty" event loop, it
* only contains the async resources that the Node Loader has populated,
- * but it does not contain any user defined async resource */
- if (trampoline.active_handles(node_loader_ptr) <= 0 && cleanup === false) {
- /* Disable Hooks and destroy the Node Loader */
- cleanup = true;
- asyncHook.disable();
+ * including this timer (1 for active_handles, 3 for _getActiveHandles() which includes
+ * stdin, stdout and the timer), but it does not contain any user defined async resource */
+ const timers = [...activeTimers.values()].filter(timer => !(typeof timer.resource.hasRef === 'function' && !timer.resource.hasRef()));
+
+ if (trampoline.active_handles(node_loader_ptr) <= 1 && process._getActiveHandles().length + timers.length <= 3 && process._getActiveRequests().length === 0) {
+ /* Clear the timer, disable hooks and destroy the Node Loader */
+ clearInterval(handle);
+ disableHooks();
trampoline.destroy(node_loader_ptr);
}
- }
+ }, 1000);
}
} catch (ex) {
console.log('Exception in bootstrap.js trampoline initialization:', ex);
|
Move -s USE_SDL=2 to BUILD_SDL part | @@ -148,11 +148,6 @@ if(MSVC)
else()
set(CMAKE_C_STANDARD 99)
-
- if(EMSCRIPTEN AND NOT BUILD_LIBRETRO)
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s USE_SDL=2")
- endif()
-
endif()
set(THIRDPARTY_DIR ${CMAKE_SOURCE_DIR}/vendor)
@@ -748,6 +743,7 @@ if(BUILD_SDL)
if(EMSCRIPTEN)
set_target_properties(${TIC80_OUTPUT} PROPERTIES LINK_FLAGS "-s WASM=1 -s USE_SDL=2 -s TOTAL_MEMORY=67108864 --pre-js ${CMAKE_SOURCE_DIR}/build/html/prejs.js")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -s USE_SDL=2")
elseif(NOT ANDROID)
target_link_libraries(${TIC80_OUTPUT} SDL2main)
endif()
|
Avoid loading alpha channel in c_e_s_rgb | @@ -83,7 +83,7 @@ static float compute_error_squared_rgb_single_partition(
continue;
}
- vfloat4 point = blk->texel(i);
+ vfloat4 point = blk->texel3(i);
float param = dot3_s(point, lin->bs);
vfloat4 rp1 = lin->amod + param * lin->bis;
vfloat4 dist = rp1 - point;
|
update BLAKE2 to have additional init arguments | @@ -87,19 +87,36 @@ static size_t blake2_mac_size(void *vmacctx)
return macctx->params.digest_length;
}
-static int blake2_mac_init(void *vmacctx)
+static int blake2_setkey(struct blake2_mac_data_st *macctx,
+ const unsigned char *key, size_t keylen)
+{
+ if (keylen > BLAKE2_KEYBYTES || keylen == 0) {
+ ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY_LENGTH);
+ return 0;
+ }
+ memcpy(macctx->key, key, keylen);
+ /* Pad with zeroes at the end if required */
+ if (keylen < BLAKE2_KEYBYTES)
+ memset(macctx->key + keylen, 0, BLAKE2_KEYBYTES - keylen);
+ BLAKE2_PARAM_SET_KEY_LENGTH(&macctx->params, (uint8_t)keylen);
+ return 1;
+}
+
+static int blake2_mac_init(void *vmacctx, const unsigned char *key,
+ size_t keylen, const OSSL_PARAM params[])
{
struct blake2_mac_data_st *macctx = vmacctx;
- if (!ossl_prov_is_running())
+ if (!ossl_prov_is_running() || !blake2_mac_set_ctx_params(macctx, params))
return 0;
-
+ if (key != NULL) {
+ if (!blake2_setkey(macctx, key, keylen))
+ return 0;
+ } else if (macctx->params.key_length == 0) {
/* Check key has been set */
- if (macctx->params.key_length == 0) {
ERR_raise(ERR_LIB_PROV, PROV_R_NO_KEY_SET);
return 0;
}
-
return BLAKE2_INIT_KEY(&macctx->ctx, &macctx->params, macctx->key);
}
@@ -180,19 +197,9 @@ static int blake2_mac_set_ctx_params(void *vmacctx, const OSSL_PARAM params[])
BLAKE2_PARAM_SET_DIGEST_LENGTH(&macctx->params, (uint8_t)size);
}
- if ((p = OSSL_PARAM_locate_const(params, OSSL_MAC_PARAM_KEY)) != NULL) {
- size_t len;
- void *key_p = macctx->key;
-
- if (!OSSL_PARAM_get_octet_string(p, &key_p, BLAKE2_KEYBYTES, &len)) {
- ERR_raise(ERR_LIB_PROV, PROV_R_INVALID_KEY_LENGTH);
+ if ((p = OSSL_PARAM_locate_const(params, OSSL_MAC_PARAM_KEY)) != NULL
+ && !blake2_setkey(macctx, p->data, p->data_size))
return 0;
- }
- /* Pad with zeroes at the end */
- memset(macctx->key + len, 0, BLAKE2_KEYBYTES - len);
-
- BLAKE2_PARAM_SET_KEY_LENGTH(&macctx->params, (uint8_t)len);
- }
if ((p = OSSL_PARAM_locate_const(params, OSSL_MAC_PARAM_CUSTOM))
!= NULL) {
|
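From an application's point of view this corresponds to OpenSSL 3.0's EVP_MAC interface, where the key is now handed directly to the init call; a rough usage sketch (error handling omitted; the "BLAKE2BMAC" fetch name is taken from the public EVP_MAC documentation):

    #include <openssl/evp.h>

    /* Compute a BLAKE2b MAC over msg with the key passed at init time. */
    static size_t blake2_mac(const unsigned char *key, size_t keylen,
                             const unsigned char *msg, size_t msglen,
                             unsigned char *out, size_t outsize)
    {
        size_t outlen = 0;
        EVP_MAC *mac = EVP_MAC_fetch(NULL, "BLAKE2BMAC", NULL);
        EVP_MAC_CTX *ctx = EVP_MAC_CTX_new(mac);

        EVP_MAC_init(ctx, key, keylen, NULL);   /* key goes directly to init */
        EVP_MAC_update(ctx, msg, msglen);
        EVP_MAC_final(ctx, out, &outlen, outsize);

        EVP_MAC_CTX_free(ctx);
        EVP_MAC_free(mac);
        return outlen;
    }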
YAMBi: Specify expected number of conflicts | %skeleton "lalr1.cc" /* Generate an LALR(1) parser */
+%expect 0 /* We expect no shift/reduce and no reduce/reduce conflicts */
+
%define @PARSER_NAME_DIRECTIVE@ { Parser } /* Call the generator parser class `Parser` */
%define api.prefix {yambi} /* Use namespace `yambi` for parser code */
%define api.token.constructor /* Store type, value and location data in symbol objects */
|
components/bt: Fix crash in Bluetooth when esp_restart | @@ -1442,16 +1442,12 @@ esp_err_t esp_bt_controller_deinit(void)
static void bt_shutdown(void)
{
esp_err_t ret = ESP_OK;
- ESP_LOGD(BTDM_LOG_TAG, "stop/deinit bt");
+ ESP_LOGD(BTDM_LOG_TAG, "stop Bluetooth");
ret = esp_bt_controller_disable();
if (ESP_OK != ret) {
ESP_LOGW(BTDM_LOG_TAG, "controller disable ret=%d", ret);
}
- ret = esp_bt_controller_deinit();
- if (ESP_OK != ret) {
- ESP_LOGW(BTDM_LOG_TAG, "controller deinit ret=%d", ret);
- }
return;
}
|
Update and fix docs. | @@ -78,12 +78,12 @@
When using this option, it is required that the BAM
file is sorted/grouped by the read name. This keeps the resulting records
in the two output FASTQ files in the same order. One can sort the BAM
- file by query name with ``samtools sort -n aln.bam aln.qsort``.
+ file by query name with ``samtools sort -n -o aln.qsort.bam aln.bam``.
.. code-block:: bash
- $ samtools sort -n aln.bam aln.qsort
+ $ samtools sort -n -o aln.qsort.bam aln.bam
$ bedtools bamtofastq -i aln.qsort.bam \
-fq aln.end1.fq \
|
Simplify checking whether it is a Float object | @@ -346,21 +346,6 @@
}
-/**
- * Try to convert the argument to a double, raise an exception if fail.
- *
- * No Ruby usage (internal function)
- *
- * @param arg the argument
- * @return arg
- */
-static VALUE
-arg_is_number(VALUE arg)
-{
- return DBL2NUM(NUM2DBL(arg));
-}
-
-
/**
* Called when `rb_str_to_str' raises an exception.
*
@@ -396,10 +381,7 @@ rm_percentage(VALUE arg, double max)
char *end;
int not_num;
- // Try to convert the argument to a number. If failure, sets not_num to non-zero.
- rb_protect(arg_is_number, arg, ¬_num);
-
- if (not_num)
+ if (!rm_check_num2dbl(arg))
{
char *pct_str;
long pct_long;
@@ -543,10 +525,7 @@ rm_fuzz_to_dbl(VALUE fuzz_arg)
char *end;
int not_num;
- // Try to convert the argument to a number. If failure, sets not_num to non-zero.
- rb_protect(arg_is_number, fuzz_arg, ¬_num);
-
- if (not_num)
+ if (!rm_check_num2dbl(fuzz_arg))
{
char *fuzz_str;
|
Hover funcs are likely AtkResNode. | @@ -1329,10 +1329,8 @@ factory.register(0x1417CDB58, "Client::UI::AddonMateriaAttach", "Component::GUI:
factory.register(0x1417CDF98, "Client::UI::AddonMateriaAttachDialog", "Component::GUI::AtkUnitBase", {})
factory.register(0x1417DE1D0, "Client::UI::AddonGathering", "Component::GUI::AtkUnitBase", {
0x140ecfeb0: "ctor",
- 0x1404cdf70: "ReceiveEvent_HoverEnter",
0x140ed0610: "ReceiveEvent_ToggleQuickGathering",
0x140ed06c0: "ReceiveEvent_Gather",
- 0x140ed0850: "ReceiveEvent_HoverExit",
})
factory.register(0x1417F8530, "Client::UI::AddonWeeklyPuzzle", "Component::GUI::AtkUnitBase", {}) # Faux Hollows
factory.register(0x141808FE8, "Client::UI::AddonPartyList", "Component::GUI::AtkUnitBase", {
|
Force sockaddr_storage to the desired alignment
First configure NuttX to support IPv4 and IPv6 dual stack,
then start two simulators and run an iperf IPv4 UDP speed test:
an unaligned access exception on sockaddr_in occurs.
The root cause is that struct sockaddr_storage isn't set to
the desired alignment. | #define SCM_CREDENTIALS 0x02 /* rw: struct ucred */
#define SCM_SECURITY 0x03 /* rw: security label */
+/* Desired design of maximum size and alignment (see RFC2553) */
+
+#define SS_MAXSIZE 128 /* Implementation specific max size */
+#define SS_ALIGNSIZE (sizeof(FAR struct sockaddr *))
+ /* Implementation specific desired alignment */
+
/****************************************************************************
* Type Definitions
****************************************************************************/
struct sockaddr_storage
{
sa_family_t ss_family; /* Address family */
- char ss_data[126]; /* 126-bytes of address data */
-};
+ char ss_data[SS_MAXSIZE - sizeof(sa_family_t)];
+}
+aligned_data(SS_ALIGNSIZE); /* Force desired alignment */
/* The sockaddr structure is used to define a socket address which is used
* in the bind(), connect(), getpeername(), getsockname(), recvfrom(), and
|
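The general cure for this class of bug is to give the storage type an alignment at least as strict as any socket address type that may be overlaid on it; a compiler-specific sketch (GCC/Clang aligned attribute — whether NuttX's aligned_data() expands to exactly this is an assumption):

    #include <stddef.h>

    /* Force the storage struct to pointer alignment so that casting it to
     * sockaddr_in / sockaddr_in6 never produces a misaligned access.
     * C11 code could instead put _Alignas(void *) on the first member. */
    struct my_sockaddr_storage {
        unsigned short ss_family;
        char ss_data[128 - sizeof(unsigned short)];
    } __attribute__((aligned(sizeof(void *))));

    _Static_assert(_Alignof(struct my_sockaddr_storage) >= _Alignof(void *),
                   "storage must be at least pointer-aligned");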
nimble/host: Fix host event buffer count
Fixes the problem of events such as
ble_hs_hci_evt_le_scan_timeout not being put on the queue. | #include "nimble/nimble_port.h"
#endif
-#define BLE_HS_HCI_EVT_COUNT MYNEWT_VAL(BLE_TRANSPORT_EVT_COUNT)
+#define BLE_HS_HCI_EVT_COUNT (MYNEWT_VAL(BLE_TRANSPORT_EVT_COUNT) + \
+ MYNEWT_VAL(BLE_TRANSPORT_EVT_DISCARDABLE_COUNT))
static void ble_hs_event_rx_hci_ev(struct ble_npl_event *ev);
#if NIMBLE_BLE_CONNECT
|
tcp-connection-limit in example config. | @@ -737,6 +737,9 @@ server:
# 0 blocks when ip is ratelimited, otherwise let 1/xth traffic through
# ip-ratelimit-factor: 10
+ # Limit the number of connections simultaneous from a netblock
+ # tcp-connection-limit: 192.0.2.0/24 12
+
# what is considered a low rtt (ping time for upstream server), in msec
# low-rtt: 45
# select low rtt this many times out of 1000. 0 means the fast server
|
doc: Convert list into markdown enumeration | @@ -36,8 +36,11 @@ type = float
default = 1.1
```
-In Elektra a specification is defined through the metadata of keys in the `spec` namespace. The specification above contains metadata for
-three keys: 1. the parent key (`@`), 2. `@/mydouble` and 3. `@/myfloatarray/#`. The `#` at the end of `myfloatarray/#` indicates that it is an array.
+In Elektra a specification is defined through the metadata of keys in the `spec` namespace. The specification above contains metadata for three keys:
+
+1. the parent key (`@`)
+2. `@/mydouble`
+3. `@/myfloatarray/#` (The `#` at the end of `myfloatarray/#` indicates that it is an array
The `mountpoint` metadata on the parent key sets the name of our application's config file (the location is defined by Elektra), it should
be unique.
|
When dumping a function, append the callback function attribute if needed.
The 'DESCRIBE' attribute is added by GPDB to describe
the name of a callback function. Currently pg_dump
does not handle this attribute. | @@ -8259,6 +8259,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
char *procost;
char *prorows;
char *lanname;
+ char *callbackfunc;
char *prodataaccess;
char *proexeclocation;
char *rettypename;
@@ -8298,7 +8299,8 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
"proiswindow, provolatile, proisstrict, prosecdef, "
"proconfig, procost, prorows, prodataaccess, "
"proexeclocation, "
- "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname "
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname, "
+ "(SELECT procallback FROM pg_catalog.pg_proc_callback WHERE profnoid::pg_catalog.oid = oid) as callbackfunc "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
finfo->dobj.catId.oid);
@@ -8317,7 +8319,8 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
"proiswindow, provolatile, proisstrict, prosecdef, "
"proconfig, procost, prorows, prodataaccess, "
"'a' as proexeclocation, "
- "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname "
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname, "
+ "(SELECT procallback FROM pg_catalog.pg_proc_callback WHERE profnoid::pg_catalog.oid = oid) as callbackfunc "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
finfo->dobj.catId.oid);
@@ -8331,7 +8334,8 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
"provolatile, proisstrict, prosecdef, "
"null as proconfig, 0 as procost, 0 as prorows, %s"
"'a' as proexeclocation, "
- "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname "
+ "(SELECT lanname FROM pg_catalog.pg_language WHERE oid = prolang) as lanname, "
+ "(SELECT procallback FROM pg_catalog.pg_proc_callback WHERE profnoid::pg_catalog.oid = oid) as callbackfunc "
"FROM pg_catalog.pg_proc "
"WHERE oid = '%u'::pg_catalog.oid",
(isGE43 ? "prodataaccess, " : ""),
@@ -8377,6 +8381,7 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
procost = PQgetvalue(res, 0, PQfnumber(res, "procost"));
prorows = PQgetvalue(res, 0, PQfnumber(res, "prorows"));
lanname = PQgetvalue(res, 0, PQfnumber(res, "lanname"));
+ callbackfunc = PQgetvalue(res, 0, PQfnumber(res, "callbackfunc"));
prodataaccess = PQgetvalue(res, 0, PQfnumber(res, "prodataaccess"));
proexeclocation = PQgetvalue(res, 0, PQfnumber(res, "proexeclocation"));
@@ -8618,7 +8623,13 @@ dumpFunc(Archive *fout, FuncInfo *finfo)
appendStringLiteralAH(q, pos, fout);
}
- appendPQExpBuffer(q, "\n %s;\n", asPart->data);
+ appendPQExpBuffer(q, "\n %s", asPart->data);
+
+ /* Append callback function */
+ if (callbackfunc && callbackfunc[0] != '\0')
+ appendPQExpBuffer(q, "\n WITH (describe = %s)", callbackfunc);
+
+ appendPQExpBuffer(q, ";\n");
appendPQExpBuffer(labelq, "FUNCTION %s", funcsig);
|
pic32: Add option to use hal _sbrk
pic32 BSPs still use a private _sbrk().
This adds an option to call the hal-provided _sbrk() version
that other architectures use now.
This will be needed for new BSPs that have more
common mynewt features. | #include "xc.h"
#include <cp0defs.h>
+#include <syscfg/syscfg.h>
#ifdef __LIBBUILD__
# Replace the standard debugging information with a simple filename. This
@@ -571,6 +572,13 @@ _cache_err_vector:
.align 2
.ent _main_entry
_main_entry:
+#if MYNEWT_VAL_HAL_SBRK
+ la a0, __HeapBase
+ la a1, __HeapLimit
+ la t0, _sbrkInit
+ jalr t0
+ nop
+#endif
#if defined(CPP_INIT)
.weak _init
|