message (string, 6-474 chars) | diff (string, 8-5.22k chars)
---|---|
Leaf: Catch exceptions in `get`/`set` functions | #include <kdbhelper.h>
+using std::exception;
+
using elektra::LeafDelegate;
using CppKey = kdb::Key;
@@ -70,7 +72,16 @@ int elektraLeafGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key * par
keys.release ();
return ELEKTRA_PLUGIN_STATUS_SUCCESS;
}
- int status = delegator::get (handle)->convertToDirectories (keys);
+
+ int status = ELEKTRA_PLUGIN_STATUS_ERROR;
+ try
+ {
+ status = delegator::get (handle)->convertToDirectories (keys);
+ }
+ catch (exception const & error)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_UNCAUGHT_EXCEPTION, *parent, error.what ());
+ }
parent.release ();
keys.release ();
@@ -81,9 +92,19 @@ int elektraLeafGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key * par
int elektraLeafSet (Plugin * handle, KeySet * returned ELEKTRA_UNUSED, Key * parentKey ELEKTRA_UNUSED)
{
CppKeySet keys{ returned };
+ CppKey parent{ parentKey };
- int status = delegator::get (handle)->convertToLeaves (keys);
+ int status = ELEKTRA_PLUGIN_STATUS_ERROR;
+ try
+ {
+ status = delegator::get (handle)->convertToLeaves (keys);
+ }
+ catch (exception const & error)
+ {
+ ELEKTRA_SET_ERROR (ELEKTRA_ERROR_UNCAUGHT_EXCEPTION, *parent, error.what ());
+ }
+ parent.release ();
keys.release ();
return status;
}
|
kdb: factory add () | @@ -108,7 +108,7 @@ public:
m_factory.insert (std::make_pair ("umount", new Cnstancer<UmountCommand> ()));
m_factory.insert (std::make_pair ("file", new Cnstancer<FileCommand> ()));
m_factory.insert (std::make_pair ("sget", new Cnstancer<ShellGetCommand> ()));
- m_factory.insert (std::make_pair ("merge", new Cnstancer<MergeCommand>));
+ m_factory.insert (std::make_pair ("merge", new Cnstancer<MergeCommand> ()));
m_factory.insert (std::make_pair ("list", new Cnstancer<ListCommand> ()));
m_factory.insert (std::make_pair ("editor", new Cnstancer<EditorCommand> ()));
m_factory.insert (std::make_pair ("spec-mount", new Cnstancer<SpecMountCommand> ()));
|
l2: Fix compile error on unused next_index
Type: fix | @@ -53,7 +53,6 @@ typedef struct
{
/* per-pkt trace data */
u8 dst_and_src[12];
- u32 next_index;
u32 sw_if_index;
u32 feat_mask;
} l2input_trace_t;
@@ -237,14 +236,12 @@ l2input_node_inline (vlib_main_t * vm,
int do_trace)
{
u32 n_left, *from;
- l2input_next_t next_index;
l2input_main_t *msm = &l2input_main;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
from = vlib_frame_vector_args (frame);
n_left = frame->n_vectors; /* number of packets to process */
- next_index = node->cached_next_index;
vlib_get_buffers (vm, from, bufs, n_left);
|
Fix library installation directory. | @@ -29,7 +29,7 @@ docker run \
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr \
- -DTINYSPLINE_INSTALL_LIBRARY_DIR=/lib \
+ -DTINYSPLINE_INSTALL_LIBRARY_DIR=lib \
-DTINYSPLINE_ENABLE_CSHARP=True \
-DTINYSPLINE_ENABLE_DLANG=True \
-DTINYSPLINE_ENABLE_GO=True \
|
Reformulate the if condition in tls_process_new_session_ticket
Improves readability | @@ -2566,9 +2566,8 @@ MSG_PROCESS_RETURN tls_process_new_session_ticket(SSL *s, PACKET *pkt)
&& (!PACKET_get_net_4(pkt, &age_add)
|| !PACKET_get_length_prefixed_1(pkt, &nonce)))
|| !PACKET_get_net_2(pkt, &ticklen)
- || (!SSL_IS_TLS13(s) && PACKET_remaining(pkt) != ticklen)
- || (SSL_IS_TLS13(s)
- && (ticklen == 0 || PACKET_remaining(pkt) < ticklen))) {
+ || (SSL_IS_TLS13(s) ? (ticklen == 0 || PACKET_remaining(pkt) < ticklen)
+ : PACKET_remaining(pkt) != ticklen)) {
SSLfatal(s, SSL_AD_DECODE_ERROR, SSL_F_TLS_PROCESS_NEW_SESSION_TICKET,
SSL_R_LENGTH_MISMATCH);
goto err;
|
Mark projectile wiki documented. | @@ -2552,7 +2552,7 @@ typedef struct entity
e_edge_state edge; // At an edge (unbalanced).
e_invincible_state invincible; // Attack invulnerability. ~~
e_direction normaldamageflipdir; // Used to reset backpain direction. ~~
- e_blasted_state projectile; // Blasted or tossed (bowl over other entities in fall).
+ e_blasted_state projectile; // Blasted or tossed (bowl over other entities in fall). ~~
e_rising_state rising; // Rise/Rise attacking. ~~
int seal; // If 0+, entity can't perform special with >= energy cost. ~~
e_explode_state toexplode; // Bomb projectiles prepared or time to detonate. ~~
|
secure_boot_v2(doc): secure_boot_v2 key/s must be readable | -Subproject commit e39896e1243418c7e28e4e0c4532661f3c7c5d2d
+Subproject commit 9876dfe58353f01c873e1543dd0654c5b04314a4
|
removes orphaned telnet constants and forward declarations | #include "vere/vere.h"
static void _term_spinner_cb(void*);
-static void _term_read_tn_cb(uv_stream_t* tcp_u,
- ssize_t siz_i,
- const uv_buf_t * buf_u);
static void _term_read_cb(uv_stream_t* tcp_u,
ssize_t siz_i,
const uv_buf_t * buf_u);
static inline void _term_suck(u3_utty*, const c3_y*, ssize_t);
-
-#define _T_ECHO 1 // local echo
-#define _T_CTIM 3 // suppress GA/char-at-a-time
-#define _T_NAWS 31 // negotiate about window size
-
#define _SPIN_COOL_US 500000 // spinner activation delay when cool
#define _SPIN_WARM_US 50000 // spinner activation delay when warm
#define _SPIN_RATE_US 250000 // spinner rate (microseconds/frame)
|
Add various glossary entries | # Glossary
+#### Axiom
+
+A named rule for asserting new facts. See the
+[assertions](/doc/note/assertions.md#axioms) note for more details.
+
+#### Assertion
+
+A compile-time directive that introduces a new fact (or fails to compile, if
+the assertion cannot be proved). See the [assertions](/doc/note/assertions.md)
+note for more details.
+
+#### Call Sequence
+
+An API restriction that e.g. the `foo` method needs to be called before the
+`bar` method. Out of order calls may result in a `"#bad call sequence"` error.
+
#### Coroutine
A function that can suspend execution and, when called again later, resume
@@ -99,6 +115,13 @@ Arithmetic that stops at certain bounds, such as `0` and `255` for the
The set of [facts](/doc/note/facts.md) at a given point in a program.
+#### Unsafe
+
+A programming language mechanism (e.g. an `unsafe` keyword or package) to
+circumvent the language's safety enforcement, typically used for performance or
+very low level programming. Unlike some other memory-safe languages, Wuffs
+doesn't have such an 'escape hatch'.
+
#### Utility Type
An empty struct (with no fields) used as a placeholder. Every written-in-Wuffs
@@ -109,3 +132,14 @@ are to its package what C++ or Java's static methods are to its class.
package has a type called `base.utility`, similar to how the `zlib` package has
a type called `zlib.decoder`. Unlike "dependent type" or "refinement type",
"utility type" is not a phrase used in programming language type theory.
+
+#### Work Buffer
+
+Scratch space in addition to the primary destination buffer. For example, in
+image decoding, the primary destination buffer holds the decoded pixels, but
while decoding is in progress, a decoder might want to store additional state
+per image column.
+
+Wuffs' codecs can state the (input dependent) size of their work buffer needs
+as a range, not just a single value. Callers then have the option to trade off
+memory for performance.
|
rsa_test: add return value check
Fixes | @@ -231,7 +231,9 @@ static int pad_unknown(void)
static int rsa_setkey(RSA** key, unsigned char* ctext, int idx)
{
int clen = 0;
+
*key = RSA_new();
+ if (*key != NULL)
switch (idx) {
case 0:
clen = key1(*key, ctext);
|
fix building with empty LIBRESSL_INCLUDE_DIRS | @@ -109,7 +109,9 @@ if (NOT ENABLE_SSL STREQUAL OFF)
if (LIBRESSL_FOUND)
message ("-- Found ${LIBRESSL_LIBRARIES}")
set (SSL_LIBRARIES ${LIBRESSL_LIBRARIES})
+ if (${LIBRESSL_INCLUDE_DIRS})
include_directories ("${LIBRESSL_INCLUDE_DIRS}")
+ endif ()
link_directories ("${LIBRESSL_LIBRARY_DIRS}")
set (LIBRESSL 1)
else ()
|
Removing ROSS_MEMORY option from the build | @@ -99,13 +99,6 @@ IF(AVL_TREE)
SET(ross_srcs ${ross_srcs} avl_tree.h avl_tree.c)
ENDIF(AVL_TREE)
-# ROSS_MEMORY is either on or off depending on whether or not we desire
-# memory buffers. If it's not set to YES, it defaults to NO
-OPTION(ROSS_MEMORY "ROSS Memory Buffers (membufs)" OFF)
-IF(ROSS_MEMORY)
- SET(ross_srcs ${ross_srcs} tw-memory.c tw-memoryq.h tw-memory.h)
-ENDIF(ROSS_MEMORY)
-
# RIO: Restart IO
OPTION(USE_RIO "Enable RIO checkpointing library?" OFF)
IF(USE_RIO)
|
do not terminate program if channel can't be set | @@ -1173,11 +1173,7 @@ strncpy(wrq.ifr_name, interfacename , IFNAMSIZ);
if((sock = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
fprintf(stderr, "socket open for ioctl() on '%s' failed with '%d'\n", interfacename, sock);
-#ifdef DOGPIOSUPPORT
- if(system("reboot") != 0)
- printf("can't reboot\n");
-#endif
- programmende(SIGINT);
+ internalpcaperrors++;
return;
}
@@ -1189,13 +1185,7 @@ if(ioctl(sock, SIOCSIWFREQ, &wrq) < 0)
usleep(100);
if((result = ioctl(sock, SIOCSIWFREQ, &wrq)) < 0)
{
- fprintf(stderr, "ioctl(SIOCSIWFREQ) on '%s' failed with '%d'\n", interfacename, result);
- fprintf(stderr, "unable to set channel %d on '%s'\n", channellist[chptr], interfacename);
-#ifdef DOGPIOSUPPORT
- if(system("reboot") != 0)
- printf("can't reboot\n");
-#endif
- programmende(SIGINT);
+ internalpcaperrors++;
}
}
close(sock);
|
Fix vat_api_hookup name collision | @@ -214,8 +214,8 @@ _(udp_ping_add_del_req, "src <local IPv6 address> start-src-port <first local p
_(udp_ping_export_req, "export [disable]") \
-void
-vat_api_hookup (vat_main_t * vam)
+static void
+udp_ping_test_api_hookup (vat_main_t * vam)
{
udp_ping_test_main_t *sm = &udp_ping_test_main;
/* Hook up handlers for replies from the data plane plug-in */
@@ -253,7 +253,7 @@ vat_plugin_register (vat_main_t * vam)
sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name);
if (sm->msg_id_base != (u16) ~ 0)
- vat_api_hookup (vam);
+ udp_ping_test_api_hookup (vam);
vec_free (name);
|
CMakeLists.txt,cosmetics: normalize if() formatting
+ break a long line | @@ -329,7 +329,8 @@ if(WEBP_BUILD_IMG2WEBP)
parse_Makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/examples "IMG2WEBP_SRCS"
"img2webp")
add_executable(img2webp ${IMG2WEBP_SRCS})
- target_link_libraries(img2webp exampleutil imagedec imageioutil webp libwebpmux)
+ target_link_libraries(img2webp exampleutil imagedec imageioutil webp
+ libwebpmux)
target_include_directories(img2webp PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src)
install(TARGETS img2webp RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()
|
Ignore `ts_bspline_insert_knot` in R. | %ignore tinyspline::BSpline::operator=;
%ignore tinyspline::DeBoorNet::operator=;
%ignore tinyspline::Domain::operator=;
+%ignore ts_bspline_insert_knot;
%ignore ts_deboornet_result;
// Map std::vector<tinyspline::real> to R vector.
|
rust/bitbox02: unit test strlen_ptr | @@ -104,6 +104,14 @@ mod tests {
assert_eq!(truncate_str("test", 6), "test");
}
+ #[test]
+ fn test_strlen_ptr() {
+ assert_eq!(unsafe { strlen_ptr(b"\0".as_ptr()) }, 0);
+ assert_eq!(unsafe { strlen_ptr(b"a\0".as_ptr()) }, 1);
+ assert_eq!(unsafe { strlen_ptr(b"abcdef\0".as_ptr()) }, 6);
+ assert_eq!(unsafe { strlen_ptr(b"abcdef\0defghji".as_ptr()) }, 6);
+ }
+
#[test]
fn test_str_from_null_terminated() {
assert_eq!(str_from_null_terminated(b"\0"), Ok(""));
|
fix passing testkdb_ensure test | @@ -88,10 +88,10 @@ buildelektra-sid
After starting the container, you should be automatically inside it in the working directory `/home/jenkins/workspace`.
-Create folder for building the project, `cd` to it and create another folder where Elektra will be installed like this:
+Create a folder where Elektra will be installed, create another folder for building the project and `cd` to it, like this:
```sh
-mkdir /home/jenkins/elektra-build-docker && mkdir /home/jenkins/elektra-install && cd /home/jenkins/elektra-build-docker
+mkdir elektra-install && mkdir elektra-build-docker && cd elektra-build-docker
```
Build it with
@@ -101,12 +101,15 @@ Build it with
-DBINDINGS="ALL;-DEPRECATED;-haskell" \
-DPLUGINS="ALL;-DEPRECATED" \
-DTOOLS="ALL" \
--DENABLE_DEBUG=ON \
--DKDB_DB_HOME="/home/jenkins/workspace" \
--DKDB_DB_SYSTEM="/home/jenkins/workspace/.config/kdb/system" \
--DKDB_DB_SPEC="/home/jenkins/workspace/.config/kdb/system" \
+-DENABLE_DEBUG="ON" \
+-DKDB_DB_HOME="/home/jenkins/workspace/elektra-build-docker/.config/kdb/home" \
+-DKDB_DB_SYSTEM="/home/jenkins/workspace/elektra-build-docker/.config/kdb/system" \
+-DKDB_DB_SPEC="/home/jenkins/workspace/elektra-build-docker/.config/kdb/spec" \
-DINSTALL_SYSTEM_FILES="OFF" \
--DCMAKE_INSTALL_PREFIX="/home/jenkins/elektra-install"
+-DBUILD_DOCUMENTATION="OFF" \
+-DCMAKE_RULE_MESSAGES="OFF" \
+-DCMAKE_INSTALL_PREFIX="/home/jenkins/workspace/elektra-install" \
+-DCOMMON_FLAGS="-Werror"
```
and then with
@@ -128,8 +131,8 @@ make install
After Elektra has been installed we need to add it to the PATH variable, meaning you and the tests can interact with Elektra by typing/executing `kdb` in the command line.
```sh
-export PATH="/home/jenkins/elektra-install/bin:$PATH"
-export LD_LIBRARY_PATH="/home/jenkins/elektra-install/lib:$PATH"
+export PATH="/home/jenkins/workspace/elektra-install/bin:$PATH"
+export LD_LIBRARY_PATH="/home/jenkins/workspace/elektra-install/lib:$PATH"
```
### 4. Run Tests
|
fix tests/client_get_linux compile fail error.
Tested-by: IoTivity Jenkins | @@ -80,7 +80,7 @@ check_resource_cb(oc_client_response_t *data)
static oc_discovery_flags_t
discovery_cb(const char *di, const char *uri, oc_string_array_t types,
- oc_interface_mask_t interfaces, oc_server_handle_t *server,
+ oc_interface_mask_t interfaces, oc_endpoint_t *server,
oc_resource_properties_t bm, void *user_data)
{
(void)bm;
@@ -217,10 +217,10 @@ register_resources(void)
oc_resource_t *res;
r = snprintf(name, sizeof(name), "/test/%d", i);
- if (r < 0 || r >= sizeof(name))
+ if (r < 0 || r >= (int)sizeof(name))
exit(EXIT_FAILURE);
- res = oc_new_resource(name, 1, 0);
+ res = oc_new_resource(NULL,name, 1, 0);
oc_resource_bind_resource_type(res, "constrained.r.test");
oc_resource_bind_resource_interface(res, OC_IF_RW);
oc_resource_set_default_interface(res, OC_IF_RW);
|
pass correct verilator root | @@ -13,6 +13,9 @@ source $SCRIPT_DIR/defaults.sh
# call clean on exit
trap clean EXIT
+cd $LOCAL_CHIPYARD_DIR
+./scripts/init-submodules-no-riscv-tools.sh
+
# set stricthostkeychecking to no (must happen before rsync)
run "echo \"Ping $SERVER\""
@@ -27,9 +30,8 @@ copy $LOCAL_CHIPYARD_DIR/ $SERVER:$REMOTE_CHIPYARD_DIR
copy $LOCAL_VERILATOR_DIR/ $SERVER:$REMOTE_VERILATOR_DIR
# enter the verisim directory and build the specific config on remote server
-run "cd $REMOTE_CHIPYARD_DIR && ./scripts/init-submodules-no-riscv-tools.sh"
run "make -C $REMOTE_SIM_DIR clean"
-run "export RISCV=\"$REMOTE_RISCV_DIR\"; make -C $REMOTE_SIM_DIR VERILATOR_INSTALL_DIR=$REMOTE_VERILATOR_DIR JAVA_ARGS=\"-Xmx8G -Xss8M\" $@"
+run "export RISCV=\"$REMOTE_RISCV_DIR\"; export VERILATOR_ROOT=$REMOTE_VERILATOR_DIR; make -C $REMOTE_SIM_DIR VERILATOR_INSTALL_DIR=$REMOTE_VERILATOR_DIR JAVA_ARGS=\"-Xmx8G -Xss8M\" $@"
run "rm -rf $REMOTE_CHIPYARD_DIR/project"
# copy back the final build
|
Update PyPI cache at runtime if it wasn't downloaded | @@ -13,6 +13,13 @@ import Combine
func search(for package: String) -> [String] {
let index = Bundle.main.url(forResource: "pypi_index", withExtension: "html") ?? FileManager.default.urls(for: .libraryDirectory, in: .allDomainsMask)[0].appendingPathComponent("pypi_index.html")
+ guard FileManager.default.fileExists(atPath: index.path) else {
+ #if MAIN
+ AppDelegate.shared.updatePyPiCache()
+ #endif
+ return []
+ }
+
do {
let content = try String(contentsOf: index)
let lines = content.components(separatedBy: "\n")
|
NetKVM: fix possible 'Nbl' NULL pointer usage (CA)
Protect NdisSendNetBufferLists from passing Nbl == NULL. | @@ -1162,7 +1162,7 @@ static void RetrieveSourceHandle(PNET_BUFFER_LIST start, PNET_BUFFER_LIST stopAt
bool CProtocolBinding::Send(PNET_BUFFER_LIST Nbl, ULONG Count)
{
- if (!m_TxStateMachine.RegisterOutstandingItems(Count))
+ if (Nbl == NULL || !m_TxStateMachine.RegisterOutstandingItems(Count))
{
return false;
}
|
add radius var | @@ -193,11 +193,16 @@ void fromProcessing(String data)
float init_arg[2] = {move_time, ACTUATOR_CONTROL_TIME};
void *p_init_arg = init_arg;
+ static float radius = 0.005f;
+
SCARA.drawInit(CIRCLE, p_init_arg);
- SCARA.setRadiusForDrawing(CIRCLE, 0.050);
+ SCARA.setRadiusForDrawing(CIRCLE, radius);
SCARA.setTimeForDrawing(move_time);
SCARA.setStartPositionForDrawing(CIRCLE, SCARA.getComponentPositionToWorld(TOOL));
SCARA.draw();
+
+ radius += 0.005f;
+
// if (DYNAMIXEL)
// sendAngle2Processing(getAngle());
|
Clone submodules as well, otherwise building fails | @@ -9,7 +9,10 @@ A modification of the Mesa Vulkan overlay. Including GUI improvements, temperatu
First, clone this repository and cd into it:
-`git clone https://github.com/flightlessmango/MangoHud.git; cd MangoHud`
+```
+git clone --recurse-submodules https://github.com/flightlessmango/MangoHud.git
+cd MangoHud
+```
Then simply run the following command:
|
ci: coverage: skip ra.[ly] files | @@ -67,8 +67,8 @@ if [[ "$FLB_OPT" =~ COVERAGE ]]
then
mkdir -p coverage
find lib -name "*.gcda" -o -name "*.gcno" -print0 | xargs -0 -r rm
- gcovr -e "build/sql.l" -e "build/sql.y" -p -r .. . | cut -c1-100
- gcovr -e "build/sql.l" -e "build/sql.y" --html --html-details -p -r .. -o coverage/index.html .
+ gcovr -e "build/sql.l" -e "build/sql.y" -e "build/ra.l" -e "build/ra.y" -p -r .. . | cut -c1-100
+ gcovr -e "build/sql.l" -e "build/sql.y" -e "build/ra.l" -e "build/ra.y" --html --html-details -p -r .. -o coverage/index.html .
echo
echo "See coverage/index.html for code-coverage details"
fi
|
lib/utils/pyexec: Add missing MP_ERROR_TEXT when compiler disabled. | @@ -96,7 +96,7 @@ STATIC int parse_compile_execute(const void *source, mp_parse_input_kind_t input
mp_parse_tree_t parse_tree = mp_parse(lex, input_kind);
module_fun = mp_compile(&parse_tree, source_name, exec_flags & EXEC_FLAG_IS_REPL);
#else
- mp_raise_msg(&mp_type_RuntimeError, "script compilation not supported");
+ mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("script compilation not supported"));
#endif
}
|
Do not use obsolete API for OpenSSL 3.0 | #include <openssl/engine.h>
#include <openssl/conf.h>
#include <openssl/ssl.h>
+#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x30000000L
+#include <openssl/provider.h>
+#endif
#include <stdio.h>
#include <string.h>
#include "picoquic_unified_log.h"
@@ -132,11 +135,15 @@ static void picoquic_init_openssl()
openssl_is_init = 1;
ERR_load_crypto_strings();
OpenSSL_add_all_algorithms();
+#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x30000000L
+ /* OSSL_PROVIDER *dflt = */(void) OSSL_PROVIDER_load(NULL, "default");
+#else
#if !defined(OPENSSL_NO_ENGINE)
/* Load all compiled-in ENGINEs */
ENGINE_load_builtin_engines();
ENGINE_register_all_ciphers();
ENGINE_register_all_digests();
+#endif
#endif
}
}
@@ -527,7 +534,14 @@ int picoquic_openssl_set_tls_root_certificates(picoquic_quic_t* quic, ptls_iovec
/* Explain OPENSSL errors */
int picoquic_open_ssl_explain_crypto_error(char const** err_file, int* err_line)
{
+#if !defined(LIBRESSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER >= 0x30000000L
+ const char *func = NULL;
+ const char *data = NULL;
+ int flags=0;
+ return (int)ERR_get_error_all(err_file, err_line, &func, &data, &flags);
+#else
return ERR_get_error_line(err_file, err_line);
+#endif
}
/* Clear the recorded errors in the crypto stack, e.g. before
|
fix ambiguous usages | @@ -215,10 +215,10 @@ void MasternodeManager::updateNodeList()
rankItem->setData(Qt::UserRole, GetMasternodeRank(mn.vin, pindexBest->nHeight));
rankItem->setData(Qt::DisplayRole, QString::number(GetMasternodeRank(mn.vin, pindexBest->nHeight)));
SortedWidgetItem *activeSecondsItem = new SortedWidgetItem();
- activeSecondsItem->setData(Qt::UserRole, mn.lastTimeSeen - mn.now);
+ activeSecondsItem->setData(Qt::UserRole, (qint64)(mn.lastTimeSeen - mn.now));
activeSecondsItem->setData(Qt::DisplayRole, seconds_to_DHMS((qint64)(mn.lastTimeSeen - mn.now)));
SortedWidgetItem *lastSeenItem = new SortedWidgetItem();
- lastSeenItem->setData(Qt::UserRole, mn.lastTimeSeen);
+ lastSeenItem->setData(Qt::UserRole, (qint64)mn.lastTimeSeen);
lastSeenItem->setData(Qt::DisplayRole, QString::fromStdString(DateTimeStrFormat(mn.lastTimeSeen)));
CScript pubkey;
|
interface: fixed variable name | @@ -101,7 +101,7 @@ export default function index(associations, apps, currentGroup) {
app.charAt(0).toUpperCase() + app.slice(1),
cite(shipStart.slice(0, shipStart.indexOf('/')))
);
- groups.push(obj);
+ landscape.push(obj);
} else {
const app = each.metadata.module || each['app-name'];
const obj = result(
|
wsman-xml-serialize: use right sscanf parameters
struct tm members are signed, so use %d instead of %u in sscanf | @@ -1869,7 +1869,7 @@ int ws_deserialize_datetime(const char *text, XML_DATETIME * tmx)
}
bzero(tmx, sizeof(XML_DATETIME));
- r = sscanf(text, "%u-%u-%uT%u:%u:%u%d:%u", &tmx->tm.tm_year,
+ r = sscanf(text, "%d-%d-%dT%d:%d:%d%d:%d", &tmx->tm.tm_year,
&tmx->tm.tm_mon, &tmx->tm.tm_mday,
&tmx->tm.tm_hour, &tmx->tm.tm_min, &tmx->tm.tm_sec,
&hours, &mins);
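
The signedness point can be shown with a self-contained sketch (not openwsman code; the timestamp literal, variable names and standalone `main` are invented for the example): every `struct tm` member is a plain signed `int`, so `%d` is the conversion that matches the pointers being passed, and it also handles a negative timezone offset naturally.

```c
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct tm tm = {0};
    int tz_hours = 0, tz_mins = 0;
    /* hypothetical timestamp, invented for the example */
    const char *text = "2012-11-20T16:01:43-05:30";

    /* %d matches the signed int members of struct tm, and it also lets the
     * trailing offset come out negative (-5 here) */
    int r = sscanf(text, "%d-%d-%dT%d:%d:%d%d:%d",
                   &tm.tm_year, &tm.tm_mon, &tm.tm_mday,
                   &tm.tm_hour, &tm.tm_min, &tm.tm_sec,
                   &tz_hours, &tz_mins);

    printf("converted %d fields, year=%d, offset=%d:%02d\n",
           r, tm.tm_year, tz_hours, tz_mins); /* 8 fields, offset=-5:30 */
    return 0;
}
```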
|
Relaxing interface | @@ -97,14 +97,14 @@ namespace celix {
* @param closeFunction
* @return builder style same object
*/
- [[nodiscard]] PushStream<T>& onClose(CloseFunction closeFunction);
+ PushStream<T>& onClose(CloseFunction closeFunction);
/**
* Given method will be called on error
* @param errorFunction
* @return builder style same object
*/
- [[nodiscard]] PushStream<T>& onError(ErrorFunction errorFunction);
+ PushStream<T>& onError(ErrorFunction errorFunction);
/**
* Close this PushStream by sending an event of type PushEvent.EventType.CLOSE downstream
|
Correct a couple of typos in the release notes. | @@ -29,7 +29,7 @@ enhancements and bug-fixes that were added to this release.</p>
<li>Fixed a bug with the Pixie reader when it read 3D curvilinear meshes in parallel.</li>
<li>Fixed a bug where the parallel engine crashed when creating ghost zones from global ids.</li>
<li>Fixed a bug with the progress dialog staying visible when a client connection fails.</li>
- <li>Added a menu indicator icon to the Color Table buttons so that it is more obvious that the button can be pushed to see available options.<li>
+ <li>Added a menu indicator icon to the Color Table buttons so that it is more obvious that the button can be pushed to see available options.</li>
<li>Fixed a bug preventing the user from being able to performa a Pick Through Time using mesh quality metrics.</li>
<li>Fixed a couple of bugs with the MutilCurve plot. Fixed a bug where portions of the axes were missing or incomplete after switching the orientation. Fixed a bug where there were no tickmarks when displaying 16 curves.</li>
<li>Fixed a bug in the ColorTable window that limited the number of color control points to 200.</li>
@@ -45,7 +45,7 @@ enhancements and bug-fixes that were added to this release.</p>
<li>Fixed a bug which caused external surfaces to be lost in the pseudocolor plot when using a gradient expression on a Curvilinear mesh (Structured Grid).</li>
<li>Changed the vertical scroll bar mode for the plot list to <b>ScrollPerPixel</b> and to use a single step so that the bottom of the plot list is not cutoff.</li>
<li>Fixed a bug preventing VisIt from displaying the manuals when using an out-of-source build.</li>
- <li>Fixed a bug where the Plots Add menu would become disabled after drawing plots via the CLI, then calling OpenGUI() and deleting a plot.
+ <li>Fixed a bug where the Plots Add menu would become disabled after drawing plots via the CLI, then calling OpenGUI() and deleting a plot.</li>
</ul>
<a name="Enhancements"></a>
|
Dedup conflicting use statements. | @@ -24,6 +24,7 @@ static Node *unpickle(FILE *fd);
/* type fixup list */
static Htab *tydeduptab; /* map from name -> type, contains all Tynames loaded ever */
+static Htab *trdeduptab; /* map from name -> type, contains all Tynames loaded ever */
static Htab *tidmap; /* map from tid -> type */
static Htab *trmap; /* map from trait id -> trait */
static Htab *initmap; /* map from init name -> int */
@@ -796,7 +797,7 @@ static void fixtypemappings(Stab *st)
static void fixtraitmappings(Stab *st)
{
size_t i;
- Trait *t;
+ Trait *t, *tr;
/*
* merge duplicate definitions.
@@ -808,10 +809,16 @@ static void fixtraitmappings(Stab *st)
t = htget(trmap, itop(traitfixid[i]));
if (!t)
die("Unable to find trait for id %zd\n", traitfixid[i]);
+
+ tr = htget(trdeduptab, t->name);
+ if (!tr) {
+ htput(trdeduptab, t->name, t);
+ tr = t;
+ }
if (traitfixdest[i])
- *traitfixdest[i] = t;
+ *traitfixdest[i] = tr;
if (traitfixtype[i])
- settrait(traitfixtype[i], t);
+ settrait(traitfixtype[i], tr);
}
lfree(&traitfixdest, &ntraitfixdest);
@@ -888,6 +895,8 @@ int loaduse(char *path, FILE *f, Stab *st, Vis vis)
pushstab(file->file.globls);
if (!tydeduptab)
tydeduptab = mkht(tyhash, tyeq);
+ if (!trdeduptab)
+ trdeduptab = mkht(namehash, nameeq);
if (fgetc(f) != 'U')
return 0;
v = rdint(f);
@@ -962,8 +971,8 @@ foundextlib:
break;
case 'R':
tr = traitunpickle(f);
- tr->vis = vis;
if (!tr->ishidden) {
+ tr->vis = vis;
puttrait(s, tr->name, tr);
for (i = 0; i < tr->nfuncs; i++) {
putdcl(s, tr->funcs[i]);
|
improved detection if hashcat --nonce-error-corrections is working on that file | @@ -102,6 +102,14 @@ replaycount = be64toh(eap->replaycount);
return replaycount;
}
/*===========================================================================*/
+int sort_by_nonce_ap(const void *a, const void *b)
+{
+hcx_t *ia = (hcx_t *)a;
+hcx_t *ib = (hcx_t *)b;
+
+return memcmp(ia->nonce_ap, ib->nonce_ap, 32);
+}
+/*===========================================================================*/
void writehcxinfo(long int hcxrecords, int outmode)
{
hcx_t *zeigerhcx;
@@ -141,6 +149,9 @@ char essidoutstr[34];
uint8_t nonceold[32];
memset(nonceold, 0, 32);
+
+qsort(hcxdata, hcxrecords, HCX_SIZE, sort_by_nonce_ap);
+
c = 0;
while(c < hcxrecords)
{
@@ -398,7 +409,7 @@ printf("%s %s (C) %s ZeroBeat\n"
"-p : list messagepair\n"
"-l : list essid len\n"
"-e : list essid\n"
- "\n", eigenname, eigenname, VERSION, VERSION_JAHR, eigenname);
+ "\n", eigenname, VERSION, VERSION_JAHR, eigenname, eigenname);
exit(EXIT_FAILURE);
}
/*===========================================================================*/
|
oc_oscore:return BAD_OPTION if req lacks 'kid' | #include "oc_oscore.h"
#include "oc_oscore_crypto.h"
#include "oc_oscore_context.h"
+#include "oc_pstat.h"
#include "api/oc_events.h"
#include "util/oc_process.h"
#include "oc_store.h"
@@ -148,7 +149,7 @@ oc_oscore_recv_message(oc_message_t *message)
} else {
/* OSCORE message is request and lacks kid, return error */
OC_ERR("***OSCORE protected request lacks kid param***");
- oscore_send_error(oscore_pkt, UNAUTHORIZED_4_01, &message->endpoint);
+ oscore_send_error(oscore_pkt, BAD_OPTION_4_02, &message->endpoint);
goto oscore_recv_error;
}
}
|
update armlibc/stdio.c | @@ -64,12 +64,28 @@ int libc_stdio_get_console(void)
}
int libc_stdio_read(void *buffer, size_t size)
+{
+ if (std_fd >= 0)
{
return read(std_fd, buffer, size);
}
+ else
+ {
+ rt_kprintf("Illegal stdio input!\n");
+ return 0;
+ }
+}
int libc_stdio_write(const void *buffer, size_t size)
+{
+ if (std_fd >= 0)
{
return write(std_fd, buffer, size);
}
+ else
+ {
+ rt_kprintf("Illegal stdio output!\n");
+ return size;
+ }
+}
#endif
|
Fix range limit exceeding data size in last step | @@ -246,6 +246,7 @@ int CNAME(BLASLONG n, BLASLONG k, FLOAT *alpha, FLOAT *a, BLASLONG lda, FLOAT *x
range_m[MAX_CPU_NUMBER - num_cpu - 1] = range_m[MAX_CPU_NUMBER - num_cpu] - width;
range_n[num_cpu] = num_cpu * (((n + 15) & ~15) + 16);
+ if (range_n[num_cpu] > n) range_n[num_cpu] = n;
queue[num_cpu].mode = mode;
queue[num_cpu].routine = sbmv_kernel;
@@ -285,6 +286,7 @@ int CNAME(BLASLONG n, BLASLONG k, FLOAT *alpha, FLOAT *a, BLASLONG lda, FLOAT *x
range_m[num_cpu + 1] = range_m[num_cpu] + width;
range_n[num_cpu] = num_cpu * (((n + 15) & ~15) + 16);
+ if (range_n[num_cpu] > n) range_n[num_cpu] = n;
queue[num_cpu].mode = mode;
queue[num_cpu].routine = sbmv_kernel;
|
Dedicate motion sensor to Sengled PAR38 bulb | @@ -4128,7 +4128,8 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node, const deCONZ::
modelId.startsWith(QLatin1String("902010/22")) || // Bitron motion sensor
modelId.startsWith(QLatin1String("SN10ZW")) || // ORVIBO motion sensor
modelId.startsWith(QLatin1String("MOSZB-130")) || // Develco motion sensor
- modelId == QLatin1String("4in1-Sensor-ZB3.0")) // Immax NEO ZB3.0 4 in 1 sensor
+ modelId == QLatin1String("4in1-Sensor-ZB3.0") || // Immax NEO ZB3.0 4 in 1 sensor E13-A21
+ modelId == QLatin1String("E13-A21")) // Sengled E13-A21 PAR38 bulp with motion sensor
{
fpPresenceSensor.inClusters.push_back(ci->id());
}
|
tools: acrn-crashlog: fix potential issue
This patch fixes a potential issue with the initialization of a local
variable in fsutils.c.
Acked-by: Chen, Gang | @@ -1014,7 +1014,7 @@ fail_open:
int read_file(const char *path, unsigned long *size, void **data)
{
- char tmp[1024] = "\0";
+ char tmp[1024];
int len = 0;
int fd = 0;
int memsize = 1; /* for '\0' */
|
adds additional replacement events
%hole (negative ack) and %warn (%crud failed) | @@ -388,10 +388,50 @@ _worker_send_slog(u3_noun hod)
static void
_worker_lame(c3_d evt_d, u3_noun ovo, u3_noun why, u3_noun tan)
{
- // %crud will be sent on the original wire.
+ u3_noun rep;
+ u3_noun wir, tag, cad;
+
+ u3x_trel(ovo, &wir, &tag, &cad);
+
+ // a deterministic error (%exit) in a network packet (%hear)
+ // generates a negative-acknowlegement attempt (%hole).
+ //
+ // A comment from the old implementation:
+ // There should be a separate path for crypto failures,
+ // to prevent timing attacks, but isn't right now. To deal
+ // with a crypto failure, just drop the packet.
+ //
+ if ( (c3__hear == tag) && (c3__exit == why) ) {
+ rep = u3nt(u3k(wir), c3__hole, u3k(cad));
+ }
+ // failed event notifications (%crud) are replaced with
+ // an even more generic notifications, on a generic arvo wire.
+ //
+ // N.B this must not be allowed to fail!
+ //
+ else if ( c3__crud == tag ) {
+ rep = u3nc(u3nt(u3_blip, c3__arvo, u3_nul),
+ u3nc(c3__warn, u3i_tape("crude crashed!")));
+ }
+ // failed failure failing fails
//
- _worker_send_replace(evt_d, u3nc(u3k(u3h(ovo)), u3nt(c3__crud, why, tan)));
- u3z(ovo);
+ else if ( c3__warn == tag ) {
+ _worker_fail(0, "%warn replacement event failed");
+ c3_assert(0);
+ }
+ // failure notifications are sent on the same wire
+ //
+ else {
+ // prepend failure mote to tank
+ //
+ u3_noun tap = u3kb_weld(u3i_tape("bail: "), u3qc_rip(3, why));
+ u3_noun nat = u3nc(u3nc(c3__leaf, tap), u3k(tan));
+ rep = u3nc(u3k(wir), u3nt(c3__crud, u3k(tag), nat));
+ }
+
+ _worker_send_replace(evt_d, rep);
+
+ u3z(ovo); u3z(why); u3z(tan);
}
/* _worker_sure(): event succeeded, report completion.
|
Avoid crash when flist has no type field
When the flist has no type field, BAYER_TYPE_UNKNOWN should be
used as the type. | @@ -148,6 +148,11 @@ static void list_elem_decode(char* buf, elem_t* elem)
elem->detail = 0;
const char* type = strtok(NULL, "|");
+ if (type == NULL) {
+ elem->type = BAYER_TYPE_UNKNOWN;
+ return;
+ }
+
char c = type[0];
if (c == 'F') {
elem->type = BAYER_TYPE_FILE;
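
A minimal sketch of the failure mode being avoided (standalone code, not part of the project; the record string is made up): when the input has no `|`-separated type field, the second `strtok` call returns NULL, and indexing it would crash.

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "somefile";              /* record with no '|'-separated type field */
    const char *name = strtok(line, "|");  /* "somefile" */
    const char *type = strtok(NULL, "|");  /* NULL: nothing left to tokenize */

    printf("name=%s type=%s\n", name, type ? type : "(missing)");
    /* without the NULL check added above, type[0] would dereference a null pointer */
    return 0;
}
```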
|
Changed default element-count from 0 to 1 | @@ -989,7 +989,6 @@ static void ZydisSetOperandSizeAndElementInfo(ZydisDecoderContext* context,
ZYDIS_ASSERT(definition->size[context->eoszIndex] == 0);
operand->size = info->addressWidth;
operand->elementType = ZYDIS_ELEMENT_TYPE_INT;
- operand->elementCount = 1;
} else
{
ZYDIS_ASSERT(definition->size[context->eoszIndex]);
@@ -1145,7 +1144,7 @@ static void ZydisSetOperandSizeAndElementInfo(ZydisDecoderContext* context,
ZYDIS_UNREACHABLE;
}
- // Element info
+ // Element-type and -size
if (definition->elementType && (definition->elementType != ZYDIS_IELEMENT_TYPE_VARIABLE))
{
ZydisGetElementInfo(definition->elementType, &operand->elementType, &operand->elementSize);
@@ -1156,6 +1155,9 @@ static void ZydisSetOperandSizeAndElementInfo(ZydisDecoderContext* context,
operand->elementSize = operand->size;
}
}
+
+ // Element count
+ operand->elementCount = 1;
if (operand->elementSize && operand->size)
{
operand->elementCount = operand->size / operand->elementSize;
|
Fix C# code for operator overloading (2.0). | %ignore tinyspline::Vec3::operator*;
%typemap(cscode) tinyspline::Vec3 %{
- public static Vec3 operator+(Vec3 a, Vec3 b) => a.Add(b);
- public static Vec3 operator-(Vec3 a, Vec3 b) => a.Subtract(b);
- public static Vec3 operator*(Vec3 vec, double val) => vec.Multiply(val);
- public static Vec3 operator*(Vec3 vec, float val) => vec.Multiply(val);
+ public static Vec3 operator+(Vec3 a, Vec3 b)
+ { return a.Add(b); }
+
+ public static Vec3 operator-(Vec3 a, Vec3 b)
+ { return a.Subtract(b); }
+
+ public static Vec3 operator*(Vec3 vec, double val)
+ { return vec.Multiply(val); }
+
+ public static Vec3 operator*(Vec3 vec, float val)
+ { return vec.Multiply(val); }
%}
%csmethodmodifiers tinyspline::BSpline::toString "public override";
|
Add missing OpenVR pointer actions; | "actions" : [
{ "name" : "/actions/lovr/in/leftHandPose", "type" : "pose" },
{ "name" : "/actions/lovr/in/rightHandPose", "type" : "pose" },
+ { "name" : "/actions/lovr/in/leftHandPoint", "type" : "pose" },
+ { "name" : "/actions/lovr/in/rightHandPoint", "type" : "pose" },
{ "name" : "/actions/lovr/in/leftElbowPose", "type" : "pose" },
{ "name" : "/actions/lovr/in/rightElbowPose", "type" : "pose" },
{ "name" : "/actions/lovr/in/leftShoulderPose", "type" : "pose" },
{
"/actions/lovr/in/leftHandPose": "Left Hand Pose",
"/actions/lovr/in/rightHandPose": "Right Hand Pose",
+ "/actions/lovr/in/leftHandPoint": "Left Hand Point",
+ "/actions/lovr/in/rightHandPoint": "Right Hand Point",
"/actions/lovr/in/leftTriggerDown": "Left Trigger Press",
"/actions/lovr/in/leftTriggerTouch": "Left Trigger Touch",
"/actions/lovr/in/leftTriggerAxis": "Left Trigger Axis",
|
HLS search : enable irq | @@ -409,7 +409,7 @@ int main(int argc, char *argv[])
goto out_error1;
}
- action = snap_attach_action(card, SEARCH_ACTION_TYPE, 0, 60);
+ action = snap_attach_action(card, SEARCH_ACTION_TYPE, action_irq, 60);
if (action == NULL) {
fprintf(stderr, "err: failed to attach action %u: %s\n",
card_no, strerror(errno));
|
fsp/lxvpd: Print more LXVPD slot information
Useful to know since it changes the behaviour of the slot core. | @@ -149,6 +149,9 @@ void lxvpd_extract_info(struct pci_slot *slot, struct lxvpd_pci_slot *s)
slot->card_desc = s->card_desc;
slot->card_mech = s->card_mech;
slot->wired_lanes = s->wired_lanes;
+
+ prlog(PR_DEBUG, "[%s]: pluggable: %d power_ctrl: %d\n",
+ s->label, (int) s->pluggable, (int) s->power_ctl);
}
static struct lxvpd_pci_slot_data *lxvpd_alloc_slots(struct phb *phb,
|
[cmake] remove unneeded lib | @@ -34,31 +34,8 @@ find_path(UMFPACK_INCLUDE_DIR
find_library(UMFPACK_LIBRARIES umfpack PATHS $ENV{UMFPACKDIR})
-if(UMFPACK_LIBRARIES)
-
- if (NOT UMFPACK_LIBDIR)
- get_filename_component(UMFPACK_LIBDIR ${UMFPACK_LIBRARIES} PATH)
- endif(NOT UMFPACK_LIBDIR)
-
- find_library(COLAMD_LIBRARY colamd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR})
- if (COLAMD_LIBRARY)
- set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${COLAMD_LIBRARY})
- endif (COLAMD_LIBRARY)
-
- find_library(AMD_LIBRARY amd PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR})
- if (AMD_LIBRARY)
- set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${AMD_LIBRARY})
- endif (AMD_LIBRARY)
-
- find_library(SUITESPARSE_LIBRARY SuiteSparse PATHS ${UMFPACK_LIBDIR} $ENV{UMFPACKDIR})
- if (SUITESPARSE_LIBRARY)
- set(UMFPACK_LIBRARIES ${UMFPACK_LIBRARIES} ${SUITESPARSE_LIBRARY})
- endif (SUITESPARSE_LIBRARY)
-
-endif(UMFPACK_LIBRARIES)
-
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(UMFPACK DEFAULT_MSG
UMFPACK_INCLUDE_DIR UMFPACK_LIBRARIES)
-mark_as_advanced(UMFPACK_INCLUDE_DIR UMFPACK_LIBRARIES AMD_LIBRARY COLAMD_LIBRARY SUITESPARSE_LIBRARY)
+mark_as_advanced(UMFPACK_INCLUDE_DIR UMFPACK_LIBRARIES)
|
tweak tar assembly options for src tarball | @@ -170,7 +170,7 @@ foreach my $distro (@distros) {
# $tar_args .= " $tmp_dir/$distro";
$tar_args .= " $distro";
print "\nCreating dist tarball for $distro:$arch -- \n";
- print "tar command -> tar $tar_args\n";
+ print "tar command -> tar $tar_args\n\n";
system("tar $tar_args");
my $md5sum;
@@ -186,7 +186,7 @@ foreach my $distro (@distros) {
# generate src tarball
my $src_filename = "$dest_dir/OpenHPC-$release.$distro\_src.tar";
my $tar_args = "-cvf $src_filename \\\n";
- $tar_args .= "--cd $tmp_dir \\\n";
+
for my $arch (@arches) {
$tar_args .= "--exclude $tmp_dir/$distro/$arch \\\n";
}
@@ -200,12 +200,11 @@ foreach my $distro (@distros) {
}
}
$tar_args .= "--exclude $tmp_dir/$distro/iso \\\n";
- $tar_args .= " $tmp_dir/$distro";
-
-
+ $tar_args .= "-C $tmp_dir \\\n";
+ $tar_args .= " $distro";
print "dist for $distro:src -- \n";
- print "tar command -> tar $tar_args\n";
+ print "tar command -> tar $tar_args\n\n";
system("tar $tar_args");
my $md5sum;
|
HAL: fix - use HAL_GPIO_INDEX to know the pin number | @@ -162,10 +162,11 @@ hal_gpio_deinit(int pin)
{
uint32_t conf;
NRF_GPIO_Type *port;
+ int pin_index = HAL_GPIO_INDEX(pin);
conf = GPIO_PIN_CNF_INPUT_Disconnect << GPIO_PIN_CNF_INPUT_Pos;
port = HAL_GPIO_PORT(pin);
- port->PIN_CNF[pin] = conf;
+ port->PIN_CNF[pin_index] = conf;
port->DIRCLR = HAL_GPIO_MASK(pin);
return 0;
|
test-ipmi-hiomap: Add erase-one-block-twice test
Cc: stable | @@ -1328,6 +1328,66 @@ static void test_hiomap_protocol_erase_two_blocks(void)
scenario_exit();
}
+static const struct scenario_event
+scenario_hiomap_protocol_erase_one_block_twice[] = {
+ { .type = scenario_event_p, .p = &hiomap_ack_call, },
+ { .type = scenario_event_p, .p = &hiomap_get_info_call, },
+ { .type = scenario_event_p, .p = &hiomap_get_flash_info_call, },
+ {
+ .type = scenario_event_p,
+ .p = &hiomap_create_write_window_qs0l1_rs0l1_call,
+ },
+ { .type = scenario_event_p, .p = &hiomap_erase_qs0l1_call, },
+ { .type = scenario_event_p, .p = &hiomap_flush_call, },
+ {
+ .type = scenario_cmd,
+ .c = {
+ .req = {
+ .cmd = HIOMAP_C_ERASE,
+ .seq = 7,
+ .args = {
+ [0] = 0x00, [1] = 0x00,
+ [2] = 0x01, [3] = 0x00,
+ },
+ },
+ .resp = {
+ .cmd = HIOMAP_C_ERASE,
+ .seq = 7,
+ },
+ },
+ },
+ {
+ .type = scenario_cmd,
+ .c = {
+ .req = {
+ .cmd = HIOMAP_C_FLUSH,
+ .seq = 8,
+ },
+ .resp = {
+ .cmd = HIOMAP_C_FLUSH,
+ .seq = 8,
+ },
+ },
+ },
+ SCENARIO_SENTINEL,
+};
+
+static void test_hiomap_protocol_erase_one_block_twice(void)
+{
+ struct blocklevel_device *bl;
+ struct ipmi_hiomap *ctx;
+ size_t len;
+
+ scenario_enter(scenario_hiomap_protocol_erase_one_block_twice);
+ assert(!ipmi_hiomap_init(&bl));
+ ctx = container_of(bl, struct ipmi_hiomap, bl);
+ len = 1 << ctx->block_size_shift;
+ assert(!bl->erase(bl, 0, len));
+ assert(!bl->erase(bl, 0, len));
+ ipmi_hiomap_exit(bl);
+ scenario_exit();
+}
+
static void test_hiomap_protocol_erase_one_block(void)
{
struct blocklevel_device *bl;
@@ -3024,6 +3084,7 @@ struct test_case test_cases[] = {
TEST_CASE(test_hiomap_protocol_event_during_write),
TEST_CASE(test_hiomap_protocol_erase_one_block),
TEST_CASE(test_hiomap_protocol_erase_two_blocks),
+ TEST_CASE(test_hiomap_protocol_erase_one_block_twice),
TEST_CASE(test_hiomap_protocol_event_before_erase),
TEST_CASE(test_hiomap_protocol_event_during_erase),
TEST_CASE(test_hiomap_protocol_bad_sequence),
|
SOVERSION bump to version 4.1.4 | @@ -35,7 +35,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_
# with backward compatible change and micro version is connected with any internal change of the library.
set(SYSREPO_MAJOR_SOVERSION 4)
set(SYSREPO_MINOR_SOVERSION 1)
-set(SYSREPO_MICRO_SOVERSION 3)
+set(SYSREPO_MICRO_SOVERSION 4)
set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION})
set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
|
Test public external parameter entity in little-endian | @@ -6605,6 +6605,44 @@ START_TEST(test_entity_public_utf16_be)
}
END_TEST
+START_TEST(test_entity_public_utf16_le)
+{
+ const char text[] =
+ /* <!DOCTYPE d [ */
+ "<\0!\0D\0O\0C\0T\0Y\0P\0E\0 \0d\0 \0[\0\n\0"
+ /* <!ENTITY % e PUBLIC 'foo' 'bar.ent'> */
+ "<\0!\0E\0N\0T\0I\0T\0Y\0 \0%\0 \0e\0 \0P\0U\0B\0L\0I\0C\0 \0"
+ "'\0f\0o\0o\0'\0 \0'\0b\0a\0r\0.\0e\0n\0t\0'\0>\0\n\0"
+ /* %e; */
+ "%\0e\0;\0\n\0"
+ /* ]> */
+ "]\0>\0\n\0"
+ /* <d>&j;</d> */
+ "<\0d\0>\0&\0j\0;\0<\0/\0d\0>\0";
+ ExtTest2 test_data = {
+ /* <!ENTITY j 'baz'> */
+ "<\0!\0E\0N\0T\0I\0T\0Y\0 \0j\0 \0'\0b\0a\0z\0'\0>\0",
+ 34,
+ NULL,
+ NULL,
+ EE_PARSE_NONE
+ };
+ const XML_Char *expected = "baz";
+ CharData storage;
+
+ CharData_Init(&storage);
+ test_data.storage = &storage;
+ XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS);
+ XML_SetExternalEntityRefHandler(parser, external_entity_loader2);
+ XML_SetUserData(parser, &test_data);
+ XML_SetCharacterDataHandler(parser, ext2_accumulate_characters);
+ if (_XML_Parse_SINGLE_BYTES(parser, text, sizeof(text)-1,
+ XML_TRUE) == XML_STATUS_ERROR)
+ xml_failure(parser);
+ CharData_CheckXMLChars(&storage, expected);
+}
+END_TEST
+
/*
* Namespaces tests.
*/
@@ -12085,6 +12123,7 @@ make_suite(void)
tcase_add_test(tc_basic, test_entity_in_utf16_be_attr);
tcase_add_test(tc_basic, test_entity_in_utf16_le_attr);
tcase_add_test(tc_basic, test_entity_public_utf16_be);
+ tcase_add_test(tc_basic, test_entity_public_utf16_le);
suite_add_tcase(s, tc_namespace);
tcase_add_checked_fixture(tc_namespace,
|
MQTT parse_publish_vhdr: added missing check of topic length | @@ -881,7 +881,11 @@ parse_publish_vhdr(struct mqtt_connection *conn,
conn->in_packet.topic_len |= input_data_ptr[(*pos)++];
conn->in_packet.byte_counter++;
conn->in_packet.topic_len_received = 1;
-
+ /* Abort if topic is longer than our topic buffer */
+ if(conn->in_packet.topic_len > MQTT_MAX_TOPIC_LENGTH) {
+ DBG("MQTT - topic too long %u/%u\n", conn->in_packet.topic_len, MQTT_MAX_TOPIC_LENGTH);
+ return;
+ }
DBG("MQTT - Read PUBLISH topic len %i\n", conn->in_packet.topic_len);
/* WARNING: Check here if TOPIC fits in payload area, otherwise error */
}
|
critical sections during oldest_message computation | @@ -268,26 +268,34 @@ static inline void MsgAlloc_OldestMsgCandidate(msg_t *oldest_stack_msg_pt)
if ((uint32_t)oldest_stack_msg_pt > (uint32_t)current_msg)
{
// The oldest task is between `data_end_estimation` and the end of the buffer
+ LuosHAL_SetIrqState(false);
stack_delta_space = (uint32_t)oldest_stack_msg_pt - (uint32_t)current_msg;
+ LuosHAL_SetIrqState(true);
}
else
{
// The oldest task is between the begin of the buffer and `data_end_estimation`
// we have to decay it to be able to define delta
+ LuosHAL_SetIrqState(false);
stack_delta_space = ((uint32_t)oldest_stack_msg_pt - (uint32_t)&msg_buffer[0]) + ((uint32_t)&msg_buffer[MSG_BUFFER_SIZE] - (uint32_t)current_msg);
+ LuosHAL_SetIrqState(true);
}
// recompute oldest_msg into delta byte from current message
uint32_t oldest_msg_delta_space;
if ((uint32_t)oldest_msg > (uint32_t)current_msg)
{
// The oldest msg is between `data_end_estimation` and the end of the buffer
+ LuosHAL_SetIrqState(false);
oldest_msg_delta_space = (uint32_t)oldest_msg - (uint32_t)current_msg;
+ LuosHAL_SetIrqState(true);
}
else
{
// The oldest msg is between the begin of the buffer and `data_end_estimation`
// we have to decay it to be able to define delta
+ LuosHAL_SetIrqState(false);
oldest_msg_delta_space = ((uint32_t)oldest_msg - (uint32_t)&msg_buffer[0]) + ((uint32_t)&msg_buffer[MSG_BUFFER_SIZE] - (uint32_t)current_msg);
+ LuosHAL_SetIrqState(true);
}
// Compare deltas
if (stack_delta_space < oldest_msg_delta_space)
|
VERSION bump to version 2.1.59 | @@ -64,7 +64,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 2)
set(SYSREPO_MINOR_VERSION 1)
-set(SYSREPO_MICRO_VERSION 58)
+set(SYSREPO_MICRO_VERSION 59)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
Speed up the test suite with GNU Parallel
If GNU Parallel is installed we use it to run multiple test scripts at the same time. This cuts
the time it takes to run the test suite roughly in half. | -#!/bin/sh
-
-# We encourage our developers to use busted --no-keep-going by default. Sometimes the test suite
-# is failing because of something silly such as forgetting to run make and in those cases there is
-# a desire to interrupt the test suite with Ctrl-C. With --keep-going (the default busted behavior)
-# you need to press Ctrl-C multiple times and one of those Ctrl-C's will likely kill the busted
-# process itself, meaning that the "teardown" routines are not run. On the other hand, with
-# --no-keep-going we only need to press Ctrl-C once and busted usually gets to exit gracefully.
+#!/bin/bash
+
+# HOW TO USE: This is a wrapper around busted. The command-line arguments are the same.
+#
+# EXAMPLES:
+# ./test-project
+# ./test-project -k
+# ./test-project spec/coder_spec.lua
+
+# We encourage using -no-keep-going by default. Sometimes the test suite is failing because of
+# something silly such as forgetting to run make and in those cases there is a desire to interrupt
+# the test suite with Ctrl-C. With --keep-going (the default busted behavior) you need to press
+# Ctrl-C multiple times and one of those Ctrl-C's will likely kill the busted process itself,
+# meaning that the "teardown" routines are not run. On the other hand, with --no-keep-going we only
+# need to press Ctrl-C once and busted usually gets to exit gracefully.
+FLAGS=(--verbose --no-keep-going)
+
+# To speed things up, we tell the C compiler to skip optimizations.
+# Don't worry, the continuous integration server still tests with optimizations.
export CFLAGS=-O0
-busted --no-keep-going "$@"
+
+# We can also speed things up by running multiple busted processes in parallel. But unfortunately,
+# busted is single-threaded so we have to parallelize it ourselves. We use GNU parallel, albeit only
+# in the default case where no command-line arguments are given. I didn't want to wrap busted in the
+# case where we are testing a single spec file, because in that case GNU parallel would only show
+# the output after the last test has finished. I'd rather see the little green circle for each test
+# case as soon as it completes.
+if [ "$#" -eq 0 ]; then
+ if command -v parallel >/dev/null; then
+ parallel busted -o utfTerminal "${FLAGS[@]}" ::: spec/*_spec.lua
+ else
+ echo "GNU Parallel is not installed. Running the test suite in single threaded mode..."
+ busted "${FLAGS[@]}" spec/*_spec.lua
+ fi
+else
+ busted "${FLAGS[@]}" "$@"
+fi
|
[ya.conf.json] update yt tool to r3346082
via | },
"yt": {
"formula": {
- "sandbox_id": 135978941,
+ "sandbox_id": 204320868,
"match": "YT"
},
"executable": {
|
Better masking for ELM mode | @@ -11,8 +11,8 @@ static int elm327_tx_hook(CAN_FIFOMailBox_TypeDef *to_send) {
//Check valid 29 bit send addresses for ISO 15765-4
//Check valid 11 bit send addresses for ISO 15765-4
- if ((addr != 0x18DB33F1) && ((addr & 0x1FFF00FF) != 0x18DA00F1) &&
- ((addr != 0x7DF) && ((addr & 0x7F8) != 0x7E0))) {
+ if ((addr != 0x18DB33F1) && ((addr & 0xFFFF00FF) != 0x18DA00F1) &&
+ ((addr != 0x7DF) && ((addr & 0xFFFFFFF8) != 0x7E0))) {
tx = 0;
}
return tx;
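
A small standalone check can illustrate what the wider masks buy (a sketch, not panda firmware; the helper names and sample ID are invented, and `addr` is assumed to hold the raw arbitration ID): 0x87E0 passes the old `(addr & 0x7F8) == 0x7E0` test because everything above bit 10 was ignored, but fails the new `0xFFFFFFF8` mask.

```c
#include <stdio.h>
#include <stdint.h>

/* Returns 1 if addr would be allowed (tx not forced to 0) under the old masks. */
static int allowed_old(uint32_t addr)
{
    return !((addr != 0x18DB33F1) && ((addr & 0x1FFF00FF) != 0x18DA00F1) &&
             ((addr != 0x7DF) && ((addr & 0x7F8) != 0x7E0)));
}

/* Same check with the widened masks from the commit above. */
static int allowed_new(uint32_t addr)
{
    return !((addr != 0x18DB33F1) && ((addr & 0xFFFF00FF) != 0x18DA00F1) &&
             ((addr != 0x7DF) && ((addr & 0xFFFFFFF8) != 0x7E0)));
}

int main(void)
{
    uint32_t addr = 0x87E0;  /* low bits mimic 0x7E0, upper bits are unrelated */

    printf("old: %d  new: %d\n", allowed_old(addr), allowed_new(addr)); /* 1  0 */
    return 0;
}
```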
|
Fix stats update in cleaner.
Core is not assigned to request in cleaner, so to increase its stats it has to
be retrieved from mapping. | @@ -616,11 +616,12 @@ static void _ocf_cleaner_cache_io_cmpl(struct ocf_io *io, int error)
{
struct ocf_map_info *map = io->priv1;
struct ocf_request *req = io->priv2;
+ ocf_core_t core = ocf_cache_get_core(req->cache, map->core_id);
if (error) {
map->invalid |= 1;
_ocf_cleaner_set_error(req);
- ocf_core_stats_cache_error_update(req->core, OCF_READ);
+ ocf_core_stats_cache_error_update(core, OCF_READ);
}
_ocf_cleaner_cache_io_end(req);
|
More effective implementation of nxt_popcount().
This method requires as many iterations as there are set bits,
while the previous one has to shift up to the position of the
highest bit. | @@ -143,8 +143,8 @@ nxt_popcount(unsigned int x)
{
int count;
- for (count = 0; x != 0; x >>= 1) {
- count += (x & 1);
+ for (count = 0; x != 0; count++) {
+ x &= x - 1;
}
return count;
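
A standalone comparison makes the claim concrete (a sketch, not part of the Unit sources; the helper names and the sample value 0x80000001 are chosen for illustration): the old loop shifts once per bit position up to the highest set bit, so it iterates 32 times for 0x80000001, while the `x &= x - 1` form clears one set bit per iteration and finishes in 2.

```c
#include <stdio.h>

static int popcount_shift(unsigned int x)          /* previous approach */
{
    int count;

    for (count = 0; x != 0; x >>= 1) {             /* one iteration per bit position */
        count += (x & 1);
    }
    return count;
}

static int popcount_clear_lowest(unsigned int x)   /* new approach */
{
    int count;

    for (count = 0; x != 0; count++) {
        x &= x - 1;                                /* clears the lowest set bit */
    }
    return count;
}

int main(void)
{
    unsigned int x = 0x80000001u;                  /* two set bits, far apart */

    printf("%d %d\n", popcount_shift(x), popcount_clear_lowest(x)); /* 2 2 */
    return 0;
}
```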
|
Call unmodified on project load, wasn't firing on page refresh before | store.dispatch(actions.setSection(section))
});
- let modified = false;
+ let modified = true;
store.subscribe(() => {
const state = store.getState();
if(!modified && state.document.modified) {
|
one more version of the -l flag | @@ -325,7 +325,6 @@ static int out_balances_callback(void *data, cheatcoin_hash_t hash, cheatcoin_am
}
memcpy(d->blocks + d->nblocks, &f, sizeof(struct cheatcoin_field));
d->nblocks++;
- if (!(d->nblocks % 10000)) printf("blocks: %u\n", d->nblocks);
return 0;
}
@@ -335,15 +334,19 @@ static int out_sort_callback(const void *l, const void *r) {
}
static void *add_block_callback(void *block, void *data) {
+ unsigned *i = (unsigned *)data;
cheatcoin_add_block((struct cheatcoin_block *)block);
+ if (!(++*i % 10000)) printf("blocks: %u\n", *i);
return 0;
}
static int out_balances(void) {
struct out_balances_data d;
- unsigned i;
+ unsigned i = 0;
+ cheatcoin_set_log_level(0);
+ xdag_mem_init((cheatcoin_main_time() - cheatcoin_start_main_time()) << 8);
memset(&d, 0, sizeof(struct out_balances_data));
- cheatcoin_load_blocks(cheatcoin_start_main_time() << 16, cheatcoin_main_time() << 16, 0, add_block_callback);
+ cheatcoin_load_blocks(cheatcoin_start_main_time() << 16, cheatcoin_main_time() << 16, &i, add_block_callback);
cheatcoin_traverse_all_blocks(&d, out_balances_callback);
qsort(d.blocks, d.nblocks, sizeof(struct cheatcoin_field), out_sort_callback);
for (i = 0; i < d.nblocks; ++i)
|
Fix GUID generator | @@ -24,6 +24,7 @@ HRESULT Library_corlib_native_System_Guid::GenerateNewGuid___STATIC__SZARRAY_U1(
NANOCLR_CHECK_HRESULT(CLR_RT_HeapBlock_Array::CreateInstance( top, 16, g_CLR_RT_WellKnownTypes.m_UInt8 ));
buf = top.DereferenceArray()->GetFirstElement();
+ rand.Initialize();
rand.NextBytes(buf, 16); // fill with random numbers
buf[7] = (buf[7] & 0x0f) | 0x40; // Set verion
|
More sorting of run_epsdb | @@ -36,21 +36,22 @@ echo $aompdir
echo
set -x
-sort -f $aompdir/test/smoke/passing-tests.txt > $$ptests
-cat $aompdir/bin/epsdb/epsdb_passes.txt
+sort -f -d $aompdir/test/smoke/passing-tests.txt > $$ptests
+sort -f -d $aompdir/bin/epsdb/epsdb_passes.txt > $$etests
+cat $$etests
cat $$ptests
set +x
-epasses=`diff $aompdir/bin/epsdb/epsdb_passes.txt $$ptests | grep '>' | wc -l`
+epasses=`diff $$etests $$ptests | grep '>' | wc -l`
echo Unexpected Passes $epasses
echo "====================="
-diff $aompdir/bin/epsdb/epsdb_passes.txt $$ptests | grep '>' | sed 's/> //'
+diff $$etests $$ptests | grep '>' | sed 's/> //'
echo
-efails=`diff $aompdir/bin/epsdb/epsdb_passes.txt $$ptests | grep '<' | wc -l`
+efails=`diff $$etests $$ptests | grep '<' | wc -l`
echo Unexpected Fails $efails
echo "===================="
-diff $aompdir/bin/epsdb/epsdb_passes.txt $$ptests | grep '<' | sed s'/< //'
+diff $$etests $$ptests | grep '<' | sed s'/< //'
echo
-rm -f $$ptests
+rm -f $$ptests $$etests
echo Done
|
stm32/main: Make board-defined UART REPL use a static object and buffer.
This way the UART REPL does not need the MicroPython heap and exists
outside the MicroPython runtime, allowing characters to still be received
during a soft reset. | @@ -73,6 +73,14 @@ STATIC pyb_thread_t pyb_thread_main;
STATIC fs_user_mount_t fs_user_mount_flash;
#endif
+#if defined(MICROPY_HW_UART_REPL)
+#ifndef MICROPY_HW_UART_REPL_RXBUF
+#define MICROPY_HW_UART_REPL_RXBUF (64)
+#endif
+STATIC pyb_uart_obj_t pyb_uart_repl_obj;
+STATIC uint8_t pyb_uart_repl_rxbuf[MICROPY_HW_UART_REPL_RXBUF];
+#endif
+
void flash_error(int n) {
for (int i = 0; i < n; i++) {
led_state(PYB_LED_RED, 1);
@@ -541,6 +549,19 @@ void stm32_main(uint32_t reset_mode) {
lwip_init();
#endif
+ #if defined(MICROPY_HW_UART_REPL)
+ // Set up a UART REPL using a statically allocated object
+ pyb_uart_repl_obj.base.type = &pyb_uart_type;
+ pyb_uart_repl_obj.uart_id = MICROPY_HW_UART_REPL;
+ pyb_uart_repl_obj.is_static = true;
+ pyb_uart_repl_obj.timeout = 0;
+ pyb_uart_repl_obj.timeout_char = 2;
+ uart_init(&pyb_uart_repl_obj, MICROPY_HW_UART_REPL_BAUD, UART_WORDLENGTH_8B, UART_PARITY_NONE, UART_STOPBITS_1, 0);
+ uart_set_rxbuf(&pyb_uart_repl_obj, sizeof(pyb_uart_repl_rxbuf), pyb_uart_repl_rxbuf);
+ uart_attach_to_repl(&pyb_uart_repl_obj, true);
+ MP_STATE_PORT(pyb_uart_obj_all)[MICROPY_HW_UART_REPL - 1] = &pyb_uart_repl_obj;
+ #endif
+
soft_reset:
#if defined(MICROPY_HW_LED2)
@@ -588,27 +609,17 @@ soft_reset:
// we can run Python scripts (eg boot.py), but anything that is configurable
// by boot.py must be set after boot.py is run.
- readline_init0();
- pin_init0();
- extint_init0();
- timer_init0();
-
- // Define MICROPY_HW_UART_REPL to be PYB_UART_6 and define
- // MICROPY_HW_UART_REPL_BAUD in your mpconfigboard.h file if you want a
- // REPL on a hardware UART as well as on USB VCP
#if defined(MICROPY_HW_UART_REPL)
- {
- mp_obj_t args[2] = {
- MP_OBJ_NEW_SMALL_INT(MICROPY_HW_UART_REPL),
- MP_OBJ_NEW_SMALL_INT(MICROPY_HW_UART_REPL_BAUD),
- };
- MP_STATE_PORT(pyb_stdio_uart) = pyb_uart_type.make_new((mp_obj_t)&pyb_uart_type, MP_ARRAY_SIZE(args), 0, args);
- uart_attach_to_repl(MP_STATE_PORT(pyb_stdio_uart), true);
- }
+ MP_STATE_PORT(pyb_stdio_uart) = &pyb_uart_repl_obj;
#else
MP_STATE_PORT(pyb_stdio_uart) = NULL;
#endif
+ readline_init0();
+ pin_init0();
+ extint_init0();
+ timer_init0();
+
#if MICROPY_HW_ENABLE_CAN
can_init0();
#endif
|
acrn-config: removed_nested returns None instead of raising an error
The qemu xml has no devices list, so the tool will receive an empty devices
list while parsing the "platform" xml. Remove the error raised when resolving
nested mmio address windows with a None input and simply
return the list as-is. | @@ -235,7 +235,7 @@ def get_mmio_windows_with_key(keywords):
def removed_nested(list1, list2):
if not list1 or not list2:
- raise ValueError("Invalid inputs: None, list1 is {}, \list2 is {}".format (list1, list2))
+ return list1
resolvedList = list1[:]
for w1 in resolvedList:
@@ -265,6 +265,10 @@ def merged_windows(windowslist):
def get_free_mmio(windowslist, used, size):
+ if not size:
+ raise ValueError("allocate size cannot be {}".format(size))
+ if not windowslist:
+ raise ValueError("No mmio range is specified:{}".format(windowslist))
for w in windowslist:
window = MmioWindow(start=w.start, end=w.start+size-1)
for u in used:
|
fix RWFromMem of sdl/audio_test.go
* fix RWFromMem of sdl/audio_test.go
* ci: improve Travis CI script, using Xvfb for Headless SDL test
using libsdl2-dev from dist
* using xenial dist
* freetype/raster test failed
* reverse CI patch | @@ -66,10 +66,7 @@ func TestAudioInitQuit(t *testing.T) {
func TestLoadWAVRW(t *testing.T) {
// load WAV from *RWOps pointing to WAV data
- src, err := RWFromMem(squareWave)
- if err != nil {
- t.Errorf("LoadWAVRW() returned error %v", err)
- }
+ src, _ := RWFromMem(squareWave)
buf, spec := LoadWAVRW(src, false)
// test returned []byte
|
tests/print_exception: Test user exception with __str__ in traceback. | @@ -79,6 +79,22 @@ try:
except Exception as e:
print_exc(e)
+
+# For user exceptions, printing traceback should use __str__, not __repr__
+class MyException(Exception):
+ def __repr__(self):
+ return "repr"
+
+ def __str__(self):
+ return "str"
+
+
+try:
+ raise MyException
+except Exception as e:
+ print_exc(e)
+
+
# Test non-stream object passed as output object, only valid for uPy
if hasattr(sys, 'print_exception'):
try:
|
[chainmaker] add request struct | @@ -58,6 +58,26 @@ typedef struct TBoatChainmakerTxHeader {
} BoatChainmakerTxHeader;
+typedef struct TKeyValuePair {
+ char* key;
+ char* value;
+} KeyValuePair;
+
+typedef struct TBoatChainmakerTransactPayload {
+
+ char* contractName;
+ char* method;
+ KeyValuePair* Parameters;
+} BoatChainmakerTransactPayload;
+
+//request
+typedef struct TBoatChainmkaerTxRequest {
+ BoatChainmakerTxHeader tx_header;
+ BoatChainmakerTransactPayload* payload;
+ char* signature;
+} BoatChainmkaerTxRequest;
+
+
|
Update documentation for quat_from_vecs | @@ -32,6 +32,7 @@ Functions:
#. :c:func:`glm_quat`
#. :c:func:`glm_quatv`
#. :c:func:`glm_quat_copy`
+#. :c:func:`glm_quat_from_vecs`
#. :c:func:`glm_quat_norm`
#. :c:func:`glm_quat_normalize`
#. :c:func:`glm_quat_normalize_to`
@@ -123,6 +124,20 @@ Functions documentation
| *[in]* **q** source quaternion
| *[out]* **dest** destination quaternion
+.. c:function:: void glm_quat_from_vecs(vec3 a, vec3 b, versor dest)
+
+ | compute unit quaternion needed to rotate a into b
+
+ References:
+ * `Finding quaternion representing the rotation from one vector to another <https://stackoverflow.com/a/11741520/183120>`_
+ * `Quaternion from two vectors <http://lolengine.net/blog/2014/02/24/quaternion-from-two-vectors-final>`_
+ * `Angle between vectors <http://www.euclideanspace.com/maths/algebra/vectors/angleBetween/minorlogic.htm>`_
+
+ Parameters:
+ | *[in]* **a** unit vector
+ | *[in]* **b** unit vector
+ | *[in]* **dest** unit quaternion
+
.. c:function:: float glm_quat_norm(versor q)
| returns norm (magnitude) of quaternion
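A minimal usage sketch of the newly documented glm_quat_from_vecs, assuming the cglm headers and its vec3/versor types; the input vectors are arbitrary unit vectors, and glm_quat_rotatev is used only to show the effect.

#include <cglm/cglm.h>

/* Rotate the +X axis onto the +Y axis and apply the result to a vector. */
static void quat_from_vecs_example(void)
{
    vec3   a = {1.0f, 0.0f, 0.0f};   /* must be a unit vector */
    vec3   b = {0.0f, 1.0f, 0.0f};   /* must be a unit vector */
    versor q;
    vec3   rotated;

    glm_quat_from_vecs(a, b, q);     /* q now rotates a onto b */
    glm_quat_rotatev(q, a, rotated); /* rotated is approximately equal to b */
}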
|
fix lv_align_y/mid_y to align y and not x | @@ -984,7 +984,7 @@ void lv_obj_align_y(lv_obj_t * obj, const lv_obj_t * base, lv_align_t align, lv_
LV_ASSERT_OBJ(base, LV_OBJX_NAME);
- obj_align_core(obj, base, align, true, false, 0, y_ofs);
+ obj_align_core(obj, base, align, false, true, 0, y_ofs);
}
/**
@@ -1058,7 +1058,7 @@ void lv_obj_align_mid_y(lv_obj_t * obj, const lv_obj_t * base, lv_align_t align,
LV_ASSERT_OBJ(base, LV_OBJX_NAME);
- obj_align_mid_core(obj, base, align, true, false, 0, y_ofs);
+ obj_align_mid_core(obj, base, align, false, true, 0, y_ofs);
}
/**
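As a quick illustration of the corrected behaviour, a hedged sketch of how these helpers are typically called; the widget-creation calls and alignment constants follow the LVGL v7 API and the offsets are arbitrary.

static void align_example(void)
{
    lv_obj_t * btn   = lv_btn_create(lv_scr_act(), NULL);
    lv_obj_t * label = lv_label_create(lv_scr_act(), NULL);

    /* Adjusts only the y coordinate; before the fix it changed x instead. */
    lv_obj_align_y(label, btn, LV_ALIGN_OUT_BOTTOM_MID, 10);

    /* The "mid" variant centers the object on the y axis only. */
    lv_obj_align_mid_y(label, btn, LV_ALIGN_CENTER, 0);
}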
|
landscape: loosen owner check in popover
Fixes urbit/landscape#960 | @@ -31,7 +31,7 @@ export function PopoverRoutes(
const groupSize = props.group.members.size;
- const owner = resourceFromPath(props.association.group).ship.slice(1) === window.ship;
+ const owner = resourceFromPath(props.association?.group ?? '~zod/group').ship.slice(1) === window.ship;
const admin = props.group?.tags?.role?.admin.has(window.ship) || false;
|
SOVERSION bump to version 3.3.3 | @@ -67,7 +67,7 @@ set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION
# with backward compatible change and micro version is connected with any internal change of the library.
set(LIBNETCONF2_MAJOR_SOVERSION 3)
set(LIBNETCONF2_MINOR_SOVERSION 3)
-set(LIBNETCONF2_MICRO_SOVERSION 2)
+set(LIBNETCONF2_MICRO_SOVERSION 3)
set(LIBNETCONF2_SOVERSION_FULL ${LIBNETCONF2_MAJOR_SOVERSION}.${LIBNETCONF2_MINOR_SOVERSION}.${LIBNETCONF2_MICRO_SOVERSION})
set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_SOVERSION})
|
Parse 'errors' field of online tile service and report the errors in the log | @@ -109,11 +109,16 @@ namespace carto {
std::shared_ptr<BinaryData> responseData;
if (!NetworkUtils::GetHTTP(url, responseData, Log::IsShowDebug())) {
- Log::Error("CartoOnlineTileDataSource: Failed to fetch tile source configuration");
- return false;
+ Log::Warnf("CartoOnlineTileDataSource: Failed to fetch tile source configuration"); // NOTE: we may have error messages, thus do not return from here
}
- std::string result(reinterpret_cast<const char*>(responseData->data()), responseData->size());
+ std::string result;
+ if (responseData) {
+ result = std::string(reinterpret_cast<const char*>(responseData->data()), responseData->size());
+ } else {
+ Log::Error("CartoOnlineTileDataSource: Empty response");
+ return false;
+ }
picojson::value config;
std::string err = picojson::parse(config, result);
if (!err.empty()) {
@@ -121,14 +126,22 @@ namespace carto {
return false;
}
+ if (config.get("errors").is<picojson::array>()) {
+ for (const picojson::value& error : config.get("errors").get<picojson::array>()) {
+ if (error.get("message").is<std::string>()) {
+ Log::Errorf("CartoOnlineTileDataSource: Error: %s", error.get("message").get<std::string>().c_str());
+ }
+ }
+ }
+
_tileURLs.clear();
if (!config.get("tiles").is<picojson::array>()) {
Log::Error("CartoOnlineTileDataSource: Tile URLs missing from configuration");
return false;
}
- for (const picojson::value& val : config.get("tiles").get<picojson::array>()) {
- if (val.is<std::string>()) {
- _tileURLs.push_back(val.get<std::string>());
+ for (const picojson::value& tileURL : config.get("tiles").get<picojson::array>()) {
+ if (tileURL.is<std::string>()) {
+ _tileURLs.push_back(tileURL.get<std::string>());
}
}
|
sys/socket/scm: return NULL when cmsg_len is zero | @@ -348,11 +348,12 @@ static inline FAR struct cmsghdr *__cmsg_nxthdr(FAR void *__ctl,
unsigned int __size,
FAR struct cmsghdr *__cmsg)
{
- FAR struct cmsghdr *__ptr;
+ size_t len = CMSG_ALIGN(__cmsg->cmsg_len);
+ FAR struct cmsghdr *__ptr =
+ (FAR struct cmsghdr *)(((FAR char *)__cmsg) + len);
- __ptr = (FAR struct cmsghdr *)
- (((FAR char *)__cmsg) + CMSG_ALIGN(__cmsg->cmsg_len));
- if ((unsigned long)((FAR char *)(__ptr + 1) - (FAR char *)__ctl) > __size)
+ if (len < sizeof(*__cmsg) ||
+ (unsigned long)((FAR char *)(__ptr + 1) - (FAR char *)__ctl) > __size)
{
return NULL;
}
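For context, a typical consumer walks the control-message chain as in the generic POSIX-style sketch below; with the fix, a zero cmsg_len makes CMSG_NXTHDR return NULL so the loop terminates instead of reading a malformed header. The SCM_RIGHTS handling is only an example.

#include <sys/socket.h>
#include <string.h>
#include <unistd.h>

/* Illustrative sketch: extract file descriptors passed via SCM_RIGHTS. */
static void handle_control(struct msghdr *msg)
{
  struct cmsghdr *cmsg;

  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    {
      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS &&
          cmsg->cmsg_len >= CMSG_LEN(sizeof(int)))
        {
          int fd;

          memcpy(&fd, CMSG_DATA(cmsg), sizeof(fd));
          close(fd);              /* or hand the descriptor to the caller */
        }
    }
}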
|
pyapi CHANGE ignore server hostkey in serverinfo example | @@ -12,6 +12,9 @@ def interactive_auth(name, instruct, prompt, data):
def password_auth(user, host, data):
return getpass.getpass((user if user else os.getlogin()) + '@' + host + ' password : ')
+def hostkey_check(hostname, state, keytype, hexa, priv):
+ return True
+
#
# get know where to connect
#
@@ -31,6 +34,7 @@ else:
ssh = nc.SSH()
ssh.setAuthInteractiveClb(interactive_auth)
ssh.setAuthPasswordClb(password_auth)
+ssh.setAuthHostkeyCheckClb(hostkey_check)
#
# create NETCONF session to the server
|
test-suite: include compiler/mpi family designation in names for several TAU tests | @@ -21,7 +21,7 @@ setup() {
module load $module
}
-@test "[$testname] Verify $module module is loaded and matches rpm version" {
+@test "[$testname] Verify $module module is loaded and matches rpm version ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
module list $module | grep "1) $module" >& .cmd_output || exit 1
run grep $module .cmd_output
assert_success
@@ -32,7 +32,7 @@ setup() {
assert_output " 1) $module/$version"
}
-@test "[$testname] Verify module ${PKG}_DIR is defined and exists" {
+@test "[$testname] Verify module ${PKG}_DIR is defined and exists ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
DIR=${PKG}_DIR
if [ -z ${!DIR} ];then
flunk "${PKG}_DIR directory not defined"
@@ -47,7 +47,7 @@ setup() {
# Binaries
# ----------
-@test "[$testname] Verify module ${PKG}_BIN is defined and exists" {
+@test "[$testname] Verify module ${PKG}_BIN is defined and exists ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
BIN=${PKG}_BIN
if [ -z ${!BIN} ];then
flunk "${PKG}_BIN directory not defined"
@@ -58,12 +58,12 @@ setup() {
fi
}
-@test "[$testname] Verify availability of taucc binary" {
+@test "[$testname] Verify availability of taucc binary ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
run which taucc
assert_success
}
-@test "[$testname] Verify availability of pprof binary" {
+@test "[$testname] Verify availability of pprof binary ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
run which pprof
assert_success
}
@@ -72,7 +72,7 @@ setup() {
# Man pages
# ----------
-@test "[$testname] Verify availability of man page for taucc" {
+@test "[$testname] Verify availability of man page for taucc ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
DIR=${PKG}_DIR
if [ -z ${!DIR} ];then
@@ -92,7 +92,7 @@ setup() {
# Lib Tests
# ----------
-@test "[$testname] Verify module ${PKG}_LIB is defined and exists" {
+@test "[$testname] Verify module ${PKG}_LIB is defined and exists ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
LIB=${PKG}_LIB
if [ -z ${!LIB} ];then
@@ -104,7 +104,7 @@ setup() {
fi
}
-@test "[$testname] Verify dynamic library available in ${PKG}_LIB" {
+@test "[$testname] Verify dynamic library available in ${PKG}_LIB ($LMOD_FAMILY_COMPILER/$LMOD_FAMILY_MPI)" {
LIB=${PKG}_LIB
if [ -z ${!LIB} ];then
|
Update secure_comparator_t.h
Some refinements of a comment | @@ -31,7 +31,8 @@ typedef themis_status_t (*secure_compare_handler)(secure_comparator_t *comp_ctx,
* secret -> value to compare of each peer (x - for Alice, y - for Bob)
* rand, rand2, rand3 -> random values of each peer(r, a2, a3 - for Alice; s, b2, b3 - for Bob)
* g2, g3, P, Q -> intermediate parameters of each peer while protocol execution (G2a, G3a, Pa, Qa - for Alice; G2b, G3b, Pb, Qb - for Bob)
- * Pp, Qa_Qb, g3p -> temporable parameters
+ * Pp, g3p -> intermediate parameters received from your peer
+ * Qa_Qb -> local temporary intermediate parameter
*
*/
struct secure_comparator_type
|
crypto: tutorial correction | @@ -66,14 +66,14 @@ An example backend configuration is given as follows:
kdb mount test.ini user/test crypto_gcrypt "crypto/key=DDEBEF9EE2DC931701338212DAF635B17F230E8D" base64 ini
-We recommend to add the `base64` plugin to the backend, because `crypto` will output binary data.
+We recommend adding the `base64` plugin to the backend, because `crypto` will output binary data.
Having binary data in configuration files is hardly ever feasible.
`base64` encodes all binary values within a configuration file and transforms them into Base64 strings.
### Marking Keys For Encryption
To tell the `crypto` plugin which Keys it should process, the meta-key `crypto/encrypt` is used.
-The `crypto` plugin searches for the meta-key `crypto/encryp`.
+The `crypto` plugin searches for the meta-key `crypto/encrypt`.
If the value is equal to `1`, the value of the Key will be encrypted.
Let's demonstrate this using an example.
|
Add AARCH64 architecture to default seccomp profile | {
"defaultAction": "SCMP_ACT_ERRNO",
- "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86", "SCMP_ARCH_X32"],
+ "architectures": ["SCMP_ARCH_X86_64", "SCMP_ARCH_X86", "SCMP_ARCH_X32", "SCMP_ARCH_AARCH64"],
"syscalls": [
{
"names": [
|
Update with 0.3.7 changes | OpenBLAS ChangeLog
+====================================================================
+Version 0.3.7
+11-Aug 2019
+
+common:
+ * having the gmake special variables TARGET_ARCH or TARGET_MACH
+ defined no longer causes build failures in ctest or utest
+ * defining NO_AFFINITY or USE_TLS to 0 in gmake builds no longer
+ has the same effect as setting them to 1
+ * a new test program was added to allow checking the library for
+ thread safety
+ * a new option USE_LOCKING was added to ensure thread safety when
+ OpenBLAS itself is built without multithreading but will be
+ called from multiple threads.
+ * a build failure on Linux with glibc versions earlier than 2.5
+ was fixed
+ * a runtime error with CPU enumeration (and NO_AFFINITY not set)
+ on glibc 2.6 was fixed
+ * NO_AFFINITY was added to the CMAKE options (and defaults to being
+ active on Linux, as in the gmake builds)
+
+x86_64:
+ * the build-time logic for detection of AVX512 availability in
+ the processor and compiler was fixed
+ * gmake builds on OSX now set the internal name of the library to
+ libopenblas.0.dylib (consistent with CMAKE)
+ * the Haswell DGEMM kernel received a significant speedup through
+ improved prefetch and load instructions
+ * performance of DGEMM, DTRMM, DTRSM and ZDOT on Zen/Zen2 was markedly
+ increased by avoiding vpermpd instructions
+ * the SKYLAKEX (AVX512) DGEMM helper functions have now been disabled
+ to fix remaining errors in DGEMM, DSYMM and DTRMM
+
+POWER:
+ * added support for building on FreeBSD/powerpc64 and FreeBSD/ppc970
+ * added optimized kernels for POWER9 single and double precision complex BLAS3
+ * added optimized kernels for POWER9 SGEMM and STRMM
+
+ARMV7:
+ * fixed the softfp implementations of xAMAX and IxAMAX
+ * removed the predefined -march= flags on both ARMV5 and ARMV6 as
+ they were appropriate for only a subset of platforms
+
====================================================================
Version 0.3.6
29-Apr-2019
|
added much better suggestion
mess | |= {gol/goal mod/dojo-model} ^- dojo-command
[[%poke gol] [0 [%ge mod(q.p [q.gol q.p.mod])]]]
::
- ++ dp-command-line ;~(sfix dp-command (just '\0a'))
+ ++ dp-command-line ;~(sfix dp-command (star ace) (just '\0a'))
++ dp-variable :: %verb or %brev
|* {sym/rule src/rule}
%+ cook
++ he-duke :: ++he-dope variant
|= txt/tape
^- (each (unit (each dojo-command tape)) @ud)
- =+ trim-space=;~(pfix spac:poja (star next))
- =+ trimed-text=(flop (scan (flop txt) trim-space))
- =+ foy=(he-dope trimed-text)
+ =+ foy=(he-dope txt)
?- -.foy
$| [%| q.p.foy]
$& [%& p.foy]
|
Improve documentation for Callback and AnyValue | @@ -21,7 +21,8 @@ import Control.Exception (IOException, try)
import Foreign.Lua (Lua, NumResults(..), Peekable, Pushable, StackIndex)
import qualified Foreign.Lua as Lua
--- | Lua callback function
+-- | Lua callback function. This type is similar to @'AnyValue'@, and
+-- the same caveats apply.
newtype Callback = Callback StackIndex
instance Peekable Callback where
@@ -35,7 +36,11 @@ instance Pushable Callback where
push (Callback idx) = Lua.pushvalue idx
--- | Any value of unknown type
+-- | Any value of unknown type.
+--
+-- This simply wraps the function's index on the Lua stack. Changes to
+-- the stack may only be made with great care, as they can break the
+-- reference.
newtype AnyValue = AnyValue { fromAnyValue :: StackIndex }
instance Peekable AnyValue where
|
Typo: USE_NOFITY_POLL -> USE_NOTIFY_POLL | #if !defined(CONFIG_DISABLE_SIGNALS) && !defined(CONFIG_DISABLE_POLL)
# define USE_NOTIFY_SIGNAL 1
#else
-# define USE_NOFITY_POLL 1
+# define USE_NOTIFY_POLL 1
#endif
#ifndef CONFIG_BUTTONS_NPOLLWAITERS
|
Test with both 32-bit and 64-bit builds on AppVeyor
+clone_folder: "c:\\hslua"
+
+environment:
+ matrix:
+ - STACK_VERSION: "windows-i386"
+ STACK_ROOT: "c:\\sr32"
+ STACK: "%STACK_ROOT%\\stack.exe"
+ STACK_FLAGS: "--flag hslua:lua_32bits"
+ - STACK_VERSION: "windows-x86_64"
+ STACK_ROOT: "c:\\sr64"
+ STACK: "%STACK_ROOT%\\stack.exe"
+ STACK_FLAGS: ""
+
+matrix:
+ fast_finish: true
+
+cache:
+ - "%STACK_ROOT%"
before_test:
# http://help.appveyor.com/discussions/problems/6312-curl-command-not-found
- set PATH=C:\Program Files\Git\mingw64\bin;%PATH%
-- curl -sS -ostack.zip -L --insecure http://www.stackage.org/stack/windows-x86_64
-- 7z x stack.zip stack.exe
-
-clone_folder: "c:\\hslua"
-environment:
- global:
- STACK_ROOT: "c:\\sr"
+ - |
+ %STACK% --version || curl -sS -ostack.zip -L --insecure http://www.stackage.org/stack/%STACK_VERSION% && 7z e stack.zip -o"%STACK_ROOT%" stack.exe
test_script:
-- stack setup > nul
# The ugly echo "" hack is to avoid complaints about 0 being an invalid file
# descriptor
-- echo "" | stack --no-terminal test
+ - '%STACK% setup > nul'
+ - '%STACK% path'
+ - 'echo "" | %STACK% clean'
+ - 'echo "" | %STACK% --no-terminal test %STACK_FLAGS%'
|
Node.js: changed the 'data' event calling sequence for the request.
The problem is caused by Promises' inconsistency.
The 'data' event could be triggered before the user has started
listening for it. To resolve the issue, we override the 'on' method of
the request's emitter. | @@ -288,6 +288,28 @@ ServerRequest.prototype.resume = function resume() {
return [];
};
+/*
+ * The "on" method is overridden to defer reading data until user code is
+ * ready, that is (ev === "data"). This can occur after req.emit("end") is
+ * executed, since the user code can be scheduled asynchronously by Promises
+ * and so on. Passing the data is postponed by process.nextTick() until
+ * the "on" method caller completes.
+ */
+ServerRequest.prototype.on = function on(ev, fn) {
+ Server.prototype.on.call(this, ev, fn);
+
+ if (ev === "data") {
+ process.nextTick(function () {
+ if (this.server.buffer.length !== 0) {
+ this.emit("data", this.server.buffer);
+ }
+
+ }.bind(this));
+ }
+};
+
+ServerRequest.prototype.addListener = ServerRequest.prototype.on;
+
function Server(requestListener) {
EventEmitter.call(this);
@@ -321,22 +343,20 @@ Server.prototype.listen = function () {
};
Server.prototype.run_events = function (server, req, res) {
+ req.server = server;
+ res.server = server;
+ req.res = res;
+ res.req = req;
+
+ server.buffer = server.unit._read(req.socket.req_pointer);
+
/* Important!!! setImmediate starts the next iteration in Node.js loop. */
setImmediate(function () {
server.emit("request", req, res);
- Promise.resolve().then(() => {
- let buf = server.unit._read(req.socket.req_pointer);
-
- if (buf.length != 0) {
- req.emit("data", buf);
- }
-
- req.emit("end");
- });
-
Promise.resolve().then(() => {
req.emit("finish");
+ req.emit("end");
if (res.finished) {
unit_lib.unit_response_end(res);
|
Fix GetFrequency() check for TDD mode | @@ -1297,12 +1297,16 @@ double LMS7_Device::GetFrequency(bool tx, unsigned chan) const
if (!tx)
{
+ const uint8_t mac = lms->Get_SPI_Reg_bits(LMS7_MAC);
lms->Modify_SPI_Reg_bits(LMS7_MAC, 2);
- if (lms->Get_SPI_Reg_bits(LMS7_PD_LOCH_T2RBUF)==0);
+ if (lms->Get_SPI_Reg_bits(LMS7_PD_LOCH_T2RBUF)==0)
+ {
lms->Modify_SPI_Reg_bits(LMS7_MAC, 1);
if (lms->Get_SPI_Reg_bits(LMS7_PD_VCO)==1)
tx = true; //TDD - Tx PLL used for TX and RX
}
+ lms->Modify_SPI_Reg_bits(LMS7_MAC, mac);
+ }
return lms->GetFrequencySX(tx) - offset;
}
|
Add name to program too large error message
Add name to program too large error message | @@ -481,8 +481,8 @@ int bpf_prog_load(enum bpf_prog_type prog_type, const char *name,
if (attr.insn_cnt > BPF_MAXINSNS) {
errno = EINVAL;
fprintf(stderr,
- "bpf: %s. Program too large (%u insns), at most %d insns\n\n",
- strerror(errno), attr.insn_cnt, BPF_MAXINSNS);
+ "bpf: %s. Program %s too large (%u insns), at most %d insns\n\n",
+ strerror(errno), name, attr.insn_cnt, BPF_MAXINSNS);
return -1;
}
|
Add note about USB/serial adapters
[ci skip] | @@ -10,10 +10,10 @@ In addition to the packages listed in the top level README, this also requires
## Setup
1. This loads programs onto the board over the serial port, so your development
-machine must be connected to the FPGA board with a serial cable.
+machine must be connected to the FPGA board with a serial cable.<sup>1</sup>
2. Set the environment variable SERIAL_PORT to the path of the serial device.
-For a Prolific USB based dongle, for example, the path is.
+For example:
export SERIAL_PORT="/dev/ttyUSB0"
@@ -53,6 +53,18 @@ Reboot or execute the following command:
sudo udevadm control --reload
sudo killall -9 jtagd
+<sup>1<sup> *Since most computers don't have native serial ports any more, this
+will probably require a USB to serial adapter. Almost all of the adapters you
+can buy use one of two chipsets, produced by either FTDI or Prolific. The
+Prolific chips are more... common, especially in cheaper adapters. But the
+OS drivers for these chips are unstable, especially when transferring large
+amounts of data like this project does. They often hang mid transfer or cause
+the host machine to reboot. I would recommend finding one with a FTDI based
+chipset. Unfortunately, most serial cables do not advertise which chipset
+they use, but you can sometimes tell by going to their website to download
+the drivers. Also, if you search for 'FTDI USB serial' on a retail site like
+Amazon, there are a number that do explicitly note the chipset type.*
+
## Synthesizing and Running Programs
The build system is command line based and does not use the Quartus GUI.
|
input: don't show backbutton when it doesn't do anything | @@ -208,7 +208,11 @@ static void _render(component_t* component)
data->confirm_component->f->render(data->confirm_component);
}
if (!confirm_gesture_active) {
+ if (data->string_index != 0 ||
+ trinary_input_char_in_progress(data->trinary_char_component) ||
+ data->cancel_cb != NULL) {
data->left_arrow_component->f->render(data->left_arrow_component);
+ }
if (data->keyboard_switch_component != NULL) {
data->keyboard_switch_component->f->render(data->keyboard_switch_component);
}
|
Remove Hard Coded Alias and WI from Ingest PR | @@ -53,8 +53,6 @@ class CheckinBranch {
[string]$name;
[string]$completePR = "False";
[string]$pullRequestTitle;
- [string]$workitem = "37338822"
- [string]$optionalReviewers = "[email protected]"
[CheckinFile[]]$CheckinFiles;
CheckinBranch($ManifestFile, $BranchToPushTo, $PRTitle) {
|
Simplified UB tag output in collapseUMI 1MM_All using unordered_map. | @@ -317,10 +317,10 @@ void SoloFeature::collapseUMI(uint32 iCB, uint32 *umiArray)
const uint32 bitTopMask=~(1<<31);
vector<array<uint32,2>> umiBest(graphN,{0,0});
- uint32 umiCorrN=0;//number of umi to error-correct
+ unordered_map<uintUMI,uint32> umiCorrColor;
for (uint32 iu=0; iu<umiArrayStride*nU0; iu+=umiArrayStride) {
//switch low/high to recover original UMIs
- pSolo.umiSwapHalves(umiArray[iu]);//halves were swapped, need to reurn back to UMIs
+ pSolo.umiSwapHalves(umiArray[iu]);//halves were swapped, need to return back to UMIs
//find best UMI (highest count) for each connected component
if (umiArray[iu+2]==def_MarkNoColor)
continue;
@@ -330,30 +330,18 @@ void SoloFeature::collapseUMI(uint32 iCB, uint32 *umiArray)
umiBest[color1][0] = count1;
umiBest[color1][1] = umiArray[iu];
};
- //reuse umiArray: now the stride is 2, and it contains only UMI that may be error corrected and the colors
- umiArray[umiCorrN*2]=umiArray[iu];
- umiArray[umiCorrN*2+1]=color1;
- ++umiCorrN;
+ umiCorrColor[umiArray[iu]] = color1;
};
- if (umiCorrN==0) {
- umiArray[0]=(uint32)-1;//to make sure that no UMI can match this first element
- } else {//sort UMIs
- qsort(umiArray, umiCorrN, 2*sizeof(uint32), funCompareNumbers<uint32>);
- };
-
- uint32 iUmi=0;
for (uint32 iR=0; iR<gReadS[iG+1]-gReadS[iG]; iR+=rguStride) {//cycle over reads
uint64 iread1 = rGU1[iR+rguR];
readInfo[iread1].cb = indCB[iCB] ;
- if (iUmi < umiCorrN && rGU1[iR+rguU]>umiArray[iUmi*2]) //advance in the umiArray sorted list
- ++iUmi;
-
- if (iUmi < umiCorrN && rGU1[iR+rguU]==umiArray[iUmi*2]) {//correct UMI
- readInfo[iread1].umi=umiBest[umiArray[iUmi*2+1]][1];
+ uintUMI umi = rGU1[iR+rguU];
+ if ( umiCorrColor.count(umi)>0 ) {//correct UMI
+ readInfo[iread1].umi = umiBest[umiCorrColor[umi]][1];
} else {//no UMI correction
- readInfo[iread1].umi=rGU1[iR+rguU];
+ readInfo[iread1].umi = umi;
};
};
};//else if (readInfo.size()>0)
|
Changelog entry for client side XoT | +9 July 2021: Willem
+ - Client side DNS Zone Transfer-over-TLS (XoT) support as per
+ draft-ietf-dprive-xfr-over-tls
+
29 June 2021: Willem
- Fix #168: Buffer overflow in the dname_to_string() function
|
Remove IAS enrollment check
Covered already by IAS zone refactor | @@ -2901,11 +2901,6 @@ bool DeRestPluginPrivate::checkSensorBindingsForAttributeReporting(Sensor *senso
val.maxInterval -= 5; // report before going presence: false
}
}
-
- if (sensor->manufacturer() == QLatin1String("Immax"))
- {
- checkIasEnrollmentStatus(sensor);
- }
}
else if (*i == VENDOR_CLUSTER_ID)
{
|
avf: free spinlock on txq removal
Type: fix | @@ -304,6 +304,7 @@ avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
{
qid = qid % ad->num_queue_pairs;
txq = vec_elt_at_index (ad->txqs, qid);
+ ASSERT (txq->lock == 0);
clib_spinlock_init (&txq->lock);
ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
return 0;
@@ -1529,6 +1530,7 @@ avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier)
clib_ring_free (txq->rs_slots);
vec_free (txq->tmp_bufs);
vec_free (txq->tmp_descs);
+ clib_spinlock_free (&txq->lock);
}
/* *INDENT-ON* */
vec_free (ad->txqs);
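For reference, the expected pairing of the vppinfra spinlock calls is sketched below; this is a generic illustration rather than the avf driver code, and it assumes the usual clib_spinlock_t API from vppinfra/lock.h.

#include <vppinfra/lock.h>

/* Hedged sketch: every clib_spinlock_init should have a matching free. */
typedef struct
{
  clib_spinlock_t lock;
  u32 n_enqueued;
} example_txq_t;

static void
example_txq_create (example_txq_t * txq)
{
  clib_spinlock_init (&txq->lock);
}

static void
example_txq_use (example_txq_t * txq)
{
  clib_spinlock_lock (&txq->lock);
  txq->n_enqueued++;
  clib_spinlock_unlock (&txq->lock);
}

static void
example_txq_delete (example_txq_t * txq)
{
  /* Missing before the fix: release the lock's memory on removal. */
  clib_spinlock_free (&txq->lock);
}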
|
fix issue with splitting big queries in dynamic boosting | @@ -256,7 +256,7 @@ namespace NCatboostCuda {
for (ui32 i = 0; i < devCount; ++i) {
const ui64 devSize = (i + 1 != devCount ? docsPerDevice : (docCount - total));
- ui64 nextDevDoc = samplesGrouping.NextQueryOffsetForLine(total + devSize - 1);
+ ui64 nextDevDoc = samplesGrouping.NextQueryOffsetForLine(Min<ui32>(total + devSize - 1, docCount));
slices[i] = TSlice(total, nextDevDoc);
total = nextDevDoc;
CB_ENSURE(slices[i].Size(), "Error: insufficient query (or document) count to split data between several GPUs. Can't continue learning (" << docCount << ")");
|
OCSP_resp_find_status.pod: Slightly improve the documentation of various flags | @@ -121,24 +121,25 @@ signed and that the signer certificate can be validated. It takes I<st> as
the trusted store and I<certs> as a set of untrusted intermediate certificates.
The function first tries to find the signer certificate of the response
in I<certs>. It then searches the certificates the responder may have included
-in I<bs> unless the I<flags> contain B<OCSP_NOINTERN>.
+in I<bs> unless I<flags> contains B<OCSP_NOINTERN>.
It fails if the signer certificate cannot be found.
-Next, unless the I<flags> contain B<OCSP_NOSIGS>, the function checks
+Next, unless I<flags> contains B<OCSP_NOSIGS>, the function checks
the signature of I<bs> and fails on error. Then the function already returns
-success if the I<flags> contain B<OCSP_NOVERIFY> or if the signer certificate
-was found in I<certs> and the I<flags> contain B<OCSP_TRUSTOTHER>.
+success if I<flags> contains B<OCSP_NOVERIFY> or if the signer certificate
+was found in I<certs> and I<flags> contains B<OCSP_TRUSTOTHER>.
Otherwise the function continues by validating the signer certificate.
-If I<flags> contains B<OCSP_PARTIAL_CHAIN>, intermediate CA certificates
-in I<st> are trust anchors.
+If I<flags> contains B<OCSP_PARTIAL_CHAIN> it takes intermediate CA
+certificates in I<st> as trust anchors.
For more details, see the description of B<X509_V_FLAG_PARTIAL_CHAIN>
in L<X509_VERIFY_PARAM_set_flags(3)/VERIFICATION FLAGS>.
-To this end, all certificates in I<cert> and in I<bs> are considered as
-untrusted certificates for the construction of the validation path for the
-signer certificate unless the B<OCSP_NOCHAIN> flag is set. After successful path
+If I<flags> contains B<OCSP_NOCHAIN> it ignores all certificates in I<certs>
+and in I<bs>, else it takes them as untrusted intermediate CA certificates
+and uses them for constructing the validation path for the signer certificate.
+After successful path
validation the function returns success if the B<OCSP_NOCHECKS> flag is set.
Otherwise it verifies that the signer certificate meets the OCSP issuer
criteria including potential delegation. If this does not succeed and the
-I<flags> do not contain B<OCSP_NOEXPLICIT> the function checks for explicit
+B<OCSP_NOEXPLICIT> flag is not set the function checks for explicit
trust for OCSP signing in the root CA certificate.
=head1 RETURN VALUES
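To make the flag semantics described above concrete, a hedged sketch of a typical call is given below; setup of the response, the untrusted certificates and the trust store is omitted, and the flag combination is purely illustrative.

#include <openssl/ocsp.h>

/* Illustrative only: verify a basic response, trusting a signer found in
 * "certs" directly and allowing a chain rooted at an intermediate CA. */
static int verify_response(OCSP_BASICRESP *bs, STACK_OF(X509) *certs,
                           X509_STORE *store)
{
    unsigned long flags = OCSP_TRUSTOTHER | OCSP_PARTIAL_CHAIN;

    if (OCSP_basic_verify(bs, certs, store, flags) <= 0)
        return 0;                /* signature or path validation failed */
    return 1;
}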
|
Added --process-and-exit command line option to the man page. | @@ -536,6 +536,9 @@ Ensure clients send the specified origin header upon the WebSocket handshake.
Specify the port to use. By default GoAccess' WebSocket server listens on port
7890.
.TP
+\fB\-\-process-and-exit
+Parse log and exit without outputting data.
+.TP
\fB\-\-real-os
Display real OS names. e.g, Windows XP, Snow Leopard.
.TP
|
Year increment | -Copyright 2017-2018 YANDEX LLC
+Copyright 2017-2019 YANDEX LLC
Apache License
Version 2.0, January 2004
|
website-frontend: ... | @@ -408,6 +408,7 @@ plugins, bindings and tools are always up to date. Furthermore, we changed:
- The Website now lives in the folders [website-frontend](/src/tools/website-frontend) and [website-backend](/src/tools/website-backend) to avoid confusion with the REST backend of the Web-UI. _(Markus Raab)_
- Renamed the sub menu _Getting Started_ in the menu _Documentation_ to _About Elektra_ and removed the sub-sub-menus _Compiling_, _Installation_ and _Tutorials_. These sub-sub-menus are now sub-menus of _Documentation_. A new sub menu labeled _Get Started_ added to the menu _Documentation_ with some newcomer-friendly informations. Renamed the _Getting Started_ sub menu in _Development_ to _Contribute to Elektra_. The green button on the main page is routed to the new _Get Started_ page. _(Hani Torabi)_
- <<TODO>>
+- <<TODO>>
## Outlook
|