message | diff |
---|---|
disable NaN checks before BLAS calls dsolve.R (shorter config part) | argv <- commandArgs(trailingOnly = TRUE)
-if (!is.null(options("matprod")[[1]])) {
- options(matprod = "blas")
-}
+if (!is.null(options("matprod")[[1]])) options(matprod = "blas")
nfrom <- 128
nto <- 2048
@@ -42,11 +40,10 @@ while (n <= nto) {
solve(A, B)
})
- mflops <- (2.0/3 * n * n * n + 2 * n * n * n) * loops/ (z[3] * 1e6)
+ mflops <- (2.0/3 * n * n * n + 2 * n * n * n) * loops/ (z[3] * 1e+06)
st <- sprintf("%.0fx%.0f :", n, n)
cat(sprintf("%20s %10.2f MFlops %10.6f sec\n", st, mflops, z[3]))
n <- n + nstep
}
-
|
Disable Runas checkbox when RunasTI selected | * run as dialog
*
* Copyright (C) 2010-2013 wj32
- * Copyright (C) 2018 dmex
+ * Copyright (C) 2018-2022 dmex
*
* This file is part of Process Hacker.
*
@@ -2815,6 +2815,11 @@ INT_PTR CALLBACK PhpRunFileWndProc(
PhFreeFileDialog(fileDialog);
}
break;
+ case IDC_TRUSTEDINSTALLER:
+ {
+ EnableWindow(context->RunAsCheckboxHandle, Button_GetCheck(context->RunAsInstallerCheckboxHandle) == BST_UNCHECKED);
+ }
+ break;
}
}
break;
|
DOCS: Fix documentation on asymmetric keydata types
Some type specs didn't correspond to actual use. | @@ -279,29 +279,29 @@ The following Import/Export types are available for the built-in RSA algorithm:
=over 4
-=item "n" (B<OSSL_PKEY_PARAM_RSA_N>) <integer>
+=item "n" (B<OSSL_PKEY_PARAM_RSA_N>) <unsigned integer>
The RSA "n" value.
-=item "e" (B<OSSL_PKEY_PARAM_RSA_E>) <integer>
+=item "e" (B<OSSL_PKEY_PARAM_RSA_E>) <unsigned integer>
The RSA "e" value.
-=item "d" (B<OSSL_PKEY_PARAM_RSA_D>) <integer>
+=item "d" (B<OSSL_PKEY_PARAM_RSA_D>) <unsigned integer>
The RSA "d" value.
-=item "rsa-factor" (B<OSSL_PKEY_PARAM_RSA_FACTOR>) <integer>
+=item "rsa-factor" (B<OSSL_PKEY_PARAM_RSA_FACTOR>) <unsigned integer>
An RSA factor. In 2 prime RSA these are often known as "p" or "q". This value
may be repeated up to 10 times in a single key.
-=item "rsa-exponent" (B<OSSL_PKEY_PARAM_RSA_EXPONENT>) <integer>
+=item "rsa-exponent" (B<OSSL_PKEY_PARAM_RSA_EXPONENT>) <unsigned integer>
An RSA CRT (Chinese Remainder Theorem) exponent. This value may be repeated up
to 10 times in a single key.
-=item "rsa-coefficient" (B<OSSL_PKEY_PARAM_RSA_COEFFICIENT>) <integer>
+=item "rsa-coefficient" (B<OSSL_PKEY_PARAM_RSA_COEFFICIENT>) <unsigned integer>
An RSA CRT (Chinese Remainder Theorem) coefficient. This value may be repeated
up to 9 times in a single key.
@@ -315,23 +315,23 @@ Diffie-Hellman algorithms:
=over 4
-=item "pub" (B<OSSL_PKEY_PARAM_PUB_KEY>) <integer> or <octet string>
+=item "pub" (B<OSSL_PKEY_PARAM_PUB_KEY>) <unsigned integer>
The public key value.
-=item "priv" (B<OSSL_PKEY_PARAM_PRIV_KEY>) <integer> or <octet string>
+=item "priv" (B<OSSL_PKEY_PARAM_PRIV_KEY>) <unsigned integer>
The private key value.
-=item "p" (B<OSSL_PKEY_PARAM_FFC_P>) <integer>
+=item "p" (B<OSSL_PKEY_PARAM_FFC_P>) <unsigned integer>
A DSA or Diffie-Hellman "p" value.
-=item "q" (B<OSSL_PKEY_PARAM_FFC_Q>) <integer>
+=item "q" (B<OSSL_PKEY_PARAM_FFC_Q>) <unsigned integer>
A DSA or Diffie-Hellman "q" value.
-=item "g" (B<OSSL_PKEY_PARAM_FFC_G>) <integer>
+=item "g" (B<OSSL_PKEY_PARAM_FFC_G>) <unsigned integer>
A DSA or Diffie-Hellman "g" value.
@@ -374,7 +374,7 @@ EC curve's cofactor (note for some curves the cofactor is 1).
The public key value in EC point format.
-=item "priv" (B<OSSL_PKEY_PARAM_PRIV_KEY>) <integer>
+=item "priv" (B<OSSL_PKEY_PARAM_PRIV_KEY>) <unsigned integer>
The private key value.
|
driver/als_si114x.h: Format with clang-format
BRANCH=none
TEST=none | @@ -109,8 +109,7 @@ struct si114x_drv_data_t {
struct si114x_typed_data_t type_data[2];
};
-#define SI114X_GET_DATA(_s) \
- ((struct si114x_drv_data_t *)(_s)->drv_data)
+#define SI114X_GET_DATA(_s) ((struct si114x_drv_data_t *)(_s)->drv_data)
#define SI114X_GET_TYPED_DATA(_s) \
(&SI114X_GET_DATA(_s)->type_data[(_s)->type - MOTIONSENSE_TYPE_PROX])
|
Fixed fps limiter accuracy | @@ -2154,8 +2154,11 @@ static void overlay_DestroySwapchainKHR(
void FpsLimiter(){
int64_t now = os_time_get_nano();
sleepTime = targetFrameTime - (now - frameEnd);
- this_thread::sleep_for(chrono::nanoseconds(sleepTime - frameOverhead));
- frameOverhead = (now - frameStart);
+ if ( sleepTime > frameOverhead ) {
+ int64_t adjustedSleep = sleepTime - frameOverhead;
+ this_thread::sleep_for(chrono::nanoseconds(adjustedSleep));
+ frameOverhead = ((os_time_get_nano() - frameStart) - adjustedSleep + (frameOverhead * 99)) / 100;
+ }
}
static VkResult overlay_QueuePresentKHR(
|
added missing inits | @@ -34,8 +34,8 @@ static int essidpartfilterlen = 0;
static char *essidfiltername = NULL;
static char *essidpartfiltername = NULL;
-static int pmkid1count;
-static int pmkid2count;
+static int pmkid1count = 0;
+static int pmkid2count = 0;
static char *pmkid1name = NULL;
static char *pmkid2name = NULL;
@@ -44,11 +44,11 @@ static char *pmkid1outname = NULL;
static char *pmkid2outname = NULL;
static char *pmkidoutname = NULL;
-static intpmkid_t *pmkid1list, *pmkidzeiger1, *pmkidzeigerakt1;
-static intpmkid_t *pmkid2list, *pmkidzeiger2, *pmkidzeigerakt2;
+static intpmkid_t *pmkid1list = NULL, *pmkidzeiger1 = NULL, *pmkidzeigerakt1 = NULL;
+static intpmkid_t *pmkid2list = NULL, *pmkidzeiger2 = NULL, *pmkidzeigerakt2 = NULL;
-static int hccapx1count;
-static int hccapx2count;
+static int hccapx1count = 0;
+static int hccapx2count = 0;;
static char *hccapx1name = NULL;
static char *hccapx2name = NULL;
@@ -60,13 +60,13 @@ static char *hccapxoutname = NULL;
static bool pmkidgroupflag = false;
static bool hccapxgroupflag = false;
-static inthccapx_t *hccapx1list, *hccapxzeiger1, *hccapxzeigerakt1;
-static inthccapx_t *hccapx2list, *hccapxzeiger2, *hccapxzeigerakt2;
+static inthccapx_t *hccapx1list = NULL, *hccapxzeiger1 = NULL, *hccapxzeigerakt1 = NULL;
+static inthccapx_t *hccapx2list = NULL, *hccapxzeiger2 = NULL, *hccapxzeigerakt2 = NULL;
static char *essidoutname = NULL;
static char *essidmacapoutname = NULL;
-static char separator;
+static char separator = ':';
/*===========================================================================*/
static void globalclose()
{
@@ -1303,7 +1303,6 @@ if(argc < 2)
return EXIT_SUCCESS;
}
-
if(pmkid1name != NULL)
{
readpmkid1file();
|
common BUGFIX perm value -1 means keep current values | @@ -1371,6 +1371,7 @@ sr_chmodown(const char *path, const char *owner, const char *group, mode_t perm)
assert(path);
+ if ((int)perm != -1) {
if (perm > 00666) {
sr_errinfo_new(&err_info, SR_ERR_INVAL_ARG, NULL, "Only read and write permissions can be set.");
return err_info;
@@ -1378,6 +1379,7 @@ sr_chmodown(const char *path, const char *owner, const char *group, mode_t perm)
sr_errinfo_new(&err_info, SR_ERR_INVAL_ARG, NULL, "Setting execute permissions has no effect.");
return err_info;
}
+ }
/* we are going to change the owner */
if (owner && (err_info = sr_get_pwd(&uid, (char **)&owner))) {
|
fix tmp env var issue when we get a path starting with /cygdrive/... | #ifndef ANY_MSYS
#include <stdlib.h>
#else
+#include <ctype.h>
#include <string.h>
#include <windows.h>
@@ -26,7 +27,7 @@ const char* get_tmp_env() {
return getenv("temp");
}
// windows
- const char* p = getenv("TEMP"); // For some reason in cmd the userprofile
+ char* p = getenv("TEMP"); // For some reason in cmd the userprofile
// part was cut in my tests, but apparently
// this is not always the case
if (p == NULL) {
@@ -35,6 +36,20 @@ const char* get_tmp_env() {
if (p[0] != '/') {
return p;
}
+ if (strstarts(p, "/cygdrive") && strlen(p) > 10) {
+ // expects that path has the form /cygdrive/c/Users/<user>/...
+ // We want to transform it into C:/Users/<user>/...
+ if (p[9] == '/') { // The first time we call this the string has the form
+ // /cygdrive/c/... but then we edit the string in the
+ // env (of this programm) so on future call it is
+ // already /cygdriveC:/... so don't do any
+ // transformation, just return the relevant part
+ p[9] = toupper(p[10]); // capitalize drive letter and move it one
+ // position to the front
+ p[10] = ':';
+ }
+ return p + 9;
+ }
const char* up = getenv("USERPROFILE");
size_t up_len = strlen(up);
if (up_len + strlen(p) >= B_SIZE) {
|
perf(non-null judgment):
add non-null judgment of "orderCfg.endorser.nodeUrl" in BoatHlfabricDiscoverySubmit() | @@ -983,13 +983,17 @@ BOAT_RESULT BoatHlfabricDiscoverySubmit(BoatHlfabricTx *tx_ptr, const BoatHlfabr
tx_ptr->wallet_ptr->network_info.orderCfg.endorserNumber = discoverResult.discoverConfig.discoverOrders.num;
tx_ptr->wallet_ptr->network_info.orderCfg.endorser = BoatMalloc(discoverResult.discoverConfig.discoverOrders.num * sizeof(BoatHlfabricNodeInfoCfg));
if(tx_ptr->wallet_ptr->network_info.orderCfg.endorser == NULL){
- BoatLog(BOAT_LOG_CRITICAL, "Fail to allocate orderCfg.endorser buffer.",i,j,k);
+ BoatLog(BOAT_LOG_CRITICAL, "Fail to allocate orderCfg.endorser buffer.");
boat_throw(BOAT_ERROR_COMMON_OUT_OF_MEMORY, BoatHlfabricDiscoverySubmit_exception);
}
for ( i = 0; i < discoverResult.discoverConfig.discoverOrders.num; i++)
{
len = sizeof(discoverResult.discoverConfig.discoverOrders.discoverOrderinfo[i].port) + strlen(discoverResult.discoverConfig.discoverOrders.discoverOrderinfo[i].host) + 1;
tx_ptr->wallet_ptr->network_info.orderCfg.endorser[i].nodeUrl = BoatMalloc(len+1);
+ if(tx_ptr->wallet_ptr->network_info.orderCfg.endorser[i].nodeUrl == NULL){
+ BoatLog(BOAT_LOG_CRITICAL, "Fail to allocate orderCfg.endorser[%d].nodeUrl buffer.",i);
+ boat_throw(BOAT_ERROR_COMMON_OUT_OF_MEMORY, BoatHlfabricDiscoverySubmit_exception);
+ }
memset(tx_ptr->wallet_ptr->network_info.orderCfg.endorser[i].nodeUrl,0,len+1);
offset = 0;
memcpy(tx_ptr->wallet_ptr->network_info.orderCfg.endorser[i].nodeUrl + offset, discoverResult.discoverConfig.discoverOrders.discoverOrderinfo[i].host, strlen(discoverResult.discoverConfig.discoverOrders.discoverOrderinfo[i].host));
|
splits +tapp declaration in :dns to shorten lines | $% [%dns-binding =binding:dns]
==
+$ out-peer-data ~
- ++ tapp (^tapp app-state peek-data in-poke-data out-poke-data in-peer-data out-peer-data)
+ ++ tapp
+ %: ^tapp
+ app-state
+ peek-data
+ in-poke-data
+ out-poke-data
+ in-peer-data
+ out-peer-data
+ ==
++ stdio (^stdio out-poke-data out-peer-data)
--
::
|
Remove libcurl dependency | @@ -3,7 +3,7 @@ set_xmakever("2.5.1")
set_languages("cxx20")
set_arch("x64")
-add_requires("spdlog", "nlohmann_json", "hopscotch-map", "minhook", "mem", "imgui", "sol2", "tiltedcore", "libcurl")
+add_requires("spdlog", "nlohmann_json", "hopscotch-map", "minhook", "mem", "imgui", "sol2", "tiltedcore")
add_requireconfs("imgui", { configs = {cxflags = "/DNDEBUG"} })
add_rules("mode.debug","mode.releasedbg", "mode.release")
@@ -66,7 +66,7 @@ target("cyber_engine_tweaks")
add_headerfiles("src/**.h")
add_includedirs("src/")
add_syslinks("User32", "Version", "d3d11")
- add_packages("spdlog", "nlohmann_json", "minhook", "hopscotch-map", "imgui", "mem", "sol2", "tiltedcore", "libcurl")
+ add_packages("spdlog", "nlohmann_json", "minhook", "hopscotch-map", "imgui", "mem", "sol2", "tiltedcore")
add_deps("RED4ext.SDK")
on_package(function(target)
|
pcie-slot: Don't fail powering on an already on switch
If the power state is already the required value, return
OPAL_SUCCESS rather than OPAL_PARAMETER to avoid spurious
errors during boot.
Acked-By: Michael Neuling | @@ -205,7 +205,7 @@ static int64_t pcie_slot_set_power_state_ext(struct pci_slot *slot, uint8_t val,
uint16_t state;
if (slot->power_state == val)
- return OPAL_PARAMETER;
+ return OPAL_SUCCESS;
/* Update the power state and return immediately if the power
* control functionality isn't supported on the PCI slot.
|
news: add draft for introduction text | @@ -15,6 +15,40 @@ Please add your name at the end of every contribution.
We are proud to release Elektra 0.8.<<VERSION>>.
+## Possible Introduction Text
+
+We are proud to present our largest release so far.
+It is the first release of the 0.9.\* version series,
+which goal is it:
+
+- to prepare Elektra for 1.0.0
+- to remove compatibility layers
+- to prototypes that did not reach maturity
+
+The 0.8.\* version series will still be maintained
+but with a low priority -- the highest priority
+will be to get Elektra 1.0.0 done. If you have
+maintenance requests and want 0.8.26 to be released,
+please contact us via [email protected]
+
+## Business Plans
+
+To get away from a purely research-oriented approach
+to a mature foundation, we also need paid employees
+to fix problems.
+
+We plan to introduce following ways of income:
+
+1. donations
+2. paid support/feature requests
+3. consultancy
+
+If you are interested in any of these, please
+contact us via [email protected]
+
+Please note, that Elektra will definitely stay
+100% free software (BSD licensed).
+
## What is Elektra?
Elektra serves as a universal and secure framework to access
|
bump Lmod to version v7.6.1 | Summary: Lua based Modules (lmod)
Name: %{pname}%{PROJ_DELIM}
-Version: 7.5.17
+Version: 7.6.1
Release: 1
License: MIT
Group: %{PROJ_NAME}/admin
@@ -60,8 +60,6 @@ Patch1: lmod.consulting.patch
Patch2: lmod.site.patch
# 4/25/17 [email protected] - upping patch fuzz factor for newer lmod
%global _default_patch_fuzz 2
-# 7/27/17 [email protected] - patch to enable depends_on support for tcl modules
-Patch3: depends_on.patch
# Known dependencies
Requires: lua >= %{luaver}
@@ -81,7 +79,6 @@ Supports a Software Hierarchy
# OpenHPC patches
%patch1 -p1
%patch2 -p1
-%patch3 -p1
%build
unset MODULEPATH
|
Fix logm for stackdump and interrupt status
Stackdump messages are not shown on assertion because logm's print buffer can't be flushed after assert.
These messages should be delivered with low output directly. | @@ -39,7 +39,7 @@ int logm_internal(int priority, const char *fmt, va_list ap)
int ret = 0;
buffer_state_t op;
- if (g_logm_isready) {
+ if (g_logm_isready && !up_interrupt_context()) {
flags = irqsave();
if (g_logm_count < LOGM_RSVBUF_COUNT) {
@@ -76,7 +76,7 @@ int logm_internal(int priority, const char *fmt, va_list ap)
irqrestore(flags);
} else {
- /* Low Output: Sytem is not yet completely ready */
+ /* Low Output: Sytem is not yet completely ready or this is called from interrupt handler */
#ifdef CONFIG_ARCH_LOWPUTC
struct lib_outstream_s strm;
lib_lowoutstream(&strm);
|
Like previous, but actually works. | fel
apse:docs
==
+ ++ wrip
+ |* fel/rule
+ %+ cook
+ |= {a/whit b/plan c/whit}
+ ^- plan
+ b
+ ::
+ :: XX performance: this makes the parser about 50% slower.
+ :: because we double-parse most of the spaces in the file.
+ :: just so we can do a postfix doc-comment.
+ ::
+ :: the correct solution to this problem is to unify the
+ :: parsing of docs with the parsing of comments/spaces.
+ :: but at this point we're pretty much in parser rewrite.
+ ::
+ :: it should go without saying that ++vast needs a rewrite.
+ :: it dates to 2011.
+ ::
+ ;~ plug
+ apex:docs
+ fel
+ apse:docs
+ ==
++ tall :: full tall form
%+ knee *hoon
|.(~+((wart (wrap ;~(pose expression:(norm &) long lute apex:(sail &))))))
++ till :: mold tall form
%+ knee *plan
- |.(~+((wert ;~(pose structure:(norm &) scad))))
+ |.(~+((wert (wrip ;~(pose structure:(norm &) scad)))))
++ wede :: wide bulb
;~(pfix ;~(pose lus fas) wide)
++ wide :: full wide form
|
Fix StencilAction; | @@ -159,6 +159,7 @@ StringEntry lovrStackType[] = {
StringEntry lovrStencilAction[] = {
[STENCIL_KEEP] = ENTRY("keep"),
+ [STENCIL_ZERO] = ENTRY("zero"),
[STENCIL_REPLACE] = ENTRY("replace"),
[STENCIL_INCREMENT] = ENTRY("increment"),
[STENCIL_DECREMENT] = ENTRY("decrement"),
|
Fix periodic reading of ZCL attributes | @@ -6939,7 +6939,6 @@ void DeRestPlugin::idleTimerFired()
if (d->idleLimit <= 0)
{
- DBG_Printf(DBG_INFO, "OTA last busy dt %d s\n", d->otauLastBusyTimeDelta());
QTime t = QTime::currentTime();
if (d->queryTime > t)
@@ -7005,17 +7004,11 @@ void DeRestPlugin::idleTimerFired()
if (clusters[i] != 0xffff)
{
const NodeValue &val = lightNode->getZclValue(clusters[i], attrs[i]);
- if (val.updateType == NodeValue::UpdateByZclReport)
- {
- if (val.timestampLastReport.elapsed() < (tRead[i] * 1000))
- {
- // fresh enough
- continue;
- }
- }
- if (val.updateType == NodeValue::UpdateByZclRead)
+
+ if (val.updateType == NodeValue::UpdateByZclRead ||
+ val.updateType == NodeValue::UpdateByZclReport)
{
- if (val.timestampLastReadRequest.elapsed() < (tRead[i] * 1000))
+ if (val.timestamp.isValid() && val.timestamp.elapsed() < (tRead[i] * 1000))
{
// fresh enough
continue;
|
Updated man page for the no-global-config option. | @@ -444,6 +444,7 @@ Log invalid requests to the specified file.
Do not load the global configuration file. This directory should normally be
/usr/local/etc, unless specified with
.I --sysconfdir=/dir.
+See --dcf option for finding the default configuration file.
.SS
PARSE OPTIONS
.TP
|
removed writing of junk data as not helpful, added checks in freeing of bucket_map and bucket_map->data | @@ -34,7 +34,6 @@ linear_hash_init(
/* open datafile */
linear_hash->database = fopen(data_filename, "w+b");
- fwrite(&(int) { 0xADDE }, sizeof(int), 1, linear_hash->database);
/* initialize linear_hash fields */
linear_hash->initial_size = initial_size;
@@ -650,7 +649,8 @@ write_new_bucket(
/* seek to end of file to append new bucket */
ion_fpos_t bucket_loc;
-// ion_fpos_t record_total_size = linear_hash->super.record.key_size + linear_hash->super.record.value_size + sizeof(ion_byte_t);
+
+ fseek(linear_hash->database, 0, SEEK_SET);
if (idx == 0) {
fseek(linear_hash->database, 0, SEEK_SET);
@@ -688,6 +688,7 @@ linear_hash_get_bucket(
linear_hash_table_t *linear_hash
) {
fseek(linear_hash->database, 0, SEEK_END);
+
/* create a temporary store for records that are read */
linear_hash_bucket_t bucket;
@@ -999,9 +1000,15 @@ ion_err_t
linear_hash_close(
linear_hash_table_t *linear_hash
) {
+ if (linear_hash->bucket_map->data != NULL) {
+ free(linear_hash->bucket_map->data);
+ linear_hash->bucket_map->data = NULL;
+ }
+
+ if (linear_hash->bucket_map != NULL) {
free(linear_hash->bucket_map->data);
- free(linear_hash->bucket_map);
linear_hash->bucket_map = NULL;
+ }
if (0 != fclose(linear_hash->database)) {
return err_file_close_error;
|
Fixes for 2 SIGSEGVs when section is NULL | @@ -787,7 +787,7 @@ static void loadByHash(Console* console, const char* name, const char* hash, con
{
console->active = false;
- LoadByHashData loadByHashData = { console, strdup(name), strdup(section), callback, data};
+ LoadByHashData loadByHashData = { console, strdup(name), section ? strdup(section) : NULL, callback, data};
tic_fs_hashload(console->fs, hash, loadByHashDone, MOVE(loadByHashData));
}
@@ -877,7 +877,7 @@ static void onLoadCommandConfirmed(Console* console)
if (tic_fs_ispubdir(console->fs))
{
- LoadPublicCartData loadPublicCartData = { console, strdup(name), NULL, strdup(section) };
+ LoadPublicCartData loadPublicCartData = { console, strdup(name), NULL, section ? strdup(section) : NULL };
tic_fs_enum(console->fs, compareFilename, fileFound, MOVE(loadPublicCartData));
return;
|
Add missing $CFLAGS | @@ -282,7 +282,7 @@ have_boringssl=no
if test "x${request_boringssl}" != "xno"; then
save_CFLAGS="$CFLAGS"
save_LIBS="$LIBS"
- CFLAGS="$BORINGSSL_CFLAGS"
+ CFLAGS="$BORINGSSL_CFLAGS $CFLAGS"
LIBS="$BORINGSSL_LIBS $LIBS"
AC_MSG_CHECKING([for SSL_set_quic_early_data_context])
|
SWIG: Find package quietly | @@ -128,10 +128,10 @@ endmacro ()
macro (find_swig)
if (NOT SWIG_FOUND)
- find_package (SWIG 3)
+ find_package (SWIG 3 QUIET)
if (NOT SWIG_FOUND)
message (STATUS "Search for swig2 instead")
- find_package (SWIG 2)
+ find_package (SWIG 2 QUIET)
endif ()
endif (NOT SWIG_FOUND)
endmacro (find_swig)
|
tools/pyboard: Run exec: command as a string.
The Python documentation recommends passing the command as a string when
using Popen(..., shell=True). This is because "sh -c <string>" is used to
execute the command and additional arguments after the command string are
passed to the shell itself (not the executing command). | @@ -152,7 +152,7 @@ class ProcessToSerial:
def __init__(self, cmd):
import subprocess
- self.subp = subprocess.Popen(cmd.split(), bufsize=0, shell=True, preexec_fn=os.setsid,
+ self.subp = subprocess.Popen(cmd, bufsize=0, shell=True, preexec_fn=os.setsid,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Initially was implemented with selectors, but that adds Python3
|
Use -g if using valgrind | @@ -13,6 +13,8 @@ jobs:
citests:
name: CI-Tests
runs-on: ubuntu-latest
+ env:
+ PTLS_CMAKE_OPTS: "-DCMAKE_C_FLAGS=-g -DCMAKE_CXX_FLAGS=-g"
steps:
- name: Checkout repository
@@ -27,6 +29,8 @@ jobs:
sudo apt-get install -y libssl-dev
./ci/build_picotls.sh
cmake .
+ CXXFLAGS="-Wall -Wextra -Wpedantic -Werror -g"
+ CFLAGS="-Wall -Wextra -Wpedantic -Werror -g"
make
- name: Perform Unit Tests
|
If Actor Facing Direction codegen | @@ -8,6 +8,7 @@ import {
tempVariableName,
} from "../helpers/variables";
import {
+ ActorDirection,
ScriptEvent,
Variable,
} from "../../store/features/entities/entitiesTypes";
@@ -908,7 +909,7 @@ class ScriptBuilder {
this._addComment("Actor Push");
this._setConst("ACTOR", 0);
- this._stackPushConst(3);
+ this._stackPushConst(0);
this._actorGetDirection("^/(ACTOR - 1)/", ".ARG0");
this._setConst("^/(ACTOR - 1)/", this.actorIndex);
this._actorGetPosition("^/(ACTOR - 1)/");
@@ -1007,7 +1008,7 @@ class ScriptBuilder {
this._addNL();
};
- actorSetDirection = (direction: string) => {
+ actorSetDirection = (direction: ActorDirection) => {
this._addComment("Actor Set Direction");
this._actorSetDirection("ACTOR", toASMDir(direction));
this._addNL();
@@ -1285,7 +1286,7 @@ class ScriptBuilder {
sceneId: string,
x: number = 0,
y: number = 0,
- direction: string = "down",
+ direction: ActorDirection = "down",
fadeSpeed: number = 2
) => {
this.includeActor = true;
@@ -1917,6 +1918,25 @@ class ScriptBuilder {
this._addNL();
};
+ ifActorDirection = (
+ direction: ActorDirection,
+ truePath = [],
+ falsePath = []
+ ) => {
+ const falseLabel = this.getNextLabel();
+ const endLabel = this.getNextLabel();
+ this._addComment(`If Actor Facing Direction`);
+ this._stackPushConst(0);
+ this._actorGetDirection("^/(ACTOR - 1)/", ".ARG0");
+ this._ifConst(".NE", ".ARG0", toASMDir(direction), falseLabel, 1);
+ this._compilePath(truePath);
+ this._jump(endLabel);
+ this._label(falseLabel);
+ this._compilePath(falsePath);
+ this._label(endLabel);
+ this._addNL();
+ };
+
ifDataSaved = (
slot: number,
truePath: ScriptEvent[] | ScriptBuilderPathFunction = [],
|
Yan LR: Extend list of restrictions | @@ -217,6 +217,16 @@ sudo kdb umount user/tests/yanlr
- Yan LR does not provide write support for data. Please use the [YAML Smith](../yamlsmith/) plugin for that purpose.
+### Input Restrictions
+
+The plugin should, but does not, limit the amount
+
+- of **nesting levels**,
+- the **length of numbers**, and
+- the **length of scalars**
+
+. These restrictions would make it less easy to crash the parser, by feeding it unrestricted data.
+
### Comments
The [lexer](yaml_lexer.cpp) does currently tokenize comments. Consequently the [plugin grammar](YAML.g4) of the plugin does also match comments. However, the [listener](listener.cpp) does currently **ignore comments**.
|
Don't use R when we are solving for LH | @@ -661,8 +661,9 @@ static FLT run_mpfit_find_3d_structure(MPFITData *d, PoserDataLight *pdl, Surviv
mp_result result = {};
+ int nfree = survive_optimizer_get_free_parameters_count(&mpfitctx);
survive_release_ctx_lock(ctx);
- int res = survive_optimizer_run(&mpfitctx, &result, R);
+ int res = survive_optimizer_run(&mpfitctx, &result, nfree == 7 ? R : 0);
survive_get_ctx_lock(ctx);
return handle_optimizer_results(&mpfitctx, res, &result, &user_data, out);
|
Corrected homebridge-install log dir path in delete function | @@ -299,7 +299,7 @@ function checkUpdate {
# delete old Log files
if [ $(ls -1 "${LOG_DIR}" | wc -l) -gt 3 ]; then
oldest=$(ls "${LOG_DIR}" -t | tail -n1)
- rm -f "$oldest"
+ rm -f "${LOG_DIR}/$oldest"
fi
if [[ $AUTO_UPDATE = false ]] || [ -z $AUTO_UPDATE ]; then
|
Tweak how dumbindent handles #preproc lines | @@ -118,7 +118,7 @@ import (
// 'Constants', but their type is []byte, not string.
var (
backTick = []byte("`")
- externC = []byte("extern \"C\" {")
+ extern = []byte("extern ")
namespace = []byte("namespace ")
starSlash = []byte("*/")
@@ -177,6 +177,7 @@ func FormatBytes(dst []byte, src []byte, opts *Options) []byte {
nBraces := 0 // The number of unbalanced '{'s.
nParens := 0 // The number of unbalanced '('s.
hanging := false // Whether the previous non-blank line ends with '=' or '\\'.
+ preproc := false // Whether we're in a #preprocessor line.
for line, remaining := src, []byte(nil); len(src) > 0; src = remaining {
src = trimLeadingWhiteSpace(src)
@@ -200,34 +201,38 @@ func FormatBytes(dst []byte, src []byte, opts *Options) []byte {
nBlankLines = 0
}
- // Preprocessor lines (#ifdef, #pragma, etc) are never indented.
- //
- // Also catch `extern "C" {` and `namespace foo {`.
- if (line[0] == '#') ||
- ((line[0] == 'e') && bytes.HasPrefix(line, externC)) ||
- ((line[0] == 'n') && bytes.HasPrefix(line, namespace)) {
+ // Handle preprocessor lines (#ifdef, #pragma, etc).
+ if preproc || (line[0] == '#') {
+ if preproc {
+ dst = appendRepeatedBytes(dst, indentBytes, indentCount*2)
+ }
line = trimTrailingWhiteSpace(line)
dst = append(dst, line...)
dst = append(dst, '\n')
- hanging = lastNonWhiteSpace(line) == '\\'
+ hanging = false
+ preproc = lastNonWhiteSpace(line) == '\\'
continue
}
- // Account for leading '}'s before we print the line's indentation.
closeBraces := 0
+
+ // Don't indent for `extern "C" {` or `namespace foo {`.
+ if ((line[0] == 'e') && hasPrefixAndBrace(line, extern)) ||
+ ((line[0] == 'n') && hasPrefixAndBrace(line, namespace)) {
+ nBraces--
+
+ } else {
+ // Account for leading '}'s before we print the line's indentation.
for ; (closeBraces < len(line)) && line[closeBraces] == '}'; closeBraces++ {
}
nBraces -= closeBraces
- // The heuristics aren't perfect, and sometimes do not catch braces or
- // parentheses in #define macros. They also don't increment nBraces for
- // `extern "C"` or namespace lines. We work around that here, clamping
- // to zero.
+ // Because the "{" in "extern .*{" and "namespace .*{" is had no
+ // net effect on nBraces, the matching "}" can cause the nBraces
+ // count to dip below zero. Correct for that here.
if nBraces < 0 {
nBraces = 0
}
- if nParens < 0 {
- nParens = 0
}
// Output a certain number of spaces to roughly approximate
@@ -308,6 +313,13 @@ func FormatBytes(dst []byte, src []byte, opts *Options) []byte {
return dst
}
+// hasPrefixAndBrace returns whether line starts with prefix and after that
+// contains a '{'.
+func hasPrefixAndBrace(line []byte, prefix []byte) bool {
+ return bytes.HasPrefix(line, prefix) &&
+ bytes.IndexByte(line[len(prefix):], '{') >= 0
+}
+
// trimLeadingWhiteSpaceAndNewLines converts "\t\n foo bar " to "foo bar ".
func trimLeadingWhiteSpaceAndNewLines(s []byte) []byte {
for (len(s) > 0) && ((s[0] == ' ') || (s[0] == '\t') || (s[0] == '\n')) {
|
[bsp][stm32][unite] input format by bytes | @@ -3,6 +3,9 @@ config BSP_USING_CRC
bool "Enable CRC (CRC-32 0x04C11DB7 Polynomial)"
select RT_USING_HWCRYPTO
select RT_HWCRYPTO_USING_CRC
+ # "Crypto device frame dose not support above 8-bits granularity"
+ # "Reserve progress, running well, about 32-bits granularity, such as stm32f1, stm32f4"
+ depends on (SOC_SERIES_STM32L4 || SOC_SERIES_STM32F0 || SOC_SERIES_STM32F7 || SOC_SERIES_STM32H7)
default n
config BSP_USING_RNG
|
README: add a section on usefulness | @@ -28,14 +28,12 @@ implementation in the relevant header.
## Portability
The code currently requires GCC (or a compiler which implements GCC's
-vector extensions, like clang or icc). It wouldn't be too difficult
-to support other compilers, one would mostly just need to add macros
-for accessing individual elements in the vector types, and define
-vector types in an ifdef to avoid relying on the `vector_size` GNU C
-extension.
+vector extensions, like clang or icc).
-I don't know when, or if, I'll get around to it, but if you're willing
-to work on it patches are welcome.
+I plan to move from GCC's vector extensions to loops annotated with
+OpenMP 4 SIMD pragmas soon (note that this doesn't mean the code will
+require OpenMP; the SIMD pragmas don't have any runtime dependencies,
+they just provide a standard way to annotate code for the compiler).
## Related Projects
@@ -44,6 +42,17 @@ This is very similar to the builtins module in
the future, I may even choose to roll this project into
portable-snippets.
+## Why?
+
+Ideas include:
+
+ * Run code which relies on ISA extensions on CPUs which don't support
+ them without a rewrite.
+ * An easy way to port code to an instruction set your CPU doesn't
+ support without having to run your tests in an emulator.
+ * Understanding how an instruction works when the documentation fails
+ you.
+
## License
To the extent possible under law, the authors have waived all
|
test: allow /. in redirect URLs | /%2f%5c%2f%67%6f%6f%67%6c%65%2e%63%6f%6d/
/%5cexample.com
/%68%74%74%70%3a%2f%2f%67%6f%6f%67%6c%65%2e%63%6f%6d
-/.example.com
//%09/example.com
//%5cexample.com
///%09/example.com
@@ -554,7 +553,6 @@ javascripT://anything%0D%0A%0D%0Awindow.alert(document.cookie)
/%2f%2flocaldomain.pw
/localdomain.pw/%2f%2e%2e
/http:/localdomain.pw
-/.localdomain.pw
http://.localdomain.pw
.localdomain.pw
///\;@localdomain.pw
@@ -831,7 +829,6 @@ javascripT://anything%0D%0A%0D%0Awindow.alert(document.cookie)
/%2f%2fgoogle.com
/google.com/%2f%2e%2e
/http:/google.com
-/.google.com
///\;@google.com
///google.com
/////google.com/
\ No newline at end of file
|
build: fall back to dwarf-4 format because of smaller binary size (dwarf-5 is the default in GCC 11) | @@ -103,6 +103,9 @@ function(__build_set_default_build_specifications)
"-Wextra"
"-Wno-unused-parameter"
"-Wno-sign-compare"
+ # Default is dwarf-5 since GCC 11, fallback to dwarf-4 because of binary size
+ # TODO: IDF-5160
+ "-gdwarf-4"
# always generate debug symbols (even in release mode, these don't
# go into the final binary so have no impact on size
"-ggdb")
|
spec: remove duplicate key in navigation.yaml | @@ -893,7 +893,6 @@ definitions:
id: 0x0204
public: false
short_desc: Velocity in ECEF
- public: False
desc: |
This message reports the velocity in Earth Centered Earth Fixed
(ECEF) coordinates. The full GPS time is given by the preceding
|
remove unnecessary test
also optimize check sum | @@ -419,14 +419,11 @@ static int ssl_tls13_parse_pre_shared_key_ext( mbedtls_ssl_context *ssl,
matched_identity = identity_id;
/* Update handshake parameters */
- if( psk_type == MBEDTLS_SSL_TLS1_3_PSK_EXTERNAL )
- {
ssl->session_negotiate->ciphersuite = cipher_suite;
ssl->handshake->ciphersuite_info = ciphersuite_info;
MBEDTLS_SSL_DEBUG_MSG( 2, ( "overwrite ciphersuite: %04x - %s",
- cipher_suite,
- ciphersuite_info->name ) );
- }
+ cipher_suite, ciphersuite_info->name ) );
+
}
if( p_identity_len != identities_end || p_binder_len != binders_end )
@@ -1454,6 +1451,8 @@ static int ssl_tls13_parse_client_hello( mbedtls_ssl_context *ssl,
if( ret < 0 )
return( ret );
+ mbedtls_ssl_optimize_checksum( ssl, ssl->handshake->ciphersuite_info );
+
return( hrr_required ? SSL_CLIENT_HELLO_HRR_REQUIRED : SSL_CLIENT_HELLO_OK );
}
|
FLIR: Faster UVC streaming. | @@ -346,7 +346,7 @@ void HAL_SPI_RxCpltCallback(SPI_HandleTypeDef *hspi)
}
}
-static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
+static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t streaming_cb)
{
fb_update_jpeg_buffer();
@@ -354,6 +354,10 @@ static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
return -1;
}
+ bool frame_ready = false;
+ bool streaming = (streaming_cb != NULL); // Streaming mode.
+
+ do {
// The SPI DMA device is always clocking the FLIR Lepton in the background.
// The code below resets the vospi control values to let data be pulled in.
// If we need to re-sync we do it. Otherwise, after we finish pulling data
@@ -368,7 +372,13 @@ static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
if (vospi_resync == true) {
lepton_sync();
}
+ if (frame_ready == true && streaming_cb != NULL) {
+ // Start streaming the frame while a new one is captured.
+ streaming = streaming_cb(image);
+ frame_ready = false;
+ } else {
__WFI();
+ }
} while (vospi_pid < vospi_packets); // only checking one volatile var so atomic.
MAIN_FB()->w = MAIN_FB()->u;
@@ -440,6 +450,8 @@ static int snapshot(sensor_t *sensor, image_t *image, streaming_cb_t cb)
}
}
+ frame_ready = true;
+ } while (streaming && streaming_cb != NULL);
return 0;
}
|
I accidentally pushed a modified setup.py file, back to the original file, sorry for that | @@ -15,9 +15,9 @@ except AttributeError:
_ccllib = Extension(
"_ccllib",
["pyccl/ccl.i",],
- libraries = ['m', 'gsl', 'gslcblas', 'ccl'], #'gomp',
- include_dirs = [numpy_include, "include/", "class/include", "/opt/local/include"],
- extra_compile_args=['-O4', '-std=c99'],
+ libraries = ['m', 'gsl', 'gslcblas', 'gomp', 'ccl'],
+ include_dirs = [numpy_include, "include/", "class/include"],
+ extra_compile_args=['-O4', '-fopenmp', '-std=c99'],
swig_opts=['-threads'],
)
@@ -29,3 +29,4 @@ setup( name = "pyccl",
packages = ['pyccl'],
ext_modules = [_ccllib,],
)
+
|
gall: fix virtualization of agent compilation | |= =term
^- [(each vase tang) _ap-core]
::
- =/ compiled
+ =/ virtual
=/ =type p.running-state.current-agent
=/ =hoon [%limb term]
- (~(mint wa cache.current-agent) type hoon)
- ::
- =/ virtual
- =/ trap |.(compiled)
- (mule trap)
+ %- mule
+ |. (~(mint wa cache.current-agent) type hoon)
::
?: ?=(%.n -.virtual)
=/ =tang p.virtual
|= [=term gat=vase arg=vase]
^- [(each vase tang) _ap-core]
::
- =/ compiled
+ =/ virtual
=/ =type [%cell p.gat p.arg]
=/ =hoon [%cnsg [%$ ~] [%$ 2] [%$ 3] ~]
- (~(mint wa cache.current-agent) type hoon)
- ::
- =/ virtual
- =/ trap |.(compiled)
- (mule trap)
+ %- mule
+ |. (~(mint wa cache.current-agent) type hoon)
::
?: ?=(%.n -.virtual)
=/ =tang (ap-tang "call: {<term>}: type mismatch")
|
doc: update Language Extensions in coding guidelines
update Language Extensions in coding guidelines | @@ -3640,3 +3640,10 @@ b) '__builtin_va_start', refers to section 6.20 in GCC 7.3 Manual.
c) '__builtin_va_end', refers to section 6.20 in GCC 7.3 Manual.
d) '__builtin_offsetof', refers to section 6.51 in GCC 7.3 Manual.
+C-LE-05: Use of extended designated initializers is allowed
+===========================================================
+
+This rule applies to the following designated initializer: writing '[first ...
+last] = value' to initialize a range of elements to the same value, refers to
+section 6.27 in GCC 7.3 Manual.
+
|
posix: set POLLNVAL on bad fd, POLLHUP on msgSend failure in do_poll_iteration | @@ -1766,7 +1766,7 @@ static int do_poll_iteration(struct pollfd *fds, nfds_t nfds)
msg.i.attr.val = fds[i].events;
if (posix_getOpenFile(fds[i].fd, &f) < 0) {
- err = -EBADF;
+ err = POLLNVAL;
}
else {
hal_memcpy(&msg.i.attr.oid, &f->oid, sizeof(oid_t));
@@ -1776,7 +1776,7 @@ static int do_poll_iteration(struct pollfd *fds, nfds_t nfds)
}
if (err < 0)
- fds[i].revents |= POLLNVAL;
+ fds[i].revents |= POLLHUP;
else if (err > 0)
fds[i].revents |= err;
|
[mechanics] expose BulletR::distance() | @@ -59,6 +59,11 @@ public:
_contactPoints = p;
};
+ double distance() const
+ {
+ return _contactDistance;
+ };
+
double y_correction_A() { return _y_correction_A; }
double y_correction_B() { return _y_correction_A; }
double y_correction() { return _y_correction_A + _y_correction_B; }
|
Fix a typo in user_mbuf.c | @@ -506,7 +506,7 @@ mbuf_initialize(void *dummy)
#else
zone_mbuf = umem_cache_create(MBUF_MEM_NAME, MSIZE, 0,
mb_ctor_mbuf, mb_dtor_mbuf, NULL,
- NUULL,
+ NULL,
NULL, 0);
#endif
/*zone_ext_refcnt = umem_cache_create(MBUF_EXTREFCNT_MEM_NAME, sizeof(u_int), 0,
|
Reduce possibility of stale shares for Bitcoin family of pools | @@ -66,9 +66,9 @@ namespace MiningCore.Blockchain.Bitcoin
protected readonly BitcoinExtraNonceProvider extraNonceProvider;
protected readonly IHashAlgorithm sha256d = new Sha256D();
protected readonly IHashAlgorithm sha256dReverse = new DigestReverser(new Sha256D());
-
+ protected const int MaxActiveJobs = 4;
protected readonly IHashAlgorithm sha256s = new Sha256S();
- protected readonly Dictionary<string, TJob> validJobs = new Dictionary<string, TJob>();
+ protected readonly List<TJob> validJobs = new List<TJob>();
protected IHashAlgorithm blockHasher;
protected IHashAlgorithm coinbaseHasher;
protected bool hasSubmitBlockMethod;
@@ -327,7 +327,7 @@ namespace MiningCore.Blockchain.Bitcoin
lock (jobLock)
{
- validJobs.TryGetValue(jobId, out job);
+ job = validJobs.FirstOrDefault(x=> x.JobId == jobId);
}
if (job == null)
@@ -359,10 +359,12 @@ namespace MiningCore.Blockchain.Bitcoin
lock (jobLock)
{
- if(!validJobs.TryGetValue(jobId, out job))
- throw new StratumException(StratumError.JobNotFound, "job not found");
+ job = validJobs.FirstOrDefault(x => x.JobId == jobId);
}
+ if (job == null)
+ throw new StratumException(StratumError.JobNotFound, "job not found");
+
// extract worker/miner/payoutid
var split = workerValue.Split('.');
var minerName = split[0];
@@ -627,15 +629,15 @@ namespace MiningCore.Blockchain.Bitcoin
ShareMultiplier,
coinbaseHasher, headerHasher, blockHasher);
- if (isNew)
- {
- validJobs.Clear();
-
// update stats
+ if (isNew)
BlockchainStats.LastNetworkBlockTime = clock.UtcNow;
- }
- validJobs[currentJob.JobId] = currentJob;
+ validJobs.Add(currentJob);
+
+ // trim active jobs
+ while (validJobs.Count > MaxActiveJobs)
+ validJobs.RemoveAt(0);
}
return isNew;
|
haskell-cmake-improvements: fix mountpoint | @@ -135,7 +135,7 @@ until we have finished writing the whole specification.
kdb get spec/tests/prelude/fallback/#) || \
(sudo kdb umount spec/tests/prelude && \
sudo kdb mount "$PWD/src/plugins/typechecker/typechecker/prelude.ini" spec/tests/prelude ini)
-sudo kdb mount simplespecification.ini spec/tests/simplespecification ini typechecker prelude=spec/tests/prelude
+sudo kdb mount simplespecification.ini spec/tests/typechecker ini typechecker prelude=spec/tests/prelude
echo 'kdbGet spec/tests/typechecker \
keySetName spec/tests/typechecker/key1 \
@@ -165,7 +165,7 @@ kdb setmeta spec/tests/typechecker/key2 fallback/#1 spec/tests/typechecker/key1
# RET: 5
# STDERR-REGEX: .*Couldn't match type.*
-sudo kdb umount spec/tests/simplespecification
+sudo kdb umount spec/tests/typechecker
sudo kdb umount spec/tests/prelude
```
|
neon/qrdmulh_lane: fix typo in undefs
Fixes | @@ -61,7 +61,7 @@ SIMDE_BEGIN_DECLS_
#define simde_vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_lane_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
- #undef vqrdmulhq_s16
+ #undef vqrdmulhq_lane_s16
#define vqrdmulhq_lane_s16(a, v, lane) simde_vqrdmulhq_lane_s16((a), (v), (lane))
#endif
@@ -71,7 +71,7 @@ SIMDE_BEGIN_DECLS_
#define simde_vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_lane_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_ENABLE_NATIVE_ALIASES)
- #undef vqrdmulhq_s32
+ #undef vqrdmulhq_lane_s32
#define vqrdmulhq_lane_s32(a, v, lane) simde_vqrdmulhq_lane_s32((a), (v), (lane))
#endif
@@ -101,7 +101,7 @@ SIMDE_BEGIN_DECLS_
#define simde_vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_s16((a), simde_vdupq_laneq_s16((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
- #undef vqrdmulhq_s16
+ #undef vqrdmulhq_laneq_s16
#define vqrdmulhq_laneq_s16(a, v, lane) simde_vqrdmulhq_laneq_s16((a), (v), (lane))
#endif
@@ -111,7 +111,7 @@ SIMDE_BEGIN_DECLS_
#define simde_vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_s32((a), simde_vdupq_laneq_s32((v), (lane)))
#endif
#if defined(SIMDE_ARM_NEON_A64V8_ENABLE_NATIVE_ALIASES)
- #undef vqrdmulhq_s32
+ #undef vqrdmulhq_laneq_s32
#define vqrdmulhq_laneq_s32(a, v, lane) simde_vqrdmulhq_laneq_s32((a), (v), (lane))
#endif
|
workflow/show_mnemonic: use workflow_blocking | // See the License for the specific language governing permissions and
// limitations under the License.
+#include "show_mnemonic.h"
+
+#include "blocking.h"
+#include "password.h"
+#include "status.h"
+#include "workflow.h"
+
#include <hardfault.h>
#include <random.h>
#include <ui/components/ui_components.h>
#include <ui/screen_process.h>
#include <ui/screen_stack.h>
#include <util.h>
-#include <workflow/password.h>
-#include <workflow/show_mnemonic.h>
-
-#include "workflow.h"
#define BIP39_NUM_WORDS 24
#define MAX_WORDLENGTH 20
@@ -47,12 +50,6 @@ static void _check_word(uint8_t selection);
static const char* _back_label = "Back to seed phrase";
-static bool _done = false;
-static bool _is_done(void)
-{
- return _done;
-}
-
static void _split_and_save_wordlist(uint8_t* length)
{
char* next_word = strtok(_mnemonic, " ");
@@ -132,11 +129,9 @@ static void _confirm_mnemonic(void)
// If we're at the last screen, we pop the check-word screen for the last word and replace
// the underlying BIP39 seed phrase screen. We can only replace it now, because during
// word-checking, the user might still want to go back to the seed phrase.
- ui_screen_stack_pop();
- ui_screen_stack_switch(status_create("Success", true, -1, NULL));
util_zero(_mnemonic, _mnemonic_length);
free(_mnemonic);
- _done = true;
+ workflow_blocking_unblock();
return;
}
_current_correct_idx = _create_random_unique_words(
@@ -174,10 +169,10 @@ bool workflow_show_mnemonic_create(void)
_mnemonic_length = strlens(_mnemonic);
uint8_t length;
_split_and_save_wordlist(&length);
- component_t* show_mnemonic =
- scroll_through_all_variants_create(_wordlist, NULL, length, true, _confirm_mnemonic, NULL);
- ui_screen_stack_switch(show_mnemonic);
- _done = false;
- ui_screen_process(_is_done);
- return true;
+ ui_screen_stack_push(
+ scroll_through_all_variants_create(_wordlist, NULL, length, true, _confirm_mnemonic, NULL));
+ bool unblock_result = workflow_blocking_block();
+ ui_screen_stack_pop();
+ workflow_status_create("Success", true);
+ return unblock_result;
}
|
blocklevel: smart_write: Deny writes intersecting ECC protected regions
Other code paths don't handle writes spanning mixed regions, and it's a
headache, so deny it here too. | @@ -512,6 +512,7 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const voi
uint64_t ecc_start;
void *erase_buf;
int rc = 0;
+ int ecc_protection;
if (!buf || !bl) {
errno = EINVAL;
@@ -529,7 +530,14 @@ int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const voi
if (rc)
return rc;
- if (ecc_protected(bl, pos, len, &ecc_start)) {
+ ecc_protection = ecc_protected(bl, pos, len, &ecc_start);
+ if (ecc_protection == -1) {
+ FL_ERR("%s: Can't cope with partial ecc\n", __func__);
+ errno = EINVAL;
+ return FLASH_ERR_PARM_ERROR;
+ }
+
+ if (ecc_protection) {
FL_DBG("%s: region has ECC\n", __func__);
len = ecc_buffer_size(len);
|
Disable check_handle_against_spec | @@ -400,11 +400,12 @@ static void print_values(grib_context* c, const grib_util_grid_spec2* spec,
#endif
}
+/*
static int DBL_EQUAL(double d1, double d2, double tolerance)
{
return fabs(d1-d2) < tolerance;
}
-
+*/
/* Returns a boolean: 1 if angle can be encoded, 0 otherwise */
static int grib1_angle_can_be_encoded(const double angle)
{
@@ -418,7 +419,7 @@ static int grib1_angle_can_be_encoded(const double angle)
}
/* Returns a boolean: 1 if angle is too small, 0 otherwise */
-static int angle_too_small(const double angle, const double angular_precision)
+/*static int angle_too_small(const double angle, const double angular_precision)
{
const double a = fabs(angle);
if (a > 0 && a < angular_precision) return 1;
@@ -430,8 +431,9 @@ static double normalise_angle(double angle)
while (angle<0) angle += 360;
while (angle>360) angle -= 360;
return angle;
-}
+}*/
+#if 0
/* Check what is coded in the handle is what is requested by the spec. */
/* Return GRIB_SUCCESS if the geometry matches, otherwise the error code */
static int check_handle_against_spec(grib_handle* handle, const long edition,
@@ -557,6 +559,7 @@ static int check_handle_against_spec(grib_handle* handle, const long edition,
}
return GRIB_SUCCESS;
}
+#endif
static const char* get_grid_type_name(const int spec_grid_type)
{
@@ -1211,8 +1214,10 @@ grib_handle* grib_util_set_spec2(grib_handle* h,
}
}
- if (h->context->debug==-1)
+ if (h->context->debug==-1) {
+ printf("ECCODES DEBUG grib_util: global_grid = %d\n", global_grid);
print_values(h->context,spec,data_values,data_values_count,values,count);
+ }
if((*err = grib_set_values(outh,values,count)) != 0)
{
@@ -1341,9 +1346,10 @@ grib_handle* grib_util_set_spec2(grib_handle* h,
}
/* Disable check: need to re-examine GRIB-864 */
+#if 0
if ( (*err = check_handle_against_spec(outh, editionNumber, spec, global_grid)) != GRIB_SUCCESS)
{
-#if 0
+
grib_context* c=grib_context_get_default();
fprintf(stderr,"GRIB_UTIL_SET_SPEC: Geometry check failed! %s\n", grib_get_error_message(*err));
if (editionNumber == 1) {
@@ -1352,9 +1358,8 @@ grib_handle* grib_util_set_spec2(grib_handle* h,
if (c->write_on_fail)
grib_write_message(outh,"error.grib","w");
goto cleanup;
-#endif
}
-
+#endif
if (h->context->debug==-1)
printf("ECCODES DEBUG: grib_util_set_spec end\n");
|
updated mo depth | @@ -427,7 +427,7 @@ void InitializeOptionsMagmOc(OPTIONS *options,fnReadOption fnRead[]) {
options[OPT_WATERMASSATM].iType = 2;
options[OPT_WATERMASSATM].bMultiFile = 1;
options[OPT_WATERMASSATM].dNeg = TOMASS; // for input: factor to mulitply for SI - for output: divide (e.g. 1/TOMASS)
- options[OPT_WATERMASSATM].dDefault = 1;
+ options[OPT_WATERMASSATM].dDefault = TOMASS;
sprintf(options[OPT_WATERMASSATM].cNeg,"Terrestrial Oceans");
fnRead[OPT_WATERMASSATM] = &ReadWaterMassAtm;
@@ -487,7 +487,7 @@ void InitializeOptionsMagmOc(OPTIONS *options,fnReadOption fnRead[]) {
options[OPT_DEPTHMO].iType = 2;
options[OPT_DEPTHMO].bMultiFile = 1;
options[OPT_DEPTHMO].dNeg = 1e3;
- options[OPT_DEPTHMO].dDefault = 1e6;
+ options[OPT_DEPTHMO].dDefault = 1e9;
sprintf(options[OPT_DEPTHMO].cNeg,"km");
fnRead[OPT_DEPTHMO] = &ReadDepthMO;
@@ -580,14 +580,14 @@ void InitializeBodyMagmOc(BODY *body,CONTROL *control,UPDATE *update,int iBody,i
dSolidRadiusLocalHigh = body[iBody].dRadius - ( (BHIGHPRESSURE-body[iBody].dPotTemp) / (body[iBody].dGravAccelSurf*(body[iBody].dPotTemp*THERMALEXPANCOEFF/SILICATEHEATCAP - AHIGHPRESSURE*body[iBody].dManMeltDensity)));
body[iBody].dSolidRadius = fmin(dSolidRadiusLocalLow,dSolidRadiusLocalHigh);
+ if (body[iBody].dDepthMO < 9e8) {
+ body[iBody].dSolidRadius = body[iBody].dRadius - body[iBody].dDepthMO;
+ }
+
if (body[iBody].dSolidRadius < body[iBody].dCoreRadius) {
body[iBody].dSolidRadius = body[iBody].dCoreRadius;
}
- // if (body[iBody].dDepthMO < 9e8) {
- // body[iBody].dSolidRadius = body[iBody].dRadius - body[iBody].dDepthMO;
- // }
-
// other variables
double dTransPressSol = 5.19964e9; // pressure at which to swith from low to high pressure treatment of solidus (Hirschmann, 2000) in Pa
body[iBody].dPrefactorA = AHIGHPRESSURE;
|
doc: Fix description of pg_replication_origin_oid() in error case
This function returns NULL if the replication origin given in input
argument does not exist, contrary to what the docs described
previously.
Author: Ian Barwick
Discussion:
Backpatch-through: 10 | @@ -21095,8 +21095,9 @@ postgres=# SELECT * FROM pg_walfile_name_offset(pg_stop_backup());
<type>oid</type>
</entry>
<entry>
- Lookup a replication origin by name and return the internal id. If no
- corresponding replication origin is found an error is thrown.
+ Lookup a replication origin by name and return the internal ID. If
+ no such replication origin is found, <literal>NULL</literal> is
+ returned.
</entry>
</row>
|
Lazy way to report errors while parsing assembly report | @@ -87,6 +87,7 @@ namespace ebi
size_t const default_line_buffer_size = 64 * 1024;
size_t const assembly_report_column_count = 10;
report_line.reserve(default_line_buffer_size);
+ std::vector<std::string> assembly_report_errors; // to contain errors while parsing
int contig_index = 0, line_num = -1;
while (util::readline(report, report_line).size() != 0) {
@@ -100,11 +101,9 @@ namespace ebi
boost::algorithm::trim(line);
util::string_split(line, "\t", columns);
if(columns.size() != assembly_report_column_count) {
- // todo : think some lazy way of it. process whole file before throwing the error
- throw std::runtime_error("Error while parsing assembly report on line num : "
- + std::to_string(line_num) + ", "
- + " found " + std::to_string(columns.size()) + " columns, "
- + " expected " + std::to_string(assembly_report_column_count));
+ std::string error = "Expected " + std::to_string(assembly_report_column_count)
+ + " columns, found " + std::to_string(columns.size()) + "\n";
+ assembly_report_errors.push_back(error);
}
auto contig_synonyms = extract_synonyms(columns);
@@ -115,6 +114,14 @@ namespace ebi
contig_index++;
}
+
+ if (!assembly_report_errors.empty()) {
+ std::string error_report = "Some errors occurred while parsing assembly report file \n";
+ for(auto error : assembly_report_errors) {
+ error_report += error;
+ }
+ throw std::runtime_error(error_report);
+ }
}
};
}
|
crypto-native: fix ghash function naming
Type: refactor | @@ -124,13 +124,13 @@ gmul_lo_lo (u8x16 a, u8x16 b)
}
static_always_inline u8x16
-gmul_lo_hi (u8x16 a, u8x16 b)
+gmul_hi_lo (u8x16 a, u8x16 b)
{
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x01);
}
static_always_inline u8x16
-gmul_hi_lo (u8x16 a, u8x16 b)
+gmul_lo_hi (u8x16 a, u8x16 b)
{
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x10);
}
@@ -165,7 +165,7 @@ ghash_mul_first (ghash_data_t * gd, u8x16 a, u8x16 b)
/* a0 * b0 */
gd->lo = gmul_lo_lo (a, b);
/* a0 * b1 ^ a1 * b0 */
- gd->mid = (gmul_lo_hi (a, b) ^ gmul_hi_lo (a, b));
+ gd->mid = (gmul_hi_lo (a, b) ^ gmul_lo_hi (a, b));
/* set gd->pending to 0 so next invocation of ghash_mul_next(...) knows that
there is no pending data in tmp_lo and tmp_hi */
@@ -198,7 +198,7 @@ ghash_mul_next (ghash_data_t * gd, u8x16 a, u8x16 b)
}
/* gd->mid ^= a0 * b1 ^ a1 * b0 */
- gd->mid = ghash_xor3 (gd->mid, gmul_lo_hi (a, b), gmul_hi_lo (a, b));
+ gd->mid = ghash_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
}
static_always_inline void
@@ -223,7 +223,7 @@ ghash_reduce (ghash_data_t * gd)
gd->hi ^= midr;
}
- r = gmul_lo_hi (ghash_poly2, gd->lo);
+ r = gmul_hi_lo (ghash_poly2, gd->lo);
gd->lo ^= u8x16_word_shift_left (r, 8);
}
@@ -231,7 +231,7 @@ static_always_inline void
ghash_reduce2 (ghash_data_t * gd)
{
gd->tmp_lo = gmul_lo_lo (ghash_poly2, gd->lo);
- gd->tmp_hi = gmul_hi_lo (ghash_poly2, gd->lo);
+ gd->tmp_hi = gmul_lo_hi (ghash_poly2, gd->lo);
}
static_always_inline u8x16
|
Check if ARRAY_SIZE is already defined
Check if ARRAY_SIZE is already defined before defining in the OS | ((type *)(((char *)(ptr)) - offsetof(type, field)))
/* Helper to calculate number of elements in array */
+#ifndef ARRAY_SIZE
#define ARRAY_SIZE(array) \
(sizeof(array) / sizeof((array)[0]))
-
+#endif
#endif
|
Have that the RAC+Zlib Leaf TTag must be 0xFF | @@ -468,8 +468,9 @@ is stored big-endian, like Zlib's other checksums, and its 4 byte value must
match the `DICTID` (in RFC 1950 terminology) given in the `Primary CRange`'s
Zlib-formatted data.
-The `Tertiary CRange` and `Leaf TTag` values are ignored. The `Leaf STag` value
-is also ignored, other than deriving the `Secondary CRange`.
+The `Leaf TTag` must be `0xFF`. All other `Leaf TTag` values (below `0xC0`) are
+reserved. The empty `Tertiary CRange` is ignored. The `Leaf STag` value is also
+ignored, other than deriving the `Secondary CRange`.
## RAC + Brotli
|
Remove obsolete server variables
Removed in | @@ -47,11 +47,9 @@ main(int argc, /* I - Number of command-line args */
"ErrorLog",
"FatalErrors",
"FileDevice",
- "FontPath",
"Group",
"Listen",
"LogFilePerm",
- "LPDConfigFile",
"PageLog",
"PassEnv",
"Port",
@@ -65,7 +63,6 @@ main(int argc, /* I - Number of command-line args */
"ServerKeychain",
"ServerRoot",
"SetEnv",
- "SMBConfigFile",
"StateDir",
"SystemGroup",
"SystemGroupAuthKey",
|
More TLSv1.3 cookie tests
Test sending a cookie without a key_share | @@ -28,6 +28,11 @@ plan skip_all => "$test_name needs TLS1.3 enabled"
$ENV{OPENSSL_ia32cap} = '~0x200000200000000';
+use constant {
+ COOKIE_ONLY => 0,
+ COOKIE_AND_KEY_SHARE => 1
+};
+
my $proxy = TLSProxy::Proxy->new(
undef,
cmdstr(app(["openssl"]), display => 1),
@@ -36,22 +41,31 @@ my $proxy = TLSProxy::Proxy->new(
);
my $cookieseen = 0;
+my $testtype;
#Test 1: Inserting a cookie into an HRR should see it echoed in the ClientHello
+$testtype = COOKIE_ONLY;
$proxy->filter(\&cookie_filter);
-$proxy->serverflags("-curves P-256");
+$proxy->serverflags("-curves X25519");
$proxy->start() or plan skip_all => "Unable to start up Proxy for tests";
-plan tests => 1;
+plan tests => 2;
+ok(TLSProxy::Message->success() && $cookieseen == 1, "Cookie seen");
+
+#Test 2: Same as test 1 but should also work where a new key_share is also
+# required
+$testtype = COOKIE_AND_KEY_SHARE;
+$proxy->clear();
+$proxy->clientflags("-curves P-256:X25519");
+$proxy->serverflags("-curves X25519");
+$proxy->start();
ok(TLSProxy::Message->success() && $cookieseen == 1, "Cookie seen");
sub cookie_filter
{
my $proxy = shift;
- # We're only interested in the HRR and subsequent ClientHello
- if ($proxy->flight != 1 && $proxy->flight != 2) {
- return;
- }
+ # We're only interested in the HRR and both ClientHellos
+ return if ($proxy->flight > 2);
my $ext = pack "C8",
0x00, 0x06, #Cookie Length
@@ -60,12 +74,27 @@ sub cookie_filter
0x04, 0x05;
foreach my $message (@{$proxy->message_list}) {
- if ($message->mt == TLSProxy::Message::MT_HELLO_RETRY_REQUEST) {
-
+ if ($message->mt == TLSProxy::Message::MT_HELLO_RETRY_REQUEST
+ && ${$message->records}[0]->flight == 1) {
+ $message->delete_extension(TLSProxy::Message::EXT_KEY_SHARE)
+ if ($testtype == COOKIE_ONLY);
$message->set_extension(TLSProxy::Message::EXT_COOKIE, $ext);
$message->repack();
- } elsif ($message->mt == TLSProxy::Message::MT_CLIENT_HELLO
- && ${$message->records}[0]->flight == 2) {
+ } elsif ($message->mt == TLSProxy::Message::MT_CLIENT_HELLO) {
+ if (${$message->records}[0]->flight == 0) {
+ if ($testtype == COOKIE_ONLY) {
+ my $ext = pack "C7",
+ 0x00, 0x05, #List Length
+ 0x00, 0x17, #P-256
+ 0x00, 0x01, #key_exchange data length
+ 0xff; #Dummy key_share data
+ # Trick the server into thinking we got an unacceptable
+ # key_share
+ $message->set_extension(
+ TLSProxy::Message::EXT_KEY_SHARE, $ext);
+ $message->repack();
+ }
+ } else {
#cmp can behave differently dependent on locale
no locale;
my $cookie =
@@ -79,3 +108,4 @@ sub cookie_filter
}
}
}
+}
|
spi: fix spi eeprom example build
Fix GNU make build error introduced in | # (Uses default behaviour of compiling all source files in directory, adding 'include' to include path.)
COMPONENT_ADD_LDFRAGMENTS += linker.lf
+COMPONENT_ADD_INCLUDEDIRS := .
|
common/power_button.c: Format with clang-format
BRANCH=none
TEST=none | @@ -41,8 +41,11 @@ static const struct button_config power_button = {
int power_button_signal_asserted(void)
{
- return !!(gpio_get_level(power_button.gpio)
- == (power_button.flags & BUTTON_FLAG_ACTIVE_HIGH) ? 1 : 0);
+ return !!(
+ gpio_get_level(power_button.gpio) ==
+ (power_button.flags & BUTTON_FLAG_ACTIVE_HIGH) ?
+ 1 :
+ 0);
}
/**
@@ -93,8 +96,8 @@ int power_button_wait_for_release(int timeout_us)
* the power button is debounced but not changed, or the power
* button has not been debounced.
*/
- task_wait_event(MIN(power_button.debounce_us,
- deadline.val - now.val));
+ task_wait_event(
+ MIN(power_button.debounce_us, deadline.val - now.val));
}
CPRINTS("%s released in time", power_button.name);
@@ -164,8 +167,8 @@ static void power_button_change_deferred(void)
debounced_power_pressed = new_pressed;
power_button_is_stable = 1;
- CPRINTS("%s %s",
- power_button.name, new_pressed ? "pressed" : "released");
+ CPRINTS("%s %s", power_button.name,
+ new_pressed ? "pressed" : "released");
/* Call hooks */
hook_notify(HOOK_POWER_BUTTON_CHANGE);
@@ -221,6 +224,5 @@ static int command_powerbtn(int argc, char **argv)
return EC_SUCCESS;
}
-DECLARE_CONSOLE_COMMAND(powerbtn, command_powerbtn,
- "[msec]",
+DECLARE_CONSOLE_COMMAND(powerbtn, command_powerbtn, "[msec]",
"Simulate power button press");
|
Mention hslua-classes in README | @@ -43,11 +43,14 @@ Below are the packages which make up HsLua:
making the relevant types instances of QuickCheck's Arbitrary
typeclass.
- - **hslua-core**: Wrappers and types which make working with Lua
+ - **hslua-core**: Wrappers and types that make working with Lua
less C-like and more idiomatic -- from a Haskell point of
view.
- - **tasty-hslua**: Helper functions for writing tasty tests to
- check Lua operations.
+ - **hslua-classes**: Type classes that can make interfacing with
+ Lua more convenient.
- **hslua**: Additional helpers and convenience mechanisms.
+
+ - **tasty-hslua**: Helper functions for writing tasty tests to
+ check Lua operations.
|
db-path, default db folder extended with PID | @@ -687,8 +687,8 @@ Only if configured with --enable-tcb=btree
.TP
\fB\-\-db-path=<dir>
Path where the on-disk database files are stored. The default value is the
-.I /tmp
-directory.
+.I /tmp/goaccess<PID>
+directory (created on-demand).
Only if configured with --enable-tcb=btree
.TP
|
out_file: add complementary 'path' and 'file' options | #endif
struct flb_file_conf {
+ const char *out_path;
const char *out_file;
const char *delimiter;
const char *label_delimiter;
@@ -315,7 +316,7 @@ static void cb_file_flush(const void *data, size_t bytes,
size_t last_off = 0;
size_t alloc_size = 0;
size_t total;
- const char *out_file;
+ char out_file[PATH_MAX];
char *buf;
char *tag_buf;
msgpack_object *obj;
@@ -324,12 +325,24 @@ static void cb_file_flush(const void *data, size_t bytes,
(void) i_ins;
(void) config;
- /* Set the right output */
- if (!ctx->out_file) {
- out_file = tag;
+ /* Set the right output file */
+ if (ctx->out_path) {
+ if (ctx->out_file) {
+ snprintf(out_file, PATH_MAX - 1, "%s/%s",
+ ctx->out_path, ctx->out_file);
+ }
+ else {
+ snprintf(out_file, PATH_MAX - 1, "%s/%s",
+ ctx->out_path, tag);
+ }
+ }
+ else {
+ if (ctx->out_file) {
+ snprintf(out_file, PATH_MAX - 1, "%s", ctx->out_file);
}
else {
- out_file = ctx->out_file;
+ snprintf(out_file, PATH_MAX - 1, "%s", tag);
+ }
}
/* Open output file with default name as the Tag */
@@ -438,6 +451,11 @@ static int cb_file_exit(void *data, struct flb_config *config)
static struct flb_config_map config_map[] = {
{
FLB_CONFIG_MAP_STR, "path", NULL,
+ 0, FLB_TRUE, offsetof(struct flb_file_conf, out_path),
+ NULL
+ },
+ {
+ FLB_CONFIG_MAP_STR, "file", NULL,
0, FLB_TRUE, offsetof(struct flb_file_conf, out_file),
NULL
},
|
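A standalone C sketch of the four-way filename resolution the new path/file options introduce (an explicit file wins over the tag, and the path is prepended when set); buffer handling is simplified and the names are illustrative, not the Fluent Bit API.

#include <stdio.h>

static void resolve_out_file(char *out, size_t n, const char *path,
                             const char *file, const char *tag) {
    const char *name = file ? file : tag;   /* explicit file beats the tag */
    if (path)
        snprintf(out, n, "%s/%s", path, name);
    else
        snprintf(out, n, "%s", name);
}

int main(void) {
    char buf[4096];
    resolve_out_file(buf, sizeof(buf), "/var/log/flb", NULL, "app.logs");
    printf("%s\n", buf);   /* /var/log/flb/app.logs */
    resolve_out_file(buf, sizeof(buf), NULL, "out.txt", "app.logs");
    printf("%s\n", buf);   /* out.txt */
    return 0;
}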
Import Haskell functions instead of importing via FFI
The Raw.Error module used FFI bindings to access some functions to avoid
circular module dependencies. This is no longer a concern, so we can use
the Haskell bindings without having to FFI import the raw functions
again. | @@ -16,10 +16,11 @@ module Foreign.Lua.Raw.Error
) where
import Data.ByteString (ByteString)
-import Foreign.C (CChar, CInt (CInt), CSize (..))
-import Foreign.Lua.Raw.Types (Lua, StackIndex)
+import Foreign.Lua.Raw.Auxiliary (hsluaL_tolstring)
+import Foreign.Lua.Raw.Functions (lua_pop, lua_pushlstring)
+import Foreign.Lua.Raw.Types (Lua)
import Foreign.Marshal.Alloc (alloca)
-import Foreign.Ptr (Ptr, nullPtr)
+import Foreign.Ptr (nullPtr)
import qualified Data.ByteString as B
import qualified Data.ByteString.Char8 as Char8
@@ -43,12 +44,6 @@ errorMessage l = alloca $ \lenPtr -> do
lua_pop l 2
return msg
-foreign import ccall safe "error-conversion.h hsluaL_tolstring"
- hsluaL_tolstring :: Lua.State -> StackIndex -> Ptr CSize -> IO (Ptr CChar)
-
-foreign import capi unsafe "lua.h lua_pop"
- lua_pop :: Lua.State -> CInt -> IO ()
-
-- | Helper function which uses proper error-handling to throw an
-- exception with the given message.
throwMessage :: String -> Lua a
@@ -57,6 +52,3 @@ throwMessage msg = do
B.unsafeUseAsCStringLen (Utf8.fromString msg) $ \(msgPtr, z) ->
lua_pushlstring l msgPtr (fromIntegral z)
Lua.errorConversion >>= Lua.liftLua . Lua.errorToException
-
-foreign import capi unsafe "lua.h lua_pushlstring"
- lua_pushlstring :: Lua.State -> Ptr CChar -> CSize -> IO ()
|
fix example which is not intended to write to flash, but actually was | @@ -453,6 +453,7 @@ Sets the WiFi station configuration.
station_cfg={}
station_cfg.ssid="NODE-AABBCC"
station_cfg.pwd="password"
+station_cfg.save=false
wifi.sta.config(station_cfg)
--connect to Access Point (DO save config to flash)
@@ -462,14 +463,14 @@ station_cfg.pwd="password"
station_cfg.save=true
wifi.sta.config(station_cfg)
---connect to Access Point with specific MAC address
+--connect to Access Point with specific MAC address (DO save config to flash)
station_cfg={}
station_cfg.ssid="NODE-AABBCC"
station_cfg.pwd="password"
station_cfg.bssid="AA:BB:CC:DD:EE:FF"
wifi.sta.config(station_cfg)
---configure station but don't connect to Access point
+--configure station but don't connect to Access point (DO save config to flash)
station_cfg={}
station_cfg.ssid="NODE-AABBCC"
station_cfg.pwd="password"
|
Build: Fix macbuild.tool not removing files from previous archive | @@ -63,7 +63,7 @@ package() {
cp -r "${selfdir}/UDK/OcSupportPkg/Utilities/Recovery" tmp/Utilities/ || exit 1
cp -r "${selfdir}/UDK/OcSupportPkg/Utilities/BootInstall" tmp/Utilities/ || exit 1
pushd tmp || exit 1
- zip -qry ../"OpenCore-v${ver}-${2}.zip" * || exit 1
+ zip -qry -FS ../"OpenCore-v${ver}-${2}.zip" * || exit 1
popd || exit 1
rm -rf tmp || exit 1
popd || exit 1
|
Fix rgb10a2 constant; | @@ -150,7 +150,7 @@ const char* TextureFormats[] = {
[FORMAT_RG16F] = "rg16f",
[FORMAT_RG32F] = "rg32f",
[FORMAT_RGB5A1] = "rgb5a1",
- [FORMAT_RGB10A2] = "rgb5a2",
+ [FORMAT_RGB10A2] = "rgb10a2",
[FORMAT_RG11B10F] = "rg11b10f",
[FORMAT_D16] = "d16",
[FORMAT_D32F] = "d32f",
|
[CHAIN] minor logging for TODO | @@ -73,6 +73,8 @@ func (core *Core) init(dbType string, dataDir string, testModeOn bool, forceRese
return err
}
+ // TODO recovery for crashed chain mapping
+
if forceResetHeight > 0 {
if err := core.cdb.ResetBest(forceResetHeight); err != nil {
logger.Fatal().Err(err).Uint64("height", forceResetHeight).Msg("failed to reset chaindb")
|
publish: join.js proper catch and UI transition | @@ -3,19 +3,33 @@ import classnames from 'classnames';
import { Route, Link } from 'react-router-dom';
import urbitOb from 'urbit-ob';
-//TODO textarea + join button to make an api call
export class JoinScreen extends Component {
constructor(props) {
super(props);
this.state = {
book: '/',
- error: false
+ error: false,
+ awaiting: null
};
this.bookChange = this.bookChange.bind(this);
}
+ componentDidUpdate() {
+ if (this.props.notebooks) {
+ if (this.state.awaiting) {
+ let book = this.state.awaiting.split("/");
+ let ship = book[0];
+ let notebook = book[1];
+ if ((ship in this.props.notebooks) &&
+ (notebook in this.props.notebooks[ship])) {
+ this.props.history.push(`/~publish/notebook/${ship}/${notebook}`)
+ }
+ }
+ }
+ }
+
notebooksInclude(text, notebookObj) {
let verdict = false;
let keyPair = [];
@@ -40,16 +54,17 @@ export class JoinScreen extends Component {
let text = state.book;
// an error condition to prevent double joins?
- if (this.notebooksInclude(state.book,props.notebooks) ||
- text.length === 0) {
- props.history.push('/~publish');
- }
let book = text.split('/');
let ship = book[0];
book.splice(0, 1);
book = '/' + book.join('/');
+ if (this.notebooksInclude(state.book, props.notebooks)) {
+ let href = `/~publish/notebook/${ship}${book}`
+ return props.history.push(href);
+ }
+
if (book.length < 2 || !urbitOb.isValidPatp(ship)) {
this.setState({
error: true,
@@ -65,7 +80,13 @@ export class JoinScreen extends Component {
}
// TODO: askHistory setting
- window.api.action("publish","publish-action", actionData);
+ window.api.setSpinner(true);
+ window.api.action("publish","publish-action", actionData).catch((err) => {
+ console.log(err)
+ }).then(() => {
+ this.setState({awaiting: text})
+ window.api.setSpinner(false);
+ });
}
|
framework/arastorage: refine exceptional protection of attribute add
In function relation_attribute_add, modify the output parameter rel only after
storage_put_attribute succeeds; otherwise the modification of rel would have to
be rolled back when putting the attribute fails.
attribute->index = NULL;
attribute->flags = 0 /*ATTRIBUTE_FLAG_UNIQUE */ ;
- rel->row_length += element_size;
-
- list_add(rel->attributes, attribute);
- rel->attribute_count++;
-
if (dir == DB_STORAGE) {
if (DB_ERROR(storage_put_attribute(rel, attribute))) {
DB_LOG_E("DB: Failed to store attribute %s\n", attribute->name);
@@ -369,6 +364,10 @@ attribute_t *relation_attribute_add(relation_t *rel, db_direction_t dir, char *n
}
}
+ rel->row_length += element_size;
+ list_add(rel->attributes, attribute);
+ rel->attribute_count++;
+
return attribute;
}
|
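A minimal, generic C sketch of the ordering rule the commit above applies: run the failure-prone operation first and mutate shared state only after it succeeds, so no rollback is needed. Names are illustrative, not the arastorage API.

#include <stdio.h>

struct relation { int attribute_count; int row_length; };

static int storage_put(const char *name) {
    /* Stand-in for a persistence step that can fail. */
    return name != NULL ? 0 : -1;
}

static int attribute_add(struct relation *rel, const char *name, int element_size) {
    /* Do the operation that can fail first... */
    if (storage_put(name) != 0)
        return -1;                 /* rel is untouched, nothing to roll back */

    /* ...and only then mutate the relation. */
    rel->row_length += element_size;
    rel->attribute_count++;
    return 0;
}

int main(void) {
    struct relation rel = { 0, 0 };
    printf("add ok=%d count=%d\n", attribute_add(&rel, "time", 8) == 0, rel.attribute_count);
    printf("add ok=%d count=%d\n", attribute_add(&rel, NULL, 8) == 0, rel.attribute_count);
    return 0;
}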
CCode: Mark problematic code reported by OCLint | @@ -58,8 +58,7 @@ CppKeySet percentConfig ()
void testRoundTrip (string const decodedString, string const encodedString = "", CppKeySet config = defaultConfig ())
#ifdef __llvm__
- __attribute__ ((annotate ("oclint:suppress[empty if statement]"), annotate ("oclint:suppress[high cyclomatic complexity]"),
- annotate ("oclint:suppress[high ncss method]"), annotate ("oclint:suppress[too few branches in switch statement]")))
+ __attribute__ ((annotate ("oclint:suppress[high cyclomatic complexity]"), annotate ("oclint:suppress[high ncss method]")))
#endif
{
CppKeySet modules{ 0, KS_END };
@@ -67,22 +66,24 @@ void testRoundTrip (string const decodedString, string const encodedString = "",
CppKey parent{ "system/elektra/modules/type", KEY_END };
Plugin * plugin = elektraPluginOpen ("ccode", modules.getKeySet (), config.getKeySet (), *parent);
- exit_if_fail (plugin != NULL, "Could not open ccode plugin");
+ exit_if_fail (plugin != NULL, "Could not open ccode plugin"); //! OCLint (empty if, too few branches switch)
CppKeySet keys{ 20, keyNew ("user/tests/ccode/key", KEY_VALUE, decodedString.c_str (), KEY_END), KS_END };
- succeed_if_same (plugin->kdbSet (plugin, keys.getKeySet (), *parent), ELEKTRA_PLUGIN_STATUS_SUCCESS,
- "Call of `kdbset` was not successful");
+ succeed_if_same (plugin->kdbSet (plugin, keys.getKeySet (), *parent), //! OCLint (empty if, too few branches switch)
+ ELEKTRA_PLUGIN_STATUS_SUCCESS, "Call of `kdbset` was not successful");
if (!encodedString.empty ())
{
CppKey encoded = keys.lookup ("user/tests/ccode/key");
- succeed_if_same (encoded.getString (), encodedString, "String not correctly encoded");
+ succeed_if_same (encoded.getString (), encodedString, //! OCLint (empty if, too few branches switch)
+ "String not correctly encoded");
}
- succeed_if_same (plugin->kdbGet (plugin, keys.getKeySet (), *parent), ELEKTRA_PLUGIN_STATUS_SUCCESS,
- "Call of `kdbGet` was not successful");
+ succeed_if_same (plugin->kdbGet (plugin, keys.getKeySet (), *parent), //! OCLint (empty if, too few branches switch)
+ ELEKTRA_PLUGIN_STATUS_SUCCESS, "Call of `kdbGet` was not successful");
CppKey decoded = keys.lookup ("user/tests/ccode/key");
- succeed_if_same (decoded.getString (), decodedString, "String not correctly decoded");
+ succeed_if_same (decoded.getString (), decodedString, //! OCLint (empty if, too few branches switch)
+ "String not correctly decoded");
elektraPluginClose (plugin, 0);
ksDel (modules.release ());
|
Extract cache.flush() method from cache.stop() | @@ -386,31 +386,39 @@ class Cache:
return self.io_queues[0]
def stop(self, flush: bool = True):
+ if flush:
+ self.flush()
+
self.get_and_write_lock()
- if flush:
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
- self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, False, c, None)
+
+ self.owner.lib.ocf_mngt_cache_stop(self.cache_handle, c, None)
+
c.wait()
if c.results["error"]:
self.put_and_write_unlock()
- raise OcfError("Couldn't flush cache", c.results["error"])
+ raise OcfError("Failed stopping cache", c.results["error"])
+
+ self.put_and_write_unlock()
+ self.owner.caches.remove(self)
+
+ def flush(self):
+ self.get_and_write_lock()
c = OcfCompletion(
[("cache", c_void_p), ("priv", c_void_p), ("error", c_int)]
)
-
- self.owner.lib.ocf_mngt_cache_stop(self.cache_handle, c, None)
-
+ self.owner.lib.ocf_mngt_cache_flush(self.cache_handle, False, c, None)
c.wait()
if c.results["error"]:
self.put_and_write_unlock()
- raise OcfError("Failed stopping cache", c.results["error"])
+ raise OcfError("Couldn't flush cache", c.results["error"])
self.put_and_write_unlock()
- self.owner.caches.remove(self)
+
lib = OcfLib.getInstance()
lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
|
DeviceJS: Silence debug output | @@ -50,7 +50,6 @@ JsResourceItem::~JsResourceItem()
{
if (item)
{
- DBG_Printf(DBG_INFO, "dtor %s\n", item->descriptor().suffix);
item = nullptr;
}
}
@@ -95,7 +94,7 @@ void JsResourceItem::setValue(const QVariant &val)
{
if (item)
{
- DBG_Printf(DBG_INFO, "JsResourceItem.setValue(%s) = %s\n", item->descriptor().suffix, qPrintable(val.toString()));
+// DBG_Printf(DBG_INFO, "JsResourceItem.setValue(%s) = %s\n", item->descriptor().suffix, qPrintable(val.toString()));
item->setValue(val, ResourceItem::SourceDevice);
}
}
|
os_web: Unregister callbacks in lovrPlatformDestroy; | @@ -204,7 +204,15 @@ bool lovrPlatformInit() {
}
void lovrPlatformDestroy() {
- //
+ emscripten_set_beforeunload_callback(NULL, NULL);
+ emscripten_set_focus_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_blur_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_resize_callback(EMSCRIPTEN_EVENT_TARGET_WINDOW, NULL, true, NULL);
+ emscripten_set_mousedown_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_mouseup_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_mousemove_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_keydown_callback(CANVAS, NULL, true, NULL);
+ emscripten_set_keyup_callback(CANVAS, NULL, true, NULL);
}
const char* lovrPlatformGetName() {
|
stm32l4: set LSE oscillator to medium drive strength to increase RTC precision
/* Set DBP bit */
*(stm32_common.pwr + pwr_cr1) |= 1 << 8;
+ hal_cpuDataBarrier();
- /* Enable LSE clock source */
- *(stm32_common.rcc + rcc_bdcr) |= 1;
-
+ /* Enable LSE clock source, set it as RTC source and set medium xtal drive strength */
+ t = *(stm32_common.rcc + rcc_bdcr) & ~((3 << 24) | (3 << 15) | (3 << 8) | 0x7f);
+ *(stm32_common.rcc + rcc_bdcr) = t | (1 << 25) | (1 << 15) | (1 << 8) | (1 << 3) | 1;
hal_cpuDataBarrier();
/* And wait for it to turn on */
while (!(*(stm32_common.rcc + rcc_bdcr) & (1 << 1)));
- *(stm32_common.rcc + rcc_bdcr) |= 1 << 25;
-
- /* Initialize RTC */
-
- /* Select LSE as clock source for RTC and LCD */
- *(stm32_common.rcc + rcc_bdcr) = (*(stm32_common.rcc + rcc_bdcr) & ~(0x3 << 8)) | (1 << 8);
-
/* Select system clock for ADC */
*(stm32_common.rcc + rcc_ccipr) |= 0x3 << 28;
hal_cpuDataBarrier();
+ /* Initialize RTC */
/* Unlock RTC */
_stm32_rtcUnlockRegs();
|
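A hedged sketch of the register-access pattern in this change: clear the relevant fields with a read-modify-write, write the new configuration in one store, then poll a ready flag. The register and bit layout below are stand-ins, not the real STM32L4 RCC map.

#include <stdint.h>

static volatile uint32_t fake_bdcr;   /* illustrative, not a real peripheral */

#define LSE_ON        (1u << 0)
#define LSE_READY     (1u << 1)
#define LSE_DRV_MASK  (3u << 3)
#define LSE_DRV_MED   (1u << 3)
#define RTC_SEL_MASK  (3u << 8)
#define RTC_SEL_LSE   (1u << 8)

static void lse_enable(void) {
    /* Read-modify-write: clear the fields we own, then set the new values in one store. */
    uint32_t t = fake_bdcr & ~(RTC_SEL_MASK | LSE_DRV_MASK);
    fake_bdcr = t | RTC_SEL_LSE | LSE_DRV_MED | LSE_ON;

    /* Poll the oscillator ready flag (real hardware would set it; here a stub does). */
    fake_bdcr |= LSE_READY;
    while (!(fake_bdcr & LSE_READY))
        ;
}

int main(void) {
    lse_enable();
    return 0;
}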
Added --log-size command line option to man page. | @@ -439,6 +439,10 @@ FILE OPTIONS
Specify the path to the input log file. If set in the config file, it will take
priority over -f from the command line.
.TP
+\fB\-S \-\-log-size=<bytes>
+Specify the log size in bytes. This is useful when piping in logs for
+processing in which the log size can be explicitly set.
+.TP
\fB\-l \-\-debug-file=<debugfile>
Send all debug messages to the specified file.
.TP
|
options: fix segfault when TERM or HOME isn't set
Regression from if TERM isn't
set in the environment, getenv returns NULL which we shouldn't strcmp.
Similarly guard against using home_dir if it's null when looking for
global gitignore files.
This can be reproduced and tested with `env -i ./ag foo`. | @@ -150,7 +150,7 @@ void init_options(void) {
memset(&opts, 0, sizeof(opts));
opts.casing = CASE_DEFAULT;
opts.color = TRUE;
- if (strcmp(term, "dumb") == 0) {
+ if (term && !strcmp(term, "dumb")) {
opts.color = FALSE;
}
opts.color_win_ansi = FALSE;
@@ -713,8 +713,10 @@ void parse_options(int argc, char **argv, char **base_paths[], char **paths[]) {
const char *config_home = getenv("XDG_CONFIG_HOME");
if (config_home) {
ag_asprintf(&gitconfig_res, "%s/%s", config_home, "git/ignore");
- } else {
+ } else if (home_dir) {
ag_asprintf(&gitconfig_res, "%s/%s", home_dir, ".config/git/ignore");
+ } else {
+ gitconfig_res = ag_strdup("");
}
}
log_debug("global core.excludesfile: %s", gitconfig_res);
|
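A minimal, self-contained C example of the defensive pattern behind this fix: getenv() returns NULL for unset variables (e.g. under `env -i`), so the result must be checked before strcmp() or string formatting. The variable names match the commit; everything else is illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
    const char *term = getenv("TERM");   /* may be NULL when the variable is unset */
    const char *home = getenv("HOME");

    /* Check for NULL before strcmp(): strcmp(NULL, ...) is undefined behavior. */
    int color = 1;
    if (term && strcmp(term, "dumb") == 0)
        color = 0;

    /* Fall back to something sensible when HOME is missing. */
    char path[4096];
    if (home)
        snprintf(path, sizeof(path), "%s/.config/git/ignore", home);
    else
        path[0] = '\0';

    printf("color=%d ignore=%s\n", color, path[0] ? path : "(none)");
    return 0;
}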
removed redundant arguments from angular_cl | @@ -285,8 +285,7 @@ def _check_array_params(f_arg):
def angular_cl(cosmo, cltracer1, cltracer2, ell,
- l_limber=-1., l_logstep=1.05, l_linstep=20., dchi=3.,
- dlk=0.003, zmin=0.05) :
+ l_limber=-1., l_logstep=1.05, l_linstep=20.) :
"""Calculate the angular (cross-)power spectrum for a pair of tracers.
Args:
|
pyocf: reintroduce trace device | @@ -473,6 +473,56 @@ class ErrorDevice(Volume):
return self.vol.get_copy()
+class TraceDevice(Volume):
+ class IoType(IntEnum):
+ Data = 1
+ Flush = 2
+ Discard = 3
+
+ def __init__(self, vol, trace_fcn=None, uuid=None):
+ self.vol = vol
+ super().__init__(uuid)
+ self.trace_fcn = trace_fcn
+
+ def _trace(self, io, io_type):
+ submit = True
+
+ if self.trace_fcn:
+ submit = self.trace_fcn(self, io, io_type)
+
+ return submit
+
+ def do_submit_io(self, io):
+ submit = self._trace(io, TraceDevice.IoType.Data)
+
+ if submit:
+ self.vol.do_submit_io(io)
+
+ def do_submit_flush(self, io):
+ submit = self._trace(io, TraceDevice.IoType.Flush)
+
+ if submit:
+ self.vol.do_submit_flush(io)
+
+ def get_length(self):
+ return self.vol.get_length()
+
+ def get_max_io_size(self):
+ return self.vol.get_max_io_size()
+
+ def do_submit_discard(self, discard):
+ return self.vol.do_submit_discard(discard)
+
+ def dump(self, offset=0, size=0, ignore=VOLUME_POISON, **kwargs):
+ return self.vol.dump(offset, size, ignore=ignore, **kwargs)
+
+ def md5(self):
+ return self.vol.md5()
+
+ def get_copy(self):
+ return self.vol.get_copy()
+
+
lib = OcfLib.getInstance()
lib.ocf_io_get_priv.restype = POINTER(VolumeIoPriv)
lib.ocf_io_get_volume.argtypes = [c_void_p]
|
in_prometheus_scrape: fix invalid type reference in log call format string | @@ -75,8 +75,8 @@ static int collect_metrics(struct prom_scrape *ctx)
/* get upstream connection */
u_conn = flb_upstream_conn_get(ctx->upstream);
if (!u_conn) {
- flb_plg_error(ctx->ins, "could not get an upstream connection to %s:%s",
- ctx->ins->host.port, ctx->ins->host.port);
+ flb_plg_error(ctx->ins, "could not get an upstream connection to %s:%u",
+ ctx->ins->host.name, ctx->ins->host.port);
return -1;
}
|
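A small C sketch of the underlying rule: printf-style specifiers must match the argument types, since "%s" applied to an integer is undefined behavior. The struct below is illustrative, not the Fluent Bit host structure.

#include <stdio.h>

struct host {
    const char *name;
    unsigned short port;
};

int main(void) {
    struct host h = { "localhost", 2021 };

    /* Wrong: "%s" applied to an integer (and the port passed where the name belongs). */
    /* printf("connect to %s:%s\n", h.port, h.port); */

    /* Right: the string gets %s, the unsigned integer gets %u. */
    printf("connect to %s:%u\n", h.name, (unsigned)h.port);
    return 0;
}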
add the Keycloak example as a "unit test"
"\"two\","
"\"three\""
"],"
- "\"somebool\": false"
+ "\"somebool\": false,"
+
+ "\"realm_access\": {"
+ "\"roles\": ["
+ "\"someRole1\","
+ "\"someRole2\""
+ "]"
+ "},"
+ "\"resource_access\": {"
+ "\"someClient\": {"
+ "\"roles\": ["
+ "\"someRole3\","
+ "\"someRole4\""
+ "]"
+ "}"
+ "}"
+
"}";
json = json_loads(claims, 0, &err);
@@ -1362,6 +1378,10 @@ static char * test_authz_worker(request_rec *r) {
rc = oidc_authz_worker24(r, json, require_args, oidc_authz_match_claim);
TST_ASSERT("auth status (8: nested non-array)", rc == AUTHZ_DENIED);
+ require_args = "Require claim resource_access.someClient.roles:someRole4";
+ rc = oidc_authz_worker24(r, json, require_args, oidc_authz_match_claim);
+ TST_ASSERT("auth status (9: keycloak sample)", rc == AUTHZ_GRANTED);
+
json_decref(json);
return 0;
|
nrf/drivers/bluetooth: Start advertising after disconnect.
Disconnecting after a connect would not restart advertising, so
reconnecting may get harder. | @@ -141,6 +141,7 @@ STATIC void gap_event_handler(mp_obj_t self_in, uint16_t event_id, uint16_t conn
} else if (event_id == 17) { // disconnect event
self->conn_handle = 0xFFFF; // invalid connection handle
m_connected = false;
+ ble_uart_advertise();
}
}
|
Add linux/sched.h to list of libbpf.c includes
This should hopefully fix compile errors on some Ubuntu systems.
Also, reorders the includes according to clang-format | #include <fcntl.h>
#include <limits.h>
#include <linux/bpf.h>
+#include <linux/bpf_common.h>
#include <linux/if_packet.h>
-#include <linux/pkt_cls.h>
#include <linux/perf_event.h>
+#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
+#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/version.h>
-#include <linux/bpf_common.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <sched.h>
+#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
-#include <unistd.h>
-#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>
+#include <unistd.h>
#include "libbpf.h"
#include "perf_reader.h"
|
travis: add GENERATE=yes no-makedepend to the ARM64 build. | @@ -54,7 +54,7 @@ jobs:
- os: linux
arch: arm64
compiler: gcc
- env: CONFIG_OPTS="no-asm no-deprecated enable-buildtest-c++ --strict-warnings -D_DEFAULT_SOURCE" BUILDONLY="yes" CHECKDOCS="yes" CPPFLAGS="-ansi"
+ env: CONFIG_OPTS="no-asm no-makedepend no-deprecated enable-buildtest-c++ --strict-warnings -D_DEFAULT_SOURCE" BUILDONLY="yes" CHECKDOCS="yes" GENERATE="yes" CPPFLAGS="-ansi"
- os: linux
arch: s390x
compiler: gcc
|
webp-container-spec: clarify background color note
replace ...a transparency value (alpha)... with clearer text
based on comments from: | @@ -400,8 +400,8 @@ Background Color: 32 bits (_uint32_)
**Note**:
- * Background color MAY contain a transparency value (alpha), even if the
- _Alpha_ flag in [VP8X chunk](#extended_header) is unset.
+ * Background color MAY contain a non-opaque alpha value, even if the _Alpha_
+ flag in [VP8X chunk](#extended_header) is unset.
* Viewer applications SHOULD treat the background color value as a hint, and
are not required to use it.
|
gimble: configure PPVAR_SYS
The PPVAR_SYS must same as battery voltage(3 cells * 4.4V)
BRANCH=none
TEST=make -j BOARD=gimble | #include "common.h"
#include "compile_time_macros.h"
#include "console.h"
+#include "driver/charger/bq25710.h"
#include "gpio.h"
#include "gpio_signal.h"
#include "hooks.h"
@@ -114,6 +115,13 @@ enum battery_present battery_hw_present(void)
return gpio_get_level(batt_pres) ? BP_NO : BP_YES;
}
+static void board_init(void)
+{
+ /* The PPVAR_SYS must same as battery voltage(3 cells * 4.4V) */
+ bq25710_set_min_system_voltage(CHARGER_SOLO, 13200);
+}
+DECLARE_HOOK(HOOK_INIT, board_init, HOOK_PRIO_DEFAULT);
+
__override void board_set_charge_limit(int port, int supplier, int charge_ma,
int max_ma, int charge_mv)
{
|
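For reference, the 13200 passed to bq25710_set_min_system_voltage() is the comment's arithmetic expressed in millivolts: 3 cells x 4.4 V = 13.2 V = 13200 mV.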
cmake: allow / in placements
thanks to
STRING(REGEX REPLACE ";" " " PROVIDES "${PROVIDES}")
STRING(REGEX REPLACE "\"- +infos/provides *= *([a-zA-Z0-9/ ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/provides\",\nKEY_VALUE, \"${PROVIDES}\", KEY_END)," contents "${contents}")
- STRING(REGEX REPLACE "\"- +infos/placements *= *([a-zA-Z0-9 ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/placements\",\nKEY_VALUE, \"\\1\", KEY_END)," contents "${contents}")
+ STRING(REGEX REPLACE "\"- +infos/placements *= *([a-zA-Z0-9/ ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/placements\",\nKEY_VALUE, \"\\1\", KEY_END)," contents "${contents}")
STRING(REGEX REPLACE "\"- +infos/recommends *= *([a-zA-Z0-9 ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/recommends\",\nKEY_VALUE, \"\\1\", KEY_END)," contents "${contents}")
STRING(REGEX REPLACE "\"- +infos/ordering *= *([a-zA-Z0-9 ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/ordering\",\nKEY_VALUE, \"\\1\", KEY_END)," contents "${contents}")
STRING(REGEX REPLACE "\"- +infos/stacking *= *([a-zA-Z0-9 ]*)\\\\n\"" "keyNew(\"system/elektra/modules/${p}/infos/stacking\",\nKEY_VALUE, \"\\1\", KEY_END)," contents "${contents}")
|
Adding tol in primalResidual and dualResidual functions.
The function setErrorArray has one more parameter (projection error). The value is currently set to 0.0. | @@ -454,7 +454,7 @@ void grfc3d_IPM(GlobalRollingFrictionContactProblem* restrict problem, double* r
double gapVal = 1e300;
// double dualgap = 1e300;
double relgap = 1e300;
- double error_array[5];
+ double error_array[6];
error_array[0] = pinfeas;
error_array[1] = dinfeas;
error_array[2] = relgap;
@@ -686,10 +686,10 @@ void grfc3d_IPM(GlobalRollingFrictionContactProblem* restrict problem, double* r
/* Primal residual = velocity - H * globalVelocity - w */
- primalResidual(velocity, H, globalVelocity, w, primalConstraint, &pinfeas);
+ primalResidual(velocity, H, globalVelocity, w, primalConstraint, &pinfeas, tol);
/* Dual residual = M*globalVelocity - H'*reaction + f */
- dualResidual(M, globalVelocity, H, reaction, f, dualConstraint, &dinfeas);
+ dualResidual(M, globalVelocity, H, reaction, f, dualConstraint, &dinfeas, tol);
@@ -744,7 +744,7 @@ void grfc3d_IPM(GlobalRollingFrictionContactProblem* restrict problem, double* r
complem_2 = complemResidualNorm(velocity_2, reaction_2, n_dminus2, n);
// setErrorArray(error, pinfeas, dinfeas, dualgap, complem, complem_p);
- setErrorArray(error_array, pinfeas, dinfeas, relgap, complem_1, complem_2);
+ setErrorArray(error_array, pinfeas, dinfeas, relgap, complem_1, complem_2, 0.0);
/* ----- return to original variables ------ */
@@ -1915,11 +1915,11 @@ void grfc3d_IPM_set_default(SolverOptions* options)
options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_UPDATE_S] = 0;
/* 0: without scaling; 1: NT scaling using Qp; 2: NT scaling using F */
- options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_NESTEROV_TODD_SCALING] = 2;
+ options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_NESTEROV_TODD_SCALING] = 1;
options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_ITERATES_MATLAB_FILE] = 0;
- options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_REDUCED_SYSTEM] = 0;
+ options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_REDUCED_SYSTEM] = 1;
options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_FINISH_WITHOUT_SCALING] = 0;
|
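A self-contained dense-matrix sketch of the primal residual named in the diff comment (residual = velocity - H * globalVelocity - w) together with its infinity norm; the real solver works on Siconos sparse matrices and also uses the tol argument, which is omitted here.

#include <stdio.h>
#include <math.h>

/* r = u - H*v - w for a dense row-major m-by-n matrix H; returns ||r||_inf. */
static double primal_residual(int m, int n, const double *u, const double *H,
                              const double *v, const double *w, double *r) {
    double norm = 0.0;
    for (int i = 0; i < m; i++) {
        double hv = 0.0;
        for (int j = 0; j < n; j++)
            hv += H[i * n + j] * v[j];
        r[i] = u[i] - hv - w[i];
        if (fabs(r[i]) > norm)
            norm = fabs(r[i]);
    }
    return norm;
}

int main(void) {
    double H[4] = { 1, 0, 0, 1 };
    double v[2] = { 1, 2 };
    double u[2] = { 1.5, 2.0 };
    double w[2] = { 0.5, 0.0 };
    double r[2];
    printf("||r||_inf = %g\n", primal_residual(2, 2, u, H, v, w, r));
    return 0;
}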
enable HLS_CLOCK_PERIOD_CONSTRAINT to be commented in snap_env.sh | @@ -31,10 +31,17 @@ PART_NUMBER ?= $(FPGACHIP)
snap_env_sh = $(SNAP_ROOT)/snap_env.sh
HLS_ACTION_CLOCK_DEFAULT = 4
ifneq ("$(wildcard $(snap_env_sh))","")
+ HLS_ACTION_CLOCK_COMMENTED = $(shell grep HLS_CLOCK_PERIOD_CONSTRAINT $(snap_env_sh) | grep "\#")
+ ifeq "$(HLS_ACTION_CLOCK_COMMENTED)" ""
+ #if line not commented, then get the value
HLS_ACTION_CLOCK = $(shell grep HLS_CLOCK_PERIOD_CONSTRAINT $(snap_env_sh) | cut -d = -f 2 | tr -d 'ns"')
ifeq "$(HLS_ACTION_CLOCK)" ""
HLS_ACTION_CLOCK = $(HLS_ACTION_CLOCK_DEFAULT)
endif
+ $(info HLS CLOCK PERIOD is set to: $(HLS_ACTION_CLOCK) ns)
+ else
+ HLS_ACTION_CLOCK = $(HLS_ACTION_CLOCK_DEFAULT)
+ endif
endif
@@ -94,6 +101,7 @@ $(SOLUTION_NAME): $(objs)
#
check: $(syn_dir)
@if [ $(HLS_ACTION_CLOCK) != $(shell grep "Setting up clock" $(SOLUTION_DIR)*/$(SOLUTION_NAME)/$(SOLUTION_NAME).log |cut -d " " -f 12|cut -d "n" -f 1) ]; then \
+ echo " ---------------------------------------------------------- "; \
echo " ERROR: Action was last compiled with a different HLS clock."; \
echo " Please force the recompilation with a 'make clean' command"; \
echo " ---------------------------------------------------------- "; exit -1; \
@@ -106,6 +114,7 @@ check: $(syn_dir)
echo -n " Checking for critical timings during HLS synthesis .... "; \
grep -A8 critical $(SOLUTION_DIR)*/$(SOLUTION_NAME)/$(SOLUTION_NAME).log ; \
if [ $$? -eq 0 ]; then \
+ echo "------------------------------------------------------------------ "; \
echo "TIMING ERROR: Please correct your action code before going further"!; \
echo "------------------------------------------------------------------ "; exit -1; \
fi; \
|
ectool: Fix array size check for pchg_state_text
This patch fixes the runtime check for pchg_state_text size and adds
BUILD_ASSERT.
BRANCH=none
TEST=Verify 'ectool pchg 0' prints states properly on CoachZ. | @@ -9433,7 +9433,9 @@ static int cmd_pchg_info(const struct ec_response_pchg *res)
{
static const char * const pchg_state_text[] = EC_PCHG_STATE_TEXT;
- printf("State: %s (%d)\n", res->state < sizeof(pchg_state_text)
+ BUILD_ASSERT(ARRAY_SIZE(pchg_state_text) == PCHG_STATE_COUNT);
+
+ printf("State: %s (%d)\n", res->state < PCHG_STATE_COUNT
? pchg_state_text[res->state] : "UNDEF", res->state);
printf("Battery: %u%%\n", res->battery_percentage);
printf("Errors: 0x%x\n", res->error);
|
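A minimal sketch of the compile-time check this patch adds: keep a string table in lock-step with an enum and assert that their sizes match, so the runtime bound can use the enum count rather than sizeof of the array (which counts bytes, not entries). BUILD_ASSERT is shown as a plain C11 _Static_assert; the state names are illustrative.

#include <stdio.h>

enum pchg_state {
    PCHG_STATE_RESET = 0,
    PCHG_STATE_INITIALIZED,
    PCHG_STATE_ENABLED,
    PCHG_STATE_COUNT,          /* always last */
};

static const char *const pchg_state_text[] = {
    "RESET", "INITIALIZED", "ENABLED",
};

/* Fails to compile if a state is added without adding its text. */
_Static_assert(sizeof(pchg_state_text) / sizeof(pchg_state_text[0]) == PCHG_STATE_COUNT,
               "pchg_state_text out of sync with enum pchg_state");

static const char *state_name(unsigned state) {
    /* Bound by the enum count, not by sizeof(array). */
    return state < PCHG_STATE_COUNT ? pchg_state_text[state] : "UNDEF";
}

int main(void) {
    printf("%s\n", state_name(1));
    printf("%s\n", state_name(42));
    return 0;
}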
Fix: Add Parenthesis around if-statement in macro | #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
- #define portEND_SWITCHING_ISR( xSwitchRequired ) if( xSwitchRequired != pdFALSE ) portYIELD()
+
+ #define portEND_SWITCHING_ISR( xSwitchRequired ) \
+ do { \
+ if( (xSwitchRequired) != pdFALSE ) portYIELD(); \
+ } while (0)
+
#define portYIELD_FROM_ISR( x ) portEND_SWITCHING_ISR( x )
/*-----------------------------------------------------------*/
|
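A standalone example of why the macro body is wrapped in do { ... } while (0) and the parameter parenthesized: without that, the macro expands to a bare if that can steal a following else, and an unparenthesized argument can change precedence. The YIELD stub is illustrative.

#include <stdio.h>

#define YIELD() printf("yield\n")

/* Unsafe form: expands to a bare if, which can capture a following `else`. */
#define END_SWITCHING_ISR_UNSAFE(x) if ((x) != 0) YIELD()

/* Safe form: behaves like a single statement and parenthesizes the argument. */
#define END_SWITCHING_ISR(x)       \
    do {                           \
        if ((x) != 0)              \
            YIELD();               \
    } while (0)

int main(void) {
    int woken = 0;

    if (woken)
        END_SWITCHING_ISR(woken);
    else
        printf("no context switch requested\n");  /* pairs with the outer if as intended */

    return 0;
}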
UserNotes: Update default settings for 3.x builds | @@ -1331,7 +1331,7 @@ LOGICAL DllMain(
PPH_PLUGIN_INFORMATION info;
PH_SETTING_CREATE settings[] =
{
- { StringSettingType, SETTING_NAME_DATABASE_PATH, L"%APPDATA%\\Process Hacker 2\\usernotesdb.xml" },
+ { StringSettingType, SETTING_NAME_DATABASE_PATH, L"%APPDATA%\\Process Hacker\\usernotesdb.xml" },
{ StringSettingType, SETTING_NAME_CUSTOM_COLOR_LIST, L"" }
};
|
Travis: Use recent compilers for Linux builds | @@ -26,17 +26,35 @@ matrix:
apt:
sources: [ ubuntu-toolchain-r-test ]
packages: [ g++-7 ]
- env: [ ASAN=ON ]
+ env:
+ - ASAN=ON
+ - CC_COMPILER=gcc-7
+ - CXX_COMPILER=g++-7
- os: linux
compiler: clang
- env: [ ASAN=ON ]
+ addons:
+ apt:
+ sources: [ llvm-toolchain-trusty-6.0, ubuntu-toolchain-r-test ]
+ packages: [ clang-6.0 ]
+ env:
+ - ASAN=ON
+ - CC_COMPILER=clang-6.0
+ - CXX_COMPILER=clang++-6.0
+
# FULL: Build full version of Elektra (BUILD_FULL=ON)
- os: linux
compiler: gcc
- env: [ FULL=ON ]
+ addons:
+ apt:
+ sources: [ ubuntu-toolchain-r-test ]
+ packages: [ g++-8 ]
+ env:
+ - FULL=ON
+ - CC_COMPILER=gcc-8
+ - CXX_COMPILER=g++-8
- os: osx
# Translating the `syslog` plugin with GCC on macOS 10.13 does not work, since GCC is unable to compile `sys/syslog.h`.
@@ -56,16 +74,23 @@ matrix:
- os: linux
compiler: gcc
+ addons:
apt:
- sources: [ llvm-toolchain-trusty-6.0, ubuntu-toolchain-r-test]
- packages: [ clang-format-6.0 ]
+ sources: [ ubuntu-toolchain-r-test ]
+ packages: [ g++-7 ]
+ env:
+ - CC_COMPILER=gcc-7
+ - CXX_COMPILER=g++-7
- os: linux
compiler: clang
addons:
apt:
- sources: [ llvm-toolchain-trusty-5.0 ]
- packages: [ clang-format-5.0 ]
+ sources: [ llvm-toolchain-trusty-6.0, ubuntu-toolchain-r-test ]
+ packages: [ clang-6.0 ]
+ env:
+ - CC_COMPILER=clang-6.0
+ - CXX_COMPILER=clang++-6.0
before_install:
- |
@@ -128,7 +153,8 @@ before_install:
fi
- |
if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
- [[ $ASAN == ON && $CC == gcc ]] && export CC=gcc-7 CXX=g++-7
+ [[ -n "$CC_COMPILER" ]] && export CC="$CC_COMPILER"
+ [[ -n "$CXX_COMPILER" ]] && export CXX="$CXX_COMPILER"
sudo apt-get -qq update
sudo apt-get install ninja-build
sudo apt-get install devscripts # contains `checkbashisms`
|
Disable KPH by default | * program settings cache
*
* Copyright (C) 2010-2016 wj32
- * Copyright (C) 2017-2018 dmex
+ * Copyright (C) 2017-2020 dmex
*
* This file is part of Process Hacker.
*
@@ -41,7 +41,7 @@ VOID PhAddDefaultSettings(
PhpAddIntegerSetting(L"ElevationLevel", L"1"); // PromptElevateAction
PhpAddIntegerSetting(L"EnableCycleCpuUsage", L"1");
PhpAddIntegerSetting(L"EnableInstantTooltips", L"0");
- PhpAddIntegerSetting(L"EnableKph", L"1");
+ PhpAddIntegerSetting(L"EnableKph", L"0");
PhpAddIntegerSetting(L"EnableKphWarnings", L"0");
PhpAddIntegerSetting(L"EnableHandleSnapshot", L"1");
PhpAddIntegerSetting(L"EnableNetworkResolve", L"1");
|
YAwn: Add token number to logging message | @@ -68,12 +68,12 @@ void ErrorListener::syntaxError (int errorTokenNumber, void * errorTokenData, in
if (ignoredTokenData != nullptr)
{
auto data = **static_cast<unique_ptr<Token> *> (ignoredTokenData);
- ELEKTRA_LOG_DEBUG ("Ignored token: %s", to_string (data).c_str ());
+ ELEKTRA_LOG_DEBUG ("Ignored token number %d: %s", ignoredToken, to_string (data).c_str ());
}
if (recoveredTokenData != nullptr)
{
auto data = **static_cast<unique_ptr<Token> *> (recoveredTokenData);
- ELEKTRA_LOG_DEBUG ("Recovered token: %s", to_string (data).c_str ());
+ ELEKTRA_LOG_DEBUG ("Recovered on token number %d: %s", recoveredToken, to_string (data).c_str ());
}
#endif
}
|
[io] add the possibility to add solver_options for gmp | @@ -1697,7 +1697,6 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5):
time = self.current_time()
positions = self._io.positions(self._nsds)
-
if positions is not None:
self._dynamic_data.resize(current_line + positions.shape[0], 0)
@@ -1761,7 +1760,6 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5):
time = self.current_time()
contact_points = self._io.contactPoints(self._nsds,
self._contact_index_set)
-
if contact_points is not None:
current_line = self._cf_data.shape[0]
# Increase the number of lines in cf_data
@@ -2316,9 +2314,12 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5):
osnspb.setMStorageType(sn.NM_SPARSE_BLOCK)
osnspb.setMaxSize(osnspb_max_size)
else:
- if (nb_of_nslaw_type > 1):
+ if ('EqualityConditionNSL' in set(nslaw_type_list)):
+ if (solver_options is None):
osnspb = sk.GenericMechanical(
sn.SICONOS_FRICTION_3D_ONECONTACT_NSN)
+ else:
+ osnspb = sk.GenericMechanical(solver_options)
else:
if 'NewtonImpactFrictionNSL' in set(nslaw_type_list):
osnspb = sk.FrictionContact(self._dimension,
|
[chainmaker][#472]add NULL para | @@ -145,7 +145,7 @@ int main(int argc, char *argv[])
return -1;
}
- result = BoatHlchainmakerAddTxParam(&tx_ptr, 6, "time", "6543235", "file_hash", "ab3456df5799b87c77e7f85", "file_name", "name005");
+ result = BoatHlchainmakerAddTxParam(&tx_ptr, 6, "time", "6543235", "file_hash", "ab3456df5799b87c77e7f85", "file_name", "name005", NULL);
if (result != BOAT_SUCCESS)
{
BoatLog(BOAT_LOG_CRITICAL, "BoatHlchainmakerAddTxParam() failed.");
@@ -163,7 +163,7 @@ int main(int argc, char *argv[])
/* step-5: wait seconds and 'query' the gas */
BoatSleep(2);
- result = BoatHlchainmakerAddTxParam(&tx_ptr, 2, "file_hash", "ab3456df5799b87c77e7f85");
+ result = BoatHlchainmakerAddTxParam(&tx_ptr, 2, "file_hash", "ab3456df5799b87c77e7f85", NULL);
if (result != BOAT_SUCCESS)
{
return -1;
|
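A standalone sketch of why the trailing NULL matters: a variadic function that walks key/value strings gets no argument count from the compiler, so it stops at a NULL sentinel, and omitting it makes the callee read past the real arguments. The function below is illustrative, not the BoAT API.

#include <stdarg.h>
#include <stdio.h>

/* Prints key/value pairs until a NULL key is seen. */
static void add_tx_params(const char *first_key, ...) {
    va_list ap;
    va_start(ap, first_key);
    const char *key = first_key;
    while (key != NULL) {
        const char *value = va_arg(ap, const char *);
        printf("%s = %s\n", key, value);
        key = va_arg(ap, const char *);   /* next key, or the NULL terminator */
    }
    va_end(ap);
}

int main(void) {
    add_tx_params("time", "6543235",
                  "file_hash", "ab3456df5799b87c77e7f85",
                  "file_name", "name005",
                  NULL);                  /* sentinel: tells the callee to stop */
    return 0;
}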
added -h for help | @@ -420,6 +420,7 @@ printf("%s %s (C) %s ZeroBeat\n"
"-p : list messagepair\n"
"-l : list essid len\n"
"-e : list essid\n"
+ "-h : this help\n"
"\n", eigenname, VERSION, VERSION_JAHR, eigenname, eigenname);
exit(EXIT_FAILURE);
}
@@ -505,6 +506,10 @@ while ((auswahl = getopt(argc, argv, "i:o:aAsSMRwpPlehv")) != -1)
outmode |= OM_ESSID;
break;
+ case 'h':
+ usage(eigenname);
+ break;
+
default:
usage(eigenname);
break;
|
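A minimal getopt() example of how the new 'h' case is typically wired: the character appears both in the optstring and in the switch, and the handler just calls usage(). Program details are illustrative, not the hcxtools option set.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void usage(const char *name) {
    printf("%s -i <in> -o <out> -h (this help)\n", name);
    exit(EXIT_FAILURE);
}

int main(int argc, char *argv[]) {
    int opt;
    /* "i:" and "o:" take arguments, "h" does not. */
    while ((opt = getopt(argc, argv, "i:o:h")) != -1) {
        switch (opt) {
        case 'i': printf("input: %s\n", optarg); break;
        case 'o': printf("output: %s\n", optarg); break;
        case 'h':
        default:
            usage(argv[0]);
            break;
        }
    }
    return 0;
}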
nimble/ll: Fix AuxPtr offset calculations
Rounds which were cause by changing from usec->ticks->usec made AuxPtr
incorrect. | @@ -394,9 +394,6 @@ static void
ble_ll_adv_put_aux_ptr(struct ble_ll_adv_sm *advsm, uint32_t offset,
uint8_t *dptr)
{
- /* in usecs */
- offset = os_cputime_ticks_to_usecs(offset);
-
dptr[0] = advsm->adv_secondary_chan;
if (offset > 245700) {
@@ -468,7 +465,7 @@ ble_ll_adv_pdu_make(uint8_t *dptr, void *pducb_arg, uint8_t *hdr_byte)
/* AuxPtr */
if (AUX_CURRENT(advsm)->sch.enqueued) {
- offset = AUX_CURRENT(advsm)->start_time - advsm->adv_pdu_start_time;
+ offset = os_cputime_ticks_to_usecs(AUX_CURRENT(advsm)->start_time - advsm->adv_pdu_start_time);
} else {
offset = 0;
}
@@ -547,12 +544,10 @@ ble_ll_adv_aux_pdu_make(uint8_t *dptr, void *pducb_arg, uint8_t *hdr_byte)
*/
offset = 0;
} else if (advsm->rx_ble_hdr) {
- offset = advsm->rx_ble_hdr->rem_usecs +
- ble_ll_pdu_tx_time_get(12, advsm->sec_phy) + BLE_LL_IFS + 30;
- offset = AUX_NEXT(advsm)->start_time - advsm->rx_ble_hdr->beg_cputime -
- os_cputime_usecs_to_ticks(offset);
+ offset = os_cputime_ticks_to_usecs(AUX_NEXT(advsm)->start_time - advsm->rx_ble_hdr->beg_cputime);
+ offset -= (advsm->rx_ble_hdr->rem_usecs + ble_ll_pdu_tx_time_get(12, advsm->sec_phy) + BLE_LL_IFS);
} else {
- offset = AUX_NEXT(advsm)->start_time - aux->start_time;
+ offset = os_cputime_ticks_to_usecs(AUX_NEXT(advsm)->start_time - aux->start_time);
}
ble_ll_adv_put_aux_ptr(advsm, offset, dptr);
|
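A self-contained C illustration of the failure mode described above: converting microseconds to timer ticks and back truncates, so offsets should be computed in one unit and converted once. The 32768 Hz tick rate is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

#define TICK_HZ 32768u   /* illustrative cputime rate */

static uint32_t usecs_to_ticks(uint32_t us) { return (uint64_t)us * TICK_HZ / 1000000u; }
static uint32_t ticks_to_usecs(uint32_t t)  { return (uint64_t)t * 1000000u / TICK_HZ; }

int main(void) {
    uint32_t offset_us = 1234;

    /* Round trip usec -> ticks -> usec loses precision (one tick is ~30.5 us). */
    uint32_t round_trip = ticks_to_usecs(usecs_to_ticks(offset_us));

    printf("original offset : %u us\n", offset_us);
    printf("after round trip: %u us\n", round_trip);
    return 0;
}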
drv_serial: fix warning | @@ -93,7 +93,7 @@ void serial_smart_audio_send_data(uint8_t *data, uint32_t size) {
uint8_t serial_smart_audio_read_byte() {
for (uint32_t timeout = 0x10000; USART_GetFlagStatus(USART.channel, USART_FLAG_RXNE) == RESET;)
if (!timeout--)
- return;
+ return 0;
return USART_ReceiveData(USART.channel);
}
|
esp_netif: Make GARP default netif flags only if enabled in lwip
Closes | @@ -17,9 +17,16 @@ extern "C" {
// Macros to assemble master configs with partial configs from netif, stack and driver
//
+#ifdef CONFIG_LWIP_ESP_GRATUITOUS_ARP
+// If GARP enabled in menuconfig (default), make it also a default config for common netifs
+#define ESP_NETIF_DEFAULT_ARP_FLAGS (ESP_NETIF_FLAG_GARP)
+#else
+#define ESP_NETIF_DEFAULT_ARP_FLAGS (0)
+#endif
+
#define ESP_NETIF_INHERENT_DEFAULT_WIFI_STA() \
{ \
- .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_FLAG_GARP | ESP_NETIF_FLAG_EVENT_IP_MODIFIED), \
+ .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_DEFAULT_ARP_FLAGS | ESP_NETIF_FLAG_EVENT_IP_MODIFIED), \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(mac) \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(ip_info) \
.get_ip_event = IP_EVENT_STA_GOT_IP, \
@@ -47,7 +54,7 @@ extern "C" {
#define ESP_NETIF_INHERENT_DEFAULT_ETH() \
{ \
- .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_FLAG_GARP | ESP_NETIF_FLAG_EVENT_IP_MODIFIED), \
+ .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_DEFAULT_ARP_FLAGS | ESP_NETIF_FLAG_EVENT_IP_MODIFIED), \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(mac) \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(ip_info) \
.get_ip_event = IP_EVENT_ETH_GOT_IP, \
@@ -76,7 +83,7 @@ extern "C" {
#define ESP_NETIF_INHERENT_DEFAULT_BR() \
{ \
- .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_FLAG_GARP | ESP_NETIF_FLAG_EVENT_IP_MODIFIED | ESP_NETIF_FLAG_IS_BRIDGE), \
+ .flags = (esp_netif_flags_t)(ESP_NETIF_DHCP_CLIENT | ESP_NETIF_DEFAULT_ARP_FLAGS | ESP_NETIF_FLAG_EVENT_IP_MODIFIED | ESP_NETIF_FLAG_IS_BRIDGE), \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(mac) \
ESP_COMPILER_DESIGNATED_INIT_AGGREGATE_TYPE_EMPTY(ip_info) \
.get_ip_event = IP_EVENT_ETH_GOT_IP, \
|
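A small sketch of the configuration pattern used in this change: derive a default flag macro from a Kconfig-style option so shared defaults only include the feature when it is compiled in. Names are illustrative, not the esp_netif API.

#include <stdio.h>

/* Pretend this came from the build configuration (e.g. sdkconfig). */
#define CONFIG_GRATUITOUS_ARP 1

#define FLAG_DHCP_CLIENT (1u << 0)
#define FLAG_GARP        (1u << 1)

#ifdef CONFIG_GRATUITOUS_ARP
#define DEFAULT_ARP_FLAGS (FLAG_GARP)
#else
#define DEFAULT_ARP_FLAGS (0)
#endif

#define DEFAULT_NETIF_FLAGS (FLAG_DHCP_CLIENT | DEFAULT_ARP_FLAGS)

int main(void) {
    printf("default flags: 0x%x\n", DEFAULT_NETIF_FLAGS);
    return 0;
}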