message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
BugID:20072916: Update aos make prompt
Remove the command that does't work in Windows Powershell | @@ -110,10 +110,7 @@ $(info $$ aos make $(BUILD_STRING) -c config)
$(info ---> Or more config: $$ aos make menuconfig)
$(info )
$(info Step 2: build:)
-$(info $$ aos make # VAR=value if needed)
-$(info )
-$(info ---> Or oneline command for config and build:)
-$(info $$ aos make $(BUILD_STRING) -c config && aos make)
+$(info $$ aos make # append VAR=value if needed)
$(info *********************************************************************)
$(call TARGET_DEPRECATED_ERROR)
endif
|
Travis: Update snapshot release before deploying with Windows VM. | @@ -69,6 +69,11 @@ jobs:
- cmake -G "Visual Studio 15 2017" -A x64 -DTINYSPLINE_ENABLE_PYTHON=True ..
- C:\\Python37\\python setup.py bdist_wheel
- popd
+ before_deploy:
+ - git tag -f snapshot
+ - git remote add gh https://${TRAVIS_REPO_SLUG%/*}:${GITHUB_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git
+ - git push -f gh snapshot
+ - git remote remove gh
deploy:
- provider: releases
api-key: $GITHUB_TOKEN
|
header needed for python scipy mmread compatibility | @@ -26,7 +26,7 @@ void SoloFeature::outputNumUMIperGeneCB()
ofstream &countMatrixStream=ofstrOpen(matrixFileName,ERROR_OUT, P);
//header
- countMatrixStream <<"%%\n%\n" << Trans.nGe<< ' ' << pSolo.cbWL.size() <<' '<< nCellGeneEntries << '\n';
+ countMatrixStream <<"%%MatrixMarket matrix coordinate integer general\n%\n" << Trans.nGe<< ' ' << pSolo.cbWL.size() <<' '<< nCellGeneEntries << '\n';
for (uint32 icb=0; icb<nCB; icb++) {
uint32 *rCBpp=rCBp[icb];
|
Update doc/benchmarks.md. | @@ -26,7 +26,6 @@ bcheckOptimizeXxx methods."
The benchmark programs aim to be runnable "out of the box" without any
configuration or installation. For example, to run the `std/flate` benchmarks:
- # TODO: confirm that we will live at github.com/google/puffs
git clone https://github.com/google/puffs.git
cd puffs/test/c/std
gcc -O3 flate.c
|
i2s/driver: Add module reset before enabling | @@ -886,12 +886,6 @@ static esp_err_t i2s_param_config(i2s_port_t i2s_num, const i2s_config_t *i2s_co
I2S_CHECK(!((i2s_config->mode & I2S_MODE_DAC_BUILT_IN) && (i2s_num != I2S_NUM_0)), "I2S DAC built-in only support on I2S0", ESP_ERR_INVALID_ARG);
I2S_CHECK(!((i2s_config->mode & I2S_MODE_PDM) && (i2s_num != I2S_NUM_0)), "I2S DAC PDM only support on I2S0", ESP_ERR_INVALID_ARG);
- if (i2s_num == I2S_NUM_1) {
- periph_module_enable(PERIPH_I2S1_MODULE);
- } else {
- periph_module_enable(PERIPH_I2S0_MODULE);
- }
-
if(i2s_config->mode & I2S_MODE_ADC_BUILT_IN) {
//in ADC built-in mode, we need to call i2s_set_adc_mode to
//initialize the specific ADC channel.
@@ -1099,8 +1093,10 @@ esp_err_t i2s_driver_install(i2s_port_t i2s_num, const i2s_config_t *i2s_config,
//To make sure hardware is enabled before any hardware register operations.
if (i2s_num == I2S_NUM_1) {
+ periph_module_reset(PERIPH_I2S1_MODULE);
periph_module_enable(PERIPH_I2S1_MODULE);
} else {
+ periph_module_reset(PERIPH_I2S0_MODULE);
periph_module_enable(PERIPH_I2S0_MODULE);
}
|
Fix build for PPC970 on FreeBSD pt. 1
FreeBSD needs DCBT_ARG=0 as well. | @@ -241,7 +241,7 @@ static inline int blas_quickdivide(blasint x, blasint y){
#define HAVE_PREFETCH
#endif
-#if defined(POWER3) || defined(POWER6) || defined(PPCG4) || defined(CELL) || defined(POWER8) || defined(POWER9) || ( defined(PPC970) && defined(OS_DARWIN) )
+#if defined(POWER3) || defined(POWER6) || defined(PPCG4) || defined(CELL) || defined(POWER8) || defined(POWER9) || ( defined(PPC970) && ( defined(OS_DARWIN) || defined(OS_FREEBSD) ) )
#define DCBT_ARG 0
#else
#define DCBT_ARG 8
|
JoinGroup: check correct associations key | @@ -85,7 +85,7 @@ export function JoinGroup(props: JoinGroupProps) {
await waiter((p: JoinGroupProps) => {
return group in p.groups &&
(group in (p.associations?.graph ?? {})
- || group in (p.associations?.contacts ?? {}))
+ || group in (p.associations?.groups ?? {}))
});
if(props.groups?.[group]?.hidden) {
const { metadata } = associations.graph[group];
|
Android: update default build settings for thew new app template | @@ -110,7 +110,12 @@ uwp:
android:
#manifest_template: 'AndroidManifest.erb'
- version: 4.4.2
+
+ #specifies compile SDK version ( 9 corresponds to API 28). If not specified then latest available API level will be used.
+ #version: 9
+
+ #targetSDK: 28
+
## Note: in order to simplify debugging only app messages and system channels with priority informative and higher, and any errors are enabled by default
logcatFilter: APP:I StrictMode:I DEBUG:I *:E
|
Fix test_misc_version() for various XML_UNICODE issues. | @@ -8010,10 +8010,16 @@ START_TEST(test_misc_version)
if (!versions_equal(&read_version, &parsed_version))
fail("Version mismatch");
-#if ! defined(XML_UNICODE)
+#if ! defined(XML_UNICODE) || defined(XML_UNICODE_WCHAR_T)
+ if (xcstrcmp(version_text, XCS("expat_2.2.4"))) /* needs bump on releases */
+ fail("XML_*_VERSION in expat.h out of sync?\n");
+#else
+ /* If we have XML_UNICODE defined but not XML_UNICODE_WCHAR_T
+ * then XML_LChar is defined as char, for some reason.
+ */
if (strcmp(version_text, "expat_2.2.4")) /* needs bump on releases */
fail("XML_*_VERSION in expat.h out of sync?\n");
-#endif /* ! defined(XML_UNICODE) */
+#endif /* ! defined(XML_UNICODE) || defined(XML_UNICODE_WCHAR_T) */
}
END_TEST
|
ColorInput: fix initial field state | @@ -37,8 +37,7 @@ function padHex(hex: string) {
export function ColorInput(props: ColorInputProps) {
const { id, placeholder, label, caption, disabled, ...rest } = props;
const [{ value, onBlur }, meta, { setValue, setTouched }] = useField(id);
- const [field, setField] = useState('');
- // const [error, setError] = useState<string | undefined>();
+ const [field, setField] = useState(uxToHex(value));
useEffect(() => {
const newValue = hexToUx(padHex(field));
|
yanglint UPDATE create empty features array | @@ -127,7 +127,8 @@ int
parse_features(const char *fstring, struct ly_set *fset)
{
struct schema_features *rec;
- char *p;
+ uint32_t count;
+ char *p, **fp;
rec = calloc(1, sizeof *rec);
if (!rec) {
@@ -148,10 +149,11 @@ parse_features(const char *fstring, struct ly_set *fset)
}
rec->mod_name = strndup(fstring, p - fstring);
- /* start count on 2 to include terminating NULL byte */
- for (int count = 2; p; ++count) {
+ count = 0;
+ while (p) {
size_t len = 0;
char *token = p + 1;
+
p = strchr(token, ',');
if (!p) {
/* the last item, if any */
@@ -159,19 +161,28 @@ parse_features(const char *fstring, struct ly_set *fset)
} else {
len = p - token;
}
+
if (len) {
- char **fp = realloc(rec->features, count * sizeof *rec->features);
+ fp = realloc(rec->features, (count + 1) * sizeof *rec->features);
if (!fp) {
YLMSG_E("Unable to store features list information (%s).\n", strerror(errno));
return -1;
}
rec->features = fp;
- rec->features[count - 1] = NULL; /* terminating NULL-byte */
- fp = &rec->features[count - 2]; /* array item to set */
+ fp = &rec->features[count++]; /* array item to set */
(*fp) = strndup(token, len);
}
}
+ /* terminating NULL */
+ fp = realloc(rec->features, (count + 1) * sizeof *rec->features);
+ if (!fp) {
+ YLMSG_E("Unable to store features list information (%s).\n", strerror(errno));
+ return -1;
+ }
+ rec->features = fp;
+ rec->features[count++] = NULL;
+
return 0;
}
|
cups/dest.c: Look for default printer on network as well
The current code of `cupsGetNamedDest()` doesn't look up on network for
a default printer. The result is a temporary queue cannot be set as a
default printer.
Fixes | @@ -1835,31 +1835,22 @@ cupsGetNamedDest(http_t *http, /* I - Connection to server or @code CUPS_HTT
*/
if (!_cupsGetDests(http, op, dest_name, &dest, 0, 0))
- {
- if (name)
{
_cups_namedata_t data; /* Callback data */
DEBUG_puts("1cupsGetNamedDest: No queue found for printer, looking on network...");
- data.name = name;
+ data.name = dest_name;
data.dest = NULL;
cupsEnumDests(0, 1000, NULL, 0, 0, (cups_dest_cb_t)cups_name_cb, &data);
if (!data.dest)
- {
- _cupsSetError(IPP_STATUS_ERROR_NOT_FOUND, _("The printer or class does not exist."), 1);
- return (NULL);
- }
-
- dest = data.dest;
- }
- else
{
switch (set_as_default)
{
default :
+ _cupsSetError(IPP_STATUS_ERROR_NOT_FOUND, _("The printer or class does not exist."), 1);
break;
case 1 : /* Set from env vars */
@@ -1886,6 +1877,8 @@ cupsGetNamedDest(http_t *http, /* I - Connection to server or @code CUPS_HTT
return (NULL);
}
+
+ dest = data.dest;
}
DEBUG_printf(("1cupsGetNamedDest: Got dest=%p", (void *)dest));
|
testcase: fix up tc_roundrobin.c problem
tc_roundrobin try to create 15 pthread. In case of CONFIG_MAX_TASKS is
16 or smaller than 16. This should fail. This patch fix up this problem. | #define LOOP 10
#define LOG_SIZE 400
#define TESTCASE 3
+#if CONFIG_MAX_TASKS > 16
#define NTHREAD 5
+#else
+#define NTHREAD 3
+#endif
#define MAX_TASKS_MASK (CONFIG_MAX_TASKS-1)
#define PIDHASH(pid) ((pid) & MAX_TASKS_MASK)
/****************************************************************************
@@ -47,7 +51,11 @@ int log_timeslice[LOG_SIZE];
volatile int logidx;
int start, created;
int pid_prio[CONFIG_MAX_TASKS];
+#if CONFIG_MAX_TASKS > 16
int priority[TESTCASE][NTHREAD] = { {90, 100, 100, 100, 110}, {110, 120, 120, 120, 130}, {70, 80, 80, 80, 90} };
+#else
+int priority[TESTCASE][NTHREAD] = { {90, 100, 110}, {110, 120, 130}, {70, 80, 90} };
+#endif
pthread_attr_t attr;
struct sched_param param;
|
Fix SEGV in PUT sensor/<id>/config
`sensor` was dereferenced without checking for null. | @@ -693,9 +693,6 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse &
TaskItem task;
QString id = req.path[3];
Sensor *sensor = id.length() < MIN_UNIQUEID_LENGTH ? getSensorNodeForId(id) : getSensorNodeForUniqueId(id);
- Device *device = (sensor && sensor->parentResource()) ? static_cast<Device*>(sensor->parentResource()) : nullptr;
- Resource *rsub = DEV_GetSubDevice(device, nullptr, sensor->uniqueId());
- const bool devManaged = device && device->managed();
bool ok;
bool updated;
bool save = false;
@@ -708,9 +705,6 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse &
QVariant var = Json::parse(req.content, ok);
QVariantMap map = var.toMap();
-// QRegExp latitude("^\\d{3,3}\\.\\d{4,4}(W|E)$");
-// QRegExp longitude("^\\d{3,3}\\.\\d{4,4}(N|S)$");
-
rsp.httpStatus = HttpStatusOk;
if (!ok)
@@ -727,6 +721,16 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse &
return REQ_READY_SEND;
}
+ Device *device = static_cast<Device*>(sensor->parentResource());
+ Resource *rsub = sensor;
+ bool devManaged = false;
+
+ if (device)
+ {
+ rsub = DEV_GetSubDevice(device, nullptr, sensor->uniqueId());
+ devManaged = device->managed();
+ }
+
bool isClip = sensor->type().startsWith(QLatin1String("CLIP"));
if (req.sock)
|
npu2: hw-procedures: Add obus_brick_index()
We have code in reset_ntl() which finds the index number of our brick
within its obus chiplet. Move that logic to a separate function for
reuse.
No functional change.
Acked-by: Alistair Popple | @@ -191,6 +191,18 @@ static uint32_t nop(struct npu2_dev *npu_dev __unused)
}
DEFINE_PROCEDURE(nop);
+/* Return the brick number (0-2) within an obus chiplet */
+static int obus_brick_index(struct npu2_dev *ndev)
+{
+ int index = ndev->index % 3;
+
+ /* On the second obus chiplet, index is reversed */
+ if ((ndev->pl_xscom_base & 0x3F000000) != 0x09000000)
+ return 2 - index;
+
+ return index;
+}
+
static bool poll_fence_status(struct npu2_dev *ndev, uint64_t val)
{
uint64_t fs;
@@ -212,22 +224,7 @@ static uint32_t reset_ntl(struct npu2_dev *ndev)
uint64_t val;
/* Write PRI */
- if ((ndev->pl_xscom_base & 0xFFFFFFFF) == 0x9010C3F)
- val = SETFIELD(PPC_BITMASK(0,1), 0ull, ndev->index % 3);
- else {
- switch (ndev->index % 3) {
- case 0:
- val = SETFIELD(PPC_BITMASK(0,1), 0ull, 2);
- break;
- case 1:
- val = SETFIELD(PPC_BITMASK(0,1), 0ull, 1);
- break;
- case 2:
- val = SETFIELD(PPC_BITMASK(0,1), 0ull, 0);
- break;
- }
- }
-
+ val = SETFIELD(PPC_BITMASK(0,1), 0ull, obus_brick_index(ndev));
npu2_write_mask(ndev->npu, NPU2_NTL_PRI_CFG(ndev), val, -1ULL);
/* NTL Reset */
|
king kong fix update
now possible to run exe directly with default parameters
initial startup defaulted to desktop resolution
fixed an issue in windowed mode (text was too big) | @@ -205,6 +205,48 @@ void Init()
}
}; injector::MakeInline<MouseSensHook>(pattern.get_first(0), pattern.get_first(6));
}
+
+ static const std::string defaultCmd("/B /lang:01 /spg:50 /GDBShaders KKMaps.bf");
+ pattern = hook::pattern("C7 45 ? ? ? ? ? 68 04 01 00 00 8D 85 D8 FB FF FF 50"); //0x401A10
+ struct StartupHook
+ {
+ void operator()(injector::reg_pack& regs)
+ {
+ *(uint32_t*)(regs.ebp - 0x4) = 0;
+ *(uint32_t*)(regs.ebp + 0x10) = (uint32_t)defaultCmd.data();
+ _asm nop
+ }
+ }; injector::MakeInline<StartupHook>(pattern.get_first(0), pattern.get_first(7));
+
+ if (true) //windowed mode text fix
+ {
+ auto[ResX, ResY] = GetDesktopRes();
+ HKEY phkResult;
+ DWORD cbData, Type;
+ BYTE Data[4];
+
+ if (RegOpenKeyExA(HKEY_CURRENT_USER, "Software\\Ubisoft\\KingKong\\{2C391F94-B8B9-4832-9C57-3AFC332CC037}\\Basic video", 0, KEY_READ | KEY_SET_VALUE, &phkResult))
+ RegCreateKeyA(HKEY_CURRENT_USER, "Software\\Ubisoft\\KingKong\\{2C391F94-B8B9-4832-9C57-3AFC332CC037}\\Basic video", &phkResult);
+
+ cbData = 4;
+ if (!RegQueryValueExA(phkResult, "ResolutionWidth", 0, &Type, Data, &cbData) && Type == 4 && cbData == 4)
+ ResX = *(int32_t*)Data;
+ else
+ RegSetValueExA(phkResult, "ResolutionWidth", 0, REG_DWORD, (const BYTE*)&ResX, cbData);
+ cbData = 4;
+ if (!RegQueryValueExA(phkResult, "ResolutionHeight", 0, &Type, Data, &cbData) && Type == 4 && cbData == 4)
+ ResY = *(int32_t*)Data;
+ else
+ RegSetValueExA(phkResult, "ResolutionHeight", 0, REG_DWORD, (const BYTE*)&ResY, cbData);
+ RegCloseKey(phkResult);
+
+ pattern = hook::pattern("68 E0 01 00 00 68 80 02 00 00");
+ for (size_t i = 0; i < pattern.size(); ++i)
+ {
+ injector::WriteMemory(pattern.get(i).get<uint32_t>(6), ResX, true);
+ injector::WriteMemory(pattern.get(i).get<uint32_t>(1), ResY, true);
+ }
+ }
}
CEXP void InitializeASI()
|
Install SCons on Python 3 in TravisCI job. | @@ -19,13 +19,13 @@ if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
python3 -m pip install --user virtualenv
python3 -m virtualenv ~/venv
source ~/venv/bin/activate
- if [[ "$BUILD_TOOL" == "scons" ]]; then
- pip install scons
- elif [[ "$BUILD_TOOL" == "autotools" ]]; then
+ if [[ "$BUILD_TOOL" == "autotools" ]]; then
HOMEBREW_NO_AUTO_UPDATE=1 brew install sdl2
fi
fi
if [[ "$BUILD_TOOL" == "conan" ]]; then
pip install -U conan_package_tools
+elif [[ "$BUILD_TOOL" == "scons" ]]; then
+ pip install -U scons
fi
|
libhfuzz/instrument: require popcnt for more functions | @@ -272,7 +272,7 @@ HF_REQUIRE_SSE42_POPCNT void __sanitizer_cov_trace_cmpd(
/*
* -fsanitize-coverage=trace-div
*/
-void __sanitizer_cov_trace_div8(uint64_t Val) {
+HF_REQUIRE_SSE42_POPCNT void __sanitizer_cov_trace_div8(uint64_t Val) {
uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
uint8_t v = ((sizeof(Val) * 8) - __builtin_popcountll(Val));
uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
@@ -282,7 +282,7 @@ void __sanitizer_cov_trace_div8(uint64_t Val) {
}
}
-void __sanitizer_cov_trace_div4(uint32_t Val) {
+HF_REQUIRE_SSE42_POPCNT void __sanitizer_cov_trace_div4(uint32_t Val) {
uintptr_t pos = (uintptr_t)__builtin_return_address(0) % _HF_PERF_BITMAP_SIZE_16M;
uint8_t v = ((sizeof(Val) * 8) - __builtin_popcount(Val));
uint8_t prev = ATOMIC_GET(feedback->bbMapCmp[pos]);
|
netutils/dhcpc: minor patch for handling exception cases
This commit is the patch to handling exception cases
- when abnormal interface name is coming from applications,
dhcpc should not run
- arguments used by dhcpc_request must not be null | @@ -369,7 +369,11 @@ void *dhcpc_open(const char *intf)
uint8_t macaddr[IFHWADDRLEN];
int maclen = IFHWADDRLEN;
- netlib_getmacaddr(intf, macaddr);
+ if (netlib_getmacaddr(intf, macaddr) != OK) {
+ /* Do not open dhcpc socket on wrong interface name */
+ ndbg("ERROR : failed to netlib_getmacaddr\n");
+ return NULL;
+ }
ndbg("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", ((uint8_t *)macaddr)[0], ((uint8_t *)macaddr)[1], ((uint8_t *)macaddr)[2], ((uint8_t *)macaddr)[3], ((uint8_t *)macaddr)[4], ((uint8_t *)macaddr)[5]);
@@ -449,8 +453,15 @@ int g_dhcpc_state;
int dhcpc_request(void *handle, struct dhcpc_state *presult)
{
- if (!handle)
+ if (!handle) {
+ ndbg("ERROR : handle must not be null\n");
+ return -100;
+ }
+
+ if (!presult) {
+ ndbg("ERROR : presult must not be null\n");
return -100;
+ }
struct dhcpc_state_s *pdhcpc = (struct dhcpc_state_s *)handle;
g_pResult = presult;
|
Moved setting ctx for temporary cache object before metadata init
This way debug prints during metadata init phase won't cause crash
(because of the fact that temporary cache object does not have proper
ctx set hence does not have logger obj). | @@ -1206,6 +1206,7 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
}
tmp_cache = params.cache;
+ tmp_cache->owner = ctx;
/*
* Initialize metadata selected segments of metadata in memory
@@ -1218,8 +1219,6 @@ static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
}
params.flags.metadata_inited = true;
- tmp_cache->owner = ctx;
-
result = ocf_cache_set_name(tmp_cache, cfg->name, OCF_CACHE_NAME_SIZE);
if (result) {
env_rmutex_unlock(&ctx->lock);
|
Add a test for CVE-2021-3449
We perform a reneg handshake, where the second ClientHello drops the
sig_algs extension. It must also contain cert_sig_algs for the test to
work. | @@ -38,7 +38,7 @@ my $proxy = TLSProxy::Proxy->new(
$proxy->clientflags("-no_tls1_3");
$proxy->reneg(1);
$proxy->start() or plan skip_all => "Unable to start up Proxy for tests";
-plan tests => 3;
+plan tests => 4;
ok(TLSProxy::Message->success(), "Basic renegotiation");
#Test 2: Client does not send the Reneg SCSV. Reneg should fail
@@ -78,6 +78,20 @@ SKIP: {
"Check ClientHello version is the same");
}
+SKIP: {
+ skip "TLSv1.2 disabled", 1
+ if disabled("tls1_2");
+
+ #Test 4: Test for CVE-2021-3449. client_sig_algs instead of sig_algs in
+ # resumption ClientHello
+ $proxy->clear();
+ $proxy->filter(\&sigalgs_filter);
+ $proxy->clientflags("-tls1_2");
+ $proxy->reneg(1);
+ $proxy->start();
+ ok(TLSProxy::Message->fail(), "client_sig_algs instead of sig_algs");
+}
+
sub reneg_filter
{
my $proxy = shift;
@@ -97,3 +111,23 @@ sub reneg_filter
}
}
}
+
+sub sigalgs_filter
+{
+ my $proxy = shift;
+ my $cnt = 0;
+
+ # We're only interested in the second ClientHello message
+ foreach my $message (@{$proxy->message_list}) {
+ if ($message->mt == TLSProxy::Message::MT_CLIENT_HELLO) {
+ next if ($cnt++ == 0);
+
+ my $sigs = pack "C10", 0x00, 0x08,
+ # rsa_pkcs_sha{256,384,512,1}
+ 0x04, 0x01, 0x05, 0x01, 0x06, 0x01, 0x02, 0x01;
+ $message->set_extension(TLSProxy::Message::EXT_SIG_ALGS_CERT, $sigs);
+ $message->delete_extension(TLSProxy::Message::EXT_SIG_ALGS);
+ $message->repack();
+ }
+ }
+}
|
multifile: fix storage kdbGet after partial cache miss | @@ -563,7 +563,7 @@ static Codes doGetStorage (MultiConfig * mc, Key * parentKey)
while ((k = ksNext (mc->childBackends)) != NULL)
{
SingleConfig * s = *(SingleConfig **) keyValue (k);
- if (s->rcResolver != SUCCESS) continue;
+ if (s->rcResolver != SUCCESS && s->rcResolver != CACHE_HIT) continue;
keySetName (parentKey, s->parentString);
keySetString (parentKey, s->fullPath);
Plugin * storage = s->storage;
@@ -637,7 +637,7 @@ int elektraMultifileGet (Plugin * handle, KeySet * returned, Key * parentKey ELE
if (mc->getPhase == MULTI_GETRESOLVER)
{
rc = updateFiles (handle, mc, returned, parentKey);
- if (rc == SUCCESS || rc == CACHE_HIT)
+ if (rc == SUCCESS)
{
mc->getPhase = MULTI_GETSTORAGE;
}
@@ -645,7 +645,7 @@ int elektraMultifileGet (Plugin * handle, KeySet * returned, Key * parentKey ELE
else if (mc->getPhase == MULTI_GETSTORAGE)
{
rc = doGetStorage (mc, parentKey);
- if (rc == SUCCESS || rc == CACHE_HIT || mc->hasDeleted)
+ if (rc == SUCCESS || mc->hasDeleted)
{
fillReturned (mc, returned);
mc->hasDeleted = 0;
|
drv_spi: clean DMA flags for blocking transfer | @@ -269,6 +269,9 @@ void spi_dma_transfer_bytes(spi_ports_t port, uint8_t *buffer, uint32_t length)
while (DMA_GetFlagStatus(PORT.dma.tx_stream, PORT.dma.tx_tci_flag) == RESET)
;
+ DMA_ClearFlag(PORT.dma.rx_stream, PORT.dma.rx_tci_flag);
+ DMA_ClearFlag(PORT.dma.tx_stream, PORT.dma.tx_tci_flag);
+
SPI_I2S_DMACmd(PORT.channel, SPI_I2S_DMAReq_Tx, DISABLE);
SPI_I2S_DMACmd(PORT.channel, SPI_I2S_DMAReq_Rx, DISABLE);
|
diagnostics version | @@ -3,7 +3,7 @@ RESOURCES_LIBRARY()
IF(USE_SYSTEM_COVERAGE_PUSH_TOOL)
MESSAGE(WARNING System coverage push tool $USE_SYSTEM_COVERAGE_PUSH_TOOL will be used)
ELSEIF(HOST_OS_LINUX)
- DECLARE_EXTERNAL_RESOURCE(COVERAGE_PUSH_TOOL sbr:1413604407)
+ DECLARE_EXTERNAL_RESOURCE(COVERAGE_PUSH_TOOL sbr:1415592743)
ELSE()
MESSAGE(FATAL_ERROR Unsupported host for COVERAGE_PUSH_TOOL)
ENDIF()
|
avx: correct range of imm8 argument on _mm_permute2f128_*
Thanks to for pointing this out. | @@ -4636,7 +4636,7 @@ simde_mm256_permutevar_pd (simde__m256d a, simde__m256i b) {
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
simde_mm256_permute2f128_ps (simde__m256 a, simde__m256 b, const int imm8)
- SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
+ SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
simde__m256_private
r_,
a_ = simde__m256_to_private(a),
@@ -4658,7 +4658,7 @@ simde_mm256_permute2f128_ps (simde__m256 a, simde__m256 b, const int imm8)
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
simde_mm256_permute2f128_pd (simde__m256d a, simde__m256d b, const int imm8)
- SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
+ SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
simde__m256d_private
r_,
a_ = simde__m256d_to_private(a),
@@ -4680,7 +4680,7 @@ simde_mm256_permute2f128_pd (simde__m256d a, simde__m256d b, const int imm8)
SIMDE_FUNCTION_ATTRIBUTES
simde__m256i
simde_mm256_permute2f128_si256 (simde__m256i a, simde__m256i b, const int imm8)
- SIMDE_REQUIRE_RANGE(imm8, 0, 15) {
+ SIMDE_REQUIRE_RANGE(imm8, 0, 255) {
simde__m256i_private
r_,
a_ = simde__m256i_to_private(a),
|
Ensure operator precedence for open shared bit.
This update adds parenthesis to ensure operator precedence/readability
for the shared bit of fpgaOpen flags param. | @@ -91,7 +91,7 @@ fpga_result __FPGA_API__ fpgaOpen(fpga_token token, fpga_handle *handle, int fla
_handle->wsid_root = NULL;
// Open resources in exclusive mode unless FPGA_OPEN_SHARED is given
- open_flags = O_RDWR | (flags & FPGA_OPEN_SHARED ? 0 : O_EXCL);
+ open_flags = O_RDWR | ((flags & FPGA_OPEN_SHARED) ? 0 : O_EXCL);
fddev = open(_token->devpath, open_flags);
if (-1 == fddev) {
FPGA_MSG("Open failed: %s", strerror(errno));
|
[dfs] Update DFS_FD_MAX default size to 16. | @@ -24,8 +24,7 @@ if RT_USING_DFS
config DFS_FD_MAX
int "The maximal number of opened files"
- default 16 if RT_USING_DFS_NFS
- default 4
+ default 16
config RT_USING_DFS_ELMFAT
bool "Enable elm-chan fatfs"
|
chat: pointer cursor for attachment + dojo in input | @@ -189,6 +189,7 @@ class ChatInput extends Component<ChatInputProps, ChatInputState> {
) : (
<Icon
icon='Attachment'
+ cursor='pointer'
width='16'
height='16'
onClick={() =>
@@ -201,6 +202,7 @@ class ChatInput extends Component<ChatInputProps, ChatInputState> {
<Box mr={2} flexShrink={0} height='16px' width='16px' flexBasis='16px'>
<Icon
icon='Dojo'
+ cursor='pointer'
onClick={this.toggleCode}
color={state.inCodeMode ? 'blue' : 'black'}
/>
|
Add comments in dockerfile for clarity | @@ -10,17 +10,21 @@ libboost-program-options-dev \
libboost-regex-dev \
libsqlite3-dev \
ragel \
+# Clean up to reduce layer size
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /usr/share/doc /usr/share/doc-base
-# update needed again, as it was removed before; it fetches less than 1 mb
+# apt-get update needed again, as it was removed before
RUN apt-get update && apt-get install -y --no-install-recommends wget bzip2 \
+# Built-in ODB packages don't seem to work due to a broken ABI, see this thread: http://codesynthesis.com/pipermail/odb-users/2016-May/003277.html
&& wget http://www.codesynthesis.com/download/odb/2.4/odb_2.4.0-1_amd64.deb -O /opt/odb_2.4.0-1_amd64.deb \
&& wget http://codesynthesis.com/download/odb/2.4/libodb-2.4.0.tar.bz2 -O /opt/libodb.tar.bz2 \
&& wget http://codesynthesis.com/download/odb/2.4/libodb-sqlite-2.4.0.tar.bz2 -O /opt/libodb-sqlite.tar.bz2 \
+# Build ODB dependencies
&& cd /opt && tar jxvf libodb.tar.bz2 && tar jxvf libodb-sqlite.tar.bz2 && dpkg -i odb_2.4.0-1_amd64.deb \
&& cd libodb-2.4.0 && ./configure && make && make install && cd .. \
&& cd libodb-sqlite-2.4.0 && ./configure --with-libodb=../libodb-2.4.0 && make && make install && cd .. \
+# Clean up to reduce layer size
&& apt-get clean && apt-get remove --purge -y wget bzip2 \
&& rm -rf /opt /var/lib/apt/lists/* /usr/share/doc /usr/share/doc-base
|
settings: use async buttons for s3
Fixes urbit/landscape#290 | import React, { ReactElement, useCallback } from 'react';
-import { Formik } from 'formik';
+import { Formik, FormikHelpers } from 'formik';
import {
ManagedTextInputField as Input,
@@ -10,6 +10,7 @@ import {
Col,
Anchor
} from '@tlon/indigo-react';
+import { AsyncButton } from "~/views/components/AsyncButton";
import GlobalApi from '~/logic/api/global';
import { BucketList } from './BucketList';
@@ -35,19 +36,19 @@ export default function S3Form(props: S3FormProps): ReactElement {
const { api } = props;
const s3 = useStorageState((state) => state.s3);
- const onSubmit = useCallback(
- (values: FormSchema) => {
+ const onSubmit = useCallback(async (values: FormSchema, actions: FormikHelpers<FormSchema>) => {
if (values.s3secretAccessKey !== s3.credentials?.secretAccessKey) {
- api.s3.setSecretAccessKey(values.s3secretAccessKey);
+ await api.s3.setSecretAccessKey(values.s3secretAccessKey);
}
if (values.s3endpoint !== s3.credentials?.endpoint) {
- api.s3.setEndpoint(values.s3endpoint);
+ await api.s3.setEndpoint(values.s3endpoint);
}
if (values.s3accessKeyId !== s3.credentials?.accessKeyId) {
- api.s3.setAccessKeyId(values.s3accessKeyId);
+ await api.s3.setAccessKeyId(values.s3accessKeyId);
}
+ actions.setStatus({ success: null });
},
[api, s3]
);
@@ -95,9 +96,9 @@ export default function S3Form(props: S3FormProps): ReactElement {
label='Secret Access Key'
id='s3secretAccessKey'
/>
- <Button style={{ cursor: 'pointer' }} type='submit'>
+ <AsyncButton primary style={{ cursor: 'pointer' }} type='submit'>
Submit
- </Button>
+ </AsyncButton>
</Col>
</Form>
</Formik>
|
Update README.md
Update Azure Pipelines badges. | @@ -40,8 +40,8 @@ This repo contains:
| Component | Build Status | Build Status (develop) |
|:-|---|---|
-| nanoBooter + nanoCLR | [](https://ci.appveyor.com/project/nfbot/nf-interpreter-852pb/branch/master) | [](https://dev.azure.com/nanoframework/nf-interpreter/_build/latest?definitionId=2) |
-| Win32 test project | [](https://ci.appveyor.com/project/nfbot/nf-interpreter-852pb/branch/master) | [](https://dev.azure.com/nanoframework/nf-interpreter/_build/latest?definitionId=2) |
+| nanoBooter + nanoCLR | [](https://ci.appveyor.com/project/nfbot/nf-interpreter-852pb/branch/master) | [](https://dev.azure.com/nanoframework/nf-interpreter/_build/latest?definitionId=34?branchName=develop) |
+| Win32 test project | [](https://ci.appveyor.com/project/nfbot/nf-interpreter-852pb/branch/master) | [](https://dev.azure.com/nanoframework/nf-interpreter/_build/latest?definitionId=34?branchName=develop) |
## Firmware for reference boards
|
tinyprintf: Include '-' for floats in range (-1,0)
When floating point numbers in the range (-1, 0) (i.e., between -1 and
0, exclusive) were printed, the negative sign was not included. This
commit fixes the printf functions such that the negative sign is
preserved. | @@ -347,10 +347,17 @@ size_t tfp_format(FILE *putp, const char *fmt, va_list va)
case 'f':
p.base = 10;
d = va_arg(va, double);
- /* Cast to an int to get the integer part of the number */
+ /* Convert to an int to get the integer part of the number. */
n = d;
/* Convert to ascii */
i2a(n, &p);
+ /* When the double was converted to an int it was truncated
+ * towards 0. If the number is in the range (-1, 0), the
+ * negative sign was lost. Preserve the sign in this case.
+ */
+ if (d < 0.0) {
+ p.sign = 1;
+ }
/* Ignore left align for integer part */
p.left = 0;
/* Subtract width for decimal part and decimal point */
|
main shm CHANGE debug ext shm output improved | @@ -336,19 +336,19 @@ sr_shmmain_ext_print(sr_shm_t *shm_main, char *ext_shm_addr, size_t ext_shm_size
printed = 0;
for (i = 0; i < item_count; ++i) {
if (items[i].start > cur_off) {
- printed += sr_sprintf(&msg, &msg_len, printed, "%04ld-%04ld: (wasted %ld)\n",
- cur_off, items[i].start, items[i].start - cur_off);
+ printed += sr_sprintf(&msg, &msg_len, printed, "%06ld-%06ld[%06lX]: (wasted %ld)\n",
+ cur_off, items[i].start, items[i].start, items[i].start - cur_off);
cur_off = items[i].start;
}
- printed += sr_sprintf(&msg, &msg_len, printed, "%04ld-%04ld: %s\n",
- items[i].start, items[i].start + items[i].size, items[i].name);
+ printed += sr_sprintf(&msg, &msg_len, printed, "%06ld-%06ld[%06lX]: %s\n",
+ items[i].start, items[i].start + items[i].size, items[i].start + items[i].size, items[i].name);
cur_off += items[i].size;
free(items[i].name);
}
if ((unsigned)cur_off < ext_shm_size) {
- printed += sr_sprintf(&msg, &msg_len, printed, "%04ld-%04ld: (wasted %ld)\n",
- cur_off, ext_shm_size, ext_shm_size - cur_off);
+ printed += sr_sprintf(&msg, &msg_len, printed, "%06ld-%06ld[%06lX]: (wasted %ld)\n",
+ cur_off, ext_shm_size, ext_shm_size, ext_shm_size - cur_off);
}
free(items);
|
Fix a couple nits in DEFINE_STACK_OF.pod
Only the 'new' variant of sk_TYPE_new_reserve() deals with
compression functions.
Mention both new 'reserve' APIs as being added in OpenSSL 1.1.1. | @@ -109,7 +109,7 @@ sk_TYPE_new_reserve() allocates a new stack. The new stack will have additional
memory allocated to hold B<n> elements if B<n> is positive. The next B<n> calls
to sk_TYPE_insert(), sk_TYPE_push() or sk_TYPE_unshift() will not fail or cause
memory to be allocated or reallocated. If B<n> is zero or less than zero, no
-memory is allocated. sk_TYPE_reserve() also sets the comparison function
+memory is allocated. sk_TYPE_new_reserve() also sets the comparison function
B<compare> to the newly created stack. If B<compare> is B<NULL> then no
comparison function is used.
@@ -257,7 +257,7 @@ stack.
Before OpenSSL 1.1.0, this was implemented via macros and not inline functions
and was not a public API.
-sk_TYPE_new_reserve() was added in OpenSSL 1.1.1.
+sk_TYPE_reserve() and sk_TYPE_new_reserve() were added in OpenSSL 1.1.1.
=head1 COPYRIGHT
|
libtcmu: add dev and ring buffer cmd_tail update macro APIs
This patch add two cmd_tail update APIs to simplify the code. | @@ -567,6 +567,12 @@ device_cmd_tail(struct tcmu_device *dev)
return (struct tcmu_cmd_entry *) ((char *) mb + mb->cmdr_off + dev->cmd_tail);
}
+/* update the tcmu_device's tail */
+#define TCMU_UPDATE_DEV_TAIL(dev, mb, ent) \
+do { \
+ dev->cmd_tail = (dev->cmd_tail + tcmu_hdr_get_len((ent)->hdr.len_op)) % mb->cmdr_size; \
+} while (0);
+
struct tcmulib_cmd *tcmulib_get_next_command(struct tcmu_device *dev)
{
struct tcmu_mailbox *mb = dev->map;
@@ -603,7 +609,7 @@ struct tcmulib_cmd *tcmulib_get_next_command(struct tcmu_device *dev)
cmd->cdb = (uint8_t *) (cmd->iovec + cmd->iov_cnt);
memcpy(cmd->cdb, (void *) mb + ent->req.cdb_off, cdb_len);
- dev->cmd_tail = (dev->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
+ TCMU_UPDATE_DEV_TAIL(dev, mb, ent);
return cmd;
}
default:
@@ -611,12 +617,18 @@ struct tcmulib_cmd *tcmulib_get_next_command(struct tcmu_device *dev)
ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
}
- dev->cmd_tail = (dev->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
+ TCMU_UPDATE_DEV_TAIL(dev, mb, ent);
}
return NULL;
}
+/* update the ring buffer's tail */
+#define TCMU_UPDATE_RB_TAIL(mb, ent) \
+do { \
+ mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len((ent)->hdr.len_op)) % mb->cmdr_size; \
+} while (0);
+
void tcmulib_command_complete(
struct tcmu_device *dev,
struct tcmulib_cmd *cmd,
@@ -629,7 +641,7 @@ void tcmulib_command_complete(
while (ent != (void *) mb + mb->cmdr_off + mb->cmd_head) {
if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
break;
- mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
+ TCMU_UPDATE_RB_TAIL(mb, ent);
ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
}
@@ -657,7 +669,7 @@ void tcmulib_command_complete(
ent->rsp.scsi_status = result;
}
- mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
+ TCMU_UPDATE_RB_TAIL(mb, ent);
free(cmd);
}
|
Config: Removing USE_PRFLOW from snap_env
(USE_PRFLOW is part of snap_config) | @@ -115,16 +115,9 @@ while [ -z "$SETUP_DONE" ]; do
####### settings for Partial Reconfiguration flow
- echo "=====PR flow setup====================================="
- if [ -z "$USE_PRFLOW" ]; then
- export USE_PRFLOW=FALSE
- echo "Setting USE_PRFLOW to: \"$USE_PRFLOW\""
- else
- echo "USE_PRFLOW is set to: \"$USE_PRFLOW\""
- fi
- SNAP_ENV="$SNAP_ENV""export USE_PRFLOW=$USE_PRFLOW\n"
-
+ # Note: USE_PRFLOW is defined via snap_config
if [ "$USE_PRFLOW" = "TRUE" ]; then
+ echo "=====PR flow setup====================================="
if [ -z "$DCP_ROOT" ]; then
export DCP_ROOT='${SNAP_ROOT}/dcp'
echo "Setting DCP_ROOT to: \"$DCP_ROOT\""
@@ -145,6 +138,7 @@ while [ -z "$SETUP_DONE" ]; do
####### Cadence simulation setup:
+ # Note: SIMULATOR is defined via snap_config
if [ "$SIMULATOR" == "irun" ]; then
echo "=====Cadence simulation setup=========================="
@@ -176,6 +170,7 @@ while [ -z "$SETUP_DONE" ]; do
fi
fi
+ # Note: NVME_USED is defined via snap_config
if [[ "$NVME_USED" = "TRUE" ]]; then
echo "=====Denali setup======================================"
if [ -z "$DENALI_TOOLS" ]; then
|
Fix sentinel acl change test. Timing issue. | @@ -126,7 +126,7 @@ test "Sentinels (re)connection following master ACL change" {
wait_for_condition 100 50 {
[string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0
} else {
- fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ fail "Expected: Restarted sentinel to be disconnected from master due to obsolete password"
}
# Verify sentinel with updated password managed to connect (wait for sentinelTimer to reconnect)
@@ -137,8 +137,10 @@ test "Sentinels (re)connection following master ACL change" {
}
# Verify sentinel untouched gets failed to connect master
- if {![string match "*disconnected*" [dict get [S $sent2un SENTINEL MASTER mymaster] flags]]} {
- fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2un SENTINEL MASTER mymaster] flags]] != 0
+ } else {
+ fail "Expected: Sentinel to be disconnected from master due to obsolete password"
}
# Now update all sentinels with new password
|
Sensor: Check window size. | @@ -242,10 +242,13 @@ static mp_obj_t py_sensor_set_framesize(mp_obj_t framesize) {
}
static mp_obj_t py_sensor_set_windowing(mp_obj_t roi_obj) {
- mp_uint_t array_len;
+ int x, y, w, h;
+ int res_w = resolution[sensor.framesize][0];
+ int res_h = resolution[sensor.framesize][1];
+
mp_obj_t *array;
+ mp_uint_t array_len;
mp_obj_get_array(roi_obj, &array_len, &array);
- int x, y, w, h;
if (array_len == 4) {
x = mp_obj_get_int(array[0]);
@@ -255,11 +258,21 @@ static mp_obj_t py_sensor_set_windowing(mp_obj_t roi_obj) {
} else if (array_len == 2) {
w = mp_obj_get_int(array[0]);
h = mp_obj_get_int(array[1]);
- x = (resolution[sensor.framesize][0] / 2) - (w / 2);
- y = (resolution[sensor.framesize][1] / 2) - (h / 2);
+ x = (res_w / 2) - (w / 2);
+ y = (res_h / 2) - (h / 2);
} else {
nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
- "tuple/list must either be (x, y, w, h) or (w, h)"));
+ "The tuple/list must either be (x, y, w, h) or (w, h)"));
+ }
+
+ if (w < 8 || h < 8) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "The selected window is too small"));
+ }
+
+ if (x < 0 || (x + w) > res_w || y < 0 || (y + h) > res_h) {
+ nlr_raise(mp_obj_new_exception_msg(&mp_type_ValueError,
+ "The selected window is outside the bounds of the frame"));
}
if (sensor_set_windowing(x, y, w, h) != 0) {
|
set m_axi_awcache to 4'b1111 in axis_ram_writer | @@ -139,7 +139,7 @@ module axis_ram_writer #
assign m_axi_awlen = 4'd15;
assign m_axi_awsize = ADDR_SIZE;
assign m_axi_awburst = 2'b01;
- assign m_axi_awcache = 4'b0011;
+ assign m_axi_awcache = 4'b1111;
assign m_axi_awvalid = int_awvalid_reg;
assign m_axi_wid = int_wid_reg;
assign m_axi_wdata = int_wdata_wire[AXI_DATA_WIDTH-1:0];
|
add redpine to targets.json | "RX_FRSKY_D8": "",
"DSHOT": "300"
}
+ },
+ {
+ "name": "dshot300.redpine",
+ "defines": {
+ "BRUSHLESS_TARGET": "",
+ "RX_REDPINE": "",
+ "DSHOT": "300"
+ }
}
]
},
"RX_FRSKY_D8": ""
}
},
+ {
+ "name": "brushless.redpine",
+ "defines": {
+ "BRUSHLESS_TARGET": "",
+ "RX_REDPINE": ""
+ }
+ },
{
"name": "brushless.serial",
"defines": {
|
Fix argument representation for raw queue debug mesaage.
vfprintf was segfaulting because it was trying to interpret
the %s with a missing argument. | @@ -1105,7 +1105,7 @@ int main(int argc, char** argv)
if (dontparse == 2) {
/* Raw queue, simply pass the input into the postpipe (or to STDOUT
when there is no postpipe) */
- _log("Raw printing, executing \"cat %s\"\n\n");
+ _log("Raw printing, executing \"cat %%s\"\n\n");
snprintf(tmp, 1024, "cat %s", postpipe->data);
run_system_process("raw-printer", tmp);
continue;
|
stdioproc: add test to README | - infos/needs =
- infos/provides =
- infos/recommends =
-- infos/placements = pregetstorage getstorage procgetstorage postgetstorage presetstorage setstorage
+- infos/placements = postgetstorage presetstorage
- infos/status = maintained unittest shelltest experimental
- infos/metadata =
- infos/description = one-line description of stdioproc
@@ -18,7 +18,7 @@ This plugin spawns a new process with a user-defined executable and delegates al
Set the config key `app` and the arrays `args/#`, `env/#` to the path of an executable, the arguments that shall be passed and the environment variables to be set.
```
-kdb mount test.dump /tests/stdioproc specload 'app=/usr/bin/pluginproc' 'args=#1' 'args/#0=--load-plugin' 'args/#1=myplugin'
+kdb mount test.dump /tests/stdioproc stdioproc 'app=/usr/bin/pluginproc' 'args=#1' 'args/#0=--load-plugin' 'args/#1=myplugin'
```
During `elektraStdprocioOpen` the plugin will collect the `args/#` and `env/#` values into two arrays `argv` and `envp`.
@@ -169,9 +169,33 @@ If an unexpected error occurs on either side of the protocol, the connection sho
## Examples
-TODO
+```sh
+# mount the Whitelist Java Plugin via stdioproc
+sudo kdb mount config.file user:/tests/stdioproc dump stdioproc 'app=/usr/bin/java' 'args=#3' 'args/#0=-cp' "args/#1=$BUILD_DIR/src/bindings/jna/plugins/whitelist/build/libs/whitelist-$(kdb --version | sed -nE 's/KDB_VERSION: (.+)/\1/gp')-all.jar" 'args/#2=org.libelektra.stdioproc.StdIoProcApp' 'args/#3=org.libelektra.plugin.WhitelistPlugin'
+
+# Define whitelist
+kdb meta-set user:/tests/stdioproc/key "check/whitelist/#0" ""
+kdb meta-set user:/tests/stdioproc/key "check/whitelist/#1" allowed0
+kdb meta-set user:/tests/stdioproc/key "check/whitelist/#2" allowed1
+
+# Should be allowed
+kdb set user:/tests/stdioproc/key allowed0
+#> Set string to "allowed0"
+
+kdb set user:/tests/stdioproc/key allowed1
+#> Set string to "allowed1"
+
+# Should cause error
+kdb set user:/tests/stdioproc/key not_allowed
+# RET: 5
+# STDERR:.*Validation Semantic: .*'not_allowed' does not adhere to whitelist.*
+
+# cleanup
+sudo kdb umount user:/tests/stdioproc
+```
## Limitations
- The `error` and `commit` functions are currently not supported. Therefore, implementing a resolver is not supported.
- Exporting additional functions (e.g. `checkconf`) is currently not supported.
+- With the current backend system, `stdioproc` can only be used for plugins in the `postgetstorage` or `presetstorage` positions.
|
hpet: fixing dependencies | build library {
target = "hpet_module",
cFiles = ["hpet.c" ],
- addLibraries = libDeps["skb", "int_route_client" , "int_msix_ctrl"],
+ addLibraries = libDeps["int_route_client"],
flounderDefs = [ "hpet" ],
mackerelDevices = ["hpet"],
architectures = [ "x86_64" ]
target = "hpet",
cFiles = [ "main.c"],
addLinkFlags = ["-T" ++ Config.source_dir ++ "/lib/driverkit/bfdrivers.ld" ],
- addLibraries = libDeps["driverkit" , "int_route_client" ,"int_msix_ctrl" ],
+ addLibraries = libDeps["driverkit"],
addModules = ["hpet_module", "hpet_comp_module"],
mackerelDevices = [ "hpet"],
architectures = ["x86_64"]
|
nimble/ll: Rename mempool used for AUX data in extended scan
This is used for extended scanning only so name it appropriately. | @@ -169,15 +169,15 @@ static TAILQ_HEAD(ble_ll_scan_dup_list, ble_ll_scan_dup_entry) g_scan_dup_list;
#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
#if MYNEWT_VAL(BLE_LL_EXT_ADV_AUX_PTR_CNT) != 0
-static os_membuf_t ext_adv_mem[ OS_MEMPOOL_SIZE(
+static os_membuf_t ext_scan_aux_mem[ OS_MEMPOOL_SIZE(
MYNEWT_VAL(BLE_LL_EXT_ADV_AUX_PTR_CNT),
sizeof (struct ble_ll_aux_data))
];
#else
-#define ext_adv_mem NULL
+#define ext_scan_aux_mem NULL
#endif
-static struct os_mempool ext_adv_pool;
+static struct os_mempool ext_scan_aux_pool;
static int ble_ll_scan_start(struct ble_ll_scan_sm *scansm,
struct ble_ll_sched_item *sch);
@@ -245,7 +245,7 @@ ble_ll_scan_ext_adv_init(struct ble_ll_aux_data **aux_data)
{
struct ble_ll_aux_data *e;
- e = os_memblock_get(&ext_adv_pool);
+ e = os_memblock_get(&ext_scan_aux_pool);
if (!e) {
return -1;
}
@@ -1123,7 +1123,7 @@ ble_ll_scan_aux_data_free(struct ble_ll_aux_data *aux_data)
ble_hci_trans_buf_free((uint8_t *)aux_data->evt);
aux_data->evt = NULL;
}
- os_memblock_put(&ext_adv_pool, aux_data);
+ os_memblock_put(&ext_scan_aux_pool, aux_data);
STATS_INC(ble_ll_stats, aux_freed);
}
}
@@ -3940,7 +3940,7 @@ ble_ll_scan_reset(void)
#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
/* clear memory pool for AUX scan results */
- os_mempool_clear(&ext_adv_pool);
+ os_mempool_clear(&ext_scan_aux_pool);
#endif
/* Call the common init function again */
@@ -3959,10 +3959,10 @@ ble_ll_scan_init(void)
os_error_t err;
#if MYNEWT_VAL(BLE_LL_CFG_FEAT_LL_EXT_ADV)
- err = os_mempool_init(&ext_adv_pool,
+ err = os_mempool_init(&ext_scan_aux_pool,
MYNEWT_VAL(BLE_LL_EXT_ADV_AUX_PTR_CNT),
sizeof (struct ble_ll_aux_data),
- ext_adv_mem,
+ ext_scan_aux_mem,
"ble_ll_aux_scan_pool");
BLE_LL_ASSERT(err == 0);
#endif
|
Validate parser parameter to XML_SetUnknownEncodingHandler | @@ -1577,6 +1577,8 @@ XML_SetUnknownEncodingHandler(XML_Parser parser,
XML_UnknownEncodingHandler handler,
void *data)
{
+ if (parser == NULL)
+ return;
unknownEncodingHandler = handler;
unknownEncodingHandlerData = data;
}
|
Alarm systems return error if code0 can't be set
This can happen when no usable OpenSSL library is found. | @@ -571,6 +571,11 @@ bool AlarmSystem::setCode(int index, const QString &code)
sec.secret = CRYPTO_ScryptPassword(code0, CRYPTO_GenerateSalt());
sec.state = 1;
+ if (sec.secret.empty())
+ {
+ return false;
+ }
+
if (DB_StoreSecret(sec))
{
setValue(RConfigConfigured, true);
|
demo_cluster.sh: remove GPSEARCH
Use GPHOME directly. There is no need for GPSEARCH.
This enables demo_cluster.sh to handle symlinks which is useful when
GPDB is installed using RPMs and a demo cluster is desired. | @@ -178,8 +178,6 @@ if [ -z "${GPHOME}" ]; then
echo " file in your Greenplum installation directory."
echo ""
exit 1
-else
- GPSEARCH=$GPHOME
fi
cat <<-EOF
@@ -210,16 +208,16 @@ cat <<-EOF
EOF
-GPPATH=`find $GPSEARCH -name gpstart| tail -1`
+GPPATH=`find -H $GPHOME -name gpstart| tail -1`
RETVAL=$?
if [ "$RETVAL" -ne 0 ]; then
- echo "Error attempting to find Greenplum executables in $GPSEARCH"
+ echo "Error attempting to find Greenplum executables in $GPHOME"
exit 1
fi
if [ ! -x "$GPPATH" ]; then
- echo "No executables found for Greenplum installation in $GPSEARCH"
+ echo "No executables found for Greenplum installation in $GPHOME"
exit 1
fi
GPPATH=`dirname $GPPATH`
|
morphius: update cpu thermal protect point
This patch update CPU temperature shut down point to 105.
BRANCH=zork
TEST=make BOARD=morphius | @@ -465,7 +465,7 @@ BUILD_ASSERT(ARRAY_SIZE(temp_sensors) == TEMP_SENSOR_COUNT);
const static struct ec_thermal_config thermal_cpu = {
.temp_host = {
[EC_TEMP_THRESH_HIGH] = C_TO_K(90),
- [EC_TEMP_THRESH_HALT] = C_TO_K(99),
+ [EC_TEMP_THRESH_HALT] = C_TO_K(105),
},
.temp_host_release = {
[EC_TEMP_THRESH_HIGH] = C_TO_K(80),
|
VERSION bump to version 1.4.45 | @@ -37,7 +37,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 4)
-set(SYSREPO_MICRO_VERSION 44)
+set(SYSREPO_MICRO_VERSION 45)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
test_bpf_table: Re-add deletion of 'bpf' module
This was removed in presumably
because sanitizer complained. | @@ -26,13 +26,13 @@ TEST_CASE("test bpf table", ebpf::bpf_module_rw_engine_enabled() ? "[bpf_table]"
BPF_TABLE("hash", int, int, myhash, 128);
)";
- ebpf::BPF bpf;
+ auto bpf = std::make_unique<ebpf::BPF>();
ebpf::StatusTuple res(0);
std::vector<std::pair<std::string, std::string>> elements;
- res = bpf.init(BPF_PROGRAM);
+ res = bpf->init(BPF_PROGRAM);
REQUIRE(res.ok());
- ebpf::BPFTable t = bpf.get_table("myhash");
+ ebpf::BPFTable t = bpf->get_table("myhash");
// update element
std::string value;
@@ -78,7 +78,8 @@ TEST_CASE("test bpf table", ebpf::bpf_module_rw_engine_enabled() ? "[bpf_table]"
REQUIRE(res.ok());
REQUIRE(elements.size() == 0);
-
+ // delete bpf_module, call to key/leaf printf/scanf must fail
+ bpf.reset();
res = t.update_value("0x07", "0x42");
REQUIRE(!res.ok());
|
pyocf: implement test_attach_cleaner_disabled_non_default | @@ -150,7 +150,6 @@ def test_cleaner_disabled_nop(pyocf_ctx):
cache.set_cleaning_policy(CleaningPolicy.NOP)
[email protected](reason="not implemented")
def test_attach_cleaner_disabled_non_default(pyocf_ctx):
"""
title: Attach cache with default config does not set clener_disabled.
@@ -169,4 +168,11 @@ def test_attach_cleaner_disabled_non_default(pyocf_ctx):
requirements:
- disable_cleaner::default_setting
"""
- pass
+ cache_device = RamVolume(S.from_MiB(50))
+
+ cache = Cache.start_on_device(cache_device)
+
+ cleaning_size = get_metadata_segment_size(cache, CacheMetadataSegment.CLEANING)
+ assert (
+ cleaning_size > 0
+ ), f'Metadata cleaning segment size expected: "> 0", got: "{cleaning_size}"'
|
Fix theta tokens | @@ -351,8 +351,9 @@ tokenDefinition_t *getKnownToken(uint8_t *contractAddress) {
break;
case CHAIN_KIND_FLARE:
currentToken = (tokenDefinition_t *) PIC(&TOKENS_FLARE[i]);
- break case CHAIN_KIND_THETA : currentToken =
- (tokenDefinition_t *) PIC(&TOKENS_THETA[i]);
+ break;
+ case CHAIN_KIND_THETA:
+ currentToken = (tokenDefinition_t *) PIC(&TOKENS_THETA[i]);
break;
case CHAIN_KIND_BSC:
currentToken = (tokenDefinition_t *) PIC(&TOKENS_BSC[i]);
|
wapi: correct scan return value | @@ -1256,14 +1256,14 @@ int wapi_scan_stat(int sock, FAR const char *ifname)
return 1;
}
- printf("err[%d]: %s\n", errno, strerror(errno));
- }
- else
- {
int errcode = errno;
WAPI_IOCTL_STRERROR(SIOCGIWSCAN, errcode);
ret = -errcode;
}
+ else
+ {
+ ret = 0;
+ }
return ret;
}
|
Test shorter long context URI with a failing reallocator
One last code path to exercise | @@ -9264,6 +9264,37 @@ START_TEST(test_nsalloc_realloc_long_context_5)
}
END_TEST
+START_TEST(test_nsalloc_realloc_long_context_6)
+{
+ const char *text =
+ "<!DOCTYPE doc SYSTEM 'foo' [\n"
+ " <!ENTITY en SYSTEM 'bar'>\n"
+ "]>\n"
+ "<doc xmlns='http://example.org/"
+ /* 64 characters per line */
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/"
+ "ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNO/ABCDEFGHIJKLMNOP"
+ "'>\n"
+ "&en;"
+ "</doc>";
+
+ context_realloc_test(parser, text);
+}
+END_TEST
+
static Suite *
make_suite(void)
@@ -9523,6 +9554,7 @@ make_suite(void)
tcase_add_test(tc_nsalloc, test_nsalloc_realloc_long_context_3);
tcase_add_test(tc_nsalloc, test_nsalloc_realloc_long_context_4);
tcase_add_test(tc_nsalloc, test_nsalloc_realloc_long_context_5);
+ tcase_add_test(tc_nsalloc, test_nsalloc_realloc_long_context_6);
return s;
}
|
graph-store lib: skip trip and crip, use rap | ==
::
++ index
- |= i=^index
+ |= ind=^index
^- json
- ?: =(~ i) s+'/'
- =/ j=^tape ""
- |-
- ?~ i [%s (crip j)]
- =/ k=json (numb i.i)
- ?> ?=(%n -.k)
- %_ $
- i t.i
- j (weld j (weld "/" (trip +.k)))
- ==
+ :- %s
+ ?: =(~ ind)
+ '/'
+ %+ roll ind
+ |= [cur=@ acc=@t]
+ ^- @t
+ =/ num (numb cur)
+ ?> ?=(%n -.num)
+ (rap 3 acc '/' p.num ~)
::
++ uid
|= u=^uid
|
out_es: remove unused variables | @@ -235,11 +235,7 @@ static int elasticsearch_format(struct flb_config *config,
msgpack_sbuffer tmp_sbuf;
msgpack_packer tmp_pck;
uint16_t hash[8];
- const char *es_index_custom;
int es_index_custom_len;
- int i;
- msgpack_object key;
- msgpack_object val;
struct flb_elasticsearch *ctx = plugin_context;
/* Iterate the original buffer and perform adjustments */
@@ -348,7 +344,6 @@ static int elasticsearch_format(struct flb_config *config,
else {
memcpy(logstash_index, v, len);
}
- es_index_custom = v;
es_index_custom_len = len;
flb_sds_destroy(v);
}
|
phonon doesn't try to make a mesh if there are no vertices; | @@ -343,8 +343,10 @@ bool phonon_setGeometry(float* vertices, uint32_t* indices, uint32_t vertexCount
status = phonon_iplCreateScene(state.context, NULL, IPL_SCENETYPE_PHONON, 1, materials, NULL, NULL, NULL, NULL, NULL, &state.scene);
if (status != IPL_STATUS_SUCCESS) goto fail;
+ if (vertexCount > 0 && indexCount > 0) {
status = phonon_iplCreateStaticMesh(state.scene, vertexCount, indexCount / 3, (IPLVector3*) vertices, (IPLTriangle*) indices, triangleMaterials, &state.mesh);
if (status != IPL_STATUS_SUCCESS) goto fail;
+ }
status = phonon_iplCreateEnvironment(state.context, NULL, settings, state.scene, NULL, &state.environment);
if (status != IPL_STATUS_SUCCESS) goto fail;
|
[protobuf] Fix invalid function call reported by ubsan
contrib/libs/protobuf/python/google/protobuf/pyext/message.cc:1406:3: runtime error: call to function google::protobuf::python::extension_dict::dealloc(google::protobuf::python::ExtensionDict*) through pointer to incorrect function type 'void (*)(_object *)' | @@ -239,7 +239,8 @@ ExtensionDict* NewExtensionDict(CMessage *parent) {
return self;
}
-void dealloc(ExtensionDict* self) {
+void dealloc(PyObject* object) {
+ ExtensionDict* self = reinterpret_cast<ExtensionDict*>(object);
Py_CLEAR(self->values);
self->owner.reset();
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
|
[x86_64] Re-enable tick callback kickoffs | @@ -23,12 +23,11 @@ static int tick_callback(register_state_t* regs) {
if (tick % 1000 == 0) {
printf("tick %d\n", tick);
}
- // TODO(PT): x86_64
// Wake sleeping services befores ending EOI, or else we
// might get interrupted by another tick while the AMC spinlock is held
- //amc_wake_sleeping_services();
+ amc_wake_sleeping_services();
pic_signal_end_of_interrupt(regs->int_no);
- //task_switch_if_quantum_expired();
+ task_switch_if_quantum_expired();
return 0;
}
|
warn about incorrectly scaled trajectories | @@ -98,6 +98,17 @@ int main_nufft(int argc, char* argv[argc])
assert(3 == traj_dims[0]);
+ long coilest_dims[DIMS];
+
+ estimate_im_dims(DIMS, FFT_FLAGS, coilest_dims, traj_dims, traj);
+
+ if (8 >= md_calc_size(3, coilest_dims)) {
+
+ debug_printf(DP_WARN, "\tThe estimated image size %ldx%ldx%ld is very small.\n"
+ "\tDid you scale your trajectory correctly?\n"
+ "\tThe unit of measurement is pixel_size / FOV.\n",
+ coilest_dims[0], coilest_dims[1], coilest_dims[2]);
+ }
(use_gpu ? num_init_gpu : num_init)();
@@ -111,8 +122,8 @@ int main_nufft(int argc, char* argv[argc])
if (0 == md_calc_size(3, coilim_dims)) {
- estimate_im_dims(DIMS, FFT_FLAGS, coilim_dims, traj_dims, traj);
- debug_printf(DP_INFO, "Est. image size: %ld %ld %ld\n", coilim_dims[0], coilim_dims[1], coilim_dims[2]);
+ md_copy_dims(DIMS, coilim_dims, coilest_dims);
+ debug_printf(DP_INFO, "Est. image size: %ldx%ldx%ld\n", coilim_dims[0], coilim_dims[1], coilim_dims[2]);
if (!conf.decomp) {
|
Fix for algorithms other than hash-then-sign | @@ -2642,11 +2642,12 @@ psa_status_t psa_sign_message_internal(
size_t *signature_length )
{
psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED;
- size_t hash_length;
- uint8_t hash[PSA_HASH_MAX_SIZE];
if ( PSA_ALG_IS_HASH_AND_SIGN( alg ) )
{
+ size_t hash_length;
+ uint8_t hash[PSA_HASH_MAX_SIZE];
+
status = psa_driver_wrapper_hash_compute(
PSA_ALG_SIGN_GET_HASH( alg ),
input, input_length,
@@ -2654,7 +2655,6 @@ psa_status_t psa_sign_message_internal(
if( status != PSA_SUCCESS )
return status;
- }
return psa_driver_wrapper_sign_hash(
attributes, key_buffer, key_buffer_size,
@@ -2662,6 +2662,12 @@ psa_status_t psa_sign_message_internal(
signature, signature_size, signature_length );
}
+ return psa_driver_wrapper_sign_hash(
+ attributes, key_buffer, key_buffer_size,
+ alg, input, input_length,
+ signature, signature_size, signature_length );
+}
+
psa_status_t psa_sign_message( mbedtls_svc_key_id_t key,
psa_algorithm_t alg,
const uint8_t * input,
@@ -2686,11 +2692,12 @@ psa_status_t psa_verify_message_internal(
size_t signature_length )
{
psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED;
- size_t hash_length;
- uint8_t hash[PSA_HASH_MAX_SIZE];
if ( PSA_ALG_IS_HASH_AND_SIGN( alg ) )
{
+ size_t hash_length;
+ uint8_t hash[PSA_HASH_MAX_SIZE];
+
status = psa_driver_wrapper_hash_compute(
PSA_ALG_SIGN_GET_HASH( alg ),
input, input_length,
@@ -2698,7 +2705,6 @@ psa_status_t psa_verify_message_internal(
if( status != PSA_SUCCESS )
return status;
- }
return psa_driver_wrapper_verify_hash(
attributes, key_buffer, key_buffer_size,
@@ -2706,6 +2712,12 @@ psa_status_t psa_verify_message_internal(
signature, signature_length );
}
+ return psa_driver_wrapper_verify_hash(
+ attributes, key_buffer, key_buffer_size,
+ alg, input, input_length,
+ signature, signature_length );
+}
+
psa_status_t psa_verify_message( mbedtls_svc_key_id_t key,
psa_algorithm_t alg,
const uint8_t * input,
|
record cache keeps only the variants of the current contig | @@ -47,7 +47,7 @@ namespace ebi
* @param capacity: maximum amount of RecordCores that this instance can hold at any time.
* A value of 0 disables the limit, thus storing every RecordCore received. Use with caution.
*/
- RecordCache(size_t capacity) : capacity{capacity}, unlimited{capacity == 0} { }
+ explicit RecordCache(size_t capacity) : capacity{capacity}, unlimited{capacity == 0} { }
/**
* Getter function which returns a vector of Errors.
@@ -105,6 +105,25 @@ namespace ebi
list_duplicates.clear();
list_symbolic_duplicates.clear();
+ if (current_chromosome != record.chromosome) {
+ // EVA-1950: the cache removes "lexicographically lower" chromosomes when the cache is full, but with
+ // that sorting, "chr10" is lower than "chr2", so new variants in "chr10" get deleted to keep the ones
+ // in "chr2", and duplicates in "chr10" will go undetected. Possible solutions (better first):
+ // 1. The chosen simple solution is to keep in the cache variants from only one chromosome.
+ // 2. Another solution could be using the line number (Record::line) as sorting criteria, but that can
+ // get very surprising and unintuitive if used for other purposes outside of this class, so a custom
+ // comparator could be provided to the cache multiset constructor.
+ // 3. A third solution could be making a Least Recently Used cache, but will require using esoteric data
+ // structures and/or dependencies, or keeping several simpler structures in sync, like a map and a
+ // queue that point to each other's contents.
+ // 4. A poor solution could be changing the ordering to "numerically lower", but that will be very tricky
+ // to get right with weird chrs like (versioned) accessions. e.g.: NC_01.1 is a chr, which is likely to
+ // appear before a contig GK_01.1, and we would need a sorting method that evaluates NC_01.1 < GK_01.1.
+ cache_duplicates.clear();
+ cache_symbolic_duplicates.clear();
+ current_chromosome = record.chromosome;
+ }
+
for (RecordCore &record_core: record_cores) {
// create references to the appropriate data structures for th alternate allele type
@@ -162,6 +181,7 @@ namespace ebi
std::multiset<RecordCore> cache_symbolic_duplicates;
std::vector<std::unique_ptr<Error>> list_duplicates;
std::vector<std::unique_ptr<Error>> list_symbolic_duplicates;
+ std::string current_chromosome;
size_t capacity; ///< max amount of RecordCores that the cache can hold
bool unlimited; ///< if true, the set is not capped and will not erase any RecordCore
};
|
Support versioned lua executable file.
Fixes CMake not looking for the Lua library when
the executable's name is not "lua". | @@ -77,7 +77,7 @@ if (LUA)
NAMES luajit libluajit
PATHS ${LUA_LIBDIR}
NO_DEFAULT_PATH)
- ELSEIF(LUA_EXEC_NAME STREQUAL "lua")
+ ELSEIF(LUA_EXEC_NAME MATCHES "lua.*")
FIND_LIBRARY(LUA_LIBRARIES
NAMES lua lua53 lua52 lua51 liblua liblua53 liblua52 liblua51
PATHS ${LUA_LIBDIR}
|
print some final stats | @@ -260,5 +260,13 @@ int main(int argc, char** argv) {
cleanupSocketFuzzer();
}
+ uint64_t exec_per_sec = 0;
+ uint64_t elapsed_sec = time(NULL) - hfuzz.timing.timeStart;
+ if (elapsed_sec) {
+ exec_per_sec = hfuzz.cnts.mutationsCnt / elapsed_sec;
+ }
+ LOG_I("Summary: iters:%zu time:%" PRIu64 " execs/s:%" PRIu64, hfuzz.cnts.mutationsCnt,
+ elapsed_sec, exec_per_sec);
+
return EXIT_SUCCESS;
}
|
Have build-example.sh build with -O3 | @@ -21,13 +21,13 @@ for f in example/*; do
echo "Building $f"
if [ "$f" = "example/crc32" ]; then
# example/crc32 is unusual in that it's C++, not C.
- g++ -Wall -Werror $f/*.cc -o $f/a.out
+ g++ -O3 $f/*.cc -o $f/a.out
elif [ "$f" = "example/library" ]; then
# example/library is unusual in that it uses separately compiled libraries
# (built by "wuffs genlib" above) instead of directly #include'ing Wuffs'
# .c files.
- gcc -Wall -Werror -static -I.. $f/*.c gen/lib/c/gcc-static/libwuffs.a -o $f/a.out
+ gcc -O3 -static -I.. $f/*.c gen/lib/c/gcc-static/libwuffs.a -o $f/a.out
elif [ -e $f/*.c ]; then
- gcc -Wall -Werror $f/*.c -o $f/a.out
+ gcc -O3 $f/*.c -o $f/a.out
fi
done
|
Web UI: Reformat for coding guidelines | @@ -413,7 +413,8 @@ static int elektraVersionGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned,
KeySet * info = elektraVersionKeySet ();
keySetMeta(info->array[0], "restrict/write", "1");
keySetMeta(info->array[0], "restrict/remove", "1");
- for (size_t i = 1; i < info->size; i++) {
+ for (size_t i = 1; i < info->size; i++)
+ {
keyCopyAllMeta(info->array[i], info->array[0]);
}
ksAppend (returned, info);
|
LICENSE file update. No changes in terms, just changed CartoDB to CARTO and minor formatting changes. | -Copyright (c) 2016, CartoDB, Inc.
+BSD 3-Clause License
+
+Copyright (c) 2018, CARTO
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice, this
+* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
+* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
-3. Neither the name of the copyright holder nor the names of its contributors
-may be used to endorse or promote products derived from this software without
-specific prior written permission.
+* Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
tcpci: tcpci_tcpm_set_polarity should not clobber
Bits 1-7 were getting clobbered when we updated Bit 0
BRANCH=none
TEST=verify tcpci still functions | @@ -353,8 +353,10 @@ int tcpci_enter_low_power_mode(int port)
int tcpci_tcpm_set_polarity(int port, int polarity)
{
- return tcpc_write(port, TCPC_REG_TCPC_CTRL,
- TCPC_REG_TCPC_CTRL_SET(polarity));
+ return tcpc_update8(port,
+ TCPC_REG_TCPC_CTRL,
+ TCPC_REG_TCPC_CTRL_SET(1),
+ (polarity) ? MASK_SET : MASK_CLR);
}
#ifdef CONFIG_USBC_PPC
|
ldns/net.c: Fix uninitialized variable usage.
Windows was throwing an assertion failure in debug builds as a result of
the src_len variable not being initialized. This commit fixes that
issue. | @@ -461,7 +461,7 @@ ldns_send_buffer(ldns_pkt **result, ldns_resolver *r, ldns_buffer *qb, ldns_rdf
uint8_t i;
struct sockaddr_storage *src = NULL;
- size_t src_len;
+ size_t src_len = 0;
struct sockaddr_storage *ns;
size_t ns_len;
struct timeval tv_s;
|
config_tools: add the missing GUEST_FLAG_NVMX_ENABLED to common.py
Without this, the GUEST_FLAG_NVMX_ENABLED doesn't show up in the
drop-down list of "guest_flags" in the ACRN config GUI. | @@ -22,7 +22,8 @@ DATACHECK_SCHEMA_FILE = SOURCE_ROOT_DIR + 'misc/config_tools/schema/datachecks.x
PY_CACHES = ["__pycache__", "../board_config/__pycache__", "../scenario_config/__pycache__"]
GUEST_FLAG = ["0", "0UL", "GUEST_FLAG_SECURE_WORLD_ENABLED", "GUEST_FLAG_LAPIC_PASSTHROUGH",
- "GUEST_FLAG_IO_COMPLETION_POLLING", "GUEST_FLAG_HIDE_MTRR", "GUEST_FLAG_RT"]
+ "GUEST_FLAG_IO_COMPLETION_POLLING", "GUEST_FLAG_NVMX_ENABLED", "GUEST_FLAG_HIDE_MTRR",
+ "GUEST_FLAG_RT"]
MULTI_ITEM = ["guest_flag", "pcpu_id", "vcpu_clos", "input", "block", "network", "pci_dev", "shm_region", "communication_vuart"]
|
Remove openssh_server container on test exit | @@ -159,12 +159,14 @@ static int start_openssh_server(char **container_id_out)
const char *container_host_port = openssh_server_port();
if(container_host_port != NULL) {
return run_command(container_id_out,
- "docker run -d -p %s:22 libssh2/openssh_server",
+ "docker run --rm -d -p %s:22 "
+ "libssh2/openssh_server",
container_host_port);
}
else {
return run_command(container_id_out,
- "docker run -d -p 22 libssh2/openssh_server");
+ "docker run --rm -d -p 22 "
+ "libssh2/openssh_server");
}
}
|
i kno how 2 kode | @@ -242,8 +242,7 @@ static void print_usage(void)
printf(" -t, --text - use with -o; write processed list to file in ascii format\n");
printf(" -l, --lite - walk file system without stat\n");
printf(" -s, --sort <fields> - sort output by comma-delimited fields\n");
- printf(" -d, --distribution <field>:<separators>\n
- - print distribution by field\n");
+ printf(" -d, --distribution <field>:<separators>\n - print distribution by field\n");
printf(" -p, --print - print files to screen\n");
printf(" -v, --verbose - verbose output\n");
printf(" -q, --quiet - quiet output\n");
|
Don't use __builtin_unreachable on GCC < 4.5. | # else
# if defined(HEDLEY_ASSUME)
# define simde_assert(expr) HEDLEY_ASSUME(expr)
-# elif defined(__GNUC__)
+# elif HEDLEY_GCC_VERSION_CHECK(4,5,0)
# define simde_assert(expr) ((void) (!!(expr) ? 1 : (__builtin_unreachable(), 1)))
-# elif defined(_MSC_VER)
+# elif HEDLEY_MSVC_VERSION_CHECK(13,10,0)
# define simde_assert(expr) __assume(expr)
# else
# define simde_assert(expr)
|
commander: static func naming | @@ -167,7 +167,7 @@ static commander_error_t _api_show_mnemonic(void)
return COMMANDER_OK;
}
-static commander_error_t commander_check_sdcard(CheckSDCardResponse* response)
+static commander_error_t _api_check_sdcard(CheckSDCardResponse* response)
{
response->inserted = sd_card_inserted();
return COMMANDER_OK;
@@ -243,7 +243,7 @@ static commander_error_t _api_process(const Request* request, Response* response
return commander_btc_sign(request, response);
case Request_check_sdcard_tag:
response->which_response = Response_check_sdcard_tag;
- return commander_check_sdcard(&(response->response.check_sdcard));
+ return _api_check_sdcard(&(response->response.check_sdcard));
case Request_insert_remove_sdcard_tag:
response->which_response = Response_success_tag;
_api_insert_remove_sdcard(&(request->request.insert_remove_sdcard));
|
docs/ide-support.md: Run mdformat
mdformat -w --compatibility docs/ide-support.md
BRANCH=none
TEST=Check in Gitiles | @@ -34,18 +34,24 @@ You can use the `ide-config.sh` tool to generate a VSCode configuration that
includes selectable sub-configurations for every board/image pair.
1. From the root `ec` directory, do the following:
+
```bash
mkdir -p .vscode
./util/ide-config.sh vscode all:RW all:RO | tee .vscode/c_cpp_properties.json
```
+
2. Open VSCode and navigate to some C source file.
+
3. Run `C/C++ Reset IntelliSense Database` from the `Ctrl-Shift-P` menu
+
4. Select the config in the bottom right, next to the `Select Language Mode`.
You will only see this option when a C/C++ file is open. Additionally, you
can select a configuration by pressing `Ctrl-Shift-P` and selecting the
`C/C++ Select a Configuration...` option.
-5. Add the EC specific file associations and style settings.
- Modify `.vscode/settings.json` to have the following elements:
+
+5. Add the EC specific file associations and style settings. Modify
+ `.vscode/settings.json` to have the following elements:
+
```json
{
"editor.rulers": [80],
|
Fix arm type | @@ -2447,7 +2447,7 @@ typedef struct _SYSTEM_GDI_DRIVER_INFORMATION
PVOID ImageAddress;
PVOID SectionPointer;
PVOID EntryPoint;
- PIMAGE_EXPORT_DIRECTORY ExportSectionPointer;
+ struct _IMAGE_EXPORT_DIRECTORY* ExportSectionPointer;
ULONG ImageLength;
} SYSTEM_GDI_DRIVER_INFORMATION, *PSYSTEM_GDI_DRIVER_INFORMATION;
|
Checking if source is null in memcpy_check | /* Check memcpy and memset's arguments, if these are not right, log an error
*/
-#define memcpy_check( d, s, n ) do { if ( (n) ) { notnull_check( (d) ); memcpy( (d), (s), (n)); } } while(0)
+#define memcpy_check( d, s, n ) do { if ( (n) ) { notnull_check( (s) ); notnull_check( (d) ); memcpy( (d), (s), (n)); } } while(0)
#define memset_check( d, c, n ) do { if ( (n) ) { notnull_check( (d) ); memset( (d), (c), (n)); } } while(0)
/* Range check a number */
|
Fix dshow compilation errors (conflicting declarations with qedit.h) | *
* https://code.google.com/p/webm/source/browse/qedit.h?repo=udpsample
*/
-static const
-IID IID_ISampleGrabber = {
- 0x6b652fff, 0x11fe, 0x4fce,
- { 0x92, 0xad, 0x02, 0x66, 0xb5, 0xd7, 0xc7, 0x8f }
-};
-static const
-IID IID_ISampleGrabberCB = {
- 0x0579154a, 0x2b53, 0x4994,
- { 0xb0, 0xd0, 0xe7, 0x73, 0x14, 0x8e, 0xff, 0x85 }
-};
#include "qedit.h"
-/*
const CLSID CLSID_SampleGrabber = { 0xc1f400a0, 0x3f08, 0x11d3,
{ 0x9f, 0x0b, 0x00, 0x60, 0x08, 0x03, 0x9e, 0x37 }
};
-*/
class Grabber;
|
Restore the brief FIB entry printing | @@ -118,8 +118,8 @@ format_fib_entry (u8 * s, va_list * args)
s = format (s, "%U", format_fib_prefix, &fib_entry->fe_prefix);
-// if (level >= FIB_ENTRY_FORMAT_DETAIL)
-// {
+ if (level >= FIB_ENTRY_FORMAT_DETAIL)
+ {
s = format (s, " fib:%d", fib_entry->fe_fib_index);
s = format (s, " index:%d", fib_entry_get_index(fib_entry));
s = format (s, " locks:%d", fib_entry->fe_node.fn_locks);
@@ -154,11 +154,11 @@ format_fib_entry (u8 * s, va_list * args)
}));
s = format (s, "\n forwarding: ");
-// }
-// else
-// {
-// s = format (s, "\n");
-// }
+ }
+ else
+ {
+ s = format (s, "\n");
+ }
fct = fib_entry_get_default_chain_type(fib_entry);
|
Update version to v3.5.117 | #define MAT_VERSION_HPP
// WARNING: DO NOT MODIFY THIS FILE!
// This file has been automatically generated, manual changes will be lost.
-#define BUILD_VERSION_STR "3.5.113.1"
-#define BUILD_VERSION 3,5,113,1
+#define BUILD_VERSION_STR "3.5.117.1"
+#define BUILD_VERSION 3,5,117,1
#ifndef RESOURCE_COMPILER_INVOKED
#include <ctmacros.hpp>
@@ -18,7 +18,7 @@ namespace MAT_NS_BEGIN {
uint64_t const Version =
((uint64_t)3 << 48) |
((uint64_t)5 << 32) |
- ((uint64_t)113 << 16) |
+ ((uint64_t)117 << 16) |
((uint64_t)1);
} MAT_NS_END
|
Update ffxiv_idarename.py
Added CountdownPointer | @@ -64,6 +64,7 @@ NameAddr(0x1401F2990, "Client::UI::Agent::AgentModule_ctor")
NameAddr(0x1401F7840, "Client::UI::Agent::AgentModule::GetAgentByInternalID")
NameAddr(0x1401F7850, "Client::UI::Agent::AgentModule::GetAgentByInternalID_2") # dupe?
NameAddr(0x14020CD10, "Client::UI::Agent::AgentLobby_ctor")
+NameAddr(0x14029f340, "CountdownPointer")
NameAddr(0x1402F2860, "Client::Graphics::Kernel::Texture_ctor")
NameAddr(0x1402F9ED0, "Client::Graphics::Kernel::Device_ctor")
NameAddr(0x140318C20, "Client::Graphics::Render::GraphicsConfig_ctor")
|
Fix environment tab memory leak | @@ -76,18 +76,21 @@ BOOLEAN PhIsProcessSuspended(
_In_ HANDLE ProcessId
)
{
+ BOOLEAN suspended = FALSE;
PVOID processes;
PSYSTEM_PROCESS_INFORMATION process;
if (NT_SUCCESS(PhEnumProcesses(&processes)))
{
if (process = PhFindProcessInformation(processes, ProcessId))
- return PhGetProcessIsSuspended(process);
+ {
+ suspended = PhGetProcessIsSuspended(process);
+ }
PhFree(processes);
}
- return FALSE;
+ return suspended;
}
/**
|
Windows yells at ode less; | @@ -153,7 +153,10 @@ if(LOVR_ENABLE_PHYSICS)
set(ODE_BUILD_SHARED ON CACHE BOOL "")
endif()
add_subdirectory(deps/ode ode)
- if(NOT WIN32)
+ if(MSVC)
+ set_target_properties(ode PROPERTIES COMPILE_FLAGS "/wd4244 /wd4267")
+ target_compile_definitions(ode PRIVATE _CRT_SECURE_NO_WARNINGS)
+ else()
set_target_properties(ode PROPERTIES COMPILE_FLAGS "-Wno-unused-volatile-lvalue -Wno-array-bounds -Wno-undefined-var-template")
endif()
include_directories(deps/ode/include "${CMAKE_CURRENT_BINARY_DIR}/ode/include")
|
docs/fingerprint: Add Chrome OS Project Configuration details
model.yaml files no longer have to be created by hand; instead the
Chrome OS Project Configuration tools will generate it.
BRANCH=none
TEST=view in gitiles | @@ -481,7 +481,16 @@ that should be built as part of the build.
See the [`model.yaml` for the Hatch board][hatch model.yaml] as an example.
-You can test your changes by
+Instead of crafting the `model.yaml` by hand, newer boards are moving to the
+[Chrome OS Project Configuration] model, where the config is generated using
+[Starlark]. The common [`create_fingerprint`] function can be used across models
+to configure the fingerprint settings. See the [Morphius `config.star`] for an
+example of how to call `create_fingerprint`. After you modify a `config.star`
+file you will need to [regenerate the config]. If you need to change many
+projects (e.g., modifying [`create_fingerprint`]), you can use the [`CLFactory`]
+tool.
+
+Once you have updated the config, you can test your changes by
[running `cros_config`](#chromeos-config-fingerprint). The Chrome OS Config
documentation has a [section on testing properties] that describes this in more
detail.
@@ -552,3 +561,9 @@ a given device can be found by viewing `chrome://system/#platform_identity_sku`.
[Nucleo H743ZI2]: https://www.digikey.com/en/products/detail/stmicroelectronics/NUCLEO-H743ZI2/10130892
[CBI Info]: https://chromium.googlesource.com/chromiumos/docs/+/HEAD/design_docs/cros_board_info.md
[Chrome OS Config SKU]: https://chromium.googlesource.com/chromiumos/platform2/+/HEAD/chromeos-config/README.md#identity
+[Chrome OS Project Configuration]: https://chromium.googlesource.com/chromiumos/config/+/HEAD/README.md
+[Starlark]: https://docs.bazel.build/versions/master/skylark/language.html
+[`create_fingerprint`]: https://chromium.googlesource.com/chromiumos/config/+/e1fa0d7f56eb3dd6e9378e4326de086ada46b7d3/util/hw_topology.star#444
+[Morphius `config.star`]: https://chrome-internal.googlesource.com/chromeos/project/zork/morphius/+/593b657a776ed6b320c826916adc9cd845faf709/config.star#85
+[regenerate the config]: https://chromium.googlesource.com/chromiumos/config/+/HEAD/README.md#making-configuration-changes-for-your-project
+[`CLFactory`]: https://chromium.googlesource.com/chromiumos/config/+/HEAD/README.md#making-bulk-changes-across-repos
|
Fix occurs check.
If we had a cycle within a type, the occurs check would
recurse infinitely:
a -> b -> c -> b
would never return, because `a` doesn't occur in that
sequence. | @@ -281,43 +281,63 @@ static int isbound(Inferstate *st, Type *t)
* Recursive types that contain themselves through
* pointers or slices are fine, but any other self-inclusion
* would lead to a value of infinite size */
-static int occurs(Inferstate *st, Type *t, Type *sub)
+static int occurs_rec(Inferstate *st, Type *sub, Bitset *bs)
{
size_t i;
- assert(t != NULL);
- if (t == sub) /* FIXME: is this actually right? */
+ if (bshas(bs, sub->tid))
return 1;
- /* if we're on the first iteration, the subtype is the type
- * itself. The assignment must come after the equality check
- * for obvious reasons. */
- if (!sub)
- sub = t;
-
+ bsput(bs, sub->tid);
switch (sub->type) {
+ case Typtr:
+ case Tyslice:
+ break;
case Tystruct:
for (i = 0; i < sub->nmemb; i++)
- if (occurs(st, t, decltype(sub->sdecls[i])))
+ if (occurs_rec(st, decltype(sub->sdecls[i]), bs))
return 1;
break;
case Tyunion:
for (i = 0; i < sub->nmemb; i++) {
- if (sub->udecls[i]->etype && occurs(st, t, sub->udecls[i]->etype))
+ if (!sub->udecls[i]->etype)
+ continue;
+ if (occurs_rec(st, sub->udecls[i]->etype, bs))
return 1;
}
break;
- case Typtr:
- case Tyslice:
- return 0;
default:
for (i = 0; i < sub->nsub; i++)
- if (occurs(st, t, sub->sub[i]))
+ if (occurs_rec(st, sub->sub[i], bs))
return 1;
break;
}
+ bsdel(bs, sub->tid);
return 0;
}
+static int occursin(Inferstate *st, Type *a, Type *b)
+{
+ Bitset *bs;
+ int r;
+
+ bs = mkbs();
+ bsput(bs, b->tid);
+ r = occurs_rec(st, a, bs);
+ bsfree(bs);
+ return r;
+}
+
+static int occurs(Inferstate *st, Type *t)
+{
+ Bitset *bs;
+ int r;
+
+ bs = mkbs();
+ r = occurs_rec(st, t, bs);
+ bsfree(bs);
+ return r;
+}
+
static int needfreshenrec(Inferstate *st, Type *t, Bitset *visited)
{
size_t i;
@@ -446,7 +466,7 @@ static void tyresolve(Inferstate *st, Type *t)
bsunion(t->traits, base->traits);
else if (base->traits)
t->traits = bsdup(base->traits);
- if (occurs(st, t, NULL))
+ if (occurs(st, t))
lfatal(t->loc, "type %s includes itself", tystr(t));
st->ingeneric--;
if (t->type == Tyname || t->type == Tygeneric) {
@@ -973,7 +993,7 @@ static Type *unify(Inferstate *st, Node *ctx, Type *u, Type *v)
/* Disallow recursive types */
if (a->type == Tyvar && b->type != Tyvar) {
- if (occurs(st, a, b))
+ if (occursin(st, a, b))
fatal(ctx, "%s occurs within %s, leading to infinite type near %s\n",
tystr(a), tystr(b), ctxstr(st, ctx));
}
|
State is fixed. | @@ -1342,13 +1342,13 @@ static int bi_compar(const void *l, const void *r)
static const char* xdag_get_block_state_info(struct block_internal *block)
{
- if(block->flags & (BI_REF | BI_MAIN_REF)) {
- if(block->flags & BI_MAIN) {
+ if(block->flags == (BI_REF | BI_MAIN_REF | BI_APPLIED | BI_MAIN | BI_MAIN_CHAIN)) { //1F
return "Main";
}
- if(block->flags & BI_APPLIED) {
+ if(block->flags == (BI_REF | BI_MAIN_REF | BI_APPLIED)) { //1C
return "Accepted";
}
+ if(block->flags == (BI_REF | BI_MAIN_REF)) { //18
return "Rejected";
}
return "Pending";
|
[update] delete extra null string. | @@ -177,7 +177,6 @@ static void pclkx_doubler_get(rt_uint32_t *pclk1_doubler, rt_uint32_t *pclk2_dou
*pclk2_doubler = 2;
}
#else
-
if (RCC_ClkInitStruct.APB1CLKDivider != RCC_HCLK_DIV1)
{
*pclk1_doubler = 2;
|
board/beadrix/led.c: Format with clang-format
BRANCH=none
TEST=none | #define BAT_LED_ON 0
#define BAT_LED_OFF 1
-const enum ec_led_id supported_led_ids[] = {
- EC_LED_ID_BATTERY_LED
-};
+const enum ec_led_id supported_led_ids[] = { EC_LED_ID_BATTERY_LED };
const int supported_led_ids_count = ARRAY_SIZE(supported_led_ids);
enum led_color {
|
firdespm: removing redundant check in create() | @@ -240,8 +240,6 @@ firdespm firdespm_create(unsigned int _h_len,
return liquid_error_config("firdespm_create(), invalid bands");
if (!weights_valid)
return liquid_error_config("firdespm_create(), invalid weights (must be positive)");
- if (_num_bands == 0)
- return liquid_error_config("firdespm_create(), number of bands must be > 0");
// create object
firdespm q = (firdespm) malloc(sizeof(struct firdespm_s));
|
Update Readme.md
Running Section
The path needed to be in quotes (for me at least)
Added a '.' to step 2. See | @@ -227,8 +227,8 @@ need to link to those libraries.
Murl is not supported in windows at this time.
## Running
-1. `export LD_LIBRARY_PATH=<path to ssl lib;path to curl lib>`
-2. Modify and run `scripts/nist_setup.sh`
+1. `export LD_LIBRARY_PATH="<path to ssl lib;path to curl lib>"`
+2. Modify and run `. 6scripts/nist_setup.sh`
3. `./app/acvp_app --<options>`
Use `./app/acvp_app --help` for more information on available options.
|
BugID:25749969: fix issue of missing arch optional components | @@ -65,17 +65,19 @@ def append_depends_config_in(board, mandatory_configs, optional_configs):
board_config_in += 'source "$AOS_SDK_PATH/%s"\n' % config
elif config.startswith("core/"):
kernel_config_in += 'source "$AOS_SDK_PATH/%s"\n' % config
- elif config.startswith("components/"):
+ else:
component_config_in += 'source "$AOS_SDK_PATH/%s"\n' % config
if optional_configs:
for config in optional_configs:
""" one dependency: comp_name, config_file, condition [[]] """
- if config["config_file"].startswith("core/"):
tmp_config_in, conds_list = get_comp_optional_depends_text(config["condition"], config["config_file"])
+ if config["config_file"].startswith("core/"):
kernel_config_in += tmp_config_in
- elif config["config_file"].startswith("components/"):
- tmp_config_in, conds_list = get_comp_optional_depends_text(config["condition"], config["config_file"])
+ elif config["config_file"].startswith("platform/"):
+ board_config_in += tmp_config_in
+ else:
component_config_in += tmp_config_in
+
board_config_in += "endmenu\n\n"
kernel_config_in += "endmenu\n\n"
component_config_in += "endmenu\n\n"
@@ -111,7 +113,7 @@ def write_depends_config(config_file, board, app=None):
if comp["comp_name"] in comp_info:
config = comp
config["config_file"] = comp_info[comp["comp_name"]]["config_file"]
- # print("config is", config)
+ if config["config_file"]:
optional_configs.append(config)
text_config = 'menu "%s"\n' % APP_CONFIG_KEYWORD
|
BugID:16982987: Add BT mac storage support. | #include <stdlib.h>
#include <hal/ais_ota.h>
#include <aos/aos.h>
+#include <bluetooth/bluetooth.h>
+#include <bluetooth/storage.h>
+#include <bluetooth/hci.h>
static flash_event_handler_t flash_handler;
static settings_event_handler_t settings_hanlder;
@@ -121,8 +124,52 @@ void ais_ota_update_setting_after_xfer_finished(uint32_t img_size, uint32_t img_
}
-int ais_ota_bt_storage_init()
+#define BT_MAC_STR "bt_mac"
+static ssize_t storage_write(const bt_addr_le_t *addr, u16_t key,
+ const void *data, size_t length)
{
+ int ret;
+
+ ret = aos_kv_set(BT_MAC_STR, ((bt_addr_le_t *)data)->a.val,
+ sizeof(bt_addr_le_t), 1);
+
+ if (ret !=0 ) {
+ printf("bt mac store failed.\r\n");
+ return 0;
+ }
+
+ return sizeof(bt_addr_le_t);
+}
+
+static ssize_t storage_read(const bt_addr_le_t *addr, u16_t key, void *data,
+ size_t length)
+{
+ int ret, len = sizeof(bt_addr_le_t);
+
+ ret = aos_kv_get(BT_MAC_STR, ((bt_addr_le_t *)data)->a.val, &len);
+ if (ret != 0) {
+ printf("Failed to get bt mac.\r\n");
+ return 0;
+ }
+
+ return sizeof(bt_addr_le_t);
+}
+
+static int storage_clear(const bt_addr_le_t *addr)
+{
+
+}
+
+int ais_ota_bt_storage_init(void)
+{
+ static const struct bt_storage storage = {
+ .read = storage_read,
+ .write = storage_write,
+ .clear = storage_clear
+ };
+
+ bt_storage_register(&storage);
+
return 0;
}
|
mpu6xxx: add delay and run sync spi-transaction to ensure gyro is ready | @@ -44,6 +44,8 @@ static uint32_t mpu6xxx_fast_divider() {
}
uint8_t mpu6xxx_detect() {
+ time_delay_ms(100);
+
const uint8_t id = mpu6xxx_read(MPU_RA_WHO_AM_I);
switch (id) {
case MPU6000_ID:
@@ -98,6 +100,7 @@ uint8_t mpu6xxx_read(uint8_t reg) {
spi_txn_add_seg(txn, buffer, buffer, 2);
spi_txn_submit(txn);
+ spi_txn_continue_ex(&gyro_bus, true);
spi_txn_wait(&gyro_bus);
return buffer[1];
|
clean arch linux file, thanks | @@ -12,21 +12,22 @@ depends=('mbedtls')
backup=('etc/kadnode/kadnode.conf' 'etc/kadnode/peers.txt')
-#source=(https://github.com/mwarning/KadNode.git)
#source=(https://github.com/mwarning/KadNode/archive/v${pkgver}.tar.gz)
-source=(git+file:///home/mwarning/myProjects/KadNode)
+#source=(git+file:///home/user/KadNode)
+source=(git+https://github.com/mwarning/KadNode.git)
md5sums=('SKIP')
install="kadnode.install"
build() {
- cd $srcdir/KadNode
+ cd ${srcdir}/KadNode
+ make clean
make FEATURES="tls bob cmd lpd nss"
}
package() {
- cd $srcdir/KadNode
+ cd ${srcdir}/KadNode
install -Dm755 build/kadnode "$pkgdir"/usr/bin/kadnode
install -Dm755 build/kadnode-ctl "$pkgdir"/usr/bin/kadnode-ctl
|
acrn-config: always generate .clos boilerplate code in vm_configurations.c
so that vm_configurations.h/vm_configurations.c are consistent for different boards
The boilerplate code is protected by #ifdef CONFIG_RDT_ENABLED/#endif, so it will
not hurt if we always produce the .clos code. | @@ -187,7 +187,6 @@ def clos_output(scenario_items, i, config):
"""
hv_info = scenario_items['hv']
- if board_cfg_lib.is_rdt_supported():
print("#ifdef CONFIG_RDT_ENABLED", file=config)
print("\t\t.clos = VM{}_VCPU_CLOS,".format(i), file=config)
print("#endif", file=config)
|
nrf/main: Remove unnecessary repl_info(0) call.
It's statically initialized to 0. | @@ -121,8 +121,6 @@ soft_reset:
mp_obj_list_append(mp_sys_path, MP_OBJ_NEW_QSTR(MP_QSTR_)); // current dir (or base dir of the script)
mp_obj_list_init(mp_sys_argv, 0);
- pyb_set_repl_info(MP_OBJ_NEW_SMALL_INT(0));
-
readline_init0();
|
Extend text space in footer
Allows longer translations for help and closing texts.
See | @@ -386,8 +386,8 @@ render_screens (void) {
wattron (stdscr, color->attr | COLOR_PAIR (color->pair->idx));
mvaddstr (row - 1, 1, T_HELP_ENTER);
- mvprintw (row - 1, 30, "%d - %s", chg, time_str_buf);
- mvaddstr (row - 1, col - 21, T_QUIT);
+ mvprintw (row - 1, col/2 - 10, "%d - %s", chg, time_str_buf);
+ mvaddstr (row - 1, col - 6 - strlen(T_QUIT), T_QUIT);
mvprintw (row - 1, col - 5, "%s", GO_VERSION);
wattroff (stdscr, color->attr | COLOR_PAIR (color->pair->idx));
|
docs: update xCAT repo URL | @@ -15,7 +15,7 @@ enablement by downloading the latest \xCAT{} repo file.
\begin{lstlisting}[language=bash,keywords={},basicstyle=\fontencoding{T1}\fontsize{8.0}{10}\ttfamily,
literate={VER}{\OHPCVerTree{}}1 {OSREPO}{\OSTree{}}1 {TAG}{\OSTag{}}1 {ARCH}{\arch{}}1 {-}{-}1]
[sms](*\#*) (*\install*) yum-utils
-[sms](*\#*) (*\addrepo*) https://xcat.org/files/xcat/repos/yum/latest/xcat-core/xCAT-core.repo
+[sms](*\#*) (*\addrepo*) https://xcat.org/files/xcat/repos/yum/latest/xcat-core/xcat-core.repo
\end{lstlisting}
% end_ohpc_run
|
sse: use portable implementation to work around llvm bug
Just disabling the warning doesn't work if you pass the
fno-lax-vector-conversions flag, and clang should provide a good
implementation of the portable versions anyways. |
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
-#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
-# if defined(SIMDE_BUG_CLANG_44589)
-# define simde_mm_extract_pi16(a, imm8) ( \
- HEDLEY_DIAGNOSTIC_PUSH \
- _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
- HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
- HEDLEY_DIAGNOSTIC_POP \
- )
-# else
+#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
-# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
@@ -2574,25 +2565,14 @@ simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
- r_,
a_ = simde__m64_to_private(a);
- r_.i64[0] = a_.i64[0];
- r_.i16[imm8] = i;
+ a_.i16[imm8] = i;
- return simde__m64_from_private(r_);
+ return simde__m64_from_private(a_);
}
-#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
-# if defined(SIMDE_BUG_CLANG_44589)
-# define ssimde_mm_insert_pi16(a, i, imm8) ( \
- HEDLEY_DIAGNOSTIC_PUSH \
- _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
- (_mm_insert_pi16((a), (i), (imm8))) \
- HEDLEY_DIAGNOSTIC_POP \
- )
-# else
+#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) && !defined(SIMDE_BUG_CLANG_44589)
#define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
-# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
|
GAMEPAD: add 8Bitdo support | @@ -14,6 +14,7 @@ int xbox_count = 0;
int ds4_count = 0;
int ds5_count = 0;
int switch_count = 0;
+int bitdo_count = 0;
std::string xbox_paths [2]{"gip","xpadneo"};
bool operator<(const gamepad& a, const gamepad& b)
@@ -29,6 +30,7 @@ void gamepad_update(){
ds4_count = 0;
ds5_count = 0;
switch_count = 0;
+ bitdo_count = 0;
for (auto &p : fs::directory_iterator(path)) {
string fileName = p.path().filename();
//CHECK XONE AND XPADNEO DEVICES
@@ -56,6 +58,12 @@ void gamepad_update(){
gamepad_found = true;
switch_count += 1;
}
+ //CHECK * BITDO DEVICES
+ if (fileName.find("hid-e4") != std::string::npos) {
+ list.push_back(p.path());
+ gamepad_found = true;
+ bitdo_count += 1;
+ }
}
}
@@ -67,6 +75,7 @@ void gamepad_info () {
int ds4_counter = 0;
int ds5_counter = 0;
int switch_counter = 0;
+ int bitdo_counter = 0;
for (auto &path : list ) {
//Set devices paths
@@ -80,7 +89,7 @@ void gamepad_info () {
gamepad_data.push_back(gamepad());
- //Xone nad xpadneo devices
+ //Xone and xpadneo devices
if (path.find("gip") != std::string::npos || path.find("xpadneo") != std::string::npos) {
if (xbox_count == 1 )
gamepad_data[gamepad_count].name = "XBOX PAD";
@@ -112,6 +121,14 @@ void gamepad_info () {
gamepad_data[gamepad_count].name = "SWITCH PAD-" + to_string(switch_counter + 1);
switch_counter++;
}
+ //8bitdo devices
+ if (path.find("hid-e4") != std::string::npos) {
+ if (switch_count == 1)
+ gamepad_data[gamepad_count].name = "8BITDO PAD";
+ else
+ gamepad_data[gamepad_count].name = "8BITDO PAD-" + to_string(switch_counter + 1);
+ bitdo_counter++;
+ }
//Get device status
if (std::getline(input_status, line))
gamepad_data[gamepad_count].state = line;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.