message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
modify README for ARTIK 053
That means that we need to program (flash) binaries, not prepare binaries. | @@ -32,8 +32,8 @@ TIZENRT_BASEDIR was set at 'Getting the sources' tab of [[Quick Start]](../../..
## How to program a binary
-At first, the complete set of binaries are needed to program.
-After buiding a Tizen RT, execute below at os folder.
+At first, programming the complete set of binaries are needed.
+After buiding a Tizen RT, execute as follows at os folder.
```bash
openocd -f artik053.cfg -c ' \
flash_write bl1 ../build/configs/artik053/bin/bl1.bin; \
|
tools: Fix tcpconnect with kernel 5.14
Kernel commit (0) changes iov_iter->type to iov_iter->iter_type,
which breaks tcpconnect. This commit fixes it by detecting kernel struct field
existence.
Closes
[0]: | @@ -295,7 +295,7 @@ int trace_udp_ret_recvmsg(struct pt_regs *ctx)
return 0;
struct msghdr *msghdr = (struct msghdr *)*msgpp;
- if (msghdr->msg_iter.type != ITER_IOVEC)
+ if (msghdr->msg_iter.TYPE_FIELD != ITER_IOVEC)
goto delete_and_return;
int copied = (int)PT_REGS_RC(ctx);
@@ -361,6 +361,10 @@ bpf_text = bpf_text.replace('FILTER_FAMILY', '')
bpf_text = bpf_text.replace('FILTER_UID', '')
if args.dns:
+ if BPF.kernel_struct_has_field(b'iov_iter', b'iter_type') == 1:
+ dns_bpf_text = dns_bpf_text.replace('TYPE_FIELD', 'iter_type')
+ else:
+ dns_bpf_text = dns_bpf_text.replace('TYPE_FIELD', 'type')
bpf_text += dns_bpf_text
if debug or args.ebpf:
|
Fix windows issue with (file/read file :all)
When file was created with file/popen, the current optimization
of using fseek on windows fails due to windows not properly returning
and error code and just returning 0. Windows :(. | @@ -221,27 +221,11 @@ static Janet cfun_io_fread(int32_t argc, Janet *argv) {
if (janet_checktype(argv[1], JANET_KEYWORD)) {
const uint8_t *sym = janet_unwrap_keyword(argv[1]);
if (!janet_cstrcmp(sym, "all")) {
- /* Read whole file */
- int status = fseek(iof->file, 0, SEEK_SET);
- if (status) {
- /* backwards fseek did not work (stream like popen) */
int32_t sizeBefore;
do {
sizeBefore = buffer->count;
- read_chunk(iof, buffer, 1024);
+ read_chunk(iof, buffer, 4096);
} while (sizeBefore < buffer->count);
- } else {
- fseek(iof->file, 0, SEEK_END);
- long fsize = ftell(iof->file);
- if (fsize < 0) {
- janet_panicf("could not get file size of %v", argv[0]);
- }
- if (fsize > (INT32_MAX)) {
- janet_panic("file to large to read into buffer");
- }
- fseek(iof->file, 0, SEEK_SET);
- read_chunk(iof, buffer, (int32_t) fsize);
- }
/* Never return nil for :all */
return janet_wrap_buffer(buffer);
} else if (!janet_cstrcmp(sym, "line")) {
|
Fix return code handling for new sprintf | @@ -1826,7 +1826,7 @@ void picoquic_open_cc_dump(picoquic_cnx_t * cnx)
char cc_log_file_name[512];
if (ret == 0) {
- ret = picoquic_sprintf(cc_log_file_name, sizeof(cc_log_file_name), "%s%c%s-log.bin", cnx->quic->cc_log_dir, PICOQUIC_FILE_SEPARATOR, cnxid_str) <= 0;
+ ret = picoquic_sprintf(cc_log_file_name, sizeof(cc_log_file_name), "%s%c%s-log.bin", cnx->quic->cc_log_dir, PICOQUIC_FILE_SEPARATOR, cnxid_str);
}
if (ret != 0) {
|
gpcloud: Enable debug_curl for gpcheckcloud | @@ -73,12 +73,10 @@ S3Params InitConfig(const string& urlWithOptions) {
string content = s3Cfg.Get(configSection, "loglevel", "WARNING");
s3ext_loglevel = getLogLevel(content.c_str());
-#ifndef S3_STANDALONE_CHECKCLOUD
content = s3Cfg.Get(configSection, "logtype", "INTERNAL");
s3ext_logtype = getLogType(content.c_str());
params.setDebugCurl(s3Cfg.GetBool(configSection, "debug_curl", "false"));
-#endif
params.setCred(s3Cfg.Get(configSection, "accessid", ""), s3Cfg.Get(configSection, "secret", ""),
s3Cfg.Get(configSection, "token", ""));
|
nissa: Remove unnecessary label attribute
The I2C DTS config does not require a label attribute.
TEST=zmake configure -b nivviks
BRANCH=none | &i2c0_0 {
status = "okay";
clock-frequency = <I2C_BITRATE_FAST>;
- label = "I2C_EEPROM";
};
&i2c_ctrl0 {
&i2c1_0 {
status = "okay";
clock-frequency = <I2C_BITRATE_FAST>;
- label = "I2C_SENSOR";
};
&i2c_ctrl1 {
&i2c3_0 {
status = "okay";
clock-frequency = <I2C_BITRATE_FAST_PLUS>;
- label = "I2C_USB_C0";
};
&i2c_ctrl3 {
&i2c5_1 {
status = "okay";
clock-frequency = <I2C_BITRATE_FAST_PLUS>;
- label = "I2C_SUB_USB_C1";
};
&i2c_ctrl5 {
&i2c7_0 {
status = "okay";
clock-frequency = <I2C_BITRATE_STANDARD>;
- label = "I2C_BATTERY";
};
&i2c_ctrl7 {
|
Add space between number and units for some fields of the show -dimm output
Add space between number and units ("ms") in string to match the rest of the fields.
Specifically for AveragePowerReportingTimeConstant and MemoryBandwidthBoostAveragePowerTimeConstant on command show -a -dimm | @@ -1141,7 +1141,7 @@ ShowDimms(
/** AveragePowerReportingTimeConstant (FIS 2.1 and higher) **/
if (ShowAll || (pDispOptions->DisplayOptionSet && ContainsValue(pDispOptions->pDisplayValues, AVG_PWR_REPORTING_TIME_CONSTANT))) {
- PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, AVG_PWR_REPORTING_TIME_CONSTANT, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].AvgPowerReportingTimeConstant, FORMAT_UINT64 TIME_MSR_MS));
+ PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, AVG_PWR_REPORTING_TIME_CONSTANT, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].AvgPowerReportingTimeConstant, FORMAT_UINT64 L" " TIME_MSR_MS));
}
if (pDimms[DimmIndex].ErrorMask & DIMM_INFO_ERROR_VIRAL_POLICY) {
@@ -1179,7 +1179,7 @@ ShowDimms(
/** AvgPowerTimeConstant **/
if (ShowAll || (pDispOptions->DisplayOptionSet && ContainsValue(pDispOptions->pDisplayValues, AVG_POWER_TIME_CONSTANT_STR))) {
- PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, AVG_POWER_TIME_CONSTANT_STR, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].AveragePowerTimeConstant, FORMAT_UINT64 TIME_MSR_MS));
+ PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, AVG_POWER_TIME_CONSTANT_STR, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].AveragePowerTimeConstant, FORMAT_UINT64 L" " TIME_MSR_MS));
}
/** 2.1/2.0: MemoryBandwidthBoostFeature/TurboModeState **/
@@ -1210,7 +1210,7 @@ ShowDimms(
/** MemoryBandwidthBoostAveragePowerTimeConstant **/
if (ShowAll || (pDispOptions->DisplayOptionSet && ContainsValue(pDispOptions->pDisplayValues, MEMORY_BANDWIDTH_BOOST_AVERAGE_POWER_TIME_CONSTANT_STR))) {
- PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, MEMORY_BANDWIDTH_BOOST_AVERAGE_POWER_TIME_CONSTANT_STR, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].MemoryBandwidthBoostAveragePowerTimeConstant, FORMAT_UINT64 TIME_MSR_MS));
+ PRINTER_SET_KEY_VAL_WIDE_STR_FORMAT(pPrinterCtx, pPath, MEMORY_BANDWIDTH_BOOST_AVERAGE_POWER_TIME_CONSTANT_STR, ConvertDimmInfoAttribToString((VOID*)&pDimms[DimmIndex].MemoryBandwidthBoostAveragePowerTimeConstant, FORMAT_UINT64 L" " TIME_MSR_MS));
}
/** MaxAveragePowerLimit **/
|
OcMiscLib/DebugHelp: Manually poll event instead of waiting (EFI_UNSUPPORTED on Insyde) | @@ -71,9 +71,7 @@ WaitForKeyIndex (
EFI_STATUS Status;
EFI_INPUT_KEY Key;
INTN Index;
- UINTN EventIndex;
EFI_EVENT TimerEvent;
- EFI_EVENT WaitList[2];
//
// Skip previously pressed characters.
@@ -100,26 +98,35 @@ WaitForKeyIndex (
//
// Wait for the keystroke event or the timer
//
- WaitList[0] = gST->ConIn->WaitForKey;
- WaitList[1] = TimerEvent;
- Status = gBS->WaitForEvent (TimerEvent != NULL ? 2 : 1, WaitList, &EventIndex);
if (TimerEvent != NULL) {
- gBS->CloseEvent (TimerEvent);
+ do {
+ //
+ // Read our key otherwise.
+ //
+ Status = gST->ConIn->ReadKeyStroke (gST->ConIn, &Key);
+ if (!EFI_ERROR (Status)) {
+ break;
}
+ Status = gBS->CheckEvent (TimerEvent);
//
// Check for the timer expiration
//
- if (!EFI_ERROR (Status) && EventIndex == 1) {
+ if (!EFI_ERROR (Status)) {
+ gBS->CloseEvent (TimerEvent);
return OC_INPUT_TIMEOUT;
}
+ } while (Status == EFI_NOT_READY);
+ gBS->CloseEvent (TimerEvent);
+ } else {
//
// Read our key otherwise.
//
do {
Status = gST->ConIn->ReadKeyStroke (gST->ConIn, &Key);
} while (EFI_ERROR (Status));
+ }
if (Key.ScanCode == SCAN_ESC || Key.UnicodeChar == '0') {
return OC_INPUT_ABORTED;
|
VERSION bump to version 1.4.89 | @@ -45,7 +45,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 4)
-set(SYSREPO_MICRO_VERSION 88)
+set(SYSREPO_MICRO_VERSION 89)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
Fix bug while incrementing profiling counters.
In synchronization functions were added for guaranteeing that increments where atomic, but the wrong rule pointer was used. | @@ -1273,9 +1273,9 @@ int yr_execute_code(
#ifdef PROFILING_ENABLED
assert(current_rule != NULL);
#ifdef _WIN32
- InterlockedAdd64(&rule->time_cost, elapsed_time - start_time);
+ InterlockedAdd64(¤t_rule->time_cost, elapsed_time - start_time);
#else
- __sync_fetch_and_add(&rule->time_cost, elapsed_time - start_time);
+ __sync_fetch_and_add(¤t_rule->time_cost, elapsed_time - start_time);
#endif
#endif
result = ERROR_SCAN_TIMEOUT;
|
tests: runtime: kubernetes: fix expected data output for incoming JSON log | -{"log":{"message_id":1, "word":"eagers"}, "stream":"stdout", "time":"2018-02-22T13:14:50.773975651Z", "message_id":1, "word":"eagers", "kubernetes":{"pod_name":"json-logs", "namespace_name":"default", "pod_id":"5ac76e2f-17d2-11e8-b34e-080027749cbc", "labels":{"run":"json-logs"}, "host":"minikube", "container_name":"json-logs", "docker_id":"c053db7370be9c33d64677f9759863d850ebe35104069bec241cd1bb4674bd19"}}
+{"log":"{\"message_id\": 1, \"word\": \"eagers\"}", "stream":"stdout", "time":"2018-02-22T13:14:50.773975651Z", "message_id":1, "word":"eagers", "kubernetes":{"pod_name":"json-logs", "namespace_name":"default", "pod_id":"5ac76e2f-17d2-11e8-b34e-080027749cbc", "labels":{"run":"json-logs"}, "host":"minikube", "container_name":"json-logs", "docker_id":"c053db7370be9c33d64677f9759863d850ebe35104069bec241cd1bb4674bd19"}}
|
Tests: fixed test_java_conf_error to pass with "--modules=" option. | @@ -24,6 +24,7 @@ class TestJavaApplication(TestApplicationJava):
"working_directory": self.current_dir
+ "/java/empty",
"webapp": self.testdir + "/java",
+ "unit_jars": self.testdir + "/no_such_dir",
}
},
}
|
Obfuscate just a tiny bit more. | @@ -838,8 +838,8 @@ uint64_t picoquic_crypto_uniform_random(picoquic_quic_t* quic, uint64_t rnd_max)
*
* In order to provide a minimum of protection against casual analysis, we run
* an obfuscation step before providing the result. The obfuscation involves
- * an XOR with the obfuscator, then multiply by a constant modulo 2^64,
- * then XOR the result with the obfuscator again. The obfuscator changes
+ * an XOR with obfuscator, then multiply by a constant modulo,
+ * then XOR the result with obfuscator again. The obfuscator changes
* each time the random generator is seeded.
*
* If we were really paranoid, we would want to break possible discovery by passing
@@ -853,7 +853,7 @@ uint64_t picoquic_crypto_uniform_random(picoquic_quic_t* quic, uint64_t rnd_max)
static uint64_t public_random_seed[16] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
static int public_random_index = 0;
static const uint64_t public_random_multiplier = 1181783497276652981ull;
-static uint64_t public_random_obfuscator[2] = { 0xcafe1234deadbeefull, 0xaaaa5555aaaa5555ull };
+static uint64_t public_random_obfuscator = 0xcafe1234deadbeefull;
static uint64_t picoquic_public_random_step(void)
{
@@ -871,7 +871,12 @@ static uint64_t picoquic_public_random_step(void)
uint64_t picoquic_public_random_64(void)
{
uint64_t s1 = picoquic_public_random_step();
- return (public_random_obfuscator[0] ^ (public_random_multiplier * (s1 ^ public_random_obfuscator[1])));
+ s1 ^= public_random_obfuscator;
+ s1 ^= (((s1 >> 32)* public_random_multiplier) & 0xffffffff);
+ s1 ^= (((s1 & 0xFFFFFFFF) * public_random_multiplier) << 32);
+ s1 ^= (((s1 >> 32)* public_random_multiplier) & 0xffffffff);
+ s1 ^= public_random_obfuscator;
+ return s1;
}
void picoquic_public_random_seed_64(uint64_t seed, int reset)
@@ -897,8 +902,7 @@ void picoquic_public_random_seed(picoquic_quic_t* quic)
picoquic_crypto_random(quic, &seed, sizeof(seed));
picoquic_public_random_seed_64(seed[0], 0);
- public_random_obfuscator[0] ^= seed[1];
- public_random_obfuscator[1] ^= seed[2];
+ public_random_obfuscator = seed[1];
}
void picoquic_public_random(void* buf, size_t len)
|
doc: clarify goals
Idea from of metametadata in one file | @@ -120,11 +120,11 @@ they get the same KeySet (aka "round trip").
## Possibility to Represent any Configuration File Format
Elektra must be powerful and flexible enough to be able to represent any configuration file
-format. We do not judge if a feature of a configuration file format is useful and support
-the development of fully-conforming parsers and emitters.
+format. We support the development of fully-conforming parsers and emitters.
> This means, that given a correctly written storage plugin, a KeySet can be found
-> that represents the content and the hierarchical structure of the configuration file.
+> that represents the configuration, its metadata and the hierarchical structure of
+> the configuration file.
## Flexibility of Administrators
|
YAML Smith: Use function to retrieve leaf keys | @@ -91,32 +91,21 @@ bool sameLevelOrBelow (CppKey const & key1, CppKey const & key2)
}
/**
- * @brief This class provides additional functionality for the key set class.
- */
-class CppKeySetPlus : public CppKeySet
-{
-public:
- /**
- * @copydoc KeySet::KeySet(ckdb::KeySet)
- */
- CppKeySetPlus (ckdb::KeySet * keys) : CppKeySet (keys)
- {
- }
-
- /**
- * @brief Collect leaf keys (keys without any key below) for this key set.
+ * @brief This function collects leaf keys (keys without any key below) for a given key set.
*
- * @return A key set containing all leaf keys
+ * @param keys This parameter stores the key set for which this function retrieves all leaf keys.
+ *
+ * @return A key set containing only leaf keys
*/
- CppKeySet leaves ()
+CppKeySet leaves (CppKeySet const & keys)
{
CppKeySet leaves;
- auto current = this->begin ();
- if (current == this->end ()) return leaves;
+ auto current = keys.begin ();
+ if (current == keys.end ()) return leaves;
CppKey previous = *current;
- while (++current != this->end ())
+ while (++current != keys.end ())
{
bool isLeaf = !current->isBelow (previous);
if (isLeaf)
@@ -131,7 +120,6 @@ public:
return leaves;
}
-};
/**
* @brief This function writes a YAML collection entry (either mapping key or array element) to the given stream.
@@ -164,7 +152,7 @@ inline void writeCollectionEntry (ofstream & output, CppKey const & key, string
* @param keys This parameter stores the key set which this function converts to YAML data.
* @param parent This value represents the root key of `keys`.
*/
-void writeYAML (ofstream & output, CppKeySet & keys, CppKey const & parent)
+void writeYAML (ofstream & output, CppKeySet && keys, CppKey const & parent)
{
ELEKTRA_LOG_DEBUG ("Convert %zu key%s", keys.size (), keys.size () == 1 ? "" : "s");
keys.rewind ();
@@ -222,13 +210,12 @@ int elektraYamlsmithGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key
int elektraYamlsmithSet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned, Key * parentKey)
{
CppKey parent{ parentKey };
- CppKeySetPlus keys{ returned };
+ CppKeySet keys{ returned };
ofstream file{ parent.getString () };
if (file.is_open ())
{
- CppKeySet leaves = keys.leaves ();
- writeYAML (file, leaves, parent);
+ writeYAML (file, leaves (keys), parent);
}
else
{
|
Set DCMI byte select mode for H7. | @@ -118,7 +118,7 @@ static int dcmi_config(uint32_t jpeg_mode)
DCMIHandle.Init.CaptureRate = DCMI_CR_ALL_FRAME; // Capture rate all frames
DCMIHandle.Init.ExtendedDataMode = DCMI_EXTEND_DATA_8B; // Capture 8 bits on every pixel clock
DCMIHandle.Init.JPEGMode = jpeg_mode; // Set JPEG Mode
- #if defined(STM32F765xx) || defined(STM32F769xx)
+ #if defined(MCU_SERIES_F7) || defined(MCU_SERIES_H7)
DCMIHandle.Init.ByteSelectMode = DCMI_BSM_ALL; // Capture all received bytes
DCMIHandle.Init.ByteSelectStart = DCMI_OEBS_ODD; // Ignored
DCMIHandle.Init.LineSelectMode = DCMI_LSM_ALL; // Capture all received lines
|
out_kafka-rest: debug JSON payload on error | @@ -120,6 +120,7 @@ static char *kafka_rest_format(void *data, size_t bytes,
msgpack_pack_str(&mp_pck, 5);
msgpack_pack_str_body(&mp_pck, "value", 5);
+
msgpack_pack_map(&mp_pck, map_size);
/* Time key and time formatted */
@@ -239,6 +240,10 @@ static void cb_kafka_flush(void *data, size_t bytes,
/* The request was issued successfully, validate the 'error' field */
flb_debug("[out_kafka_rest] HTTP Status=%i", c->resp.status);
if (c->resp.status != 200) {
+ if (c->resp.payload_size > 0) {
+ flb_debug("[out_kafka_rest] Kafka REST response\n%s",
+ c->resp.payload);
+ }
goto retry;
}
|
Bug with payments for archive miners is fixed | @@ -998,15 +998,40 @@ static double connection_calculate_unpaid_shares(struct connection_pool_data *co
return diff2pay(sum, count);
}
+// calculates the rest of shares and clear shares
+static double process_outdated_miner(struct miner_pool_data *miner)
+{
+ double sum = 0;
+ int diff_count = 0;
+
+ for(int i = 0; i < CONFIRMATIONS_COUNT; ++i) {
+ if(miner->maxdiff[i] > 0) {
+ sum += miner->maxdiff[i];
+ miner->maxdiff[i] = 0;
+ ++diff_count;
+ }
+ }
+
+ if(diff_count > 0) {
+ sum /= diff_count;
+ }
+
+ return sum;
+}
+
static double countpay(struct miner_pool_data *miner, int confirmation_index, double *pay)
{
double sum = 0;
int diff_count = 0;
- if(miner->maxdiff[confirmation_index] > 0) {
+ //if miner is in archive state and last connection was disconnected more than 16 minutes ago we pay for the rest of shares and clear shares
+ if(miner->state == MINER_ARCHIVE && g_xdag_pool_task_index - miner->task_index > XDAG_POOL_CONFIRMATIONS_COUNT) {
+ sum += process_outdated_miner(miner);
+ diff_count++;
+ } else if(miner->maxdiff[confirmation_index] > 0) {
sum += miner->maxdiff[confirmation_index];
miner->maxdiff[confirmation_index] = 0;
- diff_count++;
+ ++diff_count;
}
*pay = diff2pay(sum, diff_count);
@@ -1381,7 +1406,7 @@ void* pool_remove_inactive_connections(void* arg)
pthread_mutex_lock(&g_descriptors_mutex);
LL_FOREACH(g_connection_list_head, elt)
{
- if(current_time - elt->connection_data.last_share_time > 180) { //last share is received more than 3 minutes ago
+ if(current_time - elt->connection_data.last_share_time > 300) { //last share is received more than 5 minutes ago
elt->connection_data.deleted = 1;
elt->connection_data.disconnection_reason = strdup("inactive connection");
}
|
hv: refine 'init_percpu_lapic_id'
This patch refines 'init_percpu_lapic_id' to move the error
handling to 'init_pcpu_pre'. | @@ -45,24 +45,26 @@ static void print_hv_banner(void);
static uint16_t get_pcpu_id_from_lapic_id(uint32_t lapic_id);
static uint64_t start_tsc __attribute__((__section__(".bss_noinit")));
-static void init_percpu_lapic_id(void)
+static bool init_percpu_lapic_id(void)
{
uint16_t i;
uint16_t pcpu_num;
uint32_t lapic_id_array[CONFIG_MAX_PCPU_NUM];
+ bool success = false;
/* Save all lapic_id detected via parse_mdt in lapic_id_array */
pcpu_num = parse_madt(lapic_id_array);
- if (pcpu_num == 0U) {
- /* failed to get the physcial cpu number */
- panic("failed to get the physcial cpu number");
- }
+ if (pcpu_num != 0U) {
phys_cpu_num = pcpu_num;
for (i = 0U; (i < pcpu_num) && (i < CONFIG_MAX_PCPU_NUM); i++) {
per_cpu(lapic_id, i) = lapic_id_array[i];
}
+ success = true;
+ }
+
+ return success;
}
static void pcpu_set_current_state(uint16_t pcpu_id, enum pcpu_boot_state state)
@@ -132,7 +134,9 @@ void init_pcpu_pre(uint16_t pcpu_id_args)
early_init_lapic();
- init_percpu_lapic_id();
+ if (!init_percpu_lapic_id()) {
+ panic("failed to init_percpu_lapic_id!");
+ }
ret = init_ioapic_id_info();
if (ret != 0) {
|
Change activity::dati to use activitity_types::code instead of types::code | @@ -155,7 +155,7 @@ enum {
https://discord.com/developers/docs/resources/channel#message-object-message-activity-structure */
namespace activity {
struct dati {
- types::code type;
+ activity_types::code type;
//@todo missing party_id;
};
|
Emit LH poses as external poses | @@ -198,6 +198,21 @@ static int parse_and_run_imu(const char *line, SurvivePlaybackData *driver, bool
return 0;
}
+static int parse_and_run_lhpose(const char *line, struct SurvivePlaybackData *driver) {
+ SurvivePose pose;
+ int lh = -1;
+ int rr = sscanf(line, "%d LH_POSE " SurvivePose_sformat "\n", &lh, &pose.Pos[0], &pose.Pos[1], &pose.Pos[2],
+ &pose.Rot[0], &pose.Rot[1], &pose.Rot[2], &pose.Rot[3]);
+
+ SurviveContext *ctx = driver->ctx;
+ if (driver->outputExternalPose) {
+ char buffer[32] = {0};
+ snprintf(buffer, 31, "previous_LH%d", lh);
+ ctx->external_poseproc(ctx, buffer, &pose);
+ }
+ return 0;
+}
+
static int parse_and_run_externalpose(const char *line, SurvivePlaybackData *driver) {
char name[128] = { 0 };
SurvivePose pose;
@@ -332,6 +347,10 @@ static int playback_pump_msg(struct SurviveContext *ctx, void *_driver) {
parse_and_run_rawlight(line, driver);
break;
case 'L':
+ if (strcmp(op, "LH_POSE") == 0) {
+ parse_and_run_lhpose(line, driver);
+ break;
+ }
case 'R':
if (op[1] == 0 && driver->hasRawLight == false)
parse_and_run_lightcode(line, driver);
|
input: chunk: use 'trace' message instead of 'warn' for chunks without matching routes | @@ -161,7 +161,7 @@ int flb_intput_chunk_count_dropped_chunks(struct flb_input_chunk *ic,
/*
* Find a slot in the output instance to append the new data with size chunk_size, it
- * will drop the the oldest chunks when the limitaion on local disk is reached.
+ * will drop the the oldest chunks when the limitation on local disk is reached.
*
* overlimit_routes_mask: A bit mask used to check whether the output instance will
* reach the limit when buffering the new data
@@ -428,8 +428,8 @@ struct flb_input_chunk *flb_input_chunk_create(struct flb_input_instance *in,
/* Calculate the routes_mask for the input chunk */
chunk_routes_mask = flb_router_get_routes_mask_by_tag(tag, tag_len, in);
if (chunk_routes_mask == 0) {
- flb_warn("[input chunk] no matching route for input chunk %s",
- flb_input_chunk_get_name(ic));
+ flb_trace("[input chunk] no matching route for input chunk '%s' with tag '%s'",
+ flb_input_chunk_get_name(ic), tag);
}
ic->routes_mask = chunk_routes_mask;
|
make: Add utils to help message
BRANCH=none
TEST=make help | @@ -676,6 +676,7 @@ help:
@echo " proj-<boardname> - Build a single board (similar to 'all BOARD=boardname')"
@echo " savesizes - Save the filesizes of currently built boards for comparison"
@echo " newsizes - Compare previously saved filesizes against new sizes"
+ @echo " utils - Build all host utilities"
@echo ""
@echo " tests [BOARD=] - Build all unit tests for a specific board"
@echo " hosttests - Build all host unit tests"
|
framer-802154: do away with "type" of create_frame()
The argument, "type", is not used effectively in create_frame(). In
addition, it's confusing because create() calls create_frame() with
FRAME802154_DATAFRAME as "type" though it does not always create a data
frame. | @@ -57,7 +57,7 @@ static uint8_t initialized = 0;
/*---------------------------------------------------------------------------*/
static int
-create_frame(int type, int do_create)
+create_frame(int do_create)
{
frame802154_t params;
int hdr_len;
@@ -218,13 +218,13 @@ framer_802154_setup_params(packetbuf_attr_t (*get_attr)(uint8_t type),
static int
hdr_length(void)
{
- return create_frame(FRAME802154_DATAFRAME, 0);
+ return create_frame(0);
}
/*---------------------------------------------------------------------------*/
static int
create(void)
{
- return create_frame(FRAME802154_DATAFRAME, 1);
+ return create_frame(1);
}
/*---------------------------------------------------------------------------*/
static int
|
programs_decode: fix incrementing progssz on error
The progssz value should be incremented only if
parsing and allocating is successful | @@ -73,7 +73,7 @@ int programs_decode(vm_map_t *kmap, vm_object_t *kernel)
if (!hal_strcmp(cpio->name, "TRAILER!!!"))
break;
- pr = &syspage->progs[syspage->progssz++];
+ pr = &syspage->progs[syspage->progssz];
/* Initialize cmdline */
k = hal_strlen((char *)cpio->name);
@@ -84,12 +84,12 @@ int programs_decode(vm_map_t *kmap, vm_object_t *kernel)
fs = programs_a2i(cpio->c_filesize);
if (fs == -EINVAL) {
- lib_printf("programs: invalid filesize");
+ lib_printf("programs: invalid filesize\n");
return -EINVAL;
}
ns = programs_a2i(cpio->c_namesize);
if (ns == -EINVAL) {
- lib_printf("programs: invalid namesize");
+ lib_printf("programs: invalid namesize\n");
return -EINVAL;
}
@@ -112,6 +112,7 @@ int programs_decode(vm_map_t *kmap, vm_object_t *kernel)
pr->end = (typeof(pr->end))p->addr + fs;
}
+ syspage->progssz++;
cpio = (void *)(((ptr_t)cpio + fs + CPIO_PAD) & ~CPIO_PAD);
}
|
test: cleanup t::run_picotls_client() | @@ -705,10 +705,8 @@ sub run_picotls_client {
my $host = $opts->{host} // '127.0.0.1';
my $path = $opts->{path} // '/';
my $cli_opts = $opts->{opts} // '';
- my $connection = $opts->{keep_alive} ? "keep-alive" : "close";
my $cli = bindir() . "/picotls/cli";
- die "picotls-cli ($cli) not found" unless -e $cli;
my $tempdir = tempdir();
my $cmd = "exec $cli $cli_opts $host $port > $tempdir/resp.txt 2>&1";
@@ -719,10 +717,10 @@ sub run_picotls_client {
print $fh <<"EOT";
GET $path HTTP/1.1\r
Host: $host:$port\r
-Connection: $connection\r
+Connection: close\r
\r
EOT
- Time::HiRes::sleep(0.1) until -e "$tempdir/resp.txt" && -s "$tempdir/resp.txt" > 0;
+ sleep 1;
close $fh;
open $fh, "<", "$tempdir/resp.txt"
|
Fix libunbound return for root key sentinel. | @@ -513,7 +513,8 @@ libworker_enter_result(struct ub_result* res, sldns_buffer* buf,
res->nxdomain = 1;
if(msg_security == sec_status_secure)
res->secure = 1;
- if(msg_security == sec_status_bogus)
+ if(msg_security == sec_status_bogus ||
+ msg_security == sec_status_secure_sentinel_fail)
res->bogus = 1;
}
|
Move protocol test module before config module.
The protocol module should be tested before modules that have a dependency on it. | @@ -444,6 +444,33 @@ unit:
- postgres/interface
- postgres/interface/page
+ # ********************************************************************************************************************************
+ - name: protocol
+
+ test:
+ # ----------------------------------------------------------------------------------------------------------------------------
+ - name: protocol
+ total: 9
+ harness:
+ name: protocol
+ shim:
+ protocol/helper:
+ function:
+ - protocolLocalExec
+ containerReq: true
+ binReq: true
+
+ coverage:
+ - protocol/client
+ - protocol/command
+ - protocol/helper
+ - protocol/parallel
+ - protocol/parallelJob
+ - protocol/server
+
+ include:
+ - common/exec
+
# ********************************************************************************************************************************
- name: config
@@ -572,33 +599,6 @@ unit:
include:
- storage/write
- # ********************************************************************************************************************************
- - name: protocol
-
- test:
- # ----------------------------------------------------------------------------------------------------------------------------
- - name: protocol
- total: 9
- harness:
- name: protocol
- shim:
- protocol/helper:
- function:
- - protocolLocalExec
- containerReq: true
- binReq: true
-
- coverage:
- - protocol/client
- - protocol/command
- - protocol/helper
- - protocol/parallel
- - protocol/parallelJob
- - protocol/server
-
- include:
- - common/exec
-
# ********************************************************************************************************************************
- name: info
|
highlevel: Fix long_double tests | @@ -400,8 +400,8 @@ static void test_arraySetters ()
elektraSetDoubleArrayElement (elektra, "doublearraykey", 0, 1.1, &error);
elektraSetDoubleArrayElement (elektra, "doublearraykey", 1, 2.1, &error);
- elektraSetLongDoubleArrayElement (elektra, "longdoublearraykey", 0, 1.1, &error);
- elektraSetLongDoubleArrayElement (elektra, "longdoublearraykey", 1, 2.1, &error);
+ elektraSetLongDoubleArrayElement (elektra, "longdoublearraykey", 0, 1.1L, &error);
+ elektraSetLongDoubleArrayElement (elektra, "longdoublearraykey", 1, 2.1L, &error);
// Add new keys.
@@ -447,8 +447,8 @@ static void test_arraySetters ()
elektraSetDoubleArrayElement (elektra, "newdoublearraykey", 0, 1.1, &error);
elektraSetDoubleArrayElement (elektra, "newdoublearraykey", 1, 2.1, &error);
- elektraSetLongDoubleArrayElement (elektra, "newlongdoublearraykey", 0, 1.1, &error);
- elektraSetLongDoubleArrayElement (elektra, "newlongdoublearraykey", 1, 2.1, &error);
+ elektraSetLongDoubleArrayElement (elektra, "newlongdoublearraykey", 0, 1.1L, &error);
+ elektraSetLongDoubleArrayElement (elektra, "newlongdoublearraykey", 1, 2.1L, &error);
if (error)
{
|
YIN parser CHANGE simplify copying of the tested string
parsed revision date is checked and its copying into a constant-size
buffer can be simplified by memcpy(). | @@ -1711,7 +1711,7 @@ yin_parse_revision(struct lys_yin_parser_ctx *ctx, struct lysp_revision **revs)
FREE_STRING(ctx->xmlctx->ctx, temp_date);
return LY_EVALID;
}
- strncpy(rev->date, temp_date, LY_REV_SIZE);
+ memcpy(rev->date, temp_date, LY_REV_SIZE);
FREE_STRING(ctx->xmlctx->ctx, temp_date);
/* parse content */
|
build: Makefile dep change for ubuntu
Alter dep name and location for ubuntu-20 package naming
Dropping 14.04 support while keeping 16.04 and 18.04
Dropping python2-dev for ubuntu-20
Type: make | @@ -65,7 +65,7 @@ DEB_DEPENDS = curl build-essential autoconf automake ccache
DEB_DEPENDS += debhelper dkms git libtool libapr1-dev dh-systemd
DEB_DEPENDS += libconfuse-dev git-review exuberant-ctags cscope pkg-config
DEB_DEPENDS += lcov chrpath autoconf indent clang-format libnuma-dev
-DEB_DEPENDS += python-all python3-all python3-setuptools python-dev
+DEB_DEPENDS += python-all python3-all python3-setuptools
DEB_DEPENDS += python-virtualenv python-pip libffi6 check
DEB_DEPENDS += libboost-all-dev libffi-dev python3-ply libmbedtls-dev
DEB_DEPENDS += cmake ninja-build uuid-dev python3-jsonschema python3-yaml
@@ -73,8 +73,10 @@ DEB_DEPENDS += python3-venv # ensurepip
DEB_DEPENDS += python3-dev # needed for python3 -m pip install psutil
# python3.6 on 16.04 requires python36-dev
-ifeq ($(OS_VERSION_ID),14.04)
- DEB_DEPENDS += libssl-dev
+ifeq ($(OS_VERSION_ID),16.04)
+ DEB_DEPENDS += python-dev
+else ifeq ($(OS_VERSION_ID),18.04)
+ DEB_DEPENDS += python-dev
else ifeq ($(OS_ID)-$(OS_VERSION_ID),debian-8)
DEB_DEPENDS += libssl-dev
APT_ARGS = -t jessie-backports
|
Update cache in opengl rendering | @@ -454,6 +454,11 @@ bool TCOD_opengl_render(
tcod::ColorRGB(console->tiles[i].fg),
tcod::ColorRGB(console->tiles[i].bg)
);
+
+ if (track_changes)
+ {
+ cache->tiles[i] = console->tiles[i];
+ }
}
}
/* check if any of the textures have changed since they were last uploaded */
|
h2olog: remove appdata fields from block_fields | @@ -82,13 +82,7 @@ struct_map = {
}
# A block list to list useless or secret data fields
-# TODO: replace this dict with /* @appdata */ annotations
block_fields = {
- "quicly:crypto_decrypt": set(["decrypted"]),
- "quicly:crypto_update_secret": set(["secret"]),
- "quicly:crypto_send_key_update": set(["secret"]),
- "quicly:crypto_receive_key_update": set(["secret"]),
- "quicly:crypto_receive_key_update_prepare": set(["secret"]),
}
# The block list for probes.
|
Fixed wrong output information
to -> too | @@ -150,7 +150,7 @@ rt_err_t rt_wlan_dev_ap_start(struct rt_wlan_device *device, struct rt_wlan_info
if ((password_len > RT_WLAN_PASSWORD_MAX_LENGTH) ||
(info->ssid.len > RT_WLAN_SSID_MAX_LENGTH))
{
- LOG_E("L:%d password or ssid is to long", __LINE__);
+ LOG_E("L:%d password or ssid is too long", __LINE__);
return -RT_ERROR;
}
|
Force nekbone down to O1 while AAPointer assert worked | @@ -9,6 +9,7 @@ thisdir=`dirname $realpath`
. $thisdir/aomp_common_vars
# --- end standard header ----
+export CCC_OVERRIDE_OPTIONS="+-O1"
if [ "$1" == "rerun" ]; then
cd $AOMP_REPOS_TEST/Nekbone
cd test/nek_gpu1
@@ -50,4 +51,5 @@ else
tail -7 nek.log
fi
fi
+echo "running Nekbone at -O1, AAPointer issue"
exit $ret
|
Fix missing SP-GiST support in
misses setting of amoptsprocnum for SP-GiST. This commit fixes
that. | @@ -44,6 +44,7 @@ spghandler(PG_FUNCTION_ARGS)
amroutine->amstrategies = 0;
amroutine->amsupport = SPGISTNProc;
+ amroutine->amoptsprocnum = SPGIST_OPTIONS_PROC;
amroutine->amcanorder = false;
amroutine->amcanorderbyop = true;
amroutine->amcanbackward = false;
|
use threaded mkl | @@ -98,9 +98,9 @@ plasma-installer_%{version}/setup.py \
%if %{compiler_family} == intel
--cflags="${RPM_OPT_FLAGS} ${PIC_OPT}" \
--fflags="${RPM_OPT_FLAGS} ${PIC_OPT}" \
- --blaslib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \
- --cblaslib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \
- --lapacklib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \
+ --blaslib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core" \
+ --cblaslib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core" \
+ --lapacklib="-L/intel/mkl/lib/em64t -lmkl_intel_lp64 -lmkl_gnu_thread -lmkl_core" \
%endif
--downlapc
|
fix(FreeRTOS): Initialize uxTaskNumber at task initialization
Closes | @@ -917,6 +917,13 @@ UBaseType_t x;
}
#endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
+ #if( configUSE_TRACE_FACILITY == 1 )
+ {
+ /* Zero the uxTaskNumber TCB member to avoid random value from dynamically allocated TCBs */
+ pxNewTCB->uxTaskNumber = 0;
+ }
+ #endif /* ( configUSE_TRACE_FACILITY == 1 ) */
+
/* Calculate the top of stack address. This depends on whether the stack
grows from high memory to low (as per the 80x86) or vice versa.
portSTACK_GROWTH is used to make the result positive or negative as required
|
chat: changed a =: to a =. to fix message storage bug | =/ =hoon (ream expression.letter)
letter(output (eval bol hoon))
=: length.config.u.mailbox +(length.config.u.mailbox)
- number.envelope.act length.config.u.mailbox
+ number.envelope.act +(length.config.u.mailbox)
envelopes.u.mailbox (snoc envelopes.u.mailbox envelope.act)
- inbox (~(put by inbox) path.act u.mailbox)
==
- :_ this(inbox inbox)
- (send-diff path.act act)
+ :- (send-diff path.act act)
+ this(inbox (~(put by inbox) path.act u.mailbox))
::
++ handle-read
|= act=chat-action
=/ mailbox=(unit mailbox) (~(get by inbox) path.act)
?~ mailbox
[~ this]
- =: read.config.u.mailbox length.config.u.mailbox
- inbox (~(put by inbox) path.act u.mailbox)
- ==
- :_ this(inbox inbox)
- (send-diff path.act act)
+ =. read.config.u.mailbox length.config.u.mailbox
+ :- (send-diff path.act act)
+ this(inbox (~(put by inbox) path.act u.mailbox))
::
++ update-subscribers
|= [pax=path act=chat-action]
|
added additional counter for NMEA sentences | @@ -150,6 +150,7 @@ unsigned long long int tacacspcount;
tacacspl_t *tacacspliste;
unsigned long long int gpsdframecount;
+unsigned long long int gpsnmeaframecount;
unsigned long long int fcsframecount;
unsigned long long int wdsframecount;
unsigned long long int beaconframecount;
@@ -582,9 +583,10 @@ printf( " \n"
"maximum time stamp...............: %s (GMT)\n"
"packets inside...................: %llu\n"
"skipped damaged packets..........: %llu\n"
- "packets with GPS data............: %llu\n"
+ "packets with GPS NMEA data.......: %llu\n"
+ "packets with GPS data (JSON old).: %llu\n"
"packets with FCS.................: %llu\n"
- , basename(pcapinname), pcaptype, version_major, version_minor, pcapnghwinfo, pcapngdeviceinfo[0], pcapngdeviceinfo[1], pcapngdeviceinfo[2], pcapngosinfo, pcapngapplinfo, hcxsignedptr, getdltstring(networktype), networktype, getendianessstring(endianess), geterrorstat(pcapreaderrors), mintimestring, maxtimestring, rawpacketcount, skippedpacketcount, gpsdframecount, fcsframecount);
+ , basename(pcapinname), pcaptype, version_major, version_minor, pcapnghwinfo, pcapngdeviceinfo[0], pcapngdeviceinfo[1], pcapngdeviceinfo[2], pcapngosinfo, pcapngapplinfo, hcxsignedptr, getdltstring(networktype), networktype, getendianessstring(endianess), geterrorstat(pcapreaderrors), mintimestring, maxtimestring, rawpacketcount, skippedpacketcount, gpsnmeaframecount, gpsdframecount, fcsframecount);
if(tscleanflag == true)
{
printf("warning..........................: zero value time stamps detected\n"
@@ -6276,7 +6278,7 @@ while(0 < restlen)
memset(&nmeasentence, 0, NMEA_MAX);
memcpy(&nmeasentence, &option->data, option->option_length);
if(fhnmea != NULL) fprintf(fhnmea, "%s\n", nmeasentence);
- gpsdframecount++;
+ gpsnmeaframecount++;
}
}
optr += option->option_length +padding +OH_SIZE;
@@ -6862,6 +6864,7 @@ apstaessidcount = 0;
apstaessidcountcleaned = 0;
eapolcount = 0;
gpsdframecount = 0;
+gpsnmeaframecount = 0;
fcsframecount = 0;
wdsframecount = 0;
beaconframecount = 0;
@@ -7113,6 +7116,14 @@ if(gpxflag == true)
{
printf("%llu track points written to %s\n", gpsdframecount, gpxoutname);
}
+ if(gpsnmeaframecount == 1)
+ {
+ printf("%llu track point written to %s\n", gpsnmeaframecount, nmeaoutname);
+ }
+ else if(gpsnmeaframecount > 1)
+ {
+ printf("%llu track points written to %s\n", gpsnmeaframecount, nmeaoutname);
+ }
}
if(leapliste != NULL)
|
dill: don't sync %kids on galaxies | =/ myt (flop (fall tem ~))
=. tem ~
=. ..mere (pass / %g %jolt %base ram)
- =. ..mere
- ?- (clan:title our)
- %pawn ..mere
- %czar (kiln-sync %kids our %base)
- * (kiln-install %base sponsor %kids)
- ==
+ =? ..mere ?=(?(%earl %duke %king) (clan:title our))
+ (kiln-install %base sponsor %kids)
=. ..mere (show-desk %kids)
=. ..mere drum-watch
|- ^+ ..mere
|
Fix optee test
Pass --optee when initializing oeapkman, obtaining root. | @@ -29,11 +29,17 @@ endif ()
# Fetch the location of oeapkman binary.
get_target_property(OEAPKMAN oeapkman LOCATION)
+if (OE_TRUSTZONE)
+ set(APKMAN_ARCH "--optee")
+else ()
+ set(APKMAN_ARCH "")
+endif ()
+
# Execute oeapkman once so that it is initialized.
-execute_process(COMMAND "${OEAPKMAN}")
+execute_process(COMMAND "${OEAPKMAN}" ${APKMAN_ARCH})
# Execute oeapkman again to fetch the root folder.
-execute_process(COMMAND "${OEAPKMAN}" root
+execute_process(COMMAND "${OEAPKMAN}" ${APKMAN_ARCH} root
OUTPUT_VARIABLE APKMAN_ROOT OUTPUT_STRIP_TRAILING_WHITESPACE)
message("APKMAN_ROOT is ${APKMAN_ROOT}")
|
[Panic] also print to secondary USB Serial/JTAG Console | #include "esp_gdbstub.h"
#endif
-#if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG
+#if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG || CONFIG_ESP_CONSOLE_SECONDARY_USB_SERIAL_JTAG
#include "hal/usb_serial_jtag_ll.h"
#endif
@@ -73,7 +73,7 @@ static wdt_hal_context_t rtc_wdt_ctx = {.inst = WDT_RWDT, .rwdt_dev = &RTCCNTL};
#if CONFIG_ESP_CONSOLE_UART
static uart_hal_context_t s_panic_uart = { .dev = CONFIG_ESP_CONSOLE_UART_NUM == 0 ? &UART0 :&UART1 };
-void panic_print_char(const char c)
+static void panic_print_char_uart(const char c)
{
uint32_t sz = 0;
while (!uart_hal_get_txfifo_len(&s_panic_uart));
@@ -83,21 +83,21 @@ void panic_print_char(const char c)
#if CONFIG_ESP_CONSOLE_USB_CDC
-void panic_print_char(const char c)
+static void panic_print_char_usb_cdc(const char c)
{
esp_usb_console_write_buf(&c, 1);
/* result ignored */
}
#endif // CONFIG_ESP_CONSOLE_USB_CDC
-#if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG
+#if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG || CONFIG_ESP_CONSOLE_SECONDARY_USB_SERIAL_JTAG
//Timeout; if there's no host listening, the txfifo won't ever
//be writable after the first packet.
#define USBSERIAL_TIMEOUT_MAX_US 50000
static int s_usbserial_timeout = 0;
-void panic_print_char(const char c)
+static void panic_print_char_usb_serial_jtag(const char c)
{
while (!usb_serial_jtag_ll_txfifo_writable() && s_usbserial_timeout < (USBSERIAL_TIMEOUT_MAX_US / 100)) {
esp_rom_delay_us(100);
@@ -108,15 +108,21 @@ void panic_print_char(const char c)
s_usbserial_timeout = 0;
}
}
-#endif //CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG
+#endif //CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG || CONFIG_ESP_CONSOLE_SECONDARY_USB_SERIAL_JTAG
-#if CONFIG_ESP_CONSOLE_NONE
void panic_print_char(const char c)
{
- /* no-op */
+#if CONFIG_ESP_CONSOLE_UART
+ panic_print_char_uart(c);
+#endif
+#if CONFIG_ESP_CONSOLE_USB_CDC
+ panic_print_char_usb_cdc(c);
+#endif
+#if CONFIG_ESP_CONSOLE_USB_SERIAL_JTAG || CONFIG_ESP_CONSOLE_SECONDARY_USB_SERIAL_JTAG
+ panic_print_char_usb_serial_jtag(c);
+#endif
}
-#endif // CONFIG_ESP_CONSOLE_NONE
void panic_print_str(const char *str)
{
|
SOVERSION bump to version 1.2.10 | @@ -51,7 +51,7 @@ set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION
# with backward compatible change and micro version is connected with any internal change of the library.
set(LIBNETCONF2_MAJOR_SOVERSION 1)
set(LIBNETCONF2_MINOR_SOVERSION 2)
-set(LIBNETCONF2_MICRO_SOVERSION 9)
+set(LIBNETCONF2_MICRO_SOVERSION 10)
set(LIBNETCONF2_SOVERSION_FULL ${LIBNETCONF2_MAJOR_SOVERSION}.${LIBNETCONF2_MINOR_SOVERSION}.${LIBNETCONF2_MICRO_SOVERSION})
set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_SOVERSION})
|
taniks: update fan table
Update fan table for power on noise.
BRANCH=none
TEST=make buildall -j | @@ -387,7 +387,7 @@ BUILD_ASSERT(ARRAY_SIZE(temp_sensors) == TEMP_SENSOR_COUNT);
[EC_TEMP_THRESH_HIGH] = C_TO_K(85), \
}, \
.temp_fan_off = C_TO_K(35), \
- .temp_fan_max = C_TO_K(60), \
+ .temp_fan_max = C_TO_K(70), \
}
__maybe_unused static const struct ec_thermal_config thermal_cpu = THERMAL_CPU;
@@ -417,7 +417,7 @@ __maybe_unused static const struct ec_thermal_config thermal_cpu = THERMAL_CPU;
[EC_TEMP_THRESH_HIGH] = C_TO_K(85), \
}, \
.temp_fan_off = C_TO_K(35), \
- .temp_fan_max = C_TO_K(60), \
+ .temp_fan_max = C_TO_K(70), \
}
__maybe_unused static const struct ec_thermal_config thermal_fan = THERMAL_FAN;
|
Typec: Add disconnected status event
Add an event bit to represent that the port has been disconnected.
BRANCH=None
TEST=make -j buildall | @@ -6691,6 +6691,7 @@ enum tcpc_cc_polarity {
#define PD_STATUS_EVENT_SOP_DISC_DONE BIT(0)
#define PD_STATUS_EVENT_SOP_PRIME_DISC_DONE BIT(1)
#define PD_STATUS_EVENT_HARD_RESET BIT(2)
+#define PD_STATUS_EVENT_DISCONNECTED BIT(3)
/*
* Encode and decode for BCD revision response
|
message_drop_test: send only what hasn't been sent yet | @@ -14,23 +14,22 @@ if JUNGLE:
from panda_jungle import PandaJungle # pylint: disable=import-error
# Generate unique messages
-NUM_MESSAGES_PER_BUS = 2000
+NUM_MESSAGES_PER_BUS = 10000
messages = [bytes(struct.pack("Q", i)) for i in range(NUM_MESSAGES_PER_BUS)]
tx_messages = list(itertools.chain.from_iterable(map(lambda msg: [[0xaa, None, msg, 0], [0xaa, None, msg, 1], [0xaa, None, msg, 2]], messages)))
def flood_tx(panda):
print('Sending!')
-
- BLOCK_SIZE = 50
- for i in range((len(tx_messages) + 1) // BLOCK_SIZE):
+ transferred = 0
while True:
try:
- print(f"Sending block {BLOCK_SIZE * i}-{ BLOCK_SIZE * (i + 1)}: ", end="")
- panda.can_send_many(tx_messages[BLOCK_SIZE * i : BLOCK_SIZE * (i + 1)])
+ print(f"Sending block {transferred}-{len(tx_messages)}: ", end="")
+ panda.can_send_many(tx_messages[transferred:], timeout=10)
print("OK")
break
- except usb1.USBErrorTimeout:
- print("timeout")
+ except usb1.USBErrorTimeout as e:
+ transferred += (e.transferred // 16)
+ print("timeout, transferred: ", transferred)
print(f"Done sending {3*NUM_MESSAGES_PER_BUS} messages!")
|
Jenkins: Enable tasks for Web UI and website | @@ -1031,9 +1031,9 @@ def generateArtifactStages() {
def tasks = [:]
tasks << buildDebianPackage("buildPackage/debian/stretch", DOCKER_IMAGES.stretch)
// tasks << buildDebianPackage("buildPackage/debian/buster", DOCKER_IMAGES.buster)
- tasks << buildDebianPackage("buildPackage/debian/bionic", DOCKER_IMAGES.bionic)
- // tasks << buildWebsite()
- // tasks << buildWebUI()
+ // tasks << buildDebianPackage("buildPackage/debian/bionic", DOCKER_IMAGES.bionic)
+ tasks << buildWebsite()
+ tasks << buildWebUI()
return tasks
}
|
Wait for imu to inform velocity before reporting it | @@ -1197,5 +1197,7 @@ void survive_kalman_tracker_report_state(PoserData *pd, SurviveKalmanTracker *tr
if (so->OutPose_timecode < pd->timecode) {
SURVIVE_INVOKE_HOOK_SO(imupose, so, pd->timecode, &pose);
}
+ if(tracker->stats.imu_count > 100) {
SURVIVE_INVOKE_HOOK_SO(velocity, so, pd->timecode, &velocity);
}
+}
|
CI/CD: extend the timeout value when creating pods
Fix issue where dragonwell-web and openjdk-pod may not start within 3s.
run: docker exec $rune_test bash -c "containerd" &
docker exec $rune_test bash -c "cd /root/samples && ./clean.sh;
- crictl run --timeout 3s hello.yaml pod.yaml && ./show.sh"
+ crictl run --timeout 30s hello.yaml pod.yaml && ./show.sh"
- name: Run dragonwell-web pod
if: always()
run: docker exec $rune_test bash -c "cd /root/samples && ./clean.sh;
- crictl run --timeout 3s dragonwell.yaml pod.yaml && ./show.sh"
+ crictl run --timeout 30s dragonwell.yaml pod.yaml && ./show.sh"
- name: Run openjdk-web pod
if: always()
run: docker exec $rune_test bash -c "cd /root/samples && ./clean.sh;
- crictl run --timeout 3s jdk.yaml pod.yaml && ./show.sh"
+ crictl run --timeout 30s jdk.yaml pod.yaml && ./show.sh"
- name: Run golang-web pod
if: always()
run: docker exec $rune_test bash -c "cd /root/samples && ./clean.sh;
- crictl run --timeout 3s golang.yaml pod.yaml && ./show.sh"
+ crictl run --timeout 30s golang.yaml pod.yaml && ./show.sh"
- name: Kill the container
run: docker stop $rune_test
|
SOVERSION bump to version 2.8.16 | @@ -63,7 +63,7 @@ set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_
# set version of the library
set(LIBYANG_MAJOR_SOVERSION 2)
set(LIBYANG_MINOR_SOVERSION 8)
-set(LIBYANG_MICRO_SOVERSION 15)
+set(LIBYANG_MICRO_SOVERSION 16)
set(LIBYANG_SOVERSION_FULL ${LIBYANG_MAJOR_SOVERSION}.${LIBYANG_MINOR_SOVERSION}.${LIBYANG_MICRO_SOVERSION})
set(LIBYANG_SOVERSION ${LIBYANG_MAJOR_SOVERSION})
|
Add support for holding the on/off switch | @@ -135,6 +135,10 @@ static const Sensor::ButtonMap ikeaOnOffMap[] = {
// mode ep cluster cmd param button name
{ Sensor::ModeScenes, 0x01, 0x0006, 0x01, 0, S_BUTTON_1 + S_BUTTON_ACTION_SHORT_RELEASED, "On" },
{ Sensor::ModeScenes, 0x01, 0x0006, 0x00, 0, S_BUTTON_2 + S_BUTTON_ACTION_SHORT_RELEASED, "Off" },
+ { Sensor::ModeScenes, 0x01, 0x0008, 0x05, 0, S_BUTTON_1 + S_BUTTON_ACTION_HOLD, "Move up (with on/off)" },
+ { Sensor::ModeScenes, 0x01, 0x0008, 0x01, 1, S_BUTTON_2 + S_BUTTON_ACTION_HOLD, "Move down" },
+ { Sensor::ModeScenes, 0x01, 0x0008, 0x07, 0, S_BUTTON_1 + S_BUTTON_ACTION_LONG_RELEASED, "Stop (with on/off)" },
+ { Sensor::ModeScenes, 0x01, 0x0008, 0x07, 1, S_BUTTON_2 + S_BUTTON_ACTION_LONG_RELEASED, "Stop" },
};
static const Sensor::ButtonMap ikeaRemoteMap[] = {
|
libhfuzz: better instrumentation for strncmp/strncasecmp/memcmp | @@ -51,19 +51,21 @@ int strncmp(const char *s1, const char *s2, size_t n)
}
unsigned int v = 0;
+ int ret = 0;
- size_t i = 0;
- for (i = 0; (s1[i] == s2[i]) && i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
+ if (s1[i] != s2[i]) {
+ ret = ret ? ret : ((unsigned char)s1[i] - (unsigned char)s2[i]);
+ } else {
+ v++;
+ }
if (s1[i] == '\0' || s2[i] == '\0') {
break;
}
- v++;
}
+
libhfuzz_instrumentUpdateCmpMap(__builtin_return_address(0), v);
- if (i == n) {
- return 0;
- }
- return (s1[i] - s2[i]);
+ return ret;
}
int strncasecmp(const char *s1, const char *s2, size_t n)
@@ -73,19 +75,21 @@ int strncasecmp(const char *s1, const char *s2, size_t n)
}
unsigned int v = 0;
+ int ret = 0;
- size_t i = 0;
- for (i = 0; (tolower(s1[i]) == tolower(s2[i])) && i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
+ if (tolower(s1[i]) != tolower(s2[i])) {
+ ret = ret ? ret : (tolower(s1[i]) - tolower(s2[i]));
+ } else {
+ v++;
+ }
if (s1[i] == '\0' || s2[i] == '\0') {
break;
}
- v++;
}
+
libhfuzz_instrumentUpdateCmpMap(__builtin_return_address(0), v);
- if (i == n) {
- return 0;
- }
- return (s1[i] - s2[i]);
+ return ret;
}
char *strstr(const char *haystack, const char *needle)
@@ -116,22 +120,21 @@ static inline int _memcmp(const void *m1, const void *m2, size_t n, void *addr)
}
unsigned int v = 0;
+ int ret = 0;
- const char *s1 = (const char *)m1;
- const char *s2 = (const char *)m2;
+ const unsigned char *s1 = (const unsigned char *)m1;
+ const unsigned char *s2 = (const unsigned char *)m2;
- size_t i = 0;
- for (i = 0; i < n; i++) {
+ for (size_t i = 0; i < n; i++) {
if (s1[i] != s2[i]) {
- break;
- }
+ ret = ret ? ret : s1[i] != s2[i];
+ } else {
v++;
}
- libhfuzz_instrumentUpdateCmpMap(addr, v);
- if (i == n) {
- return 0;
}
- return (s1[i] - s2[i]);
+
+ libhfuzz_instrumentUpdateCmpMap(addr, v);
+ return ret;
}
int memcmp(const void *m1, const void *m2, size_t n)
|
argument is the project, not a config. | --
-- Extend global properties
--
- premake.override(vc2010.elements, "globals", function (oldfn, cfg)
- local elements = oldfn(cfg)
+ premake.override(vc2010.elements, "globals", function (oldfn, prj)
+ local elements = oldfn(prj)
- if cfg.system == premake.ANDROID and cfg.kind ~= premake.ANDROIDPROJ then
+ if prj.system == premake.ANDROID and prj.kind ~= premake.ANDROIDPROJ then
-- Remove "IgnoreWarnCompileDuplicatedFilename".
local pos = table.indexof(elements, vc2010.ignoreWarnDuplicateFilename)
table.remove(elements, pos)
|
fix(demo): get widget by "btn-ok" will return NULL | @@ -31,7 +31,7 @@ void App::Load( Platform::String^ entryPoint )
}
Widget_Append( root, pack );
Widget_Unwrap( pack );
- btn = LCUIWidget_GetById( "btn-ok" );
+ btn = LCUIWidget_GetById( "btn" );
Widget_BindEvent( btn, "click", OnBtnClick, NULL, NULL );
}
|
docs/glossary: Add definitions for "memory allocation" and "small integer". | @@ -120,6 +120,52 @@ Glossary
require much less power. MicroPython is designed to be small and
optimized enough to run on an average modern microcontroller.
+ memory allocation
+ Computers store data in memory, and memory allocation is a process
+ they perform to store *new* data in memory. This process has its
+ cost (in terms of time required), as usually involves scanning
+ thru memory to find a suitable free chunk. It may also fail if
+ suitable free chunk is not found. Computers also have "registers",
+ which allow to store limited amount of data without special memory
+ allocation. While MicroPython is a high-level language, these basic
+ traits of computers still apply to some aspects of its functioning,
+ and worth to keep in mind when e.g. optimizing an application, or
+ trying to achieve real-time/failure-free operation.
+
+ MicroPython stores majority of objects in memory, thus when creating
+ a new object it needs to perform memory allocation. However, there
+ are exceptions. Some special objects may be created without
+ allocation. One notable example is :term:`small integer`'s. There may
+ be also other objects like, e.g. short repeated strings which are
+ automatically :term:`interned <interned string>`, etc. These are
+ however considered an implementation detail, and often differ
+ by a :term:`MicroPython port`.
+
+ Besides using allocation-free objects (set of which is very limited,
+ as explained above), there's another way to avoid, or at least limit
+ memory allocation: avoid creating new objects during operations (and
+ growing object size, as that leads to the need to allocate more memory
+ too). These are known as inplace operations.
+
+ An advanced MicroPython programmer should know about the memory
+ allocation aspects because:
+
+ * MicroPython features automatic memory management. Allocation
+ operations are usually performed fast, until available memory
+ is exhausted, then garbage collection (GC) needs to be performed.
+ The GC is a relatively long operation, which can lead to delays
+ in application response.
+ * Allocation leads to :term:`fragmentation`.
+ * If GC didn't reclaim free block of memory of suitable size (which
+ can be due to :term:`fragmentation`), allocation will simply fail,
+ aborting an application unless special care is taken.
+ * Even without effects of GC, memory allocation takes non-zero
+ time, and this time may vary. This may both slow down tight
+ processing loops, and make them non real-time (processing time
+ may vary noticeably).
+ * Memory allocation may be disallowed in special execution contexts,
+ e.g. in interrupt handlers.
+
micropython-lib
MicroPython is (usually) distributed as a single executable/binary
file with just few builtin modules. There is no extensive standard
@@ -170,6 +216,21 @@ Glossary
from context, it's recommended to use full specification like one
of the above.
+ small integer
+ An integer value of limited range which can be produced and operated
+ on without memory allocation. See :term:`memory allocation` for why this
+ is useful. A small integer fits within a machine word, and as it
+ needs to be distinguished from values of other types, which is done
+ by means of a special tag bit(s) in a machine word, it has necessarily
+ small range than the machine word. To reinstate that, a small int cannot
+ hold an entire value of a machine word, which is useful fact to keep in
+ mind for developers interested in optimization, e.g. for real-time
+ operations. Also keep in mind that Python integers are signed, so
+ small integer is signed too. As an example, with minimum 1 bit required
+ for a tag, and 1 bit for a sign, on a typical 32-bit system, a small
+ integer can hold a value in range ``-2**30 .. 2**30-1``, or roughly
+ +/- one billion.
+
stream
Also known as a "file-like object". An object which provides sequential
read-write access to the underlying data. A stream object implements
|
CI: remove unused code from .travis.yml | @@ -53,14 +53,9 @@ matrix:
fast_finish: true
before_install:
- - export PATH=/opt/ghc/$GHCVER/bin:/opt/cabal/$CABALVER/bin:$HOME/.cabal/bin:$PATH
- cabal update
install:
- - |
- printf "$(ghc --version)"
- printf " [$(ghc --print-project-git-commit-id 2> /dev/null || echo '?')]"
- - cabal --version
- cabal new-build --only-dependencies --enable-tests --disable-optimization
script:
|
[core] silence coverity warning | @@ -264,7 +264,10 @@ static int network_server_init(server *srv, buffer *host_token, size_t sidx, int
return -1;
}
- fdevent_fcntl_set_nb(srv->ev, srv_socket->fd);
+ if (-1 == fdevent_fcntl_set_nb(srv->ev, srv_socket->fd)) {
+ log_error_write(srv, __FILE__, __LINE__, "ss", "fcntl:", strerror(errno));
+ return -1;
+ }
} else
#endif
{
|
Array: Check file with OCLint | @@ -10,6 +10,7 @@ test -f "@PROJECT_BINARY_DIR@/compile_commands.json" || { echo "Compilation data
cd "@CMAKE_SOURCE_DIR@" || exit
oclint -p "@PROJECT_BINARY_DIR@" -enable-global-analysis -enable-clang-static-analyzer \
+ "@CMAKE_SOURCE_DIR@/src/libs/ease/array.c" \
"@CMAKE_SOURCE_DIR@/src/libs/ease/keyname.c" \
"@CMAKE_SOURCE_DIR@/src/libs/utility/text.c" \
"@CMAKE_SOURCE_DIR@/src/plugins/base64/"*.{c,cpp} \
|
Update the comment on ssl3_write_pending()
The struct s->s3 has been modified. | @@ -1151,7 +1151,7 @@ int do_ssl3_write(SSL *s, int type, const unsigned char *buf,
return -1;
}
-/* if s->s3.wbuf.left != 0, we need to call this
+/* if SSL3_BUFFER_get_left() != 0, we need to call this
*
* Return values are as per SSL_write()
*/
|
Test that bad UDP payload sizes trigger error | @@ -340,6 +340,25 @@ uint8_t client_param_err7[] = {
picoquic_tp_handshake_connection_id, 8, LOCAL_CONNECTION_ID
};
+/* error 8, UDP Payload too small */
+uint8_t client_param_err8[] = {
+ picoquic_tp_initial_max_stream_data_bidi_local, 4, 0x80, 0, 0xFF, 0xFF,
+ picoquic_tp_initial_max_data, 4, 0x80, 0x40, 0, 0,
+ picoquic_tp_idle_timeout, 1, 0x1E,
+ picoquic_tp_max_packet_size, 2, 0x44, 0xAF,
+ picoquic_tp_handshake_connection_id, 8, LOCAL_CONNECTION_ID
+};
+
+
+/* error 9, UDP Payload too large */
+uint8_t client_param_err9[] = {
+ picoquic_tp_initial_max_stream_data_bidi_local, 4, 0x80, 0, 0xFF, 0xFF,
+ picoquic_tp_initial_max_data, 4, 0x80, 0x40, 0, 0,
+ picoquic_tp_idle_timeout, 1, 0x1E,
+ picoquic_tp_max_packet_size, 4, 0x80, 0, 0xFF, 0xf8,
+ picoquic_tp_handshake_connection_id, 8, LOCAL_CONNECTION_ID
+};
+
typedef struct st_transport_param_error_test_t {
int mode;
@@ -354,7 +373,9 @@ static transport_param_error_test_t transport_param_error_case[] = {
{ 0, client_param_err4, sizeof(client_param_err4), PICOQUIC_TRANSPORT_PARAMETER_ERROR},
{ 0, client_param_err5, sizeof(client_param_err5), PICOQUIC_TRANSPORT_PARAMETER_ERROR},
{ 0, client_param_err6, sizeof(client_param_err6), PICOQUIC_TRANSPORT_PARAMETER_ERROR},
- { 0, client_param_err7, sizeof(client_param_err7), PICOQUIC_TRANSPORT_PARAMETER_ERROR}
+ { 0, client_param_err7, sizeof(client_param_err7), PICOQUIC_TRANSPORT_PARAMETER_ERROR},
+ { 0, client_param_err8, sizeof(client_param_err7), PICOQUIC_TRANSPORT_PARAMETER_ERROR},
+ { 0, client_param_err9, sizeof(client_param_err7), PICOQUIC_TRANSPORT_PARAMETER_ERROR}
};
static size_t nb_transport_param_error_case = sizeof(transport_param_error_case) / sizeof(transport_param_error_test_t);
|
bugfix: make remove_intx_remapping static
remove_intx_remapping is not global function, make static.
Unlike global functions in C, access to static functions is restricted to the file where they are declared. Another reason for making functions static can be reuse of the same function name in other files | @@ -422,7 +422,7 @@ add_intx_remapping(struct vm *vm, uint8_t virt_pin,
}
/* deactive & remove mapping entry of vpin for vm */
-void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
+static void remove_intx_remapping(struct vm *vm, uint8_t virt_pin, bool pic_pin)
{
int phys_irq;
struct ptdev_remapping_info *entry;
|
Remove observations for disconnected endpoint
Fixes removing observation over TCP connection, when a peer close
the connection. | @@ -150,6 +150,10 @@ oc_handle_session(oc_endpoint_t *endpoint, oc_session_state_t state)
oc_tls_remove_peer(endpoint);
}
#endif /* OC_SECURITY */
+#ifdef OC_SERVER
+ /* remove all observations for the endpoint */
+ coap_remove_observer_by_client(endpoint);
+#endif /* OC_SERVER */
}
#ifdef OC_SESSION_EVENTS
handle_session_event_callback(endpoint, state);
|
signalfd_read_bh(): deallocate data for dequeued signals
The existing code is failing to deallocate the queued_signal
structs that are being dequeued by a given thread. | @@ -902,6 +902,7 @@ closure_function(5, 1, sysreturn, signalfd_read_bh,
}
sig_debug(" sig %d, errno %d, code %d\n", qs->si.si_signo, qs->si.si_errno, qs->si.si_code);
signalfd_siginfo_fill(info, qs);
+ free_queued_signal(qs);
info++;
ninfos++;
}
|
Debug failure of CI test
cd build
make test && QUICRESULT=$?
if [[ ${QUICRESULT} == 0 ]]; then exit 0; fi;
+ cat /home/runner/work/picoquic/picoquic/build/Testing/Temporary/LastTest.log
exit 1
- name: Run Valgrind
|
apps/pkcs12: Detect missing PKCS12KDF support on import
Report error message with hint to use -nomacver if
MAC verification is not required. | #include <openssl/pem.h>
#include <openssl/pkcs12.h>
#include <openssl/provider.h>
+#include <openssl/kdf.h>
#define NOKEYS 0x1
#define NOCERTS 0x2
@@ -733,6 +734,15 @@ int pkcs12_main(int argc, char **argv)
tsalt != NULL ? ASN1_STRING_length(tsalt) : 0L);
}
if (macver) {
+ EVP_KDF *pkcs12kdf;
+
+ pkcs12kdf = EVP_KDF_fetch(NULL, "PKCS12KDF", NULL);
+ if (pkcs12kdf == NULL) {
+ BIO_printf(bio_err, "Error verifying PKCS12 MAC; no PKCS12KDF support.\n");
+ BIO_printf(bio_err, "Use -nomacver if MAC verification is not required.\n");
+ goto end;
+ }
+ EVP_KDF_free(pkcs12kdf);
/* If we enter empty password try no password first */
if (!mpass[0] && PKCS12_verify_mac(p12, NULL, 0)) {
/* If mac and crypto pass the same set it to NULL too */
|
kernel/init/os_start : Fix wrong assignment for idle stack information
'g_idle_topstack - 4' should be set to idle task's adj_stack_ptr, not adj_stack_size. | @@ -363,7 +363,7 @@ void os_start(void)
/* Fill the stack information to Idle task's tcb */
g_idletcb.cmn.adj_stack_size = CONFIG_IDLETHREAD_STACKSIZE;
g_idletcb.cmn.stack_alloc_ptr = (void *)(g_idle_topstack - CONFIG_IDLETHREAD_STACKSIZE);
- g_idletcb.cmn.adj_stack_size = g_idle_topstack - 4;
+ g_idletcb.cmn.adj_stack_ptr = (void *)(g_idle_topstack - 4);
/* Then add the idle task's TCB to the head of the ready to run list */
|
Transfer ~nul to Matthew Liston. | 0w0 :: 193, ~duc, Tlon
0w0 :: 194, ~fur, Tlon
0w0 :: 195, ~fex, Tlon
- 0w0 :: 196, ~nul, Tlon
+ 0w0 :: 196, ~nul, Matthew Liston
0w0 :: 197, ~luc, Tlon
0w0 :: 198, ~len, Tlon
0w0 :: 199, ~ner, Tlon
|
mpi-families/openmpi: fix for static pbs linkage; temporarily disable libpbs.la during build | @@ -195,6 +195,11 @@ export BASEFLAGS
%{__cp} %{SOURCE3} .
%{__chmod} 700 pbs-config
export PATH="./:$PATH"
+
+# temporarily disable dynamic linkage for pbs
+if [ -e /opt/pbs/lib/libpbs.la ]; then
+ mv /opt/pbs/lib/libpbs.la /tmp
+fi
%endif
./configure ${BASEFLAGS} || { cat config.log && exit 1; }
@@ -206,6 +211,13 @@ export PATH="./:$PATH"
make %{?_smp_mflags}
+# restore dynamic linkage for pbs
+%if %{with_tm}
+if [ -e /tmp/libpbs.la ]; then
+ mv /tmp/libpbs.la /opt/pbs/lib/libpbs.la
+fi
+%endif
+
%install
# OpenHPC compiler designation
%ohpc_setup_compiler
|
Remove check made redundant by CheckModelAndPoolCompatibility. | @@ -42,7 +42,6 @@ TVector<TVector<double>> ApplyModelMulti(const TFullModel& model,
TVector<TConstArrayRef<float>> repackedFeatures(model.ObliviousTrees.GetFlatFeatureVectorExpectedSize());
const int blockFirstId = blockParams.FirstId + blockId * blockParams.GetBlockSize();
const int blockLastId = Min(blockParams.LastId, blockFirstId + blockParams.GetBlockSize());
- CB_ENSURE((size_t)pool.Docs.GetEffectiveFactorCount() >= model.ObliviousTrees.GetFlatFeatureVectorExpectedSize());
if (columnReorderMap.empty()) {
for (size_t i = 0; i < model.ObliviousTrees.GetFlatFeatureVectorExpectedSize(); ++i) {
repackedFeatures[i] = MakeArrayRef(pool.Docs.Factors[i].data() + blockFirstId, blockLastId - blockFirstId);
|
Improve error handling in rand_init function | @@ -310,19 +310,31 @@ void rand_fork()
DEFINE_RUN_ONCE_STATIC(do_rand_init)
{
- int ret = 1;
-
#ifndef OPENSSL_NO_ENGINE
rand_engine_lock = CRYPTO_THREAD_lock_new();
- ret &= rand_engine_lock != NULL;
+ if (rand_engine_lock == NULL)
+ return 0;
#endif
+
rand_meth_lock = CRYPTO_THREAD_lock_new();
- ret &= rand_meth_lock != NULL;
+ if (rand_meth_lock == NULL)
+ goto err1;
rand_nonce_lock = CRYPTO_THREAD_lock_new();
- ret &= rand_meth_lock != NULL;
+ if (rand_nonce_lock == NULL)
+ goto err2;
- return ret;
+ return 1;
+
+err2:
+ CRYPTO_THREAD_lock_free(rand_meth_lock);
+ rand_meth_lock = NULL;
+err1:
+#ifndef OPENSSL_NO_ENGINE
+ CRYPTO_THREAD_lock_free(rand_engine_lock);
+ rand_engine_lock = NULL;
+#endif
+ return 0;
}
void rand_cleanup_int(void)
@@ -334,9 +346,12 @@ void rand_cleanup_int(void)
RAND_set_rand_method(NULL);
#ifndef OPENSSL_NO_ENGINE
CRYPTO_THREAD_lock_free(rand_engine_lock);
+ rand_engine_lock = NULL;
#endif
CRYPTO_THREAD_lock_free(rand_meth_lock);
+ rand_meth_lock = NULL;
CRYPTO_THREAD_lock_free(rand_nonce_lock);
+ rand_nonce_lock = NULL;
}
/*
|
anim_utils.c: remove warning when !defined(WEBP_HAVE_GIF) | #include <stdio.h>
#include <string.h>
-#ifdef WEBP_HAVE_GIF
+#if defined(WEBP_HAVE_GIF)
#include <gif_lib.h>
#endif
#include "webp/format_constants.h"
@@ -33,11 +33,13 @@ static const int kNumChannels = 4;
// -----------------------------------------------------------------------------
// Common utilities.
+#if defined(WEBP_HAVE_GIF)
// Returns true if the frame covers the full canvas.
static int IsFullFrame(int width, int height,
int canvas_width, int canvas_height) {
return (width == canvas_width && height == canvas_height);
}
+#endif // WEBP_HAVE_GIF
static int CheckSizeForOverflow(uint64_t size) {
return (size == (size_t)size);
@@ -85,6 +87,7 @@ void ClearAnimatedImage(AnimatedImage* const image) {
}
}
+#if defined(WEBP_HAVE_GIF)
// Clear the canvas to transparent.
static void ZeroFillCanvas(uint8_t* rgba,
uint32_t canvas_width, uint32_t canvas_height) {
@@ -126,6 +129,7 @@ static void CopyFrameRectangle(const uint8_t* src, uint8_t* dst, int stride,
dst += stride;
}
}
+#endif // WEBP_HAVE_GIF
// Canonicalize all transparent pixels to transparent black to aid comparison.
static void CleanupTransparentPixels(uint32_t* rgba,
@@ -280,7 +284,7 @@ static int ReadAnimatedWebP(const char filename[],
// -----------------------------------------------------------------------------
// GIF Decoding.
-#ifdef WEBP_HAVE_GIF
+#if defined(WEBP_HAVE_GIF)
// Returns true if this is a valid GIF bitstream.
static int IsGIF(const WebPData* const data) {
|
Disable portable build by default
The default value of a boolean meson option is true. We want
non-portable build by default. | @@ -3,6 +3,6 @@ option('build_server', type: 'boolean', value: true, description: 'Build the ser
option('crossbuild_windows', type: 'boolean', value: false, description: 'Build for Windows from Linux')
option('windows_noconsole', type: 'boolean', value: false, description: 'Disable console on Windows (pass -mwindows flag)')
option('prebuilt_server', type: 'string', description: 'Path of the prebuilt server')
-option('portable', type: 'boolean', description: 'Use scrcpy-server.jar from the same directory as the scrcpy executable')
+option('portable', type: 'boolean', value: false, description: 'Use scrcpy-server.jar from the same directory as the scrcpy executable')
option('skip_frames', type: 'boolean', value: true, description: 'Always display the most recent frame')
option('hidpi_support', type: 'boolean', value: true, description: 'Enable High DPI support')
|
slightly improve bash completion for oidc-gen | @@ -154,31 +154,16 @@ if [[ "$prev" == "--print=" ]]; then
local oidcDirFiles=`ls $agentdir 2>/dev/null | sed -e 's/$/& /g'`
COMPREPLY+=( $(compgen -W "${oidcDirFiles}" -- ${cur}) )
fi
+if [[ "$prev" == "--update=" ]]; then
+ local oidcDirFiles=`ls $agentdir 2>/dev/null | sed -e 's/$/& /g'`
+ COMPREPLY+=( $(compgen -W "${oidcDirFiles}" -- ${cur}) )
+fi
+if [[ "$prev" == "--manual" ]]; then
+ COMPREPLY=( $(compgen -W "${shortnames}" -- ${cur}) )
+fi
if [[ "x$ret" == "x1" ]]; then
return 0
fi
-# case $prev in
-# "--print=")
-# _matchFiles ${cur}
-# local oidcDirFiles=`ls $agentdir 2>/dev/null | sed -e 's/$/& /g'`
-# COMPREPLY+=( $(compgen -W "${oidcDirFiles}" -- ${cur}) )
-# return 0
-# ;;
-# "--file=")
-# ;&
-# "--cp=")
-# ;&
-# "--output=")
-# _matchFiles ${cur}
-# return 0
-# ;;
-# "--flow=")
-# local IFS=$'#\n'
-# local someScopes="code #device #password #refresh "
-# COMPREPLY=( $(compgen -W "${someScopes}" -- ${cur}) )
-# return 0
-# ;;
-# esac
if [[ ${cur} == -* ]] ; then
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
|
Client Snap support in cupsd: Additional NULL check
When testing the newest snapd whether it works correctly together
with CUPS, I hit a segfault in the chackwhether the client is a Snap
plugging cups-control, for the case when CUPS is not snapped.
Fixed this with an additional NULL check. | @@ -2120,7 +2120,7 @@ check_admin_access(cupsd_client_t *con) // I - Client connection
cupsdLogClient(con, CUPSD_LOG_DEBUG, "Unable to get client Snap plugs: %s", error->message);
ret = 0;
}
- else if (plugs->len <= 0)
+ else if (!plugs || plugs->len <= 0)
{
cupsdLogClient(con, CUPSD_LOG_DEBUG, "Snap without cups-control plug - denied.");
ret = 0;
|
Adds -std=c+=11 compiler flags, unintentionally deleted this | @@ -36,7 +36,7 @@ IF (ANDROID)
set(CMAKE_C_FLAGS "-D_GNU_SOURCE -std=gnu99 -Wall ${CMAKE_C_FLAGS}")
ELSE ()
set(CMAKE_C_FLAGS "-D_GNU_SOURCE -std=gnu99 -Wall -Werror -fPIC ${CMAKE_C_FLAGS}") #TODO add -Wextra
- set(CMAKE_CXX_FLAGS "-Wall -Werror -Wextra -Weffc++ -fno-rtti -fno-exceptions ${CMAKE_CXX_FLAGS}")
+ set(CMAKE_CXX_FLAGS "-std=c++11 -Wall -Werror -Wextra -Weffc++ -fno-rtti -fno-exceptions ${CMAKE_CXX_FLAGS}")
set(CMAKE_C_FLAGS_DEBUG "-g -DDEBUG ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS_DEBUG "-g -DDEBUG ${CMAKE_CXX_FLAGS}")
ENDIF()
|
fixed uninitialized counter | @@ -401,6 +401,7 @@ eapolm3errorcount = 0;
eapolm4count = 0;
eapolm4errorcount = 0;
eapolwrittencount = 0;
+eapolncwrittencount = 0;
eapolaplesscount = 0;
eapolwrittenjcountdeprecated = 0;
eapolwrittenhcpxcountdeprecated = 0;
|
chip/stm32/gpio.c: Format with clang-format
BRANCH=none
TEST=none | @@ -83,8 +83,8 @@ test_mockable int gpio_get_level(enum gpio_signal signal)
void gpio_set_level(enum gpio_signal signal, int value)
{
- STM32_GPIO_BSRR(gpio_list[signal].port) =
- gpio_list[signal].mask << (value ? 0 : 16);
+ STM32_GPIO_BSRR(gpio_list[signal].port) = gpio_list[signal].mask
+ << (value ? 0 : 16);
}
int gpio_enable_interrupt(enum gpio_signal signal)
@@ -103,8 +103,8 @@ int gpio_enable_interrupt(enum gpio_signal signal)
g_old += exti_events[bit];
if ((exti_events[bit]) && (exti_events[bit] != signal)) {
- CPRINTS("Overriding %s with %s on EXTI%d",
- g_old->name, g->name, bit);
+ CPRINTS("Overriding %s with %s on EXTI%d", g_old->name, g->name,
+ bit);
}
exti_events[bit] = signal;
@@ -112,8 +112,9 @@ int gpio_enable_interrupt(enum gpio_signal signal)
shift = (bit % 4) * 4;
bank = (g->port - STM32_GPIOA_BASE) / 0x400;
- STM32_SYSCFG_EXTICR(group) = (STM32_SYSCFG_EXTICR(group) &
- ~(0xF << shift)) | (bank << shift);
+ STM32_SYSCFG_EXTICR(group) =
+ (STM32_SYSCFG_EXTICR(group) & ~(0xF << shift)) |
+ (bank << shift);
STM32_EXTI_IMR |= g->mask;
return EC_SUCCESS;
|
Style tweaks
I think the original version of this code was written back when we had an 80-column limit. | @@ -297,8 +297,8 @@ function ToIR:convert_stat(cmds, stat)
-- the rest of the RHS has been evaluated. However, we don't bother optimizing this last
-- case because if the programmer has written a complicated multiple-assignment then it is
-- likely that it isn't something that could have been written as a sequence of single
- -- assignments. (Our implementation always ends up creating a temporary variable in this
- -- case because save_if_necessary calls exp_to_value.)
+ -- assignments. Our implementation always ends up creating a temporary variable in this
+ -- case because save_if_necessary calls exp_to_value.
local vals = {}
for i, exp in ipairs(exps) do
local is_last = (i == #exps or exps[i+1]._tag == "ast.Exp.ExtraRet")
@@ -314,23 +314,21 @@ function ToIR:convert_stat(cmds, stat)
local lhs = lhss[i]
local val = vals[i]
if val then
- local cmd
local ltag = lhs._tag
if ltag == "to_ir.LHS.Local" then
- cmd = ir.Cmd.Move(loc, lhs.id, val)
+ table.insert(cmds, ir.Cmd.Move(loc, lhs.id, val))
elseif ltag == "to_ir.LHS.Global" then
- cmd = ir.Cmd.SetGlobal(loc, lhs.id, val)
+ table.insert(cmds, ir.Cmd.SetGlobal(loc, lhs.id, val))
elseif ltag == "to_ir.LHS.Array" then
- cmd = ir.Cmd.SetArr(loc, lhs.typ, lhs.arr, lhs.i, val)
+ table.insert(cmds, ir.Cmd.SetArr(loc, lhs.typ, lhs.arr, lhs.i, val))
elseif ltag == "to_ir.LHS.Table" then
local str = ir.Value.String(lhs.field)
- cmd = ir.Cmd.SetTable(loc, lhs.typ, lhs.t, str, val)
+ table.insert(cmds, ir.Cmd.SetTable(loc, lhs.typ, lhs.t, str, val))
elseif ltag == "to_ir.LHS.Record" then
- cmd = ir.Cmd.SetField(loc, lhs.typ, lhs.rec, lhs.field, val)
+ table.insert(cmds, ir.Cmd.SetField(loc, lhs.typ, lhs.rec, lhs.field, val))
else
typedecl.tag_error(ltag)
end
- table.insert(cmds, cmd)
end
end
|
fix redundent check, refactor to findVisibleClient
Uses function findVisibleClient and the Elvis operator to clean the code up. Elvis depends on a gcc extension resulting in a warning from -wpedantic. | @@ -200,6 +200,19 @@ monocle(Monitor *m)
}
+void
+focusstack2(const Arg *arg)
+{
+ Client *nextVisibleClient = findVisibleClient(selmon->sel->next) ?: findVisibleClient(selmon->clients);
+
+ if (nextVisibleClient) {
+ if (nextVisibleClient->mon != selmon)
+ selmon = nextVisibleClient->mon;
+ detachstack(nextVisibleClient);
+ attachstack(nextVisibleClient);
+ selmon->sel = nextVisibleClient;
+ }
+}
void
focusstack2(const Arg *arg)
{
|
win32: fixing rhosimulator JS bridge | @@ -1073,14 +1073,7 @@ var Rho = Rho || (function ($) {
addBridge(rhoPlatform.id.RHOSIMULATOR, function() {
return {
- apiCall: function (cmdText, async, resultHandler) {
- var nativeApiResult = {};
-
- if (window[RHO_API_TAG] && 'function' == typeof window[RHO_API_TAG]['apiCall']) {
- nativeApiResult = window[RHO_API_TAG].apiCall(cmdText, async);
- }
- resultHandler(JSON.parse(nativeApiResult));
- }
+ apiCall: ajaxBridgeApiCall
};
});
|
Fix typo in sdot function
it looks like my previous pull request was short the final commit;
fix a typo in sdot | @@ -67,7 +67,7 @@ static void sdot_kernel_16( BLASLONG n, FLOAT *x, FLOAT *y, FLOAT *dot)
accum_0 = _mm512_extractf32x8_ps(accum_05, 0) + _mm512_extractf32x8_ps(accum_05, 1);
accum_1 = _mm512_extractf32x8_ps(accum_15, 0) + _mm512_extractf32x8_ps(accum_15, 1);
accum_2 = _mm512_extractf32x8_ps(accum_25, 0) + _mm512_extractf32x8_ps(accum_25, 1);
- accum_3 = _mm512_extractf32x8_ps(accum_35, 0) + _mm512_extractf32x8_ps(accum_35, 1))
+ accum_3 = _mm512_extractf32x8_ps(accum_35, 0) + _mm512_extractf32x8_ps(accum_35, 1);
#endif
for (; i < n; i += 32) {
|
Update apps/graphics/NxWidgets/Kconfig | @@ -736,21 +736,6 @@ if NXWM_TOUCHSCREEN
comment "Touchscreen Device Settings"
-config NXWM_TOUCHSCREEN_DEVINIT
- bool "Touchscreen Device Initialization"
- default y
- depends on !BUILD_PROTECTED && !BUILD_KERNEL
- ---help---
- It this option is selected, then the NxWM:CTouchscreen listener
- thread will call a function boardctl() in order to instantiate the
- touchscreen driver at path NXWM_TOUCHSCREEN_DEVPATH. If
- NXWM_TOUCHSCREEN_DEVINIT is not selected, then the NxWM:CTouchscreen
- listener thread will assume that the driver has already been
- initialized at NXWM_TOUCHSCREEN_DEVPATH.
-
- NOTE that in the kernel build, all touchscreen initialize must be
- performed in kernel logic prior to the execution of NxWM.
-
config NXWM_TOUCHSCREEN_DEVNO
int "Touchscreen Device Number"
default 0
|
SOVERSION bump to version 2.5.6 | @@ -63,7 +63,7 @@ set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_
# set version of the library
set(LIBYANG_MAJOR_SOVERSION 2)
set(LIBYANG_MINOR_SOVERSION 5)
-set(LIBYANG_MICRO_SOVERSION 5)
+set(LIBYANG_MICRO_SOVERSION 6)
set(LIBYANG_SOVERSION_FULL ${LIBYANG_MAJOR_SOVERSION}.${LIBYANG_MINOR_SOVERSION}.${LIBYANG_MICRO_SOVERSION})
set(LIBYANG_SOVERSION ${LIBYANG_MAJOR_SOVERSION})
|
Add auto detect for VisualStudioToolset v143
Fixes for | @@ -23,6 +23,10 @@ Common PlatformTools definitions used by all projects:
<VisualStudioToolset>v142</VisualStudioToolset>
<CETCompatSupported>true</CETCompatSupported>
</PropertyGroup>
+ <PropertyGroup Condition="'$(VisualStudioVersionMajor)'>='17'">
+ <VisualStudioToolset>v143</VisualStudioToolset>
+ <CETCompatSupported>true</CETCompatSupported>
+ </PropertyGroup>
<PropertyGroup Condition= "'$(PlatformToolset)'=='' OR '$(PlatformToolset)'=='AutoVSToolset'">
<PlatformToolset>$(VisualStudioToolset)</PlatformToolset>
|
Disable error on CHACHAPOLY misconfiguration
As the test tries this in multiple configurations, an #error here will
fail CI. | @@ -364,7 +364,7 @@ extern "C" {
#if defined(PSA_WANT_KEY_TYPE_CHACHA20)
#define MBEDTLS_CHACHAPOLY_C
#else /* not PSA_WANT_KEY_TYPE_CHACHA20 */
-#error "PSA_WANT_ALG_CHACHA20_POLY1305 requires PSA_WANT_KEY_TYPE_CHACHA20"
+// #error "PSA_WANT_ALG_CHACHA20_POLY1305 requires PSA_WANT_KEY_TYPE_CHACHA20"
#endif /* PSA_WANT_KEY_TYPE_CHACHA20 */
#endif /* PSA_WANT_ALG_CHACHA20_POLY1305 */
|
Clean build. | {$velo p/@t q/@t} :: reboot
{$verb ~} :: verbose mode
== ::
- ++ stub (list (pair stye (list @c))) :: styled tuba
- ++ stye (pair (set deco) (pair tint tint)) :: decos/bg/fg
- ++ styl :: cascading stye
- %+ pair (unit deco) ::
- (pair (unit tint) (unit tint)) ::
- :: ::
- ++ styx (list $@(@t (pair styl styx))) :: styled text
- ++ tint ?(~ $r $g $b $c $m $y $k $w) :: text color
-- ::dill
:: ::::
:::: ++eyre :: (1e) oldweb
task:able:ford-api
==
++ note-arvo :: out request $->
- $~ [%a %init ~zod]
- $% {@tas $meta vase}
+ $~ [%a %wake ~]
$% {$a task:able:ames}
{$b task:able:behn}
{$c task:able:clay}
{$f task:able:ford}
{$g task:able:gall}
{$t task:able:ford-api}
- == ==
+ {@tas $meta vase}
+ ==
++ sign-arvo :: in result $<-
$% {$a gift:able:ames}
{$b gift:able:behn}
|
add buster-debsource to workaround libcjson bug in buster | @@ -731,17 +731,25 @@ preparedeb: clean
debsource: distclean preparedeb
dpkg-source -b .
+.PHONY: buster-debsource
+debsource: distclean preparedeb
+ @mv debian/rules debian/rules.orig
+ @cat debian/rules.orig \
+ | sed s/"export USE_CJSON_SO = 1"/"export USE_CJSON_SO = 0"/ \
+ > debian/rules
+ dpkg-source -b . || mv debian/rules.orig debian/rules
+ @rm debian/rules || mv debian/rules.orig debian/rules
+ @mv debian/rules.orig debian/rules
+
.PHONY: ubuntu-bionic-source
ubuntu-bionic-source: distclean preparedeb
- mv debian/control debian/control.orig
- cat debian/control.orig \
+ @mv debian/control debian/control.orig
+ @cat debian/control.orig \
| sed s/"Build-Depends: debhelper-compat (= 13),"/"Build-Depends: debhelper-compat (= 12),"/ \
> debian/control
-
dpkg-source -b . || mv debian/control.orig debian/control
-
- rm debian/control || mv debian/control.orig debian/control
- mv debian/control.orig debian/control
+ @rm debian/control || mv debian/control.orig debian/control
+ @mv debian/control.orig debian/control
.PHONY: deb
deb: cleanapi create_obj_dir_structure preparedeb
|
Allow Ethereum coinbasePassword to be empty, but not missing | @@ -333,7 +333,7 @@ namespace MiningCore.Blockchain.Ethereum
// validate mandatory extra config
var extraConfig = poolConfig.PaymentProcessing.Extra.SafeExtensionDataAs<EthereumPoolPaymentProcessingConfigExtra>();
- if (string.IsNullOrEmpty(extraConfig?.CoinbasePassword))
+ if (extraConfig?.CoinbasePassword == null)
logger.ThrowLogPoolStartupException("\"paymentProcessing.coinbasePassword\" pool-configuration property missing or empty (required for unlocking wallet during payment processing)");
}
|
zephyr test: Make ppc_discharge_vbus mockable
Allow tests to mock ppc_discharge_vbus. Support tests of discharging
behavior that are decoupled from the discharge indicator of a specific
PPC.
TEST=make buildall
BRANCH=none | @@ -200,8 +200,12 @@ int ppc_err_prints(const char *string, int port, int error);
* @param port: The Type-C port number.
* @param enable: 1 -> discharge vbus, 0 -> stop discharging vbus
* @return EC_SUCCESS on success, error otherwise.
+ *
+ * TODO(b/255413715): Remove test_mockable when it is practical to convert the
+ * usb_pd_flags test to use the unit-testing framework and not require weak
+ * functions for mocking.
*/
-int ppc_discharge_vbus(int port, int enable);
+test_mockable int ppc_discharge_vbus(int port, int enable);
/**
* Initializes the PPC for the specified port.
|
build: abort previous run if still running | @@ -96,7 +96,7 @@ CMAKE_FLAGS_BASE = [
'KDB_DB_SPEC': '${HOME}/.config/kdb/spec',
'CMAKE_INSTALL_PREFIX': '${WORKSPACE}/system',
'INSTALL_SYSTEM_FILES': 'OFF',
- 'buildDocUMENTATION': 'OFF'
+ 'BUILD_DOCUMENTATION': 'OFF'
]
// TODO Remove -DEPRECATED after #1954 is resolved
@@ -125,6 +125,9 @@ TEST_INSTALL = 'install'
NOW = new Date()
+// If previous run is still running, cancel it
+abortPreviousRun()
+
/*****************************************************************************
* Main Stages
*
@@ -891,3 +894,18 @@ def publishDebianPackages(remote="a7") {
def apiary(input, output) {
sh "apiary preview --path=${input} --output=${output}"
}
+
+def abortPreviousRun() {
+ def exec = currentBuild
+ ?.rawBuild
+ ?.getPreviousBuildInProgress()
+ ?.getExecutor()
+ if(exec) {
+ exec.interrupt(
+ Result.ABORTED,
+ new CauseOfInterruption.UserInterruption(
+ "Aborted by Build#${currentBuild.number}"
+ )
+ )
+ }
+}
|
Disable AVX512 (Skylake X) support if the build system is too old | @@ -201,6 +201,21 @@ $architecture = zarch if ($data =~ /ARCH_ZARCH/);
$binformat = bin32;
$binformat = bin64 if ($data =~ /BINARY_64/);
+$no_avx512= 0;
+if (($architecture eq "x86") || ($architecture eq "x86_64")) {
+ $code = '"vaddps %zmm1, %zmm0, %zmm0"';
+ print $tmpf "void main(void){ __asm__ volatile($code); }\n";
+ $args = " -o $tmpf.o -x c $tmpf";
+ my @cmd = ("$compiler_name $args");
+ system(@cmd) == 0;
+ if ($? != 0) {
+ $no_avx512 = 1;
+ } else {
+ $no_avx512 = 0;
+ }
+ unlink("tmpf.o");
+}
+
$data = `$compiler_name -S ctest1.c && grep globl ctest1.s | head -n 1 && rm -f ctest1.s`;
$data =~ /globl\s([_\.]*)(.*)/;
@@ -288,6 +303,7 @@ print MAKEFILE "CROSS=1\n" if $cross != 0;
print MAKEFILE "CEXTRALIB=$linker_L $linker_l $linker_a\n";
print MAKEFILE "HAVE_MSA=1\n" if $have_msa eq 1;
print MAKEFILE "MSA_FLAGS=$msa_flags\n" if $have_msa eq 1;
+print MAKEFILE "NO_AVX512=1\n" if $no_avx512 eq 1;
$os =~ tr/[a-z]/[A-Z]/;
$architecture =~ tr/[a-z]/[A-Z]/;
|
MinGW uses also ; as PATH-separator | @@ -116,7 +116,7 @@ function (UPNP_findTestEnv testName resultVar)
UPNP_findTestLibs (${testName} ${resultVar})
set (tempEnv "PATH=")
- if (MSVC)
+ if (MSVC OR MINGW)
set (separator "\\\;")
else()
set (separator ":")
|
Remove $Id$ now we use git rather than svn | @@ -17,7 +17,6 @@ dnl
dnl -------------------------------------------------------------------------
dnl Author Pier Fumagalli <mailto:[email protected]>
-dnl Version $Id$
dnl -------------------------------------------------------------------------
dnl -------------------------------------------------------------------------
|
admin/meta-packages: re-enable trilinos in some meta-packages | @@ -240,7 +240,7 @@ Requires: scalapack-%{compiler_family}-mpich%{PROJ_DELIM}
Requires: slepc-%{compiler_family}-mpich%{PROJ_DELIM}
Requires: ptscotch-%{compiler_family}-mpich%{PROJ_DELIM}
## TODO Requires: superlu_dist-%{compiler_family}-mpich%{PROJ_DELIM}
-## TODO Requires: trilinos-%{compiler_family}-mpich%{PROJ_DELIM}
+Requires: trilinos-%{compiler_family}-mpich%{PROJ_DELIM}
%description -n %{PROJ_NAME}-%{compiler_family}-mpich-parallel-libs
Collection of parallel library builds for use with GNU compiler toolchain and the MPICH runtime
@@ -257,7 +257,7 @@ Requires: scalapack-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
Requires: slepc-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
Requires: ptscotch-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
## TODO Requires: superlu_dist-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
-## TODO Requires: trilinos-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
+Requires: trilinos-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
%description -n %{PROJ_NAME}-%{compiler_family}-%{mpi_family}-parallel-libs
Collection of parallel library builds for use with GNU compiler toolchain and the OpenMPI runtime
|
ble_mesh: using bt device name in mesh proxy adv | @@ -1075,8 +1075,30 @@ static const struct bt_mesh_adv_data net_id_ad[] = {
BLE_MESH_ADV_DATA(BLE_MESH_DATA_SVC_DATA16, proxy_svc_data, NET_ID_LEN),
};
+static size_t gatt_proxy_adv_create(struct bt_mesh_adv_data *proxy_sd)
+{
+ const char *name = device_name;
+ size_t name_len = strlen(name);
+ /* One octet for Length, and another octet for AD type */
+ size_t sd_space = 29;
+
+ if (name_len > sd_space) {
+ proxy_sd->type = BLE_MESH_DATA_NAME_SHORTENED;
+ proxy_sd->data_len = sd_space;
+ } else {
+ proxy_sd->type = BLE_MESH_DATA_NAME_COMPLETE;
+ proxy_sd->data_len = name_len;
+ }
+
+ proxy_sd->data = (const u8_t *)name;
+
+ return 1;
+}
+
static int node_id_adv(struct bt_mesh_subnet *sub)
{
+ struct bt_mesh_adv_data proxy_sd = {0};
+ size_t proxy_sd_len;
u8_t tmp[16];
int err;
@@ -1099,9 +1121,10 @@ static int node_id_adv(struct bt_mesh_subnet *sub)
}
memcpy(proxy_svc_data + 3, tmp + 8, 8);
+ proxy_sd_len = gatt_proxy_adv_create(&proxy_sd);
err = bt_le_adv_start(&fast_adv_param, node_id_ad,
- ARRAY_SIZE(node_id_ad), NULL, 0);
+ ARRAY_SIZE(node_id_ad), &proxy_sd, proxy_sd_len);
if (err) {
BT_WARN("Failed to advertise using Node ID (err %d)", err);
return err;
@@ -1114,6 +1137,8 @@ static int node_id_adv(struct bt_mesh_subnet *sub)
static int net_id_adv(struct bt_mesh_subnet *sub)
{
+ struct bt_mesh_adv_data proxy_sd = {0};
+ size_t proxy_sd_len;
int err;
BT_DBG("%s", __func__);
@@ -1124,9 +1149,10 @@ static int net_id_adv(struct bt_mesh_subnet *sub)
bt_hex(sub->keys[sub->kr_flag].net_id, 8));
memcpy(proxy_svc_data + 3, sub->keys[sub->kr_flag].net_id, 8);
+ proxy_sd_len = gatt_proxy_adv_create(&proxy_sd);
err = bt_le_adv_start(&slow_adv_param, net_id_ad,
- ARRAY_SIZE(net_id_ad), NULL, 0);
+ ARRAY_SIZE(net_id_ad), &proxy_sd, proxy_sd_len);
if (err) {
BT_WARN("Failed to advertise using Network ID (err %d)", err);
return err;
|
Fix memcache getNodeByQuery coredump | @@ -2518,7 +2518,7 @@ int processCommand(client *c) {
!(c->flags & CLIENT_LUA &&
server.lua_caller->flags & CLIENT_MASTER) &&
!(c->cmd->getkeys_proc == NULL && c->cmd->firstkey == 0 &&
- c->cmd->proc != execCommand))
+ c->cmd->proc != execCommand) && (c->reqtype == PROTO_REQ_INLINE || c->reqtype == PROTO_REQ_MULTIBULK))
{
int hashslot;
int error_code;
|
Note example use of tls authentication. | 19 April 2018: Wouter
- Can set tls authentication with forward-addr: IP#tls.auth.name
And put the public cert bundle in tls-cert-bundle: "ca-bundle.pem".
+ such as forward-addr: 9.9.9.9@853#dns.quad9.net or
+ 1.1.1.1@853#cloudflare-dns.com
18 April 2018: Wouter
- Fix auth-zone retry timer to be on schedule with retry timeout,
|
WithFireSimFAME5 to allow non Rocket/BOOM build | @@ -170,6 +170,7 @@ class WithFireSimFAME5 extends ComposeIOBinder({
annotate(EnableModelMultiThreadingAnnotation(b.module))
case r: RocketTile =>
annotate(EnableModelMultiThreadingAnnotation(r.module))
+ case _ => Nil
}
(Nil, Nil)
}
|
attach_storage(): fix sector range for reading MBR from disk
The range parameter in block_io functions is expressed in sectors,
not bytes, thus attach_storage() was reading the first 512 sectors
from an attached disk instead of just the first sector. Fix it. | @@ -222,7 +222,7 @@ closure_function(0, 3, void, attach_storage,
deallocate(h, mbr, SECTOR_SIZE);
return;
}
- apply(r, mbr, irange(0, SECTOR_SIZE), sh);
+ apply(r, mbr, irange(0, 1), sh);
}
static void read_kernel_syms()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.