message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
[swig] Make sequenceToUnsignedIntVector accept any int-typed numpy array.
Test with python2.7 and python3.5 using numpy 1.11.1.
Fixes | @@ -1015,28 +1015,60 @@ struct IsDense : public Question<bool>
%{
static inline int sequenceToUnsignedIntVector(
PyObject *input,
- std11::shared_ptr<std::vector<unsigned int> > ptr)
+ std11::shared_ptr<std::vector<unsigned int> >& ptr)
{
if (!PySequence_Check(input)) {
PyErr_SetString(PyExc_TypeError,"Expecting a sequence");
return 0;
}
+ ptr.reset(new std::vector<unsigned int>());
assert(ptr);
+ PyArray_Descr* descrto = PyArray_DescrFromType(NPY_ULONG);
for (int i =0; i < PyObject_Length(input); i++)
{
PyObject *o = PySequence_GetItem(input,i);
- if (!PyInt_Check(o)) {
+ unsigned int u;
+
+ if (PyInt_Check(o)) {
+ long v = PyInt_AsLong(o);
+ if (v == -1 && PyErr_Occurred())
+ return 0;
+ u = static_cast<unsigned int>(v);
+ } else if (PyLong_Check(o)) {
+ long v = PyLong_AsLong(o);
+ if (v == -1 && PyErr_Occurred())
+ return 0;
+ u = static_cast<unsigned int>(v);
+ } else if (PyArray_CheckScalar(o)) {
+ PyArray_Descr* descrfrom = PyArray_DescrFromObject(o, NULL);
+ if (!PyDataType_ISINTEGER(descrfrom)) {
Py_XDECREF(o);
PyErr_SetString(PyExc_ValueError,"Expecting a sequence of ints");
return 0;
}
- if (PyInt_AsLong(o) == -1 && PyErr_Occurred())
+ // We must use UNSAFE casting, otherwise user would have to
+ // ensure to provide unsigned numpy arrays.
+ if (PyArray_CanCastTypeTo(descrfrom, descrto,
+ NPY_UNSAFE_CASTING))
+ {
+ PyArray_CastScalarToCtype(o, &u, descrto);
+ }
+ else {
+ Py_XDECREF(o);
+ PyErr_SetString(PyExc_ValueError,"Expecting a sequence of ints");
return 0;
+ }
+ }
+ else {
+ Py_XDECREF(o);
+ PyErr_SetString(PyExc_ValueError,"Expecting a sequence of ints");
+ return 0;
+ }
- ptr->push_back(static_cast<unsigned int>(PyInt_AsLong(o)));
+ ptr->push_back(u);
Py_DECREF(o);
}
@@ -1048,13 +1080,10 @@ struct IsDense : public Question<bool>
// int sequence => std::vector<unsigned int>
%typemap(in,fragment="NumPy_Fragments") std11::shared_ptr<std::vector<unsigned int> > (std11::shared_ptr<std::vector<unsigned int> > temp)
{
- temp.reset(new std::vector<unsigned int>());
- if (!sequenceToUnsignedIntVector($input, temp))
+ if (!sequenceToUnsignedIntVector($input, $1))
{
SWIG_fail;
}
- $1 = temp; // temp deallocation is done at object destruction
- // thanks to shared ptr ref counting
}
|
docs: add fullname of wsl for search | - [Amazon Linux 1](#amazon-linux-1---binary)
- [Amazon Linux 2](#amazon-linux-2---binary)
- [Alpine](#alpine---binary)
- - [WSL](#wsl---binary)
+ - [WSL](#wslwindows-subsystem-for-linux---binary)
* [Source](#source)
- [libbpf Submodule](#libbpf-submodule)
- [Debian](#debian---source)
@@ -271,16 +271,12 @@ sudo docker run --rm -it --privileged \
alpine:3.12
```
-## WSL - Binary
+## WSL(Windows Subsystem for Linux) - Binary
### Install dependencies
The compiling depends on the headers and lib of linux kernel module which was not found in wsl distribution packages repo. We have to compile the kernel moudle manually.
```bash
-apt-get install flex bison libssl-dev libelf-dev
-```
-For wsl kernel 5.10.y
-```
-apt-get install dwarves
+apt-get install flex bison libssl-dev libelf-dev dwarves
```
### Install packages
```
|
input: set missing collector id on socket type | @@ -879,6 +879,7 @@ int flb_input_set_collector_socket(struct flb_input_instance *in,
return -1;
}
+ collector->id = collector_id(in);
collector->type = FLB_COLLECT_FD_SERVER;
collector->cb_collect = cb_new_connection;
collector->fd_event = fd;
|
Arm code uses dwarf before arm for finding proc info, but unlike
dwarf code, ignores error return from dwarf_callback. Change arm
code to behave like dwarf code in that case. | @@ -523,6 +523,12 @@ arm_find_proc_info (unw_addr_space_t as, unw_word_t ip,
ret = dl_iterate_phdr (dwarf_callback, &cb_data);
SIGPROCMASK (SIG_SETMASK, &saved_mask, NULL);
+ if (ret <= 0)
+ {
+ Debug (14, "IP=0x%lx not found\n", (long) ip);
+ return -UNW_ENOINFO;
+ }
+
if (cb_data.single_fde)
/* already got the result in *pi */
return 0;
|
Soft diable qlog test if no chacha | @@ -6446,6 +6446,13 @@ int packet_trace_test()
#define QLOG_TRACE_BIN "qlog_trace.bin"
#define QLOG_TRACE_QLOG "qlog_trace.qlog"
+#ifdef PTLS_OPENSSL_HAVE_CHACHA20_POLY1305
+const int has_chacha_poly = 1;
+#else
+const int has_chacha_poly = 0;
+#endif
+
+
void qlog_trace_cid_fn(picoquic_quic_t* quic, picoquic_connection_id_t cnx_id_local,
picoquic_connection_id_t cnx_id_remote, void* cnx_id_cb_data, picoquic_connection_id_t* cnx_id_returned)
{
@@ -6478,6 +6485,12 @@ int qlog_trace_test()
ret = -1;
}
+ if (!has_chacha_poly) {
+ /* Do not run this test if chacha20 is not available, because
+ * the TLS messages would be different from expected */
+ return 0;
+ }
+
/* Set the logging policy on the server side, to store data in the
* current working directory, and run a basic test scenario */
if (ret == 0) {
|
cheza: Config the SPI flash size to 1MB
The NPCX7M7WB has 1MB internal SPI flash. Config it correctly.
BRANCH=none
TEST=Checked the EC image size is 1MB. Ran flashrom to flash EC.
Tested-by: Philip Chen | #define NPCX7_PWM1_SEL 0 /* GPIO C2 is not used as PWM1. */
/* Internal SPI flash on NPCX7 */
-#define CONFIG_FLASH_SIZE (512 * 1024) /* It's really 1MB. */
+#define CONFIG_FLASH_SIZE (1024 * 1024) /* 1MB internal spi flash */
#define CONFIG_SPI_FLASH_REGS
#define CONFIG_SPI_FLASH_W25Q80 /* Internal SPI flash type. */
|
invoke: fix invalid pointer | @@ -8,6 +8,7 @@ typedef struct
{
Plugin * plugin;
KeySet * modules;
+ KeySet * exports;
}ElektraInvokeHandle;
void * elektraInvokeInitialize(const char *elektraPluginName)
@@ -47,24 +48,33 @@ const void * elektraInvokeGetFunction(void * invokeHandle, const char *elektraPl
return NULL;
}
Plugin * plugin = handle->plugin;
- KeySet * exports = ksNew(0, KS_END);
+ KeySet * exports = NULL;
+
Key * exportParent = keyNew("system/elektra/modules", KEY_END);
keyAddBaseName(exportParent, plugin->name);
+
+ if(handle->exports)
+ {
+ exports = handle->exports;
+ }
+ else
+ {
+ exports = ksNew(0, KS_END);
+ handle->exports = exports;
plugin->kdbGet(plugin, exports, exportParent);
+ }
keyAddBaseName(exportParent, "exports");
keyAddBaseName(exportParent, elektraPluginFunctionName);
- const void * functionPtr = NULL;
Key *functionKey = ksLookup(exports, exportParent, 0);
+ keyDel(exportParent);
if(!functionKey)
{
- keyDel(exportParent);
- ksDel(exports);
return NULL;
}
- functionPtr = keyValue(functionKey);
- keyDel(exportParent);
- ksDel(exports);
- return functionPtr;
+ else
+ {
+ return keyValue(functionKey);
+ }
}
void elektraInvokeClose(void *invokeHandle)
@@ -79,5 +89,6 @@ void elektraInvokeClose(void *invokeHandle)
keyDel(errorKey);
elektraModulesClose(handle->modules, NULL);
ksDel(handle->modules);
+ ksDel(handle->exports);
elektraFree(handle);
}
|
Slightly simplify nbtree split point choice loop.
Spotted during post-commit review of the nbtree deduplication commit
(commit 0d861bbb). | @@ -820,18 +820,14 @@ _bt_bestsplitloc(FindSplitData *state, int perfectpenalty,
penalty = _bt_split_penalty(state, state->splits + i);
- if (penalty <= perfectpenalty)
- {
- bestpenalty = penalty;
- lowsplit = i;
- break;
- }
-
if (penalty < bestpenalty)
{
bestpenalty = penalty;
lowsplit = i;
}
+
+ if (penalty <= perfectpenalty)
+ break;
}
final = &state->splits[lowsplit];
|
sdspi_host: bugfix The clock may be sent out before the bus that was used immediately before is released.
Merges | @@ -75,6 +75,8 @@ static esp_err_t start_command_default(slot_info_t *slot, int flags, sdspi_hw_cm
static esp_err_t shift_cmd_response(sdspi_hw_cmd_t *cmd, int sent_bytes);
+static esp_err_t poll_busy(slot_info_t *slot, int timeout_ms, bool polling);
+
/// A few helper functions
/// Map handle to pointer of slot information
@@ -435,6 +437,8 @@ esp_err_t sdspi_host_start_command(sdspi_dev_handle_t handle, sdspi_hw_cmd_t *cm
ESP_LOGV(TAG, "%s: slot=%i, CMD%d, arg=0x%08x flags=0x%x, data=%p, data_size=%i crc=0x%02x",
__func__, handle, cmd_index, cmd_arg, flags, data, data_size, cmd->crc7);
+ spi_device_acquire_bus(slot->spi_handle, portMAX_DELAY);
+ poll_busy(slot, 40, true);
// For CMD0, clock out 80 cycles to help the card enter idle state,
// *before* CS is asserted.
@@ -444,7 +448,6 @@ esp_err_t sdspi_host_start_command(sdspi_dev_handle_t handle, sdspi_hw_cmd_t *cm
// actual transaction
esp_err_t ret = ESP_OK;
- spi_device_acquire_bus(slot->spi_handle, portMAX_DELAY);
cs_low(slot);
if (flags & SDSPI_CMD_FLAG_DATA) {
const bool multi_block = flags & SDSPI_CMD_FLAG_MULTI_BLK;
|
YAML CPP: Fix minor spelling mistake in comment | @@ -30,7 +30,7 @@ using KeySetPair = pair<KeySet, KeySet>;
*
* @param keys This parameter contains the key set this function searches for array parents.
*
- * @return A key sets that contains all array parents stored in `keys`
+ * @return A key set that contains all array parents stored in `keys`
*/
KeySet splitArrayParents (KeySet const & keys)
{
|
removing abs~ & log~ | ==~ signal equal to
>=~ signal greater than or equal to
>~ signal greater than
-abs~ signal absolute
accum accumulate to a value
acos arc cosine function
acos~ signal arc cosine function
@@ -104,7 +103,6 @@ lessthaneq~ alias of <=~
line~ linear ramp generator
linedrive exponential scaler for [line~]
loadmess send messages at loading a patch
-log~ signal logarithm
lookup~ transfer function lookup table
lores~ lowpass resonant filter
match output a matching input
|
[Cita][#1193]modify valid_until_block function | @@ -139,7 +139,7 @@ BOAT_RESULT BoatCitaTxInit(BoatCitaWallet *wallet_ptr,
//quota
tx_ptr->rawtx_fields.quota = quota;
- // less than current blocknumber plus 100.
+ // valid_until_block = current blocknumber + 100.
retval_str = BoatCitaGetBlockNumber(tx_ptr);
result = BoatCitaParseRpcResponseStringResult(retval_str,
&tx_ptr->wallet_ptr->web3intf_context_ptr->web3_result_string_buf);
@@ -148,20 +148,9 @@ BOAT_RESULT BoatCitaTxInit(BoatCitaWallet *wallet_ptr,
BoatLog(BOAT_LOG_CRITICAL, "BoatCitaGetBlockNumber failed.");
return result;
}
- BUINT8 Block_bumber[8] = {0};
-
- UtilityHexToBin(Block_bumber, 32,
- (BCHAR *)tx_ptr->wallet_ptr->web3intf_context_ptr->web3_result_string_buf.field_ptr,
- TRIMBIN_TRIM_NO, BOAT_TRUE);
-
-
-
-
- BoatLog(BOAT_LOG_CRITICAL, "1111111111 = %s\n", (BCHAR *)tx_ptr->wallet_ptr->web3intf_context_ptr->web3_result_string_buf.field_ptr);
-
-
- tx_ptr->rawtx_fields.valid_until_block = 0x258c;
- BoatLog(BOAT_LOG_CRITICAL, "3333333333333 = %x\n", tx_ptr->rawtx_fields.valid_until_block);
+ char *stopstring;
+ BUINT64 valid_until_block_value = strtoll((BCHAR *)tx_ptr->wallet_ptr->web3intf_context_ptr->web3_result_string_buf.field_ptr, &stopstring, 16);
+ tx_ptr->rawtx_fields.valid_until_block = valid_until_block_value + 100;
// Initialize value = 0
//CITA DOES NOT SET VALUE, IT'S DE-COINIZED
|
coroutine -> dill_coroutine | @@ -108,7 +108,7 @@ DILL_EXPORT int dill_hclose(int h);
/* Coroutines */
/******************************************************************************/
-#define coroutine __attribute__((noinline))
+#define dill_coroutine __attribute__((noinline))
DILL_EXPORT extern volatile void *dill_unoptimisable;
@@ -251,6 +251,7 @@ DILL_EXPORT int dill_bundle_wait(int h, int64_t deadline);
DILL_EXPORT int dill_yield(void);
#if !defined DILL_DISABLE_RAW_NAMES
+#define coroutine dill_coroutine
#define go dill_go
#define go_mem dill_go_mem
#define bundle_go dill_bundle_go
|
MARS: New stream for GFAS reanalysis | 1252 gfas Global fire assimilation system
1253 ocda Ocean data assimilation
1254 olda Ocean Long window data assimilation
+1255 gfar Global fire assimilation system reanalysis
2231 cnrm Meteo France climate centre
2232 mpic Max Plank Institute
2233 ukmo UKMO climate centre
|
Fix mod-by-zero | @@ -21,20 +21,28 @@ static inline Term term_lowered(Term c) {
return (c >= 'A' && c <= 'Z') ? c - ('a' - 'A') : c;
}
+// Always returns 0 through (sizeof indexed_terms) - 1, and works on
+// capitalized terms as well. The index of the lower-cased term is returned if
+// the term is capitalized.
+static inline size_t semantic_index_of_term(Term c) {
+ Term c0 = term_lowered(c);
+ for (size_t i = 0; i < Terms_array_num; ++i) {
+ if (indexed_terms[i] == c0)
+ return i;
+ }
+ return 0;
+}
+
static inline Term terms_sum(Term a, Term b) {
- size_t ia = index_of_term(term_lowered(a));
- size_t ib = index_of_term(term_lowered(b));
- if (ia == SIZE_MAX) ia = 0;
- if (ib == SIZE_MAX) ib = 0;
+ size_t ia = semantic_index_of_term(a);
+ size_t ib = semantic_index_of_term(b);
return indexed_terms[(ia + ib) % Terms_array_num];
}
static inline Term terms_mod(Term a, Term b) {
- size_t ia = index_of_term(term_lowered(a));
- size_t ib = index_of_term(term_lowered(b));
- if (ia == SIZE_MAX) ia = 0;
- if (ib == SIZE_MAX) ib = 0;
- return indexed_terms[ia % ib];
+ size_t ia = semantic_index_of_term(a);
+ size_t ib = semantic_index_of_term(b);
+ return indexed_terms[ib == 0 ? 0 : (ia % ib)];
}
static inline void act_a(Field* f, U32 y, U32 x) {
|
Make aof buf alloc size more accurate in overhead
In theory we should have used zmalloc_usable_size, but it may be slower,
and now after there's no actual difference.
So this change isn't making a dramatic change. | @@ -342,7 +342,7 @@ size_t freeMemoryGetNotCountedMemory(void) {
}
}
if (server.aof_state != AOF_OFF) {
- overhead += sdsalloc(server.aof_buf)+aofRewriteBufferSize();
+ overhead += sdsAllocSize(server.aof_buf)+aofRewriteBufferSize();
}
return overhead;
}
|
unix: use (sane %ta) out of arvo | **
*/
#include "all.h"
-#include <ctype.h>
#include <ftw.h>
#include "vere/vere.h"
@@ -59,6 +58,7 @@ struct _u3_ufil;
c3_c* pax_c; // pier directory
c3_o alm; // timer set
c3_o dyr; // ready to update
+ u3_noun sat; // (sane %ta) handle
#ifdef SYNCLOG
c3_w lot_w; // sync-slot
struct _u3_sylo {
@@ -112,18 +112,9 @@ u3_unix_safe(const c3_c* pax_c)
** (star ;~(pose nud low hep dot sig cab))
*/
static c3_t
-_unix_sane_ta(c3_c* pax_c, c3_w len_w)
+_unix_sane_ta(u3_unix* unx_u, c3_c* pax_c, c3_w len_w)
{
- for ( ; len_w; pax_c++, len_w-- ) {
- if ( !islower(*pax_c)
- && !isdigit(*pax_c)
- && '-' != *pax_c && '.' != *pax_c
- && '~' != *pax_c && '_' != *pax_c )
- {
- return 0;
- }
- }
- return 1;
+ return _(u3n_slam_on(u3k(unx_u->sat), u3i_bytes(len_w, (c3_y*)pax_c)));
}
/* u3_readdir_r():
@@ -528,7 +519,7 @@ _unix_scan_mount_point(u3_unix* unx_u, u3_umon* mon_u)
if ( '.' != out_u->d_name[len_w]
|| '\0' == out_u->d_name[len_w + 1]
|| '~' == out_u->d_name[strlen(out_u->d_name) - 1]
- || !_unix_sane_ta(out_u->d_name, len_w) )
+ || !_unix_sane_ta(unx_u, out_u->d_name, len_w) )
{
c3_free(pax_c);
continue;
@@ -971,7 +962,7 @@ _unix_update_dir(u3_unix* unx_u, u3_udir* dir_u)
c3_w len_w = strlen(out_u->d_name);
if ( !strchr(out_u->d_name,'.')
- || !_unix_sane_ta(out_u->d_name, len_w)
+ || !_unix_sane_ta(unx_u, out_u->d_name, len_w)
|| '~' == out_u->d_name[len_w - 1] )
{
c3_free(pax_c);
@@ -1458,6 +1449,7 @@ _unix_io_exit(u3_auto* car_u)
{
u3_unix* unx_u = (u3_unix*)car_u;
+ u3z(unx_u->sat);
c3_free(unx_u->pax_c);
c3_free(unx_u);
}
@@ -1472,6 +1464,7 @@ u3_unix_io_init(u3_pier* pir_u)
unx_u->pax_c = strdup(pir_u->pax_c);
unx_u->alm = c3n;
unx_u->dyr = c3n;
+ unx_u->sat = u3do("sane", c3__ta);
u3_auto* car_u = &unx_u->car_u;
car_u->nam_m = c3__unix;
|
get list in attach | @@ -2380,6 +2380,7 @@ list_t operator_get_list(const struct operator_s* op) {
auto data_perm = CAST_MAYBE(permute_data_s, op->data);
auto data_plus = CAST_MAYBE(operator_plus_s, op->data);
auto data_copy = CAST_MAYBE(copy_data_s, op->data);
+ auto data_attach = CAST_MAYBE(attach_data_s, op->data);
if (NULL != data_combi) {
@@ -2434,6 +2435,11 @@ list_t operator_get_list(const struct operator_s* op) {
return operator_get_list(data_copy->op);
}
+ if (NULL != data_attach) {
+
+ return operator_get_list(data_attach->op);
+ }
+
list_t result = list_create();
list_append(result, (void*)op);
|
test: fix typo in a function name | @@ -460,7 +460,7 @@ static void test_reset_during_loss(void)
static uint16_t test_close_error_code;
-static void test_closeed_by_remote(quicly_closed_by_remote_t *self, quicly_conn_t *conn, int err, uint64_t frame_type,
+static void test_closed_by_remote(quicly_closed_by_remote_t *self, quicly_conn_t *conn, int err, uint64_t frame_type,
const char *reason, size_t reason_len)
{
ok(QUICLY_ERROR_IS_QUIC_APPLICATION(err));
@@ -472,7 +472,7 @@ static void test_closeed_by_remote(quicly_closed_by_remote_t *self, quicly_conn_
static void test_close(void)
{
- quicly_closed_by_remote_t closed_by_remote = {test_closeed_by_remote}, *orig_closed_by_remote = quic_ctx.closed_by_remote;
+ quicly_closed_by_remote_t closed_by_remote = {test_closed_by_remote}, *orig_closed_by_remote = quic_ctx.closed_by_remote;
quicly_address_t dest, src;
struct iovec datagram;
uint8_t datagram_buf[quic_ctx.transport_params.max_udp_payload_size];
|
power/rk3288.c: Format with clang-format
BRANCH=none
TEST=none | @@ -101,11 +101,9 @@ enum power_request_t {
static enum power_request_t power_request;
-
/* Forward declaration */
static void chipset_turn_off_power_rails(void);
-
/**
* Set the PMIC WARM RESET signal.
*
@@ -117,7 +115,6 @@ static void set_pmic_warm_reset(int asserted)
gpio_set_level(GPIO_PMIC_WARM_RESET_L, asserted ? 0 : 1);
}
-
/**
* Set the PMIC PWRON signal.
*
@@ -294,13 +291,11 @@ static int check_for_power_on_event(void)
/* check if system is already ON */
if (power_get_signals() & IN_POWER_GOOD) {
if (ap_off_flag) {
- CPRINTS(
- "system is on, but "
+ CPRINTS("system is on, but "
"EC_RESET_FLAG_AP_OFF is on");
return 0;
} else {
- CPRINTS(
- "system is on, thus clear "
+ CPRINTS("system is on, thus clear "
"auto_power_on");
/* no need to arrange another power on */
auto_power_on = 0;
@@ -443,10 +438,11 @@ enum power_state power_handle_state(enum power_state state)
set_pmic_pwron(0);
/* setup misc gpio for S3/S0 functionality */
- gpio_set_flags(GPIO_SUSPEND_L, GPIO_INPUT
- | GPIO_INT_BOTH | GPIO_PULL_DOWN);
- gpio_set_flags(GPIO_EC_INT_L, GPIO_OUTPUT
- | GPIO_OUT_HIGH);
+ gpio_set_flags(GPIO_SUSPEND_L,
+ GPIO_INPUT | GPIO_INT_BOTH |
+ GPIO_PULL_DOWN);
+ gpio_set_flags(GPIO_EC_INT_L,
+ GPIO_OUTPUT | GPIO_OUT_HIGH);
/* Call hooks now that AP is running */
hook_notify(HOOK_CHIPSET_STARTUP);
@@ -577,6 +573,4 @@ static int command_power(int argc, char **argv)
return EC_SUCCESS;
}
-DECLARE_CONSOLE_COMMAND(power, command_power,
- "on/off",
- "Turn AP power on/off");
+DECLARE_CONSOLE_COMMAND(power, command_power, "on/off", "Turn AP power on/off");
|
Add todo note for deleteUser() | @@ -1752,6 +1752,8 @@ int DeRestPluginPrivate::deleteUser(const ApiRequest &req, ApiResponse &rsp)
std::vector<ApiAuth>::iterator i = apiAuths.begin();
std::vector<ApiAuth>::iterator end = apiAuths.end();
+ // TODO compare error not found on hue bridge
+
for (; i != end; ++i)
{
if (username2 == i->apikey && i->state == ApiAuth::StateNormal)
|
ikev2: fix wrong index computation
Type: fix | @@ -3374,7 +3374,7 @@ ikev2_initiate_sa_init (vlib_main_t * vm, u8 * name)
ikev2_sa_free_proposal_vector (&proposals);
sa.is_initiator = 1;
- sa.profile_index = km->profiles - p;
+ sa.profile_index = p - km->profiles;
sa.is_profile_index_set = 1;
sa.state = IKEV2_STATE_SA_INIT;
sa.tun_itf = p->tun_itf;
|
Fix UAT track calculation when one velocity component = 0 | @@ -549,7 +549,7 @@ func parseDownlinkReport(s string, signalLevel int) {
}
}
if ns_vel_valid && ew_vel_valid {
- if ns_vel != 0 && ew_vel != 0 {
+ if ns_vel != 0 || ew_vel != 0 {
//TODO: Track type
track = uint16((360 + 90 - (int16(math.Atan2(float64(ns_vel), float64(ew_vel)) * 180 / math.Pi))) % 360)
}
|
Reordered cmake file from cli. | @@ -176,11 +176,22 @@ else()
set(GREP_COMMAND grep)
endif()
+include(TestEnvironmentVariables)
+
add_test(NAME ${target}
COMMAND ${TEST_COMMAND} "echo 'load mock a.mock\ninspect\nexit' | $<TARGET_FILE:${target}> | ${GREP_COMMAND} \"function three_str(a_str, b_str, c_str)\""
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
+set_property(TEST ${target}
+ PROPERTY LABELS ${target}
+)
+
+test_environment_variables(${target}
+ ""
+ ${TESTS_ENVIRONMENT_VARIABLES}
+)
+
#
# Define dependencies
#
@@ -202,17 +213,6 @@ add_loader_dependencies(${target}
# Define tests
#
-set_property(TEST ${target}
- PROPERTY LABELS ${target}
-)
-
-include(TestEnvironmentVariables)
-
-test_environment_variables(${target}
- ""
- ${TESTS_ENVIRONMENT_VARIABLES}
-)
-
if(OPTION_BUILD_LOADERS AND OPTION_BUILD_LOADERS_NODE AND OPTION_BUILD_SCRIPTS AND OPTION_BUILD_SCRIPTS_NODE)
add_test(NAME ${target}-node
COMMAND ${TEST_COMMAND} "echo 'load node nod.js\ninspect\ncall hello_boy(300, 400)\nexit' | $<TARGET_FILE:${target}> | ${GREP_COMMAND} \"700.0\""
|
reconstruct coil images | @@ -893,6 +893,21 @@ static nn_t reconet_apply_op_create(const struct reconet_s* config, int N, const
nn_apply = reconet_sort_args(nn_apply);
nn_apply = nn_get_wo_weights_F(nn_apply, config->weights, false);
+ if (config->coil_image) {
+
+ long cim_dims[N];
+ long img_dims[N];
+ long col_dims[N];
+
+ md_select_dims(N, config->mri_config->coil_image_flags, cim_dims, max_dims);
+ md_select_dims(N, config->mri_config->image_flags, img_dims, max_dims);
+ md_select_dims(N, config->mri_config->coil_flags, col_dims, max_dims);
+
+ nn_apply = nn_chain2_FF(nn_apply , 0, "reconstruction", nn_from_nlop_F(nlop_tenmul_create(N, cim_dims, img_dims, col_dims)), 0, NULL);
+ nn_apply = nn_dup_F(nn_apply , 0, "coil", 0, NULL);
+ nn_apply = nn_set_output_name_F(nn_apply , 0, "reconstruction");
+ }
+
debug_printf(DP_INFO, "Apply RecoNet\n");
nn_debug(DP_INFO, nn_apply);
|
tweak Makefile verbiage | @@ -181,18 +181,18 @@ image: require-qemu-binfmt
docs-generate: TAG := cribl/scope:docs-$(ARCH)
docs-generate: require-docker-buildx-builder
- @echo Building the AppScope Document generator
+ @echo Building the AppScope docs generator
@docker buildx build \
--tag $(TAG) \
--platform linux/$(PLATFORM_$(ARCH)) \
--file docker/docs/Dockerfile \
.
- @echo Running the AppScope Document generator
+ @echo Running the AppScope docs generator
@docker run \
-v $(shell pwd)/docs:/md \
-u $(shell id -u):$(shell id -g) \
--rm cribl/scope:docs-$(ARCH)
- @echo AppScope Document generator finished: md files are available in docs/md_files
+ @echo AppScope docs generator finished: md files are available in docs/md_files
k8s-test: require-kind require-kubectl image
docker tag cribl/scope:dev-x86_64 cribl/scope:$(VERSION)
|
prun: verify_launcher should be verify_launcher_avail | @@ -105,11 +105,11 @@ function launch_mpich () {
verify_launcher_avail srun
cmd="srun --mpi=pmix $@"
else
- verify_launcher mpiexec.hydra
+ verify_launcher_avail mpiexec.hydra
cmd="mpiexec.hydra -bootstrap slurm $@"
fi
elif [[ ${RM} == "pbspro" ]];then
- verify_launcher mpiexec.hydra
+ verify_launcher_avail mpiexec.hydra
cmd="mpiexec.hydra -rmk pbs $@"
else
_error "Unknown resource manager -> ${RM}"
|
peview: Add ctrl+a and ctrl+c events to pdb tab | @@ -365,6 +365,37 @@ BOOLEAN NTAPI PvSymbolTreeNewCallback(
}
return TRUE;
case TreeNewKeyDown:
+ {
+ PPH_TREENEW_KEY_EVENT keyEvent = Parameter1;
+
+ if (!keyEvent)
+ break;
+
+ switch (keyEvent->VirtualKey)
+ {
+ case 'C':
+ {
+ if (GetKeyState(VK_CONTROL) < 0)
+ {
+ PPH_STRING text;
+
+ text = PhGetTreeNewText(hwnd, 0);
+ PhSetClipboardString(hwnd, &text->sr);
+ PhDereferenceObject(text);
+ }
+ }
+ break;
+ case 'A':
+ {
+ if (GetKeyState(VK_CONTROL) < 0)
+ {
+ TreeNew_SelectRange(hwnd, 0, -1);
+ }
+ }
+ break;
+ }
+ }
+ return TRUE;
case TreeNewNodeExpanding:
return TRUE;
case TreeNewLeftDoubleClick:
|
Add AgentRequest and its ID | @@ -113,6 +113,7 @@ public enum AgentId : uint {
ScreenLog = 35,
// NPCTrade,
+ Request = 36,
Status = 37,
Map = 38,
Loot = 39, //NeedGreed
|
Update netdb.txt
change 14.152.81.132 >> 117.27.159.97 | 5.189.132.84:16775
-14.152.81.132:13655
45.76.37.252:13654
47.100.202.206:56600
47.100.254.68:13654
104.40.243.113:31337
109.196.45.218:3355
111.231.212.158:16775
+117.27.159.97:13655
119.28.37.154:16775
124.160.119.90:13655
124.161.87.210:13655
|
Bugfix for counter reads from CP | @@ -208,31 +208,31 @@ for table in hlir.tables:
#} #endif
#} }
-hack_i = 0
-for smem in unique_everseen([smem for table, smem in hlir.all_counters]):
+hack_i={}
+for table, smem in hlir.all_counters:
for target in smem.smem_for:
if not smem.smem_for[target]:
continue
- hack_i += 1
- if hack_i%2==1:
for c in smem.components:
cname = c['name']
+ if cname not in hack_i:
+ hack_i[cname] = 1
if smem.smem_type not in ["register", "direct_counter", "direct_meter"]:
#[ uint32_t ctrl_${cname}[${smem.amount}];
#{ uint32_t* read_counter_value_by_name(char* counter_name, int* size, bool is_bytes){
#[ int i;
-hack_i = 0
-for smem in unique_everseen([smem for table, smem in hlir.all_counters]):
+hack_i = {}
+for table, smem in hlir.all_counters:
for target in smem.smem_for:
if not smem.smem_for[target]:
continue
- hack_i += 1
- if hack_i%2==0:
- continue
for c in smem.components:
cname = c['name']
+ if cname in hack_i:
+ continue
+ hack_i[cname] = 1
pre_bytes = ''
if c['for'] == "packets":
pre_bytes = '!'
|
updates to correlation benchmarking file | @@ -82,8 +82,7 @@ def set_up(request):
th, xi = np.loadtxt(fname, unpack=True)
return th, xi
- # DL: need to add a bunch of benchmarks here but also make sure the
- # names are correct.
+ # DL: need to make sure the names are correct.
pre = dirdat + 'run_'
post = nztyp + "_log_wt_"
bms = {}
@@ -103,7 +102,7 @@ def set_up(request):
bms['theta'] = theta
# Read error bars
- # DL: what are we using for errors?? Not sure at all. Ask on telecon.
+ # DL: need to make sure these match what we are using.
ers = {}
d = np.loadtxt("benchmarks/data/sigma_clustering_Nbin5",
unpack=True)
@@ -151,6 +150,8 @@ def set_up(request):
bounds_error=False)(theta)
return cosmo, trc, bms, ers, fl
+# DL probably need to change this back to less bins to match where
+# we have error calculations.
@pytest.mark.parametrize("t1,t2,bm,er,kind,pref",
[('g1', 'g1', 'dd_11', 'dd_11', 'gg', 1),
('g2', 'g2', 'dd_22', 'dd_22', 'gg', 1),
|
Remove assertion => fix debug spec tests | @@ -236,7 +236,7 @@ M3Result EvaluateExpression (IM3Module i_module, void * o_expressed, u8 i_type
M3Result InitMemory (IM3Runtime io_runtime, IM3Module i_module)
{
- M3Result result = c_m3Err_none; d_m3Assert (not io_runtime->memory.wasmPages);
+ M3Result result = c_m3Err_none; //d_m3Assert (not io_runtime->memory.wasmPages);
if (not i_module->memoryImported)
{
|
don't do payment checks early on in long chains as the mn list is stale
payment for blocks in the past is confirmed by network anyway | @@ -2478,7 +2478,7 @@ bool CBlock::ConnectBlock(CTxDB& txdb, CBlockIndex* pindex, bool fJustCheck)
}
}
- if(pindex->GetBlockTime() > GetTime() - 30*nCoinbaseMaturity && !IsInitialBlockDownload() && FortunastakePayments == true)
+ if(pindex->GetBlockTime() > GetTime() - 30*nCoinbaseMaturity && (pindex->nHeight < pindexBest->nHeight+20) && !IsInitialBlockDownload() && FortunastakePayments == true)
{
LOCK2(cs_main, mempool.cs);
|
ev3dev/modules/nxtdevices: add vernier adapter | """Classes for LEGO MINDSTORMS NXT Devices."""
+
from nxtdevices_c import *
+from pybricks.iodevices import AnalogSensor
+
+
+class VernierAdapter(AnalogSensor):
+
+ def __init__(self, port, conversion=None):
+ # create AnalogSensor object
+ super().__init__(port, False)
+
+ # Store conversion function if given
+ if conversion is not None:
+ self.conversion = conversion
+
+ # Verify that conversion is valid
+ try:
+ for v in (0, 10, 1000, 3000, 5000):
+ _ = self.conversion(v)
+ except Exception as e:
+ print(
+ """\nThere is an error in the conversion function. """
+ """Make sure it has the following form:\n"""
+ """\ndef my_conversion_function(voltage):"""
+ """\n value = voltage * 3 """
+ """# This is just an example. Use your own formula here."""
+ """\n return value\n"""
+ """\nThe technical description of the error is as follows:\n"""
+ )
+ raise e
+
+ def conversion(self, voltage):
+ return voltage
+
+ def value(self):
+ return self.conversion(self.voltage())
|
grib_get_data: iterator consumes vast amounts of memory when Ni is missing | @@ -163,8 +163,18 @@ static int init(grib_iterator* i, grib_handle* h, grib_arguments* args)
return ret;
if ((ret = grib_get_long_internal(h, s_Ni, &Ni)))
return ret;
+ if (grib_is_missing(h, s_Ni, &ret) && ret == GRIB_SUCCESS) {
+ grib_context_log(h->context, GRIB_LOG_ERROR, "Key %s cannot be missing for a regular grid!", s_Ni);
+ return GRIB_WRONG_GRID;
+ }
+
if ((ret = grib_get_long_internal(h, s_Nj, &Nj)))
return ret;
+ if (grib_is_missing(h, s_Nj, &ret) && ret == GRIB_SUCCESS) {
+ grib_context_log(h->context, GRIB_LOG_ERROR, "Key %s cannot be missing for a regular grid!", s_Nj);
+ return GRIB_WRONG_GRID;
+ }
+
if ((ret = grib_get_long_internal(h, s_iScansNeg, &self->iScansNegatively)))
return ret;
|
Check Bashisms: Use `egrep` regex for GNU find
After this update the `find` command used in the script should work the
same on Linux and macOS.
This commit closes | @@ -8,12 +8,13 @@ command -v checkbashisms >/dev/null 2>&1 || { echo "checkbashisms command needed
cd "@CMAKE_SOURCE_DIR@"
-find -version > /dev/null 2>&1 > /dev/null && FIND=find || FIND='find -E'
+# Use (non-emacs) extended regex for GNU find or BSD find
+find -version > /dev/null 2>&1 > /dev/null && FIND='find scripts -regextype egrep' || FIND='find -E scripts'
# this way we also check subdirectories
# The script `check-env-dep` uses process substitution which is **not** a standard `sh` feature!
# See also: https://unix.stackexchange.com/questions/151925
-scripts=$($FIND scripts/ -type f -not -regex \
+scripts=$($FIND -type f -not -regex \
'.+(check-env-dep|gitignore|kdb_zsh_completion|run_dev_env|sed|(Docker|Jenkins|Vagrant)file.*|\.(cmake|fish|in|md|txt))$' | \
xargs)
checkbashisms $scripts
|
Fix test_ns_utf16_doctype() to work in builds | @@ -7811,7 +7811,11 @@ START_TEST(test_ns_utf16_doctype)
"\0x\0m\0l\0n\0s\0:\0f\0o\0o\0=\0'\0U\0R\0I\0'\0>"
"\0&\0b\0a\0r\0;"
"\0<\0/\0f\0o\0o\0:\x0e\x04\0>";
- const XML_Char *expected = "URI \xe0\xb8\x84";
+#ifdef XML_UNICODE
+ const XML_Char *expected = XCS("URI \x0e04");
+#else
+ const XML_Char *expected = XCS("URI \xe0\xb8\x84");
+#endif
CharData storage;
CharData_Init(&storage);
|
options/ansi: Implement scanf | @@ -684,9 +684,12 @@ static int do_scanf(H &handler, const char *fmt, __gnuc_va_list args) {
return match_count;
}
-int scanf(const char *__restrict, ...) {
- __ensure(!"Not implemented");
- __builtin_unreachable();
+int scanf(const char *__restrict format, ...) {
+ va_list args;
+ va_start(args, format);
+ int result = vfscanf(stdin, format, args);
+ va_end(args);
+ return result;
}
int snprintf(char *__restrict buffer, size_t max_size, const char *__restrict format, ...) {
|
Fix ordering problem with +intercepted-scry.
This fixes generators which scry into %a. | ::
~| hoon-version=hoon-version
?> ?=(?(%143 %151) hoon-version)
+ :: if the actual scry produces a value, use that value; otherwise use local
+ ::
+ =/ scry-response (scry +<.$)
+ ::
+ ?^ scry-response
+ scry-response
::
=/ vane=(unit ?(%c %g)) ((soft ?(%c %g)) (end 3 1 term))
?~ vane
p.r.beam
::
=/ =build [date %scry resource]
- :: if the actual scry produces a value, use that value; otherwise use local
- ::
- =/ scry-response (scry +<.$)
- ::
- ?^ scry-response
- scry-response
:: look up the scry result from our permanent state
::
:: Note: we can't freshen this cache entry because we can't modify
|
admin/ohpc-filesystem: include url for this package | @@ -15,6 +15,7 @@ Summary: Common top-level OpenHPC directories
Group: ohpc/admin
License: ASL 2.0
+URL: https://github.com/openhpc/ohpc
Source0: OHPC_setup_compiler
Source1: OHPC_setup_mpi
Source2: ohpc-find-requires
|
status: print mmap_errors with the %zu specifier | @@ -98,13 +98,13 @@ static h2o_iovec_t events_status_final(void *priv, h2o_globalconf_t *gconf, h2o_
" \"http2.read-closed\": %" PRIu64 ", \n"
" \"http2.write-closed\": %" PRIu64 ", \n"
" \"ssl.errors\": %" PRIu64 ", \n"
- " \"memory.mmap_errors\": %" PRIu64 "\n",
+ " \"memory.mmap_errors\": %zu\n",
H1_AGG_ERR(400), H1_AGG_ERR(403), H1_AGG_ERR(404), H1_AGG_ERR(405), H1_AGG_ERR(416), H1_AGG_ERR(417),
H1_AGG_ERR(500), H1_AGG_ERR(502), H1_AGG_ERR(503), H2_AGG_ERR(PROTOCOL), H2_AGG_ERR(INTERNAL),
H2_AGG_ERR(FLOW_CONTROL), H2_AGG_ERR(SETTINGS_TIMEOUT), H2_AGG_ERR(STREAM_CLOSED), H2_AGG_ERR(FRAME_SIZE),
H2_AGG_ERR(REFUSED_STREAM), H2_AGG_ERR(CANCEL), H2_AGG_ERR(COMPRESSION), H2_AGG_ERR(CONNECT),
H2_AGG_ERR(ENHANCE_YOUR_CALM), H2_AGG_ERR(INADEQUATE_SECURITY), esc->h2_read_closed, esc->h2_write_closed,
- esc->ssl_errors, (uint64_t)mmap_errors);
+ esc->ssl_errors, mmap_errors);
pthread_mutex_destroy(&esc->mutex);
free(esc);
return ret;
|
index.md: update the Espressif references to reflect readme.md | @@ -16,7 +16,7 @@ Currently MCUboot works with the following operating systems and SoCs:
- [Apache NuttX](https://nuttx.apache.org/)
- [RIOT](https://www.riot-os.org/)
- [Mbed OS](https://os.mbed.com/)
-- [Espressif IDF](https://idf.espressif.com/)
+- [Espressif](https://www.espressif.com/)
- [Cypress/Infineon](https://www.cypress.com/)
RIOT is supported only as a boot target. We will accept any new port
@@ -44,7 +44,7 @@ The MCUboot documentation is composed of the following pages:
- [Apache NuttX](readme-nuttx.md)
- [RIOT](readme-riot.md)
- [Mbed OS](readme-mbed.md)
- - [Espressif IDF](readme-espressif.md)
+ - [Espressif](docs/readme-espressif.md)
- [Cypress/Infineon](../boot/cypress/readme.md)
- [Simulator](../sim/README.rst)
- Testing
|
config tool: update cpu affinity check algorithm
pcpu in pre-launched RTVM cannot share with other VM | </xs:annotation>
</xs:assert>
- <xs:assert test="every $pcpu in /acrn-config/vm[load_order = 'PRE_LAUNCHED_VM']//cpu_affinity/pcpu_id satisfies
+ <xs:assert test="every $pcpu in /acrn-config/vm[load_order = 'PRE_LAUNCHED_VM' and vm_type = 'RTVM']//cpu_affinity/pcpu_id satisfies
count(/acrn-config/vm[@id != $pcpu/ancestor::vm//companion_vmid ]//cpu_affinity[pcpu_id = $pcpu]) <= 1">
<xs:annotation acrn:severity="error" acrn:report-on="//vm//cpu_affinity[pcpu_id = $pcpu]">
- <xs:documentation>Physical CPU {$pcpu} is assigned to pre-launched VM "{$pcpu/ancestor::vm/name}" and thus cannot be shared among multiple VMs. Look for, and probably remove any affinity assignments to {$pcpu} in these VM's settings: {//vm[cpu_affinity/pcpu_id = $pcpu]/name}.</xs:documentation>
+ <xs:documentation>Physical CPU {$pcpu} is assigned to pre-launched real-time VM (RTVM) "{$pcpu/ancestor::vm/name}" and thus cannot be shared among multiple VMs. Look for, and probably remove, any affinity assignments to {$pcpu} in this VM's settings: {//vm[cpu_affinity/pcpu_id = $pcpu]/name}.</xs:documentation>
</xs:annotation>
</xs:assert>
|
build: bump to v1.7.0 | @@ -3,7 +3,7 @@ project(fluent-bit)
# Fluent Bit Version
set(FLB_VERSION_MAJOR 1)
-set(FLB_VERSION_MINOR 6)
+set(FLB_VERSION_MINOR 7)
set(FLB_VERSION_PATCH 0)
set(FLB_VERSION_STR "${FLB_VERSION_MAJOR}.${FLB_VERSION_MINOR}.${FLB_VERSION_PATCH}")
|
Long word breaking algorithm | @@ -175,6 +175,28 @@ uint16_t lv_txt_get_next_line(const char * txt, const lv_font_t * font,
/*If the txt is too long then finish, this is the line end*/
if(cur_w > max_width) {
+ /* Continue searching for next breakable character to see if the next word will fit */
+ uint32_t i_tmp = i;
+ cur_w = 0;
+ while(txt[i_tmp] != 0) {
+ letter = lv_txt_encoded_next(txt, &i_tmp);
+ /*Check for new line chars*/
+ if(letter == '\n' || letter == '\r') {
+ uint32_t i_tmp2 = i;
+ uint32_t letter_next = lv_txt_encoded_next(txt, &i_tmp2);
+ if(letter == '\r' && letter_next == '\n') i = i_tmp2;
+ break;
+ }
+ else if (is_break_char(letter)) {
+ break;
+ }
+ lv_coord_t letter_width = lv_font_get_width(font, letter);
+ cur_w += letter_width;
+ if(cur_w > max_width) {
+ return i;
+ }
+ }
+
/*If this a break char then break here.*/
if(is_break_char(letter)) {
/* Now 'i' points to the next char because of txt_utf8_next()
|
Added single-line duplicate values check for ID
According to VCF 4.3 specifications, ID cannot have duplicate values, so added a new check for duplicate values. If any duplicated IDs are found in a single line (semicolon separated list), then an error is thrown and file is declared as invalid. | @@ -119,6 +119,14 @@ namespace ebi
throw new IdBodyError{line, "ID must not contain semicolons or whitespaces"};
}
}
+
+ std::map<std::string, int> counter;
+ for (auto id = ids.begin(); id != ids.end(); ++id) {
+ counter[*id]++;
+ if (counter[*id] >= 2) {
+ throw new IdBodyError{line, "ID must not have duplicate values"};
+ }
+ }
}
void Record::check_alternate_alleles() const
|
Add 6503 error for plugin error | @@ -521,6 +521,7 @@ The following standard Status Words are returned for all APDUs - some specific S
| *SW* | *Description*
| 6501 | TransactionType not supported
| 6502 | Output buffer too small for snprintf input
+| 6503 | Plugin error
| 6700 | Incorrect length
| 6982 | Security status not satisfied (Canceled by user)
| 6A80 | Invalid data
|
proc/process.c: initialize process' zombies | @@ -103,6 +103,7 @@ int proc_start(void (*initthr)(void *), void *arg, const char *path)
process->ports = NULL;*/
process->ports = NULL;
+ process->zombies = NULL;
process->sigpend = 0;
process->sigmask = 0;
@@ -322,6 +323,7 @@ int proc_vfork(void)
process->sigpend = 0;
process->sigmask = 0;
process->sighandler = NULL;
+ process->zombies = NULL;
// vm_mapCreate(&process->map, (void *)VADDR_MIN, process_common.kmap->start);
// vm_mapCopy(&parent->mapp, process->mapp);
|
Avoid error messages when subscribing before starting facil.io | @@ -579,24 +579,31 @@ Cluster Engine
/* Must subscribe channel. Failures are ignored. */
void pubsub_en_cluster_subscribe(const pubsub_engine_s *eng, FIOBJ channel,
uint8_t use_pattern) {
+ if (facil_is_running()) {
facil_cluster_send((use_pattern ? PUBSUB_FACIL_CLUSTER_PATTERN_SUB_FILTER
: PUBSUB_FACIL_CLUSTER_CHANNEL_SUB_FILTER),
channel, FIOBJ_INVALID);
+ }
(void)eng;
}
/* Must unsubscribe channel. Failures are ignored. */
void pubsub_en_cluster_unsubscribe(const pubsub_engine_s *eng, FIOBJ channel,
uint8_t use_pattern) {
- facil_cluster_send((use_pattern ? PUBSUB_FACIL_CLUSTER_PATTERN_UNSUB_FILTER
+ if (facil_is_running()) {
+ facil_cluster_send((use_pattern
+ ? PUBSUB_FACIL_CLUSTER_PATTERN_UNSUB_FILTER
: PUBSUB_FACIL_CLUSTER_CHANNEL_UNSUB_FILTER),
channel, FIOBJ_INVALID);
+ }
(void)eng;
}
/** Should return 0 on success and -1 on failure. */
int pubsub_en_cluster_publish(const pubsub_engine_s *eng, FIOBJ channel,
FIOBJ msg) {
+ if (facil_is_running()) {
facil_cluster_send(PUBSUB_FACIL_CLUSTER_CHANNEL_FILTER, channel, msg);
+ }
return PUBSUB_PROCESS_ENGINE->publish(PUBSUB_PROCESS_ENGINE, channel, msg);
(void)eng;
}
|
Solved minor bugs of the previous commit in python loader. | @@ -1998,7 +1998,7 @@ int py_loader_impl_load_from_file_relative(loader_impl_py py_impl, loader_impl_p
else
{
/* Stop loading if we found an exception like SyntaxError, continue if the file is not found */
- if (!py_loader_impl_import_exception(*exception))
+ if (*exception != NULL && !py_loader_impl_import_exception(*exception))
{
return 1;
}
@@ -2050,14 +2050,10 @@ loader_handle py_loader_impl_load_from_file(loader_impl impl, const loader_namin
}
/* Stop loading if we found an exception like SyntaxError, continue if the file is not found */
- if (!py_loader_impl_import_exception(exception))
+ if (exception != NULL && !py_loader_impl_import_exception(exception))
{
goto error_import_module;
}
- else
- {
- exception = NULL;
- }
if (PyErr_Occurred() != NULL)
{
@@ -2068,10 +2064,15 @@ loader_handle py_loader_impl_load_from_file(loader_impl impl, const loader_namin
if (result != 0 && py_loader_impl_load_from_module(py_impl, &py_handle->modules[iterator], paths[iterator], &exception) != 0)
{
/* Show error message if the module was not found */
- if (py_loader_impl_import_exception(exception))
+ if (exception != NULL && py_loader_impl_import_exception(exception))
{
log_write("metacall", LOG_LEVEL_ERROR, "Python Error: Module '%s' not found", paths[iterator]);
}
+ else
+ {
+ /* TODO: Print the error message of the exception */
+ log_write("metacall", LOG_LEVEL_ERROR, "Python Error: Exception raised while loading the module '%s' [%s]", paths[iterator], Py_TYPE(exception)->tp_name);
+ }
goto error_import_module;
}
|
[io] fix small bug in default simulation class and explode version | @@ -3083,7 +3083,7 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5):
osnspb._stepcounter = self._k
if self._run_options.get('explode_Newton_solve'):
- if(self._time_stepping_class == TimeStepping):
+ if(self._time_stepping == sk.TimeStepping):
self.log(self.explode_Newton_solve, with_timer,
before=False)(with_timer)
else:
|
Documentation for Ubuntu Bionic Packages. | @@ -23,6 +23,16 @@ For the following Linux distributions and package managers 0.8 packages are avai
For [OpenSUSE, CentOS, Fedora, RHEL and SLE](https://build.opensuse.org/package/show/home:bekun:devel/elektra)
Kai-Uwe Behrmann kindly provides packages [for download](http://software.opensuse.org/download.html?project=home%3Abekun%3Adevel&package=libelektra4).
+### Ubuntu-Bionic
+
+To use the Ubuntu-Bionic packages following steps need to be made:
+
+1. Run `sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D919CE8B27A64C16656FCA9FF1532673651F9C6C` to obtain the key.
+
+2. Add `deb [trusted=yes] https://ubuntu-bionic-repo.libelektra.org/ bionic main` into `/etc/apt/sources.list`
+
+3. `sudo apt-get update`
+
### Debian
To use the debian repository of the latest builds from master put following lines in
|
Fixed json file for 1.4.6 released. | },
"url": "https://github.com/ROBOTIS-GIT/OpenCR/releases/download/1.4.6/opencr_core_1.4.6.tar.bz2",
"archiveFileName": "opencr_core_1.4.6.tar.bz2",
- "checksum": "SHA-256:59F903350D0767BDB4C8575C43CB3EF5EB80D425B0FFE7210A0E4BF9138379A0",
- "size": "1961278",
+ "checksum": "SHA-256:B7E696AC7673925AD2C5CC82E43CEA87F0039A80CCF455A5F09058D3D1A35379",
+ "size": "1960242",
"help": {
"online": "http://emanual.robotis.com/docs/en/parts/controller/opencr10/"
},
|
build: add more src dirs for generate_json.py
Because file vpe.api is in src/vpp/api/
and memclnt.api is in src/vlibmemory/.
Also removed api_types, as iteration can be done over output_dir_map.
Type: fix
Fixes:
Ticket: | @@ -21,20 +21,23 @@ BASE_DIR = subprocess.check_output('git rev-parse --show-toplevel',
vppapigen_bin = pathlib.Path(
'%s/src/tools/vppapigen/vppapigen.py' % BASE_DIR).as_posix()
-api_types = ['vnet', 'plugins']
src_dir_depth = 3
output_path = pathlib.Path(
'%s/build-root/install-vpp-native/vpp/share/vpp/api/' % BASE_DIR)
output_path_debug = pathlib.Path(
'%s/build-root/install-vpp_debug-native/vpp/share/vpp/api/' % BASE_DIR)
-output_dir_map = {'vnet': 'core',
- 'plugins': 'plugins'}
+output_dir_map = {
+ 'plugins': 'plugins',
+ 'vlibmemory': 'core',
+ 'vnet': 'core',
+ 'vpp': 'core',
+}
def api_search_globs(src_dir):
globs = []
- for g in api_types:
+ for g in output_dir_map:
globs.extend(list(src_dir.glob('%s/**/*.api' % g)))
return globs
|
BugID:16944925:support pca10040e kv;dir mcu | @@ -48,9 +48,9 @@ OUTPUT_FORMAT ("elf32-littlearm", "elf32-bigarm", "elf32-littlearm")
MEMORY
{
- FLASH (rx) : ORIGIN = 0x00000000, LENGTH = 0x30000
- /* 0x7D000 ~ 0x7F000 is used for KV module temporary */
- BOOTLOADER_SETTINGS (rw) : ORIGIN = 0x0007F000, LENGTH = 0x1000
+ FLASH (rx) : ORIGIN = 0x00000000, LENGTH = 0x2E000
+ /* 0x2E000 ~ 0x2F000 is used for KV module temporary */
+ BOOTLOADER_SETTINGS (rw) : ORIGIN = 0x0002F000, LENGTH = 0x1000
RAM (rwx) : ORIGIN = 0x20000000, LENGTH = 0x6000
}
ENTRY(Reset_Handler)
|
block comment works | @@ -404,24 +404,59 @@ static bool isWord(char symbol) {return isLetter(symbol) || isNumber(symbol);}
#include <ctype.h>
-static inline bool isLineEnd(char c) {return c == '\n' || c == '\0';}
+static inline bool is_lineend(char c) {return c == '\n' || c == '\0';}
static void parse(const char* start, u8* color)
{
const char* ptr = start;
- const char* digitStart = NULL;
+ // const char* digitStart = NULL;
+ const char* blockCommentStart = NULL;
const char* singleCommentStart = NULL;
- static const char Comment[] = "--";
+ static const char Comment = '-';
+ static const char BlockCommentStart[] = "--[[";
+ static const char BlockCommentEnd[] = "]]";
+
+ enum{BlockCommentStartSize = sizeof BlockCommentStart - 1};
while(true)
{
char c = *ptr;
+ if(blockCommentStart)
+ {
+ const char* end = strstr(ptr, BlockCommentEnd);
+
+ if(end)
+ {
+ ptr = end = end + strlen(BlockCommentEnd);
+ }
+ else
+ {
+ ptr = end = blockCommentStart + strlen(blockCommentStart);
+ }
+
+ memset(color + (blockCommentStart - start), getConfig()->theme.code.comment, end - blockCommentStart);
+ blockCommentStart = NULL;
+ }
+ else
+ {
+ if(c == BlockCommentStart[0] && memcmp(ptr, BlockCommentStart, BlockCommentStartSize) == 0)
+ {
+ blockCommentStart = ptr;
+ ptr += BlockCommentStartSize;
+ continue;
+ }
+ else
+ {
+ // do other stuff
+ }
+ }
+
if(singleCommentStart)
{
- if(isLineEnd(c))
+ if(is_lineend(c))
{
memset(color + (singleCommentStart - start), getConfig()->theme.code.comment, ptr - singleCommentStart);
singleCommentStart = NULL;
@@ -429,7 +464,7 @@ static void parse(const char* start, u8* color)
}
else
{
- if(c == Comment[1] && ptr > start && *(ptr-1) == Comment[0])
+ if(c == Comment && ptr > start && *(ptr-1) == Comment)
{
singleCommentStart = ptr-1;
}
|
Now file will be correctly deleted after exit. | @@ -45,13 +45,14 @@ int xdag_free_all(void)
#include <errno.h>
#define MEM_PORTION ((size_t)1 << 25)
+#define TMPFILE_TEMPLATE "xdag-tmp-XXXXXX"
static int g_fd = -1;
static size_t g_pos = 0, g_fsize = 0, g_size = 0;
static void *g_mem;
static pthread_mutex_t g_mem_mutex = PTHREAD_MUTEX_INITIALIZER;
static char g_tmpfile_path[1024] = "";
-static char g_tmpname[64] = "xdag-tmp-XXXXXX";
+static char g_tmpfile_to_remove[1088];
void xdag_mem_tempfile_path(const char *tempfile_path)
{
@@ -60,8 +61,6 @@ void xdag_mem_tempfile_path(const char *tempfile_path)
int xdag_mem_init(size_t size)
{
- char tmpfilename[1024];
-
if (!size) {
return 0;
}
@@ -74,13 +73,13 @@ int xdag_mem_init(size_t size)
size |= MEM_PORTION - 1;
size++;
- printf("%s , %s\n",g_tmpfile_path, g_tmpname);
- sprintf(tmpfilename, "%s%s", g_tmpfile_path, g_tmpname);
- g_fd = mkstemp(tmpfilename);
+ sprintf(g_tmpfile_to_remove, "%s%s", g_tmpfile_path, TMPFILE_TEMPLATE);
+ g_fd = mkstemp(g_tmpfile_to_remove);
if (g_fd < 0) {
- xdag_fatal("Unable to create temporary file %s errno:%d", tmpfilename, errno);
+ xdag_fatal("Unable to create temporary file %s errno:%d", g_tmpfile_to_remove, errno);
return -1;
}
+ printf("Temporary file created: %s\n", g_tmpfile_to_remove);
g_mem = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, g_fd, 0);
if (g_mem == MAP_FAILED) {
@@ -139,7 +138,7 @@ void xdag_mem_finish(void)
munmap(g_mem, g_size);
close(g_fd);
- remove(g_tmpname);
+ remove(g_tmpfile_to_remove);
}
int xdag_free_all(void)
|
Add testcases for EVP_PKEY_get1_encoded_public_key | @@ -897,6 +897,7 @@ static int test_EC_priv_pub(void)
EVP_PKEY *params_and_keypair = NULL;
BIGNUM *priv = NULL;
int ret = 0;
+ unsigned char *encoded = NULL;
/*
* Setup the parameters for our pkey object. For our purposes they don't
@@ -1005,6 +1006,17 @@ static int test_EC_priv_pub(void)
|| !TEST_int_gt(EVP_PKEY_eq(params_and_keypair, params_and_priv), 0))
goto err;
+ /* Positive and negative testcase for EVP_PKEY_get1_encoded_public_key */
+ if (!TEST_int_gt(EVP_PKEY_get1_encoded_public_key(params_and_pub, &encoded), 0))
+ goto err;
+ OPENSSL_free(encoded);
+ encoded = NULL;
+ if (!TEST_int_eq(EVP_PKEY_get1_encoded_public_key(just_params, &encoded), 0)) {
+ OPENSSL_free(encoded);
+ encoded = NULL;
+ goto err;
+ }
+
ret = 1;
err:
OSSL_PARAM_free(params);
|
[ci] Fix Halide's library install directory for GitLab | @@ -189,10 +189,7 @@ apps-halide:
- git submodule update --init --recursive -- hardware/deps/*
- make patch-hw
- cd hardware
- - |
- for APP in ${APPS}; do
- app=${APP} make simc
- done
+ - app=halide-2d_convolution make simc
variables:
COMPILER: "llvm"
APP: "halide-2d_convolution"
|
windows/msvc: Use same default python command as core. | <PyQstrDefs>$(PySrcDir)qstrdefs.h</PyQstrDefs>
<QstrDefsCollected>$(DestDir)qstrdefscollected.h</QstrDefsCollected>
<QstrGen>$(DestDir)qstrdefs.generated.h</QstrGen>
+ <PyPython Condition="'$(PyPython)' == ''">$(MICROPY_CPYTHON3)</PyPython>
<PyPython Condition="'$(PyPython)' == ''">python</PyPython>
<CLToolExe Condition="'$(CLToolExe)' == ''">cl.exe</CLToolExe>
<PyClTool>$([System.IO.Path]::Combine(`$(CLToolPath)`, `$(CLToolExe)`))</PyClTool>
|
[mod_magnet] ignore 1xx return in response start
ignore 1xx return code from lua in response start phase.
Since response is about to start, send any added/modified headers
along with final response. (If we did not ignore, then 1xx return
code from lua would incorrectly overwrite the final response status.) | @@ -2385,7 +2385,8 @@ static handler_t magnet_attract(request_st * const r, plugin_data * const p, scr
}
result = HANDLER_FINISHED;
- } else if (lua_return_value >= 100) {
+ } else if (lua_return_value >= 100 && p->conf.stage != -1) {
+ /*(skip for response-start; send response as-is w/ added headers)*/
/*(custom lua code should not return 101 Switching Protocols)*/
r->http_status = lua_return_value;
result = http_response_send_1xx(r)
|
Add --clean and --clean-only options to test.pl. | @@ -74,6 +74,8 @@ test.pl [options]
--run execute only the specified test run
--dry-run show only the tests that would be executed but don't execute them
--no-cleanup don't cleanup after the last test is complete - useful for debugging
+ --clean clean working and result paths for a completely fresh build
+ --clean-only execute --clean and exit
--pg-version version of postgres to test (all, defaults to minimal)
--log-force force overwrite of current test log files
--build-only build the binary (and honor --build-package) but don't run tests
@@ -126,6 +128,8 @@ test.pl [options]
####################################################################################################################################
# Command line parameters
####################################################################################################################################
+my $bClean;
+my $bCleanOnly;
my $strLogLevel = lc(INFO);
my $strLogLevelTest = lc(OFF);
my $strLogLevelTestFile = lc(TRACE);
@@ -179,6 +183,8 @@ my @cmdOptions = @ARGV;
GetOptions ('q|quiet' => \$bQuiet,
'version' => \$bVersion,
'help' => \$bHelp,
+ 'clean' => \$bClean,
+ 'clean-only' => \$bCleanOnly,
'pgsql-bin=s' => \$strPgSqlBin,
'test-path=s' => \$strTestPath,
'log-level=s' => \$strLogLevel,
@@ -368,6 +374,27 @@ eval
my $oStorageBackRest = new pgBackRestTest::Common::Storage(
$strBackRestBase, new pgBackRestTest::Common::StoragePosix({bFileSync => false, bPathSync => false}));
+ ################################################################################################################################
+ # Clean working and result paths
+ ################################################################################################################################
+ if ($bClean || $bCleanOnly)
+ {
+ &log(INFO, "clean working (${strTestPath}) and result (${strBackRestBase}/test/result) paths");
+
+ if ($oStorageTest->pathExists($strTestPath))
+ {
+ executeTest("find ${strTestPath} -mindepth 1 -print0 | xargs -0 rm -rf");
+ }
+
+ if ($oStorageTest->pathExists("${strBackRestBase}/test/result"))
+ {
+ executeTest("find ${strBackRestBase}/test/result -mindepth 1 -print0 | xargs -0 rm -rf");
+ }
+
+ # Exit when clean-only
+ exit 0 if $bCleanOnly;
+ }
+
################################################################################################################################
# Build Docker containers
################################################################################################################################
|
sysrepo DOC wrong argument name | @@ -1124,7 +1124,7 @@ typedef enum sr_ev_notif_type_e {
* @param[in] private_data Private context opaque to sysrepo,
* as passed to ::sr_event_notif_subscribe call.
*/
-typedef void (*sr_event_notif_cb)(sr_session_ctx_t *session, const sr_ev_notif_type_t notif_type, const char *xpath,
+typedef void (*sr_event_notif_cb)(sr_session_ctx_t *session, const sr_ev_notif_type_t notif_type, const char *path,
const sr_val_t *values, const size_t values_cnt, time_t timestamp, void *private_data);
/**
|
Guybrush: Correct zephyr power interrupts
Correct the S5 PGOOD interrupt to go to the baseboard function to
process it.
BRANCH=None
TEST=builds, continues to "boot" to S0 | @@ -49,7 +49,7 @@ enum power_signal {
power_signal_interrupt) \
GPIO_INT(GPIO_PCH_SLP_S0_L, GPIO_INT_EDGE_BOTH, \
power_signal_interrupt) \
- GPIO_INT(GPIO_S5_PGOOD, GPIO_INT_EDGE_BOTH, extpower_interrupt) \
+ GPIO_INT(GPIO_S5_PGOOD, GPIO_INT_EDGE_BOTH, baseboard_en_pwr_s0) \
GPIO_INT(GPIO_S0_PGOOD, GPIO_INT_EDGE_BOTH, power_signal_interrupt) \
GPIO_INT(GPIO_EC_PCORE_INT_ODL, GPIO_INT_EDGE_BOTH, \
power_signal_interrupt) \
|
bn: fix BN_DEBUG + BN_DEBUG_RAND support
Couple of updates to make this code work properly again;
* use OPENSSL_assert() instead of assert() (and #include <assert.h>)
* the circular-dependency-avoidance uses RAND_bytes() (not pseudo) | @@ -146,13 +146,10 @@ extern "C" {
# ifdef BN_DEBUG
-/* We only need assert() when debugging */
-# include <assert.h>
-
# ifdef BN_DEBUG_RAND
/* To avoid "make update" cvs wars due to BN_DEBUG, use some tricks */
-# ifndef RAND_pseudo_bytes
-int RAND_pseudo_bytes(unsigned char *buf, int num);
+# ifndef RAND_bytes
+int RAND_bytes(unsigned char *buf, int num);
# define BN_DEBUG_TRIX
# endif
# define bn_pollute(a) \
@@ -171,7 +168,7 @@ int RAND_pseudo_bytes(unsigned char *buf, int num);
} \
} while(0)
# ifdef BN_DEBUG_TRIX
-# undef RAND_pseudo_bytes
+# undef RAND_bytes
# endif
# else
# define bn_pollute(a)
@@ -180,7 +177,7 @@ int RAND_pseudo_bytes(unsigned char *buf, int num);
do { \
const BIGNUM *_bnum2 = (a); \
if (_bnum2 != NULL) { \
- assert((_bnum2->top == 0) || \
+ OPENSSL_assert((_bnum2->top == 0) || \
(_bnum2->d[_bnum2->top - 1] != 0)); \
bn_pollute(_bnum2); \
} \
@@ -192,7 +189,8 @@ int RAND_pseudo_bytes(unsigned char *buf, int num);
# define bn_wcheck_size(bn, words) \
do { \
const BIGNUM *_bnum2 = (bn); \
- assert((words) <= (_bnum2)->dmax && (words) >= (_bnum2)->top); \
+ OPENSSL_assert((words) <= (_bnum2)->dmax && \
+ (words) >= (_bnum2)->top); \
/* avoid unused variable warning with NDEBUG */ \
(void)(_bnum2); \
} while(0)
|
CAN flasher: add timeout to CanHandle on transmit
add timeout to canhandle | @@ -8,11 +8,17 @@ class CanHandle(object):
self.bus = bus
def transact(self, dat):
- self.p.isotp_send(1, dat, self.bus, recvaddr=2)
-
def _handle_timeout(signum, frame):
- # will happen on reset
- raise Exception("timeout")
+ # will happen on reset or can error
+ raise TimeoutError
+
+ signal.signal(signal.SIGALRM, _handle_timeout)
+ signal.alarm(1)
+
+ try:
+ self.p.isotp_send(1, dat, self.bus, recvaddr=2)
+ finally:
+ signal.alarm(0)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(1)
|
forget to assign spd_index to config | @@ -78,6 +78,8 @@ ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id,
vnet_feature_enable_disable ("ip6-output", "ipsec-output-ip6", sw_if_index,
is_add, 0, 0);
+ config.spd_index = spd_index;
+
/* enable IPsec on RX */
vnet_feature_enable_disable ("ip4-unicast", "ipsec-input-ip4", sw_if_index,
is_add, &config, sizeof (config));
|
GA: Build shared/static lib with float/double. | -name: CMake
+name: CI
on: [push]
@@ -11,6 +11,8 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macOS-latest]
+ shared: [On, Off]
+ float: [On, Off]
steps:
- uses: actions/checkout@v2
@@ -24,7 +26,7 @@ jobs:
# Note the current convention is to use the -S and -B options here to specify source
# and build directories, but this is only available with CMake 3.13 and higher.
# The CMake binaries on the Github Actions machines are (as of this writing) 3.12
- run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE
+ run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DBUILD_SHARED_LIBS=${{ matrix.shared }} -DTINYSPLINE_FLOAT_PRECISION=${{ matrix.float }}
- name: Build
working-directory: ${{runner.workspace}}/build
|
kernel/binary_manager: Fix a bug about the length of binary file path
A path of binary file has a BINARY_PATH_LEN length so it should be fixed to BINARY_PATH_LEN. | @@ -69,7 +69,7 @@ static int binary_manager_clear_binfile(int bin_idx)
/* Remove binary file which is not running */
if (DIRENT_ISFILE(entryp->d_type) && !strncmp(entryp->d_name, bin_name, name_len) \
&& entryp->d_name[name_len] == '_' && strncmp(entryp->d_name, running_file, strlen(running_file))) {
- snprintf(filepath, NAME_MAX, "%s/%s", BINARY_DIR_PATH, entryp->d_name);
+ snprintf(filepath, BINARY_PATH_LEN, "%s/%s", BINARY_DIR_PATH, entryp->d_name);
bmvdbg("unlink %s\n", entryp->d_name);
unlink(filepath);
}
|
Fix memory leak when a menu is defined multiple times. | @@ -136,7 +136,7 @@ void SetRootMenu(const char *indexes, Menu *m)
unsigned y;
char found = 0;
for(y = 0; y < ROOT_MENU_COUNT; y++) {
- if(x != y && rootMenu[y] == rootMenu[x]) {
+ if(index != y && rootMenu[y] == rootMenu[index]) {
found = 1;
break;
}
|
Fix show_incremental_sort_info with force_parallel_mode
When executed with force_parallel_mode=regress, the function was exiting
too early and thus failed to print the worker stats. Fixed by making it
more like show_sort_info.
Discussion: | @@ -2880,9 +2880,11 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
fullsortGroupInfo = &incrsortstate->incsort_info.fullsortGroupInfo;
- if (!(es->analyze && fullsortGroupInfo->groupCount > 0))
+ if (!es->analyze)
return;
+ if (fullsortGroupInfo->groupCount > 0)
+ {
show_incremental_sort_group_info(fullsortGroupInfo, "Full-sort", true, es);
prefixsortGroupInfo = &incrsortstate->incsort_info.prefixsortGroupInfo;
if (prefixsortGroupInfo->groupCount > 0)
@@ -2893,6 +2895,7 @@ show_incremental_sort_info(IncrementalSortState *incrsortstate,
}
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, "\n");
+ }
if (incrsortstate->shared_info != NULL)
{
|
ut: fix touch sensor denoise ci fail | @@ -1412,13 +1412,13 @@ TEST_CASE("Touch Sensor denoise test (cap, level)", "[touch]")
TEST_ESP_OK( test_touch_denoise(val_2, NULL, TOUCH_PAD_DENOISE_BIT8, TOUCH_PAD_DENOISE_CAP_L0) );
TEST_ESP_OK( test_touch_denoise(val_3, NULL, TOUCH_PAD_DENOISE_BIT12, TOUCH_PAD_DENOISE_CAP_L0) );
- if ((denoise_val[0] & 0xFF) < (0xFF - 10) && (denoise_val[0] & 0xFF) > 10) {
+ /*`TOUCH_PAD_DENOISE_BIT4` has a small denoise value, which may be smaller than the noise amplitude of the touch reading, so no verification for it.*/
+ if ((((denoise_val[0] >> 4) & 0xF) != 0) && (((denoise_val[0] >> 8) & 0xF) != 0)) {
for (int i = 0; i < TEST_TOUCH_CHANNEL; i++) {
- TEST_ASSERT_GREATER_OR_EQUAL(val_3[i], val_2[i]);
- TEST_ASSERT_GREATER_OR_EQUAL(val_2[i], val_1[i]);
+ TEST_ASSERT_GREATER_THAN(val_3[i], val_2[i]);
}
} else {
- /* If the value of (denoise_val[0] & 0xFF) is approximately 0,
+ /* If the value of denoise is approximately 0,
The difference between touch reading is very small. Should skip value test. */
ESP_LOGI(TAG, "denoise value is %d", denoise_val[0]);
}
|
fix the sync bar logic | @@ -843,7 +843,7 @@ void BitcoinGUI::setNumBlocks(int count, int nTotalBlocks)
text = tr("%n day(s) ago","",secs/(60*60*24));
}
- if (IsInitialBlockDownload() || count < nTotalBlocks-30 || secs > 30*30) // if we're in initial download or more than 30 blocks behind
+ if (IsInitialBlockDownload() || count < nTotalBlocks-30) // if we're in initial download or more than 30 blocks behind
{
int nRemainingBlocks = nTotalBlocks - count;
float nPercentageDone = count / (nTotalBlocks * 0.01f);
|
Added TLS specific src string for LogStream integ. | @@ -2467,17 +2467,25 @@ doPayload()
net_info *net = &pinfo->net;
size_t hlen = 1024;
char pay[hlen];
- char *srcstr = NULL, rx[]="rx", tx[]="tx", none[]="none";
+ char *srcstr = NULL,
+ netrx[]="netrx", nettx[]="nettx", none[]="none",
+ tlsrx[]="tlsrx", tlstx[]="tlstx";
switch (pinfo->src) {
case NETTX:
+ srcstr = nettx;
+ break;
+
case TLSTX:
- srcstr = tx;
+ srcstr = tlstx;
break;
case NETRX:
+ srcstr = netrx;
+ break;
+
case TLSRX:
- srcstr = rx;
+ srcstr = tlsrx;
break;
default:
|
improve debugging message | @@ -6448,10 +6448,15 @@ static size_t fio_mem_block_count;
"(fio) Total memory blocks allocated before cleanup %zu\n" \
" Maximum memory blocks allocated at a single time %zu\n", \
fio_mem_block_count, fio_mem_block_count_max)
+#define FIO_MEMORY_PRINT_BLOCK_STAT_END() \
+ FIO_LOG_INFO("(fio) Total memory blocks allocated " \
+ "after cleanup (possible leak) %zu\n", \
+ fio_mem_block_count)
#else
#define FIO_MEMORY_ON_BLOCK_ALLOC()
#define FIO_MEMORY_ON_BLOCK_FREE()
#define FIO_MEMORY_PRINT_BLOCK_STAT()
+#define FIO_MEMORY_PRINT_BLOCK_STAT_END()
#endif
/* *****************************************************************************
Per-CPU Arena management
@@ -6722,7 +6727,7 @@ static void fio_mem_destroy(void) {
if (!memory.forked && fio_ls_embd_any(&memory.available)) {
FIO_LOG_WARNING("facil.io detected memory traces remaining after cleanup"
" - memory leak?");
- FIO_MEMORY_PRINT_BLOCK_STAT();
+ FIO_MEMORY_PRINT_BLOCK_STAT_END();
size_t count = 0;
FIO_LS_EMBD_FOR(&memory.available, node) { ++count; }
FIO_LOG_DEBUG("Memory pool size: %zu (%zu blocks per allocation).", count,
|
fix BeastChakraType | @@ -91,8 +91,8 @@ namespace FFXIVClientStructs.FFXIV.Client.Game.Gauge
{
None = 0,
Coeurl = 1,
- Raptor = 2,
- OpoOpo = 3,
+ OpoOpo = 2,
+ Raptor = 3,
}
[Flags]
|
cirrus: bump Fedora to version 36 | -FROM fedora:35
+FROM fedora:36
RUN dnf upgrade --refresh -y && dnf install -y \
augeas-devel \
@@ -16,7 +16,7 @@ RUN dnf upgrade --refresh -y && dnf install -y \
git \
glib2 \
gpgme-devel \
- java-11-openjdk-devel \
+ java-17-openjdk-devel \
jna \
libasan \
libcurl-devel \
|
fix typo in specification of VmaAlignDown | @@ -3341,7 +3341,7 @@ static inline T VmaAlignUp(T val, T alignment)
return (val + alignment - 1) & ~(alignment - 1);
}
-// Aligns given value down to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 8.
+// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
|
Fixed VRAM organization for Bitmap mode and used new mul/div functions | @@ -146,6 +146,9 @@ void BMP_end()
// try to pack memory free blocks (before to avoid memory fragmentation)
MEM_pack();
+ // restore 64x32 cells sized plane
+ VDP_setPlaneSize(64, 32, TRUE);
+
// we can re enable ints
// FIXME: for some reason disabling interrupts generally break BMP init :-/
// SYS_enableInts();
@@ -176,6 +179,11 @@ void BMP_reset()
// need 64x64 cells sized plane
VDP_setPlaneSize(64, 64, TRUE);
+ // we need to tweak VRAM setup as Bitmap mode may require double buffering in VRAM
+ VDP_setSpriteListAddress(0xBC00);
+ VDP_setHScrollTableAddress(0xB800);
+ // so window plan can be use only on first top half of the screen
+
// clear plane (complete tilemap)
VDP_clearPlane(bmp_plan, TRUE);
@@ -1111,7 +1119,7 @@ u16 BMP_drawBitmap(const Bitmap *bitmap, u16 x, u16 y, u16 loadpal)
MEM_free(b);
}
else
- BMP_drawBitmapData((u8*) FAR_SAFE(bitmap->image, (w * h) / 2), x, y, w, h, w >> 1);
+ BMP_drawBitmapData((u8*) FAR_SAFE(bitmap->image, mulu(w , h) / 2), x, y, w, h, w >> 1);
// load the palette
if (loadpal)
@@ -1143,7 +1151,7 @@ u16 BMP_drawBitmapScaled(const Bitmap *bitmap, u16 x, u16 y, u16 w, u16 h, u16 l
MEM_free(b);
}
else
- BMP_scale(FAR_SAFE(bitmap->image, (w * h) / 2), bmp_wb, bmp_h, bmp_wb, BMP_getWritePointer(x, y), w >> 1, h, BMP_PITCH);
+ BMP_scale(FAR_SAFE(bitmap->image, mulu(w, h) / 2), bmp_wb, bmp_h, bmp_wb, BMP_getWritePointer(x, y), w >> 1, h, BMP_PITCH);
// load the palette
if (loadpal)
@@ -1179,10 +1187,10 @@ void BMP_getBitmapPalette(const Bitmap *bitmap, u16 *dest)
// works only for 8 bits image (x doubled)
void BMP_scale(const u8 *src_buf, u16 src_wb, u16 src_h, u16 src_pitch, u8 *dst_buf, u16 dst_wb, u16 dst_h, u16 dst_pitch)
{
- const s32 yd = ((src_h / dst_h) * src_wb) - src_wb;
- const u16 yr = src_h % dst_h;
- const s32 xd = src_wb / dst_wb;
- const u16 xr = src_wb % dst_wb;
+ const s32 yd = mulu(divu(src_h, dst_h), src_wb) - src_wb;
+ const u16 yr = modu(src_h, dst_h);
+ const u32 xd = divu(src_wb, dst_wb);
+ const u16 xr = modu(src_wb, dst_wb);
const u32 adj_src = src_pitch - src_wb;
const u32 adj_dst = dst_pitch - dst_wb;
|
fix missing dependencies for .symabis command | @@ -4785,7 +4785,8 @@ macro _GO_GEN_COVER_GO(GO_FILE, VAR_ID) {
}
macro _GO_COMPILE_SYMABIS(ASM_FILES...) {
- .CMD=${hide:GO_FAKEID} $GO_TOOLS_ROOT/pkg/tool/${GO_HOST_OS}_${GO_HOST_ARCH}/asm -trimpath $ARCADIA_BUILD_ROOT ${hide;input:"build/scripts/go_fake_include/go_asm.h"} -I $ARCADIA_ROOT/build/scripts/go_fake_include -I $GO_TOOLS_ROOT/pkg/include -D GOOS_${GO_TARG_OS} -D GOARCH_${GO_TARG_ARCH} -gensymabis -o ${output:"gen.symabis"} ${input:ASM_FILES} ${kv;hide:"p go"} ${kv;hide:"pc light-blue"} ${kv;hide:"show_out"}
+ .CMD=${hide:GO_FAKEID} $GO_TOOLS_ROOT/pkg/tool/${GO_HOST_OS}_${GO_HOST_ARCH}/asm -trimpath $ARCADIA_BUILD_ROOT ${pre=-I :INCLUDE} -I $GO_TOOLS_ROOT/pkg/include -D GOOS_${GO_TARG_OS} -D GOARCH_${GO_TARG_ARCH} -gensymabis -o ${output:"gen.symabis"} ${input:ASM_FILES} ${kv;hide:"p go"} ${kv;hide:"pc light-blue"} ${kv;hide:"show_out"}
+ .ADDINCL=build/scripts/go_fake_include
}
macro _GO_COMPILE_CGO1(NAME, FLAGS[], FILES...) {
@@ -4967,6 +4968,8 @@ module _GO_BASE_UNIT: _BASE_UNIT {
GO_COMPILE_FLAGS_VALUE += -race
GO_LINK_FLAGS_VALUE += -race
}
+
+ ADDINCL(${GOSTD}/runtime)
}
### @usage: GO_LIBRARY([name])
|
stm32: implement reboot wait-ext
This was missed on stm32, but it is helpful for servod
to work reliably.
TEST=it waits 10 sec for external reboot. | @@ -398,6 +398,15 @@ void system_reset(int flags)
while (1)
;
} else {
+ if (flags & SYSTEM_RESET_WAIT_EXT) {
+ int i;
+
+ /* Wait 10 seconds for external reset */
+ for (i = 0; i < 1000; i++) {
+ watchdog_reload();
+ udelay(10000);
+ }
+ }
CPU_NVIC_APINT = 0x05fa0004;
}
|
Clean up the code
remove redundant local buffer
fix code style | @@ -151,7 +151,6 @@ static int ssl_tls13_generate_and_write_ecdh_key_exchange(
psa_status_t status = PSA_ERROR_GENERIC_ERROR;
int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE;
psa_key_attributes_t key_attributes;
- unsigned char own_pubkey[MBEDTLS_PSA_MAX_EC_PUBKEY_LENGTH];
size_t own_pubkey_len;
mbedtls_ssl_handshake_params *handshake = ssl->handshake;
size_t ecdh_bits = 0;
@@ -186,7 +185,7 @@ static int ssl_tls13_generate_and_write_ecdh_key_exchange(
/* Export the public part of the ECDH private key from PSA. */
status = psa_export_public_key( handshake->ecdh_psa_privkey,
- own_pubkey, sizeof( own_pubkey ),
+ buf, (size_t)( end - buf ),
&own_pubkey_len );
if( status != PSA_SUCCESS )
{
@@ -204,8 +203,6 @@ static int ssl_tls13_generate_and_write_ecdh_key_exchange(
*out_len = own_pubkey_len;
- memcpy( buf, &own_pubkey, own_pubkey_len );
-
return( 0 );
}
#endif /* MBEDTLS_ECDH_C */
|
Fix obsolete comment in xlogutils.c.
Oversight in commit | @@ -260,10 +260,9 @@ XLogCheckInvalidPages(void)
* determines what needs to be done to redo the changes to it. If the WAL
* record includes a full-page image of the page, it is restored.
*
- * 'lsn' is the LSN of the record being replayed. It is compared with the
- * page's LSN to determine if the record has already been replayed.
- * 'block_id' is the ID number the block was registered with, when the WAL
- * record was created.
+ * 'record.EndRecPtr' is compared to the page's LSN to determine if the record
+ * has already been replayed. 'block_id' is the ID number the block was
+ * registered with, when the WAL record was created.
*
* Returns one of the following:
*
|
IAS: fix is_lc. | @@ -230,7 +230,6 @@ static unsigned int ias_choose_core(struct ias_data *sd, bool lc)
static int ias_add_kthread(struct proc *p)
{
struct ias_data *sd = (struct ias_data *)p->policy_data;
- bool is_lc = sd->threads_active < sd->threads_guaranteed;
unsigned int core;
/* check if we're constrained by the thread limit */
@@ -238,7 +237,7 @@ static int ias_add_kthread(struct proc *p)
return -ENOENT;
/* choose the best core to run the process on */
- core = ias_choose_core(sd, is_lc);
+ core = ias_choose_core(sd, is_lc(sd));
if (core == NCPU)
return -ENOENT;
|
do not restore old SIOCSIWMODE if --ignore_warning is used | @@ -360,7 +360,10 @@ if(fd_socket > 0)
memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, interfacename, IFNAMSIZ -1);
ioctl(fd_socket, SIOCSIFFLAGS, &ifr);
+ if(ignorewarningflag == false)
+ {
ioctl(fd_socket, SIOCSIWMODE, &iwr_old);
+ }
ioctl(fd_socket, SIOCSIFFLAGS, &ifr_old);
if(close(fd_socket) != 0)
{
|
[chainmaker][#675] add BOAT_RESULT BoatChainmakerWalletSetHostName(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *host_name_ptr) function declaration
static BOAT_RESULT BoatChainmakerWalletSetOrgId(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *org_id_ptr);
static BOAT_RESULT BoatChainmakerWalletSetChainId(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *chain_id_ptr);
+static BOAT_RESULT BoatChainmakerWalletSetHostName(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *host_name_ptr);
+
BUINT8 get_fibon_data(BUINT8 n) {
|
Fix a mem leak in evp_pkey_copy_downgraded()
If we get a failure during evp_pkey_copy_downgraded() and on entry *dest
was NULL then we leak the EVP_PKEY that was automatically allocated and
stored in *dest.
Found due to this comment: | @@ -1973,6 +1973,8 @@ void *evp_pkey_export_to_provider(EVP_PKEY *pk, OSSL_LIB_CTX *libctx,
#ifndef FIPS_MODULE
int evp_pkey_copy_downgraded(EVP_PKEY **dest, const EVP_PKEY *src)
{
+ EVP_PKEY *allocpkey = NULL;
+
if (!ossl_assert(dest != NULL))
return 0;
@@ -2003,7 +2005,7 @@ int evp_pkey_copy_downgraded(EVP_PKEY **dest, const EVP_PKEY *src)
/* Make sure we have a clean slate to copy into */
if (*dest == NULL) {
- *dest = EVP_PKEY_new();
+ allocpkey = *dest = EVP_PKEY_new();
if (*dest == NULL) {
ERR_raise(ERR_LIB_EVP, ERR_R_MALLOC_FAILURE);
return 0;
@@ -2052,6 +2054,10 @@ int evp_pkey_copy_downgraded(EVP_PKEY **dest, const EVP_PKEY *src)
}
}
+ if (allocpkey != NULL) {
+ EVP_PKEY_free(allocpkey);
+ *dest = NULL;
+ }
return 0;
}
|
sse: fix native alias for _mm_cvttss_si32
Fixes | @@ -1629,7 +1629,7 @@ simde_mm_cvtt_ss2si (simde__m128 a) {
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
-# define _mm_cvttss_si32(a) simde_mm_cvttss_si32((a))
+# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
|
interface: show if tx queue is shared
Type: improvement | @@ -212,14 +212,16 @@ format_vnet_hw_interface (u8 * s, va_list * args)
if (vec_len (hi->tx_queue_indices))
{
s = format (s, "\n%UTX Queues:", format_white_space, indent + 2);
- s = format (s, "\n%U%-6s%-15s", format_white_space, indent + 4, "queue",
- "thread(s)");
+ s = format (s, "\n%U%-6s%-7s%-15s", format_white_space, indent + 4,
+ "queue", "shared", "thread(s)");
for (int i = 0; i < vec_len (hi->tx_queue_indices); i++)
{
vnet_hw_if_tx_queue_t *txq;
txq = vnet_hw_if_get_tx_queue (vnm, hi->tx_queue_indices[i]);
- s = format (s, "\n%U%-6u%U", format_white_space, indent + 4,
- txq->queue_id, format_bitmap_list, txq->threads);
+ s = format (
+ s, "\n%U%-6u%-7s%U", format_white_space, indent + 4, txq->queue_id,
+ clib_bitmap_count_set_bits (txq->threads) > 1 ? "yes" : "no",
+ format_bitmap_list, txq->threads);
}
}
|
capp: Add lid definition for P9 DD-2.2
Update fsp_lid_map to include CAPP ucode lid for phb4-chipid ==
0x202d1 that corresponds to P9 DD-2.2 chip. | @@ -2362,6 +2362,7 @@ int fsp_fetch_data_queue(uint8_t flags, uint16_t id, uint32_t sub_id,
#define CAPP_IDX_NIMBUS_DD10 0x100d1
#define CAPP_IDX_NIMBUS_DD20 0x200d1
#define CAPP_IDX_NIMBUS_DD21 0x201d1
+#define CAPP_IDX_NIMBUS_DD22 0x202d1
static struct {
enum resource_id id;
@@ -2378,6 +2379,7 @@ static struct {
{ RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD10, 0x80a02006 },
{ RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD20, 0x80a02007 },
{ RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD21, 0x80a02007 },
+ { RESOURCE_ID_CAPP, CAPP_IDX_NIMBUS_DD22, 0x80a02007 },
};
static void fsp_start_fetching_next_lid(void);
|
when doing SetToSelection, skip any selected items
that have the asset as a DAG parent, since that resulted in a DG loop and weird errors.
proc int
validateInputObjects(string $objects[],
+ string $targetAsset,
string $validObjects[],
string $multiObjects[],
string $invalidObjects[])
@@ -37,6 +38,19 @@ validateInputObjects(string $objects[],
continue;
}
+ // if object is the target asset or below the target asset skip it
+ // sure, the expected workflow was to select the inputs and maybe the asset last
+ // but it's easy enough to select the asset outputs or the asset by mistake.
+ // This does assume that the asset outputs are still parented under the asset
+ // where we left them (history assets are connected by a different code path)
+
+ if($object == $targetAsset) {
+ continue;
+ }
+ if(isParentOf($targetAsset, $object)) {
+ continue;
+ }
+
if(`objectType -isAType "houdiniAsset" $object`)
{
$validObjects[size($validObjects)] = $object;
@@ -393,7 +407,8 @@ houdiniEngine_setAssetInput(string $inputAttr, string $objects[])
string $validObjects[];
string $multiObjects[];
string $invalidObjects[];
- int $validObjectsCount = validateInputObjects($objects,
+ string $assetNodeName = plugNode($inputAttr);
+ int $validObjectsCount = validateInputObjects($objects, $assetNodeName,
$validObjects, $multiObjects, $invalidObjects);
if(size($invalidObjects))
@@ -409,7 +424,6 @@ houdiniEngine_setAssetInput(string $inputAttr, string $objects[])
return 1;
}
- string $assetNodeName = plugNode($inputAttr);
int $connectGeoForAsset = `getAttr ($assetNodeName + ".connectGeoForAssetInputs")`;
string $sourceInputAttr[];
for($i = 0; $i < size($validObjects); $i++)
|
Fix data race on stats due to the logging thread being started before the stats are initialized | @@ -9536,8 +9536,8 @@ int main (int argc, char **argv) {
}
/* initialize other stuff */
- logger_init();
stats_init();
+ logger_init();
conn_init();
bool reuse_mem = false;
void *mem_base = NULL;
|
Fixing build error on Visual Studio 2010 | @@ -164,11 +164,12 @@ void errorLookup(int errcode, char *errmsg, int len);
int DLLEXPORT ENepanet(const char *f1, const char *f2, const char *f3, void (*pviewprog)(char *))
{
int errcode = 0;
+ EN_Project *p = NULL;
ERRCODE(EN_createproject(&_defaultModel));
ERRCODE(EN_open(_defaultModel, f1, f2, f3));
- EN_Project *p = (EN_Project*)(_defaultModel);
+ p = (EN_Project*)(_defaultModel);
p->viewprog = pviewprog;
if (p->out_files.Hydflag != USE) {
|
diff FEATURE check required meta when adding into diff | @@ -82,6 +82,21 @@ lyd_diff_add(const struct lyd_node *node, enum lyd_diff_op op, const char *orig_
assert(diff);
+ /* replace leaf always needs orig-default and orig-value */
+ assert((node->schema->nodetype != LYS_LEAF) || (op != LYD_DIFF_OP_REPLACE) || (orig_default && orig_value));
+
+ /* create on userord needs key/value */
+ assert((node->schema->nodetype != LYS_LIST) || !(node->schema->flags & LYS_ORDBY_USER) || (op != LYD_DIFF_OP_CREATE) ||
+ key);
+ assert((node->schema->nodetype != LYS_LEAFLIST) || !(node->schema->flags & LYS_ORDBY_USER) ||
+ (op != LYD_DIFF_OP_CREATE) || value);
+
+ /* move on userord needs both key and orig-key/value and orig-value */
+ assert((node->schema->nodetype != LYS_LIST) || !(node->schema->flags & LYS_ORDBY_USER) || (op != LYD_DIFF_OP_REPLACE) ||
+ (key && orig_key));
+ assert((node->schema->nodetype != LYS_LEAFLIST) || !(node->schema->flags & LYS_ORDBY_USER) ||
+ (op != LYD_DIFF_OP_REPLACE) || (value && orig_value));
+
/* find the first existing parent */
siblings = *diff;
while (1) {
|
Setup tmate session | @@ -47,3 +47,6 @@ jobs:
DEBIAN_FRONTEND: noninteractive
NODE_PATH: /usr/lib/node_modules
DOTNET_CLI_TELEMETRY_OPTOUT: 'true'
+ - name: Setup tmate session
+ uses: mxschmitt/action-tmate@v3
+ timeout-minutes: 30
\ No newline at end of file
|
GitHub Action failure - attempt to fix | @@ -28,5 +28,5 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GIT_PULL_TOKEN: ${{ secrets.GIT_PULL_TOKEN }}
- run: %GITHUB_WORKSPACE%\build-tests.cmd ${{ matrix.arch }} ${{ matrix.config }}
-
+ run: |
+ $GITHUB_WORKSPACE\\build-tests.cmd ${{ matrix.arch }} ${{ matrix.config }}
|
doc: improvements for sphinx generation
Handle version retrieval better when comments are present.
Add warning if Sphinx theme (sphinx_rtd_theme) is missing.
# Makefile from the acrn-hypervisor repo by finding these lines:
# MAJOR_VERSION=0
# MINOR_VERSION=1
+# RC_VERSION=1
try:
version_major = None
version_minor = None
version_rc = None
for line in open(os.path.normpath("../acrn-hypervisor/Makefile")) :
- if line.count("=") :
+ # remove comments
+ line = line.split('#', 1)[0]
+ line = line.rstrip()
+ if (line.count("=") == 1) :
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'MAJOR_VERSION':
version_major = val
@@ -124,6 +128,7 @@ except ImportError:
'searchbox.html',
]
}
+ sys.stderr.write('Warning: sphinx_rtd_theme missing. Use pip to install it.\n')
else:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
Change comments to C style for compatibility | @@ -1454,22 +1454,22 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define SGEMM_DEFAULT_P 768
#define SGEMM_DEFAULT_R sgemm_r
-//#define SGEMM_DEFAULT_R 1024
+/*#define SGEMM_DEFAULT_R 1024*/
#define DGEMM_DEFAULT_P 512
#define DGEMM_DEFAULT_R dgemm_r
-//#define DGEMM_DEFAULT_R 1024
+/*#define DGEMM_DEFAULT_R 1024*/
#define QGEMM_DEFAULT_P 504
#define QGEMM_DEFAULT_R qgemm_r
#define CGEMM_DEFAULT_P 768
#define CGEMM_DEFAULT_R cgemm_r
-//#define CGEMM_DEFAULT_R 1024
+/*#define CGEMM_DEFAULT_R 1024*/
#define ZGEMM_DEFAULT_P 512
#define ZGEMM_DEFAULT_R zgemm_r
-//#define ZGEMM_DEFAULT_R 1024
+/*#define ZGEMM_DEFAULT_R 1024*/
#define XGEMM_DEFAULT_P 252
#define XGEMM_DEFAULT_R xgemm_r
@@ -2571,7 +2571,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#endif
#ifdef LOONGSON3A
-////Copy from SICORTEX
+/*Copy from SICORTEX*/
#define SNUMOPT 2
#define DNUMOPT 2
@@ -2863,7 +2863,7 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#define SYMV_P 16
#endif
-// Common ARMv8 parameters
+/* Common ARMv8 parameters */
#if defined(ARMV8)
#define SNUMOPT 2
@@ -3066,7 +3066,7 @@ is a big desktop or server with abundant cache rather than a phone or embedded d
#define CGEMM_DEFAULT_R 4096
#define ZGEMM_DEFAULT_R 4096
-#else // Other/undetected ARMv8 cores
+#else /* Other/undetected ARMv8 cores */
#define SGEMM_DEFAULT_UNROLL_M 16
#define SGEMM_DEFAULT_UNROLL_N 4
@@ -3095,9 +3095,9 @@ is a big desktop or server with abundant cache rather than a phone or embedded d
#define CGEMM_DEFAULT_R 4096
#define ZGEMM_DEFAULT_R 4096
-#endif // Cores
+#endif /* Cores */
-#endif // ARMv8
+#endif /* ARMv8 */
#if defined(ARMV5)
#define SNUMOPT 2
|
Remove deleted -c flag from mbld.1 | @@ -35,10 +35,6 @@ The myrbuild options are:
.B -[h|?]
Print a summary of the available options.
-.TP
-.B -c
-cleans the code before building. This applies to both
-
.TP
.B -b \fIbinname\fP
Compile source into a binary named 'name'. If neither this option nor
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.