Improve page balancing function (in-daemon) to match the python changes for the scripts/memcached-automove copy of the algorithm.
struct window_data { uint64_t age; uint64_t dirty; - uint64_t evicted; + float evicted_ratio; + uint64_t evicted_seen; // if evictions were seen at all this window }; typedef struct { @@ -63,7 +64,8 @@ static void window_sum(struct window_data *wd, struct window_data *w, uint32_t s struct window_data *d = &wd[x]; w->age += d->age; w->dirty += d->dirty; - w->evicted += d->evicted; + w->evicted_ratio += d->evicted_ratio; + w->evicted_seen += d->evicted_seen; } } @@ -84,6 +86,11 @@ void slab_automove_run(void *arg, int *src, int *dst) { // fill after structs fill_item_stats_automove(a->iam_after); fill_slab_stats_automove(a->sam_after); + // Loop once to get total_evicted for this window. + uint64_t evicted_total = 0; + for (n = POWER_SMALLEST; n < MAX_NUMBER_OF_SLAB_CLASSES; n++) { + evicted_total += a->iam_after[n].evicted - a->iam_before[n].evicted; + } a->window_cur++; // iterate slabs @@ -97,9 +104,15 @@ void slab_automove_run(void *arg, int *src, int *dst) { // if page delta, or evicted delta, mark window dirty // (or outofmemory) - if (a->iam_after[n].evicted - a->iam_before[n].evicted > 0 || - a->iam_after[n].outofmemory - a->iam_before[n].outofmemory > 0) { - wd->evicted = 1; + uint64_t evicted_delta = a->iam_after[n].evicted - a->iam_before[n].evicted; + if (evicted_delta > 0) { + // FIXME: the python script is using floats. we have ints. + wd->evicted_ratio = (float) evicted_delta / evicted_total; + wd->evicted_seen = 1; + wd->dirty = 1; + } + + if (a->iam_after[n].outofmemory - a->iam_before[n].outofmemory > 0) { wd->dirty = 1; } if (a->sam_after[n].total_pages - a->sam_before[n].total_pages > 0) { @@ -129,10 +142,12 @@ void slab_automove_run(void *arg, int *src, int *dst) { // grab evicted count from window // if > half the window and youngest, mark as youngest - if (age < youngest_age && w_sum.evicted > a->window_size / 2) { + // or, if more than 25% of total evictions in the window. + if (age < youngest_age && (w_sum.evicted_seen > a->window_size / 2 + || w_sum.evicted_ratio / a->window_size > 0.25)) { youngest = n; youngest_age = age; - youngest_evicting = wd->evicted ? true : false; + youngest_evicting = wd->evicted_seen ? true : false; } }
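A minimal standalone C sketch (editorial illustration, not part of the memcached patch above) of the window decision the diff implements: a slab class becomes a source candidate when evictions were seen in more than half of the window slots, or when its summed share of total evictions averages above 25% over the window. The struct and function names here are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct window_sum {
    uint64_t evicted_seen;  /* slots in the window that saw any evictions */
    float evicted_ratio;    /* summed per-slot share of total evictions */
};

/* Mirrors the check: evicted_seen > window/2 || average ratio > 0.25 */
static bool eviction_source_candidate(const struct window_sum *w,
                                      uint32_t window_size)
{
    return w->evicted_seen > window_size / 2
        || w->evicted_ratio / window_size > 0.25f;
}

int main(void)
{
    /* only 3 of 30 slots evicted, but those slots averaged 40% of all
     * evictions, so the class still qualifies */
    struct window_sum w = { .evicted_seen = 3, .evicted_ratio = 12.0f };

    printf("candidate: %s\n",
           eviction_source_candidate(&w, 30) ? "yes" : "no");
    return 0;
}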
python: revert changes
@@ -71,7 +71,6 @@ typedef struct PyObject * instance; int printError; int shutdown; - int subinterpreter; } moduleData; static int Python_AppendToSysPath (const char * path) @@ -163,7 +162,6 @@ static void Python_Shutdown (moduleData * data) /* destroy python if plugin isn't used anymore */ if (Py_IsInitialized ()) { - // Do we have a sub-interpreter? if (data->tstate) { Python_LockSwap pylock (data->tstate); @@ -207,8 +205,6 @@ int PYTHON_PLUGIN_FUNCTION (Open) (ckdb::Plugin * handle, ckdb::Key * errorKey) * expected behaviour without worring about default values */ data->shutdown = (ksLookupByName (config, "/shutdown", 0) && !!strcmp (keyString (ksLookupByName (config, "/shutdown", 0)), "0")); - data->subinterpreter = (ksLookupByName (config, "/subinterpreter", 0) && - !!strcmp (keyString (ksLookupByName (config, "/subinterpreter", 0)), "0")); { /* initialize python interpreter if necessary */ @@ -233,11 +229,7 @@ int PYTHON_PLUGIN_FUNCTION (Open) (ckdb::Plugin * handle, ckdb::Key * errorKey) /* acquire GIL */ Python_LockSwap pylock (nullptr); - if (data->subinterpreter) - { - /* Create a new sub-interpreter. - This is incompatible with PyGILState_Ensure, see directly above - https://docs.python.org/3/c-api/init.html#c.PyGILState_Ensure */ + /* create a new sub-interpreter */ data->tstate = Py_NewInterpreter (); if (data->tstate == nullptr) { @@ -245,7 +237,6 @@ int PYTHON_PLUGIN_FUNCTION (Open) (ckdb::Plugin * handle, ckdb::Key * errorKey) goto error; } PyThreadState_Swap (data->tstate); - } /* import kdb */ PyObject * kdbModule = PyImport_ImportModule ("kdb"); @@ -316,7 +307,6 @@ error: /* destroy python */ Python_Shutdown (data); delete data; - elektraPluginSetData (handle, nullptr); return -1; } @@ -330,7 +320,6 @@ int PYTHON_PLUGIN_FUNCTION (Close) (ckdb::Plugin * handle, ckdb::Key * errorKey) /* destroy python */ Python_Shutdown (data); delete data; - elektraPluginSetData (handle, nullptr); return ret; }
Found/fixed trivial variable use bug
@@ -507,7 +507,7 @@ ThresholdsCheck( if (SensorInfo.MediaTempShutdownThresh < MediaTemperatureThreshold) { APPEND_RESULT_TO_THE_LOG(pDimm, STRING_TOKEN(STR_FW_MEDIA_TEMPERATURE_THRESHOLD_ERROR), EVENT_CODE_903, DIAG_STATE_MASK_WARNING, ppResultStr, pDiagState, - pDimm->DeviceHandle.AsUint32, ControllerTemperatureThreshold, SensorInfo.MediaTempShutdownThresh); + pDimm->DeviceHandle.AsUint32, MediaTemperatureThreshold, SensorInfo.MediaTempShutdownThresh); } ReturnCode = GetAlarmThresholds(NULL,
Ensure lua state is closed even if errors occur
@@ -67,6 +67,7 @@ liftLua1 f x = liftLua $ \l -> f l x luaState :: Lua LuaState luaState = ask --- | Run lua computation with custom lua state. +-- | Run lua computation with custom lua state. Errors are left unhandled, the +-- caller of this function is responsible to catch lua errors. runLuaWith :: LuaState -> Lua a -> IO a runLuaWith l s = runReaderT (unLua s) l
MIPS64: Fix defined-but-not-used errors with WEBP_REDUCE_CSP
@@ -264,6 +264,7 @@ static void YuvToBgr(int y, int u, int v, uint8_t* const bgr) { bgr[2] = Clip8(r1 >> 6); } +#if !defined(WEBP_REDUCE_CSP) static void YuvToRgb565(int y, int u, int v, uint8_t* const rgb) { const int y1 = MultHi(y, 19077); const int r1 = y1 + MultHi(v, 26149) - 14234; @@ -306,6 +307,7 @@ static void YuvToArgb(uint8_t y, uint8_t u, uint8_t v, uint8_t* const argb) { argb[0] = 0xff; YuvToRgb(y, u, v, argb + 1); } +#endif // WEBP_REDUCE_CSP static void YuvToBgra(uint8_t y, uint8_t u, uint8_t v, uint8_t* const bgra) { YuvToBgr(y, u, v, bgra); @@ -317,6 +319,7 @@ static void YuvToRgba(uint8_t y, uint8_t u, uint8_t v, uint8_t* const rgba) { rgba[3] = 0xff; } +#if !defined(WEBP_REDUCE_CSP) static void YuvToRgbLine(const uint8_t* y, const uint8_t* u, const uint8_t* v, uint8_t* dst, int length) { v16u8 R, G, B; @@ -370,6 +373,7 @@ static void YuvToBgrLine(const uint8_t* y, const uint8_t* u, memcpy(dst, temp, length * 3 * sizeof(*dst)); } } +#endif // WEBP_REDUCE_CSP static void YuvToRgbaLine(const uint8_t* y, const uint8_t* u, const uint8_t* v, uint8_t* dst, int length) { @@ -427,6 +431,7 @@ static void YuvToBgraLine(const uint8_t* y, const uint8_t* u, } } +#if !defined(WEBP_REDUCE_CSP) static void YuvToArgbLine(const uint8_t* y, const uint8_t* u, const uint8_t* v, uint8_t* dst, int length) { v16u8 R, G, B; @@ -526,6 +531,7 @@ static void YuvToRgb565Line(const uint8_t* y, const uint8_t* u, memcpy(dst, temp, length * 2 * sizeof(*dst)); } } +#endif // WEBP_REDUCE_CSP #define UPSAMPLE_32PIXELS(a, b, c, d) do { \ v16u8 s = __msa_aver_u_b(a, d); \
Java: fix configure error reporting.
@@ -84,11 +84,12 @@ if [ -n "${NXT_JAVA_HOME}" ]; then NXT_JAVA="${NXT_JAVA_HOME}/bin/java" else - $echo -n "checking for java executable" + $echo -n "checking for java executable ..." $echo "checking for java executable ..." >> $NXT_AUTOCONF_ERR NXT_JAVA=`which java || :` if [ -z "$NXT_JAVA" -o ! -x "$NXT_JAVA" ]; then + $echo " not found" $echo $echo $0: error: java executable not found. $echo @@ -100,11 +101,12 @@ else "$NXT_JAVA" -version - $echo -n "checking java.home" + $echo -n "checking java.home ..." $echo "checking java.home ..." >> $NXT_AUTOCONF_ERR NXT_JAVA_HOME=`$NXT_JAVA -XshowSettings 2>&1 | grep -F -e java.home | sed -e 's/^.*= //'` if [ -z "$NXT_JAVA_HOME" ]; then + $echo " not found" $echo $echo $0: error: java.home not found. $echo @@ -149,10 +151,11 @@ case "$NXT_SYSTEM" in esac if [ -z "$NXT_JAVA_LIB_PATH" ]; then - $echo -n "checking library path" + $echo -n "checking library path ..." $echo "checking library path ..." >> $NXT_AUTOCONF_ERR if [ ! -x "$NXT_JAVA" ]; then + $echo " not found" $echo $echo $0: error: java executable not found. $echo @@ -162,6 +165,7 @@ if [ -z "$NXT_JAVA_LIB_PATH" ]; then NXT_JAVA_LIB_PATH=`$NXT_JAVA -XshowSettings 2>&1 | grep -F -e sun.boot.library.path | sed -e 's/^.*= //'` if [ -z "$NXT_JAVA_LIB_PATH" ]; then + $echo " not found" $echo $echo $0: error: library path not found. $echo
Refactor fill-case of md_copy2
@@ -644,6 +644,68 @@ void md_copy2(unsigned int D, const long dim[D], const long ostr[D], void* optr, #ifdef USE_CUDA bool use_gpu = cuda_ondevice(optr) || cuda_ondevice(iptr); + +#if 1 + // less calls for filling-like copies + + long tostr_fill[D]; + long tistr_fill[D]; + long tdims_fill[D]; + + md_copy_strides(D, tostr_fill, ostr); + md_copy_strides(D, tistr_fill, istr); + md_copy_dims(D, tdims_fill, dim); + + long (*nstr2_fill[2])[D] = { &tostr_fill, &tistr_fill }; + + int ND_fill = simplify_dims(2, D, tdims_fill, nstr2_fill); + + bool fill = (2 == ND_fill) || (1 == ND_fill); + + size_t cp_size = 0; + unsigned long repetitions = 0; + + if (2 == ND_fill) { + + fill = fill && ((*nstr2_fill[0])[0] == (signed)size); + fill = fill && ((*nstr2_fill[1])[0] == (signed)size); + + cp_size = tdims_fill[0] * size; + repetitions = tdims_fill[1]; + + fill = fill && ((*nstr2_fill[0])[1] == (signed)cp_size); + fill = fill && ((*nstr2_fill[1])[1] == 0); + } + + if (1 == ND_fill) { + + fill = fill && ((*nstr2_fill[0])[0] == (signed)size); + fill = fill && ((*nstr2_fill[1])[0] == 0); + + cp_size = size; + repetitions = tdims_fill[0]; + } + + if (use_gpu && fill) { + + cuda_memcpy(cp_size, optr, iptr); + + unsigned int i = 1; + + while (2 * i <= repetitions) { + + cuda_memcpy(cp_size * i, optr + i * cp_size, optr); + i = i * 2; + } + + if (0 < repetitions - i) + cuda_memcpy(cp_size * (repetitions - i), optr + i * cp_size, optr); + + return; + } +#endif + + #if 1 long tostr[D]; long tistr[D];
dedicated logic for rx_spektrum_bind
@@ -967,27 +967,34 @@ void findprotocol(void) { // Send Spektrum bind pulses to a GPIO e.g. TX1 void rx_spektrum_bind(void) { +#define SPECTRUM_BIND_PIN usart_port_defs[profile.serial.rx].rx_pin +#define SPECTRUM_BIND_PORT usart_port_defs[profile.serial.rx].rx_pin + + if (profile.serial.rx == USART_PORT_INVALID) { + return; + } + rx_bind_enable = fmc_read_float(56); if (rx_bind_enable == 0) { GPIO_InitTypeDef GPIO_InitStructure; - GPIO_InitStructure.GPIO_Pin = USART.rx_pin; + GPIO_InitStructure.GPIO_Pin = SPECTRUM_BIND_PIN; GPIO_InitStructure.GPIO_Mode = GPIO_Mode_OUT; GPIO_InitStructure.GPIO_OType = GPIO_OType_PP; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL; - GPIO_Init(USART.gpio_port, &GPIO_InitStructure); + GPIO_Init(SPECTRUM_BIND_PORT, &GPIO_InitStructure); // RX line, set high - GPIO_SetBits(USART.gpio_port, USART.rx_pin); + GPIO_SetBits(SPECTRUM_BIND_PORT, SPECTRUM_BIND_PIN); // Bind window is around 20-140ms after powerup delay(60000); for (uint8_t i = 0; i < 9; i++) { // 9 pulses for internal dsmx 11ms, 3 pulses for internal dsm2 22ms // RX line, drive low for 120us - GPIO_ResetBits(USART.gpio_port, USART.rx_pin); + GPIO_ResetBits(SPECTRUM_BIND_PORT, SPECTRUM_BIND_PIN); delay(120); // RX line, drive high for 120us - GPIO_SetBits(USART.gpio_port, USART.rx_pin); + GPIO_SetBits(SPECTRUM_BIND_PORT, SPECTRUM_BIND_PIN); delay(120); } }
[ivshmem] Request mmap fixed. This parameter was optional, but omitting it now causes this error: "IOCTL_IVSHMEM_MMAP: Invalid input size, expected 1 but got 0"
@@ -101,9 +101,11 @@ int main() printf("Size: %I64u\n", size); TEST_START("IOCTL_IVSHMEM_REQUEST_MMAP"); + IVSHMEM_MMAP_CONFIG config; + config.cacheMode = IVSHMEM_CACHE_NONCACHED; IVSHMEM_MMAP map; ZeroMemory(&map, sizeof(IVSHMEM_MMAP)); - if (!DeviceIoControl(devHandle, IOCTL_IVSHMEM_REQUEST_MMAP, NULL, 0, &map, sizeof(IVSHMEM_MMAP), &ulReturnedLength, NULL)) + if (!DeviceIoControl(devHandle, IOCTL_IVSHMEM_REQUEST_MMAP, &config, sizeof(IVSHMEM_MMAP_CONFIG), &map, sizeof(IVSHMEM_MMAP), &ulReturnedLength, NULL)) { TEST_FAIL("DeviceIoControl"); break;
test-suite: fortran/mpi update to avoid interface mismatch errors
C Ping-pong with MPI + I/O - include 'mpif.h' +c include 'mpif.h' + use mpi integer BUFFSIZE, MSGSIZE, error, rank, size, dest, i integer status(MPI_STATUS_SIZE), NITERS, retsize
HW: More generic pattern for modification of AXI ID width parameter in hardware/setup/patch_vhd.sh
@@ -9,6 +9,6 @@ fi NAME=`basename $2` if ([ "$NAME" == "psl_accel_sim.vhd" ] || [ "$NAME" == "psl_accel_syn.vhd" ]); then - sed -i 's/C_AXI_CARD_MEM0_ID_WIDTH : integer := 1/C_AXI_CARD_MEM0_ID_WIDTH : integer := '$NUM_OF_ACTIONS'/' $1/$2 - sed -i 's/C_AXI_HOST_MEM_ID_WIDTH : integer := 1/C_AXI_HOST_MEM_ID_WIDTH : integer := '$NUM_OF_ACTIONS'/' $1/$2 + sed -i 's/C_AXI_CARD_MEM0_ID_WIDTH[ ^I]*:[ ^I]*integer[ ^I]*:=[ ^I]*[0-9]*/C_AXI_CARD_MEM0_ID_WIDTH : integer := '$NUM_OF_ACTIONS'/' $1/$2 + sed -i 's/C_AXI_HOST_MEM_ID_WIDTH[ ^I]*:[ ^I]*integer[ ^I]*:=[ ^I]*[0-9]*/C_AXI_HOST_MEM_ID_WIDTH : integer := '$NUM_OF_ACTIONS'/' $1/$2 fi
relay: document -U in usage message
@@ -329,6 +329,7 @@ do_usage(char *name, int exitcode) printf(" -L server max stalls, defaults to %d\n", maxstalls); printf(" -S statistics sending interval in seconds, defaults to 60\n"); printf(" -B connection listen backlog, defaults to 32\n"); + printf(" -U socket receive buffer size, max/min/default values depend on OS\n"); printf(" -T IO timeout in milliseconds for server connections, defaults to %d\n", iotimeout); printf(" -m send statistics like carbon-cache.py, e.g. not cumulative\n"); printf(" -c characters to allow next to [A-Za-z0-9], defaults to -_:#\n");
Recognise Ed25519 in X509_certificate_type
@@ -41,6 +41,9 @@ int X509_certificate_type(const X509 *x, const EVP_PKEY *pkey) case EVP_PKEY_EC: ret = EVP_PK_EC | EVP_PKT_SIGN | EVP_PKT_EXCH; break; + case NID_ED25519: + ret = EVP_PKT_SIGN; + break; case EVP_PKEY_DH: ret = EVP_PK_DH | EVP_PKT_EXCH; break;
http3::accept_hashkey_flatten_address missing break in switch/case
@@ -360,6 +360,7 @@ static uint8_t *accept_hashkey_flatten_address(uint8_t *p, quicly_address_t *add break; case AF_UNSPEC: *p++ = 0; + break; default: h2o_fatal("unknown protocol family"); break;
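A tiny standalone C example (illustrative only, not the h2o code) of the bug class fixed above: without a break, control falls through from the handled case into the next label, so the fatal branch would run even for a valid protocol family.

#include <stdio.h>

static void flatten(int family)
{
    switch (family) {
    case 0:
        puts("AF_UNSPEC handled");
        break;   /* removing this break would fall through into default */
    default:
        puts("fatal: unknown protocol family");
        break;
    }
}

int main(void)
{
    flatten(0);   /* prints only "AF_UNSPEC handled" thanks to the break */
    return 0;
}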
Delete fd members of dumpParser object. Refactor dumpParser.py for encapsulation. Use 'with' statement to make sure fd is closed after use.
@@ -67,7 +67,6 @@ class dumpParser: # As well as open using NM utility so that, we can read the symbols easily if self.elf is not None: self.elf_file_fd = open(elf, 'rb') - self.elf_file_fd_nm = os.popen(self.nm_path + ' -n ' + elf) if not self.elf_file_fd: print('Failed to open {0}'.format(elf)) return None @@ -91,8 +90,8 @@ class dumpParser: # Read the elf header to get the offset of text section and ARM exidx section # These offsets will be used while creating ARM exidx table as well as while reading # the address from ELF file at a particular text address - self.readelf_fd = os.popen(self.readelf_path + ' -S ' + elf) - elfdata = self.readelf_fd.readlines() + with os.popen(self.readelf_path + ' -S ' + elf) as readelf_fd: + elfdata = readelf_fd.readlines() for line in elfdata: if '.text' in line: word = line.split() @@ -238,7 +237,8 @@ class dumpParser: # Function to setup the Address to Symbol mapping table from the ELF file ( tinyara in our case) def setup_symbol_table(self,tinyara_elf_file, debug=False): # Reading the tinyara elf and preparing the symbol map table - symbols = self.elf_file_fd_nm.readlines() + with os.popen(self.nm_path + ' -n ' + tinyara_elf_file) as elf_file_fd_nm: + symbols = elf_file_fd_nm.readlines() for line in symbols: s = line.split(' ') if len(s) == 3: @@ -253,9 +253,6 @@ class dumpParser: print "{0:x} {1}".format(line[0], line[1]) print '~~~~~~~~~~~~~~~~~~~~~~~~ SYMBOL TABLE END ~~~~~~~~~~~~~~~~~~~~~' - # Close the NM ELF file descriptor - self.elf_file_fd_nm.close() - # Function to read the contents of given length from specific RAM/ELF address def read_address(self, addr, length, debug=False): # First check whether address falls within the code section, if so read from elf
Update fw_update comments.
# # NOTE: Older fimware versions are no longer supported by the host driver. # NOTE: The latest firmware (19.6.1) only works on ATWINC1500-MR210PB. +# NOTE: Firmware is at <openmv-ide-install-dir>/share/qtcreator/firmware/WINC1500/winc_19_6_1.bin import network
Use better test macro
@@ -777,7 +777,7 @@ void mpi_core_cond_assign( data_t * input_X, size_t copy_limbs = CHARS_TO_LIMBS( input_len ); size_t len = limbs * sizeof( mbedtls_mpi_uint ); - TEST_ASSERT( limbs_X == limbs_Y ); + TEST_EQUAL( limbs_X, limbs_Y ); TEST_ASSERT( copy_limbs <= limbs ); ASSERT_ALLOC( X, len ); @@ -836,7 +836,7 @@ void mpi_core_cond_swap( data_t * input_X, size_t copy_limbs = CHARS_TO_LIMBS( input_len ); size_t len = limbs * sizeof( mbedtls_mpi_uint ); - TEST_ASSERT( limbs_X == limbs_Y ); + TEST_EQUAL( limbs_X, limbs_Y ); TEST_ASSERT( copy_limbs <= limbs ); ASSERT_ALLOC( tmp_X, len ); @@ -912,7 +912,7 @@ void mpi_mod_raw_cond_assign( data_t * input_X, size_t copy_limbs = CHARS_TO_LIMBS( input_len ); size_t len = limbs * sizeof( mbedtls_mpi_uint ); - TEST_ASSERT( limbs_X == limbs_Y ); + TEST_EQUAL( limbs_X, limbs_Y ); TEST_ASSERT( copy_limbs <= limbs ); ASSERT_ALLOC( X, len ); @@ -986,7 +986,7 @@ void mpi_mod_raw_cond_swap( data_t * input_X, size_t copy_limbs = CHARS_TO_LIMBS( input_len ); size_t len = limbs * sizeof( mbedtls_mpi_uint ); - TEST_ASSERT( limbs_X == limbs_Y ); + TEST_EQUAL( limbs_X, limbs_Y ); TEST_ASSERT( copy_limbs <= limbs ); ASSERT_ALLOC( tmp_X, len );
Make ya.conf.json valid again
}, "lkvm": { "description": "kvmtool is a userland tool for creating and controlling KVM guests" - }, + } }, "toolchain": { "msvc2017-i686": {
hsa: set connected mode for udp server. Type: fix
@@ -531,6 +531,10 @@ vts_worker_init (vcl_test_server_worker_t * wrk) if (wrk->listen_fd < 0) vtfail ("vppcom_session_create()", wrk->listen_fd); + if (vsm->cfg.proto == VPPCOM_PROTO_UDP) + { + vppcom_session_attr (wrk->listen_fd, VPPCOM_ATTR_SET_CONNECTED, 0, 0); + } if (vsm->cfg.proto == VPPCOM_PROTO_TLS || vsm->cfg.proto == VPPCOM_PROTO_QUIC)
apps/cmp.c: Improve diagnostics on -server URL parse error
@@ -1834,8 +1834,10 @@ static int setup_client_ctx(OSSL_CMP_CTX *ctx, ENGINE *engine) CMP_err("missing -server option"); goto err; } - if (!OSSL_HTTP_parse_url(opt_server, &server, &port, &portnum, &path, &ssl)) + if (!OSSL_HTTP_parse_url(opt_server, &server, &port, &portnum, &path, &ssl)) { + CMP_err1("cannot parse -server URL: %s", opt_server); goto err; + } if (ssl && !opt_tls_used) { CMP_err("missing -tls_used option since -server URL indicates https"); goto err;
[platform-stm32f4xx] Update chip definition
@@ -14,16 +14,16 @@ ARM_CPU := cortex-m4 # TODO: integrate better with platform/stm32f4xx/CMSIS/stm32f4xx.h ifeq ($(STM32_CHIP),stm32f407) -GLOBAL_DEFINES += STM32F40_41xxx +GLOBAL_CFLAGS=-DSTM32F40_41xxx=1 FOUND_CHIP := true endif ifeq ($(STM32_CHIP),stm32f417) FOUND_CHIP := true -GLOBAL_DEFINES += STM32F40_41xxx +GLOBAL_CFLAGS=-DSTM32F40_41xxx=1 endif ifeq ($(STM32_CHIP),stm32f429) FOUND_CHIP := true -GLOBAL_DEFINES += STM32F429_439xx +GLOBAL_CFLAGS=-DSTM32F429_439xx=1 endif ifeq ($(FOUND_CHIP),)
[org] list of parameters in cg file for SDF
@@ -1612,6 +1612,42 @@ Steve, Vincent. *** Missing devel for Siconos/Gazebo + SDF, siconos configuration profile in SDF for siconos ? + configuration file for siconos hdf5, xml, without various profile serializatoion xml + + - time step + - Newton tolerance, nb iterations + - osi class + - onsnp + - onsnp solver, tolerance, nb iterations + - projection techniques (position, velocities) + + time_stepping=None, + h=0.0005, + + osi=Kernel.MoreauJeanOSI, + theta=0.50001, + + Newton_options=Kernel.SICONOS_TS_NONLINEAR, + Newton_max_iter=20, + Newton_update_interactions=False, + + solver=Numerics.SICONOS_FRICTION_3D_NSGS, + itermax=100000, + tolerance=1e-8, + + projection_itermax=20, + projection_tolerance=1e-8, + projection_tolerance_unilateral=1e-8, + contact_index_set=1, + + numerics_verbose=False, + verbose=True, + verbose_progress=True, + + output_frequency=None, + + + + debian packages + docker file. [[http://siconos.github.io/gazebo-siconos/]] + Evaluation of the devel with examples
Allow all clusters to be used for button maps
@@ -74,7 +74,7 @@ QMap<QString, quint16> loadButtonMapClustersJson(const QJsonDocument &buttonMaps DBG_Printf(DBG_INFO, "[ERROR] - Key #%d for object 'clusters' is no string or too long. Skipping entry...\n", counter); continue; } - else if (!i.value().isDouble() || i.value().toDouble() > 2000) + else if (!i.value().isDouble() || i.value().toDouble() > 65535) { DBG_Printf(DBG_INFO, "[ERROR] - Value #%d for object 'clusters' is no number or too large. Skipping entry...\n", counter); continue;
kernel: only allow sys_identify_cap() when target is L1 CNode
@@ -678,6 +678,10 @@ struct sysret sys_identify_cap(struct capability *root, capaddr_t cptr, return SYSRET(SYS_ERR_INVALID_USER_BUFFER); } + if (root->type != ObjType_L1CNode) { + return SYSRET(SYS_ERR_CNODE_NOT_ROOT); + } + struct capability *thecap; // XXX: what's the correct caprights here? err = caps_lookup_cap(root, cptr, level, &thecap, CAPRIGHTS_ALLRIGHTS);
apptrace: fix SystemView example test; add trace svdat file to artifacts
-# SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD +# SPDX-FileCopyrightText: 2022-2023 Espressif Systems (Shanghai) CO LTD # SPDX-License-Identifier: Unlicense OR CC0-1.0 import os.path import time @@ -7,8 +7,6 @@ import pexpect.fdpexpect import pytest from pytest_embedded_idf import IdfDut -TEMP_FILE = os.path.join(os.path.dirname(__file__), 'heap_log.svdat') - @pytest.mark.esp32 @pytest.mark.jtag @@ -16,12 +14,14 @@ TEMP_FILE = os.path.join(os.path.dirname(__file__), 'heap_log.svdat') 'esp,idf,jtag', ], indirect=True) def test_examples_sysview_tracing_heap_log(idf_path: str, dut: IdfDut) -> None: + trace_log = os.path.join(os.path.dirname(dut.gdb._logfile), 'heap_log.svdat') # pylint: disable=protected-access + dut.gdb.write('mon reset halt') dut.gdb.write('flushregs') dut.gdb.write('tb heap_trace_start') dut.gdb.write('commands', non_blocking=True) - dut.gdb.write(f'mon esp sysview start file://{TEMP_FILE}', non_blocking=True) + dut.gdb.write(f'mon esp sysview start file://{trace_log}', non_blocking=True) dut.gdb.write('c', non_blocking=True) dut.gdb.write('end') @@ -30,14 +30,14 @@ def test_examples_sysview_tracing_heap_log(idf_path: str, dut: IdfDut) -> None: dut.gdb.write('mon esp sysview stop', non_blocking=True) dut.gdb.write('end') - dut.gdb.write('c', non_blocking=True) + dut.gdb.write('c') dut.expect('esp_apptrace: Initialized TRAX on CPU0') time.sleep(1) # make sure that the sysview file has been generated with pexpect.spawn(' '.join([os.path.join(idf_path, 'tools', 'esp_app_trace', 'sysviewtrace_proc.py'), '-p', '-b', dut.app.elf_file, - TEMP_FILE])) as sysviewtrace: + trace_log])) as sysviewtrace: sysviewtrace.expect(r'Found \d+ leaked bytes in \d+ blocks.', timeout=120) with open(dut.gdb._logfile) as fr: # pylint: disable=protected-access
from_parts generated methods
@@ -144,7 +144,7 @@ impl<'de> serde::Deserialize<'de> for Sbp { } impl Sbp { - /// Parse a message from a [Frame](crate::Frame). + /// Parse a message from given fields. /// /// # Example /// @@ -152,35 +152,33 @@ impl Sbp { /// use std::convert::TryInto; /// /// use sbp::messages::logging::MsgLog; - /// use sbp::{Frame, Sbp}; + /// use sbp::Sbp; /// /// fn main() -> Result<(), Box<dyn std::error::Error>> { /// // log level 1 and with "hello" as the message + /// let msg_type = 1025; + /// let sender_id = 1; /// let payload: &[u8] = &[1, 104, 101, 108, 108, 111]; - /// let frame = Frame { - /// msg_type: 1025, - /// sender_id: 1, - /// payload, - /// }; - /// let msg: MsgLog = Sbp::from_frame(frame)?.try_into()?; + /// + /// let msg: MsgLog = Sbp::from_parts(msg_type, sender_id, payload)?.try_into()?; /// assert_eq!(msg.sender_id, Some(1)); /// assert_eq!(msg.level, 1); /// assert_eq!(msg.text.as_bytes(), "hello".as_bytes()); /// Ok(()) /// } /// ``` - pub fn from_frame<B: Buf>(mut frame: crate::Frame<B>) -> Result<Sbp, PayloadParseError> { - match frame.msg_type { + pub fn from_parts<B: bytes::Buf>(msg_type: u16, sender_id: u16, mut payload: B) -> Result<Sbp, PayloadParseError> { + match msg_type { ((*- for m in msgs *)) (((m.msg_name)))::MESSAGE_TYPE => { - let mut msg = (((m.msg_name)))::parse(&mut frame.payload)?; - msg.set_sender_id(frame.sender_id); + let mut msg = (((m.msg_name)))::parse(&mut payload)?; + msg.set_sender_id(sender_id); Ok(Sbp::(((m.msg_name)))(msg)) }, ((*- endfor *)) _ => { - let mut msg = Unknown::parse(&mut frame.payload)?; - msg.set_sender_id(frame.sender_id); + let mut msg = Unknown::parse(&mut payload)?; + msg.set_sender_id(sender_id); Ok(Sbp::Unknown(msg)) } }
http_static: add "http static cache clear" CLI. Useful to force content reloads. Type: feature
@@ -1656,6 +1656,81 @@ VLIB_CLI_COMMAND (http_show_static_server_command, static) = }; /* *INDENT-ON* */ +static clib_error_t * +http_clear_static_cache_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + http_static_server_main_t *hsm = &http_static_server_main; + file_data_cache_t *dp; + u32 free_index; + u32 busy_items = 0; + BVT (clib_bihash_kv) kv; + + if (hsm->www_root == 0) + return clib_error_return (0, "Static server disabled"); + + http_static_server_sessions_reader_lock (); + + /* Walk the LRU list to find active entries */ + free_index = hsm->last_index; + while (free_index != ~0) + { + dp = pool_elt_at_index (hsm->cache_pool, free_index); + free_index = dp->prev_index; + /* Which could be in use... */ + if (dp->inuse) + { + busy_items++; + free_index = dp->next_index; + continue; + } + kv.key = (u64) (dp->filename); + kv.value = ~0ULL; + if (BV (clib_bihash_add_del) (&hsm->name_to_data, &kv, + 0 /* is_add */ ) < 0) + { + clib_warning ("BUG: cache clear delete '%s' FAILED!", dp->filename); + } + + lru_remove (hsm, dp); + hsm->cache_size -= vec_len (dp->data); + hsm->cache_evictions++; + vec_free (dp->filename); + vec_free (dp->data); + if (hsm->debug_level > 1) + clib_warning ("pool put index %d", dp - hsm->cache_pool); + pool_put (hsm->cache_pool, dp); + free_index = hsm->last_index; + } + http_static_server_sessions_reader_unlock (); + if (busy_items > 0) + vlib_cli_output (vm, "Note: %d busy items still in cache...", busy_items); + else + vlib_cli_output (vm, "Cache cleared..."); + return 0; +} + +/*? + * Clear the static http server cache, to force the server to + * reload content from backing files + * + * @cliexpar + * This command clear the static http server cache + * @clistart + * clear http static cache + * @cliend + * @cliexcmd{clear http static cache} +?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_http_static_cache_command, static) = +{ + .path = "clear http static cache", + .short_help = "clear http static cache", + .function = http_clear_static_cache_command_fn, +}; +/* *INDENT-ON* */ + static clib_error_t * http_static_server_main_init (vlib_main_t * vm) {
use colemak keyboard by default
#include "kb.h" #include "kb_us.h" +#include "kb_colemak.h" #include <std/common.h> #include <std/std.h> #include <kernel/interrupts/interrupts.h> @@ -16,7 +17,7 @@ void kb_install() { printf_info("Initializing keyboard driver..."); interrupt_setup_callback(INT_VECTOR_IRQ1, &kb_callback); - switch_layout(&kb_us); + switch_layout(&kb_colemak); } char kgetch() {
zephyr/projects/herobrine/src/usbc_config.c: Format with clang-format. BRANCH=none TEST=none
#define CPRINTS(format, args...) cprints(CC_USBCHARGE, format, ##args) #define CPRINTF(format, args...) cprintf(CC_USBCHARGE, format, ##args) - /* GPIO Interrupt Handlers */ void tcpc_alert_event(enum gpio_signal signal) { @@ -54,9 +53,9 @@ void tcpc_alert_event(enum gpio_signal signal) static void usba_oc_deferred(void) { /* Use next number after all USB-C ports to indicate the USB-A port */ - board_overcurrent_event(CONFIG_USB_PD_PORT_MAX_COUNT, - !gpio_pin_get_dt( - GPIO_DT_FROM_NODELABEL(gpio_usb_a0_oc_odl))); + board_overcurrent_event( + CONFIG_USB_PD_PORT_MAX_COUNT, + !gpio_pin_get_dt(GPIO_DT_FROM_NODELABEL(gpio_usb_a0_oc_odl))); } DECLARE_DEFERRED(usba_oc_deferred); @@ -194,8 +193,7 @@ void board_overcurrent_event(int port, int is_overcurrented) int board_set_active_charge_port(int port) { - int is_real_port = (port >= 0 && - port < CONFIG_USB_PD_PORT_MAX_COUNT); + int is_real_port = (port >= 0 && port < CONFIG_USB_PD_PORT_MAX_COUNT); int i; if (!is_real_port && port != CHARGE_PORT_NONE) @@ -223,7 +221,6 @@ int board_set_active_charge_port(int port) return EC_ERROR_INVAL; } - CPRINTS("New charge port: p%d", port); /* @@ -247,23 +244,21 @@ int board_set_active_charge_port(int port) return EC_SUCCESS; } -void board_set_charge_limit(int port, int supplier, int charge_ma, - int max_ma, int charge_mv) +void board_set_charge_limit(int port, int supplier, int charge_ma, int max_ma, + int charge_mv) { /* * Ignore lower charge ceiling on PD transition if our battery is * critical, as we may brownout. */ - if (supplier == CHARGE_SUPPLIER_PD && - charge_ma < 1500 && + if (supplier == CHARGE_SUPPLIER_PD && charge_ma < 1500 && charge_get_percent() < CONFIG_CHARGER_MIN_BAT_PCT_FOR_POWER_ON) { CPRINTS("Using max ilim %d", max_ma); charge_ma = max_ma; } - charge_set_input_current_limit(MAX(charge_ma, - CONFIG_CHARGER_INPUT_CURRENT), - charge_mv); + charge_set_input_current_limit( + MAX(charge_ma, CONFIG_CHARGER_INPUT_CURRENT), charge_mv); } uint16_t tcpc_get_alert_status(void)
win32: another try with appveyor
@@ -3,7 +3,7 @@ image: Visual Studio 2015 platform: x86 clone_folder: C:\TAU\rhodes environment: - QTDIR: C:\Qt\5.8\msvc2013 + QTDIR: C:\Qt\5.8\ matrix: - win32_rhosimulator: testable_application_repository: https://github.com/rhomobile/RMS-Testing.git
added handling for WNOHANG
@@ -46,7 +46,11 @@ Pwait(lua_State *L) if (pid == -1) return pusherror(L, NULL); lua_pushinteger(L, pid); - if (WIFEXITED(status)) + if(pid == 0){ + lua_pushliteral(L,"running"); + return 2 + } + else if (WIFEXITED(status)) { lua_pushliteral(L,"exited"); lua_pushinteger(L, WEXITSTATUS(status));
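A self-contained C example (separate from the luaposix patch above) of the waitpid()/WNOHANG semantics being handled: with WNOHANG, waitpid() returns 0 when the child exists but has not changed state yet, which is the case the binding now reports as "running".

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t child = fork();
    if (child == 0) {          /* child: stay alive for a moment */
        sleep(1);
        _exit(42);
    }

    int status;
    pid_t r = waitpid(child, &status, WNOHANG);
    if (r == 0)
        puts("running");                        /* child not done yet */
    else if (r == child && WIFEXITED(status))
        printf("exited %d\n", WEXITSTATUS(status));

    waitpid(child, &status, 0);                 /* reap the child */
    return 0;
}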
Adds zlib1g-dev Debian package build dependency. When building from source in Debian, zlib1g-dev is needed or else a '/usr/bin/ld: cannot find -lz' error will occur.
@@ -210,7 +210,7 @@ apt-get -t jessie-backports install linux-base linux-image-4.8.0-0.bpo.2-amd64 apt-get install debhelper cmake libllvm3.8 llvm-3.8-dev libclang-3.8-dev \ libelf-dev bison flex libedit-dev clang-format-3.8 python python-netaddr \ python-pyroute2 luajit libluajit-5.1-dev arping iperf netperf ethtool \ - devscripts + devscripts zlib1g-dev ``` #### Sudo
hv: coding style: refine find_vcpuid_entry 1) add local_find_vcpuid_entry to find whether a cpuid leaf exists in the vcpuid cache entries. 2) find_vcpuid_entry returns the found entry if the result of local_find_vcpuid_entry is not null; otherwise, it calls local_find_vcpuid_entry again when necessary. This eliminates the recursion in find_vcpuid_entry.
#include <hypervisor.h> -static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcpu, - uint32_t leaf_arg, uint32_t subleaf) +static inline struct vcpuid_entry *local_find_vcpuid_entry(const struct acrn_vcpu *vcpu, + uint32_t leaf, uint32_t subleaf) { uint32_t i = 0U, nr, half; struct vcpuid_entry *entry = NULL; struct acrn_vm *vm = vcpu->vm; - uint32_t leaf = leaf_arg; nr = vm->vcpuid_entry_nr; half = nr >> 1U; @@ -38,8 +37,19 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp } } + return entry; +} + +static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcpu, + uint32_t leaf_arg, uint32_t subleaf) +{ + struct vcpuid_entry *entry; + uint32_t leaf = leaf_arg; + + entry = local_find_vcpuid_entry(vcpu, leaf, subleaf); if (entry == NULL) { uint32_t limit; + struct acrn_vm *vm = vcpu->vm; if ((leaf & 0x80000000U) != 0U) { limit = vm->vcpuid_xlevel; @@ -55,7 +65,7 @@ static inline struct vcpuid_entry *find_vcpuid_entry(const struct acrn_vcpu *vcp * CPUID) */ leaf = vm->vcpuid_level; - return find_vcpuid_entry(vcpu, leaf, subleaf); + entry = local_find_vcpuid_entry(vcpu, leaf, subleaf); } }
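A generic C sketch (not the ACRN code) of the refactoring pattern described in the commit message: the search body lives in a local helper, and the caller retries the helper with an adjusted key instead of calling itself recursively. The table and fallback key below are made up for illustration.

#include <stddef.h>
#include <stdio.h>

static const int *local_find(const int *table, size_t n, int key)
{
    for (size_t i = 0; i < n; i++) {
        if (table[i] == key)
            return &table[i];
    }
    return NULL;
}

static const int *find_with_fallback(const int *table, size_t n,
                                     int key, int fallback_key)
{
    const int *entry = local_find(table, n, key);

    if (entry == NULL) {
        /* retry the helper with the clamped key instead of recursing
         * into find_with_fallback() itself */
        entry = local_find(table, n, fallback_key);
    }
    return entry;
}

int main(void)
{
    const int table[] = { 1, 2, 4, 7 };
    const int *e = find_with_fallback(table, 4, 9, 7);

    printf("%d\n", e ? *e : -1);   /* prints 7 */
    return 0;
}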
nimble/ll: Fix ble_ll_sync_rx_isr_end return value ble_ll_sync_current_sm_over() already disables the PHY, so there is no need to do that again when the isr_end callback returns. This saves a few us on slow MCUs like CM0.
@@ -748,9 +748,10 @@ ble_ll_sync_rx_isr_end(uint8_t *rxbuf, struct ble_mbuf_hdr *rxhdr) ble_ll_event_send(&g_ble_ll_sync_sm_current->sync_ev_end); } + /* PHY is disabled here */ ble_ll_sync_current_sm_over(); - return -1; + return 1; } /**
doc: todo small changes
@@ -54,16 +54,31 @@ The following section lists news about the [modules](https://www.libelektra.org/ ### <<Plugin1>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + + ### <<Plugin2>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + + ### <<Plugin3>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + ## Libraries -The text below summarizes updates to the [C (and C++) based interface](https://www.libelektra.org/libraries/readme) of Elektra. +The text below summarizes updates to the [C (and C++)-based libraries](https://www.libelektra.org/libraries/readme) of Elektra. + ### Compatibility @@ -75,14 +90,32 @@ compiled against an older 0.8 version of Elektra will continue to work - <<TODO>> - <<TODO>> +### Core + +- <<TODO>> +- <<TODO>> +- <<TODO>> + ### <<Library1>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + ### <<Library2>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + ### <<Library3>> +- <<TODO>> +- <<TODO>> +- <<TODO>> + ## Bindings
remove duplicate and outdated gnu example
\subsection{3rd Party Libraries and Tools} \label{sec:3rdparty} \input{common/third_party_libs_intro} -%---------------- -% SLES specific -%---------------- - -\begin{lstlisting}[language=bash,keepspaces=true,keywords={}] -[sms](*\#*) zypper search -t package petsc-gnu-*-ohpc -Loading repository data... -Reading installed packages... - -S | Name | Summary ---+-------------------------+--------------------------------------------------------+-------- -i | petsc-gnu-mvapich2-ohpc | Portable Extensible Toolkit for Scientific Computation | package -i | petsc-gnu-openmpi-ohpc | Portable Extensible Toolkit for Scientific Computation | package -\end{lstlisting} - \input{common/third_party_libs} \input{common/third_party_mpi_libs_x86}
listbox_get_selected_file1: iterate / don't use a callback function. gtk_tree_selection_selected_foreach() is more suitable for a GtkTreeStore; see src/gtk/bookmarks.c
@@ -555,36 +555,30 @@ listbox_select_all_files (gftp_window_data *wdata) { // ============================================================== -/* - listbox_get_selected_file1() - retrieve existing gftp fileinfo / does not create a glist.. - https://developer.gnome.org/gtk2/stable/GtkTreeSelection.html#gtk-tree-selection-get-selected-rows -*/ - -static void -selected_1_foreach_func (GtkTreeModel *model, - GtkTreePath *path, - GtkTreeIter *iter, - gpointer userdata) -{ - gpointer *gftp_file = (gpointer *) userdata; - gtk_tree_model_get (model, iter, LISTBOX_COL_GFTPFILE, gftp_file, -1); -} - gftp_file * listbox_get_selected_file1 (gftp_window_data *wdata) { // retrieve selected file name from listbox - gpointer file = NULL; GtkTreeView * tree = GTK_TREE_VIEW (wdata->listbox); GtkTreeSelection * tsel = gtk_tree_view_get_selection (tree); + GtkTreeModel * model = gtk_tree_view_get_model (tree); + GtkTreeIter iter; + gboolean valid; + gftp_file * gftpFile = NULL; - gtk_tree_selection_selected_foreach(tsel, selected_1_foreach_func, &file); - if (!file) { - fprintf(stderr, "listbox.c: ERROR, could not retrieve filename...\n"); + valid = gtk_tree_model_get_iter_first (model, &iter); + while (valid) + { + if (gtk_tree_selection_iter_is_selected (tsel, &iter)) { + gtk_tree_model_get (model, &iter, LISTBOX_COL_GFTPFILE, &gftpFile, -1); + break; /* only 1 */ + } + valid = gtk_tree_model_iter_next (model, &iter); } - return ((gftp_file *) file); + if (!gftpFile) fprintf(stderr, "listbox.c: ERROR, could not retrieve filename...\n"); //debug + + return (gftpFile); } /* listbox_get_selected_files() */
crypto: initialize result variable
@@ -193,7 +193,7 @@ int flb_crypto_transform(struct flb_crypto *context, unsigned char *output_buffer, size_t *output_length) { - int result; + int result = FLB_CRYPTO_BACKEND_ERROR; if (context == NULL) { return FLB_CRYPTO_INVALID_ARGUMENT;
removed wps debug info
@@ -414,8 +414,6 @@ wpsd = (wps_t*)(eapext->data); if((memcmp(wpsd->vid, WPS_VENDOR, sizeof(wpsd->vid)) != 0) || (be32toh(wpsd->type) != WPS_SIMPLECONF)) return 0; -printf("%ld\n", EAPEXT_SIZE); - int vtagl = be16toh(eapext->eaplen); vtag = (vtag_t*)(wpsd->tags); while( 0 < vtagl)
put the janet core api "map" fn under the name `iter/map`. Avoids name collision with the tic80 map function. Could have just as easily done the same to that one, but chances are we'll be using the tic80 map more often.
@@ -1033,7 +1033,15 @@ static bool initJanet(tic_mem* tic, const char* code) CurrentMachine = core; core->currentVM = (JanetTable*)janet_core_env(NULL); + + // Both the janet core lib and tic api define a `map` function + // So we give the janet core lib one the new name `iter/map`. + janet_dostring(core->currentVM, "(var iter/map map)", "setup", NULL); + + // add the tic80 api bindings janet_cfuns(core->currentVM, "tic", janet_c_functions); + + // override the dynamic err to a buffer, so that we can get errors later janet_setdyn("err", janet_wrap_buffer(janet_buffer(1024))); Janet result = janet_wrap_nil();
host/mesh: Remove krp param check. param can never be NULL, so this check is redundant. Coverity complains about this, as the param variable is accessed before the check, which would be wrong if param could be NULL. This is a port of
@@ -134,8 +134,7 @@ static int krp_status(struct bt_mesh_model *model, struct bt_mesh_msg_ctx *ctx, ctx->net_idx, ctx->app_idx, ctx->addr, buf->om_len, bt_hex(buf->om_data, buf->om_len)); - if (!bt_mesh_msg_ack_ctx_match(&cli->ack_ctx, OP_KRP_STATUS, ctx->addr, - (void **)&param)) { + if (!bt_mesh_msg_ack_ctx_match(&cli->ack_ctx, OP_KRP_STATUS, ctx->addr, (void **)&param)) { return -ENOENT; } @@ -147,11 +146,11 @@ static int krp_status(struct bt_mesh_model *model, struct bt_mesh_msg_ctx *ctx, return -ENOENT; } - if (param && param->status) { + if (param->status) { *param->status = status; } - if (param && param->phase) { + if (param->phase) { *param->phase = phase; }
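A generic standalone C sketch (illustrative names, not the mesh client code) of the pattern Coverity reports as a dereference-before-null-check: the pointer is used first, so the later NULL test is either redundant, or the earlier use is the real bug.

#include <stdio.h>

struct status_param { int *status; };

static void copy_status(struct status_param *param, int value)
{
    int *out = param->status;      /* param is dereferenced here... */

    if (param && param->status) {  /* ...so this NULL check is redundant */
        *out = value;
    }
}

int main(void)
{
    int status = 0;
    struct status_param p = { &status };

    copy_status(&p, 7);
    printf("%d\n", status);        /* prints 7 */
    return 0;
}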
Documentation: Rephrase sentences
@@ -170,9 +170,9 @@ static inline KeySet *elektraYajlContract() } ``` -It basically only contains the symbols to be exported (that are -dependent on your functions to be available) and the plugin version -information that is always defined to the macro `PLUGINVERSION`. +It basically only contains the symbols to be exported (these symbols +depend on the functions the plugin provides) and the plugin version +information that is always defined by the macro `PLUGINVERSION`. As already said, `readme_yourplugin.c` is generated in the binary directory, so make sure that your `CMakeLists.txt` contains (prefer to use `add_plugin` @@ -241,8 +241,7 @@ add_plugin (xmltool Important is that you pass the information which packages are found as boolean. The plugin will actually be added iff all of the `DEPENDENCIES` are true. -Note that no code should be outside of `if (DEPENDENCY_PHASE)` -thus it would be executed twice otherwise. The only exception is +Note that no code should be outside of `if (DEPENDENCY_PHASE)`. It would be executed twice otherwise. The only exception is `add_plugin` which *must* be called twice to successfully add a plugin. If your plugin makes use of [compilation variants](/doc/tutorials/compilation-variants.md)
in_opentelemetry: set default OTLP/HTTP port to 4318
#include "opentelemetry.h" #include "http_conn.h" +/* default HTTP port for OTLP/HTTP is 4318 */ +#define OTLP_HTTP_PORT 4318 + struct flb_opentelemetry *opentelemetry_config_create(struct flb_input_instance *ins) { int ret; @@ -43,8 +46,8 @@ struct flb_opentelemetry *opentelemetry_config_create(struct flb_input_instance return NULL; } - /* Listen interface (if not set, defaults to 0.0.0.0:9880) */ - flb_input_net_default_listener("0.0.0.0", 9880, ins); + /* Listen interface (if not set, defaults to 0.0.0.0:4318) */ + flb_input_net_default_listener("0.0.0.0", OTLP_HTTP_PORT, ins); ctx->listen = flb_strdup(ins->host.listen); snprintf(port, sizeof(port) - 1, "%d", ins->host.port);
Replace with lwesp
* \ref lwesp_mem_calloc, \ref lwesp_mem_realloc and \ref lwesp_mem_free * * \note Function declaration follows standard C functions `malloc, calloc, realloc, free`. - * Declaration is available in `esp/lwesp_mem.h` file. Include this file to final + * Declaration is available in `lwesp/lwesp_mem.h` file. Include this file to final * implementation file * * \note When implementing custom memory allocation, it is necessary
Fix `run_pending_requests` state to expect a state strictly below SEND_HEADERS for the stream (for streaming bodies)
@@ -190,7 +190,7 @@ static void run_pending_requests(h2o_http2_conn_t *conn) conn->num_streams._request_body_in_progress++; stream->_conn_stream_in_progress = 1; } else { - if (stream->state <= H2O_HTTP2_STREAM_STATE_SEND_HEADERS) { + if (stream->state < H2O_HTTP2_STREAM_STATE_SEND_HEADERS) { h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING); h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_SEND_HEADERS); }
Create friendlier default sensor names without ZHA prefix
@@ -2651,14 +2651,12 @@ void DeRestPluginPrivate::addSensorNode(const deCONZ::Node *node, const SensorFi if (sensorNode.name().isEmpty()) { - if (type.endsWith(QLatin1String("Switch"))) + QString name = type; + if (name.startsWith("ZHA")) { - sensorNode.setName(QString("Switch %1").arg(sensorNode.id())); - } - else - { - sensorNode.setName(QString("%1 %2").arg(type).arg(sensorNode.id())); + name.remove(0, 3); } + sensorNode.setName(QString("%1 %2").arg(name).arg(sensorNode.id())); } // force reading attributes
Fix range checking
@@ -232,7 +232,7 @@ int main(int argc, char *argv[]) int rc = 1; int i, iter = 1; int timeout = ACTION_WAIT_TIME; - uint32_t size = (4 * KILO_BYTE); + long size = (4 * KILO_BYTE); uint64_t begin = 0; uint8_t pattern = 0x0; int func = ACTION_CONFIG_MEMSET_H; @@ -292,6 +292,15 @@ int main(int argc, char *argv[]) break; case 's': /* size */ size = strtol(optarg, (char **)NULL, 0); + if ((errno == ERANGE && (size == LONG_MAX || size == LONG_MIN)) + || (errno != 0 && size == 0)) { + PRINTF0("Error errno %d\n", errno); + exit(1); + } + if (size > 0x100000000) { + PRINTF0("Size (-s or --size Out of range\n"); + exit(1); + } break; case 'b': /* begin */ begin = strtoll(optarg, (char **)NULL, 0); @@ -313,11 +322,6 @@ int main(int argc, char *argv[]) exit(1); } - if (0 == size) { - PRINTF0("Please Provide a Valid Size. (--size, -s)\n"); - exit(1); - } - PRINTF2("Start Memset Test. Timeout: %d sec Device: ", timeout); sprintf(device, "/dev/cxl/afu%d.0s", card_no); @@ -331,7 +335,7 @@ int main(int argc, char *argv[]) if (ACTION_CONFIG_MEMSET_H == func) { if (size > (32 * MEGA_BYTE)) { - PRINTF0("Size %d must be less than %d\n", size, (32*MEGA_BYTE)); + PRINTF0("Size %ld must be less than %d\n", size, (32*MEGA_BYTE)); goto __exit1; } h_begin = (int)(begin & 0xF) + 16; @@ -345,7 +349,7 @@ int main(int argc, char *argv[]) PRINTF1("Allocate Host Buffer at %p Size: %d Bytes\n", hb, h_mem_size); } - PRINTF1("Fill %d Bytes from %lld to %lld with Pattern 0x%02x\n", + PRINTF1("Fill %ld Bytes from %lld to %lld with Pattern 0x%02x\n", size, (long long)begin, (long long)begin+size-1, pattern); for (i = 0; i < iter; i++) {
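A minimal standalone C program (separate from the patch above) showing the strtol() range-checking idiom it adopts, as documented in the strtol man page: clear errno before the call, then test for ERANGE/LONG_MAX/LONG_MIN and for the no-digits case afterwards.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    if (argc < 2)
        return 1;

    char *end = NULL;
    errno = 0;                                  /* clear before the call */
    long size = strtol(argv[1], &end, 0);

    if ((errno == ERANGE && (size == LONG_MAX || size == LONG_MIN))
        || (errno != 0 && size == 0)) {
        fprintf(stderr, "value out of range (errno %d)\n", errno);
        return 1;
    }
    if (end == argv[1]) {
        fprintf(stderr, "no digits found\n");
        return 1;
    }

    printf("size = %ld\n", size);
    return 0;
}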
cr50: add rollback command. In DEV, it's necessary to roll back to reenter prod signed images. Let's make this reasonably easy. BRANCH=cr50 TEST=CR50_DEV fw does roll back to prod.
@@ -1504,3 +1504,17 @@ int chip_factory_mode(void) return mode_set & 1; } + +#ifdef CR50_DEV +static int command_rollback(int argc, char **argv) +{ + system_ensure_rollback(); + ccprintf("Rebooting to alternate RW due to manual request\n"); + cflush(); + system_reset(0); + + return EC_SUCCESS; +} +DECLARE_CONSOLE_COMMAND(rollback, command_rollback, + "", "Force rollback to escape DEV image."); +#endif
host/gap: fix doxygen for ble_gap_ext_disc() Documentation for the parameters `filter_duplicates` and `filter_policy` for ble_gap_ext_disc() was missing.
@@ -1592,6 +1592,16 @@ int ble_gap_disc(uint8_t own_addr_type, int32_t duration_ms, * its last Scan Duration until it begins the * subsequent Scan Duration. Specify 0 to scan * continuously. Units are 1.28 second. + * @param filter_duplicates Set to enable packet filtering in the + * controller + * @param filter_policy Set the used filter policy. Valid values are: + * - BLE_HCI_SCAN_FILT_NO_WL + * - BLE_HCI_SCAN_FILT_USE_WL + * - BLE_HCI_SCAN_FILT_NO_WL_INITA + * - BLE_HCI_SCAN_FILT_USE_WL_INITA + * - BLE_HCI_SCAN_FILT_MAX + * This parameter is ignored unless + * @p filter_duplicates is set. * @param limited If limited discovery procedure should be used. * @param uncoded_params Additional arguments specifying the particulars * of the discovery procedure for uncoded PHY.
Fix function override for inkGameController
#include "Scripting.h" #include <reverse/StrongReference.h> #include <reverse/RTTIHelper.h> +#include <reverse/RTTILocator.h> static FunctionOverride* s_pOverride = nullptr; +static RTTILocator s_inkGameControllerType("gameuiWidgetGameController"); using TRunPureScriptFunction = bool (*)(RED4ext::CClassFunction* apFunction, RED4ext::CScriptStack*, void*); static TRunPureScriptFunction RealRunPureScriptFunction = nullptr; @@ -472,7 +474,7 @@ void FunctionOverride::Override(const std::string& acTypeName, const std::string pEntry->Trampoline = pFunc; pEntry->pScripting = m_pScripting; - pEntry->CollectGarbage = aCollectGarbage; + pEntry->CollectGarbage = aCollectGarbage || pClassType->IsA(s_inkGameControllerType); // Swap the content of the real function with the one we just created std::array<char, sizeof(RED4ext::CClassFunction)> tmpBuffer;
Activate channels for SPI, I2C
/* * I2C driver system settings. */ -#define STM32_I2C_USE_I2C1 FALSE +#define STM32_I2C_USE_I2C1 TRUE #define STM32_I2C_USE_I2C2 FALSE #define STM32_I2C_USE_I2C3 FALSE #define STM32_I2C_BUSY_TIMEOUT 50 /* * SPI driver system settings. */ -#define STM32_SPI_USE_SPI1 FALSE +#define STM32_SPI_USE_SPI1 TRUE #define STM32_SPI_USE_SPI2 FALSE #define STM32_SPI_USE_SPI3 FALSE #define STM32_SPI_SPI1_RX_DMA_STREAM STM32_DMA_STREAM_ID(2, 0)
[kernel] fix wrong computation in FOLTIDS. Thanks for the bug report.
@@ -439,6 +439,7 @@ void EulerMoreauOSI::computeW(double time, DynamicalSystem& ds, } // Remark: W is not LU-factorized here. // Function PLUForwardBackward will do that if required. + DEBUG_EXPR(W.display()); DEBUG_END("EulerMoreauOSI::computeW(...)\n"); } @@ -543,6 +544,7 @@ double EulerMoreauOSI::computeResidu() folds.computeA(told); prod(*folds.A(), xold, residu); } + if(folds.b()) { folds.computeb(told); @@ -615,7 +617,7 @@ double EulerMoreauOSI::computeResidu() //Don't use W because it is LU factorized //Residu : R_{free} = M(x^{\alpha}_{k+1} - x_{k}) -h( A (\theta x^{\alpha}_{k+1} + (1-\theta) x_k) +b_{k+1}) if(foltids.b()) - residuFree = *(foltids.b()); + scal(-h, *(foltids.b()), residuFree, true); else residuFree.zero();
docs - note some aggregate and window function limitations
href="../topics/functions-operators.xml#topic29/in204913"/>.</li> <li id="in200915">Greenplum Database does not support DISTINCT with multiple input expressions.</li> + <li id="in2009151">Greenplum Database does not support specifying an + aggregate function as an argument to another aggregate function.</li> + <li id="in2009152">Greenplum Database does not support specifying a + window function as an argument to an aggregate function.</li> </ul> </body> </topic> value for every row, but that value is calculated with respect to the rows in a particular window partition. If no partition is specified, the window function is computed over the complete intermediate result set.</p> + <p>Greenplum Database does not support specifying a window function as an argument + to another window function.</p> <p>The syntax of a window expression is:</p> <p> <codeblock><i>window_function</i> ( [<i>expression</i> [, ...]] ) OVER ( <i>window_specification</i> )</codeblock>
EIP712 - Extra fixes from security review
@@ -452,8 +452,13 @@ bool set_struct_name(uint8_t length, const uint8_t *const name) { apdu_response_code = APDU_RESPONSE_CONDITION_NOT_SATISFIED; return false; } + // increment number of structs - *(typed_data->structs_array) += 1; + if ((*(typed_data->structs_array) += 1) == 0) { + PRINTF("EIP712 Structs count overflow!\n"); + apdu_response_code = APDU_RESPONSE_CONDITION_NOT_SATISFIED; + return false; + } // copy length if ((length_ptr = mem_alloc(sizeof(uint8_t))) == NULL) { @@ -555,7 +560,7 @@ static bool set_struct_field_custom_typename(const uint8_t *const data, */ static bool set_struct_field_array(const uint8_t *const data, uint8_t *data_idx, uint8_t length) { uint8_t *array_levels_count; - e_array_type *array_level; + uint8_t *array_level; uint8_t *array_level_size; if ((*data_idx + sizeof(*array_levels_count)) > length) // check buffer bound @@ -574,12 +579,12 @@ static bool set_struct_field_array(const uint8_t *const data, uint8_t *data_idx, apdu_response_code = APDU_RESPONSE_INVALID_DATA; return false; } - if ((array_level = mem_alloc(sizeof(uint8_t))) == NULL) { + if ((array_level = mem_alloc(sizeof(*array_level))) == NULL) { apdu_response_code = APDU_RESPONSE_INSUFFICIENT_MEMORY; return false; } *array_level = data[(*data_idx)++]; - if (*array_level > ARRAY_TYPES_COUNT) { + if (*array_level >= ARRAY_TYPES_COUNT) { apdu_response_code = APDU_RESPONSE_INVALID_DATA; return false; } @@ -694,8 +699,13 @@ bool set_struct_field(uint8_t length, const uint8_t *const data) { apdu_response_code = APDU_RESPONSE_CONDITION_NOT_SATISFIED; return false; } + // increment number of struct fields - *(typed_data->current_struct_fields_array) += 1; + if ((*(typed_data->current_struct_fields_array) += 1) == 0) { + PRINTF("EIP712 Struct fields count overflow!\n"); + apdu_response_code = APDU_RESPONSE_CONDITION_NOT_SATISFIED; + return false; + } if ((typedesc_ptr = set_struct_field_typedesc(data, &data_idx, length)) == NULL) { return false;
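A small standalone C example (not the app code) of the overflow guard added above: for an 8-bit counter, incrementing 255 wraps to 0, so testing the result of the increment against 0 detects the wrap.

#include <stdint.h>
#include <stdio.h>

static int bump_count(uint8_t *count)
{
    if ((*count += 1) == 0) {
        /* 255 -> 0 wraparound: refuse to add another entry */
        return -1;
    }
    return 0;
}

int main(void)
{
    uint8_t n = 255;

    printf("%d\n", bump_count(&n));   /* prints -1, n wrapped to 0 */
    return 0;
}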
hfuzz: repeat libhfuzz.a
@@ -264,7 +264,6 @@ static int ldMode(int argc, char **argv) commonOpts(&j, args); - /* Repeat it, just in case anything late needs symbols from libhfuzz.a */ args[j++] = LHFUZZ_A_PATH; /* libcommon.a will use it when compiled with clang */ @@ -278,6 +277,9 @@ static int ldMode(int argc, char **argv) args[j++] = argv[i]; } + /* Repeat it, just in case anything late needs symbols from libhfuzz.a */ + args[j++] = LHFUZZ_A_PATH; + return execCC(j, args); }
nvs_flash: Fix typo in README.rst Merges This should read "except for the new key-value pair" instead of "expect for the new key-value pair".
@@ -51,7 +51,7 @@ NVS is not directly compatible with the ESP32 flash encryption system. However, If NVS encryption is not used, it is possible for anyone with physical access to the flash chip to alter, erase, or add key-value pairs. With NVS encryption enabled, it is not possible to alter or add a key-value pair and get recognized as a valid pair without knowing corresponding NVS encryption keys. However, there is no tamper-resistance against erase operation. -The library does try to recover from conditions when flash memory is in an inconsistent state. In particular, one should be able to power off the device at any point and time and then power it back on. This should not result in loss of data, expect for the new key-value pair if it was being written at the moment of power off. The library should also be able to initialize properly with any random data present in flash memory. +The library does try to recover from conditions when flash memory is in an inconsistent state. In particular, one should be able to power off the device at any point and time and then power it back on. This should not result in loss of data, except for the new key-value pair if it was being written at the moment of power off. The library should also be able to initialize properly with any random data present in flash memory. Internals ---------
bootloader: Fix issue - bs->app_count is zero but ota_data has a valid entry. If the partition table has no bootable ota_apps but ota_data holds a valid entry, we get an error (hang). This commit fixes this case by selecting the factory app when bs->app_count is zero. Closes
@@ -211,8 +211,8 @@ int bootloader_utility_get_selected_boot_partition(const bootloader_state_t *bs) bootloader_munmap(ota_select_map); ESP_LOGD(TAG, "OTA sequence values A 0x%08x B 0x%08x", sa.ota_seq, sb.ota_seq); - if(sa.ota_seq == UINT32_MAX && sb.ota_seq == UINT32_MAX) { - ESP_LOGD(TAG, "OTA sequence numbers both empty (all-0xFF)"); + if ((sa.ota_seq == UINT32_MAX && sb.ota_seq == UINT32_MAX) || (bs->app_count == 0)) { + ESP_LOGD(TAG, "OTA sequence numbers both empty (all-0xFF) or partition table does not have bootable ota_apps (app_count=%d)", bs->app_count); if (bs->factory.offset != 0) { ESP_LOGI(TAG, "Defaulting to factory image"); return FACTORY_INDEX;
Always use nix-style cabal commands
@@ -51,7 +51,7 @@ before_install: install: - echo "$(ghc --version) [$(ghc --print-project-git-commit-id 2> /dev/null || echo '?')]" - echo "$(cabal --version)" - - cabal install --only-dependencies --enable-tests --disable-optimization + - cabal new-build --only-dependencies --enable-tests --disable-optimization script: - CABAL_CONFIG_ARGS="" @@ -80,9 +80,9 @@ script: esac - export LD_LIBRARY_PATH=${HOME}/usr/lib:$LD_LIBRARY_PATH - echo $CABAL_CONFIG_ARGS - - cabal configure --enable-tests --disable-optimization --ghc-options="-Werror" $CABAL_CONFIG_ARGS - - cabal build - - cabal test + - cabal new-configure --enable-tests --disable-optimization --ghc-options="-Werror" $CABAL_CONFIG_ARGS + - cabal new-build + - cabal new-test - cabal copy - cabal sdist # Run HLint
metadata-store: prevent infinite loop
?. ?=([%group [~ [~ [@ [@ @]]]]] config.met) ~ =* res resource.u.u.feed.config.met + ?: =(our.bowl entity.res) ~ =- `[%pass /fix-feed %agent [our.bowl %graph-pull-hook] %poke -] :- %pull-hook-action !> ^- action:pull-hook
set sp_rate to 80 in sdr_transceiver_emb_122_88/app/sdr-transceiver-emb.c
@@ -230,7 +230,7 @@ int main() RXASetPassband(0, cutoff[mode][filter][0], cutoff[mode][filter][1]); SetTXABandpassFreqs(1, cutoff[mode][filter][0], cutoff[mode][filter][1]); - *sp_rate = 125; + *sp_rate = 80; *sp_total = 32767; *sp_scale = 1024; *sp_corr = 8.68589;
Add flags to voice-info to know if the plugin will support overlapping notes
@@ -15,6 +15,12 @@ static const char CLAP_EXT_VOICE_INFO[] = "clap.voice-info.draft/0"; extern "C" { #endif +enum { + // Allows the host to send overlapping NOTE_ON events. + // The plugin will then rely upon the note_id to distinguish between them. + CLAP_VOICE_INFO_SUPPORTS_OVERLAPPING_NOTES = 1 << 0, +}; + typedef struct clap_voice_info { // voice_count is the current number of voices that the patch can use // voice_capacity is the number of voices allocated voices @@ -29,6 +35,8 @@ typedef struct clap_voice_info { // can decide to only use global modulation mapping. uint32_t voice_count; uint32_t voice_capacity; + + uint64_t flags; } clap_voice_info_t; typedef struct clap_plugin_voice_info {
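A hypothetical host-side C sketch of how the new flag might be consumed; the struct and constant are copied from the diff above so the snippet stays self-contained, and the helper name is made up.

#include <stdbool.h>
#include <stdint.h>

enum {
    CLAP_VOICE_INFO_SUPPORTS_OVERLAPPING_NOTES = 1 << 0,
};

typedef struct clap_voice_info {
    uint32_t voice_count;
    uint32_t voice_capacity;
    uint64_t flags;
} clap_voice_info_t;

/* Host decides whether it may send overlapping NOTE_ON events,
 * distinguished by note_id, to this plugin. */
static bool host_can_overlap_notes(const clap_voice_info_t *info)
{
    return (info->flags & CLAP_VOICE_INFO_SUPPORTS_OVERLAPPING_NOTES) != 0;
}

int main(void)
{
    clap_voice_info_t info = {
        .voice_count = 8,
        .voice_capacity = 16,
        .flags = CLAP_VOICE_INFO_SUPPORTS_OVERLAPPING_NOTES,
    };

    return host_can_overlap_notes(&info) ? 0 : 1;
}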
avx512f: added copyright info in avx512f.h
* Copyright: * 2020 Evan Nemerson <[email protected]> 2020 Himanshi Mathur <[email protected]> + 2020 Hidayat Khan <[email protected]> */ #if !defined(SIMDE__AVX512F_H)
fix axis_oscilloscope
@@ -91,6 +91,7 @@ module axis_oscilloscope # if(s_axis_tvalid) begin int_cntr_next = int_cntr_reg + 1'b1; + end if(trg_flag) begin int_addr_next = int_cntr_reg; @@ -98,7 +99,6 @@ module axis_oscilloscope # int_case_next = 2'd3; end end - end // post-trigger recording 3:
If we don't set content_length, it won't add the header.
@@ -524,12 +524,6 @@ _http_start_respond(u3_hreq* req_u, req_u->gen_u = gen_u; - if ( c3y == complete ) { - // XX: Joe says I may need to copy it from the header. - // - rec_u->res.content_length = 0; - } - h2o_start_response(rec_u, &gen_u->neg_u); _http_hgen_send(gen_u);
apps/system/utils: fix wrong conditional for netcmd and fscmd. SYSTEM_CMDS, FS_CMDS and NET_CMDS are separate command sets; they should not be nested inside the SYSTEM_CMDS conditional.
@@ -73,6 +73,7 @@ endif ifeq ($(CONFIG_SYSTEM_CMDS),y) CSRCS += systemcmd.c +endif ifeq ($(CONFIG_FS_CMDS),y) CSRCS += fscmd.c @@ -98,8 +99,6 @@ ifeq ($(CONFIG_NETUTILS_TFTPC),y) CSRCS += netcmd_tftpc.c endif -endif #CONFIG_TASH - ifeq ($(CONFIG_ENABLE_CPULOAD),y) CSRCS += utils_cpuload.c endif
Ignore new changes to sources
@@ -67,7 +67,9 @@ parallel-libs/mumps/SOURCES/MUMPS*.tar.gz parallel-libs/opencoarrays/SOURCES/OpenCoarrays-*.tar.gz parallel-libs/petsc/SOURCES/petsc-*.tar.gz parallel-libs/ptscotch/SOURCES/scotch*.tar.gz +parallel-libs/ptscotch/SOURCES/scotch*.bz2 parallel-libs/scalapack/SOURCES/scalapack-*.tgz +parallel-libs/scalapack/SOURCES/v*.tar.gz parallel-libs/slepc/SOURCES/slepc-*.tar.gz parallel-libs/superlu_dist/SOURCES/superlu_dist-*.tar parallel-libs/trilinos/SOURCES/trilinos-release-*.tar.gz @@ -78,6 +80,7 @@ perf-tools/imb/SOURCES/IMB-v*.tar.gz perf-tools/likwid/SOURCES/v*.tar.gz perf-tools/msr-safe/SOURCES/msr-safe-*.tar.gz perf-tools/omb/SOURCES/osu-micro-benchmarks-*.tgz +perf-tools/omb/SOURCES/osu-micro-benchmarks-*.tar.gz perf-tools/papi/SOURCES/papi-*.tar.gz perf-tools/paraver/SOURCES/wxparaver-*-src.tar.bz2 perf-tools/pdtoolkit/SOURCES/pdtoolkit-*.tar.gz @@ -103,5 +106,6 @@ serial-libs/metis/SOURCES/*.tar.gz serial-libs/openblas/SOURCES/openblas-*.tar.gz serial-libs/plasma/SOURCES/v*.tar.gz serial-libs/plasma/SOURCES/plasma*.tar.gz -serial-libs/scotch/SOURCES/scotch_*.tar.gz +serial-libs/scotch/SOURCES/scotch*.tar.gz +serial-libs/scotch/SOURCES/scotch*.bz2 serial-libs/superlu/SOURCES/superlu_*.tar.gz
mangle: use oldsz instead of run->dynamicFileSz
@@ -575,7 +575,7 @@ static void mangle_Resize(run_t* run, bool printable) { newsz = oldsz + 8 - v; break; case 17 ... 32: - newsz = run->dynamicFileSz; + newsz = oldsz; break; default: LOG_F("Illegal value from util_rndGet: %" PRIu64, v);
sysdeps/managarm: treat CLOCK_MONOTONIC_COARSE as CLOCK_MONOTONIC
@@ -25,7 +25,7 @@ namespace mlibc { int sys_clock_get(int clock, time_t *secs, long *nanos) { // This implementation is inherently signal-safe. - if(clock == CLOCK_MONOTONIC) { + if(clock == CLOCK_MONOTONIC || clock == CLOCK_MONOTONIC_COARSE) { uint64_t tick; HEL_CHECK(helGetClock(&tick)); *secs = tick / 1000000000; @@ -65,11 +65,6 @@ int sys_clock_get(int clock, time_t *secs, long *nanos) { HEL_CHECK(helGetClock(&tick)); *secs = tick / 1000000000; *nanos = tick % 1000000000; - }else if(clock == CLOCK_MONOTONIC_COARSE) { - mlibc::infoLogger() << "\e[31mmlibc: clock_gettime does not support CLOCK_MONOTONIC_COARSE" - "\e[39m" << frg::endlog; - *secs = 0; - *nanos = 0; }else if(clock == CLOCK_BOOTTIME) { mlibc::infoLogger() << "\e[31mmlibc: clock_gettime does not support CLOCK_BOOTTIME" "\e[39m" << frg::endlog;
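A small user-space sketch of the observable effect (an illustration, not code from the mlibc tree): after this change both clock IDs are served from the same monotonic tick, so the two calls below return nearly identical values instead of CLOCK_MONOTONIC_COARSE reporting zero.

#include <stdio.h>
#include <time.h>

int main(void) {
    struct timespec fine, coarse;
    clock_gettime(CLOCK_MONOTONIC, &fine);
    clock_gettime(CLOCK_MONOTONIC_COARSE, &coarse);
    printf("monotonic %lld.%09ld  coarse %lld.%09ld\n",
           (long long)fine.tv_sec, fine.tv_nsec,
           (long long)coarse.tv_sec, coarse.tv_nsec);
    return 0;
}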
fix at_server_getchar spelling error
@@ -412,7 +412,7 @@ static rt_err_t at_cmd_get_name(const char *cmd_buffer, char *cmd_name) return -RT_ERROR; } -static rt_err_t at_server_gerchar(at_server_t server, char *ch, rt_int32_t timeout) +static rt_err_t at_server_getchar(at_server_t server, char *ch, rt_int32_t timeout) { rt_err_t result = RT_EOK; @@ -595,7 +595,7 @@ int at_server_init(void) goto __exit; } - at_server_local->get_char = at_server_gerchar; + at_server_local->get_char = at_server_getchar; memcpy(at_server_local->end_mark, AT_CMD_END_MARK, sizeof(AT_CMD_END_MARK)); at_server_local->parser_entry = server_parser;
trembyle: Increase CONFIG_UART_TX_BUF_SIZE Increase console output buffer to avoid risk of losing output (eg when tracing I2C) since we have the RAM available. BRANCH=none TEST=build
#undef CONFIG_PORT80_HISTORY_LEN #define CONFIG_PORT80_HISTORY_LEN 256 +/* Increase console output buffer since we have the RAM available. */ +#undef CONFIG_UART_TX_BUF_SIZE +#define CONFIG_UART_TX_BUF_SIZE 4096 + #define I2C_PORT_TCPC0 NPCX_I2C_PORT0_0 #define I2C_PORT_TCPC1 NPCX_I2C_PORT1_0 #define I2C_PORT_BATTERY NPCX_I2C_PORT2_0
seed random number generator with current pid
@@ -1350,6 +1350,9 @@ initializer (void) { parsing_spinner = new_gspinner (); parsing_spinner->processed = &(logs->processed); parsing_spinner->filename = &(logs->filename); + + /* init random number generator */ + srand (getpid ()); } static char *
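The change itself is the one-liner shown; a common variant (not what this commit does) also mixes in the current time so that successive runs that happen to reuse a pid still get different seeds.

#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static void seed_rng(void) {
    srand(getpid());                              /* as in the commit */
    /* hypothetical variant: srand((unsigned) (getpid() ^ time(NULL))); */
}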
make sure all mangohud files are being uninstalled
@@ -168,7 +168,9 @@ clean() { uninstall() { [ "$UID" -eq 0 ] || exec sudo bash "$0" uninstall rm -rfv "/usr/lib/mangohud" + rm -rfv "/usr/share/doc/mangohud" rm -fv "/usr/share/vulkan/implicit_layer.d/mangohud.json" + rm -fv "/usr/share/vulkan/implicit_layer.d/MangoHud.json" rm -fv "/etc/ld.so.conf.d/libmangohud.conf" rm -fv "/etc/ld.so.conf.d/lib32-libmangohud.conf" rm -fv "/usr/bin/mangohud"
specreader: ignore non-spec keys When reading a specification, ignore all non-spec keys. Fixes
@@ -162,9 +162,18 @@ SpecBackendBuilder SpecMountpointReader::readMountpointSpecification (KeySet con void SpecReader::readSpecification (KeySet const & cks) { - KeySet ks (cks); + KeySet ks; Key mp; + // only accept keys in 'spec' namespace + for (Key k : cks) + { + if (k.isSpec ()) + { + ks.append (k); + } + } + ks.rewind (); // we need old fashioned loop, because it can handle ks.cut during iteration for (Key k = ks.next (); k; k = ks.next ()) {
use old-style ruby syntax
@@ -910,14 +910,16 @@ namespace "config" do begin core_build_cfg = YAML.load_file(File.join( $builddir, 'config.yml')) - rescue - puts "Error while loading config file with maven dependencies " + File.join( $builddir, 'config.yml') + rescue Exception => e + $logger.warn "Can't read core build config: #{e.inspect}" end - core_build_cfg['maven_deps']&.each { |d| + if core_build_cfg['maven_deps'] + core_build_cfg['maven_deps'].each { |d| AndroidTools::MavenDepsExtractor.instance.add_dependency( d ) } end + end unless $debug $confdir = "release"
Bugfix in HAL for LEDs. Static variable leds did not get updated.
@@ -38,6 +38,9 @@ static unsigned char leds; static inline void show_leds(unsigned char new_leds) { + + leds = new_leds; + leds_arch_set(new_leds); } /*---------------------------------------------------------------------------*/
server session BUGFIX always reset last session on delete
@@ -841,13 +841,11 @@ remove: --ps->session_count; if (i < ps->session_count) { ps->sessions[i] = ps->sessions[ps->session_count]; - if (ps->last_event_session == i) { - ps->last_event_session = 0; - } } else if (!ps->session_count) { free(ps->sessions); ps->sessions = NULL; } + ps->last_event_session = 0; return 0; } }
Fix OpenVR driver;
@@ -678,7 +678,7 @@ static ModelData* openvr_newModelData(Device device, bool animated) { model->buffers[2 * i + 0] = (ModelBuffer) { .blob = 0, .size = vertexCount * vertexSize, - .stride = vertexSize + .stride = vertexSize, .data = (char*) vertices };
OcDataHubLib: Drop invalid TODO
@@ -271,9 +271,6 @@ UpdateDataHub ( DataHubSetAppleMiscData (DataHub, OC_SYSTEM_UUID, &SystemId, sizeof (SystemId)); } DataHubSetAppleMiscAscii (DataHub, OC_BOARD_PRODUCT, Data->BoardProduct); - // - // TODO: Clover uses DataHubSetAppleProcessorData equivalent here, check if legit. - // DataHubSetAppleMiscData (DataHub, OC_BOARD_REVISION, Data->BoardRevision, sizeof (*Data->BoardRevision)); DataHubSetAppleMiscData (DataHub, OC_STARTUP_POWER_EVENTS, Data->StartupPowerEvents, sizeof (*Data->StartupPowerEvents)); DataHubSetAppleProcessorData (DataHub, OC_INITIAL_TSC, Data->InitialTSC, sizeof (*Data->InitialTSC));
Use raw string + binary matching for URL regex. Long URLs are allowed only if they are alone on their lines.
@@ -201,6 +201,8 @@ class ChangeLog: # a version that is not yet released. Something like "3.1a" is accepted. _version_number_re = re.compile(br'[0-9]+\.[0-9A-Za-z.]+') _incomplete_version_number_re = re.compile(br'.*\.[A-Za-z]') + _only_url_re = re.compile(br'^\s*\w+://\S+\s*$') + _has_url_re = re.compile(br'.*://.*') def add_categories_from_text(self, filename, line_offset, text, allow_unknown_category): @@ -219,14 +221,18 @@ class ChangeLog: category.name.decode('utf8')) body_split = category.body.splitlines() - re_has_url = re.compile('.*http[s]?://.*') for line_number, line in enumerate(body_split, 1): - if not re_has_url.match(line.decode('utf-8')) and \ + if not self.__class__._only_url_re.match(line) and \ len(line) > MAX_LINE_LENGTH: + long_url_msg = '. URL exceeding length limit must be ' \ + 'alone in it\'s line.' if \ + self.__class__._has_url_re.match(line) else "" raise InputFormatError(filename, category.body_line + line_number, - 'Line is longer than allowed: Length {} (Max {})', - len(line), MAX_LINE_LENGTH) + 'Line is longer than allowed: ' + 'Length {} (Max {}){}', + len(line), MAX_LINE_LENGTH, + long_url_msg) self.categories[category.name] += category.body
Fix some literals in Curve:evaluate;
@@ -17,19 +17,19 @@ static void evaluate(float* restrict P, size_t n, float t, vec3 p) { p[2] = P[2] + (P[6] - P[2]) * t; p[3] = P[3] + (P[7] - P[3]) * t; } else if (n == 3) { - float t1 = (1 - t); + float t1 = (1.f - t); float a = t1 * t1; - float b = 2 * t1 * t; + float b = 2.f * t1 * t; float c = t * t; p[0] = a * P[0] + b * P[4] + c * P[8]; p[1] = a * P[1] + b * P[5] + c * P[9]; p[2] = a * P[2] + b * P[6] + c * P[10]; p[3] = a * P[3] + b * P[7] + c * P[11]; } else if (n == 4) { - float t1 = (1 - t); + float t1 = (1.f - t); float a = t1 * t1 * t1; - float b = 3 * t1 * t1 * t; - float c = 3 * t1 * t * t; + float b = 3.f * t1 * t1 * t; + float c = 3.f * t1 * t * t; float d = t * t * t; p[0] = a * P[0] + b * P[4] + c * P[8] + d * P[12]; p[1] = a * P[1] + b * P[5] + c * P[9] + d * P[13]; @@ -39,7 +39,7 @@ static void evaluate(float* restrict P, size_t n, float t, vec3 p) { float b = 1.f; p[0] = p[1] = p[2] = p[3] = 0.f; for (size_t i = 0; i < n; i++, b *= (float) (n - i) / i) { - float c1 = powf(1 - t, n - (i + 1)); + float c1 = powf(1.f - t, n - (i + 1)); float c2 = powf(t, i); p[0] += b * c1 * c2 * P[i * 4 + 0]; p[1] += b * c1 * c2 * P[i * 4 + 1];
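For reference, the float constants being fixed above are the Bernstein coefficients: the general branch of evaluate() computes a degree-(n-1) Bézier curve over the n control points P_0 .. P_{n-1},

    p(t) = \sum_{i=0}^{n-1} \binom{n-1}{i} (1 - t)^{\,n-1-i} \, t^{\,i} \, P_i

and the n == 3 and n == 4 special cases are this formula written out for the quadratic and cubic curves, e.g. the cubic weights a, b, c, d are (1-t)^3, 3(1-t)^2 t, 3(1-t) t^2 and t^3.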
graph-create thread: fix metadata to metadatum rename
/- spider, graph=graph-store, - *metadata-store, + met=metadata-store, *group, group-store, inv=invite-store :: :: Setup metadata :: -=/ =metadata - %* . *metadata +=/ =metadatum:met + %* . *metadatum:met title title.action description description.action date-created now.bowl creator our.bowl module module.action == -=/ =metadata-action - [%add group graph+rid.action metadata] +=/ met-action=action:met + [%add group graph+rid.action metadatum] ;< ~ bind:m - (poke-our %metadata-store %metadata-action !>(metadata-action)) + (poke-our %metadata-store %metadata-action !>(met-action)) ;< ~ bind:m (poke-our %metadata-push-hook %push-hook-action !>([%add group])) ::
[Rust] Implement file reading in the file server
extern crate alloc; extern crate libc; +use alloc::str::{self, from_utf8}; + use axle_rt::amc_register_service; use axle_rt::printf; use axle_rt::AmcMessage; @@ -14,10 +16,10 @@ use axle_rt::{amc_message_await_untyped, amc_message_send}; use axle_rt_derive::ContainsEventField; use cstr_core::CString; -use file_manager_messages::FileManagerDirectoryContents; use file_manager_messages::FileManagerDirectoryEntry; use file_manager_messages::FileManagerReadDirectory; use file_manager_messages::{str_from_u8_nul_utf8_unchecked, LaunchProgram}; +use file_manager_messages::{FileManagerDirectoryContents, ReadFile, ReadFileResponse}; use libfs::{fs_entry_find, DirectoryImage, FsEntry}; @@ -152,10 +154,8 @@ fn read_directory(root_dir: &DirectoryImage, sender: &str, request: &FileManager } } -fn launch_program(root_dir: &DirectoryImage, sender: &str, request: &LaunchProgram) { - let requested_path = str_from_u8_nul_utf8_unchecked(&request.path); - if let Some(entry) = fs_entry_find(&root_dir, &requested_path) { - //printf!("Found FS entry: {}\n", entry.path); +fn launch_program_by_path(root_dir: &DirectoryImage, path: &str) { + if let Some(entry) = fs_entry_find(&root_dir, &path) { if entry.is_dir { printf!("Can't launch directories\n"); } else { @@ -170,6 +170,31 @@ fn launch_program(root_dir: &DirectoryImage, sender: &str, request: &LaunchProgr AmcExecBuffer::from(program_name_ptr, &entry), ); } + } else { + printf!("Couldn't find path {}\n", path); + } +} + +fn launch_program(root_dir: &DirectoryImage, sender: &str, request: &LaunchProgram) { + let requested_path = str_from_u8_nul_utf8_unchecked(&request.path); + launch_program_by_path(root_dir, requested_path) +} + +fn read_file(root_dir: &DirectoryImage, sender: &str, request: &ReadFile) { + let requested_path = str_from_u8_nul_utf8_unchecked(&request.path); + printf!("Reading {} for {}\n", requested_path, sender); + if let Some(entry) = fs_entry_find(&root_dir, &requested_path) { + if entry.is_dir { + printf!("Can't read directories\n"); + } else { + /* + amc_message_send( + sender, + ReadFileResponse::new(&entry.path, entry.file_data.unwrap()), + ); + */ + ReadFileResponse::send(sender, &entry.path, entry.file_data.unwrap()); + } } else { printf!("Couldn't find path {}\n", requested_path); } @@ -234,6 +259,11 @@ fn start(_argc: isize, _argv: *const *const u8) -> isize { msg_unparsed.source(), body_as_type_unchecked(raw_body), ), + ReadFile::EXPECTED_EVENT => read_file( + &root_dir, + msg_unparsed.source(), + body_as_type_unchecked(raw_body), + ), _ => printf!("Unknown event: {}\n", event), } }
syscall: Fix definition of syscall numbers Remove prctl's dependency on 'CONFIG_TASK_NAME_SIZE', because prctl has many other functionalities. SYS_maxsyscall must be greater than every syscall number in use.
#define SYS_setsockopt (__SYS_network + 12) #define SYS_shutdown (__SYS_network + 13) #define SYS_socket (__SYS_network + 14) -#define SYS_nnetsocket (__SYS_network + 15) +#define __SYS_prctl (__SYS_network + 15) #else -#define SYS_nnetsocket __SYS_network +#define __SYS_prctl __SYS_network #endif -/* The following is defined only if CONFIG_TASK_NAME_SIZE > 0 */ +#define SYS_prctl __SYS_prctl -#if CONFIG_TASK_NAME_SIZE > 0 -#define SYS_prctl (SYS_nnetsocket + 0) -#define SYS_fin_wait (SYS_nnetsocket + 1) -#else -#define SYS_fin_wait SYS_nnetsocket -#endif +#define SYS_fin_wait SYS_prctl + 1 -#define SYS_maxsyscall SYS_fin_wait +#define SYS_maxsyscall (SYS_fin_wait + 1) /* Note that the reported number of system calls does *NOT* include the * architecture-specific system calls. If the "real" total is required,
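A minimal, self-contained illustration of the invariant the message describes; the numeric values are placeholders, not the real ones, and the sketch additionally parenthesizes SYS_fin_wait, which is generally safer for arithmetic macros.

/* Placeholder numbers purely for illustration. */
#define __SYS_network   25
#define __SYS_prctl     (__SYS_network + 15)
#define SYS_prctl       __SYS_prctl
#define SYS_fin_wait    (SYS_prctl + 1)
#define SYS_maxsyscall  (SYS_fin_wait + 1)

/* The syscall table must have room for every number actually in use. */
_Static_assert(SYS_maxsyscall > SYS_fin_wait, "SYS_maxsyscall too small");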
ixfr-out, fix delete of remaining files on error.
@@ -1176,7 +1176,7 @@ static void ixfr_delete_rest_files(struct zone* zone, struct ixfr_data* from, { size_t prevcount = 0; struct ixfr_data* data = from; - while(data && data->file_num == 0) { + while(data) { if(data->file_num != 0) { (void)ixfr_unlink_it(zone, zfile, data->file_num, 0); data->file_num = 0; @@ -1260,7 +1260,9 @@ static int ixfr_rename_files(struct zone* zone, const char* zfile, /* failure, we cannot store files */ struct ixfr_data* prev = ixfr_data_prev(zone->ixfr, data, NULL); - /* delete the previously renamed files */ + /* delete the previously renamed files, so in + * memory stays as is, on disk we have the current + * item (and newer transfers) okay. */ if(prev) { ixfr_delete_rest_files(zone, prev, zfile); } @@ -1522,9 +1524,17 @@ static void ixfr_write_files(struct zone* zone, const char* zfile) num=1; while(data && data->file_num == 0) { if(!ixfr_write_file(zone, data, zfile, num)) { - /* there could be more files that are sitting on the + /* There could be more files that are sitting on the * disk, remove them, they are not used without - * this ixfr file */ + * this ixfr file. + * + * Give this element a file num, so it can be + * deleted, it failed to write. It may be partial, + * and we do not want to read that back in. + * We are left with the newer transfers, that form + * a correct list of transfers, that are wholly + * written. */ + data->file_num = num; ixfr_delete_rest_files(zone, data, zfile); return; }
NaCl over UDP test
@@ -70,6 +70,7 @@ int main() { rc = hclose(s0); errno_assert(rc == 0); + /* Test wrong key. */ rc = ipc_pair(s); errno_assert(rc == 0); s0 = pfx_attach(s[0], 1, 0); @@ -89,6 +90,38 @@ int main() { rc = hclose(s0); errno_assert(rc == 0); + /* Test sending via UDP. */ + struct ipaddr addr0; + rc = ipaddr_local(&addr0, NULL, 5555, IPADDR_IPV4); + errno_assert(rc == 0); + s0 = udp_open(&addr0, NULL); + errno_assert(s0 >= 0); + struct ipaddr addr1; + rc = ipaddr_remote(&addr1, "127.0.0.1", 5555, IPADDR_IPV4, -1); + errno_assert(rc == 0); + s1 = udp_open(NULL, &addr1); + errno_assert(s1 >= 0); + s0 = nacl_attach(s0, key, 32, -1); + errno_assert(s0 >= 0); + s1 = nacl_attach(s1, key, 32, -1); + errno_assert(s1 >= 0); + while(1) { + rc = msend(s1, "ABC", 3, -1); + errno_assert(rc == 0); + char buf[16]; + ssize_t sz = mrecv(s0, buf, sizeof(buf), now() + 100); + if(sz < 0 && errno == ETIMEDOUT) + continue; + errno_assert(sz >= 0); + assert(sz == 3); + assert(buf[0] == 'A' && buf[1] == 'B' && buf[2] == 'C'); + break; + } + rc = hclose(s1); + errno_assert(rc == 0); + rc = hclose(s0); + errno_assert(rc == 0); + return 0; }
Have example/zcat use the release edition
@@ -25,13 +25,32 @@ for a C compiler $cc, such as clang or gcc. #include <errno.h> #include <unistd.h> +// Defining the WUFFS_CONFIG__MODULE* macros are optional, but it lets users of +// the release/c/wuffs-etc/etc.c code whitelist which parts of Wuffs to build. +// That C file contains the entire Wuffs standard library, implementing a +// variety of codecs and file formats. Without this macro definition, an +// optimizing compiler or linker may very well discard Wuffs code for unused +// codecs, but listing the Wuffs modules we use makes that process explicit. +// Preprocessing means that such code simply isn't compiled. +#define WUFFS_CONFIG__MODULES +#define WUFFS_CONFIG__MODULE__BASE +#define WUFFS_CONFIG__MODULE__CRC32 +#define WUFFS_CONFIG__MODULE__DEFLATE +#define WUFFS_CONFIG__MODULE__GZIP + // If building this program in an environment that doesn't easily accomodate // relative includes, you can use the script/inline-c-relative-includes.go // program to generate a stand-alone C file. -#include "../../gen/c/base.c" -#include "../../gen/c/std/crc32.c" -#include "../../gen/c/std/deflate.c" -#include "../../gen/c/std/gzip.c" +#include "../../release/c/wuffs-v0.2/wuffs-v0.2.c" +// +// To build this program against the development (instead of the release) +// editions, comment out the #include line above and uncomment the #include +// lines below. +// +// #include "../../gen/c/base.c" +// #include "../../gen/c/std/crc32.c" +// #include "../../gen/c/std/deflate.c" +// #include "../../gen/c/std/gzip.c" #ifdef __linux__ #include <linux/prctl.h>
Coverity: explicit null dereference Coverity is being pretty silly here but adding the explicit pointer checks will stop a crash if something goes badly awry. Fixes Coverity
@@ -53,6 +53,10 @@ static int test_fizzbuzz(void) if (!TEST_size_t_eq(ossl_list_fizz_num(&a), na) || !TEST_size_t_eq(ossl_list_buzz_num(&b), nb) + || !TEST_ptr(ossl_list_fizz_head(&a)) + || !TEST_ptr(ossl_list_fizz_tail(&a)) + || !TEST_ptr(ossl_list_buzz_head(&b)) + || !TEST_ptr(ossl_list_buzz_tail(&b)) || !TEST_int_eq(ossl_list_fizz_head(&a)->n, 3) || !TEST_int_eq(ossl_list_fizz_tail(&a)->n, na * 3) || !TEST_int_eq(ossl_list_buzz_head(&b)->n, nb * 5) @@ -62,8 +66,12 @@ static int test_fizzbuzz(void) ossl_list_buzz_remove(&b, ossl_list_buzz_tail(&b)); if (!TEST_size_t_eq(ossl_list_fizz_num(&a), --na) || !TEST_size_t_eq(ossl_list_buzz_num(&b), --nb) + || !TEST_ptr(ossl_list_fizz_head(&a)) + || !TEST_ptr(ossl_list_buzz_tail(&b)) || !TEST_int_eq(ossl_list_fizz_head(&a)->n, 6) || !TEST_int_eq(ossl_list_buzz_tail(&b)->n, 10) + || !TEST_ptr(ossl_list_fizz_next(ossl_list_fizz_head(&a))) + || !TEST_ptr(ossl_list_fizz_prev(ossl_list_fizz_tail(&a))) || !TEST_int_eq(ossl_list_fizz_next(ossl_list_fizz_head(&a))->n, 9) || !TEST_int_eq(ossl_list_fizz_prev(ossl_list_fizz_tail(&a))->n, 15)) return 0; @@ -133,14 +141,18 @@ static int test_insert(void) ossl_list_int_remove(&l, elem + 2); /* 3 4 5 */ ossl_list_int_remove(&l, elem + 4); /* 3 5 */ ossl_list_int_remove(&l, elem + 3); /* 5 */ - if (!TEST_int_eq(ossl_list_int_head(&l)->n, 5) + if (!TEST_ptr(ossl_list_int_head(&l)) + || !TEST_ptr(ossl_list_int_tail(&l)) + || !TEST_int_eq(ossl_list_int_head(&l)->n, 5) || !TEST_int_eq(ossl_list_int_tail(&l)->n, 5)) return 0; /* Check removing the tail of a two element list works */ ossl_list_int_insert_head(&l, elem); /* 0 5 */ ossl_list_int_remove(&l, elem + 5); /* 0 */ - if (!TEST_int_eq(ossl_list_int_head(&l)->n, 0) + if (!TEST_ptr(ossl_list_int_head(&l)) + || !TEST_ptr(ossl_list_int_tail(&l)) + || !TEST_int_eq(ossl_list_int_head(&l)->n, 0) || !TEST_int_eq(ossl_list_int_tail(&l)->n, 0)) return 0;
Use VM_IDLE rather than expensive wait call before each loop of actor update script
@@ -112,7 +112,7 @@ const compileEntityEvents = (scriptSymbolName, input = [], options = {}) => { if (!branch) { scriptBuilder._packLocals(); if (loop && input.length > 0) { - scriptBuilder.nextFrameAwait(); + scriptBuilder.idle(); scriptBuilder._jump(loopId); } if (isFunction) {
pyocf: API for getting cleaning policy from cache
@@ -17,6 +17,7 @@ from ctypes import ( byref, cast, create_string_buffer, + POINTER, ) from enum import IntEnum, auto from datetime import timedelta @@ -346,6 +347,22 @@ class Cache: if c.results["error"]: raise OcfError("Error changing cleaning policy", c.results["error"]) + def get_cleaning_policy(self): + cleaning_policy = c_int() + + self.read_lock() + + status = self.owner.lib.ocf_mngt_cache_cleaning_get_policy( + self.cache_handle, byref(cleaning_policy) + ) + + self.read_unlock() + + if status != OcfErrorCode.OCF_OK: + raise OcfError("Failed to get cleaning policy", ret) + + return CleaningPolicy(cleaning_policy.value) + def set_cleaning_policy_param(self, cleaning_policy: CleaningPolicy, param_id, param_value): self.write_lock() @@ -998,6 +1015,8 @@ lib.ocf_cache_get_front_volume.argtypes = [c_void_p] lib.ocf_cache_get_front_volume.restype = c_void_p lib.ocf_cache_get_volume.argtypes = [c_void_p] lib.ocf_cache_get_volume.restype = c_void_p +lib.ocf_mngt_cache_cleaning_get_policy.argypes = [c_void_p, POINTER(c_int)] +lib.ocf_mngt_cache_cleaning_get_policy.restype = OcfErrorCode lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [ c_void_p, c_uint32,
Cirrus: Improve name of environment variable
@@ -9,18 +9,21 @@ task: freebsd_instance: image: freebsd-12-0-release-amd64 env: - # We use `PLUGINS_KDB` instead of `PLUGINS`, since `pkg` interprets the environment variable `PLUGINS`. - PLUGINS_KDB: 'ALL;-iconv;-filecheck' # Linking the iconv library fails: https://cirrus-ci.com/task/4923537438539776 + PLUGINS: 'ALL;-iconv;-filecheck' # Linking the iconv library fails: https://cirrus-ci.com/task/4923537438539776 install_script: - - pkg install -y cmake - - pkg install -y git - - pkg install -y ninja - - pkg install -y yajl + - PLUGINS_KDB="$PLUGINS"; unset PLUGINS # `pkg` tries to load modules from the environment variable `PLUGINS` + - > + pkg install -y + cmake + git + ninja + yajl + - export PLUGINS="$PLUGINS_KDB"; unset PLUGINS_KDB script: - mkdir build && cd build - - cmake -GNinja -DPLUGINS="${PLUGINS_KDB:-ALL}" -DBINDINGS='ALL' -DTARGET_PLUGIN_FOLDER="" -DCMAKE_SKIP_INSTALL_RPATH=ON .. + - cmake -GNinja -DPLUGINS="${PLUGINS:-ALL}" -DBINDINGS='ALL' -DTARGET_PLUGIN_FOLDER="" -DCMAKE_SKIP_INSTALL_RPATH=ON .. - ninja - output="$(ninja install 2>&1)" || printf '%s' "$output"
core: clarify new size of keyname on realloc
@@ -856,15 +856,16 @@ ssize_t keyAddBaseName (Key * key, const char * baseName) key->keySize += len + 1; } + const size_t newSize = key->keySize * 2; if (test_bit (key->flags, KEY_FLAG_MMAP_KEY)) { // key was in mmap region, clear flag and trigger malloc instead of realloc - key->key = elektraMalloc (key->keySize * 2); + key->key = elektraMalloc (newSize); clear_bit (key->flags, KEY_FLAG_MMAP_KEY); } else { - if (-1 == elektraRealloc ((void **) &key->key, key->keySize * 2)) return -1; + if (-1 == elektraRealloc ((void **) &key->key, newSize)) return -1; } if (!key->key) @@ -973,17 +974,17 @@ ssize_t keyAddName (Key * key, const char * newName) if (!elektraValidateKeyName (newName, nameSize)) return -1; const size_t origSize = key->keySize; - const size_t newSize = origSize + nameSize; + const size_t newSize = (origSize + nameSize) * 2; if (test_bit (key->flags, KEY_FLAG_MMAP_KEY)) { // key was in mmap region, clear flag and trigger malloc instead of realloc - key->key = elektraMalloc (newSize * 2); + key->key = elektraMalloc (newSize); clear_bit (key->flags, KEY_FLAG_MMAP_KEY); } else { - if (-1 == elektraRealloc ((void **) &key->key, newSize * 2)) return -1; + if (-1 == elektraRealloc ((void **) &key->key, newSize)) return -1; } if (!key->key) return -1; @@ -1122,15 +1123,16 @@ ssize_t keySetBaseName (Key * key, const char * baseName) elektraEscapeKeyNamePart (baseName, escaped); size_t sizeEscaped = elektraStrLen (escaped); + const size_t newSize = (key->keySize + sizeEscaped) * 2; if (test_bit (key->flags, KEY_FLAG_MMAP_KEY)) { // key was in mmap region, clear flag and trigger malloc instead of realloc - key->key = elektraMalloc ((key->keySize + sizeEscaped) * 2); + key->key = elektraMalloc (newSize); clear_bit (key->flags, KEY_FLAG_MMAP_KEY); } else { - if (-1 == elektraRealloc ((void **) &key->key, (key->keySize + sizeEscaped) * 2)) return -1; + if (-1 == elektraRealloc ((void **) &key->key, newSize)) return -1; } if (!key->key)
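The pattern in all three hunks is the same: compute the new capacity once, then malloc a fresh buffer when the key name lives in a read-only mmap region (where realloc is not possible) and realloc otherwise. A stripped-down, hypothetical version of that pattern, not the actual libelektra helpers:

#include <stdlib.h>
#include <string.h>

/* Grow *buf to `needed * 2` bytes. When the old buffer is mmap-backed it
 * cannot be realloc'd, so a fresh allocation is made and the old contents
 * are copied over; the caller then clears its mmap flag. */
static int growBuffer (char ** buf, size_t * size, size_t needed, int isMmap)
{
	const size_t newSize = needed * 2;
	if (isMmap)
	{
		char * fresh = malloc (newSize);
		if (!fresh) return -1;
		memcpy (fresh, *buf, *size);
		*buf = fresh;
	}
	else
	{
		char * tmp = realloc (*buf, newSize);
		if (!tmp) return -1;
		*buf = tmp;
	}
	*size = newSize;
	return 0;
}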
Docs: added missing columns to table pg_trigger
<entry colname="col4">The table referenced by an referential integrity constraint.</entry> </row> + <row> + <entry><codeph>tgconstrindid</codeph></entry> + <entry>oid</entry> + <entry><i>pg_class.oid</i></entry> + <entry>The index supporting a unique, primary key, or referential integrity + constraint.</entry> + </row> + <row> + <entry><codeph>tgconstraint</codeph></entry> + <entry>oid</entry> + <entry><i>pg_constraint.oid</i></entry> + <entry>The <codeph>pg_constraint</codeph> entry associated with the trigger, if + any.</entry> + </row> <row> <entry colname="col1"><codeph>tgdeferrable</codeph></entry> <entry colname="col2">boolean</entry> <entry colname="col3"/> <entry colname="col4">Argument strings to pass to trigger, each NULL-terminated.</entry> </row> + <row> + <entry><codeph>tgqual</codeph></entry> + <entry>pg_node_tree</entry> + <entry/> + <entry>Expression tree (in <codeph>nodeToString()</codeph> representation) for the + trigger's <codeph>WHEN</codeph> condition, or null if none.</entry> + </row> </tbody> </tgroup> </table>
[apps] Fix systolic_matrix_create() for non-multiple matrices
@@ -97,7 +97,6 @@ void systolic_matrix_allocate(systolic_matrix_t **syst_matrix, *syst_matrix = new_matrix; } -// TODO: Copy over for non-multiple matrices -> Fill with 0 (memory overflow) void systolic_matrix_create(systolic_matrix_t **syst_matrix, int32_t *matrix, uint32_t num_rows, uint32_t num_cols) { // Calculate how many submatrices in row and col dimension @@ -114,6 +113,7 @@ void systolic_matrix_create(systolic_matrix_t **syst_matrix, int32_t *matrix, // Store submatrices uint32_t idx = 0; uint32_t anchor; + uint32_t rem_x, rem_y; for (uint32_t y = 0; y < num_rows; y += SYSTOLIC_SIZE) { for (uint32_t x = 0; x < num_cols; x += SYSTOLIC_SIZE) { // Allocate submatrix @@ -121,10 +121,27 @@ void systolic_matrix_create(systolic_matrix_t **syst_matrix, int32_t *matrix, // Copy over values from matrix anchor = y * num_cols + x; - for (uint32_t syst_y = 0; syst_y < SYSTOLIC_SIZE; ++syst_y) { - for (uint32_t syst_x = 0; syst_x < SYSTOLIC_SIZE; ++syst_x) { - sub_matrix[syst_y * SYSTOLIC_SIZE + syst_x] = - matrix[anchor + syst_y * num_cols + syst_x]; + rem_x = num_cols - x; + rem_y = num_rows - y; + if ((rem_x < SYSTOLIC_SIZE) || (rem_y < SYSTOLIC_SIZE)) { + // Submatrix is only partly within original matrix + for (uint32_t sub_y = 0; sub_y < SYSTOLIC_SIZE; ++sub_y) { + for (uint32_t sub_x = 0; sub_x < SYSTOLIC_SIZE; ++sub_x) { + if ((sub_x < rem_x) && (sub_y < rem_y)) { + sub_matrix[sub_y * SYSTOLIC_SIZE + sub_x] = + matrix[anchor + sub_y * num_cols + sub_x]; + } else { + sub_matrix[sub_y * SYSTOLIC_SIZE + sub_x] = 0; + } + } + } + } else { + // Submatrix is fully within original matrix + for (uint32_t sub_y = 0; sub_y < SYSTOLIC_SIZE; ++sub_y) { + for (uint32_t sub_x = 0; sub_x < SYSTOLIC_SIZE; ++sub_x) { + sub_matrix[sub_y * SYSTOLIC_SIZE + sub_x] = + matrix[anchor + sub_y * num_cols + sub_x]; + } } }
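The arithmetic behind the new padding path, with placeholder numbers (SYSTOLIC_SIZE = 4 is an assumption for illustration only):

#define SYSTOLIC_SIZE 4u /* placeholder value for illustration */

/* Tiles needed along one dimension: ceiling division. */
static unsigned num_tiles(unsigned dim) {
  return (dim - 1) / SYSTOLIC_SIZE + 1;
}
/* e.g. a 10 x 6 matrix needs num_tiles(10) = 3 by num_tiles(6) = 2 tiles;
 * the bottom tiles then carry 3*4 - 10 = 2 zero-padded rows and the right
 * tiles 2*4 - 6 = 2 zero-padded columns, which is exactly the region the
 * else branch of the (sub_x < rem_x) && (sub_y < rem_y) check fills. */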
rms/slurm: re-enable external pmix build configuration
%include %{_sourcedir}/OHPC_macros %global _with_mysql 1 +%global _with_pmix --with-pmix=%{OHPC_ADMIN}/pmix %define pname slurm @@ -85,6 +86,8 @@ Obsoletes: slurm-lua%{PROJ_DELIM} slurm-munge%{PROJ_DELIM} slurm-plugins%{PROJ_D %if 0%{?suse_version} PreReq: %{insserv_prereq} %{fillup_prereq} %endif +BuildRequires: pmix%{PROJ_DELIM} +Requires: pmix%{PROJ_DELIM} #!BuildIgnore: post-build-checks # fake systemd support when building rpms on other platforms
odissey: add console_query_show() and parser tokens
@@ -57,6 +57,19 @@ typedef struct machine_queue_t *response; } od_msgconsole_t; +enum +{ + OD_LSHOW, + OD_LSTATS +}; + +static od_keyword_t od_console_keywords[] = +{ + od_keyword("show", OD_LSHOW), + od_keyword("stats", OD_LSTATS), + { 0, 0, 0 } +}; + static inline int od_console_show_stats(od_console_t *console, od_msgconsole_t *msg_console) { @@ -140,6 +153,17 @@ od_console_show_stats(od_console_t *console, od_msgconsole_t *msg_console) return 0; } +static inline int +od_console_query_show(od_console_t *console, od_parser_t *parser, + od_msgconsole_t *msg_console) +{ + (void)parser; + + int rc; + rc = od_console_show_stats(console, msg_console); + return rc; +} + static inline int od_console_query(od_console_t *console, od_msgconsole_t *msg_console) { @@ -154,9 +178,36 @@ od_console_query(od_console_t *console, od_msgconsole_t *msg_console) od_debug_client(&instance->logger, &client->id, "console", "%.*s", query_len, query); + od_parser_t parser; + od_parser_init(&parser, query, query_len); + + od_token_t token; int rc; - rc = od_console_show_stats(console, msg_console); - return rc; + rc = od_parser_next(&parser, &token); + switch (rc) { + case OD_PARSER_EOF: + return 0; + case OD_PARSER_KEYWORD: + break; + default: + /* error */ + return -1; + } + od_keyword_t *keyword; + keyword = od_keyword_match(od_console_keywords, &token); + if (keyword == NULL) { + /* error */ + return -1; + } + switch (keyword->id) { + case OD_LSHOW: + return od_console_query_show(console, &parser, msg_console); + default: + /* error */ + break; + } + + return -1; } static void
Added version strings
@@ -75,6 +75,11 @@ if (NOT PUPNP_VERSION_STRING) endif() project (PUPNP VERSION ${PUPNP_VERSION_STRING} LANGUAGES C) +set (UPNP_VERSION_MAJOR ${PUPNP_VERSION_MAJOR}) +set (UPNP_VERSION_MINOR ${PUPNP_VERSION_MINOR}) +set (UPNP_VERSION_PATCH ${PUPNP_VERSION_PATCH}) +set (UPNP_VERSION_STRING ${PUPNP_VERSION_STRING}) + include (GNUInstallDirs) if (WIN32)
.travis.yml: Install GCC 7.x for coverage builds. To get around false-positive strict aliasing errors.
@@ -104,6 +104,12 @@ jobs: - stage: test env: NAME="unix coverage build and tests" install: + # Install gcc 7.x, as older version has glitchy strict alisasing warnings-as-errors + - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test + - sudo apt-get update -qq || true + - sudo apt-get install -y gcc-7 + - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 80 + - sudo update-alternatives --config gcc - sudo apt-get install python3-pip - sudo pip install cpp-coveralls - sudo pip3 install setuptools @@ -143,7 +149,13 @@ jobs: - stage: test env: NAME="unix coverage 32-bit build and tests" install: - - sudo apt-get install gcc-multilib libffi-dev:i386 + # Install gcc 7.x, as older version has glitchy strict alisasing warnings-as-errors + - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test + - sudo apt-get update -qq || true + - sudo apt-get install -y gcc-7 + - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-7 80 + - sudo update-alternatives --config gcc + - sudo apt-get install gcc-multilib gcc-7-multilib libffi-dev:i386 - sudo apt-get install python3-pip - sudo pip3 install setuptools - sudo pip3 install pyelftools
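For readers unfamiliar with the warning class being worked around: strict-aliasing diagnostics fire on type punning through incompatible pointer casts. A generic illustration, not code from this repository, including the memcpy idiom that sidesteps the problem:

#include <stdint.h>
#include <string.h>

/* Undefined behaviour: reads a float's bytes through an incompatible
 * pointer. Compilers may warn under -Wstrict-aliasing. */
static uint32_t float_bits_bad(float f) {
    return *(uint32_t *)&f;
}

/* Well-defined alternative: copy the bytes; optimizing compilers typically
 * emit the same machine code without the aliasing hazard. */
static uint32_t float_bits_ok(float f) {
    uint32_t u;
    memcpy(&u, &f, sizeof u);
    return u;
}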
Add workaround for spurious clang warning
@@ -2615,7 +2615,7 @@ int main(int argc, char **argv) { int init_grid_dim_y = 25, init_grid_dim_x = 57; bool should_autosize_grid = true; - Tui t = {0}; + Tui t = {.file_name = NULL}; // Weird because of clang warning t.undo_history_limit = 100; t.softmargin_y = 1; t.softmargin_x = 2;
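Both initializer forms zero every remaining member in C; which spelling a given compiler version warns about (for example under -Wmissing-field-initializers) varies, which is what makes the workaround look odd. A trimmed-down illustration with a placeholder struct, not the full Tui from the file:

#include <stddef.h>

struct Tui {
    char *file_name;
    int undo_history_limit;
    int softmargin_y, softmargin_x;
};

int main(void) {
    struct Tui a = {0};                 /* every member zero-initialized */
    struct Tui b = {.file_name = NULL}; /* designated init; the rest is zeroed too */
    (void)a; (void)b;
    return 0;
}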
Fix thumbnails in background select
@@ -43,7 +43,7 @@ class BackgroundSelect extends Component { <div className="Thumbnail" style={{ - backgroundImage: `url("${assetFilename( + backgroundImage: `url("file://${assetFilename( projectRoot, "backgrounds", backgroundsLookup[value]
correct gyro orientation geprcf405
#define GYRO_SPI_PORT SPI_PORT3 #define GYRO_NSS PIN_A15 #define GYRO_INT PIN_C3 -#define GYRO_ORIENTATION GYRO_ROTATE_90_CCW +#define GYRO_ORIENTATION GYRO_ROTATE_180 // RADIO #ifdef SERIAL_RX