message | diff
---|---
doc: fix+add link | @@ -477,4 +477,7 @@ Any plugin expects the passed `KeySet` to be **rewinded**.
## Further Readings
-- [Details of how to write a storage plugin](doc/tutorials/storage-plugins.md)
+Read more about:
+
+- [contracts](/doc/help/elektra-contracts.md)
+- [of how to write a storage plugin](storage-plugins.md)
|
khan: comment fixes | @@ -546,7 +546,7 @@ _khan_sock_err_chdir:
u3_king_bail();
}
-/* _khan_born_news(): initialization complete, open socket.
+/* _khan_born_news(): initialization complete; vane available.
*/
static void
_khan_born_news(u3_ovum* egg_u, u3_ovum_news new_e)
@@ -566,7 +566,7 @@ _khan_born_bail(u3_ovum* egg_u, u3_noun lud)
u3l_log("khan: %%born failure; %%fyrd not supported\n");
}
-/* _khan_io_talk(): notify %khan that we're live
+/* _khan_io_talk(): open socket and notify %khan that we're live.
*/
static void
_khan_io_talk(u3_auto* car_u)
|
periph_ctrl: port*_CRITICAL vanilla FreeRTOS compliance | @@ -29,26 +29,26 @@ static uint32_t get_rst_en_reg(periph_module_t periph);
void periph_module_enable(periph_module_t periph)
{
- portENTER_CRITICAL(&periph_spinlock);
+ portENTER_CRITICAL_SAFE(&periph_spinlock);
DPORT_SET_PERI_REG_MASK(get_clk_en_reg(periph), get_clk_en_mask(periph));
DPORT_CLEAR_PERI_REG_MASK(get_rst_en_reg(periph), get_rst_en_mask(periph, true));
- portEXIT_CRITICAL(&periph_spinlock);
+ portEXIT_CRITICAL_SAFE(&periph_spinlock);
}
void periph_module_disable(periph_module_t periph)
{
- portENTER_CRITICAL(&periph_spinlock);
+ portENTER_CRITICAL_SAFE(&periph_spinlock);
DPORT_CLEAR_PERI_REG_MASK(get_clk_en_reg(periph), get_clk_en_mask(periph));
DPORT_SET_PERI_REG_MASK(get_rst_en_reg(periph), get_rst_en_mask(periph, false));
- portEXIT_CRITICAL(&periph_spinlock);
+ portEXIT_CRITICAL_SAFE(&periph_spinlock);
}
void periph_module_reset(periph_module_t periph)
{
- portENTER_CRITICAL(&periph_spinlock);
+ portENTER_CRITICAL_SAFE(&periph_spinlock);
DPORT_SET_PERI_REG_MASK(get_rst_en_reg(periph), get_rst_en_mask(periph, false));
DPORT_CLEAR_PERI_REG_MASK(get_rst_en_reg(periph), get_rst_en_mask(periph, false));
- portEXIT_CRITICAL(&periph_spinlock);
+ portEXIT_CRITICAL_SAFE(&periph_spinlock);
}
static uint32_t get_clk_en_mask(periph_module_t periph)
|
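The `_SAFE` critical-section variants are meant to be callable whether the driver runs from a task or from an interrupt handler. Below is a minimal toy sketch of that dispatch idea in C; every name in it is invented for illustration and is not the FreeRTOS/ESP-IDF API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins for the real port layer; all names here are made up. */
    static bool g_in_isr = false;                      /* pretend context flag */
    static bool in_isr_context(void) { return g_in_isr; }

    static void lock_from_task(void) { puts("task-context lock (masks interrupts)"); }
    static void lock_from_isr(void)  { puts("isr-context lock (already masked)"); }

    /* The "_SAFE" idea: one entry point picks the correct locking variant at
     * run time, so the same driver call works from a task or from an ISR. */
    static void critical_enter_safe(void)
    {
        if (in_isr_context())
            lock_from_isr();
        else
            lock_from_task();
    }

    int main(void)
    {
        critical_enter_safe();      /* task context   */
        g_in_isr = true;
        critical_enter_safe();      /* interrupt path */
        return 0;
    }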
fix(tests): return error code when build fails | @@ -22,6 +22,7 @@ echo "Running $testcase:"
west build -d build/$testcase -b native_posix -- -DZMK_CONFIG=$testcase > /dev/null 2>&1
if [ $? -gt 0 ]; then
echo "FAIL: $testcase did not build" >> ./build/tests/pass-fail.log
+ exit 1
else
./build/$testcase/zephyr/zmk.exe | sed -e "s/.*> //" | tee build/$testcase/keycode_events_full.log | sed -n -f $testcase/events.patterns > build/$testcase/keycode_events.log
diff -au $testcase/keycode_events.snapshot build/$testcase/keycode_events.log
|
ble_mesh: Fixes wrong subnet info used for Friend Clear
When a Friend node tries to send a Friend Clear message to other
Friend nodes, it should use the subnet information based on
the net_idx from the friendship. | @@ -363,7 +363,7 @@ static int unseg_app_sdu_unpack(struct bt_mesh_friend *frnd,
meta->is_dev_key = (app_idx == BLE_MESH_KEY_DEV);
bt_mesh_net_header_parse(&buf->b, &meta->net);
err = bt_mesh_app_key_get(meta->subnet, app_idx, &meta->key,
- &meta->aid, NODE, meta->net.ctx.addr);
+ &meta->aid, 0x0, meta->net.ctx.addr);
if (err) {
return err;
}
@@ -774,7 +774,7 @@ static void send_friend_clear(struct bt_mesh_friend *frnd)
.send_ttl = BLE_MESH_TTL_MAX,
};
struct bt_mesh_net_tx tx = {
- .sub = &bt_mesh.sub[0],
+ .sub = bt_mesh_subnet_get(frnd->net_idx),
.ctx = &ctx,
.src = bt_mesh_primary_addr(),
.xmit = bt_mesh_net_transmit_get(),
|
Fix OpenVR models; | @@ -152,6 +152,7 @@ ModelData* lovrModelDataFromOpenVRModel(OpenVRModel* vrModel) {
vec_init(&root->meshes);
vec_push(&root->meshes, 0);
vec_init(&root->children);
+ mat4_identity(root->transform);
modelData->root = root;
modelData->hasNormals = 1;
|
Replace fill with class fill-current | @@ -13,7 +13,7 @@ export const Lock = (props: React.SVGProps<SVGSVGElement>) => (
fillRule="evenodd"
clipRule="evenodd"
d="M8 5H9C9.55228 5 10 5.44772 10 6V11C10 11.5523 9.55229 12 9 12H1C0.447716 12 0 11.5523 0 11V6C0 5.44772 0.447715 5 1 5H2V3C2 1.34315 3.34315 0 5 0C6.65685 0 8 1.34315 8 3V5ZM7 5V3C7 1.89543 6.10457 1 5 1C3.89543 1 3 1.89543 3 3V5H7ZM3 6H9V11H1V6H2H3Z"
- fill="black"
+ className="fill-current"
strokeMiterlimit="10"
/>
</svg>
|
docs/library/machine.Timer.rst: Add mention of constructor arguments. | @@ -32,6 +32,8 @@ Constructors
Construct a new timer object of the given id. Id of -1 constructs a
virtual timer (if supported by a board).
+ See ``init`` for parameters of initialisation.
+
Methods
-------
|
Swap #if blocks in uid.c so target platform gets checked before host
This avoids the case where a UEFI build on FreeBSD tries to call the system
issetugid function instead of returning 0 as it should do.
CLA: trivial | #include <openssl/crypto.h>
#include <openssl/opensslconf.h>
-#if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD__ > 2) || defined(__DragonFly__)
-
-# include OPENSSL_UNISTD
+#if defined(OPENSSL_SYS_WIN32) || defined(OPENSSL_SYS_VXWORKS) || defined(OPENSSL_SYS_UEFI)
int OPENSSL_issetugid(void)
{
- return issetugid();
+ return 0;
}
-#elif defined(OPENSSL_SYS_WIN32) || defined(OPENSSL_SYS_VXWORKS) || defined(OPENSSL_SYS_UEFI)
+#elif defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD__ > 2) || defined(__DragonFly__)
+
+# include OPENSSL_UNISTD
int OPENSSL_issetugid(void)
{
- return 0;
+ return issetugid();
}
#else
|
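The swap works because the preprocessor takes the first `#if` branch whose condition holds, and in a cross-build both a target macro (such as OPENSSL_SYS_UEFI) and a host macro (such as __FreeBSD__) can be defined at once, so the target check must come first. A self-contained sketch of the pattern, assuming made-up TARGET_/HOST_ macros rather than the real OpenSSL ones:

    #include <stdio.h>

    /* Pretend we are building a UEFI target on a FreeBSD host, so both
     * macros are visible at once (defined here by hand for the demo). */
    #define TARGET_IS_UEFI  1
    #define HOST_IS_FREEBSD 1

    /* Target-platform checks must precede host-platform checks; otherwise the
     * host branch wins and the build calls a function the target lacks. */
    #if defined(TARGET_IS_UEFI)
    static int is_setugid(void) { return 0; }            /* target has no such concept */
    #elif defined(HOST_IS_FREEBSD)
    static int is_setugid(void) { return issetugid(); }  /* host-only system call */
    #else
    static int is_setugid(void) { return 0; }
    #endif

    int main(void)
    {
        printf("issetugid-equivalent: %d\n", is_setugid());
        return 0;
    }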
Improve find angle header file | @@ -310,7 +310,10 @@ end
function find_angle_header_file(target, file)
local headerpaths = modules_support(target).toolchain_includedirs(target)
for _, dep in ipairs(target:orderdeps()) do
- table.insert(headerpaths, dep:get("sysincludedirs") or dep:get("includedirs"))
+ local includedirs = dep:get("sysincludedirs") or dep:get("includedirs")
+ if includedirs then
+ table.join2(headerpaths, includedirs)
+ end
end
for _, pkg in ipairs(target:pkgs()) do
local includedirs = pkg:get("sysincludedirs") or pkg:get("includedirs")
|
Fix to GNSS test code only.
The computation of the number of expected decoded messages wasn't taking the number of outsized messages into account correctly; it does now. | @@ -735,7 +735,7 @@ U_PORT_TEST_FUNCTION("[gnssMsg]", "gnssMsgReceiveNonBlocking")
uPortLog("\n");
# ifdef U_GNSS_MSG_TEST_INCLUDE_A_POS
if (z == 0) {
- if (pTmp->numDecoded - pTmp->numOutsize < pTmp->numDecodedMin) {
+ if (pTmp->numDecoded + pTmp->numOutsize < pTmp->numDecodedMin) {
bad = true;
}
} else {
@@ -744,11 +744,11 @@ U_PORT_TEST_FUNCTION("[gnssMsg]", "gnssMsgReceiveNonBlocking")
}
}
# else
- if (pTmp->numDecoded - pTmp->numOutsize < pTmp->numDecodedMin) {
+ if (pTmp->numDecoded + pTmp->numOutsize < pTmp->numDecodedMin) {
bad = true;
}
# endif
- if (pTmp->numReceived - pTmp->numOutsize < pTmp->numRead) {
+ if (pTmp->numReceived + pTmp->numOutsize < pTmp->numRead) {
bad = true;
}
if (pTmp->numNotWanted > 0) {
|
use H2O_SIZE_T_LONGEST_STR from | @@ -109,8 +109,8 @@ static inline void h2o_probe_log_response(h2o_req_t *req, uint64_t req_index)
H2O_PROBE_CONN(SEND_RESPONSE, req->conn, req_index, req->res.status);
if (H2O_CONN_IS_PROBED(SEND_RESPONSE_HEADER, req->conn)) {
if (req->res.content_length != SIZE_MAX) {
- char buf[sizeof(H2O_UINT64_LONGEST_STR)];
- size_t len = sprintf(buf, "%" PRIu64, req->res.content_length);
+ char buf[sizeof(H2O_SIZE_T_LONGEST_STR)];
+ size_t len = (size_t)sprintf(buf, "%zu", req->res.content_length);
h2o_probe_response_header(req, req_index, H2O_TOKEN_CONTENT_LENGTH->buf, h2o_iovec_init(buf, len));
}
size_t i;
|
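The change matters because `content_length` is a `size_t`: the buffer has to be sized for the longest decimal `size_t` and printed with `%zu` rather than a `uint64_t` format. A standalone sketch of the same idea; the LONGEST_SIZE_T_STR macro below is a local stand-in, not H2O's H2O_SIZE_T_LONGEST_STR.

    #include <stddef.h>
    #include <stdio.h>

    /* Enough characters for the largest 64-bit size_t in decimal:
     * 18446744073709551615 is 20 digits; sizeof() adds the NUL. */
    #define LONGEST_SIZE_T_STR "18446744073709551615"

    int main(void)
    {
        size_t content_length = (size_t)-1;       /* worst case: SIZE_MAX */
        char buf[sizeof(LONGEST_SIZE_T_STR)];

        /* %zu is the portable conversion for size_t; casting the int result
         * to size_t keeps later length arithmetic unsigned, as in the patch. */
        size_t len = (size_t)snprintf(buf, sizeof(buf), "%zu", content_length);

        printf("formatted \"%s\" (%zu chars)\n", buf, len);
        return 0;
    }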
Add news entries, and fix a typo | Next Release
============
+ * Experimental support for wire compression.
+ * Support for snappy and zlib. MongoDB 3.4 only supports snappy, while zlib
+ support is expected in MongoDB 3.6. Wire compression is still experimental
+ for MongoDB clients, and therefore hidden from default configuration.
+ The enable, configure mongoc like so:
+ ./configure --enable-snappy --enable-zlib MONGOC_36_EXPERIMENT=yes
+ * New functions: mongoc_uri_get_compressors & mongoc_uri_set_compressors, to
+ get and set compressor configuration on mongoc_uri_t
+ * Added support for comma seperated "compressors" connection string option (e.g.
+ mongodb://localhost/?compressors=snappy,zlib)
+ * Added support for configuring zlib compression level in the connection string
+ (e.g. mongodb://localhost/?compressors=zlib&zlibcompressionlevel=8)
* Now requires the use of CMake config files for libbson to build libmongoc
with CMake
+ * Added pkg-config support for libressl.
* New function mongoc_uri_set_auth_mechanism to update the authentication
mechanism of a mongoc_uri_t after it is created from a string.
* New function mongoc_bulk_operation_insert_with_opts provides immediate
error checking.
+ * Support for MongoDB Connection String specification
+ * All connection string options are now represented by MONGOC_URI_xxx macros
+ * Paths to Unix Domain Sockets must be url encoded
+ * Repeated options now issue warnings
+ * Special characters in username, password and other values must be url encoded
+ * Unsupported connection string options now issue warnings
+ * Boolean values can now be represented as true/yes/y/t/1 and false/no/n/f/0.
+
mongo-c-driver 1.6.0
====================
@@ -1353,7 +1374,7 @@ Changes include:
* Addition of missing trace macros
* Improvement of internal error messages
* Fix a segfault in OpenSSL cleanup routines
- * Fix for IPv66 support for replica sets
+ * Fix for IPv6 support for replica sets
* Coalesce small vectorized TLS writes
* MinGW fixups
* Fix for a memory leak in get_database_names()
|
Updated README.md - incremental log processing. | @@ -489,7 +489,8 @@ your local machine!
#### INCREMENTAL LOG PROCESSING ####
GoAccess has the ability to process logs incrementally through the on-disk
-B+Tree database. It works in the following way:
+[B+Tree](https://github.com/allinurl/goaccess#storage) database. It works in
+the following way:
1. A data set must be persisted first with `--keep-db-files`, then the same
data set can be loaded with `--load-from-disk`.
|
in_netif: add missing return type | @@ -184,7 +184,7 @@ static inline uint64_t calc_diff(struct netif_entry *entry)
}
#define LINE_LEN 256
-static read_proc_file_linux(struct flb_in_netif_config *ctx)
+static int read_proc_file_linux(struct flb_in_netif_config *ctx)
{
FILE *fp = NULL;
char line[LINE_LEN] = {0};
|
docs/conf: Version 2.12.0.6. | @@ -74,7 +74,7 @@ copyright = '2014-2019, Damien P. George, Paul Sokolovsky, and contributors'
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
-version = release = '2.12.0.5'
+version = release = '2.12.0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
|
Dealing with white spaces in file paths in the cert sign script | @echo off
SET "SignToolDir=C:\Program Files (x86)\Windows Kits\8.1\bin\x64"
-for /r "%~dp0\..\" %%i in (*.sys) do "%SignToolDir%\signtool.exe" sign /f %~dp0\VirtIOTestCert.pfx %%i
-for /r "%~dp0\..\" %%i in (*.cat) do "%SignToolDir%\signtool.exe" sign /f %~dp0\VirtIOTestCert.pfx %%i
+for /r "%~dp0\..\" %%i in (*.sys) do "%SignToolDir%\signtool.exe" sign /f "%~dp0\VirtIOTestCert.pfx" "%%i"
+for /r "%~dp0\..\" %%i in (*.cat) do "%SignToolDir%\signtool.exe" sign /f "%~dp0\VirtIOTestCert.pfx" "%%i"
|
Fixup EVP-MAC-KMAC documentation
Fixes
Added example that shows setup of XOF. | @@ -42,11 +42,78 @@ The length of the "size" parameter should not exceed that of a B<size_t>.
=item "xof" (B<OSSL_MAC_PARAM_XOF>) <integer>
+The "xof" parameter value is expected to be 1 or 0. Use 1 to enable XOF mode.
+The default value is 0.
+
=back
-The "xof" parameter value is expected to be 1 or 0. Use 1 to enable XOF
-mode. If XOF is enabled then the output length that is encoded as part of
-the input stream is set to zero.
+The "custom" and "key" parameters must be set before EVP_MAC_init().
+The "xof" and "size" parameters can be set at any time before EVP_MAC_final().
+
+=head1 EXAMPLES
+
+ #include <openssl/evp.h>
+ #include <openssl/params.h>
+
+ static int do_kmac(const unsigned char *in, size_t in_len,
+ const unsigned char *key, size_t key_len,
+ const unsigned char *custom, size_t custom_len,
+ int xof_enabled, unsigned char *out, int out_len)
+ {
+ EVP_MAC_CTX *ctx = NULL;
+ EVP_MAC *mac = NULL;
+ OSSL_PARAM params[4], *p;
+ int ret = 0;
+ size_t l = 0;
+
+ mac = EVP_MAC_fetch(NULL, "KMAC-128", NULL);
+ if (mac == NULL)
+ goto err;
+ ctx = EVP_MAC_CTX_new(mac);
+ /* The mac can be freed after it is used by EVP_MAC_CTX_new */
+ EVP_MAC_free(mac);
+ if (ctx == NULL)
+ goto err;
+
+ /*
+ * Setup parameters required before calling EVP_MAC_init()
+ * The parameters OSSL_MAC_PARAM_XOF and OSSL_MAC_PARAM_SIZE may also be
+ * used at this point.
+ */
+ p = params;
+ *p++ = OSSL_PARAM_construct_octet_string(OSSL_MAC_PARAM_KEY,
+ (void *)key, key_len);
+ if (custom != NULL && custom_len != 0)
+ *p++ = OSSL_PARAM_construct_octet_string(OSSL_MAC_PARAM_CUSTOM,
+ (void *)custom, custom_len);
+ *p = OSSL_PARAM_construct_end();
+ if (!EVP_MAC_CTX_set_params(ctx, params))
+ goto err;
+
+ if (!EVP_MAC_init(ctx))
+ goto err;
+
+ /*
+ * Note: the following optional parameters can be set any time
+ * before EVP_MAC_final().
+ */
+ p = params;
+ *p++ = OSSL_PARAM_construct_int(OSSL_MAC_PARAM_XOF, &xof_enabled);
+ *p++ = OSSL_PARAM_construct_int(OSSL_MAC_PARAM_SIZE, &out_len);
+ *p = OSSL_PARAM_construct_end();
+ if (!EVP_MAC_CTX_set_params(ctx, params))
+ goto err;
+
+ /* The update may be called multiple times here for streamed input */
+ if (!EVP_MAC_update(ctx, in, in_len))
+ goto err;
+ if (!EVP_MAC_final(ctx, out, &l, out_len))
+ goto err;
+ ret = 1;
+ err:
+ EVP_MAC_CTX_free(ctx);
+ return ret;
+ }
=head1 SEE ALSO
|
Fix minor bug when autoremoving at runtime. | @@ -173,9 +173,8 @@ sub_install(){
sub_clean(){
echo "clean dependencies"
- $SUDO_CMD apt-get -y remove wget
+ $SUDO_CMD apt-get -y remove wget gpg apt-transport-https
$SUDO_CMD apt-get -y autoclean
- $SUDO_CMD apt-get -y autoremove
}
# Configuration
|
sokol sound works | @@ -259,6 +259,14 @@ static tic80* tic = NULL;
static void app_init(void)
{
+ saudio_desc desc = {0};
+ desc.num_channels = 2;
+
+ saudio_setup(&desc);
+
+ printf("channels %i\n", saudio_channels());
+ printf("samplerate %i\n", saudio_sample_rate());
+
FILE* file = fopen("cart.tic", "rb");
if(file)
@@ -274,7 +282,7 @@ static void app_init(void)
if(cart)
{
printf("%s\n", "cart loaded");
- tic = tic80_create(44100);
+ tic = tic80_create(saudio_sample_rate());
if(tic)
{
@@ -298,6 +306,15 @@ static void app_frame(void)
}
gfx_draw();
+
+ static float floatSamples[44100/60];
+
+ // TODO: remove /2 for stereo sound
+ for(s32 i = 0; i < tic->sound.count/2; i++)
+ floatSamples[i] = (float)tic->sound.samples[i*2] / 32768.0f;
+
+ // TODO: remove /2 for stero sound
+ saudio_push(floatSamples, tic->sound.count/2);
}
static void app_input(const sapp_event* event)
|
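The new frame code feeds sokol-audio by converting the core's signed 16-bit samples into the float range the backend expects. A minimal self-contained sketch of that int16-to-float step (the 32768.0f divisor matches the patch; the sample values are arbitrary):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Convert signed 16-bit PCM to normalized floats in roughly [-1.0, 1.0).
     * Dividing by 32768 maps INT16_MIN..INT16_MAX onto -1.0..+0.99997. */
    static void s16_to_float(const int16_t *in, float *out, size_t count)
    {
        for (size_t i = 0; i < count; i++)
            out[i] = (float)in[i] / 32768.0f;
    }

    int main(void)
    {
        int16_t pcm[4] = { -32768, -16384, 0, 32767 };
        float samples[4];

        s16_to_float(pcm, samples, 4);
        for (size_t i = 0; i < 4; i++)
            printf("%6d -> %+.5f\n", pcm[i], samples[i]);
        return 0;
    }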
GIF loader: skip unknown extension block | @@ -420,6 +420,7 @@ gif_load_next(
{
SIXELSTATUS status = SIXEL_FALSE;
unsigned char buffer[256];
+ unsigned char c;
int x;
int y;
int w;
@@ -427,7 +428,7 @@ gif_load_next(
int len;
for (;;) {
- switch (gif_get8(s)) {
+ switch ((c = gif_get8(s))) {
case 0x2C: /* Image Separator (1 byte) */
x = gif_get16le(s); /* Image Left Position (2 bytes)*/
y = gif_get16le(s); /* Image Top Position (2 bytes) */
@@ -544,10 +545,20 @@ gif_load_next(
}
break;
default:
- break;
+ len = gif_get8(s); /* block size */
+ if (s->img_buffer + len > s->img_buffer_end) {
+ status = SIXEL_RUNTIME_ERROR;
+ goto end;
}
- while ((len = gif_get8(s)) != 0) {
+ memcpy(buffer, s->img_buffer, (size_t)len);
s->img_buffer += len;
+ break;
+ }
+ if ((c = gif_get8(s)) != 0x00) {
+ sprintf((char *)buffer, "missing valid block terminator (unknown code %02x).", c);
+ sixel_helper_set_additional_message((char *)buffer);
+ status = SIXEL_RUNTIME_ERROR;
+ goto end;
}
break;
@@ -557,8 +568,8 @@ gif_load_next(
goto end;
default:
- sixel_helper_set_additional_message(
- "corrupt GIF (reason: unknown code).");
+ sprintf((char *)buffer, "corrupt GIF (reason: unknown code %02x).", c);
+ sixel_helper_set_additional_message((char *)buffer);
status = SIXEL_RUNTIME_ERROR;
goto end;
}
@@ -595,7 +606,6 @@ load_gif(
fn_pointer fnp;
fnp.p = fn_load;
- g.out = NULL;
status = sixel_frame_new(&frame, allocator);
if (SIXEL_FAILED(status)) {
|
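GIF extensions are stored as a chain of length-prefixed sub-blocks terminated by a 0x00 byte, which is why the unknown-extension path reads one length byte, skips that many bytes, and then expects the terminator. A small standalone sketch of that sub-block walk over an in-memory buffer (the sample bytes are made up):

    #include <stddef.h>
    #include <stdio.h>

    /* Skip one chain of GIF data sub-blocks: each sub-block starts with a
     * length byte, and a length of 0x00 ends the chain. Returns the number
     * of bytes consumed, or -1 if the chain runs past the buffer. */
    static long skip_sub_blocks(const unsigned char *p, size_t avail)
    {
        size_t pos = 0;
        while (pos < avail) {
            unsigned char len = p[pos++];     /* block size byte          */
            if (len == 0)
                return (long)pos;             /* 0x00 = block terminator  */
            if (pos + len > avail)
                return -1;                    /* truncated / corrupt data */
            pos += len;                       /* skip the sub-block body  */
        }
        return -1;
    }

    int main(void)
    {
        /* two sub-blocks (3 bytes, then 2 bytes) followed by the terminator */
        const unsigned char ext[] = { 3, 'a', 'b', 'c', 2, 'x', 'y', 0 };
        printf("consumed %ld of %zu bytes\n",
               skip_sub_blocks(ext, sizeof(ext)), sizeof(ext));
        return 0;
    }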
links: adds titles to remote content
Fixes | @@ -119,8 +119,10 @@ export const LinkItem = (props: LinkItemProps) => {
overflow="hidden"
onClick={markRead}
>
+ <Text p={2}>{contents[0].text}</Text>
<RemoteContent
ref={r => { remoteRef.current = r }}
+ renderUrl={false}
url={contents[1].url}
text={contents[0].text}
unfold={true}
|
WIP py/builtinimport: Refetch the module from sys.modules before returning it.
This allows a module to wrap itself in some proxy (e.g. for read-only access),
and import to return the updated version. | @@ -472,7 +472,15 @@ mp_obj_t mp_builtin___import__(size_t n_args, const mp_obj_t *args) {
return module_obj;
}
// Otherwise, we need to return top-level package
+ #ifdef REFETCH_MODULE_OBJ_FROM_SYS_MODULE
+ // Don't return the original module that we created, return what's now
+ // stored in sys.modules. This allows the module itself influence it,
+ // say wrap itself in some proxy object, e.g. to provide read-only
+ // access to its attributes. TODO: needs to be done for return above too.
+ return mp_module_get(MP_OBJ_QSTR_VALUE(mp_load_attr(top_module_obj, MP_QSTR___name__)));
+ #else
return top_module_obj;
+ #endif
}
#else // MICROPY_ENABLE_EXTERNAL_IMPORT
|
driver/stm_mems_common.h: Format with clang-format
BRANCH=none
TEST=none | * Read single register
*/
static inline int st_raw_read8(const int port,
- const uint16_t i2c_spi_addr_flags,
- const int reg, int *data_ptr)
+ const uint16_t i2c_spi_addr_flags, const int reg,
+ int *data_ptr)
{
/* TODO: Implement SPI interface support */
return i2c_read8(port, i2c_spi_addr_flags, reg, data_ptr);
@@ -53,15 +53,13 @@ static inline int st_raw_write8(const int port,
/**
* st_raw_read_n - Read n bytes for read
*/
-int st_raw_read_n(const int port,
- const uint16_t i2c_spi_addr_flags,
+int st_raw_read_n(const int port, const uint16_t i2c_spi_addr_flags,
const uint8_t reg, uint8_t *data_ptr, const int len);
/**
* st_raw_read_n_noinc - Read n bytes for read (no auto inc address)
*/
-int st_raw_read_n_noinc(const int port,
- const uint16_t i2c_spi_addr_flags,
+int st_raw_read_n_noinc(const int port, const uint16_t i2c_spi_addr_flags,
const uint8_t reg, uint8_t *data_ptr, const int len);
/**
@@ -86,8 +84,8 @@ int st_get_resolution(const struct motion_sensor_t *s);
* @offset: offset vector
* @temp: Temp
*/
-int st_set_offset(const struct motion_sensor_t *s,
- const int16_t *offset, int16_t temp);
+int st_set_offset(const struct motion_sensor_t *s, const int16_t *offset,
+ int16_t temp);
/**
* st_get_offset - Get data offset
@@ -95,8 +93,8 @@ int st_set_offset(const struct motion_sensor_t *s,
* @offset: offset vector
* @temp: Temp
*/
-int st_get_offset(const struct motion_sensor_t *s,
- int16_t *offset, int16_t *temp);
+int st_get_offset(const struct motion_sensor_t *s, int16_t *offset,
+ int16_t *temp);
/**
* st_get_data_rate - Get data rate (ODR)
|
bootstrap-t4p4s clones into t4p4s, not t4p4s-16 | @@ -37,7 +37,7 @@ git clone --recursive https://github.com/p4lang/p4c && cd p4c && git checkout $P
WAITPROC_P4C="$!"
[ $PARALLEL_INSTALL -ne 0 ] || wait "$WAITPROC_P4C"
-[ -d t4p4s-16 ] || git clone --recursive https://github.com/P4ELTE/t4p4s t4p4s-16 &
+[ -d t4p4s-16 ] || git clone --recursive https://github.com/P4ELTE/t4p4s &
WAITPROC_T4P4S="$!"
[ $PARALLEL_INSTALL -ne 0 ] || wait "$WAITPROC_T4P4S"
@@ -87,7 +87,7 @@ cd ../..
# Enter t4p4s directory
[ $PARALLEL_INSTALL -ne 1 ] || wait "$WAITPROC_T4P4S"
-cd t4p4s-16
+cd t4p4s
# Print environment variables
|
Update FOV documentation. | @@ -51,23 +51,33 @@ typedef struct TCOD_Map {
} TCOD_Map;
typedef TCOD_Map* TCOD_map_t;
/**
- * Field-of-view options.
+ \rst
+ Field-of-view options for :any:`TCOD_map_compute_fov`.
+ \endrst
*/
typedef enum {
/**
- * Basic: http://roguebasin.roguelikedevelopment.org/index.php?title=Ray_casting
+ Trace multiple Bresenham lines along the perimeter.
+
+ Based on: http://www.roguebasin.com/index.php?title=Ray_casting
*/
FOV_BASIC,
/**
- Diamond: http://www.oocities.org/temerra/los_rays.html
+ Cast Bresenham line shadows on a per-tile basis.
+
+ Based on: http://www.oocities.org/temerra/los_rays.html
*/
FOV_DIAMOND,
/**
- * Shadow casting: http://roguebasin.roguelikedevelopment.org/index.php?title=FOV_using_recursive_shadowcasting
+ Recursive Shadowcast.
+
+ Based on: http://www.roguebasin.com/index.php?title=FOV_using_recursive_shadowcasting
*/
FOV_SHADOW,
/**
- * Permissive: http://roguebasin.roguelikedevelopment.org/index.php?title=Precise_Permissive_Field_of_View
+ Precise Permissive Field of View.
+
+ Based on: http://www.roguebasin.com/index.php?title=Precise_Permissive_Field_of_View
*/
FOV_PERMISSIVE_0,
FOV_PERMISSIVE_1,
@@ -79,14 +89,18 @@ typedef enum {
FOV_PERMISSIVE_7,
FOV_PERMISSIVE_8,
/**
- * Mingos' Restrictive Precise Angle Shadowcasting (contribution by Mingos)
+ Mingos' Restrictive Precise Angle Shadowcasting (contribution by Mingos)
+
+ Based on: http://www.roguebasin.com/index.php?title=Restrictive_Precise_Angle_Shadowcasting
*/
FOV_RESTRICTIVE,
/**
Symmetric Shadowcast.
Based on: https://www.albertford.com/shadowcasting/
-
+ \rst
+ .. versionadded :: 1.16
+ \endrst
*/
FOV_SYMMETRIC_SHADOWCAST,
NB_FOV_ALGORITHMS
|
grib_get_data: add -s option to set keys | @@ -29,6 +29,7 @@ grib_option grib_options[]={
{"p:",0,0,0,1,0},
{"F:","format","\n\t\tC style format for values. Default is \"%.10e\"\n",0,1,0},
{"w:",0,0,0,1,0},
+ {"s:",0,0,0,1,0},
{"f",0,0,0,1,0},
{"G",0,0,0,1,0},
{"7",0,0,0,1,0},
@@ -93,6 +94,12 @@ int grib_tool_new_handle_action(grib_runtime_options* options, grib_handle* h)
size_t size=0, num_bytes=0;
long hasMissingValues = 0;
+ if (!options->skip) {
+ if (options->set_values_count != 0)
+ err=grib_set_values(h,options->set_values,options->set_values_count);
+ if( err != GRIB_SUCCESS && options->fail) exit(err);
+ }
+
if (grib_options_on("m:")) {
/* User wants to see missing values */
char* theEnd = NULL;
|
Fixed callbacks
This fixes the callback versions of the addDevice
and initPlatform functions. I think I inadvertently
removed the versions of the functions that exposed
the callback in a past cleanup; this restores the versions
of the functions that call the handler. | @@ -447,6 +447,16 @@ int jni_oc_add_device(const char *uri, const char *rt, const char *name,
}
%}
+%rename(addDevice) jni_oc_add_device1;
+%inline %{
+int jni_oc_add_device1(const char *uri, const char *rt, const char *name,
+ const char *spec_version, const char *data_model_version,
+ oc_add_device_cb_t add_device_cb, jni_callback_data *jcb) {
+ OC_DBG("JNI: %s\n", __func__);
+ return oc_add_device(uri, rt, name, spec_version, data_model_version, add_device_cb, jcb);
+}
+%}
+
/* Code and typemaps for mapping the oc_init_platform to the java OCInitPlatformHandler */
%{
void jni_oc_init_platform_callback(void *user_data)
@@ -486,6 +496,13 @@ int jni_oc_init_platform(const char *mfg_name) {
return oc_init_platform(mfg_name, NULL, NULL);
}
%}
+%rename(initPlatform) jni_oc_init_platform1;
+%inline %{
+int jni_oc_init_platform1(const char *mfg_name, oc_init_platform_cb_t init_platform_cb, jni_callback_data *jcb) {
+ OC_DBG("JNI: %s\n", __func__);
+ return oc_init_platform(mfg_name, init_platform_cb, jcb);
+}
+%}
/* Code and typemaps for mapping the oc_random_pin_cb_t to the OCRandomPinHandler */
%{
|
server session BUGFIX validate data before passing them into reply | @@ -401,7 +401,7 @@ nc_clb_default_get_schema(struct lyd_node *rpc, struct nc_session *UNUSED(sessio
data = lyd_new_path(NULL, server_opts.ctx, "/ietf-netconf-monitoring:get-schema/data", model_data,
LYD_ANYDATA_STRING, LYD_PATH_OPT_OUTPUT);
- if (!data) {
+ if (!data || lyd_validate(&data, LYD_OPT_RPCREPLY, NULL)) {
ERRINT;
free(model_data);
return NULL;
|
Check if path part ends with dir sep upfront in `Path.join` | @@ -257,22 +257,22 @@ static Value joinNative(DictuVM *vm, int argCount, Value *args) {
}
}
+ ObjString* part;
// resultSize = # of dir separators that will be used + length of each string arg
size_t resultSize = abs(argCount - 1); // abs is needed her because of a clang bug
- for (int i = 0; i < argCount; ++i) resultSize += AS_STRING(args[i])->length;
- // It's possible for resultSize to be too large if the strings already end with the dir
- // separator, but having likely at most a few bytes of unused memory in the resulting string
- // object's char array should be fine.
+ for (int i = 0; i < argCount; ++i) {
+ part = AS_STRING(args[i]);
+ resultSize += part->length - (part->chars[part->length-1] == DIR_SEPARATOR);
+ }
char* str = ALLOCATE(vm, char, resultSize + 1);
char* dest = str;
for (int i = 0; i < argCount; ++i) {
- ObjString* src = AS_STRING(args[i]);
- // Append the src string to the end of dest
- for (int j = 0; j < src->length; ++j) *dest++ = src->chars[j];
+ part = AS_STRING(args[i]);
+ // Append the part string to the end of dest
+ for (int j = 0; j < part->length; ++j) *dest++ = part->chars[j];
// Append a DIR_SEPARATOR if necessary
- if (src->chars[src->length-1] == DIR_SEPARATOR) --resultSize;
- else *dest++ = DIR_SEPARATOR;
+ if (part->chars[part->length-1] != DIR_SEPARATOR) *dest++ = DIR_SEPARATOR;
}
return OBJ_VAL(takeString(vm, str, resultSize));
|
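The point of the patch is that the result length can be computed exactly up front by not counting a separator for parts that already end with one, so nothing is over-allocated. A simplified sketch of that measure-then-copy join in plain C (it uses C strings instead of Dictu's ObjString and adds no trailing separator, so it is not a line-for-line port):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DIR_SEPARATOR '/'

    /* Join path parts, inserting a separator between two parts only when the
     * left part does not already end with one. The first pass computes the
     * exact length so the buffer is allocated once. */
    static char *path_join(const char **parts, int count)
    {
        size_t total = 0;
        for (int i = 0; i < count; i++) {
            size_t len = strlen(parts[i]);
            total += len;
            if (i < count - 1 && (len == 0 || parts[i][len - 1] != DIR_SEPARATOR))
                total += 1;                 /* separator needed after this part */
        }

        char *result = malloc(total + 1);
        if (result == NULL)
            return NULL;

        char *dest = result;
        for (int i = 0; i < count; i++) {
            size_t len = strlen(parts[i]);
            memcpy(dest, parts[i], len);
            dest += len;
            if (i < count - 1 && (len == 0 || parts[i][len - 1] != DIR_SEPARATOR))
                *dest++ = DIR_SEPARATOR;
        }
        *dest = '\0';
        return result;
    }

    int main(void)
    {
        const char *parts[] = { "usr/", "local", "bin" };
        char *joined = path_join(parts, 3);
        printf("%s\n", joined);             /* prints: usr/local/bin */
        free(joined);
        return 0;
    }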
io: tls: do not assume handshake is in async mode | @@ -326,7 +326,6 @@ int net_io_tls_handshake(void *_u_conn, void *_th)
struct flb_tls_session *session;
struct flb_upstream_conn *u_conn = _u_conn;
struct flb_upstream *u = u_conn->u;
-
struct flb_thread *th = _th;
session = flb_tls_session_new(u->tls->context);
@@ -366,6 +365,21 @@ int net_io_tls_handshake(void *_u_conn, void *_th)
else {
}
+ /*
+ * If there are no coroutine thread context (th == NULL) it means this
+ * TLS handshake is happening from a blocking code. Just sleep a bit
+ * and retry.
+ *
+ * In the other case for an async socket 'th' is NOT NULL so the code
+ * is under a coroutine context and it can yield.
+ */
+ if (!th) {
+ flb_trace("[io_tls] handshake in process to %s:%i",
+ u->tcp_host, u->tcp_port);
+ flb_time_msleep(500);
+ goto retry_handshake;
+ }
+
/*
* FIXME: if we need multiple reads we are invoking the same
* system call multiple times.
|
Fix potential wraparound in conn_update_recv_rate()
This is unlikely to wrap around in the real world, but fixing it makes the code easier to fuzz. | @@ -548,7 +548,7 @@ static void conn_update_recv_rate(ngtcp2_conn *conn, size_t datalen,
window = conn->cstat.min_rtt == UINT64_MAX ? NGTCP2_DEFAULT_INITIAL_RTT
: conn->cstat.min_rtt * 2;
- if (conn->rx.rate.start_ts + window > ts) {
+ if (window > ts - conn->rx.rate.start_ts) {
return;
}
|
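The rewrite avoids unsigned overflow: with uint64_t timestamps, `start_ts + window` can wrap past UINT64_MAX and compare as a tiny value, while `window > ts - start_ts` is safe whenever `ts >= start_ts`. A tiny standalone demonstration of the two forms with deliberately extreme values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t start_ts = UINT64_MAX - 10;   /* very large start timestamp      */
        uint64_t window   = 100;               /* measurement window              */
        uint64_t ts       = UINT64_MAX - 5;    /* "now", only 5 ticks after start */

        /* Original form: start_ts + window wraps around to a small number,
         * so the "still inside the window" test incorrectly fails. */
        bool inside_buggy = (start_ts + window > ts);

        /* Rewritten form: ts - start_ts is the elapsed time (5 here), which
         * is compared against the window without any overflow. */
        bool inside_fixed = (window > ts - start_ts);

        printf("buggy: %s, fixed: %s\n",
               inside_buggy ? "inside window" : "outside window",
               inside_fixed ? "inside window" : "outside window");
        return 0;
    }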
SAM E54 Xplained Pro: applies reviewer feedback | @@ -166,9 +166,9 @@ void board_init(void)
#endif
// LED0 init
- gpio_set_pin_function(BUTTON_PIN, GPIO_PIN_FUNCTION_OFF);
+ gpio_set_pin_function(LED_PIN, GPIO_PIN_FUNCTION_OFF);
gpio_set_pin_direction(LED_PIN, GPIO_DIRECTION_OUT);
- gpio_set_pin_level(LED_PIN, 1);
+ board_led_write(0);
#if CFG_TUSB_DEBUG >= 2
uart_send_str(BOARD_NAME " LED pin configured\n");
|
Use h2o_memcpy instead of memcpy in `APPEND` | @@ -511,7 +511,7 @@ static h2o_iovec_t build_request(struct st_h2o_http1client_t *client, h2o_iovec_
} while (0)
#define APPEND(s, l) \
do { \
- memcpy(buf.base + offset, (s), (l)); \
+ h2o_memcpy(buf.base + offset, (s), (l)); \
offset += (l); \
} while (0)
#define APPEND_STRLIT(lit) APPEND((lit), sizeof(lit) - 1)
|
Fix: add missing condition to decide whether to init logstash | @@ -95,8 +95,10 @@ if [[ "$NODETYPE" = *mono* ]]; then
if [[ "$STATE" = *initialized* ]];
then
+ if [[ "$AnalyticsActivation" = *true* ]]; then
cd $AS_HOME/scripts
run_as ${DATAFARI_USER} "bash as-manager.sh init_logstash";
+ fi
cd $DIR
|
{AH} add htslib/win to MANIFEST | #
include MANIFEST.in
include COPYING
+include NEWS
include INSTALL
include KNOWN_BUGS
include THANKS
@@ -31,6 +32,8 @@ exclude bcftools/config.h
# htslib
include htslib/*.c
include htslib/*.h
+include htslib/INSTALL
+include htslib/NEWS
exclude htslib/config.h
include htslib/Makefile
include htslib/htslib_vars.mk
@@ -41,6 +44,8 @@ include htslib/htslib.pc.in
include htslib/htslib/*.h
include htslib/cram/*.c
include htslib/cram/*.h
+include htslib/win/*.c
+include htslib/win/*.h
include cy_build.py
include pysam.py
include requirements.txt
|
linux/trace: missing ARM_pc/ARM_cpsr for Android arm64 | @@ -378,6 +378,13 @@ static size_t arch_getPC(pid_t pid, uint64_t* pc, uint64_t* status_reg HF_ATTR_U
#endif /* defined(__i386__) || defined(__x86_64__) */
#if defined(__arm__) || defined(__aarch64__)
+#if !defined(ARM_pc)
+#define ARM_pc uregs[15]
+#endif
+#if !defined(ARM_cpsr)
+#define ARM_cpsr uregs[16]
+#endif /* !defined(ARM_cpsr) */
+
/*
* 32-bit
*/
|
Improve shader compilation error; | @@ -1631,7 +1631,14 @@ static GLuint compileShader(GLenum type, const char** sources, int count) {
char* log = malloc(logLength);
lovrAssert(log, "Out of memory");
glGetShaderInfoLog(shader, logLength, &logLength, log);
- lovrThrow("Could not compile shader:\n%s", log);
+ const char* name;
+ switch (type) {
+ case GL_VERTEX_SHADER: name = "vertex shader"; break;
+ case GL_FRAGMENT_SHADER: name = "fragment shader"; break;
+ case GL_COMPUTE_SHADER: name = "compute shader"; break;
+ default: name = "shader"; break;
+ }
+ lovrThrow("Could not compile %s:\n%s", name, log);
}
return shader;
|
ames: pop the next packet, not the first packet again | |- ^+ packet-pump
?: =(0 sot) packet-pump
?: =(~ liv) packet-pump
- =^ hed liv (pop:packet-queue live.state)
+ =^ hed liv (pop:packet-queue liv)
=. packet-pump (give %send (to-static-fragment hed))
$(sot (dec sot))
:: +on-wake: handle packet timeout
|
Add a flag to clap_event to indicate if an event is live
Fixes | @@ -27,6 +27,12 @@ enum {
};
typedef int32_t clap_event_type;
+enum {
+ // Should be true if the events is external to the host, like a live user input
+ CLAP_EVENT_IS_LIVE = 1 << 0,
+};
+typedef int32_t clap_event_flags;
+
/**
* Note on, off, end and choke events.
* In the case of note choke or end events:
@@ -174,6 +180,7 @@ typedef struct clap_event_midi_sysex {
typedef struct clap_event {
alignas(4) clap_event_type type;
alignas(4) uint32_t time; // offset from the first sample in the process block
+ alignas(4) clap_event_flags flags;
union {
clap_event_note_t note;
|
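With the new `flags` member an event can carry bit flags such as CLAP_EVENT_IS_LIVE, which callers test with a bitwise AND. A short sketch of that check using only the names introduced in this diff; the struct below is trimmed to the relevant members and is not the full clap_event.

    #include <stdint.h>
    #include <stdio.h>

    enum { CLAP_EVENT_IS_LIVE = 1 << 0 };   /* from the diff above */
    typedef int32_t clap_event_flags;

    /* Trimmed-down event carrying only the members used here. */
    typedef struct {
        int32_t          type;
        uint32_t         time;    /* offset from the first sample in the block */
        clap_event_flags flags;
    } example_event;

    int main(void)
    {
        example_event ev = { .type = 0, .time = 0, .flags = CLAP_EVENT_IS_LIVE };

        /* Flags form a bit set: test with &, not ==, so more flags can be
         * added later without breaking the check. */
        if (ev.flags & CLAP_EVENT_IS_LIVE)
            puts("event came from a live user input");
        else
            puts("event was sequenced by the host");
        return 0;
    }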
py/asmx64: Fix bug in assembler when creating disp with r13 and 0 offset | @@ -192,7 +192,7 @@ STATIC void asm_x64_write_r64_disp(asm_x64_t *as, int r64, int disp_r64, int dis
return;
}
- if (disp_offset == 0 && disp_r64 != ASM_X64_REG_RBP) {
+ if (disp_offset == 0 && disp_r64 != ASM_X64_REG_RBP && disp_r64 != ASM_X64_REG_R13) {
asm_x64_write_byte_1(as, MODRM_R64(r64) | MODRM_RM_DISP0 | MODRM_RM_R64(disp_r64));
} else if (SIGNED_FIT8(disp_offset)) {
asm_x64_write_byte_2(as, MODRM_R64(r64) | MODRM_RM_DISP8 | MODRM_RM_R64(disp_r64), IMM32_L0(disp_offset));
|
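The extra R13 test exists because of how ModRM encodes displacements: mod=00 with a base whose low three bits are 101 (RBP, and R13 through REX.B) does not mean "no displacement" but selects RIP-relative/disp32 addressing, so a zero offset must still be emitted as an explicit 8-bit displacement. A simplified sketch of that decision (index/SIB cases such as RSP/R12 are ignored here); register numbers follow the usual x86-64 numbering.

    #include <stdio.h>

    #define REG_RBP 5
    #define REG_R13 13

    /* Decide how a [base + offset] operand must encode its displacement.
     * mod=00 ("no displacement") is unavailable when the base's low 3 bits
     * are 101 (RBP or R13): that pattern means RIP-relative/disp32 instead. */
    static const char *disp_encoding(int base_reg, int offset)
    {
        if (offset == 0 && base_reg != REG_RBP && base_reg != REG_R13)
            return "mod=00, no displacement byte";
        if (offset >= -128 && offset <= 127)
            return "mod=01, 8-bit displacement";
        return "mod=10, 32-bit displacement";
    }

    int main(void)
    {
        printf("[rax + 0]   -> %s\n", disp_encoding(0, 0));
        printf("[rbp + 0]   -> %s\n", disp_encoding(REG_RBP, 0));   /* forced disp8 */
        printf("[r13 + 0]   -> %s\n", disp_encoding(REG_R13, 0));   /* forced disp8 */
        printf("[r13 + 300] -> %s\n", disp_encoding(REG_R13, 300));
        return 0;
    }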
change the return type of TSCH_CALLBACK_PACKET_READY | @@ -140,7 +140,7 @@ void TSCH_CALLBACK_NEW_TIME_SOURCE(const struct tsch_neighbor *old, const struct
/* Called by TSCH every time a packet is ready to be added to the send queue */
#ifdef TSCH_CALLBACK_PACKET_READY
-void TSCH_CALLBACK_PACKET_READY(void);
+int TSCH_CALLBACK_PACKET_READY(void);
#endif
/***** External Variables *****/
|
CI: use docker function for building
Use the docker function that contains
the environment for improved caching
instead of continuously updating
the docker command in build.yml.
This is mostly old functionality;
the primary benefit is that it is
easy for the user to test with the CI
environment. | @@ -132,6 +132,6 @@ jobs:
# Run test
# FIXME: (2023) Remove "ccache -c", workaround for cache growing
# too large from ccache CI/ccache configuration mismatch.
- docker run --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -e GITHUB_JOB -e GITHUB_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_REPOSITORY_OWNER -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RETENTION_DAYS -e GITHUB_ACTOR -e GITHUB_WORKFLOW -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GITHUB_EVENT_NAME -e GITHUB_SERVER_URL -e GITHUB_API_URL -e GITHUB_GRAPHQL_URL -e GITHUB_WORKSPACE -e GITHUB_ACTION -e GITHUB_EVENT_PATH -e GITHUB_ACTION_REPOSITORY -e GITHUB_ACTION_REF -e GITHUB_PATH -e GITHUB_ENV -e RUNNER_OS -e RUNNER_TOOL_CACHE -e RUNNER_TEMP -e RUNNER_WORKSPACE -e ACTIONS_RUNTIME_URL -e ACTIONS_RUNTIME_TOKEN -e ACTIONS_CACHE_URL -e GITHUB_ACTIONS=true -e CI=true -e RELSTR=citest $DOCKER_ARGS -v `pwd`:/home/user/contiki-ng -v $GITHUB_WORKSPACE/.ccache:/home/user/.ccache $DOCKER_IMG bash --login -c "ccache --set-config=max_size='250M' && make -C tests/??-${{ matrix.test }}; ccache -c"
+ docker run --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -e GITHUB_JOB -e GITHUB_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_REPOSITORY_OWNER -e GITHUB_RUN_ID -e GITHUB_RUN_NUMBER -e GITHUB_RETENTION_DAYS -e GITHUB_ACTOR -e GITHUB_WORKFLOW -e GITHUB_HEAD_REF -e GITHUB_BASE_REF -e GITHUB_EVENT_NAME -e GITHUB_SERVER_URL -e GITHUB_API_URL -e GITHUB_GRAPHQL_URL -e GITHUB_WORKSPACE -e GITHUB_ACTION -e GITHUB_EVENT_PATH -e GITHUB_ACTION_REPOSITORY -e GITHUB_ACTION_REF -e GITHUB_PATH -e GITHUB_ENV -e RUNNER_OS -e RUNNER_TOOL_CACHE -e RUNNER_TEMP -e RUNNER_WORKSPACE -e ACTIONS_RUNTIME_URL -e ACTIONS_RUNTIME_TOKEN -e ACTIONS_CACHE_URL -e GITHUB_ACTIONS=true -e CI=true $DOCKER_ARGS -v `pwd`:/home/user/contiki-ng -v $GITHUB_WORKSPACE/.ccache:/home/user/.ccache $DOCKER_IMG bash --login -c "source ../.bash_aliases && ccache --set-config=max_size='250M' && cimake -C tests/??-${{ matrix.test }}; ccache -c"
# Check outcome of the test
./tests/check-test.sh `pwd`/tests/??-${{ matrix.test }}
|
try_pkcs12(): cleanse passphrase so it is not left on the stack | @@ -619,9 +619,10 @@ static int try_pkcs12(struct extracted_param_data_st *data, OSSL_STORE_INFO **v,
}
ctx->cached_info = infos;
}
- }
p12_end:
+ OPENSSL_cleanse(tpass, sizeof(tpass));
PKCS12_free(p12);
+ }
*v = sk_OSSL_STORE_INFO_shift(ctx->cached_info);
}
|
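The idea behind the fix is that a passphrase copied into a stack buffer should be wiped before the function returns, and OPENSSL_cleanse does that in a way the optimizer cannot remove. A generic standalone sketch of the same pattern using a volatile-pointer wipe in place of the OpenSSL helper:

    #include <stdio.h>
    #include <string.h>

    /* Wipe a buffer through a volatile pointer so the compiler cannot treat
     * the stores as dead and drop them (the usual problem with a plain
     * memset right before return). Real code would use OPENSSL_cleanse,
     * explicit_bzero or memset_s where available. */
    static void secure_wipe(void *buf, size_t len)
    {
        volatile unsigned char *p = buf;
        while (len--)
            *p++ = 0;
    }

    static int use_passphrase(const char *pass)
    {
        char tpass[64];

        /* Work with a local copy, as the PKCS#12 code does. */
        snprintf(tpass, sizeof(tpass), "%s", pass);
        printf("pretending to decrypt with a %zu-char passphrase\n", strlen(tpass));

        /* Scrub the copy before the stack frame is released. */
        secure_wipe(tpass, sizeof(tpass));
        return 0;
    }

    int main(void)
    {
        return use_passphrase("correct horse battery staple");
    }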
Fix another issue with inheritance tables
Previously, when creating an APPEND node for an inheritance table, if
subpaths had different numbers of segments in gp_distribution_policy,
the whole APPEND node might be assigned a wrong numsegments,
so some segments could not get plans and data was lost in the results. | @@ -1531,8 +1531,16 @@ set_append_path_locus(PlannerInfo *root, Path *pathnode, RelOptInfo *rel,
}
else if (CdbPathLocus_IsPartitioned(pathnode->locus) &&
CdbPathLocus_IsPartitioned(projectedlocus))
+ {
+ /*
+ * subpaths have different distributed policy, mark it as random
+ * distributed and set the numsegments to the maximum of all
+ * subpaths to not missing any tuples.
+ */
CdbPathLocus_MakeStrewn(&pathnode->locus,
- CdbPathLocus_NumSegments(projectedlocus));
+ Max(CdbPathLocus_NumSegments(pathnode->locus),
+ CdbPathLocus_NumSegments(projectedlocus)));
+ }
else
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg_internal("cannot append paths with incompatible distribution")));
|
update wdsp patch in sdr_transceiver_emb | --- old/wdsp-master/linux_port.c
+++ wdsp-master/linux_port.c
-@@ -45,7 +45,7 @@
- void InitializeCriticalSectionAndSpinCount(pthread_mutex_t *mutex,int count) {
- pthread_mutexattr_t mAttr;
- pthread_mutexattr_init(&mAttr);
+@@ -49,7 +49,7 @@ void InitializeCriticalSectionAndSpinCou
+ // DL1YCF: MacOS X does not have PTHREAD_MUTEX_RECURSIVE_NP
+ pthread_mutexattr_settype(&mAttr,PTHREAD_MUTEX_RECURSIVE);
+ #else
- pthread_mutexattr_settype(&mAttr,PTHREAD_MUTEX_RECURSIVE_NP);
+ pthread_mutexattr_settype(&mAttr,PTHREAD_MUTEX_RECURSIVE);
+ #endif
pthread_mutex_init(mutex,&mAttr);
pthread_mutexattr_destroy(&mAttr);
- // ignore count
|
Make sure to set deriv_idx | @@ -516,9 +516,9 @@ static int mpfunc(int m, int n, FLT *p, FLT *deviates, FLT **derivs, void *priva
LinmathAxisAnglePose *world2lh = (LinmathAxisAnglePose *)&cameras[lh];
scale3d(rot, world2lh->AxisAngleRot, -1);
+ deriv_idx = survive_optimizer_get_camera_index(mpfunc_ctx) + lh * 7 + 3;
if (isfinite(up[0])) {
error = gen_world2lh_aa_up_err(world2lh->AxisAngleRot, up);
- deriv_idx = survive_optimizer_get_camera_index(mpfunc_ctx) + lh * 7 + 3;
gen_world2lh_aa_up_err_jac_axis_angle(deriv, world2lh->AxisAngleRot, up);
}
}
|
__builtin_ctzll, __builtin_clzll for Win32 | #include <intrin.h>
#define __builtin_popcount __popcnt
-#define __builtin_popcountll __popcnt64
static inline
int __builtin_ctz(uint32_t x) {
@@ -29,27 +28,58 @@ int __builtin_ctz(uint32_t x) {
}
static inline
-int __builtin_ctzll(unsigned long long x) {
+int __builtin_clz(uint32_t x) {
unsigned long ret;
- _BitScanForward64(&ret, x);
- return (int)ret;
+ _BitScanReverse(&ret, x);
+ return (int)(31 ^ ret);
}
+
+
+#ifdef _WIN64
+
+#define __builtin_popcountll __popcnt64
+
static inline
-int __builtin_clz(uint32_t x) {
+int __builtin_ctzll(uint64_t value) {
unsigned long ret;
- _BitScanReverse(&ret, x);
- return (int)(31 ^ ret);
+ _BitScanForward64(&ret, value);
+ return (int)ret;
}
static inline
-int __builtin_clzll(unsigned long long x) {
+int __builtin_clzll(uint64_t value) {
unsigned long ret;
- _BitScanReverse64(&ret, x);
+ _BitScanReverse64(&ret, value);
return (int)(63 ^ ret);
}
-#endif
+#else // _WIN64
+
+#define __builtin_popcountll(x) (__popcnt((x) & 0xFFFFFFFF) + __popcnt((x) >> 32))
+
+static inline
+int __builtin_ctzll(uint64_t value) {
+ if (value == 0) return 64;
+ uint32_t msh = (uint32_t)(value >> 32);
+ uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+ if (lsh != 0) return __builtin_ctz(lsh);
+ return 32 + __builtin_ctz(msh);
+}
+
+static inline
+int __builtin_clzll(uint64_t value) {
+ if (value == 0) return 64;
+ uint32_t msh = (uint32_t)(value >> 32);
+ uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+ if (msh != 0) return __builtin_clz(msh);
+ return 32 + __builtin_clz(lsh);
+}
+
+#endif // _WIN64
+
+#endif // defined(M3_COMPILER_MSVC)
+
// TODO: not sure why, signbit is actually defined in math.h
#if defined(ESP8266)
|
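The 32-bit fallback builds the 64-bit count from the two 32-bit halves: count in the low half unless it is zero, otherwise add 32 and count in the high half (clz works symmetrically from the top). A portable check of that identity, using a shift loop instead of the compiler intrinsic so it compiles anywhere:

    #include <stdint.h>
    #include <stdio.h>

    /* Portable 32-bit count-trailing-zeros (loop version, a stand-in for the
     * intrinsic); returns 32 for an input of 0. */
    static int ctz32(uint32_t x)
    {
        int n = 0;
        if (x == 0) return 32;
        while ((x & 1u) == 0) { x >>= 1; n++; }
        return n;
    }

    /* 64-bit count assembled from the halves, mirroring the fallback. */
    static int ctz64(uint64_t v)
    {
        if (v == 0) return 64;
        uint32_t lo = (uint32_t)(v & 0xFFFFFFFFu);
        uint32_t hi = (uint32_t)(v >> 32);
        return lo != 0 ? ctz32(lo) : 32 + ctz32(hi);
    }

    int main(void)
    {
        printf("ctz64(0x8)        = %d\n", ctz64(0x8));          /* 3  */
        printf("ctz64(1ULL << 32) = %d\n", ctz64(1ULL << 32));   /* 32 */
        printf("ctz64(1ULL << 63) = %d\n", ctz64(1ULL << 63));   /* 63 */
        return 0;
    }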
Logging InvalidDataException | @@ -205,12 +205,16 @@ public abstract class StratumServer
{
case SocketException sockEx:
if(!ignoredSocketErrors.Contains(sockEx.ErrorCode))
- logger.Error(() => $"[{connection.ConnectionId}] Connection error state: {ex}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection error: {ex}");
+ break;
+
+ case InvalidDataException idEx:
+ logger.Error(() => $"[{connection.ConnectionId}] Connection error: {idEx}");
break;
case JsonException jsonEx:
// junk received (invalid json)
- logger.Error(() => $"[{connection.ConnectionId}] Connection json error state: {jsonEx.Message}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection json error: {jsonEx.Message}");
if(clusterConfig.Banning?.BanOnJunkReceive.HasValue == false || clusterConfig.Banning?.BanOnJunkReceive == true)
{
@@ -221,7 +225,7 @@ public abstract class StratumServer
case AuthenticationException authEx:
// junk received (SSL handshake)
- logger.Error(() => $"[{connection.ConnectionId}] Connection json error state: {authEx.Message}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection json error: {authEx.Message}");
if(clusterConfig.Banning?.BanOnJunkReceive.HasValue == false || clusterConfig.Banning?.BanOnJunkReceive == true)
{
@@ -232,7 +236,7 @@ public abstract class StratumServer
case IOException ioEx:
// junk received (SSL handshake)
- logger.Error(() => $"[{connection.ConnectionId}] Connection json error state: {ioEx.Message}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection json error: {ioEx.Message}");
if(ioEx.Source == "System.Net.Security")
{
@@ -250,7 +254,7 @@ public abstract class StratumServer
case ArgumentException argEx:
if(argEx.TargetSite != streamWriterCtor || argEx.ParamName != "stream")
- logger.Error(() => $"[{connection.ConnectionId}] Connection error state: {ex}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection error: {ex}");
break;
case InvalidOperationException:
@@ -258,7 +262,7 @@ public abstract class StratumServer
break;
default:
- logger.Error(() => $"[{connection.ConnectionId}] Connection error state: {ex}");
+ logger.Error(() => $"[{connection.ConnectionId}] Connection error: {ex}");
break;
}
|
wincng: make more use of new helper functions | @@ -423,6 +423,24 @@ _libssh2_wincng_safe_free(void *buf, int len)
free(buf);
}
+/* Copy a big endian set of bits from src to dest.
+ * if the size of src is smaller than dest then pad the "left" (MSB)
+ * end with zeroes and copy the bits into the "right" (LSB) end. */
+static void
+memcpy_with_be_padding(unsigned char *dest, unsigned long dest_len,
+ unsigned char *src, unsigned long src_len)
+{
+ if(dest_len > src_len) {
+ memset(dest, 0, dest_len - src_len);
+ }
+ memcpy((dest + dest_len) - src_len, src, src_len);
+}
+
+static int
+round_down(int number, int multiple)
+{
+ return (number / multiple) * multiple;
+}
/*******************************************************************/
/*
@@ -2060,6 +2078,7 @@ _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r,
offset += p->length;
memcpy(key + offset, m->bignum, m->length);
+ offset = 0;
ret = BCryptImportKeyPair(_libssh2_wincng.hAlgRSA, NULL,
BCRYPT_RSAPUBLIC_BLOB, &hKey, key, keylen, 0);
@@ -2071,9 +2090,8 @@ _libssh2_wincng_bignum_mod_exp(_libssh2_bn *r,
length = max(a->length, length);
bignum = malloc(length);
if(bignum) {
- offset = length - a->length;
- memset(bignum, 0, offset);
- memcpy(bignum + offset, a->bignum, a->length);
+ memcpy_with_be_padding(bignum, length,
+ a->bignum, a->length);
ret = BCryptEncrypt(hKey, bignum, length, NULL, NULL, 0,
r->bignum, r->length, &offset,
@@ -2204,6 +2222,7 @@ _libssh2_wincng_bignum_free(_libssh2_bn *bn)
}
}
+
/*******************************************************************/
/*
* Windows CNG backend: Diffie-Hellman support.
@@ -2237,25 +2256,6 @@ _libssh2_dh_dtor(_libssh2_dh_ctx *dhctx)
}
}
-/* Copy a big endian set of bits from src to dest.
- * if the size of src is smaller than dest then pad the "left" (MSB)
- * end with zeroes and copy the bits into the "right" (LSB) end. */
-static void
-memcpy_with_be_padding(unsigned char *dest, unsigned long dest_len,
- unsigned char *src, unsigned long src_len)
-{
- if(dest_len > src_len) {
- memset(dest, 0, dest_len - src_len);
- }
- memcpy(dest + dest_len - src_len, src, src_len);
-}
-
-static int
-round_down(int number, int multiple)
-{
- return (number / multiple) * multiple;
-}
-
/* Generates a Diffie-Hellman key pair using base `g', prime `p' and the given
* `group_order'. Can use the given big number context `bnctx' if needed. The
* private key is stored as opaque in the Diffie-Hellman context `*dhctx' and
|
BugID:22604012:[http]check token str len to avoid memory writes out of bounds | @@ -522,6 +522,11 @@ int IOT_HTTP_DeviceNameAuth(void *handle)
goto do_exit;
}
+ if(strlen(pvalue) > IOTX_HTTP_AUTH_TOKEN_LEN - 1) {
+ http_err("token is out of size");
+ goto do_exit;
+ }
+
strcpy(iotx_http_context->p_auth_token, pvalue);
iotx_http_context->is_authed = 1;
HTTP_API_FREE(pvalue);
|
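The added check makes the later strcpy safe by rejecting tokens longer than the destination can hold (capacity minus the terminating NUL). A tiny generic sketch of the same guard; the 16-byte capacity below is made up for the example and is unrelated to IOTX_HTTP_AUTH_TOKEN_LEN.

    #include <stdio.h>
    #include <string.h>

    #define AUTH_TOKEN_LEN 16   /* illustrative destination capacity, incl. NUL */

    /* Copy a token into a fixed buffer only after verifying it fits: this is
     * the guard placed in front of strcpy to avoid an out-of-bounds write. */
    static int store_token(char dst[AUTH_TOKEN_LEN], const char *token)
    {
        if (strlen(token) > AUTH_TOKEN_LEN - 1) {
            fprintf(stderr, "token is out of size\n");
            return -1;
        }
        strcpy(dst, token);
        return 0;
    }

    int main(void)
    {
        char buf[AUTH_TOKEN_LEN];
        printf("short token: %d\n", store_token(buf, "abc123"));
        printf("long token : %d\n", store_token(buf, "this-token-is-far-too-long-to-fit"));
        return 0;
    }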
Doc: Tweak description of B-Tree duplicate tuples.
Defining duplicates as "close by" to each other was unclear. Simplify
the definition.
Backpatch: 13-, where deduplication was introduced (by commit 0d861bbb) | @@ -635,10 +635,10 @@ options(<replaceable>relopts</replaceable> <type>local_relopts *</type>) returns
A duplicate is a leaf page tuple (a tuple that points to a table
row) where <emphasis>all</emphasis> indexed key columns have values
that match corresponding column values from at least one other leaf
- page tuple that's close by in the same index. Duplicate tuples are
- quite common in practice. B-Tree indexes can use a special,
- space-efficient representation for duplicates when an optional
- technique is enabled: <firstterm>deduplication</firstterm>.
+ page tuple in the same index. Duplicate tuples are quite common in
+ practice. B-Tree indexes can use a special, space-efficient
+ representation for duplicates when an optional technique is
+ enabled: <firstterm>deduplication</firstterm>.
</para>
<para>
Deduplication works by periodically merging groups of duplicate
|
[CI] Exclude `.def` files from trailing-whitespace check | @@ -42,6 +42,6 @@ done
# Check for trailing whitespaces and tabs
echo "Checking for trailing whitespaces and tabs"
-git diff --check HEAD $base || EXIT_STATUS=$?
+git diff --check HEAD $base -- ':(exclude)**.def' || EXIT_STATUS=$?
exit $EXIT_STATUS
|
doc/tutorials/webui: corrected webui directory name | @@ -46,7 +46,7 @@ To build Elektra with the elektra-web tool:
- Install and start the client (connects to the elektrad instance):
- - `cd client`
+ - `cd webui`
- `npm install`
- `npm start` (replaces `kdb run-web`)
@@ -188,7 +188,7 @@ authenticate users, e.g. by [username/password auth](https://www.digitalocean.co
`elektrad/` - contains the daemon to interact with a single elektra instance
`webd/` - contains a daemon to serve the client and interact with multiple elektra instances
-`client/` - contains the elektra-web client (Web UI)
+`webui/` - contains the elektra-web client (Web UI)
- `src/actions/` - Redux actions to access the KDB or display notifications in the UI
- `src/components/` - React components
@@ -252,7 +252,7 @@ docker push elektra/web:1.5.0
- Create a new sub dialog by, for example, copying the `NumberSubDialog.jsx`
file (or similar) to a new file in the
- `client/src/components/TreeItem/dialogs` folder.
+ `webui/src/components/TreeItem/dialogs` folder.
- Include the sub dialog by adding it to the `SettingsDialog.jsx` file in the
same folder. For example, it could be added before the
@@ -275,7 +275,7 @@ docker push elektra/web:1.5.0
```
- Mark the meta keys as handled by adding them to the `HANDLED_METADATA` array
- in `client/src/components/TreeItem/dialogs/utils.js`:
+ in `webui/src/components/TreeItem/dialogs/utils.js`:
```diff
export const HANDLED_METADATA = [
@@ -287,8 +287,8 @@ export const HANDLED_METADATA = [
```
- Validation can then be added by handling metadata in the
- `client/src/components/TreeItem/fields/validateType.js` file to the
+ `webui/src/components/TreeItem/fields/validateType.js` file to the
`validateType` function.
- Rendering fields in a special way when certain metakeys are present can be
- done by adjusting the `renderSpecialValue` function in the `client/src/components/TreeItem/index.js` file.
+ done by adjusting the `renderSpecialValue` function in the `webui/src/components/TreeItem/index.js` file.
|
we'll have not only float/cat features in the future
Note: mandatory check (NEED_CHECK) was skipped | @@ -24,7 +24,7 @@ namespace NCatboostCuda {
const auto& featureMetaInfo = featuresMetaInfo[featureIdx];
if (featureMetaInfo.Type == EFeatureType::Float) {
RegisterDataProviderFloatFeature(featureIdx);
- } else {
+ } else if (featureMetaInfo.Type == EFeatureType::Categorical) {
RegisterDataProviderCatFeature(featureIdx);
}
}
|
Fix pagination numbers disappearing | @@ -111,19 +111,31 @@ AjaxFranceLabs.PagerModule = AjaxFranceLabs.AbstractModule.extend({
elm.find('.go_n, .go_l').css('display', this.display);
}
elm.find('.page').css('display', 'none');
- elm.find('.page[page=' + this.pageSelected + ']').addClass('selected');
- elm.find('.page.selected').css('display', this.display).prev().css('display', this.display);
- for (var p = $(this.elm).find('.page:visible').length; p < this.nbPageDisplayed; p++)
- elm.find('.page:visible:last').next().css('display', this.display);
- if (elm.find('.page:visible').length < this.nbPageDisplayed)
- for (var p = $(this.elm).find('.page:visible').length; p < this.nbPageDisplayed; p++)
- elm.find('.page:visible:first').prev().css('display', this.display);
+ elm.find('.page[page=' + this.pageSelected + ']').addClass('selected disp');
+ // Add class 'disp' because the old code version relied on the ':visible' filter to find .page elements
+ // but if the pager module is hidden at this point it breaks everything
+ // By relying on a class, the behavior of the following code will be correct in any cases
+ elm.find('.page.selected').css('display', this.display).prev().addClass('disp').css('display', this.display);
+ for (var p = $(this.elm).find('.page.disp').length; p < this.nbPageDisplayed; p++) {
+ // Display the page numbers one by one till the nbPageDisplayed (which should be renamed as 'MaxPagesToDisplay') is reached
+ elm.find('.page.disp:last').next().addClass('disp').css('display', this.display);
+ }
+ // If the number of currently displayed pages is below the max allowed number (nbPageDisplayed),
+ // then display any existing previous pages till the max allowed number is reached
+ // For example if the selected page is 11 and the current diplayed pages are 11,12 and 13, if the max allowed pages to display is 10
+ // then we can display the pages 4,5,6,7,8,9 and 10 in addition
+ if (elm.find('.page.disp').length < this.nbPageDisplayed) {
+ for (var p = $(this.elm).find('.page.disp').length; p < this.nbPageDisplayed; p++) {
+ elm.find('.page.disp:first').prev().addClass('disp').css('display', this.display);
+ }
+ }
var width = 10;
- for (var p = 0; p < this.nbPageDisplayed; p++)
- width += $(this.elm).find('.page:visible:eq(' + p + ')').outerWidth(true);
+ for (var p = 0; p < this.nbPageDisplayed; p++) {
+ width += $(this.elm).find('.page.disp:eq(' + p + ')').outerWidth(true);
+ }
elm.find('.pages').width(width);
AjaxFranceLabs.clearMultiElementClasses(elm.find('.page'));
- AjaxFranceLabs.addMultiElementClasses(elm.find('.page:visible'));
+ AjaxFranceLabs.addMultiElementClasses(elm.find('.page.disp'));
},
clickHandler : function() {
|
libbpf-tools: update opensnoop for libbpf 1.0
Switch to libbpf 1.0 mode and adapt libbpf API usage accordingly. | @@ -220,7 +220,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
- struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct opensnoop_bpf *obj;
__u64 time_end = 0;
@@ -230,14 +229,9 @@ int main(int argc, char **argv)
if (err)
return err;
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d\n", err);
- return 1;
- }
-
obj = opensnoop_bpf__open();
if (!obj) {
fprintf(stderr, "failed to open BPF object\n");
@@ -283,13 +277,10 @@ int main(int argc, char **argv)
printf("%s\n", "PATH");
/* setup event callbacks */
- pb_opts.sample_cb = handle_event;
- pb_opts.lost_cb = handle_lost_events;
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES,
- &pb_opts);
- err = libbpf_get_error(pb);
- if (err) {
- pb = NULL;
+ handle_event, handle_lost_events, NULL, NULL);
+ if (!pb) {
+ err = -errno;
fprintf(stderr, "failed to open perf buffer: %d\n", err);
goto cleanup;
}
@@ -307,8 +298,8 @@ int main(int argc, char **argv)
/* main: poll */
while (!exiting) {
err = perf_buffer__poll(pb, PERF_POLL_TIMEOUT_MS);
- if (err < 0 && errno != EINTR) {
- fprintf(stderr, "error polling perf buffer: %s\n", strerror(errno));
+ if (err < 0 && err != -EINTR) {
+ fprintf(stderr, "error polling perf buffer: %s\n", strerror(-err));
goto cleanup;
}
if (env.duration && get_ktime_ns() > time_end)
|
build: fix centos 8 stream build install-dep
make install-dep sometimes failed at
downloading metadata for repository 'powertools-source':
disable unnecessary powertools-source repo.
Type: fix
Fixes: ("build: fix centos-8 'make install-deps'") | @@ -324,7 +324,7 @@ ifeq ($(OS_ID),rhel)
else ifeq ($(OS_ID)-$(OS_VERSION_ID),centos-8)
@sudo -E dnf install $(CONFIRM) dnf-plugins-core epel-release
@sudo -E dnf config-manager --set-enabled \
- $(shell dnf repolist all 2>/dev/null|grep -i powertools|cut -d' ' -f1)
+ $(shell dnf repolist all 2>/dev/null|grep -i powertools|cut -d' ' -f1|grep -v source)
@sudo -E dnf groupinstall $(CONFIRM) $(RPM_DEPENDS_GROUPS)
@sudo -E dnf install --skip-broken $(CONFIRM) $(RPM_DEPENDS)
else ifeq ($(OS_ID),centos)
|
BugID:16944965: Reformat app Config.in | @@ -6,6 +6,18 @@ mainmenu "AliOS Things Configuration"
config AOS_BUILD
bool
default y
+ help
+ This option indicates AOS build env is running.
+
+config AOS_BUILD_BOARD
+ string
+ help
+ This option holds the name of the board that is going to build.
+
+config AOS_BUILD_APP
+ string
+ help
+ This option holds the name of the application that is going to build.
config MBINS
string
|
RemoteContent: fix CORS fallback for images
Fixes urbit/landscape#994 | @@ -44,8 +44,13 @@ const Image = styled.img(system({ objectFit: true }), ...allSystemStyle);
export function RemoteContentImageEmbed(
props: ImageProps & RemoteContentEmbedProps
) {
- const { url, noCors = false, ...rest } = props;
+ const { url, ...rest } = props;
+ const [noCors, setNoCors] = useState(false);
const { hovering, bind } = useHovering();
+ // maybe images aren't set up for CORS embeds
+ const onError = useCallback(() => {
+ setNoCors(true);
+ }, []);
return (
<Box height="100%" width="100%" position="relative" {...bind} {...rest}>
@@ -77,6 +82,7 @@ export function RemoteContentImageEmbed(
width="100%"
objectFit="contain"
borderRadius={2}
+ onError={onError}
{...props}
/>
</Box>
|
Fix deadlock querying some types of pipe handles on Win10 | @@ -673,6 +673,7 @@ VOID PhpUpdateHandleGeneral(
BOOLEAN disableFlushButton = FALSE;
BOOLEAN isFileOrDirectory = FALSE;
BOOLEAN isConsoleHandle = FALSE;
+ BOOLEAN isPipeHandle = FALSE;
FILE_FS_DEVICE_INFORMATION fileDeviceInfo;
FILE_MODE_INFORMATION fileModeInfo;
FILE_STANDARD_INFORMATION fileStandardInfo;
@@ -690,6 +691,7 @@ VOID PhpUpdateHandleGeneral(
switch (fileDeviceInfo.DeviceType)
{
case FILE_DEVICE_NAMED_PIPE:
+ isPipeHandle = TRUE;
PhSetListViewSubItem(Context->ListViewHandle, Context->ListViewRowCache[PH_HANDLE_GENERAL_INDEX_FILETYPE], 1, L"Pipe");
break;
case FILE_DEVICE_CD_ROM:
@@ -713,10 +715,11 @@ VOID PhpUpdateHandleGeneral(
}
}
- if (isConsoleHandle)
+ if (isPipeHandle || isConsoleHandle)
{
- // TODO: We block indefinitely when calling NtQueryInformationFile for '\Device\ConDrv\CurrentIn'
- // but we can query other '\Device\ConDrv' console handles (dmex)
+ // NOTE: NtQueryInformationFile for '\Device\ConDrv\CurrentIn' causes a deadlock but
+ // we can query other '\Device\ConDrv' console handles. NtQueryInformationFile also
+ // causes a deadlock for some types of named pipes and only on Win10 (dmex)
status = PhCallNtQueryFileInformationWithTimeout(
fileHandle,
FileModeInformation,
|
Fix issues with undefined wsgi_daemon_process when compiling for Windows. | @@ -419,6 +419,8 @@ InterpreterObject *newInterpreterObject(const char *name)
int is_threaded = 0;
int is_forked = 0;
+ int is_service_script = 0;
+
const char *str = NULL;
/* Create handle for interpreter and local data. */
@@ -595,7 +597,10 @@ InterpreterObject *newInterpreterObject(const char *name)
* registering signal handlers so they are ignored.
*/
+#if defined(MOD_WSGI_WITH_DAEMONS)
if (wsgi_daemon_process && wsgi_daemon_process->group->threads == 0) {
+ is_service_script = 1;
+
module = PyImport_ImportModule("signal");
if (module) {
@@ -637,7 +642,9 @@ InterpreterObject *newInterpreterObject(const char *name)
Py_XDECREF(module);
}
- else if (wsgi_server_config->restrict_signal != 0) {
+#endif
+
+ if (!is_service_script && wsgi_server_config->restrict_signal != 0) {
module = PyImport_ImportModule("signal");
if (module) {
@@ -2057,17 +2064,19 @@ apr_status_t wsgi_python_term(void)
* condition.
*/
+#if defined(MOD_WSGI_WITH_DAEMONS)
if (wsgi_daemon_process)
apr_thread_mutex_lock(wsgi_shutdown_lock);
-#if defined(MOD_WSGI_WITH_DAEMONS)
wsgi_daemon_shutdown++;
#endif
Py_Finalize();
+#if defined(MOD_WSGI_WITH_DAEMONS)
if (wsgi_daemon_process)
apr_thread_mutex_unlock(wsgi_shutdown_lock);
+#endif
wsgi_python_initialized = 0;
|
Fix cap for argc / envc + display an error if these values go above that
cap | @@ -203,9 +203,9 @@ char **uhyve_envp = NULL;
typedef struct {
int argc;
- int argsz[128];
+ int argsz[MAX_ARGC_ENVC];
int envc;
- int envsz[128];
+ int envsz[MAX_ARGC_ENVC];
} __attribute__ ((packed)) uhyve_cmdsize_t;
typedef struct {
@@ -1605,6 +1605,13 @@ int uhyve_loop(int argc, char **argv)
i++;
uhyve_envc = i;
+ if(uhyve_argc > MAX_ARGC_ENVC || uhyve_envc > MAX_ARGC_ENVC) {
+ fprintf(stderr, "uhyve cannot forward more than %d command line "
+ "arguments or environment variables, please consider increasing "
+ "the MAX_ARGC_ENVP cmake argument\n", MAX_ARGC_ENVC);
+ return -1;
+ }
+
if (hermit_check)
ts = atoi(hermit_check);
|
tls13: add labels
add client and server cv magic words | MBEDTLS_SSL_TLS1_3_LABEL( exp_master , "exp master" ) \
MBEDTLS_SSL_TLS1_3_LABEL( ext_binder , "ext binder" ) \
MBEDTLS_SSL_TLS1_3_LABEL( res_binder , "res binder" ) \
- MBEDTLS_SSL_TLS1_3_LABEL( derived , "derived" )
+ MBEDTLS_SSL_TLS1_3_LABEL( derived , "derived" ) \
+ MBEDTLS_SSL_TLS1_3_LABEL( client_cv , "TLS 1.3, client CertificateVerify" ) \
+ MBEDTLS_SSL_TLS1_3_LABEL( server_cv , "TLS 1.3, server CertificateVerify" )
#define MBEDTLS_SSL_TLS1_3_LABEL( name, string ) \
const unsigned char name [ sizeof(string) - 1 ];
@@ -57,9 +59,12 @@ struct mbedtls_ssl_tls1_3_labels_struct
extern const struct mbedtls_ssl_tls1_3_labels_struct mbedtls_ssl_tls1_3_labels;
+#define MBEDTLS_SSL_TLS1_3_LBL_LEN( LABEL ) \
+ sizeof(mbedtls_ssl_tls1_3_labels.LABEL)
+
#define MBEDTLS_SSL_TLS1_3_LBL_WITH_LEN( LABEL ) \
mbedtls_ssl_tls1_3_labels.LABEL, \
- sizeof(mbedtls_ssl_tls1_3_labels.LABEL)
+ MBEDTLS_SSL_TLS1_3_LBL_LEN( LABEL )
#define MBEDTLS_SSL_TLS1_3_KEY_SCHEDULE_MAX_LABEL_LEN \
sizeof( union mbedtls_ssl_tls1_3_labels_union )
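A minimal sketch of how the two helpers relate, using only the macros and the new client_cv label defined above (variable names are illustrative):

/* Minimal sketch: label bytes and length for "TLS 1.3, client CertificateVerify". */
const unsigned char *label = mbedtls_ssl_tls1_3_labels.client_cv;
size_t label_len = MBEDTLS_SSL_TLS1_3_LBL_LEN( client_cv );   /* string length, no trailing '\0' */
/* MBEDTLS_SSL_TLS1_3_LBL_WITH_LEN( client_cv ) expands to the same pointer
   and length as the two lines above, passed as two consecutive arguments. */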
|
value BUGFIX sr_val_t does not support rpc/action envelopes
Fixes | @@ -844,7 +844,7 @@ sr_tree_to_val(const struct lyd_node *data, const char *path, sr_val_t **value)
sr_error_info_t *err_info = NULL;
struct ly_set *set = NULL;
- SR_CHECK_ARG_APIRET(!data || !path || !value, NULL, err_info)
+ SR_CHECK_ARG_APIRET(!data || (data->schema->nodetype & (LYS_RPC | LYS_ACTION)) || !path || !value, NULL, err_info);
*value = NULL;
@@ -902,7 +902,11 @@ sr_tree_to_values(const struct lyd_node *data, const char *xpath, sr_val_t **val
SR_CHECK_MEM_GOTO(!*values, err_info, cleanup);
for (i = 0; i < set->number; ++i) {
- if ((err_info = sr_val_ly2sr(set->set.d[i], (*values) + i))) {
+ if (set->set.d[i]->schema->nodetype & (LYS_RPC | LYS_ACTION)) {
+ continue;
+ }
+
+ if ((err_info = sr_val_ly2sr(set->set.d[i], *values + *value_cnt))) {
goto cleanup;
}
++(*value_cnt);
|
bignum_mod: Updated modulus lifecycle with mm and rr.
This patch updates the `mbedtls_mpi_mod_modulus_setup/free()`
methods to precalculate mm and rr (the Montgomery constant squared) during
setup and to zeroize rr during free.
A static `set_mont_const_square()` is added to manage the memory allocation
and parameter checking before invoking
`mbedtls_mpi_core_get_mont_r2_unsafe()`. | @@ -77,6 +77,9 @@ void mbedtls_mpi_mod_modulus_free( mbedtls_mpi_mod_modulus *m )
switch( m->int_rep )
{
case MBEDTLS_MPI_MOD_REP_MONTGOMERY:
+ mbedtls_platform_zeroize( (mbedtls_mpi_uint *) m->rep.mont.rr,
+ m->limbs );
+ mbedtls_free( (mbedtls_mpi_uint *)m->rep.mont.rr );
m->rep.mont.rr = NULL;
m->rep.mont.mm = 0; break;
case MBEDTLS_MPI_MOD_REP_OPT_RED:
@@ -93,6 +96,38 @@ void mbedtls_mpi_mod_modulus_free( mbedtls_mpi_mod_modulus *m )
m->int_rep = MBEDTLS_MPI_MOD_REP_INVALID;
}
+static int set_mont_const_square( const mbedtls_mpi_uint **X,
+ const mbedtls_mpi_uint *A,
+ size_t limbs )
+{
+ int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
+ mbedtls_mpi N;
+ mbedtls_mpi RR;
+
+ mbedtls_mpi_init( &N );
+ mbedtls_mpi_init( &RR );
+
+ if ( A == NULL || limbs == 0 || limbs >= ( MBEDTLS_MPI_MAX_LIMBS / 2 ) - 2 )
+ goto cleanup;
+
+ if ( !mbedtls_mpi_grow( &N, limbs ))
+ memcpy( N.p, A, sizeof(mbedtls_mpi_uint) * limbs );
+ else
+ goto cleanup;
+
+ mbedtls_mpi_core_get_mont_r2_unsafe(&RR, &N);
+
+ *X = RR.p;
+ RR.p = NULL;
+ ret = 0;
+
+cleanup:
+ mbedtls_mpi_free(&N);
+ mbedtls_mpi_free(&RR);
+ ret = ( ret != 0 ) ? MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED : 0;
+ return( ret );
+}
+
int mbedtls_mpi_mod_modulus_setup( mbedtls_mpi_mod_modulus *m,
const mbedtls_mpi_uint *p,
size_t p_limbs,
@@ -120,8 +155,9 @@ int mbedtls_mpi_mod_modulus_setup( mbedtls_mpi_mod_modulus *m,
{
case MBEDTLS_MPI_MOD_REP_MONTGOMERY:
m->int_rep = int_rep;
- m->rep.mont.rr = NULL;
- m->rep.mont.mm = 0; break;
+ m->rep.mont.mm = mbedtls_mpi_core_montmul_init( m->p );
+ set_mont_const_square( &m->rep.mont.rr, m->p, m->limbs );
+ break;
case MBEDTLS_MPI_MOD_REP_OPT_RED:
m->int_rep = int_rep;
m->rep.ored = NULL;
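A minimal usage sketch of the lifecycle described above, assuming the setup/free signatures visible in this patch; the init helper and the p/p_limbs inputs are placeholders, not part of the change:

/* Sketch only: p points to the limbs of an odd modulus, p_limbs entries long. */
mbedtls_mpi_mod_modulus N;
mbedtls_mpi_mod_modulus_init( &N );                     /* assumed init counterpart of free() */
mbedtls_mpi_mod_modulus_setup( &N, p, p_limbs,
                               MBEDTLS_MPI_MOD_REP_MONTGOMERY );
/* ... N.rep.mont.mm and N.rep.mont.rr are now precomputed for later use ... */
mbedtls_mpi_mod_modulus_free( &N );                     /* zeroizes and releases rr */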
|
suppress the output from ping command to determine if internal access is available | @@ -19,7 +19,7 @@ ROCM_VERSION=${ROCM_VERSION:-4.0.0}
# Set the AOMP VERSION STRING and AOMP_PROJECT_REPO_BRANCH.
AOMP_GIT_INTERNAL_IP="gerrit-git.amd.com"
-ping -c 1 $AOMP_GIT_INTERNAL_IP 2> /dev/null
+ping -c 1 $AOMP_GIT_INTERNAL_IP 2> /dev/null >/dev/null
if [ $? != 0 ]; then
AOMP_VERSION=${AOMP_VERSION:-"13.0"}
AOMP_VERSION_MOD=${AOMP_VERSION_MOD:-"7"}
|
pkg/columns/formatter/textcolumns: call buildFillString() in AdjustWidthsToContent() | @@ -297,6 +297,8 @@ func (tf *TextColumnsFormatter[T]) AdjustWidthsToContent(entries []*T, considerH
totalWidth += column.calculatedWidth
}
+ tf.buildFillString()
+
// Last but not least, add column dividers
totalWidth += len([]rune(tf.options.ColumnDivider)) * (len(tf.showColumns) - 1)
|
hv:vtd: fix MISRA-C violations: logical conjunctions need brackets
This patch fixes MISRA-C violations in arch/x86/vtd.c
where logical conjunctions need brackets.
Acked-by: Anthony Xu | @@ -145,7 +145,7 @@ bool iommu_snoop_supported(const struct acrn_vm *vm)
{
bool ret;
- if (vm->iommu == NULL || vm->iommu->iommu_snoop) {
+ if ((vm->iommu == NULL) || (vm->iommu->iommu_snoop)) {
ret = true;
} else {
ret = false;
@@ -175,7 +175,7 @@ static int32_t register_hrhd_units(void)
uint32_t i;
int32_t ret = 0;
- if (info == NULL || info->drhd_count == 0U) {
+ if ((info == NULL) || (info->drhd_count == 0U)) {
pr_fatal("%s: can't find dmar info\n", __func__);
return -ENODEV;
}
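For readers unfamiliar with the rule, a generic before/after illustration (not taken from the ACRN tree):

#include <stdbool.h>
#include <stdint.h>

static bool is_invalid(const void *ptr, uint32_t count)
{
    /* non-compliant (pre-patch style): bare operands around ||
     *     return ptr == NULL || count == 0U;
     * compliant (post-patch style): each operand of || is bracketed */
    return ((ptr == NULL) || (count == 0U));
}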
|
Simplelink GPIO HAL: Fix bug in pull and IRQ config readout | @@ -101,7 +101,7 @@ to_hal_cfg(PIN_Config pin_cfg, gpio_hal_pin_cfg_t *cfg)
}
/* Pulling config */
- switch(pin_cfg & PIN_BM_PULLING) {
+ switch(pin_cfg & (PIN_GEN | PIN_BM_PULLING)) {
case PIN_NOPULL: *cfg |= GPIO_HAL_PIN_CFG_PULL_NONE; break;
case PIN_PULLUP: *cfg |= GPIO_HAL_PIN_CFG_PULL_UP; break;
case PIN_PULLDOWN: *cfg |= GPIO_HAL_PIN_CFG_PULL_DOWN; break;
@@ -111,7 +111,7 @@ to_hal_cfg(PIN_Config pin_cfg, gpio_hal_pin_cfg_t *cfg)
/* Interrupt config */
if(pin_cfg & PIN_BM_IRQ) {
/* Interrupt edge config */
- switch(pin_cfg & PIN_BM_IRQ) {
+ switch(pin_cfg & (PIN_GEN | PIN_BM_IRQ)) {
case PIN_IRQ_DIS: *cfg |= GPIO_HAL_PIN_CFG_EDGE_NONE;
*cfg |= GPIO_HAL_PIN_CFG_INT_DISABLE;
break;
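The effect of the missing PIN_GEN bit can be shown with a small sketch; the constant values below are invented for illustration and are not the real TI PIN.h encodings:

#include <stdint.h>

#define PIN_GEN        0x80000000u   /* hypothetical marker bit carried by every PIN_* constant */
#define PIN_BM_PULLING 0x00006000u   /* hypothetical mask covering only the pull-config field */
#define PIN_PULLUP     (PIN_GEN | 0x00002000u)

static int matches_pullup(uint32_t pin_cfg)
{
    /* Before the fix: masking with PIN_BM_PULLING strips PIN_GEN, so the result
     * can never equal PIN_PULLUP (which still carries PIN_GEN) -> never matches. */
    int broken = ((pin_cfg & PIN_BM_PULLING) == PIN_PULLUP);

    /* After the fix: keep PIN_GEN in the mask so the comparison can succeed. */
    int fixed = ((pin_cfg & (PIN_GEN | PIN_BM_PULLING)) == PIN_PULLUP);

    return fixed && !broken;   /* broken is always 0 here */
}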
|
samd: Fix a lock-up situation at high traffic.
This PR fixes a transmit lock-up, which happens when data is received
and sent at the same time at moderate to high speeds, for example code
which just echoes incoming data.
In my case, an issue was reported here: | @@ -286,14 +286,14 @@ bool dcd_edpt_xfer (uint8_t rhport, uint8_t ep_addr, uint8_t * buffer, uint16_t
{
bank->PCKSIZE.bit.MULTI_PACKET_SIZE = total_bytes;
bank->PCKSIZE.bit.BYTE_COUNT = 0;
- ep->EPSTATUSCLR.reg |= USB_DEVICE_EPSTATUSCLR_BK0RDY;
- ep->EPINTFLAG.reg |= USB_DEVICE_EPINTFLAG_TRFAIL0;
+ ep->EPSTATUSCLR.reg = USB_DEVICE_EPSTATUSCLR_BK0RDY;
+ ep->EPINTFLAG.reg = USB_DEVICE_EPINTFLAG_TRFAIL0;
} else
{
bank->PCKSIZE.bit.MULTI_PACKET_SIZE = 0;
bank->PCKSIZE.bit.BYTE_COUNT = total_bytes;
- ep->EPSTATUSSET.reg |= USB_DEVICE_EPSTATUSSET_BK1RDY;
- ep->EPINTFLAG.reg |= USB_DEVICE_EPINTFLAG_TRFAIL1;
+ ep->EPSTATUSSET.reg = USB_DEVICE_EPSTATUSSET_BK1RDY;
+ ep->EPINTFLAG.reg = USB_DEVICE_EPINTFLAG_TRFAIL1;
}
return true;
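The underlying hazard is a read-modify-write on write-one-to-clear (W1C) flag registers; a generic sketch with a hypothetical register layout:

#include <stdint.h>

volatile uint8_t INTFLAG;        /* hypothetical W1C register: writing 1 clears a flag, 0 is ignored */
#define FLAG_TRCPT0   (1u << 0)  /* transfer complete, bank 0 */
#define FLAG_TRFAIL0  (1u << 2)  /* transfer fail, bank 0 */

static void clear_trfail0(void)
{
    /* Buggy: the read-modify-write writes back any other flag that happens to be
     * set (e.g. FLAG_TRCPT0 raised by concurrent RX), clearing it before the
     * driver ever services it -> the lock-up described above. */
    /* INTFLAG |= FLAG_TRFAIL0; */

    /* Correct: write only the bit to clear; zero bits have no effect on W1C. */
    INTFLAG = FLAG_TRFAIL0;
}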
|
There is a mismatch between the sdssplitlen() comments and implementation:
when count is 0, return NULL | @@ -939,15 +939,13 @@ sds *sdssplitlen(const char *s, ssize_t len, const char *sep, int seplen, int *c
long start = 0, j;
sds *tokens;
- if (seplen < 1 || len < 0) return NULL;
-
+ if (seplen < 1 || len <= 0) {
+ *count = 0;
+ return NULL;
+ }
tokens = s_malloc(sizeof(sds)*slots);
if (tokens == NULL) return NULL;
- if (len == 0) {
- *count = 0;
- return tokens;
- }
for (j = 0; j < (len-(seplen-1)); j++) {
/* make sure there is room for the next element and the final one */
if (slots < elements+2) {
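A small caller-side sketch of the contract being aligned here, assuming the usual companion helper sdsfreesplitres():

int count = 0;
sds *tokens = sdssplitlen("a,b,c", 5, ",", 1, &count);   /* count == 3 */
if (tokens != NULL)
    sdsfreesplitres(tokens, count);

tokens = sdssplitlen("", 0, ",", 1, &count);
/* With this change: tokens == NULL and count == 0, matching the comment,
 * instead of a non-NULL empty allocation for zero-length input. */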
|
Add prose test to not spawn mongocryptd if shared library is loaded | @@ -2675,13 +2675,27 @@
mongoc_auto_encryption_opts_set_bypass_query_analysis (
auto_encryption_opts, true);
} else if (0 == strcmp (auto_encryption_opt, "cryptSharedLibRequired")) {
+ bson_t *schema =
+ get_bson_from_json_file ("./src/libmongoc/tests/"
+ "client_side_encryption_prose/external/"
+ "external-schema.json");
+ BSON_ASSERT (schema);
+ bson_t *schema_map = BCON_NEW ("db.coll", BCON_DOCUMENT (schema));
+ mongoc_auto_encryption_opts_set_schema_map (auto_encryption_opts,
+ schema_map);
check_crypt_shared = true;
char *env_cryptSharedLibPath =
test_framework_getenv ("MONGOC_TEST_CRYPT_SHARED_LIB_PATH");
BSON_ASSERT (env_cryptSharedLibPath);
BSON_APPEND_UTF8 (extra, "cryptSharedLibPath", env_cryptSharedLibPath);
BSON_APPEND_BOOL (extra, "cryptSharedLibRequired", true);
+ BSON_APPEND_UTF8 (
+ extra,
+ "mongocryptdURI",
+ "mongodb://localhost:27021/db?serverSelectionTimeoutMS=1000");
bson_free (env_cryptSharedLibPath);
+ bson_destroy (schema);
+ bson_destroy (schema_map);
} else {
test_error ("Unexpected 'auto_encryption_opt' argument: %s",
auto_encryption_opt);
@@ -2706,7 +2720,7 @@ test_bypass_spawning_via_helper (const char *auto_encryption_opt)
NULL);
}
- /* Insert { 'encrypt': 'test' }. Should succeed. */
+ /* Insert { 'unencrypted': 'test' }. Should succeed. */
coll = mongoc_client_get_collection (client_encrypted, "db", "coll");
doc_to_insert = BCON_NEW ("unencrypted", "test");
ret = mongoc_collection_insert_one (
@@ -2740,8 +2754,9 @@ test_bypass_spawning_via_bypassQueryAnalysis (void *unused)
test_bypass_spawning_via_helper ("bypass_query_analysis");
}
+/* Prose Test 8: Bypass Spawning mongocryptd - Via loading shared library */
static void
-test_bypass_spawning_via_cryptSharedLibRequired (void *unused)
+test_bypass_spawning_via_cryptSharedLibLoaded (void *unused)
{
BSON_UNUSED (unused);
test_bypass_spawning_via_helper ("cryptSharedLibRequired");
@@ -5773,8 +5788,8 @@ test_client_side_encryption_install (TestSuite *suite)
test_framework_skip_if_max_wire_version_less_than_8);
TestSuite_AddFull (suite,
"/client_side_encryption/bypass_spawning_mongocryptd/"
- "cryptSharedLibRequired",
- test_bypass_spawning_via_cryptSharedLibRequired,
+ "cryptSharedLibLoaded",
+ test_bypass_spawning_via_cryptSharedLibLoaded,
NULL,
NULL,
test_framework_skip_if_no_client_side_encryption,
|
saves our public keys and deed sig in %jael on boot | $: yen/(set duct) :: trackers
:: XX use this ::
our=ship ::
+ sig=(unit oath) :: for a moon
:: XX reconcile with .dns.eth ::
tuf=(list turf) :: domains
fak/_| :: fake keys
:: sort-of single-homed
::
=. our.own.sub our
+ :: save our parent signature (only for moons)
+ ::
+ =. sig.own.sub sig.seed.tac
+ :: our initial public key
+ ::
+ =. kyz.puk.sub
+ =/ cub (nol:nu:crub:crypto key.seed.tac)
+ %+ ~(put by kyz.puk.sub)
+ our
+ [& lyf.seed.tac (my [lyf.seed.tac pub:ex:cub] ~)]
:: our initial private key, as a +tree of +rite
::
=/ rit (sy [%jewel (my [lyf.seed.tac key.seed.tac] ~)] ~)
:: sort-of single-homed
::
=. our.own.sub our
+ :: fake keys are deterministically derived from the ship
+ ::
+ =/ cub (pit:nu:crub:crypto 512 our)
+ :: save our parent signature (only for moons)
+ ::
+ :: XX move logic to zuse
+ ::
+ =. sig.own.sub
+ ?. ?=(%earl (clan:title our))
+ ~
+ =/ yig (pit:nu:crub:crypto 512 (sein:title our))
+ [~ (sign:as:yig (shaf %earl (sham our 1 pub:ex:cub)))]
+ :: our initial public key
+ ::
+ =. kyz.puk.sub
+ (~(put by kyz.puk.sub) our [& 1 (my [1 pub:ex:cub] ~)])
:: our private key, as a +tree of +rite
::
:: Private key updates are disallowed for fake ships,
:: so we do this first.
::
- =/ cub (pit:nu:crub:crypto 512 our)
=/ rit (sy [%jewel (my [1 sec:ex:cub] ~)] ~)
=. +>.$ $(tac [%mint our our rit])
:: set the fake bit
|
arch/imxrt: split hprtc conditional from lpsrtc in Make.defs
When LPSRTC is enabled, its configuration enables HPRTC by selecting it.
This means Make.defs does not need to include the hprtc file under the
lpsrtc conditional; Make.defs can include imxrt_hprtc.c under the hprtc
conditional instead. | @@ -157,8 +157,8 @@ endif
ifeq ($(CONFIG_IMXRT_SNVS_LPSRTC),y)
CHIP_CSRCS += imxrt_lpsrtc.c
-CHIP_CSRCS += imxrt_hprtc.c
-else ifeq ($(CONFIG_IMXRT_SNVS_HPRTC),y)
+endif
+ifeq ($(CONFIG_IMXRT_SNVS_HPRTC),y)
CHIP_CSRCS += imxrt_hprtc.c
endif
|
Add Object#memory_statistics for debug | @@ -1003,6 +1003,21 @@ static void c_object_instance_variables(struct VM *vm, mrbc_value v[], int argc)
SET_NIL_RETURN();
}
+
+static void c_object_memory_statistics(struct VM *vm, mrbc_value v[], int argc)
+{
+ int total, used, free, frag;
+ mrbc_alloc_statistics(&total, &used, &free, &frag);
+
+ console_printf("Memory Statistics\n");
+ console_printf(" Total: %d\n", total);
+ console_printf(" Used : %d\n", used);
+ console_printf(" Free : %d\n", free);
+ console_printf(" Frag.: %d\n", frag);
+
+ SET_NIL_RETURN();
+}
+
#endif
@@ -1041,6 +1056,8 @@ static void mrbc_init_class_object(struct VM *vm)
mrbc_define_method(vm, mrbc_class_object, "object_id", c_object_object_id);
mrbc_define_method(vm, mrbc_class_object, "instance_methods", c_object_instance_methods);
mrbc_define_method(vm, mrbc_class_object, "instance_variables", c_object_instance_variables);
+ mrbc_define_method(vm, mrbc_class_object, "memory_statistics", c_object_memory_statistics);
+
#endif
}
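From Ruby code the new method is called as memory_statistics; the same numbers are available on the C side through the allocator helper used above, as in this minimal sketch:

int total, used, free_bytes, frag;
mrbc_alloc_statistics(&total, &used, &free_bytes, &frag);   /* same call as in the new method */
console_printf("heap: total=%d used=%d free=%d frag=%d\n",
               total, used, free_bytes, frag);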
|
Only use Bidirectional Streams for Throughput Perf | @@ -297,9 +297,7 @@ ThroughputClient::StartQuic()
Status =
MsQuic->StreamOpen(
Shutdown.ConnHandle,
- DownloadLength != 0 ?
- QUIC_STREAM_OPEN_FLAG_NONE :
- QUIC_STREAM_OPEN_FLAG_UNIDIRECTIONAL,
+ QUIC_STREAM_OPEN_FLAG_NONE,
[](HQUIC Handle, void* Context, QUIC_STREAM_EVENT* Event) -> QUIC_STATUS {
return ((StreamContext*)Context)->Client->
StreamCallback(
|
resgroup: add rolresgroup to pg_roles.
rolresgroup was added to pg_authid in previous commits; we should also
add it to pg_roles. | @@ -26,7 +26,8 @@ CREATE VIEW pg_roles AS
rolcreaterexthttp,
rolcreatewextgpfd,
rolcreaterexthdfs,
- rolcreatewexthdfs
+ rolcreatewexthdfs,
+ rolresgroup
FROM pg_authid;
CREATE VIEW pg_shadow AS
|
Fix netcore on netcore runtime in windows | @@ -18,7 +18,7 @@ netcore_win::netcore_win()
{
this->log = new logger();
this->log->disable();
- this->domain_id = 0;
+ this->domain_id = -1;
}
@@ -132,6 +132,7 @@ bool netcore_win::create_host() {
}
HRESULT hr;
+ bool flgasError = false;
*this->log << W("Setting ICLRRuntimeHost2 startup flags") << logger::endl;
@@ -142,7 +143,7 @@ bool netcore_win::create_host() {
STARTUP_FLAGS::STARTUP_CONCURRENT_GC));
if (FAILED(hr)) {
*this->log << W("Failed to set startup flags. ERRORCODE: ") << hr << logger::endl;
- return false;
+ flgasError = true;
}
*this->log << W("Starting ICLRRuntimeHost2") << logger::endl;
@@ -153,6 +154,14 @@ bool netcore_win::create_host() {
return false;
}
+ if (flgasError) {
+ hr = host->GetCurrentAppDomainId(&this->domain_id);
+ if (FAILED(hr)) {
+ *this->log << W("Failed to GetCurrentAppDomainId. ERRORCODE: ") << hr << logger::endl;
+ return false;
+ }
+ }
+ else {
//-------------------------------------------------------------
// Create an AppDomain
@@ -195,10 +204,10 @@ bool netcore_win::create_host() {
*this->log << W("Creating an AppDomain") << logger::endl;
- *this->log << W("TRUSTED_PLATFORM_ASSEMBLIES=") << property_values[0] << logger::endl;
- *this->log << W("APP_PATHS=") << property_values[1] << logger::endl;
- *this->log << W("APP_NI_PATHS=") << property_values[2] << logger::endl;
- *this->log << W("NATIVE_DLL_SEARCH_DIRECTORIES=") << property_values[3] << logger::endl;
+ //*this->log << W("TRUSTED_PLATFORM_ASSEMBLIES=") << property_values[0] << logger::endl;
+ //*this->log << W("APP_PATHS=") << property_values[1] << logger::endl;
+ //*this->log << W("APP_NI_PATHS=") << property_values[2] << logger::endl;
+ //*this->log << W("NATIVE_DLL_SEARCH_DIRECTORIES=") << property_values[3] << logger::endl;
hr = host->CreateAppDomainWithManager(
@@ -231,6 +240,8 @@ bool netcore_win::create_host() {
*this->log << W("Failed call to CreateAppDomainWithManager. ERRORCODE: ") << hr << logger::endl;
return false;
}
+ }
+
return true;
}
bool netcore_win::load_main() {
|
Unclutter benchmark-hslua
The way benchmarks were defined was unnecessarily verbose. | @@ -61,42 +61,17 @@ setupTableWithFooField = do
Lua.pushstring "foo"
Lua.setfield (Lua.nthFromTop 2) "bar"
-getfieldBench :: Benchmark
-getfieldBench =
- let getFooField = Lua.getfield Lua.stackTop "foo"
- in luaBench "getfield" setupTableWithFooField getFooField
-
-getlfieldBench :: Benchmark
-getlfieldBench =
- let getlfieldFoo = getlfield Lua.stackTop "foo"
- in luaBench "getlfield" setupTableWithFooField getlfieldFoo
-
--- * Benchmark setfield
-setfieldBench :: Benchmark
-setfieldBench =
- let setfieldFoo = do
- Lua.pushboolean True
- Lua.setfield (Lua.nthFromTop 2) "foo"
- in luaBench "setfield" setupTableWithFooField setfieldFoo
-
-setfield_oldBench :: Benchmark
-setfield_oldBench =
- let setfieldFoo = do
- Lua.pushboolean True
- setfield_old Lua.stackTop "foo"
- in luaBench "setfield_old" setupTableWithFooField setfieldFoo
-
-getglobalBench :: Benchmark
-getglobalBench =
- luaBench "getglobal" setupTableWithFooField (Lua.getglobal "foo")
-
main :: IO ()
main = defaultMain
- [ getfieldBench
- , getlfieldBench
- , setfieldBench
- , setfield_oldBench
- , getglobalBench
+ [ luaBench "getfield" setupTableWithFooField (Lua.getfield Lua.stackTop "foo")
+ , luaBench "getlfield" setupTableWithFooField (getlfield Lua.stackTop "foo")
+ , luaBench "setfield"
+ (Lua.newtable *> Lua.pushboolean True)
+ (Lua.setfield (Lua.nthFromTop 2) "foo")
+ , luaBench "setfield_old"
+ (Lua.newtable *> Lua.pushboolean True)
+ (setfield_old (Lua.nthFromTop 2) "foo")
+ , luaBench "getglobal" (return ()) (Lua.getglobal "foo")
, luaBench "setglobal" (Lua.pushboolean True) (Lua.setglobal "foo")
, luaBench "setraw"
(Lua.newtable *> Lua.pushstring "foo" *> Lua.pushboolean True)
|
[clocks] ClockDividerN: make first output edge occur on first input edge | module ClockDividerN #(parameter DIV)(output logic clk_out = 1'b0, input clk_in);
- localparam DIV_COUNTER_WIDTH = $clog2(DIV);
+ localparam CWIDTH = $clog2(DIV);
localparam LOW_CYCLES = DIV / 2;
+ localparam HIGH_TRANSITION = LOW_CYCLES - 1;
+ localparam LOW_TRANSITION = DIV - 1;
generate
if (DIV == 1) begin
@@ -17,19 +19,19 @@ module ClockDividerN #(parameter DIV)(output logic clk_out = 1'b0, input clk_in)
clk_out = clk_in;
end
end else begin
- reg [DIV_COUNTER_WIDTH - 1: 0] count = '0;
+ reg [CWIDTH - 1: 0] count = HIGH_TRANSITION[CWIDTH-1:0];
// The blocking assignment to clock out is used to conform what was done
// in RC's clock dividers.
// It should have the effect of preventing registers in the divided clock
// domain latching register updates launched by the fast clock-domain edge
// that occurs at the same simulated time (as the divided clock edge).
always @(posedge clk_in) begin
- if (count == (DIV - 1)) begin
+ if (count == LOW_TRANSITION[CWIDTH-1:0]) begin
clk_out = 1'b0;
count <= '0;
end
else begin
- if (count == (LOW_CYCLES - 1)) begin
+ if (count == HIGH_TRANSITION[CWIDTH-1:0]) begin
clk_out = 1'b1;
end
count <= count + 1'b1;
|
Fix typos in help | @@ -66,12 +66,12 @@ scrcpy_print_usage(const char *arg0) {
"\n"
" --force-adb-forward\n"
" Do not attempt to use \"adb reverse\" to connect to the\n"
- " the device.\n"
+ " device.\n"
"\n"
" --forward-all-clicks\n"
" By default, right-click triggers BACK (or POWER on) and\n"
" middle-click triggers HOME. This option disables these\n"
- " shortcuts and forward the clicks to the device instead.\n"
+ " shortcuts and forwards the clicks to the device instead.\n"
"\n"
" -f, --fullscreen\n"
" Start in fullscreen.\n"
@@ -142,7 +142,7 @@ scrcpy_print_usage(const char *arg0) {
"\n"
" --push-target path\n"
" Set the target directory for pushing files to the device by\n"
- " drag & drop. It is passed as-is to \"adb push\".\n"
+ " drag & drop. It is passed as is to \"adb push\".\n"
" Default is \"/sdcard/Download/\".\n"
"\n"
" -r, --record file.mp4\n"
@@ -162,14 +162,14 @@ scrcpy_print_usage(const char *arg0) {
"\n"
" --rotation value\n"
" Set the initial display rotation.\n"
- " Possibles values are 0, 1, 2 and 3. Each increment adds a 90\n"
+ " Possible values are 0, 1, 2 and 3. Each increment adds a 90\n"
" degrees rotation counterclockwise.\n"
"\n"
" -s, --serial serial\n"
" The device serial number. Mandatory only if several devices\n"
" are connected to adb.\n"
"\n"
- " --shortcut-mod key[+...]][,...]\n"
+ " --shortcut-mod key[+...][,...]\n"
" Specify the modifiers to use for scrcpy shortcuts.\n"
" Possible keys are \"lctrl\", \"rctrl\", \"lalt\", \"ralt\",\n"
" \"lsuper\" and \"rsuper\".\n"
|
in_random: use nanosecond API | #include <fluent-bit/flb_error.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_stats.h>
+#include <fluent-bit/flb_time.h>
#define DEFAULT_INTERVAL_SEC 1
#define DEFAULT_INTERVAL_NSEC 0
@@ -78,7 +79,7 @@ static int in_random_collect(struct flb_input_instance *i_ins,
flb_input_buf_write_start(i_ins);
msgpack_pack_array(&i_ins->mp_pck, 2);
- msgpack_pack_uint64(&i_ins->mp_pck, time(NULL));
+ flb_time_append_to_msgpack(NULL, &i_ins->mp_pck, 0);
msgpack_pack_map(&i_ins->mp_pck, 1);
msgpack_pack_bin(&i_ins->mp_pck, 10);
|
better pos for combo on scoreboard | @@ -336,7 +336,7 @@ class Scoreboard(FrameObject):
return
number = number + "x"
n = len(number)
- x_start = self.frames[0].size[0] - int(n * self.combo[0].size[0])
+ x_start = self.frames[0].size[0] * 0.95 - int(n * self.combo[0].size[0])
self.drawnumber(background, x_start, y_offset, number, self.combo, alpha)
def drawname(self, background, y_offset, text, alpha):
|
sdl/sdl: Add SDL_INIT_SENSOR for SDL 2.0.9 | // Package sdl is SDL2 wrapped for Go users. It enables interoperability between Go and the SDL2 library which is written in C. That means the original SDL2 installation is required for this to work. SDL2 is a cross-platform development library designed to provide low level access to audio, keyboard, mouse, joystick, and graphics hardware via OpenGL and Direct3D.
package sdl
-//#include "sdl_wrapper.h"
+/*
+#include "sdl_wrapper.h"
+
+#if !(SDL_VERSION_ATLEAST(2,0,9))
+#pragma message("SDL_INIT_SENSOR is not supported before SDL 2.0.9")
+#define SDL_INIT_SENSOR (0x00008000u)
+#endif
+*/
import "C"
import (
@@ -19,6 +26,7 @@ const (
INIT_GAMECONTROLLER = C.SDL_INIT_GAMECONTROLLER // controller subsystem; automatically initializes the joystick subsystem
INIT_EVENTS = C.SDL_INIT_EVENTS // events subsystem
INIT_NOPARACHUTE = C.SDL_INIT_NOPARACHUTE // compatibility; this flag is ignored
+ INIT_SENSOR = C.SDL_INIT_SENSOR // sensor subsystem
INIT_EVERYTHING = C.SDL_INIT_EVERYTHING // all of the above subsystems
)
|
esp_pm: Fix build error when CONFIG_PM_PROFILING is enabled | @@ -646,7 +646,7 @@ void IRAM_ATTR vApplicationSleep( TickType_t xExpectedIdleTime )
int64_t sleep_time_us = MIN(wakeup_delay_us, time_until_next_alarm);
if (sleep_time_us >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP * portTICK_PERIOD_MS * 1000LL) {
esp_sleep_enable_timer_wakeup(sleep_time_us - LIGHT_SLEEP_EARLY_WAKEUP_US);
-#ifdef CONFIG_PM_TRACE
+#if CONFIG_PM_TRACE && SOC_PM_SUPPORT_RTC_PERIPH_PD
/* to force tracing GPIOs to keep state */
esp_sleep_pd_config(ESP_PD_DOMAIN_RTC_PERIPH, ESP_PD_OPTION_ON);
#endif
|
qs1dsearch/autotest: moving functionality to testbench | #include <math.h>
#include <getopt.h>
-float qs1dsearch_autotest_utility_min(float _v, void * _context)
+float qs1dsearch_utility_min(float _v, void * _context)
{
float v_opt = *(float*)(_context);
float v = _v - v_opt;
@@ -37,15 +37,16 @@ float qs1dsearch_autotest_utility_min(float _v, void * _context)
}
//
-void autotest_qs1dsearch_min()
+void testbench_qs1dsearch(liquid_utility_1d _utility,
+ float _v_opt,
+ float _v_init,
+ int _direction)
{
// create qs1dsearch object
- float v_opt = 0.0f;
- qs1dsearch q = qs1dsearch_create(qs1dsearch_autotest_utility_min,
- &v_opt, LIQUID_OPTIM_MINIMIZE);
+ qs1dsearch q = qs1dsearch_create(_utility, &_v_opt, _direction);
//qs1dsearch_init_bounds(q, -20, 10);
- qs1dsearch_init(q, -50);
+ qs1dsearch_init(q, _v_init);
// run search
unsigned int i;
@@ -56,18 +57,20 @@ void autotest_qs1dsearch_min()
}
// check result
- CONTEND_DELTA( qs1dsearch_get_opt_v(q), v_opt, 1e-3f );
- CONTEND_LESS_THAN( qs1dsearch_get_opt_u(q), 1e-3f );
+ CONTEND_DELTA( qs1dsearch_get_opt_v(q), _v_opt, 1e-3f );
+ CONTEND_DELTA( qs1dsearch_get_opt_u(q), 0.0f, 1e-3f );
if (liquid_autotest_verbose) {
printf("%3u : u(%12.8f) = %12.4e, v_opt=%12.4e (error=%12.4e)\n",
i,
qs1dsearch_get_opt_v(q),
qs1dsearch_get_opt_u(q),
- v_opt,
- v_opt - qs1dsearch_get_opt_v(q));
+ _v_opt,
+ _v_opt - qs1dsearch_get_opt_v(q));
}
qs1dsearch_destroy(q);
}
+void autotest_qs1dsearch_01() { testbench_qs1dsearch(qs1dsearch_utility_min, 0, -20, LIQUID_OPTIM_MINIMIZE); }
+
|
OcBootManagementLib: Fix potential OOB access | @@ -291,6 +291,7 @@ InternalSetBootEntryFlags (
{
EFI_DEVICE_PATH_PROTOCOL *DevicePathWalker;
CONST CHAR16 *Path;
+ UINTN Len;
BootEntry->IsFolder = FALSE;
BootEntry->IsRecovery = FALSE;
@@ -306,7 +307,8 @@ InternalSetBootEntryFlags (
if ((DevicePathType (DevicePathWalker) == MEDIA_DEVICE_PATH)
&& (DevicePathSubType (DevicePathWalker) == MEDIA_FILEPATH_DP)) {
Path = ((FILEPATH_DEVICE_PATH *) DevicePathWalker)->PathName;
- if (Path[StrLen (Path) - 1] == L'\\') {
+ Len = StrLen (Path);
+ if ((Len > 0) && (Path[Len - 1] == L'\\')) {
BootEntry->IsFolder = TRUE;
}
if (StrStr (Path, L"com.apple.recovery.boot") != NULL) {
|
test-ipmi-hiomap: Add ack-error test
Cc: stable | @@ -1652,6 +1652,12 @@ static void test_hiomap_flush_error(void)
scenario_exit();
}
+static void test_hiomap_ack_error(void)
+{
+ /* Same thing at the moment */
+ test_hiomap_protocol_action_error();
+}
+
struct test_case {
const char *name;
void (*fn)(void);
@@ -1689,6 +1695,7 @@ struct test_case test_cases[] = {
TEST_CASE(test_hiomap_create_write_window_error),
TEST_CASE(test_hiomap_mark_dirty_error),
TEST_CASE(test_hiomap_flush_error),
+ TEST_CASE(test_hiomap_ack_error),
{ NULL, NULL },
};
|
Add check mark for large heap on STM32F769I_DISCOVERY | @@ -68,7 +68,7 @@ The above firmware builds include support for the class libraries and features m
| ST_STM32F429I_DISCOVERY | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: |
| ST_NUCLEO64_F091RC | | | | :heavy_check_mark: | | | | :heavy_check_mark: | | |
| ST_NUCLEO144_F746ZG | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
-| ST_STM32F769I_DISCOVERY | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
+| ST_STM32F769I_DISCOVERY | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: |
| MBN_QUAIL | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | :heavy_check_mark: | | | | |
| NETDUINO3_WIFI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | | |
| ESP32_DEVKITC | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | | | |
|
[chainmaker][#676]add BoatChainmakerWalletSetChainId function declaration | @@ -32,6 +32,8 @@ api_chainmaker.c defines the Ethereum wallet API for BoAT IoT SDK.
#define BOAT_TXID_LEN 64
static BOAT_RESULT BoatChainmakerWalletSetOrgId(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *org_id_ptr);
+static BOAT_RESULT BoatChainmakerWalletSetChainId(BoatHlchainmakerWallet *wallet_ptr, const BCHAR *chain_id_ptr);
+
BUINT8 get_fibon_data(BUINT8 n) {
|
Check that a TLSv1.3 encrypted message has an app data content type | @@ -618,7 +618,8 @@ int ssl3_get_record(SSL *s)
if (SSL_IS_TLS13(s) && s->enc_read_ctx != NULL) {
size_t end;
- if (thisrr->length == 0) {
+ if (thisrr->length == 0
+ || thisrr->type != SSL3_RT_APPLICATION_DATA) {
al = SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_RECORD, SSL_R_BAD_RECORD_TYPE);
goto f_err;
|
dm: validate inputs in vq_endchains
inputs shall be validated to avoid NULL pointer access. | @@ -647,6 +647,9 @@ vq_endchains(struct virtio_vq_info *vq, int used_all_avail)
uint16_t event_idx, new_idx, old_idx;
int intr;
+ if (!vq || !vq->used)
+ return;
+
/*
* Interrupt generation: if we're using EVENT_IDX,
* interrupt if we've crossed the event threshold.
|
print output in ci | @@ -112,6 +112,8 @@ test_script:
xmake lua --verbose --diagnosis runner.lua $_.fullname >stdout_file 2>stderr_file
$outcome = if ($?) { "Passed" } else { $all_success = $false; "Failed" }
}
+ Get-Content stdout_file | Out-Host
+ Get-Content stderr_file | Out-Host
Update-AppveyorTest -Name $testname -Framework "xmake-test" -FileName $filename -Outcome $outcome -Duration $time.TotalMilliseconds -StdOut (Get-Content -Raw stdout_file) -StdErr (Get-Content -Raw stderr_file)
}
- ps: Pop-Location
|
SOVERSION bump to version 4.1.0 | @@ -34,8 +34,8 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_
# Major version is changed with every backward non-compatible API/ABI change, minor version changes
# with backward compatible change and micro version is connected with any internal change of the library.
set(SYSREPO_MAJOR_SOVERSION 4)
-set(SYSREPO_MINOR_SOVERSION 0)
-set(SYSREPO_MICRO_SOVERSION 11)
+set(SYSREPO_MINOR_SOVERSION 1)
+set(SYSREPO_MICRO_SOVERSION 0)
set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION})
set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
|
Use only log enricher to validate when the profile recorder is enabled | @@ -16,6 +16,10 @@ limitations under the License.
package e2e_test
+import (
+ "time"
+)
+
func (e *e2e) testCaseSPODEnableProfileRecorder(nodes []string) {
e.enableLogEnricherInSpod()
@@ -23,8 +27,14 @@ func (e *e2e) testCaseSPODEnableProfileRecorder(nodes []string) {
profileRecorderEnabledInSPODDS := e.kubectlOperatorNS("get", "ds", "spod", "-o", "yaml")
e.Contains(profileRecorderEnabledInSPODDS, "--with-recording=true")
- e.enableBpfRecorderInSpod()
- e.logf("assert profile recorder is enabled in the spod DS when bpf recorder is enabled")
- profileRecorderEnabledInSPODDS = e.kubectlOperatorNS("get", "ds", "spod", "-o", "yaml")
- e.Contains(profileRecorderEnabledInSPODDS, "--with-recording=true")
+ e.logf("Disable log enricher from SPOD")
+ e.kubectlOperatorNS("patch", "spod", "spod", "-p", `{"spec":{"enableLogEnricher": false}}`, "--type=merge")
+
+ time.Sleep(defaultWaitTime)
+ e.waitInOperatorNSFor("condition=ready", "spod", "spod")
+ e.kubectlOperatorNS("rollout", "status", "ds", "spod", "--timeout", defaultBpfRecorderOpTimeout)
+
+ e.logf("assert profile recorder is disabled in the spod DS when log enricher is disabled")
+ selinuxDisabledInSPODDS := e.kubectlOperatorNS("get", "ds", "spod", "-o", "yaml")
+ e.Contains(selinuxDisabledInSPODDS, "--with-recording=false")
}
|
DM: virtio-gpio: support reading value from IRQ descriptor
Support reading GPIO value when the GPIO switches to IRQ mode.
Acked-by: Yu Wang | @@ -250,6 +250,7 @@ struct gpio_irq_desc {
int fd; /* read event */
int pin; /* pin number */
bool mask; /* mask and unmask */
+ bool deinit; /* deinit state */
uint8_t level; /* level value */
uint64_t mode; /* interrupt trigger mode */
void *data; /* virtio gpio instance */
@@ -401,17 +402,32 @@ gpio_get_value(struct virtio_gpio *gpio, unsigned int offset)
{
struct gpio_line *line;
struct gpiohandle_data data;
- int rc;
+ int rc, fd;
line = gpio->vlines[offset];
- if (line->busy || line->fd < 0) {
- DPRINTF("failed to get gpio%d value, busy:%d, fd:%d\n",
- offset, line->busy, line->fd);
+ if (line->busy) {
+ DPRINTF("failed to get gpio %d value, it is busy\n", offset);
return -1;
}
+ fd = line->fd;
+ if (fd < 0) {
+
+ /*
+ * if the GPIO line has configured as IRQ mode, then can't use
+ * gpio line fd to get its value, instead, use IRQ fd to get
+ * the value.
+ */
+ if (line->irq->fd < 0) {
+ DPRINTF("failed to get gpio %d value, fd is invalid\n",
+ offset);
+ return -1;
+ }
+ fd = line->irq->fd;
+ }
+
memset(&data, 0, sizeof(data));
- rc = ioctl(line->fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
+ rc = ioctl(fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
if (rc < 0) {
DPRINTF("ioctl GPIOHANDLE_GET_LINE_VALUES_IOCTL error %s\n",
strerror(errno));
@@ -951,6 +967,7 @@ gpio_irq_disable(struct gpio_irq_chip *chip, unsigned int pin)
/* Release the mevent, mevent teardown handles IRQ desc reset */
if (desc->mevt) {
+ desc->deinit = false;
mevent_delete(desc->mevt);
desc->mevt = NULL;
}
@@ -970,6 +987,10 @@ gpio_irq_teardown(void *param)
close(desc->fd);
desc->fd = -1;
}
+
+ /* if deinit is not set, switch the pin to GPIO mode */
+ if (!desc->deinit)
+ native_gpio_open_line(desc->gpio, 0, 0);
}
static void
@@ -1168,6 +1189,7 @@ gpio_irq_deinit(struct virtio_gpio *gpio)
for (i = 0; i < gpio->nvline; i++) {
desc = &chip->descs[i];
if (desc->mevt) {
+ desc->deinit = true;
mevent_delete(desc->mevt);
desc->mevt = NULL;
|
Solve minor const bug in python loader. | @@ -1194,7 +1194,7 @@ void py_loader_impl_error_print(loader_impl_py py_impl)
type_str = PyString_AsString(type_str_obj);
value_str = PyString_AsString(value_str_obj);
- traceback_str = traceback_str_obj ? PyString_AsString(traceback_str_obj) : traceback_not_found;
+ traceback_str = traceback_str_obj ? PyString_AsString(traceback_str_obj) : NULL;
#elif PY_MAJOR_VERSION == 3
separator = PyUnicode_FromString(separator_str);
@@ -1202,10 +1202,10 @@ void py_loader_impl_error_print(loader_impl_py py_impl)
type_str = PyUnicode_AsUTF8(type_str_obj);
value_str = PyUnicode_AsUTF8(value_str_obj);
- traceback_str = traceback_str_obj ? PyUnicode_AsUTF8(traceback_str_obj) : traceback_not_found;
+ traceback_str = traceback_str_obj ? PyUnicode_AsUTF8(traceback_str_obj) : NULL;
#endif
- log_write("metacall", LOG_LEVEL_ERROR, error_format_str, type_str, value_str, traceback_str);
+ log_write("metacall", LOG_LEVEL_ERROR, error_format_str, type_str, value_str, traceback_str ? traceback_str : traceback_not_found);
Py_DECREF(traceback_list);
Py_DECREF(separator);
|
README.md: Use version 2.05.39 | @@ -36,11 +36,11 @@ https://github.com/dresden-elektronik/deconz-rest-plugin/releases
1. Download deCONZ package
- wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.38-qt5.deb
+ wget http://www.dresden-elektronik.de/rpi/deconz/beta/deconz-2.05.39-qt5.deb
2. Install deCONZ package
- sudo dpkg -i deconz-2.05.38-qt5.deb
+ sudo dpkg -i deconz-2.05.39-qt5.deb
**Important** this step might print some errors *that's ok* and will be fixed in the next step.
@@ -55,11 +55,11 @@ The deCONZ package already contains the REST API plugin, the development package
1. Download deCONZ development package
- wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.38.deb
+ wget http://www.dresden-elektronik.de/rpi/deconz-dev/deconz-dev-2.05.39.deb
2. Install deCONZ development package
- sudo dpkg -i deconz-dev-2.05.38.deb
+ sudo dpkg -i deconz-dev-2.05.39.deb
3. Install missing dependencies
@@ -74,7 +74,7 @@ The deCONZ package already contains the REST API plugin, the development package
2. Checkout related version tag
cd deconz-rest-plugin
- git checkout -b mybranch V2_05_38
+ git checkout -b mybranch V2_05_39
3. Compile the plugin
|
Simpler and probably better to handle it that way | @@ -202,7 +202,8 @@ static s16 fadeCounter;
// forward
-static void setFadePalette(u16 ind, const u16 *src, u16 len);
+static void setFadePalette(u16 ind, const u16 *src, u16 len, bool forceWaitVBlank);
+static bool doFadeStepInternal(bool forceWaitVBlank);
u16 PAL_getColor(u16 index)
@@ -302,18 +303,13 @@ void PAL_setPaletteDMA(u16 numPal, const u16* pal)
PAL_setPalette(numPal * 16, pal, DMA);
}
-static void setFadePalette(u16 ind, const u16 *src, u16 len)
+static void setFadePalette(u16 ind, const u16 *src, u16 len, bool forceWaitVBlank)
{
- static u32 lastVTimer = 0;
-
- // be sure that we are during vblank and to wait at least 1 frame between each setFadePalette call
- if ((GET_VDPSTATUS(VDP_VBLANK_FLAG) == 0) || (lastVTimer == vtimer)) SYS_doVBlankProcess();
+ // be sure that we are during vblank
+ if (forceWaitVBlank) SYS_doVBlankProcess();
// use DMA for long transfer
PAL_setColors(ind, src, len, (len > 16)?DMA:CPU);
-
- // keep track of last update
- lastVTimer = vtimer;
}
//static void setFadePalette(u16 ind, const u16 *src, u16 len)
@@ -394,13 +390,13 @@ bool PAL_initFade(u16 fromCol, u16 toCol, const u16* palSrc, const u16* palDst,
return TRUE;
}
-bool PAL_doFadeStep()
+static bool doFadeStepInternal(bool forceWaitVBlank)
{
// not yet done ?
if (--fadeCounter >= 0)
{
// set current fade palette
- setFadePalette(fadeInd, fadeCurrentPal, fadeSize);
+ setFadePalette(fadeInd, fadeCurrentPal, fadeSize, forceWaitVBlank);
// then prepare fade palette for next frame
s16* palR = fadeR;
@@ -438,13 +434,17 @@ bool PAL_doFadeStep()
else
{
// last step --> we can just set the final fade palette
- setFadePalette(fadeInd, fadeEndPal, fadeSize);
+ setFadePalette(fadeInd, fadeEndPal, fadeSize, forceWaitVBlank);
// done
return FALSE;
}
}
+bool PAL_doFadeStep()
+{
+ return doFadeStepInternal(FALSE);
+}
void PAL_interruptFade()
{
@@ -460,8 +460,8 @@ void PAL_fade(u16 fromCol, u16 toCol, const u16* palSrc, const u16* palDst, u16
if (async) VBlankProcess |= PROCESS_PALETTE_FADING;
else
{
- // process fading immediatly (PAL_doFadeStep() wait for vblank if needed)
- while (PAL_doFadeStep());
+ // process fading immediatly with forced VBlank wait
+ while (doFadeStepInternal(TRUE));
}
}
|