message
stringlengths
6
474
diff
stringlengths
8
5.22k
Removed REINDEX step from intensive system catalog maintenance
@@ -185,9 +185,6 @@ analyzedb -a -s pg_catalog -d $DBNAME</pre></p> catalog tables can prevent the need for this more costly procedure.</p> <p>These are steps for intensive system catalog maintenance.<ol id="ol_trp_xqs_f2b"> <li>Stop all catalog activity on the Greenplum Database system.</li> - <li>Perform a <codeph>REINDEX</codeph> on the system catalog tables to rebuild the system - catalog indexes. This removes bloat in the indexes and improves <codeph>VACUUM</codeph> - performance.</li> <li>Perform a <codeph>VACUUM FULL</codeph> on the system catalog tables. See the following Note.</li> <li>Perform an <codeph>ANALYZE</codeph> on the system catalog tables to update the catalog
move library/string_utils/base64 into library/cpp/string_utils/base64 (folders starting on [a-e])
@@ -562,7 +562,7 @@ Include files should be specified in the order of less general to more general ( #include <library/json/json_reader.h> // library -#include <library/string_utils/base64/base64.h> +#include <library/cpp/string_utils/base64/base64.h> #include <library/threading/local_executor/local_executor.h> ```
Don't filter TLS 1.3 ciphersuites by signing or key exchange algorithm
@@ -3633,7 +3633,11 @@ const SSL_CIPHER *ssl3_choose_cipher(SSL *s, STACK_OF(SSL_CIPHER) *clnt, (DTLS_VERSION_LT(s->version, c->min_dtls) || DTLS_VERSION_GT(s->version, c->max_dtls))) continue; - + /* + * Since TLS 1.3 ciphersuites can be used with any auth or + * key exchange scheme skip tests. + */ + if (!SSL_IS_TLS13(s)) { mask_k = s->s3->tmp.mask_k; mask_a = s->s3->tmp.mask_a; #ifndef OPENSSL_NO_SRP @@ -3669,6 +3673,7 @@ const SSL_CIPHER *ssl3_choose_cipher(SSL *s, STACK_OF(SSL_CIPHER) *clnt, if (!ok) continue; + } ii = sk_SSL_CIPHER_find(allow, c); if (ii >= 0) { /* Check security callback permits this cipher */
compiler:Open include files allowing other readers
@@ -84,7 +84,7 @@ const char* _yr_compiler_default_include_callback( int fd = -1; #if defined(_MSC_VER) - _sopen_s(&fd, include_name, _O_RDONLY | _O_BINARY, _SH_DENYRW, _S_IREAD); + _sopen_s(&fd, include_name, _O_RDONLY | _O_BINARY, _SH_DENYWR, _S_IREAD); #elif defined(_WIN32) || defined(__CYGWIN__) fd = open(include_name, O_RDONLY | O_BINARY); #else
volteer: Reduce flash usage This saves 120 bytes. BRANCH=none TEST=buildall passes
#define CONFIG_SYSTEM_UNLOCKED /* Allow dangerous commands while in dev. */ /* Remove PRL state names to free flash space */ -#define CONFIG_USB_PD_DEBUG_LEVEL 2 +#define CONFIG_USB_PD_DEBUG_LEVEL 1 #define CONFIG_VBOOT_EFS2
docs/library/index: Add hint about using help('modules') for discovery.
@@ -40,6 +40,11 @@ best place to find general information of the availability/non-availability of a particular feature is the "General Information" section which contains information pertaining to a specific `MicroPython port`. +On some ports you are able to discover the available, built-in libraries that +can be imported by entering the following at the REPL:: + + help('modules') + Beyond the built-in libraries described in this documentation, many more modules from the Python standard library, as well as further MicroPython extensions to it, can be found in `micropython-lib`.
mmapstorage: add opmphm cleanup functions
@@ -1027,6 +1027,42 @@ static void copyKeySetToMmap (char * const dest, KeySet * keySet, KeySet * globa memcpy ((dest + mmapHeader->allocSize - SIZEOF_MMAPFOOTER), mmapFooter, SIZEOF_MMAPFOOTER); } +/** + * @brief Deletes the OPMPHM. + * + * Clears and frees all memory in Opmphm. + * + * @param opmphm the OPMPHM + */ +void mmapOpmphmDel (Opmphm * opmphm) +{ + ELEKTRA_NOT_NULL (opmphm); + if (opmphmIsBuild (opmphm)) + { + if (!test_bit (opmphm->flags, OPMPHM_FLAG_MMAP_GRAPH)) elektraFree (opmphm->graph); + opmphm->size = 0; + } + if (opmphm->rUniPar) + { + if (!test_bit (opmphm->flags, OPMPHM_FLAG_MMAP_HASHFUNCTIONSEEDS)) elektraFree (opmphm->hashFunctionSeeds); + } + if (!test_bit (opmphm->flags, OPMPHM_FLAG_MMAP_STRUCT)) elektraFree (opmphm); +} + +/** + * @brief Deletes the OpmphmPredictor. + * + * Clears and frees all memory in OpmphmPredictor. + * + * @param op the OpmphmPredictor + */ +static void mmapOpmphmPredictorDel (OpmphmPredictor * op) +{ + ELEKTRA_NOT_NULL (op); + if (!test_bit (op->flags, OPMPHM_PREDICTOR_FLAG_MMAP_PATTERNTABLE)) elektraFree (op->patternTable); + if (!test_bit (op->flags, OPMPHM_PREDICTOR_FLAG_MMAP_STRUCT)) elektraFree (op); +} + /** * @brief Replaces contents of a keyset with the keyset from the mapped region. * @@ -1049,6 +1085,7 @@ static void mmapToKeySet (Plugin * handle, char * mappedRegion, KeySet * returne #ifdef ELEKTRA_ENABLE_OPTIMIZATIONS if (keySet->opmphm) { + if (returned->opmphm) mmapOpmphmDel (returned->opmphm); returned->opmphm = keySet->opmphm; returned->opmphm->flags = OPMPHM_FLAG_MMAP_STRUCT; if (returned->opmphm->hashFunctionSeeds) @@ -1062,6 +1099,7 @@ static void mmapToKeySet (Plugin * handle, char * mappedRegion, KeySet * returne } if (keySet->opmphmPredictor) { + if (returned->opmphmPredictor) mmapOpmphmPredictorDel (returned->opmphmPredictor); returned->opmphmPredictor = keySet->opmphmPredictor; returned->opmphmPredictor->flags = OPMPHM_PREDICTOR_FLAG_MMAP_STRUCT; if (returned->opmphmPredictor->patternTable)
use `h2o_configurator_errprintf` instead of `fprintf`
@@ -1233,7 +1233,7 @@ static int on_config_num_threads(h2o_configurator_command_t *cmd, h2o_configurat conf.thread_map.entries[conf.thread_map.size++] = -1; } else if (node->type == YOML_TYPE_SEQUENCE) { #ifndef H2O_HAS_PTHREAD_SETAFFINITY_NP - fprintf(stderr, "[error] Can't handle a CPU list, this platform doesn't support `pthread_setaffinity_np`\n"); + h2o_configurator_errprintf(cmd, node, "[error] Can't handle a CPU list, this platform doesn't support `pthread_setaffinity_np`\n"); return -1; #endif /* a sequence is treated as a list of CPUs to bind to, one per thread to instantiate */
hooks: document current hooks
@@ -17,6 +17,48 @@ For example, the `gopts` hook only requires the `get` function. A plugin that wa Other hooks (e.g. `spec`) require multiple exported functions. +### `gopts` hook + +Currently hard-coded to search for a plugin named `gopts`. + +The following function **must** be exported: +- `get` + - Signature: `(Plugin * handle, KeySet * returned, Key * parentKey)` + - Called in `kdbGet` after the storage phase, after `notification/send` hook but before the `spec` hook. + - TODO: Describe what the function should do + +### `spec` hook + +Currently hard-coded to search for a plugin named `spec`. + +The following functions **must** be exported: +- `copy` + - Signature: `(Plugin * handle, KeySet * returned, Key * parentKey, bool isKdbGet)` + - Called in: + - `kdbGet`: after the storage phase, after `notification/send` and `gopts` hook. + - `kdbSet`: right after the backends are initialized + - Should copy all the spec meta keys into the keyset + +- `remove` + - Signature: `(Plugin * handle, KeySet * returned, Key * parentKey)` + - Called in `kdbSet` right after the prestorage phase + - Should remove all the spec meta keys from the keyset + +### `notification/send` hook + +We look within the array `system:/elektra/hook/notification/send/plugins` for the plugins that shall be loaded. +The name of the plugin **must** be the value of the keys directly below this, +e.g. `system:/elektra/hook/notification/send/plugins/#0 (= dbus)`. + +The following functions **may** be exported (optional): +- `get`: + - Signature: `(Plugin * handle, KeySet * returned, Key * parentKey)` + - Called in `kdbGet` after the storage phase. + +- `set`: + - Signature: `(Plugin * handle, KeySet * returned, Key * parentKey)` + - Called in `kdbSet` after the storage phase. + ## Lifecycle 1. Hooks are initilized within `kdbOpen` after the contract has been processed. This includes loading the plugins.
vlib: support macros in initial config file Type: improvement
@@ -3359,10 +3359,14 @@ unix_cli_exec (vlib_main_t * vm, int fd; unformat_input_t sub_input; clib_error_t *error; - + unix_cli_main_t *cm = &unix_cli_main; + unix_cli_file_t *cf; + u8 *file_data = 0; file_name = 0; fd = -1; error = 0; + struct stat s; + if (!unformat (input, "%s", &file_name)) { @@ -3379,9 +3383,6 @@ unix_cli_exec (vlib_main_t * vm, } /* Make sure its a regular file. */ - { - struct stat s; - if (fstat (fd, &s) < 0) { error = clib_error_return_unix (0, "failed to stat `%s'", file_name); @@ -3393,9 +3394,52 @@ unix_cli_exec (vlib_main_t * vm, error = clib_error_return (0, "not a regular file `%s'", file_name); goto done; } + + /* Read the file */ + vec_validate (file_data, s.st_size); + + if (read (fd, file_data, s.st_size) != s.st_size) + { + error = clib_error_return_unix (0, "Failed to read %d bytes from '%s'", + s.st_size, file_name); + vec_free (file_data); + goto done; } - unformat_init_clib_file (&sub_input, fd); + /* The macro expander expects a c string... */ + vec_add1 (file_data, 0); + + unformat_init_vector (&sub_input, file_data); + + /* Run the file contents through the macro processor */ + if (vec_len (sub_input.buffer) > 1) + { + u8 *expanded; + clib_macro_main_t *mm = 0; + + /* Initial config process? Use the global macro table. */ + if (pool_is_free_index + (cm->cli_file_pool, cm->current_input_file_index)) + mm = &cm->macro_main; + else + { + /* Otherwise, use the per-cli-process macro table */ + cf = pool_elt_at_index (cm->cli_file_pool, + cm->current_input_file_index); + mm = &cf->macro_main; + } + + expanded = (u8 *) clib_macro_eval (mm, + (i8 *) sub_input.buffer, + 1 /* complain */ , + 0 /* level */ , + 8 /* max_level */ ); + /* Macro processor NULL terminates the return */ + _vec_len (expanded) -= 1; + vec_reset_length (sub_input.buffer); + vec_append (sub_input.buffer, expanded); + vec_free (expanded); + } vlib_cli_input (vm, &sub_input, 0, 0); unformat_free (&sub_input);
mqtt: Update tests to start with valid transport
@@ -69,7 +69,11 @@ struct ClientInitializedFixture { }; TEST_CASE_METHOD(ClientInitializedFixture, "Client set uri") { - struct http_parser_url ret_uri; + struct http_parser_url ret_uri = { + .field_set = 1, + .port = 0, + .field_data = { { 0, 1} } + }; SECTION("User set a correct URI") { http_parser_parse_url_StopIgnore(); http_parser_parse_url_ExpectAnyArgsAndReturn(0); @@ -88,8 +92,20 @@ TEST_CASE_METHOD(ClientInitializedFixture, "Client set uri") TEST_CASE_METHOD(ClientInitializedFixture, "Client Start") { SECTION("Successful start") { + esp_mqtt_client_config_t config{}; + config.uri = "mqtt://1.1.1.1"; + struct http_parser_url ret_uri = { + .field_set = 1 | (1<<1), + .port = 0, + .field_data = { { 0, 4 } /*mqtt*/, { 7, 1 } } // at least *scheme* and *host* + }; + http_parser_parse_url_StopIgnore(); + http_parser_parse_url_ExpectAnyArgsAndReturn(0); + http_parser_parse_url_ReturnThruPtr_u(&ret_uri); xTaskCreatePinnedToCore_ExpectAnyArgsAndReturn(pdTRUE); - auto res = esp_mqtt_client_start(client); + auto res = esp_mqtt_set_config(client, &config); + REQUIRE(res == ESP_OK); + res = esp_mqtt_client_start(client); REQUIRE(res == ESP_OK); } SECTION("Failed on initialization") {
dockerfile: bump to v1.0.0
FROM launcher.gcr.io/google/debian9 as builder # Fluent Bit version -ENV FLB_MAJOR 0 -ENV FLB_MINOR 15 +ENV FLB_MAJOR 1 +ENV FLB_MINOR 0 ENV FLB_PATCH 0 -ENV FLB_VERSION 0.15.0 +ENV FLB_VERSION 1.0.0 ENV DEBIAN_FRONTEND noninteractive
Update documenation of PSA_ALG_RSA_PSS
* This is the signature scheme defined by RFC 8017 * (PKCS#1: RSA Cryptography Specifications) under the name * RSASSA-PSS, with the message generation function MGF1, and with - * a salt length equal to the length of the hash. The specified - * hash algorithm is used to hash the input message, to create the - * salted hash, and for the mask generation. + * a salt length equal to the length of the hash, or the largest + * possible salt length for the algorithm and key size if that is + * smaller than the hash length. The specified hash algorithm is + * used to hash the input message, to create the salted hash, and + * for the mask generation. * * \param hash_alg A hash algorithm (\c PSA_ALG_XXX value such that * #PSA_ALG_IS_HASH(\p hash_alg) is true).
Add fix to ensure that X-Client-IP header is dropped when is not a trusted header.
@@ -14055,6 +14055,7 @@ static void wsgi_process_proxy_headers(request_rec *r) name = ((const char**)trusted_proxy_headers->elts)[i]; if (!strcmp(name, "HTTP_X_FORWARDED_FOR") || + !strcmp(name, "HTTP_X_CLIENT_IP") || !strcmp(name, "HTTP_X_REAL_IP")) { match_client_header = 1;
[dpos] collect the garbage entries of the pre-LIB map lower than the LIB. This may be the cause of the recent excessive DB storage usage.
@@ -206,6 +206,24 @@ func (pls *pLibStatus) gc(lib *blockInfo) { func(e *list.Element) bool { return cInfo(e).BlockNo <= lib.BlockNo }) + + for bpID, pl := range pls.plib { + var beg int + for i, l := range pl { + if l.BlockNo > lib.BlockNo { + beg = i + break + } + } + oldLen := len(pl) + newPl := pl[beg:] + pls.plib[bpID] = newPl + + logger.Debug(). + Str("BPID", bpID).Int("old len", oldLen).Int("new len", len(newPl)). + Msg("collect garbage pre-LIB entry") + + } } func removeIf(l *list.List, p func(e *list.Element) bool) {
Add check for bits accessor
@@ -288,6 +288,20 @@ static int pack_long(grib_accessor* a, const long* val, size_t* len) grib_context_log(h->context, GRIB_LOG_ERROR, "key=%s: value cannot be negative", a->name); return GRIB_ENCODING_ERROR; } + +#ifdef DEBUG + { + const long numbits = (x->length)*8; + if (start + length > numbits) { + grib_context_log(h->context, GRIB_LOG_ERROR, + "grib_accessor_class_bits::pack_long: key=%s (x=%s): " + "Invalid start/length. x->length=%ld, start=%ld, length=%ld", + a->name, x->name, numbits, start, length); + return GRIB_ENCODING_ERROR; + } + } +#endif + maxval = (1 << length) - 1; if (*val > maxval) { grib_context_log(h->context, GRIB_LOG_ERROR,
Fix parse_pciids when trying to open pci.ids from alternate location
+#include <spdlog/spdlog.h> #include <string> #include <iostream> #include <fstream> @@ -22,12 +23,12 @@ std::istream& get_uncommented_line(std::istream& is, std::string &line) void parse_pciids() { - std::ifstream file("/usr/share/hwdata/pci.ids"); + std::ifstream file; + file.open("/usr/share/hwdata/pci.ids"); if (file.fail()){ - std::ifstream file("/usr/share/misc/pci.ids"); + file.open("/usr/share/misc/pci.ids"); if (file.fail()) - printf("MANGOHUD: can't find file pci.ids\n"); - + SPDLOG_ERROR("can't find file pci.ids"); } std::string line;
api: enable trace / replay flag on messages For an unknown reason the trace/replay flags where missed when moving API message registration code from manually cut and pasted to aut-generated. Type: fix
@@ -1344,6 +1344,8 @@ def generate_c_boilerplate(services, defines, counters, file_crc, ' .cleanup = vl_noop_handler,\n' ' .endian = vl_api_{n}_t_endian,\n' ' .print = vl_api_{n}_t_print,\n' + ' .traced = 1,\n' + ' .replay = 1,\n' ' .is_autoendian = {auto}}};\n' .format(n=s.caller, ID=s.caller.upper(), auto=d.autoendian))
acrn-config: correct console argument for logical partition scenario Currently config tool generated 'console=/dev/ttySn' in boot cmdline for logical_partiton scenario, need to strip '/dev/' to avoid kernel boot issue. Acked-by: Victor Sun
@@ -183,7 +183,7 @@ def gen_logical_partition_header(vm_info, config): print('#define VM{0}_CONFIG_OS_BOOTARG_MAXCPUS\t\t"maxcpus={1} "'.format( i, cpu_bits['cpu_num']), file=config) print('#define VM{0}_CONFIG_OS_BOOTARG_CONSOLE\t\t"console={1} "'.format( - i, vm_info.os_cfg.kern_console[i]), file=config) + i, vm_info.os_cfg.kern_console[i].strip('/dev/')), file=config) print("", file=config) print('/* VM pass-through devices assign policy:', file=config)
INI: Add metadata example This commit closes
@@ -192,6 +192,44 @@ kdb rm -r user/examples/ini sudo kdb umount user/examples/ini ``` +## Metadata + +The INI plugin also supports metadata. + +```sh +sudo kdb mount config.ini user/examples/ini ini + +# Add a new key and some metadata +kdb set user/examples/ini/brand new +kdb setmeta user/examples/ini/brand description "The Devil And God Are Raging Inside Me" +kdb setmeta user/examples/ini/brand rationale "Because I Love It" + +# The plugin stores metadata as comments inside the INI file +kdb file /examples/ini | xargs cat +#> #@META description = The Devil And God Are Raging Inside Me +#> #@META rationale = Because I Love It +#> brand = new + +# Retrieve metadata +kdb lsmeta user/examples/ini/brand | grep --invert-match 'internal' +# rationale +# description + +kdb getmeta user/examples/ini/brand description +#> The Devil And God Are Raging Inside Me +kdb getmeta user/examples/ini/brand rationale +#> Because I Love It + +# The plugin ignores some metadata such as `comment`! +kdb setmeta user/examples/ini/brand comment "Where Art Thou?" +kdb getmeta user/examples/ini/brand comment +# STDERR: Metakey not found +# RET: 2 + +kdb rm -r user/examples/ini +sudo kdb umount user/examples/ini +``` + ## Sections The ini plugin supports 3 different sectioning modes (via `section=`):
Omit the third argument to `handle_one_body_fragment`
@@ -178,7 +178,7 @@ DECL_ENTITY_READ_SEND_ERROR_XXX(400) DECL_ENTITY_READ_SEND_ERROR_XXX(413) DECL_ENTITY_READ_SEND_ERROR_XXX(502) -static void handle_one_body_fragment(struct st_h2o_http1_conn_t *conn, size_t fragment_size, size_t consume, int complete) +static void handle_one_body_fragment(struct st_h2o_http1_conn_t *conn, size_t fragment_size, int complete) { set_timeout(conn, 0, NULL); h2o_socket_read_stop(conn->sock); @@ -186,7 +186,7 @@ static void handle_one_body_fragment(struct st_h2o_http1_conn_t *conn, size_t fr entity_read_send_error_502(conn, "Bad Gateway", "Bad Gateway"); return; } - h2o_buffer_consume(&conn->sock->input, consume); + h2o_buffer_consume(&conn->sock->input, fragment_size); conn->req._req_body.bytes_received += fragment_size; if (complete) { conn->req.proceed_req = NULL; @@ -222,7 +222,8 @@ static void handle_chunked_entity_read(struct st_h2o_http1_conn_t *conn) /* complete */ consume -= ret; Done: - handle_one_body_fragment(conn, bufsz, consume, complete); + handle_one_body_fragment(conn, bufsz, complete); + h2o_buffer_consume(&conn->sock->input, consume - bufsz); } static int create_chunked_entity_reader(struct st_h2o_http1_conn_t *conn) @@ -250,7 +251,7 @@ static void handle_content_length_entity_read(struct st_h2o_http1_conn_t *conn) if (!complete && length == 0) return; - handle_one_body_fragment(conn, length, length, complete); + handle_one_body_fragment(conn, length, complete); } static int create_content_length_entity_reader(struct st_h2o_http1_conn_t *conn, size_t content_length)
Always use 24bit depth buffer on mac
@@ -2200,7 +2200,11 @@ void CreateMainFrame(FrameCreationCallback inOnFrame, int inWidth, int inHeight, SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8); if (inFlags & wfDepthBuffer) + #ifdef HX_MACOS + SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24 ); + #else SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 32 - (inFlags & wfStencilBuffer) ? 8 : 0); + #endif if (inFlags & wfStencilBuffer) SDL_GL_SetAttribute(SDL_GL_STENCIL_SIZE, 8);
NULL is not valid for buffers in UnqueueBuffers
@@ -169,8 +169,9 @@ void lovrSourcePlay(Source* source) { // BEFORE user code calls source:play(). This means that some buffers may still be queued (but processed // and completely finished playing). These must be unqueued before we can start using the source again. ALint processed; + ALuint _unused[SOURCE_BUFFERS]; alGetSourcei(lovrSourceGetId(source), AL_BUFFERS_PROCESSED, &processed); - alSourceUnqueueBuffers(source->id, processed, NULL); + alSourceUnqueueBuffers(source->id, processed, &_unused); lovrSourceStream(source, source->buffers, SOURCE_BUFFERS); alSourcePlay(source->id);
files: skip empty input files
@@ -180,6 +180,10 @@ static bool files_getDirStatsAndRewind(honggfuzz_t * hfuzz) LOG_W("File '%s' is bigger than maximal defined file size (-F): %" PRId64 " > %" PRId64, fname, (int64_t) st.st_size, (int64_t) hfuzz->maxFileSz); } + if (st.st_size == 0U) { + LOG_W("File '%s' is empty", fname); + continue; + } if ((size_t) st.st_size > maxSize) { maxSize = st.st_size; } @@ -245,6 +249,10 @@ bool files_getNext(honggfuzz_t * hfuzz, char *fname, bool rewind) LOG_D("'%s' is not a regular file, skipping", fname); continue; } + if (st.st_size == 0U) { + LOG_D("File '%s' is empty", fname); + continue; + } return true; } }
apps/netutils/netlib/netlib_parsehttpurl.c: Correct handling of long URLs as noted in Bitbucket issue (in the nuttx/ repository, not the apps/ repository).
@@ -60,12 +60,12 @@ static const char g_http[] = "http://"; * Name: netlib_parsehttpurl ****************************************************************************/ -int netlib_parsehttpurl(const char *url, uint16_t *port, - char *hostname, int hostlen, - char *filename, int namelen) +int netlib_parsehttpurl(FAR const char *url, FAR uint16_t *port, + FAR char *hostname, int hostlen, + FAR char *filename, int namelen) { - const char *src = url; - char *dest; + FAR const char *src = url; + FAR char *dest; int bytesleft; int ret = OK; @@ -88,22 +88,21 @@ int netlib_parsehttpurl(const char *url, uint16_t *port, dest = hostname; bytesleft = hostlen; + while (*src != '\0' && *src != '/' && *src != ' ' && *src != ':') { /* Make sure that there is space for another character in the hostname. * (reserving space for the null terminator) */ - if (bytesleft > 1) - { *dest++ = *src++; - bytesleft--; - } - else + if (--bytesleft <= 1) { ret = -E2BIG; + break; } } + *dest = '\0'; /* Check if the hostname is following by a port number */ @@ -118,6 +117,7 @@ int netlib_parsehttpurl(const char *url, uint16_t *port, accum = 10*accum + *src - '0'; src++; } + *port = accum; } } @@ -133,10 +133,12 @@ int netlib_parsehttpurl(const char *url, uint16_t *port, dest = filename; bytesleft = namelen; + while (*src == '/') { src++; } + *dest++ = '/'; bytesleft--; @@ -146,4 +148,3 @@ int netlib_parsehttpurl(const char *url, uint16_t *port, filename[namelen-1] = '\0'; return ret; } -
redrix: Fix lsm6dsm initial data Add the lsm6dsm initial data to avoid unexpected exception occurs. BRANCH=none TEST=Boot EC success.
@@ -51,7 +51,7 @@ BUILD_ASSERT(ARRAY_SIZE(adc_channels) == ADC_CH_COUNT); K_MUTEX_DEFINE(g_lid_accel_mutex); K_MUTEX_DEFINE(g_base_accel_mutex); static struct accelgyro_saved_data_t g_bma253_data; -static struct lsm6dsm_data lsm6dsm_data; +static struct lsm6dsm_data lsm6dsm_data = LSM6DSM_DATA; /* TODO(b/184779333): calibrate the orientation matrix on later board stage */ static const mat33_fp_t lid_standard_ref = {
Update serverinfo documentation based on feedback received
@@ -35,7 +35,8 @@ consist of a 4-byte context, a 2-byte Extension Type, a 2-byte length, and then length bytes of extension_data. The context and type values have the same meaning as for L<SSL_CTX_add_custom_ext(3)>. If serverinfo is being loaded for extensions to be added to a Certificate message, then the extension will only -be added for the first Certificate in the message. +be added for the first certificate in the message (which is always the +end-entity certificate). If B<version> is B<SSL_SERVERINFOV1> then the extensions in the array must consist of a 2-byte Extension Type, a 2-byte length, and then length bytes of @@ -62,7 +63,7 @@ last certificate installed. If e.g. the last item was a RSA certificate, the loaded serverinfo extension data will be loaded for that certificate. To use the serverinfo extension for multiple certificates, SSL_CTX_use_serverinfo() needs to be called multiple times, once B<after> -each time a certificate is loaded. +each time a certificate is loaded via a call to SSL_CTX_use_certificate(). =head1 RETURN VALUES
sse2: add fast-math WASM implementation of _mm_cvtps_epi32
@@ -2635,6 +2635,8 @@ simde_mm_cvtps_epi32 (simde__m128 a) { SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_ r_.altivec_i32 = vec_cts(a_.altivec_f32, 1); HEDLEY_DIAGNOSTIC_POP + #elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES) + r_.wasm_v128 = wasm_i32x4_trunc_sat_f32x4(a_.wasm_v128); #else a_ = simde__m128_to_private(simde_x_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEAREST_INT, 1)); SIMDE_VECTORIZE
added random sampling (gaussian and uniform) to vspace
@@ -43,6 +43,11 @@ for i in range(len(lines)): dest = lines[i].split()[1] elif lines[i].split()[0] == 'trialname': trial = lines[i].split()[1] + elif lines[i].split()[0] == 'seed': + if np.float(lines[i].split()[1]).is_integer(): + np.random.seed(np.int(lines[i].split()[1])) + else: + raise IOError("Attempt to pass non-integer value to seed") elif lines[i].split()[0] == 'file': flist.append(lines[i].split()[1]) fline.append(i) @@ -80,7 +85,18 @@ for i in range(len(lines)): array = np.logspace(np.log10(np.float(values[0])),np.log10(np.float(values[1])),np.float(values[2])) else: raise IOError("Attempt to iterate over '%s' for '%s', but number of points provided not an integer value"%(name,flist[fnum-1])) - + elif values[2][0] == 'g': + values[2] = values[2][1:] + if np.float(values[2]).is_integer(): + array = np.random.normal(loc=np.float(values[0]),scale=np.float(values[1]),size=np.int(values[2])) + else: + raise IOError("Attempt to randomly select for '%s' for '%s', but number of points provided not an integer value"%(name,flist[fnum-1])) + elif values[2][0] == 'u': + values[2] = values[2][1:] + if np.float(values[2]).is_integer(): + array = np.random.uniform(low=np.float(values[0]),high=np.float(values[1]),size=np.int(values[2])) + else: + raise IOError("Attempt to randomly select for '%s' for '%s', but number of points provided not an integer value"%(name,flist[fnum-1])) else: if np.float(values[0]) > np.float(values[1]) and np.float(values[2]) > 0:
Template: Add MSR test This commit closes
@@ -39,7 +39,21 @@ None. ## Examples -None. +```sh +# Mount template plugin to cascading namespace `/examples/template` +sudo kdb mount config.file /examples/template template + +kdb set /examples/template/key value +#> Using name user/examples/template/key +#> Create a new key user/examples/template/key with string "value" + +kdb get /examples/template/key +#> value + +# Undo modifications to the key database +kdb rm -r /examples/template +sudo kdb umount /examples/template +``` ## Limitations
config_tools: add cpu_affinity error message add cpu_affinity error message
pCPU ID </b-col> <b-col> - <b-form-select v-model="cpu.pcpu_id" :options="pcpuid_enum"></b-form-select> + <b-form-select :state="validateCPUAffinity(cpu.pcpu_id)" v-model="cpu.pcpu_id" :options="pcpuid_enum"></b-form-select> + <b-form-invalid-feedback> + pCPU ID is required! + </b-form-invalid-feedback> </b-col> <b-col class="p-3 col-3"> <div style="padding:9px;border-radius: 9px;width: 100%;border: 1px solid dimgray;background: lightgray;"> @@ -113,6 +116,9 @@ export default { } }, methods: { + validateCPUAffinity(pcpu_id){ + return !!pcpu_id; + }, vCPUName(index) { return `${this.rootFormData.name} vCPU ${index}` },
fix support for api level above 28 opening content file provider
<activity android:name='com.rhomobile.rhodes.ui.LogViewDialog' android:windowSoftInputMode='stateAlwaysHidden' android:configChanges='orientation' android:screenOrientation='<%= @screenOrientation %>'/> <service android:name='com.rhomobile.rhodes.RhodesService' android:exported='false'/> - <provider android:name='com.rhomobile.rhodes.LocalFileProvider' android:authorities='<%=@appPackageName%>' android:grantUriPermissions='false'> + <provider android:name='com.rhomobile.rhodes.LocalFileProvider' android:authorities='<%=@appPackageName%>' android:grantUriPermissions='false' <%= @targetSdkVer >= 28 ? "android:exported='true'" : "android:exported='false'" %> > <grant-uri-permission android:pathPrefix='/rhodes/apps/'/> </provider>
fix uge playback breaking when loosing focus, fix preview corruption This does mean that previewing a song in events is harder to stop, must stop with the pause button.
@@ -25,7 +25,7 @@ export function initMusic() { // Initialise audio on first click window.addEventListener("click", initMusic); window.addEventListener("keydown", initMusic); -window.addEventListener("blur", pause); +//window.addEventListener("blur", pause); function onSongLoaded(player: ScripTracker) { player.play(); @@ -51,7 +51,7 @@ async function playUGE(filename: string, _settings: MusicSettings) { }); } }; - ipcRenderer.on("music-data", listener); + ipcRenderer.once("music-data", listener); ipcRenderer.send("open-music"); }
microbitv2: Replace commented out code with explanatory comments.
@@ -115,11 +115,8 @@ void power_sleep() static void power_before(bool systemoff) { - // TODO - wait for USB transmit to computer to finish? - /* Wait for debug console output finished. */ - // while (!(LPUART_STAT_TC_MASK & UART->STAT)) - // { - // } + // KL27 waits "for debug console output finished" by checking (LPUART_STAT_TC_MASK & UART->STAT), + // but we never get here with USB connected, so there is no need to wait uart_uninitialize(); // disables RX and TX pins @@ -190,8 +187,8 @@ static void power_after() PORT_SWD_SETUP(); uart_initialize(); - // i2c_deinitialize(); - // i2c_initialize(); + // The KL27 code calls i2c_deinitialize() and i2c_initialize() + // but tests have indicated this is not necessary here }
Right way to test in Makefile `-[ "$str" = "foo" ] && $(MAKE) -C bar` ignores errors while making bar, better to use full `if-then` clause.
@@ -255,7 +255,9 @@ BLD_CONFIG_SHELL=$($(BLD_ARCH)_CONFIG_SHELL) $(GPPGDIR)/GNUmakefile : $(GPPGDIR)/configure env.sh rm -rf $(INSTLOC) mkdir -p $(GPPGDIR) - -[ "$(BLD_ARCH)" = "win32" ] && $(MAKE) -C $(GPPGDIR)/src/bin/gpfdist/ext BLD_TOP=$(BLD_TOP) + if [ "$(BLD_ARCH)" = "win32" ]; then \ + $(MAKE) -C $(GPPGDIR)/src/bin/gpfdist/ext BLD_TOP=$(BLD_TOP); \ + fi cd $(GPPGDIR) && CC="$(strip $(BLD_CC) $(BLD_CFLAGS))" \ CFLAGS=$(INSTCFLAGS) $(BLD_CONFIG_SHELL) \ ./configure $(CONFIGFLAGS) \
Fix for comparing with same string with extension
static double getPSAScore(const char *requested_admin, const char *request_qos, const char *adminType, double sampleScore, double controlScore, double defaultScore) { double score; - if (requested_admin != NULL && strncmp(requested_admin, adminType, strlen(adminType)) == 0) { + if (requested_admin != NULL && strncmp(requested_admin, adminType, strlen(adminType) + 1) == 0) { /* We got precise specification on the pubsub_admin we want */ //Full match score = PUBSUB_ADMIN_FULL_MATCH_SCORE; } else if (requested_admin != NULL) { //admin type requested, but no match -> do not select this psa score = PUBSUB_ADMIN_NO_MATCH_SCORE; - } else if (request_qos != NULL && strncmp(request_qos, PUBSUB_UTILS_QOS_TYPE_SAMPLE, strlen(PUBSUB_UTILS_QOS_TYPE_SAMPLE)) == 0) { + } else if (request_qos != NULL && strncmp(request_qos, PUBSUB_UTILS_QOS_TYPE_SAMPLE, strlen(PUBSUB_UTILS_QOS_TYPE_SAMPLE) + 1) == 0) { //qos match score = sampleScore; - } else if (request_qos != NULL && strncmp(request_qos, PUBSUB_UTILS_QOS_TYPE_CONTROL, strlen(PUBSUB_UTILS_QOS_TYPE_CONTROL)) == 0) { + } else if (request_qos != NULL && strncmp(request_qos, PUBSUB_UTILS_QOS_TYPE_CONTROL, strlen(PUBSUB_UTILS_QOS_TYPE_CONTROL) + 1) == 0) { //qos match score = controlScore; } else if (request_qos != NULL) { @@ -273,7 +273,7 @@ bool pubsubEndpoint_match( bool psaMatch = false; const char *configured_admin = celix_properties_get(ep, PUBSUB_ENDPOINT_ADMIN_TYPE, NULL); if (configured_admin != NULL) { - psaMatch = strncmp(configured_admin, adminType, strlen(adminType)) == 0; + psaMatch = strncmp(configured_admin, adminType, strlen(adminType) + 1) == 0; } bool serMatch = false;
updated validation decision
Validation plugins operate as indenpendent blackboxes. For every backend each mounted validation plugin iterates -over the whole keyset, checks every key for it's trigger metakey, +over the whole keyset, checks every key for its trigger metakey, and validates the key. -Each plugin decides on it's own what should happen when a key -fails to validate. +Currently all needed validation plugins need to be specified at +mounttime - if additional validation is required, the backend +has to be remounted with the required plugins and plugin +configuration. -## Constraints +If validation of a key fails, each plugin decides on its own +how to handle the issue and proceed in ways that might be +different than what is expected or desired. -## Assumptions -While plugins should always fail and return -an error if validation fails on kdbSet, there are be different -requirements on what should happen on kdbGet: +## Constraints -- do nothing +## Assumptions - we just want to read the configuration file and don't - care about invalid values +While plugins should always fail and return an error if validation +fails on kdbSet, there are be can be several different requirements +for what should happen on kdbGet and handle problems e.g. -- issue warnings +- only issue warnings we want to read the whole configuration, but issue warnings - if keys fail to validate + if keys fail to validate instead + + problems are handled external by an application, user, ... - remove invalid keys we want to read the whole configuration, but drop invalid keys + invalid keys might be replaced by default values, requested + from the user, ... + - fail with error we only want to read valid configurations, and fail with an error if the configuration is invalid + ## Considered Alternatives - Extend validation plugins to allow us to specify what should happen @@ -58,6 +66,8 @@ of each key to the corresponding validation plugin. 
## Implications +- validation plugins have to export their validation routine + ## Related decisions ## Notes
[ya shelve/unshelve] use md5 to encode patches directories and names; store shelves in ~/.ya.shelf instead of ~/.ya/shelf
@@ -44,6 +44,26 @@ def lazy_property(fn): return _lazy_property +class classproperty(object): + def __init__(self, func): + self.func = func + + def __get__(self, _, owner): + return self.func(owner) + + +class lazy_classproperty(object): + def __init__(self, func): + self.func = func + + def __get__(self, _, owner): + attr_name = '_lazy_' + self.func.__name__ + + if not hasattr(owner, attr_name): + setattr(owner, attr_name, self.func(owner)) + return getattr(owner, attr_name) + + def memoize(thread_safe=False, limit=0): assert limit >= 0
Now compiles the app with the new CAL key for Ragger tests
@@ -177,13 +177,13 @@ jobs: - name: Build test binaries run: | - make -j BOLOS_SDK=$NANOS_SDK CAL_TESTING_KEY=1 + make -j BOLOS_SDK=$NANOS_SDK CAL_CI_KEY=1 mv bin/app.elf app-nanos.elf make clean - make -j BOLOS_SDK=$NANOX_SDK CAL_TESTING_KEY=1 + make -j BOLOS_SDK=$NANOX_SDK CAL_CI_KEY=1 mv bin/app.elf app-nanox.elf make clean - make -j BOLOS_SDK=$NANOSP_SDK CAL_TESTING_KEY=1 + make -j BOLOS_SDK=$NANOSP_SDK CAL_CI_KEY=1 mv bin/app.elf app-nanosp.elf - name: Upload app binaries
restructure conditional
@@ -246,14 +246,15 @@ patch_return_addrs(funchook_t *funchook, // If the current instruction is a RET or XORPS // we want to patch the ADD or SUB immediately before it. uint32_t add_arg = 0; - if ((!strcmp((const char*)asm_inst[i].mnemonic, "ret") && - asm_inst[i].size == 1) || + if (((!strcmp((const char*)asm_inst[i].mnemonic, "ret") && + (asm_inst[i].size == 1)) && + (((!strcmp((const char*)asm_inst[i-1].mnemonic, "add")) || + ((!strcmp((const char*)asm_inst[i-1].mnemonic, "sub")))) && + (add_arg = add_argument(&asm_inst[i-1])))) || + (!strcmp((const char*)asm_inst[i].mnemonic, "xorps") && asm_inst[i].size == 4)) { - if (((!strcmp((const char*)asm_inst[i-1].mnemonic, "add")) || - ((!strcmp((const char*)asm_inst[i-1].mnemonic, "sub")))) && - (add_arg = add_argument(&asm_inst[i-1]))) { void *pre_patch_addr = (void*)asm_inst[i-1].address; void *patch_addr = (void*)asm_inst[i].address; @@ -1109,7 +1110,7 @@ c_read(char *stackaddr) if (rc == -1) return; - funcprint("Scope: read of %ld\n", fd); + funcprint("Scope: read of %ld rc %ld\n", fd, rc); doRead(fd, initialTime, (rc != -1), (void*)buf, rc, "go_read", BUF, 0); }
Draw headset mirror with white color;
@@ -658,6 +658,10 @@ void lovrHeadsetRenderTo(headsetRenderCallback callback, void* userdata) { lovrGraphicsPopCanvas(); if (state.isMirrored) { + unsigned char r, g, b, a; + lovrGraphicsGetColor(&r, &g, &b, &a); + lovrGraphicsSetColor(255, 255, 255, 255); lovrGraphicsPlaneFullscreen(state.texture); + lovrGraphicsSetColor(r, g, b, a); } }
sysrepoctl CHANGE enable connection recovery
@@ -73,8 +73,8 @@ help_print(void) " sysrepoctl <operation-option> [other-options]\n" "\n" "Available operation-options:\n" - " -h, --help Prints usage help.\n" - " -V, --version Prints only information about sysrepo version.\n" + " -h, --help Print usage help.\n" + " -V, --version Print only information about sysrepo version.\n" " -l, --list List YANG modules in sysrepo.\n" " -i, --install <path> Install the specified schema into sysrepo. Can be in either YANG or YIN format.\n" " -u, --uninstall <module>[,<module2>,<module3> ...]\n" @@ -83,7 +83,8 @@ help_print(void) " Change access rights, features, or replay support of the specified module.\n" " -U, --update <path> Update the specified schema in sysrepo. Can be in either YANG or YIN format.\n" " -C, --connection-count\n" - " Prints the number of sysrepo connections to STDOUT.\n" + " Print the number of sysrepo connections to STDOUT.\n" + " -R, --recover Check current connections state and clean any non-existing ones.\n" "\n" "Available other-options:\n" " -s, --search-dir <dir-path>\n" @@ -401,6 +402,7 @@ main(int argc, char** argv) {"change", required_argument, NULL, 'c'}, {"update", required_argument, NULL, 'U'}, {"connection-count",no_argument, NULL, 'C'}, + {"recover", no_argument, NULL, 'R'}, {"search-dir", required_argument, NULL, 's'}, {"enable-feature", required_argument, NULL, 'e'}, {"disable-feature", required_argument, NULL, 'd'}, @@ -419,7 +421,7 @@ main(int argc, char** argv) /* process options */ opterr = 0; - while ((opt = getopt_long(argc, argv, "hVli:u:c:U:Cs:e:d:r:o:g:p:v:", options, NULL)) != -1) { + while ((opt = getopt_long(argc, argv, "hVli:u:c:U:CRs:e:d:r:o:g:p:v:", options, NULL)) != -1) { switch (opt) { case 'h': version_print(); @@ -476,6 +478,13 @@ main(int argc, char** argv) } operation = 'C'; break; + case 'R': + if (operation) { + error_print(0, "Operation already specified"); + goto cleanup; + } + operation = 'R'; + break; case 's': if (search_dir) { error_print(0, "Search dir 
already specified"); @@ -652,6 +661,14 @@ main(int argc, char** argv) fprintf(stdout, "%u\n", conn_count); rc = EXIT_SUCCESS; break; + case 'R': + /* recover */ + if ((r = sr_connection_recover(conn)) != SR_ERR_OK) { + error_print(r, "Failed to recover stale connections"); + goto cleanup; + } + rc = EXIT_SUCCESS; + break; case 0: error_print(0, "No operation specified"); break;
Set key usage limits for gnutls
@@ -41,6 +41,8 @@ ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_initial(ngtcp2_crypto_ctx *ctx) { ctx->aead.native_handle = (void *)GNUTLS_CIPHER_AES_128_GCM; ctx->md.native_handle = (void *)GNUTLS_DIG_SHA256; ctx->hp.native_handle = (void *)GNUTLS_CIPHER_AES_128_CBC; + ctx->max_encryption = 0; + ctx->max_decryption_failure = 0; return ctx; } @@ -64,6 +66,38 @@ static gnutls_cipher_algorithm_t crypto_get_hp(gnutls_session_t session) { } } +static uint64_t +crypto_get_aead_max_encryption(gnutls_cipher_algorithm_t cipher) { + switch (cipher) { + case GNUTLS_CIPHER_AES_128_GCM: + case GNUTLS_CIPHER_AES_256_GCM: + return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_GCM; + case GNUTLS_CIPHER_CHACHA20_POLY1305: + return NGTCP2_CRYPTO_MAX_ENCRYPTION_CHACHA20_POLY1305; + case GNUTLS_CIPHER_AES_128_CCM: + case GNUTLS_CIPHER_AES_256_CCM: + return NGTCP2_CRYPTO_MAX_ENCRYPTION_AES_CCM; + default: + return 0; + } +} + +static uint64_t +crypto_get_aead_max_decryption_failure(gnutls_cipher_algorithm_t cipher) { + switch (cipher) { + case GNUTLS_CIPHER_AES_128_GCM: + case GNUTLS_CIPHER_AES_256_GCM: + return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_GCM; + case GNUTLS_CIPHER_CHACHA20_POLY1305: + return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_CHACHA20_POLY1305; + case GNUTLS_CIPHER_AES_128_CCM: + case GNUTLS_CIPHER_AES_256_CCM: + return NGTCP2_CRYPTO_MAX_DECRYPTION_FAILURE_AES_CCM; + default: + return 0; + } +} + ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, void *tls_native_handle) { gnutls_session_t session = tls_native_handle; @@ -86,6 +120,9 @@ ngtcp2_crypto_ctx *ngtcp2_crypto_ctx_tls(ngtcp2_crypto_ctx *ctx, ctx->hp.native_handle = (void *)hp_cipher; } + ctx->max_encryption = crypto_get_aead_max_encryption(cipher); + ctx->max_decryption_failure = crypto_get_aead_max_decryption_failure(cipher); + return ctx; }
Fix Torque usage in MapService APIs, URL suffix was missing
@@ -309,7 +309,7 @@ namespace carto { // Create layer based on type and flags if (type == "torque") { std::string cartoCSS = layerInfos.front().cartoCSS; - auto baseDataSource = std::make_shared<HTTPTileDataSource>(minZoom, maxZoom, urlTemplateBase + "/{z}/{x}/{y}.torque" + urlTemplateSuffix); + auto baseDataSource = std::make_shared<HTTPTileDataSource>(minZoom, maxZoom, urlTemplateBase + "/{z}/{x}/{y}.torque.json" + urlTemplateSuffix); auto dataSource = std::make_shared<MemoryCacheTileDataSource>(baseDataSource); // in memory cache allows to change style quickly auto styleSet = std::make_shared<CartoCSSStyleSet>(cartoCSS, _vectorTileAssetPackage); auto torqueTileDecoder = std::make_shared<TorqueTileDecoder>(styleSet);
fixed <tag> end detection in project files
#include "ext/file_dialog.h" #include <zlib.h> +#include <ctype.h> #define CONSOLE_CURSOR_COLOR ((tic_color_red)) #define CONSOLE_BACK_TEXT_COLOR ((tic_color_dark_gray)) @@ -692,7 +693,7 @@ static bool loadTextSection(const char* project, const char* comment, char* dst, { makeTag(BinarySections[i].tag, tag, b); - sprintf(tagbuf, "\n%s <%s>\n", comment, tag); + sprintf(tagbuf, "\n%s <%s>", comment, tag); const char* ptr = SDL_strstr(project, tagbuf); @@ -702,7 +703,7 @@ static bool loadTextSection(const char* project, const char* comment, char* dst, } { - sprintf(tagbuf, "\n%s <PALETTE>\n", comment); + sprintf(tagbuf, "\n%s <PALETTE>", comment); const char* ptr = SDL_strstr(project, tagbuf); @@ -720,10 +721,17 @@ static bool loadTextSection(const char* project, const char* comment, char* dst, return done; } +static inline const char* getLineEnd(const char* ptr) +{ + while(*ptr && isspace(*ptr) && *ptr++ != '\n'); + + return ptr; +} + static bool loadTextSectionBank(const char* project, const char* comment, const char* tag, char* dst, s32 size) { char tagbuf[64]; - sprintf(tagbuf, "%s <%s>\n", comment, tag); + sprintf(tagbuf, "%s <%s>", comment, tag); const char* start = SDL_strstr(project, tagbuf); bool done = false; @@ -731,6 +739,7 @@ static bool loadTextSectionBank(const char* project, const char* comment, const if(start) { start += strlen(tagbuf); + start = getLineEnd(start); sprintf(tagbuf, "\n%s </%s>", comment, tag); const char* end = SDL_strstr(start, tagbuf); @@ -749,7 +758,7 @@ static bool loadTextSectionBank(const char* project, const char* comment, const static bool loadBinarySection(const char* project, const char* comment, const char* tag, s32 count, void* dst, s32 size, bool flip) { char tagbuf[64]; - sprintf(tagbuf, "%s <%s>\n", comment, tag); + sprintf(tagbuf, "%s <%s>", comment, tag); const char* start = SDL_strstr(project, tagbuf); bool done = false; @@ -757,6 +766,7 @@ static bool loadBinarySection(const char* project, const char* comment, const 
ch if(start) { start += strlen(tagbuf); + start = getLineEnd(start); sprintf(tagbuf, "\n%s </%s>", comment, tag); const char* end = SDL_strstr(start, tagbuf); @@ -779,6 +789,8 @@ static bool loadBinarySection(const char* project, const char* comment, const ch ptr += sizeof("-- 999:") - 1; str2buf(ptr, size*2, (u8*)dst + size*index, flip); ptr += size*2 + 1; + + ptr = getLineEnd(ptr); } else break; }
autotest: checking real, imag components on isnan to avoid errors on certain compilers
@@ -116,6 +116,9 @@ void liquid_autotest_print_array(unsigned char * _x, // Compute magnitude of (possibly) complex number #define LIQUID_AUTOTEST_VMAG(V) (sqrt(creal(V)*creal(V)+cimag(V)*cimag(V))) +// Compute isnan on (possibly) complex number +#define LIQUID_AUTOTEST_ISNAN(V) (isnan(crealf(V)) || isnan(cimagf(V))) + // CONTEND_EQUALITY #define TEST_EQUALITY(F,L,EX,X,EY,Y) \ { \ @@ -174,7 +177,8 @@ void liquid_autotest_print_array(unsigned char * _x, // as well as complex numbers #define TEST_DELTA(F,L,EX,X,EY,Y,ED,D) \ { \ - if (LIQUID_AUTOTEST_VMAG((X)-(Y)) > (D) || isnan((X)) || isnan((Y))) \ + if (LIQUID_AUTOTEST_VMAG((X)-(Y)) > (D) || \ + LIQUID_AUTOTEST_ISNAN((X)) || LIQUID_AUTOTEST_ISNAN((Y)) ) \ { \ liquid_autotest_failed_expr(F,L,"abs(" #X "-" #Y ")", \ LIQUID_AUTOTEST_VMAG((X)-(Y)),"<",ED,D); \
ksched: build fix for recent kernels
#include <linux/smp.h> #include <linux/uaccess.h> #include <linux/signal.h> +#include <linux/version.h> #include "ksched.h" #include "../iokernel/pmc.h" @@ -136,8 +137,13 @@ static void ksched_next_tid(struct ksched_percpu *kp, int cpu, pid_t tid) } raw_spin_lock_irqsave(&p->pi_lock, flags); - already_running = p->on_cpu || p->state == TASK_WAKING || - p->state == TASK_RUNNING || !try_mark_task_unparked(p); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0) + already_running = p->on_cpu || READ_ONCE(p->state) == TASK_WAKING || + task_is_running(p) || !try_mark_task_unparked(p); +#else + already_running = p->on_cpu || READ_ONCE(p->__state) == TASK_WAKING || + task_is_running(p) || !try_mark_task_unparked(p); +#endif raw_spin_unlock_irqrestore(&p->pi_lock, flags); if (unlikely(already_running)) { rcu_read_unlock();
docs - add info. to set blockev at boot. * docs - add info. to set blockev at boot. One way to set blockdev at boot. This will be backported to 6X_STABLE and 5X_STABLE * docs - update based on review comments.
@@ -314,10 +314,19 @@ vm.dirty_ratio = 10</codeblock></p> <p>See the manual page (man) for the <codeph>blockdev</codeph> command for more information about using that command (<codeph>man blockdev</codeph> opens the man page).</p> - <note>The <codeph>blockdev --setra</codeph> command is not persistent, it needs to be run - every time the system reboots. How to run the command will vary based on your system, - but you must ensure that the read ahead setting is set every time the system - reboots.</note> + <note>The <codeph>blockdev --setra</codeph> command is not persistent. You must ensure + the read-ahead value is set whenever the system restarts. How to set the value will + vary based on your system.</note> + <p>One method to set the <codeph>blockdev</codeph> value at system startup is by adding + the <codeph>/sbin/blockdev --setra</codeph> command in the <codeph>rc.local</codeph> + file. For example, add this line to the <codeph>rc.local</codeph> file to set the + read-ahead value for the disk + <codeph>sdb</codeph>.<codeblock>/sbin/blockdev --setra 16384 /dev/sdb</codeblock></p> + <p>On systems that use systemd, you must also set the execute permissions on the + <codeph>rc.local</codeph> file to enable it to run at startup. For example, on a + RHEL/CentOS 7 system, this command sets execute permissions on the + file.<codeblock># chmod +x /etc/rc.d/rc.local</codeblock></p> + <p>Restart the system to have the setting take effect.</p> </li> <li> <p>Disk I/O scheduler</p>
[DevTool] 0.2.6
Pod::Spec.new do |s| s.name = 'MLNDevTool' - s.version = '0.2.5' + s.version = '0.2.6' s.summary = 'Debug Tool of MLN.' # This description is used to generate tags and improve search results.
Don't print a stack trace for `read-at-aeon-fail` We shouldn't get a clay stack trace for read-at-aeon-fail because that gives us miles of clay stack trace whenever hoon compilation fails
[~ ..park] :: virtualize to catch and produce deterministic failures :: - !: |^ =/ res (mule |.(read)) ?: ?=(%& -.res) p.res %. [[~ ~] ..park]
weather: add remediation path, styling
@@ -248,26 +248,42 @@ export default class WeatherTile extends React.Component { return this.renderManualEntry(data); } + if ('currently' in data) { // Old weather source + this.props.api.launch.weather(this.props.location); + } + if ('current-condition' in data && 'weather' in data) { return this.renderWithData(data); } if (this.props.location) { - return this.renderWrapper(( + return this.renderWrapper( <Box - p='2' width='100%' height='100%' backgroundColor='white' color='black' + display="flex" + flexDirection="column" + justifyContent="flex-start" > - <Icon icon='Weather' color='black' display='inline' style={{ position: 'relative', top: '.3em' }} /> - <Text>Weather</Text> - <Text pt='2' width='100%' display='flex' flexDirection='column'> + <Text><Icon icon='Weather' color='black' display='inline' style={{ position: 'relative', top: '.3em' }} /> Weather</Text> + <Text width='100%' display='flex' flexDirection='column' mt={1}> Loading, please check again later... </Text> + <Text mt="auto"> + Set new location{' '} + <Text + cursor='pointer' + onClick={() => + this.setState({ manualEntry: !this.state.manualEntry }) + } + > + -> + </Text> + </Text> </Box> - )); + ); } return this.renderNoData(); }
extmod/vfs: Add fast path for stating VfsPosix filesystem.
#if MICROPY_VFS +#if MICROPY_VFS_POSIX +#include "extmod/vfs_posix.h" +#endif #if MICROPY_VFS_FAT #include "extmod/vfs_fat.h" #endif @@ -124,8 +127,14 @@ mp_import_stat_t mp_vfs_import_stat(const char *path) { if (vfs == MP_VFS_NONE || vfs == MP_VFS_ROOT) { return MP_IMPORT_STAT_NO_EXIST; } + + // Fast paths for known VFS types + #if MICROPY_VFS_POSIX + if (mp_obj_get_type(vfs->obj) == &mp_type_vfs_posix) { + return mp_vfs_posix_import_stat(MP_OBJ_TO_PTR(vfs->obj), path_out); + } + #endif #if MICROPY_VFS_FAT - // fast paths for known VFS types if (mp_obj_get_type(vfs->obj) == &mp_fat_vfs_type) { return fat_vfs_import_stat(MP_OBJ_TO_PTR(vfs->obj), path_out); }
python/plugins: added get() and localhost test cases
@@ -17,7 +17,10 @@ class DNSPlugin(unittest.TestCase): def setUp(self): self.parent_key = kdb.Key("user:/python") - self.plugin = dns_plugin.ElektraPlugin() + self.plugin = dns_plugin.ElektraDNSPlugin() + self.localhost_key_with_plugin = kdb.Key("user:/python/hostname", + kdb.KEY_VALUE, "localhost", + kdb.KEY_META, "check/dns", "") self.valid_key_with_plugin = kdb.Key("user:/python/hostname", kdb.KEY_VALUE, VALID_DOMAIN, kdb.KEY_META, "check/dns", "") @@ -28,6 +31,7 @@ class DNSPlugin(unittest.TestCase): self.invalid_ks = kdb.KeySet(10, self.invalid_key_with_plugin, self.valid_key_without_plugin, kdb.KS_END) self.valid_ks = kdb.KeySet(10, self.valid_key_with_plugin, self.valid_key_without_plugin, kdb.KS_END) + self.localhost_ks = kdb.KeySet(10, self.localhost_key_with_plugin, kdb.KS_END) # socket.getaddrinfo is mocked in order to make the tests non-reliant on an active internet connection @patch.object(socket, 'getaddrinfo', return_value=VALID_ADDR_INFO) @@ -58,6 +62,20 @@ class DNSPlugin(unittest.TestCase): def test_set_containing_valid_key_returns_success(self, mock_socket): self.assertEqual(1, self.plugin.set(self.valid_ks, self.parent_key)) + @patch.object(socket, 'getaddrinfo', side_effect=Exception) + def test_get_containing_invalid_key_returns_failure(self, mock_socket): + self.assertEqual(-1, self.plugin.get(self.invalid_ks, self.parent_key)) + + @patch.object(socket, 'getaddrinfo', return_value=VALID_ADDR_INFO) + def test_get_containing_valid_key_returns_success(self, mock_socket): + self.assertEqual(1, self.plugin.get(self.valid_ks, self.parent_key)) + + def test_get_containing_localhost_returns_success(self): + self.assertEqual(1, self.plugin.get(self.localhost_ks, self.parent_key)) + + def test_set_containing_localhost_returns_success(self): + self.assertEqual(1, self.plugin.get(self.localhost_ks, self.parent_key)) + if __name__ == '__main__': unittest.main()
harness: distops tests: make sure tests have run when reporting test success
@@ -45,10 +45,13 @@ def dist_test_factory(testname, finish_string, error_regex): # error regex error_re = re.compile(error_regex) passed = True + found_test = False for line in rawiter: if error_re.match(line): passed = False - return PassFailResult(passed) + if self.get_finish_string() in line: + found_test = True + return PassFailResult(found_test and passed) return DistTest for t in distops_tests:
gsettings: rework subscription mechanism
@@ -412,44 +412,25 @@ static void elektra_settings_key_changed (GDBusConnection * connection G_GNUC_UN g_mutex_lock (&elektra_settings_kdb_lock); GElektraKeySet * ks = gelektra_keyset_dup (esb->subscription_gks); - g_mutex_unlock (&elektra_settings_kdb_lock); - GElektraKey * key = gelektra_key_new (keypathname, KEY_VALUE, "", KEY_END); + + GElektraKey * cutpoint = gelektra_key_new (keypathname, KEY_VALUE, "", KEY_END); g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s %s!", "GSEttings Path: ", (g_strstr_len (g_strstr_len (keypathname, -1, "/") + 1, -1, "/"))); + GElektraKeySet * subscribed = gelektra_keyset_cut (ks, cutpoint); + g_free (cutpoint); + GElektraKey * item; gssize pos = 0; - while ((item = gelektra_keyset_at (ks, pos)) != NULL) - { - g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s: %s with %s", "Comparing", gelektra_key_name (key), gelektra_key_name (item)); - if (gelektra_key_isbeloworsame (key, item)) + while ((item = gelektra_keyset_at (subscribed, pos)) != NULL) { - g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s!", "Subscribed key changed"); gchar * gsettingskeyname = g_strdup (g_strstr_len (g_strstr_len (keypathname, -1, "/") + 1, -1, "/")); + g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s: %s!", "Subscribed key changed", gsettingskeyname); g_settings_backend_changed (G_SETTINGS_BACKEND (user_data), gsettingskeyname, NULL); g_free (gsettingskeyname); - } - else - { - g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s!", "Not subscribed to key"); - - // TODO: workaround for broken subscription - notify change anyway - gchar * gsettingskeyname = g_strdup (g_strstr_len (g_strstr_len (keypathname, -1, "/") + 1, -1, "/")); - g_settings_backend_changed (G_SETTINGS_BACKEND (user_data), gsettingskeyname, NULL); - g_free (gsettingskeyname); - } pos++; } g_variant_unref (variant); - - -// g_mutex_lock (&elektra_settings_kdb_lock); -// // TODO: mpranj check if sync needed here -// esb->gks = gelektra_keyset_new (0, GELEKTRA_KEYSET_END); -// if (gelektra_kdb_get (esb->gkdb, 
esb->gks, esb->gkey) == -1) -// { -// g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, "%s\n", "Error on sync!"); -// } -// g_mutex_unlock (&elektra_settings_kdb_lock); + g_mutex_unlock (&elektra_settings_kdb_lock); } static void elektra_settings_bus_connected (GObject * source_object G_GNUC_UNUSED, GAsyncResult * res, gpointer user_data)
cache: check that nftw is available
include (LibAddPlugin) +include (SafeCheckSymbolExists) plugin_check_if_included ("resolver") @@ -14,6 +15,13 @@ if (NOT_INCLUDED) return () endif (NOT_INCLUDED) +safe_check_symbol_exists (nftw "ftw.h" HAVE_NFTW) + +if (NOT HAVE_NFTW) + message ("nftw (ftw.h) not found, excluding the cache plugin") + return () +endif (NOT HAVE_NFTW) + add_plugin (cache SOURCES cache.h cache.c LINK_ELEKTRA elektra-kdb
Enable requires_gnutls_tls1_3 in sni test cases
@@ -11398,6 +11398,7 @@ run_test "TLS 1.3: Server side check, no server certificate available" \ -s "tls13 server state: MBEDTLS_SSL_SERVER_CERTIFICATE" \ -s "No certificate available." +requires_gnutls_tls1_3 requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3 requires_config_enabled MBEDTLS_SSL_TLS1_3_COMPATIBILITY_MODE requires_config_enabled MBEDTLS_DEBUG_C @@ -11421,6 +11422,7 @@ run_test "TLS 1.3: Server side check - openssl with sni" \ -s "=> parse client hello" \ -s "<= parse client hello" +requires_gnutls_tls1_3 requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3 requires_config_enabled MBEDTLS_SSL_TLS1_3_COMPATIBILITY_MODE requires_config_enabled MBEDTLS_DEBUG_C
jenkins: stricter logRotator settings to reduce disk usage
@@ -28,7 +28,14 @@ import java.text.SimpleDateFormat // Buildjob properties properties([ - buildDiscarder(logRotator(numToKeepStr: '60', artifactNumToKeepStr: '60')) + buildDiscarder( + logRotator( + artifactDaysToKeepStr: '31', // Keep artifacts for max 31 days + artifactNumToKeepStr: '5', // Keep artifacts for last 5 builds + daysToKeepStr: '90', // Keep build info for 90 days + numToKeepStr: '60' // Keep a max of 60 builds + ) + ) ]) // If previous run is still running, cancel it
Skip testing ciphers in the legacy provider if no legacy test_enc should not test ciphers that are not available due to a lack of the legacy provider
@@ -15,6 +15,7 @@ use File::Copy; use File::Compare qw/compare_text/; use File::Basename; use OpenSSL::Test qw/:DEFAULT srctop_file bldtop_dir/; +use OpenSSL::Test::Utils; setup("test_enc"); @@ -27,13 +28,16 @@ my $test = catfile(".", "p"); my $cmd = "openssl"; my $provpath = bldtop_dir("providers"); -my @prov = ("-provider-path", $provpath, "-provider", "default", "-provider", "legacy"); - +my @prov = ("-provider-path", $provpath, "-provider", "default"); +push @prov, ("-provider", "legacy") unless disabled("legacy"); my $ciphersstatus = undef; my @ciphers = map { s/^\s+//; s/\s+$//; split /\s+/ } run(app([$cmd, "list", "-cipher-commands"]), capture => 1, statusvar => \$ciphersstatus); +@ciphers = grep {!/^(bf|cast|des$|des-cbc|des-cfb|des-ecb|des-ofb|desx|idea + |rc2|rc4|seed)/x} @ciphers + if disabled("legacy"); plan tests => 2 + (scalar @ciphers)*2;
fixes unwanted fallthrough in setters
@@ -784,6 +784,7 @@ void set_pkt_ctx(picoquic_packet_context_t *pkt_ctx, access_key_t ak, protoop_ar break; case AK_PKTCTX_LATEST_RETRANSMIT_TIME: pkt_ctx->latest_retransmit_time = val; + break; case AK_PKTCTX_LATEST_RETRANSMIT_CC_NOTIFICATION_TIME: pkt_ctx->latest_retransmit_cc_notification_time = val; break; @@ -935,6 +936,7 @@ void set_pkt(picoquic_packet_t *pkt, access_key_t ak, protoop_arg_t val) break; case AK_PKT_HAS_HANDSHAKE_DONE: pkt->has_handshake_done = (unsigned int) val; + break; case AK_PKT_IS_CONGESTION_CONTROLLED: pkt->is_congestion_controlled = val; break;
fix color completition see
@@ -23,7 +23,7 @@ _kdb() { # only kdb was entered yet, print a list of available commands if [[ $COMP_CWORD -eq 1 ]]; then local IFS=$'\n' - local commands=($(${kdbpath} 2> /dev/null | sed -e '0,/^Known commands are/d' | awk '{print $1}')) + local commands=($(${kdbpath} 2> /dev/null | sed -e '0,/^Known commands are/d' | awk '{print $1}' | sed -r "s/\x1B\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]//g" )) COMPREPLY=($(compgen -W '${commands[@]}' -- "${cur}")) return fi
fix git invocation if repository path contains whitespace
@@ -180,19 +180,19 @@ def get_hg_scm_data(info): def git_call(fpath, git_arg): - return system_command_call("git --git-dir " + fpath + "/.git " + git_arg) + return system_command_call(["git", "--git-dir", fpath + "/.git"] + git_arg) def get_git_dict(fpath): info = {} - git_test = git_call(fpath, "rev-parse HEAD").strip() + git_test = git_call(fpath, ["rev-parse", "HEAD"]).strip() if not git_test or len(git_test) != 40: return info info['rev'] = git_test - info['author'] = git_call(fpath, "log -1 --format=\"format:%an <%ae>\" " + git_test) - info['summary'] = git_call(fpath, "log -1 --format=\"format:%s\" " + git_test) + info['author'] = git_call(fpath, ["log", "-1", "--format='format:%an <%ae>'", git_test]) + info['summary'] = git_call(fpath, ["log", "-1", "--format='format:%s'", git_test]) - body = git_call(fpath, "log -1 --grep=\"^git-svn-id: \" --format=\"format:%b\"") + body = git_call(fpath, ["log", "-1", "--grep='^git-svn-id: '", "--format='format:%b'"]) if body: url = re.match("git?-svn?-id: (.*)@", body) rev = re.search('@(.*?) ', body)
mangle: make mangle_AddSub use smaller ranges
@@ -477,14 +477,12 @@ static void mangle_Random(run_t* run, bool printable) { } } -static void mangle_AddSubWithRange(run_t* run, size_t off, uint64_t varLen) { - int delta = (int)util_rndGet(0, 8192); - delta -= 4096; +static void mangle_AddSubWithRange(run_t* run, size_t off, uint64_t varLen, uint64_t range) { + int64_t delta = (int64_t)util_rndGet(0, range * 2) - (int64_t)range; switch (varLen) { case 1: { run->dynamicFile[off] += delta; - return; break; } case 2: { @@ -499,7 +497,6 @@ static void mangle_AddSubWithRange(run_t* run, size_t off, uint64_t varLen) { val = __builtin_bswap16(val); } mangle_Overwrite(run, (uint8_t*)&val, off, varLen); - return; break; } case 4: { @@ -514,7 +511,6 @@ static void mangle_AddSubWithRange(run_t* run, size_t off, uint64_t varLen) { val = __builtin_bswap32(val); } mangle_Overwrite(run, (uint8_t*)&val, off, varLen); - return; break; } case 8: { @@ -529,12 +525,10 @@ static void mangle_AddSubWithRange(run_t* run, size_t off, uint64_t varLen) { val = __builtin_bswap64(val); } mangle_Overwrite(run, (uint8_t*)&val, off, varLen); - return; break; } default: { - LOG_F("Unknown variable length size: %" PRIu64, varLen); - break; + LOG_F("Unknown variable length size: %zu", varLen); } } } @@ -543,12 +537,30 @@ static void mangle_AddSub(run_t* run, bool printable) { size_t off = mangle_getOffSet(run); /* 1,2,4,8 */ - uint64_t varLen = 1U << util_rndGet(0, 3); + size_t varLen = 1U << util_rndGet(0, 3); if ((run->dynamicFileSz - off) < varLen) { varLen = 1; } - mangle_AddSubWithRange(run, off, varLen); + uint64_t range; + switch (varLen) { + case 1: + range = 16; + break; + case 2: + range = 4096; + break; + case 4: + range = 1048576; + break; + case 8: + range = 268435456; + break; + default: + LOG_F("Invalid operand size: %zu", varLen); + } + + mangle_AddSubWithRange(run, off, varLen, range); if (printable) { util_turnToPrintable((uint8_t*)&run->dynamicFile[off], varLen); }
CMake: Fix name of external CMake file Before this update building the man pages would fail on a case-sensitive file system. Thank you to Stefan Husmann (@stefanhusmann) for reporting the problem. See also:
@@ -568,7 +568,7 @@ function (generate_manpage NAME) -D MANPAGE=${OUTFILE} -P - ${CMAKE_SOURCE_DIR}/scripts/cmake/ElektraManpage.cmake) + ${CMAKE_SOURCE_DIR}/scripts/cmake/ElektraManPage.cmake) add_custom_target (man-${NAME} ALL DEPENDS ${OUTFILE}) add_dependencies (man man-${NAME}) endif (RONN_LOC)
xfconf-plugin: set interface error on faulty initalization
*/ #include "xfconf.h" +#include "kdberrors.h" #include <kdb.h> #include <kdbease.h> @@ -27,6 +28,7 @@ int elektraXfconfOpen (Plugin * handle ELEKTRA_UNUSED, Key * errorKey ELEKTRA_UN else { ELEKTRA_LOG ("unable to initialize xfconf(%d): %s\n", err->code, err->message); + ELEKTRA_SET_INTERFACE_ERROR (errorKey, err->message); g_error_free (err); return ELEKTRA_PLUGIN_STATUS_ERROR; }
Fix for new thread creation function prototype
@@ -80,12 +80,12 @@ The Defer library overriding functions /** OVERRIDE THIS to replace the default pthread implementation. */ -void *defer_new_thread(void *(*thread_func)(void *), pool_pt pool) { +void *defer_new_thread(void *(*thread_func)(void *), void *arg) { struct CreateThreadArgs *data = malloc(sizeof(*data)); if (!data) return NULL; *data = (struct CreateThreadArgs){ - .thread_func = thread_func, .arg = pool, + .thread_func = thread_func, .arg = arg, }; void *thr = RubyCaller.call_c(create_ruby_thread_gvl, data); if (!thr || thr == (void *)Qnil || thr == (void *)Qfalse) {
mpiotest: handle enospc from mpool_mblock_write.
@@ -535,6 +535,9 @@ test_start(void *arg) err = mpool_mblock_write(mp, objid, iov, niov); if (err) { + if (merr_errno(err) == ENOSPC) + break; + merr_strinfo(err, errbuf, sizeof(errbuf), NULL); eprint( "mpool_mblock_write: %d objid=0x%lx len=%zu: %s\n",
cooja: simplify JNI method declaration Rename and document the macros to make them easier to understand.
#ifndef CLASSNAME #error CLASSNAME is undefined, required by platform.c #endif /* CLASSNAME */ -#define COOJA__QUOTEME(a,b,c) COOJA_QUOTEME(a,b,c) -#define COOJA_QUOTEME(a,b,c) a##b##c -#define COOJA_JNI_PATH Java_org_contikios_cooja_corecomm_ -#define CLASS_init COOJA__QUOTEME(COOJA_JNI_PATH,CLASSNAME,_init) -#define CLASS_getMemory COOJA__QUOTEME(COOJA_JNI_PATH,CLASSNAME,_getMemory) -#define CLASS_setMemory COOJA__QUOTEME(COOJA_JNI_PATH,CLASSNAME,_setMemory) -#define CLASS_tick COOJA__QUOTEME(COOJA_JNI_PATH,CLASSNAME,_tick) -#define CLASS_setReferenceAddress COOJA__QUOTEME(COOJA_JNI_PATH,CLASSNAME,_setReferenceAddress) +/* Construct the name of JNI method m in class c. */ +#define COOJA_METHOD(c, m) COOJA_QUOTEME(c, m) +/* Indirection to get the right preprocessor behavior. */ +#define COOJA_QUOTEME(c, m) Java_org_contikios_cooja_corecomm_##c##_##m +/* Names of JNI methods. */ +#define CLASS_init COOJA_METHOD(CLASSNAME, init) +#define CLASS_getMemory COOJA_METHOD(CLASSNAME, getMemory) +#define CLASS_setMemory COOJA_METHOD(CLASSNAME, setMemory) +#define CLASS_tick COOJA_METHOD(CLASSNAME, tick) +#define CLASS_setReferenceAddress COOJA_METHOD(CLASSNAME, setReferenceAddress) #if NETSTACK_CONF_WITH_IPV6 #include "net/ipv6/uip.h"
adding libm tgamma, tgammal, tgammaf, lgamma_r, lgammal_r, lgammaf_r, lgamma, lgammal and lgammaf
@@ -240,13 +240,13 @@ GOW(hypotf, fFff) GOW(ldexp, dFdi) GOW(ldexpf, fFfi) // ldexpl // Weak -// lgamma // Weak -// lgammaf // Weak -// lgammaf_r // Weak +GOW(lgamma, dFd) +GOW(lgammaf, fFf) +GOW(lgammaf_r, fFfp) // __lgammaf_r_finite -// lgammal // Weak -// lgammal_r // Weak -// lgamma_r // Weak +GO2(lgammal, LFL, lgamma) +GO2(lgammal_r, LFLp, lgamma_r) +GOW(lgamma_r, dFdp) // __lgamma_r_finite DATAV(_LIB_VERSION, 4) // llrint // Weak @@ -363,9 +363,9 @@ GOW(tanh, dFd) GOW(tanhf, fFf) // tanhl // Weak // tanl // Weak -// tgamma // Weak -// tgammaf // Weak -// tgammal // Weak +GOW(tgamma, dFd) +GOW(tgammaf, fFf) +GO2(tgammal, LFL, tgamma) GOW(trunc, dFd) GOW(truncf, fFf) // truncl // Weak
admin/ohpc-filesystem: adding to scanning path for automatic rpm dependency analysis; adding specific slurm binaries that require hwloc so that they will correctly detect dependencies from the ohpc-provided variant
#----------------------------------------------------------------------------eh- Name: ohpc-filesystem -Version: 2.0 +Version: 2.1 Release: 1%{?dist} Summary: Common top-level OpenHPC directories @@ -57,8 +57,8 @@ install -p -m 755 %{SOURCE3} $RPM_BUILD_ROOT/usr/lib/rpm %%__ohpc_provides /usr/lib/rpm/ohpc-find-provides %%__ohpc_requires /usr/lib/rpm/ohpc-find-requires %%{buildroot} %{OHPC_HOME} -%%__ohpc_path ^%{OHPC_HOME} -%%__elf_exclude_path ^%{OHPC_HOME} +%%__ohpc_path ^(%{OHPC_HOME})|(/usr/sbin/slurm.*)$ +%%__elf_exclude_path ^(%{OHPC_HOME})|(/usr/sbin/slurm.*)$ %%__ohpc_magic ^ELF (32|64)-bit.*$ %%__ohpc_flags magic_and_path
Configurations/windows-makefile.tmpl: Fix template code for INSTALL_MODULES Fixes
@@ -95,12 +95,10 @@ INSTALL_ENGINEPDBS={- @{$unified_info{modules}}) -} INSTALL_MODULES={- - join(" \\\n" . ' ' x 16, - fill_lines(" ", $COLUMNS - 16, - map { platform->dso($_) } + join(" ", map { platform->dso($_) } grep { !$unified_info{attributes}->{modules}->{$_}->{noinst} && !$unified_info{attributes}->{modules}->{$_}->{engine} } - @{$unified_info{modules}})) + @{$unified_info{modules}}) -} INSTALL_MODULEPDBS={- join(" ", map { quotify1(platform->dsopdb($_)) }
ulp: Add additional uint32_t object to `ulp_insn_t` Used to get the encoded instruction from bit-field structs. Merges
@@ -293,6 +293,8 @@ typedef union { uint32_t opcode: 4; /*!< Opcode (OPCODE_MACRO) */ } macro; /*!< Format of tokens used by MACROs */ + uint32_t instruction; /*!< Encoded instruction for ULP coprocessor */ + } ulp_insn_t; _Static_assert(sizeof(ulp_insn_t) == 4, "ULP coprocessor instruction size should be 4 bytes");
MTRIE coverity fixes
@@ -271,7 +271,7 @@ set_leaf (ip4_fib_mtrie_t * m, old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index); - ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); + ASSERT (a->dst_address_length <= 32); ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8)); /* how many bits of the destination address are in the next PLY */ @@ -284,7 +284,8 @@ set_leaf (ip4_fib_mtrie_t * m, if (n_dst_bits_next_plies <= 0) { /* The mask length of the address to insert maps to this ply */ - uword i, n_dst_bits_this_ply, old_leaf_is_terminal; + uword i, old_leaf_is_terminal; + u32 n_dst_bits_this_ply; /* The number of bits, and hence slots/buckets, we will fill */ n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies); @@ -401,7 +402,7 @@ set_root_leaf (ip4_fib_mtrie_t * m, old_ply = &m->root_ply; - ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); + ASSERT (a->dst_address_length <= 32); /* how many bits of the destination address are in the next PLY */ n_dst_bits_next_plies = a->dst_address_length - BITS (u16); @@ -412,7 +413,8 @@ set_root_leaf (ip4_fib_mtrie_t * m, if (n_dst_bits_next_plies <= 0) { /* The mask length of the address to insert maps to this ply */ - uword i, n_dst_bits_this_ply, old_leaf_is_terminal; + uword i, old_leaf_is_terminal; + u32 n_dst_bits_this_ply; /* The number of bits, and hence slots/buckets, we will fill */ n_dst_bits_this_ply = 16 - a->dst_address_length; @@ -515,7 +517,7 @@ unset_leaf (ip4_fib_mtrie_t * m, i32 i, n_dst_bits_this_ply, old_leaf_is_terminal; u8 dst_byte; - ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); + ASSERT (a->dst_address_length <= 32); ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8)); n_dst_bits_next_plies = @@ -588,7 +590,7 @@ unset_root_leaf (ip4_fib_mtrie_t * m, u16 dst_byte; ip4_fib_mtrie_16_ply_t *old_ply; - ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32); + ASSERT (a->dst_address_length <= 32); old_ply = &m->root_ply; 
n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
landscape: fix newchannel 'back' in dark mode
@@ -127,7 +127,7 @@ export function NewChannel(props: NewChannelProps & RouteComponentProps) { }; return ( <Col overflowY="auto" p={3}> - <Box pb='3' display={['block', 'none']} onClick={() => history.push(props.baseUrl)}> + <Box color='black' pb='4' display={['block', 'none']} onClick={() => history.push(props.baseUrl)}> {'<- Back'} </Box> <Box fontWeight="bold" mb={4} color="black">
SOVERSION bump to version 3.5.0
@@ -66,8 +66,8 @@ set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION # Major version is changed with every backward non-compatible API/ABI change in libyang, minor version changes # with backward compatible change and micro version is connected with any internal change of the library. set(LIBNETCONF2_MAJOR_SOVERSION 3) -set(LIBNETCONF2_MINOR_SOVERSION 4) -set(LIBNETCONF2_MICRO_SOVERSION 1) +set(LIBNETCONF2_MINOR_SOVERSION 5) +set(LIBNETCONF2_MICRO_SOVERSION 0) set(LIBNETCONF2_SOVERSION_FULL ${LIBNETCONF2_MAJOR_SOVERSION}.${LIBNETCONF2_MINOR_SOVERSION}.${LIBNETCONF2_MICRO_SOVERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_SOVERSION})
[runtime] Fix allocation of stacks
+// Copyright 2020 ETH Zurich and University of Bologna. +// +// SPDX-License-Identifier: Apache-2.0 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Author: Samuel Riedel, ETH Zurich +// Matheus Cavalcante, ETH Zurich + .globl _start -.section .text.init; .section .text; +.section .text.init; _start: - # Initialize global pointer + // Initialize global pointer .option push .option norelax 1:auipc gp, %pcrel_hi(__global_pointer$) @@ -43,16 +62,13 @@ reset_vector: li x29, 0 li x30, 0 li x31, 0 - la sp, tcdm_end_address_reg # load stack top from peripheral register + la sp, tcdm_end_address_reg // load stack top from peripheral register lw sp, 0(sp) - csrr a0, mhartid # get hart id - la a1, nr_cores_address_reg # get the number of cores per cluster + csrr a0, mhartid // get hart id + la a1, nr_cores_address_reg // get the number of cores per cluster lw a1, 0(a1) - li t0, -1 # generate a all 1 bitmask - sll t0, t0, a1 - neg t0, t0 - and t0, a0, t0 # generate a core id which starts at 0 for each cluster - slli t0, t0, 8 # set some stack-space aside for each hart + mv t0, a0 + slli t0, t0, 10 // set some stack-space aside for each hart sub sp, sp, t0 call main
roll back commit #72d8608 to avoid spin lock contention on region allocation
@@ -119,8 +119,6 @@ bool mi_is_in_heap_region(const void* p) mi_attr_noexcept { Commit from a region -----------------------------------------------------------------------------*/ -#define ALLOCATING ((void*)1) - // Commit the `blocks` in `region` at `idx` and `bitidx` of a given `size`. // Returns `false` on an error (OOM); `true` otherwise. `p` and `id` are only written // if the blocks were successfully claimed so ensure they are initialized to NULL/SIZE_MAX before the call. @@ -132,31 +130,10 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit mi_assert_internal((mask & mi_atomic_read(&region->map)) == mask); // ensure the region is reserved - void* start; - do { - start = mi_atomic_read_ptr(&region->start); - if (start == NULL) { - start = ALLOCATING; // try to start allocating - } - else if (start == ALLOCATING) { - // another thead is already allocating.. wait it out - // note: the wait here is not great (but should not happen often). Another - // strategy might be to just allocate another region in parallel. This tends - // to be bad for benchmarks though as these often start many threads at the - // same time leading to the allocation of too many regions. (Still, this might - // be the most performant and it's ok on 64-bit virtual memory with over-commit.) - mi_atomic_yield(); - continue; - } - } while( start == ALLOCATING && !mi_atomic_compare_exchange_ptr(&region->start, ALLOCATING, NULL) ); - mi_assert_internal(start != NULL); - - // allocate the region if needed - if (start == ALLOCATING) { + void* start = mi_atomic_read_ptr(&region->start); + if (start == NULL) + { start = _mi_os_alloc_aligned(MI_REGION_SIZE, MI_SEGMENT_ALIGN, mi_option_is_enabled(mi_option_eager_region_commit), tld); - // set the new allocation (or NULL on failure) -- this releases any waiting threads. - mi_atomic_write_ptr(&region->start, start); - if (start == NULL) { // failure to allocate from the OS! 
unclaim the blocks and fail size_t map; @@ -166,11 +143,20 @@ static bool mi_region_commit_blocks(mem_region_t* region, size_t idx, size_t bit return false; } - // update the region count if this is a new max idx. - mi_atomic_compare_exchange(&regions_count, idx+1, idx); + // set the newly allocated region + if (mi_atomic_compare_exchange_ptr(&region->start, start, NULL)) { + // update the region count + mi_atomic_increment(&regions_count); + } + else { + // failed, another thread allocated just before us, free our allocated memory + // TODO: should we keep the allocated memory and assign it to some other region? + _mi_os_free(start, MI_REGION_SIZE, tld->stats); + start = mi_atomic_read_ptr(&region->start); + } } - mi_assert_internal(start != NULL && start != ALLOCATING); mi_assert_internal(start == mi_atomic_read_ptr(&region->start)); + mi_assert_internal(start != NULL); // Commit the blocks to memory void* blocks_start = (uint8_t*)start + (bitidx * MI_SEGMENT_SIZE);
storage: do not release an external config property
@@ -290,9 +290,6 @@ void flb_storage_destroy(struct flb_config *ctx) } cio_destroy(cio); - if (ctx->storage_bl_mem_limit) { - flb_free(ctx->storage_bl_mem_limit); - } /* Delete references from input instances */ storage_contexts_destroy(ctx);
Fixed neg index read in channelMap
@@ -217,9 +217,11 @@ static int process_jsonarray(scratch_space_t *scratch, char *ct0conf, stack_entr so->channel_map[i] = -1; for (int i = 0; i < count; i++) { + if(values[i] >= 0) { so->channel_map[values[i]] = i; } } + } free(values); } else if (jsoneq(ct0conf, tk, "acc_bias") == 0) { int32_t count = (tk + 1)->size;
TestsUser/Prelinked: Fix typo.
@@ -501,7 +501,7 @@ int main(int argc, char** argv) { Status = PrelinkedInjectKext ( &Context, - KextPath + KextPath, TestPlist, TestPlistSize, "Contents/MacOS/Kext",
server: fix compilation on older SSL, make error more useful
@@ -445,21 +445,25 @@ sslerror(z_strm *strm, int rval) snprintf(_sslerror_buf, sizeof(_sslerror_buf), "%d: call callback via SSL_CTX_set_client_cert_cb()", err); break; +#ifdef SSL_ERROR_WANT_ASYNC case SSL_ERROR_WANT_ASYNC: snprintf(_sslerror_buf, sizeof(_sslerror_buf), "%d: asynchronous engine is still processing data", err); break; +#endif +#ifdef SSL_ERROR_WANT_ASYNC_JOB case SSL_ERROR_WANT_ASYNC_JOB: snprintf(_sslerror_buf, sizeof(_sslerror_buf), "%d: no async jobs available in the pool", err); break; +#endif case SSL_ERROR_SYSCALL: snprintf(_sslerror_buf, sizeof(_sslerror_buf), "%d: I/O error: %s", err, strerror(errno)); break; case SSL_ERROR_SSL: - snprintf(_sslerror_buf, sizeof(_sslerror_buf), - "%d: protocol error", err); + ERR_error_string_n(ERR_get_error(), + _sslerror_buf, sizeof(_sslerror_buf)); break; } return _sslerror_buf;
don't add prefix if build type is None (PR
@@ -224,7 +224,7 @@ else() endif() string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC) -if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel)$")) +if(NOT(CMAKE_BUILD_TYPE_LC MATCHES "^(release|relwithdebinfo|minsizerel|none)$")) set(mi_basename "${mi_basename}-${CMAKE_BUILD_TYPE_LC}") #append build type (e.g. -debug) if not a release version endif() if(MI_BUILD_SHARED)
Python-package: Change import module
@@ -2,6 +2,7 @@ import sys from copy import deepcopy from six import iteritems, string_types, integer_types import os +import imp from collections import Iterable, Sequence, Mapping, MutableMapping import warnings import numpy as np @@ -24,13 +25,29 @@ except ImportError: class Series(object): pass + +def get_so_paths(dir_name): + dir_name = os.path.join(os.path.dirname(__file__), dir_name) + list_dir = os.listdir(dir_name) if os.path.isdir(dir_name) else [] + return [os.path.join(dir_name, so_name) for so_name in list_dir if so_name.split('.')[-1] in ['so', 'pyd']] + +so_paths = get_so_paths('./gpu') + get_so_paths('./') +for so_path in so_paths: try: - from catboost.gpu._catboost import _PoolBase, _CatBoostBase, _MetricCalcerBase, CatboostError, _cv, _set_logger, _reset_logger, _configure_malloc + _catboost = imp.load_dynamic('_catboost', so_path) + _PoolBase = _catboost._PoolBase + _CatBoostBase = _catboost._CatBoostBase + _MetricCalcerBase = _catboost._MetricCalcerBase + _cv = _catboost._cv + _set_logger = _catboost._set_logger + _reset_logger = _catboost._reset_logger + _configure_malloc = _catboost._configure_malloc + CatboostError = _catboost.CatboostError + break except ImportError: - try: + pass +else: from _catboost import _PoolBase, _CatBoostBase, _MetricCalcerBase, CatboostError, _cv, _set_logger, _reset_logger, _configure_malloc - except ImportError: - from ._catboost import _PoolBase, _CatBoostBase, _MetricCalcerBase, CatboostError, _cv, _set_logger, _reset_logger, _configure_malloc from contextlib import contextmanager
fix the issue #886, in function "platone_createOnetimeWallet"
@@ -60,11 +60,13 @@ BoatPlatoneWallet *g_platone_wallet_ptr; __BOATSTATIC BOAT_RESULT platone_createOnetimeWallet() { BSINT32 index; - BoatPlatoneWalletConfig wallet_config = {0}; + BoatPlatoneWalletConfig wallet_config; BUINT8 binFormatKey[32] = {0}; (void)binFormatKey; //avoid warning + memset(&wallet_config,0,sizeof(wallet_config)); + /* wallet_config value assignment */ #if defined(USE_PRIKEY_FORMAT_INTERNAL_GENERATION) //BoatLog(BOAT_LOG_NORMAL, ">>>>>>>>>> wallet format: internal generated.");
Make sync compatible with mingw. This fixes the problems of the first try: It does not use fsync(fileno(fd)) anymore, which is discouraged and doesn't work.
@@ -49,6 +49,29 @@ int elektraSyncSet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned ELEKTRA_UN /* set all keys */ const char * configFile = keyString (parentKey); if (!strcmp (configFile, "")) return 0; // no underlying config file + + // Syncing requires different functions for mingw vs. POSIX builds. + // For mingw, we need to fflush(), for POSIX we need to fsync(). + // See https://stackoverflow.com/a/41615150 + // Using fsync(fileno(fd)) does not work! +#ifdef __MINGW32__ + FILE * fd = NULL; + // For mingw, we need to use mode "wc" and fflush(). See https://stackoverflow.com/a/57090195 . + fileMode = "wc"; + fd = fopen (configFile, fileMode); + if (fd == NULL) + { + ELEKTRA_SET_RESOURCE_ERRORF (parentKey, "Could not open config file %s. Reason: %s", configFile, strerror (errno)); + return -1; + } + if(fflush (fd) == EOF) + { + ELEKTRA_SET_RESOURCE_ERRORF (parentKey, "Could not fsync/fflush config file %s. Reason: %s", configFile, strerror (errno)); + fclose (fd); + return -1; + } + fclose (fd); +#else int fd = open (configFile, O_RDWR); if (fd == -1) { @@ -62,6 +85,7 @@ int elektraSyncSet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned ELEKTRA_UN return -1; } close (fd); +#endif return 1; /* success */ }
eq_value should return boolean
@@ -405,8 +405,8 @@ describe("Pallene coder /", function() -- Comparison operators, same order as types.lua -- Nil and Record are tested separately. - { "eq_value", "==", "value", "value", "value" }, - { "ne_value", "~=", "value", "value", "value" }, + { "eq_value", "==", "value", "value", "boolean" }, + { "ne_value", "~=", "value", "value", "boolean" }, { "eq_boolean", "==", "boolean", "boolean", "boolean" }, { "ne_boolean", "~=", "boolean", "boolean", "boolean" },
feat(fabric test): add fabric test case "Free_WalletConfig_NULL"
@@ -764,6 +764,16 @@ START_TEST(test_002Transaction_0030DeInit_Txptr_NULL) } END_TEST +START_TEST(test_002Transaction_0031Free_WalletConfig_NULL) +{ + + BoatHlfabricWalletConfig wallet_config = get_fabric_wallet_settings(); + BoatIotSdkInit(); + BoatIotSdkDeInit(); + fabricWalletConfigFree(wallet_config); +} +END_TEST + Suite *make_transaction_suite(void) { @@ -806,6 +816,7 @@ Suite *make_transaction_suite(void) tcase_add_test(tc_transaction_api, test_002Transaction_0028TxQueryFail_arg2_NULL); tcase_add_test(tc_transaction_api, test_002Transaction_0029TxQueryFail_args_ADD1); tcase_add_test(tc_transaction_api, test_002Transaction_0030DeInit_Txptr_NULL); + tcase_add_test(tc_transaction_api, test_002Transaction_0031Free_WalletConfig_NULL); return s_transaction; }
bfd: fix handling session creation batch when multiple session creating script is run (via exec) only the first one actually starts
@@ -1165,6 +1165,7 @@ bfd_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) } } now = clib_cpu_time_now (); + uword *session_index; switch (event_type) { case ~0: /* no events => timeout */ @@ -1180,35 +1181,41 @@ bfd_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) * each event or timeout */ break; case BFD_EVENT_NEW_SESSION: + vec_foreach (session_index, event_data) + { bfd_lock (bm); - if (!pool_is_free_index (bm->sessions, *event_data)) + if (!pool_is_free_index (bm->sessions, *session_index)) { bfd_session_t *bs = - pool_elt_at_index (bm->sessions, *event_data); + pool_elt_at_index (bm->sessions, *session_index); bfd_send_periodic (vm, rt, bm, bs, now); bfd_set_timer (bm, bs, now, 1); } else { BFD_DBG ("Ignoring event for non-existent session index %u", - (u32) * event_data); + (u32) * session_index); } bfd_unlock (bm); + } break; case BFD_EVENT_CONFIG_CHANGED: + vec_foreach (session_index, event_data) + { bfd_lock (bm); - if (!pool_is_free_index (bm->sessions, *event_data)) + if (!pool_is_free_index (bm->sessions, *session_index)) { bfd_session_t *bs = - pool_elt_at_index (bm->sessions, *event_data); + pool_elt_at_index (bm->sessions, *session_index); bfd_on_config_change (vm, rt, bm, bs, now); } else { BFD_DBG ("Ignoring event for non-existent session index %u", - (u32) * event_data); + (u32) * session_index); } bfd_unlock (bm); + } break; default: vlib_log_err (bm->log_class, "BUG: event type 0x%wx", event_type);
feat(venachain):Remove the txType field in the interface description
@@ -52,10 +52,9 @@ extern "C" { * 4. recipient; * 5. value(optional); * 6. data(contain at least an 8-byte bigendian txtype field in data); - * 7. txtype(same value as above, except trimming leading zeros); - * 8. v; - * 9. signature.r; - * 10. signature.s; + * 7. v; + * 8. signature.r; + * 9. signature.s; * * These transaction fields are encoded as elements of a LIST in above order * as per RLP encoding rules. "LIST" is a type of RLP field.
sidk_s5jt200: remove unused code lines board_button_irq() and board_button_handler() are not in use, as there is no Kconfig entry that enables them. Let's remove them. We can add it later when we need it again.
#include <errno.h> #include <tinyara/board.h> -#include <tinyara/irq.h> #include <chip.h> @@ -136,59 +135,4 @@ uint8_t board_buttons(void) return ret; } -/**************************************************************************** - * Button support. - * - * Description: - * board_button_initialize() must be called to initialize button resources. - * After that, board_buttons() may be called to collect the current state - * of all buttons or board_button_irq() may be called to register button - * interrupt handlers. - * - * After board_button_initialize() has been called, board_buttons() may be - * called to collect the state of all buttons. board_buttons() returns an - * 8-bit bit set with each bit associated with a button. See the - * BUTTON_*_BIT definitions in board.h for the meaning of each bit. - * - * board_button_irq() may be called to register an interrupt handler that - * will be called when a button is depressed or released. The ID value is - * a button enumeration value that uniquely identifies a button resource. - * See the BUTTON_* definitions in board.h for the meaning of enumeration - * value. The previous interrupt handler address is returned (so that it - * may restored, if so desired). - * - ****************************************************************************/ - -#ifdef CONFIG_ARCH_IRQBUTTONS -xcpt_t board_button_irq(int id, xcpt_t irqhandler) -{ - xcpt_t oldhandler = NULL; - - /* The following should be atomic */ - - if (irqhandler == NULL) { - up_disable_irq(IRQ_EINT0); - irq_attach(IRQ_EINT0, NULL, NULL); - return oldhandler; - } - - if (irq_attach(IRQ_EINT0, irqhandler, NULL) == OK) { - up_enable_irq(IRQ_EINT0); - } else { - /* TO DO: How can it contolled ? */ - } - - return oldhandler; -} -#endif - -#ifdef CONFIG_ARCH_IRQBUTTONS -void board_button_handler(int id, int irq) -{ - s32 gpio; - - gpio = s5j_gpio(GPA0, 0); - gpio_eint_clear_pending(gpio); -} -#endif #endif /* CONFIG_ARCH_BUTTONS */
docs: Configure enclave.runtime.path as liberpal-skeleton-v3.so in skeleton_remote_attestation_with_rune.md. Only `liberpal-skeleton-v3.so` supports `rune attest` command.
@@ -7,7 +7,7 @@ This guide will guide you how to use remote attestation based on SGX in skeleton - Register a `SPID` and `Subscription Key` of [IAS](https://api.portal.trustedservices.intel.com/EPID-attestation). After the registration, Intel will respond with a SPID which is needed to communicate with IAS. # Run skeleton bundle with `rune` -Before using `rune attest` command, you must ensure your skeleton container/bundles(such as skeleton-enclave-container) running by setting `"wait_timeout","100"` of `process.args` in config.json, just like +Before using `rune attest` command, you must ensure your skeleton container/bundles(such as skeleton-enclave-container) running by setting `"wait_timeout","100"` of `process.args` in config.json as following: ```json "process": { "args": [ @@ -16,6 +16,15 @@ Before using `rune attest` command, you must ensure your skeleton container/bund } ``` +Only `liberpal-skeleton-v3.so` supports `rune attest` command. So you also need to configure enclave runtime as following: +```json +"annotations": { + "enclave.type": "intelSgx", + "enclave.runtime.path": "/usr/lib/liberpal-skeleton-v3.so", + "enclave.runtime.args": "debug" +} +``` + Then you can run your skeleton containers by typing the following commands: ```shell
enable npgsql legacy timestamps
@@ -733,6 +733,8 @@ namespace Miningcore if(string.IsNullOrEmpty(pgConfig.User)) logger.ThrowLogPoolStartupException("Postgres configuration: invalid or missing 'user'"); + AppContext.SetSwitch("Npgsql.EnableLegacyTimestampBehavior", true); + // build connection string var connectionString = $"Server={pgConfig.Host};Port={pgConfig.Port};Database={pgConfig.Database};User Id={pgConfig.User};Password={pgConfig.Password};CommandTimeout=900;";
Fire events when group.name or group.class is changed
@@ -445,7 +445,12 @@ int DeRestPluginPrivate::setGroupAttributes(const ApiRequest &req, ApiResponse & ResourceItem *item = group->item(RAttrClass); DBG_Assert(item != 0); + if (item && item->toString() != gclass) + { item->setValue(gclass); + Event e(RGroups, RAttrClass, group->address()); + enqueueEvent(e); + } } // name @@ -467,6 +472,8 @@ int DeRestPluginPrivate::setGroupAttributes(const ApiRequest &req, ApiResponse & { group->setName(name); changed = true; + Event e(RGroups, RAttrName, group->address()); + enqueueEvent(e); queSaveDb(DB_GROUPS, DB_SHORT_SAVE_DELAY); } } @@ -3186,6 +3193,21 @@ void DeRestPluginPrivate::handleGroupEvent(const Event &e) webSocketServer->broadcastTextMessage(Json::serialize(map)); } } + else if (strncmp(e.what(), "attr/", 5) == 0) + { + ResourceItem *item = group->item(e.what()); + if (item) + { + QVariantMap map; + map["t"] = QLatin1String("event"); + map["e"] = QLatin1String("changed"); + map["r"] = QLatin1String("groups"); + map["id"] = group->id(); + map[e.what() + 5] = item->toVariant(); + + webSocketServer->broadcastTextMessage(Json::serialize(map)); + } + } else if (e.what() == REventAdded) { QVariantMap map;
docs: Update www.raspberrypi.com documentation links www.raspberrypi.com restructured its documentation. Reflect the new links in our docs. Fixes
@@ -29,7 +29,7 @@ Accommodate the values above to your own needs (ex: ext3 / ext4). * `GPU_MEM_1024`: GPU memory in megabyte for the 1024MB Raspberry Pi. Ignored by the 256MB/512MB RP. Overrides gpu_mem. Max 944. Default not set. -See: <https://www.raspberrypi.org/documentation/configuration/config-txt/memory.md> +See: <https://www.raspberrypi.com/documentation/computers/config_txt.html#memory-options> ## VC4 @@ -47,7 +47,7 @@ You can supply more licenses separated by comma. Example: KEY_DECODE_WVC1 = "0x12345678,0xabcdabcd,0x87654321" -See: <https://www.raspberrypi.org/documentation/configuration/config-txt/codeclicence.md> +See: <https://www.raspberrypi.com/documentation/computers/config_txt.html#licence-key-and-codec-options> ## Disable overscan @@ -89,7 +89,7 @@ Example official settings for Turbo Mode in Raspberry Pi 2: SDRAM_FREQ = "500" OVER_VOLTAGE = "6" -See: <https://www.raspberrypi.org/documentation/configuration/config-txt/overclocking.md> +See: <https://www.raspberrypi.com/documentation/computers/config_txt.html#overclocking-options> ## HDMI and composite video options @@ -106,7 +106,7 @@ Example to force HDMI output to 720p in CEA mode: HDMI_GROUP = "1" HDMI_MODE = "4" -See: <https://www.raspberrypi.org/documentation/configuration/config-txt/video.md> +See: <https://www.raspberrypi.com/documentation/computers/configuration.html#hdmi-configuration> ## Video camera support with V4L2 drivers
Migrate to LED HAL (GPIO HAL example)
#include "contiki.h" #include "dev/gpio-hal.h" /*---------------------------------------------------------------------------*/ -#define BASE_TO_PORT_NUM(base) (((base) - GPIO_A_BASE) >> 12) - -gpio_hal_pin_t out_pin1 = (BASE_TO_PORT_NUM(LEDS_GREEN_PORT_BASE) << 3) + LEDS_GREEN_PIN; -gpio_hal_pin_t out_pin2 = (BASE_TO_PORT_NUM(LEDS_BLUE_PORT_BASE) << 3) + LEDS_BLUE_PIN; -gpio_hal_pin_t out_pin3 = (BASE_TO_PORT_NUM(LEDS_RED_PORT_BASE) << 3) + LEDS_RED_PIN; +gpio_hal_pin_t out_pin1 = (LEDS_ARCH_L1_PORT << 3) + LEDS_ARCH_L1_PIN; +gpio_hal_pin_t out_pin2 = (LEDS_ARCH_L2_PORT << 3) + LEDS_ARCH_L2_PIN; +gpio_hal_pin_t out_pin3 = (LEDS_ARCH_L3_PORT << 3) + LEDS_ARCH_L3_PIN; /*---------------------------------------------------------------------------*/ gpio_hal_pin_t btn_pin = (BUTTON_USER_PORT << 3) + BUTTON_USER_PIN; /*---------------------------------------------------------------------------*/
YAML CPP: Improve array check
@@ -56,23 +56,9 @@ NameIterator relativeKeyIterator (Key const & key, Key const & parent) std::pair<bool, unsigned long long> isArrayIndex (NameIterator const & nameIterator) { string const name = *nameIterator; - if (name.size () < 2 || name.front () != '#') return std::make_pair (false, 0); - - auto errnoValue = errno; - - try - { - return std::make_pair (true, stoull (name.substr (name.find_first_not_of ("#\\_")))); - } - catch (invalid_argument const &) - { - return std::make_pair (false, 0); - } - catch (out_of_range const &) - { - errno = errnoValue; - return std::make_pair (false, 0); - } + auto const offsetIndex = ckdb::elektraArrayValidateBaseNameString (name.c_str ()); + auto const isArrayElement = offsetIndex >= 1; + return { isArrayElement, isArrayElement ? stoull (name.substr (offsetIndex)) : 0 }; } /**
Solve minor bug in web script (file was never closed).
@@ -6,8 +6,7 @@ from datetime import datetime def index(): basepath = path.dirname(path.abspath(__file__)) - f = open(path.join(basepath, 'index.html'), 'r') - + with open(path.join(basepath, 'index.html'), 'r') as f: return f.read() def time():
SOVERSION bump to version 7.3.9
@@ -72,7 +72,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 7) set(SYSREPO_MINOR_SOVERSION 3) -set(SYSREPO_MICRO_SOVERSION 8) +set(SYSREPO_MICRO_SOVERSION 9) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
core/minute-ia/mia_panic_internal.h: Format with clang-format BRANCH=none TEST=none
* convenientely in the same order as pushed by hardwared during a * processor exception. */ -noreturn -void exception_panic( - uint32_t vector, - uint32_t errorcode, - uint32_t eip, - uint32_t cs, - uint32_t eflags); +noreturn void exception_panic(uint32_t vector, uint32_t errorcode, uint32_t eip, + uint32_t cs, uint32_t eflags);
fix(rt-thread): Sconscript use LOCAL_CFLAGS to replace LOCAL_CCFLAGS
@@ -52,15 +52,15 @@ if GetDepend('PKG_USING_LVGL_DEMOS'): if check_h_hpp_exsit(current_path): inc = inc + [current_path] -LOCAL_CCFLAGS = '' +LOCAL_CFLAGS = '' if rtconfig.PLATFORM == 'gcc': # GCC - LOCAL_CCFLAGS += ' -std=c99' + LOCAL_CFLAGS += ' -std=c99' elif rtconfig.PLATFORM == 'armcc': # Keil AC5 - LOCAL_CCFLAGS += ' --c99 --gnu -g -W' + LOCAL_CFLAGS += ' --c99 --gnu -g -W' elif rtconfig.PLATFORM == 'armclang': # Keil AC6 - LOCAL_CCFLAGS += ' -std=c99 -g -w' + LOCAL_CFLAGS += ' -std=c99 -g -w' -group = group + DefineGroup('LVGL', src, depend = ['PKG_USING_LVGL'], CPPPATH = inc, LOCAL_CCFLAGS = LOCAL_CCFLAGS) +group = group + DefineGroup('LVGL', src, depend = ['PKG_USING_LVGL'], CPPPATH = inc, LOCAL_CFLAGS = LOCAL_CFLAGS) list = os.listdir(cwd) for d in list: