message (string, length 6-474)
diff (string, length 8-5.22k)
[kernel] format scheduler log.
@@ -445,8 +445,8 @@ void rt_schedule(void) /* switch to new thread */ RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("[%d]switch to priority#%d " - "thread:%.*s(sp:0x%p), " - "from thread:%.*s(sp: 0x%p)\n", + "thread:%.*s(sp:0x%08x), " + "from thread:%.*s(sp: 0x%08x)\n", rt_interrupt_nest, highest_ready_priority, RT_NAME_MAX, to_thread->name, to_thread->sp, RT_NAME_MAX, from_thread->name, from_thread->sp));
error: synchronized naming with documentation
@@ -88,9 +88,9 @@ is the abbreviation for "Code". - Memory Allocation C01110 - Installation C01200 - Logical C01300 - - Assertion C01310 + - Internal C01310 - Interface C01320 - - Plugin is Broken C01330 + - Plugin Misbehavior C01330 - Conflicting State C02000 - Validation C03000 - Syntactic C03100
examples/esp_http_client: `output_buffer` isn't necessarily NUL-terminated Closes Closes
@@ -603,7 +603,7 @@ static void http_native_request(void) ESP_LOGI(TAG, "HTTP GET Status = %d, content_length = %d", esp_http_client_get_status_code(client), esp_http_client_get_content_length(client)); - ESP_LOG_BUFFER_HEX(TAG, output_buffer, strlen(output_buffer)); + ESP_LOG_BUFFER_HEX(TAG, output_buffer, data_read); } else { ESP_LOGE(TAG, "Failed to read response"); }
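A minimal sketch of the surrounding usage (the read call, buffer name, and size macro are assumptions about the example app, not part of the diff): the buffer filled by the HTTP client is raw bytes with no guaranteed NUL terminator, so the logging call must be given the byte count returned by the read instead of strlen().

/* Hypothetical context for the fix above; `client`, `output_buffer` and
 * MAX_HTTP_OUTPUT_BUFFER are assumed to exist in the example. */
int data_read = esp_http_client_read_response(client, output_buffer,
                                              MAX_HTTP_OUTPUT_BUFFER);
if (data_read >= 0) {
    /* Pass the explicit byte count; strlen() could read past the end of
     * a buffer that was never NUL-terminated. */
    ESP_LOG_BUFFER_HEX(TAG, output_buffer, data_read);
}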
gnutls: Ignore non-fatal errors from gnutls_handshake_write Ignore non-fatal errors from gnutls_handshake_write. Send alert on fatal error.
@@ -452,9 +452,14 @@ int ngtcp2_crypto_read_write_crypto_data(ngtcp2_conn *conn, int rv; if (datalen > 0) { - if (gnutls_handshake_write( + rv = gnutls_handshake_write( session, ngtcp2_crypto_gnutls_from_ngtcp2_level(crypto_level), data, - datalen) != 0) { + datalen); + if (rv != 0) { + if (!gnutls_error_is_fatal(rv)) { + return 0; + } + gnutls_alert_send_appropriate(session, rv); return -1; } }
TEST: Add missing initialization. Compiler complained.
@@ -303,7 +303,7 @@ static int test_rsa_sslv23(int idx) static int test_rsa_oaep(int idx) { int ret = 0; - RSA *key; + RSA *key = NULL; unsigned char ptext[256]; unsigned char ctext[256]; static unsigned char ptext_ex[] = "\x54\x85\x9b\x34\x2c\x49\xea\x2a";
framework/task_manager: Fix missing deallocation in task_manager_reply_unicast. In the failure cases, the memory allocated for data and msg should be freed.
@@ -142,6 +142,8 @@ int task_manager_reply_unicast(tm_msg_t *reply_msg) reply_mqfd = mq_open(TM_UNICAST_MQ, O_WRONLY | O_CREAT, 0666, &attr); if (reply_mqfd == (mqd_t)ERROR) { + TM_FREE(data->msg); + TM_FREE(data); tmdbg("mq_open failed!\n"); return TM_COMMUCATION_FAIL; } @@ -149,6 +151,8 @@ int task_manager_reply_unicast(tm_msg_t *reply_msg) ret = mq_send(reply_mqfd, (char *)data, sizeof(tm_msg_t), TM_MQ_PRIO); if (ret != 0) { mq_close(reply_mqfd); + TM_FREE(data->msg); + TM_FREE(data); tmdbg("mq_send failed! %d\n", errno); return TM_COMMUCATION_FAIL; }
Remove OPENSSL_assert() from crypto/hmac
@@ -37,7 +37,8 @@ int HMAC_Init_ex(HMAC_CTX *ctx, const void *key, int len, if (key != NULL) { reset = 1; j = EVP_MD_block_size(md); - OPENSSL_assert(j <= (int)sizeof(ctx->key)); + if (!ossl_assert(j <= (int)sizeof(ctx->key))) + goto err; if (j < len) { if (!EVP_DigestInit_ex(ctx->md_ctx, md, impl)) goto err;
test: expect an HTTPError like we throw now
@@ -48,7 +48,7 @@ class OnlineFetchTest < Rugged::OnlineTestCase def test_fetch_over_https_with_certificate_callback_fail @repo.remotes.create("origin", "https://github.com/libgit2/TestGitRepository.git") - exception = assert_raises Rugged::NetworkError do + exception = assert_raises Rugged::HTTPError do @repo.fetch("origin", { certificate_check: lambda { |valid, host| false } })
emit an error if the server cannot provide the ELF files
@@ -3725,10 +3725,12 @@ protoop_arg_t process_plugin_validate_frame(picoquic_cnx_t* cnx) if (strcmp(frame->pid, cnx->quic->plugins_to_inject.elems[i].plugin_name) == 0) { uint8_t plugin_buffer[MAX_PLUGIN_DATA_LEN]; size_t size_used = 0; - plugin_prepare_plugin_data_exchange(cnx, cnx->quic->plugins_to_inject.elems[i].plugin_path, plugin_buffer, + int err = plugin_prepare_plugin_data_exchange(cnx, cnx->quic->plugins_to_inject.elems[i].plugin_path, plugin_buffer, MAX_PLUGIN_DATA_LEN, &size_used); + if (err == 0) { picoquic_add_to_plugin_stream(cnx, frame->pid_id, plugin_buffer, size_used, 1); - return 0; + } + return err; } }
better to use volatile for asm
#define _ASM_H_ -#define VAR2REG_B(var, reg) asm ("move.b %0, %/"reg"" :: "r" (var) : ""reg"") -#define VAR2REG_W(var, reg) asm ("move.w %0, %/"reg"" :: "r" (var) : ""reg"") -#define VAR2REG_L(var, reg) asm ("move.l %0, %/"reg"" :: "r" (var) : ""reg"") +#define VAR2REG_B(var, reg) asm volatile ("move.b %0, %/"reg"" :: "r" (var) : ""reg""); +#define VAR2REG_W(var, reg) asm volatile ("move.w %0, %/"reg"" :: "r" (var) : ""reg""); +#define VAR2REG_L(var, reg) asm volatile ("move.l %0, %/"reg"" :: "r" (var) : ""reg""); -#define REG2VAR_B(reg, var) asm ("move.b %/"reg", %0" : "=r" (var)) -#define REG2VAR_W(reg, var) asm ("move.w %/"reg", %0" : "=r" (var)) -#define REG2VAR_L(reg, var) asm ("move.l %/"reg", %0" : "=r" (var)) +#define REG2VAR_B(reg, var) asm volatile ("move.b %/"reg", %0" : "=r" (var)); +#define REG2VAR_W(reg, var) asm volatile ("move.w %/"reg", %0" : "=r" (var)); +#define REG2VAR_L(reg, var) asm volatile ("move.l %/"reg", %0" : "=r" (var)); // enumeration helper for GAS #if defined(__ASSEMBLY__) || defined(__ASSEMBLER__)
Fix the checks of X509_REVOKED_add1_ext_i2d
@@ -2479,18 +2479,18 @@ static int make_revoked(X509_REVOKED *rev, const char *str) rtmp = ASN1_ENUMERATED_new(); if (rtmp == NULL || !ASN1_ENUMERATED_set(rtmp, reason_code)) goto end; - if (!X509_REVOKED_add1_ext_i2d(rev, NID_crl_reason, rtmp, 0, 0)) + if (X509_REVOKED_add1_ext_i2d(rev, NID_crl_reason, rtmp, 0, 0) <= 0) goto end; } if (rev && comp_time) { - if (!X509_REVOKED_add1_ext_i2d - (rev, NID_invalidity_date, comp_time, 0, 0)) + if (X509_REVOKED_add1_ext_i2d + (rev, NID_invalidity_date, comp_time, 0, 0) <= 0) goto end; } if (rev && hold) { - if (!X509_REVOKED_add1_ext_i2d - (rev, NID_hold_instruction_code, hold, 0, 0)) + if (X509_REVOKED_add1_ext_i2d + (rev, NID_hold_instruction_code, hold, 0, 0) <= 0) goto end; }
Orchestra: optimize channel offsets of unicast slotframes
#define ORCHESTRA_DEFAULT_COMMON_CHANNEL_OFFSET 1 #endif -/* Min channel offset for the unicast rules; the default min/max range is [2, 255] */ +/* Min channel offset for the unicast rules; the default min/max range is [2, sizeof(HS)-2]. + If the HS has less then 3 channels [1, 1] is used instead. +*/ #ifdef ORCHESTRA_CONF_UNICAST_MIN_CHANNEL_OFFSET #define ORCHESTRA_UNICAST_MIN_CHANNEL_OFFSET ORCHESTRA_CONF_UNICAST_MIN_CHANNEL_OFFSET #else -#define ORCHESTRA_UNICAST_MIN_CHANNEL_OFFSET 2 +#define ORCHESTRA_UNICAST_MIN_CHANNEL_OFFSET (sizeof(TSCH_DEFAULT_HOPPING_SEQUENCE) > 2 ? 2 : 1) #endif /* Max channel offset for the unicast rules */ #ifdef ORCHESTRA_CONF_UNICAST_MAX_CHANNEL_OFFSET #define ORCHESTRA_UNICAST_MAX_CHANNEL_OFFSET ORCHESTRA_CONF_UNICAST_MAX_CHANNEL_OFFSET #else -#define ORCHESTRA_UNICAST_MAX_CHANNEL_OFFSET 255 +#define ORCHESTRA_UNICAST_MAX_CHANNEL_OFFSET \ + (MAX(ORCHESTRA_UNICAST_MIN_CHANNEL_OFFSET, sizeof(TSCH_DEFAULT_HOPPING_SEQUENCE) - 1)) +#endif + +/* Channel offsets for the EB rule, default: 1 */ +#ifdef ORCHESTRA_CONF_EB_MIN_CHANNEL_OFFSET +#define ORCHESTRA_EB_MIN_CHANNEL_OFFSET ORCHESTRA_CONF_EB_MIN_CHANNEL_OFFSET +#else +#define ORCHESTRA_EB_MIN_CHANNEL_OFFSET 1 +#endif + +#ifdef ORCHESTRA_CONF_EB_MAX_CHANNEL_OFFSET +#define ORCHESTRA_EB_MAX_CHANNEL_OFFSET ORCHESTRA_CONF_EB_MAX_CHANNEL_OFFSET +#else +#define ORCHESTRA_EB_MAX_CHANNEL_OFFSET 1 #endif #endif /* __ORCHESTRA_CONF_H__ */
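A small standalone sketch of the arithmetic behind the new macros (the hopping-sequence lengths are illustrative assumptions, not values from the patch, and it assumes the hopping sequence is a byte array so sizeof() equals the channel count):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Worked example of the new unicast offset macros for hypothetical
 * hopping-sequence lengths (values chosen for illustration only). */
static void show(int hs_len) {
    int min = hs_len > 2 ? 2 : 1;       /* mirrors ORCHESTRA_UNICAST_MIN_CHANNEL_OFFSET */
    int max = MAX(min, hs_len - 1);     /* mirrors ORCHESTRA_UNICAST_MAX_CHANNEL_OFFSET */
    printf("hopping sequence length %d -> unicast channel offsets [%d, %d]\n",
           hs_len, min, max);
}

int main(void) {
    show(4); /* prints [2, 3] */
    show(2); /* prints [1, 1] */
    return 0;
}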
julian date: floor on seconds >= 59.5
@@ -344,8 +344,10 @@ static int select_datetime(grib_accessor* a) } for (i=0;i<numberOfSubsets;i++) { - sprintf( datetime_str, "%04ld/%02ld/%02ld %02ld:%02ld:%02ld",year[i],month[i],day[i],hour[i],minute[i], (long)round(second[i]) ); - julianDT = date_to_julian( year[i],month[i],day[i],hour[i],minute[i],(long)round(second[i]) ); + long rounded_second=(long)round(second[i]); + if (rounded_second==60) { rounded_second=59;} + sprintf( datetime_str, "%04ld/%02ld/%02ld %02ld:%02ld:%02ld",year[i],month[i],day[i],hour[i],minute[i], rounded_second ); + julianDT = date_to_julian( year[i],month[i],day[i],hour[i],minute[i],rounded_second ); if (julianDT == -1) { grib_context_log(c,GRIB_LOG_ERROR,"Invalid date/time: %s", datetime_str); return GRIB_INTERNAL_ERROR;
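A tiny standalone illustration of the edge case being fixed (the sample value is an assumption, not taken from any test data): rounding a fractional seconds value of 59.5 or more yields 60, which is not a valid seconds field, so the patch clamps it back to 59.

#include <math.h>
#include <stdio.h>

int main(void) {
    double second = 59.7;                       /* illustrative value only */
    long rounded_second = (long)round(second);  /* -> 60, an invalid time component */
    if (rounded_second == 60) {
        rounded_second = 59;                    /* clamp, as the patch does */
    }
    printf("%02ld\n", rounded_second);          /* prints 59 */
    return 0;
}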
[apps] Define barrier_init with sleep/wake up functionality
@@ -37,6 +37,21 @@ void mempool_barrier_init(uint32_t core_id, uint32_t num_cores) { } } + +void mempool_barrier_init_sleep(uint32_t core_id, uint32_t num_cores) { + if (core_id == 0) { + // Give other cores time to go to sleep + mempool_wait(4 * num_cores); + barrier = 0; + barrier_iteration = 0; + barrier_init = 1; + wake_up(-1); + } else { + mempool_wfi(); + } +} + + void mempool_barrier(uint32_t num_cores, uint32_t cycles) { // Remember previous iteration uint32_t iteration_old = barrier_iteration;
Add missing LV_GC_INCLUDE
#include "../lv_core/lv_debug.h" #include "../lv_misc/lv_gc.h" +#if defined(LV_GC_INCLUDE) + #include LV_GC_INCLUDE +#endif /* LV_ENABLE_GC */ + /********************* * DEFINES *********************/
increased eyre hackiness level: more rickety /$ stub to allow ren/collections to build at all; disable cross-ship builds for requests with extensions, e.g. foo.css
%^ exec-live ay+(dray p+uv+~ q.p.kyz p.u.mez) -.q.u.mez :: =/ ext %urb-elem ::XX in message? - =/ fig/coin many+~ ::STUB fcgi params for /$ + =/ fig/coin (fcgi ~ *cred) ::STUB fcgi params for /$ [%bake ext fig q.u.mez] :: $got-inner =+ bem=?-(-.hem $beam p.hem, $spur [-.top (weld p.hem s.top)]) ~| bad-beam+q.bem ?< =([~ 0] (sky [151 %noun] %cw (en-beam bem(+ ~, r [%da now])))) - ?. =< | ::TEST - =(our p.bem) ::TODO also if it is? + ?. ::=< & ::TEST + ::=(our p.bem) ::TODO also if it is? + ?=(^ p.pok) :: fetch local css etc =. ext %urb-elem ::XX =. -.bem (norm-beak -.bem) =/ han (sham hen)
Promote --data-checksums to the common set of options in initdb --help. This was previously part of the section dedicated to less common options, but it is an option commonly used these days. Author: Michael Banck Discussion:
@@ -2275,6 +2275,7 @@ usage(const char *progname) printf(_(" [-D, --pgdata=]DATADIR location for this database cluster\n")); printf(_(" -E, --encoding=ENCODING set default encoding for new databases\n")); printf(_(" -g, --allow-group-access allow group read/execute on data directory\n")); + printf(_(" -k, --data-checksums use data page checksums\n")); printf(_(" --locale=LOCALE set default locale for new databases\n")); printf(_(" --lc-collate=, --lc-ctype=, --lc-messages=LOCALE\n" " --lc-monetary=, --lc-numeric=, --lc-time=LOCALE\n" @@ -2290,7 +2291,6 @@ usage(const char *progname) printf(_(" --wal-segsize=SIZE size of WAL segments, in megabytes\n")); printf(_("\nLess commonly used options:\n")); printf(_(" -d, --debug generate lots of debugging output\n")); - printf(_(" -k, --data-checksums use data page checksums\n")); printf(_(" -L DIRECTORY where to find the input files\n")); printf(_(" -n, --no-clean do not clean up after errors\n")); printf(_(" -N, --no-sync do not wait for changes to be written safely to disk\n"));
OcAppleBootCompatLib: Fix null pointer dereference
@@ -537,12 +537,14 @@ OcExitBootServices ( // // Handle events in case we have any. // + if (BootCompat->Settings.ExitBootServicesHandlers != NULL) { for (Index = 0; BootCompat->Settings.ExitBootServicesHandlers[Index] != NULL; ++Index) { BootCompat->Settings.ExitBootServicesHandlers[Index] ( NULL, BootCompat->Settings.ExitBootServicesHandlerContexts[Index] ); } + } // // For non-macOS operating systems return directly.
tcp_transport: Implement connect timeout Merges Closes
@@ -81,15 +81,61 @@ static int tcp_connect(esp_transport_handle_t t, const char *host, int port, int setsockopt(tcp->sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)); setsockopt(tcp->sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)); - ESP_LOGD(TAG, "[sock=%d],connecting to server IP:%s,Port:%d...", + // Set socket to non-blocking + int flags; + if ((flags = fcntl(tcp->sock, F_GETFL, NULL)) < 0 || fcntl(tcp->sock, F_SETFL, flags |= O_NONBLOCK) < 0) { + ESP_LOGE(TAG, "[sock=%d] set nonblocking error: %s", tcp->sock, strerror(errno)); + goto error; + } + + ESP_LOGD(TAG, "[sock=%d] Connecting to server. IP: %s, Port: %d", tcp->sock, ipaddr_ntoa((const ip_addr_t*)&remote_ip.sin_addr.s_addr), port); - if (connect(tcp->sock, (struct sockaddr *)(&remote_ip), sizeof(struct sockaddr)) != 0) { + + if (connect(tcp->sock, (struct sockaddr *)(&remote_ip), sizeof(struct sockaddr)) < 0) { + if (errno == EINPROGRESS) { + fd_set fdset; + + esp_transport_utils_ms_to_timeval(timeout_ms, &tv); + FD_ZERO(&fdset); + FD_SET(tcp->sock, &fdset); + + int res = select(tcp->sock+1, NULL, &fdset, NULL, &tv); + if (res < 0) { + ESP_LOGE(TAG, "[sock=%d] select() error: %s", tcp->sock, strerror(errno)); + goto error; + } + else if (res == 0) { + ESP_LOGE(TAG, "[sock=%d] select() timeout", tcp->sock); + goto error; + } else { + int sockerr; + socklen_t len = (socklen_t)sizeof(int); + + if (getsockopt(tcp->sock, SOL_SOCKET, SO_ERROR, (void*)(&sockerr), &len) < 0) { + ESP_LOGE(TAG, "[sock=%d] getsockopt() error: %s", tcp->sock, strerror(errno)); + goto error; + } + else if (sockerr) { + ESP_LOGE(TAG, "[sock=%d] delayed connect error: %s", tcp->sock, strerror(sockerr)); + goto error; + } + } + } else { + ESP_LOGE(TAG, "[sock=%d] connect() error: %s", tcp->sock, strerror(errno)); + goto error; + } + } + // Reset socket to blocking + if ((flags = fcntl(tcp->sock, F_GETFL, NULL)) < 0 || fcntl(tcp->sock, F_SETFL, flags & ~O_NONBLOCK) < 0) { + ESP_LOGE(TAG, "[sock=%d] reset blocking error: %s", tcp->sock, strerror(errno)); + goto error; + } + return tcp->sock; +error: close(tcp->sock); tcp->sock = -1; return -1; } - return tcp->sock; -} static int tcp_write(esp_transport_handle_t t, const char *buffer, int len, int timeout_ms) {
chat: added ability to blur chat editor
@@ -116,6 +116,9 @@ export default class ChatEditor extends Component { extraKeys: { 'Enter': () => { this.submit(); + }, + 'Esc': () => { + this.editor?.getInputField().blur(); } } };
arch/arm/src/common: Change user heap definition from USR_HEAP to BASE_HEAP. BASE_HEAP, not USR_HEAP, is used for the user heap definition in mm.h.
@@ -221,11 +221,11 @@ void up_addregion(void) for (region_cnt = 1; region_cnt < CONFIG_MM_REGIONS; region_cnt++) { if (heapx_is_init[regionx_heap_idx[region_cnt]] != true) { - mm_initialize(&USR_HEAP[regionx_heap_idx[region_cnt]], regionx_start[region_cnt], regionx_size[region_cnt]); + mm_initialize(&BASE_HEAP[regionx_heap_idx[region_cnt]], regionx_start[region_cnt], regionx_size[region_cnt]); heapx_is_init[regionx_heap_idx[region_cnt]] = true; continue; } - mm_addregion(&USR_HEAP[regionx_heap_idx[region_cnt]], regionx_start[region_cnt], regionx_size[region_cnt]); + mm_addregion(&BASE_HEAP[regionx_heap_idx[region_cnt]], regionx_start[region_cnt], regionx_size[region_cnt]); } } #endif
[viostor] introduce pnp data helpers
@@ -85,6 +85,7 @@ SrbGetPnpInfo(_In_ PVOID Srb, ULONG* PnPFlags, ULONG* PnPAction) { #define SRB_DATA_TRANSFER_LENGTH(Srb) SrbGetDataTransferLength(Srb) #define SRB_LENGTH(Srb) SrbGetSrbLength(Srb) #define SRB_WMI_DATA(Srb) (PSRBEX_DATA_WMI)SrbGetSrbExDataByType((PSTORAGE_REQUEST_BLOCK)Srb, SrbExDataTypeWmi) +#define SRB_PNP_DATA(Srb) (PSRBEX_DATA_PNP)SrbGetSrbExDataByType((PSTORAGE_REQUEST_BLOCK)Srb, SrbExDataTypePnP) #define SRB_GET_SENSE_INFO(Srb, senseInfoBuffer, senseInfoBufferLen) SrbGetScsiData(Srb, NULL, NULL, NULL, &senseInfoBuffer, &senseInfoBufferLen) #define SRB_GET_SENSE_INFO_BUFFER(Srb, senseInfoBuffer) senseInfoBuffer = SrbGetSenseInfoBuffer(Srb) #define SRB_GET_SENSE_INFO_BUFFER_LENGTH(Srb, senseInfoBufferLength) senseInfoBufferLength = SrbGetSenseInfoBufferLength(Srb) @@ -108,6 +109,7 @@ SrbGetPnpInfo(_In_ PVOID Srb, ULONG* PnPFlags, ULONG* PnPAction) { #define SRB_DATA_TRANSFER_LENGTH(Srb) Srb->DataTransferLength #define SRB_LENGTH(Srb) Srb->Lenght #define SRB_WMI_DATA(Srb) (PSCSI_WMI_REQUEST_BLOCK)Srb +#define SRB_PNP_DATA(Srb) (PSCSI_PNP_REQUEST_BLOCK)Srb #define SRB_GET_SENSE_INFO(Srb, senseInfoBuffer, senseInfoBufferLen) senseInfoBuffer = Srb->SenseInfoBuffer;senseInfoBufferLen = Srb->SenseInfoBufferLength #define SRB_GET_SENSE_INFO_BUFFER(Srb, senseInfoBuffer) senseInfoBuffer = Srb->SenseInfoBuffer #define SRB_GET_SENSE_INFO_BUFFER_LENGTH(Srb, senseInfoBufferLength) senseInfoBufferLength = Srb->SenseInfoBufferLength
prov: update provider util to be less aggressive about changing things unnecessarily
@@ -72,6 +72,9 @@ int ossl_prov_cipher_load_from_params(PROV_CIPHER *pc, const OSSL_PARAM *p; const char *propquery; + if (params == NULL) + return 1; + if (!load_common(params, &propquery, &pc->engine)) return 0; @@ -140,10 +143,12 @@ int ossl_prov_digest_load_from_params(PROV_DIGEST *pd, const OSSL_PARAM *p; const char *propquery; + if (params == NULL) + return 1; + if (!load_common(params, &propquery, &pd->engine)) return 0; - p = OSSL_PARAM_locate_const(params, OSSL_ALG_PARAM_DIGEST); if (p == NULL) return 1;
Try flake8 fix
@@ -16,7 +16,7 @@ except ImportError: try: import isitgr except ImportError: - pass # If this is required, an error message will be emitted below, but this is necessary to avoid weird nan issues from isitgr functions + pass #prevent nans from isitgr from . import ccllib as lib from .pyutils import check
Apply Hudson's corrections.
@@ -91,17 +91,20 @@ int putnstr_async(const char *str, size_t len, subscribe_cb cb, void* userdata) } syscall_return_t com = command2(DRIVER_NUM_CONSOLE, 1, len, 0); - if (com.type >= TOCK_SYSCALL_SUCCESS) { + if (com.type == TOCK_SYSCALL_SUCCESS) { return TOCK_SUCCESS; + } else if (com.type > TOCK_SYSCALL_SUCCESS) { + // Returned an incorrect success code + return TOCK_FAIL; } else { return tock_error_to_rcode(com.data[1]); } } int getnstr_async(char *buf, size_t len, subscribe_cb cb, void* userdata) { - allow_rw_return_t ro = allow_readwrite(DRIVER_NUM_CONSOLE, 1, buf, len); - if (ro.success == 0) { - return tock_error_to_rcode(ro.error); + allow_rw_return_t rw = allow_readwrite(DRIVER_NUM_CONSOLE, 1, buf, len); + if (rw.success == 0) { + return tock_error_to_rcode(rw.error); } subscribe_return_t sub = subscribe2(DRIVER_NUM_CONSOLE, 2, cb, userdata);
sys/log: Add doxygen to new functions
@@ -568,9 +568,33 @@ log_level_set(uint8_t module, uint8_t level) #endif #if MYNEWT_VAL(LOG_STORAGE_INFO) +/** + * Return information about log storage + * + * This return information about size and usage of storage on top of which log + * instance is created. + * + * @param log The log to query. + * @param info The destination to write information to. + * + * @return 0 on success, error code otherwise + * + */ int log_storage_info(struct log *log, struct log_storage_info *info); #endif #if MYNEWT_VAL(LOG_STORAGE_WATERMARK) +/** + * Set watermark on log + * + * This sets watermark on log item with given index. This information is used + * to calculate size of entries which were logged after watermark item, i.e. + * unread items. The watermark is stored persistently for each log. + * + * @param log The log to set watermark on. + * @param index The index of a watermarked item. + * + * @return 0 on success, error code otherwise. + */ int log_set_watermark(struct log *log, uint32_t index); #endif
tests: internal: hashtable: check single insert
@@ -83,6 +83,29 @@ void test_create_zero() TEST_CHECK(ht == NULL); } +/* bug 355 */ +void test_single() +{ + int ret; + char *out_buf; + size_t out_size; + struct flb_hash *ht; + + ht = flb_hash_create(1); + TEST_CHECK(ht != NULL); + + ret = ht_add(ht, "key", "value"); + TEST_CHECK(ret != -1); + + ret = flb_hash_get(ht, "key", 3, &out_buf, &out_size); + TEST_CHECK(ret >= 0); + + ret = flb_hash_get(ht, "NOT", 3, &out_buf, &out_size); + TEST_CHECK(ret == -1); + + flb_hash_destroy(ht); +} + void test_small_table() { int i; @@ -193,6 +216,7 @@ void test_delete_all() TEST_LIST = { { "zero_size", test_create_zero }, + { "single", test_single }, { "small_table", test_small_table }, { "medium_table", test_medium_table }, { "chaining_count", test_chaining },
More info in serialization exception [cuda/methods/serialization_helper.cpp]
@@ -49,7 +49,8 @@ ui32 NCatboostCuda::UpdateFeatureId(TBinarizedFeaturesManager& featuresManager, auto& floatInfo = map.FloatFeatures.at(featureId); const ui32 featureManagerId = featuresManager.GetFeatureManagerIdForFloatFeature(floatInfo.DataProviderId); CB_ENSURE(floatInfo.Borders == featuresManager.GetBorders(featureManagerId), - "Error: progress borders should be consistent"); + "Error: progress borders should be consistent: featureId=" << featureId << " borders " + << Print(floatInfo.Borders) << " vs " << Print(featuresManager.GetBorders(featureManagerId))); return featureManagerId; } else if (map.CatFeaturesMap.contains(featureId)) { const ui32 dataProviderId = map.CatFeaturesMap.at(featureId);
Enable invalid param test in sha256
@@ -18,7 +18,7 @@ void mbedtls_sha1( data_t * src_str, data_t * hash ) } /* END_CASE */ -/* BEGIN_CASE depends_on:MBEDTLS_SHA256_C:NOT_DEFINED */ +/* BEGIN_CASE depends_on:MBEDTLS_SHA256_C */ void sha256_invalid_param( ) { mbedtls_sha256_context ctx;
Add gif-multiple-loop-counts comments
# Feed this file to script/make-artificial.go +# This GIF image contains multiple "loop count" entries. +# +# The GIF89a specification doesn't discuss loop counts: it is an extension to +# the format. Neither the official spec nor unofficial documents (e.g. +# http://www.vurdalakov.net/misc/gif/netscape-looping-application-extension) +# state how to interpret having more than one of them: whether to accept the +# first, accept the last, reject the overall animated image as invalid, etc. +# Different spec-compliant decoders can choose different policies. +# +# Wuffs reports the most recent loop count seen, which may change over the +# course of stepping through the frames. Programs that use Wuffs may enforce +# their own policy (e.g. accept first, accept last, etc.) on top of that. +# +# Note also that the loop count as written in the GIF file format has "excludes" +# semantics: it is the number of times to repeat the animation excluding the +# initial play-through (and 0 means to loop forever). An "excludes" loop count +# of 3 means to play each animation frame 4 times. An absent loop count means to +# play each animation frame exactly once. +# +# Wuffs' API provides "includes" semantics, across all of its supported +# animation file formats. An "includes" loop count of 3 means to play each +# animation frame 3 times. There is no absent option. Wuffs' numbers and GIF's +# numbers will generally differ by 1 (other than the absent case, and both +# using 0 to mean forever). + make gif header
Using 'armasm' as an assembler for armclang
@@ -29,7 +29,7 @@ toolchain("armclang") set_toolset("cxx", "armclang") set_toolset("ld", "armlink") set_toolset("ar", "armar") - set_toolset("as", "armclang") + set_toolset("as", "armasm") on_check(function (toolchain) import("lib.detect.find_tool") @@ -59,7 +59,7 @@ toolchain("armclang") toolchain:add("cxflags", "-target=" .. arch_target) toolchain:add("cxflags", "-mcpu=" .. arch_cpu) toolchain:add("asflags", "-target=" .. arch_target) - toolchain:add("asflags", "-mcpu=" .. arch_cpu) + toolchain:add("asflags", "--cpu=" .. arch_cpu) toolchain:add("ldflags", "--cpu " .. arch_cpu_ld) end end)
RP2: Don't build sensor.c if it's disabled.
* * Sensor abstraction layer for nRF port. */ +#if MICROPY_PY_SENSOR #include <stdio.h> #include <string.h> #include <stdint.h> @@ -1075,3 +1076,4 @@ int sensor_snapshot(sensor_t *sensor, image_t *image, uint32_t flags) return 0; } +#endif
iOS: modify Podfile for app template
@@ -26,7 +26,7 @@ post_install do |installer| # modify OTHER_LDFLAGS config_line = build_settings['OTHER_LDFLAGS'] if (config_line != nil) - config_line = config_line.gsub("$(inherited) -ObjC ","") + config_line = config_line.gsub("$(inherited) ","") build_settings['OTHER_LDFLAGS'] = config_line puts "updated OTHER_LDFLAGS = "+config_line end
platforms: disabling sshd
@@ -174,8 +174,8 @@ let bin_rcce_lu = [ "/sbin/" ++ f | f <- [ -- List of modules that are arch-independent and always built modules_generic = [ "/eclipseclp_ramfs.cpio.gz", - "/skb_ramfs.cpio.gz", - "/sshd_ramfs.cpio.gz" ] + "/skb_ramfs.cpio.gz" ] +-- "/sshd_ramfs.cpio.gz" ] -- x86_64-specific modules to build by default -- this should shrink as targets are ported and move into the generic list above @@ -220,7 +220,7 @@ let bin_rcce_lu = [ "/sbin/" ++ f | f <- [ "sfxge", "sfn5122f", "slideshow", - "sshd", +-- "sshd", "vbe", "virtio_blk", "virtio_blk_host",
updates for 4.4
@@ -50,7 +50,7 @@ function bv_nektarpp_info export NEKTAR_PLUS_PLUS_FILE=${NEKTAR_PLUS_PLUS_FILE:-"nektar-${NEKTAR_PLUS_PLUS_VERSION}.tar.gz"} export NEKTAR_PLUS_PLUS_COMPATIBILITY_VERSION=${NEKTAR_PLUS_PLUS_COMPATIBILITY_VERSION:-"4.4"} export NEKTAR_PLUS_PLUS_BUILD_DIR=${NEKTAR_PLUS_PLUS_BUILD_DIR:-"nektar++-${NEKTAR_PLUS_PLUS_VERSION}"} - export NEKTAR_PLUS_PLUS_URL=${NEKTAR_PLUS_PLUS_URL:-"http://www.nektar.info/downloads/nektar++-${NEKTAR_PLUS_PLUS_VERSION}/src"} + export NEKTAR_PLUS_PLUS_URL=${NEKTAR_PLUS_PLUS_URL:-"https://www.nektar.info/wp-content/uploads/2017/03/"} export NEKTAR_PLUS_PLUS_MD5_CHECKSUM="" export NEKTAR_PLUS_PLUS_SHA256_CHECKSUM="" } @@ -236,12 +236,12 @@ function apply_nektarpp_patch return 1 fi - if [[ "$OPSYS" == "Darwin" ]]; then +# if [[ "$OPSYS" == "Darwin" ]]; then apply_nektarpp_4_4_OSX_patch if [[ $? != 0 ]]; then return 1 fi - fi +# fi fi return 0
Sync sig handler naming.
@@ -181,7 +181,7 @@ static int open_handlers(void) return num_good; } -static gboolean sighandler(gpointer user_data) +static gboolean handle_sig(gpointer user_data) { tcmu_dbg("Have received signal!\n"); @@ -1120,8 +1120,8 @@ int main(int argc, char **argv) } loop = g_main_loop_new(NULL, FALSE); - if (g_unix_signal_add(SIGINT, sighandler, loop) <= 0 || - g_unix_signal_add(SIGTERM, sighandler, loop) <= 0 || + if (g_unix_signal_add(SIGINT, handle_sig, loop) <= 0 || + g_unix_signal_add(SIGTERM, handle_sig, loop) <= 0 || g_unix_signal_add(SIGHUP, handle_sighup, loop) <= 0) { tcmu_err("couldn't setup signal handlers\n"); goto err_tcmulib_close;
fix double communication when not all required scopes could be registered, leading to strange effects
@@ -565,9 +565,10 @@ void oidcd_handleRegister(struct ipcPipe pipes, const char* account_json, // did not get all scopes necessary for oidc-agent oidc_errno = OIDC_EUNSCOPE; ipc_writeToPipe(pipes, RESPONSE_ERROR_CLIENT, oidc_serror(), res); + } else { + ipc_writeToPipe(pipes, RESPONSE_SUCCESS_CLIENT, res); } secFree(scopes); - ipc_writeToPipe(pipes, RESPONSE_SUCCESS_CLIENT, res); } secFreeJson(json_res1); }
Allow Tuya devices to poll onOff attribute. This should generally be allowed for Tuya devices as well. The code handles any non-existence, so no polling will occur if the on/off cluster doesn't exist.
@@ -276,19 +276,10 @@ void PollManager::pollTimerFired() } if (suffix == RStateOn && lightNode) - { - item = r->item(RAttrModelId); - - if (UseTuyaCluster(lightNode->manufacturer())) - { - //Thoses devices haven't cluster 0006, and use Cluster specific - } - else { clusterId = ONOFF_CLUSTER_ID; attributes.push_back(0x0000); // onOff } - } else if (suffix == RStateBri && isOn) { NodeValue &val = restNode->getZclValue(LEVEL_CLUSTER_ID, 0x0000);
logger: control: use hudelements
@@ -202,7 +202,7 @@ void Logger::stop_logging() { #endif } clear_log_data(); - control_client_check(m_params->control, global_control_client, gpu.c_str()); + control_client_check(HUDElements.params->control, global_control_client, gpu.c_str()); const char * cmd = "LoggingFinished"; control_send(global_control_client, cmd, strlen(cmd), 0, 0); }
Update spell checker exclusion.
@@ -1711,6 +1711,7 @@ pxtask pxtaskbuffer pxtaskcode pxtaskdefinition +pxtaskin pxtaskstatus pxtaskstatusarray pxtasktag @@ -2310,6 +2311,8 @@ ullporttaskhasfpucontext ulmair ulmask ulmatchvalueforonetick +ulnumberofheapallocations +ulnumberofheapfrees ulong ulparameter ulparameters @@ -2342,6 +2345,8 @@ ultasknotifyvalueclear ultasknotifyvalueclearindexed ultaskswitchedintime ultaskswitchrequested +ultotalmemoryallocations +ultotalmemoryfrees ultotalruntime ultotalruntimediv ulusingfpu @@ -2773,6 +2778,9 @@ xgenericlistitem xgetfreestackspace xhandle xhead +xheapbytescurrentlyallocated +xheapbytescurrentlyheld +xheapbyteshighwatermark xheapregions xhigherpriorittaskwoken xhigherprioritytaskwoken @@ -2799,6 +2807,7 @@ xlistend xmair xmaxcount xmaxexpirycountbeforestopping +xmaxheapbyteseverheld xmaxsize xmc xmessage
Prevent selecting tiles outside of sprite tileset
@@ -48,8 +48,11 @@ const SpriteTilePalette = ({ id }: SpriteTilePaletteProps) => { const currentTargetRect = e.currentTarget.getBoundingClientRect(); const offsetX = Math.floor((e.pageX - currentTargetRect.left) / 8 / zoom) * 8; - const offsetY = - Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom) * 8; + const offsetY = Math.min( + height - 16, + Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom) * 8 + ); + setSelectedTiles({ x: offsetX, y: offsetY, @@ -65,16 +68,28 @@ const SpriteTilePalette = ({ id }: SpriteTilePaletteProps) => { return; } const currentTargetRect = wrapperRef.current.getBoundingClientRect(); - const offsetX = Math.floor((e.pageX - currentTargetRect.left) / 8 / zoom); - const offsetY = Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom); + const offsetX = Math.max( + 0, + Math.min( + width / 8 - 1, + Math.floor((e.pageX - currentTargetRect.left) / 8 / zoom) + ) + ); + const offsetY = Math.max( + 0, + Math.min( + height / 8 - 2, + Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom) + ) + ); const x = Math.min(selectedTiles.x / 8, offsetX) * 8; const y = Math.min(selectedTiles.y / 8, offsetY) * 8; - const width = Math.max( + const selectionWidth = Math.max( 1, offsetX < selectedTiles.x / 8 ? 1 : offsetX - selectedTiles.x / 8 + 1 ); - const height = Math.ceil( + const selectionHeight = Math.ceil( Math.max( 1, offsetY < selectedTiles.y / 8 ? 2 : offsetY - selectedTiles.y / 8 + 1 @@ -83,11 +98,11 @@ const SpriteTilePalette = ({ id }: SpriteTilePaletteProps) => { setSelectedTiles({ x, y, - width, - height, + width: selectionWidth, + height: selectionHeight, }); }, - [zoom, selectedTiles, setSelectedTiles] + [zoom, height, selectedTiles, setSelectedTiles] ); const onDragEnd = (e: MouseEvent) => { @@ -97,16 +112,24 @@ const SpriteTilePalette = ({ id }: SpriteTilePaletteProps) => { const onHover = (e: React.MouseEvent<HTMLDivElement, MouseEvent>) => { const currentTargetRect = e.currentTarget.getBoundingClientRect(); - const offsetX = Math.floor((e.pageX - currentTargetRect.left) / 8 / zoom); - const offsetY = Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom); - if (offsetX < 0 || offsetY < 0 || offsetX > 10 || offsetY > 18) { - setHoverTile(undefined); - } else { + const offsetX = Math.max( + 0, + Math.min( + width / 8 - 1, + Math.floor((e.pageX - currentTargetRect.left) / 8 / zoom) + ) + ); + const offsetY = Math.max( + 0, + Math.min( + height / 8 - 2, + Math.floor((e.pageY - currentTargetRect.top) / 8 / zoom) + ) + ); setHoverTile({ x: offsetX, y: offsetY, }); - } }; const onMouseOut = () => {
Load vector form of ewb->error_weights
@@ -155,8 +155,7 @@ static bool realign_weights( vfloat4 color = color_base + color_offset * plane_weight; vfloat4 origcolor = blk->texel(texel); - vfloat4 error_weight = vfloat4(ewb->texel_weight_r[texel], ewb->texel_weight_g[texel], - ewb->texel_weight_b[texel], ewb->texel_weight_a[texel]); + vfloat4 error_weight = ewb->error_weights[texel]; vfloat4 colordiff = color - origcolor; vfloat4 color_up_diff = colordiff + color_offset * plane_up_weight;
Travis: Truncate list of plugins for Haskell job
@@ -144,7 +144,7 @@ before_script: - | if [[ $HASKELL == ON ]]; then bindings="haskell" - plugins="resolver_fm_hpu_b;dump;ini;dini;sync;error;hosts;list;glob;profile;spec;network;tracer;timeofday;base64;haskell" + plugins="resolver_fm_hpu_b;dump;sync;error;list;spec" tools="kdb" fi - |
vtx: ensure transfer flag is only cleared on TC
@@ -82,7 +82,6 @@ void vtx_uart_isr() { if (vtx_frame_offset < vtx_frame_length) { LL_USART_TransmitData8(USART.channel, vtx_frame[vtx_frame_offset]); vtx_frame_offset++; - vtx_transfer_done = 0; } if (vtx_frame_offset == vtx_frame_length) { LL_USART_DisableIT_TXE(USART.channel);
hw/bus: Fix typo
@@ -221,7 +221,7 @@ bus_node_unlock(struct os_dev *node); /** * Get node configured lock timeout * - * Returns lock timeout as configured for node. If not timeout is configured for + * Returns lock timeout as configured for node. If no timeout is configured for * give node or no node is specified, default timeout is returned. * * @param node Node to get timeout for
Add WASIENV option
@@ -23,6 +23,14 @@ if(EMSCRIPTEN) set(APP_DIR "platforms/emscripten") endif() +if(WASIENV) + set(CMAKE_C_COMPILER "wasicc") + set(CMAKE_CXX_COMPILER "wasic++") + + set(OUT_FILE "wasm3.wasm") + set(APP_DIR "platforms/emscripten") +endif() + if(M EQUAL 32) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -m32") endif()
fix: add missing `` `` around esp_partition_find in docs
@@ -121,7 +121,7 @@ and perform operations on them. These functions are declared in ``esp_partition. - :cpp:func:`esp_partition_next` advances iterator to the next partition found - :cpp:func:`esp_partition_iterator_release` releases iterator returned by ``esp_partition_find`` - :cpp:func:`esp_partition_find_first` is a convenience function which returns structure - describing the first partition found by esp_partition_find + describing the first partition found by ``esp_partition_find`` - :cpp:func:`esp_partition_read`, :cpp:func:`esp_partition_write`, :cpp:func:`esp_partition_erase_range` are equivalent to :cpp:func:`spi_flash_read`, :cpp:func:`spi_flash_write`, :cpp:func:`spi_flash_erase_range`, but operate within partition boundaries
Add proximity to Subpaths;
@@ -32,6 +32,7 @@ const char* Subpaths[] = { [PATH_HANDS] = "hands", [PATH_LEFT] = "left", [PATH_RIGHT] = "right", + [PATH_PROXIMITY] = "proximity", [PATH_TRIGGER] = "trigger", [PATH_TRACKPAD] = "trackpad", [PATH_MENU] = "menu",
hatch: Enable extpwrlimit option in ectool BRANCH=hatch TEST=ectool extpwrlimit 3000 5000 Tested-by: Shelley Chen
/* Common charger defines */ #define CONFIG_CHARGE_MANAGER +#define CONFIG_CHARGE_MANAGER_EXTERNAL_POWER_LIMIT #define CONFIG_CHARGER #define CONFIG_CHARGER_BQ25710 #define CONFIG_CHARGER_DISCHARGE_ON_AC
* vscode settings cleanup
"astyle.astylerc": "${workspaceRoot}/.astylerc", "astyle.c.enable": true, "astyle.cpp.enable": true, - "clang.cflags": [ - "-D_GNU_SOURCE", - "-D_LARGEFILE_SOURCE", - "-D_FILE_OFFSET_BITS=64", - "-DIW_64", - "-std=gnu11", - "-Wc99-compat", - "-Wall", - "-Wcast-qual", - "-Wunused", - "-Wnonportable-system-include-path", - "-Wno-sign-compare", - "-Wno-unused-parameter", - "-Wno-unknown-pragmas", - "-Wno-unused-function", - "-Wno-missing-field-initializers", - "-Wno-missing-braces", - "-Wno-pragma-once-outside-header", - "-I/usr/lib/jvm/java-11-openjdk-amd64/include", - "-I${workspaceRoot}/build/include", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/cli", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/http", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/fiobj", - "-I${workspaceRoot}/build/src/generated", - "-I${workspaceRoot}/src", - "-I${workspaceRoot}/src/jbl", - "-I${workspaceRoot}/src/jql", - "-I${workspaceRoot}/src/util" - ], - "clang.cxxflags": [ - "-D_GNU_SOURCE", - "-D_LARGEFILE_SOURCE", - "-D_FILE_OFFSET_BITS=64", - "-DIW_64", - "-std=c++11", - "-Wall", - "-Wcast-qual", - "-Wunused", - "-Wnonportable-system-include-path", - "-Wno-sign-compare", - "-Wno-unused-parameter", - "-Wno-unknown-pragmas", - "-Wno-unused-function", - "-Wno-missing-field-initializers", - "-Wno-pragma-once-outside-header", - "-I/usr/lib/jvm/java-11-openjdk-amd64/include", - "-I${workspaceRoot}/build/include", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/cli", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/http", - "-I${workspaceRoot}/build/src/extern_facil/lib/facil/fiobj", - "-I${workspaceRoot}/build/src/generated", - "-I${workspaceRoot}/src", - "-I${workspaceRoot}/src/jbl", - "-I${workspaceRoot}/src/jql", - "-I${workspaceRoot}/src/util" - ], "python.pythonPath": "/usr/bin/python3", "python.linting.pylintEnabled": false, "python.linting.enabled": true,
Directory Value: Remove unused code
@@ -157,50 +157,6 @@ CppKey convertToDirectChild (CppKey const & parent, CppKey const & child) return directChild; } -/** - * @brief This function checks if `element` is an array element of `parent`. - * - * @pre The key `child` must be below `parent`. - * - * @param parent This parameter specifies a parent key. - * @param keys This variable stores a direct or indirect child of `parent`. - * - * @retval true If `element` is an array element - * @retval false Otherwise - */ -bool inline isArrayElementOf (CppKey const & parent, CppKey const & child) -{ - char const * relative = elektraKeyGetRelativeName (*child, *parent); - auto offsetIndex = elektraArrayValidateBaseNameString (relative); - if (offsetIndex <= 0) return false; - // Skip `#`, underscores and digits - relative += 2 * offsetIndex; - // The next character has to be the separation char (`/`) or end of string - if (relative[0] != '\0' && relative[0] != '/') return false; - - return true; -} - -/** - * @brief This function determines if the given key is an array parent. - * - * @param parent This parameter specifies a possible array parent. - * @param keys This variable stores the key set of `parent`. - * - * @retval true If `parent` is the parent key of an array - * @retval false Otherwise - */ -bool isArrayParent (CppKey const & parent, CppKeySet const & keys) -{ - for (auto const & key : keys) - { - if (!key.isBelow (parent)) continue; - if (!isArrayElementOf (parent, key)) return false; - } - - return true; -} - /** * @brief Return all array parents of the given key set. *
breakwater: update README (support NIC hardware timestamp)
@@ -30,7 +30,7 @@ client, or agent machines. 0. Install dependencies ``` $ sudo apt-get update -$ sudo apt-get install -y libnuma-dev libaio1 libaio-dev uuid-dev libcunit1 libcunit1-doc libcunit1-dev libmnl-dev cmake python3 python3-pip +$ sudo apt-get install -y libnuma-dev libaio1 libaio-dev uuid-dev libcunit1 libcunit1-doc libcunit1-dev libmnl-dev libnl-3-dev libnl-route-3-dev libibverbs-dev cmake python3 python3-pip $ sudo python3 -m pip install paramiko ``` @@ -61,9 +61,9 @@ breakwater$ sudo ./scripts/setup_machine.sh breakwater$ make -C apps/netbench/ ``` -6. Start IOKernel (hardware timestamp feature is under testing) +6. Start IOKernel ``` -breakwater$ sudo ./iokerneld no_hw_qdel +breakwater$ sudo ./iokerneld ``` 7. Start application. The following example will start a server with Breakwater and make a client to generate workload with exponential distribution (10us average and 100us of SLO) at a rate of 100k requests/s by 100 threads. @@ -80,3 +80,6 @@ breakwater$ sudo ./apps/netbench/netbench breakwater ../client.config client 100 ## Reproducing paper results Please refer to [breakwater-artifact](https://github.com/inhocho89/breakwater-artifact) repository for experiment scripts to reproduce the paper results. + +## Contact +If you have any questions, feel free to contact Inho Cho <[email protected]>.
SOVERSION bump to version 4.3.0
@@ -38,8 +38,8 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # Major version is changed with every backward non-compatible API/ABI change, minor version changes # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 4) -set(SYSREPO_MINOR_SOVERSION 2) -set(SYSREPO_MICRO_SOVERSION 8) +set(SYSREPO_MINOR_SOVERSION 3) +set(SYSREPO_MICRO_SOVERSION 0) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
Yan LR: Fix incorrect reference test data
#define PREFIX "user/tests/yanlr/" ksNew (10, - keyNew (PREFIX, KEY_VALUE, "@CONFIG_FILEPATH@", KEY_META, "array", "#0", KEY_END), + keyNew (PREFIX, KEY_VALUE, "@CONFIG_FILEPATH@", KEY_META, "array", "#1", KEY_END), keyNew (PREFIX "#0/bla", KEY_VALUE, "blubb", KEY_END), keyNew (PREFIX "#1", KEY_META, "array", "#0", KEY_END), keyNew (PREFIX "#1/#0", KEY_VALUE, "hello", KEY_END),
Change scan to 10000 blocks
@@ -741,7 +741,7 @@ bool IsNameFeeEnough(const CTransaction& tx, const NameTxInfo& nti, const CBlock //LogPrintf("IsNameFeeEnough(): pindexBlock->nHeight = %d, op = %s, nameSize = %lu, valueSize = %lu, nRentalDays = %d, txFee = %"PRI64d"\n", // lastPoW->nHeight, nameFromOp(nti.op), nti.name.size(), nti.value.size(), nti.nRentalDays, txFee); bool txFeePass = false; - for (int i = 1; i <= 10; i++) + for (int i = 1; i <= 10000; i++) { CAmount netFee = GetNameOpFee(lastPoW, nti.nRentalDays, nti.op, nti.name, nti.value); //LogPrintf(" : netFee = %"PRI64d", lastPoW->nHeight = %d\n", netFee, lastPoW->nHeight);
graph-push-hook: find group associated with app resource
/+ store=graph-store +/+ met=metadata /+ res=resource /+ graph /+ group == :: +$ agent (push-hook:push-hook config) +:: +++ is-member + |= [=resource:res =bowl:gall] + ^- ? + =/ grp ~(. group bowl) + =/ group-paths (groups-from-resource:met [%graph (en-path:res resource)]) + ?~ group-paths %.n + (is-member:grp src.bowl i.group-paths) -- :: %- agent:dbug ++ should-proxy-update |= =vase ^- ? - |^ =/ =update:store !<(update:store vase) ?- -.q.update - %add-graph (is-member resource.q.update) - %remove-graph (is-member resource.q.update) - %add-nodes (is-member resource.q.update) - %remove-nodes (is-member resource.q.update) - %add-signatures (is-member resource.uid.q.update) - %remove-signatures (is-member resource.uid.q.update) - %archive-graph (is-member resource.q.update) + %add-graph (is-member resource.q.update bowl) + %remove-graph (is-member resource.q.update bowl) + %add-nodes (is-member resource.q.update bowl) + %remove-nodes (is-member resource.q.update bowl) + %add-signatures (is-member resource.uid.q.update bowl) + %remove-signatures (is-member resource.uid.q.update bowl) + %archive-graph (is-member resource.q.update bowl) %unarchive-graph %.n %add-tag %.n %remove-tag %.n %keys %.n %tags %.n %tag-queries %.n - %run-updates (is-member resource.q.update) + %run-updates (is-member resource.q.update bowl) == :: - ++ is-member - |= =resource:res - ^- ? - (is-member:grp src.bowl (en-path:res resource)) - -- -:: ++ resource-for-update |= =vase ^- (unit resource:res) ++ initial-watch |= [=path =resource:res] ^- vase - ?> (is-member:grp src.bowl (en-path:res resource)) + ?> (is-member resource bowl) !> ^- update:store ?~ path :: new subscribe
hark-graph-hook: use %mention type for +is-mention
^- ? ?. mentions %.n ?~ contents %.n - ?. ?=(%text -.i.contents) + ?. ?=(%mention -.i.contents) $(contents t.contents) - =/ res - (find (scow %p our.bowl) (trip text.i.contents)) - ?^ res + ?: =(our.bowl ship.i.contents) %.y $(contents t.contents) ::
fixed rules files for LDS
ATTRS{idVendor}=="0483" ATTRS{idProduct}=="5740", ENV{ID_MM_DEVICE_IGNORE}="1", MODE:="0666" ATTRS{idVendor}=="0483" ATTRS{idProduct}=="df11", MODE:="0666" -ATTRS{idVendor}=="FFF1" ATTRS{idProduct}=="FF48", ENV{ID_MM_DEVICE_IGNORE}="1", MODE:="0666" -ATTRS{idVendor}=="10C4" ATTRS{idProduct}=="EA60", ENV{ID_MM_DEVICE_IGNORE}="1", MODE:="0666" +ATTRS{idVendor}=="fff1" ATTRS{idProduct}=="ff48", ENV{ID_MM_DEVICE_IGNORE}="1", MODE:="0666" +ATTRS{idVendor}=="10c4" ATTRS{idProduct}=="ea60", ENV{ID_MM_DEVICE_IGNORE}="1", MODE:="0666"
virtio: fix the NULL dereference. Type: fix
@@ -1219,6 +1219,7 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif) if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) vif->virtio_pci_func->del_queue (vm, vif, vif->max_queue_pairs * 2); + if (vif->virtio_pci_func) vif->virtio_pci_func->device_reset (vm, vif); if (vif->hw_if_index)
board/panqueque/board.c: Format with clang-format BRANCH=none TEST=none
@@ -135,8 +135,7 @@ struct ppc_config_t ppc_chips[] = { * PS8802 set mux board tuning. * Adds in board specific gain and DP lane count configuration */ -static int board_ps8822_mux_set(const struct usb_mux *me, - mux_state_t mux_state) +static int board_ps8822_mux_set(const struct usb_mux *me, mux_state_t mux_state) { int rv = EC_SUCCESS; @@ -167,16 +166,12 @@ const struct usb_mux usb_muxes[CONFIG_USB_PD_PORT_MAX_COUNT] = { /* USB-C PPC Configuration */ struct ppc_config_t ppc_chips[CONFIG_USB_PD_PORT_MAX_COUNT] = { - [USB_PD_PORT_HOST] = { - .i2c_port = I2C_PORT_I2C1, + [USB_PD_PORT_HOST] = { .i2c_port = I2C_PORT_I2C1, .i2c_addr_flags = SN5S330_ADDR0_FLAGS, - .drv = &sn5s330_drv - }, - [USB_PD_PORT_USB3] = { - .i2c_port = I2C_PORT_I2C3, + .drv = &sn5s330_drv }, + [USB_PD_PORT_USB3] = { .i2c_port = I2C_PORT_I2C3, .i2c_addr_flags = SN5S330_ADDR1_FLAGS, - .drv = &sn5s330_drv - }, + .drv = &sn5s330_drv }, }; unsigned int ppc_cnt = ARRAY_SIZE(ppc_chips); @@ -284,14 +279,13 @@ static void board_usb_tc_disconnect(void) if (port == USB_PD_PORT_HOST) gpio_set_level(GPIO_UFP_PLUG_DET, 1); } -DECLARE_HOOK(HOOK_USB_PD_DISCONNECT, board_usb_tc_disconnect,\ +DECLARE_HOOK(HOOK_USB_PD_DISCONNECT, board_usb_tc_disconnect, HOOK_PRIO_DEFAULT); #endif /* SECTION_IS_RW */ static void board_init(void) { - } DECLARE_HOOK(HOOK_INIT, board_init, HOOK_PRIO_DEFAULT); @@ -356,6 +350,4 @@ static int command_dplane(int argc, char **argv) return EC_SUCCESS; } -DECLARE_CONSOLE_COMMAND(dplane, command_dplane, - "<2 | 4>", - "MST lane control."); +DECLARE_CONSOLE_COMMAND(dplane, command_dplane, "<2 | 4>", "MST lane control.");
Added LCD touch INT pin to ioconfig. During testing, the GT911 would not come out of reset because the INT pin could not be driven low or high.
@@ -449,6 +449,9 @@ static void imxrt_lcd_pins_init(void) IOMUXC_SetPinMux( IOMUXC_GPIO_AD_B0_02_GPIO1_IO02, /* GPIO_AD_B0_02 is configured as GPIO1_IO02 */ 0U); /* Software Input On Field: Input Path is determined by functionality */ + IOMUXC_SetPinMux( + IOMUXC_GPIO_AD_B0_11_GPIO1_IO11, /* GPIO_AD_B0_11 is configured as GPIO1_IO11 */ + 0U); /* Software Input On Field: Input Path is determined by functionality */ IOMUXC_SetPinMux( IOMUXC_GPIO_B1_15_GPIO2_IO31, /* GPIO_B1_15 is configured as GPIO2_IO31 */ 0U); /* Software Input On Field: Input Path is determined by functionality */ @@ -522,6 +525,16 @@ static void imxrt_lcd_pins_init(void) Pull / Keep Select Field: Keeper Pull Up / Down Config. Field: 100K Ohm Pull Down Hyst. Enable Field: Hysteresis Disabled */ + IOMUXC_SetPinConfig( + IOMUXC_GPIO_AD_B0_11_GPIO1_IO11, /* GPIO_AD_B0_11 PAD functional properties : */ + 0x10B0u); /* Slew Rate Field: Slow Slew Rate + Drive Strength Field: R0/6 + Speed Field: medium(100MHz) + Open Drain Enable Field: Open Drain Disabled + Pull / Keep Enable Field: Pull/Keeper Enabled + Pull / Keep Select Field: Keeper + Pull Up / Down Config. Field: 100K Ohm Pull Down + Hyst. Enable Field: Hysteresis Disabled */ IOMUXC_SetPinConfig( IOMUXC_GPIO_B1_15_GPIO2_IO31, /* GPIO_B1_15 PAD functional properties : */ 0x10B0u); /* Slew Rate Field: Slow Slew Rate
Update networks.json. This is a merge from 3.1RC.
{ "name": "host_llnl_closed_agate.xml" }, - { - "name": "host_llnl_closed_cmax.xml" - }, { "name": "host_llnl_closed_jade.xml" }, { "name": "host_llnl_closed_sierra.xml" }, + { + "name": "host_llnl_closed_tron.xml" + }, { "name": "host_llnl_closed_zin.xml" },
Fix obsolete comments referencing JoinPathExtraData.extra_lateral_rels. That field went away in commit but it seems that commit re-introduced some comments mentioning it. Noted by James Coleman, though this isn't exactly his proposed new wording. Also thanks to Justin Pryzby for software archaeology. Discussion:
@@ -670,8 +670,8 @@ try_partial_nestloop_path(PlannerInfo *root, /* * If the inner path is parameterized, the parameterization must be fully * satisfied by the proposed outer path. Parameterized partial paths are - * not supported. The caller should already have verified that no - * extra_lateral_rels are required here. + * not supported. The caller should already have verified that no lateral + * rels are required here. */ Assert(bms_is_empty(joinrel->lateral_relids)); if (inner_path->param_info != NULL) @@ -984,8 +984,8 @@ try_partial_hashjoin_path(PlannerInfo *root, /* * If the inner path is parameterized, the parameterization must be fully * satisfied by the proposed outer path. Parameterized partial paths are - * not supported. The caller should already have verified that no - * extra_lateral_rels are required here. + * not supported. The caller should already have verified that no lateral + * rels are required here. */ Assert(bms_is_empty(joinrel->lateral_relids)); if (inner_path->param_info != NULL) @@ -1714,7 +1714,7 @@ match_unsorted_outer(PlannerInfo *root, * partial path and the joinrel is parallel-safe. However, we can't * handle JOIN_UNIQUE_OUTER, because the outer path will be partial, and * therefore we won't be able to properly guarantee uniqueness. Nor can - * we handle extra_lateral_rels, since partial paths must not be + * we handle joins needing lateral rels, since partial paths must not be * parameterized. Similarly, we can't handle JOIN_FULL and JOIN_RIGHT, * because they can produce false null extended rows. */
Fix lv_label_set_text() crash. This routine tried to optimize a special case of setting the label text to the same address as previously set, but it did not consider whether the prior set was static and tried to realloc non-allocated memory.
@@ -188,7 +188,7 @@ void lv_label_set_text(lv_obj_t * label, const char * text) LV_ASSERT_STR(text); - if(ext->text == text) { + if(ext->text == text && ext->static_txt == 0) { /*If set its own text then reallocate it (maybe its size changed)*/ ext->text = lv_mem_realloc(ext->text, strlen(ext->text) + 1); LV_ASSERT_MEM(ext->text);
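A hypothetical repro sketch of the crash scenario (it assumes an LVGL v7-style API, an initialized display, and a valid parent object `scr`; it is not taken from the report):

#include "lvgl.h"

void label_static_then_same_pointer(lv_obj_t *scr)
{
    static const char msg[] = "hello";
    lv_obj_t *label = lv_label_create(scr, NULL);
    lv_label_set_text_static(label, msg); /* label keeps the pointer, static_txt = 1 */
    lv_label_set_text(label, msg);        /* same address: pre-fix code realloc'd msg,
                                           * which LVGL never allocated */
}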
deps: Check helm chart version in install doc.
@@ -168,3 +168,9 @@ dependencies: refPaths: - path: hack/ci/install-cri-o.sh match: BOM_VERSION + + - name: spo-current-release + version: v0.4.3 + refPaths: + - path: installation-usage.md + match: ^helm install security-profiles-operator
[tools] Fix eclipse configuration for libs.
@@ -181,6 +181,7 @@ def HandleToolOption(tools, env, project, reset): if tool.get('id').find('c.linker') != -1: options = tool.findall('option') for option in options: + # update linker script config if option.get('id').find('c.linker.scriptfile') != -1: linker_script = 'link.lds' items = env['LINKFLAGS'].split(' ') @@ -201,12 +202,22 @@ def HandleToolOption(tools, env, project, reset): linker_script = ConverToEclipsePathFormat(items[items.index('-T') + 1]).strip('"') option.set('value',linker_script) + # update nostartfiles config if option.get('id').find('c.linker.nostart') != -1: if env['LINKFLAGS'].find('-nostartfiles') != -1: option.set('value', 'true') else: option.set('value', 'false') + # update libs + if option.get('id').find('c.linker.libs') != -1 and env.has_key('LIBS'): + # remove old libs + for item in option.findall('listOptionValue'): + option.remove(item) + # add new libs + for lib in env['LIBS']: + SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': lib}) + return
DatafariUI upgraded to 1.0; remove developers section
<cassandra.sha512>2890c054666afa93fe2978c46da5db15ed7d11d60f4890574dc642bd81f7b3af40b41d3e5b8245e6c1453a165eddebeb28bcf3fef1a2b2fc6fb3b82058d7ceb0</cassandra.sha512> <cassandra.driver.version>4.13.0</cassandra.driver.version> <datafari.version>6.0-dev-Community</datafari.version> - <datafariui.version>1.0.0-beta-0.7.0</datafariui.version> - <datafariui.md5>6b3138072f34d800a56b02801b5ac0b6</datafariui.md5> + <datafariui.version>1.0</datafariui.version> + <datafariui.md5>34191a7fe86e025b6a6c8cf05a6940b5</datafariui.md5> <guava.version>26.0-jre</guava.version> <httpclient.version>4.5.13</httpclient.version> <java.version>1.11</java.version> @@ -205,61 +205,6 @@ Datafari is the ideal product for those who want to search through their data, w <name>France Labs - Search Experts</name> <url>http://www.francelabs.com/en/</url> </organization> - <developers> - <developer> - <id>culmer</id> - <name>Cedric Ulmer</name> - <email>[email protected]</email> - <organization>France Labs</organization> - <roles> - <role>architect</role> - <role>developer</role> - </roles> - <timezone>Europe/Paris</timezone> - </developer> - <developer> - <id>otavard</id> - <name>Olivier Tavard</name> - <email>[email protected]</email> - <organization>France Labs</organization> - <roles> - <role>architect</role> - <role>developer</role> - </roles> - <timezone>Europe/Paris</timezone> - </developer> - <developer> - <id>amazoyer</id> - <name>Aurelien Mazoyer</name> - <email>[email protected]</email> - <organization>France Labs</organization> - <roles> - <role>architect</role> - <role>developer</role> - </roles> - <timezone>Europe/Paris</timezone> - </developer> - <developer> - <id>jmassiera</id> - <name>Julien Massiera</name> - <email>[email protected]</email> - <organization>France Labs</organization> - <roles> - <role>developer</role> - </roles> - <timezone>Europe/Paris</timezone> - </developer> - <developer> - <id>gusai</id> - <name>Giovanni Usai</name> - <email>[email protected]</email> - <organization>France Labs</organization> - <roles> - <role>developer</role> - </roles> - <timezone>Europe/Paris</timezone> - </developer> - </developers> <issueManagement> <system>JIRA</system> <url>https://datafari.atlassian.net/secure/Dashboard.jspa</url>
hw/bsp/dialog: Update default path for flash loader. Recent versions of newt include the repository name in the generated files path.
@@ -46,7 +46,7 @@ JLINK_LOG_FILE=.jlink_log # flash_loader build for this BSP if [ -z $FLASH_LOADER ]; then FL_TGT=da1469x_flash_loader - FLASH_LOADER=$BIN_ROOT/targets/$FL_TGT/app/apps/flash_loader/flash_loader.elf + FLASH_LOADER=$BIN_ROOT/targets/$FL_TGT/app/@apache-mynewt-core/apps/flash_loader/flash_loader.elf fi if [ ! -f $FLASH_LOADER ]; then FILE=${FLASH_LOADER##$(pwd)/}
use static buffer when resetting state
@@ -160,6 +160,11 @@ static inline task_s pop_task(void) { to_free = deferred.reader; deferred.reader = deferred.reader->next; } else { + if (deferred.reader != &static_queue && static_queue.state == 2) { + to_free = deferred.reader; + deferred.writer = &static_queue; + deferred.reader = &static_queue; + } deferred.reader->write = deferred.reader->read = deferred.reader->state = 0; } @@ -169,6 +174,7 @@ static inline task_s pop_task(void) { finish: if (to_free == &static_queue) { static_queue.state = 2; + static_queue.next = NULL; } spn_unlock(&deferred.lock);
try bootstrapping libgcc_s1 on sles
@@ -98,7 +98,7 @@ cp /etc/zypp/zypp.conf $SINGULARITY_ROOTFS/$ZYPP_CONF echo 'cachedir=/var/cache/zypp-bootstrap' >> "$SINGULARITY_ROOTFS/$ZYPP_CONF" cp /etc/zypp/repos.d/* $SINGULARITY_ROOTFS/$ZYPP_CONF_DIRNAME/repos.d/. -if ! eval "$INSTALL_CMD -c $SINGULARITY_ROOTFS/$ZYPP_CONF --root $SINGULARITY_ROOTFS --gpg-auto-import-keys -n install --auto-agree-with-licenses sles-release coreutils libstdc++-devel $INSTALLPKGS"; then +if ! eval "$INSTALL_CMD -c $SINGULARITY_ROOTFS/$ZYPP_CONF --root $SINGULARITY_ROOTFS --gpg-auto-import-keys -n install --auto-agree-with-licenses sles-release coreutils libgcc_s1 libstdc++-devel $INSTALLPKGS"; then message ERROR "Bootstrap failed... exiting\n" ABORT 255 fi
Try adding west update.
@@ -4,6 +4,7 @@ build: before_script: - west init -m https://github.com/petejohanson/zephyr --mr driver-kscan-gpio-matrix-first-pass + - west update . script: - west build -b nucelo_wb55rg -- -DSHIELD=petejohanson_handwire
Add documentation for SSL_SESSION_set_cipher()
=head1 NAME -SSL_SESSION_get0_cipher - retrieve the SSL cipher associated with a session +SSL_SESSION_get0_cipher, +SSL_SESSION_set_cipher +- set and retrieve the SSL cipher associated with a session =head1 SYNOPSIS #include <openssl/ssl.h> const SSL_CIPHER *SSL_SESSION_get0_cipher(const SSL_SESSION *s); + int SSL_SESSION_set_cipher(SSL_SESSION *s, const SSL_CIPHER *cipher); =head1 DESCRIPTION @@ -18,21 +21,34 @@ connection when the session was created, or NULL if it cannot be determined. The value returned is a pointer to an object maintained within B<s> and should not be released. +SSL_SESSION_set_cipher() can be used to set the ciphersuite associated with the +SSL_SESSION B<s> to B<cipher>. For example, this could be used to set up a +session based PSK (see L<SSL_CTX_set_psk_use_session_callback(3)>). + +=head1 RETURN VALUES + +SSL_SESSION_get0_cipher() returns the SSL_CIPHER associated with the SSL_SESSION +or NULL if it cannot be determined. + +SSL_SESSION_set_cipher() returns 1 on success or 0 on failure. + =head1 SEE ALSO L<ssl(7)>, L<d2i_SSL_SESSION(3)>, L<SSL_SESSION_get_time(3)>, L<SSL_SESSION_get0_hostname(3)>, -L<SSL_SESSION_free(3)> +L<SSL_SESSION_free(3)>, +L<SSL_CTX_set_psk_use_session_callback(3)> =head1 HISTORY -SSL_SESSION_get0_cipher() was first added to OpenSSL 1.1.0 +SSL_SESSION_get0_cipher() was first added to OpenSSL 1.1.0. +SSL_SESSION_set_cipher() was first added to OpenSSL 1.1.1. =head1 COPYRIGHT -Copyright 2016 The OpenSSL Project Authors. All Rights Reserved. +Copyright 2016-2017 The OpenSSL Project Authors. All Rights Reserved. Licensed under the OpenSSL license (the "License"). You may not use this file except in compliance with the License. You can obtain a copy
Correct package name in Debian changelog
-box86 (0.1.6) unstable; urgency=low +box64 (0.1.6) unstable; urgency=low * Introduce "HotPage", to temporarily disable Dynarec on a page were writing is also occuring (can help speed up C# code) * Some work on Dynarec to limit the number of mutex use, and also allow smaller block to be built (for JIT'd programs)
VOM: missing GBP symbol
@@ -31,7 +31,8 @@ const gbp_subnet::type_t gbp_subnet::type_t::STITCHED_INTERNAL( const gbp_subnet::type_t gbp_subnet::type_t::STITCHED_EXTERNAL( 1, "stitched-external"); -const gbp_subnet::type_t gbp_subnet::type_t::TRANSPORT(1, "transport"); +const gbp_subnet::type_t gbp_subnet::type_t::TRANSPORT(2, "transport"); +const gbp_subnet::type_t gbp_subnet::type_t::L3_OUT(3, "l3-out"); singular_db<gbp_subnet::key_t, gbp_subnet> gbp_subnet::m_db;
fix(docs): Improve powershell command for setup script for failure cases
@@ -82,7 +82,7 @@ bash -c "$(wget https://zmk.dev/setup.sh -O -)" '' --wget <TabItem value="PowerShell"> ``` -iex ((New-Object System.Net.WebClient).DownloadString('https://zmk.dev/setup.ps1')) +powershell -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://zmk.dev/setup.ps1'))" ``` </TabItem>
Look for parallelism-preserving fusion only when there is no parallel hyperplane for an SCC
@@ -1763,7 +1763,7 @@ bool colour_scc_cluster (int scc_id, int *colour, int current_colour, PlutoProg* } printf("Colouring Scc %d with colour %d \n", scc_id, current_colour); - if (options->fuse == TYPED_FUSE && sccs[scc_id].is_parallel) { + if (options->fuse == TYPED_FUSE && sccs[scc_id].is_parallel && !sccs[scc_id].has_parallel_hyperplane) { printf("Scc %d has a parallel hyperplane\n", scc_id); printf("Parallelism preventing adjecency Matrix\n"); pluto_matrix_print(stdout, par_preventing_adj_mat);
BugID:17401890: fix the problem of IMCOMPATIBLE_CAST
@@ -120,10 +120,15 @@ SysTime_t SysTimeGet( void ) SysTime_t calendarTime = { .Seconds = 0, .SubSeconds = 0 }; SysTime_t sysTime = { .Seconds = 0, .SubSeconds = 0 }; SysTime_t deltaTime; + uint32_t deltaSeconds; + uint32_t deltaSubSeconds; calendarTime.Seconds = RtcGetCalendarTime( ( uint16_t* )&calendarTime.SubSeconds ); - RtcBkupRead( &deltaTime.Seconds, ( uint32_t* )&deltaTime.SubSeconds ); + RtcBkupRead( &deltaSeconds, &deltaSubSeconds ); + + deltaTime.Seconds = deltaSeconds; + deltaTime.SubSeconds = (int16_t)deltaSubSeconds; sysTime = SysTimeAdd( deltaTime, calendarTime ); @@ -142,7 +147,14 @@ SysTime_t SysTimeGetMcuTime( void ) uint32_t SysTime2Ms( SysTime_t sysTime ) { SysTime_t deltaTime; - RtcBkupRead( &deltaTime.Seconds, ( uint32_t* )&deltaTime.SubSeconds ); + uint32_t deltaSeconds; + uint32_t deltaSubSeconds; + + RtcBkupRead( &deltaSeconds, &deltaSubSeconds ); + + deltaTime.Seconds = deltaSeconds; + deltaTime.SubSeconds = (int16_t)deltaSubSeconds; + SysTime_t calendarTime = SysTimeSub( sysTime, deltaTime ); return calendarTime.Seconds * 1000 + calendarTime.SubSeconds; }
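The bug being fixed is a width mismatch: the backup-register reader writes two 32-bit words, but SubSeconds is only 16 bits wide, so casting its address to uint32_t* overwrites adjacent memory. A self-contained sketch of the safe pattern — read into full-width temporaries, then narrow explicitly — follows; rtc_bkup_read() here is a hypothetical stand-in for the real RtcBkupRead().

```c
#include <stdint.h>

typedef struct {
    uint32_t Seconds;
    int16_t  SubSeconds;               /* only 2 bytes wide */
} SysTime_t;

/* Hypothetical stand-in: the real reader fills two 32-bit backup registers. */
static void rtc_bkup_read(uint32_t *seconds, uint32_t *subseconds)
{
    *seconds = 1234u;
    *subseconds = 567u;
}

int main(void)
{
    SysTime_t delta;
    uint32_t seconds, subseconds;

    /* Wrong: rtc_bkup_read(&delta.Seconds, (uint32_t *)&delta.SubSeconds)
     * would store 4 bytes through a pointer to a 2-byte field. */

    /* Right: full-width temporaries, then an explicit narrowing cast. */
    rtc_bkup_read(&seconds, &subseconds);
    delta.Seconds    = seconds;
    delta.SubSeconds = (int16_t)subseconds;

    return (int)delta.SubSeconds == 567 ? 0 : 1;
}
```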
add actual rate defaults
#define BF_EXPO_PITCH 0.40 #define BF_EXPO_YAW 0.40 +// ******************** ACTUAL_RATES ******************** +#define ACTUAL_CENTER_SENS_ROLL 250 +#define ACTUAL_CENTER_SENS_PITCH 250 +#define ACTUAL_CENTER_SENS_YAW 250 + +#define ACTUAL_MAX_RATE_ROLL 860.0 +#define ACTUAL_MAX_RATE_PITCH 860.0 +#define ACTUAL_MAX_RATE_YAW 860.0 + +#define ACTUAL_EXPO_ROLL 0.5 +#define ACTUAL_EXPO_PITCH 0.5 +#define ACTUAL_EXPO_YAW 0.5 + // *************max angle for level mode #define LEVEL_MAX_ANGLE 65.0f
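These constants parameterise a rate curve: a centre sensitivity for small stick inputs, a maximum rate reached at full deflection, and an expo term shaping the blend between the two. As a rough illustration of how such parameters are commonly combined — not necessarily the exact curve this firmware implements — a sketch could look like this:

```c
#include <math.h>

/* Illustrative only: linear around centre, expo-shaped blend toward max_rate.
 * stick is the normalised input in [-1, 1]; rates are in deg/s. */
static float rate_curve(float stick, float center, float max_rate, float expo)
{
    float shaped = stick * (1.0f - expo)
                 + stick * stick * stick * stick * stick * expo;
    return stick * center + (max_rate - center) * fabsf(stick) * shaped;
}

/* At full deflection (stick == 1.0f) this returns max_rate; near centre the
 * slope is approximately center deg/s per unit of stick travel. */
```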
Add description of AOMP_CHECK_GIT_BRANCH for building previous versions of AOMP without incorrect-branch build errors.
@@ -111,6 +111,7 @@ The development version is the next version to be released. It is possible that ``` git checkout rel_11.5-0 git pull + export AOMP_CHECK_GIT_BRANCH=0 //Tags will be used to checkout various repos. This will ignore the detached head state to avoid build errors. ``` <b>Clone and Build:</b> ```
ssl_tls13_parse_certificate_verify(): optimize the code
@@ -257,13 +257,8 @@ static int ssl_tls13_parse_certificate_verify( mbedtls_ssl_context *ssl, if( sig_alg == MBEDTLS_PK_RSASSA_PSS ) { rsassa_pss_options.mgf1_hash_id = md_alg; - psa_algorithm_t psa_alg = mbedtls_psa_translate_md( md_alg ); - if( psa_alg == 0 ) - { - return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); - } - rsassa_pss_options.expected_salt_len = PSA_HASH_LENGTH( psa_alg ); + rsassa_pss_options.expected_salt_len = PSA_HASH_LENGTH( hash_alg ); options = (const void*) &rsassa_pss_options; } #endif /* MBEDTLS_X509_RSASSA_PSS_SUPPORT */
CI: CMake build for Pico W and Pico.
@@ -10,17 +10,15 @@ env: jobs: build: - name: ${{matrix.name}} + name: ${{matrix.name}} (C++) + runs-on: ubuntu-20.04 strategy: matrix: include: - - os: ubuntu-20.04 - name: Linux - cache-key: linux - cmake-args: '-DPICO_SDK_PATH=$GITHUB_WORKSPACE/pico-sdk -DPICO_SDK_POST_LIST_DIRS=$GITHUB_WORKSPACE/pico-extras' - apt-packages: ccache gcc-arm-none-eabi - - runs-on: ${{matrix.os}} + - name: Pico + board: pico + - name: Pico W + board: pico_w env: PICO_SDK_PATH: $GITHUB_WORKSPACE/pico-sdk @@ -59,7 +57,7 @@ jobs: - name: Install deps if: runner.os == 'Linux' run: | - sudo apt update && sudo apt install ${{matrix.apt-packages}} + sudo apt update && sudo apt install ccache gcc-arm-none-eabi - name: Create Build Environment run: cmake -E make_directory ${{runner.workspace}}/build @@ -67,7 +65,7 @@ jobs: - name: Configure CMake shell: bash working-directory: ${{runner.workspace}}/build - run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${{matrix.cmake-args}} + run: cmake $GITHUB_WORKSPACE -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DPICO_SDK_PATH=$GITHUB_WORKSPACE/pico-sdk -DPICO_SDK_POST_LIST_DIRS=$GITHUB_WORKSPACE/pico-extras -DPICO_BOARD=${{matrix.board}} - name: Build working-directory: ${{runner.workspace}}/build
Test attribute value of a predefined entity with a duff allocator
@@ -7413,6 +7413,39 @@ START_TEST(test_alloc_attribute_whitespace) #undef MAX_ALLOC_COUNT END_TEST +START_TEST(test_alloc_attribute_predefined_entity) +{ + const char *text = "<doc a='&amp;'></doc>"; + int i; +#define MAX_ALLOC_COUNT 10 + int repeat = 0; + + for (i = 0; i < MAX_ALLOC_COUNT; i++) { + /* Repeat some counts to defeat cached allocations */ + if (i == 3 && repeat == 1) { + i -= 2; + repeat++; + } + else if ((i == 2 && + (repeat == 0 || repeat == 2 || repeat == 3)) || + (i == 3 && repeat == 4)) { + i--; + repeat++; + } + allocation_count = i; + if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text), + XML_TRUE) != XML_STATUS_ERROR) + break; + XML_ParserReset(parser, NULL); + } + if (i == 0) + fail("Parse succeeded despite failing allocator"); + if (i == MAX_ALLOC_COUNT) + fail("Parse failed at maximum allocation count"); +} +#undef MAX_ALLOC_COUNT +END_TEST + /* Test that a character reference at the end of a suitably long * default value for an attribute can trigger pool growth, and recovers * if the allocator fails on it. @@ -8322,6 +8355,7 @@ make_suite(void) tcase_add_test(tc_alloc, test_alloc_comment_in_epilog); tcase_add_test(tc_alloc, test_alloc_realloc_long_attribute_value); tcase_add_test(tc_alloc, test_alloc_attribute_whitespace); + tcase_add_test(tc_alloc, test_alloc_attribute_predefined_entity); tcase_add_test(tc_alloc, test_alloc_long_attr_default_with_char_ref); tcase_add_test(tc_alloc, test_alloc_long_attr_value);
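The test relies on a "duff" allocator: a memory suite that starts failing after a configurable number of allocations, so every allocation site inside the parser gets exercised for failure handling. A standalone, simplified version of that harness (without the test framework's parser reuse and byte-by-byte parsing) could look like this:

```c
#include <stdlib.h>
#include <string.h>
#include <expat.h>

/* Allocations succeed while the counter is positive, then start failing. */
static int allocation_count = 0;

static void *duff_malloc(size_t size)
{
    if (allocation_count <= 0)
        return NULL;
    allocation_count--;
    return malloc(size);
}

static void *duff_realloc(void *ptr, size_t size)
{
    if (allocation_count <= 0)
        return NULL;
    allocation_count--;
    return realloc(ptr, size);
}

static void duff_free(void *ptr) { free(ptr); }

int main(void)
{
    const char *text = "<doc a='&amp;'></doc>";
    XML_Memory_Handling_Suite suite = { duff_malloc, duff_realloc, duff_free };
    int i;

    for (i = 0; i < 10; i++) {
        allocation_count = i;
        XML_Parser p = XML_ParserCreate_MM(NULL, &suite, NULL);
        if (p == NULL)
            continue;               /* even creating the parser may fail */
        int ok = XML_Parse(p, text, (int)strlen(text), XML_TRUE) == XML_STATUS_OK;
        XML_ParserFree(p);
        if (ok)
            break;                  /* succeeded once enough allocations were allowed */
    }
    return 0;
}
```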
Stop matching on a pattern, thus avoiding MonadFail. This no longer works in GHC 8.6, and was bad practice anyway. Fixes:
@@ -37,6 +37,7 @@ module Foreign.Lua.CoreTests (tests) where import Prelude hiding (compare) import Control.Monad (forM_) +import Data.Maybe (fromMaybe) import Data.Monoid ((<>)) import Foreign.Lua as Lua import Test.HsLua.Arbitrary () @@ -225,7 +226,8 @@ tests = testGroup "Core module" getglobal "coroutine" getfield stackTop "resume" pushLuaExpr "coroutine.create(function() coroutine.yield(9) end)" - (Just contThread) <- tothread stackTop + contThread <- fromMaybe (Prelude.error "not a thread at top of stack") + <$> tothread stackTop call 1 0 liftIO $ runWith contThread status ]
Ensure netlib will not break when setip no longer brings the network up
@@ -159,6 +159,11 @@ static void netest_initialize(void) netlib_set_ipv6netmask("eth0", (FAR const struct in6_addr *)g_ipv6_netmask); + /* New versions of netlib_set_ipv6addr will not bring the network up, + * So ensure the network is really up at this point. */ + + netlib_ifup("eth0"); + #endif /* CONFIG_NET_ICMPv6_AUTOCONF */ #else /* CONFIG_EXAMPLES_NETTEST_IPv6 */
Debugging: cosmetics
@@ -48,7 +48,9 @@ void grib_dependency_add(grib_accessor* observer, grib_accessor* observed) grib_dependency* d = h->dependencies; grib_dependency* last = 0; - /*printf("observe %p %p %s %s\n",(void*)observed,(void*)observer, observed?observed->name:"NULL", + /*printf("grib_dependency_add: observe %p %p observed=%s observer=%s\n", + (void*)observed, (void*)observer, + observed ? observed->name : "NULL", observer ? observer->name : "NULL");*/ if (!observer || !observed) {
tests: python3 changes for load balancer test Type: fix
@@ -200,7 +200,7 @@ class TestLB(VppTestCase): # This is just to roughly check that the balancing algorithm # is not completely biased. for asid in self.ass: - if load[asid] < len(self.packets) / (len(self.ass) * 2): + if load[asid] < int(len(self.packets) / (len(self.ass) * 2)): self.logger.error( "ASS is not balanced: load[%d] = %d" % (asid, load[asid])) raise Exception("Load Balancer algorithm is biased")
zephyr: 96b_carbon: use flash defines from DTS
#define FLASH_DRIVER_NAME CONFIG_SOC_FLASH_STM32_DEV_NAME #endif #define FLASH_ALIGN 1 -#define FLASH_AREA_IMAGE_0_OFFSET 0x20000 -#define FLASH_AREA_IMAGE_0_SIZE 0x20000 -#define FLASH_AREA_IMAGE_1_OFFSET 0x40000 -#define FLASH_AREA_IMAGE_1_SIZE 0x20000 -#define FLASH_AREA_IMAGE_SCRATCH_OFFSET 0x60000 -#define FLASH_AREA_IMAGE_SCRATCH_SIZE 0x20000 -/* Though sectors have variable size on this part, we've chosen - * three sectors with uniform size here. */ +/* + * Though sectors have variable size on this part, we require Zephyr + * to choose image and flash sectors with uniform size, each one + * sector in size. + */ #define FLASH_AREA_IMAGE_SECTOR_SIZE 0x20000
Avoid mentioning ctrl_str in the MAC documentation. Change to mentioning params instead.
@@ -49,7 +49,7 @@ Output the MAC in binary form. Uses hexadecimal text format if not specified. Passes options to the MAC algorithm. A comprehensive list of controls can be found in the EVP_MAC implementation documentation. -Common control strings used by EVP_MAC_ctrl_str() are: +Common parameter names used by EVP_MAC_CTX_get_params() are: =over 4 @@ -144,12 +144,12 @@ The B<list -mac-algorithms> command can be used to list them. L<openssl(1)>, L<EVP_MAC(3)>, -L<EVP_MAC_CMAC(7)>, -L<EVP_MAC_GMAC(7)>, -L<EVP_MAC_HMAC(7)>, -L<EVP_MAC_KMAC(7)>, -L<EVP_MAC_SIPHASH(7)>, -L<EVP_MAC_POLY1305(7)> +L<EVP_MAC-CMAC(7)>, +L<EVP_MAC-GMAC(7)>, +L<EVP_MAC-HMAC(7)>, +L<EVP_MAC-KMAC(7)>, +L<EVP_MAC-SIPHASH(7)>, +L<EVP_MAC-POLY1305(7)> =head1 COPYRIGHT
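For context, the parameter-based interface the page now refers to is used roughly as below in OpenSSL 3.0 as released; the documentation in this diff predates the final API, so the exact init signature may differ in that snapshot.

```c
#include <openssl/evp.h>
#include <openssl/params.h>

int main(void)
{
    unsigned char key[32] = { 0 };               /* placeholder key */
    static const unsigned char msg[] = "message";
    unsigned char out[EVP_MAX_MD_SIZE];
    size_t outlen = 0;
    int ok = 0;

    EVP_MAC *mac = EVP_MAC_fetch(NULL, "HMAC", NULL);
    EVP_MAC_CTX *ctx = mac ? EVP_MAC_CTX_new(mac) : NULL;

    /* "digest" is one of the parameter names the page talks about. */
    OSSL_PARAM params[] = {
        OSSL_PARAM_construct_utf8_string("digest", (char *)"SHA256", 0),
        OSSL_PARAM_construct_end()
    };

    if (ctx != NULL
            && EVP_MAC_init(ctx, key, sizeof(key), params)
            && EVP_MAC_update(ctx, msg, sizeof(msg) - 1)
            && EVP_MAC_final(ctx, out, &outlen, sizeof(out)))
        ok = 1;              /* out[0..outlen) holds HMAC-SHA256(key, msg) */

    EVP_MAC_CTX_free(ctx);
    EVP_MAC_free(mac);
    return ok ? 0 : 1;
}
```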
refactor(common/third-party/curl-websocket-utils.c): replace sha1 hashing logic with sha1.c d297e
#include <unistd.h> #include <sys/stat.h> #include <fcntl.h> - -#ifdef BEARSSL -#include <bearssl_hash.h> - -static void -_cws_sha1(const void *input, const size_t input_len, void *output) -{ - br_sha1_context cxt; - br_sha1_init(&cxt); - br_sha1_update(&cxt, input, input_len); - br_sha1_out(&cxt, output); -} -#elif defined(MBEDTLS) -#include "mbedtls/sha1.h" -static void -_cws_sha1(const void *input, const size_t input_len, void *output) -{ - mbedtls_sha1(input, input_len, output); -} -#elif defined(WOLFSSL) -#include <stdint.h> -#include "wolfssl/wolfcrypt/sha.h" -static void -_cws_sha1(const void *input, const size_t input_len, void *output) -{ - Sha sha; - wc_InitSha(&sha); - wc_ShaUpdate(&sha, input, input_len); - wc_ShaFinal(&sha, output); -} -#else - -#include <openssl/evp.h> +#include "sha1.h" static void _cws_sha1(const void *input, const size_t input_len, void *output) { - static const EVP_MD *md = NULL; - EVP_MD_CTX *ctx; - - ctx = EVP_MD_CTX_new(); + SHA1_CTX ctx; - if (!md) { - OpenSSL_add_all_digests(); - md = EVP_get_digestbyname("sha1"); + SHA1Init(&ctx); + SHA1Update(&ctx, input, input_len); + SHA1Final(output, &ctx); } - EVP_MD_CTX_init(ctx); - EVP_DigestInit_ex(ctx, md, NULL); - - EVP_DigestUpdate(ctx, input, input_len); - EVP_DigestFinal_ex(ctx, output, NULL); - - EVP_MD_CTX_free(ctx); -} - -#endif - static inline void _cws_debug(const char *prefix, const void *buffer, size_t len) {
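In this utility the SHA-1 digest feeds the WebSocket handshake: Sec-WebSocket-Accept is the base64 encoding of SHA-1(client key + the fixed GUID from RFC 6455). A sketch using the same SHA1Init/SHA1Update/SHA1Final interface the diff switches to, assuming the common public-domain sha1.h (the base64 step is omitted):

```c
#include <string.h>
#include "sha1.h"   /* the bundled sha1.c/sha1.h the diff adopts (assumed interface) */

/* Digest the client key concatenated with the fixed RFC 6455 GUID. */
static void websocket_accept_digest(const char *client_key,
                                    unsigned char digest[20])
{
    static const char guid[] = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
    SHA1_CTX ctx;

    SHA1Init(&ctx);
    SHA1Update(&ctx, (const unsigned char *)client_key, strlen(client_key));
    SHA1Update(&ctx, (const unsigned char *)guid, sizeof(guid) - 1);
    SHA1Final(digest, &ctx);
    /* base64-encode digest[0..20) to obtain the Sec-WebSocket-Accept value */
}
```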
env: Ensure that env_strncpy() always produces NULL-terminated string
@@ -618,7 +618,8 @@ static inline void env_sort(void *base, size_t num, size_t size, #define env_strnlen(s, smax) strnlen(s, smax) #define env_strncmp strncmp #define env_strncpy(dest, dmax, src, slen) ({ \ - strncpy(dest, src, min(dmax, slen)); \ + strncpy(dest, src, min(dmax - 1, slen)); \ + dest[dmax - 1] = '\0'; \ 0; \ })
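The rule the patch enforces — copy at most dmax-1 bytes and write the terminator unconditionally — can be wrapped as a small helper. This is a generic sketch, not the project's env layer:

```c
#include <stddef.h>
#include <string.h>

/* Copy at most dmax-1 bytes of src into dest and always NUL-terminate. */
static int copy_bounded(char *dest, size_t dmax, const char *src, size_t slen)
{
    size_t n;

    if (dest == NULL || src == NULL || dmax == 0)
        return -1;

    n = (slen < dmax - 1) ? slen : dmax - 1;
    strncpy(dest, src, n);
    dest[n] = '\0';           /* guaranteed terminator, even on truncation */
    return 0;
}
```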
removed warning about out-of-sequence packets until we find a solution to detect them correctly
@@ -230,7 +230,6 @@ static long int usernamecount; static uint64_t rcgapmax; static long int taglenerrorcount; -static long int sequenceerrorcount; static long int essidcount; static long int essiderrorcount; static long int essiddupemax; @@ -240,7 +239,6 @@ static long int malformedcount; static uint64_t timestampstart; static uint64_t timestampmin; static uint64_t timestampmax; -static uint64_t captimestampold; static uint64_t eaptimegapmax; static uint32_t eapoltimeoutvalue; @@ -472,11 +470,9 @@ essiddupemax = 0; rcgapmax = 0; eaptimegapmax = 0; malformedcount = 0; -sequenceerrorcount = 0; timestampmin = 0; timestampmax = 0; timestampstart = 0; -captimestampold = 0; return true; } /*===========================================================================*/ @@ -654,12 +650,6 @@ if(eapolmsgtimestamperrorcount > 0) "This dump file contains frames with wrong timestamps.\n" "That prevent calculation of EAPOL TIMEOUT values.\n"); } -if(sequenceerrorcount > 0) - { - printf("\nWarning: out of sequence timestamps!\n" - "This dump file contains frames with timestamps out of sequence.\n" - "That prevent calculation of EAPOL TIMEOUT values.\n"); - } printf("\n"); return; } @@ -3335,8 +3325,6 @@ if(fh_raw_out != NULL) fprintf(fh_raw_out, "*%02x\n", cs); } -if(captimestamp < captimestampold) sequenceerrorcount++; -captimestampold = captimestamp; if(timestampmin == 0) timestampmin = captimestamp; if(timestampmin > captimestamp) timestampmin = captimestamp; if(timestampmax < captimestamp) timestampmax = captimestamp;
extmod/modmotor: error check run_until_stalled Settings setters may return errors, which must be raised.
@@ -368,7 +368,7 @@ STATIC mp_obj_t motor_Motor_run_until_stalled(size_t n_args, const mp_obj_t *pos user_limit = user_limit > 100 ? 100 : user_limit; // Apply the user limit - pbio_control_settings_set_limits(&self->srv->control.settings, _speed, _acceleration, user_limit); + pb_assert(pbio_control_settings_set_limits(&self->srv->control.settings, _speed, _acceleration, user_limit)); } mp_obj_t ex = MP_OBJ_NULL; @@ -388,7 +388,7 @@ STATIC mp_obj_t motor_Motor_run_until_stalled(size_t n_args, const mp_obj_t *pos // Restore original settings if (override_duty_limit) { - pbio_control_settings_set_limits(&self->srv->control.settings, _speed, _acceleration, _actuation); + pb_assert(pbio_control_settings_set_limits(&self->srv->control.settings, _speed, _acceleration, _actuation)); } if (ex != MP_OBJ_NULL) { @@ -397,7 +397,7 @@ STATIC mp_obj_t motor_Motor_run_until_stalled(size_t n_args, const mp_obj_t *pos // Read the angle upon completion of the stall maneuver int32_t stall_point; - pbio_tacho_get_angle(self->srv->tacho, &stall_point); + pb_assert(pbio_tacho_get_angle(self->srv->tacho, &stall_point)); // Return angle at which the motor stalled return mp_obj_new_int(stall_point);
validation BUGFIX appending to empty data tree
@@ -1101,7 +1101,7 @@ _lyd_validate(struct lyd_node **tree, const struct lys_module **modules, int mod if (!mod) { break; } - if (first == *tree) { + if (!first || (first == *tree)) { /* make sure first2 changes are carried to tree */ first2 = tree; } else {
Fix link in README.md: fix wiki link to Session Management page
@@ -39,7 +39,7 @@ of claims provided in the `id_token`/ `userinfo` claims. - [OAuth 2.0 Multiple Response Type Encoding Practices 1.0](http://openid.net/specs/oauth-v2-multiple-response-types-1_0.html) - [OAuth 2.0 Form Post Response Mode 1.0](http://openid.net/specs/oauth-v2-form-post-response-mode-1_0.html) - [RFC7 7636 - Proof Key for Code Exchange by OAuth Public Clients](https://tools.ietf.org/html/rfc7636) -- [OpenID Connect Session Management 1.0](http://openid.net/specs/openid-connect-session-1_0.html) *(implementers draft; see the [Wiki](https://github.com/zmartzone/mod_auth_openidc/wiki/Session-Management) for information on how to configure it)* +- [OpenID Connect Session Management 1.0](http://openid.net/specs/openid-connect-session-1_0.html) *(implementers draft; see the [Wiki](https://github.com/zmartzone/mod_auth_openidc/wiki/OpenID-Connect-Session-Management) for information on how to configure it)* - [OpenID Connect Front-Channel Logout 1.0](http://openid.net/specs/openid-connect-frontchannel-1_0.html) *(implementers draft)* - [OpenID Connect Back-Channel Logout 1.0](https://openid.net/specs/openid-connect-backchannel-1_0.html) *(implementers draft)* - [Encoding claims in the OAuth 2 state parameter using a JWT](https://tools.ietf.org/html/draft-bradley-oauth-jwt-encoded-state-08) *(draft spec)*
doc: Expand/simplify libelektra config section
@@ -173,13 +173,12 @@ This section describes how to replicate the current Jenkins configuration. The `libelektra` build job is a multibranch pipeline job. It is easiest to add via the BlueOcean interface. -The newly added job can afterwards configured. -All options have a helptext next to them explaining what the settings do. - Most of the default settings should be ok, however some settings need to be -verified or added: +verified or added to build Elektra correctly: * In Branch Sources under Behaviours `Filter by name` should be added to exclude the `debian` branch from being build. + The reason for this is that the `debian` branch is not providing a + Jenkinsfile. * `Advanced clone behaviours` should be added and the path to the git mirror needs to be specified: `/home/jenkins/git_mirrors/libelektra`. This reference repository is created and maintained by our
[MQTT5] Doxygen
@@ -623,6 +623,7 @@ mqtt_status_t mqtt_register(struct mqtt_connection *conn, * from the client. Shall be min 1.5 x report interval. * \param clean_session Request a new session and discard pending messages with * QoS > 0, as well as client subscriptions + * \param prop_list Output properties (MQTTv5-only). * \return MQTT_STATUS_OK or an error status * * This function connects to a MQTT broker. @@ -641,6 +642,7 @@ mqtt_status_t mqtt_connect(struct mqtt_connection *conn, /** * \brief Disconnects from a MQTT broker. * \param conn A pointer to the MQTT connection. + * \param prop_list Output properties (MQTTv5-only). * * This function disconnects from a MQTT broker. */ @@ -657,6 +659,10 @@ void mqtt_disconnect(struct mqtt_connection *conn); * \param mid A pointer to message ID. * \param topic A pointer to the topic to subscribe to. * \param qos_level Quality Of Service level to use. Currently supports 0, 1. + * \param nl No Local (MQTTv5-only). + * \param rap Retain As Published (MQTTv5-only). + * \param ret_handling Retain handling options (MQTTv5-only). + * * \return MQTT_STATUS_OK or some error status * * This function subscribes to a topic on a MQTT broker. @@ -678,6 +684,7 @@ mqtt_status_t mqtt_subscribe(struct mqtt_connection *conn, * \param conn A pointer to the MQTT connection. * \param mid A pointer to message ID. * \param topic A pointer to the topic to unsubscribe from. + * \param prop_list Output properties (MQTTv5-only). * \return MQTT_STATUS_OK or some error status * * This function unsubscribes from a topic on a MQTT broker. @@ -703,6 +710,10 @@ mqtt_status_t mqtt_unsubscribe(struct mqtt_connection *conn, * Client to a Server, the Server MUST store the Application Message * and its QoS, so that it can be delivered to future subscribers whose * subscriptions match its topic name + * \param topic_alias Topic alias to send (MQTTv5-only). + * \param topic_alias_en Control whether or not to discard topic and only send + * topic alias s(MQTTv5-only). + * \param prop_list Output properties (MQTTv5-only). * \return MQTT_STATUS_OK or some error status * * This function publishes to a topic on a MQTT broker. @@ -741,6 +752,7 @@ void mqtt_set_username_password(struct mqtt_connection *conn, * \param topic A pointer to the Last Will topic. * \param message A pointer to the Last Will message (payload). * \param qos The desired QoS level. + * \param will_props Will message properties (MQTTv5-only). * * This function sets clients Last Will topic and message (payload). * If the Will Flag is set to 1 (using the function) this indicates that, @@ -781,11 +793,11 @@ encode_prop(struct mqtt_out_property_t **prop_out, mqtt_vhdr_prop_t prop_id, /*---------------------------------------------------------------------------*/ /** -* \brief Send authentication message. +* \brief Send authentication message (MQTTv5-only). * \param conn A pointer to the MQTT connection. -* \param auth_payload A pointer to auth data. * \param auth_type The type of auth to send (continue authentication or * re-authentication). +* \param prop_list Output properties. * \return MQTT_STATUS_OK or some error status * * This function send an MQTT authentication message.
nimble/ll: Send number of events in case of advertising timeout We shall send the number of advertising events when a maximum number of events was specified and the duration timeout was reached. Since the events counter is only increased if a maximum number of events was specified, we do not need extra checks here.
@@ -3041,7 +3041,8 @@ ble_ll_adv_done(struct ble_ll_adv_sm *advsm) if ((advsm->props & BLE_HCI_LE_SET_EXT_ADV_PROP_LEGACY) && (advsm->flags & BLE_LL_ADV_SM_FLAG_ADV_TERMINATE_EVT)) { ble_ll_hci_ev_send_adv_set_terminated(BLE_ERR_DIR_ADV_TMO, - advsm->adv_instance, 0, 0); + advsm->adv_instance, 0, + advsm->events); /* * For high duty directed advertising we need to send connection @@ -3172,7 +3173,8 @@ ble_ll_adv_sec_done(struct ble_ll_adv_sm *advsm) /* Check if advertising timed out */ if (advsm->duration && (aux->start_time >= advsm->adv_end_time)) { ble_ll_hci_ev_send_adv_set_terminated(BLE_ERR_DIR_ADV_TMO, - advsm->adv_instance, 0, 0); + advsm->adv_instance, 0, + advsm->events); /* * For high duty directed advertising we need to send connection
fix overlay behaviour with fullscreen apps
@@ -411,11 +411,21 @@ showoverlay() { if (!overlayexists() || selmon->overlaystatus) return; + int yoffset = selmon->showbar ? bh : 0; + + Client *c; + for (c = selmon->clients; c; c = c->next) { + if (c->tags & (1 << (selmon->pertag->curtag - 1)) && c->isfullscreen) { + yoffset = 0; + break; + } + } + for (m = mons; m; m = m->next) { m->overlaystatus = 1; } - Client *c = selmon->overlay; + c = selmon->overlay; detach(c); detachstack(c); @@ -427,7 +437,7 @@ showoverlay() { if (c->islocked) { switch (selmon->overlaymode) { case 0: - resize(c, selmon->mx + 20, selmon->my + (selmon->showbar ? bh : 0) - c->h, + resize(c, selmon->mx + 20, selmon->my + yoffset - c->h, selmon->ww - 40, c->h, True); break; case 1: @@ -460,7 +470,7 @@ showoverlay() { XRaiseWindow(dpy, c->win); switch (selmon->overlaymode) { case 0: - animateclient(c, c->x, selmon->my + ( selmon->showbar ? bh : 0 ), 0, 0, 15, 0); + animateclient(c, c->x, selmon->my + yoffset, 0, 0, 15, 0); break; case 1: animateclient(c, selmon->mx + selmon->mw - c->w, selmon->my + 40, 0, 0, 15, 0);
docs: Remove references to jenkinsfile job and add new
@@ -145,14 +145,7 @@ phrases: * jenkins build [multiconfig-gcc47-cmake-options](https://build.libelektra.org/job/elektra-multiconfig-gcc47-cmake-options/) please * jenkins build [source-package-test](https://build.libelektra.org/job/elektra-source-package-test/) please * jenkins build [homepage](https://build.libelektra.org/job/elektra-homepage/) please -* jenkins build [jenkinsfile](https://build.libelektra.org/jenkins/job/elektra-jenkinsfile/)[REGEX] please - * Use the optional `[REGEX]` field to specify which stages you want to run - The default is to run all stages. - Docker build phase stages will always be run. - Running a partial build will degrade the build result to unstable which is - displayed as failed in the github status. - Example: `jenkins build jenkinsfile[.*-fast] please` will run all stages - ending with `-fast`. +* jenkins build [libelektra](https://build.libelektra.org/jenkins/job/libelektra/) please ### Run All Tests
Fix a possible integer overflow in long_c2i. Credit to OSS-Fuzz for finding this.
@@ -149,6 +149,10 @@ static int long_c2i(ASN1_VALUE **pval, const unsigned char *cont, int len, utmp |= cont[i]; } ltmp = (long)utmp; + if (ltmp < 0) { + ASN1err(ASN1_F_LONG_C2I, ASN1_R_INTEGER_TOO_LARGE_FOR_LONG); + return 0; + } if (neg) { ltmp = -ltmp; ltmp--;
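The committed check rejects values whose cast result turns negative; a fully portable variant compares the unsigned magnitude against LONG_MAX (and LONG_MAX + 1 for negative numbers) before converting, as sketched below. This is a generic illustration, not OpenSSL's exact code path.

```c
#include <limits.h>

/* Convert an accumulated unsigned magnitude (plus sign flag) to long,
 * rejecting values that do not fit instead of overflowing on the cast. */
static int magnitude_to_long(unsigned long utmp, int neg, long *out)
{
    if (!neg) {
        if (utmp > (unsigned long)LONG_MAX)
            return 0;                            /* too large for long */
        *out = (long)utmp;
    } else if (utmp == 0) {
        *out = 0;                                /* "-0" degenerates to 0 */
    } else {
        if (utmp > (unsigned long)LONG_MAX + 1)  /* |LONG_MIN| == LONG_MAX + 1 */
            return 0;
        *out = -(long)(utmp - 1) - 1;            /* equals -utmp, no overflow */
    }
    return 1;
}
```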