message
stringlengths
6
474
diff
stringlengths
8
5.22k
gitlab: Move to the latest docker image This enables support for testing U-Boot vboot. BRANCH=none TEST=passes on gitlab:
# Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -image: jbettis/ubuntu-29sep21 +image: sjg20/ubuntu-25feb22c # You can update that image using this repo: # https://gitlab.com/zephyr-ec/gitlab-ci-runner/-/tree/main
Adjust the style to be in alignment with what the rest of code is
@@ -9,7 +9,7 @@ AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: false AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false -AllowShortFunctionsOnASingleLine: Inline +AllowShortFunctionsOnASingleLine: None AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterDefinitionReturnType: None @@ -22,7 +22,7 @@ BreakBeforeBraces: Allman BreakBeforeTernaryOperators: true BreakConstructorInitializers: AfterColon #BreakInheritanceList: AfterColon -ColumnLimit: 256 +ColumnLimit: 0 CommentPragmas: '^ IWYU pragma:' ConstructorInitializerAllOnOneLineOrOnePerLine: true ConstructorInitializerIndentWidth: 4 @@ -63,3 +63,4 @@ SpacesInSquareBrackets: false Standard: Auto TabWidth: 2 UseTab: Never +FixNamespaceComments: false
emitter: Fix an indentation oops.
@@ -3963,8 +3963,8 @@ static void run_named_call(lily_emit_state *emit, lily_ast *ast, if (va_pos != INT_MAX) { lily_type *va_type = call_type->subtypes[call_type->subtype_count - 1]; - if (va_type->cls->id != LILY_ID_OPTARG || - var_arg_head) { + if (var_arg_head || + va_type->cls->id != LILY_ID_OPTARG) { lily_type *va_list_type = get_va_type(call_type); if (va_list_type->flags & TYPE_IS_UNRESOLVED)
http_server: api/v1: metrics: add fluentbit_uptime on prometheus output
@@ -224,6 +224,7 @@ void cb_metrics_prometheus(mk_request_t *request, void *data) int len; int time_len; int start_time_len; + uint64_t uptime; size_t index; size_t num_metrics = 0; long now; @@ -274,12 +275,6 @@ void cb_metrics_prometheus(mk_request_t *request, void *data) } metric_helptxt_head = FLB_SDS_HEADER(metric_helptxt); - /* current time */ - flb_time_get(&tp); - now = flb_time_to_nanosec(&tp) / 1000000; /* in milliseconds */ - time_len = snprintf(time_str, sizeof(time_str) - 1, "%lu", now); - start_time_len = snprintf(start_time_str, sizeof(start_time_str) - 1, "%lu", config->init_time); - /* * fluentbit_input_records[name="cpu0", hostname="${HOSTNAME}"] NUM TIMESTAMP * fluentbit_input_bytes[name="cpu0", hostname="${HOSTNAME}"] NUM TIMESTAMP @@ -314,6 +309,10 @@ void cb_metrics_prometheus(mk_request_t *request, void *data) return; } + flb_time_get(&tp); + now = flb_time_to_nanosec(&tp) / 1000000; /* in milliseconds */ + time_len = snprintf(time_str, sizeof(time_str) - 1, "%lu", now); + for (i = 0; i < map.via.map.size; i++) { msgpack_object k; msgpack_object v; @@ -411,11 +410,34 @@ void cb_metrics_prometheus(mk_request_t *request, void *data) null_check(tmp_sds); } } + + /* Attach uptime */ + uptime = time(NULL) - config->init_time; + len = snprintf(time_str, sizeof(time_str) - 1, "%lu", uptime); + + tmp_sds = flb_sds_cat(sds, + "# HELP fluentbit_uptime Number of seconds that Fluent Bit has " + "been running.\n", 76); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "# TYPE fluentbit_uptime counter\n", 32); + null_check(tmp_sds); + + tmp_sds = flb_sds_cat(sds, "fluentbit_uptime ", 17); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, time_str, len); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "\n", 1); + null_check(tmp_sds); + /* Attach process_start_time_seconds metric. 
*/ + start_time_len = snprintf(start_time_str, sizeof(start_time_str) - 1, + "%lu", config->init_time); + tmp_sds = flb_sds_cat(sds, "# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n", 89); null_check(tmp_sds); tmp_sds = flb_sds_cat(sds, "# TYPE process_start_time_seconds gauge\n", 40); null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "process_start_time_seconds ", 27); null_check(tmp_sds); tmp_sds = flb_sds_cat(sds, start_time_str, start_time_len);
khan: comment changes Not sure we actually want to guarantee exactly-once response semantics, so you can't rely on request ordering to correlate requests even if you never send simultaneous requests. Also do a worked example of a %urth scry, which hopefully raises the question of whether %urth should try to support views.
** ** request-id is a 31-bit client-supplied identifier that will ** be returned along with the response, to allow correlating -** responses to simultaneous requests. (any request that may -** take more than a single arvo event is not guaranteed to -** return in order.) it may be reused; e.g. 0 could be supplied -** every time for a client that never sends simultaneous -** requests or that doesn't care about responses. +** responses with requests. it may be reused; e.g. 0 could be +** supplied every time for a client that doesn't care about +** responses. ** ** %fyrd is a request to run a thread. its arguments are ** described in the %khan vane, which handles these. it produces ** $+ each path ** $% [%once vis=view syd=desk tyl=spur] ** [%beam vis=view bem=beam] -** [%urth *] +** [%urth urth-args=*] ** == ** +** so e.g. a full %urth peek request might look like: +** +** [`@ud`%id %peek | %urth %mass ~] +** ** %move is a kernel move. these are injected into arvo, except ** again for a runtime overlay. **
tools/docker: Add libpython2.7 in order to satisfy GDB dependencies It was not possible to run xtensa-esp32-elf-gdb from container due to missing libpython2.7 library. Merges Closes
@@ -2,6 +2,7 @@ FROM ubuntu:18.04 ARG DEBIAN_FRONTEND=noninteractive +# We need libpython2.7 due to GDB tools RUN apt-get update && apt-get install -y \ apt-utils \ bison \ @@ -17,6 +18,7 @@ RUN apt-get update && apt-get install -y \ libusb-1.0-0-dev \ make \ ninja-build \ + libpython2.7 \ python3 \ python3-pip \ unzip \
dpdk: fix coverity warning Add a hint so that it's obvious that fall through in switch statement is intentional. Type: fix Fixes:
@@ -476,6 +476,7 @@ dpdk_lib_init (dpdk_main_t * dm) /* Drivers with valid speed_capa set */ case VNET_DPDK_PMD_I40E: xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE; + /* fall through */ case VNET_DPDK_PMD_E1000EM: case VNET_DPDK_PMD_IGB: case VNET_DPDK_PMD_IGC: @@ -519,6 +520,7 @@ dpdk_lib_init (dpdk_main_t * dm) /* SR-IOV VFs */ case VNET_DPDK_PMD_I40EVF: xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE; + /* fall through */ case VNET_DPDK_PMD_IGBVF: case VNET_DPDK_PMD_IXGBEVF: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
esp_http_server: reword error messages
@@ -381,7 +381,7 @@ esp_err_t httpd_resp_send_err(httpd_req_t *req, httpd_err_code_t error, const ch switch (error) { case HTTPD_501_METHOD_NOT_IMPLEMENTED: status = "501 Method Not Implemented"; - msg = "Request method is not supported by server"; + msg = "Server does not support this operation"; break; case HTTPD_505_VERSION_NOT_SUPPORTED: status = "505 Version Not Supported"; @@ -389,23 +389,23 @@ esp_err_t httpd_resp_send_err(httpd_req_t *req, httpd_err_code_t error, const ch break; case HTTPD_400_BAD_REQUEST: status = "400 Bad Request"; - msg = "Server unable to understand request due to invalid syntax"; + msg = "Bad request syntax or unsupported method"; break; case HTTPD_401_UNAUTHORIZED: status = "401 Unauthorized"; - msg = "Server known the client's identify and it must authenticate itself to get he requested response"; + msg = "No permission -- see authorization schemes"; break; case HTTPD_403_FORBIDDEN: status = "403 Forbidden"; - msg = "Server is refusing to give the requested resource to the client"; + msg = "Request forbidden -- authorization will not help"; break; case HTTPD_404_NOT_FOUND: status = "404 Not Found"; - msg = "This URI does not exist"; + msg = "Nothing matches the given URI"; break; case HTTPD_405_METHOD_NOT_ALLOWED: status = "405 Method Not Allowed"; - msg = "Request method for this URI is not handled by server"; + msg = "Specified method is invalid for this resource"; break; case HTTPD_408_REQ_TIMEOUT: status = "408 Request Timeout"; @@ -413,11 +413,11 @@ esp_err_t httpd_resp_send_err(httpd_req_t *req, httpd_err_code_t error, const ch break; case HTTPD_414_URI_TOO_LONG: status = "414 URI Too Long"; - msg = "URI is too long for server to interpret"; + msg = "URI is too long"; break; case HTTPD_411_LENGTH_REQUIRED: status = "411 Length Required"; - msg = "Chunked encoding not supported by server"; + msg = "Client must specify Content-Length"; break; case HTTPD_431_REQ_HDR_FIELDS_TOO_LARGE: status = "431 Request Header Fields Too Large";
Ben says this might work.
@@ -5,7 +5,7 @@ env.make_derivation rec { builder = ./builder.sh; src = ../../../pkg/ge-additions; - nativeBuildInputs = [ deps.ed25519 ]; + cross_inputs = [ deps.ed25519 ]; CC = "${env.host}-gcc"; AR = "${env.host}-ar";
fix stringUtils to complain with tests
@@ -112,7 +112,8 @@ char* getDateString() { * eliminates a character c if it is followed by character f */ char* strelimIfFollowed(char* str, char c, char f) { - if (!strValid(str)) { + if (str == NULL) { + oidc_setArgNullFuncError(__func__); return str; } size_t len = strlen(str); @@ -132,7 +133,8 @@ char* strelimIfFollowed(char* str, char c, char f) { * eliminates a character c if it the previous character is f */ char* strelimIfAfter(char* str, char c, char f) { - if (!strValid(str)) { + if (str == NULL) { + oidc_setArgNullFuncError(__func__); return str; } size_t len = strlen(str); @@ -149,6 +151,7 @@ char* strelimIfAfter(char* str, char c, char f) { } char* strelim(char str[], char c) { if (str == NULL) { + oidc_setArgNullFuncError(__func__); return NULL; } size_t len = strlen(str); @@ -164,6 +167,10 @@ char* strelim(char str[], char c) { } size_t strCountChar(const char* s, char c) { + if (s == NULL) { + oidc_setArgNullFuncError(__func__); + return 0; + } int i; for (i = 0; s[i]; s[i] == c ? i++ : *s++) ; @@ -178,6 +185,7 @@ int strcaseequal(const char* a, const char* b) { char* escapeCharInStr(const char* str, char c) { if (str == NULL) { + oidc_setArgNullFuncError(__func__); return NULL; } char* s = oidc_strcopy(str); @@ -207,6 +215,7 @@ int strSubStringCase(const char* h, const char* n) { int strToInt(const char* str) { if (str == NULL) { + oidc_setArgNullFuncError(__func__); return 0; } int i; @@ -216,6 +225,7 @@ int strToInt(const char* str) { unsigned long strToULong(const char* str) { if (str == NULL) { + oidc_setArgNullFuncError(__func__); return 0; } unsigned long l;
armv8: EFI loader sets correct memory attributes
@@ -232,6 +232,12 @@ static EFI_STATUS build_page_tables(struct config *cfg) { EFI_STATUS status = EFI_SUCCESS; + /* We need the current memory map to set memory attributes */ + status = update_memory_map(); + if (EFI_ERROR(status)) { + Print(L"Failed to update memory map\n"); + } + /* Page table book keeping in static buffer * so we don't need malloc & friends */ @@ -662,7 +668,10 @@ EFI_STATUS EFIAPI efi_main(EFI_HANDLE ImageHandle, print_memory_map(1); - update_memory_map(); + status = update_memory_map(); + if (EFI_ERROR(status)) { + Print(L"Failed to update memory map\n"); + } status = ST->BootServices->ExitBootServices(ImageHandle, mmap_key); if (EFI_ERROR(status)) {
mesh: Fix clearing model subscription list The code was storing a "cleared" entry in storage for every model, regardless of them having any subscriptions or not. Update hte mod_sub_list_clear() function to return a "cleared entries" count so that the calling code can decide whether any action is needed or not.
@@ -1243,28 +1243,36 @@ static u8_t va_del(u8_t *label_uuid, u16_t *addr) return STATUS_CANNOT_REMOVE; } -static void mod_sub_list_clear(struct bt_mesh_model *mod) +static size_t mod_sub_list_clear(struct bt_mesh_model *mod) { u8_t *label_uuid; + size_t clear_count; int i; /* Unref stored labels related to this model */ - for (i = 0; i < ARRAY_SIZE(mod->groups); i++) { + for (i = 0, clear_count = 0; i < ARRAY_SIZE(mod->groups); i++) { if (!BT_MESH_ADDR_IS_VIRTUAL(mod->groups[i])) { - continue; + if (mod->groups[i] != BT_MESH_ADDR_UNASSIGNED) { + mod->groups[i] = BT_MESH_ADDR_UNASSIGNED; + clear_count++; } - label_uuid = bt_mesh_label_uuid_get(mod->groups[i]); - if (!label_uuid) { - BT_ERR("Label UUID not found"); continue; } + label_uuid = bt_mesh_label_uuid_get(mod->groups[i]); + + mod->groups[i] = BT_MESH_ADDR_UNASSIGNED; + clear_count++; + + if (label_uuid) { va_del(label_uuid, NULL); + } else { + BT_ERR("Label UUID not found"); + } } - /* Clear all subscriptions (0x0000 is the unassigned address) */ - memset(mod->groups, 0, sizeof(mod->groups)); + return clear_count; } static void mod_pub_va_set(struct bt_mesh_model *model, @@ -1333,10 +1341,20 @@ send_status: status, mod_id); } #else -static void mod_sub_list_clear(struct bt_mesh_model *mod) +static size_t mod_sub_list_clear(struct bt_mesh_model *mod) { - /* Clear all subscriptions (0x0000 is the unassigned address) */ - memset(mod->groups, 0, sizeof(mod->groups)); + size_t clear_count; + int i; + + /* Unref stored labels related to this model */ + for (i = 0, clear_count = 0; i < ARRAY_SIZE(mod->groups); i++) { + if (mod->groups[i] != BT_MESH_ADDR_UNASSIGNED) { + mod->groups[i] = BT_MESH_ADDR_UNASSIGNED; + clear_count++; + } + } + + return clear_count; } static void mod_pub_va_set(struct bt_mesh_model *model, @@ -3396,15 +3414,17 @@ int bt_mesh_cfg_srv_init(struct bt_mesh_model *model, bool primary) static void mod_reset(struct bt_mesh_model *mod, struct bt_mesh_elem *elem, bool vnd, bool primary, void 
*user_data) { + size_t clear_count; + /* Clear model state that isn't otherwise cleared. E.g. AppKey * binding and model publication is cleared as a consequence * of removing all app keys, however model subscription clearing * must be taken care of here. */ - mod_sub_list_clear(mod); + clear_count = mod_sub_list_clear(mod); - if (IS_ENABLED(CONFIG_BT_SETTINGS)) { + if (IS_ENABLED(CONFIG_BT_SETTINGS) && clear_count) { bt_mesh_store_mod_sub(mod); } }
u3: rewrites +skip jet with u3i_defcons()
*/ #include "all.h" - static u3_noun - _skip_in(u3j_site* sit_u, u3_noun a) - { - if ( 0 == a ) { - return a; - } - else if ( c3n == u3du(a) ) { - return u3_none; - } else { - u3_noun hoz = u3j_gate_slam(sit_u, u3k(u3h(a))); - u3_noun vyr = _skip_in(sit_u, u3t(a)); - - switch ( hoz ) { - case c3y: return vyr; - case c3n: return u3nc(u3k(u3h(a)), vyr); - default: u3z(hoz); - u3z(vyr); - return u3_none; - } - } - } - -/* functions -*/ u3_noun - u3qb_skip(u3_noun a, - u3_noun b) +u3qb_skip(u3_noun a, u3_noun b) { - u3j_site sit_u; u3_noun pro; + u3_noun* lit = &pro; + + if ( u3_nul != a) { + u3_noun i, t = a; + u3_noun* hed; + u3_noun* tel; + u3j_site sit_u; u3j_gate_prep(&sit_u, u3k(b)); - pro = _skip_in(&sit_u, a); + + do { + u3x_cell(t, &i, &t); + + switch ( u3j_gate_slam(&sit_u, u3k(i)) ) { + case c3y: break; + + case c3n: { + *lit = u3i_defcons(&hed, &tel); + *hed = u3k(i); + lit = tel; + } break; + + default: u3m_bail(c3__exit); + } + } + while ( u3_nul != t ); + u3j_gate_lose(&sit_u); + } + + *lit = u3_nul; + return pro; } + u3_noun u3wb_skip(u3_noun cor) { u3_noun a, b; - - if ( c3n == u3r_mean(cor, u3x_sam_2, &a, u3x_sam_3, &b, 0) ) { - return u3_none; - } else { + u3x_mean(cor, u3x_sam_2, &a, u3x_sam_3, &b, 0); return u3qb_skip(a, b); } - } -
User/Pcd: Add PcdPciExpressBaseSize Needed since
@@ -43,6 +43,7 @@ extern BOOLEAN _gPcd_FixedAtBuild_PcdImageLoaderLoadHeader; #define _PCD_GET_MODE_32_PcdCpuNumberOfReservedVariableMtrrs _gPcd_FixedAtBuild_PcdCpuNumberOfReservedVariableMtrrs // this will not be of any effect at userspace #define _PCD_GET_MODE_64_PcdPciExpressBaseAddress 0 +#define _PCD_GET_MODE_64_PcdPciExpressBaseSize 0 #define _PCD_GET_MODE_32_PcdMaximumDevicePathNodeCount _gPcd_FixedAtBuild_PcdMaximumDevicePathNodeCount #define _PCD_GET_MODE_BOOL_PcdImageLoaderHashProhibitOverlap _gPcd_FixedAtBuild_PcdImageLoaderHashProhibitOverlap #define _PCD_GET_MODE_BOOL_PcdImageLoaderLoadHeader _gPcd_FixedAtBuild_PcdImageLoaderLoadHeader
changes: add a CHANGES.md entry for the OSSL_FORCE_NO_CACHE_FETCH option.
@@ -23,6 +23,12 @@ OpenSSL 3.0 ### Changes between 1.1.1 and 3.0 [xx XXX xxxx] + * Add a compile time option to prevent the caching of provider fetched + algorithms. This is enabled by including the no-cached-fetch option + at configuration time. + + *Paul Dale* + * Combining the Configure options no-ec and no-dh no longer disables TLSv1.3. Typically if OpenSSL has no EC or DH algorithms then it cannot support connections with TLSv1.3. However OpenSSL now supports "pluggable" groups
Update toc in installation-usage readme
- [Set logging verbosity](#set-logging-verbosity) - [Configure the SELinux type](#configure-the-selinux-type) - [Restrict the allowed syscalls in seccomp profiles](#restrict-the-allowed-syscalls-in-seccomp-profiles) +- [Constrain spod scheduling](#constrain-spod-scheduling) - [Create a seccomp profile](#create-a-seccomp-profile) - [Apply a seccomp profile to a pod](#apply-a-seccomp-profile-to-a-pod) - [Base syscalls for a container runtime](#base-syscalls-for-a-container-runtime)
Remove commented out text from previous commit
\usepackage[outdir=./]{epstopdf} \usepackage{graphicx,verbatim} -%\usepackage{subcaption} -%\captionsetup{compatibility=false} \usepackage{xspace} \usepackage[figuresright]{rotating} \usepackage{tabularx} @@ -826,7 +824,6 @@ In addition to the above tests in $\Lambda$CDM cosmologies without massive neutr %------------------------------------------------------------------------------- \subsubsection{Generalized validation of the power spectrum over $\Lambda$CDM parameter space} \vol{Phil Bull} -%\cont{ `Fair' sampling of parameter space with Latin Hypercubes. Summary statistics used, and binning of power spectrum in $k$ and $z$. Precision settings used for CLASS. Estimates of run-time. Results. Range of validity.} \begin{figure*} \centering @@ -860,13 +857,6 @@ Fig.~\ref{fig:power_paramspace_z2} shows the same comparison, but now for $z=2$. These results show that the {\tt CLASS}-based \ccl power spectrum calculations are robust across a broad range of cosmological parameters, especially for the linear power spectrum, but that some caution must be taken when using the Halofit power spectrum in MCMC studies for example. Since other, more realistic, non-linear power spectrum methods are available (e.g. see Sect.~\ref{sec:cosmicemu}), and tend to have larger theoretical errors than the deviations shown in Fig.~\ref{fig:power_paramspace_z2}, this is unlikely to be a serious concern for most users however. -%To quantify the level of agreement between the \ccl and reference power spectra, we use the following summary statistic that can be summed over a chosen set of bins in redshift and wavenumber: -%\begin{equation} -%\Delta = \sum_{ij} \Theta \left ( \log_{10} \left | \frac{P_{\rm CCL}(k_i, z_j) - P_{\rm ref}(k_i, z_j)}{P_{\rm ref}(k_i, z_j) \Delta_{\rm thres}} \right | \right ). 
-%\end{equation} -%Here, $\Delta_{\rm thres}$ is a target threshold for the fractional deviation between the power spectra, and we have defined $\Theta(x) \equiv x ~~(x \ge 0)$ and $0$ otherwise. Bins where the \ccl power spectrum deviates from the reference power spectrum by a fraction less than $\Delta_{\rm thres}$ do not contribute to the statistic, so the aim is to have $\Delta = 0$ (i.e. no deviation beyond the threshold in any bin). If deviations are found, however, they are weighted logarithmically -- one large deviation of several orders of magnitude affects the statistic as much as a few smaller deviations of order $\sim \Delta_{\rm thres}$. -%\todo{This section still needs discussion and conclusions, right? -DL} - \subsubsection{Validation of the Cosmic Emulator implementation} \label{sec:cosmicemu}
Add uge-tracker branch to CI config
@@ -149,6 +149,7 @@ workflows: - develop - master - v2beta + - uge-tracker - "make:win32": requires: - checkout @@ -158,6 +159,7 @@ workflows: - develop - master - v2beta + - uge-tracker - "make:win64": requires: - checkout @@ -167,6 +169,7 @@ workflows: - develop - master - v2beta + - uge-tracker - "make:linux": requires: - checkout @@ -176,6 +179,7 @@ workflows: - develop - master - v2beta + - uge-tracker - "upload_artifacts": requires: - "make:mac" @@ -188,3 +192,4 @@ workflows: - develop - master - v2beta + - uge-tracker
pbio/control: Log time in microseconds. No need to throw away accuracy in the log.
@@ -120,7 +120,7 @@ void pbio_control_update(pbio_control_t *ctl, int32_t time_now, int32_t count_no // Log control data int32_t log_data[] = { - (time_ref - ctl->trajectory.t0) / 1000, + time_ref - ctl->trajectory.t0, count_now, rate_now, *actuation,
[dpos] fix genesis block handling during LIB status rollback
@@ -161,6 +161,11 @@ func (pls *libStatus) load(lib *blockInfo, block *types.Block) { pls.confirms.Init() } + // Nothing left for the genesis block. + if block.BlockNo() == 0 { + return + } + // Rebuild confirms info & pre-LIB map from LIB + 1 and block based on // the blocks. if tmp := loadPlibStatus(lib, block); tmp != nil {
WritableBuffer re-use problem
@@ -146,7 +146,11 @@ namespace MiningCore.JsonRpc var json = JsonConvert.SerializeObject(payload, serializerSettings); logger.Trace(() => $"[{ConnectionId}] Sending: {json}"); - SendInternal(Encoding.UTF8.GetBytes(json + '\n')); + var buffer = upstream.Allocate(json.Length); + buffer.WriteString(json, Encoding.UTF8); + buffer.WriteByte(0xa); // append newline + + SendInternal(buffer); } public IPEndPoint RemoteEndPoint => upstream?.GetPeerEndPoint(); @@ -154,13 +158,16 @@ namespace MiningCore.JsonRpc #endregion - private void SendInternal(byte[] data) + private void SendInternal(WritableBuffer data) { try { var marshaller = loop.CreateAsync(handle => { - upstream.QueueWriteStream(data, null); + upstream.QueueWriteStream(data, (tcp, ex) => + { + data.Dispose(); + }); handle.Dispose(); });
DBG_(FATAL)_PRINTF has now optional va args
@@ -68,11 +68,11 @@ int picoquic_compare_addr(struct sockaddr * expected, struct sockaddr * actual); #define DBG_PRINTF(fmt, ...) \ debug_printf("%s:%u [%s]: " fmt "\n", \ __FILE__ + MAX(DBG_PRINTF_FILENAME_MAX, sizeof(__FILE__)) - DBG_PRINTF_FILENAME_MAX, \ - __LINE__, __FUNCTION__, __VA_ARGS__) + __LINE__, __FUNCTION__, ##__VA_ARGS__) #define DBG_FATAL_PRINTF(fmt, ...) \ do { \ - DBG_PRINTF("(FATAL) " fmt "\n", __VA_ARGS__); \ + DBG_PRINTF("(FATAL) " fmt "\n", ##__VA_ARGS__); \ exit(1); \ } while (0)
Update: Better pretty printing
@@ -297,7 +297,6 @@ void Encode_PrintAtom(Atom atom) if(atom) { fputs(atom_names[atom-1], stdout); - fputs(" ", stdout); } else { @@ -315,23 +314,19 @@ void Encode_PrintTermPrettyRecursive(Term *term, int index) //start with index=1 int child1 = index*2; int child2 = index*2+1; bool hasChild = child1 < NARSESE_LEN_MAX && term->atoms[child1-1]; - if(hasChild) - { - fputs("(", stdout); - } + fputs(hasChild ? "(" : "", stdout); if(child1 < NARSESE_LEN_MAX) { Encode_PrintTermPrettyRecursive(term, child1); } + fputs(hasChild ? " " : "", stdout); Encode_PrintAtom(atom); + fputs(hasChild ? " " : "", stdout); if(child2 < NARSESE_LEN_MAX) { Encode_PrintTermPrettyRecursive(term, child2); } - if(hasChild) - { - fputs(")", stdout); - } + fputs(hasChild ? ")" : "", stdout); } void Encode_PrintTerm(Term *term)
correct build CMAKE options for building opencl for rocm 4.1
@@ -96,7 +96,8 @@ if [ "$1" != "nocmake" ] && [ "$1" != "install" ] ; then fi VDI_ROOT=$AOMP_REPOS/$AOMP_VDI_REPO_NAME - MYCMAKEOPTS="$AOMP_ORIGIN_RPATH_NO_DTAGS -DCMAKE_BUILD_TYPE=$BUILDTYPE -DCMAKE_INSTALL_PREFIX=$AOMP_INSTALL_DIR -DUSE_COMGR_LIBRARY=yes -DROCclr_DIR=$VDI_ROOT -DLIBROCclr_STATIC_DIR=$BUILD_DIR/build/vdi -DCMAKE_MODULE_PATH=$VDI_ROOT/cmake/modules -DCMAKE_PREFIX_PATH=$AOMP_INSTALL_DIR;$AOMP_INSTALL_DIR/include -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -DCMAKE_INSTALL_RPATH='\$ORIGIN/../lib' -DCMAKE_EXE_LINKER_FLAGS='-Wl,--disable-new-dtags'" + export CMAKE_PREFIX_PATH="""$AOMP_INSTALL_DIR""" + MYCMAKEOPTS="$AOMP_ORIGIN_RPATH_NO_DTAGS -DOpenGL_GL_PREFERENCE=LEGACY -DCMAKE_BUILD_TYPE=$BUILDTYPE -DCMAKE_INSTALL_PREFIX=$AOMP_INSTALL_DIR -DROCclr_DIR=$VDI_ROOT -DLIBROCclr_STATIC_DIR=$BUILD_DIR/build/vdi -DCMAKE_MODULE_PATH=$VDI_ROOT/cmake/modules -DCMAKE_PREFIX_PATH="$CMAKE_PREFIX_PATH" -DCMAKE_EXE_LINKER_FLAGS='-Wl,--disable-new-dtags'" mkdir -p $BUILD_DIR/build/ocl cd $BUILD_DIR/build/ocl
nvbios: Add unknown mem types for Pascal and "skip"
@@ -926,6 +926,12 @@ const char * mem_type(uint8_t version, uint16_t start) case 3: return "GDDR5"; break; + case 8: + return "GDDR5X"; + break; + case 15: + return "Skip"; + break; default: return "Unknown ram type"; }
Less verbose generator head.
:: with normal hoon rules. multipass parsing is the tax :: humans have to pay for simple but human-friendly syntax.) :: -:- %say -|= {^ {pax/path $~} $~} -:- %noun +|= pax/path =< (test pax) => |% ++ item (pair mite (list flow)) :: xml node generator
Stitch exception stack to the location raising the exception
@@ -385,6 +385,19 @@ void oe_real_exception_dispatcher(oe_context_t* oe_context) * for the case of nested exceptions) */ td->state = OE_TD_STATE_SECOND_LEVEL_EXCEPTION_HANDLING; + // Update the stitched callstack so that it points to the location that + // raised the exception. + if (is_enclave_debug_allowed_cached()) + { + // Real exception dispatcher never returns. It is directly called by + // oe_exception_dispatcher. It is safe to modify its frame. + void** frame = (void**)__builtin_frame_address(0); + + // Update the frame with rip, rbp from SSA. + frame[0] = (void*)oe_exception_record.context->rbp; + frame[1] = (void*)oe_exception_record.context->rip; + } + // Traverse the existing exception handlers, stop when // OE_EXCEPTION_CONTINUE_EXECUTION is found. uint64_t handler_ret = OE_EXCEPTION_CONTINUE_SEARCH; @@ -516,6 +529,32 @@ void oe_virtual_exception_dispatcher( td->exception_address = ssa_gpr->rip; td->exception_code = OE_EXCEPTION_UNKNOWN; + // Update the stitched callstack so that it points to the location that + // raised the exception. + if (is_enclave_debug_allowed_cached()) + { + // Start at the current frame. + void** frame = (void**)__builtin_frame_address(0); + + // Look at only 20 caller frames. This ensures that the loop will + // terminate. + for (int i = 0; i < 20; ++i) + { + // Check if the frame is the first enclave frame. For the first + // frame, the caller will lie outside the enclave. + if (frame && oe_is_outside_enclave(frame[0], 1)) + { + // Update the frame with rip, rbp from SSA. + frame[0] = (void*)ssa_gpr->rbp; + frame[1] = (void*)ssa_gpr->rip; + break; + } + + // Move to previous frame. + frame = frame[0]; + } + } + /* Get the exception code and flags only if the exception type * is recognized by the SGX hardware */ if (ssa_gpr->exit_info.as_fields.valid)
do not clear _isIncludingExternal in nested calls
function includeexternal(fname) local fullPath = p.findProjectScript(fname) + local wasIncludingExternal = api._isIncludingExternal api._isIncludingExternal = true fname = fullPath or fname dofile(fname) - api._isIncludingExternal = nil + api._isIncludingExternal = wasIncludingExternal end p.alias(_G, "includeexternal", "includeExternal")
Added default logger in debug mode.
@@ -50,13 +50,16 @@ int metacall_initialize() metacall_null_args[0] = NULL; /* TODO: Initialize by config or default */ - /* - log_configure("metacall", + #if (!defined(NDEBUG) || defined(DEBUG) || defined(_DEBUG) || defined(__DEBUG) || defined(__DEBUG__)) + if (log_configure("metacall", log_policy_format_text(), log_policy_schedule_sync(), log_policy_storage_sequential(), - log_policy_stream_stdio(stdout)); - */ + log_policy_stream_stdio(stdout)) != 0) + { + return 1; + } + #endif if (configuration_initialize("rapid_json", NULL) != 0) {
add warning when fewer closest hits are found than requested.
@@ -167,8 +167,21 @@ void RecordOutputMgr::printClosest(RecordKeyVector &keyList, const vector<CHRPOS _currBamBlockList = &blockList; } if (!keyList.empty()) { + if (context->getNumClosestHitsWanted() > keyList.size()) + { + cerr << "Warning: Fewer hits (" + << keyList.size() + << ") found on " + << keyRec->getChrName() + << " than requested (" + << context->getNumClosestHitsWanted() + << "). It is likely that there are fewer total records" + << " on that chromosome than requested." + << endl; + } int distCount = 0; - for (RecordKeyVector::iterator_type iter = keyList.begin(); iter != keyList.end(); iter = keyList.next()) { + for (RecordKeyVector::iterator_type iter = keyList.begin(); iter != keyList.end(); iter = keyList.next()) + { const Record *hitRec = *iter; printKey(keyRec, keyRec->getStartPosStr(), keyRec->getEndPosStr()); tab(); @@ -198,7 +211,6 @@ void RecordOutputMgr::printClosest(RecordKeyVector &keyList, const vector<CHRPOS null(false, true); if (context->reportDistance()) { tab(); - _outBuf.append("-1"); } newline();
usb.c refactoring request: remove getenv("STLINK_DEVICE") There is no enironment variable "STLINK_DEVICE" in user system, and program do not set it. So I removed all code which works with it.
@@ -1134,33 +1134,8 @@ stlink_t *stlink_open_usb(enum ugly_loglevel verbose, enum connect_type connect, #endif libusb_device **list = NULL; - // TODO: We should use ssize_t and use it as a counter if > 0. - // As per libusb API: ssize_t libusb_get_device_list (libusb_context *ctx, libusb_device ***list) - int cnt = (int)libusb_get_device_list(slu->libusb_ctx, &list); + ssize_t cnt = libusb_get_device_list(slu->libusb_ctx, &list); struct libusb_device_descriptor desc; - int devBus = 0; - int devAddr = 0; - - // TODO: Reading a environment variable in a usb open function is not very nice, this should - // be refactored and moved into the CLI tools, and instead of giving USB_BUS:USB_ADDR a real - // stlink serial string should be passed to this function. Probably people are using this - // but this is very odd because as programmer can change to multiple busses and it is better - // to detect them based on serial. - char *device = getenv("STLINK_DEVICE"); - - if (device) { - char *c = strchr(device, ':'); - - if (c == NULL) { - WLOG("STLINK_DEVICE must be <USB_BUS>:<USB_ADDR> format\n"); - goto on_error; - } - - devBus = atoi(device); - *c++ = 0; - devAddr = atoi(c); - ILOG("bus %03d dev %03d\n", devBus, devAddr); - } while (cnt-- > 0) { struct libusb_device_handle *handle; @@ -1169,13 +1144,6 @@ stlink_t *stlink_open_usb(enum ugly_loglevel verbose, enum connect_type connect, if (desc.idVendor != STLINK_USB_VID_ST) { continue; } - if (devBus && devAddr) { - if ((libusb_get_bus_number(list[cnt]) != devBus) || - (libusb_get_device_address(list[cnt]) != devAddr)) { - continue; - } - } - ret = libusb_open(list[cnt], &handle); if (ret) { continue; } // could not open device @@ -1202,7 +1170,7 @@ stlink_t *stlink_open_usb(enum ugly_loglevel verbose, enum connect_type connect, } if (cnt < 0) { - WLOG ("Couldn't find %s ST-Link devices\n", (devBus && devAddr) ? 
"matched" : "any"); + WLOG ("Couldn't find any ST-Link devices\n"); libusb_free_device_list(list, 1); goto on_error; } else {
gdbstub: improve format of info thread command output gdbstub: simplify the state handling on extra thread info
@@ -76,7 +76,6 @@ void esp_gdbstub_panic_handler(esp_gdbstub_frame_t *frame) } } - static void send_reason(void) { esp_gdbstub_send_start(); @@ -90,6 +89,13 @@ static uint32_t gdbstub_hton(uint32_t i) return __builtin_bswap32(i); } +static void esp_gdbstub_send_str_as_hex(const char *str) +{ + while (*str) { + esp_gdbstub_send_hex(*str, 8); + str++; + } +} /** Send all registers to gdb */ static void handle_g_command(const unsigned char* cmd, int len) { @@ -179,10 +185,6 @@ static bool get_task_handle(size_t index, TaskHandle_t *handle) static eTaskState get_task_state(size_t index) { - if (index >= s_scratch.task_count) { - return eInvalid; - } - return s_scratch.tasks[index].eState; } @@ -289,13 +291,6 @@ static void handle_qsThreadInfo_command(const unsigned char* cmd, int len) /** qThreadExtraInfo requests the thread name */ static void handle_qThreadExtraInfo_command(const unsigned char* cmd, int len) { - uint8_t task_state_string_index = 0; - const char task_state_string[][] = "Running", - "Ready", - "Blocked", - "Suspended", - "Invalid"; - cmd += sizeof("qThreadExtraInfo,") - 1; int task_index = esp_gdbstub_gethex(&cmd, -1); TaskHandle_t handle; @@ -304,39 +299,32 @@ static void handle_qThreadExtraInfo_command(const unsigned char* cmd, int len) return; } esp_gdbstub_send_start(); - const char* task_name = pcTaskGetTaskName(handle); - while (*task_name) { - esp_gdbstub_send_hex(*task_name, 8); - task_name++; - } - + esp_gdbstub_send_str_as_hex("Name: "); + esp_gdbstub_send_str_as_hex(pcTaskGetTaskName(handle)); esp_gdbstub_send_hex(' ', 8); eTaskState state = get_task_state(task_index); switch (state) { case eRunning: - task_state_string_index = 0; + esp_gdbstub_send_str_as_hex("State: Running"); break; case eReady: - task_state_string_index = 1; + esp_gdbstub_send_str_as_hex("State: Ready"); break; case eBlocked: - task_state_string_index = 2; + esp_gdbstub_send_str_as_hex("State: Blocked"); break; case eSuspended: - task_state_string_index = 3; + 
esp_gdbstub_send_str_as_hex("State: Suspended"); + break; + case eDeleted: + esp_gdbstub_send_str_as_hex("State: Deleted"); break; default: - task_state_string_index = 4; + esp_gdbstub_send_str_as_hex("State: Invalid"); break; } - const char* buffer = &task_state_string[task_state_string_index][0]; - while (*buffer) { - esp_gdbstub_send_hex(*buffer, 8); - buffer++; - } - esp_gdbstub_send_end(); }
[skip CI][cmake] change version
# --- set siconos current version --- set(MAJOR_VERSION 4) -set(MINOR_VERSION 2) +set(MINOR_VERSION 3) set(PATCH_VERSION 0) set(SICONOS_VERSION "${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}")
Update gps.go Added logic to only slow down to 2Hz for UBX8 using Galileo
@@ -254,8 +254,7 @@ func initGPSSerial() bool { // Byte order for UBX configuration is little endian. // Set 10 Hz update to make gpsattitude more responsive for ublox7/8. - //p.Write(makeUBXCFG(0x06, 0x08, 6, []byte{0x64, 0x00, 0x01, 0x00, 0x01, 0x00})) // 10 Hz - p.Write(makeUBXCFG(0x06, 0x08, 6, []byte{0x06, 0x00, 0xF4, 0x01, 0x01, 0x00})) // 2 Hz + updatespeed = []byte{0x64, 0x00, 0x01, 0x00, 0x01, 0x00} // 10 Hz // Set navigation settings. nav := make([]byte, 36) @@ -288,6 +287,7 @@ func initGPSSerial() bool { //log.Printf("UBX8 device detected on USB, or GPS serial connection in use. Attempting GLONASS and Galelio configuration.\n") glonass = []byte{0x06, 0x08, 0x0E, 0x00, 0x01, 0x00, 0x01, 0x01} // this enables GLONASS with 8-14 tracking channels galileo = []byte{0x02, 0x04, 0x08, 0x00, 0x01, 0x00, 0x01, 0x01} // this enables Galileo with 4-8 tracking channels + updatespeed = []byte{0x06, 0x00, 0xF4, 0x01, 0x01, 0x00} // Nav speed 2Hz } cfgGnss = append(cfgGnss, gps...) cfgGnss = append(cfgGnss, sbas...) @@ -299,6 +299,8 @@ func initGPSSerial() bool { // SBAS configuration for ublox 6 and higher p.Write(makeUBXCFG(0x06, 0x16, 8, []byte{0x01, 0x07, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00})) + //Navigation Rate 10Hz for <= UBX7 2Hz for UBX8 + p.Write(makeUBXCFG(0x06, 0x08, 6, upatespeed)) // Message output configuration: UBX,00 (position) on each calculated fix; UBX,03 (satellite info) every 5th fix, // UBX,04 (timing) every 10th, GGA (NMEA position) every 5th. All other NMEA messages disabled.
hoon: removes obsolete compiler hooks
~% %pen + == - %ap ap %ut ut == |% -- :: ++ ap :: hoon engine - ~% %ap - +>+ - == - %open open - %rake rake - == + ~% %ap +>+ ~ |_ gen=hoon :: ++ grip ~% %ut +>+ == - %ar ar %fan fan %rib rib %vet vet - %blow blow - %burp burp - %busk busk - %buss buss - %crop crop - %duck duck - %dune dune - %dunk dunk - %epla epla - %emin emin - %emul emul - %feel feel - %felt felt - %fine fine - %fire fire - %fish fish - %fond fond - %fund fund - %funk funk - %fuse fuse - %gain gain - %lose lose - %mile mile - %mine mine - %mint mint - %moot moot - %mull mull - %nest nest - %peel peel - %play play - %peek peek - %repo repo - %rest rest - %tack tack - %toss toss - %wrap wrap == =+ :* fan=*(set [type hoon]) rib=*(set [type type hoon]) :: :: +ar: texture engine :: - ++ ar !: - ~% %ar - +> - == - %fish fish - %gain gain - %lose lose - == + ++ ar + ~% %ar +> ~ |_ [ref=type =skin] :: :: =fish: make a $nock that tests a .ref at .axis for .skin |= [hud=poly gol=type gen=hoon] ^- nock ~+ - =+ %hemp-141 ?- hud %dry q:(mint gol gen) %wet q:(mint(vet |) gol gen) |= [nym=(unit term) hud=poly dom=(map term tome)] ~+ ^- seminoun - =+ %hemp-141 :: tal: map from battery axis to foot :: =; tal=(map @ud hoon) == :: :: ++ redo :: refurbish faces + ~/ %redo |= $: :: ref: raw payload :: ref=type
Added hyperlink to the VPLanet model description and validation paper.
@@ -40,7 +40,7 @@ undergoing plate tectonics or stagnant lid evolution. `vplanet` is a community project. We're happy to take pull requests; if you want to create one, please issue it to the *dev* branch. Soon we will include tutorials on adding new input options, governing variables, and modules. It's a platform for planetary science that can grow exponentially, either by adding new physics or by adding competing models for clean comparisons. -The [examples/](examples) directory contains input files and scripts for generating the figures in Barnes et al. (2019) and all subsequent module descriptions. The [Manual/](Manual) directory contains the pdf of Barnes et al. (2019), which describes the physics of each module, validates each module against observations or past results, and uses figures from the [examples/](examples) directory. +The [examples/](examples) directory contains input files and scripts for generating the figures in [Barnes et al. (2019)](https://arxiv.org/abs/1905.06367) and all subsequent module descriptions. The [Manual/](Manual) directory contains the pdf of [Barnes et al. (2019)](https://arxiv.org/abs/1905.06367), which describes the physics of each module, validates each module against observations or past results, and uses figures from the [examples/](examples) directory. An ecosystem of support software is also publicly available. In this repo, [vspace/](vspace) contains scripts to generate input files for a parameter space sweep. [bigplanet/](bigplanet) contains scripts to store large datasets in HDF5 format and quickly calculate summary properties from an integration, like change in surface temperature. In a separate repository is [vplot](https://github.com/VirtualPlanetaryLaboratory/vplot), which consists of both a command line tool to quickly plot the evolution of a system, and also matplotlib functions to more easily generate publication-worthy figures. 
Finally, we recommend using [approxposterior](https://github.com/dflemin3/approxposterior) to quickly obtain posterior distributions of model parameters. @@ -48,6 +48,6 @@ Behind the scenes, the vplanet team maintains code integrity through continuous If you'd like to stay up to date on vplanet by joining the e-mail list, please send a request to Rory Barnes, [email protected]. -`vplanet` development has been supported by NASA grants NNA13AA93A, NNX15AN35G, and 13-13NAI7_0024. We also acnkowledge support from the University of Washington and the Carnegie Institute for Science. +`vplanet` development has been supported by NASA grants NNA13AA93A, NNX15AN35G, and 13-13NAI7_0024. We also acknowledge support from the University of Washington and the Carnegie Institute for Science. Enjoy!
tests: internal: multiline: fix expected output data
@@ -108,7 +108,7 @@ struct record_check java_input[] = { {" at com.example.myproject.Book.getId(Book.java:22)\n"}, {" at com.example.myproject.Author.getBookIds(Author.java:35)\n"}, {" ... 1 more"}, - {"single line"} + {"single line\n"} }; struct record_check java_output[] = { @@ -122,7 +122,7 @@ struct record_check java_output[] = { " ... 1 more" }, { - "single line" + "single line\n" } }; @@ -339,13 +339,6 @@ static int flush_callback(struct flb_ml_parser *parser, } TEST_CHECK(found == FLB_TRUE); - if (!exp->buf) { - printf("expected length: %i, received: NULL\n", len); - msgpack_unpacked_destroy(&result); - exit(1); - return -1; - } - len = strlen(exp->buf); TEST_CHECK(val.via.str.size == len); if (val.via.str.size != len) {
Build the GPIO HAL for the CC2538
@@ -17,7 +17,7 @@ CONTIKI_CPU_DIRS = . dev usb usb/common usb/common/cdc-acm CONTIKI_CPU_SOURCEFILES += soc.c clock.c rtimer-arch.c uart.c watchdog.c CONTIKI_CPU_SOURCEFILES += nvic.c sys-ctrl.c gpio.c ioc.c spi.c adc.c CONTIKI_CPU_SOURCEFILES += crypto.c aes.c ecb.c cbc.c ctr.c cbc-mac.c gcm.c -CONTIKI_CPU_SOURCEFILES += ccm.c sha256.c +CONTIKI_CPU_SOURCEFILES += ccm.c sha256.c gpio-hal-arch.c CONTIKI_CPU_SOURCEFILES += cc2538-aes-128.c cc2538-ccm-star.c CONTIKI_CPU_SOURCEFILES += cc2538-rf.c udma.c lpm.c int-master.c CONTIKI_CPU_SOURCEFILES += pka.c bignum-driver.c ecc-driver.c ecc-algorithm.c
test FEATURE perf validation test
@@ -371,6 +371,22 @@ test_create_path(struct test_state *state, struct timespec *ts_start, struct tim return LY_SUCCESS; } +static LY_ERR +test_validate(struct test_state *state, struct timespec *ts_start, struct timespec *ts_end) +{ + LY_ERR r; + + TEST_START(ts_start); + + if ((r = lyd_validate_all(&state->data1, NULL, LYD_VALIDATE_PRESENT, NULL))) { + return r; + } + + TEST_END(ts_end); + + return LY_SUCCESS; +} + static LY_ERR _test_parse(struct test_state *state, LYD_FORMAT format, ly_bool use_file, uint32_t print_options, uint32_t parse_options, uint32_t validate_options, struct timespec *ts_start, struct timespec *ts_end) @@ -708,6 +724,7 @@ struct test tests[] = { { "create new text", setup_basic, test_create_new_text }, { "create new bin", setup_basic, test_create_new_bin }, { "create path", setup_basic, test_create_path }, + { "validate", setup_data_single_tree, test_validate }, { "parse xml mem validate", setup_data_single_tree, test_parse_xml_mem_validate }, { "parse xml mem no validate", setup_data_single_tree, test_parse_xml_mem_no_validate }, { "parse xml file no validate format", setup_data_single_tree, test_parse_xml_file_no_validate_format },
Update routing table unit test
@@ -51,17 +51,17 @@ void unittest_RTFilter_InitCheck(void) // Init variables search_result_t result; uint8_t value; - // Add samples + NEW_STEP("Verify that we have not initialized the result"); value = RTFilter_InitCheck(&result); // Verify - TEST_ASSERT_EQUAL(0, value); + TEST_ASSERT_EQUAL(FAILED, value); NEW_STEP("Verify that we have initialized the result"); RTFilter_Reset(&result); value = RTFilter_InitCheck(&result); // Verify - TEST_ASSERT_EQUAL(1, value); + TEST_ASSERT_EQUAL(SUCCEED, value); } } @@ -78,9 +78,8 @@ void unittest_RTFilter_Type(void) search_result_t result; result.result_nbr = 0; RTFilter_Type(&result, VOID_TYPE); - - NEW_STEP("Test that we have asserted"); - TEST_ASSERT_TRUE(IS_ASSERT()); + NEW_STEP("Test result_nbr is set to 0"); + TEST_ASSERT_EQUAL(0, result.result_nbr); } NEW_TEST_CASE("Test the type filtering result number"); {
u8g2: Fix name of updateDisplay export Correct typo in Lua export from updateDispla() to updateDisplay()
@@ -607,7 +607,7 @@ LROT_BEGIN(lu8g2_display, NULL, LROT_MASK_INDEX) LROT_FUNCENTRY( setFontRefHeightExtendedText, lu8g2_setFontRefHeightExtendedText ) LROT_FUNCENTRY( setFontRefHeightText, lu8g2_setFontRefHeightText ) LROT_FUNCENTRY( setPowerSave, lu8g2_setPowerSave ) - LROT_FUNCENTRY( updateDispla, lu8g2_updateDisplay ) + LROT_FUNCENTRY( updateDisplay, lu8g2_updateDisplay ) LROT_FUNCENTRY( updateDisplayArea, lu8g2_updateDisplayArea ) LROT_END(lu8g2_display, NULL, LROT_MASK_INDEX)
flash_fp_mcu: Minor spacing fix BRANCH=none TEST=none
@@ -355,6 +355,7 @@ flash_fp_mcu_stm32() { config_hatch() { check_gpio_chip_exists "gpiochip200" + readonly TRANSPORT="SPI" readonly DEVICE="/dev/spidev1.1" # See @@ -442,7 +443,6 @@ config_zork() { check_gpio_chip_exists "gpiochip320" readonly TRANSPORT="UART" - readonly DEVICE="/dev/ttyS1" # FPMCU RST_ODL is on AGPIO 11 = 320 + 11 = 331
Track log files for current session
@@ -12,6 +12,7 @@ string os, cpu, gpu, ram, kernel, driver; bool sysInfoFetched = false; int gpuLoadLog = 0, cpuLoadLog = 0; uint64_t elapsedLog; +std::vector<std::string> logFiles; struct logData{ double fps; @@ -50,6 +51,7 @@ string exec(string command) { } void writeFile(string filename){ + logFiles.push_back(filename); out.open(filename, ios::out | ios::app); out << "os," << "cpu," << "gpu," << "ram," << "kernel," << "driver" << endl; out << os << "," << cpu << "," << gpu << "," << ram << "," << kernel << "," << driver << endl;
options/posix: Simplify posix_fallocate().
@@ -40,25 +40,8 @@ int posix_fadvise(int fd, off_t offset, off_t length, int advice) { } int posix_fallocate(int fd, off_t offset, off_t size) { - struct error_guard { - error_guard() - : _s{errno} { } - - ~error_guard() { - errno = _s; - } - - private: - int _s; - }; - - error_guard guard; - - if(int e = mlibc::sys_fallocate(fd, offset, size); e) { - errno = e; - return -1; - } - return 0; + // posix_fallocate() returns an error instead of setting errno. + return mlibc::sys_fallocate(fd, offset, size); } // This is a linux extension
Use `%d` instead of `%ld` for `uint32_t`.
@@ -862,11 +862,11 @@ static uint32_t get_relative_index(lily_state *s, lily_container_val *list_val, if (pos < 0 || pos > list_size) lily_IndexError(s, - "Index %ld is too small for list (minimum: -%ld)", old_pos, + "Index %ld is too small for list (minimum: -%d)", old_pos, list_size); } else if (pos > list_size) - lily_IndexError(s, "Index %ld is too large for list (maximum: %ld)", + lily_IndexError(s, "Index %ld is too large for list (maximum: %d)", pos, list_size); return (uint32_t)pos;
Enable examples suite failures as epsdb blocking
@@ -153,7 +153,7 @@ if [ "$nekfails" -ge "1" ]; then fi echo "EPSDB Status: " $epsdb_status Ret=$(($efails + $appfails + $nekfails + $exafails + $exaMfails)) -echo "Experimental Ret " $Ret -Ret=$(($efails + $appfails + $nekfails)) +#echo "Experimental Ret " $Ret +#Ret=$(($efails + $appfails + $nekfails)) exit $Ret
cooja-radio RSSI_NO_SIGNAL more commented
const struct simInterface radio_interface; + + +/* There radio driver can provide cooja it's nosignal value. + * But at present, cooja ignore and override it. + * */ enum { - RSSI_NO_SIGNAL = -120, - LQI_NO_SIGNAL = 120, + /* + * Tmote Sky (with CC2420 radio) give value -100dB + * CC1310 gives value about -110dB + */ + RSSI_NO_SIGNAL = -110 , + + /* + * Tmote Sky (with CC2420 radio) give value 105 + * CC1310 gives value about 100? + */ + LQI_NO_SIGNAL = 100 , }; /* COOJA */ @@ -71,11 +85,11 @@ rtimer_clock_t simLastPacketTimestamp = 0; char simOutDataBuffer[COOJA_RADIO_BUFSIZE]; int simOutSize = 0; char simRadioHWOn = 1; -int simSignalStrength = -100; -int simLastSignalStrength = -100; +int simSignalStrength = RSSI_NO_SIGNAL; +int simLastSignalStrength = RSSI_NO_SIGNAL; char simPower = 100; int simRadioChannel = 26; -int simLQI = 105; +int simLQI = LQI_NO_SIGNAL; int simLastLQI = LQI_NO_SIGNAL;
pgraph/hwtest: Nailed down Kelvin POLYGON_STIPPLE method.
@@ -4407,6 +4407,20 @@ class MthdKelvinPolygonStippleEnable : public SingleMthdTest { using SingleMthdTest::SingleMthdTest; }; +class MthdKelvinPolygonStipple : public SingleMthdTest { + void adjust_orig_mthd() override { + adjust_orig_bundle(&orig); + } + void emulate_mthd() override { + pgraph_kelvin_check_err19(&exp); + pgraph_kelvin_check_err18(&exp); + if (!exp.nsource) { + pgraph_bundle(&exp, 0x100 + idx, val, true); + } + } + using SingleMthdTest::SingleMthdTest; +}; + class MthdKelvinUnk17cc : public SingleMthdTest { void adjust_orig_mthd() override { if (rnd() & 1) { @@ -5803,7 +5817,7 @@ std::vector<SingleMthdTest *> Kelvin::mthds() { new MthdKelvinXfCtx3(opt, rnd(), "light_7_position", -1, cls, 0x13dc, 0x2f), new MthdKelvinLtCtx(opt, rnd(), "light_7_attenuation", -1, cls, 0x13e8, 0x3b), new MthdKelvinPolygonStippleEnable(opt, rnd(), "polygon_stipple_enable", -1, cls, 0x147c), - new UntestedMthd(opt, rnd(), "meh", -1, cls, 0x1480, 0x20), // XXX + new MthdKelvinPolygonStipple(opt, rnd(), "polygon_stipple", -1, cls, 0x1480, 0x20), new UntestedMthd(opt, rnd(), "meh", -1, cls, 0x1500, 0x40), // XXX new UntestedMthd(opt, rnd(), "meh", -1, cls, 0x1600, 0x10), // XXX new UntestedMthd(opt, rnd(), "meh", -1, cls, 0x1680, 0x10), // XXX
Update movie tutorial with ffmpeg information.
@@ -413,3 +413,22 @@ their resolution. :: Convert can also be used to do other types of image manipulations such as cropping a flipping images. To learn more about convert google *ImageMagick convert*. + + +Higher quality encoding with ffmpeg +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can get higher quality encoding using *ffmpeg* instead of VisIt_'s built-in *mpeg2encode*. +If VisIt_ finds *ffmpeg* in your search path it will use that instead of the built in encoder. + +You can obtain *ffmpeg* from the `ffmpeg download site <https://www.ffmpeg.org/download.html>`_. +Scroll down a bit until you get to a section labeled *Get packages & executable files*. +Click on the icon representing the OS you desire, this will change the text below the three logos. +Choose and click one of the options and you will be taken to a page with downloads or package information. + +You may be able to install *ffmpeg* on Linux with the standard package manager for the flavor of Linux you are running. +For example, to install on Ubuntu:: + + apt-get update + apt-get install -y ffmpeg +
stm32/boards/NUCLEO_H743ZI: Use priority 0 for SysTick IRQ. This follows how all other boards are configured.
* @brief This is the HAL system configuration section */ #define VDD_VALUE ((uint32_t)3300) /*!< Value of VDD in mv */ -#define TICK_INT_PRIORITY ((uint32_t)0x0F) /*!< tick interrupt priority */ +#define TICK_INT_PRIORITY ((uint32_t)0x00) /*!< tick interrupt priority */ #define USE_RTOS 0 #define USE_SD_TRANSCEIVER 0U /*!< use uSD Transceiver */
btshell: Add option for directed advertising in the shell Note when peer_addr is set then directed is marked to true however it can be overwriten by option "directed"
@@ -206,6 +206,13 @@ cmd_advertise_configure(int argc, char **argv) return rc; } + + params.directed = parse_arg_bool_dflt("directed", params.directed, &rc); + if (rc != 0) { + console_printf("invalid 'directed' parameter\n"); + return rc; + } + params.own_addr_type = parse_arg_kv_dflt("own_addr_type", cmd_own_addr_types, BLE_OWN_ADDR_PUBLIC, &rc); @@ -435,6 +442,7 @@ static const struct shell_param advertise_configure_params[] = { {"instance", "default: 0"}, {"connectable", "connectable advertising, usage: =[0-1], default: 0"}, {"scannable", "scannable advertising, usage: =[0-1], default: 0"}, + {"directed", "directed advertising, usage: =[0-1], default: 0"}, {"peer_addr_type", "usage: =[public|random|public_id|random_id], default: public"}, {"peer_addr", "usage: =[XX:XX:XX:XX:XX:XX]"}, {"own_addr_type", "usage: =[public|random|rpa_pub|rpa_rnd], default: public"},
YAy PEG: Fix warning about unused function
@@ -42,6 +42,8 @@ bool ends_with (std::string const & text, std::string const & ending) return ending.size () > text.size () ? false : std::equal (ending.rbegin (), ending.rend (), text.rbegin ()); } +#ifdef HAVE_LOGGER + /** * @brief This function returns the string representation of a tree node. * @@ -72,6 +74,8 @@ string toString (node const & node, string const indent = "") return representation; } +#endif + /** * @brief This function will be called before the walker enters a tree node. *
fix: incorrect comment and parameter
@@ -106,17 +106,19 @@ struct discord_bucket { /** * Match endpoint to a route * + * @see https://discord.com/developers/docs/topics/rate-limits#rate-limits + * * @param endpoint that will be matched - * @return the bucket assigned to this endpoint, can be itself or a major parameter + * @return the route from @endpoint (can be itself or a major parameter) */ char* discord_get_route(const char *endpoint); /** - * Free buckets + * Free client buckets * - * @param Client buckets + * @param adapter the client adapter containinig every bucket found */ -void discord_buckets_cleanup(struct discord_adapter *bucket); +void discord_buckets_cleanup(struct discord_adapter *adapter); /** * Check if connections from a bucket hit its threshold, and lock every connection
Send systematic window updates for the connection *even on bogus DATA frames* when streaming, only send window updates for the stream
@@ -502,6 +502,7 @@ static void set_priority(h2o_http2_conn_t *conn, h2o_http2_stream_t *stream, con } } +static int write_req_chunk(void *req_, h2o_iovec_t payload, int is_end_stream); static void write_req_chunk_done(h2o_req_t *req, size_t written, int done) { h2o_http2_stream_t *stream = H2O_STRUCT_FROM_MEMBER(h2o_http2_stream_t, req, req); @@ -513,7 +514,6 @@ static void write_req_chunk_done(h2o_req_t *req, size_t written, int done) } update_input_window(conn, stream->stream_id, &stream->input_window, written); - update_input_window(conn, 0, &conn->_input_window, written); if (done) { if (stream->state == H2O_HTTP2_STREAM_STATE_RECV_BODY) { @@ -548,6 +548,8 @@ static int write_req_chunk(void *req_, h2o_iovec_t payload, int is_end_stream) execute_or_enqueue_request(conn, stream); } stream->req._found_handler = 1; + } else { + write_req_chunk_done(req, payload.len, is_end_stream); } } } else { @@ -559,7 +561,6 @@ static int write_req_chunk(void *req_, h2o_iovec_t payload, int is_end_stream) h2o_http2_stream_set_state(conn, stream, H2O_HTTP2_STREAM_STATE_REQ_PENDING); } } - write_req_chunk_done(req, payload.len, is_end_stream); return 0; } @@ -576,6 +577,8 @@ static int handle_data_frame(h2o_http2_conn_t *conn, h2o_http2_frame_t *frame, c if (conn->state >= H2O_HTTP2_CONN_STATE_HALF_CLOSED) return 0; + update_input_window(conn, 0, &conn->_input_window, payload.length); + stream = h2o_http2_conn_get_stream(conn, frame->stream_id); /* save the input in the request body buffer, or send error (and close the stream) */
Do not use space padded day of the month as it messes up log names.
@@ -42,7 +42,7 @@ setaompgpu # Log directories/files function create_logs(){ - log_dir=$AOMP_SRC/test/test-suite-results/$(date '+%b-%e-%Y')/$(date '+%H-%M-%S') + log_dir=$AOMP_SRC/test/test-suite-results/$(date '+%b-%d-%Y')/$(date '+%H-%M-%S') mkdir -p $log_dir results_file=results-report.log echo Final Log: $log_dir/$results_file
support mingw64, according to [Add partial support for building with MingW64 GCC 4.8-SEH.](https://github.com/LuaJIT/LuaJIT/commit/1a5fd521b830a8aa17c12d2e707d167722e8c7b1)
@@ -64,8 +64,7 @@ IF(SIZEOF_VOID_P EQUAL 8) ENDIF() if ( WIN32 AND NOT CYGWIN ) - add_definitions ( -DLUAJIT_OS=LUAJIT_OS_WINDOWS) - set ( LJVM_MODE coffasm ) + set ( LJVM_MODE peobj ) elseif ( APPLE ) set ( CMAKE_EXE_LINKER_FLAGS "-pagezero_size 10000 -image_base 100000000 ${CMAKE_EXE_LINKER_FLAGS}" ) set ( LJVM_MODE machasm ) @@ -299,7 +298,7 @@ macro(add_buildvm_target _target _mode) ) endmacro(add_buildvm_target) -if (MSVC) +if (WIN32) add_buildvm_target ( lj_vm.obj peobj ) set (LJ_VM_SRC ${CMAKE_CURRENT_BINARY_DIR}/lj_vm.obj) else ()
libhfuzz/instrument: print debugging for memcmp earlier
@@ -82,6 +82,10 @@ static void* getsym(const char* sym) { extern int __wrap_memcmp(const void* s1, const void* s2, size_t n) __attribute__((weak)); static void initializeLibcFunctions(void) { libc_memcmp = (int (*)(const void* s1, const void* s2, size_t n))getsym("memcmp"); + + LOG_D("libc_memcmp=%p, (_memcmp=%p, memcmp=%p, __wrap_memcmp=%p)", libc_memcmp, _memcmp, memcmp, + __wrap_memcmp); + if (!libc_memcmp) { LOG_W("dlsym(memcmp) failed: %s", dlerror()); libc_memcmp = _memcmp; @@ -90,8 +94,6 @@ static void initializeLibcFunctions(void) { LOG_W("dlsym(memcmp)==__wrap_memcmp: %p==%p", libc_memcmp, __wrap_memcmp); libc_memcmp = _memcmp; } - LOG_D("libc_memcmp=%p, (_memcmp=%p, memcmp=%p, __wrap_memcmp=%p)", libc_memcmp, _memcmp, memcmp, - __wrap_memcmp); } static void* initializeTryMapHugeTLB(int fd, size_t sz) {
[dpos] Bug fix: prevent block producers with redudant ID
@@ -18,7 +18,7 @@ type errBpSize struct { } func (e errBpSize) Error() string { - return fmt.Sprintf("wrong # of block producers - %v (required - %v)", e.given, e.required) + return fmt.Sprintf("insufficient or redundant block producers - %v (required - %v)", e.given, e.required) } // Cluster represents a cluster of block producers. @@ -56,6 +56,10 @@ func NewCluster(ids []string) (*Cluster, error) { c.index[bpID] = index } + if len(c.member) != param.BlockProducers { + return nil, errBpSize{required: param.BlockProducers, given: uint16(len(ids))} + } + return c, nil }
ctype: fix bugs
@@ -209,9 +209,8 @@ void test_wchar (void) keySetString (k, ""); succeed_if (!checkType (k), "empty string should not check successfully as wchar"); - wchar_t y[2] = L"ab"; - char s[3 * MB_CUR_MAX]; - wcstombs (s, y, 3 * sizeof (wchar_t)); + char * s = elektraCalloc (3 * MB_CUR_MAX + 1); + wcstombs (s, L"ab", 3 * MB_CUR_MAX); keySetString (k, s); succeed_if (!checkType (k), "two wchars should not check successfully as wchar"); @@ -236,9 +235,10 @@ void test_wchar (void) printf ("0x%lx\n", i); } } - wctomb (NULL, 0); + int x ELEKTRA_UNUSED = wctomb (NULL, 0); } + elektraFree (s); keyDel (k); }
updated logstream test to logstream 2.0
@@ -55,16 +55,17 @@ class CriblTCPToFileTest(ApplicationTest): out_file_pattern = "/tmp/CriblOut-*.json" logging.info(f"Waiting for cribl to output to file {out_file_pattern}") - result_file_path = self.__wait_for_file(out_file_pattern) + result_file_paths = self.__wait_for_files(out_file_pattern) try: - res = self.__validate_results(result_file_path, sent_messages) + res = self.__validate_results(result_file_paths, sent_messages) finally: - os.remove(result_file_path) + for path in result_file_paths: + os.remove(path) return res, None - def __validate_results(self, result_file_path, sent_messages): + def __validate_results(self, result_file_paths, sent_messages): def read_msg_from_json(msg): obj = json.loads(msg) @@ -74,13 +75,14 @@ class CriblTCPToFileTest(ApplicationTest): "count": obj["count"], } - logging.info(f"Reading file {result_file_path}") received_messages = [] - with open(result_file_path, "r") as f: + for path in result_file_paths: + logging.info(f"Reading file {path}") + with open(path, "r") as f: lines = f.readlines() logging.debug("First lines from file:") logging.debug("".join(lines[:5])) - received_messages = [read_msg_from_json(l) for l in lines] + received_messages.extend([read_msg_from_json(l) for l in lines]) logging.info("Validating results") return validate_all( @@ -95,15 +97,12 @@ class CriblTCPToFileTest(ApplicationTest): ) @retry(stop_max_attempt_number=7, wait_fixed=20000) - def __wait_for_file(self, path): + def __wait_for_files(self, path): matched_files = glob.glob(path) if len(matched_files) == 0: raise FileNotFoundError(path) - if len(matched_files) > 1: - raise Exception(f"Expected to find 1 file, but found more {matched_files}") - - return matched_files[0] + return matched_files @property def name(self): @@ -111,6 +110,6 @@ class CriblTCPToFileTest(ApplicationTest): def configure(runner: Runner, config): - app_controller = SubprocessAppController(["/opt/cribl/bin/cribld", "server"], "cribl", config.scope_path, + app_controller = 
SubprocessAppController(["/opt/cribl/bin/cribl", "server"], "cribl", config.scope_path, config.logs_path) runner.add_tests([CriblTCPToFileTest(app_controller)])
[kservice] Fix 64 bit compilation warning
@@ -1383,8 +1383,8 @@ rt_inline void _heap_unlock(rt_base_t level) #if defined(RT_USING_SMALL_MEM_AS_HEAP) static rt_smem_t system_heap; -rt_inline void _smem_info(rt_uint32_t *total, - rt_uint32_t *used, rt_uint32_t *max_used) +rt_inline void _smem_info(rt_size_t *total, + rt_size_t *used, rt_size_t *max_used) { if (total) *total = system_heap->total; @@ -1420,8 +1420,8 @@ void *_memheap_realloc(struct rt_memheap *heap, void *rmem, rt_size_t newsize); rt_memheap_info(&system_heap, _total, _used, _max) #elif defined(RT_USING_SLAB_AS_HEAP) static rt_slab_t system_heap; -rt_inline void _slab_info(rt_uint32_t *total, - rt_uint32_t *used, rt_uint32_t *max_used) +rt_inline void _slab_info(rt_size_t *total, + rt_size_t *used, rt_size_t *max_used) { if (total) *total = system_heap->total;
crota: enable BBR firmware update BRANCH=none TEST=make -j BOARD=crota TEST=boot into OS and no type-c function lost
/* Enabling USB4 mode */ #define CONFIG_USB_PD_USB4 -/* - * TODO: b/229934138, Disable BBR firmware update temporarily. - */ /* Retimer */ -#undef CONFIG_USBC_RETIMER_FW_UPDATE +#define CONFIG_USBC_RETIMER_FW_UPDATE /* Thermal features */ #define CONFIG_THERMISTOR
proc: kill the children first
@@ -153,6 +153,11 @@ int proc_start(void (*initthr)(void *), void *arg, const char *path) void proc_kill(process_t *proc) { + process_t *child; + + for (child = proc->childs; child != proc->childs; child = child->next) + proc_kill(child); + proc_lockSet(&process_common.lock); lib_rbRemove(&process_common.id, &proc->idlinkage); proc_lockClear(&process_common.lock);
fs/select : Prevent assert when select called after close
@@ -273,10 +273,18 @@ int file_poll(FAR struct file *filep, FAR struct pollfd *fds, bool setup) FAR struct inode *inode; int ret = -ENOSYS; - DEBUGASSERT(filep != NULL && filep->f_inode != NULL); + DEBUGASSERT(filep != NULL); inode = filep->f_inode; - if (inode != NULL) { + /* If inode is null, notify error result */ + if (inode == NULL) { + /* Error case, it is lost connection, There can be readable data exist */ + fds->revents |= (POLLERR | POLLHUP); + fds->revents &= ~POLLOUT; + sem_post(fds->sem); + return OK; + } + /* Is a driver registered? Does it support the poll method? * If not, return -ENOSYS */ @@ -303,7 +311,6 @@ int file_poll(FAR struct file *filep, FAR struct pollfd *fds, bool setup) ret = OK; } - } return ret; }
Do not cast to uint16 in SurfaceBlit
@@ -386,8 +386,8 @@ struct MultiplyHandler static inline uint8 comp(uint8 a, uint8 b) { return (a * b ) / 255; } static inline uint8 alpha(uint8 a, uint8 b) { return (a * b )/ 255; } #else - static inline uint8 comp(uint8 a, uint8 b) { return ( ( (uint16)a + (a>>7)) * b ) >> 8; } - static inline uint8 alpha(uint8 a, uint8 b) { return ( ( (uint16)a + (a>>7)) * b ) >> 8; } + static inline uint8 comp(uint8 a, uint8 b) { return ( ( a + (a>>7)) * b ) >> 8; } + static inline uint8 alpha(uint8 a, uint8 b) { return ( ( a + (a>>7)) * b ) >> 8; } #endif };
Comment out a code block that performs out-of-bounds memory accesses ...and does not appear to be needed even when it stays within the bounds of the array
@@ -823,24 +823,22 @@ int CNAME(BLASLONG m, BLASLONG n, FLOAT *a, BLASLONG lda, BLASLONG posX, BLASLON b[ 3] = *(a01 + 3); b += 4; } +#if 1 + } +#else } else { #ifdef UNIT b[ 0] = ONE; b[ 1] = ZERO; #else -// out-of-bounds memory accesses, see issue 601 -// b[ 0] = *(a01 + 0); -// b[ 1] = *(a01 + 1); - b[0]=ZERO; - b[1]=ZERO; + b[ 0] = *(a01 + 0); + b[ 1] = *(a01 + 1); #endif -// out-of-bounds memory accesses, see issue 601 -// b[ 2] = *(a02 + 0); -// b[ 3] = *(a02 + 1); - b[2]=ZERO; - b[3]=ZERO; + b[ 2] = *(a02 + 0); + b[ 3] = *(a02 + 1); b += 4; } +#endif posY += 2; }
BugID:18236504:Enable rhino preempt for fixing yloop cert issue
@@ -1058,8 +1058,7 @@ CASE(test_yloop, aos_2_009) aos_sem_wait(&g_sem_taskexit_sync, -1); } printf("%d tasks exit!\r\n", TEST_CONFIG_YLOOP_LOOP_COUNT); - //ASSERT_EQ(g_var, TEST_CONFIG_YLOOP_LOOP_COUNT); - ASSERT_EQ(g_var, 1); + ASSERT_EQ(g_var, TEST_CONFIG_YLOOP_LOOP_COUNT); aos_sem_free(&g_sem_taskexit_sync); } #endif /* TEST_CONFIG_YLOOP_ENABLED */
add non-binary flag!
@@ -20,6 +20,7 @@ BROWN = display.create_pen(97, 57, 21) BLACK = display.create_pen(0, 0, 0) MAGENTA = display.create_pen(255, 33, 140) CYAN = display.create_pen(33, 177, 255) +AMETHYST = display.create_pen(156, 89, 209) # Uncomment one of these to change flag # If adding your own, colour order is left to right (or top to bottom) @@ -28,6 +29,7 @@ COLOUR_ORDER = [RED, ORANGE, YELLOW, GREEN, INDIGO, VIOLET] # traditional pride # COLOUR_ORDER = [BLUE, PINK, WHITE, PINK, BLUE] # trans flag # COLOUR_ORDER = [MAGENTA, YELLOW, CYAN] # pan flag # COLOUR_ORDER = [MAGENTA, VIOLET, INDIGO] # bi flag +# COLOUR_ORDER = [YELLOW, WHITE, AMETHYST, BLACK] # non-binary flag # Change this for vertical stripes STRIPES_DIRECTION = "horizontal"
TCPMv2: PD Timers - Add PE ChunkingNotSupported to framework BRANCH=none TEST=make runtests Tested-by: Denis Brockus
@@ -672,17 +672,6 @@ static struct policy_engine { */ uint64_t wait_and_add_jitter_timer; - /* - * PD 3.0, version 2.0, section 6.6.18.1: The ChunkingNotSupportedTimer - * is used by a Source or Sink which does not support multi-chunk - * Chunking but has received a Message Chunk. The - * ChunkingNotSupportedTimer Shall be started when the last bit of the - * EOP of a Message Chunk of a multi-chunk Message is received. The - * Policy Engine Shall Not send its Not_Supported Message before the - * ChunkingNotSupportedTimer expires. - */ - uint64_t chunking_not_supported_timer; - /* * Used to wait for tSrcTransition between sending an Accept for a * Request or receiving a GoToMin and transitioning the power supply. @@ -3876,8 +3865,8 @@ __maybe_unused static void pe_chunk_received_entry(int port) assert(0); print_current_state(port); - pe[port].chunking_not_supported_timer = - get_time().val + PD_T_CHUNKING_NOT_SUPPORTED; + pd_timer_enable(port, PE_TIMER_CHUNKING_NOT_SUPPORTED, + PD_T_CHUNKING_NOT_SUPPORTED); } __maybe_unused static void pe_chunk_received_run(int port) @@ -3886,10 +3875,15 @@ __maybe_unused static void pe_chunk_received_run(int port) IS_ENABLED(CONFIG_USB_PD_EXTENDED_MESSAGES)) assert(0); - if (get_time().val > pe[port].chunking_not_supported_timer) + if (pd_timer_is_expired(port, PE_TIMER_CHUNKING_NOT_SUPPORTED)) set_state_pe(port, PE_SEND_NOT_SUPPORTED); } +__maybe_unused static void pe_chunk_received_exit(int port) +{ + pd_timer_disable(port, PE_TIMER_CHUNKING_NOT_SUPPORTED); +} + /** * PE_SRC_Ping */ @@ -7146,10 +7140,12 @@ static __const_data const struct usb_state pe_states[] = { [PE_SRC_CHUNK_RECEIVED] = { .entry = pe_chunk_received_entry, .run = pe_chunk_received_run, + .exit = pe_chunk_received_exit, }, [PE_SNK_CHUNK_RECEIVED] = { .entry = pe_chunk_received_entry, .run = pe_chunk_received_run, + .exit = pe_chunk_received_exit, }, #endif /* CONFIG_USB_PD_EXTENDED_MESSAGES */ #endif /* CONFIG_USB_PD_REV30 */
List the correct features for 3D queues.
@@ -146,7 +146,9 @@ serverCreatePrinter( }; static const char * const features3d[] =/* ipp-features-supported values */ { - "ipp-3d" + "infrastructure-printer", + "ipp-3d", + "system-service" }; static const int ops[] = /* operations-supported values */ {
ipsec: fix perf issue in esp_aad_fill Type: fix Fixes:
@@ -137,19 +137,22 @@ esp_aad_fill (vnet_crypto_op_t * op, esp_aead_t *aad; aad = (esp_aead_t *) op->aad; - clib_memcpy_fast (aad, esp, 8); + aad->data[0] = esp->spi; if (ipsec_sa_is_set_USE_ESN (sa)) { /* SPI, seq-hi, seq-low */ - aad->data[2] = aad->data[1]; aad->data[1] = clib_host_to_net_u32 (sa->seq_hi); + aad->data[2] = esp->seq; op->aad_len = 12; } else + { /* SPI, seq-low */ + aad->data[1] = esp->seq; op->aad_len = 8; } +} #endif /* __ESP_H__ */ /*
appveyor: changing Qt folder
@@ -3,7 +3,7 @@ image: Visual Studio 2015 platform: x86 clone_folder: C:\TAU\rhodes environment: - QTDIR: C:\Qt\5.9.5\msvc2015 + QTDIR: C:\Qt\5.9\msvc2015 matrix: - win32_rhosimulator: testable_application_repository: https://github.com/rhomobile/RMS-Testing.git
spec: correct return value for missing keys when ignore is set
@@ -793,8 +793,11 @@ static int processSpecKey (Key * specKey, Key * parentKey, KeySet * ks, const Co char * msg = elektraFormat ("Required key %s is missing.", strchr (keyName (specKey), '/')); handleConflict (parentKey, msg, ch->missing); elektraFree (msg); + if (ch->missing != IGNORE) + { ret = -1; } + } if (isKdbGet) {
readme: add a link to the quic wikipedia page
@@ -26,7 +26,7 @@ $ sudo h2olog -p $(pgrep -o h2o) ## Tracing QUIC events -Server-side QUIC events can be traced using the `quic` subcommand. +Server-side [QUIC](https://en.wikipedia.org/wiki/QUIC) events can be traced using the `quic` subcommand. Events are rendered in [JSON](https://en.wikipedia.org/wiki/JSON) format. This feature is heavily a [WIP](https://en.wikipedia.org/wiki/Work_in_process).
doc: Fix typos in interrupt hld
@@ -191,7 +191,7 @@ The interrupt vectors are assigned as shown here: - Usage * - 0x0-0x14 - - Exceptions: NMI, INT3, page dault, GP, debug. + - Exceptions: NMI, INT3, page fault, GP, debug. * - 0x15-0x1F - Reserved @@ -218,7 +218,7 @@ The interrupt vectors are assigned as shown here: - Hypervisor Callback HSM * - 0xF4 - - Performance Monitering Interrupt + - Performance Monitoring Interrupt * - 0xFF - SPURIOUS_APIC_VECTOR
vere: updates ames future-refactoring comments for accuracy
@@ -190,7 +190,7 @@ _ames_etch_head(u3_head* hed_u, c3_y buf_y[4]) } /* _ames_chub_bytes(): c3_y[8] to c3_d -** XX move +** XX factor out, deduplicate with other conversions */ static inline c3_d _ames_chub_bytes(c3_y byt_y[8]) @@ -206,7 +206,6 @@ _ames_chub_bytes(c3_y byt_y[8]) } /* _ames_ship_to_chubs(): pack [len_y] bytes into c3_d[2] -** XX move */ static inline void _ames_ship_to_chubs(c3_d sip_d[2], c3_y len_y, c3_y* buf_y) @@ -219,7 +218,7 @@ _ames_ship_to_chubs(c3_d sip_d[2], c3_y len_y, c3_y* buf_y) } /* _ames_chub_bytes(): c3_d to c3_y[8] -** XX move +** XX factor out, deduplicate with other conversions */ static inline void _ames_bytes_chub(c3_y byt_y[8], c3_d num_d) @@ -235,7 +234,6 @@ _ames_bytes_chub(c3_y byt_y[8], c3_d num_d) } /* _ames_ship_of_chubs(): unpack c3_d[2] into [len_y] bytes. -** XX move */ static inline void _ames_ship_of_chubs(c3_d sip_d[2], c3_y len_y, c3_y* buf_y)
router_add_route: be more specific about unreachable matches
@@ -840,19 +840,25 @@ router_add_route(router *rtr, route *rte) { route *rw; route *last = NULL; - char hadmatchall = 0; + route *matchallstop = NULL; for (rw = rtr->routes; rw != NULL; last = rw, rw = rw->next) if (rw->matchtype == MATCHALL && rw->stop) - hadmatchall = 1; + matchallstop = rw; if (last == NULL) { rtr->routes = rte; return NULL; } - if (hadmatchall) { - logerr("warning: match %s will never match " - "due to preceding match * ... stop\n", - rw->pattern == NULL ? "*" : rw->pattern); + +#define matchtype(r) \ + r->dests->cl->type == AGGREGATION ? "aggregate" : \ + r->dests->cl->type == REWRITE ? "rewrite" : \ + "match" + if (matchallstop != NULL) { + logerr("warning: %s %s will never match " + "due to preceding %s * ... stop\n", + matchtype(rte), rte->pattern == NULL ? "*" : rte->pattern, + matchtype(matchallstop)); } last->next = rte; return NULL;
Destroy Lua objects before destroying lovr; This fixes a bug where physics Worlds are destroyed after ODE is deinitialized, which can cause a segfault. Instead we collect Lua objects first and then destroy each module. This makes more sense and doesn't seem to have any consequences...
@@ -33,9 +33,8 @@ static void emscriptenLoop(void* arg) { int status = lua_tonumber(L, -1); bool isRestart = lua_type(L, -1) == LUA_TSTRING && !strcmp(lua_tostring(L, -1), "restart"); - lovrDestroy(); - lua_close(L); + lovrDestroy(); emscripten_cancel_main_loop(); if (isRestart) { @@ -98,8 +97,8 @@ bool lovrRun(int argc, char** argv, int* status) { lua_pushcfunction(L, luax_getstack); if (luaL_loadbuffer(L, (const char*) boot_lua, boot_lua_len, "boot.lua") || lua_pcall(L, 0, 1, -2)) { fprintf(stderr, "%s\n", lua_tostring(L, -1)); - lovrDestroy(); lua_close(L); + lovrDestroy(); *status = 1; return false; } @@ -119,8 +118,8 @@ bool lovrRun(int argc, char** argv, int* status) { *status = lua_tonumber(L, -1); bool restart = lua_type(L, -1) == LUA_TSTRING && !strcmp(lua_tostring(L, -1), "restart"); - lovrDestroy(); lua_close(L); + lovrDestroy(); if (!restart) { glfwTerminate();
Support Python 3.7 in iterate_leaf_indexes and staged_predict fix
@@ -1812,7 +1812,10 @@ class CatBoost(_CatBoostBase): ntree_end = self.tree_count_ staged_predict_iterator = self._staged_predict_iterator(data, prediction_type, ntree_start, ntree_end, eval_period, thread_count, verbose) while True: + try: predictions = staged_predict_iterator.next() + except StopIteration: + return yield predictions[0] if data_is_single_object else predictions def staged_predict(self, data, prediction_type='RawFormulaVal', ntree_start=0, ntree_end=0, eval_period=1, thread_count=-1, verbose=None): @@ -1872,7 +1875,10 @@ class CatBoost(_CatBoostBase): data, _ = self._process_predict_input_data(data, "iterate_leaf_indexes") leaf_indexes_iterator = self._leaf_indexes_iterator(data, ntree_start, ntree_end) while True: + try: yield leaf_indexes_iterator.next() + except StopIteration: + return def iterate_leaf_indexes(self, data, ntree_start=0, ntree_end=0): """
Fix arduino primo LED pin (25 --> 20).
@@ -38,7 +38,7 @@ extern uint8_t _ram_start; #define RAM_SIZE 0x10000 /* LED pins */ -#define LED_BLINK_PIN (25) +#define LED_BLINK_PIN (20) /* UART info */ #define ESPDUINO_UART "uart1"
acrn-config: fix logic of vm total pci devices count Skip vhostbridge if there is no pci passthrough device
@@ -110,14 +110,25 @@ def pci_dev_num_per_vm_gen(config): shmem_num = scenario_cfg_lib.get_shmem_num(shmem_regions) for vm_i,vm_type in common.VM_TYPES.items(): + num = 0 if "POST_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']: + shmem_num_i = 0 if shmem_enabled == 'y' and vm_i in shmem_num.keys(): - print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, shmem_num[vm_i]), file=config) + shmem_num_i = shmem_num[vm_i] + num = shmem_num_i elif "PRE_LAUNCHED_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']: shmem_num_i = 0 if shmem_enabled == 'y' and vm_i in shmem_num.keys(): shmem_num_i = shmem_num[vm_i] - print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, pci_dev_num[vm_i] + shmem_num_i), file=config) + if pci_dev_num[vm_i] == 1: + # there is only vhostbridge but no passthrough device + # remove the count of vhostbridge, check get_pci_num definition + pci_dev_num[vm_i] -= 1 + num = pci_dev_num[vm_i] + shmem_num_i + elif "SOS_VM" == scenario_cfg_lib.VM_DB[vm_type]['load_type']: + continue + if num > 0: + print("#define VM{}_CONFIG_PCI_DEV_NUM\t{}U".format(vm_i, num), file=config) print("", file=config)
link to Wiki sections in README.md
@@ -20,19 +20,27 @@ to applications protected by the Apache web server and establishes an authentica The protected content, applications and services can be hosted by the Apache server itself or served from origin server(s) residing behind it by configuring Apache as a Reverse Proxy in front of those servers. The -latter allows adding OpenID Connect based authentication to existing applications/services/SPAs without -modifying those applications, possibly migrating them away from legacy authentication mechanisms to standards based +latter allows for adding OpenID Connect based authentication to existing applications/services/SPAs without +modifying those applications, possibly migrating them away from legacy authentication mechanisms to standards-based OpenID Connect Single Sign On (SSO). By default the module sets the `REMOTE_USER` variable to the `id_token` `[sub]` claim, concatenated with the OP's Issuer identifier (`[sub]@[iss]`). Other `id_token` claims are passed in HTTP headers and/or environment variables together with those -(optionally) obtained from the UserInfo endpoint. The provider HTTP headers and environment variables can be consumed by +(optionally) obtained from the UserInfo endpoint. The provided HTTP headers and environment variables can be consumed by applications protected by the Apache server. Custom fine-grained authorization rules - based on Apache's `Require` primitives - can be specified to match against the -set of claims provided in the `id_token`/ `userinfo` claims. +set of claims provided in the `id_token`/ `userinfo` claims, see [here](https://github.com/zmartzone/mod_auth_openidc/wiki/Authorization). +Clustering for resilience and performance can be configured using one of the supported cache backends options as +listed [here](https://github.com/zmartzone/mod_auth_openidc/wiki/Caching). 
-*mod_auth_openidc* supports the following specifications: +For an exhaustive description of all configuration options, see the file [`auth_openidc.conf`](https://github.com/zmartzone/mod_auth_openidc/blob/master/auth_openidc.conf). +This file can also serve as an include file for `httpd.conf`. + +Interoperability +---------------- + +*mod_auth_openidc* is [OpenID Connect certified](https://openid.net/certification/#RPs) and supports the following specifications: - [OpenID Connect Core 1.0](http://openid.net/specs/openid-connect-core-1_0.html) *(Basic, Implicit, Hybrid and Refresh flows)* - [OpenID Connect Discovery 1.0](http://openid.net/specs/openid-connect-discovery-1_0.html) - [OpenID Connect Dynamic Client Registration 1.0](http://openid.net/specs/openid-connect-registration-1_0.html) @@ -43,9 +51,6 @@ set of claims provided in the `id_token`/ `userinfo` claims. - [OpenID Connect Front-Channel Logout 1.0](http://openid.net/specs/openid-connect-frontchannel-1_0.html) *(implementers draft)* - [OpenID Connect Back-Channel Logout 1.0](https://openid.net/specs/openid-connect-backchannel-1_0.html) *(implementers draft)* -For an exhaustive description of all configuration options, see the file `auth_openidc.conf` -in this directory. This file can also serve as an include file for `httpd.conf`. - Support -------
try osx builds, 2
@@ -24,16 +24,7 @@ jobs: python -m pip install --upgrade pip pip install cython pytest pytest-pep8 - - name: Install build prerequisites - if: runner.os == 'Linux' - uses: pypa/[email protected] - env: - CIBW_BUILD: cp36-* cp37-* cp38-* - CIBW_BEFORE_BUILD: yum install -y libcurl-devel zlib-devel bzip2-devel xz-devel && pip install cython - CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 - CIBW_MANYLINUX_I686_IMAGE: manylinux1 - - - name: Install build prerequisites + - name: Build wheels for linux if: runner.os == 'Linux' uses: pypa/[email protected] env: @@ -42,7 +33,7 @@ jobs: CIBW_MANYLINUX_X86_64_IMAGE: manylinux1 CIBW_MANYLINUX_I686_IMAGE: manylinux1 - - name: Build wheels + - name: Build wheels for macos if: runner.os != 'Linux' uses: pypa/[email protected] env:
Fixing bug in return value of neutrino integral at limits.
@@ -77,10 +77,10 @@ double nu_phasespace_intg(gsl_interp_accel* accel, double mnuOT, int* status) // First check the cases where we are in the limits. if (mnuOT<CCL_NU_MNUT_MIN) { - integral_value = 7./8.; + return 7./8.; } else if (mnuOT>CCL_NU_MNUT_MAX) { - integral_value = 0.2776566337*mnuOT; + return 0.2776566337*mnuOT; } // Evaluate the spline - this will use the accelerator if it has been defined.
fixing uninitialized value
@@ -5088,7 +5088,7 @@ FieldlineLib:: removeOverlap( std::vector< std::vector< std::vector < Point > > > &bins, unsigned int windingGroupOffset ) { - unsigned int nnodes; + unsigned int nnodes = 0; unsigned int nSections = bins.size(); for( unsigned int s=0; s<nSections; ++s )
FIX: set exp_bucket to hashsize-1.
@@ -287,7 +287,7 @@ static void assoc_expand(void) #ifdef SLOW_HASH_EXPANSION /* set hash table expansion */ assocp->expanding = true; - assocp->exp_bucket = assocp->prevsize - 1; + assocp->exp_bucket = assocp->hashsize - 1; assocp->exp_tabidx = 0; #else if (assocp->redistributed_bucket_cnt != 0) {
Allow "nul" output for compressed images
@@ -1199,11 +1199,18 @@ int main( } else { +#if defined(_WIN32) + bool is_null = output_filename == "NUL" || output_filename == "nul"; +#else + bool is_null = output_filename == "/dev/null"; +#endif + if (!is_null) + { printf("ERROR: Unknown compressed output file type\n"); - return 1; } } + } // Store decompressed image if (operation & ASTCENC_STAGE_ST_NCOMP)
riscv64: cleared page attributes
@@ -132,10 +132,10 @@ int pmap_enter(pmap_t *pmap, addr_t pa, void *va, int attr, page_t *alloc) return -EFAULT; } - pmap->pdir2[pdi2] = (((alloc->addr >> 12) << 10) | (attr & 0x10) | 0xc1); + pmap->pdir2[pdi2] = (((alloc->addr >> 12) << 10) | 0x01); /* Initialize pdir (MOD) - because of reentrancy */ - pmap_common.pdir0[((u64)pmap_common.ptable >> 12) & 0x1ff] = (((alloc->addr >> 12) << 10) | /*0x10 | */ 0xcf); + pmap_common.pdir0[((u64)pmap_common.ptable >> 12) & 0x1ff] = (((alloc->addr >> 12) << 10) | 0xcf); hal_cpuFlushTLB(pmap_common.ptable); hal_memset(pmap_common.ptable, 0, 4096); @@ -144,7 +144,7 @@ int pmap_enter(pmap_t *pmap, addr_t pa, void *va, int attr, page_t *alloc) else { /* Map next level pdir */ addr = ((pmap->pdir2[pdi2] >> 10) << 12); - pmap_common.pdir0[((u64)pmap_common.ptable >> 12) & 0x1ff] = (((addr >> 12) << 10) | /*0x10 | */ 0xcf); + pmap_common.pdir0[((u64)pmap_common.ptable >> 12) & 0x1ff] = (((addr >> 12) << 10) | 0xcf); hal_cpuFlushTLB(pmap_common.ptable); } @@ -153,7 +153,7 @@ int pmap_enter(pmap_t *pmap, addr_t pa, void *va, int attr, page_t *alloc) hal_spinlockClear(&pmap_common.lock, &sc); return -EFAULT; } - pmap_common.ptable[pdi1] = (((alloc->addr >> 12) << 10) | (attr & 0x10) | 0xc1); + pmap_common.ptable[pdi1] = (((alloc->addr >> 12) << 10) | 0x01); alloc = NULL; } @@ -452,7 +452,7 @@ void _pmap_preinit(void) /* Map PLIC (MOD) */ // pmap_common.pdir2[511] = ((u64)pmap_common.iopdir >> 2) | 1; - pmap_common.pdir2[511] = 0xcf | 0x10; + pmap_common.pdir2[511] = 0xcf; // pmap_common.iopdir[0] = (((u64)0x0c000000 >> 2) | 0xcf); return;
[cmake] work around a Debian/Ubuntu bug in Python install paths
@@ -33,6 +33,12 @@ option(INSTALL_PYTHON_SYMLINKS "Install Python .py files as symlinks" OFF) # function(set_python_install_path) set(python_install_options "--record;${CMAKE_BINARY_DIR}/python_install_manifest.txt") + + execute_process(COMMAND ${PYTHON_EXECUTABLE} -c + "import sys; print('%d.%d'%(sys.version_info.major,sys.version_info.minor))" + OUTPUT_VARIABLE PY_VERSION) + string(STRIP ${PY_VERSION} PY_VERSION) + if(siconos_python_install STREQUAL "user") # --- Case 1 : siconos_python_install=user --- # In that case, we need to find the user path. It depends on the operation system @@ -60,8 +66,10 @@ function(set_python_install_path) # which probably means that python is run using virtualenv # Command to find 'global' site-packages # default path will probably be ok --> no options + # note: the '.replace()' is to work around the following Debian/Ubuntu bug: + # https://bugs.launchpad.net/ubuntu/+source/python3-defaults/+bug/1408092 set(GET_SITE_PACKAGE - "from distutils.sysconfig import get_python_lib; print(get_python_lib())") + "from distutils.sysconfig import get_python_lib; print(get_python_lib().replace('/python3/','/python${PY_VERSION}/'))") execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "${GET_SITE_PACKAGE}" OUTPUT_VARIABLE PY_INSTALL_DIR) endif() @@ -70,9 +78,11 @@ function(set_python_install_path) elseif(siconos_python_install STREQUAL prefix) # Case 2 : siconos_python_install=prefix # we use CMAKE_INSTALL_PREFIX as the path for python install + # note: the '.replace()' is to work around the following Debian/Ubuntu bug: + # https://bugs.launchpad.net/ubuntu/+source/python3-defaults/+bug/1408092 list(APPEND python_install_options --prefix=${CMAKE_INSTALL_PREFIX}) set(GET_SITE_PACKAGE - "from distutils.sysconfig import get_python_lib; print(get_python_lib(prefix='${CMAKE_INSTALL_PREFIX}'))") + "from distutils.sysconfig import get_python_lib; print(get_python_lib(prefix='${CMAKE_INSTALL_PREFIX}').replace('/python3/','/python${PY_VERSION}/'))") 
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "${GET_SITE_PACKAGE}" OUTPUT_VARIABLE PY_INSTALL_DIR) else()
framework/st_things: Change local stack variable to static The reference of this stack object is passed to new thread as parameter, but it may be freed before dereference in the new thread.
@@ -301,7 +301,8 @@ static void *__attribute__((optimize("O0"))) t_things_wifi_join_loop(void *args) void things_wifi_sta_connected(wifi_manager_result_e res) { - bool is_wifi_retry_connect = false; + static bool is_wifi_retry_connect; + is_wifi_retry_connect = false; if (res == WIFI_MANAGER_FAIL) { THINGS_LOG_E(TAG, "Failed to connect to the AP");
benchmark/rresamp: scaling number of trials by Q, adding more benches
@@ -31,7 +31,8 @@ void rresamp_crcf_bench(struct rusage * _start, unsigned int _P, unsigned int _Q) { - unsigned long int i; + // adjust number of iterations: cycles/trial ~ 160 + 50 Q + *_num_iterations /= (160 + 50*_Q); rresamp_crcf q = rresamp_crcf_create_default(_P,_Q); @@ -40,6 +41,7 @@ void rresamp_crcf_bench(struct rusage * _start, float complex * buf = (float complex*) malloc(buf_len*sizeof(float complex)); // initialize buffer + unsigned long int i; for (i=0; i<buf_len; i++) buf[i] = i==0 ? 1.0 : 0.0; @@ -75,4 +77,6 @@ void benchmark_rresamp_crcf_P17_Q8 RRESAMP_CRCF_BENCHMARK_API(17, 8) void benchmark_rresamp_crcf_P17_Q16 RRESAMP_CRCF_BENCHMARK_API(17, 16) void benchmark_rresamp_crcf_P17_Q32 RRESAMP_CRCF_BENCHMARK_API(17, 32) void benchmark_rresamp_crcf_P17_Q64 RRESAMP_CRCF_BENCHMARK_API(17, 64) +void benchmark_rresamp_crcf_P17_Q128 RRESAMP_CRCF_BENCHMARK_API(17, 128) +void benchmark_rresamp_crcf_P17_Q256 RRESAMP_CRCF_BENCHMARK_API(17, 256)
Fix no-ec The cmp_protect_test cert chain tests use some EC certs which breaks in a no-ec build. The fix is to just skip those tests if no-ec has been configured.
@@ -294,6 +294,8 @@ static int test_MSG_add_extraCerts(void) return result; } +#ifndef OPENSSL_NO_EC +/* The cert chain tests use EC certs so we skip them in no-ec builds */ static int execute_cmp_build_cert_chain_test(CMP_PROTECT_TEST_FIXTURE *fixture) { STACK_OF(X509) *result = NULL; @@ -372,6 +374,7 @@ static int test_cmp_build_cert_chain_no_certs(void) EXECUTE_TEST(execute_cmp_build_cert_chain_test, tear_down); return result; } +#endif /* OPENSSL_NO_EC */ static int execute_X509_STORE_test(CMP_PROTECT_TEST_FIXTURE *fixture) { @@ -505,10 +508,12 @@ int setup_tests(void) ADD_TEST(test_MSG_add_extraCerts); +#ifndef OPENSSL_NO_EC ADD_TEST(test_cmp_build_cert_chain); ADD_TEST(test_cmp_build_cert_chain_missing_root); ADD_TEST(test_cmp_build_cert_chain_missing_intermediate); ADD_TEST(test_cmp_build_cert_chain_no_certs); +#endif ADD_TEST(test_X509_STORE); ADD_TEST(test_X509_STORE_only_self_signed);
evp: fix coverity & unchecked return values
@@ -57,11 +57,13 @@ RSA *EVP_PKEY_get1_RSA(EVP_PKEY *pkey) #ifndef OPENSSL_NO_EC int EVP_PKEY_set1_EC_KEY(EVP_PKEY *pkey, EC_KEY *key) { - int ret = EVP_PKEY_assign_EC_KEY(pkey, key); - - if (ret) - EC_KEY_up_ref(key); - return ret; + if (!EC_KEY_up_ref(key)) + return 0; + if (!EVP_PKEY_assign_EC_KEY(pkey, key)) { + EC_KEY_free(key); + return 0; + } + return 1; } EC_KEY *evp_pkey_get0_EC_KEY_int(const EVP_PKEY *pkey) @@ -82,8 +84,8 @@ EC_KEY *EVP_PKEY_get1_EC_KEY(EVP_PKEY *pkey) { EC_KEY *ret = evp_pkey_get0_EC_KEY_int(pkey); - if (ret != NULL) - EC_KEY_up_ref(ret); + if (ret != NULL && !EC_KEY_up_ref(ret)) + ret = NULL; return ret; } #endif /* OPENSSL_NO_EC */
Update debug code for fibers.
@@ -612,7 +612,9 @@ static void *op_lookup[255] = { nextfiber = dst_unwrap_fiber(fiberval); switch (nextfiber->status) { default: - vm_throw("expected pending or new fiber"); + vm_throw("expected pending, new, or debug fiber"); + case DST_FIBER_DEBUG: + break; case DST_FIBER_NEW: { dst_fiber_push(nextfiber, val); @@ -632,16 +634,16 @@ static void *op_lookup[255] = { retreg = dst_run(nextfiber); switch (nextfiber->status) { case DST_FIBER_DEBUG: - if (fiber->flags & DST_FIBER_MASK_DEBUG) goto vm_debug; + if (nextfiber->flags & DST_FIBER_MASK_DEBUG) goto vm_debug; fiber->child = NULL; break; case DST_FIBER_ERROR: - if (fiber->flags & DST_FIBER_MASK_ERROR) goto vm_error; + if (nextfiber->flags & DST_FIBER_MASK_ERROR) goto vm_error; fiber->child = NULL; break; default: fiber->child = NULL; - if (fiber->flags & DST_FIBER_MASK_RETURN) goto vm_return_root; + if (nextfiber->flags & DST_FIBER_MASK_RETURN) goto vm_return_root; break; } stack[oparg(1, 0xFF)] = retreg; @@ -774,7 +776,9 @@ static void *op_lookup[255] = { Dst dst_resume(DstFiber *fiber, int32_t argn, const Dst *argv) { switch (fiber->status) { default: - dst_exit("expected new or pending or fiber"); + dst_exit("expected new, pending or debug fiber"); + case DST_FIBER_DEBUG: + break; case DST_FIBER_NEW: { int32_t i;
Add static qualifier to s_light_sleep_wakeup variable to prevent it from being global. Closes
@@ -81,7 +81,9 @@ static sleep_config_t s_config = { .wakeup_triggers = 0 }; -bool s_light_sleep_wakeup = false; +/* Internal variable used to track if light sleep wakeup sources are to be + expected when determining wakeup cause. */ +static bool s_light_sleep_wakeup = false; /* Updating RTC_MEMORY_CRC_REG register via set_rtc_memory_crc() is not thread-safe. */
fixes the initial value
@@ -711,7 +711,7 @@ static void gen_enum(FILE *fp, struct jc_enum *e) char *t = ns_to_symbol_name(e->name); fprintf(fp, "enum %s {\n", t); - int i = 0, prev_value; + int i = 0, prev_value = -1; for (i = 0; e->items && e->items[i]; i++) { struct jc_item * item = e->items[i];
muxread: fix 0 offset of NULL pointer
@@ -100,7 +100,7 @@ static int MuxImageParse(const WebPChunk* const chunk, int copy_data, WebPMuxImage* const wpi) { const uint8_t* bytes = chunk->data_.bytes; size_t size = chunk->data_.size; - const uint8_t* const last = bytes + size; + const uint8_t* const last = (bytes == NULL) ? NULL : bytes + size; WebPChunk subchunk; size_t subchunk_size; WebPChunk** unknown_chunk_list = &wpi->unknown_;
Try fixing -lm in cmake
@@ -9,13 +9,17 @@ find_package(CMocka REQUIRED) message(STATUS "CMocka vars: ${CMOCKA_LIBRARIES} ${CMOCKA_INCLUDE_DIR}") +find_library(MATH_LIBRARY m) + foreach (TEST ${TESTS}) string(REGEX REPLACE ".*/([^/]+).c" "\\1" NAME ${TEST}) message("Adding test ${NAME}") add_executable(${NAME} "${NAME}.c" assertions.c stream_expectations.c) target_link_libraries(${NAME} ${CMOCKA_LIBRARIES}) target_link_libraries(${NAME} cbor) - target_link_libraries(${name} PRIVATE m) + if(MATH_LIBRARY) + target_link_libraries(${NAME} ${MATH_LIBRARY}) + endif() target_include_directories(${NAME} PUBLIC ${CMOCKA_INCLUDE_DIR}) # See https://stackoverflow.com/a/10824578/499521 ADD_TEST(ctest_build_test_${NAME}
examples/nettest: Fix some printf output
@@ -108,7 +108,7 @@ void nettest_client(void) memcpy(server.sin6_addr.s6_addr16, g_nettestserver_ipv6, 8 * sizeof(uint16_t)); addrlen = sizeof(struct sockaddr_in6); - printf("Connecting to IPv6 Address: %04x:04x:04x:04x:04x:04x:04x:04x\n", + printf("Connecting to IPv6 Address: %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", g_nettestserver_ipv6[0], g_nettestserver_ipv6[1], g_nettestserver_ipv6[2], g_nettestserver_ipv6[3], g_nettestserver_ipv6[4], g_nettestserver_ipv6[5], g_nettestserver_ipv6[6], g_nettestserver_ipv6[7]); #else
khan: wip naive scry implementation As of yet untested since I don't yet know how to construct a scry path from Haskell. Probably broken; crashes the process on some input.
@@ -168,6 +168,21 @@ _khan_moor_bail(void* ptr_v, ssize_t err_i, const c3_c* err_c) } } +/* _khan_peek_cb(): handle scry result: send immediately. +*/ +static void +_khan_peek_cb(void* ptr_v, u3_noun nun) +{ + u3_chan* can_u = (u3_chan*)ptr_v; + u3_khan* kan_u = can_u->san_u->kan_u; + c3_y* byt_y; + c3_d len_d; + + u3s_jam_xeno(nun, &len_d, &byt_y); + u3_newt_send((u3_mojo*)&can_u->mor_u, len_d, byt_y); + u3z(nun); +} + /* _khan_moor_poke(): called on message read from u3_moor. */ static void @@ -177,7 +192,6 @@ _khan_moor_poke(void* ptr_v, c3_d len_d, c3_y* byt_y) u3_noun i_jar, t_jar; u3_chan* can_u = (u3_chan*)ptr_v; u3_khan* kan_u = can_u->san_u->kan_u; - u3_noun wir; u3_noun cad; jar = u3s_cue_xeno_with(kan_u->sil_u, len_d, byt_y); @@ -189,16 +203,17 @@ _khan_moor_poke(void* ptr_v, c3_d len_d, c3_y* byt_y) can_u->mor_u.bal_f(can_u, -2, "jar-atom"); } else { - wir = u3nq(c3__khan, - u3dc("scot", c3__uv, kan_u->sev_l), - u3dc("scot", c3__ud, can_u->coq_l), - u3_nul); switch (i_jar) { default: { - can_u->mor_u.bal_f(can_u, -2, "i.jar-unknown"); + can_u->mor_u.bal_f(can_u, -3, "i.jar-unknown"); break; } case c3__fyrd: { + u3_noun wir = u3nq(c3__khan, + u3dc("scot", c3__uv, kan_u->sev_l), + u3dc("scot", c3__ud, can_u->coq_l), + u3_nul); + u3_auto_peer( u3_auto_plan(&kan_u->car_u, u3_ovum_init(0, c3__k, wir, jar)), @@ -206,11 +221,17 @@ _khan_moor_poke(void* ptr_v, c3_d len_d, c3_y* byt_y) break; } case c3__scry: { - // TODO implement + u3_noun ful = u3k(t_jar); + + // TODO: handle runtime-specific namespace queries. dispatch on ful. + // + u3_pier_peek(kan_u->car_u.pir_u, u3_nul, ful, can_u, _khan_peek_cb); + u3z(jar); break; } case c3__move: { // TODO implement + u3z(jar); break; } }