message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
klocwork error issue | @@ -199,6 +199,7 @@ DumpDebugCommand(
else {
PRINTER_SET_MSG(pPrinterCtx, ReturnCode, L"Failed to dump FW Debug logs to file (" FORMAT_STR L")\n", pDumpUserPath);
}
+ FREE_POOL_SAFE(pDumpUserPath);
}
else {
PRINTER_SET_MSG(pPrinterCtx, ReturnCode, L"Successfully dumped FW Debug logs to file (" FORMAT_STR L"). (%lu) MiB were written.\n",
@@ -249,6 +250,9 @@ DumpDebugCommand(
decode_nlog_binary(pCmd, decoded_file_name, pDebugBuffer, BytesWritten, dict_version, dict_head);
}
}
+ else {
+ FREE_POOL_SAFE(pDumpUserPath);
+ }
}
Finish:
|
Allow to *just* print key and IV of unstreamable modes when no input files | @@ -334,7 +334,7 @@ int enc_main(int argc, char **argv)
buff = app_malloc(EVP_ENCODE_LENGTH(bsize), "evp buffer");
if (infile == NULL) {
- if (!streamable) {
+ if (!streamable && printkey != 2) { /* if just print key and exit, it's ok */
BIO_printf(bio_err, "Unstreamable cipher mode\n");
goto end;
}
|
Fix sanitizer builds | @@ -642,7 +642,7 @@ tasks:
params:
working_dir: "mongoc"
script: |
- PATH=/usr/lib/llvm-3.8/bin:$PATH DEBUG=1 CC='clang-3.8' MARCH='${MARCH}' CHECK_LOG=yes CFLAGS='-fsanitize=address' sh .evergreen/compile.sh
+ PATH=/usr/lib/llvm-3.8/bin:$PATH DEBUG=1 CC='clang-3.8' MARCH='${MARCH}' CHECK_LOG=yes CFLAGS='-fsanitize=address' CXXFLAGS="$CFLAGS" sh .evergreen/compile.sh
- name: debug-compile-sanitizer-undefined
commands:
@@ -651,7 +651,7 @@ tasks:
params:
working_dir: "mongoc"
script: |
- PATH=/usr/lib/llvm-3.8/bin:$PATH DEBUG=1 CC='clang-3.8' MARCH='${MARCH}' CHECK_LOG=yes CFLAGS='-fsanitize=undefined' sh .evergreen/compile.sh
+ PATH=/usr/lib/llvm-3.8/bin:$PATH DEBUG=1 CC='clang-3.8' MARCH='${MARCH}' CHECK_LOG=yes CFLAGS='-fsanitize=undefined' CXXFLAGS="$CFLAGS" sh .evergreen/compile.sh
- name: debug-compile-scan-build
tags: ["debug-compile", "special", "scan-build", "clang"]
|
Cleanup after myself, register with psk for kdf135_ikev1 | @@ -1505,7 +1505,7 @@ static int enable_kdf (ACVP_CTX *ctx) {
CHECK_ENABLE_CAP_RV(rv);
rv = acvp_enable_kdf135_ikev1_cap_param(ctx, ACVP_KDF_IKEv1_HASH_ALG, ACVP_KDF135_SHA1);
CHECK_ENABLE_CAP_RV(rv);
- rv = acvp_enable_kdf135_ikev1_cap_param(ctx, ACVP_KDF_IKEv1_AUTH_METHOD, ACVP_KDF135_IKEV1_AMETH_DSA);
+ rv = acvp_enable_kdf135_ikev1_cap_param(ctx, ACVP_KDF_IKEv1_AUTH_METHOD, ACVP_KDF135_IKEV1_AMETH_PSK);
CHECK_ENABLE_CAP_RV(rv);
rv = acvp_enable_kdf135_x963_cap(ctx, &app_kdf135_x963_handler);
|
Updated Changelog for the latest release changes. | +Changes to GoAccess 1.5.2 - Tuesday, September 28, 2021
+
+ - Added .avi to the list of static requests/extensions.
+ - Changed label from 'Init. Proc. Time' to 'Log Parsing Time'.
+ - Fixed issue where lengthy static-file extension wouldn't account certain
+ valid requests.
+ - Fixed possible buffer underflow when checking static-file extension.
+ - Fixed segfault when attempting to parse an invalid JSON log while using a
+ JSON log format.
+ - Fixed segfault when ignoring a status code and processing a line > '4096'
+ chars.
+
Changes to GoAccess 1.5.1 - Wednesday, June 30, 2021
- Changed official deb repo so it now builds '--with-getline' in order to
|
Actually detect network type in payout handler | @@ -196,10 +196,15 @@ namespace MiningCore.Blockchain.Monero
// update request
request.Destinations = page
.Where(x => x.Amount > 0)
- .Select(x => new TransferDestination
+ .Select(x =>
+ {
+ ExtractAddressAndPaymentId(x.Address, out var address, out var paymentId);
+
+ return new TransferDestination
{
- Address = x.Address,
+ Address = address,
Amount = (ulong) Math.Floor(x.Amount * MoneroConstants.SmallestUnit[poolConfig.Coin.Type])
+ };
}).ToArray();
logger.Info(() => $"[{LogCategory}] Page {i + 1}: Paying out {FormatAmount(page.Sum(x => x.Amount))} to {page.Length} addresses");
@@ -310,6 +315,9 @@ namespace MiningCore.Blockchain.Monero
walletDaemon = new DaemonClient(jsonSerializerSettings);
walletDaemon.Configure(walletDaemonEndpoints, MoneroConstants.DaemonRpcLocation);
+ // detect network
+ await GetNetworkTypeAsync();
+
// detect transfer_split support
var response = await walletDaemon.ExecuteCmdSingleAsync<TransferResponse>(MWC.TransferSplit);
walletSupportsTransferSplit = response.Error.Code != MoneroConstants.MoneroRpcMethodNotFound;
@@ -473,7 +481,7 @@ namespace MiningCore.Blockchain.Monero
{
ExtractAddressAndPaymentId(x.Address, out var address, out var paymentId);
- var hasPaymentId = !string.IsNullOrEmpty(paymentId);
+ var hasPaymentId = paymentId != null;
var isIntegratedAddress = false;
var addressIntegratedPrefix = LibCryptonote.DecodeIntegratedAddress(address);
|
Minor multihit glitch fix
Added all necessary codes to the attack boxes memorization, used together with the multihit glitch fix | @@ -20356,6 +20356,11 @@ void do_attack(entity *e)
self->modeldata.animation[current_follow_id]->attackone = self->animation->attackone;
}
ent_set_anim(self, current_follow_id, 0);
+
+ // Kratus (20-04-21) used by the multihit glitch memorization
+ self->attack_id_incoming4 = self->attack_id_incoming3;
+ self->attack_id_incoming3 = self->attack_id_incoming2;
+ self->attack_id_incoming2 = self->attack_id_incoming;
self->attack_id_incoming = current_attack_id;
}
@@ -20437,6 +20442,10 @@ void do_attack(entity *e)
//followed = 1; // quit loop, animation is changed
}
+ // Kratus (20-04-21) used by the multihit glitch memorization
+ self->attack_id_incoming4 = self->attack_id_incoming3;
+ self->attack_id_incoming3 = self->attack_id_incoming2;
+ self->attack_id_incoming2 = self->attack_id_incoming;
self->attack_id_incoming = current_attack_id;
// If hit, stop blocking.
|
Fix issue where extra destination markers get drawn when switching between connected scenes | @@ -274,6 +274,7 @@ class Connections extends Component {
y: y2,
direction,
eventId,
+ sceneId,
className: "Connections__Destination",
onMouseDown: this.onDragDestinationStart(
eventId,
|
[mod_openssl] issue error trace if < openssl 1.1.1 | @@ -2916,6 +2916,14 @@ SETDEFAULTS_FUNC(mod_openssl_set_defaults)
mod_openssl_merge_config(&p->defaults, cpv);
}
+ #if OPENSSL_VERSION_NUMBER < 0x10101000L \
+ && !defined(LIBRESSL_VERSION_NUMBER)
+ log_error(srv->errh, __FILE__, __LINE__, "SSL:"
+ "openssl library version is outdated and has reached end-of-life. "
+ "As of 1 Jan 2020, only openssl 1.1.1 and later continue to receive "
+ "security patches from openssl.org");
+ #endif
+
return mod_openssl_set_defaults_sockets(srv, p);
}
|
no more nest fails | =^ ban fox (kick hen)
[:(weld bin p.ban next) fox]
=/ fro=(list ship) (saxo-scry our)
- =^ bun fox zork:zank:(thaw fro):(ho:um i.neb)
+ =/ hoz (ho:um i.neb)
+ =^ bun fox zork:zank:(thaw:hoz fro)
$(neb t.neb, bin (weld p.bun bin))
::
++ wise :: wise:am
=/ seg (sein-scry her)
=^ pax diz (zuul:diz now seg [%back cop dam ~s0])
=/ fro=(list ship) (saxo-scry our)
- +>(+> (busk(diz (wast:diz ryn)) (xong fro) pax))
+ ..cock(+> (busk(diz (wast:diz ryn)) (xong fro) pax))
::
++ deer :: deer:la:ho:um:am
|= [cha=path num=@ud dut=(unit)] :: interpret message
|
Tightens readme | @@ -81,11 +81,10 @@ make install
# Bring in greenplum environment into your running shell
source /usr/local/gpdb/greenplum_path.sh
-# Start demo cluster (gpdemo-env.sh is created which contain
-# __PGPORT__ and __MASTER_DATA_DIRECTORY__ values)
-cd gpAux/gpdemo
+# Start demo cluster
make create-demo-cluster
-source gpdemo-env.sh
+# (gpdemo-env.sh contains __PGPORT__ and __MASTER_DATA_DIRECTORY__ values)
+source gpAux/gpdemo/gpdemo-env.sh
```
Compilation can be sped up with parallelization. Instead of `make`, consider:
|
quic: support the quictrace_sent probe | @@ -104,6 +104,7 @@ struct quic_event_t {
u64 stream_id;
u64 packet_num;
u64 packet_len;
+ u8 packet_type;
u32 ack_only;
u64 largest_acked;
u64 bytes_acked;
@@ -463,6 +464,26 @@ int trace_stream_data_blocked_receive(struct pt_regs *ctx) {
return 0;
}
+
+int trace_quictrace_sent(struct pt_regs *ctx) {
+ void *pos = NULL;
+ struct quic_event_t event = {};
+ struct st_quicly_conn_t conn = {};
+ sprintf(event.type, "quictrace_sent");
+
+ bpf_usdt_readarg(1, ctx, &pos);
+ bpf_probe_read(&conn, sizeof(conn), pos);
+ event.master_conn_id = conn.master_id;
+ bpf_usdt_readarg(2, ctx, &event.at);
+ bpf_usdt_readarg(3, ctx, &event.packet_num);
+ bpf_usdt_readarg(4, ctx, &event.packet_len);
+ bpf_usdt_readarg(5, ctx, &event.packet_type);
+
+ if (events.perf_submit(ctx, &event, sizeof(event)) < 0)
+ bpf_trace_printk("failed to perf_submit\\n");
+
+ return 0;
+}
"""
def handle_req_line(cpu, data, size):
@@ -530,6 +551,9 @@ def handle_quic_event(cpu, data, size):
elif line.type == "stream_data_blocked_receive":
for k in ["stream_id", "limit"]:
rv[k] = getattr(line, k)
+ elif line.type == "quictrace_sent":
+ for k in ["packet_num", "packet_len", "packet_type"]:
+ rv[k] = getattr(line, k)
print(json.dumps(rv))
@@ -602,6 +626,7 @@ if sys.argv[1] == "quic":
u.enable_probe(probe="streams_blocked_receive", fn_name="trace_streams_blocked_receive")
u.enable_probe(probe="data_blocked_receive", fn_name="trace_data_blocked_receive")
u.enable_probe(probe="stream_data_blocked_receive", fn_name="trace_stream_data_blocked_receive")
+ u.enable_probe(probe="quictrace_sent", fn_name="trace_quictrace_sent")
b = BPF(text=quic_bpf, usdt_contexts=[u])
else:
u.enable_probe(probe="receive_request", fn_name="trace_receive_req")
|
hv: refine strncpy_s to only one exit point
Fix procedure has more than one exit point
Acked-by: Eddie Dong | @@ -203,28 +203,28 @@ char *strncpy_s(char *d_arg, size_t dmax, const char *s_arg, size_t slen_arg)
{
const char *s = s_arg;
char *d = d_arg;
- char *dest_base;
+ char *pret;
size_t dest_avail;
uint64_t overlap_guard;
size_t slen = slen_arg;
if ((d == NULL) || (s == NULL)) {
pr_err("%s: invlaid src or dest buffer", __func__);
- return NULL;
+ pret = NULL;
+ } else {
+ pret = d_arg;
}
+ if (pret != NULL) {
if ((dmax == 0U) || (slen == 0U)) {
pr_err("%s: invlaid length of src or dest buffer", __func__);
- return NULL;
+ pret = NULL;
}
-
- if (d == s) {
- return d;
}
+ /* if d equal to s, just return d; else execute the below code */
+ if ((pret != NULL) && (d != s)) {
overlap_guard = (uint64_t)((d > s) ? (d - s - 1) : (s - d - 1));
-
- dest_base = d;
dest_avail = dmax;
while (dest_avail > 0U) {
@@ -232,17 +232,21 @@ char *strncpy_s(char *d_arg, size_t dmax, const char *s_arg, size_t slen_arg)
pr_err("%s: overlap happened.", __func__);
d--;
*d = '\0';
- return NULL;
+ /* break out to return */
+ pret = NULL;
+ break;
}
if (slen == 0U) {
*d = '\0';
- return dest_base;
+ /* break out to return */
+ break;
}
*d = *s;
if (*d == '\0') {
- return dest_base;
+ /* break out to return */
+ break;
}
d++;
@@ -252,14 +256,15 @@ char *strncpy_s(char *d_arg, size_t dmax, const char *s_arg, size_t slen_arg)
overlap_guard--;
}
+ if (dest_avail == 0U) {
pr_err("%s: dest buffer has no enough space.", __func__);
- /*
- * to avoid a string that is not
- * null-terminated in dest buffer
- */
- dest_base[dmax - 1] = '\0';
- return NULL;
+ /* to avoid a string that is not null-terminated in dest buffer */
+ pret[dmax - 1] = '\0';
+ }
+ }
+
+ return pret;
}
/**
|
Update CMakeLists.txt
Add Network framework to unittest | @@ -108,7 +108,7 @@ else()
set (PLATFORM_LIBS "")
# Add flags for obtaining system UUID via IOKit
if (CMAKE_SYSTEM_NAME STREQUAL "Darwin")
- set (PLATFORM_LIBS "-framework CoreFoundation -framework IOKit -framework SystemConfiguration -framework Foundation")
+ set (PLATFORM_LIBS "-framework CoreFoundation -framework IOKit -framework SystemConfiguration -framework Foundation -framework Network")
if(BUILD_IOS)
set (PLATFORM_LIBS "${PLATFORM_LIBS} -framework UIKit")
endif()
|
Fix gfir coefficient sync using chip->GUI when gfir was not written before | @@ -44,6 +44,30 @@ void LMS7002M_RegistersMap::InitializeDefaultValues(const std::vector<const LMS7
mChannelB[addr + i + 0x0200].defaultValue = 0;
mChannelB[addr + i + 0x0200].value = 0;
}
+
+ //add GFIRS
+ std::vector<std::pair<uint16_t, uint16_t> > intervals = {
+ {0x0280, 0x02A7},
+ {0x02C0, 0x02E7},
+ {0x0300, 0x0327},
+ {0x0340, 0x0367},
+ {0x0380, 0x03A7},
+ };
+ for (const auto &range : intervals)
+ {
+ for(int i=range.first; i<=range.second; ++i)
+ {
+ mChannelA[i].defaultValue = 0;
+ mChannelA[i].value = 0;
+ mChannelB[i].defaultValue = 0;
+ mChannelB[i].value = 0;
+
+ mChannelA[i+0x0200].defaultValue = 0;
+ mChannelA[i+0x0200].value = 0;
+ mChannelB[i+0x0200].defaultValue = 0;
+ mChannelB[i+0x0200].value = 0;
+ }
+ }
}
void LMS7002M_RegistersMap::SetValue(uint8_t channel, const uint16_t address, const uint16_t value)
|
updated info about wlandump-ng (no longer understatement) | @@ -22,8 +22,8 @@ Detailed description
| Tool | Description |
| ------------- | -----------------------------------------------------------------------------------------------------|
-| wlandump-ng | Small, fast and simple but powerfull WLAN scanner |
-| wlanresponse | Extreme fast deauthentication/authentication/response tool |
+| wlandump-ng | Small, fast and powerfull deauthentication/authentication/response tool |
+| wlanresponse | Extreme fast deauthentication/authentication/response tool (unattended use on raspberry pi's) |
| wlanrcascan | Small, fast and simple passive WLAN channel assignment scanner (status output) |
| pioff | Turns Raspberry Pi off via GPIO switch |
| wlancapinfo | Shows info of pcap file |
|
format: fix js format | @@ -31,12 +31,14 @@ getVersions()
error(`are you sure you have libelektra and kdb installed?`);
process.exit(1);
} else {
- const { major, minor, micro } = versions.elektra
- const versionSupported = major >= 0 && minor >= 9 && micro >= 0
+ const { major, minor, micro } = versions.elektra;
+ const versionSupported = major >= 0 && minor >= 9 && micro >= 0;
if (!versionSupported) {
- error(`you are running an old libelektra version, which is not supported`)
- error(`please upgrade to libelektra 0.9.0 or higher`)
- process.exit(1)
+ error(
+ `you are running an old libelektra version, which is not supported`
+ );
+ error(`please upgrade to libelektra 0.9.0 or higher`);
+ process.exit(1);
}
return getInstances() // make sure yajl is installed
.then(() => {
|
odissey: extend periodic stats | @@ -71,17 +71,20 @@ od_periodic_stats(od_router_t *router)
{
int stream_count = 0;
int stream_count_allocated = 0;
+ int stream_total_allocated = 0;
shapito_cache_stat(&instance->stream_cache, &stream_count,
- &stream_count_allocated);
-
+ &stream_count_allocated, &stream_total_allocated);
int count_machine = 0;
int count_coroutine = 0;
int count_coroutine_cache = 0;
machinarium_stat(&count_machine, &count_coroutine,
&count_coroutine_cache);
od_log(&instance->logger, "stats", NULL, NULL,
- "stream cache: (%d allocated, %d cached), coroutines: (%d active, %d cached)",
+ "clients %d, stream cache (%d:%d allocated, %d cached), "
+ "coroutines (%d active, %d cached)",
+ router->clients,
stream_count_allocated,
+ stream_total_allocated,
stream_count,
count_coroutine,
count_coroutine_cache);
|
nrf: Use --gc-sections to reduce code size
This saves about 6-7kB. | @@ -65,7 +65,7 @@ NRF_DEFINES += -DCONFIG_GPIO_AS_PINRESET
CFLAGS_CORTEX_M = -mthumb -mabi=aapcs -fsingle-precision-constant -Wdouble-promotion
-CFLAGS_MCU_m4 = $(CFLAGS_CORTEX_M) -mtune=cortex-m4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections
+CFLAGS_MCU_m4 = $(CFLAGS_CORTEX_M) -mtune=cortex-m4 -mcpu=cortex-m4 -mfpu=fpv4-sp-d16 -mfloat-abi=hard
CFLAGS_MCU_m0 = $(CFLAGS_CORTEX_M) --short-enums -mtune=cortex-m0 -mcpu=cortex-m0 -mfloat-abi=soft -fno-builtin
@@ -74,12 +74,14 @@ CFLAGS += $(CFLAGS_MCU_$(MCU_SERIES))
CFLAGS += $(INC) -Wall -Werror -ansi -std=gnu99 -nostdlib $(COPT) $(NRF_DEFINES) $(CFLAGS_MOD)
CFLAGS += -fno-strict-aliasing
CFLAGS += -fstack-usage
+CFLAGS += -fdata-sections -ffunction-sections
CFLAGS += -Iboards/$(BOARD)
CFLAGS += -DNRF5_HAL_H='<$(MCU_VARIANT)_hal.h>'
LDFLAGS = $(CFLAGS)
LDFLAGS += -Xlinker -Map=$(@:.elf=.map)
LDFLAGS += -mthumb -mabi=aapcs -T $(LD_FILE) -L boards/
+LDFLAGS += -Wl,--gc-sections
#Debugging/Optimization
ifeq ($(DEBUG), 1)
|
Update macros.
Use TLS_MAX_SIGALGCNT for the maximum number of entries in the
signature algorithms array.
Use TLS_MAX_SIGSTRING_LEN for the maxiumum length of each signature
component instead of a magic number. | @@ -1715,11 +1715,12 @@ int SSL_get_shared_sigalgs(SSL *s, int idx,
return (int)s->cert->shared_sigalgslen;
}
-#define MAX_SIGALGLEN (TLSEXT_hash_num * TLSEXT_signature_num * 2)
+/* Maximum possible number of unique entries in sigalgs array */
+#define TLS_MAX_SIGALGCNT (OSSL_NELEM(sigalg_lookup_tbl) * 2)
typedef struct {
size_t sigalgcnt;
- int sigalgs[MAX_SIGALGLEN];
+ int sigalgs[TLS_MAX_SIGALGCNT];
} sig_cb_st;
static void get_sigorhash(int *psig, int *phash, const char *str)
@@ -1738,16 +1739,18 @@ static void get_sigorhash(int *psig, int *phash, const char *str)
*phash = OBJ_ln2nid(str);
}
}
+/* Maximum length of a signature algorithm string component */
+#define TLS_MAX_SIGSTRING_LEN 40
static int sig_cb(const char *elem, int len, void *arg)
{
sig_cb_st *sarg = arg;
size_t i;
- char etmp[40], *p;
+ char etmp[TLS_MAX_SIGSTRING_LEN], *p;
int sig_alg = NID_undef, hash_alg = NID_undef;
if (elem == NULL)
return 0;
- if (sarg->sigalgcnt == MAX_SIGALGLEN)
+ if (sarg->sigalgcnt == TLS_MAX_SIGALGCNT)
return 0;
if (len > (int)(sizeof(etmp) - 1))
return 0;
|
libhfuzz: make __sanitizer_cov_trace_const_cmp* symbols weak aliases temporarily for Darwin | @@ -140,16 +140,20 @@ ATTRIBUTE_X86_REQUIRE_SSE42 void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint6
/*
* Const versions of trace_cmp, we don't use any special handling for these (for
- * now)
+ * now).
+ *
+ * TODO: This should be a non-weak alias (a regular function), so it can overload symbols provided
+ * in lib*san, but Darwin doesn't support them:
+ * https://github.com/google/honggfuzz/issues/176#issuecomment-353809324
*/
void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2)
- __attribute__((alias("__sanitizer_cov_trace_cmp1")));
+ __attribute__((weak, alias("__sanitizer_cov_trace_cmp1")));
void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2)
- __attribute__((alias("__sanitizer_cov_trace_cmp2")));
+ __attribute__((weak, alias("__sanitizer_cov_trace_cmp2")));
void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2)
- __attribute__((alias("__sanitizer_cov_trace_cmp4")));
+ __attribute__((weak, alias("__sanitizer_cov_trace_cmp4")));
void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2)
- __attribute__((alias("__sanitizer_cov_trace_cmp8")));
+ __attribute__((weak, alias("__sanitizer_cov_trace_cmp8")));
/*
* Cases[0] is number of comparison entries
|
Fix issue loading incorrect sprites for some actors | @@ -66,10 +66,12 @@ void MoveActors_b()
sprites[k].pos.x = screen_x;
sprites[k].pos.y = screen_y;
+ if(actors[a].sprite_type != SPRITE_STATIC) {
+
// Increase frame based on facing direction
- if (IS_NEG(actors[i].dir.y))
+ if (IS_NEG(actors[a].dir.y))
{
- fo = 1 + (actors[i].sprite_type == SPRITE_ACTOR_ANIMATED);
+ fo = 1 + (actors[a].sprite_type == SPRITE_ACTOR_ANIMATED);
if (sprites[k].frame_offset != fo)
{
sprites[k].frame_offset = fo;
@@ -77,16 +79,16 @@ void MoveActors_b()
sprites[k].rerender = TRUE;
}
}
- else if (actors[i].dir.x != 0)
+ else if (actors[a].dir.x != 0)
{
- fo = 2 + MUL_2(actors[i].sprite_type == SPRITE_ACTOR_ANIMATED);
+ fo = 2 + MUL_2(actors[a].sprite_type == SPRITE_ACTOR_ANIMATED);
if (sprites[k].frame_offset != fo)
{
sprites[k].frame_offset = fo;
sprites[k].rerender = TRUE;
}
// Facing left so flip sprite
- if (IS_NEG(actors[i].dir.x))
+ if (IS_NEG(actors[a].dir.x))
{
flip = TRUE;
if (!sprites[k].flip)
@@ -114,6 +116,7 @@ void MoveActors_b()
sprites[k].rerender = TRUE;
}
}
+ }
// Check if actor is off screen
if (((UINT16)(screen_x + 32u) >= SCREENWIDTH_PLUS_64) || ((UINT16)(screen_y + 32u) >= SCREENHEIGHT_PLUS_64))
|
Do not apply empty model | @@ -21,6 +21,9 @@ namespace NCB::NModelEvaluation {
const size_t blockSize = Min(FORMULA_EVALUATION_BLOCK_SIZE, docCount);
auto calcTrees = GetCalcTreesFunction(trees, blockSize);
std::fill(results.begin(), results.end(), 0.0);
+ if (trees.GetTreeCount() == 0) {
+ return;
+ }
TVector<TCalcerIndexType> indexesVec(blockSize);
TEvalResultProcessor resultProcessor(
docCount,
|
netbench: avoid panic on ESHUTDOWN | @@ -52,7 +52,7 @@ void ServerWorker(rt::UdpConn *c) {
// Receive a network response.
ssize_t ret = c->Read(&buf, sizeof(buf));
if (ret <= 0 || ret > static_cast<ssize_t>(sizeof(buf))) {
- if (ret == 0) break;
+ if (ret == -ESHUTDOWN) break;
panic("udp read failed, ret = %ld", ret);
}
@@ -170,7 +170,7 @@ std::vector<double> PoissonWorker(rt::UdpConn *c, double req_rate,
while (true) {
ssize_t ret = c->Read(rbuf, sizeof(rbuf));
if (ret != static_cast<ssize_t>(sizeof(rbuf))) {
- if (ret == 0) break;
+ if (ret == -ESHUTDOWN) break;
panic("udp read failed, ret = %ld", ret);
}
|
NN: Define ARM_MATH_DSP and ARM_NN_TRUNCATE. | @@ -75,8 +75,10 @@ endif
# Compiler Flags
include $(OMV_BOARD_CONFIG_DIR)/omv_boardconfig.mk
CFLAGS += -std=gnu99 -Wall -Werror -Warray-bounds -mthumb -nostartfiles -mabi=aapcs-linux -fdata-sections -ffunction-sections
-CFLAGS += -D$(MCU) -D$(CFLAGS_MCU) -D$(ARM_MATH) -fsingle-precision-constant -Wdouble-promotion -mcpu=$(CPU) -mtune=$(CPU) -mfpu=$(FPU) -mfloat-abi=hard
-CFLAGS += -D__FPU_PRESENT=1 -D__VFP_FP__ -DUSE_USB_FS -DUSE_DEVICE_MODE -DUSE_USB_OTG_ID=0 -DHSE_VALUE=12000000 -D$(TARGET) -DSTM32_HAL_H=$(HAL_INC) -DVECT_TAB_OFFSET=$(VECT_TAB_OFFSET) -DMAIN_APP_ADDR=$(MAIN_APP_ADDR)
+CFLAGS += -D$(MCU) -D$(CFLAGS_MCU) -D$(ARM_MATH) -DARM_MATH_DSP -DARM_NN_TRUNCATE\
+ -fsingle-precision-constant -Wdouble-promotion -mcpu=$(CPU) -mtune=$(CPU) -mfpu=$(FPU) -mfloat-abi=hard
+CFLAGS += -D__FPU_PRESENT=1 -D__VFP_FP__ -DUSE_USB_FS -DUSE_DEVICE_MODE -DUSE_USB_OTG_ID=0 -DHSE_VALUE=12000000\
+ -D$(TARGET) -DSTM32_HAL_H=$(HAL_INC) -DVECT_TAB_OFFSET=$(VECT_TAB_OFFSET) -DMAIN_APP_ADDR=$(MAIN_APP_ADDR)
CFLAGS += -I. -Iinclude
CFLAGS += -I$(TOP_DIR)/$(BOOT_DIR)/include/
|
Minor enhancements in build_linux.sh. | -#!/bin/bash
+#!/bin/sh
set -e
SCRIPT_DIR=$( cd $(dirname $0); pwd -P)
@@ -9,7 +9,7 @@ mkdir -p ${DIST_DIR}
# Docker image repository.
REPOSITORY="tinyspline"
-STRETCH_SETUP_CMDS=$(cat << END
+APT_GET_CMDS=$(cat << END
RUN apt-get update && apt-get install -y --no-install-recommends cmake swig
COPY . /tinyspline
WORKDIR /tinyspline
@@ -28,7 +28,7 @@ COPY_ARTIFACTS_AND_DELETE() {
NAME="misc"
docker build -t "${REPOSITORY}:${NAME}" -f - ${ROOT_DIR} <<-END
FROM buildpack-deps:stretch
- ${STRETCH_SETUP_CMDS}
+ ${APT_GET_CMDS}
RUN apt-get install -y --no-install-recommends \
mono-mcs nuget \
dub \
@@ -40,9 +40,9 @@ docker run --name "${NAME}" "${REPOSITORY}:${NAME}" \
-DTINYSPLINE_ENABLE_DLANG=True \
-DTINYSPLINE_ENABLE_JAVA=True . && \
cmake --build . --target tinysplinecsharp && \
- nuget pack && mv ./*.nupkg dist/ && \
- dub build && tar cJf dist/tinysplinedlang.linux-x86_64.tar.xz dub && \
- mvn package && mv ./target/*.jar dist/"
+ nuget pack && mv ./*.nupkg dist && \
+ dub build && tar cJf dist/tinysplinedlang.tar.xz dub && \
+ mvn package && mv ./target/*.jar dist"
COPY_ARTIFACTS_AND_DELETE ${NAME}
##################################### Lua #####################################
@@ -50,7 +50,7 @@ BUILD_LUA() {
NAME="lua${1}"
docker build -t "${REPOSITORY}:${NAME}" -f - ${ROOT_DIR} <<-END
FROM buildpack-deps:stretch
- ${STRETCH_SETUP_CMDS}
+ ${APT_GET_CMDS}
RUN apt-get install -y --no-install-recommends \
luarocks liblua${1}-dev
END
@@ -61,7 +61,9 @@ BUILD_LUA() {
COPY_ARTIFACTS_AND_DELETE ${NAME}
for file in "${DIST_DIR}/"*.rock
do
- mv "${file}" "${file}.${NAME}"
+ if [[ "${file}" != *"lua"* ]];then
+ mv $file ${file/.rock/.${NAME}.rock}
+ fi
done
}
@@ -74,7 +76,7 @@ BUILD_PYTHON() {
NAME="python${1}"
docker build -t "${REPOSITORY}:${NAME}" -f - ${ROOT_DIR} <<-END
FROM python:${1}-stretch
- ${STRETCH_SETUP_CMDS}
+ ${APT_GET_CMDS}
END
docker run --name "${NAME}" "${REPOSITORY}:${NAME}" \
/bin/bash -c "cmake -DTINYSPLINE_ENABLE_PYTHON=True . && \
|
Check psa_destroy_key() return in rsa_encrypt_wrap() | @@ -298,7 +298,10 @@ static int rsa_encrypt_wrap( void *ctx,
ret = 0;
cleanup:
- psa_destroy_key( key_id );
+ status = psa_destroy_key( key_id );
+ if( ret == 0 && status != PSA_SUCCESS )
+ ret = mbedtls_psa_err_translate_pk( status );
+
return( ret );
}
#else
|
misc: Add maintainer entry for ipfix-export
Type: improvement
Add Ole as a maintainer for the ipfix-export code | @@ -789,6 +789,11 @@ I: vat2
M: Ole Troan <[email protected]>
F: src/vat2/
+VNET Ipfix Export
+I: ipfix-export
+M: Ole Troan <[email protected]>
+F: src/vnet/ipfix-export/
+
THE REST
I: misc
M: vpp-dev Mailing List <[email protected]>
|
fix up rx_dsm | @@ -170,24 +170,24 @@ void rx_spektrum_bind(void) {
}
#endif
GPIO_InitTypeDef GPIO_InitStructure;
- GPIO_InitStructure.GPIO_Pin = SERIAL_RX_SPEKBIND_BINDTOOL_PIN;
+ GPIO_InitStructure.GPIO_Pin = usart_port_defs[RX_USART].rx_pin;
GPIO_InitStructure.GPIO_Mode = GPIO_Mode_OUT;
GPIO_InitStructure.GPIO_OType = GPIO_OType_PP;
GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL;
- GPIO_Init(SERIAL_RX_UART, &GPIO_InitStructure);
+ GPIO_Init(usart_port_defs[RX_USART].gpio_port, &GPIO_InitStructure);
// RX line, set high
- GPIO_SetBits(SERIAL_RX_UART, SERIAL_RX_SPEKBIND_BINDTOOL_PIN);
+ GPIO_SetBits(usart_port_defs[RX_USART].gpio_port, usart_port_defs[RX_USART].rx_pin);
// Bind window is around 20-140ms after powerup
delay(60000);
for (uint8_t i = 0; i < BIND_PULSES; i++) { // 9 pulses for internal dsmx 11ms, 3 pulses for internal dsm2 22ms
// RX line, drive low for 120us
- GPIO_ResetBits(SERIAL_RX_UART, SERIAL_RX_SPEKBIND_BINDTOOL_PIN);
+ GPIO_ResetBits(usart_port_defs[RX_USART].gpio_port, usart_port_defs[RX_USART].rx_pin);
delay(120);
// RX line, drive high for 120us
- GPIO_SetBits(SERIAL_RX_UART, SERIAL_RX_SPEKBIND_BINDTOOL_PIN);
+ GPIO_SetBits(usart_port_defs[RX_USART].gpio_port, usart_port_defs[RX_USART].rx_pin);
delay(120);
}
}
@@ -195,10 +195,7 @@ void rx_spektrum_bind(void) {
void rx_init(void) {
}
-void checkrx()
-
-{
-
+void checkrx() {
if (framestarted < 0) {
failsafe = 1; //kill motors while initializing usart (maybe not necessary)
dsm_init(); // toggles "framestarted = 0;" after initializing
|
hoon: leb/reb to kel/ker | ==
:: :: ++obje:de-json:html
++ obje :: object list
- %+ ifix [(wish leb) (wish reb)]
+ %+ ifix [(wish kel) (wish ker)]
(more (wish com) pear)
:: :: ++obox:de-json:html
++ obox :: object
|
osc operator = redesign | @@ -370,23 +370,23 @@ BEGIN_OPERATOR(midi)
END_OPERATOR
BEGIN_OPERATOR(osc)
- PORT(0, -2, IN | PARAM);
- PORT(0, -1, IN | PARAM);
- Usz len = index_of(PEEK(0, -1)) + 1;
+ PORT(0, 2, IN | PARAM);
+ PORT(0, 1, IN | PARAM);
+ Usz len = index_of(PEEK(0, 1)) + 1;
if (len > Oevent_osc_int_count)
len = Oevent_osc_int_count;
for (Usz i = 0; i < len; ++i) {
PORT(0, (Isz)i + 1, IN);
}
STOP_IF_NOT_BANGED;
- Glyph g = PEEK(0, -2);
+ Glyph g = PEEK(0, 2);
if (g != '.') {
- Usz len = index_of(PEEK(0, -1)) + 1;
+ Usz len = index_of(PEEK(0, 1)) + 1;
if (len > Oevent_osc_int_count)
len = Oevent_osc_int_count;
U8 buff[Oevent_osc_int_count];
for (Usz i = 0; i < len; ++i) {
- buff[i] = (U8)index_of(PEEK(0, (Isz)i + 1));
+ buff[i] = (U8)index_of(PEEK(0, (Isz)i + 3));
}
Oevent_osc_ints* oe =
&oevent_list_alloc_item(extra_params->oevent_list)->osc_ints;
|
add alienwhoop_v3 to targets | }
]
},
+ {
+ "name": "alienwhoop_v3",
+ "configurations": [
+ {
+ "name": "brushed.serial",
+ "defines": {
+ "BRUSHED_TARGET": "",
+ "RX_UNIFIED_SERIAL": ""
+ }
+ }
+ ]
+ },
{
"name": "matekf411rx",
"configurations": [
|
libhfuzz/instrument: ignore const values which are of len=0 | @@ -465,6 +465,9 @@ void instrumentClearNewCov() {
}
static inline void instrumentAddConstMemInternal(const void* mem, size_t len) {
+ if (len == 0) {
+ return;
+ }
if (len > sizeof(cmpFeedback->valArr[0].val)) {
len = sizeof(cmpFeedback->valArr[0].val);
}
@@ -494,6 +497,9 @@ void instrumentAddConstMem(const void* mem, size_t len, bool check_if_ro) {
if (!cmpFeedback) {
return;
}
+ if (len == 0) {
+ return;
+ }
if (check_if_ro && !util_isAddrRO(mem)) {
return;
}
|
Fix test_misc_alloc_create_parser_with_encoding() to work in | @@ -7910,7 +7910,7 @@ START_TEST(test_misc_alloc_create_parser_with_encoding)
/* Try several levels of allocation */
for (i = 0; i < max_alloc_count; i++) {
allocation_count = i;
- parser = XML_ParserCreate_MM("us-ascii", &memsuite, NULL);
+ parser = XML_ParserCreate_MM(XCS("us-ascii"), &memsuite, NULL);
if (parser != NULL)
break;
}
|
improved identity handling | @@ -432,6 +432,8 @@ if(eapidentity->eaptype != EAP_TYPE_ID)
idlen = htons(eapidentity->eaplen) -5;
if((idlen > 0) && (idlen <= 256))
{
+ if(eapidentity->identity[0] == 0)
+ return;
memset(idstring, 0, 258);
memcpy(&idstring, eapidentity->identity, idlen);
if(usernameoutname != NULL)
@@ -1880,7 +1882,7 @@ while((pcapstatus = pcap_next_ex(pcapin, &pkh, &packet)) != -2)
pcap_dump((u_char *) pcapextout, pkh, h80211);
- if(eapext->eapcode == EAP_CODE_RESP)
+ if(eapext->eaptype == EAP_TYPE_ID)
addresponseidentity(eapext);
if(eapext->eaptype == EAP_TYPE_NAK)
|
Basic-Flow update | @@ -148,7 +148,7 @@ Additionally, some Hammer process technology plugins do not provide default valu
Place-and-route tools are very sensitive to process technologes (significantly more sensitive than synthesis tools), and different process technologies may work only on specific tool versions. It is recommended to check what is the appropriate tool version for the specific process technology you are working with.
-.. Note:: If you edit the yml configuration files in between synthesis and place-and-route, the `make par` command will automatically re-run synthesis. If you would like to avoid that and are confident that your configuration file changes do not affect synthesis results, you may use the `make redo-par` instead with the variable ``HAMMER_EXTRA_ARGS='-p <your-changed.yml>'``.
+.. Note:: If you edit the yml configuration files in between synthesis and place-and-route, the ``make par`` command will automatically re-run synthesis. If you would like to avoid that and are confident that your configuration file changes do not affect synthesis results, you may use the ``make redo-par`` command instead with the variable ``HAMMER_EXTRA_ARGS='-p <your-changed.yml>'``.
|
netkvm: defines for USO feature
Define the feature bit and GSO type according to the spec. | #define VIRTIO_NET_F_GUEST_RSC4_DONT_USE 41 /* reserved */
#define VIRTIO_NET_F_GUEST_RSC6_DONT_USE 42 /* reserved */
+#define VIRTIO_NET_F_HOST_USO 56 /* Host can handle USO in. */
#define VIRTIO_NET_F_HASH_REPORT 57
#define VIRTIO_NET_F_RSS 60
#define VIRTIO_NET_F_RSC_EXT 61
@@ -145,6 +146,7 @@ struct virtio_net_hdr_v1 {
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */
#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
+#define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4 UDP (USO) */
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
__u8 gso_type;
__virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
|
Fix EnableDebug patch not working | @@ -106,12 +106,12 @@ void EnableDebugPatch(const Image* apImage)
if (i == 0)
{
RealRegisterScriptFunction = reinterpret_cast<TRegisterScriptFunction*>(pCallLocation + offset);
- MH_CreateHook(&RealRegisterScriptFunction, &HookRegisterScriptFunction, reinterpret_cast<void**>(&RealRegisterScriptFunction));
+ MH_CreateHook(RealRegisterScriptFunction, &HookRegisterScriptFunction, reinterpret_cast<void**>(&RealRegisterScriptFunction));
}
else
{
RealRegisterScriptMemberFunction = reinterpret_cast<TRegisterScriptMemberFunction*>(pCallLocation + offset);
- MH_CreateHook(&RealRegisterScriptMemberFunction, &HookRegisterScriptMemberFunction, reinterpret_cast<void**>(&RealRegisterScriptMemberFunction));
+ MH_CreateHook(RealRegisterScriptMemberFunction, &HookRegisterScriptMemberFunction, reinterpret_cast<void**>(&RealRegisterScriptMemberFunction));
}
spdlog::info("{}: success", patchType);
|
Work CI-CD
Fix version composing for WIN32 job.
***NO_CI*** | @@ -698,8 +698,7 @@ jobs:
inputs:
targetType: 'inline'
script: |
- Write-Host "$("##vso[task.setvariable variable=TARGET_BUILD_COUNTER]")$version"
- Write-Host "$("##vso[task.setvariable variable=WINCLR_PACKAGE_VERSION]")$(NBGV_VersionMajor).$(NBGV_VersionMinor).$(NBGV_BuildNumber).$version"
+ Write-Host "$("##vso[task.setvariable variable=WINCLR_PACKAGE_VERSION]")$(NBGV_VersionMajor).$(NBGV_VersionMinor).$(NBGV_BuildNumber).$(TARGET_BUILD_COUNTER)"
- template: azure-pipelines-templates/install-nuget.yml@templates
|
Document in the manpage that port 0 means off.
issue | @@ -62,7 +62,7 @@ caches, so consult the README and memcached homepage for configuration
suggestions.
.TP
.B \-p, --port=<num>
-Listen on TCP port <num>, the default is port 11211.
+Listen on TCP port <num>, the default is port 11211. 0 means off.
.TP
.B \-U, --udp-port=<num>
Listen on UDP port <num>, the default is port 0, which is off.
|
libbpf-tools: update drsnoop for libbpf 1.0
Switch to libbpf 1.0 mode and adapt libbpf API usage accordingly. | @@ -147,7 +147,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
- struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct ksyms *ksyms = NULL;
const struct ksym *ksym;
@@ -159,14 +158,9 @@ int main(int argc, char **argv)
if (err)
return err;
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d\n", err);
- return 1;
- }
-
obj = drsnoop_bpf__open();
if (!obj) {
fprintf(stderr, "failed to open BPF object\n");
@@ -214,13 +208,10 @@ int main(int argc, char **argv)
printf(" %8s", "FREE(KB)");
printf("\n");
- pb_opts.sample_cb = handle_event;
- pb_opts.lost_cb = handle_lost_events;
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), PERF_BUFFER_PAGES,
- &pb_opts);
- err = libbpf_get_error(pb);
- if (err) {
- pb = NULL;
+ handle_event, handle_lost_events, NULL, NULL);
+ if (!pb) {
+ err = -errno;
fprintf(stderr, "failed to open perf buffer: %d\n", err);
goto cleanup;
}
@@ -238,8 +229,8 @@ int main(int argc, char **argv)
/* main: poll */
while (!exiting) {
err = perf_buffer__poll(pb, PERF_POLL_TIMEOUT_MS);
- if (err < 0 && errno != EINTR) {
- fprintf(stderr, "error polling perf buffer: %s\n", strerror(errno));
+ if (err < 0 && err != -EINTR) {
+ fprintf(stderr, "error polling perf buffer: %s\n", strerror(-err));
goto cleanup;
}
if (env.duration && get_ktime_ns() > time_end)
|
SLW: Add opal_slw_set_reg support for power9
This OPAL call is made from Linux to OPAL to configure values in
various SPRs after wakeup from a deep idle state. | #include <libfdt/libfdt.h>
#include <opal-api.h>
+#include <p9_stop_api.H>
#include <p8_pore_table_gen_api.H>
#include <sbe_xip_image.h>
@@ -1402,15 +1403,26 @@ int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val)
struct cpu_thread *c = find_cpu_by_pir(cpu_pir);
struct proc_chip *chip;
- void *image;
int rc;
- int i;
- int spr_is_supported = 0;
assert(c);
chip = get_chip(c->chip_id);
assert(chip);
- image = (void *) chip->slw_base;
+
+ if (proc_gen == proc_gen_p9) {
+ if (!chip->homer_base) {
+ log_simple_error(&e_info(OPAL_RC_SLW_REG),
+ "SLW: HOMER base not set %x\n",
+ chip->id);
+ return OPAL_INTERNAL_ERROR;
+ }
+ rc = p9_stop_save_cpureg((void *)chip->homer_base,
+ sprn, val, cpu_pir);
+
+ } else if (proc_gen == proc_gen_p8) {
+ int spr_is_supported = 0;
+ void *image;
+ int i;
/* Check of the SPR is supported by libpore */
for (i = 0; i < SLW_SPR_REGS_SIZE ; i++) {
@@ -1425,18 +1437,26 @@ int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val)
c->pir);
return OPAL_UNSUPPORTED;
}
-
- rc = p8_pore_gen_cpureg_fixed(image, P8_SLW_MODEBUILD_SRAM, sprn,
- val, cpu_get_core_index(c),
+ image = (void *)chip->slw_base;
+ rc = p8_pore_gen_cpureg_fixed(image, P8_SLW_MODEBUILD_SRAM,
+ sprn, val,
+ cpu_get_core_index(c),
cpu_get_thread_index(c));
+ } else {
+ log_simple_error(&e_info(OPAL_RC_SLW_REG),
+ "SLW: proc_gen not supported\n");
+ return OPAL_UNSUPPORTED;
+
+ }
if (rc) {
log_simple_error(&e_info(OPAL_RC_SLW_REG),
- "SLW: Failed to set spr for CPU %x\n",
- c->pir);
+ "SLW: Failed to set spr %llx for CPU %x, RC=0x%x\n",
+ sprn, c->pir, rc);
return OPAL_INTERNAL_ERROR;
}
-
+ prlog(PR_DEBUG, "SLW: restore spr:0x%llx on c:0x%x with 0x%llx\n",
+ sprn, c->pir, val);
return OPAL_SUCCESS;
}
|
Add new files to the rockspec
As well as some old files that were also missing... | @@ -19,26 +19,30 @@ dependencies = {
build = {
type = "builtin",
modules = {
+ ["pallene.assignment_conversion"] = "pallene/assignment_conversion.lua",
["pallene.ast"] = "pallene/ast.lua",
["pallene.builtins"] = "pallene/builtins.lua",
["pallene.C"] = "pallene/C.lua",
["pallene.c_compiler"] = "pallene/c_compiler.lua",
["pallene.checker"] = "pallene/checker.lua",
["pallene.coder"] = "pallene/coder.lua",
+ ["pallene.constant_propagation"] = "pallene/constant_propagation.lua",
+ ["pallene.driver"] = "pallene/driver.lua",
["pallene.gc"] = "pallene/gc.lua",
["pallene.ir"] = "pallene/ir.lua",
["pallene.Lexer"] = "pallene/Lexer.lua",
["pallene.Location"] = "pallene/Location.lua",
+ ["pallene.pallenelib"] = "pallene/pallenelib.lua",
["pallene.parser"] = "pallene/parser.lua",
["pallene.print_ir"] = "pallene/print_ir.lua",
["pallene.symtab"] = "pallene/symtab.lua",
["pallene.to_ir"] = "pallene/to_ir.lua",
+ ["pallene.translator"] = "pallene/translator.lua",
+ ["pallene.trycatch"] = "pallene/trycatch.lua",
["pallene.typedecl"] = "pallene/typedecl.lua",
["pallene.types"] = "pallene/types.lua",
["pallene.uninitialized"] = "pallene/uninitialized.lua",
["pallene.util"] = "pallene/util.lua",
- ["pallene.translator"] = "pallene/translator.lua",
- ["pallene.trycatch"] = "pallene/trycatch.lua",
},
install = {
bin = {
|
fixing bang on 3rd outlet in threaded mode | @@ -2090,7 +2090,8 @@ static void *coll_new(t_symbol *s, int argc, t_atom *argv)
//used to be only for threaded, now make it anyways
//needed for bang on instantiate - DK
x->x_clock = clock_new(x, (t_method)coll_tick);
- if (x->x_threaded == 1)
+// if (x->x_threaded == 1) - there's something here that needs to be done in instantiating the object
+ // even for unthreaded mode, otherwise it won't bang on the 3rd outlet when in threaded mode... porres
{
//x->x_clock = clock_new(x, (t_method)coll_tick);
t_threadedFunctionParams rPars;
|
window_update_motif_hints: Do not assert that the property will always be there
Fixes | @@ -459,7 +459,9 @@ static border_style_t border_style_from_motif_value(uint32_t value) {
*
*/
bool window_update_motif_hints(i3Window *win, xcb_get_property_reply_t *prop, border_style_t *motif_border_style) {
- assert(prop != NULL);
+ if (prop == NULL) {
+ return false;
+ }
assert(motif_border_style != NULL);
if (xcb_get_property_value_length(prop) == 0) {
|
Adding missing metadata to use_setuptools==False branch of setup.py. | @@ -144,7 +144,10 @@ if use_setuptools:
else:
setup(name='spglib',
version=version,
+ license='BSD-3-Clause',
description='This is the spglib module.',
+ long_description=open('README.rst', 'rb').read().decode('utf-8'),
+ long_description_content_type='text/x-rst',
author='Atsushi Togo',
author_email='[email protected]',
url='http://atztogo.github.io/spglib/',
|
Clean up ceremony | (punt zero-ux)
(punt zero-ux)
::
- ?. live
- (punt ;~(plug zero-ux ;~(pfix com zero-ux)))
- (stag ~ ;~(plug zero-ux ;~(pfix com zero-ux)))
+ =+ ;~(plug zero-ux ;~(pfix com zero-ux))
+ ?. live (punt -)
+ (stag ~ -)
:: %. ;~(plug zero-ux ;~(pfix com zero-ux))
+ :: ^- $-(rule rule)
:: ?. live punt
:: (cury stag ~)
==
[%uint 1.610.668.800] :: 2021-01-15 00:00:00 UTC
==
==
- ::TODO deploy censures, votingdsending
+ ~& 'Deploying censures...'
+ =^ censures this
+ %+ do-deploy 'censures'
+ ~[address+ships]
+ ~& 'Deploying delegated-sending...'
+ =^ delegated-sending this
+ %+ do-deploy 'delegated-sending'
+ ~[address+ships]
::
:: tlon galaxy booting
::
(deposit-galaxies conditional-star-release con-gal)
::
~& 'Depositing conditional release stars...'
+ ~& con-sar
=. this
(deposit-stars conditional-star-release con-sar)
::
?~ galaxies this
~& [(lent galaxies) 'galaxies remaining']
=* galaxy gal.i.galaxies
+ ~& `@p`galaxy
=* gal-deed i.galaxies
::
:: create the galaxy, with spawn proxy set to the lockup contract
(create-ship galaxy `into net.gal-deed)
::
:: deposit all its stars
- =+ stars=(gulf 1 2)::55)
+ =+ stars=(gulf 1 255)
|-
?^ stars
=. this
:: a spawn proxy yet, do so now
=+ par=(sein:title star)
=? this !(~(has in gals) par)
- ~& [%setting-spawn-proxy-for par +(nonce)]
=. gals (~(put in gals) par)
- %^ do constitution 350.000
+ %^ do constitution 300.000
%+ set-spawn-proxy:dat par
into
::
- ~& [%depositing star +(nonce)]
=. this
- %^ do into 350.000
+ %^ do into 550.000
(deposit:dat to star)
$(stars t.stars)
::
|
BugID:17646749:[build-rules] fix -fuse-linker-plugin warnings when building mk3060
-or -name "*.o" \
-or -name "*.d" \
-or -name "*.gc*" \
+ | grep -v '$(OUTPUT_DIR)/compiler' \
2>/dev/null)
distclean:
|
website: two more small fixes on the homepage
"APP.HOME.LABEL.KEYFACT.5.DETAILS": "<a href=\"/tutorials/validate-configuration\">Validate configuration</a> immediately on changes, eliminate your configuration duplicates with the help of symbolic links or compute configuration parameters from other configuration values.",
"APP.HOME.LABEL.KEYFACT.5.HEADLINE": "Configuration Specification",
- "APP.HOME.LABEL.KEYFACT.6.DETAILS": "An important step in utilizing all of Elektras features is the elektrification of your application. We have written a <a href=\"/tutorials/integration-of-your-application\">tutorial</a> for you that explains how to do that!",
+ "APP.HOME.LABEL.KEYFACT.6.DETAILS": "Make maintenance of your application easier and utilize all of Elektra's features by elektrification of your application. We have written a <a href=\"/tutorials/integration-of-your-application\">tutorial</a> for you, which explains how to do that!",
"APP.HOME.LABEL.KEYFACT.6.HEADLINE": "Elektrify your Application",
"APP.HOME.LABEL.KEYFACT.7.DETAILS": "The Elektra core is written without external dependencies and with high regard to good performance.",
"APP.HOME.LABEL.KEYFACT.7.HEADLINE": "Fast Without Dependencies",
"APP.HOME.LABEL.KEYFACT.8.HEADLINE": "100% Free Software",
"APP.HOME.LABEL.KEYFACT.9.DETAILS": "Elektra strives to eliminate cross-platform related issues and is therefore suitable for all systems.",
"APP.HOME.LABEL.KEYFACT.9.HEADLINE": "Cross-platform",
- "APP.HOME.LABEL.ORIENTATION.1.DETAILS": "You need help in developing a custom plugin? Check out the <a href=\"/tutorials/plugins-introduction\">plugin tutorial</a>! Make sure to report an <a href=\"https://github.com/ElektraInitiative/libelektra/issues/new\">issue</a> before: we can help or maybe even do it for you.",
+ "APP.HOME.LABEL.ORIENTATION.1.DETAILS": "You need help in developing a custom plugin? You want move unmaintained configuration code to us? Check out the <a href=\"/tutorials/plugins-introduction\">plugin tutorial</a>! Make sure to report an <a href=\"https://github.com/ElektraInitiative/libelektra/issues/new\">issue</a> before: we can help or maybe even do it for you.",
"APP.HOME.LABEL.ORIENTATION.1.HEADLINE": "Develop Custom Plugins",
"APP.HOME.LABEL.ORIENTATION.2.DETAILS": "Of course you can use Elektra also outside a C or C++ environment. Have a look at our <a href=\"/bindings/readme\">language bindings</a>! You can even write plugins in <a href=\"/plugins/jni\">Java</a>, <a href=\"/plugins/python\">Python</a>, and <a href=\"/plugins/lua\">Lua</a>.",
"APP.HOME.LABEL.ORIENTATION.2.HEADLINE": "Use Elektra outside C/C++",
|
Update common.go for ops lepton refactoring | @@ -7,14 +7,15 @@ import (
"os"
"sort"
"strings"
- "time"
"testing"
+ "time"
- "github.com/nanovms/ops/lepton"
+ "github.com/nanovms/ops/config"
+ "github.com/nanovms/ops/qemu"
)
-func defaultConfig() lepton.Config {
- var c lepton.Config
+func defaultConfig() config.Config {
+ var c config.Config
c.Boot = "../../output/test/go/boot.img"
c.Kernel = "../../output/test/go/kernel.img"
@@ -46,8 +47,8 @@ func sortString(s string) string {
const START_WAIT_TIMEOUT = time.Second * 30
-func runAndWaitForString(rconfig *lepton.RunConfig, timeout time.Duration, text string, t *testing.T) lepton.Hypervisor {
- hypervisor := lepton.HypervisorInstance()
+func runAndWaitForString(rconfig *config.RunConfig, timeout time.Duration, text string, t *testing.T) qemu.Hypervisor {
+ hypervisor := qemu.HypervisorInstance()
if hypervisor == nil {
t.Fatal("No hypervisor found on $PATH")
}
|
Unix Makefile: Rework the assignment of CXX and AS
If the configured value is the empty string, give them a sane default.
Otherwise, give them the configured value prefixed with $(CROSS_COMPILE)
CPPFLAGS_Q={- $cppflags =~ s|([\\"])|\\$1|g; $cppflags -}
CC= $(CROSS_COMPILE){- $config{cc} -}
CFLAGS={- join(' ', @{$config{cflags}}) -}
-CXX= $(CROSS_COMPILE){- $config{cxx} -}
+CXX={- $config{cxx} ? "\$(CROSS_COMPILE)$config{cxx}" : '' -}
CXXFLAGS={- join(' ', @{$config{cxxflags}}) -}
LDFLAGS= {- join(' ', @{$config{lflags}}) -}
PLIB_LDFLAGS= {- join(' ', @{$config{plib_lflags}}) -}
@@ -236,7 +236,7 @@ TARFILE= ../$(NAME).tar
# dependent assembler flags. E.g. if you throw -mcpu=ultrasparc at SPARC
# gcc, then the driver will automatically translate it to -xarch=v8plus
# and pass it down to assembler.
-AS={- $config{as} || '$(CC) -c' -}
+AS={- $config{as} ? "\$(CROSS_COMPILE)$config{as}" : '$(CC) -c' -}
ASFLAGS={- join(' ', @{$config{asflags}}) || '$(CFLAGS)' -}
PERLASM_SCHEME= {- $target{perlasm_scheme} -}
|
Add PP type validation | @@ -63,6 +63,9 @@ ocf_error_t ocf_promotion_set_policy(ocf_promotion_policy_t policy,
ocf_cache_t cache = policy->owner;
ocf_promotion_t prev_policy;
+ if (type >= ocf_promotion_max)
+ return -OCF_ERR_INVAL;
+
prev_policy = cache->conf_meta->promotion_policy_type;
if (ocf_promotion_policies[prev_policy].deinit)
|
Update unit-tests script. | @@ -18,13 +18,10 @@ def print_result(test, passed):
padding = "."*(60-len(s))
print(s + padding + ("PASSED" if passed == True else "FAILED"))
-for module in sorted(os.listdir(SCRIPT_DIR)):
- mod_path = "/".join((SCRIPT_DIR, module))
-
- for test in sorted(os.listdir(mod_path)):
+for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_passed = True
- test_path = "/".join((mod_path, test))
+ test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
|
[voting-reward] subtract voting power only for effective voters
}
func (v *vpr) sub(id types.AccountID, addr []byte, power *big.Int) {
- if v == nil {
+ if v == nil || v.voters.powers[id] == nil {
return
}
|
Fix ModelData pointer alignment;
ModelData manages a single allocation and creates pointers into
that allocation. These pointers were tightly packed, creating
alignment issues which triggered undefined behavior. Now, the
pointers are all aligned to 8 byte boundaries. | @@ -33,18 +33,19 @@ void lovrModelDataDestroy(void* ref) {
void lovrModelDataAllocate(ModelData* model) {
size_t totalSize = 0;
size_t sizes[13];
- totalSize += sizes[0] = model->blobCount * sizeof(Blob*);
- totalSize += sizes[1] = model->bufferCount * sizeof(ModelBuffer);
- totalSize += sizes[2] = model->textureCount * sizeof(TextureData*);
- totalSize += sizes[3] = model->materialCount * sizeof(ModelMaterial);
- totalSize += sizes[4] = model->attributeCount * sizeof(ModelAttribute);
- totalSize += sizes[5] = model->primitiveCount * sizeof(ModelPrimitive);
- totalSize += sizes[6] = model->animationCount * sizeof(ModelAnimation);
- totalSize += sizes[7] = model->skinCount * sizeof(ModelSkin);
- totalSize += sizes[8] = model->nodeCount * sizeof(ModelNode);
- totalSize += sizes[9] = model->channelCount * sizeof(ModelAnimationChannel);
- totalSize += sizes[10] = model->childCount * sizeof(uint32_t);
- totalSize += sizes[11] = model->jointCount * sizeof(uint32_t);
+ size_t alignment = 8;
+ totalSize += sizes[0] = ALIGN(model->blobCount * sizeof(Blob*), alignment);
+ totalSize += sizes[1] = ALIGN(model->bufferCount * sizeof(ModelBuffer), alignment);
+ totalSize += sizes[2] = ALIGN(model->textureCount * sizeof(TextureData*), alignment);
+ totalSize += sizes[3] = ALIGN(model->materialCount * sizeof(ModelMaterial), alignment);
+ totalSize += sizes[4] = ALIGN(model->attributeCount * sizeof(ModelAttribute), alignment);
+ totalSize += sizes[5] = ALIGN(model->primitiveCount * sizeof(ModelPrimitive), alignment);
+ totalSize += sizes[6] = ALIGN(model->animationCount * sizeof(ModelAnimation), alignment);
+ totalSize += sizes[7] = ALIGN(model->skinCount * sizeof(ModelSkin), alignment);
+ totalSize += sizes[8] = ALIGN(model->nodeCount * sizeof(ModelNode), alignment);
+ totalSize += sizes[9] = ALIGN(model->channelCount * sizeof(ModelAnimationChannel), alignment);
+ totalSize += sizes[10] = ALIGN(model->childCount * sizeof(uint32_t), alignment);
+ totalSize += sizes[11] = ALIGN(model->jointCount * sizeof(uint32_t), alignment);
totalSize += sizes[12] = model->charCount * sizeof(char);
size_t offset = 0;
|
Fix minor type warnings and risk of memory leak in testutil/driver.c
Discussion is in | @@ -40,7 +40,7 @@ static int seed = 0;
*/
static int num_test_cases = 0;
-void add_test(const char *test_case_name, int (*test_fn) ())
+void add_test(const char *test_case_name, int (*test_fn) (void))
{
assert(num_tests != OSSL_NELEM(all_tests));
all_tests[num_tests].test_case_name = test_case_name;
@@ -105,7 +105,7 @@ void setup_test_framework()
if (test_seed != NULL) {
seed = atoi(test_seed);
if (seed <= 0)
- seed = time(NULL);
+ seed = (int)time(NULL);
test_printf_stdout("%*s# RAND SEED %d\n", subtest_level(), "", seed);
test_flush_stdout();
srand(seed);
@@ -121,6 +121,7 @@ void setup_test_framework()
int pulldown_test_framework(int ret)
{
+ set_test_title(NULL);
#ifndef OPENSSL_NO_CRYPTO_MDEBUG
if (should_report_leaks()
&& CRYPTO_mem_leaks_cb(openssl_error_cb, NULL) <= 0)
|
ledc: fixed check to s_ledc_fade_rec array | /*
- * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
+ * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
*
* SPDX-License-Identifier: Apache-2.0
*/
@@ -1035,7 +1035,7 @@ esp_err_t ledc_set_fade_with_step(ledc_mode_t speed_mode, ledc_channel_t channel
esp_err_t ledc_fade_start(ledc_mode_t speed_mode, ledc_channel_t channel, ledc_fade_mode_t fade_mode)
{
- LEDC_CHECK(s_ledc_fade_rec != NULL, LEDC_FADE_SERVICE_ERR_STR, ESP_ERR_INVALID_STATE);
+ LEDC_CHECK(s_ledc_fade_rec[speed_mode][channel] != NULL, LEDC_FADE_SERVICE_ERR_STR, ESP_ERR_INVALID_STATE);
LEDC_ARG_CHECK(channel < LEDC_CHANNEL_MAX, "channel");
LEDC_ARG_CHECK(fade_mode < LEDC_FADE_MAX, "fade_mode");
LEDC_CHECK(p_ledc_obj[speed_mode] != NULL, LEDC_NOT_INIT, ESP_ERR_INVALID_STATE);
@@ -1053,9 +1053,6 @@ esp_err_t ledc_fade_func_install(int intr_alloc_flags)
void ledc_fade_func_uninstall(void)
{
- if (s_ledc_fade_rec == NULL) {
- return;
- }
if (s_ledc_fade_isr_handle) {
esp_intr_free(s_ledc_fade_isr_handle);
s_ledc_fade_isr_handle = NULL;
|
Update BOOM URL in README.md | @@ -78,7 +78,7 @@ These additional publications cover many of the internal components used in Chip
[berkeley]: https://berkeley.edu
[riscv]: https://riscv.org/
[rocket-chip]: https://github.com/freechipsproject/rocket-chip
-[boom]: https://github.com/ucb-bar/riscv-boom
+[boom]: https://github.com/riscv-boom/riscv-boom
[firemarshal]: https://github.com/firesim/FireMarshal/
[ariane]: https://github.com/pulp-platform/ariane/
[gemmini]: https://github.com/ucb-bar/gemmini
|
Better parse arguments | @@ -10,6 +10,7 @@ import tempfile
import hashlib
from base64 import urlsafe_b64encode
+from argparse import ArgumentParser
sys.dont_write_bytecode = True
@@ -300,16 +301,10 @@ if __name__ == '__main__':
arc_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
out_root = tempfile.mkdtemp()
- widget_args = [arg for arg in sys.argv[1:] if arg.startswith('-DBUILD_WIDGET=')]
- catboost_args = [arg for arg in sys.argv[1:] if not arg.startswith('-DBUILD_WIDGET=')]
-
- if any([arg != '-DBUILD_WIDGET=yes' and arg != '-DBUILD_WIDGET=no' for arg in widget_args]):
- print("Please specify -DBUILD_WIDGET=yes/no")
- exit()
-
- should_build_widget = True
- if '-DBUILD_WIDGET=no' in widget_args:
- should_build_widget = False
+ widget_args_parser = ArgumentParser()
+ widget_args_parser.add_argument('--build_widget', choices=['yes', 'no'], default='yes')
+ widget_args, catboost_args = widget_args_parser.parse_known_args()
+ should_build_widget = widget_args.build_widget == 'yes'
wheel_name = build(arc_root, out_root, catboost_args, should_build_widget)
print(wheel_name)
|
update bc target order in pq-crypto Makefile | @@ -18,14 +18,14 @@ OBJS=$(SRCS:.c=.o)
BCS_11=pq_random.bc
BCS1=$(addprefix $(BITCODE_DIR), $(BCS_11))
-.PHONY : bc
-bc: $(BCS1)
- $(MAKE) -C sike bc
-
.PHONY : all
all: $(OBJS)
$(MAKE) -C sike
+.PHONY : bc
+bc: $(BCS1)
+ $(MAKE) -C sike bc
+
.PHONY : clean
clean: decruft
${MAKE} -C sike decruft
|
Increment version to 4.9.4.dev1. | #define MOD_WSGI_MAJORVERSION_NUMBER 4
#define MOD_WSGI_MINORVERSION_NUMBER 9
-#define MOD_WSGI_MICROVERSION_NUMBER 3
-#define MOD_WSGI_VERSION_STRING "4.9.3"
+#define MOD_WSGI_MICROVERSION_NUMBER 4
+#define MOD_WSGI_VERSION_STRING "4.9.4.dev1"
/* ------------------------------------------------------------------------- */
|
Investigate options for ack delay and gap | @@ -1910,6 +1910,15 @@ uint64_t picoquic_compute_ack_gap(picoquic_cnx_t* cnx, uint64_t data_rate)
uint64_t packet_rate_times_1M = (data_rate * 1000000) / cnx->path[0]->send_mtu;
nb_packets = packet_rate_times_1M / cnx->path[0]->smoothed_rtt;
}
+#if 1
+ if (cnx->path[0]->smoothed_rtt < 4 * PICOQUIC_ACK_DELAY_MIN) {
+ uint64_t mult = 4;
+ if (cnx->path[0]->smoothed_rtt > PICOQUIC_ACK_DELAY_MIN) {
+ mult = ((uint64_t)(4 * PICOQUIC_ACK_DELAY_MIN)) / cnx->path[0]->smoothed_rtt;
+ }
+ nb_packets *= mult;
+ }
+#endif
ack_gap = (nb_packets + 3) / 4;
@@ -1925,9 +1934,28 @@ uint64_t picoquic_compute_ack_gap(picoquic_cnx_t* cnx, uint64_t data_rate)
if (ack_gap < ack_gap_min) {
ack_gap = ack_gap_min;
}
+#if 0
else if (ack_gap > 32) {
ack_gap = 32;
}
+#else
+ else if (ack_gap > 32) {
+#if 0
+ ack_gap = 32 +((nb_packets - 128) / 16);
+#else
+ ack_gap = 32 + ((nb_packets - 128) / 8);
+#endif
+#if 1
+ if (ack_gap > 64) {
+ ack_gap = 64;
+ }
+#else
+ if (ack_gap > 128) {
+ ack_gap = 128;
+ }
+#endif
+ }
+#endif
return ack_gap;
}
|
porting: cleanup porting_init()
code simplification
add some more comments
call ble_hci_ram_init() before ble_ll_init() to make sure the
buffers are initialized before they are potentially used
#if NIMBLE_CFG_CONTROLLER
#include "controller/ble_ll.h"
+#include "transport/ram/ble_hci_ram.h"
#endif
static struct ble_npl_eventq g_eventq_dflt;
+extern void os_msys_init(void);
+
void
nimble_port_init(void)
{
- void os_msys_init(void);
-
-#if NIMBLE_CFG_CONTROLLER
- void ble_hci_ram_init(void);
-#endif
-
/* Initialize default event queue */
ble_npl_eventq_init(&g_eventq_dflt);
-
+ /* Initialize the global memory pool */
os_msys_init();
-
+ /* Initialize the host */
ble_hs_init();
#if NIMBLE_CFG_CONTROLLER
+ ble_hci_ram_init();
hal_timer_init(5, NULL);
os_cputime_init(32768);
ble_ll_init();
- ble_hci_ram_init();
#endif
}
|
Fixed regression from early refactor | @@ -1437,7 +1437,7 @@ static Point SolveForLighthouse(FLT posOut[3], FLT quatOut[4], TrackedObject *ob
SurvivePose lighthousePose;
FLT invRot[4];
- quatgetreciprocal(invRot, lighthousePose.Rot);
+ quatgetreciprocal(lighthousePose.Rot, rotQuat);
lighthousePose.Pos[0] = refinedEstimateGd.x;
lighthousePose.Pos[1] = refinedEstimateGd.y;
|
Give HarnessRAM implicit clock/reset in SerialTiedOff | @@ -238,30 +238,24 @@ class WithTiedOffDebug extends OverrideHarnessBinder({
})
-class WithSerialAdapterTiedOff(asyncQueue: Boolean = false) extends OverrideHarnessBinder({
+class WithSerialAdapterTiedOff extends OverrideHarnessBinder({
(system: CanHavePeripheryTLSerial, th: HasHarnessSignalReferences, ports: Seq[ClockedIO[SerialIO]]) => {
implicit val p = chipyard.iobinders.GetSystemParameters(system)
ports.map({ port =>
- val bits = if (asyncQueue) {
- SerialAdapter.asyncQueue(port, th.harnessClock, th.harnessReset)
- } else {
- port.bits
- }
+ val bits = SerialAdapter.asyncQueue(port, th.harnessClock, th.harnessReset)
+ withClockAndReset(th.harnessClock, th.harnessReset) {
val ram = SerialAdapter.connectHarnessRAM(system.serdesser.get, bits, th.harnessReset)
SerialAdapter.tieoff(ram.module.io.tsi_ser)
+ }
})
}
})
-class WithSimSerial(asyncQueue: Boolean = false) extends OverrideHarnessBinder({
+class WithSimSerial extends OverrideHarnessBinder({
(system: CanHavePeripheryTLSerial, th: HasHarnessSignalReferences, ports: Seq[ClockedIO[SerialIO]]) => {
implicit val p = chipyard.iobinders.GetSystemParameters(system)
ports.map({ port =>
- val bits = if (asyncQueue) {
- SerialAdapter.asyncQueue(port, th.harnessClock, th.harnessReset)
- } else {
- port.bits
- }
+ val bits = SerialAdapter.asyncQueue(port, th.harnessClock, th.harnessReset)
withClockAndReset(th.harnessClock, th.harnessReset) {
val ram = SerialAdapter.connectHarnessRAM(system.serdesser.get, bits, th.harnessReset)
val success = SerialAdapter.connectSimSerial(ram.module.io.tsi_ser, th.harnessClock, th.harnessReset.asBool)
|
doc: Adds note about pre-burned eFuses for Blocks with a coding scheme | eFuse Manager
=============
+{IDF_TARGET_CODING_SCHEMES:default="Reed-Solomon", esp32="3/4 or Repeat"}
+
Introduction
------------
@@ -215,6 +217,12 @@ Supported coding scheme
To write some fields into one block, or different blocks in one time, you need to use ``the batch writing mode``. Firstly set this mode through :cpp:func:`esp_efuse_batch_write_begin` function then write some fields as usual using the ``esp_efuse_write_...`` functions. At the end to burn them, call the :cpp:func:`esp_efuse_batch_write_commit` function. It burns prepared data to the eFuse blocks and disables the ``batch recording mode``.
+.. note::
+
+ If there is already pre-written data in the eFuse block using the ``{IDF_TARGET_CODING_SCHEMES}`` encoding scheme, then it is not possible to write anything extra (even if the required bits are empty) without breaking the previous encoding data. This encoding data will be overwritten with new encoding data and completely destroyed (however, the payload eFuses are not damaged). It can be related to: CUSTOM_MAC, SPI_PAD_CONFIG_HD, SPI_PAD_CONFIG_CS, etc. Please contact Espressif to order the required pre-burnt eFuses.
+
+ FOR TESTING ONLY (NOT RECOMMENDED): You can ignore or suppress errors that violate encoding scheme data in order to burn the necessary bits in the eFuse block.
+
eFuse API
---------
|
[chainamker]remove response struct | @@ -138,13 +138,6 @@ typedef struct TBoatHlchainamkerResult {
} BoatHlchainamkerResult;
-typedef struct TBoatHlchainmakerResponse {
-
- BoatHlchainamkerResult http_result;
- BUINT32 httpResLen;
- BUINT8 *http2Res;
-}BoatHlchainmakerResponse;
-
// chainmaker wallet config structure
typedef struct TBoatHlchainmakerWalletConfig {
@@ -178,7 +171,6 @@ typedef struct TBoatHlchainmakerWallet {
typedef struct TBoatHlchainamkerTx {
BoatHlchainmakerWallet* wallet_ptr; //!< Pointer of the transaction wallet
- BoatHlchainmakerResponse tx_reponse;
BoatTransactionPara trans_para;
BoatHlchainamkerClient client_info;
}BoatHlchainmakerTx;
|
Define Extended DNS Errors | #include "nsec3.h"
#include "tsig.h"
+/* The Extended DNS Error codes (RFC8914) we use */
+#define EDE_OTHER 0
+#define EDE_NOT_READY 14
+#define EDE_PROHIBITED 18
+#define EDE_NOT_AUTHORITATIVE 20
+#define EDE_NOT_SUPPORTED 21
+
+
/* [Bug #253] Adding unnecessary NS RRset may lead to undesired truncation.
* This function determines if the final response packet needs the NS RRset
* included. Currently, it will only return negative if QTYPE == DNSKEY|DS.
@@ -542,7 +550,7 @@ answer_chaos(struct nsd *nsd, query_type *q)
RCODE_SET(q->packet, RCODE_REFUSE);
/* RFC8914 - Extended DNS Errors
* 4.19. Extended DNS Error Code 18 - Prohibited */
- q->edns.ede = 18;
+ q->edns.ede = EDE_PROHIBITED;
}
} else if ((q->qname->name_size == 16
&& memcmp(dname_name(q->qname), "\007version\006server", 16) == 0) ||
@@ -561,20 +569,20 @@ answer_chaos(struct nsd *nsd, query_type *q)
RCODE_SET(q->packet, RCODE_REFUSE);
/* RFC8914 - Extended DNS Errors
* 4.19. Extended DNS Error Code 18 - Prohibited */
- q->edns.ede = 18;
+ q->edns.ede = EDE_PROHIBITED;
}
} else {
RCODE_SET(q->packet, RCODE_REFUSE);
/* RFC8914 - Extended DNS Errors
* 4.21. Extended DNS Error Code 20 - Not Authoritative */
- q->edns.ede = 20;
+ q->edns.ede = EDE_NOT_AUTHORITATIVE;
}
break;
default:
RCODE_SET(q->packet, RCODE_REFUSE);
/* RFC8914 - Extended DNS Errors
* 4.22. Extended DNS Error Code 21 - Not Supported */
- q->edns.ede = 21;
+ q->edns.ede = EDE_NOT_SUPPORTED;
break;
}
@@ -1287,7 +1295,7 @@ answer_lookup_zone(struct nsd *nsd, struct query *q, answer_type *answer,
RCODE_SET(q->packet, RCODE_REFUSE);
/* RFC 8914 - Extended DNS Errors
* 4.21. Extended DNS Error Code 20 - Not Authoritative */
- q->edns.ede = 20;
+ q->edns.ede = EDE_NOT_AUTHORITATIVE;
}
return;
}
@@ -1298,7 +1306,7 @@ answer_lookup_zone(struct nsd *nsd, struct query *q, answer_type *answer,
RCODE_SET(q->packet, RCODE_SERVFAIL);
/* RFC 8914 - Extended DNS Errors
* 4.15. Extended DNS Error Code 14 - Not Ready */
- q->edns.ede = 14;
+ q->edns.ede = EDE_NOT_READY;
}
return;
}
@@ -1340,7 +1348,7 @@ answer_lookup_zone(struct nsd *nsd, struct query *q, answer_type *answer,
RCODE_SET(q->packet, RCODE_SERVFAIL);
/* RFC 8914 - Extended DNS Errors
* 4.15. Extended DNS Error Code 14 - Not Ready */
- q->edns.ede = 14;
+ q->edns.ede = EDE_NOT_READY;
}
return;
}
|
[numerics] remove computation of problem info and add debug output | #include "numerics_verbose.h"
#include "NumericsVector.h"
#include "float.h"
-#include "debug.h"
-#include "float.h"
#include "JordanAlgebra.h"
#include "CSparseMatrix.h"
#include "NumericsSparseMatrix.h"
+
+/* #define DEBUG_MESSAGES */
+/* #define DEBUG_STDOUT */
+#include "debug.h"
+
+
const char* const SICONOS_GLOBAL_FRICTION_3D_IPM_STR = "GFC3D IPM";
@@ -535,7 +539,7 @@ int gfc3d_IPM_setDefaultSolverOptions(SolverOptions* options)
options->iparam[SICONOS_IPARAM_MAX_ITER] = 200;
options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_GET_PROBLEM_INFO] =
- SICONOS_FRICTION_3D_IPM_GET_PROBLEM_INFO_YES;
+ SICONOS_FRICTION_3D_IPM_GET_PROBLEM_INFO_NO;
options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_NESTEROV_TODD_SCALING] = 1;
@@ -564,7 +568,7 @@ void gfc3d_IPM(GlobalFrictionContactProblem* restrict problem, double* restrict
/* if SICONOS_FRICTION_3D_IPM_FORCED_SPARSE_STORAGE = SICONOS_FRICTION_3D_IPM_FORCED_SPARSE_STORAGE,
we force the copy into a NM_SPARSE storageType */
-
+ DEBUG_PRINTF("problem->M->storageType : %i\n",problem->M->storageType );
if(options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_SPARSE_STORAGE] == SICONOS_FRICTION_3D_IPM_FORCED_SPARSE_STORAGE
&& problem->M->storageType == NM_SPARSE_BLOCK)
{
@@ -576,6 +580,7 @@ void gfc3d_IPM(GlobalFrictionContactProblem* restrict problem, double* restrict
{
M = problem->M;
}
+ DEBUG_PRINTF("problem->M->storageType : %i\n",problem->H->storageType );
if(options->iparam[SICONOS_FRICTION_3D_IPM_IPARAM_SPARSE_STORAGE] == SICONOS_FRICTION_3D_IPM_FORCED_SPARSE_STORAGE
&& problem->H->storageType == NM_SPARSE_BLOCK)
{
@@ -616,7 +621,6 @@ void gfc3d_IPM(GlobalFrictionContactProblem* restrict problem, double* restrict
NM_copy(H, minus_H);
NM_gemm(-1.0, H, NM_eye(H->size1), 0.0, minus_H);
-
double alpha_primal = data->internal_params->alpha_primal;
double alpha_dual = data->internal_params->alpha_dual;
double barr_param = data->internal_params->barr_param;
|
Add additional passing tests to epsdb_passes.txt. | @@ -5,6 +5,8 @@ atomic_double
class
collapse
complex
+complex2
+d2h_slow_copy
data_enter_issue01
data_issue_59
data_share1
@@ -23,23 +25,34 @@ flags 5
flags 6
flags 7
flags 8
+flang_blkdata-g
flang_char_kind
+flang_gen_sptr_prob
flang_isystem_prob
flang_math
flang_omp_map
+flang_omp_red_3d
flang_real16_prob
+flang_reduction_offload
+flang_teams
flang_tgt_alloc_ptr
+fprintf
function
function_overload
function_template
gdb_teams
+global_allocate
helloworld
+hip_device_compile
+hip_rocblas
+host_targ
issue_001
issue_002
issue_flang_libomp
launch_latency
liba_bundled
liba_bundled_cmdline
+longDouble
lto_teams
ManyKernels
map_to_from_prob
@@ -47,6 +60,7 @@ map_zero_bug
MasterBarrier
math_flog
math_modff
+math_pow
math_sqrt_float
MaxNumThrds
mem_foot_print
@@ -54,11 +68,15 @@ nativetests
nest_call_par2
nested_loop
nested_par3
+omp_get_device_num
+omp_get_initial
omp_lock
omp_num_teams_generic
omp_num_teams_SPMD
+omp_wtime
pfspecifier
pfspecifier_str
+printf_parallel_for_target
red_bug_51
reduce
reduc_map_prob
@@ -85,6 +103,7 @@ thread_limit
threads
Threads1xxx
unique-kernel-name
+use_device_addr
use_device_ptr
veccopy
vmuldemo
|
plugins: in_mqtt: added optional TLS flag | @@ -157,5 +157,5 @@ struct flb_input_plugin in_mqtt_plugin = {
.cb_flush_buf = NULL,
.cb_exit = in_mqtt_exit,
.config_map = config_map,
- .flags = FLB_INPUT_NET,
+ .flags = FLB_INPUT_NET | FLB_IO_OPT_TLS,
};
|
Handling char ** in SWIG to get json_stats from NEAT to use in Python front-end | #include "neat.h"
%}
+// This input typemap declares that char** requires no input parameter.
+// Instead, the address of a local char* is used to call the function.
+%typemap(in,numinputs=0) char** (char* tmp) %{
+$1 = &tmp;
+%}
+
+// After the function is called, the char** parameter contains a malloc'ed char* pointer.
+// Construct a Python Unicode object (I'm using Python 3) and append it to
+// any existing return value for the wrapper.
+%typemap(argout) char** (PyObject* obj) %{
+obj = PyUnicode_FromString(*$1);
+$result = SWIG_Python_AppendOutput($result,obj);
+%}
+
+// The malloc'ed pointer is no longer needed, so make sure it is freed.
+%typemap(freearg) char** %{
+free(*$1);
+%}
+
+
+
+
+
%apply int *OUTPUT { int *send, int *recv};
%inline %{
extern void neat_get_max_buffer_sizes(struct neat_flow *flow, int *send, int *recv);
|
perf-tools/dimemas: fix package name | %define PNAME %(echo %{pname} | tr [a-z] [A-Z])
Summary: Dimemas tool
-Name: %{pname}-%{compiler_family}%{PROJ_DELIM}
+Name: %{pname}-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
Version: 5.3.4
Release: 5
License: GNU
|
Restore the correct condition | @@ -42,7 +42,7 @@ typedef struct clap_plugin_audio_ports_activation {
// It is only possible to activate and de-activate on the audio-thread if
// can_activate_while_processing() returns true.
//
- // [can_activate_while_processing ? audio-thread : main-thread]
+ // [active ? audio-thread : main-thread]
void(CLAP_ABI *set_active)(const clap_plugin_t *plugin,
bool is_input,
uint32_t port_index,
|
doc: finalize criteria | @@ -54,15 +54,20 @@ But why not also learn about plugins while creating a new plugin?
Elektra's documentation must fulfill:
- It is self-contained.
- It is not enough to link to some paper and an external page to explain, e.g., decisions.
+ It is not enough to link to some paper and an external page as explanation.
All explanation must be within the repository, e.g., in case the external information goes away.
This doesn't apply if the authoritative standard lives outside of Elektra.
- Then you would write, e.g., "toml implements [this standard](https://toml.io/en/v1.0.0), with following extensions:".
-- It is consistent in its [terminology](/doc/help/elektra-glossary.md).
+ Then you would write, e.g., "The toml plugin implements [this standard](https://toml.io/en/v1.0.0), with following extensions:".
+ The extensions, however, again must be fully listed within our repository.
+- Always write what **is** not what you would like to have.
+ Explanations must always refer to the current situation (as changed with your PR).
+- It is consistent with our [terminology](/doc/help/elektra-glossary.md).
- Spelling is American English with [spellings as defined here](/scripts/sed).
- Sentences are short and written in one line.
I.e. lines usually end with `.`, `:` or `;`.
Avoid line breaks in the middle of the sentence.
+- Use active and strong verbs early in your sentences.
+ "We" refers to the community around the Elektra Initiative.
- Use enumerations or itemizations to keep a clear structure in the text.
- We use standard Markdown where possible, with only a few extensions:
- styled fenced blocks
|
Add message for symbol end events | * thread stack viewer
*
* Copyright (C) 2010-2016 wj32
- * Copyright (C) 2017-2021 dmex
+ * Copyright (C) 2017-2022 dmex
*
* This file is part of Process Hacker.
*
@@ -1575,15 +1575,12 @@ VOID PhpSymbolProviderEventCallbackHandler(
statusMessage = PhReferenceObject(event->EventMessage);
break;
case PH_SYMBOL_EVENT_TYPE_LOAD_END:
- statusMessage = PhReferenceEmptyString();
+ statusMessage = PhCreateString(L"Loading symbols from image...");
break;
case PH_SYMBOL_EVENT_TYPE_PROGRESS:
{
- ULONG64 progress = event->EventProgress;
-
statusMessage = PhReferenceObject(event->EventMessage);
- //context->SymbolProgress =
- statusProgress = (ULONG)progress;
+ statusProgress = (ULONG)event->EventProgress;
}
break;
}
@@ -1613,13 +1610,6 @@ HRESULT CALLBACK PhpThreadStackTaskDialogCallback(
{
context->TaskDialogHandle = hwndDlg;
- PhRegisterCallback(
- &PhSymbolEventCallback,
- PhpSymbolProviderEventCallbackHandler,
- context,
- &context->SymbolProviderEventRegistration
- );
-
PhSetApplicationWindowIcon(hwndDlg);
SendMessage(hwndDlg, TDM_UPDATE_ICON, TDIE_ICON_MAIN, (LPARAM)PhGetApplicationIcon(FALSE));
@@ -1631,6 +1621,13 @@ HRESULT CALLBACK PhpThreadStackTaskDialogCallback(
PhSetWindowContext(hwndDlg, 0xF, context);
SetWindowLongPtr(hwndDlg, GWLP_WNDPROC, (LONG_PTR)PhpThreadStackTaskDialogSubclassProc);
+ PhRegisterCallback(
+ &PhSymbolEventCallback,
+ PhpSymbolProviderEventCallbackHandler,
+ context,
+ &context->SymbolProviderEventRegistration
+ );
+
PhReferenceObject(context);
PhCreateThread2(PhpRefreshThreadStackThreadStart, context);
}
@@ -1669,29 +1666,11 @@ HRESULT CALLBACK PhpThreadStackTaskDialogCallback(
PhReleaseQueuedLockExclusive(&context->StatusLock);
- if (message)
- {
- SendMessage(
- context->TaskDialogHandle,
- TDM_SET_ELEMENT_TEXT,
- TDE_MAIN_INSTRUCTION,
- (LPARAM)PhGetString(message)
- );
-
- PhDereferenceObject(message);
- }
-
- if (content)
- {
- SendMessage(
- context->TaskDialogHandle,
- TDM_SET_ELEMENT_TEXT,
- TDE_CONTENT,
- (LPARAM)PhGetString(content)
- );
+ SendMessage(context->TaskDialogHandle, TDM_SET_ELEMENT_TEXT, TDE_MAIN_INSTRUCTION, (LPARAM)PhGetStringOrDefault(message, L" "));
+ SendMessage(context->TaskDialogHandle, TDM_SET_ELEMENT_TEXT, TDE_CONTENT, (LPARAM)PhGetStringOrDefault(content, L" "));
- PhDereferenceObject(content);
- }
+ if (message) PhDereferenceObject(message);
+ if (content) PhDereferenceObject(content);
if (context->SymbolProgressReset)
{
|
[mempool] :memo: Add DATE paper to the README | @@ -205,3 +205,20 @@ We use the following RISC-V tools to parse simulation traces and keep opcodes co
The open-source simulator [Verilator](https://www.veripool.org/verilator) can be used for RTL simulation.
- `toolchain/verilator` is licensed under GPL. See [Verilator's license](https://github.com/verilator/verilator/blob/master/LICENSE) for more details.
+
+## Publication
+
+If you want to use MemPool, you can cite us:
+
+```
+@InProceedings{MemPool2021,
+ author = {Matheus Cavalcante and Samuel Riedel and Antonio Pullini and Luca Benini},
+ title = {{MemPool}: A Shared-{L1} Memory Many-Core Cluster with a Low-Latency Interconnect},
+ booktitle = {Proceedings of the 2021 {Design}, {Automation} & {Test} in {Europe} {Conference} & {Exhibition} ({DATE})},
+ year = 2021,
+ month = mar,
+ address = {Grenoble, France}
+}
+```
+
+This paper is also available at arXiv, at the following link: [arXiv:2012.02973 [cs.AR]](https://arxiv.org/abs/2012.02973).
|
Testing: Add tests for large constant fields (GRIB1 and GRIBEX mode) | @@ -142,5 +142,12 @@ grib_check_key_equals $temp const,bitsPerValue,section7Length '1 0 5'
${tools_dir}/grib_set -s produceLargeConstantFields=1 -d1 $input $temp
grib_check_key_equals $temp const,bitsPerValue,section7Length '1 16 997'
+# GRIB1: when GRIBEX mode is enabled, we also get a large constant field
+input=${data_dir}/simple.grib
+${tools_dir}/grib_set -d1 $input $temp
+grib_check_key_equals $temp const,bitsPerValue,section4Length '1 0 12'
+ECCODES_GRIBEX_MODE_ON=1 ${tools_dir}/grib_set -d1 $input $temp
+grib_check_key_equals $temp const,bitsPerValue,section4Length '1 12 8966'
+
rm -f $temp $temp_err
|
Set status for new links in EN_addlink
Status is OPEN for pumps/pipes/CVs and ACTIVE for valves. Closes
link->Type = linkType;
link->N1 = N1;
link->N2 = N2;
+ link->Stat = OPEN;
if (linkType == EN_PUMP) {
link->Kc = 1.0; // Speed factor
@@ -4821,23 +4822,14 @@ int DLLEXPORT EN_addlink(EN_ProjectHandle ph, char *id, EN_LinkType linkType, ch
link->Kc = 0.0; // Valve setting.
link->Km = 0.0; // Loss coeff
link->Len = 0.0;
+ link->Stat = ACTIVE;
}
- // link->Len = 0.0;
- // link->Kc = 0.01;
- // link->Km = 0;
link->Kb = 0;
link->Kw = 0;
link->R = 0;
link->Rc = 0;
link->Rpt = 0;
- if (linkType == EN_CVPIPE) {
- link->Stat = OPEN;
- }
- else {
- link->Stat = CLOSED;
- }
-
ENHashTableInsert(net->LinkHashTable, link->ID, n);
return set_error(p->error_handle, 0);
}
|
fix scratchpad super + scroll | @@ -5550,6 +5550,8 @@ shiftview(const Arg *arg)
} while (!visible && ++count < 10);
if (count < 10) {
+ if (nextseltags & (1 << 20))
+ nextseltags = nextseltags ^ (1<<20);
a.i = nextseltags;
view(&a);
}
|
[MLN] 1.0.0.6.beta | Pod::Spec.new do |s|
s.name = 'MLN'
- s.version = '1.0.0.5.beta'
+ s.version = '1.0.0.6.beta'
s.summary = 'A lib of Momo Lua Native.'
# This description is used to generate tags and improve search results.
|
Change display message to unsupported when there is no PMTT
Reported-By: Robert Elliott | @@ -116,7 +116,10 @@ EFI_STATUS showAcpi(struct Command *pCmd) {
if (ChosenAcpiSystem == AcpiAll || ChosenAcpiSystem == AcpiPMTT) {
ReturnCode = pNvmDimmConfigProtocol->GetAcpiPMTT(pNvmDimmConfigProtocol, &pPMTT);
- if (EFI_ERROR(ReturnCode) || pPMTT == NULL) {
+ if (ReturnCode == EFI_NOT_FOUND) {
+ Print(L"PMTT table not found.\n");
+ ReturnCode = EFI_SUCCESS;
+ } else if (EFI_ERROR(ReturnCode)|| pPMTT == NULL) {
Print(L"Error: Failed to find the PMTT tables\n");
} else {
Print(L"---Platform Memory Topology Table---\n");
|
Use proper conditional for software DES
When converting definitions to use the new PSA defines, one erroneously
was conditionalized on the WANT macro instead of on the BUILTIN macro. | @@ -5063,7 +5063,7 @@ exit:
return( status );
}
-#if defined(PSA_WANT_KEY_TYPE_DES)
+#if defined(MBEDTLS_PSA_BUILTIN_KEY_TYPE_DES)
static void psa_des_set_key_parity( uint8_t *data, size_t data_size )
{
if( data_size >= 8 )
@@ -5073,7 +5073,7 @@ static void psa_des_set_key_parity( uint8_t *data, size_t data_size )
if( data_size >= 24 )
mbedtls_des_key_set_parity( data + 16 );
}
-#endif /* PSA_WANT_KEY_TYPE_DES */
+#endif /* MBEDTLS_PSA_BUILTIN_KEY_TYPE_DES */
static psa_status_t psa_generate_derived_key_internal(
psa_key_slot_t *slot,
|
viostor: Remove code for legacy OSes | <ConfigurationType>Driver</ConfigurationType>
<DriverType>Miniport</DriverType>
</PropertyGroup>
- <PropertyGroup Label="PropertySheets" Condition="'$(PlatformToolset)'=='v141_xp'">
- <UseLegacyDDK>true</UseLegacyDDK>
- <ConfigurationType>Application</ConfigurationType>
- <GenerateManifest>false</GenerateManifest>
- </PropertyGroup>
<Import Project="$(MSBuildProjectDirectory)\viostor.props" />
<Import Project="$(MSBuildProjectDirectory)\..\Tools\Driver.Common.props" />
<ImportGroup Label="ExtensionSettings">
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\Inc</AdditionalIncludeDirectories>
</ResourceCompile>
</ItemDefinitionGroup>
- <ItemDefinitionGroup Condition="'$(PlatformToolset)'=='v141_xp'">
- <ClCompile>
- <DisableSpecificWarnings>4267;%(DisableSpecificWarnings)</DisableSpecificWarnings>
- </ClCompile>
- <Link>
- <EntryPointSymbol Condition="'$(Platform)'=='Win32'">GsDriverEntry@8</EntryPointSymbol>
- <EntryPointSymbol Condition="'$(Platform)'=='x64'">GsDriverEntry</EntryPointSymbol>
- </Link>
- </ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)'=='Win8 Release'">
<ClCompile>
<PreprocessorDefinitions>%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
or-map: fix specific assertion that should have been general | (del-span tre %end end)
?~ end
(del-span tre %start start)
- ?> (lth u.start u.end)
+ ?> (compare u.start u.end)
=. tre (del-span tre %start start)
(del-span tre %end end)
::
|
refactor(memory check):
"keypair" should check non-null in UtilityPKCS2Native() | @@ -939,6 +939,12 @@ BOAT_RESULT UtilityPKCS2Native(BCHAR *input, KeypairNative *keypair)
{
// (*keypair).prikey = TLV_Level_2.data;
(*keypair).prikey = BoatMalloc(TLV_Level_2.len);
+ if(NULL == (*keypair).prikey){
+ if((*keypair).pubkey != NULL){
+ BoatFree((*keypair).pubkey);
+ }
+ return BOAT_ERROR;
+ }
memcpy((*keypair).prikey, TLV_Level_2.data, TLV_Level_2.len);
(*keypair).prikeylen = TLV_Level_2.len;
}
@@ -1009,6 +1015,12 @@ BOAT_RESULT UtilityPKCS2Native(BCHAR *input, KeypairNative *keypair)
(*keypair).pubkeylen = TLV_Level_3.len - 2;
// (*keypair).pubkey = TLV_Level_3.data + 2;
(*keypair).pubkey = BoatMalloc(TLV_Level_3.len - 2);
+ if(NULL == (*keypair).pubkey){
+ if((*keypair).prikey != NULL){
+ BoatFree((*keypair).prikey);
+ }
+ return BOAT_ERROR;
+ }
memcpy((*keypair).pubkey, TLV_Level_3.data + 2, TLV_Level_3.len - 2);
return BOAT_SUCCESS;
}
@@ -1102,6 +1114,12 @@ BOAT_RESULT UtilityPKCS2Native(BCHAR *input, KeypairNative *keypair)
{
// (*keypair).prikey = TLV_Level_2.data;
(*keypair).prikey = BoatMalloc(TLV_Level_4.len);
+ if(NULL == (*keypair).prikey){
+ if((*keypair).pubkey != NULL){
+ BoatFree((*keypair).pubkey);
+ }
+ return BOAT_ERROR;
+ }
memcpy((*keypair).prikey, TLV_Level_4.data, TLV_Level_4.len);
BoatLog(BOAT_LOG_NORMAL, " UtilityPKCS2Native prikey TLV_Level_4.len = %d .", TLV_Level_4.len);
(*keypair).prikeylen = TLV_Level_4.len;
@@ -1128,6 +1146,12 @@ BOAT_RESULT UtilityPKCS2Native(BCHAR *input, KeypairNative *keypair)
(*keypair).pubkeylen = TLV_Level_5.len - 2;
// (*keypair).pubkey = TLV_Level_3.data + 2;
(*keypair).pubkey = BoatMalloc(TLV_Level_5.len - 2);
+ if(NULL == (*keypair).pubkey){
+ if((*keypair).prikey != NULL){
+ BoatFree((*keypair).prikey);
+ }
+ return BOAT_ERROR;
+ }
memcpy((*keypair).pubkey, TLV_Level_5.data + 2, TLV_Level_5.len - 2);
return BOAT_SUCCESS;
}
|
board/kukui_scp/venc.h: Format with clang-format
BRANCH=none
TEST=none | @@ -22,7 +22,8 @@ struct venc_msg {
unsigned char msg[288];
};
-BUILD_ASSERT(member_size(struct venc_msg, msg) <= CONFIG_IPC_SHARED_OBJ_BUF_SIZE);
+BUILD_ASSERT(member_size(struct venc_msg, msg) <=
+ CONFIG_IPC_SHARED_OBJ_BUF_SIZE);
/* Functions provided by private overlay. */
void venc_h264_msg_handler(void *data);
|
doc: mention the pg_locks lock names in parentheses
Reported-by: Troy Frericks
Discussion:
Backpatch-through: 10 | @@ -875,7 +875,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<title>Table-Level Lock Modes</title>
<varlistentry>
<term>
- <literal>ACCESS SHARE</literal>
+ <literal>ACCESS SHARE</literal> (<literal>AccessShareLock</literal>)
</term>
<listitem>
<para>
@@ -893,7 +893,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>ROW SHARE</literal>
+ <literal>ROW SHARE</literal> (<literal>RowShareLock</literal>)
</term>
<listitem>
<para>
@@ -914,7 +914,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>ROW EXCLUSIVE</literal>
+ <literal>ROW EXCLUSIVE</literal> (<literal>RowExclusiveLock</literal>)
</term>
<listitem>
<para>
@@ -936,7 +936,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>SHARE UPDATE EXCLUSIVE</literal>
+ <literal>SHARE UPDATE EXCLUSIVE</literal> (<literal>ShareUpdateExclusiveLock</literal>)
</term>
<listitem>
<para>
@@ -963,7 +963,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>SHARE</literal>
+ <literal>SHARE</literal> (<literal>ShareLock</literal>)
</term>
<listitem>
<para>
@@ -983,7 +983,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>SHARE ROW EXCLUSIVE</literal>
+ <literal>SHARE ROW EXCLUSIVE</literal> (<literal>ShareRowExclusiveLock</literal>)
</term>
<listitem>
<para>
@@ -1005,7 +1005,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>EXCLUSIVE</literal>
+ <literal>EXCLUSIVE</literal> (<literal>ExclusiveLock</literal>)
</term>
<listitem>
<para>
@@ -1027,7 +1027,7 @@ ERROR: could not serialize access due to read/write dependencies among transact
<varlistentry>
<term>
- <literal>ACCESS EXCLUSIVE</literal>
+ <literal>ACCESS EXCLUSIVE</literal> (<literal>AccessExclusiveLock</literal>)
</term>
<listitem>
<para>
|
Add --ignore-qemu flag to toolchains | Prepare QEMU only when it will be built
echo "Options"
echo " --prefix PREFIX : Install destination. If unset, defaults to $(pwd)/riscv-tools-install"
echo " or $(pwd)/esp-tools-install"
+ echo " --ignore-qemu : Ignore installing QEMU"
echo " --help -h : Display this message"
exit "$1"
}
@@ -34,6 +35,7 @@ die() {
TOOLCHAIN="riscv-tools"
EC2FASTINSTALL="false"
+IGNOREQEMU="false"
RISCV=""
# getopts does not support long options, and is inflexible
@@ -45,6 +47,9 @@ do
-p | --prefix )
shift
RISCV=$(realpath $1) ;;
+ --ignore-qemu )
+ shift
+ IGNOREQEMU="true" ;;
riscv-tools | esp-tools)
TOOLCHAIN=$1 ;;
ec2fast )
@@ -109,7 +114,7 @@ else
*) false ;;
esac; ) || die 'obsolete make version; need GNU make 4.x or later'
- module_prepare riscv-gnu-toolchain qemu
+ module_prepare riscv-gnu-toolchain
module_build riscv-gnu-toolchain --prefix="${RISCV}" --with-cmodel=medany
echo '==> Building GNU/Linux toolchain'
module_make riscv-gnu-toolchain linux
@@ -128,7 +133,9 @@ module_all riscv-tests --prefix="${RISCV}/riscv64-unknown-elf"
SRCDIR="$(pwd)/toolchains" module_all libgloss --prefix="${RISCV}/riscv64-unknown-elf" --host=riscv64-unknown-elf
+if [ "${IGNOREQEMU}" = false ] ; then
SRCDIR="$(pwd)/toolchains" module_all qemu --prefix="${RISCV}" --target-list=riscv64-softmmu
+fi
# make Dromajo
git submodule update --init $CHIPYARD_DIR/tools/dromajo/dromajo-src
|
Fix BOARDS string in srf06-cc26xx platform makefile to accurately reflect all valid values of the BOARD variable | @@ -6,7 +6,7 @@ endif
### Board and BSP selection
BOARD ?= srf06/cc26xx
-BOARDS = srf06/cc26xx srf06/cc13xx launchpad/cc26xx launchpad/cc13xx sensortag/cc26xx sensortag/cc13xx
+BOARDS = srf06/cc26xx srf06/cc13xx launchpad/cc2650 launchpad/cc1310 launchpad/cc1350 sensortag/cc2650 sensortag/cc1350
CONTIKI_TARGET_DIRS += .
|
Refactor ccp_create using anchor | @@ -706,12 +706,7 @@ jobs:
- get: centos-gpdb-dev-6
- put: terraform
params:
- action: create
- delete_on_failure: true
- generate_random_name: true
- terraform_source: ccp_src/aws/
- vars:
- aws_instance-node-instance_type: t2.medium
+ <<: *ccp_create_params
- task: gen_cluster
file: ccp_src/ci/tasks/gen_cluster.yml
on_failure:
@@ -1213,12 +1208,7 @@ jobs:
- get: centos-gpdb-dev-6
- put: terraform
params:
- action: create
- delete_on_failure: true
- generate_random_name: true
- terraform_source: ccp_src/aws/
- vars:
- aws_instance-node-instance_type: t2.medium
+ <<: *ccp_create_params
- task: gen_cluster
file: ccp_src/ci/tasks/gen_cluster.yml
on_failure:
@@ -1457,6 +1447,14 @@ jobs:
## reusable anchors
## ======================================================================
+ccp_create_params_anchor: &ccp_create_params
+ action: create
+ delete_on_failure: true
+ generate_random_name: true
+ terraform_source: ccp_src/aws/
+ vars:
+ aws_instance-node-instance_type: t2.medium
+
ccp_destroy_anchor: &ccp_destroy
put: terraform
params:
@@ -1464,7 +1462,7 @@ ccp_destroy_anchor: &ccp_destroy
env_name_file: terraform/name
terraform_source: ccp_src/aws/
vars:
- aws_instance-node-instance_type: t2.micro
+ aws_instance-node-instance_type: t2.micro #t2.micro is ignored in destroy, but aws_instance-node-instance_type is required.
get_params:
action: destroy
|
mmu: replace ASSERT with panic in fetch_page_table_offset
All callers of fetch_page_table_offset should already make sure
it will not come to an unknown table_level, so just panic here.
break;
default:
- pr_err("Wrong page table level = 0x%lx", table_level);
- ASSERT(false, "Wrong page table level");
+ /* all callers should already make sure it will not come
+ * to here
+ */
+ panic("Wrong page table level");
break;
}
|
Replace elif without condition with else | @@ -293,7 +293,7 @@ void initArgv(VM *vm, Table *table, int argc, const char *argv[]) {
void initPlatform(VM *vm, Table *table) {
#ifdef _WIN32
defineNativeProperty(vm, table, "platform", OBJ_VAL(copyString(vm, "windows", 7)));
-#elif
+#else
struct utsname u;
if (-1 == uname(&u)) {
defineNativeProperty(vm, table, "platform", OBJ_VAL(copyString(vm,
|
Fix signed/unsigned issue. | @@ -306,7 +306,8 @@ main(int argc, char *argv[])
SCTP_PEER_ADDR_CHANGE,
SCTP_SEND_FAILED_EVENT};
char buffer[80];
- int i, n;
+ unsigned int i;
+ int n;
if (argc < 3) {
printf("%s", "Usage: client remote_addr remote_port local_port local_encaps_port remote_encaps_port\n");
@@ -381,7 +382,7 @@ main(int argc, char *argv[])
} else {
addr = addrs;
printf("Local addresses: ");
- for (i = 0; i < n; i++) {
+ for (i = 0; i < (unsigned int)n; i++) {
if (i > 0) {
printf("%s", ", ");
}
@@ -429,7 +430,7 @@ main(int argc, char *argv[])
} else {
addr = addrs;
printf("Peer addresses: ");
- for (i = 0; i < n; i++) {
+ for (i = 0; i < (unsigned int)n; i++) {
if (i > 0) {
printf("%s", ", ");
}
|
Add unit test for float encoding
PR <https://github.com/Genymobile/scrcpy/pull/3369> | @@ -67,6 +67,17 @@ static void test_read64be(void) {
assert(val == 0xABCD1234567890EF);
}
+static void test_float_to_u16fp(void) {
+ assert(sc_float_to_u16fp(0.0f) == 0);
+ assert(sc_float_to_u16fp(0.03125f) == 0x800);
+ assert(sc_float_to_u16fp(0.0625f) == 0x1000);
+ assert(sc_float_to_u16fp(0.125f) == 0x2000);
+ assert(sc_float_to_u16fp(0.25f) == 0x4000);
+ assert(sc_float_to_u16fp(0.5f) == 0x8000);
+ assert(sc_float_to_u16fp(0.75f) == 0xc000);
+ assert(sc_float_to_u16fp(1.0f) == 0xffff);
+}
+
int main(int argc, char *argv[]) {
(void) argc;
(void) argv;
@@ -77,5 +88,7 @@ int main(int argc, char *argv[]) {
test_read16be();
test_read32be();
test_read64be();
+
+ test_float_to_u16fp();
return 0;
}
|
Make sure that a cert with extensions gets version number 2 (v3)
Fixes | @@ -1735,7 +1735,6 @@ static int do_body(X509 **xret, EVP_PKEY *pkey, X509 *x509,
/* Lets add the extensions, if there are any */
if (ext_sect) {
X509V3_CTX ctx;
- X509_set_version(ret, 2);
/* Initialize the context structure */
if (selfsign)
@@ -1790,6 +1789,15 @@ static int do_body(X509 **xret, EVP_PKEY *pkey, X509 *x509,
goto end;
}
+ {
+ const STACK_OF(X509_EXTENSION) *exts = X509_get0_extensions(ret);
+
+ if (exts != NULL && sk_X509_EXTENSION_num(exts) > 0)
+ /* Make it an X509 v3 certificate. */
+ if (!X509_set_version(ret, 2))
+ goto end;
+ }
+
/* Set the right value for the noemailDN option */
if (email_dn == 0) {
if (!X509_set_subject_name(ret, dn_subject))
|
Scorecard Write-All Permission | @@ -9,18 +9,13 @@ on:
pull_request:
branches: [ main ]
-# Declare default permissions as read only.
-permissions: read-all
+# Declare default permissions as write.
+permissions: write-all
jobs:
analysis:
name: Scorecards analysis
runs-on: ubuntu-latest
- permissions:
- # Needed to upload the results to code-scanning dashboard.
- security-events: write
- actions: read
- contents: read
steps:
- name: "Checkout code"
|
out_prometheus_exporter: disable http stop on exit | @@ -106,7 +106,7 @@ static int cb_prom_exit(void *data, struct flb_config *config)
{
struct prom_exporter *ctx = data;
- prom_http_server_stop(ctx->http);
+ //FIXME: prom_http_server_stop(ctx->http);
prom_http_server_destroy(ctx->http);
flb_free(ctx);
|
pbio/drivebase: include motor ratio in geometry
This way, all geometry is captured in the same variable (the control unit scaler) and all control can be performed as counts. | @@ -179,34 +179,53 @@ static pbio_error_t pbio_drivebase_setup(pbio_drivebase_t *db,
return err;
}
- // Individual servos
- db->left = left;
- db->right = right;
-
// Drivebase geometry
if (wheel_diameter <= 0 || axle_track <= 0) {
return PBIO_ERROR_INVALID_ARG;
}
+ // Assert that both motors have the same gearing
+ if (left->control.settings.counts_per_unit != right->control.settings.counts_per_unit) {
+ return PBIO_ERROR_INVALID_ARG;
+ }
+
+ // Individual servos
+ db->left = left;
+ db->right = right;
+
// Initialize log
db->log.num_values = DRIVEBASE_LOG_NUM_VALUES;
// Configure heading controller
db->control_heading.settings = settings_drivebase_heading_default;
- // Difference between the motors for every 1 degree drivebase rotation
- db->control_heading.settings.counts_per_unit = fix16_div(
- fix16_mul(axle_track, fix16_from_int(2)),
+ // Count difference between the motors for every 1 degree drivebase rotation
+ db->control_heading.settings.counts_per_unit =
+ fix16_mul(
+ left->control.settings.counts_per_unit,
+ fix16_div(
+ fix16_mul(
+ axle_track,
+ fix16_from_int(2)
+ ),
wheel_diameter
+ )
);
// Configure distance controller
db->control_distance.settings = settings_drivebase_distance_default;
- // Sum of motors for every mm forward
- db->control_distance.settings.counts_per_unit = fix16_div(
- fix16_mul(fix16_from_int(180), FOUR_DIV_PI),
+ // Sum of motor counts for every 1 mm forward
+ db->control_distance.settings.counts_per_unit =
+ fix16_mul(
+ left->control.settings.counts_per_unit,
+ fix16_div(
+ fix16_mul(
+ fix16_from_int(180),
+ FOUR_DIV_PI
+ ),
wheel_diameter
+ )
);
return PBIO_SUCCESS;
|
proxy: fix flushing partial writes
If we got a partial write while flushing commands to the backend it was
not advancing the IOV pointer properly and re-flushing the same data.
This would usually lead to a "CLIENT_ERROR bad data chunk" from the
server for set commands.
I super love/hate scatter/gather code... | @@ -970,6 +970,7 @@ static int _flush_pending_write(mcp_backend_t *be) {
iov->iov_len = 0;
} else {
iov->iov_len -= sent;
+ iov->iov_base = (char *)iov->iov_base + sent;
sent = 0;
flushed = false;
break;
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.