message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
Verbose asm output | @@ -51,7 +51,7 @@ local function link_obj(o_filename, so_filename)
end
function c_compiler.compile_c_to_s(src, dst, _modname)
- return compile_c(src, dst, "-S")
+ return compile_c(src, dst, "-S -fverbose-asm")
end
function c_compiler.compile_s_to_o(src, dst, _modname)
|
include aarch64 builds for docs RPM | @@ -77,7 +77,15 @@ make ; %{parser} steps.tex > recipe.sh ; popd
pushd docs/recipes/install/sles12sp1/x86_64/warewulf/pbspro
make ; %{parser} steps.tex > recipe.sh ; popd
+#----------------------
+# aarch64-based recipes
+#----------------------
+pushd docs/recipes/install/centos7.2/aarch64/warewulf/slurm
+make ; %{parser} steps.tex > recipe.sh ; popd
+
+pushd docs/recipes/install/sles12sp1/aarch64/warewulf/slurm
+make ; %{parser} steps.tex > recipe.sh ; popd
%install
@@ -86,6 +94,8 @@ make ; %{parser} steps.tex > recipe.sh ; popd
install -m 0644 -p docs/ChangeLog %{buildroot}/%{OHPC_PUB}/doc/ChangeLog
install -m 0644 -p docs/Release_Notes.txt %{buildroot}/%{OHPC_PUB}/doc/Release_Notes.txt
+# x86_64 guides
+
%define lpath centos7.2/x86_64/warewulf/slurm
install -m 0644 -p -D docs/recipes/install/%{lpath}/steps.pdf %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/Install_guide.pdf
install -m 0755 -p -D docs/recipes/install/%{lpath}/recipe.sh %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/recipe.sh
@@ -102,6 +112,17 @@ install -m 0755 -p -D docs/recipes/install/%{lpath}/recipe.sh %{buildroot}/%{OHP
install -m 0644 -p -D docs/recipes/install/%{lpath}/steps.pdf %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/Install_guide.pdf
install -m 0755 -p -D docs/recipes/install/%{lpath}/recipe.sh %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/recipe.sh
+# aarch64 guides
+
+%define lpath centos7.2/aarch64/warewulf/slurm
+install -m 0644 -p -D docs/recipes/install/%{lpath}/steps.pdf %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/Install_guide.pdf
+install -m 0755 -p -D docs/recipes/install/%{lpath}/recipe.sh %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/recipe.sh
+
+%define lpath sles12sp1/aarch64/warewulf/slurm
+install -m 0644 -p -D docs/recipes/install/%{lpath}/steps.pdf %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/Install_guide.pdf
+install -m 0755 -p -D docs/recipes/install/%{lpath}/recipe.sh %{buildroot}/%{OHPC_PUB}/doc/recipes/%{lpath}/recipe.sh
+
+# input file templates
install -m 0644 -p docs/recipes/install/centos7.2/input.local.template %{buildroot}/%{OHPC_PUB}/doc/recipes/centos7.2/input.local
install -m 0644 -p docs/recipes/install/sles12sp1/input.local.template %{buildroot}/%{OHPC_PUB}/doc/recipes/sles12sp1/input.local
|
[SBT] Quiet down jgit http warnings | resolvers += Resolver.url("scalasbt", new URL("http://scalasbt.artifactoryonline.com/scalasbt/sbt-plugin-releases")) (Resolver.ivyStylePatterns)
resolvers += Classpaths.sbtPluginReleases
-resolvers += "jgit-repo" at "http://download.eclipse.org/jgit/maven"
+resolvers += "jgit-repo" at "https://download.eclipse.org/jgit/maven"
addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2")
addSbtPlugin("com.typesafe.sbt" % "sbt-ghpages" % "0.6.2")
|
BugID:16731946:add lock to tlsf_realloc | @@ -1232,7 +1232,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
** - an extended buffer size will leave the newly-allocated area with
** contents undefined
*/
-void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
+void* _tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
void* p = 0;
@@ -1290,3 +1290,15 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
return p;
}
+
+void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
+{
+ void *addr;
+
+ krhino_mutex_lock(tlsf_mutex, RHINO_WAIT_FOREVER);
+ addr = _tlsf_realloc(tlsf, ptr, size);
+ krhino_mutex_unlock(tlsf_mutex);
+
+ return ret;
+}
+
|
console; enable backspace. | /* Control characters */
#define ESC 0x1b
#define DEL 0x7f
+#define BS 0x08
/* ANSI escape sequences */
#define ANSI_ESC '['
@@ -458,6 +459,7 @@ console_handle_char(uint8_t byte)
nlip_state |= NLIP_DATA_START1;
break;
case DEL:
+ case BS:
if (cur > 0) {
del_char(&input->line[--cur], end);
}
|
tests/sprintf: add tests for # and + | @@ -44,5 +44,38 @@ int main() {
sprintf(buf, "%3.2d", 123);
assert(buf[0] == '1' && buf[1] == '2' && buf[2] == '3'
&& buf[3] == '\0');
+
+ // Test '+' and ' ' flags - mlibc issue #229.
+ sprintf(buf, "%+d", 12);
+ assert(buf[0] == '+' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "% d", 12);
+ assert(buf[0] == ' ' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "% +d", 12);
+ assert(buf[0] == '+' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "%+ d", 12);
+ assert(buf[0] == '+' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "%+d", -12);
+ assert(buf[0] == '-' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "% d", -12);
+ assert(buf[0] == '-' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "% +d", -12);
+ assert(buf[0] == '-' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+ sprintf(buf, "%+ d", -12);
+ assert(buf[0] == '-' && buf[1] == '1' && buf[2] == '2' && buf[3] == '\0');
+
+ // Test '#' flag.
+ // TODO: Test with a, A, e, E, f, F, g, G conversions.
+ sprintf(buf, "%#x", 12);
+ assert(buf[0] == '0' && buf[1] == 'x' && buf[2] == 'c' && buf[3] == '\0');
+ sprintf(buf, "%#X", 12);
+ assert(buf[0] == '0' && buf[1] == 'X' && buf[2] == 'C' && buf[3] == '\0');
+ sprintf(buf, "%#o", 12);
+ assert(buf[0] == '0' && buf[1] == '1' && buf[2] == '4' && buf[3] == '\0');
+ sprintf(buf, "%#x", 0);
+ assert(buf[0] == '0' && buf[1] == '\0');
+ sprintf(buf, "%#X", 0);
+ assert(buf[0] == '0' && buf[1] == '\0');
+ sprintf(buf, "%#o", 0);
+ assert(buf[0] == '0' && buf[1] == '\0');
return 0;
}
|
http: check return of snprintf
Purely cautionary / defensive programming; no reason to think this would
have been a problem. | @@ -1635,9 +1635,12 @@ _http_release_ports_file(c3_c *pax_c)
{
c3_c* nam_c = ".http.ports";
c3_w len_w = 1 + strlen(pax_c) + 1 + strlen(nam_c);
-
c3_c* paf_c = c3_malloc(len_w);
- snprintf(paf_c, len_w, "%s/%s", pax_c, nam_c);
+ c3_i wit_i;
+
+ wit_i = snprintf(paf_c, len_w, "%s/%s", pax_c, nam_c);
+ c3_assert(wit_i > 0);
+ c3_assert(len_w == (c3_w)wit_i + 1);
unlink(paf_c);
c3_free(paf_c);
|
COIL: Rename GT7288 address define
Rename GT7288 address definition to match current I2C naming
conventions.
BRANCH=None
TEST=make -j buildall | #define CPRINTS(format, args...) cprints(CC_TOUCHPAD, format, ## args)
-#define GT7288_SLAVE_ADDRESS 0x14
+#define GT7288_I2C_ADDR_FLAGS 0x14
#define GT7288_REPORT_ID_PTP 0x04
@@ -47,7 +47,7 @@ static int gt7288_read_desc(uint16_t register_id, uint8_t *data,
uint8_t reg_bytes[] = {
register_id & 0xFF, (register_id & 0xFF00) >> 8
};
- return i2c_xfer(CONFIG_TOUCHPAD_I2C_PORT, GT7288_SLAVE_ADDRESS,
+ return i2c_xfer(CONFIG_TOUCHPAD_I2C_PORT, GT7288_I2C_ADDR_FLAGS,
reg_bytes, sizeof(reg_bytes), data, max_length);
}
@@ -88,7 +88,7 @@ static void gt7288_translate_contact(const uint8_t *data,
static int gt7288_read(uint8_t *data, size_t max_length)
{
- return i2c_xfer(CONFIG_TOUCHPAD_I2C_PORT, GT7288_SLAVE_ADDRESS,
+ return i2c_xfer(CONFIG_TOUCHPAD_I2C_PORT, GT7288_I2C_ADDR_FLAGS,
NULL, 0, data, max_length);
}
|
Fix firmware version number. | * the IDE will Not connect if the major version number is different.
*/
#define FIRMWARE_VERSION_MAJOR (3)
-#define FIRMWARE_VERSION_MINOR (4)
-#define FIRMWARE_VERSION_PATCH (0)
+#define FIRMWARE_VERSION_MINOR (3)
+#define FIRMWARE_VERSION_PATCH (1)
/**
* To add a new debugging command, increment the last command value used.
|
fix garbage value in ksFindHierarchy | @@ -1291,7 +1291,15 @@ ssize_t ksCopyInternal (KeySet * ks, size_t to, size_t from)
elektraCursor ksFindHierarchy (const KeySet * ks, const Key * root, elektraCursor * end)
{
if (ks == NULL || root == NULL) return -1;
- if (ks->data == NULL) return 0;
+ if (ks->data == NULL)
+ {
+ if (end != NULL)
+ {
+ *end = 0;
+ }
+
+ return 0;
+ }
ssize_t search = ksSearchInternal (ks, root);
size_t it = search < 0 ? -search - 1 : search;
|
site.cfg only | @@ -131,9 +131,6 @@ module load fftw
module load numpy
CFLAGS="%{optflags} -fno-strict-aliasing" \
-FFTW=$FFTW_LIB \
-BLAS=$OPENBLAS_LIB \
-LAPACK=$OPENBLAS_LIB \
%if "%{compiler_family}" == "intel"
LDSHARED="icc -shared" \
python setup.py config --compiler=intelm --fcompiler=intelem build_clib --compiler=intelem --fcompiler=intelem build_ext --compiler=intelem --fcompiler=intelem build
|
Blind update of transport_param_test to work with version 'PCQ1' | @@ -81,7 +81,7 @@ static picoquic_tp_t transport_param_test10 = {
};
uint8_t client_param1[] = {
- 'P', 'C', 'Q', '0',
+ 'P', 'C', 'Q', '1',
0, 0x28,
0, 0, 0, 4, 0, 0, 0xFF, 0xFF,
0, 1, 0, 4, 0, 0x40, 0, 0,
@@ -131,8 +131,9 @@ uint8_t client_param5[] = {
};
uint8_t server_param1[] = {
- 'P', 'C', 'Q', '0',
- 0x08,
+ 'P', 'C', 'Q', '1',
+ 0x0C,
+ 'P', 'C', 'Q', '1',
'P', 'C', 'Q', '0',
0xFF, 0x00, 0x00, 0x0F,
0, 0x36,
@@ -145,8 +146,9 @@ uint8_t server_param1[] = {
};
uint8_t server_param2[] = {
- 'P', 'C', 'Q', '0',
- 0x08,
+ 'P', 'C', 'Q', '1',
+ 0x0C,
+ 'P', 'C', 'Q', '1',
'P', 'C', 'Q', '0',
0xFF, 0x00, 0x00, 0x0F,
0, 0x36,
@@ -159,7 +161,7 @@ uint8_t server_param2[] = {
};
uint8_t client_param8[] = {
- 'P', 'C', 'Q', '0',
+ 'P', 'C', 'Q', '1',
0, 0x1C,
0, 0, 0, 4, 0, 0, 0xFF, 0xFF,
0, 1, 0, 4, 0, 0x40, 0, 0,
@@ -168,8 +170,9 @@ uint8_t client_param8[] = {
};
uint8_t server_param3[] = {
- 'P', 'C', 'Q', '0',
- 0x08,
+ 'P', 'C', 'Q', '1',
+ 0x0C,
+ 'P', 'C', 'Q', '1',
'P', 'C', 'Q', '0',
0xFF, 0x00, 0x00, 0x0F,
0, 87,
@@ -184,7 +187,7 @@ uint8_t server_param3[] = {
};
uint8_t client_param9[] = {
- 'P', 'C', 'Q', '0',
+ 'P', 'C', 'Q', '1',
0, 0x2C,
0, 0, 0, 4, 0, 0, 0xFF, 0xFF,
0, 1, 0, 4, 0, 0x40, 0, 0,
|
[stat_cache] FAM: ignore follow-symlink config
no distinction needs to be made whether or not server.follow-symlink set | @@ -159,9 +159,6 @@ static handler_t stat_cache_handle_fdevent(server *srv, void *_fce, int revent)
for (i = 0; i < events; i++) {
FAMEvent fe;
- splay_tree *node;
- int ndx;
-
if (FAMNextEvent(&scf->fam, &fe) < 0) break;
scf->dirs = splaytree_splay(scf->dirs, (int)(intptr_t)fe.userdata);
if (!scf->dirs || scf->dirs->key != (int)(intptr_t)fe.userdata) {
@@ -193,28 +190,12 @@ static handler_t stat_cache_handle_fdevent(server *srv, void *_fce, int revent)
switch(fe.code) {
case FAMChanged:
+ ++fam_dir->version;
+ break;
case FAMDeleted:
case FAMMoved:
- /* if the filename is a directory remove the entry */
-
- fam_dir->version++;
-
- /* file/dir is still here */
- if (fe.code == FAMChanged) break;
-
- ndx = hashme(fe.filename, strlen(fe.filename));
-
- scf->dirs = splaytree_splay(scf->dirs, ndx);
- node = scf->dirs;
-
- if (node && (node->key == ndx)) {
- int osize = splaytree_size(scf->dirs);
-
- fam_dir_entry_free(&scf->fam, node->data);
- scf->dirs = splaytree_delete(scf->dirs, ndx);
-
- force_assert(osize - 1 == splaytree_size(scf->dirs));
- }
+ fam_dir_entry_free(&scf->fam, fam_dir);
+ scf->dirs = splaytree_delete(scf->dirs, scf->dirs->key);
break;
default:
break;
|
Fix compilier warning on -Os, gcc. | @@ -125,7 +125,7 @@ static void bignat_div(struct BigNat *mant, uint32_t divisor) {
int32_t i;
uint32_t quotient, remainder;
uint64_t dividend;
- remainder = 0;
+ remainder = 0, quotient = 0;
for (i = mant->n - 1; i >= 0; i--) {
dividend = ((uint64_t)remainder * BIGNAT_BASE) + mant->digits[i];
if (i < mant->n - 1) mant->digits[i + 1] = quotient;
|
Implement model_data access. | @@ -50,6 +50,7 @@ int mapstrings_entity_property(ScriptVariant **varlist, int paramCount)
"energy_status",
"exists",
"model",
+ "model_data",
"model_default",
"name",
"opponent",
@@ -344,6 +345,13 @@ HRESULT openbor_get_entity_property(ScriptVariant **varlist , ScriptVariant **pr
break;
+ case _ENTITY_MODEL_DATA:
+
+ ScriptVariant_ChangeType(*pretvar, VT_PTR);
+ (*pretvar)->ptrVal = (VOID *)&handle->modeldata;
+
+ break;
+
case _ENTITY_MODEL_DEFAULT:
ScriptVariant_ChangeType(*pretvar, VT_PTR);
@@ -747,6 +755,10 @@ HRESULT openbor_set_entity_property(ScriptVariant **varlist, ScriptVariant **pre
break;
+ case _ENTITY_MODEL_DATA:
+
+ // Read only.
+
case _ENTITY_MODEL_DEFAULT:
handle->defaultmodel = (s_model *)varlist[ARG_VALUE]->ptrVal;
|
Show the shell as the secondary view controller at startup | @@ -134,9 +134,11 @@ class SidebarSplitViewController: UISplitViewController, UISplitViewControllerDe
compactFileBrowserNavVC.navigationBar.prefersLargeTitles = true
compactFileBrowserNavVC.view.backgroundColor = .systemBackground
- setViewController(UINavigationController(rootViewController: SidebarViewController(splitViewID: id, compact: false)), for: .primary)
+ let sidebar = SidebarViewController(splitViewID: id, compact: false)
+ sidebar.makeModuleRunnerIfNecessary()
+ setViewController(UINavigationController(rootViewController: sidebar), for: .primary)
setViewController(fileBrowserNavVC, for: .supplementary)
- setViewController(UINavigationController(rootViewController: UIViewController()), for: .secondary)
+ setViewController(sidebar.moduleRunner!, for: .secondary)
setViewController(UINavigationController(rootViewController: SidebarViewController(splitViewID: id, compact: true)), for: .compact)
preferredDisplayMode = .twoOverSecondary
|
gdbmacros; Add os_wakeups, which lists scheduled os_callouts and
when next task wakeups are supposed to happen. | @@ -34,5 +34,40 @@ end
document os_tasks
usage: os_tasks
-
Displays os tasks
+end
+
+define os_callouts
+ printf "Callouts:\n"
+ printf " %8s %10s %10s\n", "tick", "callout", "func"
+ while $c != 0
+ printf " %8d %10p %10p\n", $c->c_ticks, $c, $c->c_ev.ev_cb
+ set $c = $c->c_next.tqe_next
+ end
+end
+
+define os_sleep_list
+ printf "Tasks:\n"
+ printf " %8s %10s %10s\n", "tick", "task", "taskname"
+ set $t = g_os_sleep_list.tqh_first
+ while $t != 0
+ set $no_timo = $t->t_flags & 0x1
+ if $no_timo == 0
+ printf " %8d %10p ", $t->t_next_wakeup, $t
+ printf "%10s\n", $t->t_name
+ end
+ set $t = $t->t_os_list.tqe_next
+ end
+end
+
+define os_wakeups
+ set $c = g_callout_list.tqh_first
+ printf " Now is %d\n", g_os_time
+ os_callouts
+ os_sleep_list
+end
+
+document os_wakeups
+usage: os_wakeups
+Displays scheduled OS callouts, and next scheduled task wakeups
+end
|
disown and pickup of next task. | @@ -3150,13 +3150,23 @@ xfr_serial_means_update(struct auth_xfer* xfr, uint32_t serial)
return 0;
}
+/** disown task_transfer. caller must hold xfr.lock */
+static void
+xfr_transfer_disown(struct auth_xfer* xfr)
+{
+ /* remove the commpoint */
+ comm_point_delete(xfr->task_transfer->cp);
+ xfr->task_transfer->cp = NULL;
+ /* we don't own this item anymore */
+ xfr->task_transfer->worker = NULL;
+ xfr->task_transfer->env = NULL;
+}
+
/** perform next lookup, next transfer TCP, or end and resume wait time task */
static void
xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env)
{
log_assert(xfr->task_transfer->worker == env->worker);
- /* TODO: setup locks, also in probe lookups */
- (void)xfr; (void)env;
/* TODO */
#if 0
@@ -3174,6 +3184,7 @@ xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env)
}
/* initiate TCP and fetch the zone from the master */
+ /* and set timeout on it */
while(!xfr_transfer_end_of_list(xfr)) {
if(xfr_transfer_init_fetch(xfr, env)) {
/* successfully started, wait for callback */
@@ -3184,6 +3195,7 @@ xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env)
break;
}
}
+#endif
lock_basic_lock(&xfr->lock);
/* we failed to fetch the zone, move to wait task
@@ -3193,7 +3205,6 @@ xfr_transfer_nexttarget_or_end(struct auth_xfer* xfr, struct module_env* env)
/* pick up the nextprobe task and wait */
xfr_set_timeout(xfr, env, 1);
lock_basic_unlock(&xfr->lock);
-#endif
}
/** start transfer task by this worker , xfr is locked. */
@@ -3215,7 +3226,7 @@ xfr_start_transfer(struct auth_xfer* xfr, struct module_env* env,
/* start lookup for hostnames in transfer master list */
xfr_transfer_start_lookups(xfr);
- /* TODO initiate TCP, and set timeout on it */
+ /* initiate TCP, and set timeout on it */
xfr_transfer_nexttarget_or_end(xfr, env);
}
|
Return the cookie_len value from generate_cookie_callback
The generate_cookie_callback was failing to pass back the generated
cookie length to the caller. This results in DTLS connection failures
from s_server. | @@ -745,6 +745,7 @@ int generate_cookie_callback(SSL *ssl, unsigned char *cookie,
EVP_MAC *hmac = NULL;
EVP_MAC_CTX *ctx = NULL;
OSSL_PARAM params[3], *p = params;
+ size_t mac_len;
/* Initialize a random secret */
if (!cookie_initialized) {
@@ -808,10 +809,11 @@ int generate_cookie_callback(SSL *ssl, unsigned char *cookie,
BIO_printf(bio_err, "HMAC context update failed\n");
goto end;
}
- if (!EVP_MAC_final(ctx, cookie, NULL, (size_t)cookie_len)) {
+ if (!EVP_MAC_final(ctx, cookie, &mac_len, DTLS1_COOKIE_LENGTH)) {
BIO_printf(bio_err, "HMAC context final failed\n");
goto end;
}
+ *cookie_len = (int)mac_len;
res = 1;
end:
OPENSSL_free(buffer);
@@ -840,7 +842,8 @@ int verify_cookie_callback(SSL *ssl, const unsigned char *cookie,
int generate_stateless_cookie_callback(SSL *ssl, unsigned char *cookie,
size_t *cookie_len)
{
- unsigned int temp;
+ unsigned int temp = 0;
+
int res = generate_cookie_callback(ssl, cookie, &temp);
*cookie_len = temp;
return res;
|
cms: Do not try to check binary format on stdin
Fixes | @@ -278,6 +278,8 @@ static void warn_binary(const char *file)
unsigned char linebuf[1024], *cur, *end;
int len;
+ if (file == NULL)
+ return; /* cannot give a warning for stdin input */
if ((bio = bio_open_default(file, 'r', FORMAT_BINARY)) == NULL)
return; /* cannot give a proper warning since there is an error */
while ((len = BIO_read(bio, linebuf, sizeof(linebuf))) > 0) {
|
Fixed wrong source ids in documentation (nutiteq.osm -> carto.streets), updated version numbers to the latest SDK | @@ -53,7 +53,7 @@ These SDK libraries are managed by CARTO and include all the required libraries
// Add to your build.gradle file:
dependencies {
- compile 'com.carto:carto-mobile-sdk:4.1.0@aar'
+ compile 'com.carto:carto-mobile-sdk:4.1.2@aar'
}
{% endhighlight %}
@@ -64,7 +64,7 @@ These SDK libraries are managed by CARTO and include all the required libraries
// Add to your CocoaPods Podfile:
-pod 'CartoMobileSDK', '4.1.0'
+pod 'CartoMobileSDK', '4.1.2'
{% endhighlight %}
</div>
@@ -195,7 +195,7 @@ If using Android as the mobile platform, follow this implementation procedure.
{% highlight groovy %}
dependencies {
- compile 'com.carto:carto-mobile-sdk:4.1.0@aar'
+ compile 'com.carto:carto-mobile-sdk:4.1.2@aar'
}
{% endhighlight %}
@@ -317,7 +317,7 @@ If using iOS as the mobile platform, follow this implementation procedure.
// Add to your CocoaPods Podfile:
-pod 'CartoMobileSDK', '4.1.0'
+pod 'CartoMobileSDK', '4.1.2'
{% endhighlight %}
@@ -362,7 +362,7 @@ pod 'CartoMobileSDK', '4.1.0'
NTMapView* mapView = (NTMapView*) self.view;
// 3. Create online vector tile layer, use style asset embedded in the project
- NTVectorTileLayer* vectorTileLayer = [[NTCartoOnlineVectorTileLayer alloc] initWithSource: @"nutiteq.osm"];
+ NTVectorTileLayer* vectorTileLayer = [[NTCartoOnlineVectorTileLayer alloc] initWithSource: @"carto.streets"];
// 4. Add vector tile layer
[[mapView getLayers] add:vectorTileLayer];
@@ -482,7 +482,7 @@ While you can share most of code using Native Controls, you just need to specify
stack.Children.Add(mapView);
#endif
// 2. Indicate the common code from both platforms
- var baseLayer = new Carto.Layers.CartoOnlineVectorTileLayer("nutiteq.osm");
+ var baseLayer = new Carto.Layers.CartoOnlineVectorTileLayer("carto.streets");
mapView.Layers.Add(baseLayer);
</pre>
@@ -536,7 +536,7 @@ public class MainActivity : Activity
var mapView = FindViewById<MapView> ( Resource.Id.mapView );
/// 4. Online vector base layer
- var baseLayer = new CartoOnlineVectorTileLayer("nutiteq.osm");
+ var baseLayer = new CartoOnlineVectorTileLayer("carto.streets");
/// 5. Set online base layer
mapView.Layers.Add(baseLayer);
@@ -579,7 +579,7 @@ public class MainViewController : GLKit.GLKViewController
MapView.RegisterLicense("YOUR_LICENSE_KEY");
// Online vector base layer
- var baseLayer = new CartoOnlineVectorTileLayer("nutiteq.osm");
+ var baseLayer = new CartoOnlineVectorTileLayer("carto.streets");
// Set online base layer.
// Note: assuming here that Map is an outlet added to the controller.
@@ -607,7 +607,7 @@ _**Note:** The Windows Phone 10 implementation of the Mobile SDK is experimenta
The following requirements are mandatory:
- Windows Phone version 10
- - MS Visual Studio 2013 Community edition, or better
+ - MS Visual Studio 2015 Community edition, or better
- Windows Phone 10 SDK, should come with Visual Studio
- Visual Studio extension (VSIX) for CARTO Maps SDK component. Download and start the package to install it
|
Fix build on older Qt versions due missing QLatin1String methods | @@ -527,22 +527,22 @@ static ApiVersion getAcceptHeaderApiVersion(const QString &hdrValue)
static const struct {
ApiVersion version;
- const char *str;
+ QLatin1String str;
} versions[] = {
// ordered by largest version
- {ApiVersion_2_DDEL, "application/vnd.ddel.v2"},
- {ApiVersion_1_1_DDEL, "application/vnd.ddel.v1.1"},
- {ApiVersion_1_1_DDEL, "vnd.ddel.v1.1"}, // backward compatibility
- {ApiVersion_1_DDEL, "application/vnd.ddel.v1"},
- {ApiVersion_1_DDEL, "vnd.ddel.v1"}, // backward compatibility
- {ApiVersion_1, nullptr}
+ {ApiVersion_2_DDEL, QLatin1String("application/vnd.ddel.v2")},
+ {ApiVersion_1_1_DDEL, QLatin1String("application/vnd.ddel.v1.1")},
+ {ApiVersion_1_1_DDEL, QLatin1String("vnd.ddel.v1.1")}, // backward compatibility
+ {ApiVersion_1_DDEL, QLatin1String("application/vnd.ddel.v1")},
+ {ApiVersion_1_DDEL, QLatin1String("vnd.ddel.v1")}, // backward compatibility
+ {ApiVersion_1, QLatin1String() }
};
const auto ls = hdrValue.split(QLatin1Char(','), QString::SkipEmptyParts);
- for (int i = 0; versions[i].str != nullptr; i++)
+ for (int i = 0; !versions[i].str.isEmpty(); i++)
{
- if (ls.contains(QLatin1String(versions[i].str)))
+ if (ls.contains(versions[i].str))
{
result = versions[i].version;
break;
@@ -555,9 +555,10 @@ static ApiVersion getAcceptHeaderApiVersion(const QString &hdrValue)
ApiRequest::ApiRequest(const QHttpRequestHeader &h, const QStringList &p, QTcpSocket *s, const QString &c) :
hdr(h), path(p), sock(s), content(c), version(ApiVersion_1), auth(ApiAuthNone), mode(ApiModeNormal)
{
- if (hdr.hasKey(QLatin1String("Accept")) && hdr.value(QLatin1String("Accept")).contains(QLatin1String("vnd.ddel")))
+ const auto accept = hdr.value(QLatin1String("Accept"));
+ if (accept.size() > 4) // rule out */*
{
- version = getAcceptHeaderApiVersion(hdr.value(QLatin1String("Accept")));
+ version = getAcceptHeaderApiVersion(accept);
}
}
|
Fix test_french_charref_hexidecimal() to work for | @@ -673,8 +673,14 @@ START_TEST(test_french_charref_hexidecimal)
const char *text =
"<?xml version='1.0' encoding='iso-8859-1'?>\n"
"<doc>éèàçêÈ</doc>";
- run_character_check(text,
- "\xC3\xA9\xC3\xA8\xC3\xA0\xC3\xA7\xC3\xAA\xC3\x88");
+#ifdef XML_UNICODE
+ const XML_Char *expected =
+ XCS("\x00e9\x00e8\x00e0\x00e7\x00ea\x00c8");
+#else
+ const XML_Char *expected =
+ XCS("\xC3\xA9\xC3\xA8\xC3\xA0\xC3\xA7\xC3\xAA\xC3\x88");
+#endif
+ run_character_check(text, expected);
}
END_TEST
|
virtualize pill cue to print message on invalid jamfile | @@ -1720,13 +1720,26 @@ u3m_boot(c3_o nuu_o, c3_o bug_o, c3_c* dir_c,
printf("boot: loading %s\r\n", ful_c);
{
- u3_noun sys = u3ke_cue(u3m_file(ful_c));
- u3_noun bot;
+ u3_noun pil = u3m_file(ful_c);
+ u3_noun sys, bot;
+
+ {
+ u3_noun pro = u3m_soft(0, u3ke_cue, u3k(pil));
+
+ if ( 0 != u3h(pro) ) {
+ fprintf(stderr, "boot: failed: unable to parse pill\r\n");
+ exit(1);
+ }
+
+ sys = u3k(u3t(pro));
+ u3z(pro);
+ }
u3x_trel(sys, &bot, 0, 0);
u3v_boot(u3k(bot));
u3z(sys);
+ u3z(pil);
}
}
else {
|
ssc: avoid multiple computations | @@ -1170,13 +1170,18 @@ def halomod_Tk3D_SSC(cosmo, hmc,
P_12 = norm12 * (pk * i11_1 * i11_2 + i02_12)
if prof1.is_number_counts:
- b1 = halomod_bias_1pt(cosmo, hmc, k_use, aa, prof1,
- normprof=True)
+ if normprof1:
+ b1 = i11_1 * norm1
+ else:
+ b1 = i11_1 * hmc.profile_norm(cosmo, aa, prof1)
+
if prof2 is None:
b2 = b1
elif prof2.is_number_counts:
- b2 = halomod_bias_1pt(cosmo, hmc, k_use, aa, prof2,
- normprof=True)
+ if normprof2:
+ b2 = i11_2 * norm2
+ else:
+ b2 = i11_2 * hmc.profile_norm(cosmo, aa, prof2)
dpk12[ia, :] -= (b1 + b2) * P_12
@@ -1193,14 +1198,18 @@ def halomod_Tk3D_SSC(cosmo, hmc,
if prof3 is None:
b3 = b1
elif prof3.is_number_counts:
- b3 = halomod_bias_1pt(cosmo, hmc, k_use, aa, prof3,
- normprof=True)
+ if normprof3:
+ b3 = i11_3 * norm3
+ else:
+ b3 = i11_3 * hmc.profile_norm(cosmo, aa, prof3)
if prof4 is None:
b4 = b3
elif prof4.is_number_counts:
- b4 = halomod_bias_1pt(cosmo, hmc, k_use, aa, prof4,
- normprof=True)
+ if normprof4:
+ b4 = i11_4 * norm4
+ else:
+ b4 = i11_4 * hmc.profile_norm(cosmo, aa, prof4)
dpk34[ia, :] -= (b3 + b4) * P_34
|
libhfuzz/instrument: increase number of 8bit instrumented regions to 256 | @@ -450,12 +450,12 @@ HF_REQUIRE_SSE42_POPCNT void __sanitizer_cov_trace_pc_guard(uint32_t* guard) {
}
}
-/* Support up to 128 DSO modules with separate 8bit counters */
+/* Support up to 256 DSO modules with separate 8bit counters */
static struct {
uint8_t* start;
size_t cnt;
size_t guard;
-} hf8bitcounters[128] = {};
+} hf8bitcounters[256] = {};
void instrument8BitCountersCount(void) {
for (size_t i = 0; i < ARRAYSIZE(hf8bitcounters) && hf8bitcounters[i].start; i++) {
@@ -491,15 +491,17 @@ void __sanitizer_cov_8bit_counters_init(char* start, char* end) {
/* Make sure that the feedback struct is already mmap()'d */
hfuzzInstrumentInit();
+ if ((uintptr_t)start == (uintptr_t)end) {
+ return;
+ }
+
for (size_t i = 0; i < ARRAYSIZE(hf8bitcounters); i++) {
if (hf8bitcounters[i].start == NULL) {
hf8bitcounters[i].start = (uint8_t*)start;
hf8bitcounters[i].cnt = (uintptr_t)end - (uintptr_t)start + 1;
hf8bitcounters[i].guard = instrumentReserveGuard(hf8bitcounters[i].cnt);
-
LOG_D("8-bit module initialization %p-%p (count:%zu) at guard %zu", start, end,
hf8bitcounters[i].cnt, hf8bitcounters[i].guard);
-
break;
}
}
|
link-store: don't send update if not new
We were taking care not to re-add something to our data store if we
already had it in there, but were still sending out an update
regardless.
With this, we only send out an update if we weren't previously aware
of the content. | :: add link to group submissions
::
=/ =links (~(gut by by-group) path *links)
- =. submissions.links
+ =^ added submissions.links
+ ?: ?=(^ (find ~[submission] submissions.links))
+ [| submissions.links]
+ :- &
(submissions:merge submissions.links ~[submission])
=. by-group (~(put by by-group) path links)
:: add submission to global sites
:: send updates to subscribers
::
:_ state
+ ?. added ~
:_ ~
:+ %give %fact
:+ :~ /submissions
::
=/ urls (~(gut by discussions) path *(map ^url discussion))
=/ =discussion (~(gut by urls) url *discussion)
- =. comments.discussion
+ =^ added comments.discussion
+ ?: ?=(^ (find ~[comment] comments.discussion))
+ [| comments.discussion]
+ :- &
(comments:merge comments.discussion ~[comment])
=. urls (~(put by urls) url discussion)
=. discussions (~(put by discussions) path urls)
:: send updates to subscribers
::
:_ state
+ ?. added ~
:_ ~
:+ %give %fact
:+ :~ /discussions
|
blockresolver: added exporting of the commit function to contract
Related to issue | @@ -232,6 +232,7 @@ int elektraBlockresolverGet (Plugin * handle ELEKTRA_UNUSED, KeySet * returned E
keyNew ("system/elektra/modules/blockresolver/exports/error", KEY_FUNC, elektraBlockresolverError, KEY_END),
keyNew ("system/elektra/modules/blockresolver/exports/get", KEY_FUNC, elektraBlockresolverGet, KEY_END),
keyNew ("system/elektra/modules/blockresolver/exports/set", KEY_FUNC, elektraBlockresolverSet, KEY_END),
+ keyNew ("system/elektra/modules/blockresolver/exports/commit", KEY_FUNC, elektraBlockresolverCommit, KEY_END),
keyNew ("system/elektra/modules/blockresolver/exports/checkfile", KEY_FUNC, elektraBlockresolverCheckFile, KEY_END),
#include ELEKTRA_README
keyNew ("system/elektra/modules/blockresolver/infos/version", KEY_VALUE, PLUGINVERSION, KEY_END), KS_END);
|
tools/mksymtab.sh: Fix issue to generate symtab_apps.c for MSYS
The file permission is used to get the execlist, but the file permission
cannot be changed for MSYS environment. As a result, symtab_apps.c cannot
be generated correctly. This commit removes the permission match. | @@ -42,9 +42,9 @@ prefix=$2
varlist=`find $dir -name *-thunk.S 2>/dev/null | xargs grep -h asciz | cut -f3 | sort | uniq`
if [ -z "$varlist" ]; then
- execlist=`find $dir -type f -perm -a=x 2>/dev/null`
+ execlist=`find $dir -type f 2>/dev/null`
if [ ! -z "$execlist" ]; then
- varlist=`nm $execlist | fgrep ' U ' | sed -e "s/^[ ]*//g" | cut -d' ' -f2 | sort | uniq`
+ varlist=`nm $execlist 2>/dev/null | fgrep ' U ' | sed -e "s/^[ ]*//g" | cut -d' ' -f2 | sort | uniq`
fi
fi
|
filter_grep: use kv interface to query properties | #include <sys/types.h>
#include <fluent-bit/flb_info.h>
+#include <fluent-bit/flb_kv.h>
#include <fluent-bit/flb_mem.h>
#include <fluent-bit/flb_str.h>
#include <fluent-bit/flb_filter.h>
@@ -53,12 +54,12 @@ static int set_rules(struct grep_ctx *ctx, struct flb_filter_instance *f_ins)
struct mk_list *head;
struct mk_list *split;
struct flb_split_entry *sentry;
- struct flb_config_prop *prop;
+ struct flb_kv *kv;
struct grep_rule *rule;
/* Iterate all filter properties */
mk_list_foreach(head, &f_ins->properties) {
- prop = mk_list_entry(head, struct flb_config_prop, _head);
+ kv = mk_list_entry(head, struct flb_kv, _head);
/* Create a new rule */
rule = flb_malloc(sizeof(struct grep_rule));
@@ -68,10 +69,10 @@ static int set_rules(struct grep_ctx *ctx, struct flb_filter_instance *f_ins)
}
/* Get the type */
- if (strcasecmp(prop->key, "regex") == 0) {
+ if (strcasecmp(kv->key, "regex") == 0) {
rule->type = GREP_REGEX;
}
- else if (strcasecmp(prop->key, "exclude") == 0) {
+ else if (strcasecmp(kv->key, "exclude") == 0) {
rule->type = GREP_EXCLUDE;
}
else {
@@ -81,7 +82,7 @@ static int set_rules(struct grep_ctx *ctx, struct flb_filter_instance *f_ins)
}
/* As a value we expect a pair of field name and a regular expression */
- split = flb_utils_split(prop->val, ' ', 1);
+ split = flb_utils_split(kv->val, ' ', 1);
if (mk_list_size(split) != 2) {
flb_error("[filter_grep] invalid regex, expected field and regular expression");
delete_rules(ctx);
|
lv_style.c change int16_t to lv_anim_value_t in animation exec cb | *********************/
#include "lv_obj.h"
#include "../lv_misc/lv_mem.h"
+#include "../lv_misc/lv_anim.h"
/*********************
* DEFINES
* STATIC PROTOTYPES
**********************/
#if LV_USE_ANIMATION
-static void style_animator(lv_style_anim_dsc_t * dsc, int16_t val);
+static void style_animator(lv_style_anim_dsc_t * dsc, lv_anim_value_t val);
static void style_animation_common_end_cb(lv_anim_t * a);
#endif
@@ -318,7 +319,7 @@ void lv_style_anim_set_styles(lv_anim_t * a, lv_style_t * to_anim, const lv_styl
* @param dsc the 'animated variable' set by lv_style_anim_create()
* @param val the current state of the animation between 0 and LV_ANIM_RESOLUTION
*/
-static void style_animator(lv_style_anim_dsc_t * dsc, int16_t val)
+static void style_animator(lv_style_anim_dsc_t * dsc, lv_anim_value_t val)
{
const lv_style_t * start = &dsc->style_start;
const lv_style_t * end = &dsc->style_end;
|
Remove unused lovrGraphicsSetShapeData; | @@ -364,19 +364,6 @@ void lovrGraphicsMatrixTransform(mat4 transform) {
// Primitives
-void lovrGraphicsSetShapeData(float* data, int dataCount, unsigned int* indices, int indicesCount) {
- vec_clear(&state.shapeIndices);
- vec_clear(&state.shapeData);
-
- if (data) {
- vec_pusharr(&state.shapeData, data, dataCount);
- }
-
- if (indices) {
- vec_pusharr(&state.shapeIndices, indices, indicesCount);
- }
-}
-
void lovrGraphicsDrawPrimitive(GLenum mode, Texture* texture, int hasNormals, int hasTexCoords, int useIndices) {
int stride = 3 + (hasNormals ? 3 : 0) + (hasTexCoords ? 2 : 0);
int strideBytes = stride * sizeof(float);
|
No longer select scene after creating new actor or trigger | @@ -71,11 +71,6 @@ class Scene extends Component {
}
};
- onMouseDown = e => {
- const { id, selectScene } = this.props;
- selectScene(id);
- };
-
onMouseLeave = e => {
const { sceneHover } = this.props;
sceneHover("");
@@ -142,7 +137,6 @@ class Scene extends Component {
<div
className="Scene__Image"
onMouseMove={this.onMouseMove}
- onMouseDown={this.onMouseDown}
onMouseLeave={this.onMouseLeave}
style={{
width: width * TILE_SIZE,
|
xerces: small fixes in README | - infos = Information about the template plugin is in keys below
-- infos/author = e1528532 <[email protected]>
+- infos/author = Armin Wurzinger <[email protected]>
- infos/licence = BSD
- infos/provides = storage/xml
- infos/needs =
This plugin is a storage plugin allowing Elektra to read and write XML
formatted files. It uses a general format which:
+
- Maps key names to XML elements
- Maps key values to textual content of XML elements
- Maps metakeys to XML attributes. Metakey name = attribute name, Metakey value
@@ -52,7 +53,9 @@ the mountpoint, then it uses the mountpoint's name instead.
## Dependencies
-- `Xerces-C++ 3.0.0` or newer
+- `Xerces-C++ 3.0.0` or newer (`apt-get install libxerces-c-dev`)
+- CMake 3.6 or a copy of `FindXercesC.cmake` in
+ `/usr/share/cmake-3.0/Modules/`
## Limitations
@@ -62,7 +65,7 @@ take care about proper escaping.
The main rules of an XML element name are:
- Element names must start with a letter or underscore
-- Element names cannot start with the letters xml (or XML, or Xml, etc)
+- Element names cannot start with the letters xml (or XML, or Xml, etc.)
- Element names can contain letters, digits, hyphens, underscores, and periods
- Element names cannot contain spaces
|
Disallow Insert and Delete triggers on SplitUpdate
Considering that the original command is UPDATE for a SplitUpdate, firing
INSERT and DELETE triggers may lead to the wrong action being enforced.
Also, since triggers in GPDB may require cross-segment data changes, disallow
INSERT and DELETE triggers on a SplitUpdate.
* violations before firing these triggers, because they can change the
* values to insert. Also, they can run arbitrary user-defined code with
* side-effects that we can't cancel by just not inserting the tuple.
- */
- /*
- * GPDB_12_MERGE_FIXME: PostgreSQL *does* fire INSERT and DELETE
- * triggers on an UPDATE that moves tuples from one partition to another.
- * Should we follow that example with cross-segment UPDATEs too?
+ *
+ * Considering that the original command is UPDATE for a SplitUpdate, fire
+ * insert triggers may lead to the wrong action to be enforced. And the
+ * triggers in GPDB may require cross segments data changes, disallow the
+ * INSERT triggers on a SplitUpdate.
*/
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->trig_insert_before_row &&
@@ -660,11 +660,8 @@ ExecInsert(ModifyTableState *mtstate,
/* AFTER ROW INSERT Triggers */
/*
- * GPDB: Don't fire DELETE triggers on a split UPDATE.
- *
- * GPDB_12_MERGE_FIXME: PostgreSQL *does* fire INSERT and DELETE
- * triggers on an UPDATE that moves tuples from one partition to another.
- * Should we follow that example with cross-segment UPDATEs too?
+ * GPDB: Disallow INSERT triggers on a split UPDATE. See comments in
+ * BEFORE ROW INSERT Triggers.
*/
if (!splitUpdate)
ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
@@ -794,9 +791,7 @@ ExecDelete(ModifyTableState *mtstate,
/* BEFORE ROW DELETE Triggers */
/*
- * GPDB_12_MERGE_FIXME: PostgreSQL *does* fire INSERT and DELETE
- * triggers on an UPDATE that moves tuples from one partition to another.
- * Should we follow that example with cross-segment UPDATEs too?
+ * Disallow DELETE triggers on a split UPDATE. See comments in ExecInsert().
*/
if (resultRelInfo->ri_TrigDesc &&
resultRelInfo->ri_TrigDesc->trig_delete_before_row &&
@@ -1090,9 +1085,7 @@ ldelete:;
/* AFTER ROW DELETE Triggers */
/*
- * GPDB_12_MERGE_FIXME: PostgreSQL *does* fire INSERT and DELETE
- * triggers on an UPDATE that moves tuples from one partition to another.
- * Should we follow that example with cross-segment UPDATEs too?
+ * Disallow DELETE triggers on a split UPDATE. See comments in ExecInsert().
*/
if (!RelationIsAppendOptimized(resultRelationDesc) && !splitUpdate)
{
|
Comments: fix deletion
Fixes urbit/landscape#902 | @@ -3,8 +3,7 @@ import { Group } from '@urbit/api';
import { GraphNode } from '@urbit/api/graph';
import bigInt from 'big-integer';
import React, { useCallback, useEffect, useRef } from 'react';
-import { Link, useHistory } from 'react-router-dom';
-import styled from 'styled-components';
+import { Link } from 'react-router-dom';
import GlobalApi from '~/logic/api/global';
import { roleForShip } from '~/logic/lib/group';
import { getPermalinkForGraph } from '~/logic/lib/permalinks';
@@ -14,11 +13,6 @@ import useMetadataState from '~/logic/state/metadata';
import Author from '~/views/components/Author';
import { GraphContent } from '../landscape/components/Graph/GraphContent';
-const ClickBox = styled(Box)`
- cursor: pointer;
- padding-left: ${p => p.theme.space[2]}px;
-`;
-
interface CommentItemProps {
pending?: boolean;
comment: GraphNode;
@@ -60,7 +54,8 @@ export function CommentItem(props: CommentItemProps) {
};
const ourMention = post?.contents?.some((e) => {
- if (!('mention' in e)) return false;
+ if (!('mention' in e))
+return false;
return e?.mention && e?.mention === window.ship;
});
@@ -98,7 +93,6 @@ export function CommentItem(props: CommentItemProps) {
ref.current.scrollIntoView({ block: 'center' });
}
}, [ref, props.highlighted]);
- const history = useHistory();
const { copyDisplay, doCopy } = useCopy(
getPermalinkForGraph(
@@ -109,7 +103,7 @@ export function CommentItem(props: CommentItemProps) {
'Copy Link'
);
- if (!post || typeof post === 'string') {
+ if (!post || typeof post === 'string' || typeof comment.post === 'string') {
return (
<Box width="100%" textAlign="left" py="3">
<Text gray>This comment has been deleted.</Text>
|
pg_upgrade: fix GPDB-specific long options
The --progress, --add-checksum, and --remove-checksum options weren't
being recognized because their option.vals were set to the characters
'2', '3', and '4' (integer values 50, 51, and 52, respectively), while
the option handling was comparing to the literal values 2, 3, and 4.
Switch back to integers. | @@ -57,9 +57,9 @@ parseCommandLine(int argc, char *argv[])
/* Greenplum specific parameters */
{"mode", required_argument, NULL, 1},
- {"progress", no_argument, NULL, '2'},
- {"add-checksum", no_argument, NULL, '3'},
- {"remove-checksum", no_argument, NULL, '4'},
+ {"progress", no_argument, NULL, 2},
+ {"add-checksum", no_argument, NULL, 3},
+ {"remove-checksum", no_argument, NULL, 4},
{NULL, 0, NULL, 0}
};
|
Fix PhDeleteDirectory access | @@ -6333,7 +6333,7 @@ static BOOLEAN PhpDeleteDirectoryCallback(
if (NT_SUCCESS(PhCreateFileWin32(
&fileHandle,
PhGetString(fullName),
- FILE_GENERIC_WRITE,
+ FILE_GENERIC_READ | FILE_WRITE_ATTRIBUTES,
FILE_ATTRIBUTE_NORMAL,
FILE_SHARE_WRITE,
FILE_OPEN,
|
c_frame_callback: whitespace changes to sbp.h | @@ -77,7 +77,8 @@ typedef void (*sbp_msg_callback_t)(u16 sender_id, u8 len, u8 msg[], void *contex
/** SBP callback node.
* Forms a linked list of callbacks.
- * \note Must be statically allocated for use with sbp_register_callback().
+ * \note Must be statically allocated for use with sbp_register_callback()
+ * and sbp_register_frame_callback().
*/
typedef struct sbp_msg_callbacks_node {
u16 msg_type; /**< Message ID associated with callback. */
|
nrf: Make LTO configurable via Makefile flag.
LTO messes up debuggability and may cause some other issues.
Additionally, it does not always result in reduced code size. | @@ -69,6 +69,13 @@ CFLAGS_MCU_m4 = $(CFLAGS_CORTEX_M) -mtune=cortex-m4 -mcpu=cortex-m4 -mfpu=fpv4-s
CFLAGS_MCU_m0 = $(CFLAGS_CORTEX_M) --short-enums -mtune=cortex-m0 -mcpu=cortex-m0 -mfloat-abi=soft -fno-builtin
+LTO ?= 1
+ifeq ($(LTO),1)
+CFLAGS_LTO += -flto
+else
+CFLAGS_LTO += -Wl,--gc-sections -ffunction-sections -fdata-sections
+endif
+
CFLAGS += $(CFLAGS_MCU_$(MCU_SERIES))
CFLAGS += $(INC) -Wall -Werror -ansi -std=gnu99 -nostdlib $(COPT) $(NRF_DEFINES) $(CFLAGS_MOD)
@@ -76,7 +83,7 @@ CFLAGS += -fno-strict-aliasing
CFLAGS += -fstack-usage
CFLAGS += -Iboards/$(BOARD)
CFLAGS += -DNRF5_HAL_H='<$(MCU_VARIANT)_hal.h>'
-CFLAGS += -flto
+CFLAGS += $(CFLAGS_LTO)
LDFLAGS = $(CFLAGS)
LDFLAGS += -Xlinker -Map=$(@:.elf=.map)
|
kernel/os; document the purpose of OS_COREDUMP. | @@ -31,7 +31,7 @@ syscfg.defs:
restrictions:
- SHELL_TASK
OS_COREDUMP:
- description: 'TBD'
+ description: 'Attempt to dump corefile when system crashes'
value: 0
OS_SCHEDULING:
description: 'Whether OS will be started or not'
|
Add tmpfs support to "hse storage profile" | #include <sys/statvfs.h>
#include <sys/time.h>
#include <sys/uio.h>
+#include <sys/vfs.h>
+#if __linux__
+#include <linux/magic.h>
+#endif
/* We use 128KiB writes in all cases */
#define PROF_BLOCK_SIZE (128u * 1024)
@@ -53,6 +57,7 @@ struct storage_profile_work {
uint64_t file_sz;
uint64_t block_sz;
int rc;
+ bool tmpfs;
uint64_t *samples;
};
@@ -148,7 +153,9 @@ profile_worker(void *rock)
snprintf(fname, sizeof(fname), "%s-%d-%d", "profile-file", work->thrcnt, work->index);
- flags = O_CREAT | O_EXCL | O_DIRECT | O_SYNC | O_RDWR;
+ flags = O_CREAT | O_EXCL | O_SYNC | O_RDWR;
+ flags |= (work->tmpfs ? 0 : O_DIRECT);
+
fd = openat(dirfd, fname, flags, S_IRUSR | S_IWUSR);
if (fd < 0) {
work->rc = errno;
@@ -215,8 +222,10 @@ perform_profile_run(
struct storage_profile_work *work_specs;
struct storage_prof_stat stats;
struct itimerval timer = {0};
+ struct statfs sbuf;
const uint32_t samples_per_thread = (file_sz / block_sz);
int i, j, rc = 0;
+ bool tmpfs;
uint64_t sum, tot_samples;
double tmp, var_sum;
double mean;
@@ -231,6 +240,13 @@ perform_profile_run(
if (!work_specs)
return ENOMEM;
+ rc = fstatfs(dirfd, &sbuf);
+ if (rc == -1) {
+ rc = errno;
+ goto err_exit;
+ }
+ tmpfs = (sbuf.f_type == TMPFS_MAGIC);
+
/* prepare the per-thread data */
for (i = 0; i < thread_cnt; ++i) {
work_specs[i].dirfd = dirfd;
@@ -238,6 +254,7 @@ perform_profile_run(
work_specs[i].thrcnt = thread_cnt;
work_specs[i].file_sz = file_sz;
work_specs[i].block_sz = block_sz;
+ work_specs[i].tmpfs = tmpfs;
work_specs[i].rc = 0;
work_specs[i].samples = malloc(samples_per_thread * sizeof(double));
|
cleanup candidate list on error | @@ -3472,11 +3472,15 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
}
struct neat_he_candidate *candidate = calloc(1, sizeof(*candidate));
- if (!candidate)
+ if (!candidate) {
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
+ }
+
candidate->pollable_socket = calloc(1, sizeof(struct neat_pollable_socket));
if (!candidate->pollable_socket) {
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
@@ -3491,6 +3495,7 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
if (!candidate->if_name) {
free(candidate->pollable_socket);
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
candidate->if_idx = result->if_idx;
@@ -3502,6 +3507,7 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
free(candidate->if_name);
free(candidate->pollable_socket);
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
candidate->pollable_socket->dst_address = strdup(dst_buffer);
@@ -3510,6 +3516,7 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
free(candidate->if_name);
free(candidate->pollable_socket);
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
candidate->pollable_socket->port = flow->port;
@@ -3566,6 +3573,7 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
candidate->pollable_socket->src_address = strdup(src_buffer);
if (!candidate->pollable_socket->src_address) {
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
@@ -3579,6 +3587,7 @@ open_resolve_cb(struct neat_resolver_results *results, uint8_t code,
free(candidate->if_name);
free(candidate->pollable_socket);
free(candidate);
+ nt_free_candidates(ctx, candidates);
return NEAT_ERROR_OUT_OF_MEMORY;
}
candidate->pollable_socket->dst_len = result->dst_addr_len;
|
unix/Makefile: Update libffi include/lib paths for MICROPY_STANDALONE=1.
With these settings, the lib is now linked properly for different arch
variants, e.g. x86_64 -m64 vs -m32 vs -mx32. | @@ -152,12 +152,8 @@ endif
ifeq ($(MICROPY_PY_FFI),1)
ifeq ($(MICROPY_STANDALONE),1)
-LIBFFI_CFLAGS_MOD := -I$(shell ls -1d $(BUILD)/lib/libffi/out/lib/libffi-*/include)
- ifeq ($(MICROPY_FORCE_32BIT),1)
- LIBFFI_LDFLAGS_MOD = $(BUILD)/lib/libffi/out/lib32/libffi.a
- else
- LIBFFI_LDFLAGS_MOD = $(BUILD)/lib/libffi/out/lib/libffi.a
- endif
+LIBFFI_CFLAGS_MOD := -I$(shell ls -1d $(BUILD)/lib/libffi/out/include)
+LIBFFI_LDFLAGS_MOD = $(BUILD)/lib/libffi/.libs/libffi.a
else
LIBFFI_CFLAGS_MOD := $(shell pkg-config --cflags libffi)
LIBFFI_LDFLAGS_MOD := $(shell pkg-config --libs libffi)
|
jvpp: ignore messages if callback method is missing | @@ -226,11 +226,19 @@ static void vl_api_${handler_name}_t_handler (vl_api_${handler_name}_t * mp)
{
${plugin_name}_main_t *plugin_main = &${plugin_name}_main;
JNIEnv *env = jvpp_main.jenv;
+ jthrowable exc;
$err_handler
jmethodID constructor = (*env)->GetMethodID(env, ${class_ref_name}Class, "<init>", "()V");
jmethodID callbackMethod = (*env)->GetMethodID(env, plugin_main->callbackClass, "on${dto_name}", "(Lio/fd/vpp/jvpp/${plugin_name}/dto/${dto_name};)V");
+ exc = (*env)->ExceptionOccurred(env);
+ if (exc) {
+ clib_warning("Unable to extract on${dto_name} method reference from ${plugin_name} plugin's callbackClass. Ignoring message.\\n");
+ (*env)->ExceptionDescribe(env);
+ (*env)->ExceptionClear(env);
+ return;
+ }
jobject dto = (*env)->NewObject(env, ${class_ref_name}Class, constructor);
$dto_setters
|
Add CMake options to disable various dependencies.
Actually doing so will cause compile errors until the sources are updated. | @@ -28,12 +28,14 @@ else()
message(FATAL_ERROR "Unknown option for LIBTCOD_SDL2: '${LIBTCOD_SDL2}'")
endif()
-set_property(CACHE LIBTCOD_ZLIB PROPERTY STRINGS "find_package" "conan")
+set_property(CACHE LIBTCOD_ZLIB PROPERTY STRINGS "find_package" "conan" "disable")
if(LIBTCOD_ZLIB STREQUAL "find_package")
find_package(ZLIB REQUIRED)
target_link_libraries(${PROJECT_NAME} PRIVATE ZLIB::ZLIB)
elseif(LIBTCOD_ZLIB STREQUAL "conan")
target_link_libraries(${PROJECT_NAME} PRIVATE CONAN_PKG::zlib)
+elseif(LIBTCOD_ZLIB STREQUAL "disable")
+ target_compile_definitions(${PROJECT_NAME} PUBLIC TCOD_NO_ZLIB)
else()
message(FATAL_ERROR "Unknown option for LIBTCOD_ZLIB: '${LIBTCOD_ZLIB}'")
endif()
@@ -54,7 +56,7 @@ if(NOT LIBTCOD_SDL2 STREQUAL "disable") # Ignore GLAD if SDL is disabled.
endif()
set(LIBTCOD_LODEPNG "find_package" CACHE STRING "How this library will be linked.")
-set_property(CACHE LIBTCOD_LODEPNG PROPERTY STRINGS "vendored" "find_package")
+set_property(CACHE LIBTCOD_LODEPNG PROPERTY STRINGS "vendored" "find_package" "disable")
if(LIBTCOD_LODEPNG STREQUAL "find_package")
find_package(lodepng-c CONFIG REQUIRED)
target_link_libraries(${PROJECT_NAME} PRIVATE lodepng-c)
@@ -62,12 +64,14 @@ elseif(LIBTCOD_LODEPNG STREQUAL "vendored")
message("Will be vendored: LodePNG")
target_sources(${PROJECT_NAME} PRIVATE "vendor/lodepng.c")
target_include_directories(${PROJECT_NAME} PRIVATE "vendor/")
+elseif(LIBTCOD_LODEPNG STREQUAL "disable")
+ target_compile_definitions(${PROJECT_NAME} PUBLIC TCOD_NO_PNG)
else()
message(FATAL_ERROR "Unknown option for LIBTCOD_LODEPNG: '${LIBTCOD_LODEPNG}'")
endif()
set(LIBTCOD_UTF8PROC "vcpkg" CACHE STRING "How this library will be linked.")
-set_property(CACHE LIBTCOD_UTF8PROC PROPERTY STRINGS "vendored" "vcpkg")
+set_property(CACHE LIBTCOD_UTF8PROC PROPERTY STRINGS "vendored" "vcpkg" "disable")
if(LIBTCOD_UTF8PROC STREQUAL "vendored")
message("Will be vendored: utf8proc")
target_include_directories(${PROJECT_NAME} PRIVATE "vendor/utf8proc")
@@ -86,6 +90,8 @@ elseif (LIBTCOD_UTF8PROC STREQUAL "vcpkg")
endif()
target_include_directories(${PROJECT_NAME} PRIVATE ${UTF8PROC_INCLUDE_DIRS})
target_link_libraries(${PROJECT_NAME} PUBLIC ${UTF8PROC_LIBRARY})
+elseif(LIBTCOD_UTF8PROC STREQUAL "disable")
+ target_compile_definitions(${PROJECT_NAME} PUBLIC TCOD_NO_UTF8)
else()
message(FATAL_ERROR "Unknown option for LIBTCOD_UTF8PROC: '${LIBTCOD_UTF8PROC}'")
endif()
|
Updated docs on parallel scale | @@ -173,7 +173,16 @@ Near clipping and Far clipping
are so you will have to experiment.
Parallel scale
- Zoom factor: larger values zoom the camera towards the focus.
+ Acts like a zoom factor that zooms the camera towards the focus. For a
+ parallel projection, it is half
+ the height of an object in the window. For example, if you had a sphere of
+ radius 10, setting the parallel scale to 10, would result in the top and
+ bottom of the sphere touching the top and bottom of the image. Where the
+ sphere touches on the left and right edges depends on the aspect ratio of
+ the image. If it was 1:1, then the sphere would also touch the left and
+ right edges of the image. When doing a perspective projection, it attempts
+ to have the top and bottom of the sphere touch the top and bottom of the
+ image.
Perspective
Applies to 3D visualizations and it causes a more realistic view to be used
|
call global postgetstorage plugins (like spec) before non-global ones (like type)
Fixes | @@ -1394,6 +1394,19 @@ cachemiss:
ksClear (ks);
splitMergeBackends (split, ks);
+ if (elektraGlobalGet (handle, ks, parentKey, POSTGETSTORAGE, INIT) == ELEKTRA_PLUGIN_STATUS_ERROR)
+ {
+ goto error;
+ }
+ if (elektraGlobalGet (handle, ks, parentKey, POSTGETSTORAGE, MAXONCE) == ELEKTRA_PLUGIN_STATUS_ERROR)
+ {
+ goto error;
+ }
+ if (elektraGlobalGet (handle, ks, parentKey, POSTGETSTORAGE, DEINIT) == ELEKTRA_PLUGIN_STATUS_ERROR)
+ {
+ goto error;
+ }
+
clearError (parentKey);
if (elektraGetDoUpdateWithGlobalHooks (handle, split, ks, parentKey, initialParent, LAST) == -1)
{
@@ -1429,7 +1442,6 @@ cachemiss:
ksClear (ks);
splitMergeBackends (split, ks);
- }
keySetName (parentKey, keyName (initialParent));
@@ -1445,6 +1457,9 @@ cachemiss:
{
goto error;
}
+ }
+
+ keySetName (parentKey, keyName (initialParent));
if (handle->globalPlugins[POSTGETCACHE][MAXONCE])
{
|
Allocate LMS C_RANDOM_VALUE as hash size | #define J_HASH_IDX_LEN (1)
#define D_CONST_LEN (2)
-/* Currently only defined for SHA256, 32 is the max hash output size */
-#define C_RANDOM_VALUE_LEN_MAX (MBEDTLS_LMOTS_N_HASH_LEN_MAX)
-
#define DIGIT_MAX_VALUE ((1u << W_WINTERNITZ_PARAMETER) - 1u)
#define D_CONST_LEN (2)
@@ -731,7 +728,7 @@ int mbedtls_lmots_sign( mbedtls_lmots_private_t *ctx,
* key.
*/
unsigned char tmp_sig[MBEDTLS_LMOTS_P_SIG_DIGIT_COUNT_MAX][MBEDTLS_LMOTS_N_HASH_LEN_MAX];
- unsigned char tmp_c_random[C_RANDOM_VALUE_LEN_MAX];
+ unsigned char tmp_c_random[MBEDTLS_LMOTS_N_HASH_LEN_MAX];
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
if( msg == NULL && msg_size != 0 )
|
contributing.md: added more pre-req detail to the beginning of the edit
Added information pertaining to nix and git-lfs installation required to work with urbit's interfaces. | @@ -251,9 +251,13 @@ $ git lfs pull
## Interface development
-Designing interfaces within urbit/urbit requires that the above steps for
-fake `~zod` initialization are followed. Once your fake ship is running and you
-see
+[nix](https://github.com/NixOS/nix) and `git-lfs` should be installed at
+this point, and have been used to `make build` the project.
+
+Designing interfaces within urbit/urbit additionally requires that the
+above steps for fake `~zod` initialization have been followed.
+
+Once your fake ship is running and you see
```
~zod:dojo>
```
|
wifi_prov_mgr : Updated the set of WIFI_REASON_ codes used for setting WIFI_PROV_STA_AUTH_ERROR | @@ -852,9 +852,8 @@ static void wifi_prov_mgr_event_handler_internal(
switch (disconnected->reason) {
case WIFI_REASON_AUTH_EXPIRE:
case WIFI_REASON_4WAY_HANDSHAKE_TIMEOUT:
- case WIFI_REASON_BEACON_TIMEOUT:
case WIFI_REASON_AUTH_FAIL:
- case WIFI_REASON_ASSOC_FAIL:
+ case WIFI_REASON_ASSOC_EXPIRE:
case WIFI_REASON_HANDSHAKE_TIMEOUT:
ESP_LOGE(TAG, "STA Auth Error");
prov_ctx->wifi_disconnect_reason = WIFI_PROV_STA_AUTH_ERROR;
|
Do not install package if segment list is empty.
This commit fixes the regression introduced by commit
"WorkerPool: Error out if numWorkers is 0 or less."
Details in
# distribute package to segments
srcFile = self.gppkg.abspath
dstFile = os.path.join(GPHOME, self.gppkg.pkg)
- GpScp(srcFile, dstFile, self.segment_host_list).run()
# install package on segments
+ if self.segment_host_list:
+ GpScp(srcFile, dstFile, self.segment_host_list).run()
HostOperation(InstallPackageLocally(dstFile), self.segment_host_list).run()
# install package on standby
|
fix build failure on appveyor
modify appveyor.yml to fit new appveyor build environment
use windows-style command | @@ -5,16 +5,16 @@ platform:
- Win64
before_build:
- - cmd: curl -fsSL https://github.com/tboox/xmake/releases/download/v2.0.5/xmake-v2.0.5.exe -o xmake-installer.exe
- - cmd: mkdir program
- - cmd: xmake-installer.exe /S /D=program
+ - ps: Invoke-WebRequest "https://github.com/tboox/xmake/releases/download/v2.1.2/xmake-v2.1.2.exe" -OutFile "xmake-installer.exe"
+ - cmd: xmake-installer.exe /S /D=C:\xmake
+ - cmd: PATH=%PATH%;C:\xmake
- cmd: xmake --version
- - cmd: cp -r xmake/* program/
+ - ps: cp -r -force xmake\* C:\xmake
- cmd: xmake --version
- - cmd: ls program
+ - cmd: dir C:\xmake
build_script:
- - cmd: cd ./tests/console_c
+ - cmd: cd tests\console_c
- cmd: if %platform%==Win32 xmake f -m debug
- cmd: if %platform%==Win64 xmake f -m debug -a x64
- cmd: xmake
|
fix(random):
BoatRandom() return BOAT_SUCCESS | @@ -76,7 +76,8 @@ BOAT_RESULT BoatRandom(BUINT8 *output, BUINT32 outputLen, void *rsvd)
(void)rsvd;
- return random_buffer(output, outputLen);
+ random_buffer(output, outputLen);
+ return BOAT_SUCCESS;
}
|
Fixed sending of bytes over PandaSerial | @@ -19,6 +19,9 @@ class PandaSerial(object):
def write(self, dat):
#print "W: ", dat.encode("hex")
#print ' pigeon_send("' + ''.join(map(lambda x: "\\x%02X" % ord(x), dat)) + '");'
+ if(isinstance(dat, bytes)):
+ return self.panda.serial_write(self.port, dat)
+ else:
return self.panda.serial_write(self.port, str.encode(dat))
def close(self):
|
Makefile.include: fix 'usage' | @@ -43,7 +43,7 @@ ifdef CI
endif
usage:
- @echo "make MAKETARGETS... [TARGET=(TARGET)] [savetarget] [targets]"
+ @echo "make MAKETARGETS... [TARGET=(TARGET)] [BOARD=(BOARD)] [savetarget] [targets]"
targets:
@ls -1 $(CONTIKI)/arch/platform $(TARGETDIRS)
|
mqueue: fix compilation error when CONFIG_DISABLE_MQUEUE=y
This patch fixes compilation error when CONFIG_DISABLE_MQUEUE is
enabled.
In file included from group/group_leave.c:71:0:
./mqueue/mqueue.h:73:5: error: "CONFIG_MQ_MAXMSGSIZE" is not defined [-Werror=undef]
#if CONFIG_MQ_MAXMSGSIZE > 0
^
cc1: all warnings being treated as errors | #include <tinyara/mqueue.h>
-#if CONFIG_MQ_MAXMSGSIZE > 0
+#if !defined(CONFIG_DISABLE_MQUEUE) && CONFIG_MQ_MAXMSGSIZE > 0
/****************************************************************************
* Pre-processor Definitions
|
workflow/sdcard: use workflow/blocking | // limitations under the License.
#include "sdcard.h"
+
+#include "blocking.h"
+#include "workflow.h"
+
#include "generated/hww.pb.h"
-#include "workflow/workflow.h"
#include <ui/components/ui_components.h>
-#include <ui/screen_process.h>
#include <ui/screen_stack.h>
-static bool _done = false;
-
-static bool _is_done(void)
+void sdcard_handle(const InsertRemoveSDCardRequest* insert_remove_sdcard)
{
- return _done;
-}
+ bool inserted = workflow_get_interface_functions()->sd_card_inserted();
-static void _continue(void)
-{
- _done = true;
+ // No action required, already inserted (INSERT request) or not inserted (REMOVE request)
+ if ((insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_INSERT_CARD &&
+ inserted) ||
+ (insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_REMOVE_CARD &&
+ !inserted)) {
+ return;
}
-void sdcard_handle(const InsertRemoveSDCardRequest* insert_remove_sdcard)
-{
- _done = false;
- bool pushed = false;
- if (insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_INSERT_CARD &&
- !workflow_get_interface_functions()->sd_card_inserted()) {
- component_t* screen = insert_sd_card_create(_continue);
- pushed = true;
- ui_screen_stack_push(screen);
- } else if (
- insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_INSERT_CARD &&
- workflow_get_interface_functions()->sd_card_inserted()) {
- _continue();
- } else if (
- insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_REMOVE_CARD &&
- workflow_get_interface_functions()->sd_card_inserted()) {
- component_t* screen = remove_sd_card_create(_continue);
- pushed = true;
- ui_screen_stack_push(screen);
+ component_t* screen;
+ if (insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_INSERT_CARD) {
+ screen = insert_sd_card_create(workflow_blocking_unblock);
+ } else if (insert_remove_sdcard->action == InsertRemoveSDCardRequest_SDCardAction_REMOVE_CARD) {
+ screen = remove_sd_card_create(workflow_blocking_unblock);
} else {
- _continue();
+ return;
}
- ui_screen_process(_is_done);
- if (pushed) {
+
+ ui_screen_stack_push(screen);
+ bool blocking_result = workflow_blocking_block();
ui_screen_stack_pop();
+ if (!blocking_result) {
+ // No meaningful error handling here.
}
}
|
Utilities: Fix session retrieval in macrecovery when verifying MLB | @@ -289,7 +289,7 @@ def action_verify(args):
"""
Try to verify MLB serial number.
"""
- session = get_session()
+ session = get_session(args)
generic_latest = get_image_info(session, bid=RECENT_MAC, mlb=MLB_ZERO,
diag=False, os_type='latest')
uvalid_default = get_image_info(session, bid=args.board_id, mlb=args.mlb,
|
ports/stm32: Call mp_deinit() before soft reboot. | @@ -624,12 +624,6 @@ soft_reset:
usbdbg_wait_for_command(1000);
}
- #if MICROPY_PY_LWIP
- // Must call GC sweep here to close open sockets.
- gc_sweep_all();
- systick_disable_dispatch(SYSTICK_DISPATCH_LWIP);
- #endif
-
// soft reset
storage_flush();
@@ -639,6 +633,9 @@ soft_reset:
#if MICROPY_PY_BLUETOOTH
mp_bluetooth_deinit();
#endif
+ #if MICROPY_PY_LWIP
+ systick_disable_dispatch(SYSTICK_DISPATCH_LWIP);
+ #endif
mod_network_deinit();
timer_deinit();
i2c_deinit_all();
@@ -654,6 +651,10 @@ soft_reset:
py_audio_deinit();
#endif
imlib_deinit_all();
+
+ gc_sweep_all();
+ mp_deinit();
+
first_soft_reset = false;
goto soft_reset;
}
|
bugid:19795128:[alcs]fix prefix/secret set and load | @@ -333,12 +333,15 @@ static int add_svr_key(CoAPContext *ctx, const char *keyprefix, const char *secr
return COAP_ERROR_MALLOC;
}
+ memset(item, 0, sizeof(svr_key_item));
item->keyInfo.secret = (char *) coap_malloc(strlen(secret) + 1);
if (!item->keyInfo.secret) {
HAL_MutexUnlock(lst->list_mutex);
coap_free(item);
return COAP_ERROR_MALLOC;
}
+
+ memset(item->keyInfo.secret, 0, strlen(secret) + 1);
strcpy(item->keyInfo.secret, secret);
strcpy(item->keyInfo.keyprefix, keyprefix);
item->keyInfo.priority = priority;
|
FIX: handled memory allocation failure for connection object. | @@ -607,6 +607,12 @@ void dispatch_conn_new(int sfd, STATE_FUNC init_state, int event_flags,
int read_buffer_size, enum network_transport transport)
{
CQ_ITEM *item = cqi_new();
+ if (item == NULL) {
+ close(sfd);
+ mc_logger->log(EXTENSION_LOG_WARNING, NULL,
+ "Failed to allocate memory for connection object.\n");
+ return;
+ }
int tid = (last_thread + 1) % settings.num_threads;
LIBEVENT_THREAD *thread = threads + tid;
|
apps/btshell: Fix not reporting scan start error
Extended parameter cannot be invalid as this is verified by parsing
function so just print error in common exit path. | @@ -1310,11 +1310,14 @@ cmd_scan(int argc, char **argv)
&g_scan_opts);
break;
default:
- rc = -1;
- console_printf("invalid 'extended' parameter\n");
+ assert(0);
break;
}
+ if (rc != 0) {
+ console_printf("error scanning; rc=%d\n", rc);
+ }
+
return rc;
}
|
Add missing Endl | @@ -667,7 +667,7 @@ namespace NCatboostCuda {
} else if (ShouldConsiderWeightsByDefault(metrics.back())) {
metrics.back()->GetUseWeights() = true;
CATBOOST_INFO_LOG << "Note: eval_metric is using sample weights by default. " <<
- "Set MetricName:use_weights=False to calculate unweighted metric.";
+ "Set MetricName:use_weights=False to calculate unweighted metric." << Endl;
}
}
usedDescriptions.insert(metrics.back()->GetCpuMetric().GetDescription());
|
Emitter: Closure transform doesn't need emit->class_block_depth.
It's always going to be 3 for reasons documented. | @@ -1157,8 +1157,15 @@ static void perform_closure_transform(lily_emit_state *emit,
/* The backing closure is always a class method, never the class
constructor itself. Use +1 for the right depth. This search should
never fail. */
- uint16_t self_spot = find_closed_sym_spot(emit,
- emit->class_block_depth + 1, (lily_sym *)prev_block->self);
+
+ /* Why is the depth 3? The backing closure is always a class method.
+ The depth will always be 3 because classes can't be declared inside
+ of another class.
+ __main__ or an import call has depth 1.
+ The class constructor has depth 2.
+ The method has depth 3. */
+ uint16_t self_spot = find_closed_sym_spot(emit, 3,
+ (lily_sym *)prev_block->self);
lily_u16_write_4(emit->closure_aux_code, o_closure_get,
self_spot, block_self->reg_spot, first_line);
|
filter_modify: remove free of uninitialized value (CID 183634)
flb_error
("[filter_modify] Unable to allocate memory for rule");
teardown(ctx);
- flb_free(condition);
flb_utils_split_free(split);
return -1;
}
|
release i3-gaps 4.21 | @@ -63,7 +63,7 @@ config_h = declare_dependency(
sources: vcs_tag(
input: config_h_in,
output: 'config.h',
- fallback: meson.project_version() + '-non-git',
+ fallback: meson.project_version() + ' (2022-09-21)',
)
)
|
Ragger tests now take CLI arguments for device model and elfs location | @@ -4,51 +4,62 @@ from ragger import Firmware
from ragger.backend import SpeculosBackend, LedgerCommBackend, LedgerWalletBackend, BackendInterface
from ethereum_client.client import EthereumClient
-ELFS_DIR = (Path(__file__).parent.parent / "elfs").resolve()
FWS = [
Firmware("nanos", "2.1"),
Firmware("nanox", "2.0.2"),
Firmware("nanosp", "1.0.3")
]
-# adding a pytest CLI option "--backend"
def pytest_addoption(parser):
- print(help(parser.addoption))
parser.addoption("--backend", action="store", default="speculos")
+ parser.addoption("--path", action="store", default="./elfs")
+ parser.addoption("--model", action="store", required=True)
# accessing the value of the "--backend" option as a fixture
[email protected](scope="session")
-def backend_name(pytestconfig) -> str:
[email protected]
+def arg_backend(pytestconfig) -> str:
return pytestconfig.getoption("backend")
[email protected]
+def arg_path(pytestconfig) -> str:
+ return pytestconfig.getoption("path")
+
[email protected]
+def arg_model(pytestconfig) -> str:
+ return pytestconfig.getoption("model")
+
# Providing the firmware as a fixture
[email protected](params=FWS)
-def firmware(request) -> Firmware:
- return request.param
-
-def get_elf_path(firmware: Firmware) -> Path:
- assert ELFS_DIR.is_dir(), f"{ELFS_DIR} is not a directory"
- app = ELFS_DIR / ("app-%s.elf" % firmware.device)
- assert app.is_file(), f"{app} must exist"
[email protected]
+def firmware(arg_model: str) -> Firmware:
+ for fw in FWS:
+ if fw.device == arg_model:
+ return fw
+ raise ValueError("Unknown device model \"%s\"" % (arg_model))
+
+def get_elf_path(arg_path: str, firmware: Firmware) -> Path:
+ elf_dir = Path(arg_path).resolve()
+ assert elf_dir.is_dir(), ("%s is not a directory" % (arg_path))
+ app = elf_dir / ("app-%s.elf" % firmware.device)
+ assert app.is_file(), ("Firmware %s does not exist !" % (app))
return app
# Depending on the "--backend" option value, a different backend is
# instantiated, and the tests will either run on Speculos or on a physical
# device depending on the backend
-def create_backend(backend: str, firmware: Firmware) -> BackendInterface:
+def create_backend(backend: str, arg_path: str, firmware: Firmware) -> BackendInterface:
if backend.lower() == "ledgercomm":
return LedgerCommBackend(firmware, interface="hid")
elif backend.lower() == "ledgerwallet":
return LedgerWalletBackend(firmware)
elif backend.lower() == "speculos":
- return SpeculosBackend(get_elf_path(firmware), firmware)
+ return SpeculosBackend(get_elf_path(arg_path, firmware), firmware)
else:
raise ValueError(f"Backend '{backend}' is unknown. Valid backends are: {BACKENDS}")
# This fixture will create and return the backend client
@pytest.fixture
-def backend_client(backend_name: str, firmware: Firmware) -> BackendInterface:
- with create_backend(backend_name, firmware) as b:
+def backend_client(arg_backend: str, arg_path: str, firmware: Firmware) -> BackendInterface:
+ with create_backend(arg_backend, arg_path, firmware) as b:
yield b
# This final fixture will return the properly configured app client, to be used in tests
|
pwm_nrf52: fix crash on close not checking playing status | @@ -339,7 +339,10 @@ nrf52_pwm_close(struct os_dev *odev)
return (EINVAL);
}
+ if(instances[inst_id].playing) {
nrfx_pwm_uninit(&instances[inst_id].drv_instance);
+ }
+
cleanup_instance(inst_id);
if (os_started()) {
@@ -545,8 +548,13 @@ nrf52_pwm_disable(struct pwm_dev *dev)
return (-EINVAL);
}
+ if (!instances[inst_id].playing) {
+ return (-EINVAL);
+ }
+
nrfx_pwm_uninit(&instances[inst_id].drv_instance);
instances[inst_id].playing = false;
+
return (0);
}
|
Fix PayloadDecoder broken for win32-mini-lib flavor
* Fix PayloadDecoder broken for win32-mini-lib flavor
* Update setup-buildtools.cmd
Temporary fix for GitHub Action runner issue | //
#include "PayloadDecoder.hpp"
+#if !defined(HAVE_MAT_ZLIB) || !defined(HAVE_MAT_JSONHPP)
+
+/* PayloadDecoder functionality requires ZLib and json.hpp.
+ * If these components are not included in the build, then
+ * replace decoder utility functions with stubs that return
+ * false.
+ */
+namespace MAT_NS_BEGIN
+{
+ namespace exporters
+ {
+ bool DecodeRecord(const CsProtocol::Record&, std::string&)
+ {
+ return false;
+ }
+
+ bool DecodeRequest(const std::vector<uint8_t>&, std::string&, bool)
+ {
+ return false;
+ }
+ };
+}
+MAT_NS_END
+
+#else
#include <algorithm>
#include <chrono>
#include <fstream>
@@ -557,4 +582,4 @@ namespace MAT_NS_BEGIN {
}
} MAT_NS_END
-
+#endif
|
rune: fix the wrong input parameter in KillPayload
The first parameter of func KillPayload(pid int, sig int)
is pid rather than sig. | @@ -180,7 +180,7 @@ func forwardSignal(rt *runtime.EnclaveRuntimeWrapper, notifySignal <-chan os.Sig
return
case sig := <-notifySignal:
n := int(sig.(syscall.Signal))
- err := rt.KillPayload(n, -1)
+ err := rt.KillPayload(-1, n)
if err != nil {
logrus.Debugf("failed to kill enclave runtime with signal %d", n)
}
|
handle non unique channel numbers | @@ -7376,7 +7376,8 @@ for(c = 0; c < 256; c++)
if(frequency <= 1000) testchannel = frequency;
else if((frequency >= 2407) && (frequency <= 2474)) testchannel = (frequency -2407)/5;
else if((frequency >= 2481) && (frequency <= 2487)) testchannel = (frequency -2412)/5;
- else if((frequency >= 5035) && (frequency <= 6425)) testchannel = (frequency -5000)/5;
+ else if((frequency >= 5035) && (frequency <= 5980)) testchannel = (frequency -5000)/5;
+ else if((frequency >= 5955) && (frequency <= 6415)) testchannel = (frequency -5950)/5;
if(testchannel > 0)
{
memset(&pwrq, 0, sizeof(pwrq));
|
Remove NULL pointer validation in sha256.c | @@ -159,8 +159,6 @@ static int mbedtls_a64_crypto_sha256_determine_support( void )
void mbedtls_sha256_init( mbedtls_sha256_context *ctx )
{
- SHA256_VALIDATE( ctx != NULL );
-
memset( ctx, 0, sizeof( mbedtls_sha256_context ) );
}
@@ -175,9 +173,6 @@ void mbedtls_sha256_free( mbedtls_sha256_context *ctx )
void mbedtls_sha256_clone( mbedtls_sha256_context *dst,
const mbedtls_sha256_context *src )
{
- SHA256_VALIDATE( dst != NULL );
- SHA256_VALIDATE( src != NULL );
-
*dst = *src;
}
@@ -186,8 +181,6 @@ void mbedtls_sha256_clone( mbedtls_sha256_context *dst,
*/
int mbedtls_sha256_starts( mbedtls_sha256_context *ctx, int is224 )
{
- SHA256_VALIDATE_RET( ctx != NULL );
-
#if defined(MBEDTLS_SHA224_C)
SHA256_VALIDATE_RET( is224 == 0 || is224 == 1 );
#else
@@ -427,9 +420,6 @@ int mbedtls_internal_sha256_process_c( mbedtls_sha256_context *ctx,
unsigned int i;
- SHA256_VALIDATE_RET( ctx != NULL );
- SHA256_VALIDATE_RET( (const unsigned char *)data != NULL );
-
for( i = 0; i < 8; i++ )
local.A[i] = ctx->state[i];
@@ -579,9 +569,6 @@ int mbedtls_sha256_update( mbedtls_sha256_context *ctx,
size_t fill;
uint32_t left;
- SHA256_VALIDATE_RET( ctx != NULL );
- SHA256_VALIDATE_RET( ilen == 0 || input != NULL );
-
if( ilen == 0 )
return( 0 );
@@ -633,9 +620,6 @@ int mbedtls_sha256_finish( mbedtls_sha256_context *ctx,
uint32_t used;
uint32_t high, low;
- SHA256_VALIDATE_RET( ctx != NULL );
- SHA256_VALIDATE_RET( (unsigned char *)output != NULL );
-
/*
* Add padding: 0x80 then 0x00 until 8 bytes remain for the length
*/
@@ -710,9 +694,6 @@ int mbedtls_sha256( const unsigned char *input,
SHA256_VALIDATE_RET( is224 == 0 );
#endif
- SHA256_VALIDATE_RET( ilen == 0 || input != NULL );
- SHA256_VALIDATE_RET( (unsigned char *)output != NULL );
-
mbedtls_sha256_init( &ctx );
if( ( ret = mbedtls_sha256_starts( &ctx, is224 ) ) != 0 )
|
tools/makemanifest.py: Show directory name if there is a FreezeError. | @@ -199,7 +199,7 @@ def mkdir(filename):
def freeze_internal(kind, path, script, opt):
path = convert_path(path)
if not os.path.isdir(path):
- raise FreezeError("freeze path must be a directory")
+ raise FreezeError("freeze path must be a directory: {}".format(path))
if script is None and kind == KIND_AS_STR:
if any(f[0] == KIND_AS_STR for f in manifest_list):
raise FreezeError("can only freeze one str directory")
|
fcrypt: minor improvement in shredTemporaryFile ()
Prevent writing to corrupted files.
See feedback from . | @@ -102,7 +102,6 @@ error:
*/
static int shredTemporaryFile (int fd, Key * errorKey)
{
- kdb_octet_t error = 0;
kdb_octet_t buffer[512] = { 0 };
struct stat tmpStat;
@@ -120,16 +119,10 @@ static int shredTemporaryFile (int fd, Key * errorKey)
for (off_t i = 0; i < tmpStat.st_size; i += sizeof (buffer))
{
if (write (fd, buffer, sizeof (buffer)) != sizeof (buffer))
- {
- // save the error state but keep on writing in the hope that further writes wont't fail
- error = 1;
- }
- }
- if (error)
{
goto error;
}
-
+ }
return 1;
error:
|
Add gcc63-linux-arm toolchain for cross-compilation
Add gcc63 toolchain for cross-compilation | "gcc_version": "6.0"
}
},
+ "gcc63-armv7a": {
+ "name": "gcc63",
+ "tools": {
+ "cc": { "bottle": "gcc63-armv7a", "executable": "cc" },
+ "c++": { "bottle": "gcc63-armv7a", "executable": "c++" },
+ "gcov": { "bottle": "gcc63-armv7a", "executable": "gcov" },
+ "c++filt": { "bottle": "gcc63-armv7a", "executable": "c++filt" },
+ "strip": { "bottle": "gcc63-armv7a", "executable": "strip" },
+ "nm": { "bottle": "gcc63-armv7a", "executable": "nm" }
+ },
+ "platforms": [
+ {"host": {"os": "LINUX"}, "target": {"os": "LINUX", "arch": "armv7a"}, "default": true}
+ ],
+ "params": {
+ "type": "gnu",
+ "match_root": "GCC",
+ "werror_mode": "compiler_specific",
+ "c_compiler": "$(GCC)/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc",
+ "cxx_compiler": "$(GCC)/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++",
+ "ar": "$(GCC)/gcc-linaro-6.3.1-2017.05-x86_64_arm-linux-gnueabihf/bin/arm-linux-gnueabihf-ar",
+ "gcc_version": "6.3"
+ }
+ },
"bloat": {
"tools": {
"bloat": { "bottle": "bloat", "executable": "bloat2" }
"nm": ["bin", "aarch64-linux-android-nm"]
}
},
+ "gcc63-armv7a": {
+ "formula": {
+ "sandbox_id": ["306585614"],
+ "match": "GCC"
+ },
+ "executable": {
+ "c++": ["bin", "arm-linux-gnueabihf-g++"],
+ "cc": ["bin", "arm-linux-gnueabihf-gcc"],
+ "gcov": ["bin", "arm-linux-gnueabihf-gcov"],
+ "c++filt": ["bin", "arm-linux-gnueabihf-c++filt"],
+ "strip": ["bin", "arm-linux-gnueabihf-strip"],
+ "nm": ["bin", "arm-linux-gnueabihf-nm"]
+ }
+ },
"cuda": {
"formula": {
"sandbox_id": [29625981],
|
stm32/Makefile: Add settings to support H7 MCUs. | @@ -53,7 +53,7 @@ INC += -I$(USBDEV_DIR)/core/inc -I$(USBDEV_DIR)/class/inc
CFLAGS_CORTEX_M = -mthumb
# Select hardware floating-point support
-ifeq ($(CMSIS_MCU),$(filter $(CMSIS_MCU),STM32F767xx STM32F769xx))
+ifeq ($(CMSIS_MCU),$(filter $(CMSIS_MCU),STM32F767xx STM32F769xx STM32H743xx))
CFLAGS_CORTEX_M += -mfpu=fpv5-d16 -mfloat-abi=hard
else
CFLAGS_CORTEX_M += -mfpu=fpv4-sp-d16 -mfloat-abi=hard
@@ -63,6 +63,7 @@ endif
CFLAGS_MCU_f4 = $(CFLAGS_CORTEX_M) -mtune=cortex-m4 -mcpu=cortex-m4 -DMCU_SERIES_F4
CFLAGS_MCU_f7 = $(CFLAGS_CORTEX_M) -mtune=cortex-m7 -mcpu=cortex-m7 -DMCU_SERIES_F7
CFLAGS_MCU_l4 = $(CFLAGS_CORTEX_M) -mtune=cortex-m4 -mcpu=cortex-m4 -DMCU_SERIES_L4
+CFLAGS_MCU_h7 = $(CFLAGS_CORTEX_M) -mtune=cortex-m7 -mcpu=cortex-m7
CFLAGS = $(INC) -Wall -Wpointer-arith -Werror -std=gnu99 -nostdlib $(CFLAGS_MOD) $(CFLAGS_EXTRA)
CFLAGS += -D$(CMSIS_MCU)
@@ -250,11 +251,11 @@ SRC_O = \
startup_stm32.o \
gchelper.o \
+$(BUILD)/$(HAL_DIR)/Src/%.o: CFLAGS += -fno-strict-aliasing
SRC_HAL = $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_,\
hal.c \
hal_adc.c \
hal_adc_ex.c \
- hal_can.c \
hal_cortex.c \
hal_dac.c \
hal_dac_ex.c \
@@ -280,6 +281,12 @@ SRC_HAL = $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_,\
ll_usb.c \
)
+ifeq ($(CMSIS_MCU),$(filter $(CMSIS_MCU),STM32H743xx))
+ SRC_HAL += $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_, hal_fdcan.c)
+else
+ SRC_HAL += $(addprefix $(HAL_DIR)/Src/stm32$(MCU_SERIES)xx_, hal_can.c)
+endif
+
SRC_USBDEV = $(addprefix $(USBDEV_DIR)/,\
core/src/usbd_core.c \
core/src/usbd_ctlreq.c \
|
Update mk_wheel.py | @@ -34,7 +34,7 @@ class PythonTrait(object):
def gen_cmd(self):
cmd = [
sys.executable, arc_root + '/ya', 'make', os.path.join(arc_root, 'catboost', 'python-package', 'catboost'),
- '--no-src-links', '-r', '--output', out_root, '-DPYTHON_CONFIG=' + self.py_config, '-DNO_DEBUGINFO',
+ '--no-src-links', '-r', '--output', out_root, '-DPYTHON_CONFIG=' + self.py_config, '-DNO_DEBUGINFO', '-DOS_SDK=local',
]
if not self.python_version.from_sandbox:
|
imageio: add ability restrict max image size
WEBP_MAX_IMAGE_SIZE can be defined to control this limit.
Set it to 1.5GiB w/--config=asan-fuzzer to avoid OOM with large resolution
images. This limit leaves some headroom over the single image max of 2^14 *
2^14 * 4 | @@ -137,7 +137,11 @@ void ImgIoUtilCopyPlane(const uint8_t* src, int src_stride,
int ImgIoUtilCheckSizeArgumentsOverflow(uint64_t nmemb, size_t size) {
const uint64_t total_size = nmemb * size;
- return (total_size == (size_t)total_size);
+ int ok = (total_size == (size_t)total_size);
+#if defined(WEBP_MAX_IMAGE_SIZE)
+ ok = ok && (total_size <= (uint64_t)WEBP_MAX_IMAGE_SIZE);
+#endif
+ return ok;
}
// -----------------------------------------------------------------------------
|
Update yfm for ya tool to 2.10.1 | },
"yfm": {
"formula": {
- "sandbox_id": 618063136,
+ "sandbox_id": 618755316,
"match": "yfm"
},
"executable": {
|
Support escaping delimiters in MANGOHUD_CONFIG | @@ -182,8 +182,15 @@ parse_string(const char *s, char *out_param, char *out_value)
if (*s == '=') {
s++;
i++;
- for (; !is_delimiter(*s); s++, out_value++, i++)
+ for (; !is_delimiter(*s); s++, out_value++, i++) {
*out_value = *s;
+ // Consume escaped delimiter, but don't escape null. Might be end of string.
+ if (*s == '\\' && *(s + 1) != 0 && is_delimiter(*(s + 1))) {
+ s++;
+ i++;
+ *out_value = *s;
+ }
+ }
} else
*(out_value++) = '1';
*out_value = 0;
|
Adds GPDB Merge notice for pg_upgrade
Detailed steps that we need to follow to make sure upstream merge is
updated to support additional flags we added for pg_upgrade related to
check mode. | @@ -87,3 +87,20 @@ intention is for this to be a quick test that all objects are handled by
pg_upgrade; upgrade testing still requires the full run across all
segments/mirrors. The smoketest can be invoked by "make check" in the
contrib/pg_upgrade directory.
+
+GPDB Merge notice:
+------------------
+We added the gp_fatal_log() function to replace some of the pg_fatal() calls in
+check.c and check_gp.c to support --continue-check-on-fatal, which allows
+running through all the checks even if we get a fatal failure. We need to
+update any newly added checks to ensure they work with the flag by replacing
+pg_fatal function calls with gp_fatal_log calls.
+
+Note: we cannot skip few checks despite using the --continue-check-on-fatal
+flag. See gp_fatal_log() for details.
+
+For --skip-target-check flag while merging upstream, if we have new checks for
+new/target cluster, they should be skipped when the flag is enabled. See
+is_skip_target_check(). There might be new pre-check steps for the target
+cluster before we check_new_cluster(), such as get_sock_dir(), which must be
+guarded.
|
add link to rename plugin | @@ -19,7 +19,7 @@ The problem, in general, can be described as: Which phase of the KDB should be u
### Observed problems with changing key names
-For example, the `rename` plugin supports different transformations for `get` and `set`.
+For example, the [`rename` plugin](../../src/plugins/rename/README.md) supports different transformations for `get` and `set`.
The plugin is executed in the post-storage phase for the `get` operation and the pre-storage phase for the `set` operation.
We can instruct it to convert the keynames to lowercase in the `get` operation, and to UPPERCASE in the `set` operation.
This results in the keys being in UPPERCASE in the configuration files, but they are presented in lowercase to other plugins and applications using the Elektra API.
|
document gfortran dependency | @@ -72,7 +72,7 @@ The software tools in recon should run on any recent Linux distribution.
To install the required libraries on Debian and Ubuntu run:
- $ sudo apt-get install gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev
+ $ sudo apt-get install gcc make libfftw3-dev liblapacke-dev libpng-dev libopenblas-dev gfortran
(optional)
$ sudo apt-get install octave
|
Stop dropping ctrs when no ctrs | @@ -503,9 +503,12 @@ static void SelectCtrsToDropAfterCalc(
},
candList);
+ if (!fullNeededMemoryForCtrs) {
+ return;
+ }
auto currentMemoryUsage = NMemInfo::GetMemInfo().RSS;
if (fullNeededMemoryForCtrs + currentMemoryUsage > memoryLimit) {
- CATBOOST_DEBUG_LOG << "Needed more memory than allowed, will drop some ctrs after score calculation"
+ CATBOOST_DEBUG_LOG << "Need more memory than allowed, will drop some ctrs after score calculation"
<< Endl;
const float GB = (ui64)1024 * 1024 * 1024;
CATBOOST_DEBUG_LOG << "current rss: " << currentMemoryUsage / GB << " full needed memory: "
|
include/rsa.h: Format with clang-format
BRANCH=none
TEST=none | @@ -58,10 +58,8 @@ struct rsa_public_key {
};
#endif
-int rsa_verify(const struct rsa_public_key *key,
- const uint8_t *signature,
- const uint8_t *sha,
- uint32_t *workbuf32);
+int rsa_verify(const struct rsa_public_key *key, const uint8_t *signature,
+ const uint8_t *sha, uint32_t *workbuf32);
#endif /* !__ASSEMBLER__ */
|
Add comment explaining generate_psa_code.py
Explain that the output filename is derived from the -d
argument, so that it's obvious why the CMakefile code
does what it does. | @@ -92,6 +92,8 @@ function(add_test_suite suite_name)
add_custom_command(
OUTPUT
+ # The output filename of generate_test_code.py is derived from the -d
+ # input argument.
test_suite_${data_name}.c
COMMAND
${MBEDTLS_PYTHON_EXECUTABLE}
|
val_alloc should be checked for 1, not 0 | @@ -2935,7 +2935,7 @@ evhtp_kv_new(const char * key, const char * val,
{
kv->vlen = strlen(val);
- if (val_alloc == 0)
+ if (val_alloc == 1)
{
char * s = htp__malloc_(kv->vlen + 1);
|
kernel/os; fix crash_restore_regs for m4 when FP registers
are pushed to exception stack. | @@ -174,7 +174,7 @@ os_default_irq(struct trap_frame *tf)
struct coredump_regs regs;
#endif
#if MYNEWT_VAL(OS_CRASH_RESTORE_REGS)
- uint32_t *orig_sp;
+ uint32_t orig_sp;
#endif
console_blocking_mode();
@@ -212,10 +212,19 @@ os_default_irq(struct trap_frame *tf)
#if MYNEWT_VAL(OS_CRASH_RESTORE_REGS)
if (((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) < 16) &&
hal_debugger_connected()) {
- orig_sp = &tf->ef->r0;
- orig_sp += 8;
- if (tf->ef->psr & SCB_CCR_STKALIGN_Msk) {
- orig_sp++;
+ if ((tf->lr & 0x10) == 0) {
+ /*
+ * Extended frame
+ */
+ orig_sp = ((uint32_t)tf->ef) + 0x68;
+ if (tf->ef->psr & (1 << 9)) {
+ orig_sp += 4;
+ }
+ } else {
+ orig_sp = ((uint32_t)tf->ef) + 0x20;
+ if ((SCB->CCR & SCB_CCR_STKALIGN_Msk) & tf->ef->psr & (1 << 9)) {
+ orig_sp += 4;
+ }
}
console_printf("Use 'set $pc = 0x%08lx' to restore PC in gdb\n",
|
Prevent interaction with main window when enable color modal is open | import electron from "electron";
const dialog = electron.remote ? electron.remote.dialog : electron.dialog;
+const win = electron.remote.getCurrentWindow();
export default () => {
// eslint-disable-next-line global-require
@@ -15,5 +16,5 @@ export default () => {
detail: l10n("DIALOG_ENABLE_COLOR_MODE_DESCRIPTION")
};
- return dialog.showMessageBoxSync(dialogOptions);
+ return dialog.showMessageBoxSync(win, dialogOptions);
};
|
CHANGES: note which command line utilities are marked for deprecation but still available.
Some of the utilities are much easier to use than their pkey alternatives.
These have been modified to use the PKEY APIs but still note that they are
deprecated. | @@ -79,11 +79,6 @@ OpenSSL 3.0
*Richard Levitte*
- * The command line utilities ecparam and ec have been deprecated. Instead
- use the pkeyparam, pkey and genpkey programs.
-
- *Paul Dale*
-
* All of the low level RSA functions have been deprecated including:
RSA_new_method, RSA_bits, RSA_size, RSA_security_bits,
@@ -138,25 +133,35 @@ OpenSSL 3.0
*Kurt Roeckx*
* The command line utilities dhparam, dsa, gendsa and dsaparam have been
- deprecated. Instead use the pkeyparam, pkey, genpkey and pkeyparam
- programs respectively.
+ modified to use PKEY APIs. These commands are now in maintenance mode
+ and no new features will be added to them.
+
+ *Paul Dale*
+
+ * The command line utility rsautl has been deprecated.
+ Instead use the pkeyutl program.
+
+ *Paul Dale*
+
+ * The command line utilities genrsa and rsa have been modified to use PKEY
+ APIs These commands are now in maintenance mode and no new features will
+ be added to them.
*Paul Dale*
* All of the low level DH functions have been deprecated including:
- DH_OpenSSL, DH_set_default_method, DH_get_default_method, DH_set_method,
- DH_new_method, DH_bits, DH_size, DH_security_bits, DH_get_ex_new_index,
- DH_set_ex_data, DH_get_ex_data, DH_generate_parameters_ex,
- DH_check_params_ex, DH_check_ex, DH_check_pub_key_ex,
- DH_check, DH_check_pub_key, DH_generate_key, DH_compute_key,
- DH_compute_key_padded, DHparams_print_fp, DHparams_print, DH_get_nid,
- DH_KDF_X9_42, DH_get0_engine, DH_get_length, DH_set_length, DH_meth_new,
+ DH_OpenSSL, DH_set_default_method, DH_get_default_method,
+ DH_set_method, DH_new_method, DH_bits, DH_size, DH_security_bits,
+ DH_get_ex_new_index, DH_set_ex_data, DH_get_ex_data,
+ DH_generate_parameters_ex, DH_check_params_ex, DH_check_ex,
+ DH_check_pub_key_ex, DH_check, DH_check_pub_key, DH_generate_key,
+ DH_compute_key, DH_compute_key_padded, DHparams_print_fp,
+ DHparams_print, DH_get_nid, DH_KDF_X9_42, DH_get0_engine, DH_meth_new,
DH_meth_free, DH_meth_dup, DH_meth_get0_name, DH_meth_set1_name,
DH_meth_get_flags, DH_meth_set_flags, DH_meth_get0_app_data,
- DH_meth_set0_app_data, DH_meth_get_generate_key,
- DH_meth_set_generate_key, DH_meth_get_compute_key,
- DH_meth_set_compute_key, DH_meth_get_bn_mod_exp,
+ DH_meth_set0_app_data, DH_meth_get_generate_key, DH_meth_set_generate_key,
+ DH_meth_get_compute_key, DH_meth_set_compute_key, DH_meth_get_bn_mod_exp,
DH_meth_set_bn_mod_exp, DH_meth_get_init, DH_meth_set_init,
DH_meth_get_finish, DH_meth_set_finish, DH_meth_get_generate_params
and DH_meth_set_generate_params.
|
[kernel] add a cast on NewtonEulerFrom1DLocalFrameR | @@ -339,6 +339,12 @@ KERNEL_REGISTRATION()
return std11::dynamic_pointer_cast<LagrangianDS>(ds);
}
+ SP::NewtonEulerFrom1DLocalFrameR cast_NewtonEulerFrom1DLocalFrameR(SP::Relation r)
+ {
+ return std11::dynamic_pointer_cast<NewtonEulerFrom1DLocalFrameR>(r);
+ }
+
+
// Required to get size of a graph of interactions in python interp
size_t size_graph(const InteractionsGraph& index_set)
{
|
Fix missing icons regression | @@ -3049,8 +3049,10 @@ VOID PhProcessImageListInitialization(
PhImageListItemType = PhCreateObjectType(L"ImageListItem", 0, PhpImageListItemDeleteProcedure);
- PhProcessLargeImageList = ImageList_Create(32, 32, ILC_MASK | ILC_COLOR32, 100, 100);
- PhProcessSmallImageList = ImageList_Create(16, 16, ILC_MASK | ILC_COLOR32, 100, 100);
+ PhProcessLargeImageList = ImageList_Create(GetSystemMetrics(SM_CXICON), GetSystemMetrics(SM_CYICON), ILC_MASK | ILC_COLOR32, 100, 100);
+ PhProcessSmallImageList = ImageList_Create(GetSystemMetrics(SM_CXSMICON), GetSystemMetrics(SM_CYSMICON), ILC_MASK | ILC_COLOR32, 100, 100);
+ ImageList_SetBkColor(PhProcessLargeImageList, CLR_NONE);
+ ImageList_SetBkColor(PhProcessSmallImageList, CLR_NONE);
PhGetStockApplicationIcon(&iconSmall, &iconLarge);
ImageList_AddIcon(PhProcessLargeImageList, iconLarge);
|
Codegen await input | @@ -522,6 +522,10 @@ class ScriptBuilder {
);
};
+ _inputWait = (mask: number) => {
+ this._addCmd("VM_INPUT_WAIT", mask);
+ };
+
_stop = () => {
this._assertStackNeutral();
this._addComment("Stop Script");
@@ -748,6 +752,14 @@ class ScriptBuilder {
this._overlayWait(true, [".UI_WAIT_WINDOW"]);
};
+ // --------------------------------------------------------------------------
+ // Input
+
+ inputAwait = (input: string[]) => {
+ this._addComment("Wait For Input");
+ this._inputWait(inputDec(input));
+ };
+
// --------------------------------------------------------------------------
// Scenes
|
ames: refactor +load | [%2 ames-state-2]
[%3 ^ames-state]
==
- ^+ ames-gate
- ?- -.old-state
- %3
+ |^ ^+ ames-gate
+ ::
+ =? old-state ?=(%1 -.old-state) %2^(state-1-to-2 +.old-state)
+ =? old-state ?=(%2 -.old-state) %3^(state-2-to-3 +.old-state)
+ ::
+ ?> ?=(%3 -.old-state)
ames-gate(ames-state +.old-state)
::
- %2
- =. ames-state
- :* peers.old-state
- unix-duct.old-state
- life.old-state
- crypto-core.old-state
- bug=[veb=veb.old-state ships=~]
- ==
- ames-gate
+ ++ state-1-to-2
+ |= =ames-state-1
+ ^- ames-state-2
::
- %1
- => .(old-state +.old-state)
- =. +.ames-state
- :* unix-duct.old-state
- life.old-state
- crypto-core.old-state
- bug=[veb=veb-all-off ships=~]
+ =| =ames-state-2
+ =. +.ames-state-2
+ :* unix-duct.ames-state-1
+ life.ames-state-1
+ crypto-core.ames-state-1
+ veb=veb-all-off
==
- =. peers.ames-state
+ =. peers.ames-state-2
%- ~(gas by *(map ship ship-state))
- %+ turn ~(tap by peers.old-state)
- |= [peer=ship old-ship-state=ship-state-1]
+ %+ turn ~(tap by peers.ames-state-1)
+ |= [peer=ship =ship-state-1]
^- [ship ship-state]
- ?: ?=(%alien -.old-ship-state)
- [peer old-ship-state]
+ ?: ?=(%alien -.ship-state-1)
+ [peer ship-state-1]
:+ peer %known
- %= +.old-ship-state
+ %= +.ship-state-1
qos
- ?+ -.qos.old-ship-state qos.old-ship-state
+ ?+ -.qos.ship-state-1 qos.ship-state-1
%unborn [%unborn now]
==
==
- ames-gate
+ ames-state-2
+ ::
+ ++ state-2-to-3
+ |= =ames-state-2
+ ^- ^ames-state
+ ::
+ :* peers.ames-state-2
+ unix-duct.ames-state-2
+ life.ames-state-2
+ crypto-core.ames-state-2
+ bug=[veb=veb.ames-state-2 ships=~]
==
+ --
:: +scry: dereference namespace
::
++ scry
|
Don't change the move order. | ?~ mor
[ova vanes]
=^ nyx vanes (jack lac i.mor)
- $(ova (weld ova p.nyx), mor (weld t.mor q.nyx))
+ $(ova (weld ova p.nyx), mor (weld q.nyx t.mor))
--
--
=< :: Arvo larval stage
|
[MEMPOOL] Nonce is checked ahead of Balance during validation | @@ -269,15 +269,15 @@ func (mp *MemPool) validate(tx *types.Tx) error {
if err != nil {
return err
}
+ if tx.GetBody().GetNonce() <= ns.Nonce {
+ return message.ErrTxNonceTooLow
+ }
if tx.GetBody().GetAmount() > ns.Balance {
if !mp.cfg.EnableTestmode {
// Skip balance validation in test mode
return message.ErrInsufficientBalance
}
}
- if tx.GetBody().GetNonce() <= ns.Nonce {
- return message.ErrTxNonceTooLow
- }
return nil
}
|
Use global variable rm_ID_values directly. | @@ -127,24 +127,22 @@ Export_ChromaticityInfo(ChromaticityInfo *ci, VALUE chrom)
VALUE chrom_members;
VALUE red_primary, green_primary, blue_primary, white_point;
VALUE entry_members, x, y;
- ID values_id;
if (CLASS_OF(chrom) != Class_Chromaticity)
{
rb_raise(rb_eTypeError, "type mismatch: %s given",
rb_class2name(CLASS_OF(chrom)));
}
- values_id = rm_ID_values;
// Get the struct members in an array
- chrom_members = rb_funcall(chrom, values_id, 0);
+ chrom_members = rb_funcall(chrom, rm_ID_values, 0);
red_primary = rb_ary_entry(chrom_members, 0);
green_primary = rb_ary_entry(chrom_members, 1);
blue_primary = rb_ary_entry(chrom_members, 2);
white_point = rb_ary_entry(chrom_members, 3);
// Get the red_primary PrimaryInfo members in an array
- entry_members = rb_funcall(red_primary, values_id, 0);
+ entry_members = rb_funcall(red_primary, rm_ID_values, 0);
x = rb_ary_entry(entry_members, 0); // red_primary.x
ci->red_primary.x = x == Qnil ? 0.0 : NUM2DBL(x);
y = rb_ary_entry(entry_members, 1); // red_primary.y
@@ -152,7 +150,7 @@ Export_ChromaticityInfo(ChromaticityInfo *ci, VALUE chrom)
ci->red_primary.z = 0.0;
// Get the green_primary PrimaryInfo members in an array
- entry_members = rb_funcall(green_primary, values_id, 0);
+ entry_members = rb_funcall(green_primary, rm_ID_values, 0);
x = rb_ary_entry(entry_members, 0); // green_primary.x
ci->green_primary.x = x == Qnil ? 0.0 : NUM2DBL(x);
y = rb_ary_entry(entry_members, 1); // green_primary.y
@@ -160,7 +158,7 @@ Export_ChromaticityInfo(ChromaticityInfo *ci, VALUE chrom)
ci->green_primary.z = 0.0;
// Get the blue_primary PrimaryInfo members in an array
- entry_members = rb_funcall(blue_primary, values_id, 0);
+ entry_members = rb_funcall(blue_primary, rm_ID_values, 0);
x = rb_ary_entry(entry_members, 0); // blue_primary.x
ci->blue_primary.x = x == Qnil ? 0.0 : NUM2DBL(x);
y = rb_ary_entry(entry_members, 1); // blue_primary.y
@@ -168,7 +166,7 @@ Export_ChromaticityInfo(ChromaticityInfo *ci, VALUE chrom)
ci->blue_primary.z = 0.0;
// Get the white_point PrimaryInfo members in an array
- entry_members = rb_funcall(white_point, values_id, 0);
+ entry_members = rb_funcall(white_point, rm_ID_values, 0);
x = rb_ary_entry(entry_members, 0); // white_point.x
ci->white_point.x = x == Qnil ? 0.0 : NUM2DBL(x);
y = rb_ary_entry(entry_members, 1); // white_point.y
|
Add example to CLI helper | @@ -21,6 +21,17 @@ parsed as errors, since they hadn't been declared yet.
EXAMPLE:
+ // initialize the CLI helper.
+ fio_cli_start(argc, argv, "App description or NULL");
+ // setup possible command line arguments.
+ fio_cli_accept_num("port p", "the port to listen to, defaults to 3000.");
+ fio_cli_accept_bool("log v", "enable logging");
+ // read command line arguments.
+ uint8_t logging = fio_cli_get_int("v");
+ const char *port = fio_cli_get_str("port");
+ if (!port)
+ port = "3000";
+ fio_cli_end();
*/
|
docs: include pbspro now that recipes exist for aarch64 | @@ -58,7 +58,7 @@ echo "Querying available packages for baseOS=${baseos}, arch=${arch}: minor_ver=
if [[ "${arch}" == "aarch64" ]];then
echo "Querying aarch64..."
- skip="mvapich2|pbspro|impi-ohpc|lmod-defaults-intel"
+ skip="mvapich2|impi-ohpc|lmod-defaults-intel"
fi
repobase="http://build.openhpc.community/OpenHPC:/${minor_ver}/${baseos}"
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.