message (stringlengths 6-474) | diff (stringlengths 8-5.22k) |
---|---|
add the ci of nrf5x | @@ -65,6 +65,10 @@ jobs:
- {RTT_BSP: "mb9bf618s", RTT_TOOL_CHAIN: "sourcery-arm"}
- {RTT_BSP: "mb9bf568r", RTT_TOOL_CHAIN: "sourcery-arm"}
- {RTT_BSP: "mini2440", RTT_TOOL_CHAIN: "sourcery-arm"}
+ - {RTT_BSP: "nrf5x/nrf51822", RTT_TOOL_CHAIN: "sourcery-arm"}
+ - {RTT_BSP: "nrf5x/nrf52832", RTT_TOOL_CHAIN: "sourcery-arm"}
+ - {RTT_BSP: "nrf5x/nrf52833", RTT_TOOL_CHAIN: "sourcery-arm"}
+ - {RTT_BSP: "nrf5x/nrf52840", RTT_TOOL_CHAIN: "sourcery-arm"}
- {RTT_BSP: "qemu-vexpress-a9", RTT_TOOL_CHAIN: "sourcery-arm"}
- {RTT_BSP: "qemu-vexpress-gemini", RTT_TOOL_CHAIN: "sourcery-arm"}
- {RTT_BSP: "sam7x", RTT_TOOL_CHAIN: "sourcery-arm"}
|
Modify ip_context struct using MEMB.
Tested-by: Kishen Maloor | @@ -62,11 +62,8 @@ struct sockaddr_nl ifchange_nl;
int ifchange_sock;
bool ifchange_initialized;
-#ifdef OC_DYNAMIC_ALLOCATION
OC_LIST(ip_contexts);
-#else /* OC_DYNAMIC_ALLOCATION */
-static ip_context_t devices[OC_MAX_NUM_DEVICES];
-#endif /* !OC_DYNAMIC_ALLOCATION */
+OC_MEMB(ip_context_s, ip_context_t, OC_MAX_NUM_DEVICES);
void
oc_network_event_handler_mutex_init(void)
@@ -93,8 +90,9 @@ void oc_network_event_handler_mutex_destroy(void) {
pthread_mutex_destroy(&mutex);
}
-static ip_context_t *get_ip_context_for_device(int device) {
-#ifdef OC_DYNAMIC_ALLOCATION
+static ip_context_t *
+get_ip_context_for_device(int device)
+{
ip_context_t *dev = oc_list_head(ip_contexts);
while (dev != NULL && dev->device != device) {
dev = dev->next;
@@ -102,9 +100,6 @@ static ip_context_t *get_ip_context_for_device(int device) {
if (!dev) {
return NULL;
}
-#else /* OC_DYNAMIC_ALLOCATION */
- ip_context_t *dev = &devices[device];
-#endif /* !OC_DYNAMIC_ALLOCATION */
return dev;
}
@@ -885,15 +880,12 @@ connectivity_ipv4_init(ip_context_t *dev)
int oc_connectivity_init(int device) {
OC_DBG("Initializing connectivity for device %d", device);
-#ifdef OC_DYNAMIC_ALLOCATION
- ip_context_t *dev = (ip_context_t *)calloc(1, sizeof(ip_context_t));
+
+ ip_context_t *dev = (ip_context_t *)oc_memb_alloc(&ip_context_s);
if (!dev) {
oc_abort("Insufficient memory");
}
oc_list_add(ip_contexts, dev);
-#else /* OC_DYNAMIC_ALLOCATION */
- ip_context_t *dev = &devices[device];
-#endif /* !OC_DYNAMIC_ALLOCATION */
dev->device = device;
if (pipe(dev->shutdown_pipe) < 0) {
@@ -1090,10 +1082,8 @@ oc_connectivity_shutdown(int device)
close(dev->shutdown_pipe[1]);
close(dev->shutdown_pipe[0]);
-#ifdef OC_DYNAMIC_ALLOCATION
oc_list_remove(ip_contexts, dev);
- free(dev);
-#endif /* OC_DYNAMIC_ALLOCATION */
+ oc_memb_free(&ip_context_s, dev);
OC_DBG("oc_connectivity_shutdown for device %d", device);
}
|
If window is too big for screen, account for borders when shrinking to fit | @@ -1846,6 +1846,33 @@ void CreateMainFrame(FrameCreationCallback inOnFrame, int inWidth, int inHeight,
if (targetH>sgDesktopHeight)
targetH = sgDesktopHeight;
+ int targetX = SDL_WINDOWPOS_UNDEFINED;
+ int targetY = SDL_WINDOWPOS_UNDEFINED;
+
+ #if (defined(HX_WINDOWS) && !defined(HX_WINRT))
+ if (!borderless && !fullscreen)
+ {
+ DWORD style = WS_CLIPSIBLINGS | WS_CLIPCHILDREN | WS_OVERLAPPED |
+ WS_CAPTION | WS_SYSMENU | WS_MINIMIZEBOX;
+ if (resizable)
+ style |= WS_THICKFRAME | WS_MAXIMIZEBOX;
+ RECT r; r.left = 0; r.top = 0; r.left = 1; r.right = 1;
+ AdjustWindowRectEx(&r, style, FALSE, 0);
+ int borderW = r.right-r.left;
+ int borderH = r.bottom-r.top;
+ if (targetH + borderH > sgDesktopHeight)
+ {
+ targetY = -r.top;
+ targetH = sgDesktopHeight - borderH;
+ }
+ if (targetW + borderW > sgDesktopWidth)
+ {
+ targetX = -r.left;
+ targetW = sgDesktopWidth - borderW;
+ }
+ }
+ #endif
+
#ifdef HX_LINUX
int setWidth = targetW;
int setHeight = targetH;
@@ -1866,7 +1893,7 @@ void CreateMainFrame(FrameCreationCallback inOnFrame, int inWidth, int inHeight,
window = NULL;
}
- window = SDL_CreateWindow(inTitle, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, setWidth, setHeight, requestWindowFlags);
+ window = SDL_CreateWindow(inTitle, targetX, targetY, setWidth, setHeight, requestWindowFlags);
#if (defined(HX_WINDOWS) && !defined(HX_WINRT))
HINSTANCE handle = ::GetModuleHandle(0);
|
nshlib/nsh_netcmds.c: Fix a compilation error when IPv4/IPv6 dual stack is enabled | @@ -1023,7 +1023,7 @@ int cmd_ifconfig(FAR struct nsh_vtbl_s *vtbl, int argc, char **argv)
#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
- if (inet6i != NULL)
+ if (inet6 != NULL)
#endif
{
#warning Missing Logic
|
updated install_dependencies.bat | @@ -4,13 +4,35 @@ if not exist windows_dependencies mkdir windows_dependencies
if exist windows_dependencies/odb exit /b
cd windows_dependencies
+:: Download zip files
curl -o libodb-2.4.0.zip https://www.codesynthesis.com/download/odb/2.4/libodb-2.4.0.zip
-unzip libodb-2.4.0.zip
-move libodb-2.4.0\odb .\
-
curl -o libodb-sqlite-2.4.0.zip https://www.codesynthesis.com/download/odb/2.4/libodb-sqlite-2.4.0.zip
+curl -o sqlite-dll.zip https://www.sqlite.org/2018/sqlite-dll-win32-x86-3240000.zip
+curl -o sqlite-amalgamation.zip https://www.sqlite.org/2018/sqlite-amalgamation-3240000.zip
+
+:: Unzip zip files
+unzip libodb-2.4.0.zip
unzip libodb-sqlite-2.4.0.zip
-move libodb-sqlite-2.4.0\odb\sqlite odb\
+unzip sqlite-dll.zip
+unzip sqlite-amalgamation.zip
+
+:: Restructure Directory
+move libodb-sqlite-2.4.0\etc\sqlite .\
+move sqlite3.def .\sqlite\
+move sqlite-amalgamation-3240000\sqlite3.c .\sqlite\
+move sqlite-amalgamation-3240000\sqlite3.h .\sqlite\
+
+:: copy headers
+xcopy /e/i libodb-2.4.0\odb .\odb\
+xcopy /e/i libodb-sqlite-2.4.0\odb\sqlite odb\sqlite\
+
+:: Delete useless files
+del libodb-2.4.0.zip
+del libodb-sqlite-2.4.0.zip
+del sqlite-dll.zip
+del sqlite-amalgamation.zip
+del sqlite3.dll
+rmdir /s /q sqlite-amalgamation-3240000
dir
cd ..
|
agent retrieves new token_endpoint if it suspects that it changed | @@ -151,11 +151,21 @@ void handleAdd(char* q, int sock, struct oidc_provider** loaded_p, size_t* loade
ipc_write(sock, RESPONSE_ERROR, "provider already loaded");
return;
}
+ if(retrieveAccessToken(provider, FORCE_NEW_TOKEN)!=0) {
+ char* newTokenEndpoint = getTokenEndpoint(provider_getConfigEndpoint(*provider), provider_getCertPath(*provider));
+ if(newTokenEndpoint && strcmp(newTokenEndpoint, provider_getTokenEndpoint(*provider))!=0) {
+ provider_setTokenEndpoint(provider, newTokenEndpoint);
if(retrieveAccessToken(provider, FORCE_NEW_TOKEN)!=0) {
freeProvider(provider);
- ipc_write(sock, RESPONSE_ERROR, "misconfiguration or network issues");
+ ipc_write(sock, RESPONSE_ERROR, "network issues");
return;
}
+ } else {
+ freeProvider(provider);
+ ipc_write(sock, RESPONSE_ERROR, "network issues");
+ return;
+ }
+ }
*loaded_p = addProvider(*loaded_p, loaded_p_count, *provider);
free(provider);
ipc_write(sock, RESPONSE_STATUS, "success");
|
docs: change descriptions of substitution defines to use literal blocks
Copy-pasting the "local substitution" example would result in an error,
due to the ASCII quotation marks being replaced by unicode double quotation marks
when outputting the HTML. | @@ -328,11 +328,11 @@ Would render in the documentation as:
This is a {IDF_TARGET_NAME}, with /{IDF_TARGET_PATH_NAME}/soc.c, compiled with `xtensa-{IDF_TARGET_TOOLCHAIN_NAME}-elf-gcc` with `CONFIG_{IDF_TARGET_CFG_PREFIX}_MULTI_DOC`.
-This extension also supports markup for defining local (within a single source file) substitutions. Place a definition like the following into a single line of the RST file:
+This extension also supports markup for defining local (within a single source file) substitutions. Place a definition like the following into a single line of the RST file::
{\IDF_TARGET_SUFFIX:default="DEFAULT_VALUE", esp32="ESP32_VALUE", esp32s2="ESP32S2_VALUE"}
-This will define a target-dependent substitution of the tag {\IDF_TARGET_SUFFIX} in the current RST file. For example:
+This will define a target-dependent substitution of the tag {\IDF_TARGET_SUFFIX} in the current RST file. For example::
{\IDF_TARGET_TX_PIN:default="IO3", esp32="IO4", esp32s2="IO5"}
|
docs - gpfdist uncompresses data on the fly. | errors.</note>
<p>For readable external tables, if load files are compressed using <codeph>gzip</codeph> or
<codeph>bzip2</codeph> (have a <codeph>.gz</codeph> or <codeph>.bz2</codeph> file
- extension), <codeph>gpfdist</codeph> uncompresses the files before loading. For writable
- external tables, <codeph>gpfdist</codeph> compresses the data using <codeph>gzip</codeph> if
- the target file has a <codeph>.gz</codeph> extension.</p>
+ extension), <codeph>gpfdist</codeph> uncompresses the data while loading the data (on the
+ fly). For writable external tables, <codeph>gpfdist</codeph> compresses the data using
+ <codeph>gzip</codeph> if the target file has a <codeph>.gz</codeph> extension.</p>
<note type="note">Compression is not supported for readable and writeable external tables when
the <codeph>gpfdist</codeph> utility runs on Windows platforms.</note>
<p>When reading or writing data with the <codeph>gpfdist</codeph> or <codeph>gpfdists</codeph>
|
static buffer for data_flash | @@ -402,7 +402,7 @@ void data_flash_read_backbox(const uint32_t file_index, const uint32_t offset, u
}
cbor_result_t data_flash_write_backbox(const blackbox_t *b) {
- uint8_t buffer[PAGE_SIZE];
+ static uint8_t buffer[PAGE_SIZE];
cbor_value_t enc;
cbor_encoder_init(&enc, buffer, PAGE_SIZE);
|
apps/blemesh_shell: Set default syscfg values for testing | @@ -31,9 +31,25 @@ syscfg.vals:
# Newtmgr is not supported in this app, so disable newtmgr-over-shell.
SHELL_NEWTMGR: 0
+ MSYS_1_BLOCK_COUNT: 80
+
BLE_MESH: 1
BLE_MESH_SHELL: 1
- MSYS_1_BLOCK_COUNT: 48
+ BLE_MESH_PROV: 1
+ BLE_MESH_RELAY: 1
+ BLE_MESH_PB_ADV: 1
+ BLE_MESH_PB_GATT: 1
+ BLE_MESH_LOW_POWER: 1
+ BLE_MESH_LPN_AUTO: 0
+ BLE_MESH_GATT_PROXY: 1
+ BLE_MESH_LABEL_COUNT: 2
+ BLE_MESH_SUBNET_COUNT: 2
+ BLE_MESH_MODEL_GROUP_COUNT: 2
+ BLE_MESH_APP_KEY_COUNT: 4
+ BLE_MESH_IV_UPDATE_TEST: 1
+ BLE_MESH_TESTING: 1
+ BLE_MESH_FRIEND: 1
+ BLE_MESH_CFG_CLI: 1
BLE_MESH_DEBUG: 1
BLE_MESH_DEBUG_NET: 1
|
[build-srt] Install newer meson | @@ -43,12 +43,9 @@ dependencies() {
if [[ ! -f ./bin/get-pip.py ]]; then
curl https://bootstrap.pypa.io/get-pip.py -o bin/get-pip.py
- fi
python3 ./bin/get-pip.py
-
- if [[ $(pip3 show meson >/dev/null; echo $?) == 1 || $(pip3 show mako >/dev/null; echo $?) == 1 ]]; then
- pip3 install meson mako
fi
+ pip3 install 'meson>=0.54' mako
if [[ ! -f /usr/include/NVCtrl/NVCtrl.h ]]; then
curl -LO http://mirrors.kernel.org/ubuntu/pool/main/n/nvidia-settings/libxnvctrl0_440.64-0ubuntu1_amd64.deb
|
Changed eqtide.c:VerifyLostEngEqtide to make the same assignments regardless of the tidal model. This may have fixed a memory leak in which some memory was not allocated, leading to very spurious behavior (e.g. evolve->dTime would be overwritten). | @@ -836,6 +836,7 @@ void VerifyRotationEqtideWarning(char cName1[],char cName2[],char cFile[],int iL
void VerifyLostEngEqtide(BODY *body,UPDATE *update, CONTROL *control,OPTIONS *options,int iBody) {
+/* XXX I think this old way doesn't depend on tidal model
if (control->Evolve.iEqtideModel == CPL) {
update[iBody].iaType[update[iBody].iLostEng][update[iBody].iLostEngEqtide] = 5;
update[iBody].iNumBodies[update[iBody].iLostEng][update[iBody].iLostEngEqtide] = 1;
@@ -859,6 +860,15 @@ void VerifyLostEngEqtide(BODY *body,UPDATE *update, CONTROL *control,OPTIONS *op
fprintf(stderr,"ERROR: Must choose CPL or CTL tidal model!\n");
exit(1);
}
+*/
+
+ update[iBody].iaType[update[iBody].iLostEng][update[iBody].iLostEngEqtide] = 5;
+ update[iBody].iNumBodies[update[iBody].iLostEng][update[iBody].iLostEngEqtide] = 1;
+ update[iBody].iaBody[update[iBody].iLostEng][update[iBody].iLostEngEqtide] = malloc(update[iBody].iNumBodies[update[iBody].iLostEng][update[iBody].iLostEngEqtide]*sizeof(int));
+ update[iBody].iaBody[update[iBody].iLostEng][update[iBody].iLostEngEqtide][0] = iBody;
+
+ update[iBody].pdLostEngEqtide = &update[iBody].daDerivProc[update[iBody].iLostEng][update[iBody].iLostEngEqtide];
+
}
void VerifyRotationEqtide(BODY *body,CONTROL *control, UPDATE *update, OPTIONS *options,char cFile[],int iBody) {
@@ -3011,9 +3021,11 @@ void PropsAuxOrbiterGeneral(BODY *body,int iBody) {
body[iBody].dEcc = sqrt(body[iBody].dEccSq);
// LongP is needed for Hecc and Kecc calculations
body[iBody].dLongP = atan2(body[iBody].dHecc,body[iBody].dKecc);
+ /*
printf("%e\n",body[iBody].dHecc);
printf("%e\n",body[iBody].dKecc);
fflush(stdout);
+ */
}
|
misc: bond_create is broken for custom dump print
Regression from
The aforementioned patch changed lb and mode to u32. But it does not make the
same change to custom_dump.c
Type: fix | @@ -663,9 +663,9 @@ static void *vl_api_bond_create_t_print
s = format (s, "mac-address %U ",
format_ethernet_address, mp->mac_address);
if (mp->mode)
- s = format (s, "mode %U ", format_bond_mode, mp->mode);
+ s = format (s, "mode %U ", format_bond_mode, ntohl (mp->mode));
if (mp->lb)
- s = format (s, "lb %U ", format_bond_load_balance, mp->lb);
+ s = format (s, "lb %U ", format_bond_load_balance, ntohl (mp->lb));
if (mp->numa_only)
s = format (s, "numa-only is set in lacp mode");
if (mp->id != ~0)
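
A minimal standalone sketch (separate from the VPP sources, for illustration only) of why the ntohl() conversions above are needed once mode and lb are 32-bit fields carried in network byte order:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* A u32 field as it arrives on the wire (network byte order), e.g. mode = 3. */
  uint32_t wire_mode = htonl(3);

  /* Printing the raw value is wrong on little-endian hosts (prints 50331648). */
  printf("raw        : %u\n", wire_mode);

  /* ntohl() restores the intended value regardless of host endianness. */
  printf("ntohl(raw) : %u\n", ntohl(wire_mode));
  return 0;
}
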
|
dev-tools/scipy: stay in 1.3 tree and bump to v1.3.3 | @@ -25,7 +25,7 @@ Requires: openblas-%{compiler_family}%{PROJ_DELIM}
%define pname scipy
Name: %{python_prefix}-%{pname}-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
-Version: 1.4.1
+Version: 1.3.3
Release: 1%{?dist}
Summary: Scientific Tools for Python
License: BSD-3-Clause
|
Swap reading y and z values | @@ -96,8 +96,8 @@ static int hmc5883_read(lua_State* L) {
platform_i2c_send_stop(hmc5883_i2c_id);
x = (int16_t) ((data[0] << 8) | data[1]);
- y = (int16_t) ((data[2] << 8) | data[3]);
- z = (int16_t) ((data[4] << 8) | data[5]);
+ z = (int16_t) ((data[2] << 8) | data[3]);
+ y = (int16_t) ((data[4] << 8) | data[5]);
lua_pushinteger(L, x);
lua_pushinteger(L, y);
|
Corrected N250SP chip | @@ -51,7 +51,7 @@ while [ -z "$SETUP_DONE" ]; do
SNAP_CFG="CONFIG_S121B=y\n""CONFIG_FPGACHIP=xcku115-flva1517-2-e\n"
;;
"N250SP" )
- SNAP_CFG="CONFIG_N250SP=y\n""CONFIG_FPGACHIP=xcku15p-ffva1156-2-e\n"
+ SNAP_CFG="CONFIG_N250SP=y\n""CONFIG_FPGACHIP=xcku15p-ffva1156-2-i\n"
;;
* )
echo "($0) ERROR: FPGACARD is not set correctly. Not generating config files."
|
nrf52820_microbit: HID LED initially off | @@ -203,6 +203,11 @@ static inline uint8_t get_led_gamma(uint8_t brightness) {
// Called in main_task() to init before USB and files are configured
static void prerun_board_config(void)
{
+ // HID_LED_DEF is on so the resting state of the orange LED after flashing is on
+ // but turn it off here so it's initially off, then stays off when on battery,
+ // and comes on only when USB enumerates
+ gpio_set_hid_led(GPIO_LED_OFF);
+
mb_version_t board_version = read_brd_rev_id();
set_board_id(board_version);
|
Set GDL90 traffic alert bit always when bearing/distance is not available. | @@ -259,6 +259,10 @@ func registerTrafficUpdate(ti TrafficInfo) {
func isTrafficAlertable(ti TrafficInfo) bool {
// Set alert bit if possible and traffic is within some threshold
// TODO: Could be more intelligent, taking into account headings etc.
+ if !ti.BearingDist_valid {
+ // If not able to calculate the distance to the target, let the alert bit be set always.
+ return true
+ }
if ti.BearingDist_valid &&
ti.Distance < 3704 { // 3704 meters, 2 nm.
return true
|
Fix test-callbacks when using libuv < 1.8.0 | @@ -38,18 +38,22 @@ return require('lib/tap')(function (test)
end)
test("luv_req_t: function", function (print, p, expect, uv)
- local fn = function(err, path)
- p(err, path)
+ local fn = function(err, stat)
+ assert(not err)
+ assert(stat)
end
- assert(uv.fs_realpath('.', expect(fn)))
+ assert(uv.fs_stat('.', expect(fn)))
end)
test("luv_req_t: callable table", function (print, p, expect, uv)
- local fn = function(self, err, path)
- p(self, err, path)
+ local callable
+ local fn = function(self, err, stat)
+ assert(self == callable)
+ assert(not err, err)
+ assert(stat)
end
- local callable = setmetatable({}, {__call=expect(fn)})
- assert(uv.fs_realpath('.', callable))
+ callable = setmetatable({}, {__call=expect(fn)})
+ assert(uv.fs_stat('.', callable))
end)
end)
\ No newline at end of file
|
TSCH: log parsing errors even when security is not enabled | @@ -763,6 +763,12 @@ PT_THREAD(tsch_rx_slot(struct pt *pt, struct rtimer *t))
packet_duration = TSCH_PACKET_DURATION(current_input->len);
+ if(!frame_valid) {
+ TSCH_LOG_ADD(tsch_log_message,
+ snprintf(log->message, sizeof(log->message),
+ "!failed to parse frame %u %u", header_len, current_input->len));
+ }
+
if(frame_valid) {
if(frame.fcf.frame_type != FRAME802154_DATAFRAME
&& frame.fcf.frame_type != FRAME802154_BEACONFRAME) {
@@ -786,11 +792,6 @@ PT_THREAD(tsch_rx_slot(struct pt *pt, struct rtimer *t))
"!failed to authenticate frame %u", current_input->len));
frame_valid = 0;
}
- } else {
- TSCH_LOG_ADD(tsch_log_message,
- snprintf(log->message, sizeof(log->message),
- "!failed to parse frame %u %u", header_len, current_input->len));
- frame_valid = 0;
}
#endif /* LLSEC802154_ENABLED */
|
Added success counter to sba too | @@ -35,6 +35,8 @@ typedef struct SBAData {
int last_lh;
int failures_to_reset;
int failures_to_reset_cntr;
+ int successes_to_reset;
+ int successes_to_reset_cntr;
} SBAData;
void metric_function(int j, int i, double *aj, double *xij, void *adata) {
@@ -292,9 +294,9 @@ static double run_sba_find_3d_structure(SBAData *d, survive_calibration_config o
SurvivePose soLocation = so->OutPose;
bool currentPositionValid = quatmagnitude(&soLocation.Rot[0]) != 0;
- if (d->failures_to_reset_cntr == 0 || currentPositionValid == 0) {
+ if (d->successes_to_reset_cntr == 0 || d->failures_to_reset_cntr == 0 || currentPositionValid == 0) {
SurviveContext *ctx = so->ctx;
- SV_INFO("Must rerun seed poser");
+ // SV_INFO("Must rerun seed poser");
const char *subposer = config_read_str(so->ctx->global_config_values, "SBASeedPoser", "PoserEPNP");
PoserCB driver = (PoserCB)GetDriver(subposer);
@@ -310,11 +312,12 @@ static double run_sba_find_3d_structure(SBAData *d, survive_calibration_config o
pdl->hdr = hdr;
if (locations.hasInfo == false) {
-
return -1;
} else if (locations.hasInfo) {
soLocation = locations.poses;
}
+
+ d->successes_to_reset_cntr = d->successes_to_reset;
} else {
SV_INFO("Not using a seed poser for SBA; results will likely be way off");
}
@@ -457,7 +460,9 @@ int PoserSBA(SurviveObject *so, PoserData *pd) {
so->PoserData = calloc(1, sizeof(SBAData));
SBAData *d = so->PoserData;
d->failures_to_reset_cntr = 0;
- d->failures_to_reset = 30;
+ d->failures_to_reset = 5;
+ d->successes_to_reset_cntr = 0;
+ d->successes_to_reset = 20;
}
SBAData *d = so->PoserData;
SurviveContext *ctx = so->ctx;
@@ -481,6 +486,9 @@ int PoserSBA(SurviveObject *so, PoserData *pd) {
if (error < 0) {
if (d->failures_to_reset_cntr > 0)
d->failures_to_reset_cntr--;
+ } else {
+ if (d->successes_to_reset_cntr > 0)
+ d->successes_to_reset_cntr--;
}
return 0;
|
docker: add tmpfs for alpine image build | @@ -44,6 +44,7 @@ RUN adduser -u ${USERID} -G wheel -D elektra
ARG PARALLEL=8
WORKDIR ${ELEKTRA_ROOT}
RUN --mount=type=tmpfs,target=/tmp \
+ --mount=type=tmpfs,target=/etc/kdb \
--mount=type=tmpfs,target=/root/.cache/elektra \
--mount=type=tmpfs,target=/root/.config \
mkdir build \
|
t1_trce: Fix remaining places where the 24 bit shift overflow happens
[extended tests] | @@ -670,7 +670,10 @@ static int ssl_print_random(BIO *bio, int indent,
if (*pmsglen < 32)
return 0;
- tm = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
+ tm = ((unsigned int)p[0] << 24)
+ | ((unsigned int)p[1] << 16)
+ | ((unsigned int)p[2] << 8)
+ | (unsigned int)p[3];
p += 4;
BIO_indent(bio, indent, 80);
BIO_puts(bio, "Random:\n");
@@ -875,8 +878,10 @@ static int ssl_print_extension(BIO *bio, int indent, int server,
break;
if (extlen != 4)
return 0;
- max_early_data = (ext[0] << 24) | (ext[1] << 16) | (ext[2] << 8)
- | ext[3];
+ max_early_data = ((unsigned int)ext[0] << 24)
+ | ((unsigned int)ext[1] << 16)
+ | ((unsigned int)ext[2] << 8)
+ | (unsigned int)ext[3];
BIO_indent(bio, indent + 2, 80);
BIO_printf(bio, "max_early_data=%u\n", max_early_data);
break;
@@ -1379,7 +1384,10 @@ static int ssl_print_ticket(BIO *bio, int indent, const SSL *ssl,
}
if (msglen < 4)
return 0;
- tick_life = (msg[0] << 24) | (msg[1] << 16) | (msg[2] << 8) | msg[3];
+ tick_life = ((unsigned int)msg[0] << 24)
+ | ((unsigned int)msg[1] << 16)
+ | ((unsigned int)msg[2] << 8)
+ | (unsigned int)msg[3];
msglen -= 4;
msg += 4;
BIO_indent(bio, indent + 2, 80);
@@ -1390,7 +1398,10 @@ static int ssl_print_ticket(BIO *bio, int indent, const SSL *ssl,
if (msglen < 4)
return 0;
ticket_age_add =
- (msg[0] << 24) | (msg[1] << 16) | (msg[2] << 8) | msg[3];
+ ((unsigned int)msg[0] << 24)
+ | ((unsigned int)msg[1] << 16)
+ | ((unsigned int)msg[2] << 8)
+ | (unsigned int)msg[3];
msglen -= 4;
msg += 4;
BIO_indent(bio, indent + 2, 80);
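
A small self-contained sketch (not part of the patch) of why the (unsigned int) casts above matter: without them each byte is promoted to signed int, and a value such as 0x80 shifted left by 24 overflows INT_MAX, which is undefined behaviour.

#include <stdio.h>

int main(void)
{
  unsigned char p[4] = {0x80, 0x00, 0x00, 0x01};

  /* Well defined: each byte is widened to unsigned int before shifting,
   * so the high bit of p[0] cannot overflow a signed int. */
  unsigned int tm = ((unsigned int)p[0] << 24)
                  | ((unsigned int)p[1] << 16)
                  | ((unsigned int)p[2] << 8)
                  | (unsigned int)p[3];

  printf("%u\n", tm); /* prints 2147483649 */
  return 0;
}
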
|
Modernize cups-poll.m4. | @@ -8937,22 +8937,31 @@ fi
ac_fn_c_check_func "$LINENO" "poll" "ac_cv_func_poll"
if test "x$ac_cv_func_poll" = xyes
then :
+
+
printf "%s\n" "#define HAVE_POLL 1" >>confdefs.h
+
fi
ac_fn_c_check_func "$LINENO" "epoll_create" "ac_cv_func_epoll_create"
if test "x$ac_cv_func_epoll_create" = xyes
then :
+
+
printf "%s\n" "#define HAVE_EPOLL 1" >>confdefs.h
+
fi
ac_fn_c_check_func "$LINENO" "kqueue" "ac_cv_func_kqueue"
if test "x$ac_cv_func_kqueue" = xyes
then :
+
+
printf "%s\n" "#define HAVE_KQUEUE 1" >>confdefs.h
+
fi
|
wuffs gen -version=0.3.0 | @@ -85,15 +85,15 @@ extern "C" {
// each major.minor branch, the commit count should increase monotonically.
//
// WUFFS_VERSION was overridden by "wuffs gen -version" based on revision
-// c27250f52fcfb936d4f0410d1199fe24c04b9713 committed on 2022-10-28.
+// c698d81b4d6868f3c5e26d2d7c3b39fe97209ec0 committed on 2023-01-26.
#define WUFFS_VERSION 0x000030000
#define WUFFS_VERSION_MAJOR 0
#define WUFFS_VERSION_MINOR 3
#define WUFFS_VERSION_PATCH 0
-#define WUFFS_VERSION_PRE_RELEASE_LABEL "rc.3"
-#define WUFFS_VERSION_BUILD_METADATA_COMMIT_COUNT 3369
-#define WUFFS_VERSION_BUILD_METADATA_COMMIT_DATE 20221028
-#define WUFFS_VERSION_STRING "0.3.0-rc.3+3369.20221028"
+#define WUFFS_VERSION_PRE_RELEASE_LABEL ""
+#define WUFFS_VERSION_BUILD_METADATA_COMMIT_COUNT 3374
+#define WUFFS_VERSION_BUILD_METADATA_COMMIT_DATE 20230126
+#define WUFFS_VERSION_STRING "0.3.0+3374.20230126"
// ---------------- Configuration
|
Mask exceptions in Lua.run
Lua.run is the default entry point and should be easy to use.
Asynchronous exceptions pose a serious footgun in HsLua. Masking
exceptions avoids some issues which lead to errors like `too many
hs_exit()s`. | @@ -36,11 +36,12 @@ import qualified Foreign.Lua.Core as Lua
import qualified Foreign.Lua.Types as Lua
import qualified Foreign.Lua.Utf8 as Utf8
--- | Run lua computation using the default HsLua state as starting point. Raised
--- exceptions are passed through; error handling is the responsibility of the
--- caller.
+-- | Run Lua computation using the default HsLua state as starting point.
+-- Exceptions are masked, thus avoiding some issues when using multiple threads.
+-- All exceptions are passed through; error handling is the responsibility of
+-- the caller.
run :: Lua a -> IO a
-run = (Lua.newstate `bracket` Lua.close) . flip Lua.runWith
+run = (Lua.newstate `bracket` Lua.close) . flip Lua.runWith . Catch.mask_
-- | Run the given Lua computation; exceptions raised in haskell code are
-- caught, but other exceptions (user exceptions raised in haskell, unchecked
|
remove android:targetSdkVersion from AndroidManifest since it conflicts with the project.properties API target.
I don't know why well..
Here the discussion: | android:installLocation="preferExternal">
<!-- Android 4.4.2 -->
- <uses-sdk android:minSdkVersion="19" android:targetSdkVersion="19" />
+ <uses-sdk android:minSdkVersion="19" />
<!-- OpenGL ES 2.0 -->
<uses-feature android:glEsVersion="0x00020000" />
|
QA: Check driver installation status
When you have a large project, uart_driver_install() may fail. For QA we should always check the status of the driver first. | @@ -22,6 +22,11 @@ static const char* TAG = "uart_select_example";
static void uart_select_task(void *arg)
{
+ if (uart_driver_install(UART_NUM_0, 2*1024, 0, 0, NULL, 0) != ESP_OK) {
+ ESP_LOGE(TAG, "Driver installation failed");
+ vTaskDelete(NULL);
+ }
+
uart_config_t uart_config = {
.baud_rate = 115200,
.data_bits = UART_DATA_8_BITS,
@@ -30,7 +35,7 @@ static void uart_select_task(void *arg)
.flow_ctrl = UART_HW_FLOWCTRL_DISABLE,
.source_clk = UART_SCLK_DEFAULT,
};
- uart_driver_install(UART_NUM_0, 2*1024, 0, 0, NULL, 0);
+
uart_param_config(UART_NUM_0, &uart_config);
while (1) {
|
Add logic data type to perf_event_count port
Output port perf_event_count has no data type defined and will default to
'wire logic', which can't be driven using procedural assignments. | @@ -29,7 +29,7 @@ module performance_counters
input reset,
input [NUM_EVENTS - 1:0] perf_events,
input [NUM_COUNTERS - 1:0][EVENT_IDX_WIDTH - 1:0] perf_event_select,
- output [NUM_COUNTERS - 1:0][63:0] perf_event_count);
+ output logic[NUM_COUNTERS - 1:0][63:0] perf_event_count);
always_ff @(posedge clk, posedge reset)
begin : update
|
Add missing members to empty_recursive. | @@ -133,7 +133,10 @@ const s_damage_recursive empty_recursive = { .force = 0,
.index = 0,
.mode = 0,
.rate = 0,
- .time = 0};
+ .tick = 0,
+ .time = 0,
+ .owner = NULL,
+ .next = NULL};
// unknockdown attack
const s_collision_attack emptyattack =
|
mm_heap/mm_addfreechunk : sorting the free nodelist
in ascending order of physical address when the size is the same.
This can prevent higher address memory from being allocated
when lower address memory is available in the free nodelist.
Therefore, memory fragmentation can be mitigated. | @@ -92,6 +92,12 @@ void mm_addfreechunk(FAR struct mm_heap_s *heap, FAR struct mm_freenode_s *node)
for (prev = &heap->mm_nodelist[ndx], next = heap->mm_nodelist[ndx].flink; next && next->size && next->size < node->size; prev = next, next = next->flink) ;
+ /* Mitigate memory fragmentation
+ * by sorting the list in ascending order of physical address
+ * when the size is the same.
+ */
+ for ( ; next && next->size == node->size && next < node; prev = next, next = next->flink) ;
+
/* Does it go in mid next or at the end? */
prev->flink = node;
|
cond: Added missing resource_puts | @@ -93,6 +93,9 @@ int proc_condSignal(process_t *process, unsigned int c)
}
proc_threadWakeupYield(&rc->waitq);
+
+ resource_put(process, rc);
+
return err;
}
@@ -111,6 +114,9 @@ int proc_condBroadcast(process_t *process, unsigned int c)
}
proc_threadBroadcastYield(&rc->waitq);
+
+ resource_put(process, rc);
+
return err;
}
|
fix travis script to avoid idempotency problem | @@ -11,15 +11,16 @@ before_install:
before_script:
- "export CRUBY_VERSION=$(ruby -e'puts `grep cruby_version .mrubycconfig`.sub(\"cruby_version: \", \"\")')"
- export MRUBY_VERSION=$(grep mruby .ruby-version)
- - if [ ! -e src/hal ]; then ln -s hal_posix src/hal; fi
- - if [ ! -d ~/.rbenv; ]; then git clone https://github.com/rbenv/rbenv.git ~/.rbenv; fi
- - if [ ! -d ~/.rbenv/plugins/ruby-build; ]; then git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build; fi
+ - if [ ! -e ~/.rbenv/bin/rbenv ]; then git clone https://github.com/rbenv/rbenv.git ~/.rbenv; fi
+ - if [ ! -e ~/.rbenv/plugins/ruby-build/bin/ruby-build ]; then git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build; fi
+ - cd ~/.rbenv/plugins/ruby-build && git pull && cd -
- echo 'export PATH="$HOME/.rbenv/shims:$HOME/.rbenv/bin:$PATH"' >> ~/.bash_profile
- source ~/.bash_profile
+ - echo $PATH
- rbenv init -
- - rbenv install --skip-existing $CRUBY_VERSION
- rbenv install --skip-existing $MRUBY_VERSION
- - RBENV_VERSION=$CRUBY_VERSION gem install bundler:2.0.1
+ - rbenv install --skip-existing $CRUBY_VERSION
+ - RBENV_VERSION=$CRUBY_VERSION gem install bundler
- RBENV_VERSION=$CRUBY_VERSION bundle install
script:
|
Explain postmaster's treatment of SIGURG.
Add a few words of comment to explain why SIGURG doesn't follow the
dummy_handler pattern used for SIGUSR2, since that might otherwise
appear to be a bug.
Discussion: | @@ -660,6 +660,11 @@ PostmasterMain(int argc, char *argv[])
pqsignal_pm(SIGCHLD, reaper); /* handle child termination */
#ifdef SIGURG
+ /*
+ * Ignore SIGURG for now. Child processes may change this (see
+ * InitializeLatchSupport), but they will not receive any such signals
+ * until they wait on a latch.
+ */
pqsignal_pm(SIGURG, SIG_IGN); /* ignored */
#endif
|
doc: add rdmsr/wrmsr to the "ACRN Shell Commands" documentation
Add 'rdmsr' and 'wrmsr' to the "ACRN Shell Commands" documentation. Both are
already described via the interactive help command but not (yet) included in our
on-line documentation. | @@ -44,3 +44,9 @@ The ACRN hypervisor shell supports the following commands:
- Display the CPUID leaf [subleaf], in hexadecimal
* - reboot
- Trigger a system reboot (immediately)
+ * - rdmsr [-p<pcpu_id>] <msr_index>
+ - Read the Model-Specific Register (MSR) at index ``msr_index`` (in
+ hexadecimal) for CPU ID ``pcpu_id``
+ * - wrmsr [-p<pcpu_id>] <msr_index> <value>
+ - Write ``value`` (in hexadecimal) to the Model-Specific Register (MSR) at
+ index ``msr_index`` (in hexadecimal) for CPU ID ``pcpu_id``
|
acrn-config: correct epc_section base/size value
Current epc_section base/size value missed the vm id so the configure item is not
working, the patch will fix this issue.
Acked-by: Victor Sun | @@ -150,8 +150,8 @@ def is_need_epc(epc_section, i, config):
return
else:
print("\t\t.epc= {", file=config)
- print('\t\t\t.base = "{0}",'.format(epc_section.base), file=config)
- print('\t\t\t.size = {0},'.format(epc_section.size), file=config)
+ print('\t\t\t.base = {0},'.format(epc_section.base[i]), file=config)
+ print('\t\t\t.size = {0},'.format(epc_section.size[i]), file=config)
print("\t\t},", file=config)
|
Consider review comments | @@ -9858,7 +9858,7 @@ Sensor *DeRestPluginPrivate::getSensorNodeForAddress(const deCONZ::Address &addr
{
for (Sensor &sensor: sensors)
{
- if (sensor.deletedState() != Sensor::StateNormal || !sensor.node()) { continue; }
+ if (sensor.deletedState() != Sensor::StateNormal) { continue; }
if (!isSameAddress(sensor.address(), addr)) { continue; }
return &sensor;
@@ -9874,8 +9874,8 @@ Sensor *DeRestPluginPrivate::getSensorNodeForAddressAndEndpoint(const deCONZ::Ad
{
if (sensor.deletedState() != Sensor::StateNormal || !sensor.node()) { continue; }
if (sensor.fingerPrint().endpoint != ep) { continue; }
- if (!isSameAddress(sensor.address(), addr)) { continue; }
if (sensor.type() != type) { continue; }
+ if (!isSameAddress(sensor.address(), addr)) { continue; }
return &sensor;
}
|
Fix timeline column drawing | #include <fastlock.h>
#include <guisupp.h>
+#include <math.h>
#include <commoncontrols.h>
#include <shellapi.h>
#include <uxtheme.h>
@@ -2478,8 +2479,18 @@ VOID PhCustomDrawTreeTimeLine(
createTime.QuadPart = systemTime.QuadPart - CreateTime->QuadPart;
}
+ // Note: Time is 8 bytes, Float is 4 bytes. Use DOUBLE type at some stage. (dmex)
percent = (FLOAT)createTime.QuadPart / (FLOAT)startTime.QuadPart * 100.f;
+ if (!(Flags & PH_DRAW_TIMELINE_OVERFLOW))
+ {
+ // Prevent overflow from changing the system time to an earlier date. (dmex)
+ if (fabsf(percent) > 100.f)
+ percent = 100.f;
+ if (fabsf(percent) < 0.0005f)
+ percent = 0.f;
+ }
+
if (Flags & PH_DRAW_TIMELINE_DARKTHEME)
FillRect(Hdc, &rect, PhMenuBackgroundBrush);
else
@@ -2511,9 +2522,15 @@ VOID PhCustomDrawTreeTimeLine(
previousBrush = SelectBrush(Hdc, GetStockBrush(DC_BRUSH));
}
+ if (Flags & PH_DRAW_TIMELINE_OVERFLOW)
+ {
// Prevent overflow from changing the system time to an earlier date. (dmex)
- if (percent > 100.f) percent = 100.f;
- // TODO: This still loses a small fraction of precision compared to PE here causing a 1px difference. (dmex)
+ if (fabsf(percent) > 100.f)
+ percent = 100.f;
+ if (fabsf(percent) < 0.0005f)
+ percent = 0.f;
+ }
+
//rect.right = ((LONG)(rect.left + ((rect.right - rect.left) * (LONG)percent) / 100));
//rect.left = ((LONG)(rect.right + ((rect.left - rect.right) * (LONG)percent) / 100));
rect.left = (LONG)(rect.right + ((rect.left - rect.right) * percent / 100));
|
Fix AppVeyor collecting artifacts only when there is a DFU package | If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
#upload package only if this not a PR
if(!$env:APPVEYOR_PULL_REQUEST_NUMBER)
If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
#upload package only if this not a PR
if(!$env:APPVEYOR_PULL_REQUEST_NUMBER)
If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
# upload package only if this not a PR
# and this is 'develop' (not develop-something)
If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
# upload package only if this not a PR
# and this is 'develop' (not develop-something)
If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
#upload package only if this not a PR
if(!$env:APPVEYOR_PULL_REQUEST_NUMBER)
If($env:NEEDS_DFU -eq 'True')
{
Compress-7Zip -Path . -Filter "*.dfu" -ArchiveFileName $env:BOARD_NAME-$env:GitVersion_SemVer.zip -DisableRecursion -Append
- Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
}
+ Push-AppveyorArtifact $env:BOARD_NAME-$env:GitVersion_SemVer.zip
#upload package only if this not a PR
if(!$env:APPVEYOR_PULL_REQUEST_NUMBER)
|
web-ui: remove debug output | @@ -24,7 +24,6 @@ export const validateRange = (rangeStr, num) => {
if (res) return res
const [ , first, second ] = range.match(RANGE_REGEX)
const [ min, max ] = getMinMax(Number(first), Number(second))
- console.log('valida!!!', num, min, max)
if ((num >= min) && (num <= max)) {
return true
}
|
ixfr-out, remove debug statements. | @@ -933,8 +933,6 @@ static void ixfr_create_finishup(struct ixfr_create* ixfrcr,
PACKAGE_VERSION, wiredname2str(ixfrcr->zone_name),
(unsigned)ixfrcr->old_serial, (unsigned)ixfrcr->new_serial,
(unsigned)ixfr_data_size(store->data), nowstr);
- log_msg(LOG_ERR, "done %s", log_buf);
- exit(1);
if(append_mem) {
ixfr_store_finish(store, nsd, log_buf, 0, 0, 0, 0);
} else {
|
Added support for opening from clicking gbsproj on Windows (need to manually associate file extension to application right now) | @@ -197,7 +197,10 @@ app.on("ready", async () => {
await installExtension(REDUX_DEVTOOLS);
}
- if (splashWindow === null && mainWindow === null) {
+ const lastArg = process.argv[process.argv.length - 1];
+ if (process.argv.length >= 2 && lastArg.indexOf("-") !== 0) {
+ openProject(lastArg);
+ } else if (splashWindow === null && mainWindow === null) {
createSplash();
}
});
|
Output valid HTML on built in webpage | #define MAX_RESP 0x800
char resp[MAX_RESP];
-char staticpage[] = "HTTP/1.0 200 OK\nContent-Type: text/html\n\n"
-"<pre>This is your comma.ai panda<br/><br/>"
-"It's open source. Find the code <a href=\"https://github.com/commaai/panda\">here</a><br/>"
-"Designed to work with our dashcam, <a href=\"http://chffr.comma.ai\">chffr</a><br/>";
+char pageheader[] = "HTTP/1.0 200 OK\nContent-Type: text/html\n\n"
+"<!DOCTYPE html>\n"
+"<html>\n"
+"<head>\n"
+"<title>Panda</title>\n"
+"</head>\n"
+"<body>\n"
+"<pre>This is your comma.ai panda<br/><br/>\n"
+"It's open source. Find the code <a href=\"https://github.com/commaai/panda\">here</a><br/>\n"
+"Designed to work with our dashcam, <a href=\"http://chffr.comma.ai\">chffr</a><br/>\n";
+
+char pagefooter[] = "</pre>\n"
+"</body>\n"
+"</html>\n";
static struct espconn web_conn;
static esp_tcp web_proto;
@@ -175,7 +185,7 @@ static void ICACHE_FLASH_ATTR web_rx_cb(void *arg, char *data, uint16_t len) {
if (memcmp(data, "GET / ", 6) == 0) {
memset(resp, 0, MAX_RESP);
- strcpy(resp, staticpage);
+ strcpy(resp, pageheader);
ets_strcat(resp, "<br/>ssid: ");
ets_strcat(resp, ssid);
ets_strcat(resp, "<br/>");
@@ -198,6 +208,7 @@ static void ICACHE_FLASH_ATTR web_rx_cb(void *arg, char *data, uint16_t len) {
"<button onclick=\"var xhr = new XMLHttpRequest(); xhr.open('GET', 'client'); xhr.send()\" type='button'>Client</button>"
"<button onclick=\"var xhr = new XMLHttpRequest(); xhr.open('GET', 'cdp'); xhr.send()\" type='button'>CDP</button>"
"<button onclick=\"var xhr = new XMLHttpRequest(); xhr.open('GET', 'dcp'); xhr.send()\" type='button'>DCP</button>");
+ ets_strcat(resp, pagefooter);
espconn_send_string(&web_conn, resp);
espconn_disconnect(conn);
|
Added LHB prefix to base station ids | @@ -117,7 +117,7 @@ struct SurviveSimpleContext *survive_simple_init(int argc, char *const *argv) {
obj->actx = actx;
obj->has_update = ctx->bsd[i].PositionSet;
snprintf(obj->name, 32, "LH%" PRIdPTR, i);
- snprintf(obj->data.lh.serial_number, 16, "%X", ctx->bsd[i].BaseStationID);
+ snprintf(obj->data.lh.serial_number, 16, "LHB-%X", ctx->bsd[i].BaseStationID);
}
for (; i < object_ct; i++) {
struct SurviveSimpleObject *obj = &actx->objects[i];
|
Remove invalid page checksum test.
All zero pages should not have checksums. Not only is this test invalid but it will not work with the stock page checksum implementation in PostgreSQL, which checks for zero pages. Since we will be using that code verbatim soon this test needs to go. | @@ -27,10 +27,6 @@ testRun(void)
// *****************************************************************************************************************************
if (testBegin("pgPageChecksum()"))
{
- // Checksum for 0x00 fill, page 0x00
- memset(testPage(0), 0, PG_PAGE_SIZE_DEFAULT);
- TEST_RESULT_U16_HEX(pgPageChecksum(testPage(0), 0), 0xC6AA, "check for 0x00 filled page, block 0");
-
// Checksum for 0xFF fill, page 0x00
memset(testPage(0), 0xFF, PG_PAGE_SIZE_DEFAULT);
TEST_RESULT_U16_HEX(pgPageChecksum(testPage(0), 0), 0x0E1C, "check for 0xFF filled page, block 0");
|
Add Aduro Light commands to button_maps.schema.json
Needed for PR | "ADUROLIGHT"
]
},
+ "adurolight-commands": {
+ "enum": [ "ATTRIBUTE_REPORT", "COMMAND_0", "COMMAND_20", ]
+ },
"basic-commands": {
"enum": [ "0x02" ]
},
},
{
"anyOf": [
+ { "$ref": "#/definitions/adurolight-commands" },
{ "$ref": "#/definitions/basic-commands" },
{ "$ref": "#/definitions/onoff-commands" },
{ "$ref": "#/definitions/level-commands" },
|
OcBootManagementLib: Report unsupported etc. custom entries | @@ -1679,7 +1679,14 @@ AddFileSystemEntryForCustom (
FALSE
);
- if (!EFI_ERROR (Status)) {
+ if (EFI_ERROR (Status)) {
+ DEBUG ((
+ DEBUG_WARN,
+ "OCB: Failed to add custom entry %a - %r",
+ BootContext->PickerContext->CustomEntries[Index].Name,
+ Status
+ ));
+ } else {
ReturnStatus = EFI_SUCCESS;
}
}
|
arch/xtensa/esp32: Propagate RTC IRQ status register to lower levels | @@ -63,6 +63,7 @@ enum rtcio_lh_out_mode_e
#ifdef CONFIG_ESP32_RTCIO_IRQ
static int g_rtcio_cpuint;
+static uint32_t last_status;
#endif
static const uint32_t rtc_gpio_to_addr[] =
@@ -114,12 +115,11 @@ static inline bool is_valid_rtc_gpio(uint32_t rtcio_num)
* Name: rtcio_dispatch
*
* Description:
- * Second level dispatch for RTC interrupt handling.
+ * Second level dispatch for the RTC interrupt.
*
* Input Parameters:
* irq - The IRQ number;
- * status - The interrupt status register;
- * context - The interrupt context.
+ * reg_status - Pointer to a copy of the interrupt status register.
*
* Returned Value:
* None.
@@ -127,8 +127,9 @@ static inline bool is_valid_rtc_gpio(uint32_t rtcio_num)
****************************************************************************/
#ifdef CONFIG_ESP32_RTCIO_IRQ
-static void rtcio_dispatch(int irq, uint32_t status, uint32_t *context)
+static void rtcio_dispatch(int irq, uint32_t *reg_status)
{
+ uint32_t status = *reg_status;
uint32_t mask;
int i;
@@ -141,9 +142,11 @@ static void rtcio_dispatch(int irq, uint32_t status, uint32_t *context)
mask = (UINT32_C(1) << i);
if ((status & mask) != 0)
{
- /* Yes... perform the second level dispatch */
+ /* Yes... perform the second level dispatch. The IRQ context will
+ * contain the contents of the status register.
+ */
- irq_dispatch(irq + i, context);
+ irq_dispatch(irq + i, (void *)reg_status);
/* Clear the bit in the status so that we might execute this loop
* sooner.
@@ -174,16 +177,14 @@ static void rtcio_dispatch(int irq, uint32_t status, uint32_t *context)
#ifdef CONFIG_ESP32_RTCIO_IRQ
static int rtcio_interrupt(int irq, void *context, void *arg)
{
- uint32_t status;
-
/* Read and clear the lower RTC interrupt status */
- status = getreg32(RTC_CNTL_INT_ST_REG);
- putreg32(status, RTC_CNTL_INT_CLR_REG);
+ last_status = getreg32(RTC_CNTL_INT_ST_REG);
+ putreg32(last_status, RTC_CNTL_INT_CLR_REG);
/* Dispatch pending interrupts in the RTC status register */
- rtcio_dispatch(ESP32_FIRST_RTCIOIRQ_PERIPH, status, (uint32_t *)context);
+ rtcio_dispatch(ESP32_FIRST_RTCIOIRQ_PERIPH, &last_status);
return OK;
}
|
Fix SERVERPORT variable (Issue | @@ -2565,6 +2565,10 @@ load_system(const char *conf) /* I - Configuration file */
*ptr++ = '\0';
port = atoi(ptr);
}
+ else if (DefaultPort)
+ {
+ port = DefaultPort;
+ }
else
{
#ifdef _WIN32
@@ -2574,6 +2578,9 @@ load_system(const char *conf) /* I - Configuration file */
#endif /* _WIN32 */
}
+ if (!DefaultPort)
+ DefaultPort = port;
+
if (!serverCreateListeners(host, port))
{
status = 0;
|
Checking __STDC_VERSION__ rather than __STRICT_ANSI__
`__STRICT_ANSI__` is a gnuish flag macro that indicates if `-ansi`
was given on the command line. To check the C version, it's better
to check the macro `__STDC_VERSION__`. | @@ -56,18 +56,31 @@ static void log_with_prefix(const char *prog, const char *fmt, va_list ap)
BIO_free(pre);
}
+/*
+ * Unfortunately, C before C99 does not define va_copy, so we must
+ * check if it can be assumed to be present. We do that with an internal
+ * antifeature macro.
+ * C versions since C94 define __STDC_VERSION__, so it's enough to
+ * check its existence and value.
+ */
+#undef OSSL_NO_C99
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ + 0 < 199900L
+# define OSSL_NO_C99
+#endif
+
void trace_log_message(int category,
const char *prog, int level, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
-#ifdef __STRICT_ANSI__ /* unfortuantely, ANSI does not define va_copy */
+
+#ifdef OSSL_NO_C99
if (verbosity >= level)
category = -1; /* disabling trace output in addition to logging */
#endif
if (category >= 0 && OSSL_trace_enabled(category)) {
BIO *out = OSSL_trace_begin(category);
-#ifndef __STRICT_ANSI__
+#ifndef OSSL_NO_C99
va_list ap_copy;
va_copy(ap_copy, ap);
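
The same C99 feature test can be exercised on its own; a tiny sketch (not from the patch) that reports whether the compiler claims C99 and therefore provides va_copy:

#include <stdio.h>

/* Same idea as OSSL_NO_C99 above: C94 and later define __STDC_VERSION__,
 * so its absence, or a value below 199900L, means va_copy may be missing. */
#if !defined(__STDC_VERSION__) || __STDC_VERSION__ + 0 < 199900L
# define NO_C99 1
#else
# define NO_C99 0
#endif

int main(void)
{
  puts(NO_C99 ? "pre-C99 compiler: va_copy not guaranteed"
              : "C99 or later: va_copy is available");
  return 0;
}
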
|
Docs: removed reference to distributed clause since it's not relevant for GP 6.x | master. Additionally, it is often the case that the master host has insufficient disk space
to save a backup of an entire distributed Greenplum database. </p>
<p>The <codeph>pg_restore</codeph> utility requires compressed dump files created by
- <codeph>pg_dump</codeph> or <codeph>pg_dumpall</codeph>. Before starting the restore, you
- should modify the <codeph>CREATE TABLE</codeph> statements in the dump files to include the
- Greenplum <codeph>DISTRIBUTED</codeph> clause. If you do not include the
- <codeph>DISTRIBUTED</codeph> clause, Greenplum Database assigns default values, which may
- not be optimal. For details, see <codeph>CREATE TABLE</codeph> in the <i>Greenplum Database
- Reference Guide</i>.</p>
- <p>To perform a non-parallel restore using parallel backup files, you can copy the backup
- files from each segment host to the master host, and then load them through the master.</p>
+ <codeph>pg_dump</codeph> or <codeph>pg_dumpall</codeph>. To perform a non-parallel restore
+ using parallel backup files, you can copy the backup files from each segment host to the
+ master host, and then load them through the master.</p>
<fig id="kk156418">
<title>Non-parallel Restore Using Parallel Backup Files</title>
<image href="../graphics/nonpar_restore.jpg" placement="break" width="390px"
|
[awm] compositor_queue_rect_to_redraw no-ops if we have too many update rects this frame | @@ -18,6 +18,15 @@ array_t* _g_screen_rects_to_update_this_cycle = NULL;
These may include portions of windows, the desktop background, etc.
*/
void compositor_queue_rect_to_redraw(Rect update_rect) {
+ if (update_rect.size.width == 0 || update_rect.size.height == 0) {
+ // TODO(PT): Investigate how this happens? Trigger by quickly resizing a window to flood events
+ //printf("Dropping update rect of zero height or width\n");
+ return;
+ }
+ if (_g_screen_rects_to_update_this_cycle->size + 1 >= _g_screen_rects_to_update_this_cycle->max_size) {
+ //printf("Dropping update rect because we've hit our max updates this cycle: (%d, %d), (%d, %d)\n", rect_min_x(update_rect), rect_min_y(update_rect), update_rect.size.width, update_rect.size.height);
+ return;
+ }
Rect* r = calloc(1, sizeof(Rect));
r->origin.x = update_rect.origin.x;
r->origin.y = update_rect.origin.y;
@@ -39,7 +48,7 @@ void compositor_queue_rect_difference_to_redraw(Rect bg, Rect fg) {
}
void compositor_init(void) {
- _g_screen_rects_to_update_this_cycle = array_create(128);
+ _g_screen_rects_to_update_this_cycle = array_create(256);
}
void compositor_render_frame(void) {
@@ -56,7 +65,7 @@ void compositor_render_frame(void) {
Rect* rp = array_lookup(_g_screen_rects_to_update_this_cycle, i);
Rect r = *rp;
- array_t* unobscured_region = array_create(128);
+ array_t* unobscured_region = array_create(256);
rect_add(unobscured_region, r);
// Handle the parts of the dirty region that are obscured by desktop views
|
vulkan: add PTHREAD_CHECK() to all pthread_*() calls | @@ -1242,8 +1242,8 @@ pocl_vulkan_init (unsigned j, cl_device_id dev, const char *parameters)
d->work_queue = NULL;
- pthread_create (&d->driver_pthread_id, NULL, pocl_vulkan_driver_pthread,
- dev);
+ PTHREAD_CHECK (pthread_create (&d->driver_pthread_id, NULL,
+ pocl_vulkan_driver_pthread, dev));
return CL_SUCCESS;
}
@@ -1827,8 +1827,7 @@ pocl_vulkan_init_queue (cl_device_id dev, cl_command_queue queue)
queue->data
= pocl_aligned_malloc (HOST_CPU_CACHELINE_SIZE, sizeof (pthread_cond_t));
pthread_cond_t *cond = (pthread_cond_t *)queue->data;
- int r = pthread_cond_init (cond, NULL);
- assert (r == 0);
+ PTHREAD_CHECK (pthread_cond_init (cond, NULL));
return CL_SUCCESS;
}
@@ -1836,8 +1835,7 @@ int
pocl_vulkan_free_queue (cl_device_id dev, cl_command_queue queue)
{
pthread_cond_t *cond = (pthread_cond_t *)queue->data;
- int r = pthread_cond_destroy (cond);
- assert (r == 0);
+ PTHREAD_CHECK (pthread_cond_destroy (cond));
POCL_MEM_FREE (queue->data);
return CL_SUCCESS;
}
@@ -1850,8 +1848,7 @@ pocl_vulkan_notify_cmdq_finished (cl_command_queue cq)
* user threads waiting on the same command queue
* in pthread_scheduler_wait_cq(). */
pthread_cond_t *cq_cond = (pthread_cond_t *)cq->data;
- int r = pthread_cond_broadcast (cq_cond);
- assert (r == 0);
+ PTHREAD_CHECK (pthread_cond_broadcast (cq_cond));
}
void
@@ -1884,8 +1881,7 @@ pocl_vulkan_join (cl_device_id device, cl_command_queue cq)
}
else
{
- int r = pthread_cond_wait (cq_cond, &cq->pocl_lock);
- assert (r == 0);
+ PTHREAD_CHECK (pthread_cond_wait (cq_cond, &cq->pocl_lock));
}
}
return;
@@ -2817,7 +2813,7 @@ RETRY:
if ((cmd == NULL) && (do_exit == 0))
{
- pthread_cond_wait (&d->wakeup_cond, &d->wq_lock_fast);
+ PTHREAD_CHECK (pthread_cond_wait (&d->wakeup_cond, &d->wq_lock_fast));
/* since cond_wait returns with locked mutex, might as well retry */
goto RETRY;
}
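
PTHREAD_CHECK itself is not shown in the diff; a plausible definition (an assumption for illustration, not necessarily pocl's actual macro) stores the return value and asserts it is 0. Keeping the call outside the assert means the pthread call still executes if NDEBUG disables assertions.

#include <assert.h>
#include <pthread.h>

/* Hypothetical helper named after the macro used in the diff; the real
 * pocl implementation may report errors differently. */
#define PTHREAD_CHECK(call)       \
  do                              \
    {                             \
      int _pc_err = (call);       \
      (void)_pc_err;              \
      assert (_pc_err == 0);      \
    }                             \
  while (0)

int main(void)
{
  pthread_mutex_t lock;

  /* Example use, mirroring the pattern applied throughout the patch. */
  PTHREAD_CHECK (pthread_mutex_init (&lock, NULL));
  PTHREAD_CHECK (pthread_mutex_destroy (&lock));
  return 0;
}
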
|
Partial revert of [db3c164c]: changes break armcc build | #endif
extern void (* const __isr_vector[])(void);
+
+/*
+* Note: When compiling on ARM Keil Toolchain only.
+* If the SystemCoreClock is left uninitialized, post Scatter load
+* the clock will default to system reset value(48MHz)
+*/
uint32_t SystemCoreClock = RO_FREQ/2;
void SystemCoreClockUpdate(void)
@@ -278,22 +284,6 @@ __weak void SystemInit(void)
/* Perform an initial trim of the internal ring oscillator */
CLKMAN_TrimRO();
-#if !defined (__CC_ARM) // Prevent Keil tools from calling these functions until post scatter load
- SystemCoreClockUpdate();
- Board_Init();
-#endif /* ! __CC_ARM */
-
-}
-
-#if defined ( __CC_ARM )
-/* Function called post memory initialization in the Keil Toolchain, which
- * we are using to call the system core clock upddate and board initialization
- * to prevent data corruption if they are called from SystemInit. */
-extern void $Super$$__main_after_scatterload(void);
-void $Sub$$__main_after_scatterload(void)
-{
SystemCoreClockUpdate();
Board_Init();
- $Super$$__main_after_scatterload();
}
-#endif /* __CC_ARM */
|
Update hold-tap.md
The bindings for the toggle-layer-on-tap/momentary-layer-on-hold example code were backwards, resulting in toggle-on-hold. This also made momentary unachievable. | @@ -294,7 +294,7 @@ This hold-tap example implements a [toggle-layer](layers.md/#toggle-layer) when
#binding-cells = <2>;
flavor = "hold-preferred";
tapping-term-ms = <200>;
- bindings = <&tog>, <&mo>;
+ bindings = <&mo>, <&tog>;
};
};
|
Update Telepo.cs
the generator doesn't like generics i guess | @@ -20,7 +20,7 @@ namespace FFXIVClientStructs.FFXIV.Client.Game.UI {
public partial bool Teleport(uint aetheryteID, byte subIndex);
[MemberFunction("E8 ?? ?? ?? ?? 48 8B 48 08 48 2B 08")]
- public partial Vector<TeleportInfo>* UpdateAetheryteList();
+ public partial void* UpdateAetheryteList();
}
[StructLayout(LayoutKind.Explicit, Size = 0x14)]
|
add edge to helper menu | @@ -25,7 +25,8 @@ be set to sockets with unix:///var/run/mysock, tcp://hostname:port, udp://hostna
scope run -- perl -e 'print "foo\n"'
scope run --payloads -- nc -lp 10001
scope run -- curl https://wttr.in/94105
-scope run -c tcp://127.0.0.1:10091 -- curl https://wttr.in/94105`,
+scope run -c tcp://127.0.0.1:10091 -- curl https://wttr.in/94105
+scope run -c edge -- top`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
internal.InitConfig()
|
chat: copy shortcodes | @@ -148,6 +148,27 @@ export class SettingsScreen extends Component {
</div>
<div className="w-100 pl3 mt4 cf">
<h2 className="f8 pb2">Chat Settings</h2>
+ <div className="w-100 mt3">
+ <p className="f8 mt3 lh-copy">Share</p>
+ <p className="f9 gray2 mb4">Share a shortcode to join this chat</p>
+ <div className="relative w-100 flex"
+ style={{ maxWidth: "29rem" }}>
+ <input
+ className="f8 ba b--gray3 b--gray2-d bg-gray0-d white-d pa3 db w-100 flex-auto mr3"
+ disabled={true}
+ value={props.station.substr(1)}
+ />
+ <span className="f8 pointer green2 absolute pa3 inter"
+ style={{right: 12, top: 1}}
+ ref="copy"
+ onClick={() => {
+ navigator.clipboard.writeText(props.station.substr(1));
+ this.refs.copy.innerText = "Copied";
+ }}>
+ Copy
+ </span>
+ </div>
+ </div>
{this.renderDelete()}
</div>
</div>
|
multithread cruncher: only copy stats when picture->stats != NULL | @@ -1796,7 +1796,9 @@ WebPEncodingError VP8LEncodeStream(const WebPConfig* const config,
}
// This line is here and not in the param initialization above to remove a
// Clang static analyzer warning.
+ if (picture->stats != NULL) {
memcpy(&stats_side, picture->stats, sizeof(stats_side));
+ }
// This line is only useful to remove a Clang static analyzer warning.
params_side.err_ = VP8_ENC_OK;
worker_interface->Launch(&worker_side);
|
memif: memif buffer leaks when disconnecting a zero-copy interface.
Code added to free the zero-copy interface rx/tx queue buffers during disconnect.
The previous official fix was found to introduce a core dump in unit tests; this change does not.
Type: fix | @@ -65,6 +65,24 @@ memif_queue_intfd_close (memif_queue_t * mq)
}
}
+static void
+memif_disconnect_free_zc_queue_buffer (memif_queue_t * mq, u8 is_rx)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u16 ring_size, n_slots, mask, start;
+
+ ring_size = 1 << mq->log2_ring_size;
+ mask = ring_size - 1;
+ n_slots = mq->ring->head - mq->last_tail;
+ start = mq->last_tail & mask;
+ if (is_rx)
+ vlib_buffer_free_from_ring (vm, mq->buffers, start, ring_size, n_slots);
+ else
+ vlib_buffer_free_from_ring_no_next (vm, mq->buffers, start, ring_size,
+ n_slots);
+ vec_free (mq->buffers);
+}
+
void
memif_disconnect (memif_if_t * mif, clib_error_t * err)
{
@@ -126,10 +144,28 @@ memif_disconnect (memif_if_t * mif, clib_error_t * err)
memif_log_warn (mif,
"Unable to unassign interface %d, queue %d: rc=%d",
mif->hw_if_index, i, rv);
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 1);
+ }
mq->ring = 0;
}
}
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, mif->tx_queues)
+ {
+ mq = vec_elt_at_index (mif->tx_queues, i);
+ if (mq->ring)
+ {
+ if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
+ {
+ memif_disconnect_free_zc_queue_buffer(mq, 0);
+ }
+ }
+ mq->ring = 0;
+ }
+
/* free tx and rx queues */
vec_foreach (mq, mif->rx_queues)
memif_queue_intfd_close (mq);
|
Install: Replace Homebrew with Linuxbrew formula
Homebrew is not a package manager for Linux, but for macOS.
[Linuxbrew](http://linuxbrew.sh) on the other hand is a fork of
Homebrew that should work on Linux. | ## Linux
-For the following Linux Distributions 0.8 packages are available:
+For the following Linux distributions and package managers 0.8 packages are available:
- [Arch Linux](https://aur.archlinux.org/packages/elektra/)
- - [Homebrew](http://formulae.brew.sh/formula/elektra)
- [Openwrt](https://github.com/openwrt/packages/tree/master/libs/elektra)
- [OpenSuse](https://software.opensuse.org/package/elektra)
- [Debian](https://packages.debian.org/de/jessie/libelektra4)
- [Ubuntu](https://launchpad.net/ubuntu/+source/elektra)
- [Gentoo](http://packages.gentoo.org/package/app-admin/elektra)
- [Linux Mint](https://community.linuxmint.com/software/view/elektra-bin)
+ - [LinuxBrew](https://github.com/Linuxbrew/homebrew-core/blob/master/Formula/elektra.rb)
Available, but not up-to-date:
|
nimble/phy: Fix debug pins build on nRF52832
Need to adjust code for nRF52832 which does not have GPIO ports and
NRF_RADIO->EVENT_RXREADY event.
Also setup GPIO via registers to strip dependency from hal_gpio (for
porting). | @@ -1213,12 +1213,27 @@ ble_phy_isr(void)
static inline void
ble_phy_dbg_time_setup_gpiote(int index, int pin)
{
- hal_gpio_init_out(pin, 0);
+ NRF_GPIO_Type *port;
+
+#if NRF52840_XXAA
+ port = pin > 31 ? NRF_P1 : NRF_P0;
+ pin &= 0x1f;
+#else
+ port = NRF_P0;
+#endif
+
+ /* Configure GPIO directly to avoid dependency to hal_gpio (for porting) */
+ port->DIRSET = (1 << pin);
+ port->OUTCLR = (1 << pin);
NRF_GPIOTE->CONFIG[index] =
(GPIOTE_CONFIG_MODE_Task << GPIOTE_CONFIG_MODE_Pos) |
((pin & 0x1F) << GPIOTE_CONFIG_PSEL_Pos) |
+#if NRF52840_XXAA
((pin > 31) << GPIOTE_CONFIG_PORT_Pos);
+#else
+ 0;
+#endif
}
#endif
@@ -1259,7 +1274,11 @@ ble_phy_dbg_time_setup(void)
ble_phy_dbg_time_setup_gpiote(--gpiote_idx,
MYNEWT_VAL(BLE_PHY_DBG_TIME_WFR_PIN));
+#if NRF52840_XXAA
NRF_PPI->CH[18].EEP = (uint32_t)&(NRF_RADIO->EVENTS_RXREADY);
+#else
+ NRF_PPI->CH[18].EEP = (uint32_t)&(NRF_RADIO->EVENTS_READY);
+#endif
NRF_PPI->CH[18].TEP = (uint32_t)&(NRF_GPIOTE->TASKS_SET[gpiote_idx]);
NRF_PPI->CH[19].EEP = (uint32_t)&(NRF_RADIO->EVENTS_DISABLED);
NRF_PPI->CH[19].TEP = (uint32_t)&(NRF_GPIOTE->TASKS_CLR[gpiote_idx]);
|
gard: Add path parsing support
In order to support manual GARD records we need to be able to parse the
hardware unit path strings. This patch implements that. | @@ -168,6 +168,20 @@ static const char *target_type_to_str(int type)
return "UNKNOWN";
}
+static int str_to_target_type(const char *path)
+{
+ int i, len;
+
+ for (i = 0; i < chip_unit_count; i++) {
+ len = strlen(chip_units[i].desc);
+
+ if (!strncmp(chip_units[i].desc, path, len))
+ return chip_units[i].type; /* match! */
+ }
+
+ return -1;
+}
+
static const char *deconfig_reason_str(enum gard_reason reason)
{
switch (reason) {
@@ -228,6 +242,79 @@ static char *format_path(struct entity_path *path, char *buffer)
return buffer;
}
+/*
+ * parses a Path string into the entity_path structured provided.
+ *
+ * str - In param, String to parse
+ * parsed - Out param, resultant entity_path
+ *
+ * e.g.
+ *
+ * "/Sys0/Node0/Proc1" -> {
+ * type_size = 0x23,
+ *
+ * path_element[0] = {0, 0}
+ * path_element[1] = {1, 0}
+ * path_element[2] = {2, 1}
+ * }
+ */
+int parse_path(const char *str, struct entity_path *parsed)
+{
+ int unit_count = 0;
+
+ memset(parsed, 0, sizeof(*parsed));
+
+ while (*str != '\0') {
+ int unit_id = str_to_target_type(++str); /* ++ skips the '/' */
+ long instance;
+ char *end;
+
+ if (unit_count > MAX_PATH_ELEMENTS - 1) {
+ fprintf(stderr, "Path has more than 10 components!\n");
+ return -1;
+ }
+
+ /* find the type Id of this component */
+ if (unit_id < 0) { /* unknown unit, bail out */
+ fprintf(stderr, "Unknown unit at: '%s'\n", str);
+ return -1;
+ }
+
+ parsed->path_elements[unit_count].target_type = unit_id;
+
+ /* now parse the instance # */
+ str += strlen(chip_units[unit_id].desc);
+ instance = strtol(str, &end, 10);
+
+ if (*end != '\0' && *end != '/') {
+ fprintf(stderr, "Unable to parse instance after '%s'\n",
+ str);
+ return -1;
+ }
+
+ if (instance > 15 || instance < 0) {
+ fprintf(stderr,
+ "Instance %ld out of range should be 0 to 15\n",
+ instance);
+ return -1;
+ }
+ parsed->path_elements[unit_count].instance = instance;
+
+ str = end;
+ unit_count++;
+ }
+
+ /*
+ * We assume the path is a physical path because every gard record I've
+ * seen so far uses them. We might need to fix this later on, but lets
+ * cross the bridge when we have to.
+ */
+ parsed->type_size = (unit_count & 0xf) |
+ (PATH_PHYSICAL << PATH_TYPE_SHIFT);
+
+ return 0;
+}
+
static bool is_valid_record(struct gard_record *g)
{
return be32toh(g->record_id) != CLEARED_RECORD_ID;
|
util/markdownlint.rb: Allow fenced code blocks
We use both indented and fenced styles in diverse markdown files.
We try to do this consistently in each file, though. | @@ -5,8 +5,9 @@ all
# Use --- and === for H1 and H2.
rule 'MD003', :style => :setext_with_atx
-# Code blocks are indented
-rule 'MD046', :style => :indented
+# Code blocks may be fenced or indented, both are OK...
+# but they must be consistent throughout each file.
+rule 'MD046', :style => :consistent
# Bug in mdl, https://github.com/markdownlint/markdownlint/issues/313
exclude_rule 'MD007'
|
nimble/phy/nrf5x: Implement NRF5340 errata 158
Trim values shall be loaded after toggling power.
nrf_radio_power_set(NRF_RADIO, true);
#ifdef NRF53_SERIES
+ /* Errata 158: load trim values after toggling power */
+ for (uint32_t index = 0; index < 32ul &&
+ NRF_FICR_NS->TRIMCNF[index].ADDR != (uint32_t *)0xFFFFFFFFul; index++) {
+ *((volatile uint32_t *)NRF_FICR_NS->TRIMCNF[index].ADDR) = NRF_FICR_NS->TRIMCNF[index].DATA;
+ }
+
*(volatile uint32_t *)(NRF_RADIO_NS_BASE + 0x774) =
(*(volatile uint32_t* )(NRF_RADIO_NS_BASE + 0x774) & 0xfffffffe) | 0x01000000;
#if NRF53_ERRATA_16_ENABLE_WORKAROUND
|
Fix crash in xdg_activation_v1.c
wlr_xdg_surface_from_wlr_surface() can return a NULL pointer, so check for NULL before dereferencing it. | @@ -11,6 +11,9 @@ void xdg_activation_v1_handle_request_activate(struct wl_listener *listener,
struct wlr_xdg_surface *xdg_surface =
wlr_xdg_surface_from_wlr_surface(event->surface);
+ if (xdg_surface == NULL) {
+ return;
+ }
struct sway_view *view = xdg_surface->data;
if (!xdg_surface->mapped || view == NULL) {
return;
|
docs - remove SOCKS proxy support from s3 protocol | @@ -280,13 +280,13 @@ server_side_encryption = sse-s3
<section id="s3_proxy">
<title>s3 Protocol Proxy Support </title>
<p>You can specify a URL that is the proxy that S3 uses to connect to a data source. S3
- supports these protocols: HTTP, HTTPS, and SOCKS (4, 4a, 5, 5h). You can specify a proxy
+ supports these protocols: HTTP and HTTPS. You can specify a proxy
with the <codeph>s3</codeph> protocol configuration parameter <codeph>proxy</codeph> or
an environment variable. If the configuration parameter is set, the environment
variables are ignored. </p>
<p>To specify proxy with an environment variable, you set the environment variable based on
- the protocol: <codeph>http_proxy</codeph>, <codeph>https_proxy</codeph>, or
- <codeph>socks_proxy</codeph>. You can specify a different URL for each protocol by
+ the protocol: <codeph>http_proxy</codeph> or <codeph>https_proxy</codeph>.
+ You can specify a different URL for each protocol by
setting the appropriate environment variable. S3 supports these environment
variables.<ul id="ul_cy3_km2_r1b">
<li><codeph>all_proxy</codeph> specifies the proxy URL that is used if an environment
@@ -432,13 +432,13 @@ chunksize = 67108864</codeblock></p>
<plentry>
<pt>proxy</pt>
<pd>Specify a URL that is the proxy that S3 uses to connect to a data source. S3
- supports these protocols: HTTP, HTTPS, and SOCKS (4, 4a, 5, 5h). This is the
+ supports these protocols: HTTP and HTTPS. This is the
format for the
parameter.<codeblock>proxy = <varname>protocol</varname>://[<varname>user</varname>:<varname>password</varname>@]<varname>proxyhost</varname>[:<varname>port</varname>]</codeblock></pd>
<pd>If this parameter is not set or is an empty string (<codeph>proxy =
""</codeph>), S3 uses the proxy specified by the environment variable
- <codeph>http_proxy</codeph>, <codeph>https_proxy</codeph>, or
- <codeph>socks_proxy</codeph> (and the environment variables
+ <codeph>http_proxy</codeph> or <codeph>https_proxy</codeph>
+ (and the environment variables
<codeph>all_proxy</codeph> and <codeph>no_proxy</codeph>). The environment
variable that S3 uses depends on the protocol. For information about the
environment variables, see <xref href="g-s3-protocol.xml#amazon-emr/s3_proxy"
|
tools: fix typo in help message | @@ -43,7 +43,7 @@ from time import sleep
examples = """examples:
./bindsnoop # trace all TCP bind()s
./bindsnoop -t # include timestamps
- ./tcplife -w # wider columns (fit IPv6)
+ ./bindsnoop -w # wider columns (fit IPv6)
./bindsnoop -p 181 # only trace PID 181
./bindsnoop -P 80 # only trace port 80
./bindsnoop -P 80,81 # only trace port 80 and 81
|
Default missing otherActor in custom event call to be $self$ | @@ -27,7 +27,7 @@ const compile = (input, helpers) => {
e.args.actorId = input[`$actor[${e.args.actorId}]$`] || "$self$";
}
if (e.args.otherActorId && e.args.otherActorId !== "player") {
- e.args.otherActorId = input[`$actor[${e.args.otherActorId}]$`];
+ e.args.otherActorId = input[`$actor[${e.args.otherActorId}]$`] || "$self$";
}
Object.keys(e.args).forEach((arg) => {
|
first try migrating one of the arm builds from travis | @@ -14,6 +14,26 @@ steps:
displayName: 'Run a one-line script'
- script: |
- echo Add other tasks to build, test, and deploy your project.
- echo See https://aka.ms/yaml
- displayName: 'Run a multi-line script'
+ docker run --rm --privileged multiarch/qemu-user-static:register --reset
+ ls /proc/sys/fs/binfmt_misc/
+ condition: not(startsWith(variables['CONFIG'], 'linux_64'))
+ displayName: Configure binfmt_misc
+
+- script: |
+ echo "FROM openblas/alpine:arm32
+ COPY . /tmp/openblas
+ RUN mkdir /tmp/openblas/build && \
+ cd /tmp/openblas/build && \
+ CC=gcc cmake -D DYNAMIC_ARCH=OFF \
+ -D TARGET=ARMV6 \
+ -D BUILD_SHARED_LIBS=ON \
+ -D BUILD_WITHOUT_LAPACK=ON \
+ -D BUILD_WITHOUT_CBLAS=ON \
+ -D CMAKE_BUILD_TYPE=Release ../ && \
+ cmake --build ." > Dockerfile
+ docker build .
+
+#- script: |
+# echo Add other tasks to build, test, and deploy your project.
+# echo See https://aka.ms/yaml
+# displayName: 'Run a multi-line script'
|
Use a dedicated symbol for in-function loop to avoid the weak symbol JAL range error when a strong symbol is defined outside. | @@ -274,17 +274,19 @@ xPortStartFirstTask:
/*-----------------------------------------------------------*/
freertos_risc_v_application_exception_handler:
+__application_exception_handler_loop:
csrr t0, mcause /* For viewing in the debugger only. */
csrr t1, mepc /* For viewing in the debugger only */
csrr t2, mstatus /* For viewing in the debugger only */
- j freertos_risc_v_application_exception_handler
+ j __application_exception_handler_loop
/*-----------------------------------------------------------*/
freertos_risc_v_application_interrupt_handler:
+__application_interrupt_handler_loop:
csrr t0, mcause /* For viewing in the debugger only. */
csrr t1, mepc /* For viewing in the debugger only */
csrr t2, mstatus /* For viewing in the debugger only */
- j freertos_risc_v_application_interrupt_handler
+ j __application_interrupt_handler_loop
/*-----------------------------------------------------------*/
.section .text.freertos_risc_v_exception_handler
|
Removed unused defs GRAY_SVR, _CONSOLE, _REENTRANT from Linux makefile | SHELL = /bin/bash
# Generic makefile
-#ifdef FORCE32
MARCH = -march=i686 -m32
-#endif
OPTDEFAULT = -fno-omit-frame-pointer -ffast-math -fpermissive $(MARCH)
COPTDEFAULT = -fno-omit-frame-pointer -ffast-math $(MARCH)
@@ -12,34 +10,24 @@ COPT = -O0 -fno-expensive-optimizations $(COPTDEFAULT)
WARN = -Wall -Wno-unknown-pragmas -Wno-invalid-offsetof -Wno-unused-but-set-variable -Wno-switch
CWARN = -Wall -Wno-unknown-pragmas -Wno-unused-but-set-variable -Wno-switch -Wno-implicit-function-declaration
-ifdef DBG
-DEBUG = -s -ggdb3
-else
-DEBUG = -s
-endif
-
# DB includes + libs
-DBINCL = -I/usr/include/mysql -L/usr/lib/mysql
+DBINCLUDE = -I/usr/include/mysql -L/usr/lib/mysql
DBLIBS = -lmysqlclient
# Linux
-INCLUDE = -I./src/common $(DBINCL)
+INCLUDE = -I./src/common $(DBINCLUDE)
LIBS = -dynamic -lpthread -lrt -ldl $(DBLIBS)
-DEFNIX = -D_LINUX
ifdef NIGHTLY
-NIGHTLYDEFS = -D_NIGHTLYBUILD
-# NIGHTLYDEFS = -D_NIGHTLYBUILD -DTHREAD_TRACK_CALLSTACK
+ NIGHTLYDEFS = -D_NIGHTLYBUILD # -DTHREAD_TRACK_CALLSTACK
endif
ifdef DBG
-DEBUGDEFS = -D_DEBUG -DTHREAD_TRACK_CALLSTACK
-DBGWARN = -Wno-unused-variable
-# DEBUGDEFS = -D_DEBUG -D_PACKETDUMP -D_TESTEXCEPTION -DDEBUG_CRYPT_MSGS
+ DBGDEFS = -D_DEBUG -DTHREAD_TRACK_CALLSTACK # -D_PACKETDUMP -D_TESTEXCEPTION -DDEBUG_CRYPT_MSGS
+ DBGWARN = -ggdb3
endif
-EXTRADEFS = -D_MTNETWORK
-DEFINES = -DGRAY_SVR -D_CONSOLE -D_REENTRANT $(DEFNIX) $(NIGHTLYDEFS) $(EXTRADEFS) $(DEBUGDEFS)
+DEFINES = -D_MTNETWORK $(NIGHTLYDEFS) $(DBGDEFS)
EXE = spheresvr
@@ -48,7 +36,7 @@ CCO = gcc
NO = -fno-rtti -fno-exceptions
EX = -fexceptions -fnon-call-exceptions
-SPECIAL = $(EX) $(DEBUG)
+SPECIAL = -s $(EX) $(DBGWARN)
GITREVISION = $(shell expr $(shell git rev-list --count HEAD) - 2406)
GITHASH = $(shell git rev-parse --short HEAD)
|
contacts: add bg-gray0 to root page | @@ -50,7 +50,7 @@ export class Root extends Component {
contacts={contacts}
groups={groups}
invites={invites}>
- <div className="h-100 w-100 overflow-x-hidden bg-white dn db-ns">
+ <div className="h-100 w-100 overflow-x-hidden bg-white bg-gray0-d dn db-ns">
<div className="pl3 pr3 pt2 dt pb3 w-100 h-100">
<p className="f9 pt3 gray2 w-100 h-100 dtc v-mid tc">
Select a group to begin.
|
Run and kill python in the test to check coredump presence | @@ -76,7 +76,7 @@ def recover_core_dump_file(binary_path, cwd, pid):
logger.debug("Search for core dump files match pattern '%s' in '%s'", core_mask, core_dump_dir)
files = glob.glob(os.path.join(core_dump_dir, core_mask))
- logger.debug("Matched core dump files (%d): [%s]", len(files), ", ".join(files))
+ logger.debug("Matched core dump files (%d/%d): [%s]", len(files), len(os.listdir(core_dump_dir)), ", ".join(files))
if len(files) == 1:
return files[0]
elif len(files) > 1:
|
ci: fix python3 pip problem
upgrade pip to get setuptools | @@ -19,7 +19,7 @@ matrix:
- python3-pip
- script: scripts/build-pypi.sh
install:
- - python3 -m pip install --user pybind11 nose2
+ - python3 -m pip install --upgrade pip && python3 -m pip install --user pybind11 nose2
cache: ccache
env:
- BUILD_JOB=Build python distribution and upload to PyPI
|
WIFI: added log for wifi test, increased timeout | @@ -287,7 +287,8 @@ static void wifi_connect_by_bssid(uint8_t *bssid)
TEST_ESP_OK(esp_wifi_set_config(WIFI_IF_STA, &w_config));
TEST_ESP_OK(esp_wifi_connect());
- bits = xEventGroupWaitBits(wifi_events, GOT_IP_EVENT, 1, 0, 5000/portTICK_RATE_MS);
+ ESP_LOGI(TAG, "called esp_wifi_connect()");
+ bits = xEventGroupWaitBits(wifi_events, GOT_IP_EVENT, 1, 0, 7000/portTICK_RATE_MS);
TEST_ASSERT(bits == GOT_IP_EVENT);
}
|
graph-store: add index validation | ?~ node-list graph
=* index -.i.node-list
=* node +.i.node-list
+ ~| "cannot add deleted post"
+ ?> ?=(%& -.post.node)
+ =* p p.post.node
+ ~| "graph indexes must match"
+ ?> =(index index.p)
%_ $
node-list t.node-list
graph (add-node-at-index graph index node mark)
(~(get by graphs) [ship term])
?~ result
[~ ~]
+ ~& (has:orm p.u.result atom)
?. (has:orm p.u.result atom)
[~ ~]
=/ =node:store (got:orm p.u.result atom)
|
[bsp][stm32] update stm32h743-st-nucleo/board/SConscript to fix compiling error after dist | import rtconfig
from building import *
+Import('SDK_LIB')
+
cwd = GetCurrentDir()
# add the general drivers.
@@ -10,12 +12,14 @@ src += Glob('CubeMX_Config/Src/stm32h7xx_hal_msp.c')
path = [cwd]
path += [cwd + '/CubeMX_Config/Inc']
+startup_path_prefix = SDK_LIB
+
if rtconfig.CROSS_TOOL == 'gcc':
- src += [cwd + '/../../libraries/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/gcc/startup_stm32h743xx.s']
+ src += [startup_path_prefix + '/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/gcc/startup_stm32h743xx.s']
elif rtconfig.CROSS_TOOL == 'keil':
- src += [cwd + '/../../libraries/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/arm/startup_stm32h743xx.s']
+ src += [startup_path_prefix + '/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/arm/startup_stm32h743xx.s']
elif rtconfig.CROSS_TOOL == 'iar':
- src += [cwd + '/../../libraries/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/iar/startup_stm32h743xx.s']
+ src += [startup_path_prefix + '/STM32H7xx_HAL/CMSIS/Device/ST/STM32H7xx/Source/Templates/iar/startup_stm32h743xx.s']
# STM32H743xx || STM32H750xx || STM32F753xx
# You can select chips from the list above
|
[tools] Fix the lib paths not found issue. | @@ -22,7 +22,7 @@ from xml.etree.ElementTree import SubElement
from building import *
-MODULE_VER_NUM = 0
+MODULE_VER_NUM = 1
source_pattern = ['*.c', '*.cpp', '*.cxx', '*.s', '*.S', '*.asm']
@@ -301,10 +301,12 @@ def HandleToolOption(tools, env, project, reset):
option = linker_paths_option
# remove old lib paths
for item in option.findall('listOptionValue'):
+ if IsRttEclipsePathFormat(item.get('value')):
+ # clean old configuration
option.remove(item)
# add new old lib paths
for path in env['LIBPATH']:
- SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': path})
+ SubElement(option, 'listOptionValue', {'builtIn': 'false', 'value': ConverToRttEclipsePathFormat(RelativeProjectPath(env, path).replace('\\', '/'))})
return
|
input: move a static function close to its use | @@ -361,31 +361,6 @@ static bool input_cmpCov(struct dynfile_t* item1, struct dynfile_t* item2) {
return false;
}
-/* Number of tests taken, based on how fresh the input is */
-static size_t input_numTests(run_t* run, struct dynfile_t* dynfile) {
- size_t total = run->global->io.dynfileqCnt;
- if (dynfile->idx > total) {
- LOG_F("idx (%zu) > total (%zu)", dynfile->idx, total);
- }
- if (dynfile->idx == 0 || (total - dynfile->idx) > 5) {
- return 1;
- }
- /* If the sample is older than 10 seconds, don't bump its testing ratio */
- if ((run->timeStartedMillis - dynfile->timeAddedMillis) > (1000 * 10)) {
- return 1;
- }
-
- static size_t const scaleMap[] = {
- [0] = 128,
- [1] = 32,
- [2] = 8,
- [3] = 4,
- [4] = 2,
- [5] = 1,
- };
- return scaleMap[total - dynfile->idx];
-}
-
#define TAILQ_FOREACH_HF(var, head, field) \
for ((var) = TAILQ_FIRST((head)); (var); (var) = TAILQ_NEXT((var), field))
@@ -454,6 +429,31 @@ void input_addDynamicInput(
}
}
+/* Number of tests taken, based on how 'fresh' the input is */
+static size_t input_numTests(run_t* run, struct dynfile_t* dynfile) {
+ size_t total = run->global->io.dynfileqCnt;
+ if (dynfile->idx > total) {
+ LOG_F("idx (%zu) > total (%zu)", dynfile->idx, total);
+ }
+ if (dynfile->idx == 0 || (total - dynfile->idx) > 5) {
+ return 1;
+ }
+ /* If the sample is older than 10 seconds, don't bump its testing ratio */
+ if ((run->timeStartedMillis - dynfile->timeAddedMillis) > (1000 * 10)) {
+ return 1;
+ }
+
+ static size_t const scaleMap[] = {
+ [0] = 128,
+ [1] = 32,
+ [2] = 8,
+ [3] = 4,
+ [4] = 2,
+ [5] = 1,
+ };
+ return scaleMap[total - dynfile->idx];
+}
+
bool input_prepareDynamicInput(run_t* run, bool needs_mangle) {
struct dynfile_t* current = NULL;
|
grib_filter: Fail if key of type double (e.g. values) is assigned to a string | @@ -390,7 +390,8 @@ static int pack_expression(grib_accessor* a, grib_expression* e)
len = 1;
ret = grib_expression_evaluate_long(hand, e, &lval);
if (ret != GRIB_SUCCESS) {
- grib_context_log(a->context, GRIB_LOG_ERROR, "Unable to set %s as long", a->name);
+ grib_context_log(a->context, GRIB_LOG_ERROR, "Unable to set %s as long (from %s)",
+ a->name, e->cclass->name);
return ret;
}
/*if (hand->context->debug)
@@ -401,6 +402,11 @@ static int pack_expression(grib_accessor* a, grib_expression* e)
case GRIB_TYPE_DOUBLE: {
len = 1;
ret = grib_expression_evaluate_double(hand, e, &dval);
+ if (ret != GRIB_SUCCESS) {
+ grib_context_log(a->context, GRIB_LOG_ERROR, "unable to set %s as double (from %s)",
+ a->name, e->cclass->name);
+ return ret;
+ }
/*if (hand->context->debug)
printf("ECCODES DEBUG grib_accessor_class_gen::pack_expression %s %g\n", a->name, dval);*/
return grib_pack_double(a, &dval, &len);
@@ -411,7 +417,8 @@ static int pack_expression(grib_accessor* a, grib_expression* e)
len = sizeof(tmp);
cval = grib_expression_evaluate_string(hand, e, tmp, &len, &ret);
if (ret != GRIB_SUCCESS) {
- grib_context_log(a->context, GRIB_LOG_ERROR, "unable to set %s as string", a->name);
+ grib_context_log(a->context, GRIB_LOG_ERROR, "unable to set %s as string (from %s)",
+ a->name, e->cclass->name);
return ret;
}
len = strlen(cval);
|
zoul: only pass -DDATE on platform.o
The other files do not need this definition. | @@ -20,7 +20,10 @@ BSL_SPEED ?= 460800
# Works in Linux and probably on OSX too (RTCC example)
COMPILE_DATE := $(shell date +"%02u %02d %02m %02y %02H %02M %02S")
-CFLAGS += -DDATE="\"$(COMPILE_DATE)\""
+CFLAGS_DATE ?= -DDATE="\"$(COMPILE_DATE)\""
+
+# Compile platform.o with -DDATE, other files without it.
+$(OBJECTDIR)/platform.o: CFLAGS += $(CFLAGS_DATE)
### Configure the build for the board and pull in board-specific sources
CONTIKI_TARGET_DIRS += . dev
|
Time cluster: fix compilation on Qt < 5.5 (2) | @@ -116,7 +116,7 @@ void DeRestPluginPrivate::sendTimeClusterResponse(const deCONZ::ApsDataIndicatio
DBG_Printf(DBG_INFO, "Time_Cluster time_dst_end %s %ld\n", dstend.toUTC().toString(Qt::ISODate).toStdString().c_str(), (long) time_dst_end);
DBG_Printf(DBG_INFO, "Time_Cluster time_dst_shift %d\n", (int) time_dst_shift);
DBG_Printf(DBG_INFO, "Time_Cluster time_zone %d %s\n", (int) time_zone, timeZoneLocal.abbreviation(local).toStdString().c_str());
- DBG_Printf(DBG_INFO, "Time_Cluster systemTimeZone %s\n", QTimeZone::systemTimeZone().abbreviation(local).toStdString().c_str());
+ //DBG_Printf(DBG_INFO, "Time_Cluster systemTimeZone %s\n", QTimeZone::systemTimeZone().abbreviation(local).toStdString().c_str());
{ // payload
QDataStream stream(&outZclFrame.payload(), QIODevice::WriteOnly);
|
honggfuzz: displayDisplay can be called anytime | @@ -64,14 +64,12 @@ static void exitWithMsg(const char* msg, int exit_code) {
}
}
-static bool showDisplay = true;
static void sigHandler(int sig) {
/* We should not terminate upon SIGALRM delivery */
if (sig == SIGALRM) {
if (fuzz_shouldTerminate()) {
exitWithMsg("Terminating forcefully\n", EXIT_FAILURE);
}
- showDisplay = true;
return;
}
/* Do nothing with pings from the main thread */
@@ -300,9 +298,8 @@ int main(int argc, char** argv) {
setupMainThreadTimer();
for (;;) {
- if (hfuzz.display.useScreen && showDisplay) {
+ if (hfuzz.display.useScreen) {
display_display(&hfuzz);
- showDisplay = false;
}
if (ATOMIC_GET(sigReceived) > 0) {
LOG_I("Signal %d (%s) received, terminating", ATOMIC_GET(sigReceived),
|
EFAS: Add new MARS streams | 1054 rjtd Tokyo
1055 cwao Montreal
1056 ammc Melbourne
-1057 efas European Flood Awareness System
+1057 efas European Flood Awareness System (EFAS)
+1058 efra European Flood Awareness System (EFAS) reanalysis
+1059 efrf European Flood Awareness System (EFAS) hindcasts
+1060 efse European Flood Awareness System (EFAS) seasonal forecasts
+1061 efmf European Flood Awareness System (EFAS) merged forecasts
1070 msdc Monthly standard deviation and covariance
1071 moda Monthly means of daily means
1072 monr Monthly means using G. Boer's step function
|
fix err using rpa for confirmation calculation | @@ -1990,7 +1990,7 @@ void btm_ble_conn_complete(UINT8 *p, UINT16 evt_len, BOOLEAN enhanced)
}
#if (BLE_PRIVACY_SPT == TRUE )
peer_addr_type = bda_type;
- match = btm_identity_addr_to_random_pseudo (bda, &bda_type, TRUE);
+ match = btm_identity_addr_to_random_pseudo (bda, &bda_type, FALSE);
/* possiblly receive connection complete with resolvable random on
slave role while the device has been paired */
|
stats: Move misplaced comment block
Type: fix
Fixes: | @@ -90,9 +90,6 @@ typedef struct
uint64_t epoch;
} stat_segment_access_t;
-/*
- * Returns 0 on success, -1 on failure (timeout)
- */
static inline uint64_t
_time_now_nsec (void)
{
@@ -108,6 +105,9 @@ stat_segment_adjust (stat_client_main_t * sm, void *data)
((char *) data - (char *) sm->shared_header->base));
}
+/*
+ * Returns 0 on success, -1 on failure (timeout)
+ */
static inline int
stat_segment_access_start (stat_segment_access_t * sa,
stat_client_main_t * sm)
|
Flip canvas perspective projection; | @@ -88,7 +88,7 @@ void lovrCanvasBind(Canvas* canvas) {
lovrGraphicsSetProjection(projection);
} else {
mat4 projection = lovrGraphicsGetProjection();
- float b = projection[5];
+ float b = -projection[5];
float c = projection[10];
float d = projection[14];
float aspect = (float) width / height;
|
Add github action badge to README.md | @@ -12,7 +12,7 @@ following components:
* **Octave plugin** (provides some basic functionality only);
## Build Status
-
+- GitHub: 
- AppVeyor: [](https://ci.appveyor.com/project/myriadrf/limesuite)
## Documentation
|
Undoing test commit | @@ -288,7 +288,7 @@ QvisSetupHostProfilesAndConfigWindow::installConfigFile(const QString& srcFilena
// in installConfigFile.
//
// Kathleen Biagas, Thu Nov 7 11:51:53 PST 2019
-// Info now stored in QListWidgets and QStringlists
+// Info now stored in QListWidgets and QStringlists.
//
// Kevin Griffin, Thu Nov 7 17:43:56 PST 2019
// In rare cases when the user moves or deletes their .visit directory
|
update mapkit_sdk for Linux | @@ -12,13 +12,14 @@ ENDIF()
IF (OS_LINUX)
# Qt + protobuf 2.6.1 + GL headers + GLES2
- DECLARE_EXTERNAL_RESOURCE(MAPKIT_SDK sbr:648642209)
+ DECLARE_EXTERNAL_RESOURCE(MAPKIT_SDK sbr:649684872)
CFLAGS(
GLOBAL "-I$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/include"
GLOBAL "-I$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/linux.x86-64/include"
)
LDFLAGS_FIXED(
"-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/local/linux.x86-64/lib"
+ "-L$MAPKIT_SDK_RESOURCE_GLOBAL/mapkit_sdk/lib/x86_64-linux-gnu"
)
ELSEIF (OS_ANDROID)
# protobuf 2.6.1
|
fixes u3n_prog pointers in u3n_ream() | @@ -2346,6 +2346,14 @@ _n_ream(u3_noun kev)
{
c3_w i_w;
u3n_prog* pog_u = u3to(u3n_prog, u3t(kev));
+
+ // fix up pointers for loom portability
+ pog_u->byc_u.ops_y = (c3_y*) _n_prog_dat(pog_u);
+ pog_u->lit_u.non = (u3_noun*) (pog_u->byc_u.ops_y + pog_u->byc_u.len_w);
+ pog_u->mem_u.sot_u = (u3n_memo*) (pog_u->lit_u.non + pog_u->lit_u.len_w);
+ pog_u->cal_u.sit_u = (u3j_site*) (pog_u->mem_u.sot_u + pog_u->mem_u.len_w);
+ pog_u->reg_u.rit_u = (u3j_rite*) (pog_u->cal_u.sit_u + pog_u->cal_u.len_w);
+
for ( i_w = 0; i_w < pog_u->cal_u.len_w; ++i_w ) {
u3j_site_ream(&(pog_u->cal_u.sit_u[i_w]));
}
|
add new packages to cleanpackage target | @@ -656,6 +656,12 @@ cleanpackage:
@$(rm) -r debian/oidc-agent-prompt
@$(rm) -r debian/oidc-agent-prompt.debhelper.log
@$(rm) -r debian/oidc-agent-prompt.substvars
+ @$(rm) -r debian/oidc-agent-cli
+ @$(rm) -r debian/oidc-agent-cli.debhelper.log
+ @$(rm) -r debian/oidc-agent-cli.substvars
+ @$(rm) -r debian/oidc-agent-desktop
+ @$(rm) -r debian/oidc-agent-desktop.debhelper.log
+ @$(rm) -r debian/oidc-agent-desktop.substvars
.PHONY: cleantest
cleantest:
|
[CHAIN] move handling for contract GetQuery from ChainWorker to ChainService
contract can't support parallel get query yet | @@ -283,7 +283,6 @@ func (cs *ChainService) Receive(context actor.Context) {
*message.GetTx,
*message.GetReceipt,
*message.GetABI,
- *message.GetQuery,
*message.GetStateQuery,
*message.SyncBlockState,
*message.GetElected,
@@ -310,7 +309,18 @@ func (cs *ChainService) Receive(context actor.Context) {
if err != nil {
logger.Error().Err(err).Msg("failed to remove txs from mempool")
}
-
+ case *message.GetQuery: //TODO move to ChainWorker (Currently, contract doesn't support parallel execution)
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ ctrState, err := cs.sdb.GetStateDB().OpenContractStateAccount(types.ToAccountID(msg.Contract))
+ if err != nil {
+ logger.Error().Str("hash", enc.ToString(msg.Contract)).Err(err).Msg("failed to get state for contract")
+ context.Respond(message.GetQueryRsp{Result: nil, Err: err})
+ } else {
+ bs := state.NewBlockState(cs.sdb.OpenNewStateDB(cs.sdb.GetRoot()))
+ ret, err := contract.Query(msg.Contract, bs, ctrState, msg.Queryinfo)
+ context.Respond(message.GetQueryRsp{Result: ret, Err: err})
+ }
case actor.SystemMessage,
actor.AutoReceiveMessage,
actor.NotInfluenceReceiveTimeout:
@@ -539,18 +549,6 @@ func (cw *ChainWorker) Receive(context actor.Context) {
Err: err,
})
}
- case *message.GetQuery:
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- ctrState, err := cw.sdb.GetStateDB().OpenContractStateAccount(types.ToAccountID(msg.Contract))
- if err != nil {
- logger.Error().Str("hash", enc.ToString(msg.Contract)).Err(err).Msg("failed to get state for contract")
- context.Respond(message.GetQueryRsp{Result: nil, Err: err})
- } else {
- bs := state.NewBlockState(cw.sdb.OpenNewStateDB(cw.sdb.GetRoot()))
- ret, err := contract.Query(msg.Contract, bs, ctrState, msg.Queryinfo)
- context.Respond(message.GetQueryRsp{Result: ret, Err: err})
- }
case *message.GetStateQuery:
var varProof *types.ContractVarProof
var contractProof *types.StateProof
|
Cast socklen_t to size_t in assert comparison
This causes a warning otherwise when socklen_t is signed (Watt32). | @@ -765,7 +765,7 @@ static long dgram_ctrl(BIO *b, int cmd, long num, void *ptr)
ERR_raise_data(ERR_LIB_SYS, get_last_socket_error(),
"calling getsockopt()");
} else {
- OPENSSL_assert(sz <= sizeof(struct timeval));
+ OPENSSL_assert((size_t)sz <= sizeof(struct timeval));
ret = (int)sz;
}
# endif
@@ -816,7 +816,7 @@ static long dgram_ctrl(BIO *b, int cmd, long num, void *ptr)
ERR_raise_data(ERR_LIB_SYS, get_last_socket_error(),
"calling getsockopt()");
} else {
- OPENSSL_assert(sz <= sizeof(struct timeval));
+ OPENSSL_assert((size_t)sz <= sizeof(struct timeval));
ret = (int)sz;
}
# endif
|
Add goto statement so that inject error thread executes the appropriate cleanup statements | @@ -143,6 +143,8 @@ int main(int argc, char *argv[])
res = inject_ras_fatal_error(fpga_device_token, 0);
ON_ERR_GOTO(res, out_destroy_tok, "unsetting inject error register");
+
+ goto out_destroy_tok;
} else {
res = fpgaOpen(fpga_device_token, &fpga_device_handle, FPGA_OPEN_SHARED);
ON_ERR_GOTO(res, out_close, "opening accelerator");
|
Add missing mutex unlock on error
Fixes <https://github.com/Genymobile/scrcpy/issues/1770>
Reported-by: lordnn | @@ -361,12 +361,14 @@ recorder_push(struct recorder *recorder, const AVPacket *packet) {
if (recorder->failed) {
// reject any new packet (this will stop the stream)
+ mutex_unlock(recorder->mutex);
return false;
}
struct record_packet *rec = record_packet_new(packet);
if (!rec) {
LOGC("Could not allocate record packet");
+ mutex_unlock(recorder->mutex);
return false;
}
|
Fix handling of syscalls with zero time | @@ -2549,10 +2549,10 @@ static boolean stat_compare(void *za, void *zb)
return sb->usecs > sa->usecs;
}
-static inline char *print_ts(buffer b, u64 x)
+static inline char *print_usecs(buffer b, u64 x)
{
buffer_clear(b);
- print_timestamp(b, microseconds(x));
+ bprintf(b, "%d.%06d", x / MILLION, x % MILLION);
buffer_write_byte(b, 0);
return buffer_ref(b, 0);
}
@@ -2571,7 +2571,8 @@ static inline char *print_pct(buffer b, u64 x, u64 y)
#define LINE3 LINE LINE LINE
#define SEPARATOR LINE " " LINE2 " " LINE2 " " LINE2 " " LINE2 " " LINE3 "\n"
#define HDR_FMT "%6s %12s %12s %12s %12s %-18s\n"
-#define DATA_FMT "%6s %12s %12.0d %12d %12.0d %-18s\n"
+#define DATA_FMT "%6s %12s %12d %12d %12.0d %-18s\n"
+#define SUM_FMT "%6s %12s %12.0d %12d %12.0d %-18s\n"
#define ROUNDED_IDIV(x, y) (((x)* 10 / (y) + 5) / 10)
@@ -2597,10 +2598,10 @@ closure_function(0, 1, void, print_syscall_stats_cfn,
while ((ss = pqueue_pop(pq)) != INVALID_ADDRESS) {
tot_calls += ss->calls;
tot_errs += ss->errors;
- rprintf(DATA_FMT, print_pct(pbuf, ss->usecs, tot_usecs), print_ts(tbuf, ss->usecs),
+ rprintf(DATA_FMT, print_pct(pbuf, ss->usecs, tot_usecs), print_usecs(tbuf, ss->usecs),
ROUNDED_IDIV(ss->usecs, ss->calls), ss->calls, ss->errors, _linux_syscalls[ss - stats].name);
}
- rprintf(SEPARATOR DATA_FMT, "100.00", print_ts(tbuf, tot_usecs), 0, tot_calls, tot_errs, "total");
+ rprintf(SEPARATOR SUM_FMT, "100.00", print_usecs(tbuf, tot_usecs), 0, tot_calls, tot_errs, "total");
deallocate_pqueue(pq);
}
|
Handle Opaque PK EC keys in ssl_get_ecdh_params_from_cert() | @@ -2863,18 +2863,45 @@ static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl )
psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED;
unsigned char buf[
PSA_KEY_EXPORT_ECC_KEY_PAIR_MAX_SIZE(PSA_VENDOR_ECC_MAX_CURVE_BITS)];
- psa_key_attributes_t key_attributes;
+ psa_key_attributes_t key_attributes = PSA_KEY_ATTRIBUTES_INIT;
size_t ecdh_bits = 0;
size_t key_len;
+ mbedtls_pk_context *pk;
+ mbedtls_ecp_keypair *key;
- if( ! mbedtls_pk_can_do( mbedtls_ssl_own_key( ssl ), MBEDTLS_PK_ECKEY ) )
+ pk = mbedtls_ssl_own_key( ssl );
+
+ if( pk == NULL )
+ return( MBEDTLS_ERR_ECP_BAD_INPUT_DATA );
+
+ switch( mbedtls_pk_get_type( pk ) )
{
+ case MBEDTLS_PK_OPAQUE:
+ if( ! mbedtls_pk_can_do( pk, MBEDTLS_PK_ECKEY ) )
return( MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH );
- }
- mbedtls_ecp_keypair *key =
- mbedtls_pk_ec( *mbedtls_ssl_own_key( ssl ) );
+ ssl->handshake->ecdh_psa_privkey =
+ *( (mbedtls_svc_key_id_t*) pk->pk_ctx );
+
+ status = psa_get_key_attributes( ssl->handshake->ecdh_psa_privkey,
+ &key_attributes );
+ if( status != PSA_SUCCESS)
+ return( MBEDTLS_ERR_PK_BAD_INPUT_DATA );
+
+ ssl->handshake->ecdh_psa_type = psa_get_key_type( &key_attributes );
+ ssl->handshake->ecdh_bits = psa_get_key_bits( &key_attributes );
+
+ psa_reset_key_attributes( &key_attributes );
+ /* Key should no be destroyed in the TLS library */
+ ssl->handshake->ecdh_psa_shared_key = 1;
+
+ ret = 0;
+ break;
+ case MBEDTLS_PK_ECKEY:
+ case MBEDTLS_PK_ECKEY_DH:
+ case MBEDTLS_PK_ECDSA:
+ key = mbedtls_pk_ec( *pk );
if( key == NULL )
return( MBEDTLS_ERR_ECP_BAD_INPUT_DATA );
@@ -2912,6 +2939,10 @@ static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl )
}
ret = 0;
+ break;
+ default:
+ ret = MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH;
+ }
cleanup:
mbedtls_platform_zeroize( buf, sizeof( buf ) );
|
fs/lustre-client: bump to v2.12.7 | @@ -64,7 +64,7 @@ BuildRequires: kernel-abi-whitelists
%undefine with_zfs
%endif
-%{!?version: %global version 2.12.6}
+%{!?version: %global version 2.12.7}
%if 0%{?suse_version}
%{!?kver: %global kver %(readlink /usr/src/linux | sed 's/linux-//' | sed 's/$/-default/')}
%else
|