message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
start serial driver at boot | //kernel drivers
#include <kernel/drivers/pit/pit.h>
+#include <kernel/drivers/serial/serial.h>
#include <kernel/drivers/text_mode/text_mode.h>
//higher-level kernel features
@@ -41,6 +42,7 @@ void system_mem() {
void drivers_init(void) {
pit_timer_init(PIT_TICK_GRANULARITY_1MS);
+ serial_init();
}
static void kernel_spinloop() {
|
1% speed-up training for NN with layernorm
2.5% faster backward with layernorm
slightly faster forward | @@ -675,6 +675,37 @@ __forceinline__ __device__ float4 SharedReduce4(float4 val, float* tmp) {
}
+template <int BlockSize>
+__forceinline__ __device__ void SharedReduce8(float4* val0, float4* val1, float* tmp) {
+ Float4ToSharedMemory<BlockSize>(*val0, tmp, threadIdx.x);
+ Float4ToSharedMemory<BlockSize>(*val1, tmp + 4 * BlockSize, threadIdx.x);
+ __syncthreads();
+ if (BlockSize > 32) {
+ for (int s = BlockSize / 2; s >= 32; s >>= 1) {
+ if (threadIdx.x < s) {
+ for (int k = 0; k < 8; ++k) {
+ tmp[threadIdx.x + BlockSize * k] += tmp[threadIdx.x + s + BlockSize * k];
+ }
+ }
+ __syncthreads();
+ }
+ }
+ for (int s = 16; s > 0; s >>= 1) {
+ if (threadIdx.x < s) {
+ for (int k = 0; k < 8; ++k) {
+ tmp[threadIdx.x + BlockSize * k] += tmp[threadIdx.x + s + BlockSize * k];
+ }
+ }
+ __syncwarp();
+
+ }
+ __syncthreads();
+ (*val0) = Float4FromSharedMemory<BlockSize>(tmp, 0);
+ (*val1) = Float4FromSharedMemory<BlockSize>(tmp + 4 * BlockSize, 0);
+ __syncthreads();
+}
+
+
template <int BlockSize>
__forceinline__ __device__ void SharedPartReduce4(float4 val0, float4 val1, float* tmp, int tileSize) {
Float4ToSharedMemory<BlockSize>(val0, tmp, threadIdx.x);
|
haskell-build-fixes: document SHARED_ONLY_PLUGINS | @@ -17,6 +17,8 @@ set (INFO_PLUGINS_DOC "only for informational purposes. Modify PLUGINS to change
set (ADDED_PLUGINS_DOC "List of plugins already added, ${INFO_PLUGINS_DOC}")
set (ADDED_PLUGINS "" CACHE STRING ${PLUGINS_DOC} FORCE)
+set (SHARED_ONLY_PLUGINS_DOC "List of plugins already added with the ONLY_SHARED configuration enabled, ${INFO_PLUGINS_DOC}")
+
set (REMOVED_PLUGINS_DOC "List of plugins removed, ${INFO_PLUGINS_DOC} ")
set (REMOVED_PLUGINS "" CACHE STRING ${PLUGINS_DOC} FORCE)
|
Windows access() call is different to UNIX. | @@ -2272,6 +2272,7 @@ void wsgi_python_init(apr_pool_t *p)
"Verify the supplied path.", getpid(),
python_home);
}
+#if !defined(WIN32)
else if (access(python_home, X_OK) == -1) {
ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server,
"mod_wsgi (pid=%d): Python home %s is not "
@@ -2281,14 +2282,20 @@ void wsgi_python_init(apr_pool_t *p)
"permissions on the directory.", getpid(),
python_home);
}
+#endif
}
/* Now detect whether have a pyvenv with Python 3.3+. */
pyvenv_cfg = apr_pstrcat(p, python_home, "/pyvenv.cfg", NULL);
+#if defined(WIN32)
+ if (access(pyvenv_cfg, 0) == 0)
+ is_pyvenv = 1;
+#else
if (access(pyvenv_cfg, R_OK) == 0)
is_pyvenv = 1;
+#endif
if (is_pyvenv) {
/*
|
Use yr_lowercase array here. | @@ -29,6 +29,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <ctype.h>
#include <string.h>
+#include <yara/globals.h>
#include <yara/mem.h>
#include <yara/sizedstr.h>
@@ -41,7 +42,7 @@ int sized_string_cmp_nocase(
while (s1->length > i &&
s2->length > i &&
- tolower(s1->c_string[i]) == tolower(s2->c_string[i]))
+ yr_lowercase[(uint8_t) s1->c_string[i]] == yr_lowercase[(uint8_t) s2->c_string[i]])
{
i++;
}
|
Fix ShaderBlock:send(Blob) error message; | @@ -47,7 +47,7 @@ static int l_lovrShaderBlockSend(lua_State* L) {
size_t bufferSize = lovrBufferGetSize(buffer);
// TODO make/use shared helper to check srcOffset/dstOffset/size are non-negative to make these errors better
lovrAssert(srcOffset <= blob->size, "Source offset is bigger than the Blob size (%d > %d)", srcOffset, blob->size);
- lovrAssert(dstOffset <= bufferSize, "Destination offset is bigger than the ShaderBlock size (%d > %d)", srcOffset, bufferSize);
+ lovrAssert(dstOffset <= bufferSize, "Destination offset is bigger than the ShaderBlock size (%d > %d)", dstOffset, bufferSize);
size_t maxSize = MIN(blob->size - srcOffset, bufferSize - dstOffset);
size_t size = luaL_optinteger(L, 5, maxSize);
lovrAssert(size <= blob->size - srcOffset, "Source offset plus copy size exceeds Blob size (%d > %d)", srcOffset + size, blob->size);
|
nimble/ll: Allow 2M params as fallback for ext conn create
We ignore 2M values only for scanning since we do not scan on 2M when
ext conn create is pending, but there's nothing wrong in using those
parameters as fallback in case we established connection on phy that
was not in phy mask. | @@ -707,6 +707,9 @@ ble_ll_ext_conn_create(const uint8_t *cmdbuf, uint8_t len)
ble_ll_conn_itvl_to_ticks(conn_itvl_max, &hcc.params[1].conn_itvl_ticks,
&hcc.params[1].conn_itvl_usecs);
+ if (!fallback_params) {
+ fallback_params = &hcc.params[1];
+ }
params++;
}
#endif
|
Scripts: Fix previous commit | @@ -90,7 +90,7 @@ package() {
# Mark binaries to be recognisable by OcBootManagementLib.
bootsig="${selfdir}/Library/OcBootManagementLib/BootSignature.bin"
efiOCBMs=(
- "BOOTx64.efi"
+ "Bootstrap.efi"
"OpenCore.efi"
)
for efiOCBM in "${efiOCBMs[@]}"; do
|
drivebase: drop servo dependency
Leave a fake open loop dc driven start command in place to keep REPL usable for testing. | @@ -45,6 +45,10 @@ static pbio_error_t pbio_drivebase_setup(pbio_drivebase_t *drivebase,
drivebase->wheel_diameter = wheel_diameter;
drivebase->axle_track = axle_track;
+ // Claim servos
+ drivebase->left->state = PBIO_SERVO_STATE_CLAIMED;
+ drivebase->right->state = PBIO_SERVO_STATE_CLAIMED;
+
return PBIO_SUCCESS;
}
@@ -85,8 +89,8 @@ pbio_error_t pbio_drivebase_start(pbio_drivebase_t *drivebase, int32_t speed, in
int32_t sum = 180 * pbio_math_mul_i32_fix16(pbio_math_div_i32_fix16(speed, drivebase->wheel_diameter), FOUR_DIV_PI);
int32_t dif = 2 * pbio_math_div_i32_fix16(pbio_math_mul_i32_fix16(rate, drivebase->axle_track), drivebase->wheel_diameter);
- err_l = pbio_servo_run(drivebase->left, (sum+dif)/2);
- err_r = pbio_servo_run(drivebase->right, (sum-dif)/2);
+ err_l = pbio_hbridge_set_duty_cycle_sys(drivebase->left->hbridge, ((sum+dif)/2)*10);
+ err_r = pbio_hbridge_set_duty_cycle_sys(drivebase->right->hbridge, ((sum-dif)/2)*10);
return error_or(err_l, err_r);
}
|
btshell: Fix scan command in legacy mode
When starting legacy scan code was also trying to start extended scan
with garbage parameters. | @@ -1185,6 +1185,8 @@ cmd_scan(int argc, char **argv)
console_printf("error scanning; rc=%d\n", rc);
return rc;
}
+
+ return 0;
}
/* Copy above parameters to uncoded params */
|
settings: restore back button on s3
Fixes urbit/landscape#501 | @@ -14,6 +14,7 @@ import {
import GlobalApi from "~/logic/api/global";
import { BucketList } from "./BucketList";
import { S3State } from '~/types/s3-update';
+import { BackButton } from './BackButton';
interface FormSchema {
s3bucket: string;
|
analyze OCSP response after communicate | @@ -154,18 +154,6 @@ def try_handshake(endpoint, port, cipher, ssl_version, server_cert=None, server_
cleanup_processes(s2nd, s_client)
return -1
- # Validate that s_client accepted s2nd's stapled OCSP response
- if ocsp is not None:
- ocsp_success = False
- for line in s_client.stdout:
- line = line.decode("utf-8").strip()
- if S_CLIENT_SUCCESSFUL_OCSP in line:
- ocsp_success = True
- break
- if not ocsp_success:
- cleanup_processes(s2nd, s_client)
- return -1
-
# Write the cipher name towards s2n server
msg = (cipher + "\n").encode("utf-8")
s_client.stdin.write(msg)
@@ -175,6 +163,7 @@ def try_handshake(endpoint, port, cipher, ssl_version, server_cert=None, server_
s2nd.stdin.write(msg)
s2nd.stdin.flush()
+
sleep(0.1)
outs = communicate_processes(s_client, s2nd)
s_out = outs[1]
@@ -182,6 +171,25 @@ def try_handshake(endpoint, port, cipher, ssl_version, server_cert=None, server_
print ("No output from client PIPE, skip")
return 0
+ c_out = outs[0]
+ if '' == c_out:
+ print ("No output from client PIPE, skip")
+ return 0
+ s_out_len = len (s_out)
+ c_out_len = len (c_out)
+ # Validate that s_client accepted s2nd's stapled OCSP response
+ c_line = 0
+ if ocsp is not None:
+ ocsp_success = False
+ for i in range(0, c_out_len):
+ line = c_out[i].strip()
+ c_line = i
+ if S_CLIENT_SUCCESSFUL_OCSP in line:
+ ocsp_success = True
+ break
+ if not ocsp_success:
+ return -1
+
# Analyze server output
found = 0
for line in range(0, len(s_out)):
@@ -194,13 +202,8 @@ def try_handshake(endpoint, port, cipher, ssl_version, server_cert=None, server_
print ("No cipher output from server")
return -1
- c_out = outs[0]
- if '' == c_out:
- print ("No output from client PIPE, skip")
- return 0
-
found = 0
- for line in range(0, len(c_out)):
+ for line in range(c_line, c_out_len):
output = c_out[line]
if output.strip() == cipher:
found = 1
|
Docs: minor edits and fixes. | PUBLIC "-//OASIS//DTD DITA Composite//EN" "ditabase.dtd">
<topic id="topic2" xml:lang="en">
<title id="iw157419">About the Greenplum Architecture</title>
- <shortdesc>Greenplum Database is a massively parallel processing (MPP) database server
- with an architecture specially designed to manage large-scale analytic data warehouses and
- business intelligence workloads. </shortdesc>
+ <shortdesc>Greenplum Database is a massively parallel processing (MPP) database server with an
+ architecture specially designed to manage large-scale analytic data warehouses and business
+ intelligence workloads. </shortdesc>
<body>
<p id="iw157381">MPP (also known as a <i>shared nothing</i> architecture) refers to systems with
two or more processors that cooperate to carry out an operation, each processor with its own
PostgreSQL disk-oriented database instances acting together as one cohesive database
management system (DBMS). It is based on PostgreSQL 8.3.23, and in most cases is very similar
to PostgreSQL with regard to SQL support, features, configuration options, and end-user
- functionality. Database users interact with Greenplum Database as they would a regular
+ functionality. Database users interact with Greenplum Database as they would with a regular
PostgreSQL DBMS. </p>
<p>Greenplum Database can use the append-optimized (AO) storage format for bulk loading and
reading of data, and provides performance advantages over HEAP tables. Append-optimized
which store and process the data.</p>
<fig id="iw157440">
<title>High-Level Greenplum Database Architecture</title>
- <image height="316px" href="../graphics/highlevel_arch.jpg" placement="break" width="397px"
- />
+ <image height="316px" href="../graphics/highlevel_arch.jpg" placement="break" width="397px"/>
</fig>
<p>The following topics describe the components that make up a Greenplum Database system and how
they work together. <ul id="ul_cz4_xhy_dq" otherprops="op-help">
<body>
<p>Greenplum Database end-users interact with Greenplum Database (through the master) as they
would with a typical PostgreSQL database. They connect to the database using client programs
- such as <codeph>psql</codeph> or application programming interfaces (APIs) such as JDBC or
- ODBC.</p>
+ such as <codeph>psql</codeph> or application programming interfaces (APIs) such as JDBC,
+ ODBC or <xref href="https://www.postgresql.org/docs/8.3/static/libpq.html" format="html"
+ scope="external">libpq</xref> (the PostgreSQL C API).</p>
<p>The master is where the <i>global system catalog</i> resides. The global system catalog is
the set of system tables that contain metadata about the Greenplum Database system itself.
The master does not contain any user data; data resides only on the <i>segments</i>. The
<body>
<p>The <i>interconnect</i> refers to the inter-process communication between segments and the
network infrastructure on which this communication relies. The Greenplum interconnect uses a
- standard Ethernet switching fabric. For performance reasons, a 10-Gigabit system, or faster, is recommended.</p>
+ standard Ethernet switching fabric. For performance reasons, a 10-Gigabit system, or faster,
+ is recommended.</p>
<p>By default, the interconnect uses User Datagram Protocol with flow control (UDPIFC) for
interconnect traffic to send messages over the network. The Greenplum software performs
packet verification beyond what is provided by UDP. This means the reliability is equivalent
|
backtick string literal in documentation
can be squashed | @@ -105,7 +105,7 @@ and use to get a basic plugin experience:
## Third-party extensions
-- [cockos.reaper_extension](https://github.com/justinfrankel/reaper-sdk/blob/main/reaper-plugins/reaper_plugin.h#L138), access the [REAPER](http://reaper.fm) API
+- [`cockos.reaper_extension`](https://github.com/justinfrankel/reaper-sdk/blob/main/reaper-plugins/reaper_plugin.h#L138), access the [REAPER](http://reaper.fm) API
# Adapters
|
make sure src/h2olog/generated_raw_tracer.cc is up-to-date in CI | @@ -27,6 +27,7 @@ ossl1.1.1:
CMAKE_ARGS='-DOPENSSL_ROOT_DIR=/opt/openssl-1.1.1'
dtrace:
+ rm -rf src/h2olog/generated_raw_tracer.cc # make sure it's up-todate
docker run $(DOCKER_RUN_OPTS) $(CONTAINER_NAME) env DTRACE_TESTS=1 make -f $(SRC_DIR)/misc/docker-ci/check.mk _check
_check:
|
Fix I2C issues in ESP32 | @@ -99,6 +99,9 @@ HRESULT Library_win_dev_i2c_native_Windows_Devices_I2c_I2cDevice::NativeInit___V
// Set the Bus parameters
SetConfig( bus, pConfig);
+ // If this is first devcie on Bus then init driver
+ if ( Esp_I2C_Initialised_Flag[bus] == 0 )
+ {
esp_err_t res = i2c_driver_install( bus, I2C_MODE_MASTER, 0, 0, 0);
if ( res != ESP_OK)
{
@@ -107,7 +110,8 @@ HRESULT Library_win_dev_i2c_native_Windows_Devices_I2c_I2cDevice::NativeInit___V
// Ensure driver gets unitialized during soft reboot
HAL_AddSoftRebootHandler(Esp32_I2c_UnitializeAll);
- Esp_I2C_Initialised_Flag[bus] = 1;
+ Esp_I2C_Initialised_Flag[bus]++;
+ }
}
NANOCLR_NOCLEANUP();
}
@@ -121,10 +125,15 @@ HRESULT Library_win_dev_i2c_native_Windows_Devices_I2c_I2cDevice::DisposeNative_
i2c_port_t bus = (i2c_port_t)((pThis[ FIELD___deviceId ].NumericByRef().s4 / 1000) - 1);
+ Esp_I2C_Initialised_Flag[bus]--;
+
+ if ( Esp_I2C_Initialised_Flag[bus] <= 0 )
+ {
i2c_driver_delete(bus);
Esp_I2C_Initialised_Flag[bus] = 0;
}
+ }
NANOCLR_NOCLEANUP();
}
@@ -137,7 +146,6 @@ HRESULT Library_win_dev_i2c_native_Windows_Devices_I2c_I2cDevice::NativeTransmit
int writeSize = 0;
int readSize = 0;
esp_err_t i2cStatus;
- int returnStatus = I2cTransferStatus_FullTransfer;
CLR_RT_HeapBlock* result;
// create the return object (I2cTransferResult)
@@ -228,15 +236,6 @@ HRESULT Library_win_dev_i2c_native_Windows_Devices_I2c_I2cDevice::NativeTransmit
memcpy(readBuffer->GetFirstElement(), &readData[0], readSize);
}
}
-
- // null pointers and vars
- writeData = NULL;
- readData = NULL;
- writeBuffer = NULL;
- readBuffer = NULL;
- pThis = NULL;
-
- stack.SetResult_I4(returnStatus);
}
NANOCLR_NOCLEANUP();
}
|
Check malloc failure via app_malloc
Thanks to GitHUb user murugesandins for reporting this. | @@ -3152,7 +3152,7 @@ static int do_multi(int multi, int size_num)
int *fds;
static char sep[] = ":";
- fds = malloc(sizeof(*fds) * multi);
+ fds = app_malloc(sizeof(*fds) * multi, "fd buffer for do_multi");
for (n = 0; n < multi; ++n) {
if (pipe(fd) == -1) {
BIO_printf(bio_err, "pipe failure\n");
|
Add non-allocating closure related functions.
Dup closures into buffers. | use "alloc"
use "die"
use "sldup"
+use "slcp"
use "types"
pkg std =
+ generic fnenvsz : (fn : @fn::function -> size)
generic fndup : (fn : @fn::function -> @fn::function)
+ generic fnbdup : (fn : @fn::function, buf : byte[:] -> @fn::function)
generic fnfree : (fn : @fn::function -> void)
;;
generic fndup = {fn
+ var sl
+
+ sl = std.slalloc(fnenvsz(fn))
+ -> fnbdup(fn, sl)
+}
+
+generic fnenvsz = {fn
+ var repr : intptr[2]
+
+ repr = (&fn : intptr[2]#)#
+ -> envslice(repr[0]).len
+}
+
+generic fnbdup = {fn, buf
var repr : intptr[2]
repr = (&fn : intptr[2]#)#
- repr[0] = (sldup(envslice(repr[0])) : intptr)
+ slcp(buf, envslice(repr[0]))
+ repr[0] = (buf[0] : intptr)
-> (&repr : @fn::function#)#
}
|
remove pygments setup | @@ -7,11 +7,6 @@ then
WREN_PY="$WREN_PY_BINARY"
fi
-# Install the Wren Pygments lexer.
-cd util/pygments-lexer
-sudo $WREN_PY setup.py develop
-cd ../..
-
# Build the docs.
mkdir -p build
$WREN_PY ./util/generate_docs.py
|
Better localized fix for ecn_accounting | @@ -1849,7 +1849,7 @@ void picoquic_ecn_accounting(picoquic_cnx_t* cnx,
unsigned char received_ecn, picoquic_packet_context_enum pc, picoquic_local_cnxid_t * l_cid)
{
picoquic_ack_context_t* ack_ctx = (pc == picoquic_packet_context_application && cnx->is_multipath_enabled) ?
- &l_cid->ack_ctx : &cnx->ack_ctx[pc];
+ ((l_cid == NULL) ? &cnx->path[0]->p_local_cnxid->ack_ctx : &l_cid->ack_ctx): &cnx->ack_ctx[pc];
switch (received_ecn & 0x03) {
case 0x00:
@@ -2264,8 +2264,7 @@ int picoquic_incoming_segment(
if (ret == 0) {
if (cnx != NULL && cnx->cnx_state != picoquic_state_disconnected &&
- ph.ptype != picoquic_packet_version_negotiation &&
- (!cnx->is_multipath_enabled || ph.l_cid != NULL)) {
+ ph.ptype != picoquic_packet_version_negotiation) {
cnx->nb_packets_received++;
/* Mark the sequence number as received */
ret = picoquic_record_pn_received(cnx, ph.pc, ph.l_cid, ph.pn64, receive_time);
|
spi_slave: enable spi slave dual board test on esp32c3 | @@ -581,6 +581,13 @@ UT_C3_FLASH:
- ESP32C3_IDF
- UT_T1_ESP_FLASH
+UT_C3_SPI_DUAL:
+ extends: .unit_test_esp32c3_template
+ parallel: 2
+ tags:
+ - ESP32C3_IDF
+ - Example_SPI_Multi_device
+
.integration_test_template:
extends:
- .target_test_job_template
|
check whether CONFIG_NAME is valid or not at create_appspec.sh
If Kconfig_ENTRY is not existed, CONFIG_NAME is not valid.
IF CONFIG_NAME is not valid, let's skip to make an application list.
Here is an abnormal case.
{ aws_sample, aws_sample_main, CONFIG_, y, y, } | @@ -37,6 +37,9 @@ do
APPNAME=`sed -n '/^APPNAME/p' $APPDIR$FILE$MAKEFILE | sed -n 's/APPNAME = //p'`
FUNCTION=`sed -n '/^FUNCNAME/p' $APPDIR$FILE$MAKEFILE | sed -n 's/FUNCNAME = //p'`
CONFIG_NAME=`sed -n '/depends/p' $APPDIR$FILE$KCONFIG_ENTRY | sed -n 's/.*depends on //p'`
+ if [ "$CONFIG_NAME" = "" ]; then
+ continue
+ fi
CHECK=`sed -n "/# CONFIG_$CONFIG_NAME is not set/p" $DOTCONFIG`
if [ "$CHECK" = "" ]
then CONFIG_USE="y"
|
[stm32] perform an automatic ADC calibration to improve the conversion accuracy using function HAL_ADCEx_Calibration_Start(). | @@ -209,10 +209,12 @@ static rt_err_t stm32_get_adc_value(struct rt_adc_device *device, rt_uint32_t ch
#elif defined(SOC_SERIES_STM32MP1)
ADC_ChanConf.SamplingTime = ADC_SAMPLETIME_810CYCLES_5;
#endif
+
#if defined(SOC_SERIES_STM32F2) || defined(SOC_SERIES_STM32F4) || defined(SOC_SERIES_STM32F7) || defined(SOC_SERIES_STM32L4)
ADC_ChanConf.Offset = 0;
#endif
-#ifdef SOC_SERIES_STM32L4
+
+#if defined(SOC_SERIES_STM32L4)
ADC_ChanConf.OffsetNumber = ADC_OFFSET_NONE;
ADC_ChanConf.SingleDiff = LL_ADC_SINGLE_ENDED;
#elif defined(SOC_SERIES_STM32MP1)
@@ -221,9 +223,17 @@ static rt_err_t stm32_get_adc_value(struct rt_adc_device *device, rt_uint32_t ch
ADC_ChanConf.SingleDiff = ADC_SINGLE_ENDED; /* ADC channel differential mode */
#endif
HAL_ADC_ConfigChannel(stm32_adc_handler, &ADC_ChanConf);
-#ifdef SOC_SERIES_STM32MP1
+
+ /* perform an automatic ADC calibration to improve the conversion accuracy */
+#if defined(SOC_SERIES_STM32L4)
+ if (HAL_ADCEx_Calibration_Start(stm32_adc_handler, ADC_ChanConf.SingleDiff) != HAL_OK)
+ {
+ LOG_E("ADC calibration error!\n");
+ return -RT_ERROR;
+ }
+#elif defined(SOC_SERIES_STM32MP1)
/* Run the ADC linear calibration in single-ended mode */
- if (HAL_ADCEx_Calibration_Start(stm32_adc_handler, ADC_CALIB_OFFSET_LINEARITY, ADC_SINGLE_ENDED) != HAL_OK)
+ if (HAL_ADCEx_Calibration_Start(stm32_adc_handler, ADC_CALIB_OFFSET_LINEARITY, ADC_ChanConf.SingleDiff) != HAL_OK)
{
LOG_E("ADC open linear calibration error!\n");
/* Calibration Error */
|
Handle 2D meshes in simv2_RectilinearMesh_check | @@ -520,19 +520,25 @@ simv2_RectilinearMesh_check(visit_handle h)
for(int i = 0; i < obj->ndims; ++i)
{
int nCoordTuples = 0;
+
if(i == 0)
simv2_VariableData_getData(obj->xcoords, owner, dataType, nComps, nCoordTuples, data);
else if(i == 1)
simv2_VariableData_getData(obj->ycoords, owner, dataType, nComps, nCoordTuples, data);
else
simv2_VariableData_getData(obj->zcoords, owner, dataType, nComps, nCoordTuples, data);
- nCells *= (nCoordTuples-1);
+
+ // calculate number of cells, but watch out for 2D meshes which have only 1
+ // point in one direction.
+ nCells *= (nCoordTuples > 1 ? (nCoordTuples - 1) : 1);
}
if(nCells != nTuples)
{
- VisItError("The number of elements in the ghost cell array does "
- "not match the number of mesh cells.");
+ char tmp[1024] = {'\0'};
+ snprintf(tmp, 1023, "The number of elements in the ghost cell array %d does "
+ "not match the number of mesh cells %d.", nTuples, nCells);
+ VisItError(tmp);
return VISIT_ERROR;
}
}
|
Enable zcl attribute reporting for OSRAM onoff and level clusters | @@ -560,6 +560,21 @@ bool DeRestPluginPrivate::sendConfigureReportingRequest(BindingTask &bt)
maxInterval = 300;
reportableChange8bit = 1;
}
+ else if (bt.binding.clusterId == ONOFF_CLUSTER_ID)
+ {
+ dataType = deCONZ::Zcl8BitUint;
+ attributeId = 0x0000; // on/off
+ minInterval = 5;
+ maxInterval = 180;
+ }
+ else if (bt.binding.clusterId == LEVEL_CLUSTER_ID)
+ {
+ dataType = deCONZ::Zcl8BitUint;
+ attributeId = 0x0000; // current level
+ minInterval = 5;
+ maxInterval = 180;
+ reportableChange8bit = 1;
+ }
else
{
return false;
@@ -639,13 +654,24 @@ void DeRestPluginPrivate::checkLightBindingsForAttributeReporting(LightNode *lig
BindingTask::Action action = BindingTask::ActionUnbind;
- // whitelist by Model ID
+ // whitelist
if (gwReportingEnabled)
{
+ action = BindingTask::ActionBind;
if (lightNode->modelId().startsWith(QLatin1String("FLS-NB")))
{
- action = BindingTask::ActionBind;
}
+ else if (lightNode->manufacturer() == QLatin1String("OSRAM"))
+ {
+ }
+ else
+ {
+ return;
+ }
+ }
+ else
+ {
+ return;
}
QList<deCONZ::ZclCluster>::const_iterator i = lightNode->haEndpoint().inClusters().begin();
|
Fix function documentation
CLA: trivial | @@ -573,11 +573,9 @@ int EC_POINT_oct2point(const EC_GROUP *group, EC_POINT *p,
* \param point EC_POINT object
* \param form point conversion form
* \param pbuf returns pointer to allocated buffer
- * \param len length of the memory buffer
* \param ctx BN_CTX object (optional)
* \return the length of the encoded octet string or 0 if an error occurred
*/
-
size_t EC_POINT_point2buf(const EC_GROUP *group, const EC_POINT *point,
point_conversion_form_t form,
unsigned char **pbuf, BN_CTX *ctx);
@@ -869,7 +867,7 @@ int EC_KEY_generate_key(EC_KEY *key);
int EC_KEY_check_key(const EC_KEY *key);
/** Indicates if an EC_KEY can be used for signing.
- * \param key the EC_KEY object
+ * \param eckey the EC_KEY object
* \return 1 if can can sign and 0 otherwise.
*/
int EC_KEY_can_sign(const EC_KEY *eckey);
@@ -888,11 +886,9 @@ int EC_KEY_set_public_key_affine_coordinates(EC_KEY *key, BIGNUM *x,
* \param key key to encode
* \param form point conversion form
* \param pbuf returns pointer to allocated buffer
- * \param len length of the memory buffer
* \param ctx BN_CTX object (optional)
* \return the length of the encoded octet string or 0 if an error occurred
*/
-
size_t EC_KEY_key2buf(const EC_KEY *key, point_conversion_form_t form,
unsigned char **pbuf, BN_CTX *ctx);
@@ -927,11 +923,10 @@ int EC_KEY_oct2priv(EC_KEY *key, const unsigned char *buf, size_t len);
size_t EC_KEY_priv2oct(const EC_KEY *key, unsigned char *buf, size_t len);
/** Encodes an EC_KEY private key to an allocated octet string
- * \param key key to encode
+ * \param eckey key to encode
* \param pbuf returns pointer to allocated buffer
* \return the length of the encoded octet string or 0 if an error occurred
*/
-
size_t EC_KEY_priv2buf(const EC_KEY *eckey, unsigned char **pbuf);
/********************************************************************/
|
Check for memory faster | @@ -24,7 +24,7 @@ import Foundation
/// Starts listening for memory.
func startListening() {
// Yes, a timer. But it does not seem to slow down the app.
- _ = Timer.scheduledTimer(withTimeInterval: 0.1, repeats: true, block: { (_) in
+ _ = Timer.scheduledTimer(withTimeInterval: 0.01, repeats: true, block: { (_) in
#if WIDGET
let leftLimit: Float = 0.0
|
sdl/guid: use C.size_t for SDL_malloc for cross-platform compatibility | @@ -34,7 +34,7 @@ type GUID C.SDL_GUID
// ToString returns an ASCII string representation for a given GUID.
func (guid GUID) ToString() (ascii string) {
- _cap := C.ulong(33)
+ _cap := C.size_t(33)
_buf := (*C.char)(C.SDL_malloc(_cap))
defer C.SDL_free(unsafe.Pointer(_buf))
C.SDL_GUIDToString(C.SDL_GUID(guid), _buf, C.int(_cap))
|
fixed flickering on automatic exposure and added support for set_auto_exposure API | #include "omv_boardconfig.h"
#define HIMAX_BOOT_RETRY (10)
+#define HIMAX_LINE_LEN_PCK 0x172
+#define HIMAX_FRAME_LENGTH 0x232
#if (OMV_ENABLE_HM01B0 == 1)
static const uint16_t default_regs[][2] = {
{BLC_TGT, 0x08}, // BLC target :8 at 8 bit mode
@@ -43,7 +45,7 @@ static const uint16_t default_regs[][2] = {
{0x1001, 0x43}, // BLC dithering en
{0x1002, 0x43}, // blc_darkpixel_thd
- {0x0350, 0x00}, // Dgain Control
+ {0x0350, 0x7F}, // Dgain Control
{BLI_EN, 0x01}, // BLI enable
{0x1003, 0x00}, // BLI Target [Def: 0x20]
@@ -93,10 +95,10 @@ static const uint16_t default_regs[][2] = {
{FS_50HZ_L, 0x32},
{MD_CTRL, 0x30},
- {0x0340, 0x02},
- {0x0341, 0x16},
- {0x0342, 0x01},
- {0x0343, 0x78},
+ {FRAME_LEN_LINES_H, HIMAX_FRAME_LENGTH>>8},
+ {FRAME_LEN_LINES_L, HIMAX_FRAME_LENGTH&0xFF},
+ {LINE_LEN_PCK_H, HIMAX_LINE_LEN_PCK>>8},
+ {LINE_LEN_PCK_L, HIMAX_LINE_LEN_PCK&0xFF},
{0x3010, 0x00}, // no full frame
{0x0383, 0x01},
{0x0387, 0x01},
@@ -260,7 +262,20 @@ static int get_gain_db(sensor_t *sensor, float *gain_db)
static int set_auto_exposure(sensor_t *sensor, int enable, int exposure_us)
{
- return 0;
+ int ret=0;
+
+ if (enable) {
+ ret |= cambus_writeb2(&sensor->i2c, sensor->slv_addr, AE_CTRL, 1);
+ } else {
+ int coarse_int = exposure_us*(OMV_XCLK_FREQUENCY/1000000)/LINE_LEN_PCK_H;
+ if (coarse_int<2) coarse_int = 2;
+ if (coarse_int>HIMAX_FRAME_LENGTH-2) coarse_int = HIMAX_FRAME_LENGTH-2;
+ ret |= cambus_writeb2(&sensor->i2c, sensor->slv_addr, AE_CTRL, 0);
+ ret |= cambus_writeb2(&sensor->i2c, sensor->slv_addr, INTEGRATION_H, coarse_int>>8);
+ ret |= cambus_writeb2(&sensor->i2c, sensor->slv_addr, INTEGRATION_L, coarse_int&0xff);
+ }
+
+ return ret;
}
static int get_exposure_us(sensor_t *sensor, int *exposure_us)
|
Add more json allow_trailing_comments test cases | @@ -3427,6 +3427,14 @@ test_wuffs_json_decode_quirk_allow_trailing_comments() {
// - '4' means that there is non-filler after eof-or-'\n'
// - '5' means that there is non-filler before eof-or-'\n'
//
+ // The second and third bytes are digits such that the overall string starts
+ // with a three digit number, which is parsed as the top-level JSON value and
+ // everything afterwards is trailer.
+ //
+ // That three digit number modulo 100 also acts as a 'line number' (matching
+ // the tc loop variable), starting counting from zero, so that the 6th test
+ // case, tc == 5, is the line starting with "something hundred and five".
+ //
// WUFFS_JSON__QUIRK_ALLOW_TRAILING_FILLER (together with
// WUFFS_JSON__QUIRK_ALLOW_COMMENT_ETC) should decode the '1's, '2's and '3's
// completely and the '4's and '5's up to but excluding the non-filler.
@@ -3443,11 +3451,13 @@ test_wuffs_json_decode_quirk_allow_trailing_comments() {
"306 /*foo*/ \n", //
"307 /*foo*/ \n\n", //
"308/*bar\nbaz*/\n\n", //
- "309 // qux\n\n", //
- "310 /*c0*/ /*c1*/\n\n", //
- "311 /*c0*/ \n\n // c2 \n\n", //
- "412 \n9", //
- "513 9", //
+ "309 // qux\n", // TODO: drop the "\n".
+ "310 // qux\n", //
+ "311 // qux\n\n", //
+ "312 /*c0*/ /*c1*/\n\n", //
+ "313 /*c0*/ \n\n // c2 \n\n", //
+ "414 \n9", //
+ "515 9", //
};
int tc;
|
data BUGFIX check input node when anydata/anyxml expected
Be more careful about the expected input data in API functions to avoid
later segfaults when treating data as anyxml/anydata when they are
something else. | @@ -266,6 +266,7 @@ lyd_any_value_str(const struct lyd_node *any, char **value_str)
LY_ERR ret = LY_SUCCESS;
LY_CHECK_ARG_RET(NULL, any, value_str, LY_EINVAL);
+ LY_CHECK_ARG_RET(NULL, any->schema, any->schema->nodetype & LYS_ANYDATA, LY_EINVAL);
a = (struct lyd_node_any *)any;
*value_str = NULL;
@@ -318,6 +319,7 @@ lyd_any_copy_value(struct lyd_node *trg, const union lyd_any_value *value, LYD_A
struct lyd_node_any *t;
LY_CHECK_ARG_RET(NULL, trg, LY_EINVAL);
+ LY_CHECK_ARG_RET(NULL, trg->schema, trg->schema->nodetype & LYS_ANYDATA, LY_EINVAL);
t = (struct lyd_node_any *)trg;
|
don't decommit/reset parts of a region when using large os pages | @@ -312,12 +312,14 @@ void _mi_mem_free(void* p, size_t size, size_t id, mi_stats_t* stats) {
// TODO: implement delayed decommit/reset as these calls are too expensive
// if the memory is reused soon.
// reset: 10x slowdown on malloc-large, decommit: 17x slowdown on malloc-large
+ if (!mi_option_is_enabled(mi_option_large_os_pages)) {
if (mi_option_is_enabled(mi_option_eager_region_commit)) {
_mi_os_reset(p, size, stats); // 10x slowdown on malloc-large
}
else {
_mi_os_decommit(p, size, stats); // 17x slowdown on malloc-large
}
+ }
// TODO: should we free empty regions?
// this frees up virtual address space which
|
Fix search language parser accepting only ASCII input, not full Unicode | @@ -28,7 +28,8 @@ namespace carto {
using qi::_1;
using qi::_2;
- unesc_char.add("\\a", '\a')("\\b", '\b')("\\f", '\f')("\\n", '\n')
+ unesc_char.add
+ ("\\a", '\a')("\\b", '\b')("\\f", '\f')("\\n", '\n')
("\\r", '\r')("\\t", '\t')("\\v", '\v')("\\\\", '\\')
("\\\'", '\'')("\\\"", '\"');
@@ -44,7 +45,7 @@ namespace carto {
regexp_ilike_kw = repo::distinct(qi::char_("a-zA-Z0-9_"))[qi::no_case["regexp_ilike"]];
string =
- '\'' >> *(unesc_char | "\\x" >> qi::hex | (qi::print - '\'')) >> '\'';
+ '\'' >> *(unesc_char | "\\x" >> qi::hex | (qi::char_ - '\'')) >> '\'';
value =
null_kw [_val = phx::construct<Value>()]
|
runtime: reduce softirq budget for latency reasons | #define RUNTIME_STACK_SIZE 128 * KB
#define RUNTIME_GUARD_SIZE 128 * KB
#define RUNTIME_RQ_SIZE 32
-#define RUNTIME_SOFTIRQ_BUDGET 32
+#define RUNTIME_SOFTIRQ_BUDGET 16
#define RUNTIME_MAX_TIMERS 4096
#define RUNTIME_SCHED_POLL_ITERS 4
#define RUNTIME_WATCHDOG_US 50
|
Added NASM package to support SW crypto | @@ -95,7 +95,7 @@ endif
RPM_SUSE_DEPENDS = autoconf automake bison ccache chrpath distribution-release gcc6 glibc-devel-static
RPM_SUSE_DEPENDS += java-1_8_0-openjdk-devel libopenssl-devel libtool lsb-release make openssl-devel
-RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow
+RPM_SUSE_DEPENDS += python-devel python-pip python-rpm-macros shadow nasm
ifneq ($(wildcard $(STARTUP_DIR)/startup.conf),)
STARTUP_CONF ?= $(STARTUP_DIR)/startup.conf
|
Remove Setting OpenSSL Groups List | @@ -148,11 +148,6 @@ typedef struct CXPLAT_TLS {
#define CXPLAT_TLS_AES_256_GCM_SHA384 "TLS_AES_256_GCM_SHA384"
#define CXPLAT_TLS_CHACHA20_POLY1305_SHA256 "TLS_CHACHA20_POLY1305_SHA256"
-//
-// Default list of curves for ECDHE ciphers.
-//
-#define CXPLAT_TLS_DEFAULT_SSL_CURVES "P-256:X25519:P-384:P-521"
-
//
// Default cert verify depth.
//
@@ -1170,20 +1165,6 @@ CxPlatTlsSecConfigCreate(
}
}
- Ret =
- SSL_CTX_set1_groups_list(
- SecurityConfig->SSLCtx,
- CXPLAT_TLS_DEFAULT_SSL_CURVES);
- if (Ret != 1) {
- QuicTraceEvent(
- LibraryErrorStatus,
- "[ lib] ERROR, %u, %s.",
- ERR_get_error(),
- "SSL_CTX_set1_groups_list failed");
- Status = QUIC_STATUS_TLS_ERROR;
- goto Exit;
- }
-
Ret = SSL_CTX_set_quic_method(SecurityConfig->SSLCtx, &OpenSslQuicCallbacks);
if (Ret != 1) {
QuicTraceEvent(
|
nrf: Use separate config for each PWM instance.
The hard_configs table has entries for each PWM instance. Use them. | @@ -80,11 +80,10 @@ STATIC machine_pwm_config_t hard_configs[MP_ARRAY_SIZE(machine_hard_pwm_instance
STATIC const machine_hard_pwm_obj_t machine_hard_pwm_obj[] = {
#if defined(NRF52_SERIES)
{{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[0], .p_config = &hard_configs[0]},
-
- {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[1], .p_config = &hard_configs[0]},
- {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[2], .p_config = &hard_configs[0]},
+ {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[1], .p_config = &hard_configs[1]},
+ {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[2], .p_config = &hard_configs[2]},
#if NRF52840
- {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[3], .p_config = &hard_configs[0]},
+ {{&machine_hard_pwm_type}, .p_pwm = &machine_hard_pwm_instances[3], .p_config = &hard_configs[3]},
#endif
#endif
};
|
web-ui: implement undo/redo for copying, duplicating and moving keys | import { createUndoMiddleware } from 'redux-undo-redo-middleware'
import {
- setKey, deleteKey, createKey,
+ setKey, deleteKey, createKey, copyKey, moveKey,
setMetaKey, deleteMetaKey, createMetaKey,
} from './actions'
@@ -33,7 +33,10 @@ const undoMiddleware = createUndoMiddleware({
action: ({ id, path, value }) => setKey(id, path, value),
},
'DELETE_KEY_SUCCESS': {
- action: ({ id, path }, { previousValue }) => createKey(id, path, previousValue),
+ action: ({ id, path }, { previousValue, from, to }) =>
+ (from && to) // we are reverting a copy action
+ ? copyKey(id, from, to)
+ : createKey(id, path, previousValue),
createArgs: storePreviousValue,
},
'CREATE_KEY_SUCCESS': ({ id, path }) => deleteKey(id, path),
@@ -52,8 +55,16 @@ const undoMiddleware = createUndoMiddleware({
createArgs: storePreviousMeta,
},
'CREATE_META_SUCCESS': ({ id, path, key }) => deleteMetaKey(id, path, key),
- // TODO: moveKey
- // TODO: copyKey
+ 'COPY_KEY_SUCCESS': {
+ action: ({ id, from, to }) => deleteKey(id, to),
+ createArgs: (state, { from, to }) => ({ from, to }),
+ },
+ 'MOVE_KEY_SUCCESS': {
+ action: ({ id, from, to }) => moveKey(id, to, from),
+ },
+ 'RESET_MOVE_KEY_SUCCESS': {
+ action: ({ id, from, to }) => moveKey(id, from, to),
+ }
},
originalActions: {
'SET_KEY_SUCCESS': 'RESET_KEY_SUCCESS',
@@ -62,6 +73,8 @@ const undoMiddleware = createUndoMiddleware({
'SET_META_SUCCESS': 'RESET_META_SUCCESS',
'DELETE_META_SUCCESS': 'CREATE_META_SUCCESS',
'CREATE_META_SUCCESS': 'DELETE_META_SUCCESS',
+ 'COPY_KEY_SUCCESS': 'DELETE_KEY_SUCCESS',
+ 'MOVE_KEY_SUCCESS': 'RESET_MOVE_KEY_SUCCESS',
}
})
|
Update Makefile
Hcxtools fails to cross-build from source because the Makefile hard-codes
the build-architecture pkg-config and thus fails to find the required
libraries. Please consider applying the attached patch to make it
substitutable.
to make it substitutable. | @@ -23,17 +23,18 @@ DEFS += -DWANTZLIB
INSTALL ?= install
INSTFLAGS =
+PKG_CONFIG ?= pkg-config
ifeq ($(HOSTOS), Linux)
INSTFLAGS += -D
endif
-OPENSSL_LIBS=$(shell pkg-config --libs openssl)
-OPENSSL_CFLAGS=$(shell pkg-config --cflags openssl)
-CURL_LIBS=$(shell pkg-config --libs libcurl)
-CURL_CFLAGS=$(shell pkg-config --cflags libcurl)
-Z_LIBS=$(shell pkg-config --libs zlib)
-Z_CFLAGS=$(shell pkg-config --cflags zlib)
+OPENSSL_LIBS=$(shell $(PKG_CONFIG) --libs openssl)
+OPENSSL_CFLAGS=$(shell $(PKG_CONFIG) --cflags openssl)
+CURL_LIBS=$(shell $(PKG_CONFIG) --libs libcurl)
+CURL_CFLAGS=$(shell $(PKG_CONFIG) --cflags libcurl)
+Z_LIBS=$(shell $(PKG_CONFIG) --libs zlib)
+Z_CFLAGS=$(shell $(PKG_CONFIG) --cflags zlib)
TOOLS=
TOOLS+=hcxpcapngtool
|
Fix hid.py example. | #
# Add the following script to boot.py:
#
-##import pyb (UNCOMMENT THIS LINE!)
+##import pyb #(UNCOMMENT THIS LINE!)
##pyb.usb_mode('VCP+HID') # serial device + mouse (UNCOMMENT THIS LINE!)
##pyb.usb_mode('VCP+MSC') # serial device + storage device (default)
##pyb.usb_mode('VCP+HID', hid=pyb.hid_keyboard) # serial device + keyboard
|
I updated the Plots chapter in the Sphinx users manual.
**Save session**, and then restart from this saved session later,
or **Save settings** and then all VisIt sessions will use those
defaults. For more about saving sessions and settings, see
-:ref:`Preferences_How_to_save_your_settings`. The **Save** and
+:ref:`How_to_save_your_settings`. The **Save** and
**Load** buttons give you the option of saving and loading plot
attributes using their own separate XML. This allows users to
easily share individual plot attributes. The reset button will
|
luci-app-firewall: drop offloading option | @@ -70,7 +70,7 @@ return view.extend({
}
/* Netfilter flow offload support */
-
+/*
if (L.hasSystemFeature('offloading')) {
s = m.section(form.TypedSection, 'defaults', _('Routing/NAT Offloading'),
_('Experimental feature. Not fully compatible with QoS/SQM.'));
@@ -89,7 +89,7 @@ return view.extend({
o.optional = true;
o.depends('flow_offloading', '1');
}
-
+*/
s = m.section(form.GridSection, 'zone', _('Zones'));
s.addremove = true;
|
Increased connection duration due to Azure pipeline. | @@ -165,7 +165,7 @@ testConnDuration(void** state)
log = strtok_r(NULL, delim, &last);
assert_non_null(log);
int duration = strtol(log, NULL, 0);
- if ((duration < 1000) || (duration > 1300))
+ if ((duration < 1000) || (duration > 1400))
fail_msg("Duration %d is outside of allowed bounds (1000, 1300)", duration);
free(buf);
|
fuzzing BUGFIX using internal functions in tests | #include <stdlib.h>
#include <stdbool.h>
-#include "common.h"
-#include "tree_schema_internal.h"
-#include "libyang.h"
-
-LY_ERR yang_parse_module(struct lys_parser_ctx **context, const char *data, struct lys_module *mod);
+#include "../../src/common.h"
+#include "../../src/tree_schema_internal.h"
int LLVMFuzzerTestOneInput(uint8_t const *buf, size_t len)
{
|
cancellable request | @@ -45,7 +45,6 @@ struct connect_request {
static void on_error(struct connect_request *req, const char *errstr)
{
h2o_send_error_502(req->src_req, "Gateway Error", errstr, 0);
- free(req);
}
static void on_connect(h2o_socket_t *sock, const char *err)
@@ -59,8 +58,8 @@ static void on_connect(h2o_socket_t *sock, const char *err)
h2o_req_t *req = creq->src_req;
uint64_t timeout = creq->handler_ctx->config.tunnel.timeout;
- free(creq);
sock->data = NULL;
+ creq->sock = NULL;
req->res.status = 200;
h2o_httpclient_tunnel_t *tunnel = h2o_open_tunnel_from_socket(sock);
@@ -69,6 +68,15 @@ static void on_connect(h2o_socket_t *sock, const char *err)
static void on_generator_dispose(void *_self)
{
+ struct connect_request *req = _self;
+
+ if (req->getaddr_req != NULL) {
+ h2o_hostinfo_getaddr_cancel(req->getaddr_req);
+ req->getaddr_req = NULL;
+ }
+
+ if (req->sock != NULL)
+ h2o_socket_close(req->sock);
}
static void try_connect(struct connect_request *req);
@@ -127,11 +135,12 @@ static int on_req(h2o_handler_t *_handler, h2o_req_t *req)
return 0;
}
ret = h2o_url_parse_hostport(req->input.path.base, req->input.path.len, &host, &port);
- if (ret == NULL || port == 0)
- return -1;
-
- struct connect_request *creq = h2o_mem_alloc(sizeof(*creq));
+ if (ret == NULL || port == 0) {
+ h2o_send_error_400(req, "Bad Request", "Bad Request", 0);
+ return 0;
+ }
+ struct connect_request *creq =h2o_mem_alloc_shared(&req->pool, sizeof(*creq), on_generator_dispose);
*creq = (struct connect_request){req->conn->ctx->loop, req, 1};
creq->getaddr_receiver = &req->conn->ctx->receivers.hostinfo_getaddr;
creq->host = host;
@@ -143,26 +152,10 @@ static int on_req(h2o_handler_t *_handler, h2o_req_t *req)
return 0;
}
-static void on_context_init(h2o_handler_t *_self, h2o_context_t *ctx)
-{
-}
-
-static void on_context_dispose(h2o_handler_t *_self, h2o_context_t *ctx)
-{
-}
-
-static void on_handler_dispose(h2o_handler_t *_self)
-{
-}
-
void h2o_connect_register(h2o_pathconf_t *pathconf, h2o_proxy_config_vars_t *config)
{
struct st_handler_ctx_t *self = (void *)h2o_create_handler(pathconf, sizeof(*self));
- self->super.on_context_init = on_context_init;
- self->super.on_context_dispose = on_context_dispose;
- self->super.dispose = on_handler_dispose;
self->super.on_req = on_req;
- self->super.supports_request_streaming = 1;
self->config = *config;
}
|
mmapstorage: document MmapAddr structure | #define SIZEOF_MMAPHEADER (sizeof (MmapHeader))
#define SIZEOF_MMAPMETADATA (sizeof (MmapMetaData))
#define SIZEOF_MMAPFOOTER (sizeof (MmapFooter))
-#define SIZEOF_ADDR_STRING (19) // format: 0xADDR -> ADDR in hex, for 64bit addr: 2 bytes (0x) + 16 bytes (ADDR) + 1 byte (ending null)
+/** Size to store a 64-bit (max.) address.
+ * format: 0xADDR -> ADDR in hex, for 64bit addr: 2 bytes (0x) + 16 bytes (ADDR) + 1 byte (ending null)
+ */
+#define SIZEOF_ADDR_STRING (19)
+
+/** Flag for mmap file format. Defines whether file was written with checksum on or off. */
#define ELEKTRA_MMAP_CHECKSUM_ON (1)
+/**
+ * Internal MmapAddr structure.
+ * Used for functions passing around relevant pointers into the mmap region.
+ * The write functions increment the pointers by the number of bytes written,
+ * such that the next iteration or function writes to the correct (free) place.
+ */
struct _mmapAddr
{
- KeySet * const ksPtr;
- char * metaKsPtr;
- char * ksArrayPtr;
- char * metaKsArrayPtr;
- char * keyPtr;
- char * dataPtr;
+ KeySet * const ksPtr; /**<Pointer to the (main) KeySet struct. */
+ char * metaKsPtr; /**<Pointer to the current meta KeySet structs. */
+ char * ksArrayPtr; /**<Pointer to the current KeySet->array. */
+ char * metaKsArrayPtr; /**<Pointer to the current meta KeySet->array. */
+ char * keyPtr; /**<Pointer to the current Key struct. */
+ char * dataPtr; /**<Pointer to the data region, where Key->key and Key->data is stored. */
- const uintptr_t mmapAddrInt;
+ const uintptr_t mmapAddrInt; /**<Address of the mapped region as integer. */
};
typedef struct _mmapAddr MmapAddr;
|
Subscriptions now get ended if we no longer know their story. | ?~ rum ~
[b %diff %hall-rumor u.rum]~
?. ?=($circle -.qer) ~
+ =+ qit=[b %quit ~]~
+ :: kill the subscription if we forgot the story.
+ ?. (~(has by stories) nom.qer) qit
:: kill the subscription if it's past its range.
- =- ?:(done:- [b %quit ~]~ ~)
+ =- ?:(done:- qit ~)
%. ran.qer
=- ~(so-in-range so:ta nom.qer ~ -)
(~(got by stories) nom.qer)
|
always allow posts/comments from team | ?($submit $comment)
=/ col (~(get by cols) col.act)
?~ col &
+ ?: (team:title our.bol src.bol) | ::REVIEW this is implicit yes?
?: publ.conf.u.col
(~(has in mems.conf.u.col) src.bol) :: not on blacklist
!(~(has in mems.conf.u.col) src.bol) :: is on whitelist
|
Save variables before first-class function call
local nargs = #fexp._type.params
local nret = #fexp._type.rettypes
- local push_function = {}
- do
- local cstats, cvalue = generate_exp(fexp, ctx)
- local push = push_to_stack(ctx, fexp._type, cvalue)
- table.insert(push_function, cstats)
- table.insert(push_function, push)
+ local to_push = {}
+ function generate(exp)
+ local cstats, cvalue = generate_exp(exp, ctx)
+ -- we don't use ctx:new_tvar because values were already saved
+ table.insert(to_push, { typ = exp._type, cvalue = cvalue })
+ return cstats
end
- local push_args = {}
+ local body = {}
+ table.insert(body, generate(fexp))
for _, arg_exp in ipairs(fargs) do
- local cstats, cvalue = generate_exp(arg_exp, ctx)
- local push = push_to_stack(ctx, arg_exp._type, cvalue)
- table.insert(push_args, cstats)
- table.insert(push_args, push)
+ table.insert(body, generate(arg_exp))
+ end
+
+ table.insert(body, gc_save_vars(ctx))
+
+ for _, x in ipairs(to_push) do
+ local push = push_to_stack(ctx, x.typ, x.cvalue)
+ table.insert(body, push)
end
ctx:free_slots(nargs + 1)
ctx:reserve_slots(nret)
- local call_function = util.render([[
+ table.insert(body, util.render([[
lua_call(L, ${NARGS}, ${NRET});
]], {
NARGS = nargs,
NRET = nret,
- })
+ }))
- local check_rets = {}
local retval
if nret == 0 then
retval = "VOID"
@@ -1412,7 +1416,7 @@ generate_exp = function(exp, ctx)
local slot = ctx:new_cvar("TValue*")
local ret = ctx:new_tvar(ret_typ)
retval = ret.name
- table.insert(check_rets, util.render([[
+ table.insert(body, util.render([[
${SLOT_DECL} = s2v(L->top-1);
if (!${CHECK_TAG}) {
luaL_error(L,
@@ -1437,18 +1441,9 @@ generate_exp = function(exp, ctx)
end
ctx:free_slots(nret)
- local cstats = util.render([[
- ${PUSH_FUNCTION}
- ${PUSH_ARGS}
- ${CALL_FUNCTION}
- ${CHECK_RETS}
- ]], {
- PUSH_FUNCTION = table.concat(push_function, "\n"),
- PUSH_ARGS = table.concat(push_args, "\n"),
- CALL_FUNCTION = call_function,
- CHECK_RETS = table.concat(check_rets, "\n"),
- })
+ table.insert(body, gc_release_vars(ctx))
+ local cstats = table.concat(body, "\n")
return cstats, retval
end
|
incorporate Gabriel's new targets
@echo "Success: DEBs are in parent directory"
.PHONY: buster-deb
-buster-deb: cleanapi create_obj_dir_structure preparedeb buster-debsource
- debuild -i -b -uc -us
+buster-deb: cleanapi create_obj_dir_structure preparedeb buster-debsource deb buster-cleanup-debsource
+
+.PHONY: buster-cleanup-debsource
+buster-cleanup-debsource:
@mv debian/control.bck debian/control
- @echo "Success: DEBs are in parent directory"
.PHONY: bionic-deb
-bionic-deb: cleanapi create_obj_dir_structure preparedeb bionic-debsource
- debuild -i -b -uc -us
+bionic-deb: cleanapi create_obj_dir_structure preparedeb bionic-debsource deb bionic-cleanup-debsource
+
+.PHONY: bionic -cleanup-debsource
+bionic-cleanup-debsource:
@mv debian/control.bck debian/control
@rm debian/oidc-agent-desktop.triggers
- @echo "Success: DEBs are in parent directory"
.PHONY: deb-buster
deb-buster: buster-deb
|
VERSION bump to version 2.0.258 | @@ -61,7 +61,7 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
# set version of the project
set(LIBYANG_MAJOR_VERSION 2)
set(LIBYANG_MINOR_VERSION 0)
-set(LIBYANG_MICRO_VERSION 257)
+set(LIBYANG_MICRO_VERSION 258)
set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_MICRO_VERSION})
# set version of the library
set(LIBYANG_MAJOR_SOVERSION 2)
|
Add Platform Support Info to Readme | @@ -22,8 +22,8 @@ QUIC has many benefits when compared to existing TLS over TCP scenarios:
> **Important** Several QUIC protocol features are not yet fully implemented:
>
-> * 0-RTT with Schannel and OpenSSL
-> * Client Migration
+> * 0-RTT
+> * Client-side Migration
> * Server Preferred Address
> * Path MTU Discovery
@@ -36,10 +36,34 @@ QUIC has many benefits when compared to existing TLS over TCP scenarios:
* Receive side scaling (RSS).
* UDP send and receive coalescing support.
-## Documentation
+# Platform Support
+
+MsQuic currently officially supports the following platform configurations.
+
+## Windows 10
+
+On Windows 10, MsQuic relies on built-in support from [Schannel](https://docs.microsoft.com/en-us/windows/win32/com/schannel) for TLS 1.3 functionality. MsQuic is shipped in-box in the Windows kernel in the form of the `msquic.sys` driver, to support built-in HTTP and SMB features. User mode applications use `msquic.dll` (built from here) and package it with their app.
+
+> **Important** This configuration requires running the latest [Windows Insider Preview Builds](https://insider.windows.com/en-us/) for Schannel's TLS 1.3 support.
+
+> **Important** This configuration does not support 0-RTT due to Schannel's current lack of support.
+
+## Linux
+
+On Linux, MsQuic relies on [OpenSSL](https://www.openssl.org/) for TLS 1.3 functionality.
+
+> **Important** This configuration relies on an [outstanding pull request](https://github.com/openssl/openssl/pull/8797) to OpenSSL master for TLS 1.3 support. It is still currently unknown as to when it will be merged into master. See [here](https://www.openssl.org/blog/blog/2020/02/17/QUIC-and-OpenSSL/) for more details.
+
+> **Important** This configuration does not support 0-RTT. Complete integration with OpenSSL is an ongoing effort.
+
+## Other
+
+For testing or experimentation purposes, MsQuic may be built with other configurations, but they are not to be considered officially supported unless they are listed above. Any bugs found while using these configurations may be looked at, but no guarantees are provided that they will be fixed.
+
+# Documentation
* For building the library, see the [Build docs](./docs/BUILD.md).
- * For using the library, see the [API docs](./docs/API.md).
+ * For using the library, see the [API docs](./docs/API.md) or the [Sample](./src/tools/sample/sample.cpp).
# Contributing
|
Log how many listeners are in use for the socket print thread. | @@ -83,6 +83,8 @@ _papplPrinterRunRaw(
int i; // Looping var
+ papplLogPrinter(printer, PAPPL_LOGLEVEL_DEBUG, "Running socket print thread with %d listeners.", printer->num_listeners);
+
while (printer->listeners[0].fd >= 0)
{
// Don't accept connections if we can't accept a new job...
|
reformat tab | // v1.0 : 03/24/2017 : creation
// v1.1 : 04/05/2017 : multiple changes to fit new config reg mapping
// v1.2 : 04/13/2017 : Add Hash/Sort and more steps
+// v1.3 : 05/09/2017 : Refine Hash method to 22bit HT_ENTRY_NUM_EXP
//--------------------------------------------------------------------------------------------
static snapu32_t read_bulk ( snap_membus_t *src_mem,
|
[catboost] Add column name to pool metainfo for quantized pools | @@ -22,6 +22,7 @@ enum EColumnType {
message TPoolMetainfo {
map<uint32, EColumnType> ColumnIndexToType = 1;
+ map<uint32, string> ColumnIndexToName = 7;
optional uint64 DocumentCount = 2;
repeated uint32 IgnoredColumnIndices = 3;
optional uint32 StringDocIdFakeColumnIndex = 4; // Index of additional DocId column with actual DocId values
|
Remove redundant RAND_get0_private() call
The test called this twice which doesn't hurt but isn't ideal. | @@ -21,9 +21,6 @@ static int test_rand(void)
unsigned char entropy2[] = { 0xff, 0xfe, 0xfd };
unsigned char outbuf[3];
- if (!TEST_ptr(privctx = RAND_get0_private(NULL)))
- return 0;
-
*p++ = OSSL_PARAM_construct_octet_string(OSSL_RAND_PARAM_TEST_ENTROPY,
entropy1, sizeof(entropy1));
*p = OSSL_PARAM_construct_end();
|
Release Notes: Add plugin information | @@ -72,11 +72,11 @@ TODO: https://book.libelektra.org
### Maturing of Plugins
- The new [Directory Value plugin](https://www.libelektra.org/plugins/directoryvalue) supports storage plugins such as [YAJL](https://www.libelektra.org/plugins/yajl) and [YAML CPP ](https://www.libelektra.org/plugins/yamlcpp). It adds extra leaf values for directories (keys with children) that store the data of their parents. This way plugins that normally are only able to store values in leaf keys are able to support arbitrary key sets.
-- The [yamlcpp plugin](https://www.libelektra.org/plugins/yamlcpp) TODO
-- The [camel plugin](https://www.libelektra.org/plugins/camel) TODO
-- The [mini plugin](https://www.libelektra.org/plugins/mini) TODO
-- The [xerces plugin](https://www.libelektra.org/plugins/xerces) TODO
-- boolean? TODO (is currently described below)
+- The [YAML CPP plugin](https://www.libelektra.org/plugins/yamlcpp) reads and writes [YAML](http://yaml.org) data using [yaml-cpp](https://github.com/jbeder/yaml-cpp). The plugin supports arrays, binary data and metadata.
+- The [Camel plugin](https://www.libelektra.org/plugins/camel) stores data as simplified YAML flow lists containing double quoted keys and values. For proper YAML support please use the [YAML CPP](https://www.libelektra.org/plugins/yamlcpp) instead.
+- The [mINI plugin](https://www.libelektra.org/plugins/mini) reads and writes simple property list, separated by equal (`=`) signs.
+- The [xerces plugin](https://www.libelektra.org/plugins/xerces) allows Elektra to read and write XML data. The plugin uses [Xerces-C++](http://xerces.apache.org/xerces-c) for this task. It supports both arrays and metadata.
+- The [boolean plugin](https://www.libelektra.org/plugins/boolean) normalizes boolean values such as `0`, `1`, `true` and `false`.
- The [crypto plugin](https://www.libelektra.org/plugins/crypto) and [fcrypt plugin](https://www.libelektra.org/plugins/fcrypt) are described below.
### Elektra With Encryption
|
Fix PhCmLoadSettingsEx scaling type | @@ -364,7 +364,6 @@ BOOLEAN PhCmLoadSettingsEx(
PPH_TREENEW_COLUMN column;
ULONG id;
ULONG displayIndex;
- ULONG width;
PhSplitStringRefAtChar(&remainingColumnPart, L'|', &columnPart, &remainingColumnPart);
@@ -432,7 +431,7 @@ BOOLEAN PhCmLoadSettingsEx(
width = (ULONG)integer;
if (scale != dpiValue && scale != 0)
- width = PhMultiplyDivide(width, dpiValue, scale);
+ width = PhMultiplyDivideSigned(width, dpiValue, scale);
column = PhAllocate(sizeof(PH_TREENEW_COLUMN));
column->Id = id;
|
Fix compile errors on Windows (strcasestr not available). | #include <Expression.h>
#include <DebugStream.h>
+#ifdef _MSC_VER
+#include<shlwapi.h>
+#endif
+
using std::string;
using std::vector;
#ifndef MAX
@@ -2039,7 +2043,11 @@ avtTecplotFileFormat::PopulateDatabaseMetaData(avtDatabaseMetaData *md)
variableNames[0] == "X" ||
variableNames[0] == "i" ||
variableNames[0] == "I" ||
+#ifdef _MSC_VER
+ !StrStrIA(variableNames[0].c_str(), "coord"))
+#else
!strcasestr(variableNames[0].c_str(), "coord"))
+#endif
doAllVsAll = false;
//
|
lib/ev3dev/sysfs: scan for last character in port
Don't scan for the 12th character, because for motors it is the 13th.
We cannot seek from SEEK_END because this seeks from 4096.
Also, add error checking. | @@ -41,9 +41,17 @@ pbio_error_t sysfs_get_number(pbio_port_t port, const char *rdir, int *sysfs_num
}
// Get the port from the address file
- fseek(f_address, 12, SEEK_SET);
- pbio_port_t port_found = fgetc(f_address);
- fclose(f_address);
+ char address[MAX_PATH_LENGTH];
+ int len;
+
+ if (fscanf(f_address, "%" MAX_READ_LENGTH "s%n", address, &len) < 1) {
+ return PBIO_ERROR_IO;
+ }
+
+ pbio_port_t port_found = address[len-1];
+ if (fclose(f_address) != 0) {
+ return PBIO_ERROR_IO;
+ }
// If the port matches the requested port, get where it was found.
if (port_found == port) {
|
Workaround Ansible install via pip and py36 | @@ -57,6 +57,10 @@ ARG GNAME=jenkins
ARG UID=1000
ARG GID=1000
+# Workaround for https://githubmemory.com/repo/pypa/pip/issues/10219
+ENV LANG=C.UTF-8
+ENV LC_ALL=C.UTF-8
+
# Install essential packages
RUN apt-get update && \
apt-get -y --no-install-recommends upgrade && \
|
remove limitation from README since it has been fixed | @@ -18,15 +18,3 @@ You will likely want to tailor fuzzer options to your execution environment, but
HTTP/1: `ASAN_OPTIONS=detect_leaks=0 ./h2o-fuzzer-http1 -max_len=$((16 * 1024 )) fuzz/http1-corpus`
HTTP/2: `ASAN_OPTIONS=detect_leaks=0 ./h2o-fuzzer-http2 -max_len=$((16 * 1024 )) fuzz/http2-corpus`
-
-### Limitations
-
-As of this writing, the test driver creates and destroys a "client" thread for each HTTP request that the fuzzer generates. Eventually this exceeds an [artificial limit created by a bug in Address Sanitizer](https://github.com/google/sanitizers/issues/273), resulting in a error message that looks like this:
-
-```
-==233076==AddressSanitizer: Thread limit (4194304 threads) exceeded. Dying.
-MS: 2 CrossOver-ChangeASCIIInt-; base unit: 9cf0910400bc91fa413c37730e040d91e21f361f
-artifact_prefix='./'; Test unit written to ./crash-ef5e5c6b2ced09cf94b401be57ad5ffe21c60ad5
-```
-
-As it stands, this can be worked around by restarting the fuzzer when this error is encountered. The test drivers could also be refactored to use a single thread as a client instead of creating one for each request.
|
support .cpp files in GO SRCS | import base64
+import itertools
import os
from _common import rootrel_arc_src, tobuilddir
import ymake
@@ -96,7 +97,7 @@ def on_go_process_srcs(unit):
basedirs[basedir] = []
basedirs[basedir].append(f)
for basedir in basedirs:
- unit.onadd_check(["gofmt"] + basedirs[basedir])
+ unit.onadd_check(['gofmt'] + basedirs[basedir])
# Add go vet check
if unit.get(['GO_VET']) == 'yes':
@@ -123,14 +124,20 @@ def on_go_process_srcs(unit):
s_files = filter(lambda x: x.endswith('.S'), srcs_files)
c_files = filter(lambda x: x.endswith('.c'), srcs_files)
+ cxx_files = filter(lambda x: any(x.endswith(e) for e in ('.cc', '.cpp', '.cxx', '.C')), srcs_files)
syso_files = filter(lambda x: x.endswith('.syso'), srcs_files)
cgo_files = get_appended_values(unit, 'CGO_SRCS_VALUE')
cgo_cflags = []
- if len(c_files) + len(s_files) + len(cgo_files) > 0:
+ if len(c_files) + len(cxx_files) + len(s_files) + len(cgo_files) > 0:
cgo_cflags = get_appended_values(unit, 'CGO_CFLAGS_VALUE')
- for f in c_files + s_files:
+ if len(cxx_files) > 0:
+ unit.onpeerdir('contrib/libs/cxxsupp')
+ if unit.get(['USE_LIBCXXRT']) == 'yes':
+ unit.onpeerdir('contrib/libs/cxxsupp/libcxxrt')
+
+ for f in itertools.chain(c_files, cxx_files, s_files):
unit.onsrc([f] + cgo_cflags)
if len(cgo_files) > 0:
|
CMake: add compilation flag for Ruby support in Homebrew macOS fix
* Due to an LLVM bug in AppleClang, a flag has to be passed to the
  compiler in order to fix building Ruby
INTERFACE
)
+# Fix Ruby MacOSX LLVM bug
+# '__declspec' attributes are not enabled; use '-fdeclspec' or '-fms-extensions' to enable support for __declspec attributes
+
+include(Portability)
+if("${PROJECT_OS_FAMILY}" STREQUAL "macos" )
+ if("${CMAKE_C_COMPILER_ID}" STREQUAL "AppleClang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
+ target_compile_options(${target} PRIVATE "-fdeclspec")
+ endif()
+endif()
+
+
#
# Linker options
#
|
USE_COMMON_GOOGLE_APIS macro | @@ -424,6 +424,64 @@ macro PY_PROTO_PLUGIN2(NAME, EXT1, EXT2, TOOL, DEPS[]) {
_ADD_PY_PROTO_OUT($EXT2)
}
+_GO_COMMON_GOOGLE_APIS = \
+vendor/google.golang.org/genproto/googleapis/api/annotations \
+vendor/google.golang.org/genproto/googleapis/api/configchange \
+vendor/google.golang.org/genproto/googleapis/api/distribution \
+vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1 \
+vendor/google.golang.org/genproto/googleapis/api/expr/v1beta1 \
+vendor/google.golang.org/genproto/googleapis/api/httpbody \
+vendor/google.golang.org/genproto/googleapis/api/label \
+vendor/google.golang.org/genproto/googleapis/api/metric \
+vendor/google.golang.org/genproto/googleapis/api/monitoredres \
+vendor/google.golang.org/genproto/googleapis/api/serviceconfig \
+vendor/google.golang.org/genproto/googleapis/api/servicecontrol/v1 \
+vendor/google.golang.org/genproto/googleapis/api/servicemanagement/v1 \
+vendor/google.golang.org/genproto/googleapis/iam/admin/v1 \
+vendor/google.golang.org/genproto/googleapis/iam/credentials/v1 \
+vendor/google.golang.org/genproto/googleapis/iam/v1 \
+vendor/google.golang.org/genproto/googleapis/iam/v1/logging \
+vendor/google.golang.org/genproto/googleapis/logging/type \
+vendor/google.golang.org/genproto/googleapis/logging/v2 \
+vendor/google.golang.org/genproto/googleapis/rpc/code \
+vendor/google.golang.org/genproto/googleapis/rpc/errdetails \
+vendor/google.golang.org/genproto/googleapis/rpc/status \
+vendor/google.golang.org/genproto/googleapis/type/calendarperiod \
+vendor/google.golang.org/genproto/googleapis/type/color \
+vendor/google.golang.org/genproto/googleapis/type/date \
+vendor/google.golang.org/genproto/googleapis/type/dayofweek \
+vendor/google.golang.org/genproto/googleapis/type/expr \
+vendor/google.golang.org/genproto/googleapis/type/fraction \
+vendor/google.golang.org/genproto/googleapis/type/latlng \
+vendor/google.golang.org/genproto/googleapis/type/money \
+vendor/google.golang.org/genproto/googleapis/type/postaladdress \
+vendor/google.golang.org/genproto/googleapis/type/quaternion \
+vendor/google.golang.org/genproto/googleapis/type/timeofday
+
+_COMMON_GOOGLE_APIS=None
+when ($_COMMON_GOOGLE_APIS != "None") {
+ when ($GO_PROTO == "yes") {
+ when ($_COMMON_GOOGLE_APIS == "") {
+ PEERDIR+=$_GO_COMMON_GOOGLE_APIS
+ }
+ otherwise {
+ PEERDIR+=$_COMMON_GOOGLE_APIS
+ }
+ }
+ otherwise {
+ PEERDIR+=contrib/libs/googleapis-common-protos
+ }
+}
+
+### @usage: USE_COMMON_GOOGLE_APIS([common-google-apis...]) # internal
+###
+### TBD
+macro USE_COMMON_GOOGLE_APIS(APIS...) {
+ SET_APPEND(PROTO_PATH -I contrib/libs/googleapis-common-protos)
+ ADDINCL(contrib/libs/googleapis-common-protos)
+ SET(_COMMON_GOOGLE_APIS ${pre=vendor/google.golang.org/genproto/googleapis/:APIS})
+}
+
GO_PROTO_GEN_TOOL=vendor/github.com/golang/protobuf/protoc-gen-go
GO_PROTO_GEN_PLUGINS=
GO_PROTO_PACKAGE_MAP=
|
netutils: webserver: Replace license header with Apache License 2.0
Summary:
This commit replaces license header in httpd_dirlist.c
Impact:
No impact
Testing:
Buid check only | /****************************************************************************
* netutils/webserver/httpd_dirlist.c
*
- * Copyright 2019 Sony Home Entertainment & Sound Products Inc.
- * Author: Masayuki Ishikawa <[email protected]>
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * 3. Neither the name NuttX nor the names of its contributors may be
- * used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
- * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
*
****************************************************************************/
|
Refactor if-blocks
Group all conditions requiring a controller in a single if-block. | @@ -707,13 +707,14 @@ sc_input_manager_process_mouse_button(struct sc_input_manager *im,
bool down = event->type == SDL_MOUSEBUTTONDOWN;
if (!im->forward_all_clicks) {
+ if (controller) {
enum sc_action action = down ? SC_ACTION_DOWN : SC_ACTION_UP;
- if (controller && event->button == SDL_BUTTON_X1) {
+ if (event->button == SDL_BUTTON_X1) {
action_app_switch(controller, action);
return;
}
- if (controller && event->button == SDL_BUTTON_X2 && down) {
+ if (event->button == SDL_BUTTON_X2 && down) {
if (event->clicks < 2) {
expand_notification_panel(controller);
} else {
@@ -721,14 +722,15 @@ sc_input_manager_process_mouse_button(struct sc_input_manager *im,
}
return;
}
- if (controller && event->button == SDL_BUTTON_RIGHT) {
+ if (event->button == SDL_BUTTON_RIGHT) {
press_back_or_turn_screen_on(controller, action);
return;
}
- if (controller && event->button == SDL_BUTTON_MIDDLE) {
+ if (event->button == SDL_BUTTON_MIDDLE) {
action_home(controller, action);
return;
}
+ }
// double-click on black borders resize to fit the device screen
if (event->button == SDL_BUTTON_LEFT && event->clicks == 2) {
|
test-suite: fix stdout summary alignment for plasma | @@ -48,7 +48,7 @@ echo
echo '-------------------------------------------------- SUMMARY --------------------------------------------------'
echo
echo Package version............... : $PACKAGE-$VERSION
-echo OHPC compiler toolchain........ : $LMOD_FAMILY_COMPILER
+echo OHPC compiler toolchain....... : $LMOD_FAMILY_COMPILER
echo
echo C compiler.................... : `which $CC`
echo F77 compiler ................. : `which $F77`
|
BugID:17960618:set pca10040 flash parttion | const hal_logic_partition_t hal_partitions[] =
{
+ [HAL_PARTITION_BOOTLOADER] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "Bootloader",
+ .partition_start_addr = 0x000000,
+ .partition_length = 0x10000, //64k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
+ [HAL_PARTITION_APPLICATION] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "Application",
+ .partition_start_addr = 0x10000,
+ .partition_length = 0x35000, //212k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
+ [HAL_PARTITION_OTA_TEMP] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "OTA Storage",
+ .partition_start_addr = 0x45000,
+ .partition_length = 0x35000, //212k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
+ [HAL_PARTITION_PARAMETER_1] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "PARAMETER1",
+ .partition_start_addr = 0x7F000,
+ .partition_length = 0x1000, //4k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
[HAL_PARTITION_PARAMETER_2] =
{
.partition_owner = HAL_FLASH_EMBEDDED,
.partition_description = "PARAMETER2",
- .partition_start_addr = 0x7D000,
+ .partition_start_addr = 0x7B000,
.partition_length = 0x2000, //8k bytes
.partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
},
+ [HAL_PARTITION_PARAMETER_3] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "PARAMETER3",
+ .partition_start_addr = 0x7D000,
+ .partition_length = 0x1000, //4k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
+ [HAL_PARTITION_PARAMETER_4] =
+ {
+ .partition_owner = HAL_FLASH_EMBEDDED,
+ .partition_description = "PARAMETER4",
+ .partition_start_addr = 0x7E000,
+ .partition_length = 0x1000, //4k bytes
+ .partition_options = PAR_OPT_READ_EN | PAR_OPT_WRITE_EN,
+ },
+ /*addr 0x7F000 ~ 0x080000 bootloader settings*/
};
|
HV: modify HV RAM and serial config for apl-nuc
To support grub multiboot for nuc6cayh, we should put hv ram start
at a suitable address;
Enable HSUART controller at PCI 0:18.0 as HV serail port;
Acked-by: Eddie Dong | # Generated by Kconfiglib (https://github.com/ulfalizer/Kconfiglib)
CONFIG_BOARD="nuc6cayh"
-CONFIG_SERIAL_LEGACY=y
+# There is no ready-made serial connector on NUC6CAYH, but developer could
+# enable HSUART at PCI 0:18.0 by soldering Tx/Rx wires from M.2 connector;
+CONFIG_SERIAL_PCI=y
+CONFIG_SERIAL_PCI_BDF="0:18.0"
+CONFIG_HV_RAM_START=0x12200000
|
Warning: arithmetic on a pointer to void is a GNU extension. | @@ -1935,7 +1935,7 @@ comm_point_tcp_handle_write(int fd, struct comm_point* c)
log_assert(c->tcp_write_and_read || sldns_buffer_remaining(buffer) > 0);
log_assert(!c->tcp_write_and_read || c->tcp_write_byte_count < c->tcp_write_pkt_len + 2);
if(c->tcp_write_and_read) {
- r = send(fd, (void*)c->tcp_write_pkt + c->tcp_write_byte_count - 2,
+ r = send(fd, (void*)(c->tcp_write_pkt + c->tcp_write_byte_count - 2),
c->tcp_write_pkt_len + 2 - c->tcp_write_byte_count, 0);
} else {
r = send(fd, (void*)sldns_buffer_current(buffer),
|
Fix warnings for "atomic_init" | }
#endif
#if defined(__Userspace_os_Windows)
-static void atomic_init() {} /* empty when we are not using atomic_mtx */
+static void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#else
-static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
+static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif
#else
@@ -132,7 +132,7 @@ static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
} \
}
#endif
-static inline void atomic_init() {} /* empty when we are not using atomic_mtx */
+static inline void atomic_init(void) {} /* empty when we are not using atomic_mtx */
#endif
#if 0 /* using libatomic_ops */
|
CI: use GitHub Actions to test with stack on Windows | @@ -103,3 +103,31 @@ jobs:
- name: Test all
run: cabal v2-test all
+
+ windows:
+ name: Windows (stack)
+ runs-on: windows-2019
+ steps:
+ - uses: actions/checkout@v2
+
+ - name: Cache stack global package db
+ id: stack-global-package-db
+ uses: actions/cache@v2
+ with:
+ path: 'C:\Users\runneradmin\AppData\Roaming\stack\'
+ key: ${{ runner.os }}-stack-${{ hashFiles('stack.yaml') }}
+
+ - name: Install dependencies
+ run: |
+ stack update
+ stack test --dependencies-only --fast
+
+ - name: Build
+ shell: cmd
+ run: |
+ stack test --no-run-tests --fast
+
+ - name: Test
+ shell: cmd
+ run: |
+ stack test --fast --test-arguments="--ansi-tricks=false"
|
[numerics] complete NM_get_value function | @@ -480,8 +480,24 @@ void NM_zentry(NumericsMatrix* M, int i, int j, double val)
double NM_get_value(NumericsMatrix* M, int i, int j)
{
assert(M);
+
+ if ((i + 1 > M->size0) || (j + 1 > M->size1) )
+ {
+ fprintf(stderr, "NM_get_value :: out of range \n");
+ exit(EXIT_FAILURE);
+ }
switch (M->storageType)
{
+ case NM_DENSE:
+ {
+ assert(M->matrix0);
+ return M->matrix0[i+j*M->size0];
+ }
+ case NM_SPARSE_BLOCK:
+ {
+ assert(M->matrix1);
+ return SBM_get_value(M->matrix1,i,j);
+ }
case NM_SPARSE:
{
assert(M->matrix2);
|
Fix a mem leak in libssl
Make sure we free up any ENGINE references after we have finished using
them.
Fixes | @@ -5849,12 +5849,17 @@ const EVP_CIPHER *ssl_evp_cipher_fetch(OPENSSL_CTX *libctx,
const char *properties)
{
#ifndef OPENSSL_NO_ENGINE
+ ENGINE *eng;
+
/*
* If there is an Engine available for this cipher we use the "implicit"
* form to ensure we use that engine later.
*/
- if (ENGINE_get_cipher_engine(nid) != NULL)
+ eng = ENGINE_get_cipher_engine(nid);
+ if (eng != NULL) {
+ ENGINE_finish(eng);
return EVP_get_cipherbynid(nid);
+ }
#endif
/* Otherwise we do an explicit fetch */
@@ -5894,12 +5899,17 @@ const EVP_MD *ssl_evp_md_fetch(OPENSSL_CTX *libctx,
const char *properties)
{
#ifndef OPENSSL_NO_ENGINE
+ ENGINE *eng;
+
/*
* If there is an Engine available for this digest we use the "implicit"
* form to ensure we use that engine later.
*/
- if (ENGINE_get_digest_engine(nid) != NULL)
+ eng = ENGINE_get_digest_engine(nid);
+ if (eng != NULL) {
+ ENGINE_finish(eng);
return EVP_get_digestbynid(nid);
+ }
#endif
/* Otherwise we do an explicit fetch */
|
esp_ipc: fix race condition in ipc task | @@ -78,7 +78,6 @@ static void IRAM_ATTR ipc_task(void* arg)
if (s_ipc_wait[cpuid] == IPC_WAIT_FOR_END) {
xSemaphoreGive(s_ipc_ack[cpuid]);
}
- s_func[cpuid] = NULL;
}
}
@@ -149,6 +148,7 @@ static esp_err_t esp_ipc_call_and_wait(uint32_t cpu_id, esp_ipc_func_t func, voi
s_ipc_wait[cpu_id] = wait_for;
xSemaphoreGive(s_ipc_sem[cpu_id]);
xSemaphoreTake(s_ipc_ack[cpu_id], portMAX_DELAY);
+ s_func[cpu_id] = NULL;
#ifdef CONFIG_ESP_IPC_USES_CALLERS_PRIORITY
xSemaphoreGive(s_ipc_mutex[cpu_id]);
#else
|
Travis: Add explicit stage `Test`. | if: tag IS blank
+language: cpp
+dist: bionic
+sudo: true
+osx_image: xcode11
+
+jobs:
+ include:
+ - stage: Test
os:
- linux
- osx
@@ -10,28 +18,18 @@ compiler:
env:
- TINYSPLINE_FLOAT_PRECISION=OFF
- TINYSPLINE_FLOAT_PRECISION=ON
-
-language: cpp
-dist: bionic
-sudo: true
-osx_image: xcode11
-
install:
- if [ "$TRAVIS_OS_NAME" == "linux" ]; then sudo apt-get -qq update && sudo apt-get install -y cmake; fi;
- if [ "$TRAVIS_OS_NAME" == "windows" ]; then choco install cmake && choco install swig; fi;
-
script:
- if [ "$TRAVIS_OS_NAME" == "windows" ] && [ "$CC" == "gcc" ]; then cmake -G "Visual Studio 15 2017" -A Win32 .; fi;
- if [ "$TRAVIS_OS_NAME" == "windows" ] && [ "$CC" == "clang" ]; then cmake -G "Visual Studio 15 2017" -A x64 .; fi;
- if [ "$TRAVIS_OS_NAME" != "windows" ]; then cmake .; fi;
- cmake --build .
- cmake --build . --target tests
-
-jobs:
- include:
- stage: Deploy Binaries (Linux Docker)
+ os: linux
services: docker
- install: skip
script:
- pushd tools/ci
- ./build.linux-x86_64.sh
@@ -58,8 +56,8 @@ jobs:
- stage: Deploy Binaries (Windows VM)
os: windows
- stage: Deploy Website
+ os: linux
service: docker
- install: skip
script:
- pushd tools/ci
- ./build.docs.sh
|
[libgui] Use dependencies in std mode | @@ -3,6 +3,9 @@ name = "libgui"
version = "0.1.0"
edition = "2021"
+[features]
+run_with_std = ["pixels", "winit"]
+
[dependencies]
cstr_core = "0.2.4"
rand_core = { version = "0.5.1", default-features = false }
@@ -13,3 +16,7 @@ axle_rt_derive = {path = "../axle_rt_derive" }
awm_messages = {path = "../awm_messages" }
agx_definitions = {path = "../agx_definitions" }
libgui_derive = {path = "../libgui_derive"}
+
+# These dependencies are only enabled in use_std mode
+pixels = { version = "0.9.0", optional = true }
+winit = { version = "*", optional = true }
|
[CI] Debug meson script | @@ -19,7 +19,7 @@ def install_dependencies():
"xorriso",
# OS build
"nasm",
- "mtools",
+ "mtools"
]
run_and_check(["sudo", "apt", "install", "-y", *dependencies])
|
math/besseli0: use base liquid_besseli(nu,z) with nu=0 | @@ -114,25 +114,9 @@ float liquid_besselif(float _nu,
}
// I_0(z) : Modified bessel function of the first kind (order zero)
-#define NUM_BESSELI0_ITERATIONS 32
float liquid_besseli0f(float _z)
{
- // TODO : use better low-signal approximation
- if (_z == 0.0f)
- return 1.0f;
-
- unsigned int k;
- float t, y=0.0f;
- for (k=0; k<NUM_BESSELI0_ITERATIONS; k++) {
-#if 0
- t = powf(_z/2, (float)k) / tgamma((float)k+1);
- y += t*t;
-#else
- t = k * logf(0.5f*_z) - liquid_lngammaf((float)k + 1.0f);
- y += expf(2*t);
-#endif
- }
- return y;
+ return liquid_besselif(0,_z);
}
// J_v(z) : Bessel function of the first kind
|
armv8-m: Fix MPU Attribute Indirection reg offsets
Both MPU_MAIR0 and MPU_MAIR1 were off by 0x10. | #define MPU_RBAR_A3_OFFSET 0x0024
#define MPU_RLAR_A3_OFFSET 0x0028
-#define MPU_MAIR_OFFSET(n) (0x0040 + 4 * ((n) >> 2))
-#define MPU_MAIR0_OFFSET 0x0040 /* MPU Memory Attribute Indirection Register 0 */
-#define MPU_MAIR1_OFFSET 0x0044 /* MPU Memory Attribute Indirection Register 1 */
+#define MPU_MAIR_OFFSET(n) (0x0030 + 4 * ((n) >> 2))
+#define MPU_MAIR0_OFFSET 0x0030 /* MPU Memory Attribute Indirection Register 0 */
+#define MPU_MAIR1_OFFSET 0x0034 /* MPU Memory Attribute Indirection Register 1 */
/* MPU Register Addresses */
|
better early skip? | @@ -1706,14 +1706,6 @@ static void search_pu_inter(encoder_state_t * const state,
double bits = merge_flag_cost + merge_idx + CTX_ENTROPY_FBITS(&(state->search_cabac.ctx.cu_merge_idx_ext_model), merge_idx != 0);
if(state->encoder_control->cfg.rdo >= 2 && cur_pu->part_size == SIZE_2Nx2N) {
kvz_cu_cost_inter_rd2(state, x, y, depth, &merge->unit[merge->size], lcu, &merge->cost[merge->size], &bits);
- if(state->encoder_control->cfg.early_skip && merge->unit[merge->size].skipped) {
- *cur_pu = merge->unit[merge->size];
- merge->unit[0] = *cur_pu;
- merge->size = 1;
- merge->cost[0] = merge->cost[merge->size];
- merge->bits[0] = bits;
- return;
- }
}
else {
merge->cost[merge->size] = kvz_satd_any_size(width, height,
@@ -1737,9 +1729,16 @@ static void search_pu_inter(encoder_state_t * const state,
// Early Skip Mode Decision
bool has_chroma = state->encoder_control->chroma_format != KVZ_CSP_400;
- if (cfg->early_skip && cur_pu->part_size == SIZE_2Nx2N && cfg->rdo < 2) {
+ if (cfg->early_skip && cur_pu->part_size == SIZE_2Nx2N) {
for (int merge_key = 0; merge_key < num_rdo_cands; ++merge_key) {
-
+ if(cfg->rdo >= 2 && merge->unit[merge->keys[merge_key]].skipped) {
+ merge->size = 1;
+ merge->bits[0] = merge->bits[merge->keys[merge_key]];
+ merge->cost[0] = merge->cost[merge->keys[merge_key]];
+ merge->unit[0] = merge->unit[merge->keys[merge_key]];
+ merge->keys[0] = 0;
+ }
+ else if(cfg->rdo < 2) {
// Reconstruct blocks with merge candidate.
// Check luma CBF. Then, check chroma CBFs if luma CBF is not set
// and chroma exists.
@@ -1776,6 +1775,7 @@ static void search_pu_inter(encoder_state_t * const state,
}
}
}
+ }
// AMVP search starts here
|
Fix CI_UPSTREAM_COMMIT_SHA | @@ -247,7 +247,7 @@ installer:
CI_UPSTREAM_PIPELINE_SOURCE: $CI_PIPELINE_SOURCE
CI_UPSTREAM_COMMIT_BRANCH: $CI_COMMIT_BRANCH
CI_UPSTREAM_DEFAULT_BRANCH: $CI_DEFAULT_BRANCH
- CI_UPSTREAM_COMMIT_SHA: ${CI_UPSTREAM_COMMIT_SHA}
+ CI_UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA
PLUGIN_REF: main
AGENT_REF: $CI_COMMIT_REF_NAME
trigger:
|
fix segfault - copy last NULL as well | @@ -640,7 +640,7 @@ void conf_load_args( int argc, char **argv ) {
int i;
// Duplicate memory to get an array that can be appended to
- g_argv = (char**) memdup(argv, argc * sizeof(char*));
+ g_argv = (char**) memdup(argv, (argc + 1) * sizeof(char*));
g_argc = argc;
for( i = 1; i < g_argc; i++ ) {
|
Allow alpha palettes in non bilinear grayscale draw_image | @@ -842,7 +842,13 @@ void imlib_draw_image(image_t *img, image_t *other, int x_off, int y_off, float
if (!mask || image_get_mask_pixel(mask, other_x, other_y)) {
uint8_t result_pixel = safe_map_pixel(IMAGE_BPP_GRAYSCALE, other_bpp, imlib_get_pixel_fast(other_bpp, other_row_ptr, other_x));
- if (alpha != 256) {
+ if (alpha_palette) {
+ uint32_t temp_alpha = (alpha * alpha_palette[result_pixel]) >> 8;
+
+ packed_alpha = (temp_alpha << 16) + (256 - temp_alpha);
+ }
+
+ if (packed_alpha & 0x1ff) {
uint8_t img_pixel = IMAGE_GET_GRAYSCALE_PIXEL_FAST(img_row_ptr, x);
uint32_t vgs = (result_pixel << 16) + img_pixel;
@@ -968,7 +974,7 @@ void imlib_draw_image(image_t *img, image_t *other, int x_off, int y_off, float
}
result_pixel = color_palette ? color_palette[result_pixel] : safe_map_pixel(IMAGE_BPP_RGB565, other_bpp, result_pixel);
- if (va & 0xff) {
+ if (va & 0x1ff) {
// Blend img to other pixel
uint16_t img_pixel = IMAGE_GET_RGB565_PIXEL_FAST(img_row_ptr, x);
uint32_t r_ta = COLOR_RGB565_TO_R5(result_pixel);
|
brya: Enable i2cspeed console command
BRANCH=none
TEST=with follow-on patches, switched I2C bus speed between 400 kHz
and 1 MHz. | #define CONFIG_USB_PD_TCPM_PS8815_FORCE_DID
#define CONFIG_USBC_RETIMER_INTEL_BB
+/* I2C speed console command */
+#define CONFIG_CMD_I2C_SPEED
+
#define CONFIG_USBC_PPC_SYV682X
#define CONFIG_USBC_PPC_NX20P3483
|
JNA: Improve Maven integration into build system
This commit closes | @@ -46,14 +46,46 @@ if (Java_JAVAC_EXECUTABLE)
INPUT
"${CMAKE_CURRENT_BINARY_DIR}/libelektra4j/pom.xml")
- # then copy over the source files in the build folder as thats the easiest way to handle it
- file (COPY "${CMAKE_CURRENT_SOURCE_DIR}/libelektra4j" DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
-
- # compile it and build the jar
+ set (JNA_BINDING_PREFIX libelektra4j/src)
+ set (JNA_BINDING_NAMESPACE java/org/libelektra)
+
+ set (JNA_BINDING_SOURCE_DIRECTORY ${JNA_BINDING_PREFIX}/main/${JNA_BINDING_NAMESPACE})
+ set (JNA_BINDING_SOURCE_DIRECTORY_PLUGIN ${JNA_BINDING_SOURCE_DIRECTORY}/plugin)
+ set (JNA_BINDING_SOURCE_DIRECTORY_TEST ${JNA_BINDING_PREFIX}/test/${JNA_BINDING_NAMESPACE})
+
+ # The build system calls Maven to recompile the binding, if we change any of the following source files.
+ set (JNA_BINDING_SOURCE_FILES
+ ${JNA_BINDING_SOURCE_DIRECTORY}/Elektra.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/KDB.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/Key.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/KeyNameIterator.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/KeySet.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/KeySetIterator.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/KeyUtils.java
+ ${JNA_BINDING_SOURCE_DIRECTORY}/Plugin.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_PLUGIN}/Echo.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_PLUGIN}/ExceptionTest.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_PLUGIN}/PropertiesStorage.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_PLUGIN}/Return.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_PLUGIN}/Template.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_TEST}/AllTests.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_TEST}/KDBTest.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_TEST}/KeySetTest.java
+ ${JNA_BINDING_SOURCE_DIRECTORY_TEST}/KeyTest.java)
+
+ # Compile the source files and build the jar. We copy the source files into the build folder as that is the easiest
+ # way to handle the integration between CMake and Maven.
add_custom_command (OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/libelektra4j/target/libelektra4j-${KDB_VERSION}.jar"
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E
+ copy_directory
+ "${CMAKE_CURRENT_SOURCE_DIR}/libelektra4j"
+ "${CMAKE_CURRENT_BINARY_DIR}/libelektra4j"
COMMAND ${MAVEN_EXECUTABLE} -q -B -DskipTests=true package
WORKING_DIRECTORY libelektra4j
- DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/libelektra4j/pom.xml")
+ DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/libelektra4j/pom.xml"
+ ${JNA_BINDING_SOURCE_FILES})
+
add_custom_target (jna
ALL
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/libelektra4j/target/libelektra4j-${KDB_VERSION}.jar")
|
Add WSL workaround | @@ -170,6 +170,11 @@ BOOLEAN PhInitializeLxssImageVersionInfo(
return FALSE;
}
+ if (PhEqualString2(lxssFileName, L"/init", FALSE))
+ {
+ PhMoveReference(&lxssFileName, PhCreateString(L"/sbin/init"));
+ }
+
PhMoveReference(&lxssCommandLine, PhFormatString(
L"rpm -qf %s --queryformat \"%%{VERSION}|%%{VENDOR}|%%{SUMMARY}\"",
lxssFileName->Buffer
|
[DeviceDriver][SFUD] Replace the vsnprintf to rt_vsnprintf. | @@ -189,7 +189,7 @@ void sfud_log_debug(const char *file, const long line, const char *format, ...)
va_start(args, format);
rt_kprintf("[SFUD] (%s:%ld) ", file, line);
/* must use vprintf to print */
- vsnprintf(log_buf, sizeof(log_buf), format, args);
+ rt_vsnprintf(log_buf, sizeof(log_buf), format, args);
rt_kprintf("%s\n", log_buf);
va_end(args);
}
@@ -207,7 +207,7 @@ void sfud_log_info(const char *format, ...) {
va_start(args, format);
rt_kprintf("[SFUD] ");
/* must use vprintf to print */
- vsnprintf(log_buf, sizeof(log_buf), format, args);
+ rt_vsnprintf(log_buf, sizeof(log_buf), format, args);
rt_kprintf("%s\n", log_buf);
va_end(args);
}
|
fixed detection M23E2 if M2 is behind M3 in timeline | @@ -1962,7 +1962,6 @@ for(zeiger = messagelist; zeiger < messagelist +MESSAGELIST_MAX; zeiger++)
}
if((zeiger->message &HS_M1) != HS_M1) continue;
if(zeiger->rc >= rc -1) rcgap = zeiger->rc -rc +1;
- else rcgap = rc -zeiger->rc +1;
if(rcgap > ncvalue) continue;
if(memcmp(zeiger->client, macclient, 6) != 0) continue;
if(memcmp(zeiger->ap, macap, 6) != 0) continue;
@@ -2039,7 +2038,6 @@ for(zeiger = messagelist; zeiger < messagelist +MESSAGELIST_MAX; zeiger++)
}
if((zeiger->message) != HS_M2) continue;
if(zeiger->rc >= rc -1) rcgap = zeiger->rc -rc +1;
- else rcgap = rc -zeiger->rc +1;
if(rcgap > ncvalue) continue;
if(eaptimestamp > zeiger->timestamp) eaptimegap = eaptimestamp -zeiger->timestamp;
else eaptimegap = zeiger->timestamp -eaptimestamp;
@@ -2148,9 +2146,9 @@ if(infolen >= RSNIE_LEN_MIN)
}
for(zeiger = messagelist; zeiger < messagelist +MESSAGELIST_MAX; zeiger++)
{
- if((zeiger->message &HS_M1) != HS_M1) continue;
+ if((zeiger->message &HS_M1) == HS_M1)
+ {
if(zeiger->rc >= rc) rcgap = zeiger->rc -rc;
- else rcgap = rc -zeiger->rc;
if(rcgap > ncvalue) continue;
if(memcmp(zeiger->client, macclient, 6) != 0) continue;
if(memcmp(zeiger->ap, macap, 6) != 0) continue;
@@ -2170,6 +2168,27 @@ for(zeiger = messagelist; zeiger < messagelist +MESSAGELIST_MAX; zeiger++)
if(eaptimegap > eaptimegapmax) eaptimegapmax = eaptimegap;
if(eaptimegap <= eapoltimeoutvalue) addhandshake(eaptimegap, rcgap, messagelist +MESSAGELIST_MAX, zeiger, keyver, mpfield);
}
+ if((zeiger->message &HS_M3) != HS_M3) continue;
+ if(zeiger->rc >= rc +1) rcgap = zeiger->rc -rc -1;
+ if(rcgap > ncvalue) continue;
+ if(memcmp(zeiger->client, macclient, 6) != 0) continue;
+ if(memcmp(zeiger->ap, macap, 6) != 0) continue;
+ if(eaptimestamp > zeiger->timestamp) eaptimegap = eaptimestamp -zeiger->timestamp;
+ else eaptimegap = zeiger->timestamp -eaptimestamp;
+ mpfield = ST_M32E2;
+ if(myaktreplaycount > 0)
+ {
+ if((rc == myaktreplaycount) && (memcmp(&myaktanonce, zeiger->nonce, 32) == 0))
+ {
+ eaptimegap = 0;
+ mpfield |= ST_APLESS;
+ }
+ if(rcgap != 0) continue;
+ }
+ if(rcgap > rcgapmax) rcgapmax = rcgap;
+ if(eaptimegap > eaptimegapmax) eaptimegapmax = eaptimegap;
+ if(eaptimegap <= eapoltimeoutvalue) addhandshake(eaptimegap, rcgap, messagelist +MESSAGELIST_MAX, zeiger, keyver, mpfield);
+ }
qsort(messagelist, MESSAGELIST_MAX +1, MESSAGELIST_SIZE, sort_messagelist_by_epcount);
return;
}
|
fixed cpp build error with adapters/libhv.h | @@ -56,7 +56,7 @@ static void redisLibhvCleanup(void *privdata) {
static void redisLibhvTimeout(htimer_t* timer) {
hio_t* io = (hio_t*)hevent_userdata(timer);
- redisAsyncHandleTimeout(hevent_userdata(io));
+ redisAsyncHandleTimeout((redisAsyncContext*)hevent_userdata(io));
}
static void redisLibhvSetTimeout(void *privdata, struct timeval tv) {
@@ -94,7 +94,7 @@ static int redisLibhvAttach(redisAsyncContext* ac, hloop_t* loop) {
}
/* Create container struct to keep track of our io and any timer */
- events = hi_malloc(sizeof(*events));
+ events = (redisLibhvEvents*)hi_malloc(sizeof(*events));
if (events == NULL) {
return REDIS_ERR;
}
|
data tree BUGFIX set when_true flag for implciti nodes
So that they are silently removed during validation. | @@ -1405,7 +1405,7 @@ lyd_new_implicit_r(struct lyd_node *parent, struct lyd_node **first, const struc
if (!(iter->flags & LYS_PRESENCE) && lyd_find_sibling_val(*first, iter, NULL, 0, NULL)) {
/* create default NP container */
LY_CHECK_RET(lyd_create_inner(iter, &node));
- node->flags = LYD_DEFAULT;
+ node->flags = LYD_DEFAULT | (node->schema->when ? LYD_WHEN_TRUE : 0);
lyd_insert_node(parent, first, node);
/* cannot be a NP container with when */
@@ -1429,7 +1429,7 @@ lyd_new_implicit_r(struct lyd_node *parent, struct lyd_node **first, const struc
} else if (ret) {
return ret;
}
- node->flags = LYD_DEFAULT;
+ node->flags = LYD_DEFAULT | (node->schema->when ? LYD_WHEN_TRUE : 0);
lyd_insert_node(parent, first, node);
if (iter->when && node_when) {
@@ -1457,7 +1457,7 @@ lyd_new_implicit_r(struct lyd_node *parent, struct lyd_node **first, const struc
} else if (ret) {
return ret;
}
- node->flags = LYD_DEFAULT;
+ node->flags = LYD_DEFAULT | (node->schema->when ? LYD_WHEN_TRUE : 0);
lyd_insert_node(parent, first, node);
if (iter->when && node_when) {
|
Modify the IP & GW & MSK ADDR configure mode as string mode, insted separation mode | // <integer name="RT_LWIP_ETHTHREAD_STACKSIZE" description="the stack size of ethnetif thread" default="512" />
#define RT_LWIP_ETHTHREAD_STACKSIZE 512
// <ipaddr name="RT_LWIP_IPADDR" description="IP address of device" default="192.168.1.30" />
-#define RT_LWIP_IPADDR0 192
-#define RT_LWIP_IPADDR1 168
-#define RT_LWIP_IPADDR2 1
-#define RT_LWIP_IPADDR3 30
+#define RT_LWIP_IPADDR "192.168.1.30"
// <ipaddr name="RT_LWIP_GWADDR" description="Gateway address of device" default="192.168.1.1" />
-#define RT_LWIP_GWADDR0 192
-#define RT_LWIP_GWADDR1 168
-#define RT_LWIP_GWADDR2 1
-#define RT_LWIP_GWADDR3 1
+#define RT_LWIP_GWADDR "192.168.1.1"
// <ipaddr name="RT_LWIP_MSKADDR" description="Mask address of device" default="255.255.255.0" />
-#define RT_LWIP_MSKADDR0 255
-#define RT_LWIP_MSKADDR1 255
-#define RT_LWIP_MSKADDR2 255
-#define RT_LWIP_MSKADDR3 0
+#define RT_LWIP_MSKADDR "255.255.255.0"
// </section>
// </RDTConfigurator>
|
overwrite old output PSK file | @@ -1686,7 +1686,7 @@ if(apessidliste == NULL)
if(pskname != NULL)
{
- if((fhpsk = fopen(pskname, "w")) == NULL)
+ if((fhpsk = fopen(pskname, "w+")) == NULL)
{
fprintf(stderr, "1 error opening psk file %s\n", pskname);
exit(EXIT_FAILURE);
|
opae.admin: mtd: change `open` to return `self`
This allows using it in a context manager:
```Python
mtd_obj = mtd('/dev/mtd0')
with mtd_obj.open('r') as m:
# do stuff with m | @@ -60,6 +60,7 @@ class mtd(loggable):
self.log.warn('device is currently open, closing')
self._fp.close()
self._fp = open(self._devpath, mode)
+ return self
def close(self):
"""close Close an open mtd device."""
|
Make sim header size slightly more configurable
This adds an HDR_SIZE constant to the simulator which allows for easier
testing of images with different header sizes. | @@ -1011,11 +1011,13 @@ fn install_image(flash: &mut Flash, slots: &[SlotInfo], slot: usize, len: usize,
let mut tlv = make_tlv();
+ const HDR_SIZE: usize = 32;
+
// Generate a boot header. Note that the size doesn't include the header.
let header = ImageHeader {
magic: 0x96f3b83d,
load_addr: 0,
- hdr_size: 32,
+ hdr_size: HDR_SIZE as u16,
_pad1: 0,
img_size: len as u32,
flags: tlv.get_flags(),
@@ -1028,13 +1030,11 @@ fn install_image(flash: &mut Flash, slots: &[SlotInfo], slot: usize, len: usize,
_pad2: 0,
};
- let b_header = header.as_raw();
+ let mut b_header = [0; HDR_SIZE];
+ b_header[..32].clone_from_slice(header.as_raw());
+ assert_eq!(b_header.len(), HDR_SIZE);
+
tlv.add_bytes(&b_header);
- /*
- let b_header = unsafe { slice::from_raw_parts(&header as *const _ as *const u8,
- mem::size_of::<ImageHeader>()) };
- */
- assert_eq!(b_header.len(), 32);
// The core of the image itself is just pseudorandom data.
let mut b_img = vec![0; len];
|
sse2: NEON implementation of srli_epi64 should mask 7 bits not 8 | @@ -3965,7 +3965,7 @@ simde_mm_srli_epi64 (simde__m128i a, const int imm8) {
# define simde_mm_srli_epi64(a, imm8) SIMDE__M128I_FROM_NATIVE(_mm_srli_epi64(a.n, imm8))
#elif defined(SIMDE_SSE2_NEON)
# define simde_mm_srli_epi64(a, imm8) \
- SIMDE__M128I_NEON_C(u64, (((imm8)&255) < 0 || ((imm8)&255) > 63) ? (vdupq_n_u64(0)) : ((((imm8)&255) == 0) ? (a.neon_u64) : (vshrq_n_u64((a).neon_u64, (imm8)&255))))
+ SIMDE__M128I_NEON_C(u64, (((imm8)&127) < 0 || ((imm8)&127) > 63) ? (vdupq_n_u64(0)) : ((((imm8)&127) == 0) ? (a.neon_u64) : (vshrq_n_u64((a).neon_u64, (imm8)&127))))
#endif
#if defined(SIMDE_SSE2_ENABLE_NATIVE_ALIASES)
# define _mm_srli_epi64(a, imm8) SIMDE__M128I_TO_NATIVE(simde_mm_srli_epi64(SIMDE__M128I_FROM_NATIVE(a), imm8))
|
improved ESSID detection in case of a IE_TAG | @@ -3188,8 +3188,10 @@ return true;
static bool gettags(int infolen, uint8_t *infoptr, tags_t *zeiger)
{
static ietag_t *tagptr;
+static bool ef;
memset(zeiger, 0, TAGS_SIZE);
+ef = false;
while(0 < infolen)
{
if(infolen == 4) return true;
@@ -3200,7 +3202,11 @@ while(0 < infolen)
infolen -= tagptr->len +IETAG_SIZE;
continue;
}
- if(tagptr->len > infolen) return false;
+ if(tagptr->len > infolen)
+ {
+ if(ef == false) return false;
+ return true;
+ }
if(tagptr->id == TAG_SSID)
{
if(tagptr->len > ESSID_LEN_MAX)
@@ -3210,6 +3216,7 @@ while(0 < infolen)
}
if(isessidvalid(tagptr->len, &tagptr->data[0]) == false) return false;
{
+ ef = true;
memcpy(zeiger->essid, &tagptr->data[0], tagptr->len);
zeiger->essidlen = tagptr->len;
}
@@ -3243,7 +3250,7 @@ while(0 < infolen)
infoptr += tagptr->len +IETAG_SIZE;
infolen -= tagptr->len +IETAG_SIZE;
}
-if((infolen != 0) && (infolen != 4)) return false;
+if((infolen != 0) && (infolen != 4) && (ef == false)) return false;
return true;
}
/*===========================================================================*/
@@ -4320,7 +4327,6 @@ if(memcmp(&tags.essid, &zeroed32, tags.essidlen) == 0)
beaconssidzeroedcount++;
return;
}
-
if((tags.channel > 0) && (tags.channel <= 14))
{
beaconchannel[0] |= GHZ24;
|
out_datadog: use 'unsigned int' instead of 'uint'
`uint` is not a standard type and many compilers do not recognize it.
This should fix the testing failure occurring on AppVeyor. | @@ -49,7 +49,7 @@ static void dd_remap_move_to_tags(const char* tag_name, msgpack_object attr_valu
static void dd_remap_container_name(const char* tag_name, msgpack_object attr_value, flb_sds_t dd_tags)
{
/* remove the first / if present */
- uint adjust = attr_value.via.str.ptr[0] == '/' ? 1 : 0;
+ unsigned int adjust = attr_value.via.str.ptr[0] == '/' ? 1 : 0;
flb_sds_t buf = flb_sds_create_len(attr_value.via.str.ptr + adjust, attr_value.via.str.size - adjust);
dd_remap_append_kv_to_ddtags(tag_name, buf, strlen(buf), dd_tags);
flb_sds_destroy(buf);
|
EVP: make evp_pkey_is_assigned() usable in the FIPS module | @@ -598,8 +598,13 @@ DEFINE_STACK_OF(OP_CACHE_ELEM)
((pk)->type == EVP_PKEY_NONE && (pk)->keymgmt == NULL)
#define evp_pkey_is_typed(pk) \
((pk)->type != EVP_PKEY_NONE || (pk)->keymgmt != NULL)
+#ifndef FIPS_MODULE
# define evp_pkey_is_assigned(pk) \
((pk)->pkey.ptr != NULL || (pk)->keydata != NULL)
+#else
+# define evp_pkey_is_assigned(pk) \
+ ((pk)->keydata != NULL)
+#endif
#define evp_pkey_is_legacy(pk) \
((pk)->type != EVP_PKEY_NONE && (pk)->keymgmt == NULL)
#define evp_pkey_is_provided(pk) \
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.