message (stringlengths 6-474) | diff (stringlengths 8-5.22k) |
---|---|
Slightly better location for stopping AHRS. | @@ -164,6 +164,7 @@ func sensorAttitudeSender() {
)
log.Println("AHRS Info: initializing new simple AHRS")
s = ahrs.InitializeSimple(fmt.Sprintf("/var/log/sensors_%s.csv", time.Now().Format("20060102_150405")))
+ defer s.Stop()
m = ahrs.NewMeasurement()
cage = make(chan(bool))
@@ -315,9 +316,7 @@ func sensorAttitudeSender() {
}
makeAHRSGDL90Report() // Send whether or not valid - the function will invalidate the values as appropriate
-
}
- s.Stop()
}
}
|
Add device name finder query | @@ -61,3 +61,8 @@ findSharedMemoryFrame(NodeId,DeviceId) :-
findRanges(NodeName,SharedName,NodeRange,SharedRange),
findRanges(DevName,SharedName,DeviceRange,SharedRange),
printSharedRanges(NodeRange,SharedRange,DeviceRange).
+
+findDeviceId(NodeId,Addr) :-
+ SrcName = name(NodeId,Addr),
+ resolve(SrcName,name(DeviceId,_)),
+ writeln(DeviceId).
\ No newline at end of file
|
GtkFileChooser 1: replace GtkFileSelection (save dir listing)
GtkFileSelection = GTK1 | @@ -91,29 +91,17 @@ change_filespec (gpointer data)
static void
-destroy_save_directory_listing (GtkWidget * widget, gftp_save_dir_struct * str)
+dosave_directory_listing (const char *filename, gftp_save_dir_struct * str)
{
- gtk_widget_destroy (str->filew);
- g_free (str);
-}
-
-
-static void
-dosave_directory_listing (GtkWidget * widget, gftp_save_dir_struct * str)
-{
- const char *filename;
gftp_file * tempfle;
GList * templist;
char *tempstr;
FILE * fd;
-
- filename = gtk_file_selection_get_filename (GTK_FILE_SELECTION (str->filew));
if ((fd = fopen (filename, "w")) == NULL)
{
ftp_log (gftp_logging_error, NULL,
- _("Error: Cannot open %s for writing: %s\n"), filename,
- g_strerror (errno));
+ _("Error: Cannot open %s for writing: %s\n"), filename, g_strerror (errno));
return;
}
@@ -140,25 +128,31 @@ save_directory_listing (gpointer data)
{
gftp_save_dir_struct * str;
GtkWidget *filew;
+ char current_dir[256];
+ getcwd(current_dir, sizeof(current_dir));
+ const char *filename;
- filew = gtk_file_selection_new (_("Save Directory Listing"));
+ filew = gtk_file_chooser_dialog_new (_("Save Directory Listing"),
+ main_window, //GTK_WINDOW(gtk_widget_get_toplevel (GTK_WIDGET(xxx)))
+ GTK_FILE_CHOOSER_ACTION_SAVE,
+ GTK_STOCK_SAVE, GTK_RESPONSE_ACCEPT,
+ GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL,
+ NULL );
str = g_malloc0 (sizeof (*str));
str->filew = filew;
str->wdata = data;
- gtk_signal_connect (GTK_OBJECT (GTK_FILE_SELECTION (filew)->ok_button),
- "clicked", GTK_SIGNAL_FUNC (dosave_directory_listing),
- str);
- gtk_signal_connect (GTK_OBJECT (GTK_FILE_SELECTION (filew)->ok_button),
- "clicked",
- GTK_SIGNAL_FUNC (destroy_save_directory_listing), str);
- gtk_signal_connect (GTK_OBJECT (GTK_FILE_SELECTION (filew)->cancel_button),
- "clicked",
- GTK_SIGNAL_FUNC (destroy_save_directory_listing), str);
+ gtk_file_chooser_set_do_overwrite_confirmation(GTK_FILE_CHOOSER(filew), TRUE);
+ gtk_file_chooser_set_current_folder( GTK_FILE_CHOOSER(filew), current_dir);
+ gtk_file_chooser_set_current_name( GTK_FILE_CHOOSER(filew), "Directory_Listing.txt");
+ if (gtk_dialog_run (GTK_DIALOG(filew)) == GTK_RESPONSE_ACCEPT) {
+ filename = gtk_file_chooser_get_filename (GTK_FILE_CHOOSER (filew));
+ dosave_directory_listing(filename, str);
+ }
- gtk_window_set_wmclass (GTK_WINDOW(filew), "Save Directory Listing", "gFTP");
- gtk_widget_show (filew);
+ gtk_widget_destroy (filew);
+ g_free (str);
}
|
[MUSL] Remove locks in freeaddrinfo
this avoids locking during fork | @@ -10,7 +10,7 @@ void freeaddrinfo(struct addrinfo *p)
for (cnt=1; p->ai_next; cnt++, p=p->ai_next);
struct aibuf *b = (void *)((char *)p - offsetof(struct aibuf, ai));
b -= b->slot;
- LOCK(b->lock);
+ // LOCK(b->lock);
if (!(b->ref -= cnt)) free(b);
- else UNLOCK(b->lock);
+ // else UNLOCK(b->lock);
}
|
HLS: Increasing the FIFO depth to 32 as an experiment | @@ -312,9 +312,9 @@ static void __do_the_work(snap_membus_t din_gmem[MEMORY_LINES],
t1_fifo_t t1_fifo;
t2_fifo_t t2_fifo;
t3_fifo_t t3_fifo;
-#pragma HLS stream variable=t1_fifo depth=4
-#pragma HLS stream variable=t2_fifo depth=4
-#pragma HLS stream variable=t3_fifo depth=4
+#pragma HLS stream variable=t1_fifo depth=32
+#pragma HLS stream variable=t2_fifo depth=32
+#pragma HLS stream variable=t3_fifo depth=32
//== Parameters fetched in memory ==
//==================================
|
makefile support for tensorflow | @@ -29,6 +29,7 @@ UBSAN?=0
FFTWTHREADS?=1
SCALAPACK?=0
ISMRMRD?=0
+TENSORFLOW?=0
NOEXEC_STACK?=0
PARALLEL?=0
PARALLEL_NJOBS?=
@@ -141,6 +142,8 @@ endif
CUDA_BASE ?= /usr/
CUDA_LIB ?= lib
+# tensorflow
+TENSORFLOW_BASE ?= /usr/local/
# acml
@@ -149,6 +152,7 @@ ACML_BASE ?= /usr/local/acml/acml4.4.0/gfortran64_mp/
# mkl
MKL_BASE ?= /opt/intel/mkl/lib/intel64/
+
# fftw
ifneq ($(BUILDTYPE), MacOSX)
@@ -255,6 +259,11 @@ CPPFLAGS += -DNOLAPACKE
MODULES += -llapacke
endif
+ifeq ($(TENSORFLOW),1)
+CPPFLAGS += -DTENSORFLOW -I$(TENSORFLOW_BASE)/include
+LIBS += -L$(TENSORFLOW_BASE)/lib -Wl,-rpath $(TENSORFLOW_BASE)/lib -ltensorflow_framework -ltensorflow
+endif
+
XTARGETS += $(TBASE) $(TFLP) $(TNUM) $(TIO) $(TRECO) $(TCALIB) $(TMRI) $(TSIM)
|
Do not trigger initial pktns PTO probe if no ack-eliciting packet is left | @@ -10786,11 +10786,11 @@ static ngtcp2_pktns *conn_get_earliest_pktns(ngtcp2_conn *conn,
ngtcp2_tstamp *pts,
const ngtcp2_tstamp *times) {
ngtcp2_pktns *ns[] = {conn->in_pktns, conn->hs_pktns, &conn->pktns};
- ngtcp2_pktns *res = ns[0];
+ ngtcp2_pktns *res = NULL;
size_t i;
- ngtcp2_tstamp earliest_ts = times[NGTCP2_PKTNS_ID_INITIAL];
+ ngtcp2_tstamp earliest_ts = UINT64_MAX;
- for (i = NGTCP2_PKTNS_ID_HANDSHAKE; i < NGTCP2_PKTNS_ID_MAX; ++i) {
+ for (i = NGTCP2_PKTNS_ID_INITIAL; i < NGTCP2_PKTNS_ID_MAX; ++i) {
if (ns[i] == NULL || ns[i]->rtb.num_retransmittable == 0 ||
(times[i] == UINT64_MAX ||
(earliest_ts != UINT64_MAX && times[i] >= earliest_ts) ||
@@ -10804,8 +10804,6 @@ static ngtcp2_pktns *conn_get_earliest_pktns(ngtcp2_conn *conn,
}
if (res == NULL && !conn->server) {
- earliest_ts = UINT64_MAX;
-
if (conn->hs_pktns && conn->hs_pktns->crypto.tx.ckm) {
res = conn->hs_pktns;
} else {
|
mix: fix wrong outdated message printed for Mix_OpenAudioDevice | @@ -28,7 +28,7 @@ package mix
//#if !(SDL_MIXER_VERSION_ATLEAST(2,0,2))
//
//#if defined(WARN_OUTDATED)
-//#pragma message("Mix_OpenAudioDevice is not supported before SDL 2.0.9")
+//#pragma message("Mix_OpenAudioDevice is not supported before SDL_mixer 2.0.2")
//#endif
//
//static inline int Mix_OpenAudioDevice(int frequency, Uint16 format, int channels, int chunksize, const char* device, int allowed_changes)
|
Fix chip/imxrt_enc.c:950:27: error: use of logical '&&' with constant operand | @@ -951,7 +951,7 @@ static int imxrt_setup(struct qe_lowerhalf_s *lower)
imxrt_enc_putreg16(priv, IMXRT_ENC_TST_OFFSET, regval);
#endif
- if ((config->init_flags && XIE_SHIFT) == 1)
+ if (((config->init_flags >> XIE_SHIFT) & 1) != 0)
{
ret = irq_attach(config->irq, imxrt_enc_index, priv);
if (ret < 0)
@@ -1011,10 +1011,9 @@ static int imxrt_shutdown(struct qe_lowerhalf_s *lower)
/* Disable interrupts if used */
- if ((priv->config->init_flags && XIE_SHIFT) == 1)
+ if (((priv->config->init_flags >> XIE_SHIFT) & 1) != 0)
{
up_disable_irq(priv->config->irq);
-
irq_detach(priv->config->irq);
}
|
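The bug fixed above is a logical-AND-versus-bit-test mix-up: `flags && XIE_SHIFT` reduces both operands to booleans, so it says nothing about whether the interrupt-enable bit is actually set. A minimal standalone C sketch of the difference, using a made-up flag layout rather than the real i.MX RT register definitions:

    #include <stdio.h>

    #define XIE_SHIFT 4   /* hypothetical bit position, for illustration only */

    int main(void)
    {
        unsigned int init_flags = 1u;   /* some unrelated bit set, XIE bit clear */

        /* Buggy form: '&&' turns both operands into booleans, so this is
         * (init_flags != 0) && (XIE_SHIFT != 0), which is true whenever any
         * flag bit is set at all. */
        int buggy = ((init_flags && XIE_SHIFT) == 1);

        /* Fixed form: shift the flag word and mask out the single bit. */
        int fixed = (((init_flags >> XIE_SHIFT) & 1) != 0);

        printf("buggy=%d fixed=%d\n", buggy, fixed);   /* prints buggy=1 fixed=0 */
        return 0;
    }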
bump numpy to v1.13.1 | @@ -22,7 +22,7 @@ Requires: openblas-%{compiler_family}%{PROJ_DELIM}
%define PNAME %(echo %{pname} | tr [a-z] [A-Z])
Name: python-%{pname}-%{compiler_family}%{PROJ_DELIM}
-Version: 1.12.1
+Version: 1.13.1
Release: 1%{?dist}
Url: http://sourceforge.net/projects/numpy
Summary: NumPy array processing for numbers, strings, records and objects
|
build: bump to v1.9.6 | @@ -4,7 +4,7 @@ project(fluent-bit)
# Fluent Bit Version
set(FLB_VERSION_MAJOR 1)
set(FLB_VERSION_MINOR 9)
-set(FLB_VERSION_PATCH 5)
+set(FLB_VERSION_PATCH 6)
set(FLB_VERSION_STR "${FLB_VERSION_MAJOR}.${FLB_VERSION_MINOR}.${FLB_VERSION_PATCH}")
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
Updated man page ms specifier. | @@ -772,6 +772,10 @@ If a timestamp is given in microseconds,
must be used as
.I
time-format
+or
+.I
+%*
+if the timestamp is given in milliseconds.
.IP "date-format"
The
.I date-format
@@ -787,6 +791,10 @@ If a timestamp is given in microseconds,
must be used as
.I
date-format
+or
+.I
+%*
+if the timestamp is given in milliseconds.
.IP "log-format"
The
.I log-format
|
Update signature algorithm handling
Rename local variables and, to simplify things, use static_assert to
verify that the default signature algorithms fit into the
SSL handshake structure. | @@ -1628,10 +1628,10 @@ read_record_header:
* Try to fall back to default hash SHA1 if the client
* hasn't provided any preferred signature-hash combinations.
*/
- if( sig_hash_alg_ext_present == 0 )
+ if( ! sig_hash_alg_ext_present )
{
- uint16_t *set = ssl->handshake->received_sig_algs;
- const uint16_t sig_algs[] = {
+ uint16_t *received_sig_algs = ssl->handshake->received_sig_algs;
+ const uint16_t default_sig_algs[] = {
#if defined(MBEDTLS_ECDSA_C)
MBEDTLS_SSL_TLS12_SIG_AND_HASH_ALG( MBEDTLS_SSL_SIG_ECDSA,
MBEDTLS_SSL_HASH_SHA1 ),
@@ -1640,24 +1640,15 @@ read_record_header:
MBEDTLS_SSL_TLS12_SIG_AND_HASH_ALG( MBEDTLS_SSL_SIG_RSA,
MBEDTLS_SSL_HASH_SHA1 ),
#endif
+ MBEDTLS_TLS_SIG_NONE
};
- const uint16_t invalid_sig_alg = MBEDTLS_TLS_SIG_NONE;
- size_t count = sizeof( sig_algs ) / sizeof( sig_algs[0] );
- if( count < MBEDTLS_RECEIVED_SIG_ALGS_SIZE )
- {
- memcpy( set, sig_algs, sizeof( sig_algs ) );
- memcpy( &set[count], &invalid_sig_alg, sizeof( sig_algs[0] ) );
- }
- else
- {
- size_t size = ( MBEDTLS_RECEIVED_SIG_ALGS_SIZE - 1 ) *
- sizeof( sig_algs[0] );
+#if defined(static_assert)
+ static_assert( sizeof( default_sig_algs ) / sizeof( default_sig_algs[0] ) <=
+ MBEDTLS_RECEIVED_SIG_ALGS_SIZE, "default_sig_algs is too big" );
+#endif
- memcpy( set, sig_algs, size );
- memcpy( &set[MBEDTLS_RECEIVED_SIG_ALGS_SIZE - 1],
- &invalid_sig_alg, sizeof( sig_algs[0] ) );
- }
+ memcpy( received_sig_algs, default_sig_algs, sizeof( default_sig_algs ) );
}
#endif /* MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED */
|
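The refactor above trades a runtime size check for a compile-time one. Below is a rough, self-contained sketch of the same static_assert pattern in plain C11; the constants and array contents are invented for illustration and are not the mbed TLS definitions.

    #include <assert.h>   /* C11 exposes static_assert through assert.h */
    #include <stdint.h>
    #include <string.h>

    #define RECEIVED_SIG_ALGS_SIZE 8   /* illustrative destination capacity */
    #define SIG_ALG_NONE 0             /* illustrative list terminator */

    static const uint16_t default_sig_algs[] = { 0x0403, 0x0401, SIG_ALG_NONE };

    /* Fails the build if the defaults (terminator included) would not fit
     * into the destination array, so no runtime fallback path is needed. */
    static_assert(sizeof(default_sig_algs) / sizeof(default_sig_algs[0])
                  <= RECEIVED_SIG_ALGS_SIZE, "default_sig_algs is too big");

    void set_default_sig_algs(uint16_t received_sig_algs[RECEIVED_SIG_ALGS_SIZE])
    {
        memcpy(received_sig_algs, default_sig_algs, sizeof(default_sig_algs));
    }

If the default list ever outgrows the capacity, compilation stops immediately, which is what let the patch drop the runtime truncation branch.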
ssse3: silence MSVC warning in simde_mm_abs_epi32 | @@ -122,7 +122,14 @@ simde_mm_abs_epi32 (simde__m128i a) {
SIMDE__VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
+ #if defined(_MSC_VER)
+ HEDLEY_DIAGNOSTIC_PUSH
+ #pragma warning(disable:4146)
+ #endif
r_.u32[i] = (a_.i32[i] < 0) ? (- HEDLEY_STATIC_CAST(uint32_t, a_.i32[i])) : HEDLEY_STATIC_CAST(uint32_t, a_.i32[i]);
+ #if defined(_MSC_VER)
+ HEDLEY_DIAGNOSTIC_POP
+ #endif
}
return simde__m128i_from_private(r_);
|
add matekf405 to targets | }
]
},
+ {
+ "name": "matekf405",
+ "configurations": [
+ {
+ "name": "brushless.serial",
+ "defines": {
+ "BRUSHLESS_TARGET": "",
+ "RX_UNIFIED_SERIAL": ""
+ }
+ }
+ ]
+ },
{
"name": "crazybee_f4",
"configurations": [
|
Travis: temporarily decrease wait time for debugging | @@ -90,7 +90,7 @@ script:
#TODO: change this to run specs in auto mode and collect results
- |
if [[ -z $SKIP_TESTS ]]; then
- travis_wait 45 $TRAVIS_BUILD_DIR/.ci/safe_run.sh $TRAVIS_BUILD_DIR/.ci/targets/$RHO_TARGET/$RHO_RUNNER_SCRIPT;
+ travis_wait 20 $TRAVIS_BUILD_DIR/.ci/safe_run.sh $TRAVIS_BUILD_DIR/.ci/targets/$RHO_TARGET/$RHO_RUNNER_SCRIPT;
fi
before_deploy:
|
Fix comment in procarray.c
The description of GlobalVisDataRels was missing, GlobalVisCatalogRels
being mentioned instead.
Author: Jim Nasby
Discussion: | @@ -146,7 +146,7 @@ typedef struct ProcArrayStruct
* I.e. the difference to GlobalVisSharedRels is that
* snapshot in other databases are ignored.
*
- * 3) GlobalVisCatalogRels, which only considers an XID's
+ * 3) GlobalVisDataRels, which only considers an XID's
* effects visible-to-everyone if neither snapshots in the current
* database, nor a replication slot's xmin consider XID as running.
*
|
defines: add undef for boards without buzzer | #undef SOFTSPI_4WIRE
#endif
+#if defined(BUZZER_ENABLE) && !defined(BUZZER_PIN)
+#undef BUZZER_ENABLE
+#endif
+
#ifdef BUZZER_INVERT
#define PIN_ON(port, pin) GPIO_ResetBits(port, pin)
#define PIN_OFF(port, pin) GPIO_SetBits(port, pin)
|
fix assertion for huge blocks | @@ -783,7 +783,7 @@ static mi_page_t* mi_large_huge_page_alloc(mi_heap_t* heap, size_t size) {
mi_page_queue_t* pq = (is_huge ? NULL : mi_page_queue(heap, block_size));
mi_page_t* page = mi_page_fresh_alloc(heap, pq, block_size);
if (page != NULL) {
- const size_t bsize = mi_page_usable_block_size(page);
+ const size_t bsize = mi_page_block_size(page); // note: not `mi_page_usable_block_size` as `size` includes padding
mi_assert_internal(mi_page_immediate_available(page));
mi_assert_internal(bsize >= size);
|
examples: fix endpoint for sidecar container | @@ -3,19 +3,19 @@ set -eu
REMOTE_HOST=${REMOTE_HOST:-127.0.0.1}
-if command -v http; then
+if command -v http &> /dev/null ; then
http -v "$REMOTE_HOST":2020/api/v1/trace/dummy.0 output=stdout prefix=trace. params:='{"format":"json"}'
-elif command -v curl; then
+elif command -v curl &> /dev/null ; then
curl --header 'Content-Type: application/json' --data '{"output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace/dummy.0
else
echo "No curl or httpie installed"
- if command -v apt-get; then
- apt-get update
- apt-get install -y curl
- elif command -v yum; then
+ if command -v apt-get &> /dev/null ; then
+ apt-get -qq update
+ apt-get -qq install -y curl
+ elif command -v yum &> /dev/null ; then
yum install -y curl
else
exit 1
fi
- curl --header 'Content-Type: application/json' --data '{"output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace/dummy
+ curl --header 'Content-Type: application/json' --data '{"output": "stdout", "params": { "format": "json" }, "prefix": "trace."}' "$REMOTE_HOST":2020/api/v1/trace/dummy.0
fi
|
component/bt: Some oneshot timers will time out twice
Found during BQB testing | @@ -242,23 +242,6 @@ void btu_task_thread_handler(void *arg)
case SIG_BTU_ONESHOT_ALARM: {
TIMER_LIST_ENT *p_tle = (TIMER_LIST_ENT *)e.par;
btu_general_alarm_process(p_tle);
-
- switch (p_tle->event) {
-#if (defined(BLE_INCLUDED) && BLE_INCLUDED == TRUE)
- case BTU_TTYPE_BLE_RANDOM_ADDR:
- btm_ble_timeout(p_tle);
- break;
-#endif
- case BTU_TTYPE_USER_FUNC: {
- tUSER_TIMEOUT_FUNC *p_uf = (tUSER_TIMEOUT_FUNC *)p_tle->param;
- (*p_uf)(p_tle);
- break;
- }
- default:
- // FAIL
- HCI_TRACE_ERROR("Received unexpected oneshot timer event:0x%x\n", p_tle->event);
- break;
- }
break;
}
case SIG_BTU_L2CAP_ALARM:
|
doc: edit RDT_ENABLED tooltip | @@ -290,9 +290,8 @@ and no more than 512.</xs:documentation>
<xs:sequence>
<xs:element name="RDT_ENABLED" type="Boolean" default="n">
<xs:annotation acrn:title="Intel Resource Director Tech">
- <xs:documentation>Enable Intel Resource Director Technology (RDT). If
-the board hardware does not support
-RDT, setting this option to checked is ignored.</xs:documentation>
+ <xs:documentation>Enable Cache Allocation Technology of Intel Resource Director Technology (RDT). If
+the board hardware does not support RDT, enabling this option is ignored.</xs:documentation>
</xs:annotation>
</xs:element>
<xs:element name="CDP_ENABLED" type="Boolean" default="n">
|
[Cita][#1193]update cita2c.py | # limitations under the License.
-# This python script generates FISCOBCOS's C language interface function from contract ABI (solidity).
+# This python script generates Ethereum's C language interface function from contract ABI (solidity).
# Not all contract ABI can be converted to C interface because C is lack of object-oriented programming
# capability. If the tool fails to generate the interface, you may have to organize the contract call
# function. The return value string has to be parsed manually as per the contract prototype. If the call
# fails, it returns NULL.
#
-# For state-ful contract call, i.e. a transaction, the generated C API returns a HEX string representing
-# the transaction hash. If the transaction fails, it returns NULL.
+# For value transfer or state-ful contract call, i.e. a transaction, the generated C API returns a HEX
+# string representing the transaction hash. If the transaction fails, it returns NULL.
import sys
import json
@@ -232,7 +232,7 @@ class CFunctionGen():
with open(abi_file_name) as file_handle:
self.abi_object = json.load(file_handle)
self.abi_file_name = os.path.basename(abi_file_name)
- #print(self.abi_object)
+ #print(self.abi_object);
self.output_path = output_path
@@ -857,8 +857,6 @@ class CFunctionGen():
if not self.is_Change_Blockchain_State(abi_item):
func_body_str += ' boat_try(BoatCitaTxSetNonce(tx_ptr, BOAT_CITA_NONCE_AUTO));\n\n'
-
-
# Extract solidity function inputs
inputs_len = len(inputs)
nonFixed_filedLen_str = self.gen_nonFixed_mallocSize_exp(abi_item, 27)
@@ -1062,7 +1060,6 @@ class CFunctionGen():
i = i + 1
if self.is_Change_Blockchain_State(abi_item):
- # for state-less funciton call
func_body_str += ' call_result_str = BoatCitaCallContractFunc(tx_ptr, function_prototye_str, data_field.field_ptr+4, data_field.field_len-4);\n\n'
else:
# for stateful transaction
|
profile: absolute-positioning fix
fixes urbit/landscape#444 | @@ -165,7 +165,7 @@ export function Profile(props: any): ReactElement {
const ViewInterface = () => {
return (
<Center p={[0, 4]} height='100%' width='100%'>
- <Box ref={anchorRef} maxWidth='600px' width='100%'>
+ <Box ref={anchorRef} maxWidth='600px' width='100%' position='relative'>
<ViewProfile
api={props.api}
nacked={nacked}
@@ -183,7 +183,7 @@ export function Profile(props: any): ReactElement {
const EditInterface = () => {
return (
<Center p={[0, 4]} height='100%' width='100%'>
- <Box ref={anchorRef} maxWidth='600px' width='100%'>
+ <Box ref={anchorRef} maxWidth='600px' width='100%' position='relative'>
<EditProfile
ship={ship}
contact={contact}
|
fix floating layout resize | @@ -1463,7 +1463,7 @@ enternotify(XEvent *e)
if ((ev->mode != NotifyNormal || ev->detail == NotifyInferior) && ev->window != root)
return;
c = wintoclient(ev->window);
- if (c && selmon->sel && selmon->sel->isfloating && c != selmon->sel &&
+ if (c && selmon->sel && ( selmon->sel->isfloating || NULL == selmon->lt[selmon->sellt]->arrange ) && c != selmon->sel &&
(ev->window == root || (c->tags & selmon->sel->tags && c->mon == selmon) || selmon->sel->issticky)) {
if (!resizeborder(NULL))
return;
@@ -2446,7 +2446,7 @@ gesturemouse(const Arg *arg)
// hover over the border to move/resize a window
int
resizeborder(const Arg *arg) {
- if (!(selmon->sel && selmon->sel->isfloating))
+ if (!(selmon->sel && (selmon->sel->isfloating || NULL == selmon->lt[selmon->sellt]->arrange )))
return 0;
XEvent ev;
Time lasttime = 0;
|
Encoding with packingType=grid_ccsds via codes_grib_util_set_spec | @@ -1429,7 +1429,7 @@ grib_handle* grib_util_set_spec2(grib_handle* h,
* Reason 1: It is not available in GRIB1 and so we have to wait until we change edition
* Reason 2: It has to be done AFTER we set the data values
*/
- if (strcmp(input_packing_type, "grid_ccsds") && !strcmp(input_packing_type, "grid_simple"))
+ if (!STR_EQUAL(input_packing_type, "grid_ccsds"))
setCcsdsPacking = 1;
break;
case GRIB_UTIL_PACKING_TYPE_IEEE:
|
http_server: metrics: add destructor to prevent leak | pthread_key_t hs_metrics_key;
+static struct mk_list *hs_metrics_key_create()
+{
+ struct mk_list *metrics_list = NULL;
+
+ metrics_list = flb_malloc(sizeof(struct mk_list));
+ if (metrics_list == NULL) {
+ flb_errno();
+ return NULL;
+ }
+ mk_list_init(metrics_list);
+ pthread_setspecific(hs_metrics_key, metrics_list);
+
+ return metrics_list;
+}
+
+static void hs_metrics_key_destroy(void *data)
+{
+ struct mk_list *metrics_list = (struct mk_list*)data;
+ struct mk_list *tmp;
+ struct mk_list *head;
+ struct flb_hs_buf *entry;
+
+ if (metrics_list == NULL) {
+ return;
+ }
+ mk_list_foreach_safe(head, tmp, metrics_list) {
+ entry = mk_list_entry(head, struct flb_hs_buf, _head);
+ if (entry != NULL) {
+ if (entry->raw_data != NULL) {
+ flb_free(entry->raw_data);
+ entry->raw_data = NULL;
+ }
+ if (entry->data) {
+ flb_sds_destroy(entry->data);
+ entry->data = NULL;
+ }
+ mk_list_del(&entry->_head);
+ flb_free(entry);
+ }
+ }
+
+ flb_free(metrics_list);
+}
+
/* Return the newest metrics buffer */
static struct flb_hs_buf *metrics_get_latest()
{
@@ -105,13 +149,10 @@ static void cb_mq_metrics(mk_mq_t *queue, void *data, size_t size)
metrics_list = pthread_getspecific(hs_metrics_key);
if (!metrics_list) {
- metrics_list = flb_malloc(sizeof(struct mk_list));
- if (!metrics_list) {
- flb_errno();
+ metrics_list = hs_metrics_key_create();
+ if (metrics_list == NULL) {
return;
}
- mk_list_init(metrics_list);
- pthread_setspecific(hs_metrics_key, metrics_list);
}
/* Convert msgpack to JSON */
@@ -527,7 +568,7 @@ static void cb_metrics(mk_request_t *request, void *data)
int api_v1_metrics(struct flb_hs *hs)
{
- pthread_key_create(&hs_metrics_key, NULL);
+ pthread_key_create(&hs_metrics_key, hs_metrics_key_destroy);
/* Create a message queue */
hs->qid_metrics = mk_mq_create(hs->ctx, "/metrics",
|
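The leak fix above leans on POSIX thread-specific data: the cleanup callback passed to pthread_key_create() runs automatically for every thread that exits with a non-NULL value stored under the key. A stripped-down sketch of that mechanism with plain pthreads, none of the Fluent Bit structures (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_key_t key;

    /* Invoked at thread exit for each thread that stored a non-NULL value,
     * so the per-thread buffer is reclaimed without an explicit free(). */
    static void destroy_value(void *data)
    {
        free(data);
    }

    static void *worker(void *arg)
    {
        (void) arg;
        char *buf = malloc(64);
        if (buf == NULL)
            return NULL;
        snprintf(buf, 64, "per-thread scratch buffer");
        pthread_setspecific(key, buf);
        return NULL;               /* destroy_value(buf) runs after this */
    }

    int main(void)
    {
        pthread_t t;

        pthread_key_create(&key, destroy_value);   /* destructor, not NULL */
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        pthread_key_delete(key);
        return 0;
    }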
Remove TracedBoomConfig | @@ -48,15 +48,6 @@ class DualSmallBoomConfig extends Config(
new boom.common.WithNBoomCores(2) ++ // dual-core
new freechips.rocketchip.system.BaseConfig)
-class TracedSmallBoomConfig extends Config(
- new WithTop ++
- new WithBootROM ++
- new freechips.rocketchip.subsystem.WithInclusiveCache ++
- new boom.common.WithTrace ++ // enable trace port on BOOM
- new boom.common.WithSmallBooms ++
- new boom.common.WithNBoomCores(1) ++
- new freechips.rocketchip.system.BaseConfig)
-
class SmallRV32UnifiedBoomConfig extends Config(
new WithTop ++
new WithBootROM ++
|
fix CDS values written on GFIR enable/disable | @@ -143,11 +143,11 @@ int LMS7_Device::ConfigureGFIR(bool tx, unsigned ch, bool enabled, double bandwi
if (ch%2)
{
lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXBLML), !(enabled|sisoDDR));
- lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXBLML), !(enabled|sisoDDR));
+ lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXBLML), enabled? 3 : 0);
}
else
{
- lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXALML), enabled? 3 : 0);
+ lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXALML), !(enabled|sisoDDR));
lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXALML), enabled? 3 : 0);
}
}
@@ -948,11 +948,11 @@ int LMS7_Device::SetGFIR(bool tx, unsigned chan, lms_gfir_t filt, bool enabled)
if (chan%2)
{
lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXBLML), !(enabled|sisoDDR));
- lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXBLML), !(enabled|sisoDDR));
+ lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXBLML), enabled? 3 : 0);
}
else
{
- lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXALML), enabled? 3 : 0);
+ lms->Modify_SPI_Reg_bits(LMS7param(CDSN_RXALML), !(enabled|sisoDDR));
lms->Modify_SPI_Reg_bits(LMS7param(CDS_RXALML), enabled? 3 : 0);
}
}
|
fix segfault in ProcessBlock - incorrect jobs count | @@ -78,14 +78,15 @@ namespace NCB {
template <class TProcessDataFunc>
void ProcessBlock(TProcessDataFunc processFunc) {
const int threadCount = LocalExecutor->GetThreadCount() + 1;
- LocalExecutor->ExecRangeWithThrow(NPar::TLocalExecutor::BlockedLoopBody(
- NPar::TLocalExecutor::TExecRangeParams(0, ParseBuffer.ysize()).SetBlockCount(threadCount),
- [this, processFunc = std::move(processFunc)](int lineIdx) {
- processFunc(ParseBuffer[lineIdx], lineIdx);
- }),
- 0, ParseBuffer.ysize(),
- NPar::TLocalExecutor::WAIT_COMPLETE
- );
+
+ NPar::TLocalExecutor::TExecRangeParams blockParams(0, ParseBuffer.ysize());
+ blockParams.SetBlockCount(threadCount);
+ LocalExecutor->ExecRangeWithThrow([this, blockParams, processFunc = std::move(processFunc)](int blockIdx) {
+ const int blockOffset = blockIdx * blockParams.GetBlockSize();
+ for (int i = blockOffset; i < Min(blockOffset + blockParams.GetBlockSize(), ParseBuffer.ysize()); ++i) {
+ processFunc(ParseBuffer[i], i);
+ }
+ }, 0, blockParams.GetBlockCount(), NPar::TLocalExecutor::WAIT_COMPLETE);
LinesProcessed += ParseBuffer.ysize();
}
|
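The crash fixed above came from passing a per-item loop body while sizing the range as if it were a block count; the replacement iterates explicit blocks and clamps the last one. The same partitioning idea reduced to plain C with invented numbers (the real code uses TLocalExecutor and Min, not these helpers):

    #include <stdio.h>

    /* Split `total` items into `blocks` chunks and clamp the final chunk so
     * indices never run past the end, mirroring the Min(...) in the fix. */
    static void process_in_blocks(int total, int blocks)
    {
        int block_size = (total + blocks - 1) / blocks;   /* ceiling division */

        for (int b = 0; b < blocks; b++) {
            int begin = b * block_size;
            int end   = begin + block_size < total ? begin + block_size : total;

            for (int i = begin; i < end; i++) {
                /* processFunc(buffer[i], i) would go here */
            }
            printf("block %d covers [%d, %d)\n", b, begin, end);
        }
    }

    int main(void)
    {
        process_in_blocks(10, 4);   /* 10 items over 4 blocks: 3, 3, 3, 1 */
        return 0;
    }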
website-frontend: reformatting with prettier | @@ -31,10 +31,10 @@ Check the [Ideas](/doc/IDEAS.md) page if you are searching for a good topic to s
If you want to publish your local changes to this project you have to create a new pull-request.
-1. Open GitHub and navigate to *your* libeketra-fork
-2. In the tab *Codes* press the *New Pull-Request* button and choose a title and fill in the [pull-request template](/.github/PULL_REQUEST_TEMPLATE.md)
-3. Now you should be able to set up the *Comparing changes* settings. Choose the head repository and base repository. E.g.: if you have some changes in the master branch of your forked repository, select *head repository: yournickname/libelektra and *compare: master* and *base repositroy: ElektraInitiative/libelektra* and *base: master*
-4. Add some information about the changes in the release notes (path of the file: /libelektra/doc/news/_preparation_next_release.md), skipping this step may cause a rejected pull-request
+1. Open GitHub and navigate to _your_ libeketra-fork
+2. In the tab _Codes_ press the _New Pull-Request_ button and choose a title and fill in the [pull-request template](/.github/PULL_REQUEST_TEMPLATE.md)
+3. Now you should be able to set up the _Comparing changes_ settings. Choose the head repository and base repository. E.g.: if you have some changes in the master branch of your forked repository, select *head repository: yournickname/libelektra and *compare: master* and *base repositroy: ElektraInitiative/libelektra* and *base: master\*
+4. Add some information about the changes in the release notes (path of the file: /libelektra/doc/news/\_preparation_next_release.md), skipping this step may cause a rejected pull-request
5. Commit and push your local changes in git (keep in mind to sync your fork - fetch, rebase & push)
6. Wait for the code-review
|
Update main.tex
Rephrased. | @@ -1035,7 +1035,7 @@ If you would like to contribute to \ccl or contact the developers, please do so
\section{Citing \ccl}
\label{sec:cite}
-If you use \ccl in your work, please provide a link to the repository and cite it as LSST DESC (in preparation). \ccl has a built-in version of {\tt CLASS} for convenience. We request that \ccl users cite the {\tt CLASS} paper: {\it CLASS II: Approximation schemes}, D. Blas, J. Lesgourgues, T. Tram, arXiv:1104.2933, JCAP 1107 (2011) 034.
+If you use \ccl in your work, please provide a link to the repository and cite it as LSST DESC (in preparation). \ccl has a built-in version of {\tt CLASS} for convenience, thus \ccl users must cite the {\tt CLASS} paper: {\it CLASS II: Approximation schemes}, D. Blas, J. Lesgourgues, T. Tram, arXiv:1104.2933, JCAP 1107 (2011) 034.
\section{License}
\label{sec:license}
|
Add binnec requirement for devs merging to develop | @@ -131,7 +131,16 @@ Tests will run automatically via GitHub Actions when you open a pull request or
push new commits to an existing pull request.
Once you've collected and addressed feedback, tests are passing, and your PR has
-been approved, merge the pull request. If you properly included the "Resolves
+been approved, merge the pull request.
+
+**Note**: If you are merging into develop, you *must* be syncing OTAs from
+`~binnec-dozzod-marzod` which gets the tip of develop deployed to it. If
+your merge breaks `binnec` it's your responsibility to alert people and
+fix it. Your PR is shipped when it's successfully been deployed to
+`~binnec` and picked up by your personal ship. If you're merging on behalf
+of an external developer, this is also your responsibility.
+
+If you properly included the "Resolves
#N." directive in the pull request description, merging will automatically close
the tracking issue associated with the pull request.
|
View.rst uses a list for the options in 3d viewing | @@ -150,22 +150,34 @@ of a 3D view.
The 3D perspective view volume
-To set the 3D view, first decide on where you want to look from. Type a
-vector value into the **View normal** text field. Next, type the vector
-valued location of what you want to look at into the **Focus** text
-field. The **Up axis** vector is simply a vector that determines which
-way is up. A good default value for the up axis is 0 1 0. VisIt will
-often calculate a better value to use for the up axis so it is not too
-important to figure out the right value. The **View Angle** determines
-how wide the field of view is. The view angle is specified in degrees
-and a value around 30 is usually sufficient. **Near clipping** and
-**Far clipping** are values along the view normal that determine where
-the near and far clipping planes are to be placed. It is not easy to
-know that good values for these are so you will have to experiment.
-**Parallel scale** acts as a zoom factor and larger values zoom the
-camera towards the focus. The **Perspective** check box applies to 3D
-visualizations and it causes a more realistic view to be used where
-objects that are farther away are drawn smaller than closer objects of
+To set the 3D view, fill in the following fields:
+
+View normal
+ Where you want to look from.
+
+Focus
+ What you want to look at
+
+Up axis
+ Determines which way is up. A good default value for the up axis is 0 1 0.
+ VisIt will often calculate a better value to use for the up axis so it is
+ not too important to figure out the right value.
+
+View Angle
+ Determines how wide the field of view is. The view angle is specified in
+ degrees and a value around 30 is usually sufficient.
+
+Near clipping and Far clipping
+ Values along the view normal that determine where the near and far clipping
+ planes are to be placed. It is not easy to know that good values for these
+ are so you will have to experiment.
+
+Parallel scale
+ Zoom factor: larger values zoom the camera towards the focus.
+
+Perspective
+ Applies to 3D visualizations and it causes a more realistic view to be used
+ where objects that are farther away are drawn smaller than closer objects of
the same size. VisIt uses a perspective view for 3D visualizations by
default.
|
Use both fonts simultaneously in text demo | #include <cstdlib>
#include "pico_explorer.hpp"
+#include "font6_data.hpp"
#include "font8_data.hpp"
#include "msa301.hpp"
@@ -26,7 +27,10 @@ int main() {
pico_explorer.clear();
pico_explorer.set_pen(255, 255, 255);
- pico_explorer.text("Hello World. How are you today?", Point(10, 190), 100);
+ pico_explorer.set_font(&font6);
+ pico_explorer.text("6x6: Hello World. How are you today?", Point(10, 10), 220);
+ pico_explorer.set_font(&font8);
+ pico_explorer.text("6x8: Hello World. How are you today?", Point(10, 120), 220);
pico_explorer.update();
|
Fix a compilation failure with no-tls_1_2 | @@ -8627,7 +8627,7 @@ int setup_tests(void)
void cleanup_tests(void)
{
-# ifndef OPENSSL_NO_DH
+# if !defined(OPENSSL_NO_TLS1_2) && !defined(OPENSSL_NO_DH)
EVP_PKEY_free(tmp_dh_params);
#endif
OPENSSL_free(cert);
|
[swig] provide __array__ so that numpy.array(v) for SiconosVector works | SiconosVectorIterator __iter__() {
return SiconosVectorIterator($self->begin());
}
+%insert("python") %{
+ def __array__(self):
+ import numpy
+ return numpy.fromiter(self, dtype=float)
+%}
}
%extend BlockVector{
std::string __str__() { return $self->toString(); }
|
guybrush: Implement ppc_interrupt
Implement ppc_interrupt handler.
BRANCH=None
TEST=Build | @@ -495,7 +495,18 @@ uint16_t tcpc_get_alert_status(void)
void ppc_interrupt(enum gpio_signal signal)
{
- /* TODO */
+ switch (signal) {
+ case GPIO_USB_C0_PPC_INT_ODL:
+ aoz1380_interrupt(USBC_PORT_C0);
+ break;
+
+ case GPIO_USB_C1_PPC_INT_ODL:
+ nx20p348x_interrupt(USBC_PORT_C1);
+ break;
+
+ default:
+ break;
+ }
}
void bc12_interrupt(enum gpio_signal signal)
|
HTTP: request body was not passed to application.
The bug has appeared in changeset | @@ -218,6 +218,11 @@ nxt_http_app_request(nxt_task_t *task, void *obj, void *data)
ar->r.header.cookie.start = r->cookie->value;
}
+ if (r->body != NULL) {
+ ar->r.body.buf = r->body;
+ ar->r.body.preread_size = r->content_length_n;
+ }
+
ar->r.body.done = 1;
ret = nxt_http_parse_request_init(&ar->resp_parser, r->mem_pool);
|
gsl_error handling for ccl_cls.c | @@ -46,8 +46,12 @@ static double spline_eval(double x,SplPar *spl)
return spl->y0;
else if(x>=spl->xf)
return spl->yf;
- else
- return gsl_spline_eval(spl->spline,x,spl->intacc);
+ else{
+ double y;
+ int stat= gsl_spline_eval_e(spl->spline,x,spl->intacc,&y);
+ if (stat != GSL_SUCCESS) return NAN;
+ return y;
+ }
}
//Wrapper around spline_eval with GSL function syntax
|
apps: address potential memory leaks | @@ -3016,6 +3016,7 @@ static int www_body(int s, int stype, int prot, unsigned char *context)
/* No need to free |con| after this. Done by BIO_free(ssl_bio) */
BIO_set_ssl(ssl_bio, con, BIO_CLOSE);
BIO_push(io, ssl_bio);
+ ssl_bio = NULL;
#ifdef CHARSET_EBCDIC
io = BIO_push(BIO_new(BIO_f_ebcdic_filter()), io);
#endif
@@ -3376,6 +3377,7 @@ static int www_body(int s, int stype, int prot, unsigned char *context)
err:
OPENSSL_free(buf);
+ BIO_free(ssl_bio);
BIO_free_all(io);
return ret;
}
@@ -3420,6 +3422,7 @@ static int rev_body(int s, int stype, int prot, unsigned char *context)
/* No need to free |con| after this. Done by BIO_free(ssl_bio) */
BIO_set_ssl(ssl_bio, con, BIO_CLOSE);
BIO_push(io, ssl_bio);
+ ssl_bio = NULL;
#ifdef CHARSET_EBCDIC
io = BIO_push(BIO_new(BIO_f_ebcdic_filter()), io);
#endif
@@ -3517,6 +3520,7 @@ static int rev_body(int s, int stype, int prot, unsigned char *context)
err:
OPENSSL_free(buf);
+ BIO_free(ssl_bio);
BIO_free_all(io);
return ret;
}
|
Test calling a Lua function from Haskell | @@ -23,8 +23,8 @@ THE SOFTWARE.
module Foreign.Lua.InteropTest (tests) where
import Foreign.Lua.Functions
-import Foreign.Lua.Interop (peek, registerhsfunction)
-import Foreign.Lua.Types (Result (..))
+import Foreign.Lua.Interop (callfunc, peek, registerhsfunction)
+import Foreign.Lua.Types (LuaNumber, Result (..))
import Test.Tasty (TestTree, testGroup)
import Test.Tasty.HUnit (assertBool, testCase)
@@ -48,4 +48,10 @@ tests = testGroup "Interoperability"
return $ res == Success True
in assertBool "Operation was successful" =<< runLua luaOp
]
+
+ , testGroup "call lua function from haskell"
+ [ testCase "test equality within lua" $
+ assertBool "raw equality test failed" =<<
+ runLua (openlibs *> callfunc "rawequal" (5 :: Int) (5.0 :: LuaNumber))
+ ]
]
|
[esp32] use system esptool.py for elf2bin conversion | @@ -26,4 +26,4 @@ OTA_BIN_OUTPUT_FILE := $(LINK_OUTPUT_FILE:$(LINK_OUTPUT_SUFFIX)=.ota$(BIN_OUTPUT
EXTRA_POST_BUILD_TARGETS += gen_crc_bin
gen_crc_bin:
- python platform/mcu/esp32/esptool_py/esptool/esptool.py --chip esp32 elf2image $(LINK_OUTPUT_FILE)
+ esptool.py --chip esp32 elf2image $(LINK_OUTPUT_FILE)
|
tools:ras remove fme error revision zero code
changes:
- remove fme error revision zero code, not supported
in release bitstream | @@ -637,7 +637,7 @@ fpga_result print_ras_errors(fpga_token token)
}
printf(" fme error revison : %ld \n", revision);
- // Revision 0
+ // Revision 1
if( revision == 1 ) {
// Non Fatal Error
@@ -712,81 +712,6 @@ fpga_result print_ras_errors(fpga_token token)
return result;
}
- // Revision 0
- } else if( revision == 0){
-
- // GBS Error
- printf("\n ------- GBS error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_GBS_ERRORS,
- RAS_GBS_ERROR,
- RAS_GBS_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get fme gbs errors");
- return result;
- }
-
- // BBS Error
- printf("\n ------- BBS error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_BBS_ERRORS,
- RAS_BBS_ERROR,
- RAS_BBS_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get fme bbs errors");
- return result;
- }
-
- // Injected error
- printf("\n ------- Injected error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_INJECT_ERROR,
- RAS_INJECT_ERROR,
- RAS_INJECT_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get fme Injected errors");
- return result;
- }
-
- // FME error
- printf("\n ------- FME error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_FME_ERRORS,
- FME_ERROR,
- FME_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get fme errors");
- return result;
- }
-
- // PCIe0 error
- printf("\n ------- PCIe0 error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_PCIE0_ERRORS,
- PCIE0_ERROR,
- PCIE0_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get pcie0 errors");
- return result;
- }
-
- // PCIe1 error
- printf("\n ------- PCIe1 error ------------ \n");
- result = print_errors(token,
- FME_SYSFS_PCIE1_ERRORS,
- PCIE1_ERROR,
- PCIE1_ERROR_COUNT);
-
- if (result != FPGA_OK) {
- FPGA_ERR("Failed to get pcie1 errors");
- return result;
- }
-
} else {
printf("\n Invalid FME Error Revision \n");
}
|
Fixed crash when COPY reports an unexpected message type error | @@ -7105,11 +7105,14 @@ extract_line_buf(CopyState cstate)
lineno_delim = memchr(line_start, COPY_METADATA_DELIM,
Min(32, cstate->line_buf.len));
+ if (lineno_delim && (lineno_delim != line_start))
+ {
value_len = lineno_delim - line_start + 1;
line_start += value_len;
line_buf = line_start;
}
}
+ }
/*
* Finally allocate a new buffer and trim the string to a reasonable
|
webdojo: remove "no slogs" notification | @@ -19,11 +19,9 @@ export default class Subscription {
setupSlog() {
const slog = new EventSource('/~/slog', { withCredentials: true });
- let available = false;
slog.onopen = e => {
console.log('slog: opened stream');
- available = true;
}
slog.onmessage = e => {
@@ -32,11 +30,6 @@ export default class Subscription {
slog.onerror = e => {
console.error('slog: eventsource error:', e);
- if (!available) {
- this.handleEvent({ txt:
- 'landscape: no printf stream. bad connection or old binary.'
- });
- } else {
window.setTimeout(() => {
if (slog.readyState !== EventSource.CLOSED) return;
console.log('slog: reconnecting...');
@@ -44,7 +37,6 @@ export default class Subscription {
}, 10000);
}
}
- }
delete() {
this.channel.delete();
|
[Rust] File server launches programs in /config/run_on_startup.txt on startup | @@ -180,6 +180,20 @@ fn launch_program(root_dir: &DirectoryImage, sender: &str, request: &LaunchProgr
launch_program_by_path(root_dir, requested_path)
}
+fn launch_startup_programs(root_dir: &DirectoryImage) {
+ let run_on_startup_path = "/config/run_on_startup.txt";
+ let run_on_startup_config =
+ fs_entry_find(root_dir, run_on_startup_path).expect("{run_on_startup_path} is missing!");
+ let run_on_startup_contents = match str::from_utf8(run_on_startup_config.file_data.unwrap()) {
+ Ok(v) => v,
+ Err(e) => panic!("Failed to read {run_on_startup_path}, invalid UTF-8: {e}"),
+ };
+
+ for line in run_on_startup_contents.split("\n") {
+ launch_program_by_path(root_dir, line);
+ }
+}
+
fn read_file(root_dir: &DirectoryImage, sender: &str, request: &ReadFile) {
let requested_path = str_from_u8_nul_utf8_unchecked(&request.path);
printf!("Reading {} for {}\n", requested_path, sender);
@@ -227,6 +241,8 @@ fn start(_argc: isize, _argv: *const *const u8) -> isize {
let root_dir: DirectoryImage = postcard::from_bytes(rust_reference).expect("Dealloc failed");
//traverse_dir(0, &root_dir);
+ launch_startup_programs(&root_dir);
+
loop {
printf!("Awaiting next message...\n");
// TODO(PT): This pattern is copied from the AwmWindow event loop
|
papi: export packed message structures
Use the Python API binding to generate a set of API messages
in binary format, that can later be replayed independently
of the Python API.
Type: improvement | @@ -530,6 +530,13 @@ class VPPApiClient:
return f
+ def make_pack_function(self, msg, i, multipart):
+ def f(**kwargs):
+ return self._call_vpp_pack(i, msg, **kwargs)
+
+ f.msg = msg
+ return f
+
def _register_functions(self, do_async=False):
self.id_names = [None] * (self.vpp_dictionary_maxid + 1)
self.id_msgdef = [None] * (self.vpp_dictionary_maxid + 1)
@@ -544,7 +551,9 @@ class VPPApiClient:
# Create function for client side messages.
if name in self.services:
f = self.make_function(msg, i, self.services[name], do_async)
+ f_pack = self.make_pack_function(msg, i, self.services[name])
setattr(self._api, name, FuncWrapper(f))
+ setattr(self._api, name + "_pack", FuncWrapper(f_pack))
else:
self.logger.debug("No such message type or failed CRC checksum: %s", n)
@@ -836,6 +845,13 @@ class VPPApiClient:
self.transport.write(b)
return context
+ def _call_vpp_pack(self, i, msg, **kwargs):
+ """Given a message, return the binary representation."""
+ kwargs["_vl_msg_id"] = i
+ kwargs["client_index"] = 0
+ kwargs["context"] = 0
+ return msg.pack(kwargs)
+
def read_blocking(self, no_type_conversion=False, timeout=None):
"""Get next received message from transport within timeout, decoded.
|
Update scripts/docker/alpine/3.10/release.Dockerfile
docker: alpine release image fix ALLUSERSPROFILE | @@ -96,7 +96,7 @@ RUN echo "%wheel ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
RUN echo "alias sudo='sudo -i' # in this image we do not need to be root" >> /etc/profile
RUN echo "export PS1='\u $ '" >> /etc/profile
RUN echo "export LD_LIBRARY_PATH=/usr/local/lib/elektra/" >> /etc/profile
-RUN echo "export ALLUSERSPROFILE=/" >> /etc/profile
+RUN echo "export ALLUSERSPROFILE=''" >> /etc/profile
USER ${USERID}
WORKDIR /home/elektra
|
osx: travis fixing path for unzipping | @@ -11,7 +11,7 @@ echo Downloading Qt
#./dmginstall.sh https://download.qt.io/official_releases/qt/5.9/5.9.5/qt-opensource-mac-x64-5.9.5.dmg
#brew update && brew install qt5 --with-qtwebengine
wget -q https://s3.amazonaws.com/files.tau-technologies.com/buildenv/Qt5.9.5.tar.gz -O $HOME/Qt5.9.5.tar.gz
-tar -xzf $HOME/Qt5.9.5.tar.gz
+tar -xzf $HOME/Qt5.9.5.tar.gz -C $HOME/
echo Qt installed
if [ $TRAVIS_BRANCH == "CI" ]
|
docs/packages: Add quick "Creating distribution packages" section.
Needs more details. | @@ -199,6 +199,32 @@ Few notes:
you may want to decrease the amount of frozen modules included.
+Creating distribution packages
+------------------------------
+
+Distribution packages for MicroPython are created in the same manner
+as for CPython or any other Python implementation, see references at
+the end of chapter. "Source distribution" (sdist) format is used for
+packaging. The post-processing discussed above, (and pre-processing
+discussed in the following section) is achieved by using custom
+"sdist" command for distutils/setuptools. Thus, packaging steps
+remain the same as for standard distutils/setuptools, the user just
+need to override "sdist" command implementation by passing the
+appropriate argument to ``setup()`` call::
+
+ from setuptools import setup
+ import sdist_upip
+
+ setup(
+ ...,
+ cmdclass={'sdist': sdist_upip.sdist}
+ )
+
+The sdist_upip.py module as referenced above can be found in
+`micropython-lib`:
+https://github.com/micropython/micropython-lib/blob/master/sdist_upip.py
+
+
Application resources
---------------------
|
Changed references to "biases" to "alignment biases". | @@ -43,8 +43,8 @@ class ClTracer(object):
has_intrinsic_alignment (bool, optional): Flag to incorporate intrinsic alignment into the model. Defaults to False.
z_n (array_like, optional): Array of redshifts for N(z).
n (array_like, optional): Array of N(z)-values.
- z_b (array_like, optional): Array of redshifts for biases, b(z).
- b (array_like, optional): Array of biases.
+ z_b (array_like, optional): Array of redshifts for alignment biases, b(z).
+ b (array_like, optional): Array of alignment biases.
z_s (array_like, optional): Array of redshifts for shapes, s(z).
s (array_like, optional): Array of shapes.
z_ba (array_like, optional): Array of redshifts for intrinsic alignment amplitudes.
|
Enhanced functionality | @@ -292,7 +292,8 @@ typedef enum libxsmm_meltw_opreduce_vecs_flags {
LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_NONE = 512,
LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_SUM = 1024,
LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MAX = 2048,
- LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MIN = 4096
+ LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MIN = 4096,
+ LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_SCALE_OP_RESULT = 8192
} libxsmm_meltw_opreduce_vecs_flags;
LIBXSMM_EXTERN_C typedef union LIBXSMM_RETARGETABLE libxsmm_xmelt_flags {
|
OpenXR: sync all subaction paths; | @@ -1775,13 +1775,14 @@ static double openxr_update(void) {
state.lastDisplayTime = state.frameState.predictedDisplayTime - state.frameState.predictedDisplayPeriod;
}
+ XrActiveActionSet activeSets[] = {
+ { state.actionSet, XR_NULL_PATH }
+ };
+
XrActionsSyncInfo syncInfo = {
.type = XR_TYPE_ACTIONS_SYNC_INFO,
- .countActiveActionSets = 2,
- .activeActionSets = (XrActiveActionSet[]) {
- { state.actionSet, state.actionFilters[0] },
- { state.actionSet, state.actionFilters[1] }
- }
+ .countActiveActionSets = COUNTOF(activeSets),
+ .activeActionSets = activeSets
};
XR(xrSyncActions(state.session, &syncInfo));
|
Show topology - dimms list sort by DimmID | @@ -59,6 +59,45 @@ CONST CHAR16 *mpDefaultDimmIds[DISPLAY_DIMM_ID_MAX_SIZE] = {
#define ERROR_CHECKING_MIXED_SKU L"Error: Could not check if SKU is mixed."
#define WARNING_DIMMS_SKU_MIXED L"Warning: Mixed SKU detected. Driver functionalities limited.\n"
+/**
+ Compare DimmID field in DIMM_INFO Struct
+
+ @param[in] pFirst First item to compare
+ @param[in] pSecond Second item to compare
+
+ @retval -1 if first is less than second
+ @retval 0 if first is equal to second
+ @retval 1 if first is greater than second
+**/
+STATIC
+INT32
+CompareDimmIdInDimmInfo(
+ IN VOID *pFirst,
+ IN VOID *pSecond
+)
+{
+ DIMM_INFO *pDimmInfo = NULL;
+ DIMM_INFO *pDimmInfo2 = NULL;
+
+ if (pFirst == NULL || pSecond == NULL) {
+ NVDIMM_DBG("NULL pointer found.");
+ return 0;
+ }
+
+ pDimmInfo = (DIMM_INFO*)pFirst;
+ pDimmInfo2 = (DIMM_INFO*)pSecond;
+
+ if (pDimmInfo->DimmID < pDimmInfo2->DimmID) {
+ return -1;
+ }
+ else if (pDimmInfo->DimmID > pDimmInfo2->DimmID) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
/**
Retrieve a populated array and count of DIMMs in the system. The caller is
responsible for freeing the returned array
@@ -221,6 +260,12 @@ GetAllDimmList(
goto FinishError;
}
+ ReturnCode = BubbleSort((VOID*)*ppDimms, *pDimmCount, sizeof(**ppDimms), CompareDimmIdInDimmInfo);
+ if (EFI_ERROR(ReturnCode)) {
+ NVDIMM_DBG("Dimms list may not be sorted");
+ goto FinishError;
+ }
+
goto Finish;
FinishError:
|
Drop unnecessary NULL check
The variable `str` has already been dereferenced in the loop.
check_after_deref: Null-checking str suggests that it may be null, but it has already been dereferenced on all paths leading to the check.
Found by Coverity | @@ -1365,7 +1365,7 @@ parse_format (GLogItem * logitem, char *str, char *lfmt) {
return 0;
if (tilde && *p != '\0') {
- if ((str == NULL) || (*str == '\0'))
+ if (*str == '\0')
return 0;
if (special_specifier (logitem, &str, &p) == 1)
return 1;
@@ -1373,7 +1373,7 @@ parse_format (GLogItem * logitem, char *str, char *lfmt) {
}
/* %h */
else if (perc && *p != '\0') {
- if ((str == NULL) || (*str == '\0'))
+ if (*str == '\0')
return 0;
memset (end, 0, sizeof end);
|
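The Coverity finding above ("check_after_deref") is about dead NULL tests: once a pointer has been dereferenced on every path, a later NULL check can never fire and only muddies the function's contract. A tiny illustrative C example of the pattern and the cleanup (nothing here is GoAccess code):

    #include <stdio.h>

    /* Pattern Coverity flags: the pointer is dereferenced unconditionally,
     * so the NULL test below it is unreachable dead code. */
    static int starts_with_digit_buggy(const char *str)
    {
        char c = *str;            /* dereference happens first ... */

        if (str == NULL)          /* ... so this branch can never be taken */
            return 0;

        return c >= '0' && c <= '9';
    }

    /* Cleanup: drop the redundant test and document the precondition
     * (or move the check above the first dereference if NULL is legal). */
    static int starts_with_digit(const char *str)  /* str must not be NULL */
    {
        return str[0] >= '0' && str[0] <= '9';
    }

    int main(void)
    {
        printf("%d %d\n", starts_with_digit_buggy("7a"), starts_with_digit("x"));
        return 0;
    }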
Ignore Zero-Length Receive Events in Tests | @@ -1056,7 +1056,9 @@ DummyStreamCallback(
switch (Event->Type) {
case QUIC_STREAM_EVENT_RECEIVE:
- TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE should never be called!");
+ if (Event->RECEIVE.TotalBufferLength != 0) {
+ TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE with data should never be called!");
+ }
break;
case QUIC_STREAM_EVENT_SEND_COMPLETE:
@@ -1083,7 +1085,9 @@ ShutdownStreamCallback(
switch (Event->Type) {
case QUIC_STREAM_EVENT_RECEIVE:
- TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE should never be called!");
+ if (Event->RECEIVE.TotalBufferLength != 0) {
+ TEST_FAILURE("QUIC_STREAM_EVENT_RECEIVE with data should never be called!");
+ }
break;
case QUIC_STREAM_EVENT_SEND_COMPLETE:
|
Update: Readme link to demos | @@ -4,6 +4,7 @@ This is a completely new platform and not branched from the existing OpenNARS co
The ONA implementation has been developed with a pragmatic mindset. The focus on the design has been to implement the 'existing' theory [6, 7] as effectively as possible and make firm decisions rather than keep as many options open as possible. This has led to some small conceptual differences to OpenNARS [8] which was developed for research purposes.
+Video tutorials and demo videos can be found here: [Video tutorials](https://github.com/opennars/OpenNARS-for-Applications/wiki/Video-tutorials)
***How to clone and compile (tested with GCC and Clang for x64, x86 and ARM):***
|
VERSION bump version to 0.11.22 | @@ -32,7 +32,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0")
# set version
set(LIBNETCONF2_MAJOR_VERSION 0)
set(LIBNETCONF2_MINOR_VERSION 11)
-set(LIBNETCONF2_MICRO_VERSION 21)
+set(LIBNETCONF2_MICRO_VERSION 22)
set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION})
set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
|
Add :release to all objects; | @@ -23,6 +23,7 @@ static int luax_meta__gc(lua_State* L) {
destructorFn* destructor = (destructorFn*) lua_tocfunction(L, -1);
if (destructor) {
_lovrRelease(p->object, destructor);
+ p->object = NULL;
}
}
return 0;
@@ -70,6 +71,10 @@ void _luax_registertype(lua_State* L, const char* name, const luaL_Reg* function
luaL_register(L, NULL, functions);
}
+ // :release function
+ lua_pushcfunction(L, luax_meta__gc);
+ lua_setfield(L, -2, "release");
+
// Pop metatable
lua_pop(L, 1);
}
|
hv: lapic: fix a theoretical infinite loop when clearing the LAPIC ISR
In theory, there could be 256 pending interrupts in the LAPIC ISR.
Add this check to avoid a theoretical infinite loop when clearing the
LAPIC ISR. Leave the potential hardware issue to machine check. | @@ -45,20 +45,23 @@ static union lapic_base_msr saved_lapic_base_msr;
static void clear_lapic_isr(void)
{
- uint32_t isr_reg = MSR_IA32_EXT_APIC_ISR0;
+ uint32_t i;
+ uint32_t isr_reg;
/* This is a Intel recommended procedure and assures that the processor
* does not get hung up due to already set "in-service" interrupts left
* over from the boot loader environment. This actually occurs in real
* life, therefore we will ensure all the in-service bits are clear.
*/
- do {
+ for (isr_reg = MSR_IA32_EXT_APIC_ISR7; isr_reg >= MSR_IA32_EXT_APIC_ISR0; isr_reg--) {
+ for (i = 0U; i < 32U; i++) {
if (msr_read(isr_reg) != 0U) {
msr_write(MSR_IA32_EXT_APIC_EOI, 0U);
- continue;
+ } else {
+ break;
+ }
+ }
}
- isr_reg += 0x1U;
- } while (isr_reg <= MSR_IA32_EXT_APIC_ISR7);
}
void early_init_lapic(void)
@@ -98,8 +101,7 @@ void init_lapic(uint16_t pcpu_id)
/* Enable Local APIC */
/* TODO: add spurious-interrupt handler */
- msr_write(MSR_IA32_EXT_APIC_SIVR,
- LAPIC_SVR_APIC_ENABLE_MASK | LAPIC_SVR_VECTOR);
+ msr_write(MSR_IA32_EXT_APIC_SIVR, LAPIC_SVR_APIC_ENABLE_MASK | LAPIC_SVR_VECTOR);
/* Ensure there are no ISR bits set. */
clear_lapic_isr();
|
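The patch above bounds the EOI loop: with eight 32-bit in-service registers there can be at most 256 pending bits, so capping the inner loop at 32 iterations per register turns a potential infinite spin into a finite sweep. A rough userspace model of that bounded sweep, with a fake register file standing in for the real MSR accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define ISR_REGS      8    /* 8 x 32-bit in-service registers = 256 vectors */
    #define BITS_PER_REG 32

    /* Fake register file, for illustration only. */
    static uint32_t fake_isr[ISR_REGS] = { 0x5u, 0, 0, 0, 0, 0, 0x80000000u, 0 };

    static uint32_t read_isr(int reg)
    {
        return fake_isr[reg];
    }

    /* Model an EOI write by clearing one in-service bit (the lowest set bit). */
    static void write_eoi(int reg)
    {
        fake_isr[reg] &= fake_isr[reg] - 1u;
    }

    static void clear_all_isr(void)
    {
        /* The outer loop visits each register once; the inner loop is capped
         * at 32 iterations, so a bit that never clears cannot spin forever. */
        for (int reg = ISR_REGS - 1; reg >= 0; reg--) {
            for (int i = 0; i < BITS_PER_REG; i++) {
                if (read_isr(reg) == 0)
                    break;
                write_eoi(reg);
            }
        }
    }

    int main(void)
    {
        clear_all_isr();
        printf("isr[0]=%u isr[6]=%u\n", fake_isr[0], fake_isr[6]);
        return 0;
    }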
Updated values in tests/AbioticO2. | @@ -4,8 +4,8 @@ import astropy.units as u
@benchmark(
{
- "log.final.star.Luminosity": {"value": 7.362835e23, "unit": u.W},
- "log.final.star.LXUVStellar": {"value": 7.362835e20, "unit": u.W},
+ "log.final.star.Luminosity": {"value": 7.362896e+23, "unit": u.W},
+ "log.final.star.LXUVStellar": {"value": 7.362896e+20, "unit": u.W},
"log.final.star.Radius": {"value": 1.186502e08, "unit": u.m},
"log.final.star.Temperature": {"value": 2926.556751, "unit": u.Kelvin},
"log.final.star.RadGyra": {"value": 0.466090},
@@ -13,9 +13,9 @@ import astropy.units as u
"log.final.b.OxygenMass": {"value": 209.805999, "unit": u.bar, "rtol": 1e-4},
"log.final.e.SurfWaterMass": {"value": 7.403340, "unit": u.TO},
"log.final.e.OxygenMass": {"value": 418.194489, "unit": u.bar},
- "log.final.e.FXUV": {"value": 3.053257, "unit": u.W / u.m ** 2},
+ "log.final.e.FXUV": {"value": 3.053282, "unit": u.W / u.m ** 2},
"log.final.e.AtmXAbsEffH2O": {"value": 0.051776},
- "log.final.e.Instellation": {"value": 3053.257033, "unit": u.kg / u.sec ** 3},
+ "log.final.e.Instellation": {"value": 3053.282097, "unit": u.kg / u.sec ** 3},
}
)
class TestAbioticO2(Benchmark):
|
zephyr: lis2dw12: Add register to emulator
Update the emulator to handle the CTRL1 register and add an assert message when
attempting to access unsupported registers
BRANCH=None
TEST=zmake -D configure --test test-drivers | @@ -29,6 +29,8 @@ struct lis2dw12_emul_data {
struct i2c_common_emul_data common;
/** Emulated who-am-i register */
uint8_t who_am_i_reg;
+ /** Emulated ctrl1 register */
+ uint8_t ctrl1_reg;
/** Emulated ctrl2 register */
uint8_t ctrl2_reg;
/** Soft reset count */
@@ -87,11 +89,16 @@ static int lis2dw12_emul_read_byte(struct i2c_emul *emul, int reg, uint8_t *val,
__ASSERT_NO_MSG(bytes == 0);
*val = data->who_am_i_reg;
break;
+ case LIS2DW12_CTRL1_ADDR:
+ __ASSERT_NO_MSG(bytes == 0);
+ *val = data->ctrl1_reg;
+ break;
case LIS2DW12_CTRL2_ADDR:
__ASSERT_NO_MSG(bytes == 0);
*val = data->ctrl2_reg;
break;
default:
+ __ASSERT(false, "No read handler for register 0x%02x", reg);
return -EINVAL;
}
return 0;
@@ -106,6 +113,9 @@ static int lis2dw12_emul_write_byte(struct i2c_emul *emul, int reg, uint8_t val,
case LIS2DW12_WHO_AM_I_REG:
LOG_ERR("Can't write to who-am-i register");
return -EINVAL;
+ case LIS2DW12_CTRL1_ADDR:
+ data->ctrl1_reg = val;
+ break;
case LIS2DW12_CTRL2_ADDR:
__ASSERT_NO_MSG(bytes == 1);
if ((val & LIS2DW12_SOFT_RESET_MASK) != 0) {
@@ -115,6 +125,7 @@ static int lis2dw12_emul_write_byte(struct i2c_emul *emul, int reg, uint8_t val,
data->ctrl2_reg = val & ~LIS2DW12_SOFT_RESET_MASK;
break;
default:
+ __ASSERT(false, "No write handler for register 0x%02x", reg);
return -EINVAL;
}
return 0;
|
build: create _objs target for each library
Type: improvement | @@ -19,7 +19,13 @@ macro(add_vpp_library lib)
${ARGN}
)
- add_library(${lib} SHARED ${ARG_SOURCES})
+ set (lo ${lib}_objs)
+ add_library(${lo} OBJECT ${ARG_SOURCES})
+ set_target_properties(${lo} PROPERTIES POSITION_INDEPENDENT_CODE ON)
+
+ add_library(${lib} SHARED)
+ target_sources(${lib} PRIVATE $<TARGET_OBJECTS:${lo}>)
+
if(VPP_LIB_VERSION)
set_target_properties(${lib} PROPERTIES SOVERSION ${VPP_LIB_VERSION})
endif()
@@ -39,6 +45,7 @@ macro(add_vpp_library lib)
)
if (ARG_LTO AND VPP_USE_LTO)
+ set_property(TARGET ${lo} PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
set_property(TARGET ${lib} PROPERTY INTERPROCEDURAL_OPTIMIZATION TRUE)
target_compile_options (${lib} PRIVATE "-ffunction-sections")
target_compile_options (${lib} PRIVATE "-fdata-sections")
@@ -66,11 +73,11 @@ macro(add_vpp_library lib)
endif()
if(NOT VPP_EXTERNAL_PROJECT)
- add_dependencies(${lib} api_headers)
+ add_dependencies(${lo} api_headers)
endif()
if(ARG_DEPENDS)
- add_dependencies(${lib} ${ARG_DEPENDS})
+ add_dependencies(${lo} ${ARG_DEPENDS})
endif()
# install headers
|
Make test_nsalloc_realloc_attributes() robust vs allocation changes | @@ -10804,7 +10804,9 @@ START_TEST(test_nsalloc_realloc_attributes)
if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text),
XML_TRUE) != XML_STATUS_ERROR)
break;
- XML_ParserReset(parser, NULL);
+ /* See comment in test_nsalloc_xmlns() */
+ nsalloc_teardown();
+ nsalloc_setup();
}
if (i == 0)
fail("Parsing worked despite failing reallocations");
|
storage: added extra argument to flb_sched_timer_cb_create call | @@ -267,7 +267,7 @@ struct flb_storage_metrics *flb_storage_metrics_create(struct flb_config *ctx)
ret = flb_sched_timer_cb_create(ctx->sched, FLB_SCHED_TIMER_CB_PERM, 5000,
cb_storage_metrics_collect,
- ctx->storage_metrics_ctx);
+ ctx->storage_metrics_ctx, NULL);
if (ret == -1) {
flb_error("[storage metrics] cannot create timer to collect metrics");
flb_free(sm);
|
zephyr: docs: Update the doc template
Update the document template to reflect current best practices.
Specifically, move all related Kconfigs into a separate file and link to
the file only
BRANCH=none
TEST=View preview in gitiles | ## Kconfig Options
-*List the Kconfig options that enable the feature and list any sub-configuration
-options that control the behavior of the feature.*
-
-Kconfig Option | Default | Documentation
-:------------------------------------- | :-----: | :------------
-`CONFIG_PLATFORM_EC_<option>` | y/n | [zephyr/Kconfig](../zephyr/Kconfig)
-
-Kconfig sub-option | Default | Documentation
-:------------------------------------- | :-----: | :------------
-`CONFIG_PLATFORM_EC_<option>` | y/n | [zephyr/Kconfig](../zephyr/Kconfig)
+*Link to the file providing all the Kconfig options related to the feature. If
+the Kconfig options are not currently in a standalone file, consider moving the
+related Kconfigs into their own file.*
+*Example CL moving I2C related configs into a new file: https://crrev.com/c/3575081*
*Note - Avoid documenting `CONFIG_` options in the markdown as the relevant
-`Kconfig*` contains the authoritative definition. Link directly to the Kconfig
-option in source like this: [I2C Passthru Restricted].*
+`Kconfig*` contains the authoritative definition. If there is one main Kconfig
+that must be enabled for the feature, mention it in this section. See the [I2C
+documentation](zephyr_i2c.md#kconfig-options) for an example.*
+
+*If the `Kconfig` file does not provide sufficient help descriptions, please fix
+them.*
## Devicetree Nodes
@@ -44,6 +42,9 @@ compile and run. For many features, this can section can be empty.*
*Provide any tips for testing and debugging the EC feature.*
+*It's especially helpful to document EC console commands and `ectool` commands
+from the AP in this section. Please provide example output.*
+
## Example
*Provide code snippets from a working board to walk the user through
|
A hacky fix for missed elements when renderer/layer caches are out of sync | @@ -360,8 +360,12 @@ namespace carto {
std::lock_guard<std::recursive_mutex> lock(_mutex);
+ long long tileId = getTileId(MapTile(vtTileId.x, vtTileId.y, vtTileId.zoom, _frameNr));
TileInfo tileInfo;
- _visibleCache.peek(getTileId(MapTile(vtTileId.x, vtTileId.y, vtTileId.zoom, _frameNr)), tileInfo);
+ _visibleCache.peek(tileId, tileInfo);
+ if (!tileInfo.getTileMap()) {
+ _preloadingCache.peek(tileId, tileInfo);
+ }
if (std::shared_ptr<BinaryData> tileData = tileInfo.getTileData()) {
if (std::shared_ptr<VectorTileFeature> tileFeature = _tileDecoder->decodeFeature(id, vtTileId, tileData, tileInfo.getTileBounds())) {
std::shared_ptr<Layer> thisLayer = std::const_pointer_cast<Layer>(shared_from_this());
|
Add option to run clang-tidy and cppcheck | @@ -109,6 +109,7 @@ option(QUIC_UWP_BUILD "Build for UWP" OFF)
option(QUIC_PGO "Enables profile guided optimizations" OFF)
option(QUIC_SOURCE_LINK "Enables source linking on MSVC" ON)
option(QUIC_PDBALTPATH "Enable PDBALTPATH setting on MSVC" ON)
+option(QUIC_CODE_CHECK "Run static code checkers" OFF)
# FindLTTngUST does not exist before CMake 3.6, so disable logging for older cmake versions
if (${CMAKE_VERSION} VERSION_LESS "3.6.0")
@@ -378,6 +379,35 @@ if(QUIC_TLS STREQUAL "mitls")
add_subdirectory(submodules/everest/msquic/msvc/quiccrypto)
endif()
+if(QUIC_CODE_CHECK)
+ find_program(CLANGTIDY NAMES clang-tidy)
+ if(CLANGTIDY)
+ message(STATUS "Found clang-tidy: ${CLANGTIDY}")
+ set(CLANG_TIDY_CHECKS *
+ # add checks to ignore here, e.g.,
+ # -hicpp-no-assembler
+ # -hicpp-signed-bitwise
+ # ...
+ )
+ string(REPLACE ";" "," CLANG_TIDY_CHECKS "${CLANG_TIDY_CHECKS}")
+ set(CMAKE_C_CLANG_TIDY ${CLANGTIDY} -checks=${CLANG_TIDY_CHECKS}
+ -system-headers)
+ set(CMAKE_CXX_CLANG_TIDY ${CMAKE_C_CLANG_TIDY})
+ else()
+ message(STATUS "clang-tidy not found")
+ endif()
+
+ find_program(CPPCHECK NAMES cppcheck)
+ if(CPPCHECK)
+ message(STATUS "Found cppcheck: ${CPPCHECK}")
+ set(CMAKE_C_CPPCHECK ${CPPCHECK} -q --inline-suppr
+ --enable=warning,style,performance,portability -D__linux__)
+ set(CMAKE_CXX_CPPCHECK ${CMAKE_C_CPPCHECK})
+ else()
+ message(STATUS "cppcheck not found")
+ endif()
+endif()
+
add_subdirectory(src/inc)
# Product code
|
enable jobcomp_elasticsearch plugin
The plugin is built automatically if curl-config is found. | @@ -228,6 +228,9 @@ Requires(pre): shadow-utils
%endif
%endif
+#needed to enable jobcomp_elasticsearch plugin
+BuildRequires: curl-devel
+
%description
Slurm is an open source, fault-tolerant, and highly
scalable cluster management and job scheduling system for Linux clusters.
|
Improve encoders api docs | @@ -16,9 +16,15 @@ extern "C" {
#endif
/*
- * ============================================================================
- * Primitives encoding
- * ============================================================================
+ * All cbor_encode_* methods take 2 or 3 arguments:
+ * - a logical `value` to encode (except for trivial items such as NULLs)
+ * - an output `buffer` pointer
+ * - a `buffer_size` specification
+ *
+ * They serialize the `value` into one or more bytes and write the bytes to the
+ * output `buffer` and return either the number of bytes written, or 0 if the
+ * `buffer_size` was too small to fit the serialized value (in which
+ * case it is not modified).
*/
CBOR_EXPORT size_t cbor_encode_uint8(uint8_t, unsigned char *, size_t);
@@ -86,11 +92,6 @@ CBOR_EXPORT size_t cbor_encode_undef(unsigned char *, size_t);
* lost.
* - In all other cases, the sign bit, the exponent, and 10 most significant
* bits of the significand are kept
- *
- * @param value
- * @param buffer Target buffer
- * @param buffer_size Available space in the buffer
- * @return number of bytes written
*/
CBOR_EXPORT size_t cbor_encode_half(float, unsigned char *, size_t);
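A minimal caller-side sketch of the contract documented above, in C; the `<cbor.h>` include and the buffer size are assumptions — only the signatures shown in this hunk are taken as given:
```c
#include <cbor.h>
#include <stdio.h>

int main(void)
{
    unsigned char buf[16];

    /* Returns the number of bytes written, or 0 if buffer_size was too
     * small to fit the serialized value (buf is then left untouched). */
    size_t written = cbor_encode_uint8(42, buf, sizeof(buf));
    if (written == 0) {
        fprintf(stderr, "buffer too small\n");
        return 1;
    }
    printf("encoded %zu byte(s), first byte 0x%02x\n", written, buf[0]);
    return 0;
}
```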
|
do not respond to WILDCARD SSID request | @@ -2051,9 +2051,8 @@ static inline void getnewmac(uint8_t *newapmac, uint8_t *essidtagptr)
int c;
static macessidlist_t *zeiger;
static ietag_t *essidtag;
-essidtag = (ietag_t*)essidtagptr;
-
+essidtag = (ietag_t*)essidtagptr;
zeiger = myproberesponselist;
for(c = 0; c < MYPROBERESPONSELIST_MAX -1; c++)
{
@@ -2111,6 +2110,10 @@ if(essidtagptr == NULL)
}
essidtag = (ietag_t*)essidtagptr;
+if((essidtag->len == 0) || (essidtag->data[0] == 0))
+ {
+ return;
+ }
if(attackclientflag == false)
{
if(memcmp(macfrx->addr1, &mac_broadcast, 6) != 0)
@@ -2123,10 +2126,6 @@ if(attackclientflag == false)
send_proberesponse(sendmac, essidtagptr);
}
}
-if((essidtag->len == 0) || (essidtag->data[0] == 0))
- {
- return;
- }
zeiger = proberequestlist;
for(c = 0; c < PROBEREQUESTLIST_MAX -1; c++)
{
|
Fix a possible recursion in SSLfatal handling
Fixes: (hopefully) | @@ -118,11 +118,12 @@ void ossl_statem_set_renegotiate(SSL *s)
void ossl_statem_fatal(SSL *s, int al, int func, int reason, const char *file,
int line)
{
+ ERR_put_error(ERR_LIB_SSL, func, reason, file, line);
/* We shouldn't call SSLfatal() twice. Once is enough */
- assert(s->statem.state != MSG_FLOW_ERROR);
+ if (s->statem.in_init && s->statem.state == MSG_FLOW_ERROR)
+ return;
s->statem.in_init = 1;
s->statem.state = MSG_FLOW_ERROR;
- ERR_put_error(ERR_LIB_SSL, func, reason, file, line);
if (al != SSL_AD_NO_ALERT
&& s->statem.enc_write_state != ENC_WRITE_STATE_INVALID)
ssl3_send_alert(s, SSL3_AL_FATAL, al);
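The ordering matters: the error is recorded unconditionally, and only the alert/cleanup path is guarded against re-entry. A stand-alone sketch of that guard pattern (names are hypothetical, not OpenSSL's):
```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical connection state, standing in for s->statem above. */
struct conn_state {
    bool in_error;
};

static void conn_fatal(struct conn_state *s, const char *reason);

static void send_fatal_alert(struct conn_state *s)
{
    /* In real code this path can fail and call back into the fatal handler;
     * the guard in conn_fatal() keeps that from recursing forever. */
    fprintf(stderr, "sending fatal alert\n");
    (void)s;
}

static void conn_fatal(struct conn_state *s, const char *reason)
{
    fprintf(stderr, "fatal: %s\n", reason);  /* always record the error first */
    if (s->in_error)                         /* already handling a fatal error */
        return;                              /* ...so do not recurse */
    s->in_error = true;
    send_fatal_alert(s);                     /* may re-enter conn_fatal() safely */
}

int main(void)
{
    struct conn_state s = { false };
    conn_fatal(&s, "handshake failure");
    return 0;
}
```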
|
[trace] Only extract benchmark sections | @@ -489,8 +489,12 @@ def main():
'cfg_buf': deque(),
'curr_cfg': None
}
- perf_metrics = [defaultdict(int)] # all values initially 0, also 'start' time of measurement 0
- perf_metrics[0]['start'] = None
+ perf_metrics_bench = [defaultdict(int)] # all values initially 0, also 'start' time of measurement 0
+ perf_metrics_setup = [defaultdict(int)] # all values initially 0, also 'start' time of measurement 0
+ perf_metrics_bench[0]['start'] = None
+ perf_metrics_setup[0]['start'] = None
+ # Initial code belongs to setup phase
+ perf_metrics = perf_metrics_setup
# Parse input line by line
for line in line_iter:
if line:
@@ -498,16 +502,27 @@ def main():
False, time_info, args.offl, not args.saddr, args.permissive)
if perf_metrics[0]['start'] is None:
perf_metrics[0]['start'] = time_info[1]
- # Create a new section after every 'nop' instruction
+ # Start a new benchmark section after 'csrw cycle' instruction
if 'cycle' in ann_insn:
perf_metrics[-1]['end'] = time_info[1]
perf_metrics.append(defaultdict(int))
+ if 'csrw' in ann_insn:
+ # Start of a benchmark section
+ perf_metrics = perf_metrics_bench
+ else:
+ # End of a benchmark section
+ perf_metrics = perf_metrics_setup
perf_metrics[-1]['start'] = time_info[1]
if not empty:
print(ann_insn)
else:
break # Nothing more in pipe, EOF
perf_metrics[-1]['end'] = time_info[1]
+ # Evaluate only the benchmarks
+ perf_metrics = perf_metrics_bench
+ # Remove last empty entry
+ if not bool(perf_metrics[-1]):
+ perf_metrics.pop()
# Compute metrics
eval_perf_metrics(perf_metrics)
# Write metrics to CSV
|
Improve use of `@` in match. | ~(do (def ,pattern ,expr) ,(onmatch))))
(and (tuple? pattern) (= :parens (tuple/type pattern)))
- (if (and (= (pattern 0) '@) (symbol? (pattern 1)))
+ (if (= (get pattern 0) '@)
# Unification with external values
- ~(if (= ,(pattern 1) ,expr) ,(onmatch) ,sentinel)
+ ~(if (= ,(get pattern 1) ,expr) ,(onmatch) ,sentinel)
(match-1
(in pattern 0) expr
(fn []
|
Error on illegal port values in WKS
Thanks Ray Bellis | @@ -615,7 +615,7 @@ ldns_str2rdf_b32_ext(ldns_rdf **rd, const char *str)
int i;
/* first byte contains length of actual b32 data */
size_t slen = strlen(str);
- uint32_t len = ldns_b32_pton_calculate_size(slen);
+ size_t len = ldns_b32_pton_calculate_size(slen);
if (len > 255) {
return LDNS_STATUS_INVALID_B32_EXT;
}
@@ -1148,7 +1148,15 @@ ldns_str2rdf_wks(ldns_rdf **rd, const char *str)
if (serv) {
serv_port = (int) ntohs((uint16_t) serv->s_port);
} else {
- serv_port = (uint16_t) atoi(token);
+ serv_port = atoi(token);
+ }
+ if (serv_port < 0 || serv_port > 65535) {
+ LDNS_FREE(bitmap);
+ LDNS_FREE(token);
+ ldns_buffer_free(str_buf);
+ free(proto_str);
+ free(lc_proto_str);
+ return LDNS_STATUS_INVALID_STR;
}
if (serv_port / 8 >= bm_len) {
uint8_t *b2 = LDNS_XREALLOC(bitmap, uint8_t, (serv_port / 8) + 1);
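For context, the added check rejects anything outside 0..65535 after atoi(). A hedged, stand-alone variant using strtol(), which also reports malformed input, might look like this (not part of ldns):
```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a TCP/UDP port from text; returns -1 for anything outside 0..65535
 * or for input that is not a clean decimal number. Illustrative helper. */
static int parse_port(const char *token)
{
    char *end = NULL;
    errno = 0;
    long val = strtol(token, &end, 10);
    if (errno != 0 || end == token || *end != '\0')
        return -1;               /* overflow or not a clean number */
    if (val < 0 || val > 65535)
        return -1;               /* outside the 16-bit port range */
    return (int)val;
}

int main(void)
{
    printf("%d\n", parse_port("53"));          /* 53  */
    printf("%d\n", parse_port("70000"));       /* -1  */
    printf("%d\n", parse_port("not-a-port"));  /* -1  */
    return 0;
}
```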
|
Fix missing moc json generation | @@ -80,7 +80,7 @@ rule("qt.moc")
end
local user_flags = target:get("qt.moc.flags") or {}
batchcmds:mkdir(path.directory(sourcefile_moc))
- batchcmds:vrunv(moc, table.join(flags, path(sourcefile), "-o", path(sourcefile_moc)))
+ batchcmds:vrunv(moc, table.join(user_flags, flags, path(sourcefile), "-o", path(sourcefile_moc)))
-- we need compile this moc_xxx.cpp file if exists Q_PRIVATE_SLOT, @see https://github.com/xmake-io/xmake/issues/750
local mocdata = io.readfile(sourcefile)
|
platforms/nicole: Fixup the system VPD EEPROM size
Hostboot doesn't export the correct description for EEPROMs,
as a result, all EEPROMs in the system work in "atmel,24c128"
compatibility mode (16KiB).
Nicole platform has 32KiB EEPROM for the system VPD. | @@ -34,6 +34,26 @@ static const struct slot_table_entry nicole_phb_table[] = {
{ .etype = st_end },
};
+/* Fixup the system VPD EEPROM size.
+ *
+ * Hostboot doesn't export the correct description for EEPROMs, as a result,
+ * all EEPROMs in the system work in "atmel,24c128" compatibility mode (16KiB).
+ * Nicole platform has 32KiB EEPROM for the system VPD.
+ */
+static void vpd_dt_fixup(void)
+{
+ struct dt_node* vpd_eeprom = dt_find_by_path(dt_root,
+ "/xscom@603fc00000000/i2cm@a2000/i2c-bus@0/eeprom@50");
+
+ if (vpd_eeprom) {
+ dt_check_del_prop(vpd_eeprom, "compatible");
+ dt_add_property_string(vpd_eeprom, "compatible", "atmel,24c256");
+
+ dt_check_del_prop(vpd_eeprom, "label");
+ dt_add_property_string(vpd_eeprom, "label", "system-vpd");
+ }
+}
+
static bool nicole_probe(void)
{
if (!dt_node_is_compatible(dt_root, "YADRO,nicole"))
@@ -45,6 +65,9 @@ static bool nicole_probe(void)
/* Setup UART for use by OPAL (Linux hvc) */
uart_set_console_policy(UART_CONSOLE_OPAL);
+ /* Fixup system VPD EEPROM size */
+ vpd_dt_fixup();
+
slot_table_init(nicole_phb_table);
return true;
|
is31fl3743b: Set and clear config register bits correctly
This patch makes is31fl3743b_enable set and clear the bits of the config
register correctly.
BRANCH=None
TEST=None | @@ -91,8 +91,10 @@ static int is31fl3743b_enable(struct rgbkbd *ctx, bool enable)
return rv;
}
- return is31fl3743b_write(ctx, IS31FL3743B_REG_CONFIG,
- u8 | BIT(3) | (enable ? BIT(0) : 0));
+ WRITE_BIT(u8, 3, 1);
+ WRITE_BIT(u8, 0, enable);
+
+ return is31fl3743b_write(ctx, IS31FL3743B_REG_CONFIG, u8);
}
static int is31fl3743b_set_color(struct rgbkbd *ctx, uint8_t offset,
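WRITE_BIT is a platform/ec convenience macro; a stand-alone equivalent of the read-modify-write above could be sketched as follows (the macro definition here is an assumption, not copied from the EC tree):
```c
#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the EC WRITE_BIT() helper: set or clear one bit. */
#define WRITE_BIT(var, bit, val) \
    ((val) ? ((var) |= (1U << (bit))) : ((var) &= ~(1U << (bit))))

int main(void)
{
    uint8_t config = 0x40;   /* pretend this was read back from the chip */
    int enable = 1;

    WRITE_BIT(config, 3, 1);       /* always set bit 3 */
    WRITE_BIT(config, 0, enable);  /* bit 0 follows the enable flag */

    printf("config = 0x%02x\n", config);  /* prints 0x49 when enable == 1 */
    return 0;
}
```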
|
test-suite: add logic to skip local ipmitool tests on sms if aarch64 | @@ -23,6 +23,7 @@ fi
}
@test "[OOB] ipmitool local bmc ping" {
+ [[ "$ARCH" == "aarch64" ]] && skip "Skipping local bmc ping for ARCH=$ARCH"
# Check 4 channels of ipmi lan for IP address
@@ -44,6 +45,7 @@ fi
}
@test "[OOB] ipmitool power status" {
+ [[ "$ARCH" == "aarch64" ]] && skip "Skipping local power status for ARCH=$ARCH"
if [ -z "$IPMI_PASSWORD" ];then
flunk "IPMI_PASSWORD is not set"
@@ -70,6 +72,7 @@ fi
}
@test "[OOB] ipmitool read CPU1 sensor data" {
+ [[ "$ARCH" == "aarch64" ]] && skip "Skipping read CPU1 data for ARCH=$ARCH"
if [ -z "$IPMI_PASSWORD" ];then
flunk "IPMI_PASSWORD is not set"
|
Fix service properties propsheet flags | @@ -108,12 +108,15 @@ VOID PhShowServiceProperties(
SERVICE_PROPERTIES_CONTEXT context;
propSheetHeader.dwFlags =
+ PSH_MODELESS |
PSH_NOAPPLYNOW |
PSH_NOCONTEXTHELP |
- PSH_PROPTITLE;
+ PSH_PROPTITLE |
+ //PSH_USECALLBACK |
+ PSH_USEHICON;
propSheetHeader.hInstance = PhInstanceHandle;
propSheetHeader.hwndParent = ParentWindowHandle;
- propSheetHeader.pszCaption = ServiceItem->Name->Buffer;
+ propSheetHeader.pszCaption = PhGetString(ServiceItem->Name);
propSheetHeader.nPages = 0;
propSheetHeader.nStartPage = 0;
propSheetHeader.phpage = pages;
|
Use an executable example for net.socket:on() | @@ -310,11 +310,11 @@ Otherwise, all connection errors (with normal close) passed to disconnection eve
```lua
srv = net.createConnection(net.TCP, 0)
srv:on("receive", function(sck, c) print(c) end)
-srv:on("connection", function(sck)
+srv:on("connection", function(sck, c)
-- Wait for connection before sending.
- sck:send("GET / HTTP/1.1\r\nHost: 192.168.0.66\r\nConnection: keep-alive\r\nAccept: */*\r\n\r\n")
+ sck:send("GET /get HTTP/1.1\r\nHost: httpbin.org\r\nConnection: keep-alive\r\nAccept: */*\r\n\r\n")
end)
-srv:connect(80,"192.168.0.66")
+srv:connect(80,"httpbin.org")
```
#### See also
|
Use debug print in assertion failure
So the failing domain name is also printed. | @@ -36,16 +36,17 @@ __FBSDID("$FreeBSD$");
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
+#include <barrelfish/debug.h>
void
__assert(const char *func, const char *file, int line, const char *failedexpr)
{
if (func == NULL)
- (void)fprintf(stderr,
+ (void)debug_printf(
"Assertion failed: (%s), file %s, line %d.\n", failedexpr,
file, line);
else
- (void)fprintf(stderr,
+ (void)debug_printf(
"Assertion failed: (%s), function %s, file %s, line %d.\n",
failedexpr, func, file, line);
abort();
|
Stuffed neko path | @@ -23,7 +23,7 @@ before_install:
- sudo cp /usr/local/lib/neko/neko-2.3.0-osx64/neko /usr/local/lib/haxe
- sudo ln -s /usr/local/lib/neko/neko-2.3.0-osx64/libneko.2.3.0.dylib /usr/local/lib/libneko.2.dylib
- sudo ln -s /usr/local/lib/neko/neko-2.3.0-osx64/libneko.2.3.0.dylib /usr/local/lib/libneko.dylib
-- sudo ln -s /usr/local/lib/neko/neko /usr/local/bin/neko
+- sudo ln -s /usr/local/lib/neko/neko-2.3.0-osx64/neko /usr/local/bin/neko
- sudo ln -s /usr/local/lib/neko/libneko.2.dylib /usr/local/lib/neko/libneko.2.dylib
- sudo ln -s /usr/local/lib/neko/libneko.dylib /usr/local/lib/neko/libneko.dylib
- sudo ln -s /usr/local/lib/neko/libneko.2.dylib /usr/local/bin/libneko.2.dylib
@@ -35,7 +35,6 @@ before_install:
- find /usr/local -name libneko.2.dylib
- sudo ln -s /usr/local/lib/haxe/haxe /usr/local/bin/haxe
- sudo ln -s /usr/local/lib/haxe/haxelib /usr/local/bin/haxelib
-- sudo ln -s /usr/local/lib/neko/neko /usr/local/bin/neko
- mkdir ~/haxelib
- export DYLD_PRINT_RPATHS=1
- nm /usr/local/lib/haxe/haxelib
|
HardwareDevices: Fix options crash | @@ -185,7 +185,7 @@ VOID AddNetworkAdapterToListView(
BOOLEAN found = FALSE;
PDV_NETADAPTER_ID newId = NULL;
- InitializeNetAdapterId(&adapterId, IfIndex, Luid, NULL);
+ InitializeNetAdapterId(&adapterId, IfIndex, Luid, Guid);
for (ULONG i = 0; i < NetworkAdaptersList->Count; i++)
{
|
Reenable GCC unexpected __attribute__ warning | @@ -93,8 +93,6 @@ DEFINES += GW_MIN_DERFUSB23E0X_FW_VERSION=0x22030300
DEFINES += GW_DEFAULT_NAME=\\\"Phoscon-GW\\\"
-QMAKE_CXXFLAGS += -Wno-attributes
-
HEADERS = bindings.h \
connectivity.h \
colorspace.h \
|
VERSION bump to version 1.3.1 | @@ -27,7 +27,7 @@ endif()
# micro version is changed with a set of small changes or bugfixes anywhere in the project.
set(SYSREPO_MAJOR_VERSION 1)
set(SYSREPO_MINOR_VERSION 3)
-set(SYSREPO_MICRO_VERSION 0)
+set(SYSREPO_MICRO_VERSION 1)
set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION})
# Version of the library
|
parse_browser(): avoid passing NULL to strpbrk() | @@ -508,7 +508,7 @@ parse_browser (char *match, char *type, int i, char ***hash) {
match = char_replace (match, ' ', '/');
}
/* all others */
- else if ((ptr = strpbrk (b, ";) ")) != NULL) {
+ else if ((ptr = strpbrk (b ? b : match, ";) ")) != NULL) {
*ptr = '\0';
}
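The key point is that strpbrk() must never receive a NULL string, hence the `b ? b : match` fallback. A tiny self-contained illustration:
```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    char ua[] = "Mozilla/5.0 (X11; Linux x86_64) Firefox/115.0";
    char *b = NULL;       /* the earlier token lookup may have found nothing */
    char *match = ua;

    /* strpbrk(NULL, ...) is undefined behaviour, so fall back to match. */
    char *ptr = strpbrk(b ? b : match, ";) ");
    if (ptr != NULL)
        *ptr = '\0';      /* truncate at the first delimiter */

    printf("%s\n", match);  /* prints "Mozilla/5.0" */
    return 0;
}
```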
|
gitlab-ci: comment out LTO Integration Test
As it takes too long for now | @@ -235,13 +235,13 @@ Integration_Test_UBSan:
dependencies:
- Build_UBSan
-Integration_Test_LTO:
- image: debian:stable
- stage: test2
- script:
- - CFLAGS="-flto -O2 -g" make test
- needs: [Build_LTO]
- dependencies:
- - Build_LTO
+#Integration_Test_LTO:
+# image: debian:stable
+# stage: test2
+# script:
+# - CFLAGS="-flto -O2 -g" make test
+# needs: [Build_LTO]
+# dependencies:
+# - Build_LTO
|
OcXmlLib: Fix previous commit | @@ -1153,30 +1153,33 @@ XmlDocumentExport (
)
{
CHAR8 *Buffer;
- CHAR8 *BufferXmlContent;
+ CHAR8 *NewBuffer;
UINT32 AllocSize;
UINT32 CurrentSize;
+ UINT32 NewSize;
AllocSize = Document->Buffer.Length + 1;
- if (PrependPlistInfo && OcOverflowAddU32 (AllocSize, L_STR_SIZE_NT (XML_PLIST_HEADER), &AllocSize)) {
- return NULL;
- }
Buffer = AllocatePool (AllocSize);
if (Buffer == NULL) {
XML_USAGE_ERROR ("XmlDocumentExport::failed to allocate");
return NULL;
}
- BufferXmlContent = PrependPlistInfo ? &Buffer[L_STR_LEN (XML_PLIST_HEADER)] : Buffer;
CurrentSize = 0;
- XmlNodeExportRecursive (Document->Root, &BufferXmlContent, &AllocSize, &CurrentSize, Skip);
+ XmlNodeExportRecursive (Document->Root, &Buffer, &AllocSize, &CurrentSize, Skip);
if (PrependPlistInfo) {
- if (OcOverflowAddU32 (CurrentSize, L_STR_SIZE_NT (XML_PLIST_HEADER), &CurrentSize)) {
+ if (OcOverflowTriAddU32 (CurrentSize, L_STR_SIZE_NT (XML_PLIST_HEADER), 1, &NewSize)) {
FreePool (Buffer);
return NULL;
}
- CopyMem (Buffer, XML_PLIST_HEADER, L_STR_SIZE_NT (XML_PLIST_HEADER));
+ NewBuffer = AllocatePool (NewSize);
+ CopyMem (&NewBuffer[L_STR_LEN (XML_PLIST_HEADER)], Buffer, CurrentSize);
+ CopyMem (NewBuffer, XML_PLIST_HEADER, L_STR_SIZE_NT (XML_PLIST_HEADER));
+ FreePool (Buffer);
+
+ CurrentSize = NewSize - 1;
+ Buffer = NewBuffer;
}
if (Length != NULL) {
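Outside EDK II, the same prepend-with-overflow-check idea can be sketched with GCC/Clang overflow builtins; the helper below is illustrative only and not part of OcXmlLib:
```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Prepend a fixed header to a heap buffer of len bytes, refusing to proceed
 * if the size arithmetic would overflow. Takes ownership of buf in all cases. */
static char *prepend_header(const char *header, char *buf, size_t len, size_t *out_len)
{
    if (buf == NULL)
        return NULL;

    size_t hlen = strlen(header);
    size_t new_size;

    /* Same spirit as OcOverflowTriAddU32(len, hlen, 1, &new_size) above. */
    if (__builtin_add_overflow(len, hlen, &new_size) ||
        __builtin_add_overflow(new_size, 1, &new_size)) {
        free(buf);
        return NULL;
    }

    char *out = malloc(new_size);
    if (out == NULL) {
        free(buf);
        return NULL;
    }
    memcpy(out + hlen, buf, len);  /* old payload goes after the header */
    memcpy(out, header, hlen);     /* header goes at the front */
    out[new_size - 1] = '\0';
    free(buf);

    *out_len = new_size - 1;
    return out;
}

int main(void)
{
    size_t len = strlen("<plist/>");
    char *doc = prepend_header("<?xml version=\"1.0\"?>\n", strdup("<plist/>"), len, &len);
    if (doc != NULL)
        printf("%s\n", doc);
    free(doc);
    return 0;
}
```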
|
NN: remove convert/scaling code from network test. | @@ -433,22 +433,8 @@ int nn_dry_run_network(nn_t *net, image_t *img, int8_t *output_data)
switch (layer->type) {
case LAYER_TYPE_DATA: {
data_layer_t *data_layer = (data_layer_t *) layer;
- input_data = fb_alloc(data_layer->c * data_layer->h * data_layer->w);
- // Scale, convert, remove mean image and load input data.
- int x_ratio = (int)((img->w<<16)/layer->w)+1;
- int y_ratio = (int)((img->h<<16)/layer->h)+1;
- for (int y=0, i=0; y<layer->h; y++) {
- int sy = (y*y_ratio)>>16;
- for (int x=0; x<layer->w; x++, i+=3) {
- int sx = (x*x_ratio)>>16;
- uint16_t p = IM_GET_RGB565_PIXEL(img, sx, sy);
- input_data[i+0] = (int8_t) (((int) COLOR_RGB565_TO_R8(p)) - (int) data_layer->r_mean);
- input_data[i+1] = (int8_t) (((int) COLOR_RGB565_TO_G8(p)) - (int) data_layer->g_mean);
- input_data[i+2] = (int8_t) (((int) COLOR_RGB565_TO_B8(p)) - (int) data_layer->b_mean);
- }
- }
// Set image data as input buffer for the next layer.
- input_buffer = input_data;
+ input_buffer = input_data = fb_alloc(data_layer->c * data_layer->h * data_layer->w);
output_buffer = buffer1;
break;
}
|
Host: better library loading | @@ -110,6 +110,7 @@ bool PluginHost::load(const QString &path, int pluginIndex) {
unload();
library_.setFileName(path);
+ library_.setLoadHints(QLibrary::ResolveAllSymbolsHint | QLibrary::DeepBindHint);
if (!library_.load()) {
QString err = library_.errorString();
qWarning() << "failed to load " << path << ": " << err;
|
Fix some typos and make load/unload USB functionality conditionally compiled
for Linux. | // Local functions...
//
+#ifdef __linux
static bool load_usb_printer(pappl_printer_t *printer);
static void unload_usb_printer(void);
+#endif // __linux
//
@@ -44,6 +46,9 @@ _papplPrinterRunUSB(
ssize_t bytes; // Bytes in buffer
+ if (!load_usb_printer(printer))
+ return (NULL);
+
if ((data.fd = open("/dev/g_printer", O_RDWR | O_EXCL)) < 0)
{
papplLogPrinter(printer, PAPPL_LOGLEVEL_ERROR, "Unable to open USB printer gadget: %s", strerror(errno));
@@ -85,6 +90,8 @@ _papplPrinterRunUSB(
papplPrinterCloseDevice(printer);
}
+ unload_usb_printer();
+
#else
(void)printer;
#endif // __linux
@@ -111,6 +118,7 @@ papplPrinterSetUSB(
}
+#ifdef __linux
//
// 'load_usb_printer()' - Load the USB printer gadget module.
//
@@ -119,7 +127,6 @@ static bool // O - `true` on success, `false` otherwise
load_usb_printer(
pappl_printer_t *printer) // I - Printer
{
-#ifdef __linux
struct utsname info; // System information
char filename[1024], // Module file name
params[2048]; // Module parameters
@@ -190,10 +197,6 @@ load_usb_printer(
close(fd);
return (true);
-
-#else // !__linux
- return (false);
-#endif // __linux
}
@@ -204,7 +207,7 @@ load_usb_printer(
static void
unload_usb_printer(void)
{
-#ifdef __linux
syscall(__NR_delete_module, "g_printer", O_NONBLOCK);
-#endif // __linux
}
+#endif // __linux
+
|
make tmpname static so that the storage doesn't go out of scope when
LLVMFuzzerTestOneInput returns. | @@ -327,7 +327,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size)
*/
if (!init_done) {
const char *client_timeout_ms_str;
- char tmpname[] = "/tmp/h2o-fuzz-XXXXXX";
+ static char tmpname[] = "/tmp/h2o-fuzz-XXXXXX";
char *dirname;
h2o_url_t upstream;
signal(SIGPIPE, SIG_IGN);
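The storage class is the whole point here: the template is rewritten in place and the resulting path is still referenced after the function returns, so an automatic array would dangle. A stand-alone illustration with a hypothetical helper:
```c
#define _DEFAULT_SOURCE     /* for mkdtemp() on glibc */
#include <stdio.h>
#include <stdlib.h>

/* The template must outlive this call: mkdtemp() rewrites it in place and the
 * returned pointer aliases it. With a non-static array the storage would be
 * released on return and any saved pointer would dangle. */
static const char *scratch_dir(void)
{
    static char tmpl[] = "/tmp/example-fuzz-XXXXXX";  /* static, not automatic */
    return mkdtemp(tmpl);                             /* NULL on failure */
}

int main(void)
{
    const char *dir = scratch_dir();
    printf("scratch dir: %s\n", dir ? dir : "(mkdtemp failed)");
    return 0;
}
```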
|
RGBKBD: Print error code in set_color_single
This patch makes set_color_single print error code returned from a
driver.
BRANCH=None
TEST=Taniks | @@ -38,16 +38,17 @@ uint8_t rgbkbd_table[EC_RGBKBD_MAX_KEY_COUNT];
static int set_color_single(struct rgb_s color, int x, int y)
{
struct rgbkbd *ctx = &rgbkbds[0];
- uint8_t gid;
+ uint8_t grid;
uint8_t col = 0;
uint8_t offset;
+ int rv;
if (rgbkbd_hsize <= x || rgbkbd_vsize <= y) {
return EC_ERROR_OVERFLOW;
}
/* Search the grid where x belongs to. */
- for (gid = 0; gid < rgbkbd_count; gid++, ctx++) {
+ for (grid = 0; grid < rgbkbd_count; grid++, ctx++) {
if (x < col + ctx->cfg->col_len)
break;
col += ctx->cfg->col_len;
@@ -56,9 +57,13 @@ static int set_color_single(struct rgb_s color, int x, int y)
offset = ctx->cfg->row_len * (x - col) + y;
ctx->buf[offset] = color;
- CPRINTS("Set [%d,%d] to color=[%d,%d,%d] (gid=%u offset=%u)",
- x, y, color.r, color.g, color.b, gid, offset);
- return ctx->cfg->drv->set_color(ctx, offset, &ctx->buf[offset], 1);
+ rv = ctx->cfg->drv->set_color(ctx, offset, &ctx->buf[offset], 1);
+
+ CPRINTS("%set (%d,%d) to color=(%d,%d,%d) grid=%u offset=%u (%d)",
+ rv ? "Failed to s" : "S",
+ x, y, color.r, color.g, color.b, grid, offset, rv);
+
+ return rv;
}
test_export_static uint8_t get_grid_size(const struct rgbkbd *ctx)
|
More flexible shorthands for texture and material args; | @@ -706,7 +706,14 @@ int l_lovrGraphicsNewMaterial(lua_State* L) {
int index = 1;
- if (lua_isuserdata(L, index)) {
+ if (lua_type(L, index) == LUA_TSTRING) {
+ Blob* blob = luax_readblob(L, index++, "Texture");
+ TextureData* textureData = lovrTextureDataFromBlob(blob);
+ lovrRelease(&blob->ref);
+ Texture* texture = lovrTextureCreate(TEXTURE_2D, &textureData, 1, true);
+ lovrMaterialSetTexture(material, TEXTURE_DIFFUSE, texture);
+ lovrRelease(&texture->ref);
+ } else if (lua_isuserdata(L, index)) {
Texture* texture = luax_checktype(L, index++, Texture);
lovrMaterialSetTexture(material, TEXTURE_DIFFUSE, texture);
}
@@ -803,6 +810,24 @@ int l_lovrGraphicsNewModel(lua_State* L) {
Blob* blob = luax_readblob(L, 1, "Model");
ModelData* modelData = lovrModelDataCreate(blob);
Model* model = lovrModelCreate(modelData);
+
+ if (lua_gettop(L) >= 2) {
+ if (lua_type(L, 2) == LUA_TSTRING) {
+ Blob* blob = luax_readblob(L, 2, "Texture");
+ TextureData* textureData = lovrTextureDataFromBlob(blob);
+ Texture* texture = lovrTextureCreate(TEXTURE_2D, &textureData, 1, true);
+ MaterialData* materialData = lovrMaterialDataCreateEmpty();
+ Material* material = lovrMaterialCreate(materialData, false);
+ lovrMaterialSetTexture(material, TEXTURE_DIFFUSE, texture);
+ lovrModelSetMaterial(model, material);
+ lovrRelease(&blob->ref);
+ lovrRelease(&texture->ref);
+ lovrRelease(&material->ref);
+ } else {
+ lovrModelSetMaterial(model, luax_checktype(L, 2, Material));
+ }
+ }
+
luax_pushtype(L, Model, model);
lovrRelease(&model->ref);
lovrRelease(&blob->ref);
|
Fixes a deadlock issue in the dm | @@ -173,7 +173,6 @@ static void celix_dm_allComponentsActiveCallback(void *handle, const celix_bundl
if (!allActive) {
*allActivePtr = false;
}
- celixThreadMutex_lock(&mng->mutex);
}
bool celix_dependencyManager_allComponentsActive(celix_dependency_manager_t *manager) {
|
create key file with u+rw permission | #include <errno.h>
#include <string.h>
#include <sys/socket.h>
+#include <sys/stat.h>
#include "mbedtls/platform.h"
#include "mbedtls/entropy.h"
@@ -301,6 +302,9 @@ static int write_pem(const mbedtls_pk_context *key, const char path[])
return -1;
}
+ // Set u+rw permissions
+ chmod(path, 0600);
+
len = strlen((char*) buf);
if (fwrite(buf, 1, len, file) != len) {
fclose(file);
|
docs: Details the repositories used to store images. | @@ -23,8 +23,15 @@ To run the integration tests, it is necessary to have the gadget container image
available on a container repository so that it can be installed in the
Kubernetes cluster where the tests will run.
-As a default, `ghcr.io/${{ github.repository }}` is used to store images created
-in the CI pipeline.
+As a default, `ghcr.io/${{ github.repository }}-dev` is used to store images
+created in the CI pipeline for all branches except main and tags.
+When the target branch corresponds to main or the push refers to a tag, the
+default repository is `ghcr.io/${{ github.repository }}`.
+This permits a clear separation between "in development" images and production
+ones.
+During a release, the integration test container image will be pushed to
+`ghcr.io/${{ github.repository }}-test`.
+
Note that, you need to [set repository packages as public](https://docs.github.com/en/packages/learn-github-packages/configuring-a-packages-access-control-and-visibility#configuring-visibility-of-container-images-for-your-personal-account) to allow anonymous pull.
## Run integration tests on an ARO cluster
|
avx: use internal symbols in clang fallbacks for cmp_ps/pd functions
This is just to work around the fallback macro being called recursively
instead of invoking the function. | @@ -2648,7 +2648,12 @@ simde_mm_cmp_ss (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_FUNCTION_ATTRIBUTES
simde__m256d
-simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8)
+#if defined(__clang__) && defined(__AVX512DQ__)
+simde_mm256_cmp_pd_internal_
+#else
+simde_mm256_cmp_pd
+#endif
+(simde__m256d a, simde__m256d b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) {
simde__m256d_private
r_,
@@ -2853,7 +2858,7 @@ simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8)
simde_mm256_cmp_pd_r = simde_x_mm256_setone_pd(); \
break; \
default: \
- simde_mm256_cmp_pd_r = simde_mm256_cmp_pd(a, b, imm8); \
+ simde_mm256_cmp_pd_r = simde_mm256_cmp_pd_internal_(a, b, imm8); \
break; \
} \
simde_mm256_cmp_pd_r; \
@@ -2868,7 +2873,12 @@ simde_mm256_cmp_pd (simde__m256d a, simde__m256d b, const int imm8)
SIMDE_FUNCTION_ATTRIBUTES
simde__m256
-simde_mm256_cmp_ps (simde__m256 a, simde__m256 b, const int imm8)
+#if defined(__clang__) && defined(__AVX512DQ__)
+simde_mm256_cmp_ps_internal_
+#else
+simde_mm256_cmp_ps
+#endif
+(simde__m256 a, simde__m256 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 31) {
simde__m256_private
r_,
@@ -3073,7 +3083,7 @@ simde_mm256_cmp_ps (simde__m256 a, simde__m256 b, const int imm8)
simde_mm256_cmp_ps_r = simde_x_mm256_setone_ps(); \
break; \
default: \
- simde_mm256_cmp_ps_r = simde_mm256_cmp_ps(a, b, imm8); \
+ simde_mm256_cmp_ps_r = simde_mm256_cmp_ps_internal_(a, b, imm8); \
break; \
} \
simde_mm256_cmp_ps_r; \
|