message
stringlengths 6
474
| diff
stringlengths 8
5.22k
|
---|---|
remove end of line whitespace | @@ -21,7 +21,7 @@ decrypt functions
PKCS12_decrypt_skey() Decrypt the PKCS#8 shrouded keybag contained within I<bag>
using the supplied password I<pass> of length I<passlen>.
-PKCS12_decrypt_skey_ex() is similar to the above but allows for a library context
+PKCS12_decrypt_skey_ex() is similar to the above but allows for a library contex
I<ctx> and property query I<propq> to be used to select algorithm implementations.
=head1 RETURN VALUES
|
ipsec: cli bug fix
1. unformat_ip46_address must have ip-type specified
2. cannot unformat ip46_address_t with unformat_ip4_address | @@ -118,14 +118,14 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
unformat_ipsec_integ_alg, &integ_alg))
;
else if (unformat (line_input, "tunnel-src %U",
- unformat_ip46_address, &tun_src))
+ unformat_ip46_address, &tun_src, IP46_TYPE_ANY))
{
flags |= IPSEC_SA_FLAG_IS_TUNNEL;
if (!ip46_address_is_ip4 (&tun_src))
flags |= IPSEC_SA_FLAG_IS_TUNNEL_V6;
}
else if (unformat (line_input, "tunnel-dst %U",
- unformat_ip46_address, &tun_dst))
+ unformat_ip46_address, &tun_dst, IP46_TYPE_ANY))
;
else if (unformat (line_input, "udp-encap"))
flags |= IPSEC_SA_FLAG_UDP_ENCAP;
@@ -615,6 +615,8 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm,
ipsec_add_del_tunnel_args_t a;
int rv;
u32 num_m_args = 0;
+ u8 ipv4_set = 0;
+ u8 ipv6_set = 0;
clib_error_t *error = NULL;
clib_memset (&a, 0, sizeof (a));
@@ -627,12 +629,21 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm,
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat
- (line_input, "local-ip %U", unformat_ip4_address, &a.local_ip))
+ (line_input, "local-ip %U", unformat_ip46_address, &a.local_ip,
+ IP46_TYPE_ANY))
+ {
+ ip46_address_is_ip4 (&a.local_ip) ? (ipv4_set = 1) : (ipv6_set = 1);
num_m_args++;
+ }
else
if (unformat
- (line_input, "remote-ip %U", unformat_ip4_address, &a.remote_ip))
+ (line_input, "remote-ip %U", unformat_ip46_address, &a.remote_ip,
+ IP46_TYPE_ANY))
+ {
+ ip46_address_is_ip4 (&a.remote_ip) ? (ipv4_set = 1) : (ipv6_set =
+ 1);
num_m_args++;
+ }
else if (unformat (line_input, "local-spi %u", &a.local_spi))
num_m_args++;
else if (unformat (line_input, "remote-spi %u", &a.remote_spi))
@@ -663,6 +674,12 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm,
goto done;
}
+ if (ipv6_set)
+ return clib_error_return (0, "currently only IPv4 supported");
+
+ if (ipv4_set && ipv6_set)
+ return clib_error_return (0, "both IPv4 and IPv6 addresses specified");
+
rv = ipsec_add_del_tunnel_if (&a);
switch (rv)
|
Comment Py_Finalize for future GC fix | @@ -785,7 +785,8 @@ int py_loader_impl_destroy(loader_impl impl)
Py_DECREF(py_impl->main_module);
- Py_Finalize();
+ //TODO resolve GC error on finalize
+ //Py_Finalize();
free(py_impl);
|
Fixing more version checks in aomp_common_vars.
These version checks will be cleaned up in the
future as we do not need to support these older
AOMP builds anymore. | @@ -241,7 +241,7 @@ elif [ "$AOMP_MAJOR_VERSION" == "13" ] ; then
AOMP_PROJECT_REPO_NAME=${AOMP_PROJECT_REPO_NAME:-llvm-project}
AOMP_EXTRAS_REPO_NAME=${AOMP_EXTRAS_REPO_NAME:-aomp-extras}
AOMP_FLANG_REPO_NAME=${AOMP_FLANG_REPO_NAME:-flang}
-elif [ "$AOMP_MAJOR_VERSION" == "14" ] || [ "$AOMP_MAJOR_VERSION" == "15" ] ; then
+elif [ "$AOMP_MAJOR_VERSION" == "14" ] || [ "$AOMP_MAJOR_VERSION" == "15" ] || [ "$AOMP_MAJOR_VERSION" == "16" ] ; then
GITPROJECT=$GITROCDEV
AOMP_PROJECT_REPO_NAME=${AOMP_PROJECT_REPO_NAME:-llvm-project}
AOMP_EXTRAS_REPO_NAME=${AOMP_EXTRAS_REPO_NAME:-aomp-extras}
|
tools: Apply M16 rule at clang-format file
[M16] Open braces for enum, union and struct go on the same line | @@ -6,13 +6,13 @@ AccessModifierOffset: -4
AllowShortFunctionsOnASingleLine: false
BreakBeforeBraces: Custom
BraceWrapping:
- AfterEnum: true
- AfterFunction: true
- AfterStruct: true
- AfterUnion: true
+ AfterEnum: false #[M16]
+ AfterFunction: true #[M14]
+ AfterStruct: false #[M16]
+ AfterUnion: false #[M16]
AfterClass: true
AfterNamespace: false
- BeforeElse: false
+ BeforeElse: false #[M15]
IndentCaseLabels: false #[M09]
IndentWidth: 4 #[M08]
SpaceBeforeAssignmentOperators: true #[M11]
|
Use size of target buffer for allocation | @@ -110,7 +110,7 @@ SSL_SESSION *ssl_session_dup(const SSL_SESSION *src, int ticket)
{
SSL_SESSION *dest;
- dest = OPENSSL_malloc(sizeof(*src));
+ dest = OPENSSL_malloc(sizeof(*dest));
if (dest == NULL) {
goto err;
}
|
hw/drivers/sdadc_da1469x: Add missing parentheses
...to improve readability. | @@ -104,7 +104,7 @@ da1469x_sdadc_configure_channel(struct adc_dev *adev, uint8_t cnum,
*/
refmv = ((struct da1469x_sdadc_chan_cfg *)cfg)->dscc_refmv;
}
- if (ctrl & SDADC_SDADC_CTRL_REG_SDADC_SE_Msk &&
+ if ((ctrl & SDADC_SDADC_CTRL_REG_SDADC_SE_Msk) &&
((ctrl & SDADC_SDADC_CTRL_REG_SDADC_INP_SEL_Msk) ==
(8 << SDADC_SDADC_CTRL_REG_SDADC_INP_SEL_Pos))) {
/*
|
example:mqtt fix mqtt utc error | @@ -415,7 +415,7 @@ static void utc_mqtt_deinit_client_n(void)
* @postcondition none
*/
static void utc_mqtt_deinit_client_p(void)
-{p
+{
int res = mqtt_deinit_client(g_mqtt_pub_handle);
if (res < 0) {
UTC_MQTT_LOGE;
|
Fix dismissing the modules runner | @@ -60,7 +60,7 @@ import UIKit
navigationItem.leftBarButtonItems = []
navigationItem.rightBarButtonItems = []
- navigationItem.leftBarButtonItem = UIBarButtonItem(image: EditorSplitViewController.gridImage, style: .plain, target: REPLViewController.self, action: #selector(REPLViewController.goToFileBrowser))
+ navigationItem.leftBarButtonItem = UIBarButtonItem(image: EditorSplitViewController.gridImage, style: .plain, target: self, action: #selector(RunModuleViewController.goToFileBrowser))
navigationController?.isToolbarHidden = true
title = Localizable.repl
|
OcAppleKernelLib: Fix compiler warning in MSVC | @@ -246,7 +246,7 @@ PrelinkedContextInit (
if (!MachoInitializeContext (
&Context->InnerMachContext,
&Context->Prelinked[Segment->FileOffset],
- Context->PrelinkedSize - Segment->FileOffset)) {
+ (UINT32) (Context->PrelinkedSize - Segment->FileOffset))) {
return EFI_INVALID_PARAMETER;
}
}
|
Use RFC 2553 identifiers | @@ -617,7 +617,7 @@ int bytestream_test_addr()
struct sockaddr_in addr_in = { 0 };
addr_in.sin_family = AF_INET;
- addr_in.sin_addr.S_un.S_addr = 0x01020304;
+ addr_in.sin_addr.s_addr = 0x01020304;
addr_in.sin_port = 1234;
struct sockaddr_in addr_in_res = { 0 };
@@ -629,7 +629,7 @@ int bytestream_test_addr()
struct sockaddr_in6 addr_in6 = { 0 };
addr_in6.sin6_family = AF_INET6;
- addr_in6.sin6_addr.u.Word[0] = 12;
+ addr_in6.sin6_addr.s6_words[0] = 12;
addr_in6.sin6_port = 1234;
struct sockaddr_in6 addr_in6_res = { 0 };
|
fixed TACACS detection, ignore zeroed packets caused by driver cash | @@ -574,6 +574,10 @@ if(tacacspf->version != TACACSP_VERSION)
{
return false;
}
+if(tacacspf->type != 1)
+ {
+ return false;
+ }
datalen = ntohl(tacacspf->datalen);
if(datalen > pklen)
@@ -1972,9 +1976,12 @@ while((pcapstatus = pcap_next_ex(pcapin, &pkh, &packet)) != -2)
continue;
}
+ if(pkh->caplen == 0)
+ {
+ continue;
+ }
+
packetcount++;
- if((pkh->ts.tv_sec == 0) && (pkh->ts.tv_usec == 0))
- wcflag = true;
/* check Loopback-header */
if(datalink == DLT_NULL)
|
ya tool: update resources | },
"sandboxctl": {
"formula": {
- "sandbox_id": [442883572],
+ "sandbox_id": 470446244,
"match": "sandboxctl"
},
"executable": {
},
"qemu": {
"formula": {
- "sandbox_id": 435931020,
+ "sandbox_id": 468108108,
"match": "qemu"
},
"executable": {
|
Updated examples/ParameterSweep. | Parameter Sweep Example
=======================
-.. warning::
-
- This example is currently broken! See
- `this issue <https://github.com/VirtualPlanetaryLaboratory/vplanet-private/issues/299>`_.
-
Overview
--------
|
imu: revert to the more reliable QS settings | // time to correct gyro readings using the accelerometer
// 1-4 are generally good
#define FASTFILTER 0.05 //onground filter
-#define PREFILTER 0.2 //in_air prefilter (this can be commented out)
-#define FILTERTIME 1.0 //in_air fusion filter
+//#define PREFILTER 0.2 //in_air prefilter (this can be commented out)
+#define FILTERTIME 2.0 //in_air fusion filter
// accel magnitude limits for drift correction
-#define ACC_MIN 0.7f
-#define ACC_MAX 1.3f
+#define ACC_MIN 0.9f
+#define ACC_MAX 1.1f
float GEstG[3] = {0, 0, ACC_1G};
float attitude[3];
|
Heap: Fix a possible bug in the TLSF allocator
Fix a bug that could return a chunk of memory smaller than requested,
easily leading to a memory corruption, when the required memory alignment
passed to the allocator is 4. | @@ -802,7 +802,11 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
const size_t size_with_gap = adjust_request_size(adjust + align + gap_minimum - off_adjust, align);
/*
- ** If alignment is less than or equals base alignment, we're done.
+ ** If alignment is less than or equal to base alignment, we're done, because
+ ** we are guaranteed that the size is at least sizeof(block_header_t), enough
+ ** to store next blocks' metadata. Plus, all pointers allocated will all be
+ ** aligned on a 4-byte bound, so ptr + data_offset will also have this
+ ** alignment constraint. Thus, the gap is not required.
** If we requested 0 bytes, return null, as tlsf_malloc(0) does.
*/
const size_t aligned_size = (adjust && align > ALIGN_SIZE) ? size_with_gap : adjust;
@@ -820,10 +824,12 @@ void* tlsf_memalign_offs(tlsf_t tlsf, size_t align, size_t size, size_t data_off
tlsf_cast(tlsfptr_t, aligned) - tlsf_cast(tlsfptr_t, ptr));
/*
- ** If gap size is too small or if there is not gap but we need one,
+ ** If gap size is too small or if there is no gap but we need one,
** offset to next aligned boundary.
+ ** NOTE: No need for a gap if the alignment required is less than or is
+ ** equal to ALIGN_SIZE.
*/
- if ((gap && gap < gap_minimum) || (!gap && off_adjust))
+ if ((gap && gap < gap_minimum) || (!gap && off_adjust && align > ALIGN_SIZE))
{
const size_t gap_remain = gap_minimum - gap;
const size_t offset = tlsf_max(gap_remain, align);
|
Add more checking for SConscript files of libc. | @@ -5,16 +5,17 @@ Import('rtconfig')
from building import *
objs = []
+cwd = GetCurrentDir()
if GetDepend('RT_USING_LIBC'):
- if rtconfig.PLATFORM == 'gcc':
+ if os.path.isfile(os.path.join(cwd, 'newlib/SConscript')) and rtconfig.PLATFORM == 'gcc':
objs = objs + SConscript('newlib/SConscript')
- elif rtconfig.PLATFORM == 'armcc':
+ elif os.path.isfile(os.path.join(cwd, 'armlibc/SConscript')) and rtconfig.PLATFORM == 'armcc':
objs = objs + SConscript('armlibc/SConscript')
- elif rtconfig.PLATFORM == 'iar':
+ elif os.path.isfile(os.path.join(cwd, 'dlib/SConscript')) and rtconfig.PLATFORM == 'iar':
objs = objs + SConscript('dlib/SConscript')
else:
- if rtconfig.PLATFORM == 'gcc' and rtconfig.ARCH != 'sim':
+ if os.path.isfile(os.path.join(cwd, 'minilibc/SConscript')) and rtconfig.PLATFORM == 'gcc' and rtconfig.ARCH != 'sim':
objs = objs + SConscript('minilibc/SConscript')
if GetDepend('RT_USING_LIBC') and GetDepend('RT_USING_PTHREADS'):
|
Fix a few more issues with the Haar importer
It imports and creates binary files that don't crash the cam
No tracking yet
Number of features from rects is different than number of features from weakClassifiers for some reason.. | @@ -62,7 +62,7 @@ def cascade_binary(path, n_stages, name):
for node in stagesElements[0].childNodes:
if node.nodeType is 1:
stages.append(int(node.getElementsByTagName('maxWeakCount')[0].childNodes[0].nodeValue))
-
+ print(stages)
stage_threshold = xmldoc.getElementsByTagName('stageThreshold')[0:n_stages]
# total number of features
@@ -77,9 +77,8 @@ def cascade_binary(path, n_stages, name):
alpha1 = []
alpha2 = []
for val in leafValues:
- if val.nodeValue is not None:
- alpha1.append(val.nodeValue.split()[0])
- alpha2.append(val.nodeValue.split()[1])
+ alpha1.append(val.childNodes[0].nodeValue.split()[0])
+ alpha2.append(val.childNodes[0].nodeValue.split()[1])
# read rectangles
feature = xmldoc.getElementsByTagName('rects')[0:n_features]
@@ -104,21 +103,34 @@ def cascade_binary(path, n_stages, name):
# write num stages
fout.write(struct.pack('i', len(stages)))
+ count = 0
# write num feat in stages
for s in stages:
fout.write(struct.pack('B', s)) # uint8_t
+ count+=1
+ print("Stage count (num feats): %d"%count)
+ count = 0
# write stages thresholds
for t in stage_threshold:
fout.write(struct.pack('h', int(float(t.childNodes[0].nodeValue)*256))) #int16_t
+ count+=1
+ print("Stage count (threshold): %d"%count)
+ count = 0
# write features threshold 1 per feature
for t in threshold:
fout.write(struct.pack('h', int(float(t.childNodes[0].nodeValue.split()[3])*4096))) #int16_t
+ count+=1
+ print("Feature count (threshold): %d"%count)
+ count = 0
# write alpha1 1 per feature
for a in alpha1:
fout.write(struct.pack('h', int(float(a)*256))) #int16_t
+ count+=1
+ print("Feature count (left/right): %d"%count)
+ count = 0
# write alpha2 1 per feature
for a in alpha2:
@@ -128,6 +140,9 @@ def cascade_binary(path, n_stages, name):
for f in feature:
rects = f.getElementsByTagName('_')
fout.write(struct.pack('B', len(rects))) # uint8_t
+ count+=1
+ print("Feature count (rects): %d"%count)
+ count = 0
# write rects weights 1 per rectangle
for f in feature:
@@ -142,6 +157,9 @@ def cascade_binary(path, n_stages, name):
for r in rects:
l = map(int, r.childNodes[0].nodeValue[:-1].split())
fout.write(struct.pack('BBBB',l[0], l[1], l[2], l[3])) #uint8_t
+ count+=1
+ print("Rects count: %d"%count)
+ count = 0
# print cascade info
print("size:%dx%d"%(size[0], size[1]))
|
fix elseifeq and use older option core2-avx for compatibility | @@ -17,7 +17,7 @@ ifeq ($(TARGET_CORE), SKYLAKEX)
endif
endif
else ifeq($(TARGET_CORE), HASWELL)
- override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=haswell
+ override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE) -march=core2-avx
else
override CFLAGS += -DBUILD_KERNEL -DTABLE_NAME=gotoblas_$(TARGET_CORE)
endif
|
linker.cpp: deal with anonymous internal functions
They have empty name, which breaks the linking algorithm. Assign
a random name | @@ -385,6 +385,9 @@ int link(llvm::Module *Program, const llvm::Module *Lib, std::string &log,
assert(Lib);
ValueToValueMapTy vvm;
llvm::StringSet<> DeclaredFunctions;
+ std::random_device RandomDevice;
+ std::mt19937 Mersenne{RandomDevice()};
+ std::uniform_int_distribution<unsigned long> UniDist{(1UL<<30), (1UL<<31)};
#ifndef LLVM_OLDER_THAN_10_0
// LLVM 9 misses some of the APIs needed by this function. We don't support
@@ -421,6 +424,15 @@ int link(llvm::Module *Program, const llvm::Module *Lib, std::string &log,
continue;
}
+ // anonymous functions have no name, which breaks the algorithm later
+ // when it searches for undefined functions in the kernel library.
+ // assign a randomized name here
+ std::string temp = std::to_string(UniDist(Mersenne));
+ if (!fi->hasName()) {
+ fi->setName(Twine("__anonymous_internal_func__",
+ StringRef(temp)));
+ }
+ DB_PRINT("Function '%s' is defined\n", fi->getName().data());
// Find all functions the program source calls
// TODO: is there no direct way?
find_called_functions(&*fi, DeclaredFunctions);
|
groupmanager functions | @@ -771,6 +771,18 @@ factory.register("Component::Log::LogModuleInterface")
factory.register("Component::Text::TextChecker::ExecNonMacroFunc")
factory.register("Component::Text::TextModule")
factory.register("Component::Text::TextModuleInterface")
+# Known classes - no vfunc/vtables
+factory.register("GroupManager", "", {
+ 0x140BB2650: "Create",
+ 0x1407776A0: "ctor",
+ 0x140777810: "SetPartyEmpty",
+ 0x1407785A0: "GetAllianceMemberByGroupAndIndex", # (this, group, index)
+ 0x140778600: "GetAllianceMemberByIndex", # (this, index)
+ 0x140778620: "IsObjectIDInParty", # (this, objectID)
+ 0x140778680: "IsCharacterInPartyByName", # (this, char*)
+ 0x140778700: "IsObjectIDInAlliance",
+ 0x140778780: "IsObjectIDPartyLeader",
+})
# Known classes
factory.register(0x14164E260, "Common::Configuration::ConfigBase", "Client::System::Common::NonCopyable", {
0x140068C30: "ctor",
|
added poke arm for onboarding bit | %collections-action act
==
::
+:: +poke-noun
+::
+:: utility for setting whether or not to display the onboarding page
+::
+++ poke-noun
+ |= onb=?
+ ^- (quip move _this)
+ =< ta-done
+ (ta-write:ta /web/landscape/onboard/atom [%atom !>(onb)])
+::
:: +ta: main event core for collections
::
++ ta
|
Clarify the change of enc -S behavior in 3.0
Fixes | @@ -256,6 +256,19 @@ All RC2 ciphers have the same key and effective key length.
Blowfish and RC5 algorithms use a 128 bit key.
+Please note that OpenSSL 3.0 changed the effect of the B<-S> option.
+Any explicit salt value specified via this option is no longer prepended to the
+ciphertext when encrypting, and must again be explicitly provided when decrypting.
+Conversely, when the B<-S> option is used during decryption, the ciphertext
+is expected to not have a prepended salt value.
+
+When using OpenSSL 3.0 or later to decrypt data that was encrypted with an
+explicit salt under OpenSSL 1.1.1 do not use the B<-S> option, the salt will
+then be read from the ciphertext.
+To generate ciphertext that can be decrypted with OpenSSL 1.1.1 do not use
+the B<-S> option, the salt will be then be generated randomly and prepended
+to the output.
+
=head1 SUPPORTED CIPHERS
Note that some of these ciphers can be disabled at compile time
|
fix(docs): Pin Zephyr docs links to a version | @@ -143,7 +143,7 @@ west flash
## Multi-CPU and Dual-Chip Bluetooth Boards
-Zephyr supports running the Bluetooth host and controller on separate processors. In such a configuration, ZMK always runs on the host processor, but you may need to build and flash separate firmware for the controller. Zephyr provides sample code which can be used as the controller firmware for Bluetooth HCI over [RPMsg](https://docs.zephyrproject.org/latest/samples/bluetooth/hci_rpmsg/README.html), [SPI](https://docs.zephyrproject.org/latest/samples/bluetooth/hci_spi/README.html), [UART](https://docs.zephyrproject.org/latest/samples/bluetooth/hci_uart/README.html), and [USB](https://docs.zephyrproject.org/latest/samples/bluetooth/hci_usb/README.html). See [Zephyr's Bluetooth Stack Architecture documentation](https://docs.zephyrproject.org/latest/connectivity/bluetooth/bluetooth-arch.html) for more details.
+Zephyr supports running the Bluetooth host and controller on separate processors. In such a configuration, ZMK always runs on the host processor, but you may need to build and flash separate firmware for the controller. Zephyr provides sample code which can be used as the controller firmware for Bluetooth HCI over [RPMsg](https://docs.zephyrproject.org/3.2.0/samples/bluetooth/hci_rpmsg/README.html), [SPI](https://docs.zephyrproject.org/3.2.0/samples/bluetooth/hci_spi/README.html), [UART](https://docs.zephyrproject.org/3.2.0/samples/bluetooth/hci_uart/README.html), and [USB](https://docs.zephyrproject.org/3.2.0/samples/bluetooth/hci_usb/README.html). See [Zephyr's Bluetooth Stack Architecture documentation](https://docs.zephyrproject.org/3.2.0/connectivity/bluetooth/bluetooth-arch.html) for more details.
The following documentation shows how to build and flash ZMK for boards that use a dual-chip configuration.
|
Try fix Android | @@ -422,7 +422,7 @@ function CMake-Generate {
$Arguments += " -DQUIC_USE_XDP=on"
}
if ($Platform -eq "android") {
- $env:PATH = "$env:ANDROID_NDK_ROOT/toolchains/llvm/prebuilt/linux-x86_64/bin:$env:PATH"
+ $env:PATH = "$env:ANDROID_NDK_LATEST_HOME/toolchains/llvm/prebuilt/linux-x86_64/bin:$env:PATH"
switch ($Arch) {
"x86" { $Arguments += " -DANDROID_ABI=x86"}
"x64" { $Arguments += " -DANDROID_ABI=x86_64" }
@@ -430,7 +430,8 @@ function CMake-Generate {
"arm64" { $Arguments += " -DANDROID_ABI=arm64-v8a" }
}
$Arguments += " -DANDROID_PLATFORM=android-29"
- $NDK = $env:ANDROID_NDK_HOME
+ $NDK = $env:ANDROID_NDK_LATEST_HOME
+ $env:ANDROID_NDK_HOME = $env:ANDROID_NDK_LATEST_HOME
$NdkToolchainFile = "$NDK/build/cmake/android.toolchain.cmake"
$Arguments += " -DANDROID_NDK=""$NDK"""
$Arguments += " -DCMAKE_TOOLCHAIN_FILE=""$NdkToolchainFile"""
|
Move the USB voltage level detector after all init code. | @@ -263,9 +263,7 @@ void SystemClock_Config(void)
}
#endif
- #if defined(MCU_SERIES_H7)
- HAL_PWREx_EnableUSBVoltageDetector();
- #elif defined(MCU_SERIES_F4) || defined(MCU_SERIES_F7)
+ #if defined(MCU_SERIES_F4) || defined(MCU_SERIES_F7)
if (HAL_PWREx_EnableOverDrive() != HAL_OK) {
// Initialization Error
__fatal_error("HAL_PWREx_EnableOverDrive");
@@ -305,5 +303,8 @@ void SystemClock_Config(void)
// Enables the I/O Compensation Cell
HAL_EnableCompensationCell();
+
+ // Enable the USB voltage level detector
+ HAL_PWREx_EnableUSBVoltageDetector();
#endif
}
|
data tree BUGFIX unitialized value | @@ -4107,6 +4107,7 @@ lyd_find_xpath(const struct lyd_node *ctx_node, const char *xpath, struct ly_set
LY_CHECK_ARG_RET(NULL, ctx_node, xpath, set, LY_EINVAL);
+ *set = NULL;
memset(&xp_set, 0, sizeof xp_set);
/* compile expression */
|
clang missing flag | @@ -277,7 +277,8 @@ function generate_dependencies(target, sourcebatch, opt)
for _, flag in ipairs(compflags) do
if flag == "-m64" or flag == "-g" or flag:startswith("-stdlib") or flag:startswith("-m") or
(flag:startswith("-f") and not flag:startswith("-fmodule") and not flag:startswith("-fno-implicit-module-maps")) or
- flag:startswith("-D") or flag:startswith("-U") or flag:startswith("-I") or flag:startswith("-isystem") or next_flag then
+ flag:startswith("-D") or flag:startswith("-U") or flag:startswith("-I") or flag:startswith("-isystem") or next_flag or
+ flag:startswith("-iframework") then
table.insert(flags, flag)
next_flag = false
if flag:startswith("-isystem") then
|
get proper session ID in JDK 11 and newer | @@ -74,6 +74,15 @@ initAppOutputStreamGlobals(JNIEnv *jni)
//support for JDK 9, JDK 10
g_fid_AppOutputStream_socket = (*jni)->GetFieldID(jni, appOutputStreamClass, "socket", "Lsun/security/ssl/SSLSocketImpl;");
}
+ if (g_fid_AppOutputStream_socket == NULL) {
+ jboolean flag = (*jni)->ExceptionCheck(jni);
+ if (flag) (*jni)->ExceptionClear(jni);
+ //support for JDK 11 - 14
+ g_fid_AppOutputStream_socket = (*jni)->GetFieldID(jni, appOutputStreamClass, "this$0", "Lsun/security/ssl/SSLSocketImpl;");
+ if ( g_fid_AppOutputStream_socket != NULL) {
+ printf("g_fid_AppOutputStream_socket found\n");
+ }
+ }
if (g_fid_AppOutputStream_socket == NULL) {
jboolean flag = (*jni)->ExceptionCheck(jni);
if (flag) (*jni)->ExceptionClear(jni);
@@ -97,6 +106,15 @@ initAppInputStreamGlobals(JNIEnv *jni)
//support for JDK 9, JDK 10
g_fid_AppInputStream_socket = (*jni)->GetFieldID(jni, appInputStreamClass, "socket", "Lsun/security/ssl/SSLSocketImpl;");
}
+ if (g_fid_AppInputStream_socket == NULL) {
+ jboolean flag = (*jni)->ExceptionCheck(jni);
+ if (flag) (*jni)->ExceptionClear(jni);
+ //support for JDK 11 - 14
+ g_fid_AppInputStream_socket = (*jni)->GetFieldID(jni, appInputStreamClass, "this$0", "Lsun/security/ssl/SSLSocketImpl;");
+ if ( g_fid_AppInputStream_socket != NULL) {
+ printf("g_fid_AppInputStream_socket found\n");
+ }
+ }
if (g_fid_AppInputStream_socket == NULL) {
jboolean flag = (*jni)->ExceptionCheck(jni);
if (flag) (*jni)->ExceptionClear(jni);
|
stm32/powerctrl: Enable overdrive on F7 when waking from stop mode.
Because if the SYSCLK is set to 180MHz or higher it will require this to be
on already. | @@ -381,6 +381,11 @@ void powerctrl_enter_stop_mode(void) {
}
#endif
+ #if defined(STM32F7)
+ // Enable overdrive to reach 216MHz (if needed)
+ HAL_PWREx_EnableOverDrive();
+ #endif
+
// enable PLL
__HAL_RCC_PLL_ENABLE();
while (!__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY)) {
|
doc: remove alternate keyset create scenario | - **Precondition:** -
- **Main success scenario:**
- Caller requests to create a new `KeySet`
- - Core instantiates an emtpy `KeySet` and returns it
-- **Alternative scenario:**
- - Caller requests to create a new `KeySet`, with known content
- - Core instantiates an appropriately sized `KeySet`, fills it and returns it
+ - Core instantiates an empty `KeySet` and returns it
+- **Alternative scenario:** -
- **Error scenario:** -
- **Postcondition:**
- `KeySet` exists and is usable by Caller
- - `KeySet` optionally has initial contents defined by Caller
- **Non-functional Constraints:**
- `KeySet` MUST be resizable after creation
- `KeySet` SHOULD be efficient for small and larger sizes
|
DSA: Check for sanity of input parameters
dsa_builtin_paramgen2 expects the L parameter to be greater than N,
otherwise the generation will get stuck in an infinite loop. | @@ -327,6 +327,12 @@ int dsa_builtin_paramgen2(DSA *ret, size_t L, size_t N,
if (mctx == NULL)
goto err;
+ /* make sure L > N, otherwise we'll get trapped in an infinite loop */
+ if (L <= N) {
+ DSAerr(DSA_F_DSA_BUILTIN_PARAMGEN2, DSA_R_INVALID_PARAMETERS);
+ goto err;
+ }
+
if (evpmd == NULL) {
if (N == 160)
evpmd = EVP_sha1();
|
Fix leaks in db module.
dbFreeResource() leaked ProtocolCommand.
dbTimeMSec() leaked PackRead. | @@ -54,6 +54,7 @@ dbFreeResource(THIS_VOID)
pckWriteU32P(protocolCommandParam(command), this->remoteIdx);
protocolClientExecute(this->remoteClient, command, false);
+ protocolCommandFree(command);
FUNCTION_LOG_RETURN_VOID();
}
@@ -787,9 +788,16 @@ dbTimeMSec(Db *this)
FUNCTION_LOG_PARAM(DB, this);
FUNCTION_LOG_END();
- FUNCTION_LOG_RETURN(
- TIME_MSEC,
- (TimeMSec)pckReadI64P(dbQueryColumn(this, STRDEF("select (extract(epoch from clock_timestamp()) * 1000)::bigint"))));
+ TimeMSec result;
+
+ MEM_CONTEXT_TEMP_BEGIN()
+ {
+ result = (TimeMSec)pckReadI64P(
+ dbQueryColumn(this, STRDEF("select (extract(epoch from clock_timestamp()) * 1000)::bigint")));
+ }
+ MEM_CONTEXT_TEMP_END();
+
+ FUNCTION_LOG_RETURN(TIME_MSEC, result);
}
/**********************************************************************************************************************************/
|
Add ya tool cue | "cling": {
"description": "Run cling"
},
+ "cue": {
+ "description": "Validate and define text-based and dynamic configuration"
+ },
"dctl": {
"description": "Run Yandex.Deploy CLI"
},
}
]
},
+ "cue": {
+ "tools": {
+ "cue": {
+ "bottle": "cue",
+ "executable": "cue"
+ }
+ },
+ "platforms": [
+ {
+ "host": {
+ "os": "LINUX"
+ },
+ "default": true
+ },
+ {
+ "host": {
+ "os": "DARWIN"
+ },
+ "default": true
+ }
+ ]
+ },
"yoimports": {
"tools": {
"yoimports": {
]
}
},
+ "cue": {
+ "formula": {
+ "sandbox_id": [
+ 631593996,
+ 631593924
+ ],
+ "match": "cue"
+ },
+ "executable": {
+ "cue": [
+ "cue"
+ ]
+ }
+ },
"yoimports": {
"formula": {
"sandbox_id": [
|
fix type error in rw.c | @@ -37,7 +37,7 @@ static ScsCone *read_scs_cone(FILE *fin) {
fread(&(k->ep), sizeof(scs_int), 1, fin);
fread(&(k->ed), sizeof(scs_int), 1, fin);
fread(&(k->psize), sizeof(scs_int), 1, fin);
- k->p = scs_calloc(k->psize, sizeof(scs_int));
+ k->p = scs_calloc(k->psize, sizeof(scs_float));
fread(k->p, sizeof(scs_float), k->psize, fin);
return k;
}
|
fee, fi, fo, fum, i had one extra one | @@ -11,7 +11,7 @@ else if [[ $GITHUB_REF == "refs/heads/*" ]]; then
VERSION="branch/$(echo ${GITHUB_REF} | sed -e 's*^refs/heads/v**')"
else
VERSION=whoops
-fi fi fi fi
+fi fi fi
TMPDIR=$(mktemp -d)
mkdir -p ${TMPDIR}/scope
|
fixed class memory leak | @@ -553,6 +553,7 @@ static void ccl_cosmology_compute_power_class(ccl_cosmology * cosmo, int * statu
}
+ ccl_free_class_structs(cosmo, &ba,&th,&pt,&tr,&pm,&sp,&nl,&le,init_arr,status);
free(x);
free(a);
free(y2d_nl);
|
Triggering appveyor build | @@ -18,4 +18,3 @@ A step-by-step tutorial on how to contribute to EPANET using GitHub is also [ava
__Note:__ This repository is not affiliated with, or endorsed by, the USEPA. For the last "official" release of EPANET (2.00.12 UI and Toolkit) please go to the [EPA's GitHub repo](https://github.com/USEPA/Water-Distribution-Network-Model) or [the USEPA website](http://www2.epa.gov/water-research/epanet). It is also not the graphical user interface version. This is the hydraulic and water quality solver engine.
However, if you are interested in extending EPANET for academic, personal, or commercial use, then you've come to the right place. For community discussion, FAQ, and roadmapping of the project, go to the [Community Forum](http://community.wateranalytics.org/category/epanet).
-
|
hfuzz_cc: try various clangs | @@ -27,15 +27,6 @@ __asm__("\n"
" .global lhfuzz_end\n"
"lhfuzz_start:\n" " .incbin \"libhfuzz/libhfuzz.a\"\n" "lhfuzz_end:\n" "\n");
-static const char *getClangCC()
-{
- const char *cc_path = getenv("HFUZZ_CC_PATH");
- if (cc_path != NULL) {
- return cc_path;
- }
- return CLANG_BIN;
-}
-
static bool useASAN()
{
if (getenv("HFUZZ_CC_ASAN") != NULL) {
@@ -87,9 +78,19 @@ static int execCC(int argc, char **argv)
if (useUBSAN()) {
argv[argc++] = "-fsanitize=undefined";
}
-
argv[argc] = NULL;
- execvp(argv[0], argv);
+
+ const char *cc_path = getenv("HFUZZ_CC_PATH");
+ if (cc_path != NULL) {
+ execvp(cc_path, argv);
+ }
+
+ execvp("clang-devel", argv);
+ execvp("clang-6.0", argv);
+ execvp("clang-5.0", argv);
+ execvp("clang-4.0", argv);
+ execvp("clang", argv);
+
PLOG_E("execvp('%s')", argv[0]);
return EXIT_FAILURE;
}
@@ -99,7 +100,7 @@ static int ccMode(int argc, char **argv)
char *args[4096];
int j = 0;
- args[j++] = (char*)getClangCC();
+ args[j++] = "clang";
args[j++] = "-fsanitize-coverage=trace-pc-guard,trace-cmp,indirect-calls";
args[j++] = "-funroll-loops";
args[j++] = "-fno-inline";
@@ -160,7 +161,7 @@ static int ldMode(int argc, char **argv)
char *args[4096];
int j = 0;
- args[j++] = (char*)getClangCC();
+ args[j++] = "clang";
args[j++] = "-Wl,-z,muldefs";
args[j++] = "-Wl,--whole-archive";
args[j++] = LHFUZZ_A_PATH;
@@ -181,6 +182,10 @@ static int ldMode(int argc, char **argv)
int main(int argc, char **argv)
{
+ if (argc <= 1) {
+ LOG_I("No arguments provided");
+ return execCC(argc, argv);
+ }
if (argc > (ARGS_MAX - 4)) {
LOG_F("Too many positional arguments: %d", argc);
return EXIT_FAILURE;
|
bootloader: keep bootloader_common code to retention region
It is possible to utilize some of the routines related to otadata
partition validation, after firmware image is downloaded to RAM. Hence
these routines should be part of app cpu cache, so that they do not
get overwritten by firmware. | @@ -36,11 +36,12 @@ SECTIONS
{
. = ALIGN (16);
_stext = .;
- _text_start = ABSOLUTE(.);
+ _loader_text_start = ABSOLUTE(.);
*(.stub .gnu.warning .gnu.linkonce.literal.* .gnu.linkonce.t.*.literal .gnu.linkonce.t.*)
*(.iram1 .iram1.*) /* catch stray IRAM_ATTR */
*liblog.a:(.literal .text .literal.* .text.*)
*libgcc.a:(.literal .text .literal.* .text.*)
+ *libbootloader_support.a:bootloader_common.o(.literal .text .literal.* .text.*)
*libbootloader_support.a:bootloader_flash.o(.literal .text .literal.* .text.*)
*libbootloader_support.a:bootloader_random.o(.literal .text .literal.* .text.*)
*libbootloader_support.a:bootloader_utility.o(.literal .text .literal.* .text.*)
@@ -56,7 +57,7 @@ SECTIONS
*(.fini.literal)
*(.fini)
*(.gnu.version)
- _text_end = ABSOLUTE(.);
+ _loader_text_end = ABSOLUTE(.);
_etext = .;
} > iram_loader_seg
|
CMake: macOS always builds with rpath; | @@ -610,6 +610,11 @@ elseif(APPLE)
target_link_libraries(lovr objc)
target_sources(lovr PRIVATE src/core/os_macos.c)
target_compile_definitions(lovr PRIVATE LOVR_GL)
+ set_target_properties(lovr PROPERTIES
+ MACOSX_RPATH TRUE
+ BUILD_WITH_INSTALL_RPATH TRUE
+ INSTALL_RPATH "@executable_path"
+ )
if(LOVR_BUILD_BUNDLE)
function(move_lib)
if(TARGET ${ARGV0})
@@ -635,9 +640,6 @@ elseif(APPLE)
target_sources(lovr PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/src/resources/lovr.icns")
set_target_properties(lovr PROPERTIES
MACOSX_BUNDLE TRUE
- MACOSX_RPATH TRUE
- BUILD_WITH_INSTALL_RPATH TRUE
- INSTALL_RPATH "@executable_path"
MACOSX_BUNDLE_INFO_PLIST "${CMAKE_CURRENT_SOURCE_DIR}/src/resources/Info.plist"
RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/src/resources/lovr.icns"
)
|
fixed shadowed declaration of variable when using MPTCP | @@ -2122,8 +2122,8 @@ he_connected_cb(uv_poll_t *handle, int status, int events)
#ifdef MPTCP_SUPPORT
if (candidate->pollable_socket->stack == NEAT_STACK_MPTCP) {
int mptcp_enabled = 0;
- unsigned int len = sizeof(mptcp_enabled);
- getsockopt(candidate->pollable_socket->fd, IPPROTO_TCP, MPTCP_ENABLED, &mptcp_enabled, &len);
+ unsigned int len_mp = sizeof(mptcp_enabled);
+ getsockopt(candidate->pollable_socket->fd, IPPROTO_TCP, MPTCP_ENABLED, &mptcp_enabled, &len_mp);
if (!mptcp_enabled) {
uv_poll_stop(handle);
|
vere: fix use-after-free in closing/canceling http request | @@ -325,9 +325,9 @@ _http_req_done(void* ptr_v)
{
u3_hreq* req_u = (u3_hreq*)ptr_v;
- // client canceled request
- if ( (u3_rsat_plan == req_u->sat_e ) ||
- (0 != req_u->gen_u && c3n == ((u3_hgen*)req_u->gen_u)->dun )) {
+ // client canceled request before response
+ //
+ if ( u3_rsat_plan == req_u->sat_e ) {
_http_req_kill(req_u);
}
@@ -461,7 +461,13 @@ _http_hgen_send(u3_hgen* gen_u)
static void
_http_hgen_stop(h2o_generator_t* neg_u, h2o_req_t* rec_u)
{
- // kill request in %light
+ u3_hgen* gen_u = (u3_hgen*)neg_u;
+
+ // response not complete, enqueue cancel
+ //
+ if ( c3n == gen_u->dun ) {
+ _http_req_kill(gen_u->req_u);
+ }
}
/* _http_hgen_proceed(): h2o is ready for more response data.
|
Make 'with_fallback' use 'use' instead of 'require'
This enables us to require module versions, and to fall back to a
bundled version if the system version is too low. | package with_fallback;
sub import {
+ shift;
+
use File::Basename;
use File::Spec::Functions;
foreach (@_) {
- eval "require $_";
+ eval "use $_";
if ($@) {
unshift @INC, catdir(dirname(__FILE__),
"..", "..", "external", "perl");
my $transfer = "transfer::$_";
- eval "require $transfer";
+ eval "use $transfer";
shift @INC;
warn $@ if $@;
}
|
[io] correct new call to topo.getDynamicalSystem | @@ -1145,7 +1145,7 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5):
refds_name = diff[0]
if refds_name:
- refds = topo.getDynamicalSystem(str(refds_name))
+ refds = sk.cast_NewtonEulerDS(topo.getDynamicalSystem(str(refds_name)))
# Determine reference indexes:
# Assert if neither ds in reference joints is the
|
Changed U16 to wchar_t in usb_lib.c for string descriptors. | #include "rl_usb.h"
#include "usb.h"
#include "settings.h"
+#include "compiler.h"
#pragma thumb
#pragma O3
@@ -2337,12 +2338,15 @@ U8 USBD_ConfigDescriptor_HS[200] = { 0 };
#endif
+// Verify that wchar_t is UTF-16.
+COMPILER_ASSERT(sizeof(wchar_t) == 2);
+
/* USB Device Create String Descriptor */
#define USBD_STR_DEF(n) \
struct { \
U8 len; \
U8 type; \
- U16 str[sizeof(USBD_##n)/2-1]; \
+ wchar_t str[sizeof(USBD_##n)/2-1]; \
} desc##n
#define USBD_STR_VAL(n) \
|
Build and Deploy UWP C#. | @@ -97,15 +97,15 @@ jobs:
python -m pip install wheel
- name: Create Build Environment
- run: cmake -E make_directory ${{runner.workspace}}/python
+ run: cmake -E make_directory ${{runner.workspace}}/build
- name: Configure CMake
shell: bash
- working-directory: ${{runner.workspace}}/python
+ working-directory: ${{runner.workspace}}/build
run: cmake $GITHUB_WORKSPACE -A x64 -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DTINYSPLINE_ENABLE_PYTHON=True -DTINYSPLINE_PYTHON_VERSION=${{ matrix.python }}
- name: Build
- working-directory: ${{runner.workspace}}/python
+ working-directory: ${{runner.workspace}}/build
shell: bash
run: python setup.py bdist_wheel
@@ -113,7 +113,7 @@ jobs:
uses: actions/upload-artifact@v2
with:
name: python${{ matrix.python }}-win_amd64
- path: ${{runner.workspace}}/python/dist/*.whl
+ path: ${{runner.workspace}}/build/dist/*.whl
if-no-files-found: error
uwp:
@@ -130,6 +130,7 @@ jobs:
shell: bash
run: |
choco install swig
+ choco install nuget.commandline
- name: Create Build Environment
run: cmake -E make_directory ${{runner.workspace}}/build
@@ -137,9 +138,18 @@ jobs:
- name: Configure CMake
shell: bash
working-directory: ${{runner.workspace}}/build
- run: cmake $GITHUB_WORKSPACE -A ${{ matrix.arch }} -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_SYSTEM_NAME=WindowsStore -DCMAKE_SYSTEM_VERSION="10.0"
+ run: cmake $GITHUB_WORKSPACE -A ${{ matrix.arch }} -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_SYSTEM_NAME=WindowsStore -DCMAKE_SYSTEM_VERSION="10.0" -DTINYSPLINE_ENABLE_CSHARP=True
- name: Build
working-directory: ${{runner.workspace}}/build
shell: bash
- run: cmake --build . --config $BUILD_TYPE
+ run: |
+ cmake --build . --config $BUILD_TYPE
+ nuget pack
+
+ - name: Deploy
+ uses: actions/upload-artifact@v2
+ with:
+ name: uwp_${{ matrix.arch }}
+ path: ${{runner.workspace}}/build/*.nupkg
+ if-no-files-found: error
|
equidistantKnotSeq/SWIG: Delete memory in case of an exception | @@ -1250,8 +1250,12 @@ tinyspline::BSpline::equidistantKnotSeq(size_t num,
num,
knots_ptr,
numSamples,
- &status))
+ &status)) {
+#ifdef SWIG
+ delete knots;
+#endif
throw std::runtime_error(status.message);
+ }
return knots;
}
|
Update documentation with nlerp | @@ -52,6 +52,7 @@ Functions:
#. :c:func:`glm_quat_mat3`
#. :c:func:`glm_quat_mat3t`
#. :c:func:`glm_quat_lerp`
+#. :c:func:`glm_quat_nlerp`
#. :c:func:`glm_quat_slerp`
#. :c:func:`glm_quat_look`
#. :c:func:`glm_quat_for`
@@ -304,6 +305,25 @@ Functions documentation
| *[in]* **t** interpolant (amount) clamped between 0 and 1
| *[out]* **dest** result quaternion
+.. c:function:: void glm_quat_nlerp(versor q, versor r, float t, versor dest)
+
+ | interpolates between two quaternions
+ | taking the shortest rotation path using
+ | normalized linear interpolation (NLERP)
+
+ | This is a cheaper alternative to slerp; most games use nlerp
+ | for animations as it visually makes little difference.
+
+ References:
+ * `Understanding Slerp, Then Not Using it <http://number-none.com/product/Understanding%20Slerp,%20Then%20Not%20Using%20It>`_
+ * `Lerp, Slerp and Nlerp <https://keithmaggio.wordpress.com/2011/02/15/math-magician-lerp-slerp-and-nlerp/>`_
+
+ Parameters:
+ | *[in]* **from** from
+ | *[in]* **to** to
+ | *[in]* **t** interpolant (amount) clamped between 0 and 1
+ | *[out]* **dest** result quaternion
+
.. c:function:: void glm_quat_slerp(versor q, versor r, float t, versor dest)
| interpolates between two quaternions
|
Correct small issue in ingen | |= [s=@ m=meta]
(child-node-from-seed [(met 3 seed) s] m ~)
::
- =+ owner=public:(wallet:dr `byts`[(met 3 seed) seed])
+ =+ owner=public.keys:(wallet:dr `byts`[(met 3 seed) seed])
=+ delegate=public.keys:(cn seed "delegate" 0 ~)
=+ manage=(cn seed "manage" 0 ~)
::
|
bump sionlib to v1.7.1 | @@ -68,7 +68,7 @@ Requires: openmpi-%{compiler_family}%{PROJ_DELIM}
Summary: Scalable Performance Measurement Infrastructure for Parallel Codes
Name: %{pname}-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
-Version: 1.7.0
+Version: 1.7.1
Release: 1
License: BSD
Group: %{PROJ_NAME}/perf-tools
|
pybricks.hubs.MoveHub.imu: update acceleration
Make it more similar to the other hubs, but without vectors. | @@ -131,9 +131,7 @@ static void motion_spi_write(uint8_t reg, uint8_t value) {
GPIOA->BSRR = GPIO_BSRR_BS_4;
}
-STATIC mp_obj_t hubs_MoveHub_IMU_accel(mp_obj_t self_in) {
- hubs_MoveHub_IMU_obj_t *self = MP_OBJ_TO_PTR(self_in);
- (void)self;
+STATIC mp_obj_t hubs_MoveHub_IMU_acceleration(mp_obj_t self_in) {
uint8_t data[3];
motion_spi_read(OUT_X_H, &data[0]);
@@ -141,16 +139,15 @@ STATIC mp_obj_t hubs_MoveHub_IMU_accel(mp_obj_t self_in) {
motion_spi_read(OUT_Z_H, &data[2]);
mp_obj_t values[3];
- values[0] = mp_obj_new_int((int8_t)data[0]);
- values[1] = mp_obj_new_int((int8_t)data[1]);
- values[2] = mp_obj_new_int((int8_t)data[2]);
-
+ for (uint8_t i = 0; i < 3; i++) {
+ values[i] = MP_OBJ_NEW_SMALL_INT((((int8_t)data[i]) * 10) >> 6);
+ }
return mp_obj_new_tuple(3, values);
}
-STATIC MP_DEFINE_CONST_FUN_OBJ_1(hubs_MoveHub_IMU_accel_obj, hubs_MoveHub_IMU_accel);
+STATIC MP_DEFINE_CONST_FUN_OBJ_1(hubs_MoveHub_IMU_acceleration_obj, hubs_MoveHub_IMU_acceleration);
STATIC const mp_rom_map_elem_t hubs_MoveHub_IMU_locals_dict_table[] = {
- { MP_ROM_QSTR(MP_QSTR_accel), MP_ROM_PTR(&hubs_MoveHub_IMU_accel_obj) },
+ { MP_ROM_QSTR(MP_QSTR_acceleration), MP_ROM_PTR(&hubs_MoveHub_IMU_acceleration_obj) },
};
STATIC MP_DEFINE_CONST_DICT(hubs_MoveHub_IMU_locals_dict, hubs_MoveHub_IMU_locals_dict_table);
|
Provide convenience function `peekRead`
This is useful when retrieving data that has been pushed with `push .
show`. | @@ -19,6 +19,7 @@ module Foreign.Lua.Util
, Optional (Optional, fromOptional)
-- * getting values
, peekEither
+ , peekRead
, popValue
) where
@@ -27,11 +28,13 @@ import Data.ByteString (ByteString)
import Data.Char (ord)
import Foreign.Lua.Core (Lua, NumResults, StackIndex)
import Foreign.Lua.Types (Peekable, Pushable)
+import Text.Read (readMaybe)
import qualified Control.Monad.Catch as Catch
import qualified Data.ByteString as B
import qualified Foreign.Lua.Core as Lua
import qualified Foreign.Lua.Types as Lua
+import qualified Foreign.Lua.Utf8 as Utf8
-- | Run lua computation using the default HsLua state as starting point. Raised
-- exceptions are passed through; error handling is the responsibility of the
@@ -116,6 +119,15 @@ instance Pushable a => Pushable (Optional a) where
-- Getting Values
--
+-- | Get a value by retrieving a String from Lua, then using @'readMaybe'@ to
+-- convert the String into a Haskell value.
+peekRead :: Read a => StackIndex -> Lua a
+peekRead idx = do
+ s <- Lua.peek idx
+ case readMaybe s of
+ Just x -> return x
+ Nothing -> Lua.throwException (Utf8.fromString ("Could not read: " ++ s))
+
-- | Try to convert the value at the given stack index to a Haskell value.
-- Returns @Left@ with an error message on failure.
peekEither :: Peekable a => StackIndex -> Lua (Either ByteString a)
|
I updated the LLNL custom launcher to also set the MPI paths when running
on rztopaz. | @@ -47,6 +47,9 @@ class JobSubmitter_qsub_LLNL(JobSubmitter_qsub):
# I added the MPI library paths to the LD_LIBRARY_PATH for borax, quartz,
# rzgenie and rztrona.
#
+# Eric Brugger, Thu Jun 15 11:44:37 PDT 2017
+# I added the MPI library paths to the LD_LIBRARY_PATH for rztopaz.
+#
###############################################################################
class LLNLLauncher(MainLauncher):
@@ -87,11 +90,12 @@ class LLNLLauncher(MainLauncher):
#
# Set the LD_LIBRARY_PATH to include the path to MPI on borax,
- # quartz, rzgenie and rztrona.
+ # quartz, rzgenie, rztopaz and rztrona.
#
if self.sectorname() == "borax" or \
self.sectorname() == "quartz" or \
self.sectorname() == "rzgenie" or \
+ self.sectorname() == "rztopaz" or \
self.sectorname() == "rztrona":
mpi_ld_library_paths = ["/usr/tce/packages/mvapich2/mvapich2-2.2-intel-16.0.3/lib", "/usr/tce/packages/intel/intel-16.0.3/lib/intel64"]
SETENV("LD_LIBRARY_PATH", self.joinpaths(mpi_ld_library_paths))
|
build: add libgpgme-dev to Debian unstable image | @@ -32,6 +32,7 @@ RUN apt-get update && apt-get -y install \
libgirepository1.0-dev \
libgit2-dev \
libglib2.0-dev \
+ libgpgme-dev \
liblua5.3-dev \
libpcre++-dev \
libpcre3-dev \
|
experiment with smaller segment size (32MiB) and finer minimal commit (1MiB) | @@ -132,7 +132,7 @@ typedef int32_t mi_ssize_t;
// Main tuning parameters for segment and page sizes
// Sizes for 64-bit (usually divide by two for 32-bit)
-#define MI_SEGMENT_SLICE_SHIFT (13 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
+#define MI_SEGMENT_SLICE_SHIFT (12 + MI_INTPTR_SHIFT) // 64KiB (32KiB on 32-bit)
#if MI_INTPTR_SIZE > 4
#define MI_SEGMENT_SHIFT (10 + MI_SEGMENT_SLICE_SHIFT) // 64MiB
@@ -324,7 +324,7 @@ typedef enum mi_segment_kind_e {
// is still tracked in fine-grained MI_COMMIT_SIZE chunks)
// ------------------------------------------------------
-#define MI_MINIMAL_COMMIT_SIZE (2*MI_MiB)
+#define MI_MINIMAL_COMMIT_SIZE (MI_MiB)
#define MI_COMMIT_SIZE (MI_SEGMENT_SLICE_SIZE) // 64KiB
#define MI_COMMIT_MASK_BITS (MI_SEGMENT_SIZE / MI_COMMIT_SIZE)
#define MI_COMMIT_MASK_FIELD_BITS MI_SIZE_BITS
|
flake8 master doesn't exist anymore | @@ -12,7 +12,7 @@ repos:
- id: mypy
exclude: '^(tests/automated)/'
- repo: https://github.com/PyCQA/flake8
- rev: master
+ rev: 3.8.4
hooks:
- id: flake8
exclude: '^(tests/automated)/'
|
Anchor the regexp match | @@ -127,7 +127,7 @@ if ($WHAT eq '-newcert' ) {
# create a pre-certificate
$RET = run("$REQ -x509 -precert -keyout $NEWKEY -out $NEWCERT $DAYS");
print "Pre-cert is in $NEWCERT, private key is in $NEWKEY\n" if $RET == 0;
-} elsif ($WHAT =~ /\-newreq(\-nodes)?/ ) {
+} elsif ($WHAT =~ /^\-newreq(\-nodes)?$/ ) {
# create a certificate request
$RET = run("$REQ -new $1 -keyout $NEWKEY -out $NEWREQ $DAYS $EXTRA{req}");
print "Request is in $NEWREQ, private key is in $NEWKEY\n" if $RET == 0;
|
Fix datafari-community/datafari#859 Add missing suggester for Authors | </requestHandler>
<requestHandler class="org.apache.solr.handler.component.SearchHandler" name="/suggestAuthors">
+ <lst name="defaults">
+ <str name="suggest">true</str>
+ <str name="suggest.dictionary">suggesterAuthors</str>
+ <str name="suggest.count">10</str>
+ </lst>
+ <arr name="components">
+ <str>suggestAuthors</str>
+ </arr>
+ </requestHandler>
+
+ <requestHandler class="org.apache.solr.handler.component.SearchHandler" name="/suggesEntitytAuthors">
<lst name="defaults">
<str name="suggest">true</str>
<str name="suggest.dictionary">suggesterEntityAuthors</str>
<str name="suggestAnalyzerFieldType">text_general</str>
<str name="lookupImpl">AnalyzingInfixLookupFactory</str>
<str name="maxEdits">0</str>
- <str name="field">authors</str>
+ <str name="field">author</str>
<str name="highlight">false</str>
<str name="dictionaryImpl">HighFrequencyDictionaryFactory</str>
<str name="buildOnCommit">true</str>
|
[numerics] use numerics_error rather than exit failure | @@ -278,15 +278,12 @@ int fc3d_driver(FrictionContactProblem* problem,
}
default:
{
- numerics_printf("fc3d_driver failed. Unknown solver.");
- fprintf(stderr, "Numerics, fc3d_driver failed. Unknown solver.\n");
- exit(EXIT_FAILURE);
-
+ numerics_error("fc3d_driver", "Unknown solver.");
+ info = 1;
}
}
exit:
-
return info;
}
|
carve bits out of op code for future use.
Mark flags as a field for future use. | #include <tinycbor/cbor.h>
#include <inttypes.h>
#include <os/os.h>
+#include <os/endian.h>
#ifdef __cplusplus
extern "C" {
@@ -34,8 +35,15 @@ extern "C" {
#define NMGR_OP_WRITE_RSP (3)
struct nmgr_hdr {
- uint8_t nh_op; /* NMGR_OP_XXX */
- uint8_t nh_flags;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ uint8_t nh_op:3; /* NMGR_OP_XXX */
+ uint8_t _res1:5;
+#endif
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint8_t _res1:5;
+ uint8_t nh_op:3; /* NMGR_OP_XXX */
+#endif
+ uint8_t nh_flags; /* XXX reserved for future flags */
uint16_t nh_len; /* length of the payload */
uint16_t nh_group; /* NMGR_GROUP_XXX */
uint8_t nh_seq; /* sequence number */
|
test: fix Coverity use after free | @@ -205,8 +205,10 @@ static int test_default_cipherlist_explicit(void)
{
SETUP_CIPHERLIST_TEST_FIXTURE();
if (!TEST_true(SSL_CTX_set_cipher_list(fixture->server, "DEFAULT"))
- || !TEST_true(SSL_CTX_set_cipher_list(fixture->client, "DEFAULT")))
+ || !TEST_true(SSL_CTX_set_cipher_list(fixture->client, "DEFAULT"))) {
tear_down(fixture);
+ fixture = NULL;
+ }
EXECUTE_CIPHERLIST_TEST();
return result;
}
|
Enhance doc string. | @@ -71,11 +71,10 @@ set(TINYSPLINE_DESCRIPTION "TinySpline is a small, yet powerful library for inte
# Location of compiled binaries.
#
# TINYSPLINE_LIBRARY_OUTPUT_DIRECTORY
-# Location of the compiled libraries.
+# Location of compiled libraries.
#
# TINYSPLINE_***_SOURCE_DIRECTORY
-# The directory where the auto-generated source code of binding *** is
-# located.
+# Location of the auto-generated source code of binding ***.
#
# TINYSPLINE_***_INTERFACE_FILE
# Location of the auto-generated interface file of binding *** (if any).
|
ikev2: fix leaking pending INIT requests
.. when associated profile is deleted.
Type: fix | @@ -3471,6 +3471,22 @@ ikev2_cleanup_profile_sessions (ikev2_main_t * km, ikev2_profile_t * p)
u32 *sai;
u32 *del_sai = 0;
+ /* *INDENT-OFF* */
+ pool_foreach(sa, km->sais, ({
+ if (pi == sa->profile_index)
+ vec_add1 (del_sai, sa - km->sais);
+ }));
+ /* *INDENT-ON* */
+
+ vec_foreach (sai, del_sai)
+ {
+ sa = pool_elt_at_index (km->sais, sai[0]);
+ ikev2_sa_free_all_vec (sa);
+ hash_unset (km->sa_by_ispi, sa->ispi);
+ pool_put (km->sais, sa);
+ }
+ vec_reset_length (del_sai);
+
vec_foreach (tkm, km->per_thread_data)
{
/* *INDENT-OFF* */
|
Use path variables: platform native | @@ -28,5 +28,5 @@ CONTIKI_SOURCEFILES += $(CONTIKI_TARGET_SOURCEFILES)
MAKE_MAC ?= MAKE_MAC_NULLMAC
### Define the CPU directory
-CONTIKI_CPU=$(ARCH_PATH)/cpu/native
-include $(ARCH_PATH)/cpu/native/Makefile.native
+CONTIKI_CPU = $(CONTIKI_NG_RELOC_CPU_DIR)/native
+include $(CONTIKI_CPU)/Makefile.native
|
update openssl download location and chaindata | @@ -31,9 +31,9 @@ cd ~/.denarius || exit
rm -rf database txleveldb smsgDB
#wget http://d.hashbag.cc/chaindata.zip
#unzip chaindata.zip
-wget https://gitlab.com/denarius/chain/raw/master/chaindata2290877.zip
-unzip chaindata2290877.zip
-rm -rf chaindata2290877.zip
+wget hhttps://denarii.cloud/chaindata.zip
+unzip chaindata.zip
+rm -rf chaindata.zip
Echo "Back to Compiled QT Binary Folder"
cd ~/denarius/src
;;
@@ -59,7 +59,7 @@ sudo apt-get install -y git unzip build-essential libdb++-dev libboost-all-dev l
echo "Downgrade libssl-dev"
sudo apt-get install make
-wget https://www.openssl.org/source/openssl-1.0.1j.tar.gz
+wget https://ftp.openssl.org/source/old/1.0.1/openssl-1.0.1j.tar.gz
tar -xzvf openssl-1.0.1j.tar.gz
cd openssl-1.0.1j
./config
@@ -90,9 +90,9 @@ cd ~/.denarius
rm -rf database txleveldb smsgDB
#wget http://d.hashbag.cc/chaindata.zip
#unzip chaindata.zip
-wget https://gitlab.com/denarius/chain/raw/master/chaindata2290877.zip
-unzip chaindata2290877.zip
-rm -rf chaindata2290877.zip
+wget https://denarii.cloud/chaindata.zip
+unzip chaindata.zip
+rm -rf chaindata.zip
Echo "Back to Compiled QT Binary Folder"
cd ~/denarius/src
;;
|
zephyr: kconfig: replace LOG_INPLACE_PROCESS setting w/ LOG_IMMEDIATE
Commit ("logging: Refactoring 'in place' mode to reduce memory
footprint") changed the resource restrained version of LOGGER from
LOG_INPLACE_PROCESS to LOG_IMMEDIATE. | @@ -137,7 +137,7 @@ config BOOT_HAVE_LOGGING
bool "MCUboot have logging enabled"
default y
select LOG
- select LOG_INPLACE_PROCESS
+ select LOG_IMMEDIATE
help
If y, enables logging on the serial port. The log level can
be defined by setting `LOG_DEFAULT_LEVEL`.
|
Jenkins: Use Debian `stretch` for main builds | @@ -436,7 +436,8 @@ def generateMainBuildStages() {
// in a standard environment
tasks << buildAndTest(
"debian-stable-full",
- DOCKER_IMAGES.buster,
+ // TODO: Use Debian buster
+ DOCKER_IMAGES.stretch,
CMAKE_FLAGS_BUILD_ALL +
CMAKE_FLAGS_DEBUG +
CMAKE_FLAGS_BUILD_FULL +
@@ -990,10 +991,10 @@ def generateArtifactStages() {
}
def buildPackageDebianStretch() {
- def stageName = "buildPackage/debian/buster"
+ def stageName = "buildPackage/debian/stretch"
return [(stageName): {
stage(stageName) {
- return withDockerEnv(DOCKER_IMAGES.buster, [DOCKER_OPTS.MOUNT_MIRROR]) {
+ return withDockerEnv(DOCKER_IMAGES.stretch, [DOCKER_OPTS.MOUNT_MIRROR]) {
withCredentials([file(credentialsId: 'jenkins-key', variable: 'KEY'),
file(credentialsId: 'jenkins-secret-key', variable: 'SKEY')]) {
sh "gpg --import $KEY"
|
doc: fix rebase | @@ -63,11 +63,8 @@ Elektra:
- [Profiling](profiling.md)
- [Run all Tests with Docker](run_all_tests_with_docker.md)
- [Run Reformatting with Docker](run_reformatting_script_with_docker.md)
-<<<<<<< HEAD
- [Language Bindings](language-bindings.md)
-=======
- [Code generator](code-generator.md)
->>>>>>> codegen: move stuff around and rename
## Installation Manuals
|
firdes: checking energy calculation configuration | @@ -253,5 +253,11 @@ void autotest_liquid_firdes_config()
CONTEND_EQUALITY(liquid_firdes_notch(m, 0.2f, -8.0f, h), LIQUID_EICONFIG);
CONTEND_EQUALITY(liquid_firdes_prototype(LIQUID_FIRFILT_UNKNOWN,2,2,0.3f,0.0f,h),LIQUID_EICONFIG);
+
+ // test energy calculation configuration; design proper filter
+ liquid_firdes_windowf(wtype, h_len, 0.2f, 0, h);
+ CONTEND_EQUALITY(liquid_filter_energy(h,h_len,-0.1f,1200), 0.0f);
+ CONTEND_EQUALITY(liquid_filter_energy(h,h_len, 0.7f,1200), 0.0f);
+ CONTEND_EQUALITY(liquid_filter_energy(h,h_len, 0.3f, 0), 0.0f);
}
|
Minor changes in ts loader test. | @@ -43,14 +43,9 @@ TEST_F(tmetacall_typescript_test, DefaultConstructor)
"typed_func.ts"
};
- const enum metacall_value_id hello_boy_double_ids[] =
- {
- METACALL_DOUBLE, METACALL_DOUBLE
- };
-
void * ret = NULL;
- EXPECT_EQ((int) 0, (int) metacall_load_from_file("node", node_scripts, sizeof(node_scripts) / sizeof(node_scripts[0]), NULL));
+ EXPECT_EQ((int) 0, (int) metacall_load_from_file("ts", ts_scripts, sizeof(ts_scripts) / sizeof(ts_scripts[0]), NULL));
ret = metacall("typed_sum", 3.0, 4.0);
|
fix md_gaussian_rand | @@ -75,6 +75,6 @@ void md_gaussian_rand(unsigned int D, const long dims[D], complex float* dst)
#endif
//#pragma omp parallel for
for (long i = 0; i < md_calc_size(D, dims); i++)
- dst[i] = (float)gaussian_rand();
+ dst[i] = (complex float)gaussian_rand();
}
|
seq_cutoff: Initialize each stream with different LBA
Initializing each stream with unique LBA ensures there are no initial
rbtree collisions, and thus helps to avoid clustering of all the streams
into one big linked list instead of forming performance friendly proper
tree structure. | @@ -73,7 +73,7 @@ void ocf_core_seq_cutoff_init(ocf_core_t core)
for (i = 0; i < OCF_SEQ_CUTOFF_MAX_STREAMS; i++) {
stream = &core->seq_cutoff.streams[i];
- stream->last = 0;
+ stream->last = 4096 * i;
stream->bytes = 0;
stream->rw = 0;
ocf_rb_tree_insert(&core->seq_cutoff.tree, &stream->node);
|
security: Increase buffer size to avoid buffer overflow error
The array index of "vm_list" may be out of bounds.
Updated the size of "vm_list" | @@ -114,7 +114,7 @@ struct profiling_vm_info {
struct profiling_vm_info_list {
uint16_t num_vms;
- struct profiling_vm_info vm_list[CONFIG_MAX_VM_NUM];
+ struct profiling_vm_info vm_list[CONFIG_MAX_VM_NUM+1];
};
struct sw_msr_op_info {
|
sysdeps/linux: correct SYS_getrusage | #define SYS_fchown 93
#define SYS_umask 95
#define SYS_getrlimit 97
-#define SYS_getrusage 97
+#define SYS_getrusage 98
#define SYS_sysinfo 99
#define SYS_times 100
#define SYS_ptrace 101
|
hw/mcu/dialog: Fix retaining SCB state in deep sleep
SCB->CPACR value is loaded using r1 register as a base address, but
there was no proper value set there. | @@ -99,9 +99,9 @@ da1469x_m33_sleep:
/* Save SCB state (SCR, CCR, SHPR and SHCSR) */
ldr r0, =(SCB_BASE + SCB_SCR_OFFSET)
- ldmia r0!, {r4-r9}
+ ldmia r0, {r4-r9}
and r9, r9, #(SCB_SHCSR_MASK)
- ldr r10, [r1, #(SCB_CPACR_OFFSET - SCB_SCR_OFFSET)]
+ ldr r10, [r0, #(SCB_CPACR_OFFSET - SCB_SCR_OFFSET)]
and r10, r10, #(SCB_CPACR_MASK)
stmia r3!, {r4-r10}
|
data tree BUGFIX always use dynamic value
Fixes | @@ -824,7 +824,7 @@ lyd_create_opaq(const struct ly_ctx *ctx, const char *name, size_t name_len, con
assert(ctx && name && name_len && format);
- if (!value_len) {
+ if (!value_len && (!dynamic || !*dynamic)) {
value = "";
}
|
Ensure regex extraction routine uses append_str() instead.
return 0;
}
-/* Extract a string given a POSIX regex.
+/* Generic routine to extract all groups from a string given a POSIX regex.
*
* If no match found or error, NULL is returned.
* If match found, a string is returned. */
@@ -267,6 +267,11 @@ regex_extract_string (const char *str, const char *regex, int max_groups, char c
regex_t re;
regmatch_t groups[max_groups];
+ if (str == NULL || *str == '\0') {
+ *err = "Invalid string.";
+ return NULL;
+ }
+
if (regcomp (&re, regex, REG_EXTENDED)) {
*err = "Unable to compile regular expression upon extraction";
return NULL;
@@ -283,6 +288,7 @@ regex_extract_string (const char *str, const char *regex, int max_groups, char c
goto out;
}
+ dest = xstrdup ("");
for (i = 0; i < max_groups; ++i) {
if (groups[i].rm_so == -1)
break;
@@ -290,8 +296,7 @@ regex_extract_string (const char *str, const char *regex, int max_groups, char c
copy = xstrdup (str);
copy[groups[i].rm_eo] = 0;
- dest = xmalloc (snprintf (NULL, 0, "%s", copy + groups[i].rm_so) + 1);
- sprintf (dest, "%s", copy + groups[i].rm_so);
+ append_str (&dest, copy + groups[i].rm_so);
free (copy);
}
|
board/scout/board.h: Format with clang-format
BRANCH=none
TEST=none | @@ -194,11 +194,7 @@ enum mft_channel {
MFT_CH_COUNT,
};
-enum temp_sensor_id {
- TEMP_SENSOR_CORE,
- TEMP_SENSOR_WIFI,
- TEMP_SENSOR_COUNT
-};
+enum temp_sensor_id { TEMP_SENSOR_CORE, TEMP_SENSOR_WIFI, TEMP_SENSOR_COUNT };
enum sensor_id {
CLEAR_ALS,
|
temporarily do not build fortran | @@ -88,10 +88,12 @@ elif [ "$AOMP_USE_HIPVDI" != 0 ] ; then
components="roct rocr project libdevice comgr rocminfo vdi ocl hipvdi atmi extras openmp pgmath flang flang_runtime"
elif [ "$AOMP_STANDALONE_BUILD" != 1 ] ; then
# Over time we will reduce the list of components and get aomp to use preinstalled components
- components="project libdevice comgr rocminfo hip atmi extras openmp pgmath flang flang_runtime"
+ # components="project libdevice comgr rocminfo hip atmi extras openmp pgmath flang flang_runtime"
+ components="project libdevice comgr rocminfo hip atmi extras openmp"
else
# The standalone build builds all rocm components and installs in the compiler installation.
- components="roct rocr project libdevice comgr rocminfo hcc hip atmi extras openmp pgmath flang flang_runtime"
+ # components="roct rocr project libdevice comgr rocminfo hcc hip atmi extras openmp pgmath flang flang_runtime"
+ components="roct rocr project libdevice comgr rocminfo hcc hip atmi extras openmp"
fi
#Partial build options. Check if argument was given.
|
options/posix: Ignore timezone in gettimeofday()
This change matches musl/glibc. | #include <mlibc/sysdeps.hpp>
int gettimeofday(struct timeval *__restrict result, void *__restrict unused) {
- __ensure(!unused);
+ (void)unused; // Linux just ignores gettimeofday().
if(result) {
long nanos;
|
Fix memory leak in BN_rand_range()
The patch enables BN_rand_range() to exit immediately
if BIGNUM *rnd is NULL.
CLA: trivial
Fixes: | @@ -136,6 +136,11 @@ static int bnrand_range(BNRAND_FLAG flag, BIGNUM *r, const BIGNUM *range,
int n;
int count = 100;
+ if (r == NULL) {
+ ERR_raise(ERR_LIB_BN, ERR_R_PASSED_NULL_PARAMETER);
+ return 0;
+ }
+
if (range->neg || BN_is_zero(range)) {
ERR_raise(ERR_LIB_BN, BN_R_INVALID_RANGE);
return 0;
|
Switch to https for new modules. Closes | url = https://github.com/nasa/cfs_lib.git
[submodule "apps/to"]
path = apps/to
- url = [email protected]:nasa/CFS_TO.git
+ url = https://github.com/nasa/CFS_TO.git
[submodule "apps/ci"]
path = apps/ci
- url = [email protected]:nasa/CFS_CI.git
+ url = https://github.com/nasa/CFS_CI.git
[submodule "apps/io_lib"]
path = apps/io_lib
- url = [email protected]:nasa/CFS_IO_LIB.git
+ url = https://github.com/nasa/CFS_IO_LIB.git
|
mmx: use native type whenever SSE is enabled
Hopefully this fixes native aliases on MSVC when targeting x86_64. | @@ -40,7 +40,7 @@ SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
# if defined(SIMDE_MMX_NATIVE)
# define SIMDE_MMX_USE_NATIVE_TYPE
-# elif !defined(SIMDE_ENABLE_NATIVE_ALIASES) && (defined(SIMDE_ARCH_X86_SSE) || defined(SIMDE_ARCH_X86_SSE2))
+# elif defined(SIMDE_ARCH_X86_SSE)
# define SIMDE_MMX_USE_NATIVE_TYPE
# endif
|
misc: l2tp: cli: fix overly generic CLI commands
"clear counters" is not appropriate for a protocol to own. Change
to "clear l2tp counters" (and "test l2tp counter").
Type: fix | @@ -185,7 +185,7 @@ test_counters_command_fn (vlib_main_t * vm,
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (test_counters_command, static) = {
- .path = "test counters",
+ .path = "test lt2p counters",
.short_help = "increment all active counters",
.function = test_counters_command_fn,
};
@@ -220,7 +220,7 @@ clear_counters_command_fn (vlib_main_t * vm,
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (clear_counters_command, static) = {
- .path = "clear counters",
+ .path = "clear l2tp counters",
.short_help = "clear all active counters",
.function = clear_counters_command_fn,
};
|
pyocf: update cleaning policy switching test | @@ -228,14 +228,16 @@ class Cache:
def set_cleaning_policy(self, cleaning_policy: CleaningPolicy):
self.write_lock()
- status = self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
- self.cache_handle, cleaning_policy
+ c = OcfCompletion([("priv", c_void_p), ("error", c_int)])
+ self.owner.lib.ocf_mngt_cache_cleaning_set_policy(
+ self.cache_handle, cleaning_policy, c, None
)
+ c.wait()
self.write_unlock()
- if status:
- raise OcfError("Error changing cleaning policy", status)
+ if c.results["error"]:
+ raise OcfError("Error changing cleaning policy", c.results["error"])
def set_cleaning_policy_param(
self, cleaning_policy: CleaningPolicy, param_id, param_value
@@ -704,8 +706,7 @@ lib.ocf_mngt_cache_remove_core.argtypes = [c_void_p, c_void_p, c_void_p]
lib.ocf_mngt_cache_add_core.argtypes = [c_void_p, c_void_p, c_void_p, c_void_p]
lib.ocf_cache_get_name.argtypes = [c_void_p]
lib.ocf_cache_get_name.restype = c_char_p
-lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32]
-lib.ocf_mngt_cache_cleaning_set_policy.restype = c_int
+lib.ocf_mngt_cache_cleaning_set_policy.argtypes = [c_void_p, c_uint32, c_void_p, c_void_p]
lib.ocf_mngt_core_set_seq_cutoff_policy_all.argtypes = [c_void_p, c_uint32]
lib.ocf_mngt_core_set_seq_cutoff_policy_all.restype = c_int
lib.ocf_mngt_core_set_seq_cutoff_threshold_all.argtypes = [c_void_p, c_uint32]
|
Replace targets 17.04 by 17.10 | @@ -36,14 +36,14 @@ env:
- TASK=default:distrib=debian,latest
- TASK=default:distrib=fedora,latest
- - TASK=default:distrib=ubuntu,17.04:ci_config=with_cxx11
+ - TASK=default:distrib=ubuntu,17.10:ci_config=with_cxx11
- TASK=default:pkgs+=lpsolve,
- TASK=default:distrib=nixos/nix,latest:ci_config=nix
- - TASK=default:distrib=ubuntu,17.04:build_configuration=Profiling:pkgs+=profiling,
+ - TASK=default:distrib=ubuntu,17.10:build_configuration=Profiling:pkgs+=profiling,
- TASK=default:pkgs-=atlas-lapack:pkgs+=openblas-lapacke,
- - TASK=default:distrib=ubuntu,17.04:ci_config=with_serialization:pkgs+=serialization,
+ - TASK=default:distrib=ubuntu,17.10:ci_config=with_serialization:pkgs+=serialization,
- TASK=default:ci_config=with_mumps:pkgs+=mumps,
- TASK=default:ci_config=with_umfpack:pkgs+=umfpack,
|
Update tls13_enc.c
Fix double + in hkdflabel declaration (FIXES
CLA: trivial | @@ -49,7 +49,7 @@ int tls13_hkdf_expand(SSL *s, const EVP_MD *md, const unsigned char *secret,
* prefix and label + bytes for the label itself + 1 byte length of hash
* + bytes for the hash itself
*/
- unsigned char hkdflabel[sizeof(uint16_t) + sizeof(uint8_t) +
+ unsigned char hkdflabel[sizeof(uint16_t) + sizeof(uint8_t)
+ (sizeof(label_prefix) - 1) + TLS13_MAX_LABEL_LEN
+ 1 + EVP_MAX_MD_SIZE];
WPACKET pkt;
|
Cache rlen for symbolic alleles | @@ -3176,6 +3176,9 @@ cdef class VariantRecord(object):
def alleles(self, values):
cdef bcf1_t *r = self.ptr
+ # Cache rlen of symbolic alleles before call to bcf_update_alleles_str
+ cdef int rlen = r.rlen
+
if bcf_unpack(r, BCF_UN_STR) < 0:
raise ValueError('Error unpacking VariantRecord')
@@ -3192,8 +3195,10 @@ cdef class VariantRecord(object):
if bcf_update_alleles_str(self.header.ptr, r, value) < 0:
raise ValueError('Error updating alleles')
- # Only reset rlen if alternate allele isn't symbolic
- if not has_symbolic_allele(self):
+ # Reset rlen if alternate allele isn't symbolic, otherwise used cached
+ if has_symbolic_allele(self):
+ self.ptr.rlen = rlen
+ else:
self.ptr.rlen = len(values[0])
bcf_sync_end(self)
|
dev-tools/scipy: bump to v1.5.1 | @@ -25,7 +25,7 @@ Requires: openblas-%{compiler_family}%{PROJ_DELIM}
%define pname scipy
Name: %{python_prefix}-%{pname}-%{compiler_family}-%{mpi_family}%{PROJ_DELIM}
-Version: 1.3.3
+Version: 1.5.1
Release: 1%{?dist}
Summary: Scientific Tools for Python
License: BSD-3-Clause
|
honggfuzz.h for 1.7 | #include "libhfcommon/util.h"
#define PROG_NAME "honggfuzz"
-#define PROG_VERSION "1.6"
+#define PROG_VERSION "1.7"
/* Name of the template which will be replaced with the proper name of the file */
#define _HF_FILE_PLACEHOLDER "___FILE___"
|
Update CHANGES with details of TLSv1.3 ciphersuite configuration | Changes between 1.1.0g and 1.1.1 [xx XXX xxxx]
+ *) Separated TLSv1.3 ciphersuite configuration out from TLSv1.2 ciphersuite
+ configuration. TLSv1.3 ciphersuites are not compatible with TLSv1.2 and
+ below. Similarly TLSv1.2 ciphersuites are not compatible with TLSv1.3.
+ In order to avoid issues where legacy TLSv1.2 ciphersuite configuration
+ would otherwise inadvertently disable all TLSv1.3 ciphersuites the
+ configuraton has been separated out. See the ciphers man page or the
+ SSL_CTX_set_ciphersuites() man page for more information.
+ [Matt Caswell]
+
*) On POSIX (BSD, Linux, ...) systems the ocsp(1) command running
in responder mode now supports the new "-multi" option, which
spawns the specified number of child processes to handle OCSP
*) Support for TLSv1.3 added. Note that users upgrading from an earlier
version of OpenSSL should review their configuration settings to ensure
- that they are still appropriate for TLSv1.3. In particular if no TLSv1.3
- ciphersuites are enabled then OpenSSL will refuse to make a connection
- unless (1) TLSv1.3 is explicitly disabled or (2) the ciphersuite
- configuration is updated to include suitable ciphersuites. The DEFAULT
- ciphersuite configuration does include TLSv1.3 ciphersuites. For further
- information on this and other related issues please see:
+ that they are still appropriate for TLSv1.3. For further information see:
https://www.openssl.org/blog/blog/2018/02/08/tlsv1.3/
NOTE: In this pre-release of OpenSSL a draft version of the
|
Fixed actor position check on windows build | @@ -720,14 +720,9 @@ void Script_ActorPush_b()
*/
void Script_IfActorPos_b()
{
- UBYTE pos_x, pos_y;
-
- pos_x = (script_cmd_args[1] << 3) + 8;
- pos_y = (script_cmd_args[2] << 3) + 8;
-
if (
- (actors[script_cmd_args[0]].pos.x == pos_x) &&
- (actors[script_cmd_args[0]].pos.y == pos_y))
+ ((script_cmd_args[1] << 3) + 8 == actors[script_cmd_args[0]].pos.x) &&
+ ((script_cmd_args[2] << 3) + 8 == actors[script_cmd_args[0]].pos.y))
{ // True path, jump to position specified by ptr
script_ptr = script_start_ptr + (script_cmd_args[3] * 256) + script_cmd_args[4];
}
|
fix(behavior): add logging to toggle layer for tests | LOG_MODULE_DECLARE(zmk, CONFIG_ZMK_LOG_LEVEL);
-struct behavior_tog_config { };
-struct behavior_tog_data { };
+struct behavior_tog_config
+{
+};
+struct behavior_tog_data
+{
+};
static int behavior_tog_init(struct device *dev)
{
return 0;
};
-
static int tog_keymap_binding_pressed(struct device *dev, u32_t position, u32_t layer, u32_t _)
{
+ LOG_DBG("position %d layer %d", position, layer);
+
return zmk_keymap_layer_toggle(layer);
}
static int tog_keymap_binding_released(struct device *dev, u32_t position, u32_t layer, u32_t _)
{
+ LOG_DBG("position %d layer %d", position, layer);
+
return 0;
}
|
fixing rockspec to require lpeglabel instead of parser-gen and to install new modules | @@ -15,7 +15,7 @@ description = {
}
dependencies = {
"lua ~> 5.3",
- "parser-gen >= 1.0",
+ "lpeglabel >= 1.0.0",
"inspect >= 3.1.0",
"argparse >= 0.5.0",
}
@@ -23,8 +23,12 @@ build = {
type = "builtin",
modules = {
["titan-compiler.ast"] = "titan-compiler/ast.lua",
+ ["titan-compiler.checker"] = "titan-compiler/checker.lua",
["titan-compiler.lexer"] = "titan-compiler/lexer.lua",
["titan-compiler.parser"] = "titan-compiler/parser.lua"
+ ["titan-compiler.symtab"] = "titan-compiler/symtab.lua"
+ ["titan-compiler.syntax_errors"] = "titan-compiler/syntax_errors.lua"
+ ["titan-compiler.util"] = "titan-compiler/util.lua"
},
install = {
bin = {
|
testcase/kernel/libc_string: Add ifdef condition which needed LIBM
using math apis such as round, fabs, CONFIG_LIBM is needed | @@ -869,12 +869,16 @@ static void tc_libc_string_strtof(void)
str = "123.456TizenRT";
value = strtof(str, &ptr);
+#ifdef CONFIG_LIBM
TC_ASSERT_LEQ("strtof", roundf((fabsf(value - 123.456f) * 1000) / 1000), FLT_EPSILON);
+#endif
TC_ASSERT_EQ("strtof", strncmp(ptr, "TizenRT", strlen("TizenRT")), 0);
str = "-78.9123TinyAra";
value = strtof(str, &ptr);
+#ifdef CONFIG_LIBM
TC_ASSERT_LEQ("strtof", roundf((fabsf(value - (-78.9123f)) * 10000) / 10000), FLT_EPSILON);
+#endif
TC_ASSERT_EQ("strtof", strncmp(ptr, "TinyAra", strlen("TinyAra")), 0);
TC_SUCCESS_RESULT();
@@ -896,12 +900,16 @@ static void tc_libc_string_strtold(void)
str = "123.456TizenRT";
value = strtold(str, &ptr);
+#ifdef CONFIG_LIBM
TC_ASSERT_LEQ("strtold", roundl((fabsl(value - 123.456) * 1000) / 1000), DBL_EPSILON);
+#endif
TC_ASSERT_EQ("strtold", strncmp(ptr, "TizenRT", strlen("TizenRT")), 0);
str = "-78.9123TinyAra";
value = strtold(str, &ptr);
+#ifdef CONFIG_LIBM
TC_ASSERT_LEQ("strtold", roundl((fabsl(value - (-78.9123)) * 10000) / 10000), DBL_EPSILON);
+#endif
TC_ASSERT_EQ("strtold", strncmp(ptr, "TinyAra", strlen("TinyAra")), 0);
TC_SUCCESS_RESULT();
|
fix bug of proxy dispatcher decode bytes as string | @@ -5521,7 +5521,7 @@ PostgresMain(int argc, char *argv[], const char *username)
case 'J':
{
- proxyDispatchRun(dispatchData, pq_getmsgstring(&input_message));
+ proxyDispatchRun(dispatchData, pq_getmsgbytes(&input_message, 0));
proxyDispatchWait(dispatchData);
proxyDispatchCleanUp(&dispatchData);
send_ready_for_query = true;
|
options/posix: Partially implement gai_strerror | #include <sys/socket.h>
#include <arpa/inet.h>
#include <stdlib.h>
+#include <stdio.h>
#include <stddef.h>
#include <errno.h>
@@ -50,9 +51,10 @@ void freeaddrinfo(struct addrinfo *ptr) {
}
}
-const char *gai_strerror(int) {
- __ensure(!"Not implemented");
- __builtin_unreachable();
+const char *gai_strerror(int code) {
+ static thread_local char buffer[128];
+ snprintf(buffer, sizeof(buffer), "Unknown error (%d)", code);
+ return buffer;
}
int getaddrinfo(const char *__restrict node, const char *__restrict service,
|
Remove dereference operator added by mistake in last commit | @@ -122,8 +122,8 @@ namespace ebi
std::map<std::string, int> counter;
for (auto & id : ids) {
- counter[*id]++;
- if (counter[*id] >= 2) {
+ counter[id]++;
+ if (counter[id] >= 2) {
throw new IdBodyError{line, "ID must not have duplicate values"};
}
}
|
Move TileResetCtrl before the ResetSynchronizers, and give them an async reset | @@ -70,16 +70,16 @@ object ClockingSchemeGenerators {
// Add a control register for each tile's reset
val resetSetter = chiptop.lazySystem match {
- case sys: BaseSubsystem with InstantiatesTiles => TLTileResetCtrl(sys)
- case _ => ClockGroupEphemeralNode()
+ case sys: BaseSubsystem with InstantiatesTiles => Some(TLTileResetCtrl(sys))
+ case _ => None
}
+ val resetSetterResetProvider = resetSetter.map(_.tileResetProviderNode).getOrElse(ClockGroupEphemeralNode())
val aggregator = LazyModule(new ClockGroupAggregator("allClocks")).node
(chiptop.implicitClockSinkNode
:= ClockGroup()
:= aggregator)
(systemAsyncClockGroup
- :*= resetSetter
:*= ClockGroupNamePrefixer()
:*= aggregator)
@@ -87,10 +87,16 @@ object ClockingSchemeGenerators {
(aggregator
:= ClockGroupFrequencySpecifier(p(ClockFrequencyAssignersKey), p(DefaultClockFrequencyKey))
:= ClockGroupResetSynchronizer()
+ := resetSetterResetProvider
:= DividerOnlyClockGenerator()
:= referenceClockSource)
+ val asyncResetBroadcast = FixedClockBroadcast(None)
+ resetSetter.foreach(_.asyncResetSinkNode := asyncResetBroadcast)
+ val asyncResetSource = ClockSourceNode(Seq(ClockSourceParameters()))
+ asyncResetBroadcast := asyncResetSource
+
InModuleBody {
val clock_wire = Wire(Input(Clock()))
val reset_wire = GenerateReset(chiptop, clock_wire)
@@ -102,6 +108,11 @@ object ClockingSchemeGenerators {
o.reset := reset_wire
}
+ asyncResetSource.out.unzip._1.map { o =>
+ o.clock := false.B.asClock // async reset broadcast network does not provide a clock
+ o.reset := reset_wire
+ }
+
chiptop.harnessFunctions += ((th: HasHarnessSignalReferences) => {
clock_io := th.harnessClock
Nil })
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.