Columns: message (string, length 6–474) · diff (string, length 8–5.22k)
fancier run test stuff
@@ -4,14 +4,25 @@ if [ -z "$1" ]; then
     echo "Usage: ./run-test.sh <path to testcase>"
     exit 1
 elif [ "$1" = "all" ]; then
+    echo "" > ./build/tests/pass-fail.log
     find tests -name native_posix.keymap -exec dirname \{\} \; | xargs -l -P 4 ./run-test.sh
-    exit $?
+    err=$?
+    sort -k2 ./build/tests/pass-fail.log
+    exit $err
 fi

 testcase="$1"
 echo "Running $testcase:"
-west build -d build/$testcase -b native_posix -- -DZMK_CONFIG=$testcase > /dev/null
+west build -d build/$testcase -b native_posix -- -DZMK_CONFIG=$testcase > /dev/null 2>&1
+if [ $? -gt 0 ]; then
+    echo "FAIL: $testcase did not build" >> ./build/tests/pass-fail.log
+else
 ./build/$testcase/zephyr/zmk.exe | sed -e "s/.*> //" | sed -n -f $testcase/events.patterns > build/$testcase/keycode_events.log
-diff -au $testcase/keycode_events.snapshot build/$testcase/keycode_events.log
+    if [ $? -gt 0 ]; then
+        echo "FAIL: $testcase" >> ./build/tests/pass-fail.log
+    else
+        echo "PASS: $testcase" >> ./build/tests/pass-fail.log
+    fi
+fi
\ No newline at end of file
Changed default path for udev rules from /etc/udev/rules.d/ to /lib/udev/rules.d/
@@ -268,8 +268,8 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
     set(STLINK_MODPROBED_DIR "/etc/modprobe.d" CACHE PATH "modprobe.d directory")
     install(FILES ${CMAKE_SOURCE_DIR}/config/modprobe.d/stlink_v1.conf DESTINATION ${STLINK_MODPROBED_DIR})

-    ## Install udev rules files to /etc/udev/rules.d/ (explicitly hardcoded)
-    set(STLINK_UDEV_RULES_DIR "/etc/udev/rules.d" CACHE PATH "udev rules directory")
+    ## Install udev rules files to /lib/udev/rules.d/ (explicitly hardcoded)
+    set(STLINK_UDEV_RULES_DIR "/lib/udev/rules.d" CACHE PATH "udev rules directory")
     file(GLOB RULES_FILES ${CMAKE_SOURCE_DIR}/config/udev/rules.d/*.rules)
     install(FILES ${RULES_FILES} DESTINATION ${STLINK_UDEV_RULES_DIR})
 endif ()
doc: create a copy of the scenario file before making modifications Instruct the user to create a copy of the scenario XML file if modifications are needed. That modified copy should subsequently be used for building ACRN.
@@ -139,11 +139,6 @@ Update ACRN Hypervisor Image
      04:00.0 Ethernet controller: Intel Corporation I210 Gigabit Network Connection (rev 03)
              Subsystem: Intel Corporation I210 Gigabit Network Connection

-   .. note::
-      Verify the ``pci_devs`` defined for VM0 and VM1 in the
-      ``misc/config_tools/data/whl-ipc-i7/logical_partition.xml``
-      with the information reported by the ``lspci -vv`` command.
-
 #. Clone the ACRN source code and configure the build options.

    Refer to :ref:`getting-started-building` to set up the ACRN build
@@ -157,7 +152,12 @@ Update ACRN Hypervisor Image
       $ cd acrn-hypervisor
       $ git checkout v2.4

-   Build the ACRN hypervisor and ACPI binaries for pre-launched VMs with default xmls:
+#. Check the ``pci_devs`` sections in ``misc/config_tools/data/whl-ipc-i7/logical_partition.xml``
+   for each pre-launched VM to ensure you are using the right PCI device BDF information (as
+   reported by ``lspci -vv``). If you need to make changes to this file, create a copy of it and
+   use it subsequently when building ACRN (``SCENARIO=/path/to/newfile.xml``).
+
+#. Build the ACRN hypervisor and ACPI binaries for pre-launched VMs with default xmls:

    .. code-block:: none

@@ -179,9 +179,6 @@ Update ACRN Hypervisor Image

    The above command output should contain the ``GRUB`` keyword.

-#. Check or update the BDF information of the PCI devices for each
-   pre-launched VM; check it in the ``misc/config_tools/data/whl-ipc-i7/logical_partition.xml``.
-
 #. Copy the artifact ``acrn.bin``, ``ACPI_VM0.bin``, and ``ACPI_VM1.bin`` to the ``/boot`` directory on NVME:

    #. Copy ``acrn.bin``, ``ACPI_VM1.bin`` and ``ACPI_VM0.bin`` to a removable disk.
fix swig component and sudo pipe execution
@@ -87,19 +87,19 @@ sub_v8repo(){
 	sudo apt-get -y install software-properties-common python-software-properties
 	#v85.1
 	if [ $INSTALL_V8REPO51 = 1 ]; then
-		sudo echo "deb http://ppa.launchpad.net/pinepain/libv8-5.1/ubuntu wily main" > /etc/apt/sources.list.d/libv851.list
+		sudo sh -c "echo \"deb http://ppa.launchpad.net/pinepain/libv8-5.1/ubuntu wily main\" > /etc/apt/sources.list.d/libv851.list"
 		wget http://launchpadlibrarian.net/234847357/libicu55_55.1-7_amd64.deb
 		sudo dpkg -i libicu55_55.1-7_amd64.deb
-		sudo apt-get update
-		sudo apt-get -y install libv8-5.1-dev
+		sudo apt-get updatesudo apt-get -y --allow-unauthenticated
+		sudo apt-get -y --allow-unauthenticated install libv8-5.1-dev
 	fi
 	#v85.4
 	if [ $INSTALL_V8REPO54 = 1 ]; then
-		sudo echo "deb http://ppa.launchpad.net/pinepain/libv8-5.4/ubuntu xenial main" > /etc/apt/sources.list.d/libv854.list
+		sudo sh -c "echo \"deb http://ppa.launchpad.net/pinepain/libv8-5.4/ubuntu xenial main\" > /etc/apt/sources.list.d/libv854.list"
 		wget http://launchpadlibrarian.net/234847357/libicu55_55.1-7_amd64.deb
 		sudo dpkg -i libicu55_55.1-7_amd64.deb
 		sudo apt-get update
-		sudo apt-get -y install libv8-5.4-dev
+		sudo apt-get -y --allow-unauthenticated install libv8-5.4-dev
 	fi
 	#v85.8
 	if [ $INSTALL_V8REPO58 = 1 ]; then
@@ -178,6 +178,10 @@ sub_install(){
 	if [ $INSTALL_METACALL = 1 ]; then
 		sub_metacall
 	fi
+	if [ $INSTALL_SWIG = 1 ]; then
+		sub_swig
+	fi
+
 }

 sub_config(){
@@ -228,6 +232,10 @@ sub_config(){
 		echo "metacall selected"
 		INSTALL_METACALL=1
 	fi
+	if [ "$var" = 'swig' ]; then
+		echo "swig selected"
+		INSTALL_SWIG=1
+	fi
 	done
 }
cmake: respect TERM
 ##############################################################################
 function(message)
   list(GET ARGV 0 type)
+  if("$ENV{TERM}" STREQUAL "xterm-256color")
   string(ASCII 27 esc)
   set(red "${esc}[1;31m")
   set(yellow "${esc}[1;33m")
   set(reset "${esc}[m")
+  endif()
   if(type STREQUAL FATAL_ERROR OR type STREQUAL SEND_ERROR)
     list(REMOVE_AT ARGV 0)
     _message(${type} "${red}${ARGV}${reset}")
@@ -38,10 +40,11 @@ endfunction()
 # aligned config output
 ##############################################################################
 function(pr desc val)
+  if("$ENV{TERM}" STREQUAL "xterm-256color")
   string(ASCII 27 esc)
   set(reset "${esc}[m")
   set(cyan "${esc}[36m")
-
+  endif()
   string(LENGTH ${desc} len)
   while (len LESS 20)
     set (desc "${desc} ")
include/power/cometlake.h: Format with clang-format BRANCH=none TEST=none
 #define IN_PCH_SLP_S3_DEASSERTED POWER_SIGNAL_MASK(X86_SLP_S3_DEASSERTED)
 #define IN_PCH_SLP_S4_DEASSERTED POWER_SIGNAL_MASK(X86_SLP_S4_DEASSERTED)
-#define IN_ALL_PM_SLP_DEASSERTED (IN_PCH_SLP_S3_DEASSERTED | \
-				  IN_PCH_SLP_S4_DEASSERTED)
+#define IN_ALL_PM_SLP_DEASSERTED \
+	(IN_PCH_SLP_S3_DEASSERTED | IN_PCH_SLP_S4_DEASSERTED)

 #define IN_PGOOD_ALL_CORE POWER_SIGNAL_MASK(X86_RSMRST_L_PGOOD)
-#define IN_ALL_S0 (IN_PGOOD_ALL_CORE | IN_ALL_PM_SLP_DEASSERTED | \
+#define IN_ALL_S0 \
+	(IN_PGOOD_ALL_CORE | IN_ALL_PM_SLP_DEASSERTED | \
 	 PP5000_PGOOD_POWER_SIGNAL_MASK)

-#define CHIPSET_G3S5_POWERUP_SIGNAL (POWER_SIGNAL_MASK(X86_RSMRST_L_PGOOD) | \
+#define CHIPSET_G3S5_POWERUP_SIGNAL \
+	(POWER_SIGNAL_MASK(X86_RSMRST_L_PGOOD) | \
 	 POWER_SIGNAL_MASK(X86_PP5000_A_PGOOD))

 #define CHARGER_INITIALIZED_DELAY_MS 100
feat: Add platform of "Fibocom-L610" for chainmaker
@@ -107,10 +107,10 @@ const BCHAR *chainmaker_client_tls_cert =
 #endif
 #endif

-BCHAR *chainmaker_node_url = "152.136.217.46:12302";
-BCHAR *chainmaker_host_name = "common1.tls.org1.cmtestnet";
-BCHAR *chainmaker_chain_id = "chainmaker_testnet_chain";
-BCHAR *chainmaker_org_id = "org5.cmtestnet";
+BCHAR *chainmaker_node_url = "127.0.0.1:12302";
+BCHAR *chainmaker_host_name = "xxxxxx";
+BCHAR *chainmaker_chain_id = "xxxxxx";
+BCHAR *chainmaker_org_id = "xxxxxx";

 BoatChainmakerWallet *g_chaninmaker_wallet_ptr;
 BoatChainmakerWallet wallet_config = {0};
Update ticket_flags related macros
@@ -812,9 +812,12 @@ typedef struct mbedtls_ssl_flight_item mbedtls_ssl_flight_item;
 #if defined(MBEDTLS_SSL_PROTO_TLS1_3) && defined(MBEDTLS_SSL_SESSION_TICKETS)
 typedef uint8_t mbedtls_ssl_tls13_ticket_flags;

-#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_PSK_RESUMPTION ( 1u << 0 )
-#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_PSK_EPHEMERAL_RESUMPTION ( 1u << 1 )
-#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_EARLY_DATA ( 1u << 2 )
+#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_PSK_RESUMPTION \
+    MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK
+#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_PSK_EPHEMERAL_RESUMPTION \
+    MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_PSK_EPHEMERAL
+#define MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_EARLY_DATA \
+    MBEDTLS_SSL_TLS1_3_TICKET_ALLOW_PSK_EPHEMERAL_RESUMPTION << 1
 #endif /* MBEDTLS_SSL_PROTO_TLS1_3 && MBEDTLS_SSL_SESSION_TICKETS */

 /**
Add constants for binding animation flag.
@@ -1125,6 +1125,15 @@ typedef enum
     DIRECTION_RIGHT
 } e_direction;

+typedef enum
+{
+    BINDING_ANI_NONE = 0,
+    BINDING_ANI_ANIMATION_MATCH = 1,
+    BINDING_ANI_FRAME_MATCH = 2,
+    BINDING_ANI_ANIMATION_KILL = 4,
+    BINDING_ANI_FRAME_KILL = 6
+} e_binding_animation;
+
 typedef enum
 {
     /*
Examples: add check for output GRIB
 . ./include.sh

+tempGrib="out_surface_level.grib2"
+
+# Input and output GRIB files are hard coded in the example
 ${examples_dir}/c_grib_set_missing
-rm -f out_surface_level.grib2
+
+# Check the keys have been set to MISSING
+sf=`${tools_dir}/grib_get -p scaleFactorOfFirstFixedSurface $tempGrib`
+[ "$sf" = "MISSING" ]
+sf=`${tools_dir}/grib_get -p scaledValueOfFirstFixedSurface $tempGrib`
+[ "$sf" = "MISSING" ]
+
+rm -f $tempGrib
Check that result of print function is not NULL before using
@@ -201,6 +201,12 @@ DumpSupportCommand(

   pPlatformSupportFileName = CatSPrint(pDumpUserPath, L"_" FORMAT_STR L".txt", APPEND_TO_FILE_NAME);

+  if (NULL == pPlatformSupportFileName) {
+    ReturnCode = EFI_OUT_OF_RESOURCES;
+    PRINTER_SET_MSG(pPrinterCtx, ReturnCode, CLI_ERR_OUT_OF_MEMORY);
+    goto Finish;
+  }
+
   pPlatformSupportFilenameAsciiLength = StrLen(pPlatformSupportFileName) + 1;
   pPlatformSupportFilenameAsciiSize = pPlatformSupportFilenameAsciiLength * sizeof(CHAR8);
   if(NULL == pPlatformSupportFileName || NULL == (pPlatformSupportFilenameAscii = AllocatePool(pPlatformSupportFilenameAsciiSize)))
[agx] Update for Pixels API changes
@@ -16,7 +16,8 @@ use agx_definitions::{
     StrokeThickness, CHAR_HEIGHT, CHAR_WIDTH, FONT8X8,
 };
 use axle_rt::ExpectsEventField;
-use pixels::{Error, Pixels, SurfaceTexture};
+use pixels::wgpu::TextureFormat;
+use pixels::{Error, Pixels, PixelsBuilder, SurfaceTexture};
 use winit::event::{MouseButton, MouseScrollDelta};
 use winit::event_loop::{ControlFlow, EventLoop};
 use winit::window::{Window, WindowBuilder};
@@ -93,7 +94,7 @@ impl LikeLayerSlice for PixelLayerSlice {
             self.fill_rect(right, color, StrokeThickness::Filled);
         } else {
             let mut pixels = self.parent.borrow_mut();
-            let mut fb = pixels.get_frame();
+            let mut fb = pixels.get_frame_mut();
             // Construct the filled row of pixels that we can copy row-by-row
             let bytes_in_row = (rect.width() * bpp) as usize;
             let mut src_row_slice = vec![0; bytes_in_row];
@@ -133,7 +134,7 @@ impl LikeLayerSlice for PixelLayerSlice {
         let parent_bytes_per_row = parent_size.width * bpp;
         let bpp_multiple = Point::new(bpp, parent_bytes_per_row);
         let mut pixels = self.parent.borrow_mut();
-        let mut fb = pixels.get_frame();
+        let mut fb = pixels.get_frame_mut();
         let slice_origin_offset = self.frame.origin * bpp_multiple;
         //let off = slice_origin_offset + (loc.y * parent_bytes_per_row) + (loc.x * bpp);
         let point_offset = slice_origin_offset + (loc * bpp_multiple);
@@ -173,7 +174,7 @@ impl LikeLayerSlice for PixelLayerSlice {
         let parent_bytes_per_row = parent_size.width * bpp;
         let bpp_multiple = Point::new(bpp, parent_bytes_per_row);
         let mut pixels = self.parent.borrow_mut();
-        let mut fb = pixels.get_frame();
+        let mut fb = pixels.get_frame_mut();
         let slice_origin_offset = self.frame.origin * bpp_multiple;

         for y in 0..self.frame().height() {
@@ -242,11 +243,13 @@ impl PixelLayer {
         let window_size = window.inner_size();
         let surface_texture =
             SurfaceTexture::new(window_size.width as _, window_size.height as _, &window);
-        Pixels::new(
+        PixelsBuilder::new(
             size.width.try_into().unwrap(),
             size.height.try_into().unwrap(),
             surface_texture,
         )
+        .surface_texture_format(TextureFormat::Bgra8Unorm)
+        .build()
         .unwrap()
         };
         pixel_buffer.render().unwrap();
Don't call extend_max_stream_data callback if stream is half-closed local
@@ -3652,6 +3652,11 @@ static int conn_recv_max_stream_data(ngtcp2_conn *conn,
   if (strm->tx.max_offset < fr->max_stream_data) {
     strm->tx.max_offset = fr->max_stream_data;

+    /* Don't call callback if stream is half-closed local */
+    if (strm->flags & NGTCP2_STRM_FLAG_SHUT_WR) {
+      return 0;
+    }
+
    rv = conn_call_extend_max_stream_data(conn, strm, fr->stream_id, fr->max_stream_data);
    if (rv != 0) {
Fix minor leak in TLS host verification. sk_GENERAL_NAME_free() only freed the name stack, not the names in the stack. sk_GENERAL_NAME_pop_free() frees both. Due to aggressive connection reuse this leak was unlikely to be very noticeable.
@@ -232,7 +232,7 @@ tlsClientHostVerify(const String *host, X509 *certificate)
                 break;
             }
         }

-        sk_GENERAL_NAME_free(altNameStack);
+        sk_GENERAL_NAME_pop_free(altNameStack, GENERAL_NAME_free);
     }

     // If no subject alternative name was found then check the common name.  Per RFC 2818 and RFC 6125, if the subjectAltName
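For readers unfamiliar with the OpenSSL stack API, the distinction matters because sk_GENERAL_NAME_free() releases only the stack container while the GENERAL_NAME entries it holds stay allocated. Below is a minimal, hypothetical C++ sketch of the leak-free pattern; the function and variable names are illustrative and not taken from pgBackRest.

#include <openssl/x509v3.h>

// Walk a certificate's subjectAltName entries, then free both the
// stack and the GENERAL_NAME elements it owns.
static void listAltNames(X509 *certificate)
{
    GENERAL_NAMES *altNames = (GENERAL_NAMES *)X509_get_ext_d2i(
        certificate, NID_subject_alt_name, NULL, NULL);

    if (altNames != NULL)
    {
        for (int i = 0; i < sk_GENERAL_NAME_num(altNames); i++)
        {
            GENERAL_NAME *name = sk_GENERAL_NAME_value(altNames, i);
            (void)name; // inspect name->type / name->d.dNSName here
        }

        // pop_free frees each GENERAL_NAME and then the stack itself,
        // whereas sk_GENERAL_NAME_free() would leak the elements.
        sk_GENERAL_NAME_pop_free(altNames, GENERAL_NAME_free);
    }
}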
ci: allow failure for code quality report
@@ -58,7 +58,8 @@ code_quality_check:
   extends:
     - .sonar_scan_template
     - .rules:patterns:static-code-analysis-preview
-  allow_failure: true
+  allow_failure: true # since now it's using exit code to indicate the code analysis result,
+                      # we don't want to block ci when critical issues founded
   script:
     - export CI_MERGE_REQUEST_COMMITS=$(python ${CI_PROJECT_DIR}/tools/ci/ci_get_mr_info.py commits ${CI_COMMIT_REF_NAME} | tr '\n' ',')
     # test if this branch have merge request, if not, exit 0
@@ -88,6 +89,8 @@ code_quality_report:
   extends:
     - .sonar_scan_template
     - .rules:protected
+  allow_failure: true # since now it's using exit code to indicate the code analysis result,
+                      # we don't want to block ci when critical issues founded
   script:
     - sonar-scanner -Dsonar.branch.name=$CI_COMMIT_REF_NAME
check for NULL in ksClear and ksClose
@@ -474,10 +474,11 @@ int ksDel (KeySet * ks)
  * @param ks the keyset object to work with
  * @see ksAppendKey() for details on how keys are inserted in KeySets
  * @retval 0 on success
- * @retval -1 on failure (memory)
+ * @retval -1 on failure (memory) or ks == NULL
  */
 int ksClear (KeySet * ks)
 {
+	if (ks == NULL) return -1;
 	ksClose (ks);
 	// ks->array empty now

@@ -2436,9 +2437,12 @@ int ksInit (KeySet * ks)
 *
 * @see ksDel(), ksNew(), keyInit()
 * @retval 0 on success
+ * @retval -1 on ks == NULL
 */
 int ksClose (KeySet * ks)
 {
+	if (ks == NULL) return -1;
+
 	Key * k;

 	ksRewind (ks);
Updater: Add gdi32 DelayLoadDLLs
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|ARM64'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|ARM64'">
     <Link>
       <AdditionalDependencies>bcrypt.lib;%(AdditionalDependencies)</AdditionalDependencies>
-      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
+      <DelayLoadDLLs>comctl32.dll;bcrypt.dll;gdi32.dll;shell32.dll;user32.dll;version.dll;%(DelayLoadDLLs)</DelayLoadDLLs>
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
Added R and Rcpp in travis for linux.
@@ -17,7 +17,7 @@ before_install:
   - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew tap homebrew/php; brew update; fi

 install:
-  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get install swig3.0 cmake python-dev ruby-dev php-dev liblua5.3-dev octave-pkg-dev openjdk-8-jdk -y; fi
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then sudo apt-get install swig3.0 cmake python-dev ruby-dev php-dev liblua5.3-dev octave-pkg-dev openjdk-8-jdk r-base r-cran-rcpp -y; fi
   - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install swig php71 lua; fi

 before_script:
notifications: clear before applying unreads Fixes urbit/landscape#646
@@ -204,6 +204,7 @@ function unreadEach(json: any, state: HarkState): HarkState {
 function unreads(json: any, state: HarkState): HarkState {
   const data = _.get(json, 'unreads');
   if(data) {
+    state = clearState(state);
     data.forEach(({ index, stats }) => {
       const { unreads, notifications, last } = stats;
       updateNotificationStats(state, index, 'last', () => last);
use CommitDate on last commit for each branch as the reference date
@@ -46,8 +46,8 @@ function find_newer_branches() {
     branch=`echo $curline | cut -d" " -f1 | xargs`
     shortcommit=`echo $curline | cut -d" " -f2`
     if [[ "$shortcommit" != "->" ]] ; then
-      author=`git show $shortcommit | grep "^Author:" | cut -d":" -f2- | cut -d"<" -f1 | xargs`
-      branchdate=`git show $shortcommit | grep "^Date:" | cut -d":" -f2- | xargs | cut -d" " -f2- | cut -d"-" -f1 `
+      author=`git show $shortcommit --pretty=fuller | grep "^Commit:" | cut -d":" -f2- | cut -d"<" -f1 | xargs`
+      branchdate=`git show $shortcommit --pretty=fuller | grep "^CommitDate:" | cut -d":" -f2- | xargs | cut -d" " -f2- | cut -d"-" -f1 `
       get_monthnumber $branchdate
       branchday=`echo $branchdate | cut -d" " -f2`
       branchyear=`echo $branchdate | cut -d" " -f4`
Supported linking with -lrt on BSD systems. OpenBSD lacks support for librt.
@@ -58,7 +58,7 @@ case "$NXT_SYSTEM" in
         NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"

         NXT_LIBM="-lm"
-        NXT_LIBS="$NXT_PTHREAD"
+        NXT_LIBS="$NXT_LIBRT $NXT_PTHREAD"
     ;;

     SunOS)
@@ -144,7 +144,7 @@ case "$NXT_SYSTEM" in
         NXT_LIB_SHARED_LOCAL="$NXT_BUILD_DIR/libnxt.so"

         NXT_LIBM="-lm"
-        NXT_LIBS="$NXT_PTHREAD"
+        NXT_LIBS="$NXT_LIBRT $NXT_PTHREAD"
     ;;

     OpenBSD)
Change from jerk model to std noise model
@@ -531,17 +531,23 @@ void survive_kalman_tracker_process_noise(const struct SurviveKalmanTracker_Para
 	// http://wiki.dmdevelopment.ru/wiki/Download/Books/Digitalimageprocessing/%D0%9D%D0%BE%D0%B2%D0%B0%D1%8F%20%D0%BF%D0%BE%D0%B4%D0%B1%D0%BE%D1%80%D0%BA%D0%B0%20%D0%BA%D0%BD%D0%B8%D0%B3%20%D0%BF%D0%BE%20%D1%86%D0%B8%D1%84%D1%80%D0%BE%D0%B2%D0%BE%D0%B9%20%D0%BE%D0%B1%D1%80%D0%B0%D0%B1%D0%BE%D1%82%D0%BA%D0%B5%20%D1%81%D0%B8%D0%B3%D0%BD%D0%B0%D0%BB%D0%BE%D0%B2/Estimation%20with%20Applications%20to%20Tracking%20and%20Navigation/[email protected]
 	// We mix three order models here based on tuning variables.
-	FLT Q_acc[] = {t5 / 20., t4 / 8., t3 / 6., t3 / 3., t2 / 2., t};
+	FLT Q_acc[] = {
+		t4 / 4.,
+		t3 / 2., t2,
+		t2 / 2., t, 1
+	};
+	FLT Q_vel[] = {
+		t3 / 3.,
+		t2 / 2., t,
+	};

-	FLT Q_vel[] = {t3 / 3., t2 / 2., t};
+	FLT p_p = params->process_weight_acc * Q_acc[0] + params->process_weight_vel * Q_vel[0] + params->process_weight_pos * t;
+	FLT p_v = params->process_weight_acc * Q_acc[1] + params->process_weight_vel * Q_vel[1];
+	FLT p_a = params->process_weight_acc * Q_acc[3];

-	FLT q_p = params->process_weight_acc;
-	FLT p_p = q_p * Q_acc[0] + params->process_weight_vel * Q_vel[0] + params->process_weight_pos * t;
-	FLT p_v = q_p * Q_acc[1] + params->process_weight_vel * Q_vel[1];
-	FLT p_a = q_p * Q_acc[2];
-	FLT v_v = q_p * Q_acc[3] + params->process_weight_vel * Q_vel[2];
-	FLT v_a = q_p * Q_acc[4];
-	FLT a_a = q_p * Q_acc[5];
+	FLT v_v = params->process_weight_acc * Q_acc[2] + params->process_weight_vel * Q_vel[2];
+	FLT v_a = params->process_weight_acc * Q_acc[4];
+	FLT a_a = params->process_weight_acc * Q_acc[5];

 	/* ================== Rotational ==============================
Remove hand-written edit to generated C code.
@@ -252,8 +252,6 @@ puffs_flate_status puffs_flate_decoder_decode(puffs_flate_decoder* self,

 // C HEADER ENDS HERE.

-#include "../../../script/puffs-flate-decoder-decode-huffman.c"
-
 #ifndef PUFFS_BASE_IMPL_H
 #define PUFFS_BASE_IMPL_H

@@ -582,7 +580,7 @@ puffs_flate_status puffs_flate_decoder_decode(puffs_flate_decoder* self,
       }
     }
   }
-  status = c_puffs_flate_decoder_decode_huffman(self, a_dst, a_src);
+  status = puffs_flate_decoder_decode_huffman(self, a_dst, a_src);
   if (a_src.buf) {
     b_rptr_src = a_src.buf->ptr + a_src.buf->ri;
     size_t len = a_src.buf->wi - a_src.buf->ri;
Updated Readme with a note to run tests in parallel to avoid answer file mismatches
@@ -15,8 +15,9 @@ Before building the code of feature tests part, just make sure your compiler sup
 1. Make sure HAWQ is running correctly. If not, `init` or `start` HAWQ at first. Note please don't set locale related arguments for hawq init.
 2. Load environment configuration by running `source $INSTALL_PREFIX/greenplum_path.sh`.
 3. Load hdfs configuration. For example, `export HADOOP_HOME=/Users/wuhong/hadoop-2.7.2 && export PATH=${PATH}:${HADOOP_HOME}/bin`. Since some test cases need `hdfs` and `hadoop` command, just ensure these commands work before running. Otherwise you will get failure.
-4. Run the cases with`./parallel-run-feature-test.sh 8 ./feature-test`(in this case 8 threads in parallel), you could use `--gtest_filter` option to filter test cases(both positive and negative patterns are supported). Please see more options by running `./feature-test --help`.
-5.You can also run cases with `./parallel-run-feature-test.sh 8 ./feature-test --gtest_schedule` (eg. --gtest_schedule=./full_tests.txt) if you want to run cases in both parallel way and serial way.The schedule file sample is full_tests.txt which stays in the same directory.
+4. Run the cases with`./parallel-run-feature-test.sh 8 ./feature-test`(in this case 8 threads in parallel), you could use `--gtest_filter` option to filter test cases(both positive and negative patterns are supported). Please see more options by running `./feature-test --help`. You can also run cases with `./parallel-run-feature-test.sh 8 ./feature-test --gtest_schedule` (eg. --gtest_schedule=./full_tests.txt) if you want to run cases in both parallel way and serial way.The schedule file sample is full_tests.txt which stays in the same directory.
+
+**NOTE**: To ensure all answer files(files with extension .ans) are uniform in their output format, certain properties are set in parallel-run-feature-test.sh. Therefore running tests direclty using `./feature-test` might cause some tests to fail.

 # Development
 In contribution to HAWQ, we suggest developers submitting feature tests related to your feature development. In writting a featurte test, you need to write a cpp file inside corresponding folders. There are two recommended way to write this cpp file:
roller: simplify json for dns in get naive state
     %-  pairs
     :~  ['points' (points (tap:orm:naive points.state))]
         ['operators' (operators operators.state)]
-        ['dns' a+(turn dns.state (cork same (lead %s)))]
+        ['dns' a+(turn dns.state (lead %s))]
     ==
   ::
   ++  operators
tests: internal: input_chunk: initialize event loop
@@ -407,8 +407,14 @@ void flb_test_input_chunk_fs_chunks_size_real()
     struct cio_ctx *cio;
     msgpack_sbuffer mp_sbuf;
     char buf[262144];
+    struct mk_event_loop *evl;

     cfg = flb_config_init();
+    evl = mk_event_loop_create(256);
+
+    TEST_CHECK(evl != NULL);
+    cfg->evl = evl;
+
     flb_log_create(cfg, FLB_LOG_STDERR, FLB_LOG_DEBUG, NULL);

     i_ins = flb_input_new(cfg, "dummy", NULL, FLB_TRUE);
config.in: use swaynag -B instead of -b '-b' spawns a terminal, which is unnecessary for this use case
@@ -82,7 +82,7 @@ output * bg @datadir@/backgrounds/sway/Sway_Wallpaper_Blue_1920x1080.png fill
     bindsym $mod+Shift+c reload

     # Exit sway (logs you out of your Wayland session)
-    bindsym $mod+Shift+e exec swaynag -t warning -m 'You pressed the exit shortcut. Do you really want to exit sway? This will end your Wayland session.' -b 'Yes, exit sway' 'swaymsg exit'
+    bindsym $mod+Shift+e exec swaynag -t warning -m 'You pressed the exit shortcut. Do you really want to exit sway? This will end your Wayland session.' -B 'Yes, exit sway' 'swaymsg exit'
 #
 # Moving around:
 #
Fix missing comment.
@@ -42,18 +42,18 @@ $CMAKE_FLAGS = "$CMAKE_FLAGS -G ""$GENERATOR"""

 ###############################################################################
-### Install dependencies.
+### Install dependencies and setup system.
 ###############################################################################
 choco install -y -r swig --version 3.0.9
 choco install -y -r lua53
 if ($Env:COMPILER -eq "mingw") {
-	sh.exe must not be in PATH when compiling with MinGW.
-	Rename-Item -Path "C:\Program Files\Git\usr\bin\sh.exe" -NewName "sh2.exe"
 	if ($Env:PLATFORM -eq "Win64") {
 		choco install -y -r mingw
 	} else {
 		$Env:Path += ";C:\MinGW\bin"
 	}
+	# sh.exe must not be in PATH when compiling with MinGW.
+	Rename-Item -Path "C:\Program Files\Git\usr\bin\sh.exe" -NewName "sh2.exe"
 }
 refreshenv
@@ -66,7 +66,7 @@ $CMAKE_FLAGS = "$CMAKE_FLAGS -DTINYSPLINE_ENABLE_JAVA=True"

 ###############################################################################
-### Compile targets.
+### Compile.
 ###############################################################################
 mkdir $BUILD_DIR_FIXED
 pushd $BUILD_DIR_FIXED
gh-actions: use grep instead of git to search for trailing whitespace The git version would only look at the last patch, which means whitespace could (and did) sneak in as part of multi-patch changes. This also removes the whitespace that snuck in.
@@ -10,7 +10,7 @@ jobs:
       with:
         fetch-depth: 2
     - name: Trailing whitespace
-      run: git diff --check HEAD^
+      run: find simde/ -name '*.c' -o -name '*.h' -exec grep -nP '\s+$' {} + && exit 1 || exit 0
     - name: Tabs
       run: find simde/ -name '*.c' -o -name '*.h' -exec grep -nP '\t' {} + && exit 1 || exit 0
     # s/8/16/ will result in this if the input is x86.
chat: no s3, no blank space fixes urbit/landscape#892
@@ -178,7 +178,14 @@ export class ChatInput extends Component<ChatInputProps, ChatInputState> {
           changeEvent={this.eventHandler}
           placeholder='Message...'
         />
-        <Box mx='12px' flexShrink={0} height='16px' width='16px' flexBasis='16px'>
+        <Box
+          mx='12px'
+          mr={this.props.canUpload ? '12px' : 3}
+          flexShrink={0}
+          height='16px'
+          width='16px'
+          flexBasis='16px'
+        >
           <Icon
             icon='Dojo'
             cursor='pointer'
@@ -186,9 +193,16 @@ export class ChatInput extends Component<ChatInputProps, ChatInputState> {
             color={state.inCodeMode ? 'blue' : 'black'}
           />
         </Box>
-        <Box ml='12px' mr={3} flexShrink={0} height='16px' width='16px' flexBasis='16px'>
         {this.props.canUpload ? (
-          this.props.uploading ? (
+          <Box
+            ml='12px'
+            mr={3}
+            flexShrink={0}
+            height='16px'
+            width='16px'
+            flexBasis='16px'
+          >
+            {this.props.uploading ? (
             <LoadingSpinner />
           ) : (
             <Icon
@@ -200,9 +214,9 @@ export class ChatInput extends Component<ChatInputProps, ChatInputState> {
                 this.props.promptUpload().then(this.uploadSuccess)
               }
             />
-          )
-        ) : null}
+          )}
         </Box>
+        ) : null}
        {MOBILE_BROWSER_REGEX.test(navigator.userAgent) ?
          <Box ml={2}
Bumping patch version to indicate correct upcoming release version
@@ -11,7 +11,7 @@ Feel free to copy, use and enjoy according to the license provided.
 #define H_FACIL_H
 #define FACIL_VERSION_MAJOR 0
 #define FACIL_VERSION_MINOR 5
-#define FACIL_VERSION_PATCH 2
+#define FACIL_VERSION_PATCH 3

 #ifndef FACIL_PRINT_STATE
 /**
Update community and documentation links for GitHub templates
 blank_issues_enabled: true
 contact_links:
   - name: Luos Community Support
-    url: https://community.luos.io
+    url: https://luos.io/community
     about: Please ask and answer questions about Luos here.
   - name: Luos Documentation
-    url: https://docs.luos.io/
+    url: https://luos.io/docs
     about: Please read Luos documentation here.
OcBlitLib: Fix off-by-one error and a typo in BlitBuffer270
@@ -224,6 +224,7 @@ BlitLibBufferToVideo270 (
   UINT32 *Destination;
   UINT32 *SourceWalker;
   UINT32 *DestinationWalker;
+  UINTN LastX;
   UINTN IndexX;
   UINTN PixelsPerScanLine;
   UINT32 Uint32;
@@ -239,8 +240,9 @@ BlitLibBufferToVideo270 (
     while (Height > 0) {
       DestinationWalker = Destination;
       SourceWalker = Source;
-      for (IndexX = 0; IndexX < Width; IndexX++) {
-        DestinationWalker[(Configure->Height - DestinationX - IndexX) * PixelsPerScanLine] = *SourceWalker++;
+      LastX = Configure->Height - DestinationX - 1;
+      for (IndexX = LastX; IndexX > LastX - Width; IndexX--) {
+        DestinationWalker[IndexX * PixelsPerScanLine] = *SourceWalker++;
       }
       Source += DeltaPixels;
       Destination++;
@@ -250,9 +252,10 @@ BlitLibBufferToVideo270 (
     while (Height > 0) {
       DestinationWalker = Destination;
       SourceWalker = Source;
-      for (IndexX = 0; IndexX < Width; IndexX++) {
+      LastX = Configure->Height - DestinationX - 1;
+      for (IndexX = LastX; IndexX > LastX - Width; IndexX--) {
         Uint32 = *SourceWalker++;
-        DestinationWalker[(DestinationX + IndexX) * PixelsPerScanLine] =
+        DestinationWalker[IndexX * PixelsPerScanLine] =
           (UINT32) (
             (((Uint32 << Configure->PixelShl[0]) >> Configure->PixelShr[0]) & Configure->PixelMasks.RedMask) |
lv_indev.c have LV_GROUP_KEY_ESC send an LV_EVENT_CANCEL signal
@@ -415,13 +415,20 @@ static void indev_keypad_proc(lv_indev_t * i, lv_indev_data_t * data)

         /*Simulate a press on the object if ENTER was pressed*/
         if(data->key == LV_GROUP_KEY_ENTER) {
+            /*Send the ENTER as a normal KEY*/
+            lv_group_send_data(g, LV_GROUP_KEY_ENTER);
+
             focused->signal_cb(focused, LV_SIGNAL_PRESSED, NULL);
             if(i->proc.reset_query) return; /*The object might be deleted*/
             lv_event_send(focused, LV_EVENT_PRESSED, NULL);
             if(i->proc.reset_query) return; /*The object might be deleted*/
+        }
+        else if(data->key == LV_GROUP_KEY_ESC) {
+            /*Send the ESC as a normal KEY*/
+            lv_group_send_data(g, LV_GROUP_KEY_ESC);

-            /*Send the ENTER as a normal KEY*/
-            lv_group_send_data(g, LV_GROUP_KEY_ENTER);
+            lv_event_send(focused, LV_EVENT_CANCEL, NULL);
+            if(i->proc.reset_query) return; /*The object might be deleted*/
         }
         /*Move the focus on NEXT*/
         else if(data->key == LV_GROUP_KEY_NEXT) {
@@ -435,7 +442,7 @@ static void indev_keypad_proc(lv_indev_t * i, lv_indev_data_t * data)
             lv_group_focus_prev(g);
             if(i->proc.reset_query) return; /*The object might be deleted*/
         }
-        /*Just send other keys to the object (e.g. 'A' or `LV_GORUP_KEY_RIGHT)*/
+        /*Just send other keys to the object (e.g. 'A' or `LV_GROUP_KEY_RIGHT`)*/
         else {
             lv_group_send_data(g, data->key);
         }
Add some missing sigalgs The SHA1 sigalgs were inadvertently missed off in the sigalgs refactor.
@@ -735,6 +735,10 @@ static const unsigned int tls12_sigalgs[] = {
     TLSEXT_SIGALG_rsa_pkcs1_sha384,
     TLSEXT_SIGALG_rsa_pkcs1_sha512,

+    TLSEXT_SIGALG_ecdsa_sha1,
+    TLSEXT_SIGALG_rsa_pkcs1_sha1,
+    TLSEXT_SIGALG_dsa_sha1,
+
     TLSEXT_SIGALG_dsa_sha256,
     TLSEXT_SIGALG_dsa_sha384,
     TLSEXT_SIGALG_dsa_sha512
remove var. name in catch statement When catching 'not_found', simply return 0. There is no need to process the exception variable.
@@ -200,7 +200,7 @@ uint64_t fpga_cache_counters::read_counter(fpga_cache_counters::ctr_t c)
     if (counter) {
       return counter->read64();
     }
-  }catch(not_found &err) {
+  }catch(not_found &) {
     return 0;
   }

@@ -360,7 +360,7 @@ uint64_t fpga_fabric_counters::read_counter(fpga_fabric_counters::ctr_t c)
     if (counter) {
       return counter->read64();
     }
-  }catch(not_found &err) {
+  }catch(not_found &) {
     return 0;
   }
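For context, catching by type without naming the exception object is standard C++ and silences unused-variable diagnostics when the handler never touches the exception. A small self-contained sketch (the not_found type here is a stand-in, not the OPAE class):

#include <cstdint>
#include <stdexcept>

struct not_found : std::runtime_error {
    using std::runtime_error::runtime_error;
};

static uint64_t read_or_zero()
{
    try {
        throw not_found("counter missing");
    } catch (not_found &) {   // type-only handler: no unused variable to warn about
        return 0;
    }
}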
apps/readline: check whether CONFIG_EOL_IS_EITHER_CRLF is defined * We should check whether CONFIG_EOL_IS_EITHER_CRLF is defined, instead of checking its value.
@@ -247,7 +247,7 @@ ssize_t readline_common(FAR struct rl_common_s *vtbl, FAR char *buf, int buflen)
       else if (ch == '\n')
 #elif defined(CONFIG_EOL_IS_CR)
       else if (ch == '\r')
-#elif CONFIG_EOL_IS_EITHER_CRLF
+#elif defined(CONFIG_EOL_IS_EITHER_CRLF)
       else if (ch == '\n' || ch == '\r')
 #endif
         {
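To illustrate the difference the commit describes: `#elif SOME_OPTION` uses the option's value, so an undefined macro silently evaluates to 0 and a macro defined with no value is a preprocessing error, whereas `#elif defined(SOME_OPTION)` is true whenever the option is set at all. A hedged C++ sketch, with placeholder option handling rather than the NuttX configuration system:

#define CONFIG_EOL_IS_EITHER_CRLF   // defined, but intentionally valueless

#if defined(CONFIG_EOL_IS_CR)
static const char eol_mode[] = "cr";
#elif defined(CONFIG_EOL_IS_EITHER_CRLF)   // robust even for a valueless define
static const char eol_mode[] = "either";
#else
static const char eol_mode[] = "lf";
#endif

// With "#elif CONFIG_EOL_IS_EITHER_CRLF" instead, the valueless define above
// would leave the #elif with an empty expression and fail to preprocess.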
examples/socket: Fix for send() failure handling. Closes
@@ -53,6 +53,8 @@ static void do_retransmit(const int sock)
             int written = send(sock, rx_buffer + (len - to_write), to_write, 0);
             if (written < 0) {
                 ESP_LOGE(TAG, "Error occurred during sending: errno %d", errno);
+                // Failed to retransmit, giving up
+                return;
             }
             to_write -= written;
         }
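The early return matters because the loop subtracts `written` from `to_write`; a negative return from send() would otherwise inflate the remaining-byte count and keep the loop spinning. A generic, hedged sketch of the usual send-all pattern (POSIX sockets, not the ESP-IDF example itself):

#include <sys/types.h>
#include <sys/socket.h>
#include <cstddef>

// Returns true if every byte was sent, false on the first send() error.
static bool send_all(int sock, const char *buf, size_t len)
{
    size_t to_write = len;
    while (to_write > 0) {
        ssize_t written = send(sock, buf + (len - to_write), to_write, 0);
        if (written < 0) {
            return false;   // give up instead of folding a negative count in
        }
        to_write -= static_cast<size_t>(written);
    }
    return true;
}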
fix memory leak in nlop_permute_inputs
@@ -283,7 +283,7 @@ struct nlop_s* nlop_permute_inputs(const struct nlop_s* x, int I2, const int perm
 	for (int i = 0; i < II + OO; i++)
 		perm2[i] = (i < OO) ? i : (OO + perm[i - OO]);

-	n->op = operator_permute(operator_ref(x->op), II + OO, perm2);
+	n->op = operator_permute(x->op, II + OO, perm2);

 	return PTR_PASS(n);
 }
GRE tunnel key should use fib_index instead of fib_id Follow up fix - vl_api_gre_add_del_tunnel_t_handler should pass outer_fib_id from API message to vnet_gre_add_del_tunnel() and not convert it to fib_index, since vnet_gre_add_del_tunnel() already perform the lookup to get fib_index from fib_id.
@@ -54,19 +54,8 @@ static void vl_api_gre_add_del_tunnel_t_handler
   vl_api_gre_add_del_tunnel_reply_t *rmp;
   int rv = 0;
   vnet_gre_add_del_tunnel_args_t _a, *a = &_a;
-  u32 outer_fib_id;
-  u32 p;
   u32 sw_if_index = ~0;

-  p = fib_table_find (!mp->is_ipv6 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6,
-		      ntohl (mp->outer_fib_id));
-  if (p == ~0)
-    {
-      rv = VNET_API_ERROR_NO_SUCH_FIB;
-      goto out;
-    }
-  outer_fib_id = p;
-
   /* Check src & dst are different */
   if ((mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 16) == 0) ||
       (!mp->is_ipv6 && memcmp (mp->src_address, mp->dst_address, 4) == 0))
@@ -92,7 +81,7 @@ static void vl_api_gre_add_del_tunnel_t_handler
       clib_memcpy (&(a->dst.ip6), mp->dst_address, 16);
     }

-  a->outer_fib_id = outer_fib_id;
+  a->outer_fib_id = ntohl (mp->outer_fib_id);
   rv = vnet_gre_add_del_tunnel (a, &sw_if_index);

 out:
Macro can trigger a MISRA violation due to redefinition of variables before their use
@@ -139,15 +139,15 @@ bool dist_to_meas_check(int val, int val_last, struct sample_t *val_meas,
   const int MAX_RATE_UP, const int MAX_RATE_DOWN, const int MAX_ERROR) {

   // *** val rate limit check ***
-  int highest_allowed_val = MAX(val_last, 0) + MAX_RATE_UP;
-  int lowest_allowed_val = MIN(val_last, 0) - MAX_RATE_UP;
+  int highest_allowed_rl = MAX(val_last, 0) + MAX_RATE_UP;
+  int lowest_allowed_rl = MIN(val_last, 0) - MAX_RATE_UP;

   // if we've exceeded the meas val, we must start moving toward 0
-  highest_allowed_val = MIN(highest_allowed_val, MAX(val_last - MAX_RATE_DOWN, MAX(val_meas->max, 0) + MAX_ERROR));
-  lowest_allowed_val = MAX(lowest_allowed_val, MIN(val_last + MAX_RATE_DOWN, MIN(val_meas->min, 0) - MAX_ERROR));
+  int highest_allowed = MIN(highest_allowed_rl, MAX(val_last - MAX_RATE_DOWN, MAX(val_meas->max, 0) + MAX_ERROR));
+  int lowest_allowed = MAX(lowest_allowed_rl, MIN(val_last + MAX_RATE_DOWN, MIN(val_meas->min, 0) - MAX_ERROR));

   // check for violation
-  return (val < lowest_allowed_val) || (val > highest_allowed_val);
+  return (val < lowest_allowed) || (val > highest_allowed);
 }

 // check that commanded value isn't fighting against driver
@@ -155,16 +155,16 @@ bool driver_limit_check(int val, int val_last, struct sample_t *val_driver,
   const int MAX, const int MAX_RATE_UP, const int MAX_RATE_DOWN,
   const int MAX_ALLOWANCE, const int DRIVER_FACTOR) {

-  int highest_allowed = MAX(val_last, 0) + MAX_RATE_UP;
-  int lowest_allowed = MIN(val_last, 0) - MAX_RATE_UP;
+  int highest_allowed_rl = MAX(val_last, 0) + MAX_RATE_UP;
+  int lowest_allowed_rl = MIN(val_last, 0) - MAX_RATE_UP;

   int driver_max_limit = MAX + (MAX_ALLOWANCE + val_driver->max) * DRIVER_FACTOR;
   int driver_min_limit = -MAX + (-MAX_ALLOWANCE + val_driver->min) * DRIVER_FACTOR;

   // if we've exceeded the applied torque, we must start moving toward 0
-  highest_allowed = MIN(highest_allowed, MAX(val_last - MAX_RATE_DOWN,
+  int highest_allowed = MIN(highest_allowed_rl, MAX(val_last - MAX_RATE_DOWN,
                                              MAX(driver_max_limit, 0)));
-  lowest_allowed = MAX(lowest_allowed, MIN(val_last + MAX_RATE_DOWN,
+  int lowest_allowed = MAX(lowest_allowed_rl, MIN(val_last + MAX_RATE_DOWN,
                                            MIN(driver_min_limit, 0)));

   // check for violation
fix flag for mapsmobi
@@ -1157,6 +1157,8 @@ class GnuCompiler(Compiler):
                 self.c_foptions.append('$CLANG_ALIGNED_ALLOCATION_FLAG')
             else:
                 self.c_warnings.append('-Wno-aligned-allocation-unavailable')
+        if preset('MAPSMOBI_BUILD_TARGET') and self.target.is_arm:
+            self.c_foptions.append('-fembed-bitcode')

         if self.target.is_android:
             self.c_flags.append('-I{}/include/llvm-libc++abi/include'.format(tc.name_marker))
fixed missing variable in pyccl test
@@ -141,13 +141,13 @@ def check_massfunc(cosmo):
     mhalo_arr = np.array([1e11, 1e12, 1e13, 1e14, 1e15, 1e16])

     # massfunc
-    assert_( all_finite(ccl.massfunc(cosmo, mhalo_scl, a)) )
-    assert_( all_finite(ccl.massfunc(cosmo, mhalo_lst, a)) )
-    assert_( all_finite(ccl.massfunc(cosmo, mhalo_arr, a)) )
+    assert_( all_finite(ccl.massfunc(cosmo, mhalo_scl, a, odelta)) )
+    assert_( all_finite(ccl.massfunc(cosmo, mhalo_lst, a, odelta)) )
+    assert_( all_finite(ccl.massfunc(cosmo, mhalo_arr, a, odelta)) )

-    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_scl, a_arr)
-    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_lst, a_arr)
-    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_arr, a_arr)
+    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_scl, a_arr, odelta)
+    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_lst, a_arr, odelta)
+    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_arr, a_arr, odelta)

     # massfunc_m2r
     assert_( all_finite(ccl.massfunc_m2r(cosmo, mhalo_scl)) )
[add] Bump version for test release
@@ -31,7 +31,7 @@ APP_LOAD_PARAMS += --path "1517992542'/1101353413'"
 APPVERSION_M=1
 APPVERSION_N=9
 APPVERSION_P=17
-APPVERSION=$(APPVERSION_M).$(APPVERSION_N).$(APPVERSION_P)
+APPVERSION=$(APPVERSION_M).$(APPVERSION_N).$(APPVERSION_P)-exchangefix

 APP_LOAD_FLAGS= --appFlags 0x240 --dep Ethereum:$(APPVERSION)

 ifeq ($(CHAIN),)
Remove dummy vendorID overwrite in init_gpu_stats() We have already enforced that a few lines above just after parsing the vendor node.
@@ -657,8 +657,6 @@ void init_gpu_stats(uint32_t& vendorID, uint32_t reported_deviceID, overlay_para
             amdgpu.temp = fopen((path + dir + "/temp1_input").c_str(), "r");
          if (!amdgpu.power_usage)
             amdgpu.power_usage = fopen((path + dir + "/power1_average").c_str(), "r");
-
-         vendorID = 0x1002;
       }
       break;
    }
 }
core: fall back to "idle" debugging method when debugger can not be found This is done only when no specific debugger type is specified by the user.
@@ -325,6 +325,10 @@ static bxf_instance *run_test(struct run_next_context *ctx,
             default: break;
         }
     }
+
+    if (criterion_options.debug == CR_DBG_NATIVE)
+        BXF_DBG_ENABLE_FALLBACK(debugger);
+
     if (!debugger)
         cr_panic("Could not choose the debugger server for an "
                 "unknown compiler");
@@ -347,7 +351,7 @@ static bxf_instance *run_test(struct run_next_context *ctx,
     if (rc < 0) {
         if (rc == -ENOENT && criterion_options.debug) {
             const char *dbgname = "<unknown>";
-            switch (sp.debug.debugger) {
+            switch (BXF_DBG_GET_DEBUGGER(sp.debug.debugger)) {
                 case BXF_DBG_GDB: dbgname = "gdbserver"; break;
                 case BXF_DBG_LLDB: dbgname = "lldb-server"; break;
                 case BXF_DBG_WINDBG: dbgname = "windbg"; break;
@@ -364,8 +368,12 @@ static bxf_instance *run_test(struct run_next_context *ctx,

     bxf_context_term(inst_ctx);

+    if (criterion_options.debug == CR_DBG_NATIVE && instance->status.stopped) {
+        criterion_pinfo(CRITERION_PREFIX_DEBUG, "Default debugger can not be found, falling back to \"idle\" mode\n");
+    }
+
     /* TODO: integrate this to the logger after refactor */
-    if (criterion_options.debug == CR_DBG_IDLE) {
+    if (criterion_options.debug == CR_DBG_IDLE || instance->status.stopped) {
         criterion_pinfo(CRITERION_PREFIX_DEBUG, _(msg_print_pid),
                 ctx->test->category,
                 ctx->test->name,
esp builds on mac
 PATH := esp-open-sdk/xtensa-lx106-elf/bin:$(PATH)
-CC = xtensa-lx106-elf-gcc
+CC = esp-open-sdk/xtensa-lx106-elf/bin/xtensa-lx106-elf-gcc
 CFLAGS = -Iinclude/ -I. -I../ -mlongcalls -Iesp-open-sdk/ESP8266_NONOS_SDK_V1.5.4_16_05_20/driver_lib/include -std=c99 -DICACHE_FLASH
 LDLIBS = -nostdlib -Wl,--start-group -lmain -lnet80211 -lwpa -llwip -lpp -lphy -Wl,--end-group -lgcc -ldriver -Wl,--gc-sections
 LDFLAGS = -Teagle.app.v6.ld
-OBJCP = xtensa-lx106-elf-objcopy
+OBJCP = esp-open-sdk/xtensa-lx106-elf/bin/xtensa-lx106-elf-objcopy
 SDK_BASE = esp-open-sdk/ESP8266_NONOS_SDK_V1.5.4_16_05_20

 ifeq ($(RELEASE),1)
Ruby: improved logging of exceptions without backtraces. If an exception was raised with a backtrace of zero length, the nxt_ruby_exception_log() routine would return without logging the exception class and message. This commit fixes the issue.
@@ -1069,14 +1069,18 @@ nxt_ruby_exception_log(nxt_unit_request_info_t *req, uint32_t level,
         return;
     }

+    eclass = rb_class_name(rb_class_of(err));
+
+    msg = rb_funcall(err, rb_intern("message"), 0);
     ary = rb_funcall(err, rb_intern("backtrace"), 0);
-    if (nxt_slow_path(RARRAY_LEN(ary) == 0)) {
+
+    if (RARRAY_LEN(ary) == 0) {
+        nxt_unit_req_log(req, level, "Ruby: %s (%s)", RSTRING_PTR(msg),
+                         RSTRING_PTR(eclass));
+
         return;
     }

-    eclass = rb_class_name(rb_class_of(err));
-    msg = rb_funcall(err, rb_intern("message"), 0);
-
     nxt_unit_req_log(req, level, "Ruby: %s: %s (%s)",
                      RSTRING_PTR(RARRAY_PTR(ary)[0]),
                      RSTRING_PTR(msg), RSTRING_PTR(eclass));
Fix SWO output
 #error "ITM port is not available on Cortex-M0(+) cores. Need to set CMake option SWO_OUTPUT to OFF."
 #else

-// number of attempts to write to the ITM port before quit
-#define ITM_WRITE_ATTEMPTS 10
+// number of attempts to write to the ITM port before quitting
+// developer note: this is an arbitrary value from trial & error attempts to get a satisfactory output on ST-Link SWO viewer
+#define ITM_WRITE_ATTEMPTS 20000

 extern "C" void SwoInit()
 {
@@ -64,7 +65,7 @@ extern "C" void SwoPrintChar(char c)
     ASSERT((ITM->TER & 1UL ) != 0UL);

     uint32_t retryCounter = ITM_WRITE_ATTEMPTS;
-    bool okToTx = ITM->PORT[0U].u32 == 0UL;
+    bool okToTx = (ITM->PORT[0U].u32 == 1UL);

     // wait (with timeout) until ITM port TX buffer is available
     while( !okToTx &&
@@ -77,7 +78,7 @@ extern "C" void SwoPrintChar(char c)
         retryCounter--;

         // check again
-        okToTx = (ITM->PORT[0U].u32 == 0UL);
+        okToTx = (ITM->PORT[0U].u32 == 1UL);
     }

     if(okToTx)
@@ -107,12 +108,13 @@ __STATIC_INLINE uint32_t GenericPort_Write_CMSIS(int portNum, const char* data,
     char* p = (char*)data;
     uint32_t counter = 0;
     uint32_t retryCounter;
-    bool okToTx = ITM->PORT[0U].u32 == 0UL;
+    bool okToTx;

     while( *p != '\0' && counter < size )
     {
         retryCounter = ITM_WRITE_ATTEMPTS;
+        okToTx = (ITM->PORT[0U].u32 == 1UL);

         // wait (with timeout) until ITM port TX buffer is available
         while( !okToTx &&
@@ -125,7 +127,7 @@ __STATIC_INLINE uint32_t GenericPort_Write_CMSIS(int portNum, const char* data,
             retryCounter--;

             // check again
-            okToTx = (ITM->PORT[0U].u32 == 0UL);
+            okToTx = (ITM->PORT[0U].u32 == 1UL);
         }

         if(okToTx)
Update build-xamarin.py
@@ -115,7 +115,7 @@ def buildXamarinNuget(args, target):

   if not nuget(args, buildDir,
     'pack',
-    '"%s/CartoMobileSDK.%s.nuspec"' % (buildDir, target),
+    '%s/CartoMobileSDK.%s.nuspec' % (buildDir, target),
     '-BasePath', '/'
   ):
     return False
Add test case for closures within loops
@@ -62,3 +62,19 @@ assert(ArrowFuncModule.func9("Dictu ", "is great!") == "Dictu is great!");
         }
     }
 }
+
+/**
+ * Within a loop.
+ * There was an issue where functions defined in a loop wrongly shuffled the bytecode
+ * add a test to cover this
+ */
+var x = 0;
+
+while (x < 10) {
+    def myFunc() {
+        assert(type(x) == 'number');
+    }
+
+    myFunc();
+    x += 1;
+}
\ No newline at end of file
codegen: fix possible segfault in example
@@ -40,7 +40,7 @@ void showMenu (Menu * menu)
 	char * end;
 	char buf[20];

-	int selection;
+	int selection = -1;
 	int minSelection = menu->command[0] != '\0' ? 0 : 1;

 	do
@@ -63,7 +63,7 @@ void showMenu (Menu * menu)
 		{
 			system (menu->command);
 		}
-		else
+		else if (selection > 0)
 		{
 			showMenu (menu->children[selection - 1]);
 		}
Fix TRADFRI remote control arrow buttons hold/long release events When pressing the middle button for more than 5 seconds the remote reads the Application Version attribute. If the coordinator doesn't respond with value 17 as indicated in earlier remote firmware versions, the arrow buttons don't send hold/long release commands anymore.
@@ -79,9 +79,22 @@ void DeRestPluginPrivate::sendBasicClusterResponse(const deCONZ::ApsDataIndicati
             break;

         case 0x0001: // Application Version
+        {
             stream << code;
             stream << (quint8) deCONZ::Zcl8BitUint;
-            stream << (quint8) 0x00;
+
+            Sensor *sensor = getSensorNodeForAddressAndEndpoint(ind.srcAddress(), ind.srcEndpoint());
+            if (sensor && sensor->modelId() == QLatin1String("TRADFRI remote control"))
+            {
+                // Since firmware version 2.3.014 when the large middle button is pressed the remote reads this attribute.
+                // If it isn't 17 as reported by earlier remote firmware, the left/right buttons don't send hold and long press commands anymore.
+                stream << quint8(17);
+            }
+            else
+            {
+                stream << quint8(0x00);
+            }
+        }
             break;

         case 0x0002: // Stack Version
ARMv8: disable traps to EL2 for timer accesses
@@ -156,6 +156,10 @@ efiboot_init(uint32_t magic, void *pointer, void *stack) {
          * within the kernel window).  That's
          * fine, as we'll never return to
          * this context. */
        sysreg_write_sp_el1((uint64_t)stack + KERNEL_OFFSET);
+
+       /* disable traps to EL2 for timer accesses */
+       uint32_t cnthctl = sysreg_read_cnthctl_el2();
+       sysreg_write_cnthctl_el2(cnthctl | 0x3);
    }

    if (el == 3) {
[brick] update lua debugger According to changes in LuaJIT, using a table as a key in a table is no longer allowed, so the table reference printing logic is fixed.
@@ -640,8 +640,6 @@ func GetDebuggerCode() *C.char {
 --}}}

 --{{{  local function dumpval( level, name, value, limit )
-local dumpvisited
-
 local function dumpval( level, name, value, limit )
   local index
   if type(name) == 'number' then
@@ -656,20 +654,15 @@ func GetDebuggerCode() *C.char {
     index = string.format('[%q] = ',tostring(name))
   end
   if type(value) == 'table' then
-    if dumpvisited[value] then
-      indented( level, index, string.format('ref%q;',dumpvisited[value]) )
-    else
-      dumpvisited[value] = tostring(value)
       if (limit or 0) > 0 and level+1 >= limit then
-        indented( level, index, dumpvisited[value] )
+        indented( level, index, tostring(value), ';' )
       else
-        indented( level, index, '{ -- ', dumpvisited[value] )
+        indented( level, index, '{' )
         for n,v in pairs(value) do
           dumpval( level+1, n, v, limit )
         end
         indented( level, '};' )
       end
-    end
   else
     if type(value) == 'string' then
       if string.len(value) > 40 then
@@ -687,7 +680,6 @@ func GetDebuggerCode() *C.char {
 --{{{  local function dumpvar( value, limit, name )
 local function dumpvar( value, limit, name )
-  dumpvisited = {}
   dumpval( 0, name or tostring(value), value, limit )
 end
get_all_params -> init_params
@@ -4470,7 +4470,10 @@ class CatBoostRegressor(CatBoost):
         """
         if prediction_type is None:
             prediction_type = 'RawFormulaVal'
-            params = self.get_all_params()
+            # TODO(ilyzhin) change on get_all_params after MLTOOLS-4758
+            params = deepcopy(self._init_params)
+            _process_synonyms(params)
+
             if 'loss_function' in params:
                 if params['loss_function'] in ('Poisson', 'Tweedie'):
                     prediction_type = 'Exponent'
         return self._predict(data, prediction_type, ntree_start, ntree_end, thread_count, verbose, 'predict')
fix(qrcodegen) add brackets around assert calls Add brackets to fix build errors on platforms which define assert as an empty macro.
@@ -948,9 +948,9 @@ struct qrcodegen_Segment qrcodegen_makeEci(long assignVal, uint8_t buf[]) {
 	result.mode = qrcodegen_Mode_ECI;
 	result.numChars = 0;
 	result.bitLength = 0;
-	if (assignVal < 0)
+	if (assignVal < 0) {
 		assert(false);
-	else if (assignVal < (1 << 7)) {
+	} else if (assignVal < (1 << 7)) {
 		memset(buf, 0, 1 * sizeof(buf[0]));
 		appendBitsToBuffer(assignVal, 8, buf, &result.bitLength);
 	} else if (assignVal < (1 << 14)) {
@@ -962,8 +962,9 @@ struct qrcodegen_Segment qrcodegen_makeEci(long assignVal, uint8_t buf[]) {
 		appendBitsToBuffer(6, 3, buf, &result.bitLength);
 		appendBitsToBuffer(assignVal >> 10, 11, buf, &result.bitLength);
 		appendBitsToBuffer(assignVal & 0x3FF, 10, buf, &result.bitLength);
-	} else
+	} else {
 		assert(false);
+	}
 	result.data = buf;
 	return result;
 }
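The brace requirement is easiest to see on toolchains where assert can expand to nothing at all: the unbraced statement then collapses to an empty if body, which warning-as-error builds typically reject via diagnostics such as -Wempty-body. A hedged, self-contained C++ illustration, not the qrcodegen sources:

#include <cassert>

// With the standard header, assert(false) still expands to a statement even
// under NDEBUG, but some embedded toolchains define assert(expr) as empty.
// The unbraced form would then preprocess to "if (v < 0) ; else ...", which
// strict builds flag; braces keep a non-empty, unambiguous body either way.
static int classify(long v)
{
    if (v < 0) {
        assert(false);      // safe inside braces even if assert() vanishes
        return -1;
    } else if (v < (1 << 7)) {
        return 1;
    } else {
        return 2;
    }
}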
gh-actions: add several GCC builds
@@ -73,6 +73,37 @@ jobs:
       with:
         file: ./build/meson-logs/coverage.xml

+  gcc:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        version: ["10", "9", "8", "7"]
+    env:
+      CC: gcc-${{ matrix.version }}
+      CXX: g++-${{ matrix.version }}
+      CFLAGS: -Wall -Wextra -Werror -march=native
+      CXXFLAGS: -Wall -Wextra -Werror -march=native
+    steps:
+    - uses: actions/checkout@v2
+      with:
+        submodules: recursive
+    - name: Install APT Dependencies
+      run: sudo add-apt-repository 'ppa:ubuntu-toolchain-r/test' && sudo apt-get update && sudo apt-get install -y ninja-build ninja-build python3-pip python3-setuptools python3-wheel parallel gcovr gcc-${{ matrix.version }} g++-${{ matrix.version }}
+    - name: Install pip Dependencies
+      run: pip3 install meson
+    - name: Configure
+      run: ~/.local/bin/meson setup build -Db_coverage=true
+    - name: Build
+      run: ninja -C build -v
+    - name: Test
+      run: ninja -C build -v test
+    - name: Coverage Report
+      run: ninja -C build -v coverage-xml
+    - name: CodeCov.io
+      uses: codecov/codecov-action@v1
+      with:
+        file: ./build/meson-logs/coverage.xml
+
   SDE:
     runs-on: ubuntu-latest
     strategy:
Samples: Host Exerciser enhancements Marked all internal functions as STATIC functions
@@ -84,7 +84,7 @@ struct read_format {
  * If fpga_perf_check_and_lock() returns FPGA_OK, assume the mutex to be
  * locked.
  */
-fpga_result fpga_perf_check_and_lock(fpga_perf_counter *fpga_perf)
+STATIC fpga_result fpga_perf_check_and_lock(fpga_perf_counter *fpga_perf)
 {
 	int res = 0;

@@ -105,7 +105,7 @@ fpga_result fpga_perf_check_and_lock(fpga_perf_counter *fpga_perf)
 }

 /* parse the each format and get the shift val */
-fpga_result parse_perf_format(struct udev_device *dev, fpga_perf_counter *fpga_perf)
+STATIC fpga_result parse_perf_format(struct udev_device *dev, fpga_perf_counter *fpga_perf)
 {
 	regex_t re;
 	regmatch_t matches[3] = { {0} };
@@ -170,7 +170,7 @@ fpga_result parse_perf_format(struct udev_device *dev, fpga_perf_counter *fpga_p
 }

 /* parse the events for the particular device directory */
-fpga_result parse_perf_event(struct udev_device *dev, fpga_perf_counter *fpga_perf)
+STATIC fpga_result parse_perf_event(struct udev_device *dev, fpga_perf_counter *fpga_perf)
 {
 	regex_t re;
 	regmatch_t matches[4] = { {0} };
@@ -241,7 +241,7 @@ fpga_result parse_perf_event(struct udev_device *dev, fpga_perf_counter *fpga_pe
 }


-fpga_result fpga_perf_events(char* perf_sysfs_path, fpga_perf_counter *fpga_perf)
+STATIC fpga_result fpga_perf_events(char* perf_sysfs_path, fpga_perf_counter *fpga_perf)
 {
 	fpga_result ret = FPGA_OK;
 	struct udev *udev = NULL;
@@ -332,7 +332,7 @@ out:
 }

 /* get fpga sbdf from token */
-fpga_result get_fpga_sbdf(fpga_token token,
+STATIC fpga_result get_fpga_sbdf(fpga_token token,
 			uint16_t *segment,
 			uint8_t *bus,
 			uint8_t *device,
fix multi monitor focusing
@@ -840,6 +840,10 @@ clientmessage(XEvent *e)
 		for (i = 0; i < LENGTH(tags) && !((1 << i) & c->tags); i++);
 		if (i < LENGTH(tags)) {
 			const Arg a = {.ui = 1 << i};
+			if (selmon != c->mon) {
+				unfocus(selmon->sel, 0);
+				selmon = c->mon;
+			}
 			view(&a);
 			focus(c);
 			restack(selmon);
api: remove unused singular option The singular option to the API language was added as a way to deal with messages that do not have a reply message. Examples in memclnt.api. These messages are instead handled using the service {} construct. Type: refactor
@@ -308,7 +308,6 @@ class Define(Processable):
         self.manual_print = False
         self.manual_endian = False
         self.autoreply = False
-        self.singular = False
         self.options = {}
         for f in flags:
             if f == 'dont_trace':
@@ -323,9 +322,6 @@ class Define(Processable):
         remove = []
         for b in block:
             if isinstance(b, Option):
-                if b[1] == 'singular' and b[2] == 'true':
-                    self.singular = True
-                else:
                 self.options[b.option] = b.value
                 remove.append(b)

@@ -1042,8 +1038,6 @@ class VPPAPI():
         for d in msgs:
             if d in seen_services:
                 continue
-            if msgs[d].singular is True:
-                continue
             if d.endswith('_reply'):
                 if d[:-6] in svcs:
                     continue
Mount dump to ensure its usage
@@ -54,6 +54,10 @@ This command will return the following values as an exit status:
 ```sh
 # Backup-and-Restore: user/tests/get/examples

+# We use the `dump` plugin, since some storage plugins, e.g. INI,
+# create intermediate keys.
+sudo kdb mount get.ecf user/tests/get/examples/kdb-get dump
+
 # Create the keys we use for the examples
 kdb set user/tests/get/examples/kdb-get/key myKey
 kdb setmeta /tests/get/examples/kdb-get/anotherKey default defaultValue
@@ -92,6 +96,7 @@ kdb get -v /tests/get/examples/kdb-get/anotherKey

 kdb rm user/tests/get/examples/kdb-get/key
 kdb rm spec/tests/get/examples/kdb-get/anotherKey
+sudo kdb umount user/tests/get/examples/kdb-get
 ```

 To use bookmarks:<br>
CBLK: Adding librt as link requirement for old RHELs
@@ -48,7 +48,7 @@ snap_cblk_LDFLAGS += -L. \
 	-Wl,-rpath,$(SNAP_ROOT)/actions/hdl_nvme_example/sw \
 	-Wl,-rpath,$(SNAP_ROOT)/software/lib

-snap_cblk_libs += -lsnapcblk
+snap_cblk_libs += -lsnapcblk -lrt
 endif

 snap_cblk_objs += force_cpu.o
Add incremental margin collapse for grid<->HUD
@@ -1247,8 +1247,9 @@ void ged_update_internal_geometry(Ged* a) {
   int softmargin_y = a->softmargin_y;
   bool show_hud = win_h > Hud_height + 1;
   int grid_h = show_hud ? win_h - 2 : win_h;
-  if (grid_h > softmargin_y + 1 && grid_h > a->field.height + softmargin_y) {
-    grid_h -= softmargin_y;
+  if (grid_h > a->field.height) {
+    int halfy = (grid_h - a->field.height) / 2;
+    grid_h -= halfy < softmargin_y ? halfy : softmargin_y;
   }
   a->grid_h = grid_h;
   a->is_draw_dirty = true;
use json helpers
 // (C) 2017 <>< Joshua Allen, Under MIT/x11 License.
-
 #include <string.h>
 #include <assert.h>
 #include "survive_config.h"
+#include <json_helpers.h>

 #define MAX_CONFIG_ENTRIES 100

@@ -227,22 +227,8 @@ const FLT* config_set_float_a(const char *tag, const FLT* values, uint8_t count)
 	return values;
 }

-void write_float_array(FILE* f, char* tag, FLT* v, uint8_t count) {
-	uint8_t i = 0;
-	printf("save float array\n");
-
-	fprintf(f, "\"%s\":[", tag);
-	for (i=0;i<count;++i) {
-//		if (i>0) {
-			fprintf(f, "\"%f\",", v[i]);
-//		} else {
-//			fprintf(f, "\"%f\"", v[i]);
-//		}
-	}
-
-	fseek(f,-1,SEEK_CUR);
-	fprintf(f, "]\n");
-
+void _json_write_float_array(FILE* f, const char* tag, FLT* v, uint8_t count) {
+	json_write_double_array(f,tag,v,count);
 }

 void config_save(const char* path) {
@@ -252,13 +238,13 @@ void config_save(const char* path) {

 	for (i=0;i<=used_entries;++i) {
 		if (config_values[i].type == CONFIG_FLOAT) {
-			fprintf(f, "\"%s\":\"%F\"\n", config_values[i].tag, config_values[i].numeric.f);
+			json_write_float(f, config_values[i].tag, config_values[i].numeric.f);
 		} else if (config_values[i].type == CONFIG_UINT32) {
-			fprintf(f, "\"%s\":\"%d\"\n", config_values[i].tag, config_values[i].numeric.i);
+			json_write_uint32(f, config_values[i].tag, config_values[i].numeric.i);
 		} else if (config_values[i].type == CONFIG_STRING) {
-			fprintf(f, "\"%s\":\"%s\"\n", config_values[i].tag, config_values[i].data);
+			json_write_str(f, config_values[i].tag, config_values[i].data);
 		} else if (config_values[i].type == CONFIG_FLOAT_ARRAY) {
-			write_float_array(f, config_values[i].tag, (FLT*)config_values[i].data, config_values[i].elements);
+			_json_write_float_array(f, config_values[i].tag, (FLT*)config_values[i].data, config_values[i].elements);
 		}
 	};
build fuzzers on trusty
@@ -52,6 +52,9 @@ matrix:
       - make check
       - sudo make check-as-root
   - os: linux
+    apt:
+      sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-trusty-4.0']
+      packages: ['clang-4.0' , 'g++-6']
     sudo: required
     dist: trusty
    env:
@@ -59,4 +62,9 @@ matrix:
       php: '7.x'
     before_install: *bi
     before_script: *bs
-    script: *s
+    script:
+      - CC=clang CXX=clang++ cmake -DBUILD_FUZZERS=ON -DWITH_MRUBY=ON .
+      - make all
+      - make check
+      - sudo make check-as-root
+
Correctly pad small alignments. Negative padding shouldn't happen.
@@ -255,15 +255,17 @@ blobstruct(Blob *seq, Htab *globls, Htab *strtab, Node *n) static size_t blobucon(Blob *seq, Htab *globls, Htab *strtab, Node *n) { - size_t sz, pad; + size_t sz, align; Ucon *uc; sz = 4; uc = finducon(exprtype(n), n->expr.args[0]); b(seq, mkblobi(Bti32, uc->id)); + align = 1; if (n->expr.nargs > 1) { - pad = tyalign(exprtype(n->expr.args[1])) - sz; - sz += blobpad(seq, pad); + align = tyalign(exprtype(n->expr.args[1])); + if (align > sz) + sz += blobpad(seq, align - sz); sz += blobrec(seq, globls, strtab, n->expr.args[1]); } sz += blobpad(seq, size(n) - sz);
Ensure that ldscopedyn is not found as a musl executable after the loader string has been updated.
@@ -901,6 +901,7 @@ reportPeriodicStuff(void) // empty the event queues doEvent(); + doPayload(); mtcFlush(g_mtc); @@ -1214,7 +1215,7 @@ initHook(int attachedFlag) return; } - if (ebuf && ebuf->buf) { + if (ebuf && ebuf->buf && (strstr(full_path, "ldscope") == NULL)) { g_ismusl = is_musl(ebuf->buf); }
Remove deprecated or conflicting VS solution opts
<Optimization>Disabled</Optimization> <AdditionalIncludeDirectories>$(ProjectDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions> - <MinimalRebuild>true</MinimalRebuild> + <MinimalRebuild> + </MinimalRebuild> <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks> <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary> <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet> <GenerateDebugInformation>true</GenerateDebugInformation> <SubSystem>Console</SubSystem> <TargetMachine>MachineX86</TargetMachine> + <ImageHasSafeExceptionHandlers>false</ImageHasSafeExceptionHandlers> </Link> </ItemDefinitionGroup> <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'"> </PrecompiledHeader> <WarningLevel>Level3</WarningLevel> <DebugInformationFormat>ProgramDatabase</DebugInformationFormat> + <MinimalRebuild /> </ClCompile> <Link> <GenerateDebugInformation>true</GenerateDebugInformation>
Linux line bug
@@ -1867,7 +1867,8 @@ int h3_long_file_name_test() long_file_name_stream_length, 0, 400000, 0, NULL, NULL, NULL); } #else - ret = demo_server_test(PICOHTTP_ALPN_H3_LATEST, h3zero_server_callback, NULL, 0, long_file_name_scenario, nb_long_file_name_scenario, + ret = demo_server_test(PICOHTTP_ALPN_H3_LATEST, h3zero_server_callback, NULL, 0, + long_file_name_scenario, nb_long_file_name_scenario, long_file_name_stream_length, 0, 400000, 0, NULL, NULL, NULL); #endif return ret;
contacts-js: dismiss spinner on share
@@ -383,6 +383,7 @@ export class ContactCard extends Component { props.api.contacts .share(`~${props.ship}`, props.path, `~${window.ship}`, contact) .then(() => { + this.setState({ awaiting: false }); props.history.push(`/~groups/view${props.path}/${window.ship}`); }); });
server: properly defined request-line type
=, eyre |% -:: ++$ request-line + $: [ext=(unit @ta) site=(list @t)] + args=(list [key=@t value=@t]) + == :: +parse-request-line: take a cord and parse out a url :: ++ parse-request-line |= url=@t - ^- [[ext=(unit @ta) site=(list @t)] args=(list [key=@t value=@t])] + ^- request-line (fall (rush url ;~(plug apat:de-purl:html yque:de-purl:html)) [[~ ~] ~]) :: ++ manx-to-octs
chmod_dialog.c: use gtk_dialog_new() and gtk_dialog_add_button()
@@ -106,6 +106,7 @@ void chmod_dialog (gpointer data) { GtkWidget *label, *dialog, *hbox, *main_vbox; + GtkWidget * ButtonOK, * ButtonCancel, * IconOK, * IconCancel; GtkWidget * FrameItem[4][3]; // 4 frames with 3 items (CheckBoxes) each GtkWidget * frameX, * FrameVbox; int i, j; @@ -116,17 +117,23 @@ chmod_dialog (gpointer data) if (!check_status (_("Chmod"), wdata, gftpui_common_use_threads (wdata->request), 0, 1, wdata->request->chmod != NULL)) return; - dialog = gtk_dialog_new_with_buttons (_("Chmod"), NULL, 0, - "gtk-cancel", GTK_RESPONSE_CANCEL, - "gtk-ok", GTK_RESPONSE_OK, - NULL); - + dialog = gtk_dialog_new (); + gtk_window_set_title (GTK_WINDOW (dialog), _("Chmod")); gtk_window_set_role (GTK_WINDOW (dialog), "Chmod"); + gtk_window_set_transient_for (GTK_WINDOW (dialog), GTK_WINDOW (main_window)); gtk_window_set_position (GTK_WINDOW (dialog), GTK_WIN_POS_MOUSE); set_window_icon (GTK_WINDOW (dialog), NULL); gtk_container_set_border_width (GTK_CONTAINER (dialog), 5); gtk_widget_realize (dialog); + // buttons + ButtonCancel = gtk_dialog_add_button (GTK_DIALOG (dialog), _("_Cancel"), GTK_RESPONSE_CANCEL); + IconCancel = gtk_image_new_from_icon_name ("gtk-cancel", GTK_ICON_SIZE_BUTTON); + gtk_button_set_image (GTK_BUTTON (ButtonCancel), IconCancel); + ButtonOK = gtk_dialog_add_button (GTK_DIALOG (dialog), _("_OK"), GTK_RESPONSE_OK); + IconOK = gtk_image_new_from_icon_name ("gtk-ok", GTK_ICON_SIZE_BUTTON); + gtk_button_set_image (GTK_BUTTON (ButtonOK), IconOK); + // vbox main_vbox = gtk_dialog_get_content_area (GTK_DIALOG (dialog)); gtk_box_set_spacing (GTK_BOX (main_vbox), 5);
Ignore base64 padding for strings that are supposed to be unpadded.
@@ -735,9 +735,12 @@ static int sldns_b64_pton_base(char const *src, size_t srcsize, uint8_t *target, d = 63; else if(base64url && d == '_') d = 63; - else if(!base64url && d == '=') + else if(d == '=') { + if(!check_padding) + continue; d = 64; - else continue; + } else continue; + in[incount++] = (uint8_t)d; /* work on block of 4, unless padding is not used and there are * less than 4 chars left */
BugID: 17134554: [http2] fix crash issue in IOT_HTTP2_Stream_Close
@@ -683,7 +683,15 @@ int IOT_HTTP2_Stream_Close(stream_handle_t *handle, stream_data_info_t *info) http2_stream_node_t *node; HAL_MutexLock(handle->mutex); list_for_each_entry(node, &handle->stream_list, list, http2_stream_node_t) { - if ((len == strlen(node->channel_id) && !memcmp(node->channel_id, stream_id, len))) { + if (info->h2_stream_id == node->stream_id) { + h2stream_info("stream_node found:stream_id= %d, Delete It", node->stream_id); + list_del((list_head_t *)&node->list); + HAL_Free(node->channel_id); + HAL_SemaphoreDestroy(node->semaphore); + HAL_Free(node); + continue; + } + if ((node->channel_id != NULL) && (len == strlen(node->channel_id) && !memcmp(node->channel_id, stream_id, len))) { h2stream_info("stream_node found: %s, Delete It", node->channel_id); list_del((list_head_t *)&node->list); HAL_Free(node->channel_id);
king: Terminal input line wasn't being shown because of line buffering.
@@ -533,6 +533,8 @@ main :: IO () main = do mainTid <- myThreadId + hSetBuffering stdout NoBuffering + let onTermSig = throwTo mainTid UserInterrupt Sys.installHandler Sys.sigTERM (Sys.Catch onTermSig) Nothing
Enable ltree extension on gpdb/main
@@ -22,6 +22,7 @@ all: $(MAKE) -C contrib/indexscan all $(MAKE) -C contrib/pageinspect all # needed by src/test/isolation $(MAKE) -C contrib/hstore all + $(MAKE) -C contrib/ltree all $(MAKE) -C contrib/pgcrypto all $(MAKE) -C contrib/btree_gin all $(MAKE) -C contrib/pg_trgm all @@ -60,6 +61,7 @@ install: $(MAKE) -C contrib/indexscan $@ $(MAKE) -C contrib/pageinspect $@ # needed by src/test/isolation $(MAKE) -C contrib/hstore $@ + $(MAKE) -C contrib/ltree $@ $(MAKE) -C contrib/pgcrypto $@ $(MAKE) -C contrib/btree_gin $@ $(MAKE) -C contrib/pg_trgm $@ @@ -157,7 +159,7 @@ ICW_TARGETS = src/test src/pl src/interfaces/gppc ICW_TARGETS += contrib/auto_explain contrib/citext contrib/btree_gin ICW_TARGETS += contrib/file_fdw contrib/formatter_fixedwidth ICW_TARGETS += contrib/extprotocol contrib/dblink contrib/pg_trgm -ICW_TARGETS += contrib/indexscan contrib/hstore contrib/pgcrypto +ICW_TARGETS += contrib/indexscan contrib/hstore contrib/ltree contrib/pgcrypto # sslinfo depends on openssl ifeq ($(with_openssl), yes) ICW_TARGETS += contrib/sslinfo
workflows: clone ci repository on publish_images. Add a task to clone the ci repository in the publish_images workflow.
@@ -10,6 +10,11 @@ jobs: if: github.event.workflow_run.conclusion == 'success' runs-on: ubuntu-latest steps: + - uses: actions/checkout@v2 + with: + repository: calyptia/fluent-bit-ci + path: ci + - name: Download docker image from build artifacts uses: dawidd6/action-download-artifact@v2 with:
VERSION bump to version 1.1.39
@@ -40,7 +40,7 @@ set(CMAKE_C_FLAGS_PACKAGE "-g -O2 -DNDEBUG") # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(LIBNETCONF2_MAJOR_VERSION 1) set(LIBNETCONF2_MINOR_VERSION 1) -set(LIBNETCONF2_MICRO_VERSION 38) +set(LIBNETCONF2_MICRO_VERSION 39) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) # Version of the library
fix: windows build tutorial
@@ -225,7 +225,7 @@ Then, go to `discord-public-user.c` and include `strndup.h`: ``` Now, go to `Makefile`. -Then, go to the line that defines the variable `OBJS` and add `$(OBJDIR)/tdestroy.c.o` and `$(OBJDIR)/strndup.c.o` after `mkdir`. +Then, go to the line that defines the variable `OBJS` and add `$(OBJDIR)/tdestroy.c.o $(OBJDIR)/strndup.c.o`. ### Add native CA to curl options If we don't wanna get SSL certificate error, we need to add native CA to curl options.
Do not send PMTUD packet to an unvalidated path
@@ -11827,6 +11827,7 @@ ngtcp2_ssize ngtcp2_conn_write_vmsg(ngtcp2_conn *conn, ngtcp2_path *path, } if (conn->pmtud && + (conn->dcid.current.flags & NGTCP2_DCID_FLAG_PATH_VALIDATED) && (!conn->hs_pktns || ngtcp2_ksl_len(&conn->hs_pktns->crypto.tx.frq) == 0)) { nwrite = conn_write_pmtud_probe(conn, pi, dest, origdestlen, ts);
tools: windows installer: Modify IDF's archive mirror link
@@ -54,7 +54,7 @@ begin if IDFZIPFileVersion <> '' then begin Url := 'https://github.com/espressif/esp-idf/releases/download/' + IDFZIPFileVersion + '/esp-idf-' + IDFZIPFileVersion + '.zip'; - MirrorUrl := 'https://dl.espressif.com/dl/esp-idf/releases/esp-idf-' + IDFZIPFileVersion + '.zip'; + MirrorUrl := 'https://dl.espressif.com/github_assets/espressif/esp-idf/releases/download/' + IDFZIPFileVersion + '/esp-idf-' + IDFZIPFileVersion + '.zip'; IDFZIPFileName := ExpandConstant('{app}\releases\esp-idf-' + IDFZIPFileVersion + '.zip') if not FileExists(IDFZIPFileName) then begin
Initialise OPENSSL_armcap_P to 0 before setting it based on capabilities, not after
@@ -133,6 +133,8 @@ void OPENSSL_cpuid_setup(void) return; trigger = 1; + OPENSSL_armcap_P = 0; + if ((e = getenv("OPENSSL_armcap"))) { OPENSSL_armcap_P = (unsigned int)strtoul(e, NULL, 0); return; @@ -166,8 +168,6 @@ void OPENSSL_cpuid_setup(void) # endif # endif - OPENSSL_armcap_P = 0; - # ifdef OSSL_IMPLEMENT_GETAUXVAL if (getauxval(HWCAP) & HWCAP_NEON) { unsigned long hwcap = getauxval(HWCAP_CE);
Test MonadPlus instance of Result
@@ -33,6 +33,7 @@ Test for the conversion of lua values to haskell values. module Foreign.Lua.Types.FromLuaStackTest (tests) where import Control.Applicative (empty, (<|>)) +import Control.Monad (mplus, mzero) import Foreign.Lua.Types.Core (Lua, LuaInteger) import Foreign.Lua.Types.FromLuaStack import Foreign.Lua.Functions (call, loadstring, runLua) @@ -81,6 +82,14 @@ tests = testGroup "FromLuaStack" , testProperty "Alternative is the second value if first is empty" $ property (\x -> Success (x::Int) == (empty <|> Success x)) ] + + , testGroup "MonadPlus" + [ testProperty "Second arg is not evaluated if the first succeeded" $ + property (\x -> Success (x::Int) == (Success x `mplus` error "This is wrong")) + + , testProperty "Alternative is the second value if first is mzero" $ + property (\x -> Success (x::Int) == (mzero <|> Success x)) + ] ] ]
OSX: tune RhoSimulator build
@@ -580,6 +580,7 @@ class Jake def self.run3_dont_fail(command, cd = nil, env = {}, use_run2 = false) set_list = [] + currentdir = "" env.each_pair do |k, v| if RUBY_PLATFORM =~ /(win|w)32$/ set_list << "set \"#{k}=#{v}\"&&" @@ -596,10 +597,15 @@ class Jake if RUBY_PLATFORM =~ /(win|w)32$/ cd_ = cd.gsub('/', "\\") to_run = "cd /d \"#{cd_}\"&&#{to_run}" + else + if use_run2 + currentdir = Dir.pwd() + Dir.chdir cd else to_run = "cd '#{cd}'&&#{to_run}" end end + end if !env.nil? to_print = "ENV: #{env}\n#{to_print}" @@ -612,6 +618,9 @@ class Jake self.run2(to_run, []) do |line| log Logger::DEBUG,line end + if not cd.nil? + Dir.chdir currentdir + end return $?.exitstatus == 0 else res = system(to_run)
Edit installation guide -- explicitly mention Windows, etc.
@@ -83,11 +83,17 @@ brew install elektra . We also provide a tap containing a more elaborate formula [here](http://github.com/ElektraInitiative/homebrew-elektra). -## Generic +## Windows + +Please refer to the section OS independent below. + +## OS independent First follow the steps in [COMPILE](COMPILE.md). -To install Elektra use: +After you completed building Elektra on your own, there are multiple options how to install it. For example, with make or cPack tools. + +### MAKE sudo make install sudo ldconfig # See troubleshooting below @@ -102,7 +108,7 @@ or in the build directory (will not honor DESTDIR!): xargs rm < install_manifest.txt -## CPack +### CPack First follow the steps in [COMPILE](COMPILE.md).
[bsp][tm4c129x] Update drv_eth.c: decide whether to disable the eth hardware checksum based on the RT_LWIP_USING_HW_CHECKSUM definition.
@@ -634,8 +634,13 @@ tivaif_transmit(net_device_t dev, struct pbuf *p) pDesc->Desc.ui32CtrlStatus = 0; } +#ifdef RT_LWIP_USING_HW_CHECKSUM pDesc->Desc.ui32CtrlStatus |= (DES0_TX_CTRL_IP_ALL_CKHSUMS | DES0_TX_CTRL_CHAINED); +#else + pDesc->Desc.ui32CtrlStatus |= (DES0_TX_CTRL_NO_CHKSUM | + DES0_TX_CTRL_CHAINED); +#endif /* Decrement our descriptor counter, move on to the next buffer in the * pbuf chain. */
net/netmgr: add wifi deinit in kernel space. Add a wifi deinit path in kernel space so that the binary manager can deinitialize wifi when reloading a binary.
@@ -145,6 +145,20 @@ int _trwifi_handle_command(struct netdev *dev, lwnl_req cmd) return 0; } +/* it's for reloading operation in binary manager. + * it must be used to binary manager only. + */ +trwifi_result_e netdev_deinit_wifi(void) +{ + struct netdev *dev = (struct netdev *)nm_get_netdev((uint8_t *)"wlan0"); + if (!dev) { + return TRWIFI_FAIL; + } + trwifi_result_e res = TRWIFI_FAIL; + TRWIFI_CALL(res, dev, deinit, (dev)); + return res; +} + int netdev_handle_wifi(struct netdev *dev, lwnl_req cmd, void *data, uint32_t data_len) { trwifi_result_e res = TRWIFI_FAIL;
YAML Benchmark: Add comment banners
# @date 12.04.2019 # @tags benchmark +# -- Global Variables ---------------------------------------------------------------------------------------------------------------------- + BUILD_DIRECTORY="@CMAKE_BINARY_DIR@" SOURCE_DIRECTORY="@CMAKE_SOURCE_DIR@" @@ -20,6 +22,7 @@ INPUT_FILES=( INPUT_URL="https://raw.githubusercontent.com/sanssecours/rawdata/master/YAML" export LD_LIBRARY_PATH="$BUILD_DIRECTORY/lib" +# -- Functions ----------------------------------------------------------------------------------------------------------------------------- cleanup() { rm -rf "$DATA_DIRECTORY" @@ -116,6 +119,8 @@ benchmark() { done } +# -- Main ---------------------------------------------------------------------------------------------------------------------------------- + trap cleanup EXIT INT QUIT TERM check_environment
Update dfilemaker.1.md
@@ -6,7 +6,7 @@ dfilemaker - distributed random file generation program # SYNOPSIS -dfilemaker [options] path +**dfilemaker [OPTION] PATH...** # DESCRIPTION dfilemaker is a tool for generating files and file trees which contain files @@ -14,6 +14,8 @@ suitable for testing. # OPTIONS +**NOTE: I cannot find these options in the code. I am not sure that they actually exist? Unless there is another version of the code somewhere?** + -d, \--depth=*min*-*max* : Specify the depth of the filesystem tree to generate. The depth will be selected at random within the bounds of min and max. The default depth @@ -54,6 +56,7 @@ suitable for testing. # SEE ALSO +`dchmod` (1). `dcmp` (1). `dcp` (1). `dfilemaker` (1).
libhfuzz/instrument: also instrument 1-byte const values
@@ -237,7 +237,6 @@ void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) { /* Standard __sanitizer_cov_trace_const_cmp wrappers */ void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) { - /* No need to report back 1 byte comparisons */ hfuzz_trace_cmp1_internal((uintptr_t)__builtin_return_address(0), Arg1, Arg2); }
oc_cred: free allocated cred objects for a failed request
@@ -417,6 +417,8 @@ oc_sec_add_new_cred(size_t device, bool roles_resource, oc_tls_peer_t *client, roles_resource) < 0) { if (roles_resource) { oc_sec_free_role(cred, client); + } else { + oc_sec_remove_cred(cred, device); } return -1; } @@ -1062,7 +1064,7 @@ delete_cred(oc_request_t *request, oc_interface_mask_t iface_mask, void *data) int credid = 0; if (ret != -1) { credid = (int)strtoul(query_param, NULL, 10); - if (credid != 0) { + if (credid >= 0) { if (!roles_resource) { if (oc_sec_remove_cred_by_credid(credid, request->resource->device)) { success = true;
fix low-pass filtering of the setpoint derivative
@@ -194,10 +194,10 @@ float pid(int x) { float lpf2(float in, int num); static float dlpf[3] = {0}; static float setpoint_derivative[3]; - - setpoint_derivative[x] = (setpoint[x] - lastsetpoint[x]) * current_kd[x] * timefactor; #ifdef RX_SMOOTHING - lpf(&setpoint_derivative[x], setpoint_derivative[x], FILTERCALC(LOOPTIME * (float)1e-6, 1.0f / rx_smoothing_hz(RX_PROTOCOL))); + lpf(&setpoint_derivative[x], ((setpoint[x] - lastsetpoint[x]) * current_kd[x] * timefactor), FILTERCALC(LOOPTIME * (float)1e-6, 1.0f / rx_smoothing_hz(RX_PROTOCOL))); +#else + setpoint_derivative[x] = (setpoint[x] - lastsetpoint[x]) * current_kd[x] * timefactor; #endif dterm = (setpoint_derivative[x] * stickAccelerator[x] * transitionSetpointWeight[x]) - ((gyro[x] - lastrate[x]) * current_kd[x] * timefactor); lastsetpoint[x] = setpoint[x];
Start fixing lovr.graphics.fill
@@ -247,6 +247,7 @@ const char* lovrFontFragmentShader = "" const char* lovrFillVertexShader = "" "vec4 position(mat4 projection, mat4 transform, vec4 vertex) { \n" +" texCoord.x = texCoord.x * (1. / lovrViewportCount) + (lovrViewID * 1. / float(lovrViewportCount)); \n" " return vertex; \n" "}";
Send a challenge response even if there is a pending challenge.
@@ -3194,7 +3194,7 @@ int picoquic_prepare_packet(picoquic_cnx_t* cnx, path_id = 0; break; } - else if (path_id < 0 && cnx->path[i]->path_is_activated) { + else if (path_id < 0) { if (cnx->path[i]->response_required) { path_id = i; } else if (cnx->path[i]->challenge_required) {
Fix build in CentOS 7
@@ -542,7 +542,12 @@ static void arch_traceAnalyzeData(run_t* run, pid_t pid) { static void arch_traceSaveData(run_t* run, pid_t pid) { char instr[_HF_INSTR_SZ] = "\x00"; +#if defined(__GNUC__) && ((__GNUC__ < 5) || (__GNUC__ == 5 && __GNUC_MINOR__ < 1)) + siginfo_t si; + bzero(&si, sizeof(si)); +#else siginfo_t si = {}; +#endif if (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) == -1) { PLOG_W("Couldn't get siginfo for pid %d", pid);
ioat: driver module improvements and selftest
@@ -104,14 +104,15 @@ errval_t ioat_device_poll(void) #define BUFFER_SIZE (1<<22) +uint32_t done = 0; + static void impl_test_cb(errval_t err, dma_req_id_t id, void *arg) { debug_printf("impl_test_cb\n"); assert(memcmp(arg, arg + BUFFER_SIZE, BUFFER_SIZE) == 0); debug_printf("test ok\n"); - memset(arg, 0, BUFFER_SIZE * 2); - memset(arg, 0xA5, BUFFER_SIZE); + done = 1; } static void impl_test(struct ioat_dma_device *dev) @@ -132,19 +133,21 @@ static void impl_test(struct ioat_dma_device *dev) err = vspace_map_one_frame(&buf, id.bytes, frame, NULL, NULL); assert(err_is_ok(err)); + uint64_t address = id.base; if (driverkit_iommu_present()) { - id.base = (lpaddr_t)buf; + address = (lpaddr_t)buf; + debug_printf("Setting id.base to %lx\n", address); } memset(buf, 0, id.bytes); memset(buf, 0xA5, BUFFER_SIZE); - assert(memcmp(buf, buf + BUFFER_SIZE, BUFFER_SIZE)); + struct dma_req_setup setup = { .args.memcpy = { - .src = id.base, - .dst = id.base + BUFFER_SIZE, + .src = address, + .dst = address + BUFFER_SIZE, .bytes = BUFFER_SIZE, }, .type = DMA_REQ_TYPE_MEMCPY, @@ -153,17 +156,30 @@ static void impl_test(struct ioat_dma_device *dev) }; int reps = 10; do { + memset(buf, 0, id.bytes); + memset(buf, reps + 2, BUFFER_SIZE); + assert(memcmp(buf, buf + BUFFER_SIZE, BUFFER_SIZE)); + debug_printf("!!!!!! NEW ROUND\n"); dma_req_id_t rid; err = ioat_dma_request_memcpy((struct dma_device *)dev, &setup, &rid); assert(err_is_ok(err)); - uint32_t i = 10; - while(i--) { + done = 0; + while(done == 0) { ioat_dma_device_poll_channels((struct dma_device *)dev); } +#if 0 + if (reps == 1) { + debug_printf("using phys addr!\n"); + setup.args.memcpy.src = id.base; + setup.args.memcpy.dst = id.base + BUFFER_SIZE; + } +#endif } while(reps--); + + } #endif @@ -228,6 +244,12 @@ static errval_t init(struct bfdriver_instance *bfi, uint64_t flags, iref_t* dev) debug_printf("IOMMU PRESENT: %u", driverkit_iommu_present()); if (driverkit_iommu_present()) { + + struct vnode_identity vid; + err = invoke_vnode_identify(cap_vroot, &vid); + assert(err_is_ok(err)); + debug_printf("[ioat] using ptable root: %lx\n", vid.base); + err = driverkit_iommu_create_domain(cap_vroot, devid); if (err_is_fail(err)) { DEBUG_ERR(err, "failed to create the iommu domain\n");
gbp2: Fix typo in condition
@@ -383,6 +383,8 @@ gbp_lpm_classify_inline (vlib_main_t * vm, to_next += 1; n_left_from -= 1; n_left_to_next -= 1; + ip4_0 = NULL; + ip6_0 = NULL; next0 = GPB_LPM_CLASSIFY_DROP; b0 = vlib_get_buffer (vm, bi0); @@ -441,7 +443,7 @@ gbp_lpm_classify_inline (vlib_main_t * vm, lbi0 = ip4_fib_forwarding_lookup (fib_index0, &ip4_0->src_address); } - else if (DPO_PROTO_IP4 == dproto) + else if (DPO_PROTO_IP6 == dproto) { lbi0 = ip6_fib_table_fwding_lookup (&ip6_main, fib_index0, &ip6_0->src_address);
create a socket of the family corresponding to the resolved sockaddr
@@ -185,7 +185,10 @@ void tunnel_proceed_read(struct st_h2o_tunnel_t *_tunnel) h2o_tunnel_t *h2o_open_udp_tunnel_from_sa(h2o_loop_t *loop, struct sockaddr *addr, socklen_t len) { int fd; - if ((fd = socket(PF_INET, SOCK_DGRAM, 0)) == -1) + + assert(addr->sa_family == AF_INET || addr->sa_family == AF_INET6); + + if ((fd = socket(addr->sa_family, SOCK_DGRAM, 0)) == -1) return NULL; if (connect(fd, (void *)addr, len) != 0) {
Edit installed hip files after install to fix hardcoded /opt/rocm paths
@@ -148,6 +148,16 @@ else fi fi +function edit_installed_hip_file(){ + if [ -f $installed_hip_file_to_edit ] ; then + # In hipvars.pm HIP_PATH is determined by parent directory of hipcc location. + # Set ROCM_PATH using HIP_PATH + $SUDO sed -i -e "s/\"\/opt\/rocm\"/\"\$HIP_PATH\"/" $installed_file_to_edit + # Set HIP_CLANG_PATH using ROCM_PATH/bin + $SUDO sed -i -e "s/\"\$ROCM_PATH\/llvm\/bin\"/\"\$ROCM_PATH\/bin\"/" $installed_file_to_edit + fi +} + # ----------- Install only if asked ---------------------------- if [ "$1" == "install" ] ; then cd $BUILD_DIR/build/hipamd @@ -160,4 +170,18 @@ if [ "$1" == "install" ] ; then fi removepatch $AOMP_REPOS/hipamd + # The hip perl scripts have /opt/rocm hardcoded, so fix them after then are installed + # but only if not installing to rocm. + if [ $AOMP_INSTALL_DIR != "/opt/rocm/llvm" ] ; then + SED_INSTALL_DIR=`echo $AOMP_INSTALL_DIR | sed -e 's/\//\\\\\//g' ` + installed_file_to_edit=$AOMP_INSTALL_DIR/bin/hipcc + $(edit_installed_hip_file) + installed_file_to_edit=$AOMP_INSTALL_DIR/bin/hipvars.pm + $(edit_installed_hip_file) + # nothing to change in hipconfig but in case something is added in future, try to fix it + installed_file_to_edit=$AOMP_INSTALL_DIR/bin/hipconfig + $(edit_installed_hip_file) + fi + + fi