message
stringlengths
6
474
diff
stringlengths
8
5.22k
cpack: remove rpath
@@ -40,10 +40,6 @@ set (ALL_PLUGINS ${CPACK_COMPONENTS_ALL}) list(FILTER ALL_PLUGINS INCLUDE REGEX "^libelektra4-.*") string (REPLACE ";" ", " ALL_PLUGINS_STR "${ALL_PLUGINS}") -set (CMAKE_BUILD_WITH_INSTALL_RPATH 1) - -list(APPEND CMAKE_INSTALL_RPATH "${CMAKE_CURRENT_BINARY_DIR}") - set ( PACKAGE_DESCRIPTION "Elektra provides a universal and secure framework to store configuration parameters in a global, hierarchical key database. The core is a small library implemented in C. The plugin-based framework fulfills many configuration-related tasks to avoid any unnecessary code duplication across applications while it still allows the core to stay without any external dependency. Elektra abstracts from cross-platform-related issues with an consistent API, and allows applications to be aware of other applications' configurations, leveraging easy application integration.\n."
[chainmaker]add txHeader data pack function
@@ -29,3 +29,80 @@ wait for its receipt. #include "http2intf.h" #include "boatplatform_internal.h" + + +/*!**************************************************************************** + * @brief channel header packed + * + * @details + * fabric has two types of headers: signature header and channel header, this + * function is the implemention of signature header protobuf pack. + * \n channel header is consist of follow fields: + * 1. type + * 2. version + * 3. timestamp + * 4. channel Id + * 5. transaction Id + * 6. extension + * channel header packed that is protobuf serialize of above fields. + * + * @param tx_ptr + * fabric transaction structure pointer + * + * @param[in] txIdBin + * generate transaction id in #hlfabricSignatureHeaderPacked + * + * @param[out] output_ptr + * A structure pointer to store signature header protobuf serialize data and length. + * In internal of this function, the memory of store serialize data has been alloced, + * caller SHOULD NOT alloc memory for this pointer again. + * + * @return + * Return \c BOAT_SUCCESS if packed successed, otherwise return a failed code. 
+ * + * @see hlfabricSignatureHeaderPacked + ******************************************************************************/ +__BOATSTATIC BOAT_RESULT chainmakerTxHeaderPacked(const BoatChainmakerTx *tx_ptr, + BoatFieldVariable *output_ptr) +{ + _Common__TxHeader TxHeader = COMMON__TX_HEADER__INIT; + BUINT32 pack_len; + BCHAR tx_Id_String[72 + 1]; + + BOAT_RESULT result = BOAT_SUCCESS; + boat_try_declare; + + /* step-1: TxHeader packed */ + + /* -chain_id */ + TxHeader.chain_id = tx_ptr->tx_header.chainId; + + /* sender */ + TxHeader.sender->org_id = tx_ptr.tx_header.sender.orgId; + TxHeader.sender->member_info = tx_ptr.tx_header.sender.memberInfo; + TxHeader.sender->is_full_cert = tx_ptr.tx_header.sender.isFullCert; + + /* -tx_type */ + TxHeader.tx_type = tx_ptr->tx_header.txType; + + /* -timestramp */ + TxHeader.timestamp = tx_ptr->tx_header.timestamp; + + /* -expiration_time */ + TxHeader.expiration_time = tx_ptr->tx_header.expirationTime; + + /* -txID */ + memset(tx_Id_String, 0, sizeof(tx_Id_String)); + UtilityBinToHex(tx_Id_String, tx_ptr->tx_header.txId, 36, BIN2HEX_LEFTTRIM_UNFMTDATA, BIN2HEX_PREFIX_0x_NO, BOAT_FALSE ); + TxHeader.tx_id = tx_Id_String; + + /* pack the channelHeader */ + pack_len = common__tx_header__get_packed_size(&TxHeader); + output_ptr->field_ptr = BoatMalloc(pack_len); + output_ptr->field_len = pack_len; + common__tx_header__pack(&TxHeader, output_ptr->field_ptr); + + return result; +} + +
more alpha at the start hitresult
@@ -26,7 +26,7 @@ class HitResult(FrameObject): if scores == 300: return # [score, x, y, index, alpha, time, go down] - self.hitresults.append([scores, x, y, 0, 20, 0, 3]) + self.hitresults.append([scores, x, y, 0, 40, 0, 3]) def add_to_frame(self, background): i = len(self.hitresults)
Java: supporting jsp-file attribute for servlet. This closes issue on GitHub.
@@ -1214,6 +1214,16 @@ public class Context implements ServletContext, InitParams processXmlInitParam(reg, (Element) child_node); continue; } + + if (tag_name.equals("filter-name") + || tag_name.equals("#text") + || tag_name.equals("#comment")) + { + continue; + } + + log("processWebXml: tag '" + tag_name + "' for filter '" + + filter_name + "' is ignored"); } filters_.add(reg); @@ -1306,6 +1316,22 @@ public class Context implements ServletContext, InitParams reg.setLoadOnStartup(Integer.parseInt(child_node.getTextContent().trim())); continue; } + + if (tag_name.equals("jsp-file")) { + reg.setJspFile(child_node.getTextContent().trim()); + continue; + } + + if (tag_name.equals("servlet-name") + || tag_name.equals("display-name") + || tag_name.equals("#text") + || tag_name.equals("#comment")) + { + continue; + } + + log("processWebXml: tag '" + tag_name + "' for servlet '" + + servlet_name + "' is ignored"); } servlets_.add(reg); @@ -1888,6 +1914,7 @@ public class Context implements ServletContext, InitParams private boolean initialized_ = false; private final List<FilterMap> filters_ = new ArrayList<>(); private boolean system_jsp_servlet_ = false; + private String jsp_file_; private MultipartConfigElement multipart_config_; public ServletReg(String name, Class<?> servlet_class) @@ -1921,6 +1948,21 @@ public class Context implements ServletContext, InitParams trace("ServletReg.init(): " + getName()); + if (jsp_file_ != null) { + setInitParameter("jspFile", jsp_file_); + jsp_file_ = null; + + ServletReg jsp_servlet = name2servlet_.get("jsp"); + + if (jsp_servlet.servlet_class_ != null) { + servlet_class_ = jsp_servlet.servlet_class_; + } else { + setClassName(jsp_servlet.getClassName()); + } + + system_jsp_servlet_ = jsp_servlet.system_jsp_servlet_; + } + if (system_jsp_servlet_) { JasperInitializer ji = new JasperInitializer(); @@ -1972,6 +2014,10 @@ public class Context implements ServletContext, InitParams throw new IllegalStateException("Class already 
initialized"); } + if (jsp_file_ != null) { + throw new IllegalStateException("jsp-file already initialized"); + } + super.setClassName(class_name); } @@ -1985,11 +2031,31 @@ public class Context implements ServletContext, InitParams throw new IllegalStateException("Class already initialized"); } + if (jsp_file_ != null) { + throw new IllegalStateException("jsp-file already initialized"); + } + super.setClassName(servlet_class.getName()); servlet_class_ = servlet_class; getAnnotationMultipartConfig(); } + public void setJspFile(String jsp_file) throws IllegalStateException + { + if (servlet_ != null + || servlet_class_ != null + || getClassName() != null) + { + throw new IllegalStateException("Class already initialized"); + } + + if (jsp_file_ != null) { + throw new IllegalStateException("jsp-file already initialized"); + } + + jsp_file_ = jsp_file; + } + private void getAnnotationMultipartConfig() { if (servlet_class_ == null) { return;
write GPS info to CSV file, even though if user don't want --nmea output
@@ -4670,12 +4670,10 @@ while(0 < restlen) } else if(option->option_code == OPTIONCODE_NMEA) { - if(fh_nmea != NULL) - { + memset(&nmeasentence, 0, OPTIONLEN_MAX); if(option->option_length >= 48) { nmealen = option->option_length; - memset(&nmeasentence, 0, OPTIONLEN_MAX); memcpy(&nmeasentence, &option->data, option->option_length); csc = 0; csn = 0; @@ -4701,7 +4699,6 @@ while(0 < restlen) } } } - } optr += option->option_length +padding +OH_SIZE; restlen -= option->option_length +padding +OH_SIZE; }
Don't crash on incorrect +grab product type in marks.
(wrap-error call-result) :: =/ product=vase vase.u.call-result - :: TODO: why do we check nesting here? + :: +grab might produce the wrong type :: - ?> (~(nest ut p.mark-sample) | p.product) + ?. (~(nest ut p.mark-sample) | p.product) + %- return-error + :~ leaf+"ford: %vale failed" + leaf+"+grab has wrong type in mark {<mark>} on disc {<disc>}" + == :: check mold idempotence; if different, nest fail :: ?: =(q.product input)
downstream: added a check to only set the current coroutine when the downstream is running in async mode.
@@ -257,6 +257,7 @@ struct flb_connection *flb_downstream_conn_get(struct flb_downstream *stream) flb_sockfd_t connection_fd; struct flb_connection *connection; int transport; + struct flb_coro *coroutine; int result; transport = stream->base.transport; @@ -273,11 +274,18 @@ struct flb_connection *flb_downstream_conn_get(struct flb_downstream *stream) connection_fd = FLB_INVALID_SOCKET; } + if (flb_downstream_is_async(stream)) { + coroutine = flb_coro_get(); + } + else { + coroutine = NULL; + } + connection = flb_connection_create(connection_fd, FLB_DOWNSTREAM_CONNECTION, (void *) stream, flb_engine_evl_get(), - flb_coro_get()); + coroutine); if (connection == NULL) { return NULL; @@ -296,7 +304,7 @@ struct flb_connection *flb_downstream_conn_get(struct flb_downstream *stream) transport != FLB_TRANSPORT_UNIX_DGRAM ) { flb_connection_reset_connection_timeout(connection); - result = flb_io_net_accept(connection, flb_coro_get()); + result = flb_io_net_accept(connection, coroutine); if (result != 0) { flb_connection_reset_connection_timeout(connection);
improve oidc-gen.desktop file
[Desktop Entry] -Version=1.0 +Version=1.1 Name=oidc-gen Comment=oidc-gen auth code redirect Icon=utilities-terminal Terminal=true Type=Application -Categories=Application;Network; MimeType=x-scheme-handler/edu.kit.data.oidc-agent; +NoDisplay=true +Keywords=oidc-agent;authorization code;code exchange;redirect;
tap: fix flags in custom-dump Type: fix
@@ -579,17 +579,17 @@ static void *vl_api_tap_create_v2_t_print s = format (s, "rx-ring-size %u ", (mp->rx_ring_sz)); if (mp->host_mtu_set) s = format (s, "host-mtu-size %u ", (mp->host_mtu_size)); - if ((mp->tap_flags) & 0x1) + if ((mp->tap_flags) & TAP_API_FLAG_GSO) s = format (s, "gso-enabled "); - if ((mp->tap_flags) & 0x2) + if ((mp->tap_flags) & TAP_API_FLAG_CSUM_OFFLOAD) s = format (s, "csum-offload-enabled "); - if ((mp->tap_flags) & 0x4) + if ((mp->tap_flags) & TAP_API_FLAG_PERSIST) s = format (s, "persist "); - if ((mp->tap_flags) & 0x8) + if ((mp->tap_flags) & TAP_API_FLAG_ATTACH) s = format (s, "attach "); - if ((mp->tap_flags) & 0x16) + if ((mp->tap_flags) & TAP_API_FLAG_TUN) s = format (s, "tun "); - if ((mp->tap_flags) & 0x32) + if ((mp->tap_flags) & TAP_API_FLAG_GRO_COALESCE) s = format (s, "gro-coalesce-enabled "); FINISH; }
Enable build tests for debug and release on Linux and Mac
@@ -18,12 +18,13 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: + config: [release, debug] os: [ubuntu-latest, macOS-latest] steps: - uses: actions/checkout@v1 - - name: build + - name: Test ${{ matrix.os }} ${{ matrix.config }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GIT_PULL_TOKEN: ${{ secrets.GIT_PULL_TOKEN }} - run: ./build.sh + run: ./build-tests.sh ${{ matrix.config }}
abis/linux: Define the F_SEAL_ family
#define F_GETOWNER_UIDS 17 +#define F_ADD_SEALS 1033 +#define F_GET_SEALS 1034 + +#define F_SEAL_SEAL 0x0001 +#define F_SEAL_SHRINK 0x0002 +#define F_SEAL_GROW 0x0004 +#define F_SEAL_WRITE 0x0008 + #define F_RDLCK 0 #define F_WRLCK 1 #define F_UNLCK 2
fix "warning ISO C forbids omitting the middle term of a?:expression" According to the C99 manual 6.5.15 syntax conditional-expression logical-OR-expression logical-OR-expression ? expression :conditional-expression above bnf rules requires expression not be none
@@ -50,7 +50,8 @@ uint64_t get_microcode_version(void) * According to SDM vol 3 Table 9-7. If data_size field of uCode * header is zero, the ucode length is 2000 */ -#define GET_DATA_SIZE(hdptr) ((hdptr)->data_size ? : 2000) +#define GET_DATA_SIZE(hdptr) ((hdptr)->data_size ?\ + ((hdptr)->data_size) : 2000) void acrn_update_ucode(struct vcpu *vcpu, uint64_t v) { uint64_t hva, gpa, gva;
Fix "for loop initial declarations only in C99" compile error
@@ -246,7 +246,8 @@ static int find_buildid(Elf *e, char *buildid) { char *buf = (char *)data->d_buf + 16; size_t length = data->d_size - 16; - for (size_t i = 0; i < length; ++i) { + size_t i = 0; + for (i = 0; i < length; ++i) { sprintf(buildid + (i * 2), "%02hhx", buf[i]); }
Check for V_ASN1_BOOLEAN/V_ASN1_NULL in X509_ATTRIBUTE_get0_data The member value.ptr is undefined for those ASN1 types.
@@ -314,7 +314,9 @@ void *X509_ATTRIBUTE_get0_data(X509_ATTRIBUTE *attr, int idx, ttmp = X509_ATTRIBUTE_get0_type(attr, idx); if (!ttmp) return NULL; - if (atrtype != ASN1_TYPE_get(ttmp)) { + if (atrtype == V_ASN1_BOOLEAN + || atrtype == V_ASN1_NULL + || atrtype != ASN1_TYPE_get(ttmp)) { X509err(X509_F_X509_ATTRIBUTE_GET0_DATA, X509_R_WRONG_TYPE); return NULL; }
Test Java test run on macOS
@@ -435,7 +435,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-latest, windows-latest] + os: [ubuntu-latest, windows-latest, macos-latest] java: ["11", "17"] steps:
Avoid compiler warning when casting the result of `GetProcAddress()` It is most unfortunate that the return type of `GetProcAddress()` is `FARPROC` (which is essentially `intptr_t(*)(void)): this type cannot be cast by GCC without warnings to anything but the generic function pointer type `void(*)(void)`. Let's work around that.
@@ -145,13 +145,13 @@ void _mi_os_init(void) { hDll = LoadLibrary(TEXT("kernelbase.dll")); if (hDll != NULL) { // use VirtualAlloc2FromApp if possible as it is available to Windows store apps - pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2FromApp"); - if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)GetProcAddress(hDll, "VirtualAlloc2"); + pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp"); + if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2"); FreeLibrary(hDll); } hDll = LoadLibrary(TEXT("ntdll.dll")); if (hDll != NULL) { - pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); + pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx"); FreeLibrary(hDll); } if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
OcDataHubLib: axe useless assignment to Status
@@ -213,8 +213,6 @@ SetDataHubEntry ( return EFI_OUT_OF_RESOURCES; } - Status = EFI_OUT_OF_RESOURCES; - // // TODO: We may want to fill header some day. // Currently it is not important.
fix pck: update makefile for rpm again
@@ -507,7 +507,7 @@ else endif DEBIAN_DEV_DEPENDENCIES = "libthemis (= $(VERSION)+$(DEBIAN_CODENAME))" RPM_DEPENDENCIES = openssl -RPM_DEV_DEPENDENCIES = "libthemis = $(VERSION)-1" +RPM_DEV_DEPENDENCIES = "libthemis = "$(shell echo -n "$(VERSION)"|sed s/-/_/g)-1.$(ARCHITECTURE) ifeq ($(shell lsb_release -is 2> /dev/null),Debian) NAME_SUFFIX = $(VERSION)+$(DEBIAN_CODENAME)_$(DEBIAN_ARCHITECTURE).deb
options/rtdl: Support dynamic TLS
@@ -608,9 +608,28 @@ void *accessDtv(SharedObject *object) { Tcb *tcb_ptr; asm ( "mov %%fs:(0), %0" : "=r" (tcb_ptr) ); - __ensure(object->tlsModel == TlsModel::initial); - __ensure(object->tlsIndex < tcb_ptr->dtvSize); - __ensure(tcb_ptr->dtvPointers[object->tlsIndex]); + // We might need to reallocate the DTV. + if(object->tlsIndex >= tcb_ptr->dtvSize) { + // TODO: need to protect runtimeTlsMap against concurrent access. + auto ndtv = frg::construct_n<void *>(getAllocator(), runtimeTlsMap->indices.size()); + memset(ndtv, 0, sizeof(void *) * runtimeTlsMap->indices.size()); + memcpy(ndtv, tcb_ptr->dtvPointers, sizeof(void *) * tcb_ptr->dtvSize); + frg::destruct(getAllocator(), tcb_ptr->dtvPointers); + tcb_ptr->dtvSize = runtimeTlsMap->indices.size(); + tcb_ptr->dtvPointers = ndtv; + } + + // We might need to fill in a new DTV entry. + if(!tcb_ptr->dtvPointers[object->tlsIndex]) { + __ensure(object->tlsModel == TlsModel::dynamic); + + auto buffer = getAllocator().allocate(object->tlsImageSize); + __ensure(!(reinterpret_cast<uintptr_t>(buffer) & (object->tlsAlignment - 1))); + memset(buffer, 0, object->tlsSegmentSize); + memcpy(buffer, object->tlsImagePtr, object->tlsImageSize); + tcb_ptr->dtvPointers[object->tlsIndex] = buffer; + } + return tcb_ptr->dtvPointers[object->tlsIndex]; } @@ -917,8 +936,7 @@ void Loader::_buildTlsMaps() { << ", size: " << object->tlsSegmentSize << ", alignment: " << object->tlsAlignment << frg::endlog; }else{ - // TODO: Implement dynamic TLS. - mlibc::panicLogger() << "rtdl: Dynamic TLS is not supported" << frg::endlog; + object->tlsModel = TlsModel::dynamic; } } }
snap_build and snap_cloud_build running in the same impl_step flow
@@ -34,15 +34,15 @@ set widthCol3 $::env(WIDTHCOL3) set widthCol4 $::env(WIDTHCOL4) if { $impl_flow == "CLOUD_BASE" } { - set cloud_flow TRUE + set merge_flow FALSE set prefix base_ set rpt_dir_prefix $rpt_dir/${prefix} } elseif { $impl_flow == "CLOUD_MERGE" } { - set cloud_flow TRUE + set merge_flow TRUE set prefix merge_ set rpt_dir_prefix $rpt_dir/${prefix} } else { - set cloud_flow FALSE + set merge_flow FALSE set rpt_dir_prefix $rpt_dir/ ## @@ -55,7 +55,7 @@ if { $impl_flow == "CLOUD_BASE" } { ## ## optimizing design -if { $cloud_flow == "TRUE" } { +if { $merge_flow == "TRUE" } { set step ${prefix}opt_design set directive Explore } else { @@ -81,7 +81,7 @@ if { [catch "$command > $logfile" errMsg] } { ## ## Vivado 2017.4 has problems to place the SNAP core logic, if they can place inside the PSL -if { ($vivadoVer >= "2017.4") && ($cloud_flow == "FALSE") } { +if { ($vivadoVer >= "2017.4") && ($merge_flow == "FALSE") } { puts [format "%-*s%-*s%-*s%-*s" $widthCol1 "" $widthCol2 "" $widthCol3 "reload opt_design DCP" $widthCol4 "[clock format [clock seconds] -format {%T %a %b %d %Y}]"] close_project >> $logfile open_checkpoint $dcp_dir/${step}.dcp >> $logfile @@ -90,7 +90,7 @@ if { ($vivadoVer >= "2017.4") && ($cloud_flow == "FALSE") } { ## ## placing design -if { $cloud_flow == "TRUE" } { +if { $merge_flow == "TRUE" } { set step ${prefix}place_design set directive Explore } else { @@ -123,7 +123,7 @@ if { [catch "$command > $logfile" errMsg] } { ## ## physical optimizing design -if { $cloud_flow == "TRUE" } { +if { $merge_flow == "TRUE" } { set step ${prefix}phys_opt_design set directive Explore } else { @@ -149,7 +149,7 @@ if { [catch "$command > $logfile" errMsg] } { ## ## routing design -if { $cloud_flow == "TRUE" } { +if { $merge_flow == "TRUE" } { set step ${prefix}route_design set directive Explore } else { @@ -175,7 +175,7 @@ if { [catch "$command > $logfile" errMsg] } { ## ## physical optimizing routed design -if { $cloud_flow == "TRUE" } { +if { 
$merge_flow == "TRUE" } { set step ${prefix}opt_routed_design set directive Explore } else {
BugID:18335647:comp Config.in optimization: sal
-config SAL_ENABLED - bool "FEATURE_SAL_ENABLED" +config AOS_COMP_SAL + bool "AOS_COMP_SAL" default n + select AOS_COMP_SAL_MODULE_gt202 if SAL_MODULE = "wifi.gt202" + select AOS_COMP_DEVICE_SAL_MK3060 if SAL_MODULE = "wifi.mk3060" + select AOS_COMP_DEVICE_SAL_ESP8266 if SAL_MODULE = "wifi.esp8266" + select AOS_COMP_DEVICE_SAL_BK7231 if SAL_MODULE = "wifi.bk7231" + select AOS_COMP_DEVICE_SAL_SIM800 if SAL_MODULE = "gprs.sim800"
Work around Travis "virtual memory exhausted" error One particular build was running out of memory. By swapping to debug mode we reduce the optimisation level which should reduce the amount of memory required. [extended tests]
@@ -121,7 +121,7 @@ matrix: sources: - ubuntu-toolchain-r-test compiler: gcc-5 - env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-aria -DPEDANTIC" OPENSSL_TEST_RAND_ORDER=0 + env: EXTENDED_TEST="yes" CONFIG_OPTS="--debug no-asm enable-ubsan enable-rc5 enable-md2 enable-aria -DPEDANTIC" OPENSSL_TEST_RAND_ORDER=0 - os: linux addons: apt:
Enhance doxygen docy.
@@ -2130,14 +2130,13 @@ tsReal TINYSPLINE_API ts_distance(const tsReal *x, const tsReal *y, * @{ */ /** - * Copies the values of \p x (a vector with dimensionality \p dim) to \p out (a - * vector with dimensionality greater than or equal to \c 3). If \p dim is less - * than \c 3, the remaining components of \p out (up to and including the third - * component) are set to \c 0. Superfluous components in \p x (i.e., if \p dim - * is greater than \c 3) are ignored. + * Copies the values of vector \p x (a vector with dimensionality \p dim) to + * vector \p out (a vector with dimensionality \c 3). If \p dim is less than \c + * 3, the remaining values of \p out are set to \c 0. Superfluous values in \p + * x (i.e., if \p dim is greater than \c 3) are ignored. * * @pre - * \p out has at least dimensionality \c 3. + * \p out has dimensionality \c 3. * @param[in] x * Vector to read the values from. * @param[in] dim @@ -2159,7 +2158,7 @@ ts_vec3_set(const tsReal *x, * @param[in] y * Second vector. * @param[in] dim - * Dimensionality of \p x and \p y. + * Dimensionality of \p x, \p y, and \p out. * @param[out] out * Result vector. Can be same as \p x or \p y, i.e., the result can be * stored in-place. @@ -2179,7 +2178,7 @@ ts_vec_add(const tsReal *x, * @param[in] y * Second vector. * @param[in] dim - * Dimensionality of \p x and \p y. + * Dimensionality of \p x, \p y, and \p out. * @param[out] out * Result vector. Can be same as \p x or \p y, i.e., the result can be * stored in-place. @@ -2212,11 +2211,9 @@ ts_vec_dot(const tsReal *x, /** * Computes the cross product (also known as vector product or directed area - * product) of \p x and \p y (vectors with dimensionality greater than or equal - * to \c 3). Superfluous components in \p x or \p y (i.e., dimensionality is - * greater than \c 3) are ignored. + * product) of the vectors \p x and \p y. * - * @pre \p x and \p y have at least dimensionality \c 3. + * @pre \p x and \p y have dimensionality \c 3. 
* @param[in] x * First vector. * @param[in] y
multiline: cri: fix typo
@@ -73,7 +73,7 @@ struct flb_ml_parser *flb_ml_parser_cri(struct flb_config *config) NULL); /* parser name */ if (!mlp) { - flb_error("[multiline] could not create 'docker mode'"); + flb_error("[multiline] could not create 'cri mode'"); return NULL; }
Disable remote-profile import on Windows OS. Until can be addressed. This is a merge from 3.0RC.
@@ -391,9 +391,10 @@ QvisHostProfileWindow::CreateWindowContents() launchProfilesGroup = CreateLaunchProfilesGroup(); machineTabs->addTab(launchProfilesGroup, tr("Launch Profiles")); - +#ifndef WIN32 remoteProfilesGroup = CreateRemoteProfilesGroup(); masterWidget->addTab(remoteProfilesGroup, tr("Remote Profiles")); +#endif ((DropListWidget*)hostList)->window = this; }
fix compilatin issue on macOS 10.15
@@ -93,7 +93,8 @@ class CxPlatWatchdog { } public: CxPlatWatchdog(uint32_t WatchdogTimeoutMs) : TimeoutMs(WatchdogTimeoutMs) { - CXPLAT_THREAD_CONFIG Config = { 0 }; + CXPLAT_THREAD_CONFIG Config; + memset(&Config, 0, sizeof(CXPLAT_THREAD_CONFIG)); Config.Name = "cxplat_watchdog"; Config.Callback = WatchdogThreadCallback; Config.Context = this;
Add -march=skylake-avx512 to flags if target is skylake x
@@ -8,6 +8,13 @@ endif endif endif +ifeq ($(CORE), SKYLAKEX) +ifndef NO_AVX512 +CCOMMON_OPT += -march=skylake-avx512 +FCOMMON_OPT += -march=skylake-avx512 +endif +endif + ifeq ($(OSNAME), Interix) ARFLAGS = -m x64 endif
Bump superuser_reserved_connections to 10 As requested from field, 3 superuser connections is not enough for gpdb when customers run superuser maintenance scripts. 10 is the same value as the resource group admin_group's concurrency default limit.
@@ -1890,7 +1890,7 @@ static struct config_int ConfigureNamesInt[] = NULL }, &ReservedBackends, - 3, RESERVED_FTS_CONNECTIONS, MAX_BACKENDS, + 10, RESERVED_FTS_CONNECTIONS, MAX_BACKENDS, NULL, NULL, NULL },
[chainmaker][#436]add chain_id host_name and org_id
@@ -27,15 +27,21 @@ static BOAT_RESULT chainmakerWalletPrepare(void) wallet_config.user_prikey_cfg.prikey_genMode = BOAT_WALLET_PRIKEY_GENMODE_EXTERNAL_INJECTION; wallet_config.user_prikey_cfg.prikey_type = BOAT_WALLET_PRIKEY_TYPE_SECP256R1; wallet_config.user_prikey_cfg.prikey_format = BOAT_WALLET_PRIKEY_FORMAT_PKCS; - wallet_config.user_prikey_cfg.prikey_content.field_ptr = (BUINT8 *)chainmaker_key_ptr_buf; - wallet_config.user_prikey_cfg.prikey_content.field_len = strlen(chainmaker_key_ptr_buf) + 1; + wallet_config.user_prikey_cfg.prikey_content.field_ptr = (BUINT8 *)chainmaker_sign_key_buf; + wallet_config.user_prikey_cfg.prikey_content.field_len = strlen(chainmaker_sign_key_buf) + 1; //set user cert context - wallet_config.user_cert_cfg.length = strlen(chainmaker_cert_ptr_buf); - memcpy(wallet_config.user_cert_cfg.content, chainmaker_cert_ptr_buf, wallet_config.user_cert_cfg.length); - strncpy(wallet_config.node_url_cfg, TEST_CHAINMAKER_NODE_URL, strlen(TEST_CHAINMAKER_NODE_URL)); + wallet_config.user_cert_cfg.length = strlen(chainmaker_sign_cert_buf); + memcpy(wallet_config.user_cert_cfg.content, chainmaker_sign_cert_buf, wallet_config.user_cert_cfg.length); + //tls ca cert + wallet_config.tls_ca_cert_cfg.length = strlen(chainmaker_ca_cert_buf) + 1; + memcpy(wallet_config.tls_ca_cert_cfg.content, chainmaker_ca_cert_buf, wallet_config.tls_ca_cert_cfg.length); + strncpy(wallet_config.node_url_cfg, TEST_CHAINMAKER_NODE_URL, strlen(TEST_CHAINMAKER_NODE_URL)); + strncpy(wallet_config.host_name_cfg, TEST_CHAINMAKER_HOST_NAME, strlen(TEST_CHAINMAKER_HOST_NAME)); + strncpy(wallet_config.chain_id_cfg, TEST_CHAINMAKER_CHAIN_ID, strlen(TEST_CHAINMAKER_CHAIN_ID)); + strncpy(wallet_config.org_id_cfg, TEST_CHAINMAKER_ORG_ID, strlen(TEST_CHAINMAKER_ORG_ID)); // create wallet #if defined(USE_ONETIME_WALLET) index = BoatWalletCreate(BOAT_PROTOCOL_CHAINMAKER, NULL, &wallet_config, sizeof(BoatHlchainmakerWalletConfig));
remote: extract callbacks on push
@@ -722,6 +722,7 @@ static VALUE rb_git_remote_push(int argc, VALUE *argv, VALUE self) Data_Get_Struct(self, git_remote, remote); + rugged_remote_init_callbacks_and_payload_from_options(rb_options, &opts.callbacks, &payload); rugged_remote_init_custom_headers(rb_options, &opts.custom_headers); rugged_remote_init_proxy_options(rb_options, &opts.proxy_opts); init_pb_parallelism(rb_options, &opts);
DPDK: CVE-2018-1059 For further details, please see
@@ -25,14 +25,14 @@ DPDK_MLX5_PMD ?= n B := $(DPDK_BUILD_DIR) I := $(DPDK_INSTALL_DIR) -DPDK_VERSION ?= 18.02 +DPDK_VERSION ?= 18.02.1 PKG_SUFFIX ?= vpp1 DPDK_BASE_URL ?= http://fast.dpdk.org/rel DPDK_TARBALL := dpdk-$(DPDK_VERSION).tar.xz DPDK_TAR_URL := $(DPDK_BASE_URL)/$(DPDK_TARBALL) DPDK_17.11_TARBALL_MD5_CKSUM := 53ee9e054a8797c9e67ffa0eb5d0c701 -DPDK_18.02_TARBALL_MD5_CKSUM := ca13077a014a2102c6e10153dfa3b920 -DPDK_SOURCE := $(B)/dpdk-$(DPDK_VERSION) +DPDK_18.02.1_TARBALL_MD5_CKSUM := 3bbb5468f662e1f7472d4abc5c4cf08e +DPDK_SOURCE := $(B)/dpdk-stable-$(DPDK_VERSION) MACHINE=$(shell uname -m) NASM_BASE_URL := http://www.nasm.us/pub/nasm/releasebuilds
Modified CB project file to enable BlastEm GDB
</MakeCommands> <Build> <Target title="debug"> - <Option output="out/rom.bin" prefix_auto="0" extension_auto="0" /> + <Option output="out/rom.out" prefix_auto="0" extension_auto="0" /> <Option object_output="." /> <Option deps_output="." /> <Option type="1" /> <Option compilerVar="CC" /> </Unit> <Extensions> - <code_completion /> + <code_completion> + <search_path add="D:\Apps\SGDK\inc" /> + </code_completion> <envvars /> <debugger> <remote_debugging> - <options conn_type="-1" ip_address="localhost" ip_port="6868" /> + <options conn_type="0" serial_baud="115200" ip_address="localhost" ip_port="1234" /> </remote_debugging> <remote_debugging target="debug"> - <options conn_type="0" serial_baud="115200" ip_address="localhost" ip_port="6868" /> + <options conn_type="0" serial_baud="115200" ip_address="localhost" ip_port="1234" /> </remote_debugging> <remote_debugging target="release"> - <options conn_type="0" serial_baud="115200" ip_address="localhost" ip_port="6868" /> + <options conn_type="0" serial_baud="115200" ip_address="localhost" ip_port="1234" /> </remote_debugging> </debugger> <DoxyBlocks>
vlibmemory: coverity woes Coverity complains about resource leak after open when fd gets 0 with below warning. off_by_one: Testing whether handle tfd is strictly greater than zero is suspicious. tfd leaks when it is zero. It is right. 0 is a valid fd. -1 is not.
@@ -552,7 +552,7 @@ vl_map_shmem (const char *region_name, int is_vlib) while (nanosleep (&ts, &tsrem) < 0) ts = tsrem; tfd = open ((char *) api_name, O_RDWR); - if (tfd > 0) + if (tfd >= 0) break; } vec_free (api_name);
Add new lines at the end of files.
@@ -217,12 +217,6 @@ TEST_F(MultipleLogManagersTests, PrivacyGuardSharedWithTwoInstancesCoexist) lm1->SetDataInspector(privacyGuard); lm2->SetDataInspector(privacyGuard); - lm1->SetContext("test1", "abc"); - - lm2->GetSemanticContext().SetAppId("123"); - - ILogger* l1a = lm1->GetLogger("aaa"); - ILogger* l2a = lm2->GetLogger("aaa", "aaa-source"); EventProperties l2a1p("l2a1"); l2a1p.SetProperty("Field1", "http://www.microsoft.com"); //DataConcernType::Url @@ -234,6 +228,7 @@ TEST_F(MultipleLogManagersTests, PrivacyGuardSharedWithTwoInstancesCoexist) privacyConcernLogCount = 0; + ILogger* l1a = lm1->GetLogger("aaa"); EventProperties l1a1p("l1a1"); l1a1p.SetProperty("Field1", "Some%2eone%40Microsoft%2ecom"); //ConcernType::InternalEmailAddress //As happens in escaped URLs l1a1p.SetProperty("Field2", "[email protected]"); //ConcernType::InternalEmailAddress
ci fix typo in formatting action
@@ -21,7 +21,7 @@ jobs: env: GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}' - run: exit 1 - if: steps.check.outputs.triggered != 'true' + if: steps.check-comment.outputs.triggered == 'false' - name: Install astyle if: success() run: |
Fixed issue with NVM XML invalid token parsing
@@ -143,11 +143,12 @@ int tokenize_and_copy_key_value_pair( { wchar_t *tok = line; int index = 0; + int vindex = 0; memset(key, 0, key_sz); memset(val, 0, val_sz); - while (tok[index] != L'\0' && ((tok[index] != L'=') || (tok[index] != L':'))) + while (tok[index] != L'\0' && ((tok[index] != L'=') && (tok[index] != L':'))) { if(index < (key_sz-1)) key[index] = tok[index]; @@ -162,8 +163,9 @@ int tokenize_and_copy_key_value_pair( while (tok[index] != L'\0') { if (index < (val_sz - 1)) - val[index] = tok[index]; + val[vindex] = tok[index]; ++index; + ++vindex; } return 0;
test: h2o user must be a non-root user in order to test h2o_socket_ebpf_setup()
@@ -35,9 +35,11 @@ my $quic_port = empty_port({ }); sub spawn_my_h2o { + my $username = getpwuid($ENV{SUDO_UID}); return spawn_h2o({ opts => [qw(--mode=worker)], conf => << "EOT", +user: $username usdt-selective-tracing: ON listen: type: quic
utils: Simplify discard max size calculation
@@ -77,8 +77,8 @@ int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr, uint64_t length) { struct ocf_submit_io_wait_context cntx = { }; - uint32_t bytes; - uint32_t max_length = ~0; + uint64_t bytes; + uint64_t max_length = (uint32_t)~0; ENV_BUG_ON(env_memset(&cntx, sizeof(cntx), 0)); env_atomic_set(&cntx.rq_remaining, 1); @@ -92,10 +92,7 @@ int ocf_submit_obj_discard_wait(ocf_data_obj_t obj, uint64_t addr, break; } - if (length > max_length) - bytes = max_length; - else - bytes = length; + bytes = min(length, max_length); env_atomic_inc(&cntx.rq_remaining);
Update for version 0.4.0. Version 0.3.0 never really happened, as no release binaries were ever generated.
# Changelog All notable changes to this project will be documented in this file. -## 0.3.0 - present +## 0.4.0 - present +- Add amalgamated build to janet for easier embedding. +- Add os/date function +- Add slurp and spit to core library. - Added this changelog. - Added peg module (Parsing Expression Grammars) - Move hand written documentation into website repository.
driver/wpc/p9221.h: Format with clang-format BRANCH=none TEST=none
* found in the LICENSE file. */ - /* * IDT P9221-R7 Wireless Power Receiver driver definitions. */ #include "charge_manager.h" #include "task.h" - /* ========== Variant-specific configuration ============ */ #define P9221_R7_ADDR_FLAGS 0x61 #define P9221_STAT_OV_TEMP BIT(2) #define P9221_STAT_OV_VOLT BIT(1) #define P9221_STAT_OV_CURRENT BIT(0) -#define P9221_STAT_LIMIT_MASK (P9221_STAT_OV_TEMP | \ - P9221_STAT_OV_VOLT | \ - P9221_STAT_OV_CURRENT) +#define P9221_STAT_LIMIT_MASK \ + (P9221_STAT_OV_TEMP | P9221_STAT_OV_VOLT | P9221_STAT_OV_CURRENT) /* * Interrupt/Status flags for P9221R7 */ #define P9221R7_STAT_OVV BIT(1) #define P9221R7_STAT_OVC BIT(0) #define P9221R7_STAT_MASK 0x1FFF -#define P9221R7_STAT_CC_MASK (P9221R7_STAT_CCRESET | \ - P9221R7_STAT_PPRCVD | \ - P9221R7_STAT_CCERROR | \ - P9221R7_STAT_CCDATARCVD | \ - P9221R7_STAT_CCSENDBUSY) -#define P9221R7_STAT_LIMIT_MASK (P9221R7_STAT_UV | \ - P9221R7_STAT_OVV | \ - P9221R7_STAT_OVT | \ +#define P9221R7_STAT_CC_MASK \ + (P9221R7_STAT_CCRESET | P9221R7_STAT_PPRCVD | P9221R7_STAT_CCERROR | \ + P9221R7_STAT_CCDATARCVD | P9221R7_STAT_CCSENDBUSY) +#define P9221R7_STAT_LIMIT_MASK \ + (P9221R7_STAT_UV | P9221R7_STAT_OVV | P9221R7_STAT_OVT | \ P9221R7_STAT_OVC) #define P9221_DC_ICL_BPP_MA 1000
chip/host/reboot.c: Format with clang-format BRANCH=none TEST=none
@@ -20,8 +20,7 @@ void emulator_reboot(void) ccprints("Emulator would reboot here. Fuzzing: doing nothing."); } #else /* !TEST_FUZZ */ -noreturn -void emulator_reboot(void) +noreturn void emulator_reboot(void) { char *argv[] = { strdup(__get_prog_name()), NULL }; emulator_flush();
AS7262: Update MicroPython example.
from pimoroni_i2c import PimoroniI2C from breakout_as7262 import BreakoutAS7262 -import picoexplorer as display +import picographics import time -width = display.get_width() -height = display.get_height() + +display = picographics.PicoGraphics(picographics.DISPLAY_PICO_EXPLORER) + +width, height = display.get_bounds() bar_width = width // 6 bar_height = height -display_buffer = bytearray(width * height * 2) # 2-bytes per pixel (RGB565) -display.init(display_buffer) - i2c = PimoroniI2C(20, 21) as7 = BreakoutAS7262(i2c) @@ -31,35 +30,44 @@ def draw_bar(v, i): display.rectangle(i * bar_width, current_bar_top, bar_width, current_bar_height - 1) +BLACK = display.create_pen(0, 0, 0) +RED = display.create_pen(255, 0, 0) +ORANGE = display.create_pen(255, 128, 0) +YELLOW = display.create_pen(255, 255, 0) +GREEN = display.create_pen(0, 255, 0) +BLUE = display.create_pen(0, 0, 255) +VIOLET = display.create_pen(255, 0, 255) + + while True: r, o, y, g, b, v = as7.read() m = max(r, o, y, g, b, v) - display.set_pen(0, 0, 0) + display.set_pen(BLACK) display.clear() # Red - display.set_pen(255, 0, 0) + display.set_pen(RED) draw_bar(r / m, 0) # Orange - display.set_pen(255, 128, 0) + display.set_pen(ORANGE) draw_bar(o / m, 1) # Yellow - display.set_pen(255, 255, 0) + display.set_pen(YELLOW) draw_bar(y / m, 2) # Green - display.set_pen(0, 255, 0) + display.set_pen(GREEN) draw_bar(g / m, 3) # Blue - display.set_pen(0, 0, 255) + display.set_pen(BLUE) draw_bar(b / m, 4) # Violet - display.set_pen(255, 0, 255) + display.set_pen(VIOLET) draw_bar(v / m, 5) display.update()
update noexecstack patch for v0.2.20
-Index: OpenBLAS-0.2.13/exports/Makefile -=================================================================== ---- OpenBLAS-0.2.13.orig/exports/Makefile -+++ OpenBLAS-0.2.13/exports/Makefile -@@ -118,6 +118,7 @@ else +--- OpenBLAS-0.2.20/exports/Makefile 2017-07-23 21:03:35.000000000 -0700 ++++ OpenBLAS-0.2.20.patch/exports/Makefile 2017-08-01 13:17:26.000000000 -0700 +@@ -137,6 +137,7 @@ endif ifneq ($(C_COMPILER), LSB) $(CC) $(CFLAGS) $(LDFLAGS) -shared -o ../$(LIBSONAME) \ + -Wl,-z,noexecstack \ -Wl,--whole-archive $< -Wl,--no-whole-archive \ - -Wl,-soname,$(LIBPREFIX).so.$(MAJOR_VERSION) $(EXTRALIB) + -Wl,-soname,$(INTERNALNAME) $(EXTRALIB) $(CC) $(CFLAGS) $(LDFLAGS) -w -o linktest linktest.c ../$(LIBSONAME) $(FEXTRALIB) && echo OK. -@@ -145,6 +146,7 @@ else +@@ -164,6 +165,7 @@ ../$(LIBSONAME) : ../$(LIBNAME).renamed linktest.c endif $(CC) $(CFLAGS) $(LDFLAGS) -shared -o ../$(LIBSONAME) \ @@ -18,7 +16,7 @@ Index: OpenBLAS-0.2.13/exports/Makefile -Wl,--whole-archive $< -Wl,--no-whole-archive \ $(FEXTRALIB) $(EXTRALIB) $(CC) $(CFLAGS) $(LDFLAGS) -w -o linktest linktest.c ../$(LIBSONAME) $(FEXTRALIB) && echo OK. -@@ -164,6 +166,7 @@ ifeq ($(OSNAME), SunOS) +@@ -183,6 +185,7 @@ so : ../$(LIBSONAME) $(CC) $(CFLAGS) $(LDFLAGS) -shared -o ../$(LIBSONAME) \
updates %behn to no-op if its unix-duct is unset
++ emit-doze |= =date=(unit @da) ^+ event-core + :: no-op if .unix-duct has not yet been set + :: + ?~ unix-duct.state + event-core :: make sure we don't try to wake up in the past :: =? date-unit ?=(^ date-unit) `(max now u.date-unit)
python3-bindings: SubscriptionTest.py increase sleep time
@@ -34,7 +34,7 @@ class SubscriptionTester(SysrepoTester): ['python3','SubscriptionTestApp.py']) self.report_pid(self.process.pid) # wait for running data file to be copied - sleep(0.1) + sleep(1) def cancelSubscriptionStep(self): os.kill(self.process.pid, signal.SIGUSR1)
Document OPENSSL_secure_clear_free
CRYPTO_secure_malloc_init, CRYPTO_secure_malloc_initialized, CRYPTO_secure_malloc_done, OPENSSL_secure_malloc, CRYPTO_secure_malloc, OPENSSL_secure_zalloc, CRYPTO_secure_zalloc, OPENSSL_secure_free, -CRYPTO_secure_free, OPENSSL_secure_actual_size, +CRYPTO_secure_free, OPENSSL_secure_clear_free, +CRYPTO_secure_clear_free, OPENSSL_secure_actual_size, CRYPTO_secure_used - secure heap storage =head1 SYNOPSIS @@ -27,6 +28,9 @@ CRYPTO_secure_used - secure heap storage void OPENSSL_secure_free(void* ptr); void CRYPTO_secure_free(void *ptr, const char *, int); + void OPENSSL_secure_clear_free(void* ptr, size_t num); + void CRYPTO_secure_clear_free(void *ptr, size_t num, const char *, int); + size_t OPENSSL_secure_actual_size(const void *ptr); size_t CRYPTO_secure_used(); @@ -76,6 +80,12 @@ It exists for consistency with OPENSSL_secure_malloc() , and is a macro that expands to CRYPTO_secure_free() and adds the C<__FILE__> and C<__LINE__> parameters.. +OPENSSL_secure_clear_free() is similar to OPENSSL_secure_free() except +that it has an additional C<num> parameter which is used to clear +the memory if it was not allocated from the secure heap. +If CRYPTO_secure_malloc_init() is not called, this is equivalent to +calling OPENSSL_clear_free(). + OPENSSL_secure_actual_size() tells the actual size allocated to the pointer; implementations may allocate more space than initially requested, in order to "round up" and reduce secure heap fragmentation. @@ -101,13 +111,17 @@ CRYPTO_secure_allocated() returns 1 if the pointer is in the secure heap, or 0 i CRYPTO_secure_malloc_done() returns 1 if the secure memory area is released, or 0 if not. -OPENSSL_secure_free() returns no values. +OPENSSL_secure_free() and OPENSSL_secure_clear_free() return no values. =head1 SEE ALSO L<OPENSSL_malloc(3)>, L<BN_new(3)> +=head1 HISTORY + +OPENSSL_secure_clear_free() was added in OpenSSL 1.1.0g. + =head1 COPYRIGHT Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
SOVERSION bump to version 5.5.1
@@ -45,7 +45,7 @@ set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_ # with backward compatible change and micro version is connected with any internal change of the library. set(SYSREPO_MAJOR_SOVERSION 5) set(SYSREPO_MINOR_SOVERSION 5) -set(SYSREPO_MICRO_SOVERSION 0) +set(SYSREPO_MICRO_SOVERSION 1) set(SYSREPO_SOVERSION_FULL ${SYSREPO_MAJOR_SOVERSION}.${SYSREPO_MINOR_SOVERSION}.${SYSREPO_MICRO_SOVERSION}) set(SYSREPO_SOVERSION ${SYSREPO_MAJOR_SOVERSION})
Fix query for refetch_glue of stub leaking to internet.
15 June 2017: Wouter - Fix stub zone queries leaking to the internet for harden-referral-path ns checks. + - Fix query for refetch_glue of stub leaking to internet. 13 June 2017: Wouter - Fix #1279: Memory leak on reload when python module is enabled.
zephyr: test: test lis2dw12_init timeout BRANCH=none TEST=zmake configure --test zephyr/projects/drivers Tested-by: Yuval Peress
@@ -57,6 +57,19 @@ static void test_lis2dw12_init__fail_write_soft_reset(void) zassert_equal(EC_ERROR_INVAL, rv, NULL); } +static void test_lis2dw12_init__timeout_read_soft_reset(void) +{ + const struct emul *emul = emul_get_binding(EMUL_LABEL); + struct motion_sensor_t *ms = &motion_sensors[LIS2DW12_SENSOR_ID]; + int rv; + + i2c_common_emul_set_read_fail_reg(lis2dw12_emul_to_i2c_emul(emul), + LIS2DW12_SOFT_RESET_ADDR); + rv = ms->drv->init(ms); + zassert_equal(EC_ERROR_TIMEOUT, rv, "init returned %d but expected %d", + rv, EC_ERROR_TIMEOUT); +} + void test_suite_lis2dw12(void) { ztest_test_suite(lis2dw12, @@ -68,6 +81,9 @@ void test_suite_lis2dw12(void) lis2dw12_setup, unit_test_noop), ztest_unit_test_setup_teardown( test_lis2dw12_init__fail_write_soft_reset, + lis2dw12_setup, unit_test_noop), + ztest_unit_test_setup_teardown( + test_lis2dw12_init__timeout_read_soft_reset, lis2dw12_setup, unit_test_noop)); ztest_run_test_suite(lis2dw12); }
framework/st_things : url safe base64 encoding for artik h/w certificate uuid problem : (cci1+6o5 -> cci1%2B6o5) solution : Base64 Encode -> Base64 URL Safe Encode
@@ -306,5 +306,15 @@ bool things_encrypt_artik_uuid(unsigned char *output) memcpy(output, encode_buf2, 8); THINGS_LOG_D(THINGS_DEBUG, TAG, "output len [%d][%s]", strlen((char *)output), output); + // 6. url safe + int i; + for (i = 0; i < 8; i++) { + if (output[i] == '+') + output[i] = '-'; + else if (output[i] == '/') + output[i] = '_'; + } + THINGS_LOG_D(THINGS_DEBUG, TAG, "urlsafe output len [%d][%s]", strlen((char *)output), output); + return true; } \ No newline at end of file
Add function `sub`
@@ -33,7 +33,8 @@ Provide a lua module containing a selection of useful Text functions. module Foreign.Lua.Module.Text where import Data.Text (Text) -import Foreign.Lua (NumResults, Lua, LuaInteger, ToLuaStack) +import Data.Maybe (fromMaybe) +import Foreign.Lua (FromLuaStack, NumResults, Lua, LuaInteger, ToLuaStack) import Foreign.Lua.FunctionCalling (ToHaskellFunction, newCFunction) import qualified Foreign.Lua as Lua import qualified Data.Text as T @@ -46,6 +47,7 @@ pushModuleText = do addFunction "upper" (return . T.toUpper :: Text -> Lua Text) addFunction "reverse" (return . T.reverse :: Text -> Lua Text) addFunction "len" (return . fromIntegral . T.length :: Text -> Lua LuaInteger) + addFunction "sub" sub return 1 addPackagePreloader :: String -> Lua NumResults -> Lua () @@ -62,3 +64,21 @@ addFunction name fn = do Lua.push name Lua.pushHaskellFunction fn Lua.rawset (-3) + +sub :: Text -> LuaInteger -> OrNil LuaInteger -> Lua Text +sub s i j = + let i' = fromIntegral i + j' = fromIntegral . fromMaybe (-1) $ toMaybe j + fromStart = if i' >= 0 then (max 1 i' ) - 1 else T.length s + i' + fromEnd = if j' <= 0 then (max 1 (-j')) - 1 else T.length s - j' + in return . T.dropEnd fromEnd . T.drop fromStart $ s + +-- A lua value or nil +newtype OrNil a = OrNil { toMaybe :: Maybe a } + +instance FromLuaStack a => FromLuaStack (OrNil a) where + peek idx = do + noValue <- Lua.isnoneornil idx + if noValue + then return (OrNil Nothing) + else OrNil . Just <$> Lua.peek idx
fix skinparser for more rare case
# done by Kysan the gay pp farmer thanks + # return pos of the first char of the comment or -1 if there is no comment def detect_comments(line): ancient_char = '' @@ -27,7 +28,9 @@ escape_dict = {'\a': '/a', '\v': '/v', '\'': "/'", '\"': '/"', - '\\': '/'} + '\\': '/', + ' ': '', + '\ufeff': ''} def raw(text): @@ -41,18 +44,26 @@ def raw(text): return new_string +def getsection(line): + for s in ['[General]', '[Colours]', '[Fonts]', '[CatchTheBeat]', '[Mania]']: + if s in line: + return s[1:-1] + return None + + settings = {} # why not put all sections into it at the end ? class Skin: - def __init__(self, skin_path): + def __init__(self, skin_path, default_path): # sections - self.general = None - self.colours = None - self.fonts = None - self.catchTheBeat = None - self.mania = None + self.general = {} + self.colours = {} + self.fonts = {} + self.catchTheBeat = {} + self.mania = {} self.skin_path = skin_path + self.default_path = default_path self.read() self.parse_general() self.parse_colors() @@ -69,12 +80,17 @@ class Skin: CatchTheBeat = {} Mania = {} + try: with open(self.skin_path + 'skin.ini', 'rb') as file: lines = file.readlines() + except FileNotFoundError: + with open(self.default_path + 'skin.ini', 'rb') as file: + lines = file.readlines() for line in lines: # remove shit like `\r\n` and leading and trailling whitespaces line = line.decode().strip() + line = raw(line) # removing comments line = del_comment(line) @@ -84,14 +100,9 @@ class Skin: continue # if section tag - if line.startswith('[') and line.endswith(']'): - - if line in ['[General]', - '[Colours]', - '[Fonts]', - '[CatchTheBeat]', - '[Mania]']: - section = line[1:-1] + if '[' in line and ']' in line: + section = getsection(line) + if section is not None: continue else: raise Exception('invalid section name found: ' + line[1:-1]) @@ -161,4 +172,4 @@ class Skin: if __name__ == "__main__": - skin = Skin("../../res/skin1/") + skin = Skin("../../res/skin1/", "../../res/skin1/")
Minify HTML report charts.js upon compiling program.
@@ -91,7 +91,12 @@ else endif # Charts.js chartsjs.h: bin2c$(EXEEXT) resources/js/charts.js +if HAS_SEDTR + cat resources/js/charts.js | sed -E "s@(;)\s?//..*@\1@g" | sed -E "s@^[ \t]*//..*@@g" | sed "s/^[ \t]*//" | sed "/^$$/d" | tr -d "\r\n" > resources/js/charts.js.tmp + ./bin2c resources/js/charts.js.tmp src/chartsjs.h charts_js +else ./bin2c resources/js/charts.js src/chartsjs.h charts_js +endif # App.js appjs.h: bin2c$(EXEEXT) resources/js/app.js ./bin2c resources/js/app.js src/appjs.h app_js
error-drop; print interface by name
@@ -1000,7 +1000,8 @@ format_vnet_error_trace (u8 * s, va_list * va) CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *); vnet_error_trace_t *t = va_arg (*va, vnet_error_trace_t *); - s = format (s, " rx:%d", t->sw_if_index); + s = format (s, "rx:%U", format_vnet_sw_if_index_name, + vnet_get_main (), t->sw_if_index); return s; }
Remove ClangCL from name build instructions
@@ -25,7 +25,7 @@ cd build # Configure your build of choice, for example: # x86-64 using NMake -cmake -G "NMake Makefiles" -T ClangCL -DCMAKE_BUILD_TYPE=Release ^ +cmake -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ^ -DCMAKE_INSTALL_PREFIX=.\ -DISA_AVX2=ON -DISA_SSE41=ON -DISA_SSE2=ON .. # x86-64 using Visual Studio solution
removed Realtek drivers from list of know as working drivers
@@ -83,8 +83,6 @@ This list is for information purposes only and should not be regarded as a bindi | SEMPRE WU150-1 | ID 148f:7601 Ralink Technology, Corp. MT7601U Wireless Adapter | | TENDA W311U+ | ID 148f:3070 Ralink Technology, Corp. RT2870/RT3070 Wireless Adapter | | TP-LINK TL-WN722N v1 | ID 0cf3:9271 Qualcomm Atheros Communications AR9271 802.11n | -| ALFA AWUS036H | ID 0bda:8187 Realtek Semiconductor Corp. RTL8187 Wireless Adapter | -| RTL8821AE | Realtek Semiconductor Co., Ltd. RTL8821AE 802.11ac PCIe Wireless Network Adapter | Always verify the actual chipset with 'lsusb' and/or 'lspci'!
More unit tests for asm scanner
@@ -7,7 +7,11 @@ Build { Config { Name = "foo-bar", Tools = { "yasm", "gcc" }, - Env = { }, + Env = { + ASMINCPATH = { + "incy", + }, + }, DefaultOnHost = { native.host_platform }, } }, @@ -22,6 +26,7 @@ Build { END my $obj_file = '__result/foo-asm-541f0091dd54cc51f562e674e41814ab.o'; +#my $obj_file = '__result/foo-asm-0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33.o'; # SHA-1 my $foo_asm = <<END; %include "include1.i" @@ -91,6 +96,47 @@ sub test4() { }); } +sub test5() { + run_test({ + 'tundra.lua' => $build_file, + 'foo.asm' => "\t\tincbin \"bar.bin\"\n", + 'bar.bin' => "something\n" + }, sub { + update_file 'bar.bin', "something else\n"; + }); +} + +sub test6 { + my $files = { + 'tundra.lua' => $build_file, + 'foo.asm' => "\t\tincbin \"bar.bin\"\n", + 'bar.bin' => "include 'bar.i'\n", + 'bar.i' => "whatever\n" + }; + + with_sandbox($files, sub { + run_tundra 'foo-bar'; + my $sig1 = md5_output_file $obj_file; + + update_file 'bar.i', 'something else'; + + run_tundra 'foo-bar'; + my $sig2 = md5_output_file $obj_file; + fail "incbin file was scanned for dependencies" if $sig1 ne $sig2; + }); + +} + +sub test7 { + run_test({ + 'tundra.lua' => $build_file, + 'foo.asm' => "\t\tincbin \"header.i\"\n", + 'incy/header.i' => "\tmov eax, 1\n" + }, sub { + update_file 'incy/header.i', "\tmov eax, 2\n"; + }); +} + deftest { name => "yasm (generic) include scanning", procs => [ @@ -98,5 +144,8 @@ deftest { "Second level" => \&test2, "Parent directory" => \&test3, "Sibling directory" => \&test4, + "Basic incbin" => \&test5, + "incbin doesn't follow" => \&test6, + "Incbin via ASMINCPATH" => \&test7, ], };
Fix byte array formatting error in select expressions
@@ -292,9 +292,8 @@ def format_expr_16(e, format_as_value=True): if case_type == 'DefaultExpression': conds.append('true /* default */') elif case_type == 'Constant' and select_type == 'Type_Bits' and 32 < size and size % 8 == 0: - from utils.hlir import int_to_big_endian_byte_array - value_len, l = int_to_big_endian_byte_array(c.value) - #TODO: the byte array is not correct, not enough numbers + from utils.hlir import int_to_big_endian_byte_array_with_length + l = int_to_big_endian_byte_array_with_length(c.value, size/8) prepend_statement('uint8_t {0}[{1}] = {{{2}}};'.format(gen_var_name(c), size/8, ','.join([str(x) for x in l ]))) conds.append('memcmp({}, {}, {}) == 0'.format(gen_var_name(k), gen_var_name(c), size/8)) elif size <= 32:
consistent markdown on shell scripts in readme
@@ -117,15 +117,15 @@ Once you have done this for all of your existing pools you should now restore yo #### Building on Ubuntu 20.04 ```console -wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb -sudo dpkg -i packages-microsoft-prod.deb -sudo apt-get update \ - sudo apt-get install -y apt-transport-https && \ - sudo apt-get update -sudo apt-get -y install dotnet-sdk-5.0 git cmake build-essential libssl-dev pkg-config libboost-all-dev libsodium-dev libzmq5 -git clone https://github.com/coinfoundry/miningcore -cd miningcore/src/Miningcore -dotnet publish -c Release --framework net5.0 -o ../../build +$ wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb +$ sudo dpkg -i packages-microsoft-prod.deb +$ sudo apt-get update +$ sudo apt-get install -y apt-transport-https +$ sudo apt-get update +$ sudo apt-get -y install dotnet-sdk-5.0 git cmake build-essential libssl-dev pkg-config libboost-all-dev libsodium-dev libzmq5 +$ git clone https://github.com/coinfoundry/miningcore +$ cd miningcore/src/Miningcore +$ dotnet publish -c Release --framework net5.0 -o ../../build ``` #### Building on Windows @@ -148,9 +148,9 @@ Download and install the [.NET 5 SDK](https://dotnet.microsoft.com/download/dotn Create a configuration file <code>config.json</code> as described [here](https://github.com/coinfoundry/miningcore/wiki/Configuration) -``` -cd ../../build -Miningcore -c config.json +```console +$ cd ../../build +$ Miningcore -c config.json ``` ## Running a production pool
Mashes error severity DEBUG1-DEBUG5 into DEBUG
@@ -4652,23 +4652,12 @@ error_severity(int elevel) switch (elevel) { - /* GPDB_12_MERGE_FIXME: In PostgreSQL, DEBUG1-DEBUG5 are all mashed together into just - * DEBUG here. Why are we doing it differently? Revert to upstream version? - */ case DEBUG1: - prefix = gettext_noop("DEBUG1"); - break; case DEBUG2: - prefix = gettext_noop("DEBUG2"); - break; case DEBUG3: - prefix = gettext_noop("DEBUG3"); - break; case DEBUG4: - prefix = gettext_noop("DEBUG4"); - break; case DEBUG5: - prefix = gettext_noop("DEBUG5"); + prefix = gettext_noop("DEBUG"); break; case LOG: case LOG_SERVER_ONLY:
resolver: reformat assert and also output handle
@@ -75,7 +75,7 @@ static void resolverInit (resolverHandle * p, const char * path) static resolverHandle * elektraGetResolverHandle (Plugin * handle, Key * parentKey) { resolverHandles * pks = elektraPluginGetData (handle); - ELEKTRA_ASSERT (pks != NULL, "Unable to retrieve plugin data for %s", keyName(parentKey)); + ELEKTRA_ASSERT (pks != NULL, "Unable to retrieve plugin data for handle %p with parentKey %s", handle, keyName (parentKey)); switch (keyGetNamespace (parentKey)) {
call on_close when peer has disconnected
@@ -1228,6 +1228,8 @@ io_readable(neat_ctx *ctx, neat_flow *flow, struct sockaddr_storage peerAddr; socklen_t peerAddrLen = sizeof(struct sockaddr_storage); int stream_id = -1; + int retval; + char buffer[1]; ssize_t n; struct msghdr msghdr; //Not used when notifications aren't available: @@ -1279,7 +1281,6 @@ io_readable(neat_ctx *ctx, neat_flow *flow, * anything else will. */ if (!flow->operations->on_readable && flow->acceptPending) { - if (socket->stack != NEAT_STACK_UDP && socket->stack != NEAT_STACK_UDPLITE) { neat_log(ctx, NEAT_LOG_WARNING, "%s - READ_WITH_ERROR 1", __func__); return READ_WITH_ERROR; @@ -1606,6 +1607,18 @@ io_readable(neat_ctx *ctx, neat_flow *flow, } } + if (socket->stack == NEAT_STACK_TCP) { + retval = recv(flow->socket->fd, buffer, 1, MSG_PEEK); + if (retval <= 0) { + neat_log(ctx, NEAT_LOG_INFO, "%s - TCP connection peek: %d - connection closed", __func__, retval); + if (flow->operations->on_close) { + READYCALLBACKSTRUCT; + flow->operations->on_close(flow->operations); + } + return READ_WITH_ZERO; + } + } + if (flow->operations->on_readable) { READYCALLBACKSTRUCT; flow->operations->on_readable(flow->operations); @@ -6412,7 +6425,8 @@ neat_notify_close(neat_flow *flow) // Notify application about network changes. // Code should identify what happened. -void neat_notify_network_status_changed(neat_flow *flow, neat_error_code code) +void +neat_notify_network_status_changed(neat_flow *flow, neat_error_code code) { const int stream_id = NEAT_INVALID_STREAM; //READYCALLBACKSTRUCT expects this: @@ -6457,7 +6471,8 @@ neat_close(struct neat_ctx *ctx, struct neat_flow *flow) } // ABORT, D1.2 sect. 3.2.4 -neat_error_code neat_abort(struct neat_ctx *ctx, struct neat_flow *flow) +neat_error_code +neat_abort(struct neat_ctx *ctx, struct neat_flow *flow) { struct linger ling;
stm32: Enabled CPU usage counting
@@ -615,10 +615,8 @@ int threads_schedule(unsigned int n, cpu_context_t *context, void *arg) hal_cpuRestore(context, selected->context); } -#ifndef CPU_STM32 /* Update CPU usage */ threads_cpuTimeCalc(current, selected); -#endif #if 0 /* Test stack usage */
Run test-noaccel
@@ -11,7 +11,7 @@ jobs: - run: sudo apt-get install nasm qemu - run: cd .. && git clone [email protected]:nanovms/ops.git - run: make - - run: make test-nokvm + - run: make test-noaccel nightly-build: docker: @@ -22,7 +22,7 @@ jobs: - run: sudo apt-get update - run: sudo apt-get install nasm qemu - run: make - - run: make test-nokvm + - run: make test-noaccel - run: echo "deb http://packages.cloud.google.com/apt cloud-sdk-jessie main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list - run: curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - @@ -54,7 +54,7 @@ jobs: name: macbuild environment: NANOS_TARGET_ROOT: target-root - command: make test-nokvm + command: make test-noaccel nightly-build-mac: macos:
aqua: build again (still broken)
=> $~ |% +$ versioned-state $% state-0 - state-1 == +$ state-0 $: %0 - pil=pill - assembled=* - tym=@da - fleet-snaps=(map term (map ship pier)) - piers=(map ship pier) - == - +$ state-1 - $: %1 pil=pill assembled=* tym=@da fleet-snaps=(map term fleet) piers=fleet == + :: XX temporarily shadowed, fix and remove + :: + +$ pill + $: boot-ova=* + kernel-ova=(list unix-event) + userspace-ova=(list unix-event) + == :: +$ fleet [ships=(map ship pier) azi=az-state] +$ pier == -- :: -=| state-1 +=| state-0 =* state - =< %- agent:dbug :: wipe fleets and piers rather than give them falsely nulled azimuth state :: %0 - %_ $ - -.old %1 - fleet-snaps.old *(map term fleet) - piers.old *fleet - == - :: - %1 [cards this(state old)] == ::
webp/decode.h,cosmetics: normalize 'flip' comment have it match the other boolean options with 'if true...'
@@ -453,7 +453,7 @@ struct WebPDecoderOptions { int scaled_width, scaled_height; // final resolution int use_threads; // if true, use multi-threaded decoding int dithering_strength; // dithering strength (0=Off, 100=full) - int flip; // flip output vertically + int flip; // if true, flip output vertically int alpha_dithering_strength; // alpha dithering strength in [0..100] uint32_t pad[5]; // padding for later use
dimmid zero value support
@@ -676,7 +676,7 @@ static int get_system_events_from_file(BOOLEAN reversed, BOOLEAN not_matching, U { skip_entry |= check_skip_entry_status_for_event_actionreq_set(not_matching, SYSTEM_EVENT_TYPE_AR_EVENT_GET(event_type_mask), event_message); } - if (dimm_uid != NULL) + if ((dimm_uid != NULL) && (*dimm_uid != 0)) { skip_entry |= check_skip_entry_status_for_dimm_id(not_matching, dimm_uid, event_message); }
Update Lua and add emscripten to docker image.
@@ -58,6 +58,11 @@ RUN apt-get update && \ ./configure && \ make && \ make install && \ + git clone https://github.com/emscripten-core/emsdk.git /opt/emsdk && \ + cd /opt/emsdk && \ + ./emsdk install 2.0.20 && \ + ./emsdk activate 2.0.20 && \ + echo "source $(pwd)/emsdk_env.sh" >> $HOME/.bashrc && \ wget -P /usr/local/src/python36 \ https://www.python.org/ftp/python/3.6.12/Python-3.6.12.tar.xz && \ tar -xf /usr/local/src/python36/Python-3.6.12.tar.xz \ @@ -158,6 +163,11 @@ RUN apt-get update && \ LDFLAGS=-Wl,-rpath=/opt/linux/ruby27/lib && \ make && \ make install && \ + wget -P /usr/local/src/lua54 \ + https://sourceforge.net/projects/luabinaries/files/5.4.2/Linux%20Libraries/lua-5.4.2_Linux54_64_lib.tar.gz && \ + mkdir -p /opt/linux/lua54 && \ + tar -xf /usr/local/src/lua54/lua-5.4.2_Linux54_64_lib.tar.gz \ + -C /opt/linux/lua54 && \ cd /usr/local/src && rm -rf ..?* .[!.]* * # Install macosx build tools (osxcross) and libraries (macports). @@ -201,8 +211,11 @@ RUN mkdir -p /opt/wincross/archives && \ wget -O archives/lua52.zip https://sourceforge.net/projects/luabinaries/files/5.2.4/Windows%20Libraries/Dynamic/lua-5.2.4_Win64_dllw6_lib.zip && \ unzip -d lua52 archives/lua52.zip && \ mkdir lua53 && \ - wget -O archives/lua53.zip https://sourceforge.net/projects/luabinaries/files/5.3.5/Windows%20Libraries/Dynamic/lua-5.3.5_Win64_dllw6_lib.zip && \ + wget -O archives/lua53.zip https://sourceforge.net/projects/luabinaries/files/5.3.6/Windows%20Libraries/Dynamic/lua-5.3.6_Win64_dllw6_lib.zip && \ unzip -d lua53 archives/lua53.zip && \ + mkdir lua54 && \ + wget -O archives/lua54.zip https://sourceforge.net/projects/luabinaries/files/5.4.2/Windows%20Libraries/Dynamic/lua-5.4.2_Win64_dllw6_lib.zip && \ + unzip -d lua54 archives/lua54.zip && \ rm -rf archives # Run environment.
BugID:19075306: Not log mod if input argu is NULL or empty string
static char serverity_name[LOG_NONE] = { 'V', 'A', 'F', 'E', 'W', 'T', 'I', 'D' }; -const char UNKNOWN_BUF[8] = "UNKNOWN"; +const char UNKNOWN_BUF[8] = ""; /* stop filter used in sync log, dedault value LOG_NONE, shall not larger than LOG_NONE */ static uint8_t stop_filter_level = LOG_NONE; @@ -82,19 +82,23 @@ int rt_log(const unsigned char s, const char* mod, const char* f, const unsigned long long ms = aos_now_ms(); if (log_get_mutex()) { const char* rpt_mod = NULL; + char before_mod[2]; if ((mod == NULL) || (0 == strlen(mod))) { rpt_mod = UNKNOWN_BUF; + before_mod[0] = 0; } else { rpt_mod = mod; + before_mod[0] =' '; + before_mod[1] = 0; } #if SYNC_DETAIL_COLOR - printf("%s [%4d.%03d]<%c> %s [%s#%d] : ", + printf("%s [%4d.%03d]<%c>%s%s [%s#%d] : ", log_col_def[s], #else - printf("[%4d.%03d]<%c> %s [%s#%d] : ", + printf("[%4d.%03d]<%c>%s%s [%s#%d] : ", #endif (int)(ms / 1000), - (int)(ms % 1000), serverity_name[s], rpt_mod, f, (int)l); + (int)(ms % 1000), serverity_name[s], before_mod, rpt_mod, f, (int)l); va_start(args, fmt); rc = vprintf(fmt, args); va_end(args); @@ -126,7 +130,7 @@ int rt_log(const unsigned char s, const char *fmt, ...) #if SYNC_LOG_MOD uint16_t mod_len = 0; if ((mod == NULL) || (0 == (mod_len = strlen(mod)))) { - printf("%s ", UNKNOWN_BUF); + /* not record any mode name */ } else if (mod_len <= MOD_MAX_LEN) { printf("%s ", mod); } else {
Don't verify ::Nice() call Does not work with WSL: It's known issue issue:https://st.yandex-team.ru/DEVTOOLS-4896
@@ -677,7 +677,7 @@ void TShellCommand::TImpl::OnFork(TPipes& pipes, sigset_t oldmask, char* const* } if (Nice) { - Y_VERIFY(::Nice(Nice), "nice() failed(%s)", LastSystemErrorText()); + ::Nice(Nice); } if (envp == nullptr) {
Fix cpu fallback temp sensor logic Break the for loop either way so `path` still points to sensor module's hwmon folder.
@@ -291,12 +291,15 @@ bool CPUStats::GetCpuFile() { #ifndef NDEBUG std::cerr << "hwmon: sensor name: " << name << std::endl; #endif - if (name == "coretemp" && find_temp_input(path, input, "Package id 0")) { + if (name == "coretemp") { + find_temp_input(path, input, "Package id 0"); break; } - else if ((name == "zenpower" || name == "k10temp") && find_temp_input(path, input, "Tdie")) { + else if ((name == "zenpower" || name == "k10temp")) { + find_temp_input(path, input, "Tdie"); break; - } else if (name == "atk0110" && find_temp_input(path, input, "CPU Temperature")){ + } else if (name == "atk0110") { + find_temp_input(path, input, "CPU Temperature"); break; } }
(os/cpu-count) should not be defined at all with JANET_REDUCED_OS
@@ -209,6 +209,8 @@ JANET_CORE_FN(os_exit, return janet_wrap_nil(); } +#ifndef JANET_REDUCED_OS + JANET_CORE_FN(os_cpu_count, "(os/cpu-count &opt dflt)", "Get an approximate number of CPUs available on for this process to use. If " @@ -250,7 +252,6 @@ JANET_CORE_FN(os_cpu_count, #endif } -#ifndef JANET_REDUCED_OS #ifndef JANET_NO_PROCESSES
fix vcxproj unittest
<ClCompile Include="$(ProjectDir)\ContextFieldsProviderTests.cpp" /> <ClCompile Include="$(ProjectDir)\ControlPlaneProviderTests.cpp" /> <ClCompile Include="$(ProjectDir)\CorrelationVectorTests.cpp" /> - <ClCompile Include="$(ProjectDir)\DataViewerCollectionTests.cpp" /> <ClCompile Include="$(ProjectDir)\DebugEventSourceTests.cpp" /> <ClCompile Include="$(ProjectDir)\DeviceStateHandlerTests.cpp" /> <ClCompile Include="$(ProjectDir)\DiskLocalStorageTests.cpp" /> </ItemGroup> <ItemGroup Condition = "exists('$(ProjectDir)..\..\lib\modules\dataviewer')"> <ClCompile Include="$(ProjectDir)..\..\lib\modules\dataviewer\tests\unittests\DefaultDataViewerTests.cpp" /> + <ClCompile Include="$(ProjectDir)\DataViewerCollectionTests.cpp" /> </ItemGroup> <Import Project="$(SolutionDir)\build.props" Condition="Exists('$(SolutionDir)\build.props')" /> <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
Use only -O1 with AMD AOCC version of flang to prevent miscompilation of LAPACK codes and tests on Ryzen
@@ -783,6 +783,7 @@ endif ifeq ($(F_COMPILER), FLANG) CCOMMON_OPT += -DF_INTERFACE_FLANG +FCOMMON_OPT += -frecursive ifdef BINARY64 ifdef INTERFACE64 ifneq ($(INTERFACE64), 0) @@ -796,6 +797,11 @@ endif ifeq ($(USE_OPENMP), 1) FCOMMON_OPT += -fopenmp endif +ifeq ($(OSNAME), Linux) +ifeq ($(ARCH), x86_64) +FLANG_VENDOR := $(shell expr `$(FC) --version|cut -f 1 -d "."|head -1`) +endif +endif endif ifeq ($(F_COMPILER), G77) @@ -1270,8 +1276,11 @@ endif override CFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) override PFLAGS += $(COMMON_OPT) $(CCOMMON_OPT) -I$(TOPDIR) -DPROFILE $(COMMON_PROF) - +ifeq ($(FLANG_VENDOR),AOCC) +override FFLAGS += $(filter-out -O2 -O3,$(COMMON_OPT)) -O1 $(FCOMMON_OPT) +else override FFLAGS += $(COMMON_OPT) $(FCOMMON_OPT) +endif override FPFLAGS += $(FCOMMON_OPT) $(COMMON_PROF) #MAKEOVERRIDES =
Fix GPU YetiRank
@@ -66,7 +66,7 @@ void TCatboostOptions::SetLeavesEstimationDefault() { break; } case ELossFunction::YetiRank: { - defaultEstimationMethod = ELeavesEstimation::Gradient; + defaultEstimationMethod = (GetTaskType() == ETaskType::GPU) ? ELeavesEstimation::Newton : ELeavesEstimation::Gradient; defaultGradientIterations = 1; defaultNewtonIterations = 1; break; @@ -86,6 +86,9 @@ void TCatboostOptions::SetLeavesEstimationDefault() { if (treeConfig.LeavesEstimationMethod.NotSet()) { treeConfig.LeavesEstimationMethod = defaultEstimationMethod; + } else { + CB_ENSURE(lossFunctionConfig.GetLossFunction() != ELossFunction::YetiRank, + "At the moment, in the YetiRank mode, changing the leaf_estimation_method parameter is prohibited."); } if (treeConfig.LeavesEstimationIterations.NotSet()) {
Fixed GCC release build
#endif #if defined(ZYDIS_RELEASE) -# if defined(ZYDIS_GNUC) +# if defined(ZYDIS_CLANG) // GCC eagerly evals && RHS, we have to use nested ifs. # if __has_builtin(__builtin_unreachable) # define ZYDIS_UNREACHABLE __builtin_unreachable() # else # define ZYDIS_UNREACHABLE # endif +# elif defined(ZYDIS_GCC) && ((__GNUC__ == 4 && __GNUC_MINOR__ > 4) || __GNUC__ > 4) +# define ZYDIS_UNREACHABLE __builtin_unreachable() # elif defined(ZYDIS_MSVC) # define ZYDIS_UNREACHABLE __assume(0) # else
Add asciinema fuzzing example to README.md
@@ -253,4 +253,7 @@ Currently supported bindings are: * cmake option: `JAVASCRIPT_BINDING` * [README](./swig/javascript/README.md) + ## Fuzzing + An asciinema example describing the process of fuzzing libyang2 with the yangfuzz fuzz harness is available at https://asciinema.org/a/260417. +
ledc test: ignore one case. It fails randomly due to a PCNT bug; the test will be re-enabled when the PCNT problem is resolved
@@ -254,7 +254,8 @@ TEST_CASE("LEDC normal channel and timer config", "[ledc][test_env=UT_T1_LEDC]") } } -TEST_CASE("LEDC set and get frequency", "[ledc][test_env=UT_T1_LEDC][timeout=60]") +// set it ignore: need to debug +TEST_CASE("LEDC set and get frequency", "[ledc][test_env=UT_T1_LEDC][timeout=60][ignore]") { timer_frequency_test(LEDC_CHANNEL_0, LEDC_TIMER_13_BIT, LEDC_TIMER_0, LEDC_HIGH_SPEED_MODE); timer_frequency_test(LEDC_CHANNEL_0, LEDC_TIMER_13_BIT, LEDC_TIMER_1, LEDC_HIGH_SPEED_MODE);
Fix context_realloc_test() to work in builds
@@ -11326,8 +11326,8 @@ static void context_realloc_test(const char *text) { ExtOption options[] = { - { "foo", "<!ELEMENT e EMPTY>"}, - { "bar", "<e/>" }, + { XCS("foo"), "<!ELEMENT e EMPTY>"}, + { XCS("bar"), "<e/>" }, { NULL, NULL } }; int i;
docs/uio: Document StringIO/BytesIO(alloc_size) constructors.
@@ -112,3 +112,18 @@ Classes .. method:: getvalue() Get the current contents of the underlying buffer which holds data. + +.. class:: StringIO(alloc_size) +.. class:: BytesIO(alloc_size) + + Create an empty `StringIO`/`BytesIO` object, preallocated to hold up + to *alloc_size* number of bytes. That means that writing that amount + of bytes won't lead to reallocation of the buffer, and thus won't hit + out-of-memory situation or lead to memory fragmentation. These constructors + are a MicroPython extension and are recommended for usage only in special + cases and in system-level libraries, not for end-user applications. + + .. admonition:: Difference to CPython + :class: attention + + These constructors are a MicroPython extension.
update default configuration file
# Load peers at startup from this file and save peers to this file at shutdown --peerfile /etc/kadnode/peers.txt -#Announce an identifier for the entire runtime of KadNode. +# For authentication via TLS, x509 certificates need to be provided. +# The server needs a tuple of the certificate file and private key file: +# --tls-server-cert mydomain.crt,mydomain.key +# The domain in the Common Name field of the certificate will be announced. # -#--value-id mycomputer.p2p +# The clients doing the lookup need to be provided with the appropiate CA certificates: +# --tls-client-cert /usr/share/ca-certificates/mozilla # -# To prevent others from claiming the same domain you should -# use cryptographic keys for authentication: -# -# 1. Create a public/secret key pairon the console: -# $kadnode --auth-gen-keys -# -# 2. Make sure these two lines are in this file: -# --auth-add-skey mycomputer.p2p:<secret-key> -# --value-id mycomputer.p2p -# -# 3. Add the following line on other computers: -# --auth-add-pkey mycomputer.p2p:<public-key> +# Note: --announce is only needed when KadNode does not do the authentication. + +# As an alternative, create a secret/public key via 'kadnode --bob-create-key' +# and load the secret keys as PEM file: +# --bob-load-key <secret-key-pem-file> # +# Other nodes can use <public-key-hex>.p2p in the browser to resolve the node. -#Switch to IPv6 mode -#--mode ipv6 +# Enable DNS proxy behavior. Reads /etc/resolv.conf by default. +# --dns-proxy-enable +# +# Or specify a DNS server by IP address: +# --dns-proxy-server <IP-address> # Disable UPnP/NAT-PMP support # --disable-forwarding
Added cursor to UI page
@@ -62,6 +62,12 @@ function mapStateToProps(state) { filename: `frame.png`, _v: uiVersion }, + { + id: "cursor", + name: "Cursor", + filename: `cursor.png`, + _v: uiVersion + }, { id: "emotes", name: "Emotes",
zephyr: Fix uart_console_read_buffer_init() return value This function should have a return value. Add it to avoid a warning. BRANCH=none TEST=warning is gone
@@ -57,6 +57,8 @@ enum ec_status uart_console_read_buffer_init(void) previous_snapshot_idx = current_snapshot_idx; current_snapshot_idx = tail_idx; + + return EC_RES_SUCCESS; } int uart_console_read_buffer(uint8_t type, char *dest, uint16_t dest_size,
Fix File.print opening for read instead of write.
@@ -604,7 +604,7 @@ with a newline at the end. void lily_builtin_File_print(lily_state *s) { lily_builtin_File_write(s); - fputc('\n', lily_file_for_read(s, lily_arg_file(s, 0))); + fputc('\n', lily_file_for_write(s, lily_arg_file(s, 0))); lily_return_unit(s); }
lua: drop dependency on package bytestring
@@ -88,7 +88,6 @@ flag hardcode-reg-keys common common-options default-language: Haskell2010 build-depends: base >= 4.8 && < 5 - , bytestring >= 0.10.2 && < 0.11 ghc-options: -Wall -Wincomplete-record-updates -Wnoncanonical-monad-instances
[arch][x86] drop march to x86-64 march=x86-64-v2 is not supported on old compilers. Can fix in the future, but for the moment may as well just drop -v2 since it's not really being used.
@@ -118,7 +118,7 @@ ARCH_COMPILEFLAGS += -march=i686 ARCH_OPTFLAGS := -O2 GLOBAL_DEFINES += X86_LEGACY=0 else ifeq ($(SUBARCH),x86-64) -ARCH_COMPILEFLAGS += -march=x86-64-v2 +ARCH_COMPILEFLAGS += -march=x86-64 ARCH_OPTFLAGS := -O2 GLOBAL_DEFINES += X86_LEGACY=0 endif
loadable_apps/binary_update.c : Remove unused local variable binary_update.c:320:7: warning: unused variable 'path' [-Wunused-variable] char path[BINARY_PATH_LEN]; ^~~~
@@ -317,7 +317,6 @@ static int binary_update_same_version_test(void) { int ret; uint8_t type = 0; - char path[BINARY_PATH_LEN]; binary_update_info_t pre_bin_info; binary_update_info_t cur_bin_info;
Note C type changes in changelog.
@@ -15,6 +15,7 @@ This project DOES NOT adhere to [Semantic Versioning](http://semver.org/). - lodepng and zlib sources moved into `src/vendor/` folder. - Bundled zlib library updated from `1.2.8` to `1.2.11`. - LodePNG updated from `20160501` to `20180611`. +- libtcod C type declarations are stricter. ### Fixed - `libtcodpy.map_clear`: `transparent` and `walkable` parameters were reversed.
schema compile DOC fix: default values implementing new modules
@@ -274,7 +274,7 @@ LY_ERR lys_compile_expr_implement(const struct ly_ctx *ctx, const struct lyxp_ex * What can cause new modules to be implemented when resolving unres in 5): * - leafref * - when, must - * - identityref default value + * - identityref, instance-identifier default value * - new implemented module augments, deviations * * @param[in] ctx libyang context.
OcFileLib: Should still free the original label
@@ -80,6 +80,7 @@ GetVolumeLabel ( // if (VolumeInfo->VolumeLabel[VolumeLabelSize/2-1] != '\0') { DEBUG ((DEBUG_ERROR, "Found unterminated volume label!")); + FreePool (VolumeInfo); return AllocateCopyPool (sizeof (L"INVALID"), L"INVALID"); } else { return VolumeInfo->VolumeLabel;
fix case issue for socket options
@@ -6,6 +6,11 @@ so_prefix = 'SO' def sock_prop(so_str): + """ + convert socket option names to system specific integers + e.g., 'SO/IPPROTO_IP/IP_TOS' --> 'SO/0/1' + """ + so_str = so_str.upper() if not so_str.startswith(so_prefix + so_separator): return _, level, optname = so_str.split(so_separator)
Bump HMAC_MAX_MD_CBLOCK to 200 because the maximum (theoretical) block size of SHA3 is 200 bytes
# include <openssl/evp.h> # ifndef OPENSSL_NO_DEPRECATED_3_0 -# define HMAC_MAX_MD_CBLOCK 128 /* Deprecated */ +# define HMAC_MAX_MD_CBLOCK 200 /* Deprecated */ # endif # ifdef __cplusplus
OpenCore: Fix shell booting
@@ -41,9 +41,58 @@ STATIC OC_STORAGE_CONTEXT mOpenCoreStorage; +STATIC +EFI_IMAGE_START +mOcOriginalStartImage; + +STATIC +UINT32 +mOpenCoreStartImageNest; + +STATIC +EFI_STATUS +OcEfiStartImage ( + IN EFI_HANDLE ImageHandle, + OUT UINTN *ExitDataSize, + OUT CHAR16 **ExitData OPTIONAL + ) +{ + EFI_STATUS Status; + + ++mOpenCoreStartImageNest; + + // + // We do not know what OS is that, probably booted macOS from shell. + // Apply all the fixtures once, they are harmless for any OS. + // + if (mOpenCoreStartImageNest == 1) { + OcLoadAcpiSupport (&mOpenCoreStorage, &mOpenCoreConfiguration); + OcLoadKernelSupport (&mOpenCoreStorage, &mOpenCoreConfiguration); + } + + Status = mOcOriginalStartImage ( + ImageHandle, + ExitDataSize, + ExitData + ); + + if (EFI_ERROR (Status)) { + DEBUG ((DEBUG_WARN, "OC: Boot failed - %r\n", Status)); + } + + if (mOpenCoreStartImageNest == 1) { + OcUnloadKernelSupport (); + } + + --mOpenCoreStartImageNest; + + return Status; +} + STATIC EFI_STATUS -EFIAPI OcStartImage ( +EFIAPI +OcStartImage ( IN OC_BOOT_ENTRY *Chosen, IN EFI_HANDLE ImageHandle, OUT UINTN *ExitDataSize, @@ -52,6 +101,9 @@ EFIAPI OcStartImage ( { EFI_STATUS Status; + ++mOpenCoreStartImageNest; + + if (mOpenCoreStartImageNest == 1) { // // Some make their ACPI tables incompatible with Windows after modding them for macOS. // While obviously it is their fault, here we provide a quick and dirty workaround. 
@@ -66,8 +118,9 @@ EFIAPI OcStartImage ( if (!Chosen->IsWindows) { OcLoadKernelSupport (&mOpenCoreStorage, &mOpenCoreConfiguration); } + } - Status = gBS->StartImage ( + Status = mOcOriginalStartImage ( ImageHandle, ExitDataSize, ExitData @@ -77,10 +130,12 @@ EFIAPI OcStartImage ( DEBUG ((DEBUG_WARN, "OC: Boot failed - %r\n", Status)); } - if (!Chosen->IsWindows) { + if (mOpenCoreStartImageNest == 1 && !Chosen->IsWindows) { OcUnloadKernelSupport (); } + --mOpenCoreStartImageNest; + return Status; } @@ -119,6 +174,14 @@ OcMain ( mOpenCoreConfiguration.Misc.Debug.Delay ); + // + // This is required to catch UEFI Shell boot if any. + // + mOcOriginalStartImage = gBS->StartImage; + gBS->StartImage = OcEfiStartImage; + gBS->Hdr.CRC32 = 0; + gBS->CalculateCrc32 (gBS, gBS->Hdr.HeaderSize, &gBS->Hdr.CRC32); + OcCpuScanProcessor (&CpuInfo); OcLoadUefiSupport (Storage, &mOpenCoreConfiguration, &CpuInfo); OcLoadPlatformSupport (&mOpenCoreConfiguration, &CpuInfo);
grib_get_data: GRIB2 file with missing jDirectionIncrement produces huge values
@@ -159,6 +159,22 @@ static int init(grib_iterator* iter, grib_handle* h,grib_arguments* args) if((err = grib_get_long_internal(h, s_jPtsConsec, &self->jPointsAreConsecutive))) return err; if((err = grib_get_long(h, "iteratorDisableUnrotate", &self->disableUnrotate))) return err; + /* If the jDirectionIncrement is missing, then we cannot use it (See jDirectionIncrementGiven) */ + if (grib_is_missing(h, s_jdir, &err) && err == GRIB_SUCCESS) { + double lat2; + err = grib_get_double_internal(h, "latitudeLastInDegrees", &lat2); + if (!err) { /* try to compute jDirectionIncrementInDegrees */ + const long Nj = self->nam; + Assert(Nj>1); + if (lat1 > lat2) { + jdir=(lat1-lat2)/(Nj-1); + } else { + jdir=(lat1+360.0-lat2)/(Nj-1); + } + grib_context_log(h->context, GRIB_LOG_INFO, + "%s is missing (See jDirectionIncrementGiven). Using value of %.6f obtained from lat1, lat2 and Nj",s_jdir,jdir); + } + } if (jScansPositively) jdir=-jdir; for( lai = 0; lai < self->nam; lai++ ) {
fix oidc-agent default timeout not applied to autoloaded accounts
@@ -301,6 +301,9 @@ oidc_error_t oidcd_autoload(struct ipcPipe pipes, list_t* loaded_accounts, return oidc_errno; } struct oidc_account* account = getAccountFromJSON(config); + account_setDeath(account, agent_state.defaultTimeout + ? time(NULL) + agent_state.defaultTimeout + : 0); if (addAccount(pipes, account, loaded_accounts) != OIDC_SUCCESS) { secFreeAccount(account); return oidc_errno;
Removed AppVeyor badge from GitHub readme file GitHub already show CI checks on commit messages, so the badge is redundant
# SphereServer Game server for Ultima Online -[![Build status](https://ci.appveyor.com/api/projects/status/befpuqebq01caopi?svg=true)](https://ci.appveyor.com/project/coruja747/source) [![Coverity Scan Build Status](https://scan.coverity.com/projects/16074/badge.svg)](https://scan.coverity.com/projects/sphereserver-source) -[![Join the chat at https://gitter.im/Sphereserver/Source](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Sphereserver/Source) +[![Gitter](https://badges.gitter.im/Sphereserver/Source.svg)](https://gitter.im/Sphereserver/Source) ## Download [Automatic builds](https://forum.spherecommunity.net/sshare.php?srt=4)
config-tools: add white space between arguments The macro definition SOS_VM_BOOTARGS in vm_configurations.h calls the macros SOS_ROOTFS, SOS_CONSOLE and SOS_BOOTARGS_DIFF, which are defined in misc_cfg.h and parsed from the scenario XMLs. Add a whitespace at the end of the argument macros to prevent the arguments from being concatenated on a single line.
</xsl:template> <xsl:template name="sos_rootfs"> - <xsl:value-of select="acrn:define('SOS_ROOTFS', concat($quot, 'root=', vm/board_private/rootfs[text()], $quot), '')" /> + <xsl:value-of select="acrn:define('SOS_ROOTFS', concat($quot, 'root=', vm/board_private/rootfs[text()], ' ', $quot), '')" /> </xsl:template> <xsl:template name="sos_serial_console"> </xsl:if> <xsl:if test="$consoleport != ''"> <xsl:if test="contains($consoleport, '/')"> - <xsl:value-of select="concat($quot, 'console=', substring-after(substring-after($consoleport,'/'), '/'), $quot)" /> + <xsl:value-of select="concat($quot, 'console=', substring-after(substring-after($consoleport,'/'), '/'), ' ', $quot)" /> </xsl:if> <xsl:if test="not(contains($consoleport, '/'))"> - <xsl:value-of select="concat($quot, 'console=', $consoleport, $quot)" /> + <xsl:value-of select="concat($quot, 'console=', $consoleport, ' ', $quot)" /> </xsl:if> </xsl:if> </xsl:variable> </xsl:otherwise> </xsl:choose> </xsl:variable> - <xsl:value-of select="acrn:define('SOS_BOOTARGS_DIFF', concat($quot, $bootargs, ' ', $maxcpus, $quot), '')" /> + <xsl:value-of select="acrn:define('SOS_BOOTARGS_DIFF', concat($quot, $bootargs, ' ', $maxcpus, ' ', $quot), '')" /> </xsl:template> <xsl:template name="cpu_affinity"> <xsl:if test="acrn:is-pre-launched-vm(vm_type)"> <xsl:variable name="bootargs" select="normalize-space(os_config/bootargs)" /> <xsl:if test="$bootargs"> - <xsl:value-of select="acrn:define(concat('VM', @id, '_BOOT_ARGS'), concat($quot, $bootargs, $quot), '')" /> + <xsl:value-of select="acrn:define(concat('VM', @id, '_BOOT_ARGS'), concat($quot, $bootargs, ' ', $quot), '')" /> </xsl:if> </xsl:if> </xsl:for-each>
hark-graph-hook: remove ~&
name.u.notif-kind =/ parent=index:post (scag parent-lent.u.notif-kind index.post.node) - ~& parent ?. ?| =(desc %mention) (~(has in watching) [rid parent]) ==
aw20198: Set and clear config register bits correctly This patch makes aw20198_enable set and clear the bits of the config register correctly. BRANCH=None TEST=None
@@ -75,8 +75,8 @@ static int aw20198_enable(struct rgbkbd *ctx, bool enable) return rv; } - return aw20198_write(ctx, AW20198_REG_GCR, - cfg | (enable ? BIT(0) : 0)); + WRITE_BIT(cfg, 0, enable); + return aw20198_write(ctx, AW20198_REG_GCR, cfg); } static int aw20198_set_color(struct rgbkbd *ctx, uint8_t offset,
acl: use the global heap when allocating the lookup context The "ACL as a service" lookup infra is shared, so a global heap must be used. Type: fix
@@ -782,6 +782,8 @@ acl_interface_set_inout_acl_list (acl_main_t * am, u32 sw_if_index, */ vec_validate_init_empty ((*pinout_lc_index_by_sw_if_index), sw_if_index, ~0); + /* lookup context creation is to be done in global heap */ + void *oldheap = clib_mem_set_heap (am->vlib_main->heap_base); if (vec_len (vec_acl_list_index) > 0) { u32 lc_index = (*pinout_lc_index_by_sw_if_index)[sw_if_index]; @@ -802,6 +804,7 @@ acl_interface_set_inout_acl_list (acl_main_t * am, u32 sw_if_index, (*pinout_lc_index_by_sw_if_index)[sw_if_index] = ~0; } } + clib_mem_set_heap (oldheap); /* ensure ACL processing is enabled/disabled as needed */ acl_interface_inout_enable_disable (am, sw_if_index, is_input,