lib/handler/connect.c -> fix regression in t/50connect-proxy-status.t test
@@ -243,7 +243,7 @@ static void reset_io_timeout(struct st_connect_generator_t *self) } } -static void send_connect_error(struct st_connect_generator_t *self, const char *msg, const char *errstr) +static void send_connect_error(struct st_connect_generator_t *self, int code, const char *msg, const char *errstr) { cancel_hev2(self); h2o_timer_unlink(&self->timeout); @@ -252,13 +252,13 @@ static void send_connect_error(struct st_connect_generator_t *self, const char * h2o_socket_close(self->sock); self->sock = NULL; } - //FIXME prohibited should be 403? - h2o_send_error_502(self->src_req, msg, errstr, H2O_SEND_ERROR_KEEP_HEADERS); + + h2o_send_error_generic(self->src_req, code, msg, errstr, H2O_SEND_ERROR_KEEP_HEADERS); } static void on_connect_error(struct st_connect_generator_t *self, const char *errstr) { - send_connect_error(self, "Gateway Error", errstr); + send_connect_error(self, 502, "Gateway Error", errstr); } static void on_connect_timeout(h2o_timer_t *entry) @@ -288,7 +288,7 @@ static void on_hev2_conn_cycle(h2o_timer_t *entry) } if (self->hev2_conn_err == destination_ip_prohibited) { record_error(self, destination_ip_prohibited, NULL, NULL); - send_connect_error(self, "Destination IP Prohibited", "Destination IP Prohibited"); + send_connect_error(self, 403, "Destination IP Prohibited", "Destination IP Prohibited"); } else { record_socket_error(self, self->hev2_conn_err); on_connect_error(self, self->hev2_conn_err);
key zeroisation fix for p12
@@ -75,6 +75,7 @@ static int pkcs12_gen_mac(PKCS12 *p12, const char *pass, int passlen, unsigned char *out, const EVP_MD *md_type)) { + int ret = 0; const EVP_MD *md_type; HMAC_CTX *hmac = NULL; unsigned char key[EVP_MAX_MD_SIZE], *salt; @@ -116,24 +117,27 @@ static int pkcs12_gen_mac(PKCS12 *p12, const char *pass, int passlen, if (!pkcs12_gen_gost_mac_key(pass, passlen, salt, saltlen, iter, md_size, key, md_type)) { PKCS12err(PKCS12_F_PKCS12_GEN_MAC, PKCS12_R_KEY_GEN_ERROR); - return 0; + goto err; } } else if (!(*pkcs12_key_gen)(pass, passlen, salt, saltlen, PKCS12_MAC_ID, iter, md_size, key, md_type)) { PKCS12err(PKCS12_F_PKCS12_GEN_MAC, PKCS12_R_KEY_GEN_ERROR); - return 0; + goto err; } if ((hmac = HMAC_CTX_new()) == NULL || !HMAC_Init_ex(hmac, key, md_size, md_type, NULL) || !HMAC_Update(hmac, p12->authsafes->d.data->data, p12->authsafes->d.data->length) || !HMAC_Final(hmac, mac, maclen)) { - HMAC_CTX_free(hmac); - return 0; + goto err; } + ret = 1; + +err: + OPENSSL_cleanse(key, sizeof(key)); HMAC_CTX_free(hmac); - return 1; + return ret; } int PKCS12_gen_mac(PKCS12 *p12, const char *pass, int passlen,
Add handling around realloc
@@ -50,8 +50,15 @@ static void repl() { return; } - fullLine = realloc(fullLine, strlen(fullLine) + strlen(line) + 1); - snprintf(fullLine, strlen(fullLine) + strlen(line) + 1, "%s%s", fullLine, line); + char *temp = realloc(fullLine, strlen(fullLine) + strlen(line) + 1); + + if (temp == NULL) { + printf("Unable to allocate memory"); + exit(71); + } + + fullLine = temp; + memcpy(fullLine + strlen(fullLine), line, strlen(line) + 1); linenoiseHistoryAdd(line); linenoiseHistorySave("history.txt");
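A minimal sketch of the check-before-assign realloc idiom used in the diff above; the names grow_and_append, buffer and extra are hypothetical and not taken from the original code:

#include <stdlib.h>
#include <string.h>

/* Append 'extra' to 'buffer', keeping the original allocation valid on failure.
 * Illustrative only; not the code from the commit above. */
static char *grow_and_append(char *buffer, const char *extra)
{
    size_t old_len = strlen(buffer);
    size_t add_len = strlen(extra);
    char *tmp = realloc(buffer, old_len + add_len + 1);

    if (tmp == NULL)
        return NULL;                          /* 'buffer' is still valid and owned by the caller */

    memcpy(tmp + old_len, extra, add_len + 1); /* copies the terminating NUL as well */
    return tmp;
}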
Ensure console mouse events are turned off on exit
@@ -3855,6 +3855,9 @@ quit: qnav_deinit(); if (cont_window) delwin(cont_window); +#ifndef FEAT_NOMOUSE + printf("\033[?1003l\n"); // turn off console mouse events if they were active +#endif printf("\033[?2004h\n"); // Tell terminal to not use bracketed paste endwin(); ged_deinit(&t.ged);
Fixed Luos image source
-<a href="https://luos.io"><img src="https://uploads-ssl.webflow.com/601a78a2b5d030260a40b7ad/603e0cc45afbb50963aa85f2_Gif%20noir%20rect.gif" alt="Luos logo" title="Luos" align="right" height="100" /></a> +<a href="https://luos.io"><img src="https://www.luos.io/img/logo_luos_animated_white.gif" alt="Luos logo" title="Luos" align="right" height="100" /></a> # Contributing to Luos
fix warning: unused variable 'ctx' [-Wunused-variable]
@@ -123,7 +123,6 @@ static int luv_work_cb(lua_State* L) { static void luv_work_cb_wrapper(uv_work_t* req) { luv_work_t* work = (luv_work_t*)req->data; - luv_work_ctx_t* ctx = work->ctx; lua_State *L = work->args.L; luv_ctx_t* lctx = luv_context(L);
changelog entry for clap-core deprecation
# Changes in 1.1.3 * CMake: generate CMake and pkg-config package files on install +* CMake: `clap-core` target is now deprecated, use `clap` target instead * [plugin.h](include/clap/plugin.h) make feature list on clap_plugin_descriptor_t const # Changes in 1.1.2
update maya attrs from parms after the cook, rather than before, so that they don't end up with an off-by-one-cook error.
@@ -1536,8 +1536,6 @@ AssetNode::compute(const MPlug& plug, MDataBlock& data) setParmValues(data); - getParmValues(data); - AssetNodeOptions::AccessorDataBlock options(assetNodeOptionsDefinition, data); MPlug outputPlug(thisMObject(), AssetNode::output); @@ -1548,6 +1546,12 @@ AssetNode::compute(const MPlug& plug, MDataBlock& data) options, needToSyncOutputs ); + + // this gets parm properties as well as values + // do this after the compute in case stuff like disable has changed + // or expressions have been evaulated + getParmValues(data); + // No need to print error messages from Asset::compute(). It should // have been printed already. if(MFAIL(status)) @@ -1620,7 +1624,6 @@ AssetNode::setInternalValueInContext( #endif { MStatus status; - if(plug == AssetNode::otlFilePath || plug == AssetNode::assetName) {
PEM: Always use PEM_def_callback() when cb == NULL in pem_read_bio_key() Too many other functions depend on this being done. Fixes
@@ -48,11 +48,8 @@ static EVP_PKEY *pem_read_bio_key(BIO *bp, EVP_PKEY **x, return NULL; } - if (u != NULL && cb == NULL) - cb = PEM_def_callback; if (cb == NULL) - ui_method = UI_null(); - else + cb = PEM_def_callback; ui_method = allocated_ui_method = UI_UTIL_wrap_read_pem_callback(cb, 0); if (ui_method == NULL) return NULL;
Update README.md Additional typographical fix.
@@ -47,7 +47,7 @@ If you plan to use YARA to scan compressed files (.zip, .tar, etc) you should take a look at [yextend](https://github.com/BayshoreNetworks/yextend), a very helpful extension to YARA developed and open-sourced by Bayshore Networks. -Additionally, they guys from [InQuest](https://inquest.net/) have curated an +Additionally, the guys from [InQuest](https://inquest.net/) have curated an awesome list of [YARA-related stuff](https://github.com/InQuest/awesome-yara). ## Who's using YARA
nfsuc steam compatibility fix
@@ -37,11 +37,9 @@ void InitRes() } list3[i] = 0; - auto pattern = GetPattern("6A 15 B9 ? ? ? ? 8B F8 E8 ? ? ? ? 33 C9"); //0x12AA194 - injector::WriteMemory(*pattern.get_first<void*>(18), list3.size() - 1, true); - - pattern = GetPattern("53 E8 ? ? ? ? 83 C4 1C 33 FF E9 ? ? ? ? E8 ? ? ? ? 33 FF"); //0x587291 - injector::MakeNOP(pattern.get_first(16), 5, true); + auto pattern = GetPattern("E8 ? ? ? ? 33 FF 39 ? ? ? ? ? 76 22 8B 04 BD"); //0x587291 + injector::MakeNOP(pattern.get_first(0), 5, true); + injector::WriteMemory(*pattern.get_first<void*>(9), list3.size() - 1, true);//0x12AA194 pattern = GetPattern("68 D7 13 00 00"); injector::WriteMemory(pattern.get_first(24), &list3[0], true); //0x57A5B5
Add minimum size validation for header parameter Throw exception on values that are less than the minimum acceptable size of 32 bytes.
@@ -98,6 +98,14 @@ def validate_version(ctx, param, value): raise click.BadParameter("{}".format(e)) +def validate_header_size(ctx, param, value): + min_hdr_size = image.IMAGE_HEADER_SIZE + if value < min_hdr_size: + raise click.BadParameter( + "Minimum value for -H/--header-size is {}".format(min_hdr_size)) + return value + + class BasedIntParamType(click.ParamType): name = 'integer' @@ -128,7 +136,8 @@ class BasedIntParamType(click.ParamType): help='Size of the slot where the image will be written') @click.option('--pad-header', default=False, is_flag=True, help='Add --header-size zeroed bytes at the beginning of the image') [email protected]('-H', '--header-size', type=BasedIntParamType(), required=True) [email protected]('-H', '--header-size', callback=validate_header_size, + type=BasedIntParamType(), required=True) @click.option('-v', '--version', callback=validate_version, required=True) @click.option('--align', type=click.Choice(['1', '2', '4', '8']), required=True)
[lwIP] use timeval in libc when minilibc is used
@@ -69,7 +69,7 @@ typedef rt_uint32_t mem_ptr_t; #define LWIP_PROVIDE_ERRNO #endif -#ifdef RT_USING_LIBC +#if defined(RT_USING_LIBC) || defined(RT_USING_MINILIBC) #include <sys/time.h> #define LWIP_TIMEVAL_PRIVATE 0 #else
Zephyr: Clarify keyboard powerbutton assert options Clarify the causality in the powerbutton assert options, and also clarify why one might select either option. BRANCH=None TEST=zmake build skyrim
@@ -99,15 +99,15 @@ config PLATFORM_EC_KEYBOARD_PWRBTN_ASSERTS_KSI2 bool "Forces KSI2 to be asserted" help Enable this if KSI2 is stuck 'asserted' for all scan columns if the - power button is held. We must be aware of this case in order to - correctly handle recovery-mode key combinations. + power button is held. This applies if the refresh key is on KSI2. + The GSC will assert this row for all columns during a recovery boot. config PLATFORM_EC_KEYBOARD_PWRBTN_ASSERTS_KSI3 bool "Forces KSI3 to be asserted" help Enable this if KSI3 is stuck 'asserted' for all scan columns if the - power button is held. We must be aware of this case in order to - correctly handle recovery-mode key combinations. + power button is held. This applies if the refresh key is on KSI3. + The GSC will assert this row for all columns during a recovery boot. endchoice # PLATFORM_EC_KEYBOARD_PWRBTN_MODE
adjust columns in Automation web example
@@ -28,6 +28,7 @@ table.GeneratedTable td, table.GeneratedTable th { border-color: #8f8f8f; border-style: solid; padding: 3px; + width: 50%; } table.GeneratedTable thead {
Fix newlines from copying code from my MacMini.
#include <IOKit/IOKitLib.h> // This would be better than int gethostuuid(uuid_t id, const struct timespec *wait); - void get_platform_uuid(char * buf, int bufSize) - { io_registry_entry_t ioRegistryRoot = IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/"); - CFStringRef uuidCf = (CFStringRef) IORegistryEntryCreateCFProperty(ioRegistryRoot, CFSTR(kIOPlatformUUIDKey), kCFAllocatorDefault, 0); - IOObjectRelease(ioRegistryRoot); - CFStringGetCString(uuidCf, buf, bufSize, kCFStringEncodingMacRoman); - CFRelease(uuidCf); - } #endif // TARGET_MAC_OS
Speeches embedded in speeches are no longer exempt from filtering. Closes
:: |= sep/speech ^- speech - ?. ?=($lin -.sep) sep - %_ sep - msg + ?+ -.sep sep + ?($ire $fat $app) + sep(sep $(sep sep.sep)) + :: + $lin + =- sep(msg -) %- crip %- tufa %+ turn (tuba (trip msg.sep))
Fixed compiler error on Linux
@@ -133,6 +133,7 @@ bool PacketCreate::doCreate(NetState* net, LPCTSTR charname, bool bFemale, RACE_ ASSERT(client); const CAccountRef account = client->m_pAccount; ASSERT(account); + RESDISPLAY_VERSION resdisp = static_cast<RESDISPLAY_VERSION>(account->GetResDisp()); // Check if the account is already connected using another character if ( client->GetChar() ) @@ -184,7 +185,6 @@ bool PacketCreate::doCreate(NetState* net, LPCTSTR charname, bool bFemale, RACE_ goto InvalidInfo; } - RESDISPLAY_VERSION resdisp = static_cast<RESDISPLAY_VERSION>(account->GetResDisp()); if ( (resdisp < RDS_AOS) || (!(g_Cfg.m_iFeatureAOS & FEATURE_AOS_UPDATE_A)) ) { if ( (prProf == PROFESSION_NECROMANCER) || (prProf == PROFESSION_PALADIN) )
doc: added missing div
@@ -834,7 +834,7 @@ Basemaps apply the map background required for rendering data. Basemaps are requ </div> <div class="Carousel-item js-Tabpanes-item--lang js-Tabpanes-item--lang--kotlin"> - {% highlight swift %} + {% highlight kotlin %} mapView = MapView(this) setContentView(mapView) @@ -2258,6 +2258,8 @@ The following code describes how to adjust the `LocalVectorDataSource` performan let vectorDataSource2 = NTLocalVectorDataSource(projection: projection, spatialIndexType: NTLocalSpatialIndexType.LOCAL_SPATIAL_INDEX_TYPE_KDTREE) {% endhighlight %} + </div> + <div class="Carousel-item js-Tabpanes-item--lang js-Tabpanes-item--lang--kotlin"> {% highlight kotlin %} @@ -2266,6 +2268,7 @@ The following code describes how to adjust the `LocalVectorDataSource` performan {% endhighlight %} </div> + </div> <br/><br/>**Note:** If you have **very complex lines or polygons**, this creates objects with high numbers (more than hundreds of points per object) of vertexes. For example, the GPS traces for long periods of time, recording a point every second or so.
merge changes from trunk
@@ -1804,27 +1804,21 @@ avtUintahFileFormat::GetVar(int timestate, int domain, const char *varname) LevelInfo &levelInfo = stepInfo->levelInfo[level]; PatchInfo &patchInfo = levelInfo.patchInfo[local_patch]; - bool nodeCentered; - // The region we're going to ask uintah for (from plow to phigh-1) int plow[3], phigh[3]; patchInfo.getBounds(plow, phigh, varType); // For node based meshes add one if there is a neighbor. - if( varType.find("NC") != std::string::npos ) - { - nodeCentered = true; + bool nodeCentered = (varType.find("NC") != std::string::npos); + if( nodeCentered ) + { int nlow[3], nhigh[3]; patchInfo.getBounds(nlow, nhigh, "NEIGHBORS"); for (int i=0; i<3; i++) phigh[i] += nhigh[i]; } - else - { - nodeCentered = false; - } GridDataRaw *gd = NULL; @@ -1869,8 +1863,11 @@ avtUintahFileFormat::GetVar(int timestate, int domain, const char *varname) // Patch processor Id else if (strcmp(varname, "patch/processor") == 0 ) { +#if (2 <= UINTAH_MAJOR_VERSION && 2 <= UINTAH_MINOR_VERSION ) + double value = patchInfo.getProcRankId(); +#else double value = patchInfo.getProcId(); - +#endif for (int i=0; i<gd->num; i++) gd->data[i] = value; }
Improve BN_CTX documentation Since BN_CTX_init() is gone, all calls use BN_CTX_new(). Also, essentially all consumers will use BN_CTX_start()/BN_CTX_end(), so make that more clear from the BN_CTX_new() man page. Document the thread-unsafety of individual BN_CTX objects.
@@ -26,12 +26,14 @@ BN_CTX_secure_new() allocates and initializes a B<BN_CTX> structure but uses the secure heap (see L<CRYPTO_secure_malloc(3)>) to hold the B<BIGNUM>s. -BN_CTX_free() frees the components of the B<BN_CTX>, and if it was -created by BN_CTX_new(), also the structure itself. -If L<BN_CTX_start(3)> has been used on the B<BN_CTX>, -L<BN_CTX_end(3)> must be called before the B<BN_CTX> -may be freed by BN_CTX_free(). -If B<c> is NULL, nothing is done. +BN_CTX_free() frees the components of the B<BN_CTX> and the structure itself. +Since BN_CTX_start() is required in order to obtain B<BIGNUM>s from the +B<BN_CTX>, in most cases BN_CTX_end() must be called before the B<BN_CTX> may +be freed by BN_CTX_free(). If B<c> is NULL, nothing is done. + +A given B<BN_CTX> must only be used by a single thread of execution. No +locking is performed, and the internal pool allocator will not properly handle +multiple threads of execution. =head1 RETURN VALUES
faq CHANGE mention problems with specific libssh versions
@@ -19,3 +19,20 @@ __A:__ The most likely reason for this is that the SSH key that is used for this session authentication uses an algorithm not supported by your system. The supported algorithms are automatically loaded by *libssh* from OpenSSH configuration files (more info in `ssh_config(5)` and `sshd_config(5)`). + +__Q: When I try to connect to a server I immediately get a timeout after authenticating:__ + +__A:__ You are probably using *libssh* version 0.9.3 that includes this + regression bug. To solve it, you must use another version. + +__Q: When I connect to a server, after around 10-20 seconds I get disconnected with an error:__ +``` +[ERR]: LN: Session 1: inactive read timeout elapsed. +``` + +__A:__ There are 2 most common reasons for this error. Either you are not using + a NETCONF client to connect (but `ssh(1)`, for example) and the messages received + by the server are not properly formatted (even an additional `\n` can cause this problem). + To fix, use a NETCONF client instead. Another reason may be that you are using *libssh* + version 0.9.4. It includes a regression bug that causes this problem and you must use + another version to fix it.
[io] avoid time_stepping in run_options output
@@ -2100,6 +2100,8 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5): d['friction_contact_trace_params']='not serialized' # fix it d['osi'] = 'not serialized' # fix it + d['time_stepping'] = 'not serialized' # fix it + d['start_run_iteration_hook']='not serialized' # fix it d['end_run_iteration_hook']='not serialized' # fix it if d['set_external_forces'] is not None: @@ -2114,7 +2116,6 @@ class MechanicsHdf5Runner(siconos.io.mechanics_hdf5.MechanicsHdf5): except : d['controller']= 'not serialized' - dict_json=json.dumps(d) self._run_options_data.attrs['options'] = dict_json
proc_mgmt: properly propagate spawnd errors to client
@@ -88,6 +88,7 @@ static void spawn_reply_handler(struct spawn_binding *b, errval_t spawn_err) if (err_is_ok(spawn_err)) { err = domain_spawn(spawn->cap_node, spawn->core_id, spawn->argvbuf, spawn->argvbytes); + } if (cl->type == ClientType_Spawn) { resp_err = cl->b->tx_vtbl.spawn_response(cl->b, NOP_CONT, err, spawn->cap_node->domain_cap); @@ -95,7 +96,6 @@ static void spawn_reply_handler(struct spawn_binding *b, errval_t spawn_err) resp_err = cl->b->tx_vtbl.spawn_with_caps_response(cl->b, NOP_CONT, err, spawn->cap_node->domain_cap); } - } free(spawn); break;
[CUDA] Use pthread_cond_wait in background threads Previously these were spin-locking which was consuming too much CPU time.
@@ -57,6 +57,8 @@ typedef struct pocl_cuda_queue_data_s pthread_t submit_thread; pthread_t finalize_thread; pthread_mutex_t lock; + pthread_cond_t pending_cond; + pthread_cond_t running_cond; _cl_command_node *volatile pending_queue; _cl_command_node *volatile running_queue; cl_command_queue queue; @@ -324,6 +326,8 @@ pocl_cuda_init_queue (cl_command_queue queue) if (queue_data->use_threads) { pthread_mutex_init (&queue_data->lock, NULL); + pthread_cond_init (&queue_data->pending_cond, NULL); + pthread_cond_init (&queue_data->running_cond, NULL); int err = pthread_create (&queue_data->submit_thread, NULL, pocl_cuda_submit_thread, queue_data); if (err) @@ -359,6 +363,8 @@ pocl_cuda_free_queue (cl_command_queue queue) if (queue_data->use_threads) { queue_data->queue = NULL; + pthread_cond_signal (&queue_data->pending_cond); + pthread_cond_signal (&queue_data->running_cond); pthread_join (queue_data->submit_thread, NULL); pthread_join (queue_data->finalize_thread, NULL); } @@ -1264,6 +1270,7 @@ pocl_cuda_submit (_cl_command_node *node, cl_command_queue cq) pocl_cuda_queue_data_t *queue_data = (pocl_cuda_queue_data_t *)cq->data; pthread_mutex_lock (&queue_data->lock); DL_APPEND (queue_data->pending_queue, node); + pthread_cond_signal (&queue_data->pending_cond); pthread_mutex_unlock (&queue_data->lock); } else @@ -1531,6 +1538,10 @@ pocl_cuda_submit_thread (void *data) /* Attempt to get next command from work queue */ _cl_command_node *node = NULL; pthread_mutex_lock (&queue_data->lock); + if (!queue_data->pending_queue) + { + pthread_cond_wait (&queue_data->pending_cond, &queue_data->lock); + } if (queue_data->pending_queue) { node = queue_data->pending_queue; @@ -1545,7 +1556,8 @@ pocl_cuda_submit_thread (void *data) /* Add command to running queue */ pthread_mutex_lock (&queue_data->lock); - DL_APPEND (queue_data->running_queue, node) + DL_APPEND (queue_data->running_queue, node); + pthread_cond_signal (&queue_data->running_cond); pthread_mutex_unlock (&queue_data->lock); } } @@ -1572,6 +1584,10 @@ pocl_cuda_finalize_thread (void *data) /* Attempt to get next node from running queue */ _cl_command_node *node = NULL; pthread_mutex_lock (&queue_data->lock); + if (!queue_data->running_queue) + { + pthread_cond_wait (&queue_data->running_cond, &queue_data->lock); + } if (queue_data->running_queue) { node = queue_data->running_queue;
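A generic sketch of the mutex plus condition-variable wait idiom that replaces spin-waiting, assuming an illustrative work_available flag in place of the real command queues; this is not the pocl code itself:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int work_available = 0;

/* Consumer side: sleep on the condition variable instead of spinning. */
static void wait_for_work(void)
{
    pthread_mutex_lock(&lock);
    while (!work_available)              /* re-check the predicate after every wakeup */
        pthread_cond_wait(&cond, &lock);
    work_available = 0;
    pthread_mutex_unlock(&lock);
}

/* Producer side: publish work while holding the lock, then wake a waiter. */
static void post_work(void)
{
    pthread_mutex_lock(&lock);
    work_available = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&lock);
}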
network: on tcp connect change exception from error to debug
@@ -1218,7 +1218,7 @@ flb_sockfd_t flb_net_tcp_connect(const char *host, unsigned long port, } if (fd == -1) { - flb_error("[net] could not connect to %s:%s", + flb_debug("[net] could not connect to %s:%s", host, _port); }
dev-tools/valgrind: specify libexec path, fix LD_LIBRARY_PATH in module
@@ -37,7 +37,8 @@ AMD64/MacOSX. %setup -q -n %{pname}-%{version} %build -./configure --prefix=%{install_path} || { cat config.log && exit 1; } +./configure --prefix=%{install_path} \ + --libexecdir=%{install_path}/lib/valgrind || { cat config.log && exit 1; } make %{?_smp_mflags} %install @@ -63,7 +64,7 @@ module-whatis "Description: Memory debugging utilities" prepend-path PATH %{install_path}/bin prepend-path MANPATH %{install_path}/share/man prepend-path PKG_CONFIG_PATH %{install_path}/lib/pkgconfig -prepend-path LD_LIBRARY_PATH %{install_path}/lib/pkgconfig +prepend-path LD_LIBRARY_PATH %{install_path}/lib setenv %{PNAME}_DIR %{install_path} setenv %{PNAME}_LIB %{install_path}/lib/valgrind
panda recover should go through bootstub first
@@ -297,6 +297,7 @@ class Panda(object): self.reconnect() def recover(self, timeout=None): + self.reset(enter_bootstub=True) self.reset(enter_bootloader=True) t_start = time.time() while len(PandaDFU.list()) == 0:
Fixup include path in ossl_shim test after e_os.h work The include search path was not picking up files in the root of the tree. [extended tests]
IF[{- defined $target{cxx} && !$disabled{"external-tests"}-}] PROGRAMS_NO_INST=ossl_shim SOURCE[ossl_shim]=ossl_shim.cc async_bio.cc packeted_bio.cc test_config.cc - INCLUDE[ossl_shim]=. include ../../include + INCLUDE[ossl_shim]=. include ../../include ../.. DEPEND[ossl_shim]=../../libssl ../../libcrypto ENDIF
update ya tool arc inherit Updated flag; remove an unneeded closing brace Note: mandatory check (NEED_CHECK) was skipped
}, "arc": { "formula": { - "sandbox_id": [397068943], + "sandbox_id": [397664968], "match": "arc" }, "executable": {
Fix patch lookup in test infrastructure
import unittest +from mock import MagicMock, Mock + class GpTestCase(unittest.TestCase): def __init__(self, methodName='runTest'): @@ -17,11 +19,12 @@ class GpTestCase(unittest.TestCase): self.__class__.apply_patches_counter += 1 def get_mock_from_apply_patch(self, mock_name): - ''' Return None if there is no existing object + """ Return None if there is no existing object mock name prints out the last "namespace" for example "os.path.exists", mock_name will be "exists" - ''' + """ for mock_obj in self.mock_objs: + if isinstance(mock_obj, Mock) or isinstance(mock_obj, MagicMock): if mock_name == mock_obj._mock_name: return mock_obj return None
Exclude VCS configuration files from archive generation
# OpenGL Shaders *.glsl* text + + +# Exclude VCS configuration files from archive generation +# https://feeding.cloud.geek.nz/posts/excluding-files-from-git-archive/ +.gitattributes export-ignore +.gitignore export-ignore +.hgtags export-ignore +.hgignore export-ignore
Disable test if not configured
. ./include.sh #set -x -# First decide if we have NetCDF enabled from configure. -# Temporarily turn off the '-e' flag for set so we dont exit on error -set +e -tmp_out=tmp_msg.out -skip_test=0 -rm -f $tmp_out -# Invoke the grib_to_netcdf command with no options. If NetCDF was enabled -# we get a usage message otherwise we get a specific message. Note: In both cases -# the command fails. -${tools_dir}/grib_to_netcdf > $tmp_out -grep 'ecCodes was not compiled with NETCDF enabled' $tmp_out > /dev/null -if [ $? -eq 0 ]; then - # Message was found - skip_test=1 -fi -# Restore the set -e so any error causes us to exit -set -e -rm -f $tmp_out -if [ $skip_test -eq 1 ]; then - echo "NetCDF was not enabled. Skipping this test." +# Disable if autotools being used +src_config=${src_dir}/config.h +if [ -f ${src_config} ]; then exit 0 fi -echo "NetCDF was enabled." grib_files="\ regular_latlon_surface.grib2 \
Fix compiler warning in non-assert builds Oversight in commit Reported-by: Erik Rijkers Discussion:
@@ -143,7 +143,6 @@ execute_attr_map_tuple(HeapTuple tuple, TupleConversionMap *map) bool *inisnull = map->inisnull; Datum *outvalues = map->outvalues; bool *outisnull = map->outisnull; - int outnatts = map->outdesc->natts; int i; /* @@ -156,7 +155,7 @@ execute_attr_map_tuple(HeapTuple tuple, TupleConversionMap *map) /* * Transpose into proper fields of the new tuple. */ - Assert(attrMap->maplen == outnatts); + Assert(attrMap->maplen == map->outdesc->natts); for (i = 0; i < attrMap->maplen; i++) { int j = attrMap->attnums[i];
imxrt1050-evk/nxp_demo: Enable TizenRT debug logs Enable CONFIG_DEBUG and related configs in imxrt1050-evk/nxp_demo Fix the defconfig by running make menuconfig
@@ -297,10 +297,6 @@ CONFIG_IMXRT_AUTOMOUNT_USERFS_DEVNAME="/dev/smart0p8" CONFIG_IMXRT_AUTOMOUNT_USERFS_MOUNTPOINT="/mnt" # CONFIG_IMXRT_AUTOMOUNT_SSSRW is not set # CONFIG_IMXRT_AUTOMOUNT_ROMFS is not set - -# -# Board-Partition Options -# # CONFIG_ARCH_USE_FLASH is not set # @@ -491,6 +487,7 @@ CONFIG_STANDARD_SERIAL=y CONFIG_SERIAL_NPOLLWAITERS=2 # CONFIG_SERIAL_IFLOWCONTROL is not set # CONFIG_SERIAL_OFLOWCONTROL is not set +# CONFIG_SERIAL_TIOCSERGSTRUCT is not set CONFIG_ARCH_HAVE_SERIAL_TERMIOS=y # CONFIG_SERIAL_TERMIOS is not set # CONFIG_OTHER_SERIAL_CONSOLE is not set @@ -531,6 +528,7 @@ CONFIG_LPUART1_2STOP=0 # CONFIG_LPUART1_DMA is not set # CONFIG_SENSOR is not set # CONFIG_USBDEV is not set +# CONFIG_USBHOST is not set # CONFIG_FOTA_DRIVER is not set # @@ -694,7 +692,39 @@ CONFIG_MM_NHEAPS=1 # # Debug Options # -# CONFIG_DEBUG is not set +CONFIG_DEBUG=y +CONFIG_DEBUG_ERROR=y +# CONFIG_DEBUG_WARN is not set +# CONFIG_DEBUG_VERBOSE is not set + +# +# Subsystem Debug Options +# +# CONFIG_DEBUG_AUDIO is not set +# CONFIG_DEBUG_BINFMT is not set +# CONFIG_DEBUG_LIB is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_DEBUG_MM is not set +# CONFIG_DEBUG_SCHED is not set +# CONFIG_DEBUG_TASH is not set + +# +# OS Function Debug Options +# +# CONFIG_DEBUG_DMA is not set +# CONFIG_ARCH_HAVE_HEAPCHECK is not set +# CONFIG_DEBUG_MM_HEAPINFO is not set +# CONFIG_DEBUG_IRQ is not set + +# +# Driver Debug Options +# +# CONFIG_DEBUG_I2S is not set + +# +# System Debug Options +# +# CONFIG_DEBUG_SYSTEM is not set # # Stack Debug Options @@ -713,6 +743,11 @@ CONFIG_DEBUG_SYMBOLS=y # # CONFIG_LOGM is not set +# +# System Call +# +# CONFIG_LIB_SYSCALL is not set + # # Built-in Libraries # @@ -744,7 +779,14 @@ CONFIG_ARCH_LOWPUTC=y # CONFIG_LIBC_LOCALTIME is not set # CONFIG_TIME_EXTENDED is not set CONFIG_LIB_SENDFILE_BUFSIZE=512 +# CONFIG_LIBC_ARCH_ELF is not set # CONFIG_ARCH_OPTIMIZED_FUNCTIONS is not set +# CONFIG_LIB_ENVPATH is not set + +# +# Program Execution Options +# +# CONFIG_LIBC_EXECFUNCS is not set # # Non-standard Library Support @@ -774,6 +816,15 @@ CONFIG_LIB_SENDFILE_BUFSIZE=512 # CONFIG_STRESS_TOOL is not set # CONFIG_VOICE_SOFTWARE_EPD is not set +# +# Binary Loader +# +# CONFIG_BINFMT_DISABLE is not set +# CONFIG_BINFMT_LOADABLE is not set +# CONFIG_ELF is not set +# CONFIG_BUILTIN is not set +# CONFIG_SYMTAB_ORDEREDBYNAME is not set + # # Application Configuration #
Use ranged-for loop instead of for loop
@@ -121,7 +121,7 @@ namespace ebi } std::map<std::string, int> counter; - for (auto id = ids.begin(); id != ids.end(); ++id) { + for (auto & id : ids) { counter[*id]++; if (counter[*id] >= 2) { throw new IdBodyError{line, "ID must not have duplicate values"};
win32: task Z1793 - in this case it is better to be paranoid
@@ -26,6 +26,7 @@ class CKeyboardSingleton: public CKeyboardSingletonBase Hashtable<String, String> hashRes; bool isOk = true; #ifdef OS_WINDOWS_DESKTOP + try { if (isOpen){ void *was; Wow64DisableWow64FsRedirection (&was); @@ -42,12 +43,9 @@ class CKeyboardSingleton: public CKeyboardSingletonBase }else{ isOk = false; } - /*void *was; - Wow64DisableWow64FsRedirection (&was); - ShellExecuteA(NULL, "open", "tskill", "osk", NULL, SW_HIDE); - - Wow64RevertWow64FsRedirection (was);*/ - + } + }catch (...) { + isOpen = false; } #endif if (isOk) {hashRes["status"] = "ok";}
Allow error to be NULL
@@ -428,7 +428,9 @@ mongoc_topology_compatible (const mongoc_topology_description_t *td, int32_t max_wire_version; if (td->compatibility_error.code) { + if (error) { memcpy (error, &td->compatibility_error, sizeof (bson_error_t)); + } return false; }
change default prompt-mode for oidc-gen to cli
@@ -168,9 +168,9 @@ static struct argp_option options[] = { "Change the mode how oidc-gen should prompt for passwords. The default is " "'cli'.", 4}, - {"prompt", OPT_PROMPT_MODE, "cli|gui", 0, - "Change the mode how oidc-gen should prompt for information. On default " - "the user is not prompted.", + {"prompt", OPT_PROMPT_MODE, "cli|gui|none", 0, + "Change the mode how oidc-gen should prompt for information. The default " + "is 'cli'.", 4}, {"confirm-yes", OPT_CONFIRM_YES, 0, 0, "Confirm all confirmation prompts with yes.", 4}, @@ -255,7 +255,7 @@ void initArguments(struct arguments* arguments) { arguments->pw_prompt_mode = 0; set_pw_prompt_mode(arguments->pw_prompt_mode); - arguments->prompt_mode = 0; + arguments->prompt_mode = PROMPT_MODE_CLI; set_prompt_mode(arguments->prompt_mode); } @@ -310,6 +310,8 @@ static error_t parse_opt(int key, char* arg, struct argp_state* state) { } else if (strequal(arg, "gui")) { arguments->prompt_mode = PROMPT_MODE_GUI; common_assertOidcPrompt(); + } else if (strequal(arg, "none")) { + arguments->prompt_mode = 0; } else { return ARGP_ERR_UNKNOWN; }
vioapic.c: Using suffix rather than casting Using suffix "UL" rather than type casting 0xffffffff.
@@ -330,7 +330,7 @@ vioapic_write(struct vioapic *vioapic, uint32_t addr, uint32_t data) last = vioapic->rtbl[pin].reg; data64 = (uint64_t)data << lshift; - mask64 = (uint64_t)0xffffffff << lshift; + mask64 = 0xffffffffUL << lshift; new = last & (~mask64 | RTBL_RO_BITS); new |= data64 & ~RTBL_RO_BITS;
Test when all three inputs to mbedtls_mpi_core_sub() are aliased
@@ -1911,6 +1911,14 @@ void mpi_core_sub( char * input_A, char * input_B, /* 3b) r = a - b => we should get the correct result */ ASSERT_COMPARE( r, bytes, x, bytes ); + /* 4 tests "r may be aliased to [...] both" */ + if ( A.n == B.n && memcmp( A.p, B.p, bytes ) == 0 ) + { + memcpy( r, b, bytes ); + TEST_EQUAL( carry, mbedtls_mpi_core_sub( r, r, r, limbs ) ); + ASSERT_COMPARE( r, bytes, x, bytes ); + } + exit: mbedtls_free( a ); mbedtls_free( b );
BugID:17394649:set coap message handle threshold to 800ms
@@ -688,7 +688,7 @@ static int CoAPRespMessage_handle(CoAPContext *context, NetworkAddr *remote, CoA return COAP_ERROR_NOT_FOUND; } -#define PACKET_INTERVAL_THRE_MS 1 +#define PACKET_INTERVAL_THRE_MS 800 #define PACKET_TRIGGER_NUM 100 static int CoAPRequestMessage_ack_send(CoAPContext *context, NetworkAddr *remote, unsigned short msgid)
Clarify some help message descriptions
@@ -161,14 +161,13 @@ COMPRESSION -mask The input texture is a mask texture with unrelated data stored in the various color components, so enable error heuristics that - aim to improve perceptual quality by minimizing the effect of - error cross-talk across the color components. + aim to improve quality by minimizing the effect of error + cross-talk across the color components. -normal The input texture is a three component linear LDR normal map storing unit length normals as (R=X, G=Y, B=Z). The output will - be a two component X+Y normal map stored as (RGB=X, A=Y), - optimized for angular error instead of simple PSNR. The Z + be a two component X+Y normal map stored as (RGB=X, A=Y). The Z component can be recovered programmatically in shader code by using the equation:
tools/Config.mk: compile with full file path
@@ -268,7 +268,7 @@ endif define PREPROCESS @echo "CPP: $1->$2" - $(Q) $(CPP) $(CPPFLAGS) $($(strip $1)_CPPFLAGS) $1 -o $2 + $(Q) $(CPP) $(CPPFLAGS) $($(strip $1)_CPPFLAGS) $(abspath $1) -o $(abspath $2) endef # COMPILE - Default macro to compile one C file @@ -285,7 +285,7 @@ endef define COMPILE @echo "CC: $1" - $(Q) $(CCACHE) $(CC) -c $(CFLAGS) $3 $($(strip $1)_CFLAGS) $1 -o $2 + $(Q) $(CCACHE) $(CC) -c $(CFLAGS) $3 $($(strip $1)_CFLAGS) $(abspath $1) -o $(abspath $2) endef # COMPILEXX - Default macro to compile one C++ file @@ -303,7 +303,7 @@ endef define COMPILEXX @echo "CXX: $1" - $(Q) $(CCACHE) $(CXX) -c $(CXXFLAGS) $3 $($(strip $1)_CXXFLAGS) $1 -o $2 + $(Q) $(CCACHE) $(CXX) -c $(CXXFLAGS) $3 $($(strip $1)_CXXFLAGS) $(abspath $1) -o $(abspath $2) endef # COMPILERUST - Default macro to compile one Rust file @@ -321,7 +321,7 @@ endef define COMPILERUST @echo "RUSTC: $1" - $(Q) $(RUSTC) --emit obj $(RUSTFLAGS) $($(strip $1)_RUSTFLAGS) $1 -o $2 + $(Q) $(RUSTC) --emit obj $(RUSTFLAGS) $($(strip $1)_RUSTFLAGS) $(abspath $1) -o $(abspath $2) endef # COMPILEZIG - Default macro to compile one Zig file @@ -364,7 +364,7 @@ endef define ASSEMBLE @echo "AS: $1" - $(Q) $(CCACHE) $(CC) -c $(AFLAGS) $1 $($(strip $1)_AFLAGS) -o $2 + $(Q) $(CCACHE) $(CC) -c $(AFLAGS) $(abspath $1) $($(strip $1)_AFLAGS) -o $(abspath $2) endef # INSTALL_LIB - Install a library $1 into target $2
fix: rename json_inject_alloc to json_ainject
@@ -98,7 +98,7 @@ char * update_my_fork(dati *d) d->handle.ok_cb = log; - d->body.size = json_inject_alloc(&d->body.start, NULL, "(sha):s", sha); + d->body.size = json_ainject(&d->body.start, NULL, "(sha):s", sha); fprintf(stderr, "PATCH: %.*s %d\n", d->body.size, d->body.start, d->body.size); user_agent::run(&d->ua_data, &d->handle, &d->body, @@ -149,7 +149,7 @@ create_blobs(dati *d, struct file **files) size_t len; char *content = orka_load_whole_file(files[i]->path, &len); - d->body.size = json_inject_alloc(&d->body.start, NULL, + d->body.size = json_ainject(&d->body.start, NULL, "(content) : .*s, (encoding) : |utf-8|", len, content); @@ -187,7 +187,7 @@ create_tree(dati *d, char *base_tree_sha, struct file **files) { fprintf(stderr, "==create-tree==\n"); - d->body.size = json_inject_alloc(&d->body.start, NULL, + d->body.size = json_ainject(&d->body.start, NULL, "(tree):F" "(base_tree):s", node_list2json, files, @@ -217,7 +217,7 @@ create_a_commit(dati *d, char *tree_sha, d->handle.ok_cb = load_sha; d->handle.ok_obj = &new_commit_sha; - d->body.size = json_inject_alloc(&d->body.start, NULL, + d->body.size = json_ainject(&d->body.start, NULL, " (message) : s" " (tree) : s" " (parents) : [ s ]", @@ -238,7 +238,7 @@ void create_a_branch(dati *d, char *head_commit_sha, char *branch) { fprintf(stderr, "===create-a-branch===\n"); - d->body.size = json_inject_alloc(&d->body.start, NULL, + d->body.size = json_ainject(&d->body.start, NULL, "(ref): |refs/heads/%s|" "(sha): s", branch, head_commit_sha); @@ -257,7 +257,7 @@ update_a_commit(dati *d, char *branch, char *commit_sha) { fprintf(stderr, "===update-a-commit===\n"); d->handle.ok_cb = log; - d->body.size = json_inject_alloc(&d->body.start, NULL, "(sha):s", commit_sha); + d->body.size = json_ainject(&d->body.start, NULL, "(sha):s", commit_sha); fprintf(stderr, "PATCH: %s\n", d->body.start); user_agent::run(&d->ua_data, &d->handle, &d->body, HTTP_PATCH, "/repos/%s/%s/git/refs/heads/%s", @@ -270,7 +270,7 @@ create_a_pull_request(dati *d, char *branch, char *pull_msg) { // 5. create a pull request fprintf(stderr, "===create-a-pull-request===\n"); - d->body.size = json_inject_alloc(&d->body.start, NULL, + d->body.size = json_ainject(&d->body.start, NULL, "(title): s" "(body): s" "(head): |%s:%s|"
change line 50 'fir' to 'fit'
@@ -47,7 +47,7 @@ typedef struct /*New data for this type */ uint8_t layout :4; /*A layout from 'lv_cont_layout_t' enum*/ uint8_t hor_fit :1; /*1: Enable horizontal fit to involve all children*/ - uint8_t ver_fit :1; /*1: Enable horizontal fir to involve all children*/ + uint8_t ver_fit :1; /*1: Enable horizontal fit to involve all children*/ }lv_cont_ext_t; /**********************
Update arm_depthwise_separable_conv_HWC_q7_nonsquare.c
* <code>ARM_MATH_SIZE_MISMATCH</code> or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking. * * This function is the version with full list of optimization tricks, but with - * some contraints: + * some constraints: * ch_im_in is equal to ch_im_out * */
Resolve GPDB_12_MERGE_FIXME in hba.c LDAPS and StartTLS cannot be set at the same time. When 'ldaptls' is used with the 'ldaps' scheme or an 'ldapurl' starting with 'ldaps://', show an error message in the log file.
@@ -1584,20 +1584,20 @@ parse_hba_line(TokenizedLine *tok_line, int elevel) return NULL; } - /* GPDB_12_MERGE_FIXME: Is this still relevant? Is there some additional GPDB - * features in LDAP authentication, or has PostgreSQL gotten them all by now? */ -#if 0 - if ((parsedline->ldaptls || parsedline->ldapport != 0) && strncmp(parsedline->ldapserver, "ldaps://", 8) == 0) + /* Can't set LDAPS and StartTLS at the same time. Set ldaptls to 1 to + * make the connection between database and the LDAP server use TLS + * encryption. The scheme 'ldaps' makes LDAP connections over SSL. + */ + if (parsedline->ldaptls && strcmp(parsedline->ldapscheme, "ldaps") == 0) { ereport(LOG, (errcode(ERRCODE_CONFIG_FILE_ERROR), - errmsg("cannot use 'ldaptls' or 'ldapport' with 'ldapserver' start with 'ldaps://'"), + errmsg("cannot use 'ldaptls' with 'ldaps' scheme or 'ldapurl' start with 'ldaps://'"), errcontext("line %d of configuration file \"%s\"", line_num, HbaFileName))); return NULL; } -#endif } if (parsedline->auth_method == uaRADIUS)
OcAppleKernel: KmodInfo is packed, clang will access it properly
@@ -186,7 +186,6 @@ PatcherBlockKext ( ) { UINT64 KmodOffset; - UINT64 KmodStartAddr; UINT64 TmpOffset; KMOD_INFO_64_V1 *KmodInfo; UINT8 *PatchAddr; @@ -201,16 +200,13 @@ PatcherBlockKext ( KmodOffset = Context->VirtualKmod - Context->VirtualBase; KmodInfo = (KMOD_INFO_64_V1 *)((UINT8 *) MachoGetMachHeader64 (&Context->MachContext) + KmodOffset); if (OcOverflowAddU64 (KmodOffset, sizeof (KMOD_INFO_64_V1), &TmpOffset) - || KmodOffset > MachoGetFileSize (&Context->MachContext)) { + || KmodOffset > MachoGetFileSize (&Context->MachContext) + || KmodInfo->StartAddr == 0 + || Context->VirtualBase > KmodInfo->StartAddr) { return RETURN_INVALID_PARAMETER; } - CopyMem (&KmodStartAddr, &KmodInfo->StartAddr, sizeof (KmodStartAddr)); - if (KmodStartAddr == 0 || Context->VirtualBase > KmodInfo->StartAddr) { - return RETURN_BAD_BUFFER_SIZE; - } - - TmpOffset = KmodStartAddr - Context->VirtualBase; + TmpOffset = KmodInfo->StartAddr - Context->VirtualBase; if (TmpOffset > MachoGetFileSize (&Context->MachContext) - 6) { return RETURN_BUFFER_TOO_SMALL; }
artik053/Kconfig: remove unnecessary if statement 'artik053/Kconfig' is included only when ARCH_BOARD_ARTIK053 is set. Thus, checking it seems redundant.
-if ARCH_BOARD_ARTIK053 - config ARTIK053_BOOT_FAILURE_DETECTION bool "Clear bootcount when boot completes" default y @@ -155,4 +153,3 @@ config ARTIK053_RAMMTD_MOUNT_POINT string "Mountpoint of the partition for ramfs r/w file system" default "/ramfs" endif -endif
Use sprite alignment offsets to keep sprite position consistent between frames
@@ -68,8 +68,12 @@ const spriteMiddleware: Middleware<{}, RootState> = (store) => (next) => ( } const tile: MetaspriteTile = { id: uuid(), - x: loc.x, - y: spriteDef.coordinates.height - loc.y - 16, + x: loc.x + alignmentOffsets[si].x, + y: + spriteDef.coordinates.height - + loc.y + + alignmentOffsets[si].y - + 16, sliceX: def.coordinates.x, sliceY: def.coordinates.y, flipX: loc.flipX,
common: remove unused struct
@@ -185,11 +185,6 @@ struct strings_t { TAILQ_ENTRY(strings_t) pointers; }; -struct paths_t { - char path[PATH_MAX]; - TAILQ_ENTRY(paths_t) pointers; -}; - /* Maximum number of active fuzzing threads */ #define _HF_THREAD_MAX 1024U typedef struct {
bufr_dump -Efortran creates BUFR which differs from input
@@ -3169,6 +3169,14 @@ int grib_f_get_string(int* gid, char* key, char* val, int len, int len2){ return grib_f_get_string_( gid, key, val, len, len2); } +static int is_all_spaces(const char *s) +{ + while (*s != '\0') { + if (!isspace(*s)) return 0; + s++; + } + return 1; +} int grib_f_set_string_(int* gid, char* key, char* val, int len, int len2){ grib_handle *h = get_handle(*gid); @@ -3181,8 +3189,11 @@ int grib_f_set_string_(int* gid, char* key, char* val, int len, int len2){ if(!h) return GRIB_INVALID_GRIB; /* For BUFR, the value may contain spaces e.g. stationOrSiteName='CAMPO NOVO' */ + /* So do not use cast_char. cast_char_no_cut does not stop at first space */ val_str = cast_char_no_cut(buf2,val,len2); + if (val_str && !is_all_spaces(val_str)) { rtrim( val_str ); /* trim spaces at end of string */ + } return grib_set_string(h, cast_char(buf,key,len), val_str, &lsize); }
[bsp][v2m-mps2] fix "scons --target=mdk5" error
@@ -5,9 +5,6 @@ ARCH='arm' CPU='cortex-m7' CROSS_TOOL='keil' -if os.getenv('RTT_CC'): - CROSS_TOOL = os.getenv('RTT_CC') - if CROSS_TOOL == 'keil': PLATFORM = 'armcc' EXEC_PATH = 'C:/Keil_v5' @@ -48,7 +45,6 @@ if PLATFORM == 'armcc': CFLAGS += ' --c99' POST_ACTION = 'fromelf -z $TARGET' - # POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET' else: print("only support armcc in this bsp") exit(-1)
Moved anything rpm-related to pkg/rpm: Fix a path in libbart-devel.spec
@@ -32,7 +32,7 @@ src=$(cut -d' ' -f1 <<<"$line") dst=%{buildroot}/$(cut -d' ' -f2 <<<"$line") install -d "$dst" install "$src" "$dst" -done < libbart-dev.install +done < pkg/rpm/libbart-dev.install # ^ Contents of https://salsa.debian.org/med-team/bart/-/blob/master/debian/libbart-dev.install %files
remove chffr link
@@ -28,8 +28,7 @@ char pageheader[] = "HTTP/1.0 200 OK\nContent-Type: text/html\n\n" "</head>\n" "<body>\n" "<pre>This is your comma.ai panda\n\n" -"It's open source. Find the code <a href=\"https://github.com/commaai/panda\">here</a>\n" -"Designed to work with our dashcam, <a href=\"http://chffr.comma.ai\">chffr</a>\n"; +"It's open source. Find the code <a href=\"https://github.com/commaai/panda\">here</a>\n"; char pagefooter[] = "</pre>\n" "</body>\n"
Make sure ECONNRESET is correct on Windows for gpfdist ECONNRESET is defined as the POSIX errno (108) in some Windows build environments, so gpfdist doesn't redefine it, but that value is incorrect on Windows. Make sure ECONNRESET is defined as WSAECONNRESET in gpfdist.
#include <io.h> #define SHUT_WR SD_SEND #define socklen_t int -#ifndef ECONNRESET +#undef ECONNRESET #define ECONNRESET WSAECONNRESET #endif -#endif - #include <postgres.h> #include <pg_config.h> #include <pg_config_manual.h>
add time cost for loading storage
@@ -974,8 +974,11 @@ static void *work_thread(void *arg) // loading block from the local storage g_xdag_state = XDAG_STATE_LOAD; xdag_mess("Loading blocks from local storage..."); + + uint64_t start = get_timestamp(); xdag_show_state(0); xdag_load_blocks(t, get_timestamp(), &t, add_block_callback); + xdag_mess("Finish loading blocks, time cost %ldms", get_timestamp() - start); // waiting for command "run" while (!g_xdag_run) {
config: fix error ret
@@ -502,7 +502,7 @@ static void tcmu_cancel_config_thread(struct tcmu_config *cfg) return; } - pthread_join(thread_id, &join_retval); + ret = pthread_join(thread_id, &join_retval); if (ret) { tcmu_err("pthread_join failed with value %d\n", ret); return;
Update capslist.txt sorting
@@ -603,8 +603,6 @@ ID_CAP_WPN_PLATFORM_REG_KEY ID_CAP_WPTOOLS_INSTALL_FOLDER ID_CAP_ZMFSERVICES ID_CAP_ZTRACE -Microsoft.firmwareRead_cw5n1h2txyewy -Microsoft.firmwareWrite_cw5n1h2txyewy accessoryManager activateAsUser activity @@ -619,10 +617,10 @@ appBroadcastSettings appCaptureServices appCaptureSettings appDiagnostics -appLicensing -appManagementSystem applicationDefaults applicationViewActivation +appLicensing +appManagementSystem appointments appointmentsSystem audioDeviceConfiguration @@ -655,8 +653,8 @@ chromeInstallFiles cloudExperienceHost cloudStore codeGeneration -comPort componentUiInWebContent +comPort confirmAppClose constrainedImpersonation contacts @@ -723,8 +721,8 @@ holographicCompositor holographicCompositorSystem humanInterfaceDevice imeSystem -inProcessMediaExtension indexedContent +inProcessMediaExtension inputForegroundObservation inputInjection inputInjectionBrokered @@ -758,8 +756,8 @@ lpacClipboard lpacCom lpacCryptoServices lpacEnterprisePolicyChangeNotifications -lpacIME lpacIdentityServices +lpacIME lpacInstrumentation lpacMedia lpacPackageManagerOperation @@ -770,8 +768,10 @@ lpacServicesManagement lpacSessionManagement lpacWebPlatform microphone -microsoftEdgeRemoteDebugging microsoft.eSIMManagement_8wekyb3d8bbwe +Microsoft.firmwareRead_cw5n1h2txyewy +Microsoft.firmwareWrite_cw5n1h2txyewy +microsoftEdgeRemoteDebugging mixedRealityEnvironmentInternal mmsTransportSystem multiplaneOverlay @@ -895,8 +895,8 @@ visualVoiceMail vmWorkerProcess voipCall walletSystem -webPlatformMediaExtension webcam +webPlatformMediaExtension wiFiControl wifiData wiFiDirect
cpu.c,cosmetics: fix a typo VP8DecGetCPUInfo -> VP8GetCPUInfo
@@ -212,7 +212,7 @@ VP8CPUInfo VP8GetCPUInfo = wasmCPUInfo; #elif defined(WEBP_HAVE_NEON) // In most cases this function doesn't check for NEON support (it's assumed by // the configuration), but enables turning off NEON at runtime, for testing -// purposes, by setting VP8DecGetCPUInfo = NULL. +// purposes, by setting VP8GetCPUInfo = NULL. static int armCPUInfo(CPUFeature feature) { if (feature != kNEON) return 0; #if defined(__linux__) && defined(WEBP_HAVE_NEON_RTCD)
net/tcp: remove the redundant ifdef CONFIG_NET_TCP
@@ -76,7 +76,6 @@ struct accept_s * ****************************************************************************/ -#ifdef CONFIG_NET_TCP static inline void accept_tcpsender(FAR struct socket *psock, FAR struct tcp_conn_s *conn, FAR struct sockaddr *addr, @@ -127,7 +126,6 @@ static inline void accept_tcpsender(FAR struct socket *psock, #endif /* CONFIG_NET_IPv6 */ } } -#endif /* CONFIG_NET_TCP */ /**************************************************************************** * Name: accept_eventhandler
Mention --select-usb and --select-tcpip in README PR <https://github.com/Genymobile/scrcpy/pull/3005>
@@ -422,7 +422,7 @@ scrcpy -b2M -m800 # short version #### Multi-devices -If several devices are listed in `adb devices`, you must specify the _serial_: +If several devices are listed in `adb devices`, you can specify the _serial_: ```bash scrcpy --serial 0123456789abcdef @@ -436,6 +436,19 @@ scrcpy --serial 192.168.0.1:5555 scrcpy -s 192.168.0.1:5555 # short version ``` +If only one device is connected via either USB or TCP/IP, it is possible to +select it automatically: + +```bash +# Select the only device connected via USB +scrcpy -d # like adb -d +scrcpy --select-usb # long version + +# Select the only device connected via TCP/IP +scrcpy -e # like adb -e +scrcpy --select-tcpip # long version +``` + You can start several instances of _scrcpy_ for several devices. #### Autostart on device connection
doccords: lib/deco remove informal comment
:> any line longer than 60 characters is probably too long. :> uppercase or non-ascii letters are strongly discouraged. :> -:> informal comments (lines with {:>}) should be used only for -:> meta-discussion *about* the code. -:> :> whenever possible, use formal decorations. {:>} decorates :> the next expression; {:<} decorates the previous one. :>
[CI] Revert dummy commit
@@ -46,3 +46,29 @@ jobs: python3 scripts/install_dependencies.py python3 scripts/build_os_toolchain.py echo "Toolchain built successfully" + + - name: Build Rust toolchain + shell: bash + run: | + # curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain none -y + # rustup toolchain install nightly --allow-downgrade --profile minimal --component clippy --component rustfmt + python3 scripts/build_rust_toolchain.py --rebuild_libc + + - name: Build OS image + shell: bash + run: | + git submodule update --init --recursive + # PT: For nasm in CI + python3 scripts/install_dependencies.py + python3 scripts/build_kernel.py --no_run + + - name: Update nightly release + uses: meeDamian/[email protected] + with: + token: ${{ secrets.GITHUB_TOKEN }} + tag: nightly-${{ github.sha }} + files: axle.iso + allow_override: true + prerelease: false + commitish: ${{ github.sha }} + gzip: false
OSSL_PARAM example code bug fix. Technically not a bug since the code worked but the array index shouldn't have been constant after searching for the field.
@@ -276,8 +276,8 @@ could fill in the parameters like this: *(char **)params[i].data = "foo value"; *params[i].return_size = 10; /* size of "foo value" */ } else if (strcmp(params[i].key, "bar") == 0) { - memcpy(params[1].data, "bar value", 10); - *params[1].return_size = 10; /* size of "bar value" */ + memcpy(params[i].data, "bar value", 10); + *params[i].return_size = 10; /* size of "bar value" */ } /* Ignore stuff we don't know */ }
Add "NIE Tests" comment
@@ -67,7 +67,7 @@ the first "./a.out" with "./a.out -bench". Combine these changes with the // No mimic library. #endif -// -------- +// ---------------- NIE Tests const char* // test_wuffs_nie_decode_interface() {
build: prefer DRONE_TAG over DRONE_COMMIT
@@ -24,7 +24,9 @@ env.AddPostAction( git_version = "unknown" -if 'DRONE_COMMIT' in os.environ: +if 'DRONE_TAG' in os.environ: + git_version = os.environ.get('DRONE_TAG') +elif 'DRONE_COMMIT' in os.environ: git_version = os.environ.get('DRONE_COMMIT') else: try:
schema compile REFACTOR remove redundant function Its name was misleading and, since parsed modules cannot be removed for now, it was useless.
@@ -248,51 +248,6 @@ remove_nodelevel: LOG_LOCSET(NULL, NULL, ctx->path, NULL); } -/** - * @brief Compile information in the import statement - make sure there is the target module - * @param[in] ctx Compile context. - * @param[in] imp_p The parsed import statement structure to fill the module to. - * @return LY_ERR value. - */ -static LY_ERR -lys_compile_import(struct lysc_ctx *ctx, struct lysp_import *imp_p) -{ - const struct lys_module *mod = NULL; - LY_ERR ret = LY_SUCCESS; - - /* make sure that we have the parsed version (lysp_) of the imported module to import groupings or typedefs. - * The compiled version is needed only for augments, deviates and leafrefs, so they are checked (and added, - * if needed) when these nodes are finally being instantiated and validated at the end of schema compilation. */ - if (!imp_p->module->parsed) { - /* try to use filepath if present */ - if (imp_p->module->filepath) { - struct ly_in *in; - if (ly_in_new_filepath(imp_p->module->filepath, 0, &in)) { - LOGINT(ctx->ctx); - } else { - LY_CHECK_RET(lys_parse(ctx->ctx, in, !strcmp(&imp_p->module->filepath[strlen(imp_p->module->filepath - 4)], - ".yin") ? LYS_IN_YIN : LYS_IN_YANG, NULL, &mod)); - if (mod != imp_p->module) { - LOGERR(ctx->ctx, LY_EINT, "Filepath \"%s\" of the module \"%s\" does not match.", - imp_p->module->filepath, imp_p->module->name); - mod = NULL; - } - } - ly_in_free(in, 1); - } - if (!mod) { - if (lys_load_module(ctx->ctx, imp_p->module->name, imp_p->module->revision, 0, NULL, ctx->unres, - (struct lys_module **)&mod)) { - LOGERR(ctx->ctx, LY_ENOTFOUND, "Unable to reload \"%s\" module to import it into \"%s\", source data not found.", - imp_p->module->name, ctx->cur_mod->name); - return LY_ENOTFOUND; - } - } - } - - return ret; -} - LY_ERR lys_identity_precompile(struct lysc_ctx *ctx_sc, struct ly_ctx *ctx, struct lysp_module *parsed_mod, struct lysp_ident *identities_p, struct lysc_ident **identities) @@ -1673,11 +1628,6 @@ lys_compile(struct lys_module *mod, uint32_t options, struct lys_glob_unres *unr LY_CHECK_ERR_RET(!mod_c, LOGMEM(mod->ctx), LY_EMEM); mod_c->mod = mod; - /* process imports */ - LY_ARRAY_FOR(sp->imports, u) { - LY_CHECK_GOTO(ret = lys_compile_import(&ctx, &sp->imports[u]), cleanup); - } - /* identities */ LY_CHECK_GOTO(ret = lys_compile_identities(&ctx), cleanup);
Fix sof of copied bytes in resize.
@@ -963,11 +963,11 @@ void ts_internal_bspline_resize(const tsBSpline *spline, int n, int back, const size_t num_knots = spline->pImpl->n_knots; const size_t nnum_ctrlp = num_ctrlp + n; /**< New length of ctrlp. */ const size_t nnum_knots = num_knots + n; /**< New length of knots. */ - const size_t min_num_ctrlp = n < 0 ? nnum_ctrlp : num_ctrlp; - const size_t min_num_knots = n < 0 ? nnum_knots : num_knots; + const size_t min_num_ctrlp_vec = n < 0 ? nnum_ctrlp : num_ctrlp; + const size_t min_num_knots_vec = n < 0 ? nnum_knots : num_knots; - const size_t sof_min_num_ctrlp = min_num_ctrlp * sof_real; - const size_t sof_min_num_knots = min_num_knots * sof_real; + const size_t sof_min_num_ctrlp = min_num_ctrlp_vec * dim * sof_real; + const size_t sof_min_num_knots = min_num_knots_vec * sof_real; tsBSpline tmp; tsReal* from_ctrlp = spline->pImpl->ctrlp;
Fixed unnecessary typecasts
@@ -2059,8 +2059,7 @@ static ACVP_RESULT acvp_process_vsid(ACVP_CTX *ctx, char *vsid_url, int count) { /* * Wait and try again to retrieve the VectorSet */ - if (acvp_retry_handler(ctx, (unsigned int *)&retry_period, (unsigned int *)&time_waited_so_far, 1) - != ACVP_KAT_DOWNLOAD_RETRY) { + if (acvp_retry_handler(ctx, &retry_period, &time_waited_so_far, 1) != ACVP_KAT_DOWNLOAD_RETRY) { ACVP_LOG_STATUS("Maximum wait time with server reached! (Max: %d seconds)", ACVP_MAX_WAIT_TIME); rv = ACVP_TRANSPORT_FAIL; goto end; @@ -2242,8 +2241,7 @@ static ACVP_RESULT acvp_get_result_test_session(ACVP_CTX *ctx, char *session_url * Retry */ ACVP_LOG_STATUS("TestSession results incomplete..."); - if (acvp_retry_handler(ctx, (unsigned int *)&retry_interval, (unsigned int *)&time_waited_so_far, 2) - != ACVP_KAT_DOWNLOAD_RETRY) { + if (acvp_retry_handler(ctx, &retry_interval, &time_waited_so_far, 2) != ACVP_KAT_DOWNLOAD_RETRY) { ACVP_LOG_STATUS("Maximum wait time with server reached! (Max: %d seconds)", ACVP_MAX_WAIT_TIME); rv = ACVP_TRANSPORT_FAIL; goto end;
Fix Kconfig PR variable
@@ -256,7 +256,7 @@ config USE_PRFLOW default "TRUE" if ENABLE_PRFLOW default "FALSE" if ! ENABLE_PRFLOW -config ENABLE_CLOUD_BITFILE +config ENABLE_CLOUD_BUILD_BITFILE bool "Build bitstream file" default y depends on ENABLE_PRFLOW
Trying to update examples/RadHeat. The case of the letters seems to be causing problems.
@@ -37,5 +37,3 @@ bVarDt 1 # Use variable timestepping? dEta 0.1 # Coefficient for variable timestepping dStopTime 4.5e9 # Stop time for evolution dOutputTime 1e7 # Output timesteps (assuming in body files) - -
examples/st_things: Modify JSON file By convention, only the file name (without the parent path) is kept for the svrdb, provisioning, certificate, and privateKey paths in the JSON file. The parent path is combined at runtime by the logic added in the previous commit.
"frequency": 1 }, "filePath":{ - "svrdb": "/mnt/artikserversecured.dat", - "provisioning": "/mnt/provisioning.dat", - "certificate": "/rom/certificate", - "privateKey": "/rom/privatekey" + "svrdb": "artikserversecured.dat", + "provisioning": "provisioning.dat", + "certificate": "certificate", + "privateKey": "privatekey" } } }
socket: add macro for win32 build
#include <fluent-bit/flb_log.h> #include <fluent-bit/flb_socket.h> +#ifndef _WIN32 + int flb_socket_error(int fd) { int ret; @@ -40,3 +42,5 @@ int flb_socket_error(int fd) return 0; } + +#endif
Consistently use rtb->frc_objalloc and rtb->mem
@@ -449,7 +449,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, ngtcp2_conn *conn, rv = ngtcp2_strm_streamfrq_push(strm, nfrc); if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); return rv; } if (!ngtcp2_strm_is_tx_queued(strm)) { @@ -490,7 +490,7 @@ static ngtcp2_ssize rtb_reclaim_frame(ngtcp2_rtb *rtb, ngtcp2_conn *conn, &nfrc->fr.crypto.offset, nfrc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(nfrc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(nfrc, rtb->frc_objalloc, rtb->mem); return rv; } @@ -1428,12 +1428,12 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, strm = ngtcp2_conn_find_stream(conn, sfr->stream_id); if (!strm) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); break; } rv = ngtcp2_strm_streamfrq_push(strm, frc); if (rv != 0) { - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } if (!ngtcp2_strm_is_tx_queued(strm)) { @@ -1454,7 +1454,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, &frc->fr.crypto.offset, frc); if (rv != 0) { assert(ngtcp2_err_is_fatal(rv)); - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); return rv; } break; @@ -1472,7 +1472,7 @@ static int rtb_on_pkt_lost_resched_move(ngtcp2_rtb *rtb, ngtcp2_conn *conn, *pfrc = (*pfrc)->next; - ngtcp2_frame_chain_objalloc_del(frc, &conn->frc_objalloc, conn->mem); + ngtcp2_frame_chain_objalloc_del(frc, rtb->frc_objalloc, rtb->mem); break; default: pfrc = &(*pfrc)->next;
ci: add job-level `timeout-minutes: 60`
@@ -32,6 +32,7 @@ jobs: task: dtrace container: h2oserver/h2o-ci:ubuntu2004 + timeout-minutes: 60 steps: - uses: actions/checkout@v2 with: @@ -53,6 +54,7 @@ jobs: # see above if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository + timeout-minutes: 60 steps: - name: Build Fuzzers uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
Split long string in test_ns_extremely_long_prefix(). C99 compilers are only required to cope with 4095-character strings, so split the parse text in two to keep it under that limit.
@@ -7615,7 +7615,11 @@ END_TEST /* Exercises a particular string pool growth path */ START_TEST(test_ns_extremely_long_prefix) { - const char *text = + /* C99 compilers are only required to support 4095-character + * strings, so the following needs to be split in two to be safe + * for all compilers. + */ + const char *text1 = "<doc " /* 64 character on each line */ /* ...gives a total length of 2048 */ @@ -7651,7 +7655,9 @@ START_TEST(test_ns_extremely_long_prefix) "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" - ":a='12' xmlns:" + ":a='12'"; + const char *text2 = + " xmlns:" "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" "ABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOPABCDEFGHIJKLMNOP" @@ -7687,9 +7693,12 @@ START_TEST(test_ns_extremely_long_prefix) "='foo'\n>" "</doc>"; - if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text), + if (_XML_Parse_SINGLE_BYTES(parser, text1, strlen(text1), XML_FALSE) == XML_STATUS_ERROR) xml_failure(parser); + if (_XML_Parse_SINGLE_BYTES(parser, text2, strlen(text2), + XML_TRUE) == XML_STATUS_ERROR) + xml_failure(parser); } END_TEST
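The reasoning behind the split can be shown with a minimal standalone sketch (not part of the expat test suite; the document content is invented): each literal stays well below the 4095-character minimum that C99 guarantees, and the parser simply receives the document across two XML_Parse calls, with XML_TRUE only on the final chunk.

#include <stdio.h>
#include <string.h>
#include <expat.h>   /* XML_ParserCreate, XML_Parse, XML_ParserFree */

int main(void)
{
    /* Two literals kept far under the C99 4095-character minimum;
     * the parser sees them as one continuous document. */
    const char *part1 = "<doc attr='value'";
    const char *part2 = ">text</doc>";

    XML_Parser parser = XML_ParserCreate(NULL);
    if (parser == NULL)
        return 1;

    /* isFinal = XML_FALSE: more input follows */
    if (XML_Parse(parser, part1, (int)strlen(part1), XML_FALSE) == XML_STATUS_ERROR ||
        /* isFinal = XML_TRUE: this is the last chunk */
        XML_Parse(parser, part2, (int)strlen(part2), XML_TRUE) == XML_STATUS_ERROR) {
        fprintf(stderr, "parse error: %s\n",
                XML_ErrorString(XML_GetErrorCode(parser)));
        XML_ParserFree(parser);
        return 1;
    }

    XML_ParserFree(parser);
    return 0;
}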
tsch_schedule_print: use LOG_PRINT instead of printf
@@ -452,16 +452,15 @@ tsch_schedule_print(void) if(!tsch_is_locked()) { struct tsch_slotframe *sf = list_head(slotframe_list); - printf("Schedule: slotframe list\n"); + LOG_PRINT("----- start slotframe list -----\n"); while(sf != NULL) { struct tsch_link *l = list_head(sf->links_list); - printf("[Slotframe] Handle %u, size %u\n", sf->handle, sf->size.val); - printf("List of links:\n"); + LOG_PRINT("Slotframe Handle %u, size %u\n", sf->handle, sf->size.val); while(l != NULL) { - printf("[Link] Options %02x, type %u, timeslot %u, channel offset %u, address %u\n", + LOG_PRINT("* Link Options %02x, type %u, timeslot %u, channel offset %u, address %u\n", l->link_options, l->link_type, l->timeslot, l->channel_offset, l->addr.u8[7]); l = list_item_next(l); } @@ -469,7 +468,7 @@ tsch_schedule_print(void) sf = list_item_next(sf); } - printf("Schedule: end of slotframe list\n"); + LOG_PRINT("----- end slotframe list -----\n"); } } /*---------------------------------------------------------------------------*/
openmpi: include use of %{RMS_DELIM} in package name, make pmix build optional
# #----------------------------------------------------------------------------eh- -# OpenMPI stack that is dependent on compiler toolchain +# OpenMPI stack that is dependent on compiler toolchain (and possibly RMS) %define ohpc_compiler_dependent 1 %include %{_sourcedir}/OHPC_macros +%{!?RMS_DELIM: %global RMS_DELIM %{nil}} # Base package name/config %define pname openmpi3 %{!?with_lustre: %define with_lustre 0} %{!?with_slurm: %define with_slurm 0} %{!?with_tm: %global with_tm 1} +%{!?with_pmix: %define with_pmix 0} Summary: A powerful implementation of MPI -Name: %{pname}-%{compiler_family}%{PROJ_DELIM} +Name: %{pname}%{RMS_DELIM}-%{compiler_family}%{PROJ_DELIM} Version: 3.0.0 Release: 1%{?dist} @@ -49,8 +51,10 @@ BuildRequires: postfix BuildRequires: opensm BuildRequires: opensm-devel BuildRequires: numactl -BuildRequires: libevent-devel +%if 0%{with_pmix} BuildRequires: pmix%{PROJ_DELIM} +BuildRequires: libevent-devel +%endif BuildRequires: hwloc-devel %if 0%{?centos_version} == 700 BuildRequires: libtool-ltdl @@ -118,9 +122,11 @@ Open MPI jobs. BASEFLAGS="--prefix=%{install_path} --disable-static --enable-builtin-atomics --with-sge --enable-mpi-cxx" # build against external pmix and libevent +%if 0%{with_pmix} module load pmix BASEFLAGS="$BASEFLAGS --with-pmix=${PMIX_DIR}" BASEFLAGS="$BASEFLAGS --with-libevent=external --with-hwloc=external" +%endif %if %{with_psm} BASEFLAGS="$BASEFLAGS --with-psm"
opal-msg: Increase the max-async completion count to the maximum number of chips possible. The OPAL-OCC command-response framework will also use async requests, so bump up the max-async completion requests to accommodate the maximum possible chip count.
* ideally the value matches to the number of modules using async * infrastructure, but not necessarily the same.. */ -#define OPAL_MAX_ASYNC_COMP 8 +#define OPAL_MAX_ASYNC_COMP 16 int _opal_queue_msg(enum opal_msg_type msg_type, void *data, void (*consumed)(void *data), size_t num_params,
Wrap SVE header with __has_include check
@@ -29,7 +29,17 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "common.h" +// Some compilers will report feature support for SVE without the appropriate +// header available #ifdef HAVE_SVE +#if defined __has_include +#if __has_include(<arm_sve.h>) && __ARM_FEATURE_SVE +#define USE_SVE +#endif +#endif +#endif + +#ifdef USE_SVE #include "dot_kernel_sve.c" #endif #include "dot_kernel_asimd.c" @@ -46,7 +56,7 @@ static RETURN_TYPE dot_compute(BLASLONG n, FLOAT *x, BLASLONG inc_x, FLOAT *y, B if ( n <= 0 ) return dot; -#ifdef HAVE_SVE +#ifdef USE_SVE if (inc_x == 1 && inc_y == 1) { return dot_kernel_sve(n, x, y); }
VERSION bump to version 2.2.28
@@ -65,7 +65,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 2) set(SYSREPO_MINOR_VERSION 2) -set(SYSREPO_MICRO_VERSION 27) +set(SYSREPO_MICRO_VERSION 28) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
riscv64: Correct an inefficiency in spinlock-related functions caused by use of the rdcycles instruction
@@ -150,10 +150,26 @@ int hal_interruptsDeleteHandler(intr_handler_t *h) return EOK; } +time_t jiffies; __attribute__((aligned(4))) void handler(cpu_context_t *ctx) { - cycles_t c = hal_cpuGetCycles2(); + cycles_t c, d; + +/*if (!jiffies) + jiffies = hal_cpuGetCycles2(); +else + jiffies += 100000; + +c = jiffies; +// hal_cpuGetCycles(&d);*/ + + c = hal_cpuGetCycles2(); + + +//lib_printf("cycles=%p %p\n", d, c); +//c = d / 24000000; + sbi_ecall(SBI_SETTIMER, 0, c + 1000, 0, 0, 0, 0, 0); } @@ -165,6 +181,8 @@ __attribute__ ((section (".init"))) void _hal_interruptsInit(void) { unsigned int k; +jiffies = 0; + for (k = 0; k < SIZE_INTERRUPTS; k++) { interrupts.handlers[k] = NULL; interrupts.counters[k] = 0;
[sensor] fixed abnormal step name display.
@@ -30,7 +30,7 @@ static char *const sensor_name_str[] = "hr_", /* Heart Rate */ "tvoc_", /* TVOC Level */ "noi_", /* Noise Loudness */ - "step_" /* Step sensor */ + "step_", /* Step sensor */ "forc_" /* Force sensor */ };
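The abnormal display came from the classic adjacent-string-literal trap: without the trailing comma, C concatenates "step_" and "forc_" into a single array element, so the table silently loses an entry and every later name shifts. A tiny standalone illustration (array names are invented for the example):

#include <stdio.h>

int main(void)
{
    /* Missing comma: "step_" and "forc_" merge into one element. */
    const char *broken[] = { "noi_", "step_" "forc_" };
    /* Fixed: each sensor keeps its own prefix. */
    const char *fixed[]  = { "noi_", "step_", "forc_" };

    printf("broken has %zu entries, broken[1] = \"%s\"\n",
           sizeof(broken) / sizeof(broken[0]), broken[1]);   /* 2, "step_forc_" */
    printf("fixed  has %zu entries, fixed[2]  = \"%s\"\n",
           sizeof(fixed) / sizeof(fixed[0]), fixed[2]);      /* 3, "forc_" */
    return 0;
}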
Handle relative paths for angle includes
@@ -174,7 +174,8 @@ function generate_user_headerunits(target, batchcmds, headerunits, opt) if headerunit.type == ":quote" then outputdir = path.join(cachedir, path.directory(path.relative(headerunit.path, projectdir))) else - outputdir = path.join(cachedir, path.directory(headerunit.path):sub(3)) + -- if path is relative then its a subtarget path + outputdir = path.join(cachedir, path.is_absolute(headerunit.path) and path.directory(headerunit.path):sub(3) or headerunit.path) end batchcmds:mkdir(outputdir)
linux-raspberrypi: update 4.9 recipe to current HEAD. Contains the following changes: drm/vc4: Move IRQ enable to PM path, which fixes an unbalanced IRQ enable warning that was reported in
@@ -2,7 +2,7 @@ FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}-${PV}:" LINUX_VERSION ?= "4.9.80" -SRCREV = "ffd7bf4085b09447e5db96edd74e524f118ca3fe" +SRCREV = "7f9c648dad6473469b4133898fa6bb8d818ecff9" SRC_URI = " \ git://github.com/raspberrypi/linux.git;branch=rpi-4.9.y \ file://0001-menuconfig-check-lxdiaglog.sh-Allow-specification-of.patch \
Improve ProcessorBrandString query (experimental)
@@ -824,13 +824,23 @@ VOID PhSipGetCpuBrandString( _Out_writes_(49) PWSTR BrandString ) { - ULONG brandString[4 * 3]; - - __cpuid(&brandString[0], 0x80000002); - __cpuid(&brandString[4], 0x80000003); - __cpuid(&brandString[8], 0x80000004); + // dmex: The __cpuid instruction generates quite a few FPs by security software (malware uses this as an anti-VM trick)... + // TODO: This comment block should be removed if the SystemProcessorBrandString class is more reliable. + //ULONG brandString[4 * 3]; + //__cpuid(&brandString[0], 0x80000002); + //__cpuid(&brandString[4], 0x80000003); + //__cpuid(&brandString[8], 0x80000004); + + CHAR brandString[49]; + + NtQuerySystemInformation( + SystemProcessorBrandString, + brandString, + sizeof(brandString), + NULL + ); - PhZeroExtendToUtf16Buffer((PSTR)brandString, 48, BrandString); + PhZeroExtendToUtf16Buffer(brandString, 48, BrandString); BrandString[48] = 0; }
devices: allow link state down with netlink Type: fix Use the up parameter in vnet_netlink_set_link_state(). It was ignoring the parameter and always setting IFF_UP on an interface.
@@ -201,7 +201,7 @@ vnet_netlink_set_link_state (int ifindex, int up) struct ifinfomsg ifmsg = { 0 }; clib_error_t *err = 0; - ifmsg.ifi_flags = IFF_UP; + ifmsg.ifi_flags = ((up) ? IFF_UP : 0); ifmsg.ifi_change = IFF_UP; ifmsg.ifi_index = ifindex;
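For context on why the one-line change is enough (a generic sketch, not VPP's actual helper; the function name is invented): in an RTM_NEWLINK request, ifi_change is the mask of flag bits the kernel may modify and ifi_flags holds their desired values, so "link down" means "IFF_UP may change, and its new value is 0".

#include <sys/socket.h>        /* AF_UNSPEC */
#include <net/if.h>            /* IFF_UP */
#include <linux/rtnetlink.h>   /* struct ifinfomsg */

/* Fill the ifinfomsg part of an RTM_NEWLINK request that toggles only
 * the IFF_UP bit and leaves every other interface flag untouched. */
static void fill_link_state(struct ifinfomsg *ifm, int ifindex, int up)
{
    ifm->ifi_family = AF_UNSPEC;
    ifm->ifi_index  = ifindex;
    ifm->ifi_change = IFF_UP;            /* mask: only IFF_UP may change */
    ifm->ifi_flags  = up ? IFF_UP : 0;   /* value: set or clear the bit  */
}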
Add 4.0 change log
@@ -58,6 +58,9 @@ from 0.89 bits/pixel up to 8 bits/pixel. Release build binaries for the `astcenc` stable releases are provided in the [GitHub Releases page][3]. +**Latest 4.x stable release:** TBD +* Change log: [4.x series](./Docs/ChangeLog-4x.md) + **Latest 3.x stable release:** 3.7 * Change log: [3.x series](./Docs/ChangeLog-3x.md)
engine: on pipe shutdown, consume byte
@@ -267,6 +267,7 @@ static FLB_INLINE int flb_engine_handle_event(flb_pipefd_t fd, int mask, return 0; } else if (config->shutdown_fd == fd) { + flb_utils_pipe_byte_consume(fd); return FLB_ENGINE_SHUTDOWN; } #ifdef FLB_HAVE_STATS @@ -423,6 +424,7 @@ int flb_engine_start(struct flb_config *config) event->mask = MK_EVENT_EMPTY; event->status = MK_EVENT_NONE; config->shutdown_fd = mk_event_timeout_create(evl, 5, 0, event); + flb_warn("[engine] service will stop in 5 seconds"); } else if (ret == FLB_ENGINE_SHUTDOWN) { @@ -488,3 +490,14 @@ int flb_engine_shutdown(struct flb_config *config) return 0; } + +int flb_engine_exit(struct flb_config *config) +{ + int ret; + uint64_t val = FLB_ENGINE_EV_STOP; + + val = FLB_ENGINE_EV_STOP; + ret = flb_pipe_w(config->ch_manager[1], &val, sizeof(uint64_t)); + + return ret; +}
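The consumed byte is part of the usual self-pipe pattern: one end writes a token to wake the event loop, and the handler must read that token back or the descriptor stays readable and keeps firing. A generic sketch of the pattern, independent of Fluent Bit's flb_pipe wrappers (function names are invented):

#include <stdint.h>
#include <unistd.h>

/* Writer side: signal the event loop by writing one token. */
static void signal_shutdown(int pipe_wr)
{
    uint64_t val = 1;
    (void)write(pipe_wr, &val, sizeof(val));
}

/* Reader side: the event handler must consume the token, otherwise the
 * pipe remains readable and the loop is woken again immediately. */
static void handle_shutdown(int pipe_rd)
{
    uint64_t val;
    (void)read(pipe_rd, &val, sizeof(val));
    /* ...then proceed with the actual shutdown work... */
}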
Add make -j4 and set AppVeyor to build only the master branch.
version: 1.0.{build} image: Visual Studio 2015 +branches: + only: + - master + environment: matrix: - COMPILER: msys2 @@ -31,7 +35,7 @@ build_script: bash -lc "cd $Env:WDIR && cp -rf ./windows/* ./" bash -lc "cd $Env:WDIR && ./bootstrap.sh" bash -lc "cd $Env:WDIR && ./configure --datadir=/c" - bash -lc "cd $Env:WDIR && make" + bash -lc "cd $Env:WDIR && make -j4" bash -lc "cd $Env:WDIR && make install" bash -lc "cd $Env:WDIR && cp src/.libs/libpostal-*.dll libpostal.dll" & 'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\lib.exe' /def:libpostal.def /out:libpostal.lib /machine:$Env:PLATFORM
Mock: Add hard reset to PRL mock. Add the ability to detect a hard reset sent through the PRL mock. Note that a hard reset is a transmit type, not a control or data message. BRANCH=None TEST=make -j buildall
@@ -54,7 +54,11 @@ void prl_end_ams(int port) {} void prl_execute_hard_reset(int port) -{} +{ + mock_prl_port[port].last_ctrl_msg = 0; + mock_prl_port[port].last_data_msg = 0; + mock_prl_port[port].last_tx_type = TCPC_TX_HARD_RESET; +} enum pd_rev_type prl_get_rev(int port, enum tcpm_transmit_type partner) {
show warning if limited (old) pcap format is detected
@@ -308,6 +308,7 @@ static int nmealen; static bool ignoreieflag; static bool donotcleanflag; +static bool ancientdumpfileformat; static const uint8_t fakenonce1[] = { @@ -808,6 +809,18 @@ if((eapolwrittencount +eapolncwrittencount +eapolwrittenhcpxcountdeprecated +eap { printf( "\nInformation: no hashes written to hash files\n"); } + +if(ancientdumpfileformat == true) + { + fprintf(stdout, "\nWarning: limited dump file format detected!\n" + "This file format is a very basic format to save captured network data.\n" + "It is recommended to use PCAP Next Generation dump file format (or pcapng for short) instead.\n" + "The PCAP Next Generation dump file format is an attempt to overcome the limitations\n" + "of the currently widely used (but limited) libpcap (cap, pcap) format.\n" + "https://wiki.wireshark.org/Development/PcapNg\n" + "https://wiki.wireshark.org/FileFormatReference\n"); + } + if(sequenceerrorcount > 0) { fprintf(stdout, "\nWarning: out of sequence timestamps!\n" @@ -4775,6 +4788,7 @@ static pcaprec_hdr_t pcaprhdr; static uint64_t timestampcap; static uint8_t packet[MAXPACPSNAPLEN]; +ancientdumpfileformat = true; fprintf(stdout, "%s %s reading from %s...\n", basename(eigenname), VERSION_TAG, basename(pcapinname)); iface = 1; res = read(fd, &pcapfhdr, PCAPHDR_SIZE); @@ -5084,6 +5098,7 @@ static int interfaceid[MAX_INTERFACE_ID]; static uint8_t pcpngblock[2 *MAXPACPSNAPLEN]; static uint8_t packet[MAXPACPSNAPLEN]; +ancientdumpfileformat = false; fprintf(stdout, "%s %s reading from %s...\n", basename(eigenname), VERSION_TAG, basename(pcapinname)); iface = 0; nmealen = 0; @@ -5415,6 +5430,7 @@ pcaptempnameptr = NULL; #endif pcapnameptr = pcapinname; #ifdef WANTZLIB +ancientdumpfileformat = false; if(testgzipfile(pcapinname) == true) { memset(&tmpoutname, 0, PATH_MAX);
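The distinction the warning relies on can be made from the first 32-bit word of the file: classic libpcap dumps begin with one of the pcap magic numbers, while pcapng files begin with a Section Header Block whose type is 0x0a0d0d0a. A rough sketch of such a check (not hcxpcaptool's actual implementation; the enum and function name are invented):

#include <stdint.h>
#include <stdio.h>

enum dumpformat { FORMAT_UNKNOWN, FORMAT_PCAP, FORMAT_PCAPNG };

/* Classify a capture file by its first 32-bit word. */
static enum dumpformat classify_dump(FILE *fp)
{
    uint32_t magic;

    if (fread(&magic, sizeof(magic), 1, fp) != 1)
        return FORMAT_UNKNOWN;

    switch (magic) {
    case 0xa1b2c3d4:   /* classic pcap, microsecond timestamps */
    case 0xd4c3b2a1:   /* classic pcap, byte-swapped */
    case 0xa1b23c4d:   /* pcap with nanosecond timestamps */
    case 0x4d3cb2a1:   /* nanosecond pcap, byte-swapped */
        return FORMAT_PCAP;      /* the limited, legacy format */
    case 0x0a0d0d0a:   /* pcapng Section Header Block type */
        return FORMAT_PCAPNG;
    default:
        return FORMAT_UNKNOWN;
    }
}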
NewChannel: fix sig mismatch
@@ -110,7 +110,7 @@ export function NewChannel(props: NewChannelProps & RouteComponentProps) { } }; - const members = group ? Array.from(groups[group]?.members).map(s => `~${s}`) : undefined; + const members = group ? Array.from(groups[group]?.members) : undefined; return ( <Col overflowY="auto" p={3} backgroundColor="white">
YAwn: Fix off-by-one error in error message
@@ -53,8 +53,9 @@ string ErrorListener::visualizeError (Location const & location, string const & errorLine = prefix + errorLine + "\n" + prefix + string (location.begin.column - 1, ' '); // We assume that an error does not span more than one line start = location.begin.column; - end = location.end.column; - for (size_t current = start; current <= end; current++) + end = location.end.column - 1; + errorLine += "^"; // Show at least one caret, even if the token is 0 characters long + for (size_t current = start; current < end; current++) { errorLine += "^"; }
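The corrected loop treats the token location as the half-open column range [begin, end) while still printing a single caret for a zero-width token. A small standalone sketch of the same idea in C (the project itself is C++; the helper name is invented):

#include <stdio.h>

/* Underline the half-open column range [begin, end); a zero-width
 * token (begin == end) still gets a single caret. */
static void print_underline(size_t begin, size_t end)
{
    for (size_t col = 1; col < begin; col++)
        putchar(' ');
    putchar('^');                       /* always at least one caret */
    for (size_t col = begin + 1; col < end; col++)
        putchar('^');
    putchar('\n');
}

/* print_underline(5, 5) -> four spaces, then one caret
 * print_underline(5, 8) -> four spaces, then "^^^" covering columns 5..7 */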
bumped up yp_util revision Note: mandatory check (NEED_CHECK) was skipped
}, "yp-util": { "formula": { - "sandbox_id": [495194990], + "sandbox_id": [496409038], "match": "yp_util" }, "executable": {
feat: build and push docker image
name: Test, Build and Push Docker Image -# on: -# # to enable manual triggering of this workflow. -# workflow_dispatch: +on: + # to enable manual triggering of this workflow. + workflow_dispatch: -# # trigger for pushes to master -# push: -# branches: [master] - -# pull_request: -# branches: [develop, master] -on: [push, pull_request] + # trigger for pushes to master + push: + branches: [master] env: IMAGE_NAME: registry.hub.docker.com/metacall/core jobs: build-metaCall: - name: Pull Metacall Images runs-on: ubuntu-latest steps: # Checkout the code @@ -25,12 +20,24 @@ jobs: with: fetch-depth: 0 - # - name: Login to DockerHub - # uses: docker/login-action@v1 - # with: - # username: ${{ secrets.DOCKER_HUB_USERNAME }} - # password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + - name: Login to DockerHub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + # This Will Print the `IMAGE_NAME` configured in the environment variable + - name: Name of Image Configured in environment variable + run: echo ${IMAGE_NAME} # Run Docker Command - Pull - - run: echo ${IMAGE_NAME} - - run: sh ./docker-compose.sh pull + - name: Pull Metacall Docker Image + run: sh ./docker-compose.sh pull + + # Run Docker Command - Build + - name: Build Metacall Docker Image + run: sh ./docker-compose.sh build + + # Run Docker Command - Push + - name: Push Metacall Docker Image + run: sh ./docker-compose.sh push
lib: mbedtls: do not install static library
@@ -132,10 +132,6 @@ if(USE_STATIC_MBEDTLS_LIBRARY) add_library(${mbedtls_static_target} STATIC ${src_tls}) set_target_properties(${mbedtls_static_target} PROPERTIES OUTPUT_NAME mbedtls) target_link_libraries(${mbedtls_static_target} ${libs} ${mbedx509_static_target}) - - install(TARGETS ${mbedtls_static_target} ${mbedx509_static_target} ${mbedcrypto_static_target} - DESTINATION ${LIB_INSTALL_DIR} - PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) endif(USE_STATIC_MBEDTLS_LIBRARY) if(USE_SHARED_MBEDTLS_LIBRARY)
OcAppleKernelLib: macOS 12.0 support for AppleXcpmForceBoost closes
@@ -448,6 +448,14 @@ mPerfCtrlFind2[] = { 0x0F, 0x30 ///< wrmsr }; +STATIC +UINT8 +mPerfCtrlFind3[] = { + 0xB9, 0x99, 0x01, 0x00, 0x00, ///< mov ecx, 199h + 0x4C, 0x89, 0xF0, ///< mov rax, r14 + 0x0F, 0x30 ///< wrmsr +}; + STATIC UINT8 mPerfCtrlMax[] = { @@ -487,7 +495,8 @@ PatchAppleXcpmForceBoost ( && Current[2] == mPerfCtrlFind1[2] && Current[3] == mPerfCtrlFind1[3]) { if (CompareMem (&Current[4], &mPerfCtrlFind1[4], sizeof (mPerfCtrlFind1) - 4) == 0 - || CompareMem (&Current[4], &mPerfCtrlFind2[4], sizeof (mPerfCtrlFind2) - 4) == 0) { + || CompareMem (&Current[4], &mPerfCtrlFind2[4], sizeof (mPerfCtrlFind2) - 4) == 0 + || CompareMem (&Current[4], &mPerfCtrlFind3[4], sizeof (mPerfCtrlFind3) - 4) == 0) { break; } }