Columns: message (string, lengths 6 to 474); diff (string, lengths 8 to 5.22k)
format & add clear tm_isdst
@@ -88,6 +88,8 @@ struct tm *gmtime_r(const time_t *timep, struct tm *r)
     ;
   r->tm_mon = i;
   r->tm_mday += work - __spm[i];
+
+  r->tm_isdst = 0;
   return r;
 }
@@ -103,8 +105,8 @@ struct tm* localtime_r(const time_t* t, struct tm* r)
   time_t local_tz;
   int utc_plus;
 
-  utc_plus = 3600 * 0; /* GTM: UTC+0 */
-  local_tz = *t + utc_plus;
+  utc_plus = 0; /* GTM: UTC+0 */
+  local_tz = *t + utc_plus * 3600;
 
   return gmtime_r(&local_tz, r);
 }
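A minimal caller-side sketch of the behaviour the patch establishes, assuming a POSIX-style gmtime_r(): tm_isdst now comes back defined (0), and the UTC offset is kept in whole hours and scaled to seconds only where it is applied. The utc_plus name mirrors the patched code; everything else here is illustrative.

```c
#include <stdio.h>
#include <time.h>

int main(void) {
    time_t now = time(NULL);
    struct tm tm_utc;

    gmtime_r(&now, &tm_utc);                    /* fills every field, including tm_isdst */
    printf("tm_isdst = %d\n", tm_utc.tm_isdst); /* 0 after the fix */

    int utc_plus = 0;                           /* whole hours east of UTC ("GTM: UTC+0") */
    time_t local = now + utc_plus * 3600;       /* hours -> seconds, as in the new code */
    printf("offset applied: %ld s\n", (long)(local - now));
    return 0;
}
```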
dbug: just use +dor for sorting; +vor is literally just +dor.
:: :- 'snd' :- %a - %+ turn (sort ~(tap by snd) vor) :: sort by bone + %+ turn (sort ~(tap by snd) dor) :: sort by bone (cury snd-with-bone ossuary) :: :- 'rcv' :- %a - %+ turn (sort ~(tap by rcv) vor) :: sort by bone + %+ turn (sort ~(tap by rcv) dor) :: sort by bone (cury rcv-with-bone ossuary) :: :- 'nax' :- %a - %+ turn (sort ~(tap in nax) vor) :: sort by bone + %+ turn (sort ~(tap in nax) dor) :: sort by bone |= [=bone =message-num] %- pairs :* 'message-num'^(numb message-num) :: :- 'queued-message-acks' :- %a - %+ turn (sort ~(tap by queued-message-acks) vor) :: sort by msg nr + %+ turn (sort ~(tap by queued-message-acks) dor) :: sort by msg nr |= [=message-num =ack] %- pairs :~ 'message-num'^(numb message-num) :: :- 'live' :- %a - %+ turn (sort ~(tap in live) vor) :: sort by msg nr & frg nr + %+ turn (sort ~(tap in live) dor) :: sort by msg nr & frg nr |= [live-packet-key live-packet-val] %- pairs :~ 'message-num'^(numb message-num) :: :- 'pending-vane-ack' =- a+(turn - numb) - (sort (turn ~(tap in pending-vane-ack) head) vor) :: sort by msg # + (sort (turn ~(tap in pending-vane-ack) head) dor) :: sort by msg # :: :- 'live-messages' :- %a - %+ turn (sort ~(tap by live-messages) vor) :: sort by msg # + %+ turn (sort ~(tap by live-messages) dor) :: sort by msg # |= [=message-num partial-rcv-message] %- pairs :~ 'message-num'^(numb message-num) 'fragments'^(set-array ~(key by fragments) numb) == :: - 'nax'^a+(turn (sort ~(tap in nax) vor) numb) + 'nax'^a+(turn (sort ~(tap in nax) dor) numb) :: (bone-to-pairs bone ossuary) == :: :: helpers :: -:: +vor: value order -:: -:: Orders atoms before cells, and atoms in ascending order. -:: -++ vor - |= [a=* b=*] - ^- ? - ?: =(a b) & - ?. ?=(@ a) - ?: ?=(@ b) | - ?: =(-.a -.b) - $(a +.a, b +.b) - $(a -.a, b -.b) - ?. ?=(@ b) & - (lth a b) -:: ++ poke |= [=wire app=term =mark =vase] ^- card
Update older links to deprecated docs II
@@ -7,7 +7,7 @@ There are several methods of connecting map data from your CARTO account (via th - To use a map as **raster map tiles**, define the tile URL for `RasterTileLayer` - To apply **interactivity** (object click data), use UTFGrid. This uses both raster map tiles and json-based UTF tiles. UTFGrids are applicable to both raster and vector tiles, though are more useful for raster tiles. *For CARTO Builder Map, you will need to enable and define tooltips with the Pop-up feature* - Load **vector tiles**, the CARTO Engine supports Mapbox Vector Tile (MVT) format tiles, which the Mobile SDK can render on the client side. You will also need [CartoCSS]({{site.styling_cartocss}}/) styles to view vector tiles. This is useful for applying advanced styling features, such as zooming and rotating maps based on data that can be packaged for offline line, using mbtiles -- **Load GeoJSON vector data**. This is useful if you need need more advanced interactivity (object click actions) or dynamic client-side styling of the objects. For vector data, the CARTO Engine provides a [SQL API]({{site.sqlapi_docs}}/) and mobile app that can load entire tables and render maps. You can also use client-side simplification and clustering +- **Load GeoJSON vector data**. This is useful if you need need more advanced interactivity (object click actions) or dynamic client-side styling of the objects. For vector data, the CARTO Engine provides a [SQL API]({{site.sqlapi_docs}}/) and mobile app that can load gsimplification and clustering - If the **data table is large** (more than a ten thousand objects), then loading entire tables can overload the mobile client. Alternatively, use on-demand, view-based loading of vector data. Similar to the SQL API and GeoJSON format used on the CARTO Engine side, the SDK applies custom vector data sources to load data. _Only a selected, visible area, of the map will load._ The mobile app can control zoom levels, server-side generalizations, and simplifications can be applied - For point-geometry time-series visualizations, use the _Animated_ aggregation to define Torque maps. This provides animated rendering, and the Mobile SDK has a special layer `TorqueTileLayer` to define this.
docs: add docs about clipspace configuration
@@ -35,6 +35,45 @@ have to compile cglm with **CGLM_ALL_UNALIGNED** macro. For instance if you set CGLM_ALL_UNALIGNED in a project then set it in other projects too + Clipspace Option[s] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By starting **v0.8.3** cglm provides options to switch between clipspace configurations. + +Clipspace related files are located at `include/cglm/[struct]/clipspace.h` but +these are included in related files like `cam.h`. If you don't want to change your existing +clipspace configuration and want to use different clipspace function like `glm_lookat_zo` or `glm_lookat_lh_zo`... +then you can include individual headers or just define `CGLM_CLIPSPACE_INCLUDE_ALL` which will iclude all headers for you. + +1. **CGLM_CLIPSPACE_INCLUDE_ALL** +2. **CGLM_FORCE_DEPTH_ZERO_TO_ONE** +3. **CGLM_FORCE_LEFT_HANDED** + + +1. **CGLM_CLIPSPACE_INCLUDE_ALL**: + +By defining this macro, **cglm** will include all clipspace functions for you by just using +`#include cglm/cglm.h` or `#include cglm/struct.h` or `#include cglm/call.h` + +Otherwise you need to include header you want manually e.g. `#include cglm/clipspace/view_rh_zo.h` + +2. **CGLM_FORCE_DEPTH_ZERO_TO_ONE** + +This is similar to **GLM**'s **GLM_FORCE_DEPTH_ZERO_TO_ONE** option. +This will set clip space between 0 to 1 which makes **cglm** Vulkan, Metal friendly. + +You can use functions like `glm_lookat_lh_zo()` individually. By setting **CGLM_FORCE_DEPTH_ZERO_TO_ONE** +functions in cam.h for instance will use `_zo` versions. + +3. **CGLM_FORCE_LEFT_HANDED** + +Force **cglm** to use the left handed coordinate system by default, currently **cglm** uses right handed coordinate system as default, +you can change this behavior with this option. + +**VERY VERY IMPORTANT:** + +Be careful if you include **cglm** in multiple projects. + SSE and SSE2 Shuffle Option ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **_mm_shuffle_ps** generates **shufps** instruction even if registers are same.
Update the molecule plot caption in the screen shots.
 <?xml version="1.0" encoding="UTF-8" ?>
 <page>
   <title><![CDATA[VisIt Screen Shots]]></title>
+  <IM>LLNL-WEB-XXXXXX</IM>
   <owner><![CDATA[Page Owner Name]]></owner>
-  <lastchanged><![CDATA[March 19, 2013]]></lastchanged>
+  <lastchanged><![CDATA[September 9, 2019]]></lastchanged>
   <template><![CDATA[l2]]></template>
   <content>
   <![CDATA[
       </div>
       <div class="span3">
         <div class="well muted">
-          <p>The <strong>subset plot</strong> is used to display different parts of an assembly. Portions of the assembly can be selectively turned on and off. The first image shows an entire crash impact dummy, while the second image shows only the hips and upper body.</p>
+          <p>The <strong>molecule plot</strong> is used to display molecules as spheres and cylinders. The image on the left is a crotamine molecule with the atoms shown as spheres sized by the covalent radius, colored by the element type, and the bonds not shown. The image on the right has the atoms shown a constant size, colored by the element type, and the bonds shown as cylinders, colored by the adjacent element type.</p>
         </div>
       </div>
     </div>
github: improve description of basics of PRs
## Basics -Check relevant points but **please do not remove entries**. -Do not describe the purpose of this PR in the PR description but: +These points need to be fulfilled for every PR: -- [ ] Short descriptions should be in the release notes (added as entry in - `doc/news/_preparation_next_release.md` which contains `_(my name)_`) - **Please always add something to the the release notes.** -- [ ] Longer descriptions should be in documentation or in design decisions. -- [ ] Describe details of how you changed the code in commit messages +- [ ] Short descriptions of your changes are in the release notes + (added as entry in `doc/news/_preparation_next_release.md` which + contains `_(my name)_`) + **Please always add something to the release notes.** +- [ ] Details of what you changed are in commit messages (first line should have `module: short statement` syntax) -- [ ] References to issues, e.g. `close #X`, should be in the commit messages. +- [ ] References to issues, e.g. `close #X`, are in the commit messages. +- [ ] The buildservers are happy. +- [ ] The PR is rebased with current master. + +If you have any troubles fulfilling these criteria, please write +about the trouble as comment in the PR. We will help you. +But we cannot accept PRs that do not fulfill the basics. ## Checklist @@ -18,6 +23,8 @@ For docu fixes, spell checking, and similar none of these points below need to be checked. - [ ] I added unit tests for my code +- [ ] I fully described what my PR does in the documentation + (not in the PR description) - [ ] I fixed all affected documentation - [ ] I added code comments, logging, and assertions as appropriate (see [Coding Guidelines](https://master.libelektra.org/doc/CODING.md)) - [ ] I updated all meta data (e.g. README.md of plugins and [METADATA.ini](https://master.libelektra.org/doc/METADATA.ini)) @@ -27,7 +34,7 @@ need to be checked. Reviewers will usually check the following: -- [ ] Documentation is introductory, concise and good to read +- [ ] Documentation is introductory, concise, good to read and describes everything what the PR does - [ ] Examples are well chosen and understandable - [ ] Code is conforming to [our Coding Guidelines](https://master.libelektra.org/doc/CODING.md) - [ ] APIs are conforming to [our Design Guidelines](https://master.libelektra.org/doc/DESIGN.md) @@ -35,6 +42,8 @@ Reviewers will usually check the following: ## Labels +If you are already Elektra developer: + - Add the "work in progress" label if you do not want the PR to be reviewed yet. -- Add the "ready to merge" label **if the build server is happy** and also you +- Add the "ready to merge" label **if the basics are fulfilled** and you also say that everything is ready to be merged.
Update README: no Windows support in README
@@ -56,7 +56,7 @@ $ pip install pyccl
 
 For the PyPi installation, you will need ``CMake`` installed locally. See
 [Getting CMake](https://ccl.readthedocs.io/en/latest/source/installation.html#getting-cmake)
-for instructions.
+for instructions. Note that the code only supports Linux or Mac OS, but no Windows.
 
 Once you have the code installed, you can take it for a spin!
added function doc strings formatted as in the rest of the project
#define TMP_SOCK_FILENAME_TPL "owsock.XXXXXX" +// cf. rfc 4656, pg 6 struct _greeting { uint8_t Unused[12]; uint8_t Modes[4]; @@ -37,6 +38,7 @@ struct _greeting { uint8_t MBZ[12]; }; +// cf. rfc 4656, pg 7 struct _setup_response { uint8_t Mode[4]; uint8_t KeyID[80]; @@ -44,6 +46,7 @@ struct _setup_response { uint8_t Client_IV[16]; }; +// cf. rfc 4656, pg 9 struct _server_start { uint8_t MBZ_Accept[16]; uint8_t Server_IV[16]; @@ -52,6 +55,7 @@ struct _server_start { }; +// used with do_server struct _server_test_results { uint32_t start_time; int sent_greeting; @@ -61,11 +65,26 @@ struct _server_test_results { }; +/* + * Function: do_server + * + * Description: emulates the server side of a test session + * + * In Args: void pointer to struct _server_test_results + * with initialized start_time + * + * Out Args: + * + * Scope: + * Returns: non-zero if the server should continue + * accepting new clients + * Side Effect: + */ int do_server(int s, void *context) { - struct _server_test_results *test_results = (struct _server_test_results *) context; - memset(test_results, 0, sizeof (struct _server_test_results)); + test_results->sent_greeting = test_results->setup_response_ok + = test_results->sent_server_start = test_results->test_complete = 0; struct _greeting greeting; memset(&greeting, 0, sizeof greeting); @@ -103,6 +122,39 @@ int do_server(int s, void *context) { } +/* + * Function: server_proc + * + * Description: wrapper for run_server(struct _server_params *) used + * with pthread_create + * + * In Args: ptr to a struct _server_params + * + * Out Args: + * + * Scope: + * Returns: NULL in case of error or server completion + * Side Effect: + */ +void *server_proc(void *context) { + return run_server((struct _server_params *) context); +} + + +/* + * Function: main + * + * Description: launch a simulated owamp server & send commands + * so they can be validated in do_server (above) + * + * In Args: argc, argv (unused) + * + * Out Args: + * + * Scope: unit test (run using make check) + * Returns: non-zero in case of error + * Side Effect: + */ int main( int argc __attribute__((unused)), @@ -181,6 +233,7 @@ main( cleanup: if (thread_valid) { + // possible, but unlikely race condition if (test_results.test_complete) { pthread_join(server_thread, NULL); } else {
stm32/mpconfigport.h: Make "framebuf" module configurable by a board.
 #define MICROPY_HW_SOFTSPI_MAX_BAUDRATE (HAL_RCC_GetSysClockFreq() / 48)
 #define MICROPY_PY_UWEBSOCKET (MICROPY_PY_LWIP)
 #define MICROPY_PY_WEBREPL (MICROPY_PY_LWIP)
+#ifndef MICROPY_PY_FRAMEBUF
 #define MICROPY_PY_FRAMEBUF (1)
+#endif
 #ifndef MICROPY_PY_USOCKET
 #define MICROPY_PY_USOCKET (1)
 #endif
[kernel] add display _num in SimpleMatrix
@@ -684,6 +684,7 @@ void SimpleMatrix::display() const { std::cout << "SimpleMatrix::display(): empty matrix" << std::endl; } + std::cout << "num = " << _num << "\t"; if (_num == 1) { Siconos::algebra::print_m(*mat.Dense);
Fix url_curl on MacOS: libcurl could not read data from gpfdist on MacOS. Note that gpfdist with a pipe still cannot work on MacOS, as flock(2), which is used in gfile.c, is not supported there.
@@ -676,14 +676,28 @@ fill_buffer(URL_CURL_FILE *curl, int want) e, curl_easy_strerror(e)); } - if (maxfd <= 0) + if (maxfd == 0) { elog(LOG, "curl_multi_fdset set maxfd = %d", maxfd); curl->still_running = 0; break; } + /* When libcurl returns -1 in max_fd, it is because libcurl currently does something + * that isn't possible for your application to monitor with a socket and unfortunately + * you can then not know exactly when the current action is completed using select(). + * You then need to wait a while before you proceed and call curl_multi_perform anyway + */ + if (maxfd == -1) + { + elog(DEBUG2, "curl_multi_fdset set maxfd = %d", maxfd); + pg_usleep(100000); + // to call curl_multi_perform + nfds = 1; + } + else + { nfds = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout); - + } if (nfds == -1) { if (errno == EINTR || errno == EAGAIN)
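For context, the fix follows the documented libcurl multi-interface pattern: when curl_multi_fdset() reports maxfd == -1 there is nothing to select() on yet, so the caller waits briefly and calls curl_multi_perform() again. A generic sketch of that loop (not the url_curl code itself):

```c
#include <curl/curl.h>
#include <sys/select.h>
#include <unistd.h>

/* Drive a CURLM handle to completion with select(), handling maxfd == -1. */
static void drive_multi(CURLM *multi)
{
    int still_running = 1;

    while (still_running) {
        fd_set rd, wr, ex;
        int maxfd = -1;
        struct timeval tv = { 1, 0 };

        FD_ZERO(&rd);
        FD_ZERO(&wr);
        FD_ZERO(&ex);
        curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);

        if (maxfd == -1)
            usleep(100000);   /* nothing selectable yet: wait ~100 ms, then perform again */
        else
            select(maxfd + 1, &rd, &wr, &ex, &tv);

        curl_multi_perform(multi, &still_running);
    }
}
```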
Fixing Windows build
@@ -44,7 +44,11 @@ foreach(testSrc ${TEST_SRCS}) add_executable(${testName} ${testSrc}) #link to Boost libraries AND your targets and dependencies + IF(MSVC) + target_link_libraries(${testName} ${Boost_LIBRARIES} epanet2 epanet-output) + ELSE(TRUE) target_link_libraries(${testName} ${Boost_LIBRARIES} pthread epanet2 epanet-output) + ENDIF(MSVC) #Finally add it to test execution #Notice the WORKING_DIRECTORY and COMMAND
Fix -3/4partitionindexlimit CLI options
@@ -918,7 +918,7 @@ static int edit_astcenc_config( return 1; } - config.tune_2partition_index_limit = atoi(argv[argidx - 1]); + config.tune_3partition_index_limit = atoi(argv[argidx - 1]); } else if (!strcmp(argv[argidx], "-4partitionindexlimit")) { @@ -929,7 +929,7 @@ static int edit_astcenc_config( return 1; } - config.tune_2partition_index_limit = atoi(argv[argidx - 1]); + config.tune_4partition_index_limit = atoi(argv[argidx - 1]); } else if (!strcmp(argv[argidx], "-2partitioncandidatelimit")) {
Update: also support old truth value format for Narsese compatibility with OpenNARS
@@ -397,14 +397,27 @@ void Narsese_Sentence(char *narsese, Term *destTerm, char *punctuation, int *ten assert(len < NARSESE_LEN_MAX, "Parsing error: Narsese string too long!"); //< because of '0' terminated strings memcpy(narseseInplace, narsese, len); //tv is present if last letter is '}' - if(len>=2 && narseseInplace[len-1] == '}') + bool oldFormat = len>=2 && narseseInplace[len-1] == '%'; + if(len>=2 && (narseseInplace[len-1] == '}' || oldFormat)) { //scan for opening '{' int openingIdx; - for(openingIdx=len-2; openingIdx>=0 && narseseInplace[openingIdx] != '{'; openingIdx--); - assert(narseseInplace[openingIdx] == '{', "Parsing error: Truth value opener not found!"); + bool hasComma = false; + for(openingIdx=len-2; openingIdx>=0 && narseseInplace[openingIdx] != '{' && narseseInplace[openingIdx] != '%'; openingIdx--) + { + hasComma = hasComma || narseseInplace[openingIdx] == ';'; + } + assert(narseseInplace[openingIdx] == '{' || narseseInplace[openingIdx] == '%', "Parsing error: Truth value opener not found!"); double conf, freq; - sscanf(&narseseInplace[openingIdx], "{%lf %lf}", &freq, &conf); + if(oldFormat && !hasComma) + { + conf = NAR_DEFAULT_CONFIDENCE; + sscanf(&narseseInplace[openingIdx], "%%%lf%%", &freq); + } + else + { + sscanf(&narseseInplace[openingIdx], oldFormat ? "%%%lf;%lf%%" : "{%lf %lf}", &freq, &conf); + } destTv->frequency = freq; destTv->confidence = conf; assert(narseseInplace[openingIdx-1] == ' ', "Parsing error: Space before truth value required!");
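The two accepted truth-value spellings, demonstrated with plain sscanf() calls. This is an illustrative sketch only; the 0.9 used when the old format omits confidence merely stands in for the project's NAR_DEFAULT_CONFIDENCE.

```c
#include <stdio.h>

int main(void) {
    double freq = 0.0, conf = 0.0;

    sscanf("{0.9 0.5}", "{%lf %lf}", &freq, &conf);    /* new format: {frequency confidence} */
    printf("new: f=%g c=%g\n", freq, conf);

    sscanf("%0.9;0.5%", "%%%lf;%lf%%", &freq, &conf);  /* old OpenNARS format */
    printf("old: f=%g c=%g\n", freq, conf);

    conf = 0.9;                                        /* stand-in for NAR_DEFAULT_CONFIDENCE */
    sscanf("%0.9%", "%%%lf%%", &freq);                 /* old format, frequency only */
    printf("old, f only: f=%g c=%g\n", freq, conf);
    return 0;
}
```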
Fixed syntax that was not backwards compatible with Perl 5.10.
@@ -560,7 +560,9 @@ sub required { if (!defined(${$self->{oSectionRequired}}{$strChildPath})) { - &log(INFO, (' ' x (scalar(split('/', $strChildPath)) - 2)) . " require section: ${strChildPath}"); + my @stryChildPath = split('/', $strChildPath); + + &log(INFO, (' ' x (scalar(@stryChildPath) - 2)) . " require section: ${strChildPath}"); ${$self->{oSectionRequired}}{$strChildPath} = true; }
Material textures must be sample-only; Tracking every material's texture would add a lot of overhead, probably
@@ -1858,6 +1858,8 @@ Material* lovrMaterialCreate(MaterialInfo* info) { lovrRetain(textures[i]); Texture* texture = textures[i] ? textures[i] : state.defaultTexture; lovrCheck(i == 0 || texture->info.type == TEXTURE_2D, "Material textures must be 2D"); + // The below is only to make resource tracking more efficient + lovrCheck(texture->info.usage == TEXTURE_SAMPLE, "Currently, Material textures can only have the 'sample' usage"); bindings[i + 1] = (gpu_binding) { i + 1, GPU_SLOT_SAMPLED_TEXTURE, .texture = texture->gpu }; }
Make test_alloc_attribute_predefined_entity() robust vs allocation
@@ -9630,26 +9630,16 @@ START_TEST(test_alloc_attribute_predefined_entity) { const char *text = "<doc a='&amp;'></doc>"; int i; -#define MAX_ALLOC_COUNT 10 - int repeat = 0; +#define MAX_ALLOC_COUNT 15 for (i = 0; i < MAX_ALLOC_COUNT; i++) { - /* Repeat some counts to defeat cached allocations */ - if (i == 3 && repeat == 1) { - i -= 2; - repeat++; - } - else if ((i == 2 && - (repeat == 0 || repeat == 2 || repeat == 3)) || - (i == 3 && repeat == 4)) { - i--; - repeat++; - } allocation_count = i; if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text), XML_TRUE) != XML_STATUS_ERROR) break; - XML_ParserReset(parser, NULL); + /* See comment in test_alloc_parse_xdecl() */ + alloc_teardown(); + alloc_setup(); } if (i == 0) fail("Parse succeeded despite failing allocator");
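The general shape of the technique being hardened here, as a sketch rather than Expat's actual harness: a counting allocator fails once its budget runs out, and the test retries the same parse with budgets 0..MAX until it finally succeeds.

```c
#include <stdlib.h>

static int allocation_count = -1;   /* -1 means "never fail" */

/* Allocator handed to the library under test; fails when the budget is spent. */
static void *counting_malloc(size_t size)
{
    if (allocation_count == 0)
        return NULL;                /* simulated out-of-memory */
    if (allocation_count > 0)
        allocation_count--;
    return malloc(size);
}
```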
Remove call exit from 273759
@@ -20,7 +20,7 @@ program main write(6,*)"Success: if a diagnostic line starting with DEVID was output" ! Always return 1 to show failure in check_smoke_fails.sh. Once ! the depend clause is supported on map this can be reverted. - call exit(1) +! call exit(1) end subroutine foo(a,b,c) parameter (nsize=1000)
Add missing MFC to function arguments
@@ -1385,7 +1385,7 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & { bool data = map[pi.key()].toBool(); - if (addTaskThermostatReadWriteAttribute(task, deCONZ::ZclWriteAttributesId, 0x0412, deCONZ::ZclBoolean, data)) + if (addTaskThermostatReadWriteAttribute(task, deCONZ::ZclWriteAttributesId, 0, 0x0412, deCONZ::ZclBoolean, data)) { updated = true; } @@ -1483,7 +1483,7 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & if (mode < 4 && mode != 2) { - if (addTaskThermostatReadWriteAttribute(task, deCONZ::ZclWriteAttributesId, 0x0403, deCONZ::Zcl8BitEnum, mode)) + if (addTaskThermostatReadWriteAttribute(task, deCONZ::ZclWriteAttributesId, 0, 0x0403, deCONZ::Zcl8BitEnum, mode)) { updated = true; }
boards/NICLAV: Enable MicroSpeech module.
@@ -22,7 +22,7 @@ MICROPY_PY_NETWORK_CYW43 = 1 MICROPY_PY_BLUETOOTH = 1 MICROPY_BLUETOOTH_NIMBLE = 1 MICROPY_PY_AUDIO = 1 -MICROPY_PY_MICRO_SPEECH = 0 +MICROPY_PY_MICRO_SPEECH = 1 MICROPY_PY_LCD = 1 MICROPY_PY_TV = 1 MICROPY_PY_BUZZER = 0
proc: fix adding newly created thread to threads_common.id tree New thread is added in thread_alloc() JIRA:
@@ -812,52 +812,41 @@ int proc_threadCreate(process_t *process, void (*start)(void *), unsigned int *i t->execdata = NULL; t->wait = NULL; t->locks = NULL; - - thread_alloc(t); - - if (id != NULL) - *id = t->id; - t->stick = 0; t->utick = 0; t->priorityBase = priority; t->priority = priority; - - if (process != NULL) { - hal_spinlockSet(&threads_common.spinlock, &sc); - LIST_ADD_EX(&process->threads, t, procnext, procprev); - hal_spinlockClear(&threads_common.spinlock, &sc); - } - - t->execdata = NULL; - - /* Insert thread to global queue */ - proc_lockSet(&threads_common.lock); - lib_rbInsert(&threads_common.id, &t->idlinkage); + t->cpuTime = 0; + t->maxWait = 0; + t->startTime = hal_timerGetUs(); + t->lastTime = t->startTime; /* Prepare initial stack */ hal_cpuCreateContext(&t->context, start, t->kstack, t->kstacksz, stack + stacksz, arg); + threads_canaryInit(t, stack); - if (process != NULL) - hal_cpuSetCtxGot(t->context, process->got); + thread_alloc(t); + if (id != NULL) { + *id = t->id; + } - threads_canaryInit(t, stack); + if (process != NULL) { + hal_spinlockSet(&threads_common.spinlock, &sc); - t->startTime = hal_timerGetUs(); - t->cpuTime = 0; - t->lastTime = t->startTime; + LIST_ADD_EX(&process->threads, t, procnext, procprev); + + hal_spinlockClear(&threads_common.spinlock, &sc); + hal_cpuSetCtxGot(t->context, process->got); + } /* Insert thread to scheduler queue */ hal_spinlockSet(&threads_common.spinlock, &sc); - _perf_begin(t); - t->maxWait = 0; + _perf_begin(t); _perf_waking(t); - LIST_ADD(&threads_common.ready[priority], t); - hal_spinlockClear(&threads_common.spinlock, &sc); - proc_lockClear(&threads_common.lock); + hal_spinlockClear(&threads_common.spinlock, &sc); return EOK; }
Label changed for global backup
"adminUI-ClusterActions-no-report": "No report found", "adminUI-ClusterActions-restart-in-progress": "A restart is in progress, the Datafari admin system is up, but the Datafari administration actions are disabled for safety.", "adminUI-ClusterActions-unexpected-error": "The admin interface cannot reach the Datafari servers. It may be normal if you are restarting Datafari, in which case you should wait a few minutes before doing a refresh of the screen. If not, please try again and check the logs or contact your system administrator if the problem persists.", - "adminUI-ClusterActions-ServiceBackup": "Backup", + "adminUI-ClusterActions-ServiceBackup": "Global Backup", "adminUI-ClusterActions-ServiceBackup-info":"Last backup information", "adminUI-ClusterActions-ServiceBackup-launch-backup":"Backup", "adminUI-ClusterActions-ServiceBackup-message-backup-started":"Backup has started",
Added flags to Coverage build type to catch build warnings.
@@ -33,7 +33,7 @@ if(NOT GCOV_EXECUTABLE) message(FATAL_ERROR "gcov not found! Aborting...") endif() -set(GCOV_COMPILE_FLAGS "-g -O0 --coverage -fprofile-arcs -ftest-coverage") +set(GCOV_COMPILE_FLAGS "-g -O0 --coverage -fprofile-arcs -ftest-coverage -Wall -Wextra -Werror") set(GCOV_LINK_FLAGS "-lgcov") set(CMAKE_CXX_FLAGS_COVERAGE
plugins types BUGFIX avoid shifting negative values Refs
@@ -222,8 +222,8 @@ bits_bitmap2items(const char *bitmap, struct lysc_type_bits *type, struct lysc_t { size_t i, bitmap_size = lyplg_type_bits_bitmap_size(type); uint32_t bit_pos; - char bitmask; - const char *byte; + uint8_t bitmask; + const uint8_t *byte; bit_pos = 0; for (i = 0; i < bitmap_size; ++i) {
const nexthop-var. to fix warning when using RPL classic+ack
@@ -1334,7 +1334,7 @@ dao_ack_input(void) } else if(RPL_IS_STORING(instance)) { /* this DAO ACK should be forwarded to another recently registered route */ uip_ds6_route_t *re; - uip_ipaddr_t *nexthop; + const uip_ipaddr_t *nexthop; if((re = find_route_entry_by_dao_ack(sequence)) != NULL) { /* pick the recorded seq no from that node and forward DAO ACK - and clear the pending flag*/
decisions: clarify rationale for notes
@@ -96,5 +96,6 @@ If particular information is important and not present in any sections above, pl Any discarded, incomplete and unexplored ideas/opinions, which are not complete enough to be "Considered Alternatives", can be written here. For example, if it is obvious that the idea does not even solve the problem. +Unlike the main decisions and considered alternatives text in the notes does not need rationale. Furthermore, the author, acknowledgements, dates etc. can be written here.
change settorque function
@@ -54,8 +54,7 @@ bool Turtlebot3MotorDriver::init(void) } // Enable Dynamixel Torque - setTorque(left_wheel_id_, true); - setTorque(right_wheel_id_, true); + setTorque(true); groupSyncWriteVelocity_ = new dynamixel::GroupSyncWrite(portHandler_, packetHandler_, ADDR_X_GOAL_VELOCITY, LEN_X_GOAL_VELOCITY); groupSyncReadEncoder_ = new dynamixel::GroupSyncRead(portHandler_, packetHandler_, ADDR_X_PRESENT_POSITION, LEN_X_PRESENT_POSITION); @@ -106,8 +105,7 @@ bool Turtlebot3MotorDriver::getTorque() void Turtlebot3MotorDriver::close(void) { // Disable Dynamixel Torque - setTorque(left_wheel_id_, false); - setTorque(right_wheel_id_, false); + setTorque(false); // Close port portHandler_->closePort();
Update BUILD.md Update the macOS section. PR <https://github.com/Genymobile/scrcpy/pull/1559>
@@ -176,8 +176,8 @@ Additionally, if you want to build the server, install Java 8 from Caskroom, and make it avaliable from the `PATH`: ```bash -brew tap caskroom/versions -brew cask install java8 +brew tap homebrew/cask-versions +brew cask install adoptopenjdk/openjdk/adoptopenjdk8 export JAVA_HOME="$(/usr/libexec/java_home --version 1.8)" export PATH="$JAVA_HOME/bin:$PATH" ``` @@ -190,12 +190,17 @@ See [pierlon/scrcpy-docker](https://github.com/pierlon/scrcpy-docker). ## Common steps If you want to build the server, install the [Android SDK] (_Android Studio_), -and set `ANDROID_HOME` to its directory. For example: +and set `ANDROID_SDK_ROOT` to its directory. For example: [Android SDK]: https://developer.android.com/studio/index.html ```bash -export ANDROID_HOME=~/android/sdk +# Linux +export ANDROID_SDK_ROOT=~/Android/Sdk +# Mac +export ANDROID_SDK_ROOT=~/Library/Android/sdk +# Windows +set ANDROID_SDK_ROOT=%LOCALAPPDATA%\Android\sdk ``` If you don't want to build the server, use the [prebuilt server].
Do not send work until login is complete. Fixes
@@ -230,7 +230,7 @@ namespace MiningCore.Blockchain.Ethereum { var context = client.GetContextAs<EthereumWorkerContext>(); - if (context.IsSubscribed && context.IsAuthorized) + if (context.IsSubscribed && context.IsAuthorized && context.IsInitialWorkSent) { // check alive var lastActivityAgo = clock.Now - context.LastActivity;
Testing: Fix broken Windows build
@@ -2715,7 +2715,7 @@ int main(int argc, char* argv[]) { size_t len, slen; grib_handle* h = NULL; - const char* packingType[] = { "grid_second_order", "grid_complex_spatial_differencing", "grid_complex", "grid_ccsds" }; + const char* packingType[] = { "grid_second_order", "grid_complex_spatial_differencing", "grid_complex" }; const size_t numTypes = sizeof(packingType)/sizeof(packingType[0]); int ipackingType = 0; const double zmiss = 9999999999.;
Properly set DEX string size
@@ -775,8 +775,8 @@ void dex_parse( "string_ids[%i].offset", i); set_integer( - yr_le32toh(string_id_item->string_data_offset), dex->object, - "string_ids[%i].size", value); + value, dex->object, + "string_ids[%i].size", i); set_sized_string( (const char*) ((dex->data + yr_le32toh(string_id_item->string_data_offset) + 1)),
Minor bug solved in tag of node load from file (port).
@@ -116,7 +116,7 @@ napi_value node_loader_port_load_from_file(napi_env env, napi_callback_info info tag = new char[tag_length + 1]; - napi_get_value_string_utf8(env, argv[0], tag, tag_length, &tag_length); + napi_get_value_string_utf8(env, argv[0], tag, tag_length + 1, &tag_length); tag[tag_length] = '\0'; @@ -139,7 +139,7 @@ napi_value node_loader_port_load_from_file(napi_env env, napi_callback_info info napi_get_value_string_utf8(env, path, paths[path_index], path_length + 1, &path_length); - paths[path_index][path_length] = 0; + paths[path_index][path_length] = '\0'; ++path_index; }
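The N-API contract that makes `tag_length + 1` the right bufsize: napi_get_value_string_utf8() copies at most bufsize - 1 bytes and always NUL-terminates, and with a NULL buffer it just reports the string length. A sketch of the usual two-call pattern (not the node_loader code itself):

```c
#include <node_api.h>
#include <stdlib.h>

/* Copy a JS string into a freshly allocated, NUL-terminated C buffer. */
static char *copy_utf8(napi_env env, napi_value value)
{
    size_t length = 0;
    napi_get_value_string_utf8(env, value, NULL, 0, &length);  /* query length only */

    char *buf = malloc(length + 1);
    if (buf == NULL)
        return NULL;

    /* bufsize must include room for the terminator, hence length + 1. */
    napi_get_value_string_utf8(env, value, buf, length + 1, &length);
    return buf;
}
```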
zephyr/shim/chip/mchp/system.c: Format with clang-format BRANCH=none TEST=none
 LOG_MODULE_REGISTER(shim_xec_system, LOG_LEVEL_ERR);
-#define GET_BBRAM_OFS(node) \
-	DT_PROP(DT_PATH(named_bbram_regions, node), offset)
+#define GET_BBRAM_OFS(node) DT_PROP(DT_PATH(named_bbram_regions, node), offset)
 #define GET_BBRAM_SZ(node) DT_PROP(DT_PATH(named_bbram_regions, node), size)
 /*
Dockerfile: add gdb
@@ -5,7 +5,7 @@ RUN apt-get update && \ apt-get install -y --no-install-recommends \ build-essential doxygen git wget unzip python-serial python-pip \ default-jdk ant srecord iputils-tracepath rlwrap \ - mosquitto mosquitto-clients \ + mosquitto mosquitto-clients gdb \ && apt-get clean # Install ARM toolchain
dm: allow PM1_RTC_EN to be written to PM1A Clear Linux complains about not being able to write the bit to PM1A: ACPI Error: Could not enable RealTimeClock event (20180531/evxfevnt-184)
@@ -200,7 +200,7 @@ pm1_enable_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, * the global lock, but ACPI-CA whines profusely if it * can't set GBL_EN. */ - pm1_enable = *eax & (PM1_PWRBTN_EN | PM1_GBL_EN); + pm1_enable = *eax & (PM1_RTC_EN | PM1_PWRBTN_EN | PM1_GBL_EN); sci_update(ctx); } pthread_mutex_unlock(&pm_lock);
Separate optional linker libraries from core linker libraries
@@ -25,6 +25,7 @@ TMP_ROOT=tmp ### Compiler & Linker flags # any librries required (only names, ommit the "-l" at the begining) LINKER_LIBS=pthread m +LINKER_LIBS_EXT= # optimization level. OPTIMIZATION=-O2 -march=native # Warnings... i.e. -Wpedantic -Weverything -Wno-format-pedantic @@ -102,18 +103,18 @@ INCLUDE_STR = $(foreach dir,$(INCLUDE),$(addprefix -I, $(dir))) $(foreach dir,$( # add BearSSL/OpenSSL library flags ifeq ($(shell printf "\#include <bearssl.h>\\n int main(void) {}" | $(CC) $(INCLUDE_STR) -lbearssl -xc -o /dev/null - >& /dev/null ; echo $$? ), 0) FLAGS:=$(FLAGS) HAVE_BEARSSL -LINKER_LIBS:=$(LINKER_LIBS) bearssl +LINKER_LIBS_EXT:=$(LINKER_LIBS_EXT) bearssl else ifeq ($(shell printf "\#include <openssl/ssl.h>\\nint main(void) {}" | $(CC) $(INCLUDE_STR) -lcrypto -lssl -xc -o /dev/null - >& /dev/null ; echo $$? ), 0) FLAGS:=$(FLAGS) HAVE_OPENSSL -LINKER_LIBS:=$(LINKER_LIBS) crypto ssl +LINKER_LIBS_EXT:=$(LINKER_LIBS_EXT) crypto ssl endif endif # add ZLib library flags ifeq ($(shell printf "\#include \\"zlib.h\\"\\n int main(void) {}" | $(CC) $(INCLUDE_STR) -lbearssl -xc -o /dev/null - >& /dev/null ; echo $$? ), 0) FLAGS:=$(FLAGS) HAVE_ZLIB -LINKER_LIBS:=$(LINKER_LIBS) z +LINKER_LIBS_EXT:=$(LINKER_LIBS_EXT) z endif @@ -125,7 +126,7 @@ LIB_OBJS = $(foreach source, $(LIBSRC), $(addprefix $(TMP_ROOT)/, $(addsuffix .o # the computed C flags CFLAGS= -g -std=c11 -fpic $(FLAGS_STR) $(WARNINGS) $(OPTIMIZATION) $(INCLUDE_STR) CPPFLAGS= -std=c++11 -fpic $(FLAGS_STR) $(WARNINGS) $(OPTIMIZATION) $(INCLUDE_STR) -LINKER_FLAGS=$(foreach lib,$(LINKER_LIBS),$(addprefix -l,$(lib))) +LINKER_FLAGS=$(foreach lib,$(LINKER_LIBS),$(addprefix -l,$(lib))) $(foreach lib,$(LINKER_LIBS_EXT),$(addprefix -l,$(lib))) ######## ## Main Tasks
docs + mustache API improvements
@@ -255,20 +255,63 @@ Loading the template ***************************************************************************** */ /** -Loads a mustache template (and any partials). +Loads the mustache template found in `:filename`. If `:template` is provided it +will be used instead of reading the file's content. + + Iodine::Mustache.new(filename, template = nil) + +When template data is provided, filename (if any) will only be used for partial +template path resolution and the template data will be used for the template's +content. This allows, for example, for front matter to be extracted before +parsing the template. Once a template was loaded, it could be rendered using {render}. + +Accepts named arguments as well: + + Iodine::Mustache.new(filename: "foo.mustache", template: "{{ bar }}") + */ -static VALUE iodine_mustache_new(VALUE self, VALUE filename) { +static VALUE iodine_mustache_new(int argc, VALUE *argv, VALUE self) { + VALUE filename = Qnil, template = Qnil; + if (argc == 1 && RB_TYPE_P(argv[0], T_HASH)) { + /* named arguments */ + filename = rb_hash_aref(argv[0], filename_id); + template = rb_hash_aref(argv[0], template_id); + } else { + /* regular arguments */ + if (argc == 0 || argc > 2) + rb_raise(rb_eArgError, "expecting 1..2 arguments or named arguments."); + filename = argv[0]; + if (argc > 1) { + template = argv[1]; + } + } + if (filename == Qnil && template == Qnil) + rb_raise(rb_eArgError, "missing both template contents and file name."); + + if (template != Qnil) + Check_Type(template, T_STRING); + if (filename != Qnil) + Check_Type(filename, T_STRING); + + fio_str_s str = FIO_STR_INIT; + mustache_s **m = NULL; TypedData_Get_Struct(self, mustache_s *, &iodine_mustache_data_type, m); if (!m) { rb_raise(rb_eRuntimeError, "Iodine::Mustache allocation error."); } - Check_Type(filename, T_STRING); + mustache_error_en err; - *m = mustache_load(.filename = RSTRING_PTR(filename), - .filename_len = RSTRING_LEN(filename), .err = &err); + *m = mustache_load(.filename = + (filename == Qnil ? NULL : RSTRING_PTR(filename)), + .filename_len = + (filename == Qnil ? 0 : RSTRING_LEN(filename)), + .data = (template == Qnil ? NULL : RSTRING_PTR(template)), + .data_len = (template == Qnil ? 0 : RSTRING_LEN(template)), + .err = &err); + if (!*m) goto error; return self; @@ -548,7 +591,7 @@ void iodine_init_mustache(void) { */ VALUE tmp = rb_define_class_under(IodineModule, "Mustache", rb_cData); rb_define_alloc_func(tmp, iodine_mustache_data_alloc_c); - rb_define_method(tmp, "initialize", iodine_mustache_new, 1); + rb_define_method(tmp, "initialize", iodine_mustache_new, -1); rb_define_method(tmp, "render", iodine_mustache_render, 1); rb_define_singleton_method(tmp, "render", iodine_mustache_render_klass, -1); // rb_define_module_function(tmp, "render", iodine_mustache_render_klass, 2);
Old_path may be NULL
@@ -804,7 +804,7 @@ int picoquic_retransmit_needed(picoquic_cnx_t* cnx, picoquic_path_t * old_path = p->send_path; /* we'll report it where it got lost */ - old_path->retrans_count++; + if (old_path) old_path->retrans_count++; *header_length = 0;
Increase precision of timer events
@@ -10,9 +10,9 @@ const fields = [ key: "duration", type: "number", label: l10n("FIELD_TIMER_DURATION"), - min: 0.25, + min: 0.01, max: 60, - step: 0.25, + step: 0.01, defaultValue: 10.0, }, {
Addition removed. According to this [documentation](https://dresden-elektronik.github.io/deconz-rest-doc/devices/philips/sml001_motion_sensor/#state-attributes_1) for `lightlevel`, the addition of 0.5 is not correct.
@@ -6,7 +6,7 @@ R.item('state/dark').val = measuredValue <= tholddark; R.item('state/daylight').val = measuredValue >= tholddark + tholdoffset; if (measuredValue >= 0 && measuredValue < 0xffff) { const exp = measuredValue - 1; - const l = Math.pow(10, exp / 10000.0) + 0.5; + const l = Math.pow(10, exp / 10000.0); R.item('state/lux').val = Math.floor(l); } Item.val = measuredValue;
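A worked example of the difference: for measuredValue = 20000 the exponent is 19999, and 10^(19999/10000) is about 99.98, so the old code reported floor(99.98 + 0.5) = 100 lux while the corrected code reports floor(99.98) = 99 lux, matching lux = 10^((lightlevel - 1) / 10000) as given in the linked documentation.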
remove cell only when it exists.
@@ -557,12 +557,14 @@ void neighbors_setPreferredParent(uint8_t index, bool isPreferred){
    } else {
       // the neighbor is de-selected as parent
       // remove the autonomous cell to this neighbor
+      if (schedule_hasDedicatedCellToNeighbor(&(neighbors_vars.neighbors[index].addr_64b))){
      schedule_removeActiveSlot(
          slotoffset,                                 // slot offset
          &(neighbors_vars.neighbors[index].addr_64b) // neighbor
      );
    }
 }
+}
 //===== managing routing info
Add Agent to data.yml
@@ -4921,7 +4921,9 @@ classes: Client::UI::Agent::AgentGatheringNote: vtbls: - ea: 0x141997360 - base: Client::UI::Agent::AgentInterface + base: Client::UI::Agent::AgentGatheringNote + funcs: + 0x1409B47C0: OpenGatherableByItemId Client::UI::Agent::AgentMcguffin: vtbls: - ea: 0x14199BF18
khan: saner tic rollover, remove our
?>(?=([@ ~] s.beam) beam(s i.s.beam)) :: ++ start-spider - |= [our=@p =vase] + |= =vase ^- note [%g %deal [our our] %spider %poke %spider-start vase] :: ++ watch-spider - |= [our=@p =path] + |= =path ^- note [%g %deal [our our] %spider %watch path] -- ~|(%khan-call-dud (mean tang.u.dud)) ?+ -.task [~ khan-gate] %born - [~ khan-gate(hey hen)] + [~ khan-gate(hey hen, tic 0)] :: %fard =/ tid=@ta %^ cat 3 'khan-fyrd--' (scot %uv (sham (mix tic eny))) - =. tic (mod +(tic) (bex 128)) + =. tic +(tic) =* fyd p.task =/ =beak (get-beak bear.fyd now) =/ args [~ `tid beak name.fyd q.args.fyd] :_ khan-gate %+ turn - :~ (watch-spider our /thread-result/[tid]) - (start-spider our !>(args)) + :~ (watch-spider /thread-result/[tid]) + (start-spider !>(args)) == |=(=note ^-(move [hen %pass //g note])) ::
Comment in DCT4 allowed length
 * Note that the implementation of Inverse DCT4 and DCT4 is same, hence same process function can be used for both.
 *
 * \par Lengths supported by the transform:
- * As DCT4 internally uses Real FFT, it supports all the lengths supported by arm_rfft_f32().
+ * As DCT4 internally uses Real FFT, it supports all the lengths 128, 512, 2048 and 8192.
 * The library provides separate functions for Q15, Q31, and floating-point data types.
 * \par Instance Structure
 * The instances for Real FFT and FFT, cosine values table and twiddle factor table are stored in an instance data structure.
Increase the draft iteration to avoid binary compatibility issues with older preset discovery plugins
 // Use it to retrieve const clap_preset_discovery_factory_t* from
 // clap_plugin_entry.get_factory()
 static const CLAP_CONSTEXPR char CLAP_PRESET_DISCOVERY_FACTORY_ID[] =
-   "clap.preset-discovery-factory/draft-0";
+   "clap.preset-discovery-factory/draft-1";
 
 #ifdef __cplusplus
 extern "C" {
Fix _arg argument not being cast to correct type Also change to TEST_EQUAL, as this is now possible.
@@ -3685,6 +3685,7 @@ void aead_multipart_generate_nonce( int key_type_arg, data_t *key_data, uint8_t nonce_buffer[PSA_AEAD_NONCE_MAX_SIZE]; psa_key_attributes_t attributes = PSA_KEY_ATTRIBUTES_INIT; psa_status_t status = PSA_ERROR_GENERIC_ERROR; + psa_status_t expected_status = expected_status_arg; size_t nonce_generated_len = 0; size_t expected_generated_len = expected_generated_len_arg; unsigned char *output_data = NULL; @@ -3735,13 +3736,13 @@ void aead_multipart_generate_nonce( int key_type_arg, data_t *key_data, nonce_len, &nonce_generated_len ); - TEST_ASSERT( status == expected_status_arg ); + TEST_EQUAL( status, expected_status ); TEST_EQUAL( nonce_generated_len, expected_generated_len ); TEST_ASSERT( nonce_generated_len < PSA_AEAD_NONCE_MAX_SIZE ); - if( expected_status_arg == PSA_SUCCESS ) + if( expected_status == PSA_SUCCESS ) { /* Ensure we can still complete operation. */ @@ -3837,7 +3838,7 @@ void aead_multipart_set_nonce( int key_type_arg, data_t *key_data, status = psa_aead_set_nonce( &operation, nonce_buffer, nonce_len ); - TEST_ASSERT( status == expected_status ); + TEST_EQUAL( status, expected_status ); if( expected_status == PSA_SUCCESS ) {
Add CMake build instructions
@@ -94,6 +94,22 @@ With this one function call: `VmaAllocation` is an object that represents memory assigned to this buffer. It can be queried for parameters like Vulkan memory handle and offset. +# How to build + +On Windows it is recommended to use [CMake UI](https://cmake.org/runningcmake/). Alternatively you can generate a Visual Studio project map using CMake in command line: `cmake -B./build/ -DCMAKE_BUILD_TYPE=Debug -G "Visual Studio 16 2019" -A x64 ./` + +On Linux, use CMake with [Ninja](https://ninja-build.org/) and run `cmake -GNinja -Bbuild -DCMAKE_BUILD_TYPE=Debug` + +The following CMake options are available + +| Target | Description | CMake option | +| ------------- | ------------- | ------------- | +| VmaExample | VMA example application | `VMA_BUILD_EXAMPLE_APP` | +| VmaShaders | Shaders for VmaExample | `VMA_BUILD_EXAMPLE_APP_SHADERS` | +| VmaReplay | Replay tool for VMA .csv trace files | `VMA_BUILD_REPLAY_APP` | + +Please note that while VulkanMemoryAllocator library is supported on other platforms besides Windows, VmaExample and VmaReplay are not. + # Binaries The release comes with precompiled binary executables for "VulkanSample" application which contains test suite and "VmaReplay" tool. They are compiled using Visual Studio 2019, so they require appropriate libraries to work, including "MSVCP140.dll", "VCRUNTIME140.dll", "VCRUNTIME140_1.dll". If their launch fails with error message telling about those files missing, please download and install [Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017 and 2019](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads), "x64" version.
Relocate callback so there's only one of them.
@@ -47,7 +47,6 @@ namespace blit { if (t->state == Timer::RUNNING){ if (time > (t->started + t->duration)) { // timer triggered if(t->loops == -1){ - t->callback(*t); t->started = time; // reset the start time correcting for any error } else @@ -57,8 +56,8 @@ namespace blit { if (t->loops == 0){ t->state = Timer::FINISHED; } - t->callback(*t); } + t->callback(*t); } } }
extmod/modlwip: Don't require a port to define concurrency macros.
#define ip_reset_option(pcb, opt) ((pcb)->so_options &= ~(opt)) #endif +// A port can define these hooks to provide concurrency protection +#ifndef MICROPY_PY_LWIP_ENTER +#define MICROPY_PY_LWIP_ENTER +#define MICROPY_PY_LWIP_REENTER +#define MICROPY_PY_LWIP_EXIT +#endif + #ifdef MICROPY_PY_LWIP_SLIP #include "netif/slipif.h" #include "lwip/sio.h"
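A port that does need protection can then override the hooks in its own configuration, along these lines. The lwip_lock()/lwip_unlock() names are placeholders for whatever lock the port already wraps around the lwIP core, not a real port's API:

```c
/* In a port's mpconfigport.h (illustrative only). */
#define MICROPY_PY_LWIP_ENTER    lwip_lock();
#define MICROPY_PY_LWIP_REENTER  lwip_lock();
#define MICROPY_PY_LWIP_EXIT     lwip_unlock();
```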
dpdk: postpone updating runtime data. This prevents a crash due to a worker thread accessing device data while the device vector is growing. Type: fix
@@ -552,7 +552,6 @@ dpdk_lib_init (dpdk_main_t * dm) vnm, xd->hw_if_index, q, VNET_HW_IF_RXQ_THREAD_ANY); } - vnet_hw_if_update_runtime_data (vnm, xd->hw_if_index); /*Get vnet hardware interface */ hi = vnet_get_hw_interface (vnm, xd->hw_if_index); @@ -597,6 +596,9 @@ dpdk_lib_init (dpdk_main_t * dm) format_dpdk_device_errors, xd); } + for (int i = 0; i < vec_len (dm->devices); i++) + vnet_hw_if_update_runtime_data (vnm, dm->devices[i].hw_if_index); + return 0; }
Fix ruby interface
@@ -92,7 +92,7 @@ VALUE method_find_primitive(VALUE self, VALUE r_symprec, VALUE r_angle_symprec) { - int i, j, k, num_atom, num_prim_atom; + int i, j, num_atom, num_prim_atom; double symprec, lattice[3][3]; VALUE array, vector, lat_ary, pos_ary, typ_ary; @@ -318,7 +318,7 @@ VALUE method_get_symmetry(VALUE self, } } - num_sym = spgat_get_symmetry_numerical(rotations, + num_sym = spgat_get_symmetry(rotations, translations, max_num_sym, lattice, @@ -350,8 +350,6 @@ VALUE method_get_symmetry(VALUE self, rb_ary_push(array, r_rot); rb_ary_push(array, r_trans); - err: - return array; }
Add missing lovr.filesystem.unmount function;
@@ -349,6 +349,7 @@ static const luaL_Reg lovrFilesystem[] = { { "remove", l_lovrFilesystemRemove }, { "setRequirePath", l_lovrFilesystemSetRequirePath }, { "setIdentity", l_lovrFilesystemSetIdentity }, + { "unmount", l_lovrFilesystemUnmount }, { "write", l_lovrFilesystemWrite }, { NULL, NULL } };
docs: charliecloud typo borking recipe
@@ -12,6 +12,6 @@ Preview, so it must be manually enabled with kernel arguments. [sms](*\#*) wwsh -y provision set "${compute_regex}" --kargs=namespace.unpriv_enable=1 # Increase per-user limit on the number of user namespaces that may be created -[sms](*\#*) echo "user.max_user_namespaces=15076" >> $CHROOT/etc/sysctl.conf' +[sms](*\#*) echo "user.max_user_namespaces=15076" >> $CHROOT/etc/sysctl.conf \end{lstlisting} % end_ohpc_run
rune: support SGX in-tree driver FIXES:
@@ -150,7 +150,7 @@ func genEnclaveDeviceTemplate(etype string) []*configs.Device { return []*configs.Device{ &configs.Device{ Type: 'c', - Path: "/dev/isgx", + Path: "/dev/sgx_enclave", Major: 10, }, &configs.Device{ @@ -158,6 +158,11 @@ func genEnclaveDeviceTemplate(etype string) []*configs.Device { Path: "/dev/sgx/enclave", Major: 10, }, + &configs.Device{ + Type: 'c', + Path: "/dev/isgx", + Major: 10, + }, &configs.Device{ Type: 'c', Path: "/dev/gsgx", @@ -197,7 +202,7 @@ func containEnclaveDevice(devices []*configs.Device, s string) bool { func genEnclavePathTemplate(etype string) []string { switch etype { case enclaveConfigs.EnclaveTypeIntelSgx: - return []string{"/dev/isgx", "/dev/sgx/enclave", "/dev/gsgx"} + return []string{"/dev/sgx_enclave", "/dev/sgx/enclave", "/dev/isgx", "/dev/gsgx"} case enclaveConfigs.EnclaveTypeAwsNitroEnclaves: return []string{"/dev/nitro_enclaves"} case enclaveConfigs.EnclaveTypeJailHouse:
Remove TLS 1.2 exception about EC J-PAKE and PSA Crypto
@@ -86,7 +86,6 @@ is enabled, no change required on the application side. Current exceptions: -- EC J-PAKE (when `MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED` is defined) - finite-field (non-EC) Diffie-Hellman (used in key exchanges: DHE-RSA, DHE-PSK)
note OpenBSD accept4.
 23 May 2018: Wouter
 	- Use accept4 to speed up incoming TCP (and TLS) connections,
-	  available on Linux and FreeBSD and OpenBSD.
+	  available on Linux, FreeBSD and OpenBSD.
 
 17 May 2018: Ralph
 	- Qname minimisation default changed to yes.
make title in release notes more in-line with existing plugins
@@ -65,7 +65,7 @@ The following section lists news about the [plugins](https://www.libelektra.org/ - <<TODO>> - <<TODO>> -### Uname Plugin +### uname - Minor improvement of source code readability in uname.c _(@lawli3t)_
kdb: sort result of plugin-list alphabetically see
@@ -45,7 +45,7 @@ int PluginListCommand::execute (Cmdline const & cl) plugins = db.listAllPlugins (); } - std::multimap<int, std::string> sortedPlugins; + std::multimap<int, std::string> statusPlugins; for (const auto & plugin : plugins) { try @@ -56,11 +56,11 @@ int PluginListCommand::execute (Cmdline const & cl) *Key ("system/module", KEY_VALUE, "this plugin was loaded without a config", KEY_END), KS_END)), "status")); - sortedPlugins.insert (std::make_pair (s, plugin)); + statusPlugins.insert (std::make_pair (s, plugin)); } catch (std::exception const & e) { - sortedPlugins.insert (std::make_pair (-1000000, plugin)); + statusPlugins.insert (std::make_pair (-1000000, plugin)); if (cl.verbose) { std::cerr << "No status found for " << plugin << std::endl; @@ -70,21 +70,30 @@ int PluginListCommand::execute (Cmdline const & cl) if (cl.verbose) cout << "number of all plugins: " << plugins.size () << endl; - for (auto & plugin : sortedPlugins) + std::vector<std::string> sortedPlugins; + + for (auto & plugin : statusPlugins) { - std::cout << plugin.second; + std::string elem = plugin.second; if (cl.verbose) { - std::cout << " " << plugin.first; + elem += " " + std::to_string (plugin.first); + } + sortedPlugins.push_back (elem); } + sort (sortedPlugins.begin (), sortedPlugins.end ()); + + for (auto & elem : sortedPlugins) + { + std::cout << elem; if (cl.null) { - cout << '\0'; + std::cout << '\0'; } else { - cout << endl; + std::cout << endl; } }
gpstate: Avoid logging ERROR if no expansion is in progress Resolves
@@ -1220,9 +1220,16 @@ class _GpExpandStatus(object): SELECT status FROM gpexpand.status ORDER BY updated DESC LIMIT 1 ''' + status_table_exists_sql = """ + SELECT CASE WHEN to_regclass('gpexpand.status') IS NOT NULL THEN 1 ELSE 0 END + """ + try: dburl = dbconn.DbURL(dbname=self.dbname) with dbconn.connect(dburl, encoding='UTF8') as conn: + if not dbconn.querySingleton(conn, status_table_exists_sql): + conn.close() + return False status = dbconn.querySingleton(conn, sql) conn.close() except Exception:
join: making sure group is in state and retaining join request state
@@ -9,11 +9,11 @@ import { ContinuousProgressBar, } from '@tlon/indigo-react'; import { Formik, Form } from 'formik'; -import React, { useEffect } from 'react'; +import React, { useEffect, useState } from 'react'; import { useHistory, useLocation } from 'react-router-dom'; import useGroupState from '~/logic/state/group'; import { useInviteForResource } from '~/logic/state/invite'; -import { usePreview } from '~/logic/state/metadata'; +import useMetadataState, { usePreview } from '~/logic/state/metadata'; import { decline, Invite } from '@urbit/api'; import { join, JoinRequest } from '@urbit/api/groups'; import airlock from '~/logic/api'; @@ -184,6 +184,8 @@ export function Join(props: JoinProps) { const { group, kind } = desc; const [, , ship, name] = group.split('/'); const graph = kind === 'graph'; + const associations = useMetadataState(s => s.associations); + const joined = graph ? associations.graph[group] : associations.groups[group]; const finishedPath = redir ? redir : graph @@ -192,13 +194,22 @@ export function Join(props: JoinProps) { const history = useHistory(); const joinRequest = useGroupState(s => s.pendingJoin[group]); + const [openedRequest, setOpenedRequest] = useState<JoinRequest>(); const invite = useInviteForResource(kind, ship, name); - const isDone = joinRequest && joinRequest.progress === 'done'; + const isDone = openedRequest && openedRequest.progress === 'done' && joined; const isErrored = - joinRequest && joinError.includes(joinRequest.progress as any); + openedRequest && joinError.includes(openedRequest.progress as any); const isLoading = - joinRequest && joinLoad.includes(joinRequest.progress as any); + openedRequest && joinLoad.includes(openedRequest.progress as any); + + // If we opened this modal from a join request, + // don't let the request getting deleted move us to the wrong state + useEffect(() => { + if (joinRequest) { + setOpenedRequest(joinRequest); + } + }, [joinRequest]); useEffect(() => { if (isDone && desc.kind == 'graph') { @@ -218,11 +229,11 @@ export function Join(props: JoinProps) { modal={modal} dismiss={dismiss} desc={desc} - request={joinRequest} + request={openedRequest} finished={finishedPath} /> ) : isErrored ? ( - <JoinError modal={modal} desc={desc} request={joinRequest} /> + <JoinError modal={modal} desc={desc} request={openedRequest} /> ) : ( <JoinInitial modal={modal} dismiss={dismiss} desc={desc} invite={invite} /> );
remove non-working links
 # Current state of Rhomobile solution
-Currently Rhomobile solution supported and improved by TAU Technologies Inc (partner of Zebra Technologies).
+Currently Rhomobile solution supported and improved by TAU Technologies.
 For questions on commercial support contact [contact mail](mailto:[email protected]) or reach out to the [TAU Technologies website](http://tau-platform.com).
 Current actual version is Rhomobile Suite 7.5. [Download RMS 7.5](http://tau-platform.com/developers/downloads/)
 Actual documentation located on [RMS 7.5 documentation](http://docs.tau-platform.com/en/7.5/home)
-
-# RhoMobile Open Source Documentation
-
-This page contains links to documentation and source-code repositories for the RhoMobile Suite, which has been released by Zebra Technologies to the open source community.
-
-For access to prior documentation, please visit **[Zebra's RhoMobile documentation site](http://docs.rhomobile.com/en/5.4/guide/welcome)**.
-
-
-## Getting RhoMobile source code
-
-[Getting Source Code](https://github.com/rhomobile/rhodes/blob/master/doc/oss/getting_source_code.md) - How to use git to download all the source code that Zebra is making available.
-
 ## RhoMobile versions
 RhoMobile has a mainstream development branch which is forked for stable releases and further patches for them.
bugID:20124278:[ble] Add public address API.
@@ -379,6 +379,29 @@ static int set_random_address(const bt_addr_t *addr) return 0; } +//set pub address with ext vs cmd +static int set_public_address(const bt_addr_t *addr) +{ + struct net_buf *buf; + int err; + + BT_DBG("%s", bt_addr_str(addr)); + + buf = bt_hci_cmd_create(BT_HCI_OP_VS_WRITE_BD_ADDR, sizeof(*addr)); + if (!buf) { + return -ENOBUFS; + } + + net_buf_add_mem(buf, addr, sizeof(*addr)); + + err = bt_hci_cmd_send_sync(BT_HCI_OP_VS_WRITE_BD_ADDR, buf, NULL); + if (err) { + return err; + } + BT_DBG("set pub address success.\n"); + return 0; +} + #if defined(CONFIG_BT_PRIVACY) /* this function sets new RPA only if current one is no longer valid */ static int le_set_private_addr(void)
Compilation flag changes for Android, tweak options to reduce binary size. Resulting SDK binaries are about 20% smaller now.
@@ -66,14 +66,9 @@ endif(IOS) if(ANDROID) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -ftemplate-depth=1024 -fexceptions -frtti -fvisibility=hidden -fvisibility-inlines-hidden") - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Os -flto=full -fomit-frame-pointer") - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Os -flto=full -fomit-frame-pointer") - set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -Wl,-plugin-opt=O2 -s -Wl,--gc-sections -Wl,--as-needed -fuse-ld=lld -Wl,--version-script=${PROJECT_SOURCE_DIR}/../android/version-script") - - if(CMAKE_BUILD_TYPE MATCHES "Release|RELEASE") - string(REPLACE "-Wa,--noexecstack" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - string(REPLACE "-Wa,--noexecstack" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - endif() + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Oz -flto=full -fomit-frame-pointer") + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Oz -flto=full -fomit-frame-pointer") + set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} -s -fuse-ld=lld -Wl,-plugin-opt=O3 -Wl,--gc-sections -Wl,-icf=all -Wl,-threads=4 -Wl,--as-needed -Wl,--version-script=${PROJECT_SOURCE_DIR}/../android/version-script") endif(ANDROID) # Directories
odissey: add empty line during users log
@@ -293,6 +293,7 @@ void od_schemeprint(od_scheme_t *scheme, od_log_t *log) od_log(log, NULL, " pool_max %d", route->pool_max); } if (! od_listempty(&scheme->users)) { + od_log(log, NULL, ""); od_log(log, NULL, "users"); od_listforeach(&scheme->users, i) { od_schemeuser_t *user;
mm: Remove the unused macro MM_IS_ALLOCATED
 # define MMSIZE_MAX UINT32_MAX
 #endif
-#define MM_IS_ALLOCATED(n) \
-  ((int)((FAR struct mm_allocnode_s *)(n)->preceding) < 0)
-
 /* What is the size of the allocnode? */
 #define SIZEOF_MM_ALLOCNODE sizeof(struct mm_allocnode_s)
Add timed delay before reading VCO comparators
using namespace std; #include <chrono> #include <thread> +#else +#include "lms7002_regx51.h" //MCU timer sfr +uint16_t gComparatorDelayCounter = 0xFF00; // ~100us @ ref 30.72MHz #endif #define VERBOSE 0 @@ -269,25 +272,22 @@ uint8_t SetFrequencySX(const bool tx, const float_type freq_Hz) return MCU_NO_ERROR; } -static void Delay() -{ -#ifdef __cplusplus - std::this_thread::sleep_for(std::chrono::microseconds(1)); -#else - uint16_t i; - volatile uint16_t t=0; - for(i=0; i<400; ++i) - t <<= 1; -#endif -} - /** @brief Performs VCO tuning operations for CLKGEN, SXR, SXT modules @param module module selection for tuning 0-cgen, 1-SXR, 2-SXT @return 0-success, other-failure */ static uint8_t ReadCMP(const bool SX) { - Delay(); +#ifdef __cplusplus + std::this_thread::sleep_for(std::chrono::microseconds(100)); +#else + TR0 = 0; //stop timer 0 + TH0 = (gComparatorDelayCounter >> 8); + TL0 = (gComparatorDelayCounter & 0xFF); + TF0 = 0; // clear overflow + TR0 = 1; //start timer 0 + while( !TF0 ); // wait for timer overflow +#endif return (uint8_t)Get_SPI_Reg_bits(SX ? 0x0123 : 0x008C, MSB_LSB(13, 12)); } @@ -320,7 +320,9 @@ uint8_t TuneVCO(bool SX) // 0-cgen, 1-SXR, 2-SXT msblsb = MSB_LSB(8, 1); //CSW msb lsb Modify_SPI_Reg_bits(0x0086, MSB_LSB(2, 1), 0); //activate VCO and comparator } - +#ifndef __cplusplus + gComparatorDelayCounter = 0xFFFF - (uint16_t)((0.0001/12)*RefClk); // ~100us +#endif //check if lock is within VCO range Modify_SPI_Reg_bits(addrCSW_VCO, msblsb, 0); if(ReadCMP(SX) == 3) //VCO too high
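The reload value follows from the classic 8051 machine cycle of 12 reference-clock periods, which the code's divide-by-12 reflects: at RefClk = 30.72 MHz, 100 us corresponds to (0.0001 / 12) * 30.72e6 = 256 timer ticks, so the counter is loaded with 0xFFFF - 256 = 0xFEFF, consistent with the 0xFF00 default annotated "~100us @ ref 30.72MHz".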
phb4: Mark PHB as fenced on creset. If we have to inject an error to trigger recovery, we end up not marking the PHB as fenced in the PHB struct. This fixes that.
@@ -2631,7 +2631,7 @@ static int64_t phb4_creset(struct pci_slot *slot) * Force use of ASB for register access until the PHB has * been fully reset. */ - p->flags |= PHB4_CFG_USE_ASB; + p->flags |= PHB4_CFG_USE_ASB | PHB4_AIB_FENCED; /* Clear errors, following the proper sequence */ phb4_err_clear(p);
fix(font): ids pointer is not freed
@@ -310,8 +310,7 @@ size_t LCUIFont_UpdateWeight(const int *font_ids, LCUI_FontWeight weight, if (!font_ids) { return 0; } - for (len = 0; font_ids[len]; ++len) - ; + for (len = 0; font_ids[len]; ++len); if (len < 1) { return 0; } @@ -945,6 +944,7 @@ static void LCUIFont_LoadFontsByFontConfig(void) if (i > 0) { LCUIFont_SetDefault(ids[i - 1]); } + free(ids); } #else @@ -976,6 +976,7 @@ static void LCUIFont_LoadFontsForLinux(void) if (i > 0) { LCUIFont_SetDefault(ids[i - 1]); } + free(ids); } #endif
Fix container repository for SPO container build. A previous change introduced the wrong repository, which led to authorization issues when trying to push the image. This fixes that.
@@ -77,7 +77,7 @@ jobs: uses: docker/metadata-action@v4 with: images: | - quay.io/security-profiles-operator/security-profiles-operator + quay.io/security-profiles-operator/build tags: | type=ref,event=branch type=ref,event=pr @@ -124,7 +124,7 @@ jobs: uses: docker/metadata-action@69f6fc9d46f2f8bf0d5491e4aabe0bb8c6a4678a with: images: | - quay.io/security-profiles-operator/security-profiles-operator + quay.io/security-profiles-operator/build flavor: | suffix=-ubi tags: |
[io] install the correct file, siconos_pprocess
@@ -55,7 +55,7 @@ if(HAVE_SICONOS_MECHANICS) install(PROGRAMS ${SICONOS_SWIG_ROOT_DIR}/io/siconos_filter DESTINATION bin) install(PROGRAMS ${SICONOS_SWIG_ROOT_DIR}/io/siconos_run DESTINATION bin) if (WITH_MECHANISMS) - install(PROGRAMS ${SICONOS_SWIG_ROOT_DIR}/io/pprocess.py DESTINATION bin RENAME siconos_pprocess) + install(PROGRAMS ${SICONOS_SWIG_ROOT_DIR}/io/siconos_pprocess DESTINATION bin RENAME siconos_pprocess) endif() # automatic manpages using help2man
test_train_on_quantized_pool_with_large_grid: Run fit on GPU properly using fit_catboost_gpu. Note: mandatory check (NEED_CHECK) was skipped
@@ -2229,14 +2229,13 @@ def test_train_on_quantized_pool_with_large_grid(): # borders # # There are 20 rows in a dataset. - cmd = ( - CATBOOST_PATH, 'fit', - '--task-type', 'GPU', - '-f', 'quantized://' + data_file('quantized_with_large_grid', 'train.qbin'), - '-t', 'quantized://' + data_file('quantized_with_large_grid', 'test.qbin'), - '-i', '10') - yatest.common.execute(cmd) + params = { + '-f': 'quantized://' + data_file('quantized_with_large_grid', 'train.qbin'), + '-t': 'quantized://' + data_file('quantized_with_large_grid', 'test.qbin'), + '-i': '10' + } + fit_catboost_gpu(params) @pytest.mark.parametrize('growing_policy', NONSYMMETRIC)
[Components][USB][ECM] fix build error
@@ -419,12 +419,12 @@ rt_err_t rt_ecm_eth_tx(rt_device_t dev, struct pbuf* p) p->tot_len = USB_ETH_MTU; } - result = rt_sem_take(&device->tx_buffer_free, rt_tick_from_millisecond(1000)); + result = rt_sem_take(&ecm_eth_dev->tx_buffer_free, rt_tick_from_millisecond(1000)); if(result != RT_EOK) { LOG_W("wait for buffer free timeout"); /* if cost 1s to wait send done it said that connection is close . drop it */ - rt_sem_release(&device->tx_buffer_free); + rt_sem_release(&ecm_eth_dev->tx_buffer_free); return result; }
Constants: Require `noresolver` plugin for test
@@ -6,4 +6,4 @@ if (DEPENDENCY_PHASE) configure_file (constants.c.in ${CMAKE_CURRENT_BINARY_DIR}/constants.c) endif () -add_plugin (constants SOURCES ${CMAKE_CURRENT_BINARY_DIR}/constants.c TEST_README) +add_plugin (constants SOURCES ${CMAKE_CURRENT_BINARY_DIR}/constants.c TEST_README TEST_REQUIRED_PLUGINS noresolver)
http_client: fix handling of broken remote connections
@@ -622,6 +622,7 @@ int flb_http_do(struct flb_http_client *c, size_t *bytes) } /* Always append a NULL byte */ + if (r_bytes >= 0) { c->resp.data_len += r_bytes; c->resp.data[c->resp.data_len] = '\0'; @@ -636,6 +637,12 @@ int flb_http_do(struct flb_http_client *c, size_t *bytes) continue; } } + else { + flb_error("[http_client] broken connection to %s:%i ?", + c->u_conn->u->tcp_host, c->u_conn->u->tcp_port); + return -1; + } + } return 0; }
Faster lores flip
@@ -116,8 +116,8 @@ void blit_tick() { blit::render(blit::now()); // debug cycle count for flip - //blit::fb.pen(rgba(255, 255, 255)); - //blit::fb.text(std::to_string(flip_cycle_count), &minimal_font[0][0], point(10, 20)); + blit::fb.pen(rgba(255, 255, 255)); + blit::fb.text(std::to_string(flip_cycle_count), &minimal_font[0][0], point(10, 20)); HAL_LTDC_ProgramLineEvent(&hltdc, 252); @@ -407,29 +407,18 @@ void blit_flip() { } else { // pixel double the framebuffer to the LTDC buffer uint32_t *src = (uint32_t *)__fb_lores.data; - uint16_t *dest = (uint16_t *)(&__ltdc_start); + uint32_t *dest = (uint32_t *)(&__ltdc_start); for(uint8_t y = 0; y < 120; y++) { // pixel double the current row while converting from RGBA to RGB565 for(uint8_t x = 0; x < 160; x++) { - uint32_t s = *src; - + uint32_t s = *src++; uint16_t c = ((s & 0xf8000000) >> 27) | ((s & 0x00fc0000) >> 13) | ((s & 0x0000f800)); - - *dest++ = c; - *dest++ = c; - - src++; - } - - // copy the previous converted row (640 bytes / 320 x 2-byte pixels) - uint32_t c = 640 / 4; - uint32_t *d = (uint32_t *)(dest); - uint32_t *s = (uint32_t *)(dest - 320); - while(c--) { - *d++ = *s++; + *(dest) = c | (c << 16); + *(dest + 160) = c | (c << 16); + dest++; } - dest += 320; + dest += 160; // skip the doubled row } }
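For context, a minimal sketch of the new inner loop's idea: each 32-bit source pixel is packed to RGB565 once (shift amounts copied from the diff), written twice horizontally as c | (c << 16), and the same 32-bit word is stored one output row below, so the old per-row copy pass disappears. The buffer names and the separate helper below are illustrative, not the firmware's real layout:

#include <stdint.h>
#include <stdlib.h>

/* Double one 160-pixel source row into two 320-pixel RGB565 output rows.
 * 'dest' points at the first output row; dest[160] lands on the row below
 * because each uint32_t word holds two 16-bit pixels (320 px = 160 words). */
static void double_row(const uint32_t *src, uint32_t *dest)
{
    for (int x = 0; x < 160; x++) {
        uint32_t s = *src++;
        /* pack three 8-bit channels into 5/6/5 bits (shifts taken from the diff above) */
        uint16_t c = (uint16_t)(((s & 0xf8000000u) >> 27) |
                                ((s & 0x00fc0000u) >> 13) |
                                 (s & 0x0000f800u));
        uint32_t two = (uint32_t)c | ((uint32_t)c << 16);  /* two identical pixels */
        dest[0]   = two;   /* current output row */
        dest[160] = two;   /* duplicated row directly below */
        dest++;
    }
}

int main(void)
{
    uint32_t *src  = calloc(160 * 120, sizeof *src);   /* 160x120 32-bit source */
    uint32_t *dest = calloc(160 * 240, sizeof *dest);  /* 320x240 RGB565 output */
    for (int y = 0; y < 120; y++)
        double_row(src + y * 160, dest + y * 320);     /* each source row fills two output rows */
    free(src);
    free(dest);
    return 0;
}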
Fix static build issues caused by yangobj set_target_properties call.
@@ -272,10 +272,11 @@ else() #link dl target_link_libraries(yang ${CMAKE_DL_LIBS}) + + set_target_properties(yangobj PROPERTIES COMPILE_FLAGS "-fvisibility=hidden") endif(ENABLE_STATIC) set_target_properties(yang PROPERTIES VERSION ${LIBYANG_SOVERSION_FULL} SOVERSION ${LIBYANG_SOVERSION}) -set_target_properties(yangobj PROPERTIES COMPILE_FLAGS "-fvisibility=hidden") # link math target_link_libraries(yang m)
Add safeguards for sufficient BUFFER_SIZE
@@ -87,6 +87,30 @@ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #endif #endif +/* Memory buffer must fit two matrix subblocks of maximal size */ +#define XSTR(x) STR(x) +#define STR(x) #x +#if BUFFER_SIZE < (SGEMM_DEFAULT_P * SGEMM_DEFAULT_Q * 4 * 2) || \ + BUFFER_SIZE < (SGEMM_DEFAULT_P * SGEMM_DEFAULT_R * 4 * 2) || \ + BUFFER_SIZE < (SGEMM_DEFAULT_R * SGEMM_DEFAULT_Q * 4 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of SGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (DGEMM_DEFAULT_P * DGEMM_DEFAULT_Q * 8 * 2) || \ + BUFFER_SIZE < (DGEMM_DEFAULT_P * DGEMM_DEFAULT_R * 8 * 2) || \ + BUFFER_SIZE < (DGEMM_DEFAULT_R * DGEMM_DEFAULT_Q * 8 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of DGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (CGEMM_DEFAULT_P * CGEMM_DEFAULT_Q * 8 * 2) || \ + BUFFER_SIZE < (CGEMM_DEFAULT_P * CGEMM_DEFAULT_R * 8 * 2) || \ + BUFFER_SIZE < (CGEMM_DEFAULT_R * CGEMM_DEFAULT_Q * 8 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of CGEMM - large calculations may crash ! +#endif +#if BUFFER_SIZE < (ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_Q * 16 * 2) || \ + BUFFER_SIZE < (ZGEMM_DEFAULT_P * ZGEMM_DEFAULT_R * 16 * 2) || \ + BUFFER_SIZE < (ZGEMM_DEFAULT_R * ZGEMM_DEFAULT_Q * 16 * 2) +#warning BUFFER_SIZE is too small for P, Q, and R of ZGEMM - large calculations may crash ! +#endif + #if defined(COMPILE_TLS) #include <errno.h> @@ -2740,7 +2764,7 @@ void *blas_memory_alloc(int procpos){ #ifdef DEBUG printf(" Position -> %d\n", position); #endif -WMB; + memory[position].used = 1; #if (defined(SMP) || defined(USE_LOCKING)) && !defined(USE_OPENMP) UNLOCK_COMMAND(&alloc_lock);
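The preprocessor guards encode BUFFER_SIZE >= 2 * elem_size * max(P*Q, P*R, R*Q) for each precision. A small sketch of the same arithmetic at run time; the P/Q/R and BUFFER_SIZE numbers below are placeholders, not OpenBLAS's real per-architecture defaults:

#include <stdio.h>

/* Placeholder panel sizes for illustration only; the real DGEMM_DEFAULT_P/Q/R
 * and BUFFER_SIZE are per-architecture constants inside OpenBLAS. */
#define P 384
#define Q 384
#define R 4096
#define ELEM 8                    /* sizeof(double) for DGEMM */
#define BUFFER_SIZE (32u << 20)   /* 32 MiB, hypothetical */

int main(void)
{
    unsigned long pq = (unsigned long)P * Q;
    unsigned long pr = (unsigned long)P * R;
    unsigned long rq = (unsigned long)R * Q;
    unsigned long m = pq > pr ? pq : pr;
    if (rq > m) m = rq;
    unsigned long need = 2UL * ELEM * m;  /* two subblocks of the largest panel */
    printf("need %lu bytes, have %u -> %s\n", need, BUFFER_SIZE,
           need <= BUFFER_SIZE ? "ok" : "BUFFER_SIZE too small");
    return 0;
}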
tools: remove hssistats count max value checking. The hssistats tool prints N/A if READ REG fails. Removed the hssistats READ REG counter boundary checking, for example value_lsb >= 0xfffffff1 and value_msb >= 0xfffffff1.
@@ -39,8 +39,6 @@ from ethernet.hssicommon import * # Sleep 50 milliseconds after clearing stats. HSSI_STATS_CLEAR_SLEEP_TIME = 50/1000 -# Invalid hssi stats value -HSSI_INAVLID_STATS = 0xfffffff1 class FPGAHSSISTATS(HSSICOMMON): @@ -132,7 +130,7 @@ class FPGAHSSISTATS(HSSICOMMON): ctl_addr.value = self.register_field_set(ctl_addr.value, 31, 1, 1) res, value_lsb = self.read_reg(0, ctl_addr.value) - if value_lsb >= HSSI_INAVLID_STATS or not res: + if not res: stats_list[port_index] += "{}|".format("N/A").rjust(20, ' ') port_index = port_index + 1 continue @@ -141,7 +139,7 @@ class FPGAHSSISTATS(HSSICOMMON): ctl_addr.value = self.register_field_set(ctl_addr.value, 31, 1, 0) res, value_msb = self.read_reg(0, ctl_addr.value) - if value_msb >= HSSI_INAVLID_STATS or not res: + if not res: stats_list[port_index] += "{}|".format("N/A").rjust(20, ' ') port_index = port_index + 1 continue
npu2: hw-procedures: Update PHY DC calibration procedure Per the updated programming guide (procedure 1.2.4), set rx_pr_edge_track_cntl and rx_pr_fw_off appropriately before and after calibration.
@@ -59,6 +59,8 @@ struct npu2_phy_reg NPU2_PHY_TX_UNLOAD_CLK_DISABLE = {0x103, 56, 1}; struct npu2_phy_reg NPU2_PHY_TX_FIFO_INIT = {0x105, 53, 1}; struct npu2_phy_reg NPU2_PHY_TX_RXCAL = {0x103, 57, 1}; struct npu2_phy_reg NPU2_PHY_RX_INIT_DONE = {0x0ca, 48, 1}; +struct npu2_phy_reg NPU2_PHY_RX_PR_EDGE_TRACK_CNTL = {0x092, 48, 2}; +struct npu2_phy_reg NPU2_PHY_RX_PR_FW_OFF = {0x08a, 56, 1}; /* These registers are per-PHY, not per lane */ struct npu2_phy_reg NPU2_PHY_TX_ZCAL_SWO_EN = {0x3c9, 48, 1}; @@ -520,6 +522,9 @@ static uint32_t phy_rx_dccal(struct npu2_dev *ndev) { int lane; + FOR_EACH_LANE(ndev, lane) + phy_write_lane(ndev, &NPU2_PHY_RX_PR_FW_OFF, lane, 1); + FOR_EACH_LANE(ndev, lane) phy_write_lane(ndev, &NPU2_PHY_RX_RUN_DCCAL, lane, 1); @@ -537,8 +542,11 @@ static uint32_t phy_rx_dccal_complete(struct npu2_dev *ndev) FOR_EACH_LANE(ndev, lane) phy_write_lane(ndev, &NPU2_PHY_RX_RUN_DCCAL, lane, 0); - FOR_EACH_LANE(ndev, lane) + FOR_EACH_LANE(ndev, lane) { phy_write_lane(ndev, &NPU2_PHY_RX_B_BANK_CONTROLS, lane, 0); + phy_write_lane(ndev, &NPU2_PHY_RX_PR_EDGE_TRACK_CNTL, lane, 0); + phy_write_lane(ndev, &NPU2_PHY_RX_PR_FW_OFF, lane, 0); + } return PROCEDURE_NEXT; }
bootloader: suppress -Wstringop-overflow
@@ -157,7 +157,11 @@ static void update_rtc_retain_mem_crc(void) void bootloader_common_reset_rtc_retain_mem(void) { + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wstringop-overflow" + #pragma GCC diagnostic ignored "-Warray-bounds" memset(rtc_retain_mem, 0, sizeof(rtc_retain_mem_t)); + #pragma GCC diagnostic pop } uint16_t bootloader_common_get_rtc_retain_mem_reboot_counter(void)
Renamed command line option --no-daemonize to --no-daemon.
@@ -919,7 +919,7 @@ nxt_runtime_conf_read_cmd(nxt_task_t *task, nxt_runtime_t *rt) continue; } - if (nxt_strcmp(p, "--no-daemonize") == 0) { + if (nxt_strcmp(p, "--no-daemon") == 0) { rt->daemon = 0; continue; }
details.txt: change DAPLink URL to daplink.io.
@@ -527,7 +527,7 @@ static uint32_t update_details_txt_file(uint8_t *buf, uint32_t size, uint32_t st uint32_t pos = 0; pos += util_write_string_in_region(buf, size, start, pos, - "# DAPLink Firmware - see https://mbed.com/daplink\r\n" + "# DAPLink Firmware - see https://daplink.io/\r\n" // Build ID "Build ID: " GIT_DESCRIPTION " (" COMPILER_DESCRIPTION LOCAL_MODS ")\r\n"); // Unique ID
[core] perf: reuse buffer to redirect to directory
int http_response_redirect_to_directory(server *srv, connection *con) { - buffer *o; - - o = buffer_init(); - + buffer *o = srv->tmp_buf; buffer_copy_buffer(o, con->uri.scheme); buffer_append_string_len(o, CONST_STR_LEN("://")); if (!buffer_string_is_empty(con->uri.authority)) { @@ -77,9 +74,6 @@ int http_response_redirect_to_directory(server *srv, connection *con) { con->http_status = 301; con->file_finished = 1; - - buffer_free(o); - return 0; }
feat: compile reddit source code
@@ -11,6 +11,7 @@ ORKA_SRC := $(wildcard orka-*.c) DISCORD_SRC := $(wildcard discord-*.c) SLACK_SRC := $(wildcard slack-*.c) GITHUB_SRC := $(wildcard github-*.c) +REDDIT_SRC := $(wildcard reddit-*.c) SPECS := $(sort $(wildcard specs/*.json)) DB_SRC := $(wildcard sqlite3/*.c) JSB_SRC := $(wildcard jsB/*.c) @@ -31,10 +32,11 @@ ORKA_OBJS := $(ORKA_SRC:%=$(OBJDIR)/%.o) DISCORD_OBJS := $(DISCORD_SRC:%=$(OBJDIR)/%.o) SLACK_OBJS := $(SLACK_SRC:%=$(OBJDIR)/%.o) GITHUB_OBJS := $(GITHUB_SRC:%=$(OBJDIR)/%.o) +REDDIT_OBJS := $(REDDIT_SRC:%=$(OBJDIR)/%.o) SPECS_OBJS := $(SPECS_C:%=$(OBJDIR)/%.o) DB_OBJS := $(DB_SRC:%=$(OBJDIR)/%.o) -OBJS := $(COMMON_OBJS) $(DISCORD_OBJS) $(SLACK_OBJS) $(GITHUB_OBJS) $(ORKA_OBJS) +OBJS := $(COMMON_OBJS) $(DISCORD_OBJS) $(SLACK_OBJS) $(GITHUB_OBJS) $(REDDIT_OBJS) $(ORKA_OBJS) BOT_SRC := $(wildcard bots/bot-*.c) BOT_EXES := $(patsubst %.c, %.exe, $(BOT_SRC)) @@ -103,6 +105,7 @@ orka: mkdir $(ORKA_OBJS) discord: mkdir $(DISCORD_OBJS) libdiscord slack: mkdir $(SLACK_OBJS) github: mkdir $(GITHUB_OBJS) +reddit: mkdir $(REDDIT_OBJS) db: mkdir $(DB_OBJS) specs_h: $(SPECS_H) @@ -120,7 +123,7 @@ bot: $(BOT_EXES) #@todo should we split by categories (bot_discord, bot_github, bot1: $(BOT1_EXES) bot2: $(BOT2_EXES) -test: common orka discord slack github $(TEST_EXES) #@todo should we split by categories too ? +test: common orka discord slack github reddit $(TEST_EXES) #@todo should we split by categories too ? mkdir : mkdir -p $(ACTOR_OBJDIR)/common $(ACTOR_OBJDIR)/test bin
Initialize return variable to the appropriate error code. The return variable is initialized to make the code more robust against glitch attacks.
@@ -223,7 +223,8 @@ int mbedtls_pkcs5_pbkdf2_hmac( mbedtls_md_context_t *ctx, unsigned int iteration_count, uint32_t key_length, unsigned char *output ) { - int ret = 0, j; + int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; + int j; unsigned int i; unsigned char md1[MBEDTLS_MD_MAX_SIZE]; unsigned char work[MBEDTLS_MD_MAX_SIZE];
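A reduced illustration of the fail-closed idiom the message describes, under the assumed threat model that a fault/glitch can skip an instruction: if the assignment from the real call is skipped, the pessimistic default still reports an error instead of success. do_crypto_step() and ERR_CORRUPTION are stand-ins, not mbed TLS APIs:

#include <stdio.h>

#define ERR_CORRUPTION (-1)   /* stand-in for a "corruption detected" error code */

static int do_crypto_step(void) { return 0; /* 0 == success */ }

static int operation(void)
{
    int ret = ERR_CORRUPTION;  /* pessimistic default */
    ret = do_crypto_step();    /* if this assignment is glitched away, ret stays an error */
    return ret;
}

int main(void)
{
    printf("operation() -> %d\n", operation());
    return 0;
}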
sysdeps/linux: implement sys_getgroups
@@ -779,6 +779,14 @@ int sys_getpgid(pid_t pid, pid_t *out) { return 0; } +int sys_getgroups(size_t size, const gid_t *list, int *retval) { + auto ret = do_syscall(NR_getgroups, size, list); + if (int e = sc_error(ret); e) + return e; + *retval = sc_int_result<int>(ret); + return 0; +} + int sys_dup(int fd, int flags, int *newfd) { auto ret = do_cp_syscall(NR_dup, fd); if (int e = sc_error(ret); e)
Update the RAC spec re version and empty chunks
@@ -323,6 +323,8 @@ Branch Node`s pointing to each source RAC file's `Root Node`. ### Version `Version` must have the value `0x01`, indicating version 1 of the RAC format. +The `0x00` value is reserved, although future editions may use other positive +values. ### Codec @@ -365,10 +367,11 @@ parsing a `CFile` requires knowing the `CFileSize`, and also that a `Root Node`'s `Branch CBias` is zero, so its `COffMax` equals its `CPtrMax`. For a child `Branch Node`, its `Codec` bits must be a subset of its parent's -`Codec` bits, its `COffMax` must be less than or equal to its parent's -`COffMax`, and its `DOffMax` must equal its parent's `SubBranch DOffMax`. The -`DOffMax` condition is equivalent to checking that the parent and child agree -on the child's size in `DSpace`. The parent states that it is its `(DPtr[a+1] - +`Codec` bits, its `Version` must be less than or equal to its parent's +`Version`, its `COffMax` must be less than or equal to its parent's `COffMax`, +and its `DOffMax` must equal its parent's `SubBranch DOffMax`. The `DOffMax` +condition is equivalent to checking that the parent and child agree on the +child's size in `DSpace`. The parent states that it is its `(DPtr[a+1] - DPtr[a])` and the child states that it is its `DPtrMax`. One conservative way to check `Branch Node`s' validity on first visit is to @@ -467,7 +470,9 @@ Repeat this "Search Within a Branch Node" section with the child `Branch Node`. ### Decompressing a Leaf Node If a `Leaf Node`'s `DRange` is empty, decompression is a no-op and skip the -rest of this section. +rest of this section. Specifically, a low-level library that iterates over a +RAC file's chunks, without actually performing decompression, should skip over +empty chunks instead of yielding them to its caller. Otherwise, decompression combines the `Primary CRange`, `Secondary CRange` and `Tertiary CRange` slices of the `CFile`, and the `Leaf STag` and `Leaf TTag`
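A sketch of the parent/child consistency checks the amended text spells out, written as straight-line C; the struct layout and function names are invented for illustration and are not from any real RAC implementation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented node summary for illustration. */
struct node {
    uint64_t codec_bits;
    uint8_t  version;
    uint64_t coff_max;
    uint64_t doff_max;
};

/* Check a child Branch Node against its parent, per the rules quoted above:
 * codec bits are a subset, Version and COffMax do not exceed the parent's,
 * and DOffMax matches the size the parent assigned to this child. */
static bool child_ok(const struct node *parent, const struct node *child,
                     uint64_t parent_subbranch_doffmax)
{
    if ((child->codec_bits & ~parent->codec_bits) != 0) return false;  /* codec subset  */
    if (child->version > parent->version) return false;               /* version bound */
    if (child->coff_max > parent->coff_max) return false;             /* COffMax bound */
    return child->doff_max == parent_subbranch_doffmax;               /* sizes agree   */
}

int main(void)
{
    struct node parent = { 0x3, 1, 1000, 5000 };
    struct node child  = { 0x1, 1,  800, 2000 };
    printf("%s\n", child_ok(&parent, &child, 2000) ? "valid" : "invalid");
    return 0;
}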
key zeroization fix for a branch path of tls13_final_finish_mac
@@ -271,6 +271,7 @@ size_t tls13_final_finish_mac(SSL *s, const char *str, size_t slen, key = EVP_PKEY_new_raw_private_key(EVP_PKEY_HMAC, NULL, finsecret, hashlen); + OPENSSL_cleanse(finsecret, sizeof(finsecret)); } if (key == NULL
Remove -Wmisleading-indentation from gcc devteam warning options because this one is enabled by default anyway
@@ -1476,7 +1476,6 @@ if ($strict_warnings) die "ERROR --strict-warnings requires gcc[>=4] or gcc-alike" unless $gccver >= 4; - $gcc_devteam_warn .= " -Wmisleading-indentation" if $gccver >= 6; foreach $wopt (split /\s+/, $gcc_devteam_warn) { push @{$config{cflags}}, $wopt
king: fix zig-zag in stderr logging
@@ -29,6 +29,7 @@ where import Urbit.King.Config import Urbit.Prelude +import RIO (logGeneric) import System.Directory ( createDirectoryIfMissing , getXdgDirectory , XdgDirectory(XdgCache) @@ -88,7 +89,9 @@ runKingEnvStderr verb lvl inner = do <&> setLogUseTime True <&> setLogUseLoc False <&> setLogMinLevel lvl - withLogFunc logOptions $ \logFunc -> runKingEnv logFunc logFunc inner + withLogFunc logOptions $ \logFunc -> + let lf = wrapCarriage logFunc + in runKingEnv lf lf inner runKingEnvLogFile :: Bool -> LogLevel -> Maybe FilePath -> RIO KingEnv a -> IO a runKingEnvLogFile verb lvl fileM inner = do @@ -107,7 +110,7 @@ runKingEnvLogFile verb lvl fileM inner = do <&> setLogUseLoc False <&> setLogMinLevel lvl withLogFunc stderrLogOptions $ \stderrLogFunc -> withLogFunc logOptions - $ \logFunc -> runKingEnv logFunc stderrLogFunc inner + $ \logFunc -> runKingEnv logFunc (wrapCarriage stderrLogFunc) inner withLogFileHandle :: FilePath -> (Handle -> IO a) -> IO a withLogFileHandle f act = @@ -115,6 +118,11 @@ withLogFileHandle f act = hSetBuffering handle LineBuffering act handle +-- XX loses callstack +wrapCarriage :: LogFunc -> LogFunc +wrapCarriage lf = mkLogFunc $ \_ ls ll bldr -> + runRIO lf $ logGeneric ls ll (bldr <> "\r") + defaultLogFile :: IO FilePath defaultLogFile = do logDir <- getXdgDirectory XdgCache "urbit"
Updated error messages and added missing arguments
@@ -1372,7 +1372,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/locked"), QString("Could not set attribute"))); + rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("Could not set attribute"))); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; } @@ -1380,7 +1381,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/locked"), QString("Invalid attribute value"))); + rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("invalid value, %1, for parameter %2").arg(map[pi.key()].toString()).arg(pi.key()).toHtmlEscaped())); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; } @@ -1399,7 +1401,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/displayflipped"), QString("Could not set attribute"))); + rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("Could not set attribute"))); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; } @@ -1407,7 +1410,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/displayflipped"), QString("Invalid attribute value"))); + rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("invalid value, %1, for parameter %2").arg(map[pi.key()].toString()).arg(pi.key()).toHtmlEscaped())); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; } @@ -1426,7 +1430,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/mountingmode"), QString("Could not set attribute"))); + rsp.list.append(errorToMap(ERR_ACTION_ERROR, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("Could not set attribute"))); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; } @@ -1434,7 +1439,8 @@ int DeRestPluginPrivate::changeSensorConfig(const ApiRequest &req, ApiResponse & } else { - rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/mountingmode"), QString("Invalid attribute value"))); + rsp.list.append(errorToMap(ERR_INVALID_VALUE, QString("/sensors/%1/config/%2").arg(id).arg(pi.key()).toHtmlEscaped(), + QString("invalid value, %1, for parameter %2").arg(map[pi.key()].toString()).arg(pi.key()).toHtmlEscaped())); rsp.httpStatus = HttpStatusBadRequest; return REQ_READY_SEND; }
misc: configurator: Remove acrn config files when selecting a working folder. Instead of removing all .xml and .sh files, just remove: *.board.xml, scenario.xml, launch*.sh
@@ -72,11 +72,12 @@ export default { .then((r) => { if (r) { for (let i = 0; i < files.length; i++) { - console.log("file: ", files[i].path) - let arr = files[i].path.split('.') - let suffix = arr[arr.length - 1] - console.log("suffix:", suffix) - if (suffix == 'sh' || suffix == 'xml') { + let arr = files[i].path.split(window.systemInfo.pathSplit) + let basename = arr[arr.length-1] + console.log("file: ", basename) + if (basename === 'scenario.xml' || + /^.*\.board\.xml$/.test(basename) || + /^launch.*\.sh$/.test(basename)) { console.log("removing: ", files[i].path) configurator.removeFile(files[i].path) .catch((err) => alert(`${err}`))
Add Windows tests in GitHub Actions See: TODO: MinGW/cygwin tests
@@ -63,3 +63,27 @@ jobs: - name: Run tests run: $GITHUB_WORKSPACE/test.sh + + windows: + runs-on: windows-latest + steps: + - name: Checkout code + uses: actions/checkout@v2 + with: + repository: ${{ env.GITHUB_REPOSITORY }} + ref: ${{ env.GITHUB_HEAD_REF }} + + - name: Install dependencies + run: | + choco install -y ninja memurai-developer + + - uses: ilammy/msvc-dev-cmd@v1 + - name: Build hiredis + run: | + mkdir build && cd build + cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_EXAMPLES=ON + ninja -v + + - name: Run tests + run: | + ./build/hiredis-test.exe
Get the outstream while holding the stcb send lock
@@ -6915,7 +6915,6 @@ sctp_msg_append(struct sctp_tcb *stcb, error = EINVAL; goto out_now; } - strm = &stcb->asoc.strmout[srcv->sinfo_stream]; /* Now can we send this? */ if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) || (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_ACK_SENT) || @@ -6973,6 +6972,7 @@ sctp_msg_append(struct sctp_tcb *stcb, if (hold_stcb_lock == 0) { SCTP_TCB_SEND_LOCK(stcb); } + strm = &stcb->asoc.strmout[srcv->sinfo_stream]; sctp_snd_sb_alloc(stcb, sp->length); atomic_add_int(&stcb->asoc.stream_queue_cnt, 1); TAILQ_INSERT_TAIL(&strm->outqueue, sp, next); @@ -14188,6 +14188,8 @@ skip_preblock: goto out; } SCTP_TCB_SEND_LOCK(stcb); + /* The out streams might be reallocated. */ + strm = &stcb->asoc.strmout[srcv->sinfo_stream]; if (sp->msg_is_complete) { strm->last_msg_incomplete = 0; asoc->stream_locked = 0;
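The pattern behind the fix, reduced to a generic sketch: a pointer into a resizable array must be (re)derived after taking the lock that guards reallocation, otherwise it can dangle. Types and names below are illustrative, not the SCTP stack's:

#include <pthread.h>
#include <stdlib.h>

/* Illustrative only: an array of streams that another thread may realloc
 * while holding 'lock'. */
struct stream { int queued; };
struct assoc {
    pthread_mutex_t lock;
    struct stream *streams;
    int nstreams;
};

static void enqueue(struct assoc *a, int sid)
{
    /* WRONG: taking 'struct stream *s = &a->streams[sid];' before the lock,
     * since the array may be reallocated underneath us. */
    pthread_mutex_lock(&a->lock);
    struct stream *s = &a->streams[sid];  /* re-derive the pointer under the lock */
    s->queued++;
    pthread_mutex_unlock(&a->lock);
}

int main(void)
{
    struct assoc a;
    pthread_mutex_init(&a.lock, NULL);
    a.streams = calloc(4, sizeof *a.streams);
    a.nstreams = 4;
    enqueue(&a, 2);
    free(a.streams);
    pthread_mutex_destroy(&a.lock);
    return 0;
}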
APPS: Prevent ASAN hiccup on idempotent strncpy() in opt_progname()
@@ -105,6 +105,7 @@ char *opt_progname(const char *argv0) /* Find last special character sys:[foo.bar]openssl */ p = opt_path_end(argv0); q = strrchr(p, '.'); + if (prog != p) strncpy(prog, p, sizeof(prog) - 1); prog[sizeof(prog) - 1] = '\0'; if (q != NULL && q - p < sizeof(prog)) @@ -132,6 +133,7 @@ char *opt_progname(const char *argv0) const char *p; p = opt_path_end(argv0); + if (prog != p) strncpy(prog, p, sizeof(prog) - 1); prog[sizeof(prog) - 1] = '\0'; return prog;
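Why the guard matters, in a reduced example (not OpenSSL code): strncpy with overlapping source and destination is undefined behaviour that sanitizers report even when the bytes would be unchanged, so the copy is skipped when the caller already passes the destination buffer itself:

#include <stdio.h>
#include <string.h>

static char prog[40];

/* Copy 'name' into the static buffer, skipping the call when the source
 * already aliases it: strncpy(dst, dst, n) overlaps and is UB. */
static const char *set_prog(const char *name)
{
    if (prog != name)
        strncpy(prog, name, sizeof(prog) - 1);
    prog[sizeof(prog) - 1] = '\0';
    return prog;
}

int main(void)
{
    printf("%s\n", set_prog("openssl"));
    printf("%s\n", set_prog(prog));  /* second call aliases the buffer; copy is skipped */
    return 0;
}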
[ctr/lua] add error checking for event
@@ -210,7 +210,6 @@ static int moduleSend(lua_State *L) static int moduleBalance(lua_State *L) { char *contract; - int ret; int *service = (int *)getLuaExecContext(L); lua_Integer amount; @@ -223,7 +222,7 @@ static int moduleBalance(lua_State *L) else { contract = (char *)luaL_checkstring(L, 1); } - if ((ret = LuaGetBalance(L, service, contract)) < 0) { + if (LuaGetBalance(L, service, contract) < 0) { lua_error(L); } @@ -315,7 +314,9 @@ static int moduleEvent(lua_State *L) if (json_args == NULL) { lua_error(L); } - LuaEvent(L, service, event_name, json_args); + if (LuaEvent(L, service, event_name, json_args) < 0) { + lua_error(L); + } free(json_args); return 0; }
use generic hwloc-libs instead
@@ -157,11 +157,7 @@ Conflicts: pbs-cmds Requires: expat Requires: python >= 2.6 Requires: python < 3.0 -%if %{defined suse_version} -Requires: libhwloc5 -%else Requires: hwloc-libs -%endif Autoreq: 1 %description -n %{pname}-%{pbs_execution}%{PROJ_DELIM}
Yan LR: Add missing Doxygen attribute
@@ -33,6 +33,8 @@ namespace * `line`, where the parsing process failed. * @param prefix This variable stores as prefix that this function prepends * to every line of the visualized error message. + * + * @return A string representation of the error */ string visualizeError (Recognizer * recognizer, Token * offendingSymbol, size_t const line, size_t const charPositionInLine, string const & prefix)
Typo, the same
@@ -2606,7 +2606,7 @@ void DeRestPluginPrivate::addLightNode(const deCONZ::Node *node) lightNode.setModelId(QLatin1String("T10W1ZW switch")); lightNode.setNeedSaveDatabase(true); } - else if (lightNode.modelId() == QLatin1String("82c167c95ed756cdbd21d6817f72c593")) + else if (lightNode.modelId() == QLatin1String("82c167c95ed746cdbd21d6817f72c593")) { lightNode.setModelId(QLatin1String("CM10ZW")); lightNode.setNeedSaveDatabase(true);
fix msvc 14.12 compilation
@@ -1000,7 +1000,8 @@ THashMap<ui32, TString> NCB::MergeCatFeaturesHashToString(const NCB::TObjectsDat const auto& perFeatureCatFeaturesHashToString = objectsData.GetCatFeaturesHashToString(catFeatureIdx); for (const auto& [hashedCatValue, catValueString] : perFeatureCatFeaturesHashToString) { - result[hashedCatValue] = catValueString; + // TODO(kirillovs): remove this cast, needed only for MSVC 14.12 compiler bug + result[(ui32)hashedCatValue] = catValueString; } }