Columns: message (string, length 6 to 474); diff (string, length 8 to 5.22k)
[ya.core.conf] update svn 1.10 toolkit to r3884368
}, "svn110": { "formula": { - "sandbox_id": 281178455, + "sandbox_id": 286028033, "match": "svn" }, "executable": {
Updated draft.
<?rfc strict="no" ?> <?rfc symrefs="no" ?> -<rfc category="exp" ipr="trust200902" docName="draft-dreibholz-taps-neat-socketapi-02.txt"> +<rfc category="exp" ipr="trust200902" docName="draft-dreibholz-taps-neat-socketapi-03.txt"> <?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?> @@ -20,12 +20,11 @@ NEAT Sockets API <!-- ************** THOMAS DREIBHOLZ *************** --> <author initials="T." surname="Dreibholz" fullname="Thomas Dreibholz"> -<organization abbrev="Simula Research Laboratory">Simula Research Laboratory, Network Systems Group</organization> +<organization abbrev="Simula@OsloMet">Simula Metropolitan Centre for Digital Engineering</organization> <address> <postal> - <street>Martin Linges vei 17</street> - <city>1364 Fornebu</city> - <region>Akershus</region> + <street>Pilestredet 52</street> + <city>0167 Oslo</city> <country>Norway</country> </postal> <phone>+47-6782-8200</phone> @@ -35,7 +34,7 @@ NEAT Sockets API </address> </author> -<date day="30" month="October" year="2017" /> +<date day="7" month="June" year="2018" /> <keyword>Internet-Draft</keyword> <abstract> @@ -1102,6 +1101,8 @@ int nsa_ioctl(int fd, int request, const void* argp)</artwork></figure> </section> <section title="Acknowledgments"> +<t>This work was partially funded by the European Union's Horizon 2020 research and innovation programme under grant agreement No. 644334 (NEAT). The views expressed are solely those of the author(s).</t> + <t> The author would like to thank David Ros,
admin/meta-packages: remove ganglia from %files section
@@ -843,7 +843,6 @@ Collection of parallel library builds for use with the Arm Compiler for Linux an %files -n %{PROJ_NAME}-autotools %files -n %{PROJ_NAME}-base %files -n %{PROJ_NAME}-base-compute -%files -n %{PROJ_NAME}-ganglia %files -n %{PROJ_NAME}-%{compiler_family}-geopm %files -n %{PROJ_NAME}-%{compiler_family}-io-libs %files -n %{PROJ_NAME}-%{compiler_family}-mpich-io-libs
YAML CPP: Use general parse error
@@ -1148,13 +1148,6 @@ module:yamlcpp macro:YAMLCPP_EMITTER_FAILED number:185 -description:Parsing failed -severity:error -ingroup:plugin -module:yamlcpp -macro:YAMLCPP_PARSER_FAILED - -number:186 description:Failed to retrieve YAML representation severity:error ingroup:plugin
doc: support for two languages in the deploy_docs stage.
@@ -368,7 +368,7 @@ push_master_to_github: deploy_docs: - stage: deploy + stage: assign_test image: $CI_DOCKER_REGISTRY/esp32-ci-env$BOT_DOCKER_IMAGE_TAG tags: - deploy @@ -388,11 +388,17 @@ deploy_docs: - chmod 600 ~/.ssh/id_rsa - echo -e "Host $DOCS_SERVER\n\tStrictHostKeyChecking no\n\tUser $DOCS_SERVER_USER\n" >> ~/.ssh/config - export GIT_VER=$(git describe --always) - - cd docs/_build/ + - cd docs/en/_build/ - mv html $GIT_VER - tar czvf $GIT_VER.tar.gz $GIT_VER - - scp $GIT_VER.tar.gz $DOCS_SERVER:$DOCS_PATH - - ssh $DOCS_SERVER -x "cd $DOCS_PATH && tar xzvf $GIT_VER.tar.gz && rm -f latest && ln -s $GIT_VER latest" + - scp $GIT_VER.tar.gz $DOCS_SERVER:$DOCS_PATH/en + - ssh $DOCS_SERVER -x "cd $DOCS_PATH/en && tar xzvf $GIT_VER.tar.gz && rm -f latest && ln -s $GIT_VER latest" + - cd ../../zh_CN/_build/ + - mv html $GIT_VER + - tar czvf $GIT_VER.tar.gz $GIT_VER + - scp $GIT_VER.tar.gz $DOCS_SERVER:$DOCS_PATH/zh_CN + - ssh $DOCS_SERVER -x "cd $DOCS_PATH/zh_CN && tar xzvf $GIT_VER.tar.gz && rm -f latest && ln -s $GIT_VER latest" + check_doc_links: stage: test
feature_request.md: Add more guidance and hints; use comments and headings
--- name: Feature request about: Suggest an idea for this project -title: "[REQUEST]" +title: '' labels: Feature Request assignees: '' --- -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] +<!-- + - Use this issue template to request a feature in the deCONZ REST-API. + - If you want to request a feature for the Phoscon App, please head over to: https://github.com/dresden-elektronik/phoscon-app-beta + - If you're unsure if the request fits into this issue tracker, please ask for advise in our Discord chat: https://discord.gg/QFhTxqN +--> -**Describe the solution you'd like** -A clear and concise description of what you want to happen. +## Feature request type +<!-- + Please provide a short description of the feature request. + Examples: + - Extend the REST-API with new API endpoints, attributes or capabilities. + - Extend the Websocket interface with new messages. + - Improve or refactor REST-API internal handling of xyz. + - Is your feature request related to a problem but? + Tell us what the problem is. Ex. I'm always frustrated when [...] + Note: If the problem describes a bug please use the "Bug report" issue template instead of this one. +--> -**Describe alternatives you've considered** +## Describtion +<!-- + Please describe the feature in more detail. + - How it should work, e.g. provide an examples of a request/response, extensions to the REST-API interface, etc. + - If applicable describe which benefits will API clients gain. +--> + +## Considered alternatives +<!-- A clear and concise description of any alternative solutions or features you've considered. +--> -**Additional context** +## Additional context +<!-- Add any other context or screenshots about the feature request here. +-->
out_slack: use new upstream prototype for tls handling
@@ -101,7 +101,7 @@ static int cb_slack_init(struct flb_output_instance *ins, ctx->u = flb_upstream_create(config, ctx->host, ctx->port, - FLB_IO_TLS, (void *) &ins->tls); + FLB_IO_TLS, ins->tls); if (!ctx->u) { flb_plg_error(ctx->ins, "error creating upstream context"); goto error;
Deploy builds.
@@ -83,6 +83,54 @@ ssh-add "$DEPLOY_KEY_PATH" ############################################################################### ### Deploy build. ############################################################################### +# Path to the libs that will be deployed. +LIB_DIR="$BUILD_DIR_FIXED/lib" +# Verify that libs have been created. +if [ ! -d "$LIB_DIR" ]; then + echo "Directory 'lib' is not available; aborting." + exit -1 +fi + +# Path to the binaries that will be deployed. +BIN_DIR="$BUILD_DIR_FIXED/bin" +# Verify that binaries have been created. +if [ ! -d "$BIN_DIR" ]; then + echo "Directory 'bin' is not available; aborting." + exit -1 +fi + +# Directory where BUILD_BRANCH is cloned to. +BUILD_BRANCH_DIR="$SCRIPT_DIR/$BUILD_BRANCH" +# Verify that BUILD_BRANCH_DIR does not exist. +if [ -d "$BUILD_BRANCH_DIR" ] \ + || [ -f "$BUILD_BRANCH_DIR" ]; then + echo "'$BUILD_BRANCH_DIR' already exists; aborting." + exit -1 +fi + +# Clone BUILD_BRANCH to BUILD_BRANCH_DIR. +git clone -b $BUILD_BRANCH $SSH_REPO $BUILD_BRANCH_DIR +pushd "$BUILD_BRANCH_DIR" + # Copy libs. + cp -a "$LIB_DIR/." ./ + + # Copy binaries. + cp -a "$BIN_DIR/." ./ + + # Copy CircleCI config directory. + cp -R "$CIRCLECI_CONFIG_DIR" ./ + + # Set user name and email for commit. + git config user.name "Travis CI" + git config user.email "[email protected]" + + # Commit all changes. + git add --all + git commit -m "Deploy build for: ${SHA}" + + # Now that we're all set up, we can push. + git push --force +popd
python: framer: use 'into_buffer' if available [3/8] This change allows us to re-use one buffer for serializing a group of SBP messages and thus avoid unnecessary copies.
@@ -17,6 +17,8 @@ import time import uuid import six +import numpy as np + class Framer(six.Iterator): """ @@ -38,6 +40,7 @@ class Framer(six.Iterator): write, verbose=False, dispatcher=dispatch, + into_buffer=False, skip_metadata=False): self._read = read self._write = write @@ -45,6 +48,8 @@ class Framer(six.Iterator): self._broken = False self._dispatch = dispatcher self._session = str(uuid.uuid4()) + self._buffer = np.zeros(16*1024, dtype=np.uint8) + self._into_buffer = into_buffer self._skip_metadata = skip_metadata def __iter__(self): @@ -154,4 +159,14 @@ class Framer(six.Iterator): Metadata for this batch of messages, e.g. `{'time': 'ISO 8601 str'}` (ignored for now). """ - self._write(bytes.join(b'', (msg.to_binary() for msg in msgs))) + index = 0 + if self._into_buffer: + for msg in msgs: + index += msg.into_buffer(self._buffer, index) + else: + for msg in msgs: + msg_buff = msg.to_binary() + buff_len = len(msg_buff) + self._buffer[index:(index+buff_len)] = bytearray(msg_buff) + index += buff_len + self._write(memoryview(self._buffer)[:index])
actions: update plugins and bindings
@@ -22,28 +22,27 @@ jobs: matrix: include: # The following plugins have been disabled due to problems: - # Unfortunately the tests for the Xerces plugin fail: https://travis-ci.org/ElektraInitiative/libelektra/jobs/483331657#L3740 - # The curlget tests fail: https://github.com/ElektraInitiative/libelektra/issues/3382 - # Yamlcpp and curlget fails to compile on macOS with GCC due to problems with the macOS SDK + # Yamlcpp fails to compile on macOS with GCC due to problems with the macOS SDK - name: GCC 11 CC: gcc-11 CXX: g++-11 - PLUGINS: ALL;-xerces;-curlget;-yamlcpp + PLUGINS: ALL;;-yamlcpp BINDINGS: ALL;-rust - name: Clang CC: clang CXX: clang++ ENABLE_LOGGER: ON TOOLS: ALL;web - PLUGINS: ALL;-curlget + PLUGINS: ALL + BINDINGS: ALL;-rust - name: Clang ASAN CC: clang CXX: clang++ ASAN_OPTIONS: detect_leaks=1 ENABLE_ASAN: ON TOOLS: kdb - PLUGINS: ALL;-curlget - BINDINGS: cpp + PLUGINS: ALL + BINDINGS: ALL;-rust - name: MMap KDB_DEFAULT_STORAGE: mmapstorage KDB_DB_FILE: default.mmap
HLS memcopy: adding hls.mk change
@@ -66,8 +66,8 @@ $(SOLUTION_NAME): $(objs) check: $(symlinks) @grep -A8 critical $(SOLUTION_DIR)*/$(SOLUTION_NAME)/$(SOLUTION_NAME).log ; \ test $$? = 1 - @grep -A8 0x184 vhdl/action_wrapper_ctrl_reg_s_axi.vhd ; \ - test $$? = 1 +# @grep -A8 0x184 vhdl/action_wrapper_ctrl_reg_s_axi.vhd ; \ +# test $$? = 1 clean: $(RM) -r $(SOLUTION_DIR)* run_hls_script.tcl *~ *.log \
fix(docs/codes): Patch footnotes array support Fixes bug that was noticeable when more than one code with a footnote array was present in a table. The symptoms were: footnote descriptions were duplicated, and footnote refs were not rendered.
@@ -19,7 +19,7 @@ function extractFootnoteIds(codes) { new Set( codes .flatMap(({ footnotes }) => Object.values(footnotes)) - .map((refs) => (Array.isArray(refs) ? refs.flat() : refs)) + .flatMap((refs) => (Array.isArray(refs) ? refs.flat() : refs)) ) ); }
wpa_supplicant: Minor bugfix with wpa_supplicant debug logs.
@@ -194,10 +194,6 @@ void wpa2_task(void *pvParameters ) for (;;) { if ( pdPASS == xQueueReceive(s_wpa2_queue, &e, portMAX_DELAY) ) { -#ifdef DEBUG_PRINT - uint32_t sig = 0; - sig = e->sig; -#endif if (e->sig < SIG_WPA2_MAX) { DATA_MUTEX_TAKE(); if(sm->wpa2_sig_cnt[e->sig]) { @@ -234,7 +230,7 @@ void wpa2_task(void *pvParameters ) break; } else { if (s_wifi_wpa2_sync_sem) { - wpa_printf(MSG_DEBUG, "WPA2: wifi->wpa2 api completed sig(%d)", sig); + wpa_printf(MSG_DEBUG, "WPA2: wifi->wpa2 api completed sig(%d)", e->sig); xSemaphoreGive(s_wifi_wpa2_sync_sem); } else { wpa_printf(MSG_ERROR, "WPA2: null wifi->wpa2 sync sem"); @@ -247,7 +243,7 @@ void wpa2_task(void *pvParameters ) wpa_printf(MSG_DEBUG, "WPA2: task deleted"); s_wpa2_queue = NULL; if (s_wifi_wpa2_sync_sem) { - wpa_printf(MSG_DEBUG, "WPA2: wifi->wpa2 api completed sig(%d)", sig); + wpa_printf(MSG_DEBUG, "WPA2: wifi->wpa2 api completed sig(%d)", e->sig); xSemaphoreGive(s_wifi_wpa2_sync_sem); } else { wpa_printf(MSG_ERROR, "WPA2: null wifi->wpa2 sync sem");
Missing unlock on error.
@@ -888,6 +888,7 @@ create_job(ipp3d_client_t *client) /* I - Client */ if ((job = calloc(1, sizeof(ipp3d_job_t))) == NULL) { perror("Unable to allocate memory for job"); + cupsRWUnlock(&(client->printer->rwlock)); return (NULL); }
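A minimal C sketch of the locking discipline this fix restores (hypothetical names; the real code uses CUPS' reader/writer lock API): every early-return path reached while the printer's lock is held must release it before returning.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical job allocator guarded by a reader/writer lock. */
static pthread_rwlock_t printer_lock = PTHREAD_RWLOCK_INITIALIZER;

void *create_job_sketch(size_t size) {
    pthread_rwlock_wrlock(&printer_lock);

    void *job = calloc(1, size);
    if (job == NULL) {
        /* The bug class fixed above: returning here without unlocking
           would leave the lock held for every later caller. */
        pthread_rwlock_unlock(&printer_lock);
        return NULL;
    }

    /* ... initialize the job while the lock is held ... */

    pthread_rwlock_unlock(&printer_lock);
    return job;
}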
Date: set locale to "C" in testmod_date
* */ +#include <locale.h> #include <stdlib.h> #include <string.h> @@ -62,17 +63,18 @@ int main (int argc, char ** argv) { printf ("DATE TESTS\n"); printf ("==================\n\n"); - + const char *old_locale = setlocale(LC_ALL, NULL); + setlocale(LC_ALL, "C"); init (argc, argv); testFmt ("20:15:00", "%H:%M:%S", 1); testFmt ("20:15:00", "%I:%M:%S", -1); testFmt ("Sat 17 Dec 2016 08:07:43 PM CET", "%a %d %b %Y %r %Z", 1); - testIso ("2016-12-12T23:59:01", "datetime complete", 1); - testIso ("2016-12-12 23:59:01", "datetime complete noT", 1); - testIso ("2016-12-12T23:59:01", "datetime truncated", -1); - testIso ("-12-12T23:59:01", "datetime truncated", 1); + testIso ("2016-12-12T23:59:01Z", "datetime complete", 1); + testIso ("2016-12-12 23:59:01Z", "datetime complete noT", 1); + testIso ("2016-12-12T23:59:01Z", "datetime truncated", -1); + testIso ("-12-12T23:59:01Z", "datetime truncated", 1); testIso ("2016-W23", "weekdate", 1); testIso ("22:30+04", "utc extended", 1); testIso ("22:30-04", "utc extended", 1); @@ -85,6 +87,7 @@ int main (int argc, char ** argv) testRfc2822 ("01 Mar 2016 23:59 +0400", 1); testRfc2822 ("01 Mar 2016 01:00:59", -1); + setlocale(LC_ALL, old_locale); printf ("\ntestmod_date RESULTS: %d test(s) done. %d error(s).\n", nbTest, nbError); return nbError;
Minor struct name change for standards.
@@ -1753,7 +1753,7 @@ typedef struct bool x; bool y; bool z; -} s_axis_principal_toggle; +} s_axis_principal_bool; // Caskey, Damon V. // 2018-04-18 @@ -2190,7 +2190,7 @@ typedef struct { unsigned int ani_bind; // Animation binding type. int sortid; // Relative binding sortid. Default = -1 - s_axis_principal_toggle bind_toggle; // Toggle binding on X, Y and Z axis. + s_axis_principal_bool bind_toggle; // Toggle binding on X, Y and Z axis. s_axis_principal_short offset; // x,y,z offset. e_direction_adjust direction; // Direction force struct entity *ent; // Entity to bind.
Fix base64 for bit overflow
@@ -60,33 +60,32 @@ int bscrypt_base64_encode(char *target, const char *data, int len) { const int target_size = (groups + (mod != 0)) * 4; char *writer = target + target_size - 1; const char *reader = data + len - 1; - char tmp1, tmp2, tmp3; writer[1] = 0; switch (mod) { - case 2: - tmp2 = *(reader--); - tmp1 = *(reader--); + case 2: { + char tmp2 = *(reader--); + char tmp1 = *(reader--); *(writer--) = '='; *(writer--) = base64_encodes[((tmp2 & 15) << 2)]; *(writer--) = base64_encodes[((tmp1 & 3) << 4) | ((tmp2 >> 4) & 15)]; *(writer--) = base64_encodes[(tmp1 >> 2) & 63]; - break; - case 1: - tmp1 = *(reader--); + } break; + case 1: { + char tmp1 = *(reader--); *(writer--) = '='; *(writer--) = '='; *(writer--) = base64_encodes[(tmp1 & 3) << 4]; *(writer--) = base64_encodes[(tmp1 >> 2) & 63]; - break; + } break; } while (groups) { groups--; - tmp3 = *(reader--); - tmp2 = *(reader--); - tmp1 = *(reader--); + const char tmp3 = *(reader--); + const char tmp2 = *(reader--); + const char tmp1 = *(reader--); *(writer--) = base64_encodes[tmp3 & 63]; - *(writer--) = base64_encodes[((tmp2 & 15) << 2) | (tmp3 >> 6)]; - *(writer--) = base64_encodes[((tmp1 & 3) << 4) | ((tmp2 >> 4) & 15)]; + *(writer--) = base64_encodes[((tmp2 & 15) << 2) | ((tmp3 >> 6) & 3)]; + *(writer--) = base64_encodes[(((tmp1 & 3) << 4) | ((tmp2 >> 4) & 15))]; *(writer--) = base64_encodes[(tmp1 >> 2) & 63]; } return target_size; @@ -112,10 +111,12 @@ Returns the number of bytes actually written to the target buffer (excluding the NULL terminator byte). */ int bscrypt_base64_decode(char *target, char *encoded, int base64_len) { - if (base64_len <= 0) - return -1; if (!target) target = encoded; + if (base64_len <= 0) { + target[0] = 0; + return 0; + } int written = 0; char tmp1, tmp2, tmp3, tmp4; while (*encoded == '\r' || *encoded == '\n' || *encoded == ' ') { @@ -202,6 +203,13 @@ void bscrypt_test_base64(void) { {"any carnal pleasure.", "YW55IGNhcm5hbCBwbGVhc3VyZS4="}, {"any carnal pleasure", "YW55IGNhcm5hbCBwbGVhc3VyZQ=="}, {"any carnal pleasur", "YW55IGNhcm5hbCBwbGVhc3Vy"}, + {"", ""}, + {"f", "Zg=="}, + {"fo", "Zm8="}, + {"foo", "Zm9v"}, + {"foob", "Zm9vYg=="}, + {"fooba", "Zm9vYmE="}, + {"foobar", "Zm9vYmFy"}, {NULL, NULL} // Stop }; int i = 0;
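The masking added above, e.g. (tmp3 >> 6) & 3, matters because plain char may be signed: a byte with the high bit set becomes negative, right-shifting it sign-extends, and the result is a bad index into the 64-entry encode table. A standalone C demo of that mechanism (my illustration, not the project's code):

#include <stdio.h>

int main(void) {
    /* On platforms where plain char is signed, a byte >= 0x80 is negative,
       and right-shifting it sign-extends the high bits. */
    char c = (char)0xC3;
    int unmasked = c >> 6;        /* typically -1, a bad table index */
    int masked = (c >> 6) & 3;    /* always 0..3, a safe table index */

    printf("unmasked: %d, masked: %d\n", unmasked, masked);
    return 0;
}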
[software] Add debug info to binaries
@@ -67,7 +67,7 @@ RISCV_LLVM_TARGET ?= --target=$(RISCV_TARGET) --sysroot=$(GCC_INSTALL_DIR)/$(RI RISCV_WARNINGS += -Wunused-variable -Wconversion -Wall -Wextra # -Werror RISCV_FLAGS_COMMON_TESTS ?= -march=$(RISCV_ARCH) -mabi=$(RISCV_ABI) -I$(ROOT_DIR) -I$(HALIDE_INCLUDE) -static -RISCV_FLAGS_COMMON ?= $(RISCV_FLAGS_COMMON_TESTS) -std=gnu99 -O3 -ffast-math -fno-common -fno-builtin-printf $(DEFINES) $(RISCV_WARNINGS) +RISCV_FLAGS_COMMON ?= $(RISCV_FLAGS_COMMON_TESTS) -g -std=gnu99 -O3 -ffast-math -fno-common -fno-builtin-printf $(DEFINES) $(RISCV_WARNINGS) RISCV_FLAGS_GCC ?= -mcmodel=medany -Wa,-march=$(RISCV_ARCH_AS) -mtune=mempool # -falign-loops=32 -falign-jumps=32 RISCV_FLAGS_LLVM ?= -mcmodel=small -mcpu=mempool-rv32 -mllvm -misched-topdown
test-ipmi-hiomap: Add get-flash-info-error test Cc: stable
@@ -1456,6 +1456,35 @@ static void test_hiomap_get_info_error(void) scenario_exit(); } +static const struct scenario_event +scenario_hiomap_get_flash_info_error[] = { + { .type = scenario_event_p, .p = &hiomap_ack_call, }, + { .type = scenario_event_p, .p = &hiomap_get_info_call, }, + { + .type = scenario_cmd, + .c = { + .req = { + .cmd = HIOMAP_C_GET_FLASH_INFO, + .seq = 3, + .args = { + [0] = HIOMAP_V2, + }, + }, + .cc = IPMI_INVALID_COMMAND_ERR, + }, + }, + SCENARIO_SENTINEL, +}; + +static void test_hiomap_get_flash_info_error(void) +{ + struct blocklevel_device *bl; + + scenario_enter(scenario_hiomap_get_flash_info_error); + assert(ipmi_hiomap_init(&bl) > 0); + scenario_exit(); +} + struct test_case { const char *name; void (*fn)(void); @@ -1488,6 +1517,7 @@ struct test_case test_cases[] = { TEST_CASE(test_hiomap_protocol_persistent_error), TEST_CASE(test_hiomap_protocol_get_flash_info), TEST_CASE(test_hiomap_get_info_error), + TEST_CASE(test_hiomap_get_flash_info_error), { NULL, NULL }, };
fix missized copy.
@@ -92,7 +92,7 @@ assemble(char *asmsrc, char *path) int pid, status; if (outfile != NULL) - strncpy(objfile, outfile, 1024); + strncpy(objfile, outfile, sizeof(objfile)); else { psuffix = strrchr(path, '+'); i = 0;
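The fix swaps a hard-coded 1024 for sizeof(objfile), so the copy bound tracks the destination buffer's real size. A tiny standalone illustration of the idiom (hypothetical buffer size; note strncpy does not terminate on truncation, so the explicit terminator is still needed):

#include <stdio.h>
#include <string.h>

int main(void) {
    char objfile[64];                       /* hypothetical destination */
    const char *outfile = "a/rather/long/output/path/object-file.o";

    /* Bound the copy by the destination's actual size, not a magic number,
       and terminate explicitly in case the source was truncated. */
    strncpy(objfile, outfile, sizeof(objfile) - 1);
    objfile[sizeof(objfile) - 1] = '\0';

    printf("%s\n", objfile);
    return 0;
}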
zephyr/Makefile: Add kobj_types_h_target to Z_EXPORTS. New generated Zephyr header file; without it, the build breaks.
@@ -106,4 +106,4 @@ outdir/$(BOARD)/Makefile: $(CONF_FILE) $(Z_EXPORTS): outdir/$(BOARD)/Makefile make --no-print-directory -C outdir/$(BOARD) outputexports CMAKE_COMMAND=: >$@ - make -C outdir/$(BOARD) syscall_macros_h_target syscall_list_h_target + make -C outdir/$(BOARD) syscall_macros_h_target syscall_list_h_target kobj_types_h_target
Improve pointer hashing to avoid hash collisions.
@@ -295,6 +295,15 @@ int janet_equals(Janet x, Janet y) { return 1; } +static uint64_t murmur64(uint64_t h) { + h ^= h >> 33; + h *= 0xff51afd7ed558ccdUL; + h ^= h >> 33; + h *= 0xc4ceb9fe1a85ec53UL; + h ^= h >> 33; + return h; +} + /* Computes a hash value for a function */ int32_t janet_hash(Janet x) { int32_t hash = 0; @@ -341,11 +350,8 @@ int32_t janet_hash(Janet x) { default: if (sizeof(double) == sizeof(void *)) { /* Assuming 8 byte pointer (8 byte aligned) */ - uint64_t i = janet_u64(x); - uint32_t lo = (uint32_t)(i & 0xFFFFFFFF); - uint32_t hi = (uint32_t)(i >> 32); - uint32_t hilo = (hi ^ lo) * 2654435769u; - hash = (int32_t)((hilo << 16) | (hilo >> 16)); + uint64_t i = murmur64(janet_u64(x)); + hash = (int32_t)(i >> 32); } else { /* Assuming 4 byte pointer (or smaller) */ uintptr_t diff = (uintptr_t) janet_unwrap_pointer(x);
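The commit replaces an XOR-fold of the 64-bit payload with the MurmurHash3 64-bit finalizer, which mixes every input bit into every output bit before the result is truncated to 32 bits, so nearby aligned pointers no longer collide. A standalone sketch using the same finalizer constants as the diff; the sample pointer values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* MurmurHash3 fmix64 finalizer, as used in the diff above. */
static uint64_t murmur64(uint64_t h) {
    h ^= h >> 33;
    h *= 0xff51afd7ed558ccdULL;
    h ^= h >> 33;
    h *= 0xc4ceb9fe1a85ec53ULL;
    h ^= h >> 33;
    return h;
}

int main(void) {
    /* Aligned pointers differ only in a few bits; the finalizer spreads those
       differences across the whole word, so the top 32 bits are well mixed. */
    for (uint64_t p = 0x7f0000001000; p < 0x7f0000001000 + 4 * 16; p += 16) {
        printf("%#llx -> %#010x\n",
               (unsigned long long)p, (unsigned)(murmur64(p) >> 32));
    }
    return 0;
}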
add oidc-keychain bash completion
@@ -75,6 +75,7 @@ make install BIN_PATH=${RPM_BUILD_ROOT}/usr BIN_AFTER_INST_PATH=/usr MAN_PATH=${ %doc /usr/share/bash-completion/completions/oidc-add %doc /usr/share/bash-completion/completions/oidc-agent %doc /usr/share/bash-completion/completions/oidc-gen +%doc /usr/share/bash-completion/completions/oidc-keychain %doc /usr/share/bash-completion/completions/oidc-token %doc /usr/share/applications/oidc-gen.desktop /usr/lib64/liboidc-agent.so.3
Disable Ltest-init-local-signal on ia64 This makes the tests build on ia64 so that they can be run; a proper fix would be to add unw_init_local2() to src/ia64/Ginit_local.c
@@ -57,7 +57,6 @@ endif Gtest-resume-sig Ltest-resume-sig \ Gtest-resume-sig-rt Ltest-resume-sig-rt \ Gtest-trace Ltest-trace \ - Ltest-init-local-signal \ Ltest-mem-validate \ test-async-sig test-flush-cache test-init-remote \ test-mem test-reg-state Ltest-varargs \ @@ -65,6 +64,11 @@ endif noinst_PROGRAMS_cdep += forker Gperf-simple Lperf-simple \ Gperf-trace Lperf-trace +# unw_init_local2() is not implemented on ia64 +if !ARCH_IA64 + check_PROGRAMS_cdep += Ltest-init-local-signal +endif + if BUILD_PTRACE check_SCRIPTS_cdep += run-ptrace-mapper run-ptrace-misc check_PROGRAMS_cdep += test-ptrace
get lib/vast2 to bail-meme not explicitly error
:: ++ apex :: product twig %+ cook - |= tum/(each manx marl):twig ^- twig + |= tum/(each manx:twig marl:twig) ^- twig ?- -.tum $& [%xmn p.tum] $| [%xml p.tum] == :: ++ wide-top :: wide outer top - %+ knee *(each manx marl):twig |. ~+ + %+ knee *(each manx:twig marl:twig) |. ~+ ;~ pose (stag %| wide-quote) (stag %| wide-paren-elems) == :: ++ wide-inner-top :: wide inner top - %+ knee *(each tuna marl):twig |. ~+ + %+ knee *(each tuna:twig marl:twig) |. ~+ ;~ pose wide-top (stag %& ;~(plug tuna-mode wide)) ::+| :: ++ drop-top - |= a/(each tuna marl):twig ^- marl:twig + |= a/(each tuna:twig marl:twig) ^- marl:twig ?- -.a $& [p.a]~ $| p.a == :: ++ join-tops - |= a/(list (each tuna marl)):twig ^- marl:twig + |= a/(list (each tuna:twig marl:twig)) ^- marl:twig (zing (turn a drop-top)) :: ::+| ::+| :: ++ tall-top :: tall top - %+ knee *(each manx marl):twig |. ~+ + %+ knee *(each manx:twig marl:twig) |. ~+ ;~ pose (stag %| ;~(pfix (plus ace) (cook collapse-chars quote-innards))) (stag %& ;~(plug script-or-style script-style-tail))
Docs: Fix shellcheck
@@ -75,12 +75,14 @@ builddocs() { } bumpversion() { - local ocver=$(grep OPEN_CORE_VERSION ../Include/Acidanthera/Library/OcMainLib.h | sed 's/.*"\(.*\)".*/\1/' | grep -E '^[0-9.]+$') + local ocver + ocver=$(grep OPEN_CORE_VERSION ../Include/Acidanthera/Library/OcMainLib.h | sed 's/.*"\(.*\)".*/\1/' | grep -E '^[0-9.]+$') if [ "$ocver" = "" ]; then abort "Invalid OpenCore version" fi - local docver=$(grep -w 'Reference Manual' ./Configuration.tex | sed -e 's/(//g' -e 's/)//g' | awk '{print $3}') + local docver + docver=$(grep -w 'Reference Manual' ./Configuration.tex | sed -e 's/(//g' -e 's/)//g' | awk '{print $3}') if [ "$docver" = "" ]; then abort "Invalid document version" fi
Docs: Add information about fixed potential memory corruption in AVX to Changelog
@@ -9,6 +9,7 @@ OpenCore Changelog - Fixed selecting `SecureBootModel` on hypervisors (should be `x86legacy`) - Added kext blocking `Strategy` for prelinked and newer - Added global MSR 35h fix to `ProvideCurrentCpuInfo`, allowing `-cpu host` in KVM +- Fixed potential memory corruption with AVX acceleration enabled #### v0.7.8 - Updated ocvalidate to warn about insecure `DmgLoading` with secure `SecureBootModel` (already disallowed in runtime)
Don't sub to configs of comment notification channels.
|= {wat/kind des/cord pub/? vis/? ses/(set ship)} ^+ +> =+ nom=(sane-cord des) - =. +>.$ =- (ta-change-config nom - %coll) [des pub vis ses] - %- ta-emit - :* 0 - %peer - /hall/[nom] - [our.bol %hall] - /circle/(make-circle nom ~)/config-l - == :: ++ ta-submit |= {nom/term tit/cord wat/wain} =. +>.$ %- ta-hall-action [%create nam desc.cof ?:(publ.cof %journal %village)] - ::TODO if ?~ top, sub to config changes + =? +>.$ ?=($~ top) + %- ta-emit + :* 0 + %peer + /hall/[nom] + [our.bol %hall] + /circle/[nam]/config-l + == =? +>.$ visi.cof (ta-hall-set-visible nam &) =? +>.$ ?=(^ top)
hammer: Include hashes in EC image (CONFIG_TOUCHPAD_HASH_FW) BRANCH=none TEST=make TOUCHPAD_FW=SA459C-1211_ForGoogleHammer_3.0.bin \ BOARD=hammer -j CQ-DEPEND=CL:641736
/* Virtual address for touchpad FW in USB updater. */ #define CONFIG_TOUCHPAD_VIRTUAL_OFF 0x80000000 +/* Include touchpad FW hashes in image */ +#define CONFIG_TOUCHPAD_HASH_FW + /* Touchpad firmware size and dimension difference */ #ifdef BOARD_STAFF /* TODO(b:38277869): Adjust values to match hardware. */
Added some SceSysclibForDriver NIDs
@@ -1902,6 +1902,7 @@ modules: nid: 0x7EE45391 functions: __stack_chk_fail: 0xB997493D + look_ctype_table: 0xCDF7F155 memchr: 0x60DAEA30 memcmp: 0xF939E83D memcpy: 0x40C88316 @@ -1919,6 +1920,8 @@ modules: strrchr: 0x7F0E0835 strstr: 0x1304A69D strtol: 0xAB77C5AA + strtoll: 0x87AAAFA2 + strtoul: 0x4E5042DA tolower: 0x0021DAF9 toupper: 0xA685DCB1 vsnprintf: 0x3DDBE2E1
Make t4p4s.sh properly wait for needed ports
@@ -396,6 +396,14 @@ declare -A VSN_TO_EXT=([16]="p4" [14]="p4_14") # -------------------------------------------------------------------- +# Wait for 5 seconds or the availability of port $1, whichever comes first +wait_for_port_availability() { + for i in `seq 1 50`; do + [[ `sudo lsof -i:${1} | grep LISTEN | wc -l` -ne 0 ]] && break + sleep 0.1 + done +} + find_ephemeral_port() { CHOSEN_PORT=$(shuf -n 1 -i $1-$2) while [ `sudo lsof -i -P -n | grep LISTEN | grep $CHOSEN_PORT | wc -l` -ne 0 ]; do CHOSEN_PORT=$(shuf -n 1 -i $1-$2); done @@ -468,8 +476,7 @@ PYTHON_PARSE_HELPER_PROCESS="$!" unset PYTHON_PARSE_HELPER -sleep 0.1 - +wait_for_port_availability ${PYTHON_PARSE_HELPER_PORT} # -------------------------------------------------------------------- # Set defaults @@ -823,7 +830,7 @@ if [ "$(optvalue run)" != off ]; then setopt T4P4S_CTL_PORT $(find_ephemeral_port 49152 65535) verbosemsg "Controller port is $(cc 0)$(optvalue T4P4S_CTL_PORT)$nn" - # Step 3A-3: Run controller + # Step 3A-2: Run controller if [ $(optvalue showctl optv) == y ]; then stdbuf -o 0 $CTRL_PLANE_DIR/$CONTROLLER $(optvalue T4P4S_CTL_PORT) ${OPTS[ctrcfg]} & elif [ "$(optvalue ctrterm)" != off -a "$HAS_TERMINAL" == "0" ]; then @@ -833,7 +840,8 @@ if [ "$(optvalue run)" != off ]; then else (stdbuf -o 0 $CTRL_PLANE_DIR/$CONTROLLER $(optvalue T4P4S_CTL_PORT) ${OPTS[ctrcfg]} >&2> "${CONTROLLER_LOG}" &) fi - sleep 0.2 + + wait_for_port_availability $(optvalue T4P4S_CTL_PORT) fi fi
Fix typo in SLRU stats documentation Author: Noriyoshi Shinoda Discussion:
@@ -3296,7 +3296,7 @@ SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event i </row> <row> <entry><structfield>blks_hit</structfield></entry> - <entry><type>biging</type></entry> + <entry><type>bigint</type></entry> <entry>Number of times disk blocks were found already in the SLRU, so that a read was not necessary (this only includes hits in the SLRU, not the operating system's file system cache)
mdns: updated doxygen comments documenting mdns api Closes
@@ -81,10 +81,9 @@ typedef struct mdns_result_s { * * @return * - ESP_OK on success - * - ESP_ERR_INVALID_ARG when bad tcpip_if is given - * - ESP_ERR_INVALID_STATE when the network returned error + * - ESP_ERR_INVALID_STATE when failed to register event handler * - ESP_ERR_NO_MEM on memory error - * - ESP_ERR_WIFI_NOT_INIT when WiFi is not initialized by eps_wifi_init + * - ESP_FAIL when failed to start mdns task */ esp_err_t mdns_init(); @@ -127,13 +126,14 @@ esp_err_t mdns_instance_name_set(const char * instance_name); * @param service_type service type (_http, _ftp, etc) * @param proto service protocol (_tcp, _udp) * @param port service port - * @param num_items number of items in TXT data * @param txt string array of TXT data (eg. {{"var","val"},{"other","2"}}) + * @param num_items number of items in TXT data * * @return * - ESP_OK success * - ESP_ERR_INVALID_ARG Parameter error * - ESP_ERR_NO_MEM memory error + * - ESP_FAIL failed to add serivce */ esp_err_t mdns_service_add(const char * instance_name, const char * service_type, const char * proto, uint16_t port, mdns_txt_item_t txt[], size_t num_items); @@ -147,7 +147,7 @@ esp_err_t mdns_service_add(const char * instance_name, const char * service_type * - ESP_OK success * - ESP_ERR_INVALID_ARG Parameter error * - ESP_ERR_NOT_FOUND Service not found - * - ESP_FAIL unknown error + * - ESP_ERR_NO_MEM memory error */ esp_err_t mdns_service_remove(const char * service_type, const char * proto); @@ -177,6 +177,7 @@ esp_err_t mdns_service_instance_name_set(const char * service_type, const char * * - ESP_OK success * - ESP_ERR_INVALID_ARG Parameter error * - ESP_ERR_NOT_FOUND Service not found + * - ESP_ERR_NO_MEM memory error */ esp_err_t mdns_service_port_set(const char * service_type, const char * proto, uint16_t port); @@ -185,8 +186,8 @@ esp_err_t mdns_service_port_set(const char * service_type, const char * proto, u * * @param service_type service type (_http, _ftp, etc) * @param proto service protocol (_tcp, _udp) - * @param num_items number of items in TXT data * @param txt array of TXT data (eg. {{"var","val"},{"other","2"}}) + * @param num_items number of items in TXT data * * @return * - ESP_OK success
test: add platone test case test_002InitWallet_0002SetEIP155CompFailureNullParam fix the issue aitos-io#1176 teambition task id:
@@ -608,6 +608,21 @@ START_TEST(test_002InitWallet_0001SetEIP155CompSuccess) } END_TEST +START_TEST(test_002InitWallet_0002SetEIP155CompFailureNullParam) +{ + BSINT32 rtnVal; + BoatPlatoneWalletConfig wallet = get_platone_wallet_settings(); + + /* 1. execute unit test */ + rtnVal = BoatPlatoneWalletSetEIP155Comp(NULL, wallet.eip155_compatibility); + /* 2. verify test result */ + /* 2-1. verify the return value */ + ck_assert_int_eq(rtnVal, BOAT_ERROR_COMMON_INVALID_ARGUMENT); + + /* 2-2. verify the global variables that be affected */ +} +END_TEST + Suite *make_wallet_suite(void) { /* Create Suite */ @@ -639,6 +654,7 @@ Suite *make_wallet_suite(void) tcase_add_test(tc_wallet_api, test_001CreateWallet_0018DeletePersistWalletSuccess); tcase_add_test(tc_wallet_api, test_002InitWallet_0001SetEIP155CompSuccess); + tcase_add_test(tc_wallet_api, test_002InitWallet_0002SetEIP155CompFailureNullParam); return s_wallet; }
[doc] add a link to h2olog in "Key Features"
@@ -40,6 +40,7 @@ Explanation of the benchmark charts can be found in the <a href="benchmarks.html <li><a href="configure/proxy_directives.html">reverse proxy</a> <li><a href="configure/mruby.html">scriptable using mruby</a> (Rack-based) <li>graceful restart and self-upgrade +<li><a href="configure/h2olog.html">BPF-based tracing tool</a> (experimental)</li> </ul> ? })
runtimes/charliecloud: fix typo in sed function
%define pname charliecloud # Specify python version of a given file -%define versionize_script() (sed -i 's,/env python,/env% %1,g' %2) +%define versionize_script() (sed -i 's,/env python,/env %1,g' %2) %{!?build_cflags:%global build_cflags $RPM_OPT_FLAGS} %{!?build_ldflags:%global build_ldflags %nil}
adding extra keys for the trackers so now the tracker behaves more like other traditional music trackers
@@ -840,6 +840,13 @@ static void processTrackerKeyboard(Music* music) tic_key_y, tic_key_7, tic_key_u, + + // extra keys + tic_key_i, + tic_key_9, + tic_key_o, + tic_key_0, + tic_key_p, }; if (getChannelPattern(music))
Changed rollover field type
@@ -47,7 +47,7 @@ typedef struct int32_t range_max; int32_t range_min; int32_t step; - bool rollover; // Set to true for rollover functionality + uint8_t rollover : 1; // Set to true for rollover functionality uint16_t digit_count : 4; uint16_t dec_point_pos : 4; /*if 0, there is no separator and the number is an integer*/ uint16_t digit_padding_left : 4;
Contract: Sort providers
@@ -130,14 +130,14 @@ description = The full text describing everything relevant for the [infos/provides] type = enum - resolver - storage - code - notification apply - conv check + code + conv logging + notification + resolver + storage status = implemented usedby = plugin description = Introduces a more abstract name (=provider) for the type
Don't complain if function name doesn't match The "function" argument is now unused in the XXXerr defines, so mkerr doesn't need to check if the value/name match.
@@ -394,10 +394,6 @@ foreach my $file ( @source ) { $fnew{$2}++; } $ftrans{$3} = $func unless exists $ftrans{$3}; - if ( uc($func) ne $3 ) { - print STDERR "ERROR: mismatch $file:$linenr $func:$3\n"; - $errors++; - } print STDERR " Function $1 = $fcodes{$1}\n" if $debug; }
build: run note check not on master
@@ -359,7 +359,7 @@ def build_todo() { def build_check_release_notes() { def stage_name = "check-release-notes" return [(stage_name): { - stage(stage_name) { + maybeStage(stage_name, !isMaster()) { withDockerEnv(DOCKER_IMAGES.stretch) { sh "scripts/run_check_release_notes" deleteDir()
force build_openmp.sh install steps to use -j X, a temporary workaround for the very serialized openmp build time
@@ -210,7 +210,7 @@ if [ "$1" == "install" ] ; then cd $BUILD_DIR/build/openmp echo echo " -----Installing to $INSTALL_OPENMP/lib ----- " - $SUDO make install + $SUDO make -j $AOMP_JOB_THREADS install if [ $? != 0 ] ; then echo "ERROR make install failed " exit 1 @@ -223,7 +223,7 @@ if [ "$1" == "install" ] ; then [[ ! -d $_ompd_dir ]] && _ompd_dir="$AOMP_INSTALL_DIR/share/gdb/python/ompd" echo echo " -----Installing to $INSTALL_OPENMP/lib-debug ---- " - $SUDO make install + $SUDO make -j $AOMP_JOB_THREADS install if [ $? != 0 ] ; then echo "ERROR make install failed " exit 1
grid: fix mock notification data to match new type def
@@ -197,7 +197,7 @@ function text(t: string) { function createDmNotification(...content: HarkContent[]): HarkBody { return { title: [ship('~hastuc-dibtux'), text(' messaged you')], - time: unixToDa(Date.now() - 3_600).toString(), + time: unixToDa(Date.now() - 3_600).toJSNumber(), content, binned: '/', link: '/' @@ -207,7 +207,7 @@ function createDmNotification(...content: HarkContent[]): HarkBody { function createBitcoinNotif(amount: string) { return { title: [ship('~silnem'), text(` sent you ${amount}`)], - time: unixToDa(Date.now() - 3_600).toString(), + time: unixToDa(Date.now() - 3_600).toJSNumber(), content: [], binned: '/', link: '/' @@ -218,7 +218,7 @@ function createGroupNotif(to: string): HarkBody { return { title: [ship('~ridlur-figbud'), text(` invited you to ${to}`)], content: [], - time: unixToDa(Date.now() - 3_600).toString(), + time: unixToDa(Date.now() - 3_600).toJSNumber(), binned: '/', link: '/' }; @@ -257,7 +257,7 @@ const onboard = createMockSysNotification('/onboard'); const updateNotification = createMockSysNotification('/desk/bitcoin', [ { title: [{ text: 'App "Bitcoin" updated to version 1.0.1' }], - time: '', + time: 0, content: [], link: '/desk/bitcoin', binned: '/'
stm32/mboot: Always use a flash latency of 1WS to match 48MHz HCLK.
#undef MICROPY_HW_CLK_PLLN #undef MICROPY_HW_CLK_PLLP #undef MICROPY_HW_CLK_PLLQ +#undef MICROPY_HW_FLASH_LATENCY #define MICROPY_HW_CLK_PLLM (HSE_VALUE / 1000000) #define MICROPY_HW_CLK_PLLN (192) #define MICROPY_HW_CLK_PLLP (RCC_PLLP_DIV4) #define MICROPY_HW_CLK_PLLQ (4) +#define MICROPY_HW_FLASH_LATENCY FLASH_LATENCY_1 // Work out which USB device to use for the USB DFU interface #if !defined(MICROPY_HW_USB_MAIN_DEV) @@ -206,10 +208,6 @@ void SystemClock_Config(void) { while(__HAL_RCC_GET_FLAG(RCC_FLAG_PLLRDY) == RESET) { } - #if !defined(MICROPY_HW_FLASH_LATENCY) - #define MICROPY_HW_FLASH_LATENCY FLASH_LATENCY_1 - #endif - // Increase latency before changing clock if (MICROPY_HW_FLASH_LATENCY > (FLASH->ACR & FLASH_ACR_LATENCY)) { __HAL_FLASH_SET_LATENCY(MICROPY_HW_FLASH_LATENCY);
gall: crash properly on failed %boon from ames
=/ sky (rof ~ %cb [our %home case] /[mark.ames-response]) ?- sky ?(~ [~ ~]) - =/ ror "gall: ames mark fail {<mark.ames-response>}" - (mo-give %done `vale+[leaf+ror]~) + (mean leaf+"gall: ames mark fail {<mark.ames-response>}" ~) :: [~ ~ *] =+ !<(=dais:clay q.u.u.sky) =/ res (mule |.((vale:dais noun.ames-response))) ?: ?=(%| -.res) - =/ ror "gall: ames vale fail {<mark.deal>}" - (mo-give %done `vale+[leaf+ror p.res]) + (mean leaf+"gall: ames vale fail {<mark.ames-response>}" p.res) =. mo-core %+ mo-pass /nowhere [%c %warp our %home ~ %sing %b case /[mark.ames-response]]
find all scala source files instead of searching individual project directories
@@ -6,11 +6,10 @@ SHELL=/bin/bash ######################################################################################### # variables to get all *.scala files ######################################################################################### -lookup_scala_srcs = $(shell find -L $(1)/ -iname "*.scala" 2> /dev/null) +lookup_scala_srcs = $(shell find -L $(1)/ -name target -prune -o -iname "*.scala" -print 2> /dev/null) -PACKAGES=$(addprefix generators/, rocket-chip testchipip boom hwacha sifive-blocks sifive-cache example) \ - $(addprefix sims/firesim/sim/, . firesim-lib midas midas/targetutils) -SCALA_SOURCES=$(foreach pkg,$(PACKAGES),$(call lookup_scala_srcs,$(base_dir)/$(pkg)/src/main/scala)) +SOURCE_DIRS=$(addprefix $(base_dir)/,generators sims/firesim/sim) +SCALA_SOURCES=$(call lookup_scala_srcs,$(SOURCE_DIRS)) ######################################################################################### # rocket and testchipip classes
[arch][m68k] add idle loop
@@ -17,7 +17,7 @@ void arch_early_init(void) { // set the exception vector base extern uint32_t exc_vectors[256]; - asm("movec %0, %%vbr" :: "r"(exc_vectors)); + asm volatile("movec %0, %%vbr" :: "r"(exc_vectors)); } void arch_init(void) { @@ -25,7 +25,8 @@ void arch_init(void) { } void arch_idle(void) { -// asm volatile("sleep"); + // set the SR such that we're in supervisor state and no ints are masked + asm("stop #0x2000" ::: "cc"); } void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
.travis.yml: add pair of linux-ppc64le targets. One is clang --strict-warnings and one is a gcc sanitizer extended test. The sanitizer build is quite expensive, can take >30 mins, and is commented out for occasions when there is reason to believe that a PPC-specific problem can be diagnosed with a sanitizer.
@@ -31,6 +31,10 @@ env: matrix: include: + - os: linux-ppc64le + sudo: false + compiler: clang + env: CONFIG_OPTS="--strict-warnings -D__NO_STRING_INLINES" - os: linux addons: apt: @@ -52,6 +56,12 @@ matrix: - gcc-mingw-w64 compiler: i686-w64-mingw32-gcc env: CONFIG_OPTS="no-stdio" BUILDONLY="yes" + # Uncomment if there is reason to believe that PPC-specific problem + # can be diagnosed with this possibly >30 mins sanitizer build... + #- os: linux-ppc64le + # sudo: false + # compiler: gcc + # env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-asan enable-ubsan no-shared -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES" - os: linux addons: apt:
volteer: Add GPIO_LOCKED to EC_RST_ODL PSL input BRANCH=none TEST=make buildall
@@ -45,8 +45,13 @@ UNIMPLEMENTED(PCH_DSW_PWROK) /* * GPIO_INT_BOTH is required for PSL wake from hibernate, but we don't need an * interrupt handler because it is automatically handled by the PSL. + * + * We need to lock the setting so this gpio can't be reconfigured to overdrive + * the real reset signal. (This is the PSL input pin not the real reset pin). */ -GPIO(EC_RST_ODL, PIN(0, 2), GPIO_INT_BOTH | GPIO_HIB_WAKE_HIGH) +GPIO(EC_RST_ODL, PIN(0, 2), GPIO_INT_BOTH | + GPIO_HIB_WAKE_HIGH | + GPIO_LOCKED) /* AP/PCH Signals */ GPIO(EC_PCH_SYS_PWROK, PIN(3, 7), GPIO_OUT_LOW) /* TODO - b/140556273 - implement support with power sequencing */
matlab test: comment out TOOLBOX_PATH test (not strictly needed)
@@ -12,8 +12,8 @@ function test_bart() tolFloat = 1e-7; %% Test1: Environmental variable - bartPath = getenv('TOOLBOX_PATH'); - testAssert(~isempty(bartPath), 'Environmental variable (TOOLBOX_PATH)'); + %bartPath = getenv('TOOLBOX_PATH'); + %testAssert(~isempty(bartPath), 'Environmental variable (TOOLBOX_PATH)'); %% Test2: Write/Read cfl file = tempname;
Fix the allocator. It had two bugs that compensated each other. First, an off-by-one in the code that prevented freeing any slabs. Second, the singly linked list of slabs should have been doubly linked. The first prevented the second from mattering. Both should be fixed by this.
+use sys use "die" use "extremum" use "memops" @@ -27,9 +28,9 @@ pkg std = const Zslab = (0 : slab#) const Zchunk = (0 : chunk#) -const Slabsz = 1*MiB /* 1 meg slabs */ -const Cachemax = 16 /* maximum number of slabs in the cache */ -const Bktmax = 32*KiB /* Slabsz / 8; a balance. */ +const Slabsz = 4*MiB +const Cachemax = 4 +const Bktmax = 128*KiB /* a balance between wasted space and falling back to mmap */ const Pagesz = 4*KiB var buckets : bucket[32] /* excessive */ @@ -47,6 +48,7 @@ type bucket = struct type slab = struct head : byte# /* head of virtual addresses, so we don't leak address space */ next : slab# /* the next slab on the chain */ + prev : slab# /* the prev slab on the chain */ freehd : chunk# /* the nodes we're allocating */ nfree : size /* the number of free nodes */ ;; @@ -189,6 +191,8 @@ const mkslab = {bkt s = (align((p : size), Slabsz) : slab#) s.head = p s.nfree = bkt.nper + s.next = Zslab + s.prev = Zslab /* skip past the slab header */ off = align(sizeof(slab), Align) bnext = nextchunk((s : chunk#), off) @@ -215,10 +219,10 @@ const bktalloc = {bkt s = bkt.slabs if s == Zslab s = mkslab(bkt) + bkt.slabs = s if s == Zslab die("No memory left") ;; - bkt.slabs = s ;; /* grab the first chunk on the slab */ @@ -227,7 +231,9 @@ const bktalloc = {bkt s.nfree-- if s.nfree == 0 bkt.slabs = s.next - s.next = Zslab + if s.next != Zslab + s.next.prev = Zslab + ;; ;; -> (b : byte#) @@ -245,21 +251,37 @@ const bktfree = {bkt, m s = (mtrunc(m, Slabsz) : slab#) b = (m : chunk#) if s.nfree == 0 + if bkt.slabs != Zslab + bkt.slabs.prev = s + ;; s.next = bkt.slabs + s.prev = Zslab bkt.slabs = s - elif s.nfree == bkt.nper + elif s.nfree == bkt.nper - 1 /* HACK HACK HACK: if we can't unmap, keep an infinite cache per slab size. We should solve this better somehow. */ if bkt.ncache < Cachemax || !Canunmap s.next = bkt.cache + s.prev = Zslab bkt.cache = s else + /* unlink the slab from the list */ + if s.next != Zslab + s.next.prev = s.prev + ;; + if s.prev != Zslab + s.prev.next = s.next + ;; + if bkt.slabs == s + bkt.slabs = s.next + ;; /* we mapped 2*Slabsz so we could align it, so we need to unmap the same */ freemem(s.head, Slabsz*2) ;; + -> void ;; s.nfree++ b.next = s.freehd
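The second bug described above is structural: once a slab in the middle of a bucket's list becomes fully free, it has to be unlinked from wherever it sits, which a singly linked list cannot do cheaply. A generic C sketch of the doubly linked unlink the patch introduces (illustration only; the real allocator above is Myrddin):

#include <stddef.h>

typedef struct slab slab;
struct slab {
    slab *next;
    slab *prev;
};

/* Detach s from the list whose head pointer is *head, wherever it sits. */
static void unlink_slab(slab **head, slab *s) {
    if (s->next != NULL)
        s->next->prev = s->prev;
    if (s->prev != NULL)
        s->prev->next = s->next;
    if (*head == s)
        *head = s->next;
    s->next = NULL;
    s->prev = NULL;
}

int main(void) {
    slab a = {NULL, NULL}, b = {NULL, NULL}, c = {NULL, NULL};
    slab *head = &a;
    a.next = &b; b.prev = &a;
    b.next = &c; c.prev = &b;

    unlink_slab(&head, &b);            /* list is now a <-> c */
    return (head == &a && a.next == &c && c.prev == &a) ? 0 : 1;
}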
haskell-debian-strech: fix shared cabal build flags on linux
@@ -6,7 +6,6 @@ if (NOT BUILD_STATIC) set (CABAL_INCLUDE_DIRS "\"${CMAKE_SOURCE_DIR}/src/include\", \"${CMAKE_BINARY_DIR}/src/include\"") set (BINDING_HASKELL_NAME "${CMAKE_CURRENT_BINARY_DIR}/libHSlibelektra-haskell") - set (CABAL_OPTS "--prefix=${CMAKE_INSTALL_PREFIX}") if (BUILD_SHARED OR BUILD_FULL) set (GHC_DYNAMIC_SUFFIX "-ghc${GHC_VERSION}") if (APPLE) @@ -15,7 +14,7 @@ if (NOT BUILD_STATIC) set (GHC_DYNAMIC_ENDING ".so") endif (APPLE) set (BINDING_HASKELL_NAME "${BINDING_HASKELL_NAME}${GHC_DYNAMIC_SUFFIX}${GHC_DYNAMIC_ENDING}") - set (CABAL_OPTS "${CABAL_OPTS};--enable-shared") + set (CABAL_OPTS "--enable-shared") if (BUILD_SHARED) set (ELEKTRA_DEPENDENCY "elektra;elektra-kdb;elektra-ease;") elseif (BUILD_FULL) @@ -23,7 +22,7 @@ if (NOT BUILD_STATIC) endif () elseif (BUILD_STATIC) set (BINDING_HASKELL_NAME "${BINDING_HASKELL_NAME}.a") - set (CABAL_OPTS "${CABAL_OPTS};--disable-shared") + set (CABAL_OPTS "--disable-shared") set (ELEKTRA_DEPENDENCY "elektra-static;") endif () string (REPLACE ";" " " CABAL_ELEKTRA_DEPENDENCY "${ELEKTRA_DEPENDENCY}") @@ -51,7 +50,7 @@ if (NOT BUILD_STATIC) "${CMAKE_SOURCE_DIR}/src/plugins/haskell/Setup.hs" ) file (WRITE "${CMAKE_CURRENT_BINARY_DIR}/cabalOptionalDependencies.cmake" - "execute_process (COMMAND ${CABAL_EXECUTABLE} install --only-dependencies -v0)") + "execute_process (COMMAND ${CABAL_EXECUTABLE} ${CABAL_OPTS} install --only-dependencies -v0)") execute_process (COMMAND ${CABAL_EXECUTABLE} sandbox init --sandbox ${CMAKE_BINARY_DIR}/.cabal-sandbox -v0 WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} OUTPUT_QUIET) add_custom_command ( @@ -80,7 +79,7 @@ if (NOT BUILD_STATIC) "${CMAKE_CURRENT_BINARY_DIR}/dist/build/testhaskell_realworld_optimized/testhaskell_realworld_optimized" ) file (WRITE "${CMAKE_CURRENT_BINARY_DIR}/cabalOptionalDependencies.cmake" - "execute_process (COMMAND ${CABAL_EXECUTABLE} install --enable-tests --only-dependencies -v0)") + "execute_process (COMMAND ${CABAL_EXECUTABLE} ${CABAL_OPTS} install --enable-tests --only-dependencies -v0)") add_custom_command ( OUTPUT ${HASKELL_TESTS} # everything is getting installed to the sandbox, and gets cached to avoid reinstalling every time
useResize: fix destructor
@@ -15,11 +15,12 @@ export function useResize<T extends HTMLElement>( callback(entry, observer); } } + let el = ref.current; const resizeObs = new ResizeObserver(observer); - resizeObs.observe(ref.current, { box: 'border-box' }); + resizeObs.observe(el, { box: 'border-box' }); return () => { - resizeObs.unobserve(ref.current); + resizeObs.unobserve(el); }; }, [callback]);
CHANGELOG: Add recent motor updates.
## [Unreleased] +### Added +- Added ability to use more than one `DriveBase` in the same script. + +### Changed +- Changed how `DriveBases` and `Motor` classes can be used together. + Previously, an individual motor could not be used when a drive base used it. + From now on, devices can always be used. If they were already in use by + something else, that other class will just be stopped (coast). +- Changed how unexpected motor problems are handled, such as a cable being + unplugged while it was running. Previously, this raised a `SystemExit` no + matter which motor was unplugged. Now it will return an `OSError` with + `ENODEV`, which is consistent with trying to initialize a motor that isn't + there. The `Motor` class must be initialized again to use the motor again. +- Changing settings while a motor is moving no longer raises an exception. Some + settings will not take effect until a new motor command is given. + ## [3.1.0] - 2021-12-16 ### Changed
Added note about redundant install logic
@@ -28,6 +28,12 @@ ENV TZ="America/New_York" # --- # Install packages. Note extra PPA for Go. # +# Also note that this logic duplicates the top-level `install_build_tools.sh` +# script. Trying to use that script here causes issues because of its use of +# `sudu` when running the package installer. It clears the environment so the +# additions above don't apply and we end up with interaction when some of the +# dependencies (i.e. tzdata) are installed. +# RUN apt-get update && \ apt-get install -y software-properties-common gpg apt-utils && \ add-apt-repository ppa:longsleep/golang-backports && \
change findPlug method that is deprecated for maya 2019
@@ -1834,7 +1834,7 @@ AssetNode::createAsset() // if the asset has been frozen. and DeferAssetLoad in on // and we're reading a file, don't load the assets MFnDependencyNode assetNodeFn(thisMObject()); - MPlug frozenPlug = assetNodeFn.findPlug("frozen"); + MPlug frozenPlug = assetNodeFn.findPlug("frozen", true); bool frozen = frozenPlug.asBool(); int defer = MGlobal::optionVarIntValue("houdiniEngineDeferAssetLoad");
Add mapimg export typedefs
@@ -779,7 +779,10 @@ typedef struct _PH_MAPPED_IMAGE_DEBUG_POGO PPH_IMAGE_DEBUG_POGO_ENTRY PogoEntries; } PH_MAPPED_IMAGE_DEBUG_POGO, *PPH_MAPPED_IMAGE_DEBUG_POGO; -NTSTATUS PhGetMappedImagePogo( +PHLIBAPI +NTSTATUS +NTAPI +PhGetMappedImagePogo( _In_ PPH_MAPPED_IMAGE MappedImage, _Out_ PPH_MAPPED_IMAGE_DEBUG_POGO PogoDebug ); @@ -809,12 +812,17 @@ typedef struct _PH_MAPPED_IMAGE_RELOC PPH_IMAGE_RELOC_ENTRY RelocationEntries; } PH_MAPPED_IMAGE_RELOC, *PPH_MAPPED_IMAGE_RELOC; -NTSTATUS PhGetMappedImageRelocations( +PHLIBAPI +NTSTATUS +NTAPI +PhGetMappedImageRelocations( _In_ PPH_MAPPED_IMAGE MappedImage, _Out_ PPH_MAPPED_IMAGE_RELOC Relocations ); - -VOID PhFreeMappedImageRelocations( +PHLIBAPI +VOID +NTAPI +PhFreeMappedImageRelocations( _In_ PPH_MAPPED_IMAGE_RELOC Relocations );
hv:vtd: fix MISRA-C violations on scope of variable could be reduced This patch fixes the MISRA-C violations in arch/x86/vtd.c where the scope of a variable could be reduced. Acked-by: Anthony Xu
@@ -129,16 +129,15 @@ struct context_table { struct page buses[CONFIG_IOMMU_BUS_NUM]; }; -static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE); -static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE); - static inline uint8_t* get_root_table(uint32_t dmar_index) { + static struct page root_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE); return root_tables[dmar_index].contents; } static inline uint8_t* get_ctx_table(uint32_t dmar_index, uint8_t bus_no) { + static struct context_table ctx_tables[CONFIG_MAX_IOMMU_NUM] __aligned(PAGE_SIZE); return ctx_tables[dmar_index].buses[bus_no].contents; } @@ -161,7 +160,6 @@ static struct iommu_domain *vm0_domain; /* Domain id 0 is reserved in some cases per VT-d */ #define MAX_DOMAIN_NUM (CONFIG_MAX_VM_NUM + 1) -static struct iommu_domain iommu_domains[MAX_DOMAIN_NUM]; static inline uint16_t vmid_to_domainid(uint16_t vm_id) { @@ -1009,6 +1007,7 @@ static void do_action_for_iommus(void (*action)(struct dmar_drhd_rt *)) struct iommu_domain *create_iommu_domain(uint16_t vm_id, uint64_t translation_table, uint32_t addr_width) { + static struct iommu_domain iommu_domains[MAX_DOMAIN_NUM]; struct iommu_domain *domain; /* TODO: check if a domain with the vm_id exists */
Copying QoS Bits when fragmented, so that marking can happen properly; also cleaning up some unused code
@@ -61,6 +61,13 @@ frag_set_sw_if_index (vlib_buffer_t * to, vlib_buffer_t * from) vnet_buffer (from)->ip.adj_index[VLIB_RX]; vnet_buffer (to)->ip.adj_index[VLIB_TX] = vnet_buffer (from)->ip.adj_index[VLIB_TX]; + + /* Copy QoS Bits */ + if (PREDICT_TRUE (from->flags & VNET_BUFFER_F_QOS_DATA_VALID)) + { + vnet_buffer2 (to)->qos = vnet_buffer2 (from)->qos; + to->flags |= VNET_BUFFER_F_QOS_DATA_VALID; + } } static vlib_buffer_t *
Hide sprites behind Window
@@ -122,6 +122,46 @@ _UpdateActors_b:: push af inc sp + ; If WX_REG == 7 - Move sprite + push hl + ld hl, #0xFF4B ; WX_REG + ld a, (hl) + pop hl + cp a, #0x7 + jp z, move_sprite + + ; If WX_REG > screen_x - Move sprite + push hl + ldhl sp, #2 ; screen_x in stack + ld e, a + ld a, (hl) + pop hl + cp a, e + jp c, move_sprite + + ; If WY_REG < screen_y - 16px - Move sprite + push hl + ld hl, #0xFF4A ; WY_REG + ld e, (hl) + ldhl sp, #3; screen_y in stack + ld a, (hl) + sub a, #16; screen_y - 16px + pop hl + cp a, e + jp c, move_sprite + + hide_sprite: + ; Reset stack + add sp, #2 + + ; Get sprite index into a + ld a, #.SPRITE_INDEX_OFFSET + _add_a h, l + ld a, (hl) + jp hide_sprite_pair + + move_sprite: + ; Get sprite index into a ld a, #.SPRITE_INDEX_OFFSET _add_a h, l @@ -129,6 +169,7 @@ _UpdateActors_b:: push af inc sp + move_sprite_pair: ; Move sprite (left) using gbdk fn @@ -450,6 +491,26 @@ _UpdateActors_b:: ld a, #.RERENDER_OFFSET _add_a h, l ld (hl), #1 + jp next_actor + + hide_sprite_pair: + + ld b, #0 + ld c, #0 + push bc + push af + inc sp + + ; Move sprite (left) using gbdk fn + call _move_sprite + + ; Move sprite (right) + ; Reuse previous sprite value incrementing by 1 + pop bc + inc c + push bc + call _move_sprite + add sp, #3 next_actor: ; Clear current actor from stack
updates "target.c" 1, remove "extern" and "USBD_CDC_ACM_Reset"
@@ -37,61 +37,16 @@ target_cfg_t target_device = { }; // RTL8195AM's main cpu can only talk 38400 with DAP UART -//#include "string.h" -#include "RTL.h" -#include "rl_usb.h" -#include "usb_for_lib.h" - -extern int32_t data_send_access; -extern int32_t data_send_active; -extern int32_t data_send_zlp; -extern int32_t data_to_send_wr; -extern int32_t data_to_send_rd; -extern uint8_t *ptr_data_to_send; -extern uint8_t *ptr_data_sent; -extern int32_t data_read_access; -extern int32_t data_receive_int_access; -extern int32_t data_received_pending_pckts; -extern int32_t data_no_space_for_receive; -extern uint8_t *ptr_data_received; -extern uint8_t *ptr_data_read; -extern uint16_t control_line_state; -extern CDC_LINE_CODING line_coding; - -extern int32_t USBD_CDC_ACM_PortReset(void); -extern int32_t USBD_CDC_ACM_PortSetLineCoding(CDC_LINE_CODING *line_coding); - -int32_t USBD_CDC_ACM_Reset(void) -{ - data_send_access = 0; - data_send_active = 0; - data_send_zlp = 0; - data_to_send_wr = 0; - data_to_send_rd = 0; - ptr_data_to_send = USBD_CDC_ACM_SendBuf; - ptr_data_sent = USBD_CDC_ACM_SendBuf; - data_read_access = 0; - data_receive_int_access = 0; - data_received_pending_pckts = 0; - data_no_space_for_receive = 0; - ptr_data_received = USBD_CDC_ACM_ReceiveBuf; - ptr_data_read = USBD_CDC_ACM_ReceiveBuf; - control_line_state = 0; - USBD_CDC_ACM_PortReset(); - line_coding.dwDTERate = 38400; - line_coding.bCharFormat = 0; - line_coding.bParityType = 0; - line_coding.bDataBits = 8; - - return (USBD_CDC_ACM_PortSetLineCoding(&line_coding)); -} +#include "uart.h" +static UART_Configuration UART_Config; int32_t USBD_CDC_ACM_SetLineCoding(void) { - line_coding.dwDTERate = 38400; - line_coding.bCharFormat = USBD_EP0Buf[4]; - line_coding.bParityType = USBD_EP0Buf[5]; - line_coding.bDataBits = USBD_EP0Buf[6]; + UART_Config.Baudrate = 38400; + UART_Config.DataBits = UART_DATA_BITS_8; + UART_Config.Parity = UART_PARITY_NONE; + UART_Config.StopBits = UART_STOP_BITS_1; + UART_Config.FlowControl = UART_FLOW_CONTROL_NONE; - return (USBD_CDC_ACM_PortSetLineCoding(&line_coding)); + return uart_set_configuration(&UART_Config); }
max7456: poke spi txn on is_ready check
@@ -296,7 +296,11 @@ bool max7456_flush() { } bool max7456_is_ready() { - return spi_txn_ready(&bus); + if (!spi_txn_ready(&bus)) { + spi_txn_continue(&bus); + return false; + } + return true; } void osd_read_character(uint8_t addr, uint8_t *out, const uint8_t size) {
fix hfp demo audio not being a sine wave Closes
@@ -106,6 +106,7 @@ const char *c_codec_mode_str[] = { #if CONFIG_BT_HFP_AUDIO_DATA_PATH_HCI #define TABLE_SIZE 100 +#define TABLE_SIZE_BYTE 200 // Produce a sine audio static const int16_t sine_int16[TABLE_SIZE] = { 0, 2057, 4107, 6140, 8149, 10126, 12062, 13952, 15786, 17557, @@ -179,14 +180,13 @@ static void bt_app_hf_incoming_cb(const uint8_t *buf, uint32_t sz) static uint32_t bt_app_hf_create_audio_data(uint8_t *p_buf, uint32_t sz) { - static int sine_phase = 0; - - for (int i = 0; i * 2 + 1 < sz; i++) { - p_buf[i * 2] = sine_int16[sine_phase]; - p_buf[i * 2 + 1] = sine_int16[sine_phase]; - ++sine_phase; - if (sine_phase >= TABLE_SIZE) { - sine_phase -= TABLE_SIZE; + static int index = 0; + uint8_t *data = (uint8_t *)sine_int16; + + for (uint32_t i = 0; i < sz; i++) { + p_buf[i] = data[index++]; + if (index >= TABLE_SIZE_BYTE) { + index -= TABLE_SIZE_BYTE; } } return sz;
test/evp_test.c: fix keygen_test_run() There was a misunderstanding about what it should return. It should return 0 on internal error, but 1 even if the thing it tests fails (the error is determined by |t->err|).
@@ -2504,8 +2504,8 @@ static int keygen_test_run(EVP_TEST *t) { KEYGEN_TEST_DATA *keygen = t->data; EVP_PKEY *pkey = NULL; + int rv = 1; - t->err = NULL; if (EVP_PKEY_keygen(keygen->genctx, &pkey) <= 0) { t->err = "KEYGEN_GENERATE_ERROR"; goto err; @@ -2514,6 +2514,7 @@ static int keygen_test_run(EVP_TEST *t) if (keygen->keyname != NULL) { KEY_LIST *key; + rv = 0; if (find_key(NULL, keygen->keyname, private_keys)) { TEST_info("Duplicate key %s", keygen->keyname); goto err; @@ -2526,15 +2527,15 @@ static int keygen_test_run(EVP_TEST *t) key->key = pkey; key->next = private_keys; private_keys = key; + rv = 1; } else { EVP_PKEY_free(pkey); } - return 1; + t->err = NULL; err: - EVP_PKEY_free(pkey); - return 0; + return rv; } static const EVP_TEST_METHOD keygen_test_method = {
Fix lighting shader
@@ -336,7 +336,7 @@ namespace carto { uniform vec4 u_lightColor; uniform vec3 u_lightDir; uniform vec3 u_viewDir; - vec4 applyLighting(lowp vec4 color, mediump vec3 normal, highp_opt float height, lowp bool sideVertex) { + vec4 applyLighting(lowp vec4 color, mediump vec3 normal, highp_opt float height, bool sideVertex) { if (sideVertex) { lowp vec3 dimmedColor = color.rgb * (1.0 - 0.5 / (1.0 + height * height)); mediump vec3 lighting = max(0.0, dot(normal, u_lightDir)) * u_lightColor.rgb + u_ambientColor.rgb;
xive: Fix ability to clear some EQ flags We could never clear "unconditional notify" and "escalate"
@@ -4013,10 +4013,14 @@ static int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio, /* Always notify flag */ if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY) eq.w0 |= EQ_W0_UCOND_NOTIFY; + else + eq.w0 &= ~EQ_W0_UCOND_NOTIFY; /* Escalation flag */ if (qflags & OPAL_XIVE_EQ_ESCALATE) eq.w0 |= EQ_W0_ESCALATE_CTL; + else + eq.w0 &= ~EQ_W0_ESCALATE_CTL; /* Unconditionally clear the current queue pointer, set * generation to 1 and disable escalation interrupts.
Configure: clean away perl syntax faults The faults aren't fatal (i.e. perl just shrugs), but are curious.
@@ -2260,7 +2260,7 @@ EOF push @{$check_exist{$s}}, $ddest; my $o = $_; $o =~ s/\.rc$/.res/; # Resource configuration - my $o = cleanfile($buildd, $o, $blddir); + $o = cleanfile($buildd, $o, $blddir); $unified_info{sources}->{$ddest}->{$o} = -1; $unified_info{sources}->{$o}->{$s} = -1; } else { @@ -2296,7 +2296,7 @@ EOF push @{$check_exist{$s}}, $ddest; my $o = $_; $o =~ s/\.rc$/.res/; # Resource configuration - my $o = cleanfile($buildd, $o, $blddir); + $o = cleanfile($buildd, $o, $blddir); $unified_info{shared_sources}->{$ddest}->{$o} = -1; $unified_info{sources}->{$o}->{$s} = -1; } elsif ($s =~ /\.ld$/) {
CSMA: Do not depend on sequence numbers to identify packets
@@ -130,7 +130,10 @@ MEMB(packet_memb, struct packet_queue, MAX_QUEUED_PACKETS); MEMB(metadata_memb, struct qbuf_metadata, MAX_QUEUED_PACKETS); LIST(neighbor_list); -static void packet_sent(void *ptr, int status, int num_transmissions); +static void packet_sent(struct neighbor_queue *n, + struct packet_queue *q, + int status, + int num_transmissions); static void transmit_from_queue(void *ptr); /*---------------------------------------------------------------------------*/ static struct neighbor_queue * @@ -161,7 +164,7 @@ backoff_period(void) } /*---------------------------------------------------------------------------*/ static int -send_one_packet(void *ptr) +send_one_packet(struct neighbor_queue *n, struct packet_queue *q) { int ret; int last_sent_ok = 0; @@ -237,7 +240,7 @@ send_one_packet(void *ptr) last_sent_ok = 1; } - packet_sent(ptr, ret, 1); + packet_sent(n, q, ret, 1); return last_sent_ok; } /*---------------------------------------------------------------------------*/ @@ -255,7 +258,7 @@ transmit_from_queue(void *ptr) n->transmissions, list_length(n->packet_queue)); /* Send first packet in the neighbor queue */ queuebuf_to_packetbuf(q->buf); - send_one_packet(n); + send_one_packet(n, q); } } } @@ -388,30 +391,12 @@ tx_ok(struct packet_queue *q, struct neighbor_queue *n, int num_transmissions) } /*---------------------------------------------------------------------------*/ static void -packet_sent(void *ptr, int status, int num_transmissions) +packet_sent(struct neighbor_queue *n, + struct packet_queue *q, + int status, + int num_transmissions) { - struct neighbor_queue *n; - struct packet_queue *q; - - n = ptr; - if(n == NULL) { - return; - } - - /* Find out what packet this callback refers to */ - for(q = list_head(n->packet_queue); - q != NULL; q = list_item_next(q)) { - if(queuebuf_attr(q->buf, PACKETBUF_ATTR_MAC_SEQNO) == - packetbuf_attr(PACKETBUF_ATTR_MAC_SEQNO)) { - break; - } - } - - if(q == NULL) { - LOG_WARN("packet sent: seqno %u not found\n", - packetbuf_attr(PACKETBUF_ATTR_MAC_SEQNO)); - return; - } else if(q->ptr == NULL) { + if(q->ptr == NULL) { LOG_WARN("packet sent: no metadata\n"); return; }
Fix handling of spaces in the path
@@ -115,7 +115,7 @@ def buildXamarinNuget(args, target): if not nuget(args, buildDir, 'pack', - '%s/CartoMobileSDK.%s.nuspec' % (buildDir, target), + '"%s/CartoMobileSDK.%s.nuspec"' % (buildDir, target), '-BasePath', '/' ): return False
update zipatcher for r5455689 Note: mandatory check (NEED_CHECK) was skipped
}, "zipatcher": { "formula": { - "sandbox_id": 366228984, + "sandbox_id": 486868843, "match": "zipatcher" }, "executable": {
Update mdw to cdw in gpcheckperf hostfile test We recently changed the mdw hostname to cdw for inclusive terminology. This spot was missed.
@@ -3854,7 +3854,7 @@ def impl(context, contentid): @given('create a gpcheckperf input host file') def impl(context): - cmd = Command(name='create input host file', cmdStr='echo sdw1 > /tmp/hostfile1;echo mdw >> /tmp/hostfile1;') + cmd = Command(name='create input host file', cmdStr='echo sdw1 > /tmp/hostfile1;echo cdw >> /tmp/hostfile1;') cmd.run(validateAfter=True) @given('backup /etc/hosts file and update hostname entry for localhost')
rtdl: don't insert objects into a scope more than once
@@ -941,6 +941,12 @@ Scope::Scope() : _objects(getAllocator()) { } void Scope::appendObject(SharedObject *object) { + // Don't insert duplicates. + for (auto obj : _objects) { + if (obj == object) + return; + } + _objects.push(object); }
Solve minor bugs in metacall license tool.
@@ -28,17 +28,17 @@ find "$EXEC_PATH" -type f \ # License LICENSE=$(cat <<-END - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + \tLicensed under the Apache License, Version 2.0 (the "License"); + \tyou may not use this file except in compliance with the License. + \tYou may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + \t\thttp://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + \tUnless required by applicable law or agreed to in writing, software + \tdistributed under the License is distributed on an "AS IS" BASIS, + \tWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + \tSee the License for the specific language governing permissions and + \tlimitations under the License. END ) @@ -49,24 +49,25 @@ find "$EXEC_PATH" -type f \ file=$(grep -lrnw {} -e "$COPYRIGHT") linenum=$(grep -n {} -e "$COPYRIGHT" | cut -d : -f 1) - # Swap description and copyright from the header - printf %s\\n $(($linenum + 2))m$(($linenum - 1)) w q | ed -s $file - printf %s\\n $(($linenum + 1))m$(($linenum + 2)) w q | ed -s $file - # Select between comment type expr match "$comment" "\#*" >/dev/null # expr match "$comment" " \**" >/dev/null if [ $? -eq 0 ] then + # Swap description and copyright from the header + printf %s\\n $(($linenum + 2))m$(($linenum - 1)) w q | ed -s $file + printf %s\\n $(($linenum + 1))m$(($linenum + 2)) w q | ed -s $file + lineliteral="i" # Apply prefix depending on comment type - license=$(echo "$LICENSE" | sed "s/^/#\t/g") - # license=$(echo "$LICENSE" | sed "s/^/ \*\t/g") + license=$(echo "$LICENSE" | sed "s/^/#/g") + # license=$(echo "$LICENSE" | sed "s/^/ \*/g") expression="$(($linenum + 3))$lineliteral|$license" + # Write license # TODO: Review $linenum expansion error, remove the pipe to null when solved ex -s -c "$expression" -c x "$file" &> /dev/null fi
Docs: Update CN trans for build-system.rst
@@ -654,7 +654,7 @@ Take care when adding configuration values in this file, as they will be include ``project_include.cmake`` files are used inside ESP-IDF, for defining project-wide build features such as ``esptool.py`` command line arguments and the ``bootloader`` "special app". -Wrappers to redefine or extend existing functions +Wrappers to Redefine or Extend Existing Functions ------------------------------------------------- Thanks to the linker's wrap feature, it is possible to redefine or extend the behavior of an existing ESP-IDF function. To do so, you will need to provide the following CMake declaration in your project's ``CMakeLists.txt`` file: @@ -663,11 +663,11 @@ Thanks to the linker's wrap feature, it is possible to redefine or extend the be target_link_libraries(${COMPONENT_LIB} INTERFACE "-Wl,--wrap=function_to_redefine") -Where ``function_to_redefine`` is the name of the function to redefine or extend. This option will let the linker replace all the calls to ``function_to_redefine`` functions in the binary libraries be changed to calls to ``__wrap_function_to_redefine`` function. Thus, you must define this new symbol in your application. +Where ``function_to_redefine`` is the name of the function to redefine or extend. This option will let the linker replace all the calls to ``function_to_redefine`` functions in the binary libraries with calls to ``__wrap_function_to_redefine`` function. Thus, you must define this new symbol in your application. The linker will provide a new symbol named ``__real_function_to_redefine`` which points to the former implementation of the function to redefine. It can be called from the new implementation, making it an extension of the former one. -This mechanism is shown in the example :example:`build_system/wrappers`. Check its ``README.md`` for more details. +This mechanism is shown in the example :example:`build_system/wrappers`. Check :idf_file:`examples/build_system/wrappers/README.md` for more details. .. _config_only_component:
GCC did not support -mtune for ARM64 before 5.1
ifneq ($(C_COMPILER), PGI) + +ifneq ($(GCCVERSIONGT4), 1) +CCOMMON_OPT += -march=armv8-a +ifneq ($(F_COMPILER), NAG) +FCOMMON_OPT += -march=armv8-a +endif + + +else + + ifeq ($(CORE), ARMV8) CCOMMON_OPT += -march=armv8-a ifneq ($(F_COMPILER), NAG) @@ -138,4 +149,7 @@ FCOMMON_OPT += -march=armv8-a -mtune=emag endif endif endif + +endif + endif \ No newline at end of file
fix for misaligned map if too much shake. Doesn't fix the repeated column while running and shaking, but seems to fix the current column falling out of alignment after too many shakes (map scroll permanently wrong)
@@ -25,7 +25,7 @@ void RefreshScroll_b() { y = scroll_y_max; } - current_column = scroll_x >> 3; + current_column = (scroll_x - scroll_offset_x) >> 3; new_column = x >> 3; current_row = scroll_y >> 3; new_row = y >> 3;
ToolStatus: Improve toolbar graph performance (experimental)
@@ -283,7 +283,7 @@ VOID ToolbarUpdateGraphs( graph->GraphState.TooltipIndex = ULONG_MAX; Graph_MoveGrid(graph->GraphHandle, 1); Graph_Draw(graph->GraphHandle); - Graph_UpdateTooltip(graph->GraphHandle); + //Graph_UpdateTooltip(graph->GraphHandle); InvalidateRect(graph->GraphHandle, NULL, FALSE); } }
Use h2o_httpclient_ctx_t::protocol_selector.ratio to signal h2 prior knowledge
@@ -123,9 +123,13 @@ static void on_pool_connect(h2o_socket_t *sock, const char *errstr, void *data, h2o_iovec_t alpn_proto; if (sock->ssl == NULL || (alpn_proto = h2o_socket_ssl_get_selected_protocol(sock)).len == 0) { + /* 100% means prior knowledge connect, force h2 */ + if (client->ctx->protocol_selector.ratio.http2 == 100) + goto ForceH2; h2o_httpclient__h1_on_connect(client, sock, origin); } else { if (h2o_memis(alpn_proto.base, alpn_proto.len, H2O_STRLIT("h2"))) { + ForceH2: /* detach this socket from the socketpool to count the number of h1 connections correctly */ h2o_socketpool_detach(client->connpool->socketpool, sock); h2o_httpclient__h2_on_connect(client, sock, origin);
Update de_web_plugin_private.h Add Samsung SmartThings manufacturer code
#define VENDOR_120B 0x120B // Used by Heiman #define VENDOR_XAL 0x122A #define VENDOR_OSRAM_STACK 0xBBAA +#define VENDOR_SAMJIN 0x1241 #define ANNOUNCE_INTERVAL 10 // minutes default announce interval
Rework missing-7z logic. * Rework missing-7z logic. The empty missing-7z-archiver target (created when the archiver was found) was causing build problems on Windows. Modified to only create the missing-7z-archiver target when the archiver is not found. Updated DATA_TARGET_DEPENDS to include missing-7z-archiver when ARCHIVER_CMD is not defined. * Fix comment
@@ -119,6 +119,15 @@ ENDIF(WIN32) #----------------------------------------------------------------------------- IF("${ARCHIVER_EXE}" STREQUAL "ARCHIVER_EXE-NOTFOUND") MESSAGE(WARNING "Archiver \"${VISIT_DATA_ARCHIVER_NAME}\" not found, data files cannot be extracted. Try setting VISIT_SEVEN_ZIP_DIR to location of 7-zip package, and VISIT_DATA_ARCHIVER_NAME to correct 7-zip executable name (eg, 7za, p7zip, 7z).") + #-------------------------------------------------------------------------- + # CMake target to handle make-time requests to build targets that require + # 7z archiver yet none was provided. The test "" forces make-time failure. + #-------------------------------------------------------------------------- + ADD_CUSTOM_TARGET(missing-7z-archiver ALL + COMMAND ${CMAKE_COMMAND} -E echo "" + COMMAND ${CMAKE_COMMAND} -E echo "No 7z archiver" + COMMAND ${CMAKE_COMMAND} -E echo "" + COMMAND test "") ELSE("${ARCHIVER_EXE}" STREQUAL "ARCHIVER_EXE-NOTFOUND") SET(ARCHIVER_CMD ${ARCHIVER_EXE}) SET(ARCHIVER_XARGS x -y) # Expand archive args @@ -165,10 +174,14 @@ FOREACH(DATASET_TARGET_FILE ${ARCHIVED_TARGETS}) SET(DATA_TARGET_DEPENDS ${DATA_TARGET_DEPENDS} ${CMAKE_CURRENT_BINARY_DIR}/${DATASET_TARGET}) ENDFOREACH() +IF(NOT DEFINED ARCHIVER_CMD) + set(DATA_TARGET_DEPENDS ${DATA_TARGET_DEPENDS} missing-7z-archiver) +endif() + #----------------------------------------------------------------------------- # Add custom targets: "data" & "testdata" #----------------------------------------------------------------------------- -ADD_CUSTOM_TARGET(data DEPENDS missing-7z-archiver ${DATA_TARGET_DEPENDS}) +ADD_CUSTOM_TARGET(data DEPENDS ${DATA_TARGET_DEPENDS}) ADD_CUSTOM_TARGET(testdata) ADD_DEPENDENCIES(testdata data) @@ -180,21 +193,6 @@ ADD_DEPENDENCIES(testdata data) SET(ARCHIVE_NAME {ANAME}) SET(ARCHIVE_FILES {AFILES}) -#----------------------------------------------------------------------------- -# CMake target to handle make-time requests to build targets that require -# 7z archiver yet none was provided. The test "" forces make-time failure -# whereas test " " (note the space) silently succeeds. -#----------------------------------------------------------------------------- -IF(NOT DEFINED ARCHIVER_CMD) - ADD_CUSTOM_TARGET(missing-7z-archiver ALL - COMMAND ${CMAKE_COMMAND} -E echo "" - COMMAND ${CMAKE_COMMAND} -E echo "No 7z archiver" - COMMAND ${CMAKE_COMMAND} -E echo "" - COMMAND test "") -ELSE(NOT DEFINED ARCHIVER_CMD) - ADD_CUSTOM_TARGET(missing-7z-archiver COMMAND test " ") -ENDIF(NOT DEFINED ARCHIVER_CMD) - #----------------------------------------------------------------------------- # Define convenience command/target to create an archive #-----------------------------------------------------------------------------
Update snapcraft builds to use libmupdf-dev and related packages instead.
@@ -32,22 +32,10 @@ apps: plugs: [avahi-observe, home, network] parts: - mupdf: - plugin: make - make-install-var: prefix - make-parameters: [HAVE_X11=no, HAVE_GLFW=no, HAVE_GLUT=no] - source: https://mupdf.com/downloads/mupdf-1.12.0-source.tar.gz - prime: - - -bin/mu* - - -include/mu* - - -lib/libmu* - - -share/doc/mupdf - - -share/man/man1/mu* main: - after: [mupdf] plugin: autotools configflags: [--with-name-prefix=] source: . -build-packages: [cura-engine, libavahi-client-dev, libgnutls28-dev, libjpeg-dev, libpng-dev, zlib1g-dev] +build-packages: [cura-engine, libavahi-client-dev, libgnutls28-dev, libharfbuzz-dev libjbig2dec0-dev, libjpeg-dev, libmupdf-dev, libopenjp2-7-dev, libpng-dev, zlib1g-dev]
xpath BUGFIX skip non-implemented modules
@@ -7095,6 +7095,10 @@ continue_search: if ((format == LY_VALUE_JSON) && !moveto_mod) { /* search all modules for a single match */ while ((mod = ly_ctx_get_module_iter(ctx, &idx))) { + if (!mod->implemented) { + continue; + } + scnode = lys_find_child(NULL, mod, name, name_len, 0, 0); if (scnode) { /* we have found a match */
GPGME: Use fences for code blocks
@@ -29,18 +29,24 @@ The plugin has been tested on Ubuntu 18.04 with `libgpgme` version 1.10. You can mount the plugin like this: +```sh kdb mount test.ecf /t gpgme "encrypt/key=DDEBEF9EE2DC931701338212DAF635B17F230E8D" +``` Now you can specify a key `user/t/a` and protect its content by using: +```sh kdb set user/t/a kdb setmeta user/t/a crypt/encrypt 1 kdb set user/t/a "secret" +``` The value of `user/t/a` (for this example: "secret") will be stored encrypted. You can still access the original value by using `kdb get`: +```sh kdb get user/t/a +``` ## Configuration @@ -53,8 +59,10 @@ The GPG recipient keys can be specified in two ways: The following example illustrates how multiple GPG recipient keys can be specified: +``` encrypt/key/#0 encrypt/key/#1 +``` ### Textmode
Updated win32 docs
All windows based examples are written in Visual Studio 2017 as "Win32 project" and "Console Application". +### Visual Studio configuration + +It may happen that Visual Studio sets different configuration on first project load and this may lead to wrong build and possible errors. Active configuration must be `Debug` and `Win32` or `x86`. Default active build can be set in project settings. + ## NodeMCU development board For development purposes, NodeMCU v3 board was used with virtual COM port support @@ -25,7 +29,3 @@ Communication with NodeMCU hardware is using virtual files for COM ports. Implementation of low-level part (together with memory allocation for library) is available in [esp_ll_win32.c](/src/system/esp_ll_win32.c) file. > In order to start using this port, user must set the appropriate COM port name when opening a virtual file. Please check implementation file for details. - -### Visual Studio configuration - -It may happen that Visual Studio sets different configuration on first project load and this may lead to wrong build and possible errors. Active configuration must be `Debug` and `Win32` or `x86`. Default active build can be set in project settings.
apps/x509: Fix -CAfile option being neglected with -new or -in
@@ -730,7 +730,7 @@ int x509_main(int argc, char **argv) } if ((x = X509_new_ex(app_get0_libctx(), app_get0_propq())) == NULL) goto end; - if (sno == NULL) { + if (CAfile == NULL && sno == NULL) { sno = ASN1_INTEGER_new(); if (sno == NULL || !rand_serial(NULL, sno)) goto end;
Makefile now gets the list of supported chains automatically
@@ -46,11 +46,13 @@ ifeq ($(CHAIN),) CHAIN=ethereum endif +SUPPORTED_CHAINS=$(shell find makefile_conf/chain/ -type f -name '*.mk'| sed 's/.*\/\(.*\).mk/\1/g' | sort) + # Check if chain is available ifeq ($(shell test -s ./makefile_conf/chain/$(CHAIN).mk && echo -n yes), yes) include ./makefile_conf/chain/$(CHAIN).mk else -$(error Unsupported CHAIN - use ethereum, ropsten, goerli, moonriver, ethereum_classic, expanse, poa, artis_sigma1, artis_tau1, rsk, rsk_testnet, ubiq, wanchain, kusd, musicoin, pirl, akroma, atheios, callisto, ethersocial, ellaism, ether1, ethergem, gochain, mix, reosc, hpb, tomochain, tobalaba, dexon, volta, ewc, webchain, thundercore, bsc, songbird, polygon, shyft) +$(error Unsupported CHAIN - use $(SUPPORTED_CHAINS)) endif ######### @@ -233,4 +235,4 @@ include $(BOLOS_SDK)/Makefile.rules dep/%.d: %.c Makefile listvariants: - @echo VARIANTS CHAIN ethereum ropsten goerli moonriver ethereum_classic expanse poa rsk rsk_testnet ubiq wanchain pirl akroma atheios callisto ethersocial ether1 gochain musicoin ethergem mix ellaism reosc hpb tomochain dexon volta ewc thundercore bsc songbird polygon shyft + @echo VARIANTS CHAIN $(SUPPORTED_CHAINS)
Bindings/Python: drop support for cmake < 3.8
@@ -65,15 +65,11 @@ else () add_cppheaders (HDR_FILES) set_source_files_properties (kdb.i PROPERTIES CPLUSPLUS ON) set_source_files_properties (kdb.i PROPERTIES SWIG_FLAGS "-py3;-extranative") - if (CMAKE_VERSION VERSION_LESS 3.8) - swig_add_module (swig-python python kdb.i) - else (CMAKE_VERSION VERSION_LESS 3.8) swig_add_library ( swig-python LANGUAGE python SOURCES kdb.i TYPE MODULE) - endif (CMAKE_VERSION VERSION_LESS 3.8) swig_link_libraries (swig-python elektra-core elektra-kdb ${PYTHON_LIBRARIES}) set_target_properties (_swig-python PROPERTIES OUTPUT_NAME _kdb) set_target_properties (_swig-python PROPERTIES SKIP_BUILD_RPATH TRUE) @@ -98,15 +94,11 @@ else () add_toolheaders (HDR_FILES) set_source_files_properties (tools.i PROPERTIES CPLUSPLUS ON) set_source_files_properties (tools.i PROPERTIES SWIG_FLAGS "-py3;-extranative") - if (CMAKE_VERSION VERSION_LESS 3.8) - swig_add_module (swig-python-tools python tools.i) - else (CMAKE_VERSION VERSION_LESS 3.8) swig_add_library ( swig-python-tools LANGUAGE python SOURCES tools.i TYPE MODULE) - endif (CMAKE_VERSION VERSION_LESS 3.8) swig_link_libraries (swig-python-tools elektratools ${PYTHON_LIBRARIES}) set_target_properties (_swig-python-tools PROPERTIES OUTPUT_NAME _tools) set_target_properties (_swig-python-tools PROPERTIES SKIP_BUILD_RPATH TRUE) @@ -120,18 +112,13 @@ else () endif (${SWIG_VERSION} VERSION_LESS "4.0.0") # merge module - add_cppheaders (HDR_FILES) set_source_files_properties (merge.i PROPERTIES CPLUSPLUS ON) set_source_files_properties (merge.i PROPERTIES SWIG_FLAGS "-py3;-extranative") - if (CMAKE_VERSION VERSION_LESS 3.8) - swig_add_module (swig-python-merge python merge.i) - else (CMAKE_VERSION VERSION_LESS 3.8) swig_add_library ( swig-python-merge LANGUAGE python SOURCES merge.i TYPE MODULE) - endif (CMAKE_VERSION VERSION_LESS 3.8) swig_link_libraries (swig-python-merge elektra-merge ${PYTHON_LIBRARIES}) set_target_properties (_swig-python-merge PROPERTIES OUTPUT_NAME _merge) set_target_properties (_swig-python-merge PROPERTIES SKIP_BUILD_RPATH TRUE)
doc: add v2.2 to doc menu choices
@@ -189,6 +189,7 @@ else: html_context = { 'current_version': current_version, 'versions': ( ("latest", "/latest/"), + ("2.2", "/2.2/"), ("2.1", "/2.1/"), ("2.0", "/2.0/"), ("1.6.1", "/1.6.1/"),
Test recursive parameter entity is rejected
@@ -4319,6 +4319,31 @@ START_TEST(test_skipped_parameter_entity) } END_TEST +/* Test recursive parameter entity definition rejected in external DTD */ +START_TEST(test_recursive_external_parameter_entity) +{ + const char *text = + "<?xml version='1.0'?>\n" + "<!DOCTYPE root SYSTEM 'http://example.org/dtd.ent' [\n" + "<!ELEMENT root (#PCDATA|a)* >\n" + "]>\n" + "<root></root>"; + ExtFaults dtd_data = { + "<!ENTITY % pe2 '&#37;pe2;'>\n%pe2;", + "Recursive external parameter entity not faulted", + NULL, + XML_ERROR_RECURSIVE_ENTITY_REF + }; + + XML_SetExternalEntityRefHandler(parser, external_entity_faulter); + XML_SetUserData(parser, &dtd_data); + XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS); + expect_failure(text, + XML_ERROR_EXTERNAL_ENTITY_HANDLING, + "Recursive external parameter not spotted"); +} +END_TEST + /* * Namespaces tests. @@ -7470,6 +7495,7 @@ make_suite(void) tcase_add_test(tc_basic, test_group_choice); tcase_add_test(tc_basic, test_standalone_parameter_entity); tcase_add_test(tc_basic, test_skipped_parameter_entity); + tcase_add_test(tc_basic, test_recursive_external_parameter_entity); suite_add_tcase(s, tc_namespace); tcase_add_checked_fixture(tc_namespace,
python: finish removing Python 3.6 CI-Tags: #no_auto_pr
@@ -24,7 +24,6 @@ CLASSIFIERS = [ 'Programming Language :: Python', 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator', 'Topic :: Software Development :: Libraries :: Python Modules', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9',
Reformat JavaScript: Do not reformat broken file
@@ -24,6 +24,7 @@ if [ $# -gt 0 ]; then javascript_files=$(printf "%s\n" "$@" | grep -Ex '.*\.jsx?') [ -z "$javascript_files" ] && exit else - javascript_files=$(git ls-files '*.js' '*.jsx') + # The file `TooltipCreator.js` contains the text `.pragma library`, which is not valid JavaScript code. + javascript_files=$(git ls-files '*.js' '*.jsx' | grep -v 'src/tools/qt-gui/qml/TooltipCreator.js') fi printf "%s\n" "$javascript_files" | sed -nE 's/(.*)/'"'"'\1'"'"'/p' | xargs "${PRETTIER}" --write --
turn on pipefail for sike tests
@@ -97,21 +97,27 @@ sike : sike_patched_bitcode @${MAKE} sike/sike.log sike/word.log : + @set -o pipefail; \ saw sike/verify_word.saw | tee $@ sike/field.log : + @set -o pipefail; \ saw sike/verify_field.saw | tee $@ sike/curve.log : + @set -o pipefail; \ saw sike/verify_curve.saw | tee $@ sike/isogeny.log : + @set -o pipefail; \ saw sike/verify_isogeny.saw | tee $@ sike/sidh.log : + @set -o pipefail; \ saw sike/verify_sidh.saw | tee $@ sike/sike.log : + @set -o pipefail; \ saw sike/verify_sike.saw | tee $@ ###########################################
Feat: Adapt debug log function for MA510
@@ -24,7 +24,7 @@ boatLogConfig.h defines options for compiling. #define __BOATLOG_H__ #include "boattypes.h" -#include "softap_api.h" +#include "qflog_utils.h" //! BOAT LOG LEVEL DEFINITION //! Log level is used to control the detail of log output. @@ -62,7 +62,7 @@ extern const BCHAR * const g_log_level_name_str[]; #else #define BoatLog(level, format,...)\ do{\ - if( level <= BOAT_LOG_LEVEL ) {xy_printf(format,##__VA_ARGS__ );}\ + if( level <= BOAT_LOG_LEVEL ) {QFLOG_MSG(format,##__VA_ARGS__ );}\ }while(0) #endif
start all 3 servers
@@ -813,10 +813,9 @@ u3_http_ef_thou(c3_l sev_l, void u3_http_io_init() { -#if 0 // Lens port { - u3_http *htp_u = c3_malloc(sizeof(*htp_u)); + h2htp *htp_u = c3_malloc(sizeof(*htp_u)); htp_u->sev_l = u3A->sev_l + 2; htp_u->coq_l = 1; @@ -824,16 +823,19 @@ u3_http_io_init() htp_u->sec = c3n; htp_u->lop = c3y; + htp_u->cep_u = 0; + htp_u->hos_u = 0; htp_u->hon_u = 0; htp_u->nex_u = 0; - htp_u->nex_u = u3_Host.htp_u; - u3_Host.htp_u = htp_u; + // XX u3_Host.htp_u + htp_u->nex_u = sev_u; + sev_u = htp_u; } - // Logically secure port. + // Secure port. { - u3_http *htp_u = c3_malloc(sizeof(*htp_u)); + h2htp *htp_u = c3_malloc(sizeof(*htp_u)); htp_u->sev_l = u3A->sev_l + 1; htp_u->coq_l = 1; @@ -841,13 +843,16 @@ u3_http_io_init() htp_u->sec = c3y; htp_u->lop = c3n; + htp_u->cep_u = 0; + htp_u->hos_u = 0; htp_u->hon_u = 0; htp_u->nex_u = 0; - htp_u->nex_u = u3_Host.htp_u; - u3_Host.htp_u = htp_u; + // XX u3_Host.htp_u + htp_u->nex_u = sev_u; + sev_u = htp_u; } -#endif + // Insecure port. { h2htp* htp_u = c3_malloc(sizeof(*htp_u));
Make select_preferred_addr callback optional
@@ -252,7 +252,10 @@ static int conn_call_select_preferred_addr(ngtcp2_conn *conn, ngtcp2_addr *dest) { int rv; - assert(conn->callbacks.select_preferred_addr); + if (!conn->callbacks.select_preferred_addr) { + return 0; + } + assert(conn->remote.settings.preferred_address_present); rv = conn->callbacks.select_preferred_addr(
Also install the clap-gui executable
@@ -14,3 +14,4 @@ add_executable(clap-gui target_link_libraries(clap-gui Qt6::Qml Qt6::Widgets Qt6::Core) set_target_properties(clap-gui PROPERTIES CXX_STANDARD 17) +install(TARGETS clap-gui DESTINATION "${CMAKE_INSTALL_PREFIX}/bin") \ No newline at end of file
Update CHANGELOG.md for syscall dispatching/tcb elimination.
@@ -18,6 +18,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Enclave apps that are built with Make and rely on Open Enclave's pkgconfig must now explicitly include OE crypto wrapper library in linker dependency flags. - See the [Makefile in the helloworld sample](samples/helloworld/enclave/Makefile#L34) for an example. Here `OE_CRYPTO_LIB` is set to `mbedtls` in [parent MakeList file](samples/helloworld/Makefile#L9). +### Changed +- Syscalls are internally dispatched directly to their implementation functions instead of via a switch-case. + This allows the linker to eliminate unused syscalls, leading to slightly reduced TCB. + The command `objdump -t enclave-filename | grep oe_SYS_` can be used to figure out the list of syscalls invoked by + code within the enclave. While most syscall implementations make OCALLs, some may be implemented entirely within + the enclave or may be noops (e.g SYS_futex). + [v0.12.0][v0.12.0_log] --------------
Bugid:17493112:[http2] fix [WhiteScan] [609115] [UNINIT]
@@ -503,6 +503,7 @@ int IOT_HTTP2_Stream_Send(stream_handle_t *handle, stream_data_info_t *info) int header_count = sizeof(static_header) / sizeof(static_header[0]); + memset(&h2_data, 0, sizeof(h2_data)); h2_data.header = (http2_header *)static_header; h2_data.header_count = header_count; h2_data.data = info->stream;
core: rconf: define "PATH_MAX" macro for Windows Insert a simple macro to make `mk_rconf.c` compilable on MSVC. Also I removed <unistd.h> from the header list, since it should be included via <mk_core/unistd.h> for portability reasons.
#include <string.h> #include <sys/types.h> #include <sys/stat.h> -#include <unistd.h> #include <glob.h> #include <mk_core/mk_rconf.h> #include <mk_core/mk_string.h> #include <mk_core/mk_list.h> +#ifdef _MSC_VER +#define PATH_MAX MAX_PATH +#endif + /* Raise a configuration schema error */ static void mk_config_error(const char *path, int line, const char *msg) {
py/objarray.h: Add mp_obj_memoryview_init() helper function.
@@ -55,4 +55,14 @@ typedef struct _mp_obj_array_t { void *items; } mp_obj_array_t; +#if MICROPY_PY_BUILTINS_MEMORYVIEW +static inline void mp_obj_memoryview_init(mp_obj_array_t *self, size_t typecode, size_t offset, size_t len, void *items) { + self->base.type = &mp_type_memoryview; + self->typecode = typecode; + self->free = offset; + self->len = len; + self->items = items; +} +#endif + #endif // MICROPY_INCLUDED_PY_OBJARRAY_H