api/trace: remove debugging.
@@ -396,7 +396,6 @@ error: static void cb_traces(mk_request_t *request, void *data) { - printf("CANT TRACES TIME\n"); flb_sds_t out_buf; msgpack_sbuffer mp_sbuf; msgpack_packer mp_pck; @@ -458,10 +457,8 @@ static void cb_traces(mk_request_t *request, void *data) goto unpack_error; } - printf("PACK MAP: 2\n"); msgpack_pack_map(&mp_pck, 2); - printf("\tPACK MAP: INPUTS[%d]\n", inputs->size); msgpack_pack_str_with_body(&mp_pck, "inputs", strlen("inputs")); msgpack_pack_map(&mp_pck, inputs->size); @@ -545,10 +542,7 @@ unpack_error: /* Perform registration */ int api_v1_trace(struct flb_hs *hs) { - printf("REGISTER TRACING\n"); - if (mk_vhost_handler(hs->ctx, hs->vid, "/api/v1/trace/*", cb_trace, hs) == -1) { - printf("UNABLE TO REGISTER FOR TRACING\n"); - } - mk_vhost_handler(hs->ctx, hs->vid, "/api/v1/trace", cb_traces, hs); + mk_vhost_handler(hs->ctx, hs->vid, "/api/v1/traces/", cb_traces, hs); + mk_vhost_handler(hs->ctx, hs->vid, "/api/v1/trace/*", cb_trace, hs); return 0; }
bugID:19181246:[linkkitapp] enlarge reset task stack to 6144 for some extreme situations
@@ -280,11 +280,7 @@ extern int awss_report_reset(); static void do_awss_reset() { #ifdef WIFI_PROVISION_ENABLED -#if defined(SUPPORT_ITLS) - aos_task_new("reset", (void (*)(void *))awss_report_reset, NULL, 4096); // stack taken by iTLS is more than taken by TLS. -#else - aos_task_new("reset", (void (*)(void *))awss_report_reset, NULL, 4096); -#endif + aos_task_new("reset", (void (*)(void *))awss_report_reset, NULL, 6144); #endif aos_post_delayed_action(2000, linkkit_reset, NULL); }
webp-container-spec.txt: update 'key words' text. RFC 2119 was updated by RFC 8174; use the text from there. Also change INFORMATIVE to informative to avoid confusion with the key words in the RFCs.
@@ -46,9 +46,10 @@ for: * **Animation.** An image may have multiple frames with pauses between them, making it an animation. -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", -"SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this -document are to be interpreted as described in [RFC 2119][]. +The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", +"SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and "OPTIONAL" in this +document are to be interpreted as described in BCP 14 [RFC 2119][] [RFC 8174][] +when, and only when, they appear in all capitals, as shown here. Bit numbering in chunk diagrams starts at `0` for the most significant bit ('MSB 0') as described in [RFC 1166][]. @@ -531,7 +532,7 @@ Reserved (Rsv): 2 bits Pre-processing (P): 2 bits -: These INFORMATIVE bits are used to signal the pre-processing that has +: These _informative_ bits are used to signal the pre-processing that has been performed during compression. The decoder can use this information to e.g. dither the values or smooth the gradients prior to display. @@ -817,3 +818,4 @@ RIFF/WEBP [metadata]: https://web.archive.org/web/20180919181934/http://www.metadataworkinggroup.org/pdf/mwg_guidance.pdf [rfc 1166]: https://datatracker.ietf.org/doc/html/rfc1166 [rfc 2119]: https://datatracker.ietf.org/doc/html/rfc2119 +[rfc 8174]: https://datatracker.ietf.org/doc/html/rfc8174
NetKVM: SM: Move completion check to a separate function
@@ -51,11 +51,16 @@ public: { NETKVM_ASSERT(m_State != FlowState::Stopped); LONG value = m_Counter.Release(NumItems); - if (value == StoppedMask) + CheckCompletion(value); + } + + virtual void CheckCompletion(LONG Value) + { + if (Value == StoppedMask) { CompleteStopping(); } - else if (value) + else if (Value) { // common case, data transfer (StoppedMask not set) // pausing or completing not last packet during pausing (StoppedMask set) @@ -63,7 +68,7 @@ public: else { // illegal case - NETKVM_ASSERT(value != 0); + NETKVM_ASSERT(Value != 0); } }
docs: Fix macos clang-format docs Type: docs
@@ -15,7 +15,7 @@ This is a first attempt to support Cross compilation of VPP on MacOS for develop $ pip3 install ply pyyaml jsonschema $ brew install gnu-sed pkg-config ninja crosstool-ng -* You'll also need to install ``clang-format 10.0.0`` to be able to ``make checkstyle``. This can be done with :ref:`this doc<install_clang_format_10_0_0>` +* You'll also need to install ``clang-format 11.0.0`` to be able to ``make checkstyle``. This can be done with :ref:`this doc<install_clang_format_11_0_0>` * You should link the binaries to make them available in your path with their original names e.g. : .. code-block:: console @@ -51,20 +51,21 @@ This should build vpp on MacOS Good luck :) -.. _install_clang_format_10_0_0 : +.. _install_clang_format_11_0_0 : -Installing clang-format 10.0.0 +Installing clang-format 11.0.0 ------------------------------ In order to install clang-format on macos : .. code-block:: bash - $ wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang-10.0.0.src.tar.xz - $ tar -xvf clang+llvm-10.0.0-x86_64-apple-darwin.tar.xz - $ mv clang+llvm-10.0.0-x86_64-apple-darwin /usr/local/Cellar/ - $ sudo ln -s ../Cellar/clang+llvm-10.0.0-x86_64-apple-darwin/bin/clang-format /usr/local/bin/clang-format - $ sudo ln -s ../Cellar/clang+llvm-10.0.0-x86_64-apple-darwin/bin/clang-format /usr/local/bin/clang-format-10 - $ sudo ln -s ../Cellar/clang+llvm-10.0.0-x86_64-apple-darwin/share/clang/clang-format-diff.py /usr/local/bin/clang-format-diff-10 + brew install clang-format@11 + wget https://raw.githubusercontent.com/llvm/llvm-project/llvmorg-11.0.0/clang/tools/clang-format/clang-format-diff.py \ + -O /usr/local/Cellar/clang-format@11/11.1.0/bin/clang-format-diff.py + chmod +x /usr/local/Cellar/clang-format@11/11.1.0/bin/clang-format-diff.py + ln -s /usr/local/Cellar/clang-format@11/11.1.0/bin/clang-format-diff.py /usr/local/bin/clang-format-diff-11 + ln -s /usr/local/Cellar/clang-format@11/11.1.0/bin/clang-format-11 /usr/local/bin/clang-format + Source `Clang website <https://releases.llvm.org/download.html#git>`_
Add one more element Turns out there could be at most one more consumption. This is a fixup to
@@ -684,7 +684,7 @@ static void on_connection_ready(struct st_h2o_http1client_t *client) return; } - h2o_iovec_t reqbufs[4]; + h2o_iovec_t reqbufs[5]; /* 5 should be the maximum possible elements used */ size_t reqbufcnt = 0; if (props.proxy_protocol->base != NULL) reqbufs[reqbufcnt++] = *props.proxy_protocol; @@ -708,7 +708,7 @@ static void on_connection_ready(struct st_h2o_http1client_t *client) if (client->_is_chunked) { assert(body.base != NULL); size_t bytes; - assert(PTLS_ELEMENTSOF(reqbufs) - reqbufcnt >= 3); + assert(PTLS_ELEMENTSOF(reqbufs) - reqbufcnt >= 3); /* encode_chunk could write to 3 additional elements */ reqbufcnt += encode_chunk(client, reqbufs + reqbufcnt, body, &bytes); client->super.bytes_written.body = bytes; } else if (body.base != NULL) {
system/spi: Fix NxStyle issues
@@ -71,10 +71,12 @@ int spicmd_exch(FAR struct spitool_s *spitool, int argc, FAR char **argv) { 0 }; + uint8_t rxdata[MAX_XDATA] = { 0 }; + uint8_t *txdatap = txdata; struct spi_trans_s trans; struct spi_sequence_s seq; @@ -111,7 +113,6 @@ int spicmd_exch(FAR struct spitool_s *spitool, int argc, FAR char **argv) return ERROR; } - while (argndx < argc) { FAR uint8_t *a = (uint8_t *)argv[argndx]; @@ -119,7 +120,7 @@ int spicmd_exch(FAR struct spitool_s *spitool, int argc, FAR char **argv) { if ((*(a + 1) == 0) || !ISHEX(*a) || !ISHEX(*(a + 1))) { - /* Uneven number of characters or illegal char .... that's an error */ + /* Uneven number of characters or illegal character error */ spitool_printf(spitool, g_spiincompleteparam, argv[0]); return ERROR;
Correct description of BN_mask_bits CLA: trivial Correct right shift to left shift. The pseudo code `a&=~((~0)>>n)` means "get the higher n bits of a", but the crypto lib actually gives the lower n bits.
@@ -33,7 +33,7 @@ error occurs if B<a> is shorter than B<n> bits. BN_is_bit_set() tests if bit B<n> in B<a> is set. BN_mask_bits() truncates B<a> to an B<n> bit number -(C<a&=~((~0)E<gt>E<gt>n)>). An error occurs if B<a> already is +(C<a&=~((~0)E<lt>E<lt>n)>). An error occurs if B<a> already is shorter than B<n> bits. BN_lshift() shifts B<a> left by B<n> bits and places the result in
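For intuition, here is the corrected mask on plain 64-bit integers (an illustrative sketch only, not the OpenSSL BIGNUM API): keeping the lowest n bits is `a &= ~(~0UL << n)`, which matches what BN_mask_bits actually does, whereas the old `~((~0) >> n)` form would keep the highest n bits instead.

    #include <assert.h>

    /* Plain-integer analogue of the corrected pseudo code (illustrative only). */
    static unsigned long mask_low_bits(unsigned long a, unsigned n)
    {
        assert(n < 64);
        return a & ~(~0UL << n);   /* keep only the lowest n bits */
    }

    int main(void)
    {
        assert(mask_low_bits(0xABCDUL, 8) == 0xCDUL);
        return 0;
    }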
Fix meson.build codestyle
@@ -156,18 +156,18 @@ tests = [ ['test_control_event_serialize', [ 'tests/test_control_msg_serialize.c', 'src/control_msg.c', - 'src/util/str_util.c' + 'src/util/str_util.c', ]], ['test_device_event_deserialize', [ 'tests/test_device_msg_deserialize.c', - 'src/device_msg.c' + 'src/device_msg.c', ]], ['test_queue', [ 'tests/test_queue.c', ]], ['test_strutil', [ 'tests/test_strutil.c', - 'src/util/str_util.c' + 'src/util/str_util.c', ]], ]
Cirrus: Add test workaround for FreeBSD build This update closes
@@ -11,12 +11,13 @@ task: script: - mkdir build && cd build - # - The tests for the process plugin and library stall on FreeBSD: https://issues.libelektra.org/2323 # - The test for the network plugin fails on FreeBSD: https://issues.libelektra.org/2322 - - cmake -GNinja -DPLUGINS='ALL;-process;-network' -DTARGET_PLUGIN_FOLDER="" -DCMAKE_SKIP_INSTALL_RPATH=ON .. + - cmake -GNinja -DPLUGINS='ALL;-network' -DTARGET_PLUGIN_FOLDER="" -DCMAKE_SKIP_INSTALL_RPATH=ON .. - ninja - output="$(ninja install 2>&1)" || printf '%s' "$output" tests_script: - - cd build && ctest --output-on-failure -E 'pluginprocess' + # Work around stalled process plugin and library problems on FreeBSD: https://issues.libelektra.org/2323 + - sudo mount -t fdescfs fdesc /dev/fd + - cd build && ninja run_all - kdb run_all
Ordering of globals
@@ -24,8 +24,8 @@ globals: 0x141D682D8: g_ResidentResourceManager 0x141D6A7E0: g_CullingManager 0x141D6A800: g_TaskManager - 0x141D6FA90: g_ResourceManager 0x141D6F6F0: g_EnvRenderController # Client::Graphics::Environment::EnvRenderController not a ptr + 0x141D6FA90: g_ResourceManager 0x141D81AA0: g_OcclusionCullingManager 0x141D81AD8: g_OffscreenRenderingManager 0x141D81AE0: g_RenderModelLinkedListStart @@ -38,15 +38,14 @@ globals: 0x141D8DF00: g_PlayerMoveController 0x141D8E500: g_AtkStage 0x141D8EB68: g_LayoutWorld + 0x141DAD640: g_InventoryManager 0x141DB1D30: g_BattleCharaStore # this is a struct/object containing a list of all battlecharas (0x100) and the memory ptrs below 0x141DB2050: g_BattleCharaMemory 0x141DB2058: g_CompanionMemory 0x141DB2080: g_ActorList 0x141DB2DC0: g_ActorListEnd 0x141DB6100: g_Client::Game::UI::Telepo - 0x141DD6F38: g_EventFramework - 0x141DDAA50: g_GroupManager # not a pointer - 0x141DDE7C0: g_GroupManager_2 #not a pointer + 0x141DBEBC8: g_ActiveDirector 0x141D6A770: g_CrossRealmGroupManager #pointer 0x141DB6ED8: g_TrustGroupManager #pointer 0x141DE2D80: g_ClientObjectManager @@ -55,8 +54,10 @@ globals: 0x141DC3000: g_LimitBreakController 0x141DC3010: g_TitleController 0x141DC3018: g_TitleList - 0x141DBEBC8: g_ActiveDirector - 0x141D8E490: g_LocalPlayer + 0x141DD6F38: g_EventFramework + 0x141DDAA50: g_GroupManager # not a pointer + 0x141DDE7C0: g_GroupManager_2 # not a pointer + 0x141DE2D80: g_ClientObjectManager functions: # ffxivstring is just their implementation of std::string presumably there are more ctors etc
Fix doc of region example in glossary
@@ -24,7 +24,7 @@ Glossary are 0-based, half-open intervals, i.e., the position 10,000 is part of the interval, but 20,000 is not. An exception are :term:`samtools` compatible region strings such as - 'chr1:10000:20000', which are closed, i.e., both positions 10,000 + 'chr1:10000-20000', which are closed, i.e., both positions 10,000 and 20,000 are part of the interval. column
brick: update getstate command description
@@ -28,7 +28,7 @@ func (c *getStateAccount) Usage() string { } func (c *getStateAccount) Describe() string { - return "create an account with a given amount of balance" + return "get the current state of an account" } func (c *getStateAccount) Validate(args string) error { @@ -44,7 +44,7 @@ func (c *getStateAccount) Validate(args string) error { func (c *getStateAccount) parse(args string) (string, string, error) { splitArgs := context.SplitSpaceAndAccent(args, false) if len(splitArgs) < 1 { - return "", "", fmt.Errorf("need an arguments. usage: %s", c.Usage()) + return "", "", fmt.Errorf("missing arguments. usage: %s", c.Usage()) } expectedResult := "" @@ -72,7 +72,7 @@ func (c *getStateAccount) Run(args string) (string, uint64, []*types.Event, erro if expectedResult == strRet { return "state compare successfully", 0, nil, nil } else { - return "", 0, nil, fmt.Errorf("state compre fail. Expected: %s, Actual: %s", expectedResult, strRet) + return "", 0, nil, fmt.Errorf("state compare failed. Expected: %s, Actual: %s", expectedResult, strRet) } } }
[Kernel] Fix the object find issue when MODULE is enabled
* 2007-01-28 Bernard rename RT_OBJECT_Class_Static to RT_Object_Class_Static * 2010-10-26 yi.qiu add module support in rt_object_allocate and rt_object_free * 2017-12-10 Bernard Add object_info enum. + * 2018-01-25 Bernard Fix the object find issue when enable MODULE. */ #include <rtthread.h> @@ -254,7 +255,7 @@ void rt_object_init(struct rt_object *object, #ifdef RT_USING_MODULE /* get module object information */ information = (rt_module_self() != RT_NULL) ? - &rt_module_self()->module_object[type] : &rt_object_container[type]; + &rt_module_self()->module_object[type] : rt_object_get_information(type); #else /* get object information */ information = rt_object_get_information(type); @@ -329,7 +330,7 @@ rt_object_t rt_object_allocate(enum rt_object_class_type type, const char *name) * module object should be managed by kernel object container */ information = (rt_module_self() != RT_NULL && (type != RT_Object_Class_Module)) ? - &rt_module_self()->module_object[type] : &rt_object_container[type]; + &rt_module_self()->module_object[type] : rt_object_get_information(type); #else /* get object information */ information = rt_object_get_information(type);
Test reading from lua tables via API functions
@@ -26,6 +26,7 @@ module Foreign.LuaTest (tests) where import Prelude hiding (concat) import Data.ByteString (ByteString) +import Data.Monoid ((<>)) import Foreign.Lua import Test.HsLua.Util (pushLuaExpr) import Test.Tasty (TestTree, testGroup) @@ -66,4 +67,24 @@ tests = testGroup "lua integration tests" -- delete references unref registryindex idx1 unref registryindex idx2 + + , testCase "table reading" . + runLua $ do + openbase + let tableStr = "{firstname = 'Jane', surname = 'Doe'}" + pushLuaExpr $ "setmetatable(" <> tableStr <> ", {'yup'})" + getfield (-1) "firstname" + firstname <- peek (-1) <* pop 1 + liftIO (assert (firstname == Success ("Jane" :: ByteString))) + + push ("surname" :: ByteString) + rawget (-2) + surname <- peek (-1) <* pop 1 + liftIO (assert (surname == Success ("Doe" :: ByteString))) + + hasMetaTable <- getmetatable (-1) + liftIO (assert hasMetaTable) + rawgeti (-1) 1 + mt1 <- peek (-1) <* pop 1 + liftIO (assert (mt1 == Success ("yup" :: ByteString))) ]
use long for tenmul_dims
@@ -1017,12 +1017,12 @@ void md_zfloat2double(unsigned int D, const long dims[D], complex double* dst, c */ void md_tenmul_dims(unsigned int D, long max_dims[D], const long out_dims[D], const long in1_dims[D], const long in2_dims[D]) { - md_max_dims(D, ~0u, max_dims, in1_dims, out_dims); + md_max_dims(D, ~0lu, max_dims, in1_dims, out_dims); long max2_dims[D]; - md_max_dims(D, ~0u, max2_dims, in2_dims, out_dims); + md_max_dims(D, ~0lu, max2_dims, in2_dims, out_dims); - assert(md_check_compat(D, 0u, max_dims, max2_dims)); + assert(md_check_compat(D, 0lu, max_dims, max2_dims)); }
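The literal matters because `~0u` has type unsigned int: after conversion to the 64-bit unsigned long flag parameter only the low 32 bits are set, which would silently restrict the operation to the first 32 dimensions. A small stand-alone illustration (assumes 32-bit int and 64-bit long, as on typical LP64 targets):

    #include <stdio.h>

    int main(void)
    {
        unsigned long from_uint  = ~0u;   /* 0x00000000ffffffff on LP64: 32 flag bits */
        unsigned long from_ulong = ~0lu;  /* 0xffffffffffffffff: all 64 flag bits     */

        printf("%lx\n%lx\n", from_uint, from_ulong);
        return 0;
    }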
[mod_cml] use cached time from log_epoch_secs
@@ -279,7 +279,7 @@ int cache_parse_lua(request_st * const r, plugin_data * const p, const buffer * if (ret == 0) { const buffer *vb = http_header_response_get(r, HTTP_HEADER_LAST_MODIFIED, CONST_STR_LEN("Last-Modified")); if (NULL == vb) { /* no Last-Modified specified */ - if (0 == mtime) mtime = time(NULL); /* default last-modified to now */ + if (0 == mtime) mtime = log_epoch_secs; /* default last-modified to now */ vb = http_response_set_last_modified(r, mtime); }
Make '!=' not hard coded as evaluating to '==' for enum unions. Oops.
@@ -386,7 +386,12 @@ comparecomplex(Flattenctx *s, Node *n, Op op) r = mkexpr(n->loc, Outag, rval(s, n->expr.args[1]), NULL); l->expr.type = mktype(n->loc, Tyuint32); r->expr.type = mktype(n->loc, Tyuint32); + if (op == Oeq) e = mkexpr(n->loc, Oueq, l, r, NULL); + else if (op == One) + e = mkexpr(n->loc, Oune, l, r, NULL); + else + fatal(n, "unsupported operator %s for enum union", opstr[op]); e->expr.type = mktype(n->loc, Tybool); return e; }
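Independent of the compiler internals above, the underlying idea is that both equality and inequality of enum-style unions reduce to comparing the tag word; only the operator differs, so '!=' must not be folded into the '==' path. A minimal C sketch of that reduction (hypothetical types, not the mc compiler's data structures):

    #include <stdbool.h>
    #include <stdint.h>

    struct tagged_union {
        uint32_t tag;   /* which variant is active; payload would follow */
    };

    static bool tag_eq(const struct tagged_union *l, const struct tagged_union *r)
    {
        return l->tag == r->tag;
    }

    static bool tag_ne(const struct tagged_union *l, const struct tagged_union *r)
    {
        return l->tag != r->tag;   /* distinct from tag_eq, unlike the old behaviour */
    }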
Travis: Fix caching directive The `directories` key requires a list of directories (not a scalar) as value. See also:
@@ -4,10 +4,10 @@ osx_image: xcode10.1 cache: directories: - $HOME/.m2 - $HOME/.cabal - $HOME/elektra-cabal-sandbox - $HOME/Library/Caches/Homebrew + - $HOME/.m2 + - $HOME/.cabal + - $HOME/elektra-cabal-sandbox + - $HOME/Library/Caches/Homebrew # don't cache our own libraries generated in the build # so they always get rebuilt properly and to avoid init issues
Disable -Wpsabi on GCC 7. I can't find any documentation on -Wpsabi until GCC 8, but I'm seeing the warning on GCC 7, so I guess it was just undocumented for a while.
@@ -433,7 +433,7 @@ HEDLEY_STATIC_ASSERT(sizeof(simde_float64) == 8, "Unable to find 64-bit floating #if \ HEDLEY_HAS_WARNING("-Wtautological-compare") || \ - HEDLEY_GCC_VERSION_CHECK(8,0,0) + HEDLEY_GCC_VERSION_CHECK(7,0,0) # if defined(__cplusplus) # if (__cplusplus >= 201402L) # define SIMDE_TAUTOLOGICAL_COMPARE_(expr) \
Enable basic built-in modules for OPENMV2.
+include("$(MPY_DIR)/extmod/uasyncio/manifest.py") +freeze ("$(MPY_LIB_DIR)/", "bno055.py") +freeze ("$(MPY_LIB_DIR)/", "modbus.py") +freeze ("$(MPY_LIB_DIR)/", "mqtt.py") +freeze ("$(MPY_LIB_DIR)/", "mutex.py") +freeze ("$(MPY_LIB_DIR)/", "pid.py") +freeze ("$(MPY_LIB_DIR)/", "rpc.py") +freeze ("$(MPY_LIB_DIR)/", "rtsp.py") +freeze ("$(MPY_LIB_DIR)/", "ssd1306.py") +freeze ("$(MPY_LIB_DIR)/", "tb6612.py") +freeze ("$(MPY_LIB_DIR)/", "vl53l1x.py")
rewrite 'for' loop initial declarations. 'for' loop initial declarations are not allowed by some (old) compilers.
@@ -931,7 +931,8 @@ static inline int op_super( mrbc_vm *vm, mrbc_value *regs ) mrbc_value value = regs[a+1]; mrbc_dup( &value ); int argc = value.array->n_stored; - for( int i=0 ; i<argc ; i++ ){ + int i; + for( i = 0; i < argc; i++ ) { mrbc_release( &regs[a+1+i] ); regs[a+1+i] = value.array->data[i]; } @@ -1021,7 +1022,8 @@ static inline int op_enter( mrbc_vm *vm, mrbc_value *regs ) int rest_size = argc - m1 - o; if( rest_size < 0 ) rest_size = 0; mrb_value rest = mrbc_array_new(vm, rest_size); - for( int i = 0 ; i<rest_size ; i++ ){ + int i; + for( i = 0; i < rest_size; i++ ) { rest.array->data[i] = regs[1+m1+o+i]; } rest.array->n_stored = rest_size; @@ -1757,7 +1759,8 @@ static inline int op_arycat( mrbc_vm *vm, mrbc_value *regs ) mrbc_array_resize(&regs[a], new_size); } - for( int i=0 ; i<size_2 ; i++ ){ + int i; + for( i = 0; i < size_2; i++ ) { mrbc_dup( &regs[a+1].array->data[i] ); regs[a].array->data[size_1+i] = regs[a+1].array->data[i]; } @@ -1857,7 +1860,8 @@ static inline int op_apost( mrbc_vm *vm, mrbc_value *regs ) int ary_size = len-pre-post; regs[a] = mrbc_array_new(vm, ary_size); // copy elements - for( int i=0 ; i<ary_size ; i++ ){ + int i; + for( i = 0; i < ary_size; i++ ) { regs[a].array->data[i] = src.array->data[pre+i]; mrbc_dup( &regs[a].array->data[i] ); }
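For reference, the general rewrite pattern (a generic sketch, not the mruby/c sources): C99 allows declaring the loop counter in the `for` header, while C89/C90 compilers require the declaration at the top of the enclosing block.

    /* C99 style, rejected by strict C89 compilers:
     *     for (int i = 0; i < n; i++) { sum += data[i]; }
     */
    static int sum_c89(const int *data, int n)
    {
        int sum = 0;
        int i;              /* declaration hoisted to the top of the block */

        for (i = 0; i < n; i++) {
            sum += data[i];
        }
        return sum;
    }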
enable bob and tls by default for openwrt
@@ -19,12 +19,12 @@ config KADNODE_ENABLE_DNS config KADNODE_ENABLE_TLS bool "Enable TLS authentication support" depends on PACKAGE_kadnode - default n + default y config KADNODE_ENABLE_BOB bool "Enable BOB authentication support" depends on PACKAGE_kadnode - default n + default y config KADNODE_ENABLE_UPNP bool "Enable UPnP support to add port forwardings on other routers"
macrecovery: Switch Latest Version to Mac-B4831CEBD52A0C4C This makes macrecovery download Ventura instead of Monterey.
@@ -44,5 +44,5 @@ Default version ./macrecovery.py -b Mac-E43C1C25D4880AD6 -m <real MLB> -os default (newer) Latest version -./macrecovery.py -b Mac-E43C1C25D4880AD6 -m 00000000000000000 -os latest -./macrecovery.py -b Mac-E43C1C25D4880AD6 -m <real MLB> -os latest +./macrecovery.py -b Mac-B4831CEBD52A0C4C -m 00000000000000000 -os latest +./macrecovery.py -b Mac-B4831CEBD52A0C4C -m <real MLB> -os latest
Add a signal handler to ippeveprinter.
@@ -342,6 +342,9 @@ static void run_printer(ippeve_printer_t *printer); static int show_media(ippeve_client_t *client); static int show_status(ippeve_client_t *client); static int show_supplies(ippeve_client_t *client); +#ifndef _WIN32 +static void signal_handler(int signum); +#endif // !_WIN32 static char *time_string(time_t tv, char *buffer, size_t bufsize); static void usage(int status) _CUPS_NORETURN; static int valid_doc_attributes(ippeve_client_t *client); @@ -364,6 +367,9 @@ static int KeepFiles = 0, /* Keep spooled job files? */ Verbosity = 0; /* Verbosity level */ static const char *PAMService = NULL; /* PAM service */ +#ifndef _WIN32 +static int StopPrinter = 0;/* Stop the printer server? */ +#endif // !_WIN32 /* @@ -7641,6 +7647,15 @@ run_printer(ippeve_printer_t *printer) /* I - Printer */ ippeve_client_t *client; /* New client */ +#ifndef _WIN32 + /* + * Set signal handlers for SIGINT and SIGTERM... + */ + + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); +#endif // !_WIN32 + /* * Setup poll() data for the DNS-SD service socket and IPv4/6 listeners... */ @@ -7670,6 +7685,11 @@ run_printer(ippeve_printer_t *printer) /* I - Printer */ break; } +#ifndef _WIN32 + if (StopPrinter) + break; +#endif // !_WIN32 + if (polldata[0].revents & POLLIN) { if ((client = create_client(printer, printer->ipv4)) != NULL) @@ -8341,6 +8361,21 @@ show_supplies( } +#ifndef _WIN32 +/* + * 'signal_handler()' - Handle termination signals. + */ + +static void +signal_handler(int signum) /* I - Signal number (not used) */ +{ + (void)signum; + + StopPrinter = 1; +} +#endif // !_WIN32 + + /* * 'time_string()' - Return the local time in hours, minutes, and seconds. */
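A generic sketch of the same pattern (not the ippeveprinter sources): the handler only sets a flag, and the main loop polls that flag between blocking calls. `volatile sig_atomic_t` is the portable type for such a flag.

    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t stop_requested = 0;

    static void handle_stop(int signum)
    {
        (void)signum;
        stop_requested = 1;      /* only async-signal-safe work in the handler */
    }

    int main(void)
    {
        signal(SIGINT, handle_stop);
        signal(SIGTERM, handle_stop);

        while (!stop_requested) {
            sleep(1);            /* stand-in for the poll()-based server loop */
        }
        return 0;
    }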
API: Format documentation about namespaces
@@ -121,33 +121,39 @@ which appears like a tree on its own. See @ref cascading in the documentation of ksLookupByName() on how the selection of keys works. -- The `spec` tree\n +- The `spec` tree + This tree specifies how the lookup should take place and also allows us to define defaults or document a key. The metadata of a key contains this information: - + `override/#`: use these keys *in favour* of the key itself (note that - `#` is the syntax for arrays, e.g. `#0` for the first element, - `#10` for the 11th and so on) - + `namespace/#`: instead of using all namespaces in the predefined order, + + - `override/#`: use these keys *in favour* of the key itself (note that + - `#` is the syntax for arrays, e.g. `#0` for the first element, + -`#10` for the 11th and so on) + - `namespace/#`: instead of using all namespaces in the predefined order, one can specify which namespaces should be searched in which order - + `fallback/#`: when no key was found in any of the (specified) namespaces + - `fallback/#`: when no key was found in any of the (specified) namespaces the `fallback`-keys will be searched - + `default`: this value will be used if nothing else was found + - `default`: this value will be used if nothing else was found + +- The `proc` tree -- The `proc` tree\n Is the only read-only tree. The configuration does not stem from the [KDB (Key Database)](@ref kdb), but any other source, e.g. command-line arguments or environment. -- The `dir` tree\n +- The `dir` tree + Allows us to have a per-directory overwrite of configuration files, e.g. for project specific settings. -- The `user` tree\n +- The `user` tree + Used to store user-specific configurations, like the personal settings of a user to certain programs. The user subtree will always be favoured if present (except for security concerns the user subtree may not be considered). -- The `system` tree\n +- The `system` tree + It is provided to store system-wide configuration keys, that is, the last fallback for applications but the only resort for daemons and system services.
sometimes pthread_t is a long int
@@ -45,7 +45,7 @@ static void iodine_join_io_thread(void) { if (fio_atomic_sub(&sock_io_thread, 1) == 0) { sock_io_thread = 0; pthread_join(sock_io_pthread, NULL); - sock_io_pthread = NULL; + sock_io_pthread = (pthread_t)NULL; } }
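POSIX only guarantees that pthread_t is some unspecified type (it may be arithmetic, a pointer, or even a struct), so neither `NULL` nor a cast sentinel is fully portable; tracking liveness in a separate flag sidesteps the question. A hedged sketch of that alternative (illustrative names, not the iodine sources):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_t io_thread;
    static bool io_thread_running = false;   /* pthread_t has no portable "null" value */

    static void join_io_thread(void)
    {
        if (io_thread_running) {
            pthread_join(io_thread, NULL);
            io_thread_running = false;
        }
    }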
add '#line 0' to the compute shader prefix
@@ -116,7 +116,8 @@ const char* lovrShaderFragmentSuffix = "" "}"; const char* lovrShaderComputePrefix = "" -"#version 420 \n"; +"#version 420 \n" +"#line 0 \n"; const char* lovrShaderComputeSuffix = "" "void main() { \n"
Resolved issue with quotes in exception check
@@ -2914,7 +2914,7 @@ def test_compare(test_case): try: model.compare(**kwargs) except ImportError as ie: - pytest.xfail(str(ie)) if str(ie) == "No module named 'widget'" \ + pytest.xfail(str(ie)) if str(ie) == "No module named widget" \ else pytest.fail(str(ie))
Example orion/client: use the same LPM configuration as in orion/ip64-router
/*---------------------------------------------------------------------------*/ #ifndef PROJECT_CONF_H_ #define PROJECT_CONF_H_ + +/* Prevent the router from dropping below LPM2 to avoid RAM overflow */ +#define LPM_CONF_MAX_PM 0 + /*---------------------------------------------------------------------------*/ /* Use either the cc1200_driver for sub-1GHz, or cc2538_rf_driver (default) * for 2.4GHz built-in radio interface
UnjoinedResource: fix crash
@@ -24,9 +24,9 @@ function isJoined(path: string) { return function ( props: Pick<UnjoinedResourceProps, 'graphKeys'> ) { - const graphKeys = useGraphState(state => state.graphKeys); + const graphKey = path.substr(7); - return graphKeys.has(graphKey); + return props.graphKeys.has(graphKey); }; } @@ -35,11 +35,12 @@ export function UnjoinedResource(props: UnjoinedResourceProps) { const history = useHistory(); const rid = props.association.resource; const appName = props.association['app-name']; - const { title, description, module } = props.association.metadata; - const waiter = useWaitForProps(props); - const app = useMemo(() => module || appName, [props.association]); + const { title, description, module: mod } = props.association.metadata; const graphKeys = useGraphState(state => state.graphKeys); + const waiter = useWaitForProps({...props, graphKeys }); + const app = useMemo(() => mod || appName, [props.association]); + const onJoin = async () => { const [, , ship, name] = rid.split('/'); await api.graph.joinGraph(ship, name);
[protobuf] Fix ubsan error in text_format
@@ -1216,7 +1216,7 @@ class TextFormat::Printer::TextGenerator Write(text + pos, size - pos); } else { Write(text, size); - if (text[size - 1] == '\n') { + if (size > 0 && text[size - 1] == '\n') { at_start_of_line_ = true; } }
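The undefined behaviour comes from evaluating `text[size - 1]` when `size` is 0: the index points before the buffer (and underflows if the size is unsigned). Checking the size first makes the empty-write case explicit; a tiny stand-alone version of the guarded test (not the protobuf sources):

    #include <stdbool.h>
    #include <stddef.h>

    /* True if the written chunk ends with a newline; safe for size == 0. */
    static bool ends_with_newline(const char *text, size_t size)
    {
        return size > 0 && text[size - 1] == '\n';
    }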
Correct string length for memcpy
@@ -175,7 +175,7 @@ static bool replaceString(int argCount) { tmp = pos + len; } - memcpy(newStr + stringLength, tmp, tmpLength); + memcpy(newStr + stringLength, tmp, strlen(tmp)); ObjString *newString = copyString(newStr, length - 1); push(OBJ_VAL(newString)); @@ -397,7 +397,7 @@ static bool formatString(int argCount) { } free(replaceStrings); - memcpy(newStr + stringLength, tmp, tmpLength); + memcpy(newStr + stringLength, tmp, strlen(tmp)); ObjString *newString = copyString(newStr, fullLength - 1); push(OBJ_VAL(newString));
grid: can unsync system OTAs from notifications When shown the Base Blocked notification, the user can opt to disable updates so as to continue using out-of-date apps.
@@ -51,14 +51,23 @@ export const BaseBlockedNotification = () => { const [b, u] = partition(Object.entries(s.pikes), ([, pike]) => pikeIsBlocked(newKelvin, pike)); return [b.map(([d]) => d), u.map(([d]) => d)] as const; }); + const { toggleInstall } = useKilnState(); const blockedCharges = Object.values(pick(charges, blocked)); const count = blockedCharges.length; - const handlePauseOTAs = useCallback(() => {}, []); + const handlePauseOTAs = useCallback(async () => { + const otaSponsor = basePike?.sync?.ship; + if (!otaSponsor) { + return; + } + + await toggleInstall('base', otaSponsor); + push('/leap/upgrading'); + }, []); const handleArchiveApps = useCallback(async () => { - api.poke(kilnBump()); + await api.poke(kilnBump()); push('/leap/upgrading'); }, []);
zephyr/shim/src/power_host_sleep_api.c: Format with clang-format BRANCH=none TEST=none
#include <ap_power/ap_power_interface.h> #include <power_host_sleep.h> -static enum power_state translate_ap_power_state( - enum power_states_ndsx ap_power_state) +static enum power_state +translate_ap_power_state(enum power_states_ndsx ap_power_state) { switch (ap_power_state) { case SYS_POWER_STATE_S5: @@ -24,8 +24,8 @@ static enum power_state translate_ap_power_state( } } -int ap_power_get_lazy_wake_mask( - enum power_states_ndsx state, host_event_t *mask) +int ap_power_get_lazy_wake_mask(enum power_states_ndsx state, + host_event_t *mask) { enum power_state st; @@ -36,8 +36,7 @@ int ap_power_get_lazy_wake_mask( } #if CONFIG_AP_PWRSEQ_HOST_SLEEP -void power_chipset_handle_host_sleep_event( - enum host_sleep_event state, +void power_chipset_handle_host_sleep_event(enum host_sleep_event state, struct host_sleep_event_context *ctx) { ap_power_chipset_handle_host_sleep_event(state, ctx);
Jenkinsfile: disable memcheck for Release builds
@@ -630,13 +630,18 @@ def generateFullBuildStages() { // 'RelWithDebInfo' is build in the debian-stable-full build for(buildType in ['Debug', 'Release']) { def testName = "debian-stable-multiconf[buildType=${buildType}]" + // TODO: add memory tests to release builds when #2320 is fixed + def tests = [TEST.ALL] + if (buildType == 'Debug') { + tests = [TEST.ALL, TEST.MEM] + } tasks << buildAndTest( testName, DOCKER_IMAGES.stretch, CMAKE_FLAGS_BUILD_ALL + [ 'CMAKE_BUILD_TYPE': buildType ], - [TEST.ALL, TEST.MEM] + tests ) }
nimble/host: Fix cleaning conn when sync transfer enabled When the host is waiting for the sync transfer, conn->psync is allocated. If an ACL disconnection happens before the sync is established, make sure to free it.
@@ -28,6 +28,11 @@ ble_hs_atomic_conn_delete(uint16_t conn_handle) conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { ble_hs_conn_remove(conn); +#if MYNEWT_VAL(BLE_PERIODIC_ADV_SYNC_TRANSFER) + if (conn->psync) { + ble_hs_periodic_sync_free(conn->psync); + } +#endif ble_hs_conn_free(conn); }
small adjustment to safe_strncpy()
@@ -541,6 +541,7 @@ char* safe_strncpy(char* dest, const char* source, size_t size) char src[size + 1]; memset(src,'\0',size + 1); strncpy(src,source,size); + src[size] = '\0'; strncpy(dest,src,size); return dest; }
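Background for the extra terminator: `strncpy` does not NUL-terminate the destination when the source is `size` characters or longer, so truncating copies must terminate explicitly (here the preceding `memset` already zeroes `src[size]`, making the added line a defensive duplicate). A minimal stand-alone example of the usual pattern:

    #include <string.h>

    /* Copy at most size bytes of source into dest and always NUL-terminate.
     * dest must have room for size + 1 bytes. (Generic sketch, not the project code.) */
    static char *copy_truncated(char *dest, const char *source, size_t size)
    {
        strncpy(dest, source, size);   /* may leave dest unterminated if source is long */
        dest[size] = '\0';
        return dest;
    }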
oc_oscore: do not protect request if dos != RFNOP
@@ -399,6 +399,12 @@ oc_oscore_send_message(oc_message_t *msg) coap_pkt->code == CSM_7_01 #endif /* OC_TCP */ ) { + oc_sec_pstat_t *pstat = oc_sec_get_pstat(message->endpoint.device); + if (pstat->s != OC_DOS_RFNOP) { + OC_ERR("### device not in RFNOP; stop further processing ###"); + goto oscore_send_error; + } + OC_DBG("### protecting outgoing request ###"); /* Request */ /* Use context->SSN as Partial IV */
improve test for pics with weights
@@ -54,20 +54,21 @@ tests/test-pics-pics: traj scale phantom pics nrmse $(TESTS_OUT)/shepplogan.ra -# test that weights =1 have no effect +# test that weights =0.5 have no effect tests/test-pics-weights: phantom pics ones nrmse $(TESTS_OUT)/shepplogan.ra $(TESTS_OUT)/shepplogan_coil.ra set -e; mkdir $(TESTS_TMP) ; cd $(TESTS_TMP) ;\ $(TOOLDIR)/phantom -S8 coils.ra ;\ $(TOOLDIR)/ones 2 128 128 weights.ra ;\ + $(TOOLDIR)/scale 0.5 weights.ra weights2.ra ;\ $(TOOLDIR)/pics -S -r0.001 $(TESTS_OUT)/shepplogan_coil.ra coils.ra reco1.ra ;\ - $(TOOLDIR)/pics -S -r0.001 -p weights.ra $(TESTS_OUT)/shepplogan_coil.ra coils.ra reco2.ra ;\ + $(TOOLDIR)/pics -S -r0.001 -p weights2.ra $(TESTS_OUT)/shepplogan_coil.ra coils.ra reco2.ra ;\ $(TOOLDIR)/nrmse -t 0.000001 reco2.ra reco1.ra ;\ rm *.ra ; cd .. ; rmdir $(TESTS_TMP) touch $@ -# test that weights =1 have no effect +# test that weights =0.5 have no effect # FIXME: this was 0.005 before but fails on travis tests/test-pics-noncart-weights: traj scale ones phantom pics nrmse set -e; mkdir $(TESTS_TMP) ; cd $(TESTS_TMP) ;\ @@ -76,7 +77,8 @@ tests/test-pics-noncart-weights: traj scale ones phantom pics nrmse $(TOOLDIR)/phantom -s8 -t traj2.ra ksp.ra ;\ $(TOOLDIR)/phantom -S8 coils.ra ;\ $(TOOLDIR)/ones 4 1 256 32 1 weights.ra ;\ - $(TOOLDIR)/pics -S -r0.001 -p weights.ra -t traj2.ra ksp.ra coils.ra reco1.ra ;\ + $(TOOLDIR)/scale 0.5 weights.ra weights2.ra ;\ + $(TOOLDIR)/pics -S -r0.001 -p weights2.ra -t traj2.ra ksp.ra coils.ra reco1.ra ;\ $(TOOLDIR)/pics -S -r0.001 -t traj2.ra ksp.ra coils.ra reco2.ra ;\ $(TOOLDIR)/nrmse -t 0.010 reco1.ra reco2.ra ;\ rm *.ra ; cd .. ; rmdir $(TESTS_TMP)
Build tests on musl
@@ -96,7 +96,7 @@ _test_trace_log (_mongocrypt_tester_t *tester) mongocrypt_destroy (crypt); } -#ifndef _WIN32 +#if defined(__GLIBC__) || defined(__APPLE__) static void _test_no_log (_mongocrypt_tester_t *tester) { @@ -131,7 +131,7 @@ _mongocrypt_tester_install_log (_mongocrypt_tester_t *tester) { INSTALL_TEST (_test_log); INSTALL_TEST (_test_trace_log); -#ifndef _WIN32 +#if defined(__GLIBC__) || defined(__APPLE__) INSTALL_TEST (_test_no_log); #endif }
Decouple ABI version from release version. Until now the release version has been used as the ABI version, but that's not recommended at all. Start tracking the ABI version with its own version number, starting with 8 in order to make clear that this number is not tied to the release version.
+# ABI_VERSION is passed to libtool as --version-number $(ABI_VERSION). This is +# not related to YARA's release version, this is used for tracking changes in +# the ABI, not in the project as a whole. +# +# The three number mean [current]:[revision]:[age], and they should updated as +# follow: +# +# 1. With every release increment "revision". +# +# 2. If any interfaces have been added, removed, or changed since the last +# update, increment "current" and set "revision" to 0. +# +# 3. If the changes in the interface were backward compatible (i.e: only adding +# new APIs) increment "age", or set it to 0 if otherwise. +# +# See https://autotools.io/libtool/version.html for more details. +# +ABI_VERSION = 8:0:0 + # Rules for generating YARA modules from .proto files. For each .proto file # three files are generated: .c, .pb-c.c, and .pb-c.h. The .c file is generated # by protoc-gen-yara and the other two by protoc-gen-c. This is done only if @@ -152,7 +171,7 @@ dist_noinst_DATA = pb/yara.proto lib_LTLIBRARIES = libyara.la -libyara_la_LDFLAGS = -version-number 4:1:0 +libyara_la_LDFLAGS = -version-number $(ABI_VERSION) BUILT_SOURCES = \ lexer.c \
Fix bugs with slice header These fixes allow more than one slice to be used to code a picture. Use the correct number of bits to code the slice segment address. Don't write offset_len_minus1 for slices without substreams.
@@ -695,18 +695,23 @@ void kvz_encoder_state_write_bitstream_slice_header( #ifdef KVZ_DEBUG printf("=========== Slice ===========\n"); #endif - WRITE_U(stream, (state->slice->start_in_rs == 0), 1, "first_slice_segment_in_pic_flag"); + + bool first_slice_segment_in_pic = (state->slice->start_in_rs == 0); + + WRITE_U(stream, first_slice_segment_in_pic, 1, "first_slice_segment_in_pic_flag"); if (state->frame->pictype >= KVZ_NAL_BLA_W_LP && state->frame->pictype <= KVZ_NAL_RSV_IRAP_VCL23) { - WRITE_U(stream, 1, 1, "no_output_of_prior_pics_flag"); + WRITE_U(stream, 0, 1, "no_output_of_prior_pics_flag"); } WRITE_UE(stream, 0, "slice_pic_parameter_set_id"); - if (state->slice->start_in_rs > 0) { - //For now, we don't support dependent slice segments - //WRITE_U(stream, 0, 1, "dependent_slice_segment_flag"); - WRITE_UE(stream, state->slice->start_in_rs, "slice_segment_address"); + + if (!first_slice_segment_in_pic) { + int lcu_cnt = encoder->in.width_in_lcu * encoder->in.height_in_lcu; + int num_bits = kvz_math_ceil_log2(lcu_cnt); + int slice_start_rs = state->slice->start_in_rs; + WRITE_U(stream, slice_start_rs, num_bits, "slice_segment_address"); } WRITE_UE(stream, state->frame->slicetype, "slice_type"); @@ -821,10 +826,16 @@ void kvz_encoder_state_write_bitstream_slice_header( int num_entry_points = 0; int max_length_seen = 0; + if (state->is_leaf) { + num_entry_points = 1; + } else { encoder_state_entry_points_explore(state, &num_entry_points, &max_length_seen); + } + + int num_offsets = num_entry_points - 1; - WRITE_UE(stream, num_entry_points - 1, "num_entry_point_offsets"); - if (num_entry_points > 0) { + WRITE_UE(stream, num_offsets, "num_entry_point_offsets"); + if (num_offsets > 0) { int entry_points_written = 0; int offset_len = kvz_math_floor_log2(max_length_seen) + 1; WRITE_UE(stream, offset_len - 1, "offset_len_minus1");
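In the HEVC slice header syntax, slice_segment_address is coded with Ceil(Log2(PicSizeInCtbsY)) bits, which is what the `kvz_math_ceil_log2(lcu_cnt)` call is computing. A small stand-alone version of that bit-width calculation (assuming kvz_math_ceil_log2 behaves like the helper below):

    /* Number of bits needed to represent values in [0, count - 1]; count must be >= 1. */
    static int ceil_log2(unsigned count)
    {
        int bits = 0;
        unsigned v = count - 1;

        while (v > 0) {
            v >>= 1;
            bits++;
        }
        return bits;
    }
    /* Example: a 1920x1080 picture has 30 x 17 = 510 64x64 CTUs, so 9 bits are written. */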
linux-raspberrypi-dev: Bump to 4.13.y series
@@ -7,8 +7,8 @@ python __anonymous() { FILESEXTRAPATHS_prepend := "${THISDIR}/linux-raspberrypi:" -LINUX_VERSION ?= "4.12" -LINUX_RPI_DEV_BRANCH ?= "rpi-4.12.y" +LINUX_VERSION ?= "4.13" +LINUX_RPI_DEV_BRANCH ?= "rpi-4.13.y" SRCREV = "${AUTOREV}" SRC_URI = "git://github.com/raspberrypi/linux.git;protocol=git;branch=${LINUX_RPI_DEV_BRANCH} \
initialize usb_motor_test_t for safety
#define quic_errorf(cmd, args...) send_quic_strf(cmd, QUIC_FLAG_ERROR, args) -usb_motor_test_t usb_motor_test; +usb_motor_test_t usb_motor_test = { + .active = 0, + .value = {0, 0, 0, 0}, +}; extern profile_t profile; extern profile_t default_profile;
Update Mac network detection logic Updated Mac network detection logic to be on par with iOS. Initialize the network cost before starting the monitor; otherwise macOS frequently reports an unknown cost.
@@ -84,7 +84,7 @@ namespace PAL_NS_BEGIN { NetworkInformation::~NetworkInformation() noexcept { - if (@available(iOS 12.0, *)) + if (@available(macOS 10.14, iOS 12.0, *)) { if (m_isNetDetectEnabled) { @@ -105,7 +105,36 @@ namespace PAL_NS_BEGIN { { auto weak_this = std::weak_ptr<NetworkInformation>(shared_from_this()); - if (@available(iOS 12.0, *)) + m_reach = [ODWReachability reachabilityForInternetConnection]; + void (^block)(NSNotification*) = ^(NSNotification*) + { + auto strong_this = weak_this.lock(); + if (!strong_this) + { + return; + } + + // NetworkCost information is not available until iOS 12. + // Just make the best guess here. + switch (m_reach.currentReachabilityStatus) + { + case NotReachable: + strong_this->UpdateType(NetworkType_Unknown); + strong_this->UpdateCost(NetworkCost_Unknown); + break; + case ReachableViaWiFi: + strong_this->UpdateType(NetworkType_Wifi); + strong_this->UpdateCost(NetworkCost_Unmetered); + break; + case ReachableViaWWAN: + strong_this->UpdateType(NetworkType_WWAN); + strong_this->UpdateCost(NetworkCost_Metered); + break; + } + }; + block(nil); // Update the initial status. + + if (@available(macOS 10.14, iOS 12.0, *)) { m_monitor = nw_path_monitor_create(); nw_path_monitor_set_queue(m_monitor, dispatch_get_global_queue(QOS_CLASS_BACKGROUND, 0)); @@ -136,7 +165,7 @@ namespace PAL_NS_BEGIN { type = NetworkType_Wired; } cost = nw_path_is_expensive(path) ? NetworkCost_Metered : NetworkCost_Unmetered; - if (@available(iOS 13.0, *)) + if (@available(macOS 10.15, iOS 13.0, *)) { if (nw_path_is_constrained(path)) { @@ -157,37 +186,7 @@ namespace PAL_NS_BEGIN { nw_path_monitor_cancel(m_monitor); } } - else - { - m_reach = [ODWReachability reachabilityForInternetConnection]; - void (^block)(NSNotification*) = ^(NSNotification*) - { - auto strong_this = weak_this.lock(); - if (!strong_this) - { - return; - } - - // NetworkCost information is not available until iOS 12. - // Just make the best guess here. - switch (m_reach.currentReachabilityStatus) - { - case NotReachable: - strong_this->UpdateType(NetworkType_Unknown); - strong_this->UpdateCost(NetworkCost_Unknown); - break; - case ReachableViaWiFi: - strong_this->UpdateType(NetworkType_Wifi); - strong_this->UpdateCost(NetworkCost_Unmetered); - break; - case ReachableViaWWAN: - strong_this->UpdateType(NetworkType_WWAN); - strong_this->UpdateCost(NetworkCost_Metered); - break; - } - }; - block(nil); // Update the initial status. - if (m_isNetDetectEnabled) + else if (m_isNetDetectEnabled) { m_notificationId = [[NSNotificationCenter defaultCenter] @@ -198,7 +197,6 @@ namespace PAL_NS_BEGIN { [m_reach startNotifier]; } } - } void NetworkInformation::UpdateType(NetworkType type) noexcept {
Fixed scroll focus when the object is out of the page on two sides.
@@ -499,7 +499,7 @@ void lv_page_focus(lv_obj_t * page, const lv_obj_t * obj, uint16_t anim_time) scrlable_y += page_h - obj_h; } /*Out of the page on the left*/ - else if((obj_w <= page_w && left_err > 0) || + if((obj_w <= page_w && left_err > 0) || (obj_w > page_w && left_err < right_err)) { /*Calculate a new position and let some space above*/ scrlable_x = -(obj_x - style_scrl->body.padding.ver - style->body.padding.ver);
BugID:19864463: reboot -> reboot repeat
@@ -177,6 +177,7 @@ void sys_adc_calibration(u8 write, u16 *offset, u16 *gain) */ void sys_reset(void) { + while(1) { rtc_backup_timeinfo(); /* Set processor clock to default(2: 31.25MHz) before system reset */ @@ -187,6 +188,8 @@ void sys_reset(void) SCB->AIRCR = ((0x5FA << SCB_AIRCR_VECTKEY_Pos) | // VECTKEY (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | // PRIGROUP SCB_AIRCR_SYSRESETREQ_Msk); // SYSRESETREQ + DelayUs(100*1000); + } }
tls: make the TLS OpenSSL plugin work with OpenSSL 3.0.0 Type: fix
@@ -831,7 +831,12 @@ tls_init_ca_chain (void) return -1; } +#if OPENSSL_VERSION_NUMBER >= 0x30000000L + rv = X509_STORE_load_file (om->cert_store, tm->ca_cert_path); +#else rv = X509_STORE_load_locations (om->cert_store, tm->ca_cert_path, 0); +#endif + if (rv < 0) { clib_warning ("failed to load ca certificate");
cmdline: correct *printf arguments
@@ -71,7 +71,7 @@ static bool cmdlineCheckIfPersistent(const char* fname) { ret = true; } if (munmap(map, fileSz) == -1) { - PLOG_W("munmap(%p, %zu)", map, fileSz); + PLOG_W("munmap(%p, %zu)", map, (size_t)fileSz); } close(fd); return ret;
Simplify chaining with null_op (refactored, M.U.)
@@ -416,6 +416,16 @@ struct linop_s* linop_null_create(unsigned int NO, const long odims[NO], unsigne */ struct linop_s* linop_chain(const struct linop_s* a, const struct linop_s* b) { + if ( operator_zero_or_null_p(a->forward) + || operator_zero_or_null_p(b->forward)) { + + auto dom = linop_domain(a); + auto cod = linop_codomain(b); + + return linop_null_create2(cod->N, cod->dims, cod->strs, + dom->N, dom->dims, dom->strs); + } + PTR_ALLOC(struct linop_s, c); c->forward = operator_chain(a->forward, b->forward);
Yan LR: Explain ASan plugin removal reason
@@ -47,6 +47,9 @@ function (check_dependencies) set (ANTLR4CPP_LIBRARIES ${ANTLR4CPP_LIBRARIES} PARENT_SCOPE) set (ANTLR4CPP_INCLUDE_DIRS ${ANTLR4CPP_INCLUDE_DIRS} PARENT_SCOPE) + # AdressSanitizer enabled builds of the plugin report runtime errors about member calls, which do not point to an object of type + # `_Sp_counted_base` inside the system header file `shared_ptr_base.h`. In Clang builds of the plugin we ignore this error in our + # [blacklist](tests/sanitizer.blacklist). Unfortunately GCC does not support a blacklist, so we remove the plugin in this case. set (DISABLE_PLUGIN_ASAN ${ENABLE_ASAN} AND @@ -58,7 +61,10 @@ function (check_dependencies) VERSION_LESS 7) if (${DISABLE_PLUGIN_ASAN}) - set (FAILURE_MESSAGE "ASAN enabled GCC builds of the plugin report memory leaks" PARENT_SCOPE) + set (FAILURE_MESSAGE + "ASan enabled GCC builds of the plugin report member calls on addresses, " + "which do not point to an object of type `_Sp_counted_base`" + PARENT_SCOPE) return () endif (${DISABLE_PLUGIN_ASAN})
Pass ArrayRefs to lambda by value.
@@ -1003,7 +1003,7 @@ namespace NCB { TArrayRef<TString> target = Data.TargetData.Target[flatTargetIdx]; LocalExecutor->ExecRange( - [&](int objectIdx) { + [=](int objectIdx) { target[objectIdx] = ToString(value[objectIdx]); }, *ObjectCalcParams,
Fix crash when freeing the last allocated bitmap
@@ -141,7 +141,7 @@ bool GraphicsMemoryHeap::Release(void *pHeapBlockData) currentBlk->status = blockFree; // If the next block is free, then subsume next block into current including data length and block header - if (nextBlock->status == blockFree) // end block status is always heap_end + if ((nextBlock != NOT_APPLICABLE) && (nextBlock->status == blockFree)) // end block status is always heap_end { currentBlk->dataLength += nextBlock->dataLength + blockHeaderSize; currentBlk->next = nextBlock->next; @@ -149,7 +149,8 @@ bool GraphicsMemoryHeap::Release(void *pHeapBlockData) } // If the previous block is free, then subsume current block into previous including data length and block // header - if (prevBlock->status == blockFree && prevBlock != ptrfirstBlockHeader) // Guard against looking outside heap + if ((prevBlock != NOT_APPLICABLE) && (prevBlock->status == blockFree) && + (prevBlock != ptrfirstBlockHeader)) // Guard against looking outside heap { prevBlock->dataLength += currentBlk->dataLength + blockHeaderSize; prevBlock->next = currentBlk->next;
github/ci: adding ldconfig only in Linux tests
@@ -37,9 +37,11 @@ jobs: run: make -j 2 check - name: make install - run: | - sudo make install - sudo ldconfig + run: sudo make install + + - name: ldconfig (Linux) + if: runner.os == 'Linux' + run: sudo ldconfig - name: make check-link run: make check-link
input: added input plugin flag for plugins that expose listeners
#define FLB_INPUT_PRIVATE 256 /* plugin is not published/exposed */ #define FLB_INPUT_NOTAG 512 /* plugin might don't have tags */ #define FLB_INPUT_THREADED 1024 /* plugin must run in a separate thread */ +#define FLB_INPUT_NET_SERVER 8 /* Input address may set host and port. + * In addition, if TLS is enabled then a + * private key and certificate are required. + */ /* Input status */ #define FLB_INPUT_RUNNING 1
test: verify that TLS and X509 are independent of the mbedtls_ecp_curve functions
@@ -939,6 +939,19 @@ component_test_full_cmake_gcc_asan () { msg "test: context-info.sh (full config, ASan build)" # ~ 15 sec tests/context-info.sh + + # Verify that TLS and X509 libraries have no dipendency from + # "mbedtls_ecp_curve" symbols. + msg "test: verify that TLS and X509 have no dependency from mbedtls_ecp_curve symbols" + docs/architecture/psa-migration/syms.sh full + + not grep mbedtls_ecp_curve full-tls-external + not grep mbedtls_ecp_curve full-x509-external + + rm full-tls-external \ + full-tls-modules \ + full-x509-external \ + full-x509-modules } component_test_psa_crypto_key_id_encodes_owner () {
Add documentation regarding keyword args for Lineout. Resolves
@@ -3925,20 +3925,24 @@ const char *visit_Lineout_doc = "Lineout(start, end, variables) -> integer\n" "Lineout(start, end, samples) -> integer\n" "Lineout(start, end, variables, samples) -> integer\n" +"Lineout(keywordarg1=arg1, keywrdarg2=arg2,...,keywordargn=argn ) -> integer\n" "\n" "\n" "Arguments:\n" "\n" -"start\n" +"start (keyword arg: start_point)\n" "A 2 or 3 item tuple containing the coordinates of the starting point.\n" -"end\n" +"end (keyword arg: end_point)\n" "A 2 or 3 item tuple containing the coordinates of the end point.\n" -"variables\n" +"variables (keyword arg: vars)\n" "A tuple of strings containing the names of the variables for which\n" "lineouts should be created.\n" -"samples\n" +"samples (keyword arg: num_samples)\n" "An integer value containing the number of sample points along the lineout.\n" "\n" +"(keyword arg: use_sampling)\n" +"An integer value specifying whether or not to do a sample-based lineout.\n" +"\n" "\n" "Returns:\n" "\n" @@ -3970,6 +3974,7 @@ const char *visit_Lineout_doc = "Lineout((0.2,0.2), (0.8,1.2))\n" "Lineout((0.2,1.2), (0.8,0.2), (\"default\", \"d\", \"u\"))\n" "Lineout((0.6, 0.1), (0.6, 1.2), 100)\n" +"Lineout(start_point=(0.6, 0.1), end_point=(0.6, 1.2), use_sampling=1, num_samples=100)\n" ; const char *visit_List_doc = "List\n"
fix handling of BED12 records with 1 block that differs from the main interval.
@@ -80,10 +80,6 @@ void GetBedBlocks(const BED &bed, bedVector &bedBlocks) { cerr << "Input error: found interval having <= 0 blocks." << endl; exit(1); } - else if ( blockCount == 1 ) { - //take a short-cut for single blocks - bedBlocks.push_back(bed); - } else { // get the comma-delimited strings for the BED12 block starts and block ends. string blockSizes(bed.fields[10]);
mkdir for Snap
@@ -90,4 +90,7 @@ parts: fi qmake "USE_UPNP=1" "USE_QRCODE=1" "${MF}" OPENSSL_INCLUDE_PATH=/usr/local/ssl/include OPENSSL_LIB_PATH=/usr/local/ssl/lib denarius-qt.pro OPENSSL_INCLUDE_PATH=/usr/local/ssl/include OPENSSL_LIB_PATH=/usr/local/ssl/lib make -j4 + sudo mkdir $SNAP/usr/local/bin + sudo chmod +x $SNAP/usr/local/bin + sudo chmod -R 777 $SNAP/usr/local/bin sudo cp Denarius $SNAP/usr/local/bin/
fix: remove the useless check
@@ -253,7 +253,7 @@ orka_str_to_ntl( long long orka_str_bounds_check(const char *str, const size_t threshold_len) { - if (!str || threshold_len > LLONG_MAX) + if (!str) return -1; // Missing string or overflow for (long long i=0; i < threshold_len; ++i) {
test framework pump thread exit: set flag, then wake
@@ -83,7 +83,7 @@ def pump_output(testclass): """ pump output from vpp stdout/stderr to proper queues """ stdout_fragment = "" stderr_fragment = "" - while not testclass.pump_thread_stop_flag.wait(0): + while not testclass.pump_thread_stop_flag.is_set(): readable = select.select([testclass.vpp.stdout.fileno(), testclass.vpp.stderr.fileno(), testclass.pump_thread_wakeup_pipe[0]], @@ -391,8 +391,8 @@ class VppTestCase(unittest.TestCase): raw_input("When done debugging, press ENTER to kill the " "process and finish running the testcase...") - os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up') cls.pump_thread_stop_flag.set() + os.write(cls.pump_thread_wakeup_pipe[1], 'ding dong wake up') if hasattr(cls, 'pump_thread'): cls.logger.debug("Waiting for pump thread to stop") cls.pump_thread.join()
CMake: Fix msdfgen target name;
@@ -121,11 +121,11 @@ endif() # MSDF if(LOVR_ENABLE_DATA) - add_subdirectory(deps/msdfgen lib_msdfgen) + add_subdirectory(deps/msdfgen) include_directories(deps/msdfgen) - set(LOVR_MSDF lib_msdfgen) + set(LOVR_MSDF msdfgen) if(APPLE) - set_target_properties(lib_msdfgen PROPERTIES MACOSX_RPATH ON) + set_target_properties(msdfgen PROPERTIES MACOSX_RPATH ON) endif() endif()
Add error check to gpiodriver_set_int Instead of always returning an OK_ATOM there is now a check to return an ERROR_ATOM if setting the interrupt fails. Closes Issue
@@ -348,7 +348,10 @@ static term gpiodriver_set_int(Context *ctx, Context *target, term cmd) gpio_set_direction(gpio_num, GPIO_MODE_INPUT); gpio_set_intr_type(gpio_num, interrupt_type); - gpio_isr_handler_add(gpio_num, gpio_isr_handler, data); + esp_err_t ret = gpio_isr_handler_add(gpio_num, gpio_isr_handler, data); + if (UNLIKELY(ret != ESP_OK)) { + return ERROR_ATOM; + } return OK_ATOM; }
print clang version after updating path
@@ -122,7 +122,7 @@ script: - if [[ "$TRAVIS_OS_NAME" == "osx" && "$TESTS" == "integration" ]]; then scan-build --status-bugs -o /tmp/scan-build make -j8; STATUS=$?; test $STATUS -ne 0 && cat /tmp/scan-build/*/* ; [ "$STATUS" -eq "0" ] ; fi - if [[ "$TESTS" == "integration" ]]; then make clean; make integration ; fi - if [[ "$TESTS" == "fuzz" ]]; then export PATH=$LATEST_CLANG_INSTALL_DIR/bin:$PATH && make clean && make fuzz ; fi - - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TESTS" == "sawHMAC" ]]; then export PATH=$LATEST_CLANG_INSTALL_DIR/bin:$PATH && make -C tests/saw/ tmp/verify_s2n_hmac_$SAW_HMAC_TEST.log ; fi + - if [[ "$TRAVIS_OS_NAME" == "linux" && "$TESTS" == "sawHMAC" ]]; then export PATH=$LATEST_CLANG_INSTALL_DIR/bin:$PATH && clang --version && make -C tests/saw/ tmp/verify_s2n_hmac_$SAW_HMAC_TEST.log ; fi - if [[ "$TESTS" == "sawDRBG" ]]; then make -C tests/saw tmp/spec/DRBG/DRBG.log ; fi - if [[ "$TESTS" == "sawHMACFailure" ]]; then make -C tests/saw failure-tests ; fi - if [[ "$TESTS" == "ctverif" ]]; then .travis/run_ctverif.sh $CTVERIF_INSTALL_DIR ; fi
bignum_core.h: Comment update for mbedtls_mpi_core_get_mont_R2_unsafe
@@ -413,11 +413,12 @@ void mbedtls_mpi_core_montmul( mbedtls_mpi_uint *X, mbedtls_mpi_uint mm, mbedtls_mpi_uint *T ); /** - * \brief Calculate initialisation value for fast Montgomery modular - * multiplication + * \brief Calculate the square of the Montgomery constant. (Needed + * for conversion and operations in Montgomery form.) * * \param[out] X A pointer to the result of the calculation of - * Montgomery const 2^{2*n*biL} mod N. + * the square of the Montgomery constant: + * 2^{2*n*biL} mod N. * \param[in] N Little-endian presentation of the modulus, which must be odd. * * \return 0 if successful.
doc: add additional functions scheduled for removal
@@ -27,6 +27,9 @@ iterators for 1.0 and only the external instead. - ksSetCursor - ksHead - ksTail + - keyRewindMeta + - keyNextMeta + - keyCurrentMeta - change `ksAtCursor` to `ksAt` - add implementation / documentation / tests for the external iterator - start using external iterators in new code
Use package builder script.
@@ -22,11 +22,13 @@ jobs: python-version: "${{ matrix.python-version }}" - name: Install extra dependencies run: sudo apt install -y apache2-dev - - name: "Update pip" - run: python -m pip install --upgrade pip setuptools + - name: "Update pip installation" + run: python -m pip install --upgrade pip setuptools wheel + - name: "Build mod_wsgi packages" + run: ./package.sh && ls -las dist - name: "Install mod_wsgi-express" - run: python -m pip install --verbose . + run: python -m pip install --verbose dist/mod_wsgi-[0-9].*.tar.gz - name: "Run mod_wsgi-express test" run: scripts/run-single-test.sh - - name: "Verify CMMI configure/make/make install" + - name: "Verify configure/make/make install" run: ./configure && make && sudo make install
RTX5: moved Timer Thread creation to osKernelStart (Part 2)
@@ -146,12 +146,6 @@ osTimerId_t svcRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void *argume uint8_t flags; const char *name; - // Check Timer Thread and MessageQueue - if ((osRtxInfo.timer.thread == NULL) || (osRtxInfo.timer.mq == NULL)) { - EvrRtxTimerError(NULL, osErrorResource); - return NULL; - } - // Check parameters if ((func == NULL) || ((type != osTimerOnce) && (type != osTimerPeriodic))) { EvrRtxTimerError(NULL, osErrorParameter); @@ -248,6 +242,10 @@ osStatus_t svcRtxTimerStart (osTimerId_t timer_id, uint32_t ticks) { // Check object state switch (timer->state) { case osRtxTimerStopped: + if (osRtxInfo.timer.tick == NULL) { + EvrRtxTimerError(timer, osErrorResource); + return osErrorResource; + } timer->state = osRtxTimerRunning; timer->load = ticks; break;
Status table in README.md
# Portable SDK for UPnP\* Devices (libupnp) -![master](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg) - -<!--- -![1.10.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.10.x) -![1.8.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.8.x) -![1.6.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.6.x) -![1.4.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.4.x) ---> +branch|status +------|------ +Master| ![master](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg) +branch-1.10.x | ![1.10.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.10.x) +branch-1.8.x | ![1.8.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.8.x) +branch-1.6.x | ![1.6.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.6.x) +branch-1.4.x | ![1.4.x](https://github.com/mrjimenez/pupnp/workflows/C%2FC%2B%2B%20CI/badge.svg?branch=branch-1.4.x) Copyright (c) 2000-2003 Intel Corporation - All Rights Reserved.
driver: bmi160: Implement temperature reading BRANCH=None TEST=buildall
@@ -1268,6 +1268,11 @@ static int read(const struct motion_sensor_t *s, intv3_t v) return EC_SUCCESS; } +static int read_temp(const struct motion_sensor_t *s, int *temp_ptr) +{ + return bmi160_get_sensor_temp(s - motion_sensors, temp_ptr); +} + static int init(const struct motion_sensor_t *s) { int ret = 0, tmp, i; @@ -1449,6 +1454,7 @@ const struct accelgyro_drv bmi160_drv = { .set_scale = set_scale, .get_offset = get_offset, .perform_calib = perform_calib, + .read_temp = read_temp, #ifdef CONFIG_ACCEL_INTERRUPTS .irq_handler = irq_handler, #endif
CMSIS-DSP: Added automatic detection of MVE support.
@@ -398,14 +398,34 @@ extern "C" #include <arm_neon.h> #endif +#if __ARM_FEATURE_MVE + #if !defined(ARM_MATH_MVEI) + #define ARM_MATH_MVEI + #endif +#endif + +#if (__ARM_FEATURE_MVE & 2) + #if !defined(ARM_MATH_MVEF) + #define ARM_MATH_MVEF + #endif + #if !defined(ARM_MATH_FLOAT16) + #define ARM_MATH_FLOAT16 + #endif +#endif + #if defined (ARM_MATH_HELIUM) + #if !defined(ARM_MATH_MVEF) #define ARM_MATH_MVEF #endif -#if defined (ARM_MATH_MVEF) + #if !defined(ARM_MATH_MVEI) #define ARM_MATH_MVEI + #endif + + #if !defined(ARM_MATH_FLOAT16) #define ARM_MATH_FLOAT16 #endif +#endif #if defined (ARM_MATH_HELIUM) || defined(ARM_MATH_MVEF) || defined(ARM_MATH_MVEI) #include <arm_mve.h> @@ -438,7 +458,7 @@ extern "C" * This is not fully supported on ARM AC5. */ -#if !defined( __CC_ARM ) && !defined (ARM_MATH_HELIUM) && !defined(ARM_MATH_MVEF) && !defined(ARM_MATH_MVEI) +#if !defined( __CC_ARM ) && !(__ARM_FEATURE_MVE & 2) typedef __fp16 float16_t; #endif
[RPC] If a block is found by hash, make sure that the block is in the main chain.
@@ -41,7 +41,15 @@ func (cs *ChainService) getBlockByNo(blockNo types.BlockNo) (*types.Block, error } func (cs *ChainService) getBlock(blockHash []byte) (*types.Block, error) { - return cs.cdb.getBlock(blockHash) + block, err := cs.cdb.getBlock(blockHash) + if err != nil { + return nil, err + } + blockInMainChain, err := cs.cdb.getBlockByNo(block.Header.BlockNo) + if !bytes.Equal(block.BlockHash(), blockInMainChain.BlockHash()) { + return block, errors.New("block is not in the main chain") + } + return block, nil } func (cs *ChainService) getHashByNo(blockNo types.BlockNo) ([]byte, error) { @@ -55,11 +63,14 @@ func (cs *ChainService) getTx(txHash []byte) (*types.Tx, *types.TxIdx, error) { return nil, nil, err } block, err := cs.cdb.getBlock(txidx.BlockHash) + if err != nil { + return nil, nil, err + } blockInMainChain, err := cs.cdb.getBlockByNo(block.Header.BlockNo) if !bytes.Equal(block.BlockHash(), blockInMainChain.BlockHash()) { return tx, nil, errors.New("tx is not in the main chain") } - return tx, txidx, err + return tx, txidx, nil } type chainProcessor struct {
Documentation: Add missing newline
@@ -185,7 +185,8 @@ within the mountpoint configuration provided by the administrator. Example for a mountpoint configuration: - system/elektra/mountpoints system/elektra/mountpoints/fstab + system/elektra/mountpoints + system/elektra/mountpoints/fstab system/elektra/mountpoints/fstab/config system/elektra/mountpoints/fstab/config/path=fstab system/elektra/mountpoints/fstab/config/struct=list FStab
[Rust] AwmWindow runs sizers when an element is added to the hierarchy
@@ -84,6 +84,8 @@ impl AwmWindow { } pub fn add_component(&self, elem: Rc<dyn UIElement>) { + // Ensure the component has a frame by running its sizer + elem.handle_superview_resize(*self.current_size.borrow()); self.ui_elements.borrow_mut().push(elem); }
T166: fix cowbench to work with array-based pmap
@@ -32,9 +32,16 @@ static errval_t alloc_vnode_noalloc(struct pmap_x86 *pmap, struct vnode *root, // The VNode meta data newvnode->is_vnode = true; newvnode->entry = entry; +#ifdef PMAP_LL newvnode->next = root->u.vnode.children; root->u.vnode.children = newvnode; newvnode->u.vnode.children = NULL; +#elif defined(PMAP_ARRAY) + memset(newvnode->u.vnode.children, 0, sizeof(struct vode *)*PTABLE_SIZE); + root->u.vnode.children[entry] = newvnode; +#else +#error Invalid pmap datastructure +#endif *retvnode = newvnode; return SYS_ERR_OK; @@ -78,6 +85,7 @@ errval_t pmap_cow_init(void) return SYS_ERR_OK; } +#if defined(PMAP_LL) static struct vnode *find_vnode(struct vnode *root, uint16_t entry) { assert(root != NULL); @@ -102,6 +110,22 @@ static struct vnode *find_vnode(struct vnode *root, uint16_t entry) } return NULL; } +#elif defined(PMAP_ARRAY) +static struct vnode *find_vnode(struct vnode *root, uint16_t entry) +{ + assert(root != NULL); + assert(root->is_vnode); + assert(entry < PTABLE_SIZE); + + if (root->u.vnode.children) { + return root->u.vnode.children[entry]; + } else { + return NULL; + } +} +#else +#error Invalid pmap datastructure +#endif errval_t pmap_setup_cow(struct vregion *vregion, void **retbuf) { @@ -159,8 +183,10 @@ errval_t pmap_setup_cow(struct vregion *vregion, void **retbuf) USER_PANIC_ERR(err, "alloc_vnode_noalloc"); } assert(copy_vnode); - // XXX: dangerous! - copy_vnode->u.vnode.children = cow_root_pte->u.vnode.children; + // copy children metadata + // XXX: should copy caps to keep revoke safety + memcpy(copy_vnode->u.vnode.children, cow_root_pte->u.vnode.children, + PTABLE_SIZE * sizeof(struct vnode *)); *retbuf = (void *)(uintptr_t)(new_pml4e << 39);
sixtop: stop redefining typedef. Redefinition of a typedef is a C11 feature. These typedefs are identical, so remove one of them.
/** * \brief 6P Transaction Data Structure (for internal use) */ -typedef struct sixp_trans { +struct sixp_trans { struct sixp_trans *next; const sixtop_sf_t *sf; linkaddr_t peer_addr; @@ -68,7 +68,7 @@ typedef struct sixp_trans { uint16_t arg_len; } callback; struct ctimer timer; -} sixp_trans_t; +}; static void handle_trans_timeout(void *ptr); static void process_trans(void *ptr);
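To make the C11 point concrete: the pattern the patch lands on keeps a single typedef of the struct tag (presumably in the header) and only the bare `struct sixp_trans { ... };` definition in the .c file, which is valid C99 as well, whereas repeating the typedef itself is only allowed from C11 on. A minimal, self-contained sketch of that split:

```c
#include <stdio.h>

/* In the header: one typedef referring to the (still incomplete) struct tag. */
typedef struct sixp_trans sixp_trans_t;

/* In the .c file: define the struct under its tag only - no second typedef. */
struct sixp_trans {
    struct sixp_trans *next;
    int state;
};

/* The typedef'd name keeps working for users of the type. */
static int sixp_trans_count(const sixp_trans_t *head)
{
    int n = 0;
    for (; head != NULL; head = head->next) {
        n++;
    }
    return n;
}

int main(void)
{
    struct sixp_trans b = { NULL, 1 };
    struct sixp_trans a = { &b, 0 };
    printf("%d transactions\n", sixp_trans_count(&a));  /* prints 2 */
    return 0;
}
```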
os/tools: modify the way the path of .config is obtained. The hard-coded path "/root/..." is valid only in some environments, so we should not use it, and there is a lot of duplicated code for getting the config path. This commit removes the duplication and gets the path of the .config file from the TOPDIR env.
@@ -23,7 +23,7 @@ import sys import struct import string -CONFIG_DIR = '/root/tizenrt/os' +cfg_path = os.path.dirname(__file__) + '/../.config' # User Binary Format ELF = 1 @@ -90,7 +90,6 @@ def get_static_ram_size(bin_type): # If CONFIG_OPTIMIZE_APP_RELOAD_TIME is enabled, then we will make a copy # of the data section inside the ro section and it will be used in # reload time. So, we add datasize to rosize to make place for data section. - cfg_path = os.getenv('TOPDIR') + '/.config' if check_optimize_config(cfg_path) == True: rosize = rosize + datasize; rosize = roundup_power_two(rosize) @@ -136,8 +135,7 @@ def make_kernel_binary_header(): header_size = SIZE_OF_HEADERSIZE + SIZE_OF_BINVER + SIZE_OF_BINSIZE + SIZE_OF_SECURE_HEADER_SIZE # Get binary version - config_path = CONFIG_DIR + '/.config' - bin_ver = get_config_value(config_path, "CONFIG_BINARY_VERSION=") + bin_ver = get_config_value(cfg_path, "CONFIG_BINARY_VERSION=") if bin_ver < 0 : print("Error : Not Found config for version, CONFIG_BINARY_VERSION") sys.exit(1) @@ -249,11 +247,10 @@ def make_user_binary_header(): print("Dynamic ram size : %d, Main stack size : %d" %(int(dynamic_ram_size), int(main_stack_size))) sys.exit(1) - config_path = CONFIG_DIR + '/.config' - priority = get_config_value(config_path, "CONFIG_BM_PRIORITY_MAX=") + priority = get_config_value(cfg_path, "CONFIG_BM_PRIORITY_MAX=") if priority > 0 : BM_PRIORITY_MAX = priority - priority = get_config_value(config_path, "CONFIG_BM_PRIORITY_MIN=") + priority = get_config_value(cfg_path, "CONFIG_BM_PRIORITY_MIN=") if priority > 0 : BM_PRIORITY_MIN = priority @@ -287,7 +284,6 @@ def make_user_binary_header(): sys.exit(1) static_ram_size = get_static_ram_size(bin_type) - cfg_path = CONFIG_DIR + '/.config' if check_optimize_config(cfg_path) == True: binary_ram_size = int(dynamic_ram_size) else: @@ -295,8 +291,7 @@ def make_user_binary_header(): binary_ram_size = roundup_power_two(binary_ram_size) # Get kernel binary version - config_path = CONFIG_DIR + '/.config' - kernel_ver = get_config_value(config_path, "CONFIG_BINARY_VERSION=") + kernel_ver = get_config_value(cfg_path, "CONFIG_BINARY_VERSION=") if kernel_ver < 0 : print("Error : Not Found config for kernel version, CONFIG_BINARY_VERSION") sys.exit(1)
Fix clock bug: enable the I2C1 peripheral clock on APB1 instead of APB2
@@ -513,7 +513,7 @@ rt_err_t stm32_i2c_register(I2C_TypeDef *I2C, rt_uint32_t bitrate, if(I2C == I2C1) { pi2c = &stm32_i2c1; - RCC_APB2PeriphClockCmd(RCC_APB1Periph_I2C1, ENABLE); + RCC_APB1PeriphClockCmd(RCC_APB1Periph_I2C1, ENABLE); } else #endif /*RT_USING_I2C1*/
BugID:17029597: [AWSS] Remove debug information about passwd.
@@ -397,7 +397,7 @@ int wifimgr_process_switch_ap_request(void *ctx, void *resource, void *remote, v awss_dev_bind_notify_stop(); awss_debug("Sending message to app: %s", msg); - awss_debug("switch to ap: '%s' '%s'", ssid, passwd); + awss_debug("switch to ap: '%s'", ssid); char topic[TOPIC_LEN_MAX] = {0}; awss_build_topic((const char *)TOPIC_AWSS_SWITCHAP, topic, TOPIC_LEN_MAX); for (i = 0; i < 5; i ++) { @@ -414,7 +414,7 @@ int wifimgr_process_switch_ap_request(void *ctx, void *resource, void *remote, v goto SWITCH_AP_END; aplist = zconfig_get_apinfo_by_ssid((uint8_t *)ssid); - awss_debug("connect '%s' '%s'", ssid, passwd); + awss_debug("connect '%s'", ssid); if (aplist) { bssid = aplist->mac; awss_debug("bssid: %02x:%02x:%02x:%02x:%02x:%02x", \ @@ -445,7 +445,7 @@ int wifimgr_process_switch_ap_request(void *ctx, void *resource, void *remote, v produce_random(aes_random, sizeof(aes_random)); } - awss_debug("connect '%s' '%s' %s\r\n", ssid, passwd, switch_ap_done == 1 ? "success" : "fail"); + awss_debug("connect '%s' %s\r\n", ssid, switch_ap_done == 1 ? "success" : "fail"); SWITCH_AP_END: switch_ap_parsed = 0;
nat: test dynamic translation between two VRFs. Adds a test for the NAT44-ED subplugin. Type: test
@@ -94,7 +94,6 @@ class NAT44EDTestCase(VppTestCase): cls.create_and_add_ip4_table(i, table_id) i.admin_up() - i.unconfig_ip4() i.config_ip4() i.resolve_arp() @@ -3331,6 +3330,45 @@ class TestNAT44EDMW(TestNAT44ED): sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0) self.assertEqual(len(sessions) - session_n, 0) + def test_dynamic_vrf(self): + """ NAT44ED dynamic translation test: different VRF""" + + vrf_id_in = 33 + vrf_id_out = 34 + + self.nat_add_address(self.nat_addr, vrf_id=vrf_id_in) + + try: + self.configure_ip4_interface(self.pg7, table_id=vrf_id_in) + self.configure_ip4_interface(self.pg8, table_id=vrf_id_out) + + self.nat_add_inside_interface(self.pg7) + self.nat_add_outside_interface(self.pg8) + + # just basic stuff nothing special + pkts = self.create_stream_in(self.pg7, self.pg8) + self.pg7.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + capture = self.pg8.get_capture(len(pkts)) + self.verify_capture_out(capture, ignore_port=True) + + pkts = self.create_stream_out(self.pg8) + self.pg8.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + capture = self.pg7.get_capture(len(pkts)) + self.verify_capture_in(capture, self.pg7) + + finally: + self.pg7.unconfig() + self.pg8.unconfig() + + self.vapi.ip_table_add_del(is_add=0, + table={'table_id': vrf_id_in}) + self.vapi.ip_table_add_del(is_add=0, + table={'table_id': vrf_id_out}) + def test_dynamic_output_feature_vrf(self): """ NAT44ED dynamic translation test: output-feature, VRF""" @@ -3400,8 +3438,8 @@ class TestNAT44EDMW(TestNAT44ED): self.assertEqual(sessions[:, 0].sum(), 3) finally: - self.configure_ip4_interface(self.pg7, table_id=0) - self.configure_ip4_interface(self.pg8, table_id=0) + self.pg7.unconfig() + self.pg8.unconfig() self.vapi.ip_table_add_del(is_add=0, table={'table_id': new_vrf_id})
add callbacks to CatBoostRegressor
@@ -5156,7 +5156,7 @@ class CatBoostRegressor(CatBoost): def fit(self, X, y=None, cat_features=None, sample_weight=None, baseline=None, use_best_model=None, eval_set=None, verbose=None, logging_level=None, plot=False, column_description=None, verbose_eval=None, metric_period=None, silent=None, early_stopping_rounds=None, - save_snapshot=None, snapshot_file=None, snapshot_interval=None, init_model=None, + save_snapshot=None, snapshot_file=None, snapshot_interval=None, init_model=None, callbacks=None, log_cout=sys.stdout, log_cerr=sys.stderr): """ Fit the CatBoost model. @@ -5247,7 +5247,7 @@ class CatBoostRegressor(CatBoost): return self._fit(X, y, cat_features, None, None, None, sample_weight, None, None, None, None, baseline, use_best_model, eval_set, verbose, logging_level, plot, column_description, verbose_eval, metric_period, silent, early_stopping_rounds, - save_snapshot, snapshot_file, snapshot_interval, init_model, log_cout, log_cerr) + save_snapshot, snapshot_file, snapshot_interval, init_model, callbacks, log_cout, log_cerr) def predict(self, data, prediction_type=None, ntree_start=0, ntree_end=0, thread_count=-1, verbose=None): """
Refine the server name compare logic
@@ -876,19 +876,11 @@ static int ssl_prepare_client_hello( mbedtls_ssl_context *ssl ) if( ssl->tls_version == MBEDTLS_SSL_VERSION_TLS1_3 && ssl->handshake->resume ) { - int hostname_mismatch = 0; - if( ssl->session_negotiate->hostname != NULL ) - { - if( ssl->hostname != NULL ) - { - if( strcmp( ssl->hostname, ssl->session_negotiate->hostname) ) - hostname_mismatch = 1; - } - else - hostname_mismatch = 1; - } - else - hostname_mismatch = ssl->hostname != NULL; + int hostname_mismatch = ssl->hostname != NULL || + ssl->session_negotiate->hostname != NULL; + if( ssl->hostname != NULL && ssl->session_negotiate->hostname != NULL ) + hostname_mismatch = strcmp( + ssl->hostname, ssl->session_negotiate->hostname ) != 0; if( hostname_mismatch ) {
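Pulled out as a standalone helper (the function name here is hypothetical, for illustration only), the comparison the refactored code implements is: no mismatch when both names are unset, a mismatch when exactly one is set, and a string compare when both are set.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the logic in the patch above. */
static int hostname_mismatch(const char *a, const char *b)
{
    if (a == NULL && b == NULL) {
        return 0;                 /* both unset: no mismatch */
    }
    if (a == NULL || b == NULL) {
        return 1;                 /* exactly one set: mismatch */
    }
    return strcmp(a, b) != 0;     /* both set: compare the names */
}

int main(void)
{
    printf("%d %d %d %d\n",
           hostname_mismatch(NULL, NULL),                    /* 0 */
           hostname_mismatch("example.com", NULL),           /* 1 */
           hostname_mismatch(NULL, "example.com"),           /* 1 */
           hostname_mismatch("example.com", "example.com")); /* 0 */
    return 0;
}
```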
Coding: Add guidelines for Markdown headers
@@ -189,6 +189,8 @@ Most notably use: - Fences with sh are for the [shell recorder syntax](/tests/shell/shell_recorder/tutorial_wrapper) - `README.md` and tutorials should be written exclusively with shell recorder syntax so that we know that the code in the tutorial produces output as expected +- Please use [**title-case**](https://en.wiktionary.org/wiki/title_case) for headings in the general documentation. +- For [man pages](help/) please use **only capital letters for subheadings** and only **small letters for the main header**. We use this header style to match the look and feel of man pages for Unix tools such as `ls` or `mkdir`. Please use [`prettier`](https://prettier.io) to format documentation according to the guidelines given above. If you want, you can also format all Markdown files in the repository using the script [`reformat-markdown`](/scripts/reformat-markdown).
[bsp][stm32] add new bsp ci
@@ -90,6 +90,7 @@ env: - RTT_BSP='stm32/stm32f429-armfly-v6' RTT_TOOL_CHAIN='sourcery-arm' - RTT_BSP='stm32/stm32f429-atk-apollo' RTT_TOOL_CHAIN='sourcery-arm' - RTT_BSP='stm32/stm32f429-fire-challenger' RTT_TOOL_CHAIN='sourcery-arm' + - RTT_BSP='stm32/stm32f446-st-nucleo' RTT_TOOL_CHAIN='sourcery-arm' - RTT_BSP='stm32/stm32f767-atk-apollo' RTT_TOOL_CHAIN='sourcery-arm' - RTT_BSP='stm32/stm32f767-fire-challenger' RTT_TOOL_CHAIN='sourcery-arm' - RTT_BSP='stm32/stm32l475-atk-pandora' RTT_TOOL_CHAIN='sourcery-arm'
[dpos] show BPs from consensus info right after boot
@@ -8,9 +8,10 @@ package dpos import ( "encoding/json" "fmt" - "github.com/aergoio/aergo/p2p/p2pkey" "time" + "github.com/aergoio/aergo/p2p/p2pkey" + "github.com/aergoio/aergo-lib/log" "github.com/aergoio/aergo/config" "github.com/aergoio/aergo/consensus" @@ -252,18 +253,24 @@ func (dpos *DPoS) getBpInfo(now time.Time) *bpInfo { // ConsensusInfo returns the basic DPoS-related info. func (dpos *DPoS) ConsensusInfo() *types.ConsensusInfo { - ci := &types.ConsensusInfo{Type: GetName()} - if dpos.done { - var lpbNo types.BlockNo - - // Use a closure to release the mutex even upon panic. - func() { + withLock := func(fn func()) { dpos.RLock() defer dpos.RUnlock() + fn() + } + ci := &types.ConsensusInfo{Type: GetName()} + withLock(func() { ci.Bps = dpos.bpc.BPs() + + }) + + if dpos.done { + var lpbNo types.BlockNo + + withLock(func() { lpbNo = dpos.lpbNo() - }() + }) if lpbNo > 0 { if block, err := dpos.GetBlockByNo(lpbNo); err == nil {
[build] fix typo in SConstruct (fixes (thx eryretqwewrqr) x-ref: "NameError ins SConstruct"
@@ -523,7 +523,7 @@ if 1: LIBDBI = 'dbi', ) - if env['with_fam'] and not self.CheckCHeader('sys/inotify.h'): + if env['with_fam'] and not autoconf.CheckCHeader('sys/inotify.h'): if not autoconf.CheckLibWithHeader('fam', 'fam.h', 'C'): fail("Couldn't find fam") autoconf.env.Append(
Fix assert_equals of ChordLengths
@@ -29,7 +29,7 @@ assert_equals(CuTest *tc, } // Values - vector<real> lengths1 = cl2.lengths(); + vector<real> lengths1 = cl1.lengths(); vector<real> lengths2 = cl2.lengths(); CuAssertIntEquals(tc, (int) lengths1.size(),
vere: fix up the get-next-version check
#include <curl/curl.h> #include <uv.h> +// XX use a new GCP bucket? +// +// static const c3_c* ver_hos_c = "https://bootstrap.urbit.org/vere"; +static const c3_c* ver_hos_c = "http://localhost:8000"; + // stash config flags for worker // static c3_w sag_w; @@ -305,27 +310,32 @@ _king_get_next(c3_c** out_c) c3_y* hun_y; c3_i ret_i; - ret_i = asprintf(&url_c, "https://bootstrap.urbit.org/vere/%s/next", URBIT_VERSION); + ret_i = asprintf(&url_c, "%s/%s/next", ver_hos_c, URBIT_VERSION); c3_assert( ret_i > 0 ); if ( !_king_curl_bytes(url_c, &len_w, &hun_y) ) { - ret_i = asprintf(&ver_c, "%*.s", len_w, hun_y); + ret_i = asprintf(&ver_c, "%.*s", len_w, hun_y); c3_assert( ret_i > 0 ); } else { - if ( _king_curl_bytes("https://bootstrap.urbit.org/vere/last", - &len_w, &hun_y) ) + c3_free(url_c); + ret_i = asprintf(&url_c, "%s/last", ver_hos_c); + c3_assert( ret_i > 0 ); + + if ( _king_curl_bytes(url_c, &len_w, &hun_y) ) { c3_free(url_c); return -2; } - ret_i = asprintf(&ver_c, "%*.s", len_w, hun_y); + ret_i = asprintf(&ver_c, "%.*s", len_w, hun_y); c3_assert( ret_i > 0 ); } c3_free(url_c); + // XX trim ver_c ? + // if ( 0 == strcmp(ver_c, URBIT_VERSION) ) { c3_free(ver_c); return -1; @@ -928,6 +938,10 @@ u3_king_done(void) if ( c3y == u3_Host.ops_u.nex ) { c3_c* ver_c; + // hack to ensure we only try once + // + u3_Host.ops_u.nex = c3n; + switch ( _king_get_next(&ver_c) ) { case -2: { u3l_log("vere: unable to check for next version\n");
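The `"%*.s"` to `"%.*s"` change in this patch is easy to miss: `%.*s` takes the maximum number of characters to print (the precision) from an int argument, which is exactly what a length-counted, non-NUL-terminated buffer needs, while `%*.s` consumes that argument as a field width with an empty (zero) precision and prints none of the string. A small demonstration:

```c
#include <stdio.h>

int main(void)
{
    /* A length-counted buffer that is deliberately not NUL-terminated. */
    const char buf[] = { 'v', '1', '.', '9', '!', '!' };
    int len = 4;

    printf("[%.*s]\n", len, buf);  /* precision from arg: prints [v1.9]     */
    printf("[%*.s]\n", len, buf);  /* width from arg, precision 0: [    ]   */
    return 0;
}
```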
update blog + video page
title: Other Resources --- -To learn more about AppScope, see these resources from Cribl: +To learn more about AppScope, see these resources from Cribl. # Blog Posts -We have a growing number of AppScope-related [blog posts](https://cribl.io/blog/?s=appscope): +Learn why the team created AppScope: -- [AppScope from an AppDynamics Perspective](https://cribl.io/blog/appscope-from-an-appdynamics-perspective/) -- [Latest AppScope Updates: version 0.7 adds ability to attach to a running process, TLS support, and Alpine Linux support](https://cribl.io/blog/latest-appscope-updates-version-0-7-adds-ability-to-attach-to-a-running-process-tls-support-and-alpine-linux-support/) +- [The AppScope Origin Story](https://cribl.io/blog/the-appscope-origin-story/) - [Introducing AppScope: Easy Black Box Instrumentation for Everything](https://cribl.io/blog/introducing-appscope-easy-black-box-instrumentation-for-everything/) - [AppScope Design](https://cribl.io/blog/appscope-design/) - [AppScope: Interposition Mechanisms](https://cribl.io/blog/interposition-mechanisms/) + +See how AppScope addresses use cases that are relevant for you: + +- [AppScope 1.0: Changing the Game for SREs and Devs](https://cribl.io/blog/appscope-1-0-changing-the-game-for-sres-and-devs/) +- [AppScope 1.0: Changing the Game for Infosec, Part 1](https://cribl.io/blog/appscope-1-0-changing-the-game-for-infosec-part-1/) +- [AppScope 1.0: Changing the Game for Infosec, Part 2](https://cribl.io/blog/appscope-1-0-changing-the-game-for-infosec-part-2) +- [AppScope from an AppDynamics Perspective](https://cribl.io/blog/appscope-from-an-appdynamics-perspective/) - [AppScope: Analyzing gRPC and Protobuf](https://cribl.io/blog/analyzing-grpc-and-protobuf/) - [How AppScope helped resolve a DNS problem](https://cribl.io/blog/how-appscope-helped-resolve-a-dns-problem/) - [AppScope: Postgres SQL Observability](https://cribl.io/blog/appscope-postgres-sql-observability/) -## Videos +You can find all of Cribl's blog posts about AppScope [here](https://cribl.io/blog/?s=appscope). -See our: +## Videos -- [Videos](https://cribl.io/resources/?category=videos) about all Cribl products. \ No newline at end of file +For a view of the bigger observability picture, check out Cribl's [videos](https://cribl.io/resources/?category=videos).
Remove obsolete function prototypes
@@ -323,25 +323,6 @@ spi_status_t spi_arch_transfer(const spi_device_t *dev, uint8_t *buf, int rlen, int ignore_len); -/** - * \brief Selects an SPI device - * \param dev An SPI device configuration that specifies the CS pin. - * \return SPI return code - * - * Clears the CS pin. It should work only if the device has already - * locked the SPI controller. - */ -spi_status_t spi_arch_select(const spi_device_t *dev); - -/** - * \brief Deselects an SPI device - * \param dev An SPI device configuration that specifies the CS pin. - * \return SPI return code - * - * Set the CS pin. Locking the SPI controller is not needed. - */ -spi_status_t spi_arch_deselect(const spi_device_t *dev); - #endif /* SPI_H_ */ /*---------------------------------------------------------------------------*/ /**
VERSION bump to version 2.0.57
@@ -58,7 +58,7 @@ set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) # set version of the project set(LIBYANG_MAJOR_VERSION 2) set(LIBYANG_MINOR_VERSION 0) -set(LIBYANG_MICRO_VERSION 56) +set(LIBYANG_MICRO_VERSION 57) set(LIBYANG_VERSION ${LIBYANG_MAJOR_VERSION}.${LIBYANG_MINOR_VERSION}.${LIBYANG_MICRO_VERSION}) # set version of the library set(LIBYANG_MAJOR_SOVERSION 2)
Replace use of snprintf with asprintf in deployment_admin.c
* specific language governing permissions and limitations * under the License. */ -/** - * deployment_admin.c - * - * \date Nov 7, 2011 - * \author <a href="mailto:[email protected]">Apache Celix Project Team</a> - * \copyright Apache License, Version 2.0 - */ #include <stddef.h> #include <stdlib.h> @@ -136,16 +129,6 @@ celix_status_t deploymentAdmin_create(bundle_context_pt context, deployment_admi (*admin)->pollUrl = strdup(pollUrl); (*admin)->auditlogUrl = strdup(auditlogUrl); -// log_store_pt store = NULL; -// log_t *log = NULL; -// log_sync_pt sync = NULL; -// logStore_create(subpool, &store); -// log_create(subpool, store, &log); -// logSync_create(subpool, (*admin)->targetIdentification, store, &sync); -// -// log_log(log, 20000, NULL); - - celixThread_create(&(*admin)->poller, NULL, deploymentAdmin_poll, *admin); } } @@ -199,23 +182,22 @@ static celix_status_t deploymentAdmin_performRequest(deployment_admin_pt admin, fw_log(celix_frameworkLogger_globalLogger(), CELIX_LOG_LEVEL_ERROR, "Error initializing curl."); } - size_t maxUrlLen = strlen(admin->auditlogUrl)+6; - char url[maxUrlLen]; - int written = snprintf(url, sizeof(url), "%s/send", admin->auditlogUrl); - status = written < sizeof(url) ? CELIX_SUCCESS : CELIX_ILLEGAL_ARGUMENT; + char* url; + int rc = asprintf(&url, "%s/send", admin->auditlogUrl); + status = rc < 0 ? CELIX_ENOMEM : CELIX_SUCCESS; if (status == CELIX_SUCCESS) { curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_POSTFIELDS, entry); res = curl_easy_perform(curl); - if (res != CURLE_OK ) { status = CELIX_BUNDLE_EXCEPTION; - fw_log(celix_frameworkLogger_globalLogger(), CELIX_LOG_LEVEL_ERROR, "Error sending auditlog, got curl error code %d", res); + fw_log(celix_frameworkLogger_globalLogger(), CELIX_LOG_LEVEL_ERROR, "Error sending auditlog to %s, got curl error code %d", url, res); } + free(url); } else { - fw_log(celix_frameworkLogger_globalLogger(), CELIX_LOG_LEVEL_ERROR, "Error creating send url for audit log url", admin->auditlogUrl); + fw_log(celix_frameworkLogger_globalLogger(), CELIX_LOG_LEVEL_ERROR, "Error creating send url for audit log url: %s", admin->auditlogUrl); } return status;
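The reason for preferring `asprintf()` here, as the diff does, is that it sizes and allocates the output buffer itself, so there is no fixed-size array whose length calculation can go wrong; the caller only checks the return value and frees the result. A minimal sketch, assuming a glibc-style `asprintf()` (it is a GNU/BSD extension, hence `_GNU_SOURCE`) and a stand-in URL value:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *auditlog_url = "http://auditlog.example";  /* stand-in value */
    char *url = NULL;

    /* asprintf allocates exactly as much space as the formatted string needs. */
    if (asprintf(&url, "%s/send", auditlog_url) < 0) {
        return 1;  /* allocation or formatting failed */
    }
    puts(url);
    free(url);
    return 0;
}
```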
OcMemoryLib: Handle appending entries from MMAP to MAT end
@@ -192,6 +192,7 @@ OcExpandAttributesByMap ( UINTN MatIndex; EFI_PHYSICAL_ADDRESS LastAddress; BOOLEAN DoneWithMat; + BOOLEAN LastMat; MatIndex = 0; Status = EFI_NOT_FOUND; @@ -204,13 +205,14 @@ OcExpandAttributesByMap ( DoneWithMat = FALSE; while (MatIndex < MemoryAttributesTable->NumberOfEntries && !DoneWithMat) { + LastMat = (MatIndex + 1) == MemoryAttributesTable->NumberOfEntries; if (MemoryAttributesEntry->PhysicalStart >= MemoryMap->PhysicalStart && MemoryAttributesEntry->PhysicalStart <= LastAddress) { // // We have an attribute for that memory map descriptor, assume it is parsed. // DoneWithMat = TRUE; - } else if (LastAddress < MemoryAttributesEntry->PhysicalStart) { + } else if (LastAddress < MemoryAttributesEntry->PhysicalStart || LastMat) { // // We have an attribute past the memory map descriptor, insert the new one here. // @@ -244,6 +246,12 @@ OcExpandAttributesByMap ( ++MemoryAttributesTable->NumberOfEntries; DoneWithMat = TRUE; Status = EFI_SUCCESS; + // + // Do not increment on last mat, as we may add multiple entries past last. + // + if (LastMat) { + break; + } } MemoryAttributesEntry = NEXT_MEMORY_DESCRIPTOR (
scripts: website: use wait time better see
@@ -53,15 +53,15 @@ netstat -tlpen # then start the backend; succeed if it was not started before.. kdb stop-rest-backend || /bin/true +# cleanup /tmp files from build +find /tmp -mindepth 1 -delete + # avoid 'address already in use' while netstat -tlpen | grep "$IP:$PORT" do sleep 1 # keep waiting (=downtime) short done -# cleanup /tmp files from build -find /tmp -mindepth 1 -delete - # now start again kdb run-rest-backend
error: fix out-of-memory error macro usage in kdbmerge
@@ -709,7 +709,7 @@ static char * getValuesAsArray (KeySet * ks, const Key * arrayStart, Key * infor } if (elektraRealloc ((void **) &buffer, bufferSize) < 0) { - ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey, "Memory allocation failed."); + ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey); elektraFree (buffer); keyDel (iterator); return NULL; @@ -773,14 +773,14 @@ static KeySet * ksFromArray (const char * array, int length, Key * informationKe KeySet * result = ksNew (0, KS_END); if (result == NULL) { - ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey, "Memory allocation failed."); + ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey); return NULL; } Key * iterator = keyNew ("/#0", KEY_END); if (iterator == NULL) { ksDel (result); - ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey, "Memory allocation failed."); + ELEKTRA_SET_OUT_OF_MEMORY_ERROR (informationKey); return NULL; } char * buffer = elektraCalloc (length + 1); // + 1 for terminating null character
remove vulkan from public releases for ios devices
@@ -289,6 +289,10 @@ when ($MAPSMOBI_BUILD_TARGET && $OS_IOS) { when ($MAPS_MOBILE_EXPORT_OBJC_API) { YRT_EXPORT=__attribute__((visibility(\"default\"))) } + when(!$MAPS_MOBILE_PUBLIC_API || $OS_IOSSIM) { + CFLAGS+=-DBUILDING_WITH_VULKAN_IOS + BUILDING_WITH_VULKAN_IOS=yes + } } when ($MAPSMOBI_BUILD_TARGET && $OS_LINUX) { when ($NO_GRAPHICS != "yes") {
py/runtime: Optimise to not create temp float for int to power negative.
@@ -456,8 +456,7 @@ mp_obj_t mp_binary_op(mp_binary_op_t op, mp_obj_t lhs, mp_obj_t rhs) { case MP_BINARY_OP_INPLACE_POWER: if (rhs_val < 0) { #if MICROPY_PY_BUILTINS_FLOAT - lhs = mp_obj_new_float(lhs_val); - goto generic_binary_op; + return mp_obj_float_binary_op(op, lhs_val, rhs); #else mp_raise_ValueError("negative power with no float support"); #endif
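The reason the negative-exponent case has to hand off to the float code at all: for an integer base, `base ** -n` is `1 / (base ** n)`, which is generally not an integer. The optimisation in the patch simply avoids boxing `lhs_val` into a temporary float object first. A small numeric illustration:

```c
#include <stdio.h>
#include <math.h>

int main(void)
{
    long base = 2;
    long exponent = -3;

    /* 2 ** -3 == 1 / (2 ** 3) == 0.125: representable only as a float. */
    double result = pow((double)base, (double)exponent);
    printf("%ld ** %ld = %g\n", base, exponent, result);
    return 0;
}
```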
Reset material in lovr.graphics.reset;
@@ -60,6 +60,7 @@ void lovrGraphicsReset() { lovrGraphicsSetDefaultFilter((TextureFilter) { .mode = FILTER_TRILINEAR }); lovrGraphicsSetDepthTest(COMPARE_LEQUAL); lovrGraphicsSetFont(NULL); + lovrGraphicsSetMaterial(NULL); lovrGraphicsSetLineWidth(1); lovrGraphicsSetPointSize(1); lovrGraphicsSetShader(NULL);
phb4: Use phb4_ioda_sel() more Use phb4_ioda_sel() in phb4_read_phb_status() rather than re-implementing it.
@@ -1795,7 +1795,6 @@ static void phb4_read_phb_status(struct phb4 *p, { uint16_t val = 0; uint32_t i; - uint64_t val64 = 0; uint64_t *pPEST; memset(stat, 0, sizeof(struct OpalIoPhb4ErrorData)); @@ -1905,17 +1904,13 @@ static void phb4_read_phb_status(struct phb4 *p, * resident tables. */ pPEST = (uint64_t *)p->tbl_pest; - val64 = PHB_IODA_AD_AUTOINC; - val64 = SETFIELD(PHB_IODA_AD_TSEL, val64, IODA3_TBL_PESTA); - phb4_write_reg_asb(p, PHB_IODA_ADDR, val64); + phb4_ioda_sel(p, IODA3_TBL_PESTA, 0, true); for (i = 0; i < OPAL_PHB4_NUM_PEST_REGS; i++) { stat->pestA[i] = phb4_read_reg_asb(p, PHB_IODA_DATA0); stat->pestA[i] |= pPEST[2 * i]; } - val64 = PHB_IODA_AD_AUTOINC; - val64 = SETFIELD(PHB_IODA_AD_TSEL, val64, IODA3_TBL_PESTB); - phb4_write_reg_asb(p, PHB_IODA_ADDR, val64); + phb4_ioda_sel(p, IODA3_TBL_PESTB, 0, true); for (i = 0; i < OPAL_PHB4_NUM_PEST_REGS; i++) { stat->pestB[i] = phb4_read_reg_asb(p, PHB_IODA_DATA0); stat->pestB[i] |= pPEST[2 * i + 1];