Columns: message (string, length 6–474), diff (string, length 8–5.22k)
BugID:17061547:Resolve GPIO Input read error for STM32L4
@@ -241,7 +241,6 @@ int32_t hal_gpio_output_toggle(gpio_dev_t *gpio) int32_t hal_gpio_input_get(gpio_dev_t *gpio, uint32_t *value) { - uint16_t pin = 0; GPIO_TypeDef* GPIOx = NULL; int32_t ret = 0; @@ -251,8 +250,7 @@ int32_t hal_gpio_input_get(gpio_dev_t *gpio, uint32_t *value) ret = get_gpio_group(gpio, &GPIOx); if (ret == 0) { - pin = get_gpio_pin(gpio->port); - *value = HAL_GPIO_ReadPin(GPIOx, pin); + *value = HAL_GPIO_ReadPin(GPIOx, get_gpio_pin(gpio->port)); }; return ret;
Fix bugs in repattern changes for zone verification
@@ -1107,7 +1107,7 @@ copy_pat_fixed(region_type* region, struct pattern_options* orig, orig->verify_zone = p->verify_zone; orig->verify_zone_is_default = p->verify_zone_is_default; orig->verifier_timeout = p->verifier_timeout; - orig->verifier_timeout_is_default = p->verifier_feed_zone_is_default; + orig->verifier_timeout_is_default = p->verifier_timeout_is_default; orig->verifier_feed_zone = p->verifier_feed_zone; orig->verifier_feed_zone_is_default = p->verifier_feed_zone_is_default; } @@ -2371,7 +2371,7 @@ config_apply_pattern(struct pattern_options *dest, const char* name) if(dest->verifier != NULL) { size_t size; for(cnt = 0; dest->verifier[cnt] != NULL; cnt++) { - size = strlen(dest->verifier[cnt]); + size = strlen(dest->verifier[cnt]) + 1; region_recycle( region, dest->verifier[cnt], size); }
UseWeights as in CPU
@@ -33,9 +33,8 @@ namespace NCatboostCuda { TMetricHolder Compute(const IGpuMetric* metric) final { CB_ENSURE(Point.GetObjectsSlice().Size(), "Set point first"); auto targets = Target.GetTarget().GetTargets().ConstCopyView(); - TConstVec weights; - if (metric->GetCpuMetric().UseWeights) { + if (metric->GetCpuMetric().UseWeights.IsIgnored() || metric->GetCpuMetric().UseWeights) { weights = Target.GetTarget().GetWeights().ConstCopyView(); } else { using TVec = typename TTarget::TVec;
haskell-debian-stretch: document memleak
- infos/provides = - infos/recommends = - infos/placements = getstorage setstorage -- infos/status = maintained experimental +- infos/status = maintained experimental memleak - infos/metadata = - infos/description = base for haskell plugins @@ -48,3 +48,7 @@ configuration. ## Limitations Currently the Haskell plugin support only executes tests written in C and not directly in Haskell. + +## Other + +The memleak is most likely a false positive due to the forked child process which exits afterwards.
Add mouse shortcut to expand settings panel Double-click on extra mouse button to open the settings panel (a single-click opens the notification panel). This is consistent with the keyboard shortcut MOD+n+n. PR <https://github.com/Genymobile/scrcpy/pull/2264>
@@ -693,7 +693,11 @@ input_manager_process_mouse_button(struct input_manager *im, return; } if (control && event->button == SDL_BUTTON_X2 && down) { + if (event->clicks < 2) { expand_notification_panel(im->controller); + } else { + expand_settings_panel(im->controller); + } return; } if (control && event->button == SDL_BUTTON_RIGHT) {
Use value setter for reverse start angle setting
@@ -151,7 +151,7 @@ void lv_rotary_set_value(lv_obj_t * rotary, int16_t value, lv_anim_enable_t anim lv_arc_set_start_angle( rotary, _lv_map(ext->cur_value, ext->min_value, ext->max_value, - ext->arc.arc_angle_start, ext->arc.bg_angle_start) + ext->arc.arc_angle_end, ext->arc.bg_angle_start) ); } else { lv_arc_set_end_angle( @@ -220,9 +220,8 @@ void lv_rotary_set_reverse(lv_obj_t * rotary, bool reverse) uint16_t end = ext->arc.arc_angle_end; ext->arc.arc_angle_end = ext->arc.bg_angle_end; - ext->arc.arc_angle_start= end; - lv_obj_invalidate(rotary); + lv_rotary_set_value(rotary, ext->cur_value, false); } /**
build: run unit tests on windows
@@ -180,6 +180,7 @@ jobs: CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - run: mingw32-make build/urbit + - run: mingw32-make test - run: > build/urbit -l -d -B ../../bin/solid.pill -F bus && curl -f --data '{"source":{"dojo":"+hood/exit"},"sink":{"app":"hood"}}'
add some function
@@ -38,6 +38,7 @@ class OMMath Eigen::Vector3f makeEigenVector3(float v1, float v2, float v3); Eigen::Matrix3f makeEigenMatrix3(float m11, float m12, float m13, float m21, float m22, float m23, float m31, float m32, float m33); Eigen::Matrix3f makeRotationMatrix(float roll, float pitch, float yaw); + Eigen::Matrix3f skewSymmetricMatrix(Eigen::Vector3f v); Eigen::Matrix3f rodriguesRotationMatrix(Eigen::Vector3f axis, float angle);
soprano: add frontend ssl request support
@@ -59,6 +59,21 @@ so_fewrite_cancel(so_stream_t *buf, uint32_t pid, uint32_t key) return 0; } +static inline int +so_fewrite_ssl_request(so_stream_t *buf, uint32_t pid, uint32_t key) +{ + int size = sizeof(uint32_t) + /* len */ + sizeof(uint32_t); /* special */ + int rc = so_stream_ensure(buf, size); + if (so_unlikely(rc == -1)) + return -1; + /* len */ + so_stream_write32(buf, size); + /* special */ + so_stream_write32(buf, 80877103); + return 0; +} + static inline int so_fewrite_terminate(so_stream_t *buf) {
epm: solve a permission-denied issue in containers. If the gRPC address /var/run/epm is created with 0600 permission by the epm service, the epm file can't be accessed by the epm client in a container, so /var/run/epm's permission needs to be changed to 0755.
@@ -67,7 +67,7 @@ func runServer(opts *options.Options, stopCh <-chan struct{}) error { // registry and start the cache pool manager server v1alpha1.RegisterEnclavePoolManagerServer(s, &server) // listen and serve - if err := os.MkdirAll(filepath.Dir(cfg.GRPC.Address), 0600); err != nil { + if err := os.MkdirAll(filepath.Dir(cfg.GRPC.Address), 0755); err != nil { return err } if err := unix.Unlink(cfg.GRPC.Address); err != nil && !os.IsNotExist(err) {
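A minimal Python sketch of the same idea, assuming sufficient privileges for /var/run and an illustrative socket path (the real service is written in Go): create the directory that holds the Unix socket with 0755 so a non-root client inside a container can traverse it, which a 0600 directory would prevent.

```python
import os
import socket

GRPC_ADDRESS = "/var/run/epm/epm.sock"  # illustrative path, not the project's exact address

# Create the parent directory with 0755 (subject to umask) so that clients
# running as other users can traverse it; 0600 would deny them at the directory.
os.makedirs(os.path.dirname(GRPC_ADDRESS), mode=0o755, exist_ok=True)

# Remove a stale socket left over from a previous run, if any.
try:
    os.unlink(GRPC_ADDRESS)
except FileNotFoundError:
    pass

server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(GRPC_ADDRESS)
server.listen(1)
```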
Don't show update if not running within deCONZ-autostart.sh script
@@ -280,10 +280,13 @@ void DeRestPluginPrivate::internetDiscoveryExtractVersionInfo(QNetworkReply *rep if (gwUpdateVersion != version) { DBG_Printf(DBG_INFO, "discovery found version %s for update channel %s\n", qPrintable(version), qPrintable(gwUpdateChannel)); + if (gwRunFromShellScript) + { gwUpdateVersion = version; updateEtag(gwConfigEtag); } } + } else { DBG_Printf(DBG_ERROR, "discovery reply doesn't contain valid version info for update channel %s\n", qPrintable(gwUpdateChannel));
android: travis - moving back to API 19
@@ -80,9 +80,9 @@ android: - extra-google-m2repository - extra-android-m2repository - build-tools-26.0.2 - - android-${RHO_ANDROID_LEVEL:-24} - - addon-google_apis-google-${RHO_ANDROID_LEVEL:-24} - + - android-${RHO_ANDROID_LEVEL:-19} + - addon-google_apis-google-${RHO_ANDROID_LEVEL:-19} + - sys-img-armeabi-v7a-android-26 env: global: - BUILD_ARTEFACTS_DIR=$HOME/build_artefacts @@ -109,7 +109,7 @@ matrix: - language: android os: linux - env: RHO_TARGET="android" RHO_APP="auto_common_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="24" NDK_NUMBER="17b" + env: RHO_TARGET="android" RHO_APP="auto_common_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="19" NDK_NUMBER="17b" #- language: android # os: linux @@ -117,7 +117,7 @@ matrix: - language: android os: linux - env: RHO_TARGET="android" RHO_APP="framework_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="24" NDK_NUMBER="17b" + env: RHO_TARGET="android" RHO_APP="framework_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="19" NDK_NUMBER="17b" #- language: android # os: linux @@ -129,11 +129,11 @@ matrix: - language: android os: linux - env: RHO_TARGET="android" RHO_APP="auto_common_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="24" NDK_NUMBER="15c" + env: RHO_TARGET="android" RHO_APP="auto_common_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="19" NDK_NUMBER="15c" - language: android os: linux - env: RHO_TARGET="android" RHO_APP="framework_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="24" NDK_NUMBER="15c" + env: RHO_TARGET="android" RHO_APP="framework_spec" CACHE_NAME="RVM_LINUX" RHO_ANDROID_LEVEL="19" NDK_NUMBER="15c" #- language: android # os: linux
Maintain backwards compatibility for Redis-py Redis-py unfortunately has backward compatibility issues, so we change the API based on the version.
@@ -223,12 +223,19 @@ def run_read_repair_test(c, num_keys=10): # Make sure that we have the dynomite reserved metadata key assert redis_conn.exists(ADD_SET_MD_KEY) == True - # Get the TS of our 'key' - add_ts = redis_conn.zscore(ADD_SET_MD_KEY, key) + # Get the TS of our 'key' and update it by 1. + add_ts = redis_conn.zscore(ADD_SET_MD_KEY, key) + 1.0 # Update the TS of that key to make it appear that this node has # the latest value. - redis_conn.zadd(ADD_SET_MD_KEY, add_ts + 1.0, key) - assert redis_conn.zscore(ADD_SET_MD_KEY, key) == add_ts + 1.0 + # Note: Redis-py unfortunately has backward compatibility issues, so we change the API + # based on the version. + # https://github.com/andymccurdy/redis-py/issues/1068#issuecomment-439175760 + if redis.VERSION[0] < 3: + redis_conn.zadd(ADD_SET_MD_KEY, add_ts, key) + else: + redis_conn.zadd(ADD_SET_MD_KEY, {key: add_ts}) + score = redis_conn.zscore(ADD_SET_MD_KEY, key) + assert score == add_ts , score # Update with a value that we want to repair with assert redis_conn.set(key, REPAIRED_VALUE) == True
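The version switch shown in the diff, as a self-contained Python sketch; it assumes a Redis server on localhost, and the key and member names are purely illustrative.

```python
import redis

r = redis.StrictRedis(host="localhost", port=6379)

def zadd_compat(conn, name, member, score):
    """Add or update a sorted-set member, handling the redis-py 2.x vs 3.x zadd API."""
    if redis.VERSION[0] < 3:
        # redis-py 2.x: score and member are passed positionally
        return conn.zadd(name, score, member)
    # redis-py 3.x: members are passed as a {member: score} mapping
    return conn.zadd(name, {member: score})

zadd_compat(r, "example_md_key", "example_member", 1234.0)
print(r.zscore("example_md_key", "example_member"))
```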
fix special char handling by removing the line that replaces \ with /
@@ -4,7 +4,7 @@ import json def read(filename): fileopen = open(filename, "r", encoding="utf-8") text = fileopen.read() - text = text.replace("\\", "/") + # text = text.replace("\\", "/") data = json.loads(text) fileopen.close()
docs: include powertools enablement in stateful
@@ -215,7 +215,7 @@ issue the following [sms](*\#*) (*\chrootinstall*) kernel # Include modules user environment -[sms](*\#*) (*\chrootinstall*) lmod-ohpc +[sms](*\#*) (*\chrootinstall*) --enablerepo=powertools lmod-ohpc \end{lstlisting} % end_ohpc_run
[Android] Cleanup makefile
@@ -160,8 +160,6 @@ LOCAL_CFLAGS := $(COMMON_CFLAGS) LOCAL_LDFLAGS := -lm -latomic ifeq ($(ANDROID_WITH_PTRACE),true) - LOCAL_C_INCLUDES := third_party/android/libunwind/include \ - third_party/android/capstone/include LOCAL_STATIC_LIBRARIES += libunwind-arch \ libunwind \ libunwind-ptrace \
Testing: Add tests for large constant fields (simple packing)
@@ -127,5 +127,20 @@ if [ $HAVE_AEC -eq 0 ]; then grep -q "CCSDS support not enabled. Please rebuild with -DENABLE_AEC=ON" $temp_err fi +# Large constant fields +# ----------------------- +input=${data_dir}/sample.grib2 +ECCODES_GRIB_LARGE_CONSTANT_FIELDS=0 ${tools_dir}/grib_set -d1 $input $temp +grib_check_key_equals $temp const,bitsPerValue,section7Length '1 0 5' + +ECCODES_GRIB_LARGE_CONSTANT_FIELDS=1 ${tools_dir}/grib_set -d1 $input $temp +grib_check_key_equals $temp const,bitsPerValue,section7Length '1 16 997' + +${tools_dir}/grib_set -s produceLargeConstantFields=0 -d1 $input $temp +grib_check_key_equals $temp const,bitsPerValue,section7Length '1 0 5' + +${tools_dir}/grib_set -s produceLargeConstantFields=1 -d1 $input $temp +grib_check_key_equals $temp const,bitsPerValue,section7Length '1 16 997' + rm -f $temp $temp_err
validation BUGFIX validate op checks
@@ -1806,7 +1806,7 @@ lyd_validate_op(struct lyd_node *op_tree, const struct lyd_node *dep_tree, enum struct lyd_node *op_node; uint32_t int_opts; - LY_CHECK_ARG_RET(NULL, op_tree, !op_tree->parent, !dep_tree || !dep_tree->parent, (data_type == LYD_TYPE_RPC_YANG) || + LY_CHECK_ARG_RET(NULL, op_tree, !dep_tree || !dep_tree->parent, (data_type == LYD_TYPE_RPC_YANG) || (data_type == LYD_TYPE_NOTIF_YANG) || (data_type == LYD_TYPE_REPLY_YANG), LY_EINVAL); LY_CHECK_CTX_EQUAL_RET(LYD_CTX(op_tree), dep_tree ? LYD_CTX(dep_tree) : NULL, LY_EINVAL); if (diff) { @@ -1820,8 +1820,23 @@ lyd_validate_op(struct lyd_node *op_tree, const struct lyd_node *dep_tree, enum int_opts = LYD_INTOPT_REPLY; } + if (op_tree->schema && (op_tree->schema->nodetype & (LYS_RPC | LYS_ACTION | LYS_NOTIF))) { + /* we have the operation/notification, adjust the pointers */ + op_node = op_tree; + while (op_tree->parent) { + op_tree = lyd_parent(op_tree); + } + } else { /* find the operation/notification */ + while (op_tree->parent) { + op_tree = lyd_parent(op_tree); + } LYD_TREE_DFS_BEGIN(op_tree, op_node) { + if (!op_node->schema) { + LOGVAL(LYD_CTX(op_tree), LYVE_DATA, "Invalid opaque node \"%s\" found.", LYD_NAME(op_node)); + return LY_EVALID; + } + if ((int_opts & (LYD_INTOPT_RPC | LYD_INTOPT_ACTION | LYD_INTOPT_REPLY)) && (op_node->schema->nodetype & (LYS_RPC | LYS_ACTION))) { break; @@ -1830,6 +1845,8 @@ lyd_validate_op(struct lyd_node *op_tree, const struct lyd_node *dep_tree, enum } LYD_TREE_DFS_END(op_tree, op_node); } + } + if (int_opts & (LYD_INTOPT_RPC | LYD_INTOPT_ACTION | LYD_INTOPT_REPLY)) { if (!(op_node->schema->nodetype & (LYS_RPC | LYS_ACTION))) { LOGERR(LYD_CTX(op_tree), LY_EINVAL, "No RPC/action to validate found.");
Straighten Quickstart
@@ -7,14 +7,16 @@ The next lines will start oidc-agent and the client registration process. eval `oidc-agent` oidc-gen <shortname> ``` -A client will be registered, but it misses the password grant type. -Contact an INDIGO IAM admin to update the client configuration. -All needed information are printed by oidc-gen. + +A client will be registered, but it will lack permission to use the +password grant type. Send the client-id displayed by oidc-gen to an +INDIGO IAM admin (Andrea) to update the client configuration. After the client configuration was updated by an admin, the account -configuration generation can be finished. +configuration generation can be finished. For this you need to provide +the clientconfig file generated during the previous call to oidc-gen to do +so. (Client configs are stored in <$HOME/.config/oidc-agent>.clientconfig) -Provide the clientconfig file to oidc-gen to do so. ``` oidc-gen -f <path_to_clientconfigfile> ``` @@ -44,6 +46,22 @@ To start oidc-agent and directly set the needed environment variables you can us eval `oidc-agent` ``` +### Persistence of oidc-agent +A simple way to make oidc-agent persistent is to include this line in your +`.bashrc`: +``` +test -e ~/tmp/oidc-agent.env && . ~/tmp/oidc-agent.env +``` +And to run the agent as `oidc-agent > ~/tmp/oidc-agent.env` +From now on every new shell should have access to the agent. + +You can test this with: +``` +oidc-token <shortname> +``` + +# General Usage + Using oidc-agent is made as easy as possible. In case you are lost oidc-agent provides a lot of information with its 'help' command, just call `oidc-agent --help`. ```
repo: MAINTAINERS instructions for multi pills
@@ -159,6 +159,37 @@ so that I can type e.g. `git mu origin/foo 1337`. If you're making a Vere release, just play it safe and update all the pills. +To produce multi pills, you will need to set up an environment with the +appropriate desks with the appropriate contents, doing something like the +following (where `> ` denotes an urbit command and `% ` denotes a unix shell +command): + +```console +> |merge %garden our %base +> |merge %landscape our %base +> |merge %bitcoin our %base +> |merge %webterm our %base +% rsync -avL --delete pkg/arvo/ zod/base/ +% for desk in garden landscape bitcoin webterm; do \ + rsync -avL --delete pkg/$desk/ zod/$desk/ \ + done +> |commit %base +> |commit %garden +> |commit %landscape +> |commit %bitcoin +> |commit %webterm +> .multi/pill +solid %base %garden %landscape %bitcoin %webterm +> .brass-multi/pill +brass %base %garden %landscape %bitcoin %webterm +``` + +And then of course: + +```console +> .solid/pill +solid +> .brass/pill +brass +> .ivory/pill +ivory +``` + For an Urbit OS release, after all the merge commits, make a release with the commit message "release: urbit-os-v1.0.xx". This commit should have up-to-date artifacts from pkg/interface and a new version number in the desk.docket-0 of
Fix typo in bytecode description.
newoption { trigger = "bytecode", - description = "Embed scripts as bytecode instead of stripped souce code" + description = "Embed scripts as bytecode instead of stripped source code" } newoption {
Correct some information in the OpenGL section of the developer documentation.
@@ -21,13 +21,13 @@ The sections of Qt that deal with OpenGL are :: qtbase/src/opengl qtbase/src/openglextensions - plugins/platforms/xcb/gl_integrations/xcb_glx + qtbase/src/plugins/platforms/xcb/gl_integrations/xcb_glx - platformsupport/glxconvenience + qtbase/src/platformsupport/glxconvenience The context creation is performed in :: - plugins/platforms/xcb/gl_integrations/xcb_glx/qglxintegration.cpp + qtbase/src/plugins/platforms/xcb/gl_integrations/xcb_glx/qglxintegration.cpp void QGLXContext::init(QXcbScreen *screen, QPlatformOpenGLContext *share)
doc: fixes release note extension
@@ -213,7 +213,7 @@ you up-to-date with the multi-language support provided by Elektra. - <<TODO>> - <<TODO>> - <<TODO>> -- Minor readability improvement in `CODING.md` +- Minor readability improvement in `CODING.md` _(@loessberth)_ - <<TODO>> - <<TODO>> - Fix dead link and compile instructions _(Burkhard Hampl @bhampl)_
doc(bindings): fix format for doc/pikascript
# PikaScript -# What is PikaScript ? +## What is PikaScript ? [PikaScript](https://github.com/pikasTech/pikascript) is a Python interpreter designed specifically for microcontrollers, and it supports a subset of the common Python3 syntax. @@ -12,7 +12,7 @@ It's smarter, with a unique C module mechanism that allows you to generate bindi --- -# Why PikaScript + LVGL ? +## Why PikaScript + LVGL ? PikaScript now supports the main features of LVGL8, and these APIs are fully compatible with Micropython! @@ -22,11 +22,11 @@ Enjoy detailed code hints down to the parameter type for a better programming ex Use a more convenient IDE, such as vs-based simulation projects -# So how does it look like? +## So how does it look like? Here are some examples of lvgl that PikaScript can already run, they are mainly from the lvgl documentation examples -## LV_ARC +### LV_ARC ```python import pika_lvgl as lv @@ -41,7 +41,7 @@ print('mem used max: %0.2f kB' % (mem.getMax())) print('mem used now: %0.2f kB' % (mem.getNow())) ``` -## LV_BAR +### LV_BAR ``` python import pika_lvgl as lv @@ -55,7 +55,7 @@ print('mem used max: %0.2f kB' % (mem.getMax())) print('mem used now: %0.2f kB' % (mem.getNow())) ``` -## LV_BTN +### LV_BTN ``` python import pika_lvgl as lv @@ -77,7 +77,7 @@ print('mem used max: %0.2f kB' % (mem.getMax())) print('mem used now: %0.2f kB' % (mem.getNow())) ``` -## LV_CHECKBOX +### LV_CHECKBOX ``` python import pika_lvgl as lv @@ -104,7 +104,7 @@ print('mem used now: %0.2f kB' % (mem.getNow())) --- -# How does it work? +## How does it work? PikaScript has a unique C module smart binding tool @@ -155,7 +155,7 @@ In addition to binding C modules, the precompiler compiles Python scripts to byt --- -# How can I use it? +## How can I use it? The simulation repo on vs is available on https://github.com/pikasTech/lv_pikascript
OpenCanopy: Support basic keyboard-based scrolling
@@ -185,6 +185,39 @@ InternalBootPickerSelectEntry ( Selector->Obj.OffsetX += (VolumeEntryObj->Width - Selector->Obj.Width) / 2; } +INT64 +InternelBootPickerScrollSelected ( + IN UINT32 Scale + ) +{ + CONST GUI_VOLUME_ENTRY *SelectedEntry; + INT64 EntryOffsetX; + INT64 ScrollDelta; + + ASSERT (mBootPicker.SelectedEntry != NULL); + // + // If the selected entry is outside of the view, scroll it accordingly. + // This function is called every time an entry is added or changed. + // Due to this internal design, the selected entry can never be outside of the + // view by more than one entry's size. + // + SelectedEntry = mBootPicker.SelectedEntry; + EntryOffsetX = mBootPicker.Hdr.Obj.OffsetX + SelectedEntry->Hdr.Obj.OffsetX; + ScrollDelta = (BOOT_ENTRY_WIDTH + BOOT_ENTRY_SPACE) * Scale; + + if (EntryOffsetX < 0) { + mBootPicker.Hdr.Obj.OffsetX += ScrollDelta; + return ScrollDelta; + } + + if (EntryOffsetX + SelectedEntry->Hdr.Obj.Width > mBootPickerView.Width) { + mBootPicker.Hdr.Obj.OffsetX -= ScrollDelta; + return -ScrollDelta; + } + + return 0; +} + VOID InternalBootPickerChangeEntry ( IN OUT GUI_VOLUME_PICKER *This, @@ -195,6 +228,9 @@ InternalBootPickerChangeEntry ( ) { GUI_VOLUME_ENTRY *PrevEntry; + INT64 ScrollOffset; + INT64 DrawX; + UINT32 DrawWidth; ASSERT (This != NULL); ASSERT (DrawContext != NULL); @@ -211,6 +247,8 @@ InternalBootPickerChangeEntry ( PrevEntry = This->SelectedEntry; InternalBootPickerSelectEntry (This, NewEntry); + ScrollOffset = InternelBootPickerScrollSelected (DrawContext->Scale); + if (ScrollOffset == 0) { // // To redraw the entry *and* the selector, draw the entire height of the // Picker object. For this, the height just reach from the top of the entries @@ -233,6 +271,30 @@ InternalBootPickerChangeEntry ( This->Hdr.Obj.Height, TRUE ); + } else { + // + // The X coordinate of the view changed by scrolling, so adjust it. + // The entire view plus an entry prior or following need to be drawn. + // + if (ScrollOffset < 0) { + DrawX = BaseX + ScrollOffset; + DrawWidth = (UINT32) (This->Hdr.Obj.Width - ScrollOffset); + } else { + DrawX = BaseX; + DrawWidth = (UINT32) (This->Hdr.Obj.Width + ScrollOffset); + } + // + // The entry list has been scrolled, redraw the entire view. + // + GuiDrawScreen ( + DrawContext, + DrawX, + BaseY, + DrawWidth, + This->Hdr.Obj.Height, + TRUE + ); + } // // Set voice timeout to N frames from now. @@ -969,6 +1031,10 @@ BootPickerEntriesAdd ( GuiContext->BootEntry = Entry; } + if (mBootPicker.SelectedEntry != NULL) { + InternelBootPickerScrollSelected (GuiContext->Scale); + } + return EFI_SUCCESS; }
Build: Fix openssl lookup for macbuild
@@ -12,6 +12,7 @@ matrix: compiler: clang script: + - HOMEBREW_NO_AUTO_UPDATE=1 brew install openssl - "./macbuild.tool" deploy: @@ -32,6 +33,7 @@ matrix: compiler: clang before_install: + - HOMEBREW_NO_AUTO_UPDATE=1 brew install openssl - curl -Ls https://entrust.com/root-certificates/entrust_l1k.cer -o ~/entrust_l1k.crt || exit 1 - curl -LS https://curl.haxx.se/ca/cacert.pem -o ~/cacert.pem || exit 1 - cat ~/entrust_l1k.crt >> ~/cacert.pem || exit 1
Update run_rocm_test.sh for QA testing. Add a ROCM_TARGET_LST path to avoid using a user-defined GPU list, preventing failures.
# # +# Use bogus path to avoid using target.lst, a user-defined target list +# used by rocm_agent_enumerator. +export ROCM_TARGET_LST=/opt/nowhere + scriptdir=$(dirname "$0") parentdir=`eval "cd $scriptdir;pwd;cd - > /dev/null"` aompdir="$(dirname "$parentdir")"
Fix leak on server start error Fail on SDL_strdup() failure, and free the duplicated serial on further error.
@@ -152,15 +152,20 @@ SDL_bool server_start(struct server *server, const char *serial, Uint16 local_po if (serial) { server->serial = SDL_strdup(serial); + if (!server->serial) { + return SDL_FALSE; + } } if (!push_server(serial)) { + SDL_free((void *) server->serial); return SDL_FALSE; } server->server_copied_to_device = SDL_TRUE; if (!enable_tunnel(server)) { + SDL_free((void *) server->serial); return SDL_FALSE; } @@ -177,6 +182,7 @@ SDL_bool server_start(struct server *server, const char *serial, Uint16 local_po if (server->server_socket == INVALID_SOCKET) { LOGE("Could not listen on port %" PRIu16, local_port); disable_tunnel(server); + SDL_free((void *) server->serial); return SDL_FALSE; } } @@ -188,6 +194,7 @@ SDL_bool server_start(struct server *server, const char *serial, Uint16 local_po close_socket(&server->server_socket); } disable_tunnel(server); + SDL_free((void *) server->serial); return SDL_FALSE; }
White space handling
@@ -118,11 +118,11 @@ int bscrypt_base64_decode(char *target, char *encoded, int base64_len) { target = encoded; int written = 0; char tmp1, tmp2, tmp3, tmp4; - while (base64_len >= 4) { while (*encoded == '\r' || *encoded == '\n' || *encoded == ' ') { base64_len--; encoded++; } + while (base64_len >= 4) { tmp1 = *(encoded++); tmp2 = *(encoded++); tmp3 = *(encoded++); @@ -137,6 +137,12 @@ int bscrypt_base64_decode(char *target, char *encoded, int base64_len) { base64_len -= 4; // count written bytes written += 3; + // skip white space + while (base64_len && + (*encoded == '\r' || *encoded == '\n' || *encoded == ' ')) { + base64_len--; + encoded++; + } } // deal with the "tail" of the mis-encoded stream - this shouldn't happen tmp1 = 0;
OnlineChecks: disable default options
@@ -97,10 +97,10 @@ BEGIN CONTROL "Enable VirusTotal detection highlighting",IDC_ENABLE_IDC_ENABLE_VIRUSTOTAL_HIGHLIGHT, "Button",BS_AUTOCHECKBOX | WS_DISABLED | WS_TABSTOP,7,20,141,10 PUSHBUTTON "Close",IDCANCEL,129,82,50,14 - CONTROL "Enable detection actions",IDC_CHECK1,"Button",BS_AUTOCHECKBOX | WS_TABSTOP,7,34,94,10 - EDITTEXT IDC_EDIT1,91,47,37,14,ES_AUTOHSCROLL + CONTROL "Enable detection actions",IDC_CHECK1,"Button",BS_AUTOCHECKBOX | WS_DISABLED | WS_TABSTOP,7,34,94,10 + EDITTEXT IDC_EDIT1,91,47,37,14,ES_AUTOHSCROLL | WS_DISABLED LTEXT "Minimum detection ratio:",IDC_STATIC,8,50,79,8 - COMBOBOX IDC_COMBO1,91,65,88,14,CBS_DROPDOWN | CBS_SORT | WS_VSCROLL | WS_TABSTOP + COMBOBOX IDC_COMBO1,91,65,88,14,CBS_DROPDOWN | CBS_SORT | WS_DISABLED | WS_VSCROLL | WS_TABSTOP LTEXT "Minimum detection action:",IDC_STATIC,7,67,84,8 END
hw/mcu/dialog: Put OTPC to deep standby This saves a lot of power.
@@ -101,6 +101,14 @@ void SystemInit(void) CRG_TOP->PMU_CTRL_REG |= CRG_TOP_PMU_CTRL_REG_RETAIN_CACHE_Msk; #endif + /* Switch OTPC to deep standby (DSTBY) mode */ + CRG_TOP->CLK_AMBA_REG |= CRG_TOP_CLK_AMBA_REG_OTP_ENABLE_Msk; + OTPC->OTPC_MODE_REG = (OTPC->OTPC_MODE_REG & + ~OTPC_OTPC_MODE_REG_OTPC_MODE_MODE_Msk) | + (1 << OTPC_OTPC_MODE_REG_OTPC_MODE_MODE_Pos); + while (!(OTPC->OTPC_STAT_REG & OTPC_OTPC_STAT_REG_OTPC_STAT_MRDY_Msk)); + CRG_TOP->CLK_AMBA_REG &= ~CRG_TOP_CLK_AMBA_REG_OTP_ENABLE_Msk; + /* XXX temporarily enable PD_COM and PD_PER since we do not control them */ da1469x_pd_acquire(MCU_PD_DOMAIN_COM); da1469x_pd_acquire(MCU_PD_DOMAIN_PER);
address last warnings as seen by gcc7
@@ -237,7 +237,7 @@ static inline void get_cpumap(int node, unsigned long * node_info) { if(k!=0){ name[k]='\0'; affinity[count++] = strtoul(name, &dummy, 16); - k=0; + // k=0; } // 0-63bit -> node_info[0], 64-128bit -> node_info[1] .... // revert the sequence @@ -293,7 +293,7 @@ static inline void get_share(int cpu, int level, unsigned long * share) { if(k!=0){ name[k]='\0'; affinity[count++] = strtoul(name, &dummy, 16); - k=0; + // k=0; } // 0-63bit -> node_info[0], 64-128bit -> node_info[1] .... // revert the sequence
Fix warning, this chunk of code wasn't very useful.
static const constexpr clap_version m = CLAP_VERSION; int main(int, char **) { - clap_param_info param_info; - param_info.module[0] = 'm'; - return !clap_version_is_compatible(m); } \ No newline at end of file
quic: little more simplification
@@ -560,11 +560,13 @@ def build_quic_trace_result(event, fields): load_common_fields(rv, event) for k in fields: rv[k] = getattr(event, k) + if k == "token_preview": + rv[k] = binascii.hexlify(rv[k]) return rv def handle_quic_event(cpu, data, size): ev = b["events"].event(data) - if allowed_quic_event and line.type != allowed_quic_event: + if allowed_quic_event and ev.type != allowed_quic_event: return if ev.type == "accept": @@ -587,7 +589,6 @@ def handle_quic_event(cpu, data, size): res = build_quic_trace_result(ev, ["max_lost_pn", "inflight", "cwnd"]) elif ev.type == "new_token_send": res = build_quic_trace_result(ev, ["token_preview", "len", "token_generation"]) - res["token_preview"] = binascii.hexlify(res["token_preview"]) elif ev.type == "new_token_acked": res = build_quic_trace_result(ev, ["token_generation"]) elif ev.type == "streams_blocked_send":
[Polar] Fix test failure
@@ -22,7 +22,7 @@ import ( // initSvc select Polarises to connect, or disable polaris func TestPolarisConnectSvc_initSvc(t *testing.T) { - polarisIDMain, _ := peer.IDB58Decode("16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF") + polarisIDMain, _ := peer.IDB58Decode("16Uiu2HAkuxyDkMTQTGFpmnex2SdfTVzYfPztTyK339rqUdsv3ZUa") polarisIDTest, _ := peer.IDB58Decode("16Uiu2HAkvJTHFuJXxr15rFEHsJWnyn1QvGatW2E9ED9Mvy4HWjVF") dummyPeerID2, _ := peer.IDB58Decode("16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm") polar2 := "/ip4/172.21.1.2/tcp/8915/p2p/16Uiu2HAmFqptXPfcdaCdwipB2fhHATgKGVFVPehDAPZsDKSU7jRm"
Update extensions tarball version to 4.1 This drops unused libraries and programs from the extensions tarball. Unused dependencies that were dropped: - clapack - gimli - json-c - net-snmp - pcre
<dependency org="OpenSSL" name="openssl" rev="1.0.2l" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64;aix7_ppc_64->aix7_ppc_64" /> <dependency org="gnu" name="libstdc" rev="6.0.22" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->suse11_x86_64;sles11_x86_64->suse11_x86_64" /> <dependency org="third-party" name="ext" rev="1.1" conf="win32->win32" /> - <dependency org="third-party" name="ext" rev="gpdb5_ext-3.4" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" /> + <dependency org="third-party" name="ext" rev="gpdb6_ext-4.1" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" /> <dependency org="Hyperic" name="sigar" rev="1.6.5" conf="rhel6_x86_64->rhel6_x86_64;rhel7_x86_64->rhel7_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" /> <dependency org="R-Project" name="R" rev="3.1.0" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->suse10_x86_64;sles11_x86_64->suse10_x86_64" /> <dependency org="Python" name="python-gpdb5" rev="2.7.12" conf="rhel6_x86_64->rhel6_x86_64;rhel7_x86_64->rhel6_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" />
fix out of bounds selection crash
@@ -108,8 +108,16 @@ export const RollChannelSelectionAreaFwd = ({ (e: MouseEvent) => { if (gridRef.current && tool === "selection" && e.button === 0) { const bounds = gridRef.current.getBoundingClientRect(); - const x = Math.floor((e.pageX - bounds.left) / cellSize) * cellSize; - const y = Math.floor((e.pageY - bounds.top) / cellSize) * cellSize; + const x = clamp( + Math.floor((e.pageX - bounds.left) / cellSize) * cellSize, + 0, + 63 * cellSize + ); + const y = clamp( + Math.floor((e.pageY - bounds.top) / cellSize) * cellSize, + 0, + 12 * 6 * cellSize - cellSize + ); const newSelectionRect = { x, y, width: cellSize, height: cellSize }; @@ -163,7 +171,6 @@ export const RollChannelSelectionAreaFwd = ({ const width = Math.abs(selectionOrigin.x - x2); const height = Math.abs(selectionOrigin.y - y2); - console.log(x); setSelectionRect({ x, y, width, height }); const selectedCells = selectCellsInRange([], selectionRect);
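The clamping pattern from this fix, restated as a hedged Python sketch; the cell size and grid dimensions are illustrative constants taken from the diff.

```python
def clamp(value, lo, hi):
    """Constrain value to the inclusive range [lo, hi]."""
    return max(lo, min(value, hi))

CELL_SIZE = 16        # illustrative
GRID_COLS = 64        # the diff caps x at 63 * cellSize
GRID_ROWS = 12 * 6    # the diff caps y at 12 * 6 * cellSize - cellSize

def snap_to_grid(page_x, page_y, bounds_left, bounds_top):
    """Snap a pointer position to a cell without ever leaving the grid."""
    x = clamp(((page_x - bounds_left) // CELL_SIZE) * CELL_SIZE,
              0, (GRID_COLS - 1) * CELL_SIZE)
    y = clamp(((page_y - bounds_top) // CELL_SIZE) * CELL_SIZE,
              0, GRID_ROWS * CELL_SIZE - CELL_SIZE)
    return x, y

# A click far outside the grid still maps to a valid cell instead of
# producing an out-of-bounds selection rectangle.
print(snap_to_grid(5000, 9000, 0, 0))
```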
Tighten struct field restrictions re pointers.
@@ -382,14 +382,9 @@ func (c *Checker) checkFields(fields []*a.Node, banPtrTypes bool) error { if err := q.tcheckTypeExpr(f.XType(), 0); err != nil { return fmt.Errorf("%v in field %q", err, f.Name().String(c.tm)) } - if banPtrTypes { - for x := f.XType(); x.Inner() != nil; x = x.Inner() { - if x.Decorator().Key() == t.KeyPtr { - // TODO: implement nptr (nullable pointer) types. - return fmt.Errorf("check: ptr type %q not allowed for field %q; use nptr instead", - x.String(c.tm), f.Name().String(c.tm)) - } - } + if banPtrTypes && f.XType().HasPointers() { + return fmt.Errorf("check: pointer-containing type %q not allowed for field %q", + f.XType().String(c.tm), f.Name().String(c.tm)) } if dv := f.DefaultValue(); dv != nil { if f.XType().Decorator() != 0 {
Remove unnecessary define guard The GD32VF103 family only has USB-OTG peripherals.
#define USE_SOF 0 #if defined (STM32F105x8) || defined (STM32F105xB) || defined (STM32F105xC) || \ - defined (STM32F107xB) || defined (STM32F107xC) || defined (GD32VF103) + defined (STM32F107xB) || defined (STM32F107xC) #define STM32F1_SYNOPSYS #endif
ble_mesh: Fix compile error when using release optimization level
@@ -606,7 +606,7 @@ static void bt_mesh_scan_cb(const bt_mesh_addr_t *addr, s8_t rssi, { #if (CONFIG_BLE_MESH_PROVISIONER && CONFIG_BLE_MESH_PB_GATT) || \ CONFIG_BLE_MESH_GATT_PROXY_CLIENT - u16_t uuid; + u16_t uuid = 0x0; #endif if (adv_type != BLE_MESH_ADV_NONCONN_IND && adv_type != BLE_MESH_ADV_IND) {
Add w->proto valid check for Widget_CheckType()
@@ -112,6 +112,9 @@ LCUI_BOOL Widget_CheckType( LCUI_Widget w, const char *type ) if( strcmp( w->type, type ) == 0 ) { return TRUE; } + if( !w->proto ) { + return FALSE; + } for( proto = w->proto->proto; proto; proto = proto->proto ) { if( strcmp( proto->name, type ) == 0 ) { return TRUE;
Fix uno getting detected as dos
@@ -23,7 +23,7 @@ void detect_board_type(void) { // SPI lines floating: white (TODO: is this reliable? Not really, we have to enable ESP/GPS to be able to detect this on the UART) set_gpio_output(GPIOC, 14, 1); set_gpio_output(GPIOC, 5, 1); - if(!detect_with_pull(GPIOB, 1, PULL_UP)){ + if(!detect_with_pull(GPIOB, 1, PULL_UP) && detect_with_pull(GPIOB, 15, PULL_UP)){ hw_type = HW_TYPE_DOS; current_board = &board_dos; } else if((detect_with_pull(GPIOA, 4, PULL_DOWN)) || (detect_with_pull(GPIOA, 5, PULL_DOWN)) || (detect_with_pull(GPIOA, 6, PULL_DOWN)) || (detect_with_pull(GPIOA, 7, PULL_DOWN))){
Accurately list htslib/samtools/bcftools contents in MANIFEST.in Rewrite these inclusions so that the sdist tarball contains everything checked into the repository. In particular: * Add new htscodecs source files and build scripts * Add LICENSE files and READMEs * Exclude new config_vars.h and samtools_config_vars.h generated headers
@@ -23,36 +23,27 @@ include win32/*.[ch] prune tests/ # samtools -include samtools/configure -include samtools/config.mk.in -include samtools/config.h.in -include samtools/*.h -include samtools/*.c -exclude samtools/config.h -include samtools/*/*.h +include samtools/LICENSE samtools/README samtools/lz4/LICENSE +recursive-include samtools *.[ch] +include samtools/version.sh +exclude samtools/*config*.h # bcftools -include bcftools/*.h -include bcftools/*.c -exclude bcftools/config.h +include bcftools/LICENSE bcftools/README +include bcftools/*.[ch] +include bcftools/version.sh +exclude bcftools/*config*.h # htslib -include htslib/*.c -include htslib/*.h -include htslib/INSTALL -include htslib/NEWS -exclude htslib/config.h -include htslib/Makefile -include htslib/htslib_vars.mk -include htslib/configure -include htslib/config.mk.in -include htslib/config.h.in -include htslib/htslib.pc.in -include htslib/htslib/*.h -include htslib/cram/*.c -include htslib/cram/*.h -include htslib/os/*.c -include htslib/os/*.h +include htslib/LICENSE htslib/README +recursive-include htslib *.[ch] +exclude htslib/*config*.h + +include htslib/configure.ac htslib/m4/*.m4 htslib/*.in +include htslib/configure htslib/version.sh +include htslib/Makefile htslib/*.mk +exclude htslib/config.mk htslib/htscodecs.mk + include cy_build.py include requirements.txt
tglrvp: Enable Thunderbolt-compatible mode BRANCH=none TEST=Able to detect Thunderbolt-compatible device
/* Enabling SOP* communication */ #define CONFIG_USB_PD_DECODE_SOP +/* Enabling Thunderbolt-compatible mode */ +#define CONFIG_USB_PD_TBT_COMPAT_MODE + #ifndef __ASSEMBLER__ enum tglrvp_charge_ports {
memleaktest with MSVC's AddressSanitizer Disabling memleaktest under MSVC because leak detection is not a supported feature with MSVC's AddressSanitizer. Leaving ASan enabled in this case causes a test failure because the test suite is expecting the leak to be detected. CLA: trivial
# endif #endif /* If __SANITIZE_ADDRESS__ isn't defined, define it to be false */ -#ifndef __SANITIZE_ADDRESS__ +/* Leak detection is not yet supported with MSVC on Windows, so */ +/* set __SANITIZE_ADDRESS__ to false in this case as well. */ +#if !defined(__SANITIZE_ADDRESS__) || defined(_MSC_VER) +# undef __SANITIZE_ADDRESS__ # define __SANITIZE_ADDRESS__ 0 #endif
tools/tcpconnect: Support IPv6 DNS Add IPv6 DNS support for tcpconnect. Fixes
@@ -322,6 +322,31 @@ delete_and_return: return 0; } +#include <uapi/linux/udp.h> + +int trace_udpv6_recvmsg(struct pt_regs *ctx) +{ + struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM2(ctx); + struct udphdr *hdr = (void*)skb->head + skb->transport_header; + struct dns_data_t *event; + int zero = 0; + void *data; + + /* hex(53) = 0x0035, htons(0x0035) = 0x3500 */ + if (hdr->source != 0x3500) + return 0; + + /* skip UDP header */ + data = skb->data + 8; + + event = dns_data.lookup(&zero); + if (!event) + return 0; + + bpf_probe_read(event->pkt, sizeof(event->pkt), data); + dns_events.perf_submit(ctx, event, sizeof(*event)); + return 0; +} """ if args.count and args.dns: @@ -510,6 +535,7 @@ b.attach_kretprobe(event="tcp_v6_connect", fn_name="trace_connect_v6_return") if args.dns: b.attach_kprobe(event="udp_recvmsg", fn_name="trace_udp_recvmsg") b.attach_kretprobe(event="udp_recvmsg", fn_name="trace_udp_ret_recvmsg") + b.attach_kprobe(event="udpv6_queue_rcv_one_skb", fn_name="trace_udpv6_recvmsg") print("Tracing connect ... Hit Ctrl-C to end") if args.count:
Name each Lily58 side differently.
-if SHIELD_LILY58_LEFT || SHIELD_LILY58_RIGHT +if SHIELD_LILY58_LEFT config ZMK_KEYBOARD_NAME - default "Lily58" + default "Lily58 Left" + +endif + +if SHIELD_LILY58_RIGHT + +config ZMK_KEYBOARD_NAME + default "Lily58 Right" endif
ease: update icheck suppression
@@ -28,3 +28,15 @@ elektraUnsignedLongLongToString elektraFloatToString elektraDoubleToString elektraLongDoubleToString +kdb_octet_t +kdb_boolean_t +kdb_short_t +kdb_long_t +kdb_long_long_t +kdb_unsigned_short_t +kdb_unsigned_long_t +kdb_unsigned_long_long_t +kdb_char_t +kdb_float_t +kdb_double_t +kdb_long_double_t
Change exit_checker comment on returned status
@@ -91,9 +91,9 @@ my %hooks = ( # exit_checker is used by run() directly after completion of a command. # it receives the exit code from that command and is expected to return - # 1 (for success) or 0 (for failure). This is the value that will be - # returned by run(). - # NOTE: When run() gets the option 'capture => 1', this hook is ignored. + # 1 (for success) or 0 (for failure). This is the status value that run() + # will give back (through the |statusvar| referens and as returned value + # when capture => 1 doesn't apply). exit_checker => sub { return shift == 0 ? 1 : 0 }, );
cache exit status
TEST_LOGS="" MAKEFLAGS="" +status=0 source ./common/TEST_ENV || exit 1 source ./common/functions || exit 1 @@ -22,14 +23,16 @@ for compiler in llvm4 $COMPILER_FAMILIES ; do echo "User Environment: Compiler tests: $compiler" echo "-------------------------------------------------------" - module purge || exit 1 - module load $compiler || exit 1 + module purge + module load $compiler ./configure || exit 1 make clean || exit 1 - make -j 4 check || exit 1 + make -j 4 -k check || status=1 save_logs_compiler_family tests $compiler make distclean done + +exit ${status}
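The same "remember the failure, keep testing" pattern as a hedged Python sketch; the compiler list and the CC environment variable (a stand-in for `module load $compiler`) are illustrative.

```python
import os
import subprocess
import sys

COMPILERS = ["llvm4", "gnu9", "intel"]  # illustrative compiler families

status = 0
for compiler in COMPILERS:
    env = dict(os.environ, CC=compiler)  # stand-in for `module load $compiler`
    # Like `make -k check`: record that something failed, but keep going so
    # the remaining compiler families are still exercised.
    result = subprocess.run(["make", "-j", "4", "-k", "check"], env=env)
    if result.returncode != 0:
        status = 1

sys.exit(status)
```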
Multihit glitch fix: added the "multihit glitch" lines to the translation file
@@ -150,6 +150,10 @@ msgid "Infinite Lives Off" msgstr "" msgid "Infinite Lives On" msgstr "" +msgid "Multihit Glitch Off" +msgstr "" +msgid "Multihit Glitch On" +msgstr "" msgid "Level:" msgstr "" msgid "Life bonus"
Fixes the p2p_feefilter functional test
@@ -57,25 +57,28 @@ class FeeFilterTest(RavenTestFramework): NetworkThread().start() test_node.wait_for_verack() - # Test that invs are received for all txs at feerate of 70 sat/byte - node1.settxfee(Decimal("0.00070000")) + # Test that invs are received for all txs at feerate of 2,000,000 sats + node1.settxfee(Decimal("0.02000000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() - # Set a filter of 60 sat/byte - test_node.send_and_ping(msg_feefilter(60000)) + # Set a filter of 1,500,000 sats (must be above 1,000,000 sats (min fee is enforced) + test_node.send_and_ping(msg_feefilter(1500000)) # Test that txs are still being received (paying 70 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() - # Change tx fee rate to 50 sat/byte and test they are no longer received - node1.settxfee(Decimal("0.00050000")) + # Change tx fee rate to 1,350,000 sats and test they are no longer received + node1.settxfee(Decimal("0.013500000")) [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] sync_mempools(self.nodes) # must be sure node 0 has received all txs + # Raise the tx fee back up above the mintxfee, submit 1 tx on node 0, + # then sync nodes 0 and 1 - we should only have 1 tx (this one below since + # the one above was below the min txfee). # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). This is @@ -83,7 +86,7 @@ class FeeFilterTest(RavenTestFramework): # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. - node0.settxfee(Decimal("0.1000000")) + node0.settxfee(Decimal("0.01600000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] # assert(allInvsMatch(txids, test_node)) test_node.clear_invs()
VERSION bump to version 0.11.27
@@ -32,7 +32,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 11) -set(LIBNETCONF2_MICRO_VERSION 26) +set(LIBNETCONF2_MICRO_VERSION 27) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
Testing: add test for grib_compare with index files
@@ -50,11 +50,25 @@ EOF diff $tempRef $tempOut - ${tools_dir}/grib_index_build -k mars.levtype -o $tempIndex ${data_dir}/tigge_cf_ecmwf.grib2 |\ grep -q "mars.levtype = { sfc, pl, pv, pt }" ${tools_dir}/grib_index_build -k mars.levtype:i -o $tempIndex ${data_dir}/tigge_cf_ecmwf.grib2 |\ grep -q "mars.levtype = { 103, 1, 106, 100, 101, 8, 109, 107 }" +# grib_compare with index files +# ----------------------------- +tempIndex1=temp.$$.1.idx +tempIndex2=temp.$$.2.idx +tempGribFile1=temp.index.$$.file1.grib +tempGribFile2=temp.index.$$.file2.grib +cat ${data_dir}/high_level_api.grib2 ${data_dir}/sample.grib2 > $tempGribFile1 +cat ${data_dir}/sample.grib2 ${data_dir}/high_level_api.grib2 > $tempGribFile2 + +${tools_dir}/grib_index_build -o $tempIndex1 $tempGribFile1 +${tools_dir}/grib_index_build -o $tempIndex2 $tempGribFile2 +${tools_dir}/grib_compare $tempIndex1 $tempIndex2 +rm -f $tempIndex1 $tempIndex2 $tempGribFile1 $tempGribFile2 + +# Clean up rm -f $tempIndex $tempOut $tempRef
switch bcc to use a single instance per bcc process; the instance dir is bcc_pid; fixed issue
@@ -373,14 +373,16 @@ void * bpf_attach_kprobe(int progfd, enum bpf_probe_attach_type attach_type, con close(kfd); if (access("/sys/kernel/debug/tracing/instances", F_OK) != -1) { - snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/%s", new_name); + snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid()); + if (access(buf, F_OK) == -1) { if (mkdir(buf, 0755) == -1) goto retry; - n = snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/%s/events/%ss/%s", - new_name, event_type, new_name); + } + n = snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d/events/%ss/%s", + getpid(), event_type, new_name); if (n < sizeof(buf) && bpf_attach_tracing_event(progfd, buf, reader, pid, cpu, group_fd) == 0) goto out; - snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/%s", new_name); + snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid()); rmdir(buf); } retry: @@ -471,7 +473,7 @@ int bpf_detach_kprobe(const char *ev_name) { char buf[256]; int ret = bpf_detach_probe(ev_name, "kprobe"); - snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/%s_bcc_%d", ev_name, getpid()); + snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/instances/bcc_%d", getpid()); if (access(buf, F_OK) != -1) { rmdir(buf); }
Test external param entity with no further external parser
@@ -4357,6 +4357,7 @@ external_entity_devaluer(XML_Parser parser, "<!ENTITY % e1 SYSTEM 'bar'>\n" "%e1;\n"; XML_Parser ext_parser; + int clear_handler = (intptr_t)XML_GetUserData(parser); if (systemId == NULL || !strcmp(systemId, "bar")) return XML_STATUS_OK; @@ -4365,6 +4366,8 @@ external_entity_devaluer(XML_Parser parser, ext_parser = XML_ExternalEntityParserCreate(parser, context, NULL); if (ext_parser == NULL) fail("Could note create external entity parser"); + if (clear_handler) + XML_SetExternalEntityRefHandler(ext_parser, NULL); if (_XML_Parse_SINGLE_BYTES(ext_parser, text, strlen(text), XML_TRUE) == XML_STATUS_ERROR) xml_failure(ext_parser); @@ -4379,6 +4382,18 @@ START_TEST(test_undefined_ext_entity_in_external_dtd) XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS); XML_SetExternalEntityRefHandler(parser, external_entity_devaluer); + XML_SetUserData(parser, (void *)(intptr_t)XML_FALSE); + if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text), + XML_TRUE) == XML_STATUS_ERROR) + xml_failure(parser); + + /* Now repeat without the external entity ref handler invoking + * another copy of itself. + */ + XML_ParserReset(parser, NULL); + XML_SetParamEntityParsing(parser, XML_PARAM_ENTITY_PARSING_ALWAYS); + XML_SetExternalEntityRefHandler(parser, external_entity_devaluer); + XML_SetUserData(parser, (void *)(intptr_t)XML_TRUE); if (_XML_Parse_SINGLE_BYTES(parser, text, strlen(text), XML_TRUE) == XML_STATUS_ERROR) xml_failure(parser);
http_server: api/v1: metrics: expand helper text for Prometheus
@@ -210,6 +210,12 @@ flb_sds_t metrics_help_txt(char *metric_name, flb_sds_t *metric_helptxt) else if (strstr(metric_name, "output_proc_bytes")) { return flb_sds_cat(*metric_helptxt, " Number of processed output bytes.\n", 35); } + else if (strstr(metric_name, "output_dropped_records")) { + return flb_sds_cat(*metric_helptxt, " Number of dropped records.\n", 28); + } + else if (strstr(metric_name, "output_retried_records")) { + return flb_sds_cat(*metric_helptxt, " Number of retried records.\n", 28); + } else { return (flb_sds_cat(*metric_helptxt, " Fluentbit metrics.\n", 20)); }
evp_test: use the test file name as the test title
@@ -2077,6 +2077,7 @@ static int do_test_file(const char *testfile) char buf[10240]; EVP_TEST t; + set_test_title(testfile); current_test_file = testfile; if (!TEST_ptr(in = BIO_new_file(testfile, "rb"))) return 0;
Use macos-latest for CI.
@@ -9,7 +9,7 @@ on: jobs: build-conan: - runs-on: macos-10.15 + runs-on: macos-latest timeout-minutes: 120 steps: - name: Checkout source code @@ -22,10 +22,6 @@ jobs: run: | conan profile new default --detect conan profile update settings.build_type=Release default - conan profile update settings.compiler=apple-clang default - conan profile update settings.compiler.cppstd=17 default - conan profile update settings.compiler.libcxx=libc++ default - conan profile update settings.compiler.version=12.0 default - name: Install Dependencies env: CONAN_BUILD_OPTIONS: |
wireguard: Document wireguard async mode default Type: improvement
@@ -206,12 +206,12 @@ define wireguard_peers_details { /** \brief Wireguard Set Async mode @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request - @param async_enable - wireguard async mode on or off + @param async_enable - wireguard async mode on or off, default off */ autoreply define wg_set_async_mode { u32 client_index; u32 context; - bool async_enable; + bool async_enable [default=false]; }; /*
[mechanics] fix axis min/max in JointStopR
@@ -62,7 +62,7 @@ JointStopR::JointStopR(SP::NewtonEulerJointR joint, SP::SiconosVector pos, , _dir(dir) { _axisMin = 100; - _axisMax = -100; + _axisMax = 0; for (int i=0; i < _axis->size(); i++) { if ((*_axis)[i] > _axisMax) _axisMax = (*_axis)[i];
adds documentation building functions
language: python -# Caching GSL and CLASS +# Caching all the depencencies so that they don't have to be redownloaded and +# compiled everytime cache: directories: - $HOME/build/EiffL/CCL/build/GSL/src @@ -8,6 +9,15 @@ cache: - $HOME/build/EiffL/CCL/build/SWIG/src - $HOME/build/EiffL/CCL/build/CLASS/src +#addons: +# apt: +# packages: +# - texlive-latex-recommended +# - texlive-latex-extra +# - texlive-fonts-recommended +# - texlive-fonts-extra +# - dvipng + os: - linux - osx @@ -25,3 +35,15 @@ script: - python setup.py test - sudo make -Cbuild install - check_ccl + +# Check why the note creation process crashes +# - make -C doc/0000-ccl_note +#after_success: +# if [[ -n "$GITHUB_API_KEY" ]; then +# git checkout --orphan pdf +# git rm -rf . +# cp doc/0000-ccl_note/main.pdf 0000-ccl_note.pdf +# git add -f 0000-ccl_note.pdf +# git -c user.name='travis' -c user.email='travis' commit -m init +# git push -q -f https://drphilmarshall:[email protected]/DarkEnergyScienceCollaboration/CCL pdf +# fi
Android build script fixes
@@ -2,6 +2,7 @@ import os import sys import shutil import argparse +import string from build.sdk_build_utils import * ANDROID_TOOLCHAINS = { @@ -134,7 +135,7 @@ def buildAndroidAAR(args): distDir = getDistDir('android') version = args.buildversion - with open('%s/scripts/build-aar/carto-mobile-sdk.pom.template' % baseDir, 'r') as f: + with open('%s/scripts/android-aar/carto-mobile-sdk.pom.template' % baseDir, 'r') as f: pomFile = string.Template(f.read()).safe_substitute({ 'baseDir': baseDir, 'buildDir': buildDir, 'distDir': distDir, 'version': version }) pomFileName = '%s/carto-mobile-sdk.pom' % buildDir with open(pomFileName, 'w') as f:
Update check_sig_alg_match() to work with provided keys Use EVP_PKEY_is_a() to check whether an EVP_PKEY is compatible with the given signature.
@@ -366,16 +366,15 @@ static int setup_crldp(X509 *x) /* Check that issuer public key algorithm matches subject signature algorithm */ static int check_sig_alg_match(const EVP_PKEY *issuer_key, const X509 *subject) { - int signer_nid, subj_sig_nid; + int subj_sig_nid; if (issuer_key == NULL) return X509_V_ERR_NO_ISSUER_PUBLIC_KEY; - signer_nid = EVP_PKEY_base_id(issuer_key); if (OBJ_find_sigid_algs(OBJ_obj2nid(subject->cert_info.signature.algorithm), NULL, &subj_sig_nid) == 0) return X509_V_ERR_UNSUPPORTED_SIGNATURE_ALGORITHM; - if (signer_nid == EVP_PKEY_type(subj_sig_nid) - || (signer_nid == NID_rsaEncryption && subj_sig_nid == NID_rsassaPss)) + if (EVP_PKEY_is_a(issuer_key, OBJ_nid2sn(subj_sig_nid)) + || (EVP_PKEY_is_a(issuer_key, "RSA") && subj_sig_nid == NID_rsassaPss)) return X509_V_OK; return X509_V_ERR_SIGNATURE_ALGORITHM_MISMATCH; }
[chainmaker][#440] add BoatGetTimes implementation
@@ -87,3 +87,10 @@ void BoatSleep(BUINT32 second) { sleep(second); } + +BUINT64 BoatGetTimes() +{ + BUINT64 timesec = 0; + time(&timesec); + return timesec; +} \ No newline at end of file
VERSION bump to version 1.3.70
@@ -31,7 +31,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 1) set(SYSREPO_MINOR_VERSION 3) -set(SYSREPO_MICRO_VERSION 69) +set(SYSREPO_MICRO_VERSION 70) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
rust/async_usb: impl Sync to concentrate unsafe code in one place It is also a bit safer then the previous `mut` prefix workaround as it prevents mutating the state without going through the RefCell.
@@ -41,17 +41,16 @@ enum UsbTaskState { ResultAvailable(UsbOut), } +/// A safer version of UsbTaskState. RefCell so we cannot accidentally borrow illegally. +struct SafeUsbTaskState(RefCell<UsbTaskState>); + +/// Safety: this implements Sync even though it is not thread safe. This is okay, as we +/// run only in a single thread in the BitBox02. +unsafe impl Sync for SafeUsbTaskState {} + /// Executor main state. Currently we only have at most one task at a time (usb api processing /// task). -/// -/// It is `mut` because without it, Rust requires thread safety via -/// Sync: https://doc.rust-lang.org/reference/items/static-items.html: -/// > Mutable statics have the same restrictions as normal statics, -/// except that the type does not > have to implement the Sync trait. -/// It is de-facto immutable (the contents in the RefCell is -/// internally mutable instead), so all access is safe, as we run only -/// single-threaded. -static mut USB_TASK_STATE: RefCell<UsbTaskState> = RefCell::new(UsbTaskState::Nothing); +static USB_TASK_STATE: SafeUsbTaskState = SafeUsbTaskState(RefCell::new(UsbTaskState::Nothing)); /// Spawn a task to be spinned by the executor. This moves the state /// from Nothing to Running. @@ -61,7 +60,7 @@ pub fn spawn<F>(workflow: fn(UsbIn) -> F, usb_in: &[u8]) where F: core::future::Future<Output = UsbOut> + 'static, { - let mut state = unsafe { USB_TASK_STATE.borrow_mut() }; + let mut state = USB_TASK_STATE.0.borrow_mut(); match *state { UsbTaskState::Nothing => { let task: Task<UsbOut> = Box::pin(workflow(usb_in.to_vec())); @@ -78,7 +77,7 @@ where /// If this spin finishes the task, the state is moved to /// `ResultAvailable`, which contains the result. pub fn spin() { - let mut state = unsafe { USB_TASK_STATE.borrow_mut() }; + let mut state = USB_TASK_STATE.0.borrow_mut(); match *state { UsbTaskState::Running(ref mut task) => { let result = spin_task(task); @@ -107,7 +106,7 @@ pub enum CopyResponseErr { /// pending and a response is expected in the future, or `Err(false)` /// if no task is running. pub fn copy_response(dst: &mut [u8]) -> Result<usize, CopyResponseErr> { - let mut state = unsafe { USB_TASK_STATE.borrow_mut() }; + let mut state = USB_TASK_STATE.0.borrow_mut(); match *state { UsbTaskState::Nothing => Err(CopyResponseErr::NotRunning), UsbTaskState::Running(_) => Err(CopyResponseErr::NotReady), @@ -123,7 +122,7 @@ pub fn copy_response(dst: &mut [u8]) -> Result<usize, CopyResponseErr> { /// Cancel and drop a running task. Returns true if a task was cancelled, false if no task was /// running. pub fn cancel() -> bool { - let mut state = unsafe { USB_TASK_STATE.borrow_mut() }; + let mut state = USB_TASK_STATE.0.borrow_mut(); if let UsbTaskState::Running(_) = *state { *state = UsbTaskState::Nothing; return true;
bio: check for valid socket when closing Fixes coverity 271258 Improper use of negative value (NEGATIVE_RETURNS)
@@ -335,7 +335,7 @@ int BIO_accept_ex(int accept_sock, BIO_ADDR *addr_, int options) */ int BIO_closesocket(int sock) { - if (closesocket(sock) < 0) + if (sock < 0 || closesocket(sock) < 0) return 0; return 1; }
use JAVA_TOOL_OPTIONS instead of _JAVA_OPTIONS
@@ -368,16 +368,17 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) void initJavaAgent() { - //TODO: - // - check if we are in a java process - // - preserve existing _JAVA_OPTIONS char *var = getenv("LD_PRELOAD"); if (var != NULL) { + /* + set JAVA_TOOL_OPTIONS so that JVM can load libscope.so as a java agent + https://docs.oracle.com/javase/8/docs/platform/jvmti/jvmti.html#tooloptions + */ char buf[1024]; snprintf(buf, sizeof(buf), "-agentpath:%s", var); - int result = setenv("_JAVA_OPTIONS", buf, 1); + int result = setenv("JAVA_TOOL_OPTIONS", buf, 1); if (result) { - scopeLog("ERROR: Could not set _JAVA_OPTIONS failed\n", -1, CFG_LOG_ERROR); + scopeLog("ERROR: Could not set JAVA_TOOL_OPTIONS failed\n", -1, CFG_LOG_ERROR); } } } \ No newline at end of file
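A hedged Python sketch of the mechanism this change relies on: the JVM reads extra startup options from JAVA_TOOL_OPTIONS, so exporting an -agentpath entry before launching a Java process makes the JVM load the agent. The library path is illustrative, and unlike the original code this sketch preserves any options already present.

```python
import os
import subprocess

agent_path = "/usr/lib/libscope.so"  # illustrative; the real code takes this from LD_PRELOAD

env = dict(os.environ)
existing = env.get("JAVA_TOOL_OPTIONS", "")
env["JAVA_TOOL_OPTIONS"] = (existing + " " if existing else "") + f"-agentpath:{agent_path}"

# The JVM prints "Picked up JAVA_TOOL_OPTIONS: ..." and loads the agent on startup.
subprocess.run(["java", "-version"], env=env)
```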
test-suite: disable C++ test with hypre - the example it ships with no longer works as it relies on header files which are not included with the install.
@@ -36,9 +36,9 @@ TESTS += ex9 check_PROGRAMS += ex9 ex9_SOURCES = ex9.c -TESTS += ex10 -check_PROGRAMS += ex10 -ex10_SOURCES = ex10.cxx +## TESTS += ex10 +## check_PROGRAMS += ex10 +## ex10_SOURCES = ex10.cxx TESTS += ex11 check_PROGRAMS += ex11
arvo: remove refactoring comments
:: pyt: cached types :: [sac=worm vil=vile] - :: +refine-moves: move list from vase (was +said) + :: +refine-moves: move list from vase :: ++ refine-moves |= vax=vase =^ mov sac (refine-move hed) =^ moz sac $(vax tal) [[mov moz] sac] - :: +refine-move: move from vase (was in +sump) + :: +refine-move: move from vase :: ++ refine-move |= vax=vase =^ yat sac (~(spot wa sac) 3 vax) =^ del sac (refine-ball yat) [[duc del] sac] - :: +refine-ball: ball from vase (was in +sump) + :: +refine-ball: ball from vase :: ++ refine-ball |= vax=vase [[%pass wire vane hil] sac] :: [%slip vane=term card] - :: XX remove - :: =/ vane ~> %mean.'bad-vane-label' ?> ((sane %tas) vane.q.vax) ?> ?=(?(%pass %give) -.p.bal) [[%hurl goof p.bal] sac] == - :: +refine-card: card from vase (was +song) + :: +refine-card: card from vase :: ++ refine-card |= vax=vase (~(slym wa sac) vax sam) :: |% - :: +slix: en-hypo XX remove + :: +slix: en-hypo :: ++ slix |= hil=maze
Fix issue: all variables without a $ prefix lost their last character due to an off-by-one error
@@ -30,7 +30,7 @@ struct cmd_results *cmd_set(int argc, char **argv) { if (!tmp) { return cmd_results_new(CMD_FAILURE, "set", "Not possible to create variable $'%s'", argv[0]); } - snprintf(tmp, size, "$%s", argv[0]); + snprintf(tmp, size+1, "$%s", argv[0]); argv[0] = tmp; }
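The off-by-one exists because snprintf's size argument must also cover the terminating NUL: a '$' plus an N-character name occupies N+1 visible characters, so the limit has to be N+2 or the last character is truncated. A self-contained illustration (prefix_dollar is a made-up name, not sway's helper):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char *prefix_dollar(const char *name)
{
    size_t size = 1 + strlen(name);   /* visible characters: '$' + name */
    char *tmp = malloc(size + 1);     /* +1 for the '\0' */

    if (tmp != NULL)
        snprintf(tmp, size + 1, "$%s", name);  /* limit includes the '\0' */
    return tmp;
}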
Lowering OMV2 sensor clock to work with the higher sensor PLL.
#define OMV_XCLK_SOURCE (OMV_XCLK_TIM) // Sensor external clock timer frequency. -#define OMV_XCLK_FREQUENCY (8000000) +#define OMV_XCLK_FREQUENCY (5000000) // Have built-in RGB->LAB table. #define OMV_HAVE_LAB_TABLE
parser: json: do not stop packing on time lookup error. If the parser has time lookup enabled and the lookup fails, do not skip the record; instead, add a more verbose message about the exception.
@@ -38,10 +38,12 @@ int flb_parser_json_do(struct flb_parser *parser, char *mp_buf = NULL; char *time_key; char *tmp_out_buf = NULL; + char tmp[255]; size_t tmp_out_size = 0; size_t off = 0; size_t map_size; size_t mp_size; + size_t len; msgpack_sbuffer mp_sbuf; msgpack_packer mp_pck; msgpack_unpacked result; @@ -153,10 +155,19 @@ int flb_parser_json_do(struct flb_parser *parser, ret = flb_parser_time_lookup((char *) v->via.str.ptr, v->via.str.size, 0, parser, &tm, &tmfrac); if (ret == -1) { - msgpack_unpacked_destroy(&result); - return *out_size; + len = v->via.str.size; + if (len > sizeof(tmp) - 1) { + len = sizeof(tmp) - 1; + } + memcpy(tmp, v->via.str.ptr, len); + tmp[len] = '\0'; + flb_warn("[parser:%s] Invalid time format %s for '%s'.", + parser->name, parser->time_fmt, tmp); + time_lookup = time(NULL); } + else { time_lookup = flb_parser_tm2time(&tm); + } /* Compose a new map without the time_key field */ msgpack_sbuffer_init(&mp_sbuf);
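The new fallback path has to log a msgpack string that is not NUL-terminated, so it copies at most sizeof(tmp)-1 bytes into a scratch buffer and terminates it before printing. The same idiom in isolation (bounded_copy is an illustrative name; dst_size is assumed to be at least 1):

#include <stddef.h>
#include <string.h>

static void bounded_copy(char *dst, size_t dst_size,
                         const char *src, size_t src_len)
{
    size_t len = src_len;

    if (len > dst_size - 1)
        len = dst_size - 1;
    memcpy(dst, src, len);
    dst[len] = '\0';   /* now safe to pass to printf-style loggers */
}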
VERSION bump to version 0.12.59
@@ -34,7 +34,7 @@ set(CMAKE_C_FLAGS_DEBUG "-g -O0") # set version set(LIBNETCONF2_MAJOR_VERSION 0) set(LIBNETCONF2_MINOR_VERSION 12) -set(LIBNETCONF2_MICRO_VERSION 58) +set(LIBNETCONF2_MICRO_VERSION 59) set(LIBNETCONF2_VERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION}.${LIBNETCONF2_MICRO_VERSION}) set(LIBNETCONF2_SOVERSION ${LIBNETCONF2_MAJOR_VERSION}.${LIBNETCONF2_MINOR_VERSION})
Makefile/mac: sort vars
@@ -70,6 +70,8 @@ ifeq ($(OS)$(findstring Microsoft,$(KERNEL)),Linux) # matches Linux but excludes else ifeq ($(OS),Darwin) ARCH := DARWIN + ARCH_SRCS := $(sort $(wildcard mac/*.c)) + # MacOS-X grep seem to use colors unconditionally GREP_COLOR = --color=never @@ -119,8 +121,6 @@ else ifeq ($(OS),Darwin) -framework CommerceKit $(CRASH_REPORT) XCODE_VER := $(shell xcodebuild -version | grep $(GREP_COLOR) "^Xcode" | cut -d " " -f2) - - ARCH_SRCS := $(sort $(wildcard mac/*.c)) # OS Darwin else ifeq ($(OS),NetBSD) ARCH := NETBSD
graph-store: no-op if we already have *any* of the indexes in an %add-nodes event
|^ =/ [=graph:store mark=(unit mark:store)] (~(got by graphs) resource) + :: no-op if we already have any of the nodes + ?: (check-for-duplicates resource ~(key by nodes)) + ::~& nooped-due-to-duplicate-nodes+[resource nodes] + [~ state] =/ =update-log:store (~(got by update-logs) resource) =. update-log (put:orm-log update-log time [%0 time [%add-nodes resource nodes]]) (add-node-list resource graph mark (sort-nodes nodes)) == :: + ++ check-for-duplicates + |= [=resource:store nodes=(set index:store)] + ^- ? + =| has-duplicates=_| + =/ node-list ~(tap in nodes) + |- + ?~ node-list + has-duplicates + =? has-duplicates (has-node resource i.node-list) %.y + $(node-list t.node-list) + :: + ++ has-node + |= [=resource:store =index:store] + ^- ? + =/ parent-graph=(unit marked-graph:store) + (~(get by graphs) resource) + ?~ parent-graph %.n + =/ node=(unit node:store) ~ + =/ =graph:store p.u.parent-graph + |- + ?~ index + ?=(^ node) + ?~ t.index + ?=(^ (get:orm graph i.index)) + =. node (get:orm graph i.index) + ?~ node %.n + ?- -.children.u.node + %empty %.n + %graph $(graph p.children.u.node, index t.index) + == + :: ++ sort-nodes |= nodes=(map index:store node:store) ^- (list [index:store node:store])
Verbose log enhancement: print client info when a client exits. It could be useful for debugging to know which client got disconnected.
@@ -2166,7 +2166,11 @@ void readQueryFromClient(connection *conn) { return; } } else if (nread == 0) { - serverLog(LL_VERBOSE, "Client closed connection"); + if (server.verbosity <= LL_VERBOSE) { + sds info = catClientInfoString(sdsempty(), c); + serverLog(LL_VERBOSE, "Client closed connection %s", info); + sdsfree(info); + } freeClientAsync(c); return; } else if (c->flags & CLIENT_MASTER) {
Update data.yml: fix failing entries
@@ -1054,7 +1054,7 @@ classes: - ea: 0x1417DE5F0 Client::UI::Info::InfoProxyPageInterface: vtbls: -#fail - ea: 0x1417DE650 + - ea: 0x1417DE650 base: Client::UI::Info::InfoProxyInterface Common::Configuration::ConfigBase::ChangeEventInterface: vtbls: @@ -2953,7 +2953,7 @@ classes: - ea: 0x141842970 Client::UI::Shell::RaptureShellCommandInterface: vtbls: -#fail - ea: 0x141844220 + - ea: 0x141842980 base: Component::Shell::ShellCommandInterface Client::UI::Shell::RaptureShellModule: vtbls: @@ -3270,7 +3270,7 @@ classes: base: Client::UI::Shell::RaptureShellCommandInterface Client::UI::Shell::ShellCommandHotbarBase: vtbls: -#fail - ea: 0x141846CF8 + - ea: 0x141845458 base: Client::UI::Shell::RaptureShellCommandInterface Client::UI::Shell::ShellCommandHotbar: vtbls:
Added Fractal sound driver includes when module is enabled
#ifndef _GENESIS_H_ #define _GENESIS_H_ -#define SGDK_VERSION 1.70 +#define SGDK_VERSION 1.71 #include "types.h" #include "ext/mw/megawifi.h" #endif +#if (MODULE_FRACTAL != 0) +#include "ext/fractal/fractal.h" +#include "ext/fractal/visual_dbg.h" +#endif + // preserve compatibility with old resources name #define logo_lib sgdk_logo #define font_lib font_default
Fix Mac build. Not sure why I'm getting a linker error with these not marked as static. Should investigate.
#pragma once #include "base.h" -inline Glyph gbuffer_peek(Gbuffer gbuf, Usz height, Usz width, Usz y, Usz x) { +static inline Glyph gbuffer_peek(Gbuffer gbuf, Usz height, Usz width, Usz y, + Usz x) { assert(y < height && x < width); (void)height; return gbuf[y + width + x]; } -inline Glyph gbuffer_peek_relative(Gbuffer gbuf, Usz height, Usz width, Usz y, - Usz x, Isz delta_y, Isz delta_x) { +static inline Glyph gbuffer_peek_relative(Gbuffer gbuf, Usz height, Usz width, + Usz y, Usz x, Isz delta_y, + Isz delta_x) { Isz y0 = (Isz)y + delta_y; Isz x0 = (Isz)x + delta_x; if (y0 < 0 || x0 < 0 || (Usz)y0 >= height || (Usz)x0 >= width) @@ -16,15 +18,16 @@ inline Glyph gbuffer_peek_relative(Gbuffer gbuf, Usz height, Usz width, Usz y, return gbuf[(Usz)y0 * width + (Usz)x0]; } -inline void gbuffer_poke(Gbuffer gbuf, Usz height, Usz width, Usz y, Usz x, - Glyph g) { +static inline void gbuffer_poke(Gbuffer gbuf, Usz height, Usz width, Usz y, + Usz x, Glyph g) { assert(y < height && x < width); (void)height; gbuf[y * width + x] = g; } -inline void gbuffer_poke_relative(Gbuffer gbuf, Usz height, Usz width, Usz y, - Usz x, Isz delta_y, Isz delta_x, Glyph g) { +static inline void gbuffer_poke_relative(Gbuffer gbuf, Usz height, Usz width, + Usz y, Usz x, Isz delta_y, Isz delta_x, + Glyph g) { Isz y0 = (Isz)y + delta_y; Isz x0 = (Isz)x + delta_x; if (y0 < 0 || x0 < 0 || (Usz)y0 >= height || (Usz)x0 >= width)
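In C99/C11 a plain inline definition in a header does not emit an external definition; if the compiler declines to inline a call (typical at -O0), the linker then expects exactly one translation unit to provide the symbol via an extern inline declaration, and missing that produces exactly this kind of undefined-symbol error depending on toolchain defaults. Marking the functions static inline gives every includer its own internal copy, so no external definition is ever required. A minimal illustration with a hypothetical header function:

/* util.h (hypothetical): portable, no out-of-line definition needed anywhere. */
static inline int clamp_to_byte(int v)
{
    return v < 0 ? 0 : (v > 255 ? 255 : v);
}

/* The non-static spelling would additionally need, in exactly one .c file:
 *     extern inline int clamp_to_byte(int v);
 * to emit the symbol the linker may look for. */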
Update jsapi.c: fix typo... oops.
@@ -775,7 +775,7 @@ static duk_ret_t duk_reset(duk_context* duk) return 0; } -static const char* const Apis[] = API_KEYWORDS; +static const char* const ApiKeywords[] = API_KEYWORDS; static const struct{duk_c_function func; s32 params;} ApiFunc[] = { {NULL, 0},
Hack to make it boot.
@@ -357,8 +357,8 @@ doJob serf job = do bootJob :: HasLogFunc e => Serf e -> Job -> RIO e (Job, SerfState) bootJob serf job = do doJob serf job >>= \case - (job, ss, []) -> pure (job, ss) - (job, ss, fx) -> throwIO (EffectsDuringBoot (jobId job) fx) + (job, ss, _) -> pure (job, ss) +-- (job, ss, fx) -> throwIO (EffectsDuringBoot (jobId job) fx) replayJob :: HasLogFunc e => Serf e -> Job -> RIO e SerfState replayJob serf job = do
sctp tests only for supported platforms
@@ -21,10 +21,10 @@ tests_general.append([1, 0, workdir + 'client_http_get -u /cgi-bin/he -v 2 not.r tests_general.append([1, 0, workdir + 'client_http_get -u /cgi-bin/he -v 2 buildbot.nplab.de']) tests_general.append([0, 0, workdir + 'client_http_get -n 2 -u /files/4M bsd10.nplab.de']) tests_general.append([0, 0, workdir + 'tneat -n 1000 -v 3 -L -P ' + workdir + 'prop_tcp.json 127.0.0.1']) -tests_general.append([0, 0, workdir + 'tneat -n 1000 -v 3 -L -P ' + workdir + 'prop_sctp.json 127.0.0.1']) if (platform.system() == "FreeBSD") or (platform.system() == "Linux"): tests_general.append([1, 0, workdir + 'client_http_get -P ' + workdir + 'prop_tcp_security.json -p 443 -v 2 ec.europa.eu']) tests_general.append([0, 0, workdir + 'tneat -v 2 -P ' + workdir + 'prop_sctp_dtls.json interop.fh-muenster.de']) + tests_general.append([0, 0, workdir + 'tneat -n 1000 -v 3 -L -P ' + workdir + 'prop_sctp.json 127.0.0.1']) #tests_general.append([0, 0, 'python3.5 ../../policy/pmtests.py'])
Improve sctp_get_frag_point
@@ -6812,13 +6812,15 @@ sctp_get_frag_point(struct sctp_tcb *stcb) overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } else { - overhead += sizeof(struct sctp_idata_chunk); + overhead += sizeof(struct sctp_data_chunk); if (sctp_auth_is_required_chunk(SCTP_DATA, asoc->peer_auth_chunks)) { overhead += sctp_get_auth_chunk_len(asoc->peer_hmac_id); } } + KASSERT(overhead % 4 == 0, + ("overhead (%u) not a multiple of 4", overhead)); /* Consider padding. */ - if (asoc->smallest_mtu % 4) { + if (asoc->smallest_mtu % 4 > 0) { overhead += (asoc->smallest_mtu % 4); } KASSERT(asoc->smallest_mtu > overhead,
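The surrounding logic keeps the per-chunk overhead a multiple of 4 (the new KASSERT) and then charges any stray MTU bytes to the overhead as well, so the remaining payload budget stays 4-byte aligned and no padding has to be invented per chunk later. The same arithmetic in isolation (frag_point is a generic sketch; the caller is assumed to guarantee mtu > overhead, as the existing KASSERT does):

/* With overhead already a multiple of 4, charging mtu % 4 to it leaves a
 * payload budget (mtu - overhead) that is also a multiple of 4. */
static unsigned int frag_point(unsigned int mtu, unsigned int overhead)
{
    if (mtu % 4 > 0)
        overhead += mtu % 4;
    return mtu - overhead;
}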
Corrected some position combinations when reading/writing backup memory
@@ -38,11 +38,11 @@ HRESULT Library_nf_hardware_stm32_native_nanoFramework_Hardware_Stm32_BackupMemo // check if the store address is starting at a register address boundary remainder = (uint32_t)((uint32_t *)storeAddress) % sizeof(RTC_BKP0R_Msk); + registerAddress = (uint32_t *)(storeAddress - remainder); if (remainder > 0) { // read register - registerAddress = (uint32_t *)(storeAddress - remainder); tempRegisterValue = *registerAddress; // adjust remainder to the amount of bytes to move @@ -96,15 +96,14 @@ HRESULT Library_nf_hardware_stm32_native_nanoFramework_Hardware_Stm32_BackupMemo // read register tempRegisterValue = *registerAddress; - // adjust remainder to the amount of bytes to move - remainder = 4 - remainder; - + int offset = 3; do { - *data = (uint8_t)(tempRegisterValue >> (remainder * 8)); + *data = (uint8_t)(tempRegisterValue >> (offset * 8)); remainder--; data++; counter++; + offset--; } while (remainder); } @@ -134,6 +133,7 @@ HRESULT Library_nf_hardware_stm32_native_nanoFramework_Hardware_Stm32_BackupMemo uint32_t counter = 0; uint32_t remainder; int32_t index = 3; + uint32_t tempMask, tempLen; // sanity check for out of range position if (position > (BACKUP_SIZE - 1)) @@ -148,18 +148,25 @@ HRESULT Library_nf_hardware_stm32_native_nanoFramework_Hardware_Stm32_BackupMemo // check if the store address is starting at a register address boundary remainder = (uint32_t)((uint32_t *)storeAddress) % sizeof(RTC_BKP0R_Msk); + registerAddress = (uint32_t *)(storeAddress - remainder); if (remainder > 0) { // read register - registerAddress = (uint32_t *)(storeAddress - remainder); tempRegisterValue = *registerAddress; // adjust remainder to the amount of bytes to move remainder = 4 - remainder; // clear the bytes we'll be filling - tempRegisterValue &= (uint32_t)(0xFFFFFF00 << ((remainder - 1) * 8)); + tempMask = 0x00000000; + tempLen = (dataLength > 2) ? 3 : (dataLength > 1) ? 2 : 1; + for (int i = remainder - 1; i >= 0 && tempLen > 0; i--, tempLen--) + { + tempMask |= (uint32_t)(0x000000FF << (i * 8)); + } + + tempRegisterValue &= ~tempMask; do { @@ -218,12 +225,14 @@ HRESULT Library_nf_hardware_stm32_native_nanoFramework_Hardware_Stm32_BackupMemo // adjust remainder to the amount of bytes to move remainder = 4 - remainder; + int offset = 3; do { - tempRegisterValue |= (uint32_t)((uint32_t)*data << (remainder * 8)); + tempRegisterValue |= (uint32_t)((uint32_t)*data << (offset * 8)); remainder--; data++; counter++; + offset--; } while (remainder); // write back register
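Both paths boil down to a read-modify-write of 32-bit backup registers: build a mask that covers only the bytes being replaced, clear them, then shift the new bytes into place (the patch shifts from offset 3 downwards, so the first byte of the stream lands in the register's top byte). A stand-alone sketch of that masking, with hypothetical helper names and assuming first + count <= 4:

#include <stdint.h>

static uint32_t byte_mask(unsigned int first, unsigned int count)
{
    uint32_t mask = 0;

    for (unsigned int i = 0; i < count; i++)
        mask |= (uint32_t)0xFF << ((3u - (first + i)) * 8u);
    return mask;
}

static void poke_bytes(volatile uint32_t *reg, unsigned int first,
                       const uint8_t *data, unsigned int count)
{
    uint32_t value = *reg;                 /* read */

    value &= ~byte_mask(first, count);     /* clear only the target bytes */
    for (unsigned int i = 0; i < count; i++)
        value |= (uint32_t)data[i] << ((3u - (first + i)) * 8u);
    *reg = value;                          /* write back */
}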
remove Z axis bias on accelerometer / better settings to fight drift
@@ -27,11 +27,11 @@ extern debug_type debug; // filter time in seconds // time to correct gyro readings using the accelerometer // 1-4 are generally good -#define FILTERTIME 2.0 +#define FILTERTIME 5.0 // accel magnitude limits for drift correction -#define ACC_MIN 0.7f -#define ACC_MAX 1.3f +#define ACC_MIN 0.9f +#define ACC_MAX 1.1f float GEstG[3] = { 0, 0, ACC_1G }; @@ -119,7 +119,7 @@ void imu_calc(void) // remove bias accel[0] = accel[0] - accelcal[0]; accel[1] = accel[1] - accelcal[1]; - + accel[2] = accel[2] - accelcal[2]; // reduce to accel in G for (int i = 0; i < 3; i++)
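FILTERTIME is the time constant of the gyro/accel complementary filter: raising it from 2 s to 5 s makes accelerometer corrections slower (more vibration-tolerant, but slower drift recovery), and tightening ACC_MIN/ACC_MAX to 0.9-1.1 g means only near-1 g samples are trusted for that correction. One common way such a time constant becomes a per-sample blend factor, shown as a generic sketch rather than this firmware's exact math:

/* dt: loop period in seconds; filtertime: correction time constant. */
static float blend_factor(float dt, float filtertime)
{
    return dt / (dt + filtertime);   /* small when filtertime >> dt */
}

/* One axis of a complementary filter: propagate with the gyro, then nudge
 * the estimate toward the (bias-corrected) accelerometer reading. */
static float filter_axis(float est, float gyro_delta, float accel, float k)
{
    est += gyro_delta;
    return est + k * (accel - est);
}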
mesh: Fix resetting configuration model state. Instead of manually iterating all app keys and net keys, use the bt_mesh_subnet_del() helper on all subnets. This also clears any app keys and ensures that persistent storage is cleared as well.
@@ -3211,22 +3211,15 @@ void bt_mesh_cfg_reset(void) cfg->hb_sub.dst = BT_MESH_ADDR_UNASSIGNED; cfg->hb_sub.expiry = 0; - hb_pub_disable(cfg); - - /* Delete all app keys */ - for (i = 0; i < ARRAY_SIZE(bt_mesh.app_keys); i++) { - struct bt_mesh_app_key *key = &bt_mesh.app_keys[i]; - - if (key->net_idx != BT_MESH_KEY_UNUSED) { - bt_mesh_app_key_del(key); - } - } - + /* Delete all net keys, which also takes care of all app keys which + * are associated with each net key. + */ for (i = 0; i < ARRAY_SIZE(bt_mesh.sub); i++) { struct bt_mesh_subnet *sub = &bt_mesh.sub[i]; - memset(sub, 0, sizeof(*sub)); - sub->net_idx = BT_MESH_KEY_UNUSED; + if (sub->net_idx != BT_MESH_KEY_UNUSED) { + bt_mesh_subnet_del(sub); + } } memset(labels, 0, sizeof(labels));
Attempt to fix FPS cap; hopefully this should work
from osr2mp4.ImageProcess import imageproc +import numpy as np def img_resize(img, start, end, step): outputs = [] - for x in range(start, end, step): + if not step: + step = 1 + for x in np.arange(start, end, step): im = imageproc.change_size(img, x/1000, x/1000) outputs.append(im) return outputs
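range() only accepts integer arguments, so once the frame-rate math produces a fractional step the call raises a TypeError, and a zero step would be rejected too; np.arange handles float steps, and the `if not step` guard falls back to 1 so the loop still terminates. The same shape expressed in C, purely as an illustration (the project itself is Python):

/* Iterate start..end with a possibly fractional step; a zero step would never
 * terminate, so fall back to 1, mirroring the `if not step` guard. */
static void for_each_scale(double start, double end, double step,
                           void (*fn)(double value))
{
    if (step == 0.0)
        step = 1.0;
    for (double x = start; x < end; x += step)
        fn(x);
}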
zephyr/zephyr_storage: Use stdint.h types. Zephyr migrated to use uint8_t, etc. instead of u8_t, etc.
@@ -146,7 +146,7 @@ typedef struct _zephyr_flash_area_obj_t { const struct flash_area *area; int block_size; int block_count; - u8_t id; + uint8_t id; } zephyr_flash_area_obj_t; STATIC void zephyr_flash_area_print(const mp_print_t *print, mp_obj_t self_in, mp_print_kind_t kind) {
Added texture format for RGB BC1.
@@ -710,6 +710,7 @@ typedef enum SceGxmTextureFormat { SCE_GXM_TEXTURE_FORMAT_PVRTII4BPP_1BGR = SCE_GXM_TEXTURE_BASE_FORMAT_PVRTII4BPP | SCE_GXM_TEXTURE_SWIZZLE4_1BGR, SCE_GXM_TEXTURE_FORMAT_UBC1_ABGR = SCE_GXM_TEXTURE_BASE_FORMAT_UBC1 | SCE_GXM_TEXTURE_SWIZZLE4_ABGR, + SCE_GXM_TEXTURE_FORMAT_UBC1_1BGR = SCE_GXM_TEXTURE_BASE_FORMAT_UBC1 | SCE_GXM_TEXTURE_SWIZZLE4_1BGR, SCE_GXM_TEXTURE_FORMAT_UBC2_ABGR = SCE_GXM_TEXTURE_BASE_FORMAT_UBC2 | SCE_GXM_TEXTURE_SWIZZLE4_ABGR,
gall: cork a subscription's ames flow on-kick. Gall tells ames to %cork flows for subscriptions it has closed. Receiving a kick also closes a subscription, but gall wasn't issuing a %cork in that case. We correct that here. Inlines +mo-handle-ames-response's logic at its only callsite.
:: !! =/ =ames-response ;;(ames-response payload.sign-arvo) - (mo-handle-ames-response ames-response) + :: %d: diff; ask clay to validate .noun as .mark + :: %x: kick; tell agent the publisher canceled the subscription, and + :: cork; tell ames to close the associated flow. + :: + ?- -.ames-response + %d (mo-give %unto %raw-fact mark.ames-response noun.ames-response) + %x =. mo-core (mo-give %unto %kick ~) + (mo-pass wire %a %cork ship) + == :: [%ames %lost *] :: note this should only happen on reverse bones, so only facts %u [%leave ~] == (mo-pass wire %g %deal [ship our] agent-name deal) - :: +mo-handle-ames-response: handle ames response message. - :: - ++ mo-handle-ames-response - |= =ames-response - ^+ mo-core - :: %d: diff; ask clay to validate .noun as .mark - :: %x: kick; tell agent the publisher canceled the subscription - :: - ?- -.ames-response - %d (mo-give %unto %raw-fact mark.ames-response noun.ames-response) - %x (mo-give %unto %kick ~) - == :: +mo-spew: handle request to set verbosity toggles on debug output :: ++ mo-spew
removed hard calls on json blobs
?. ?=($json p.p.cuf) :: ~> %slog.`%*(. >[%backing p.p.cuf %q-p-cuf]< &3.+> (sell q.p.cuf)) (back-turbo tee %json p.cuf) - (get-rush:(ire-ix p.tee) q.tee ((hard json) q.q.p.cuf)) + (get-rush:(ire-ix p.tee) q.tee (,json q.q.p.cuf)) :: $quit (get-quit:(ire-ix p.tee) q.tee) == =/ cay=cage (result-to-cage:ford build-result.result.sih) %+ get-rush:(ire-ix p.tee) q.tee ?> ?=($json p.cay) :: XX others - ((hard json) q.q.cay) + (,json q.q.cay) == == ::
roller: proper parsing of crypto-suite
%- ot :~ ['encrypt' (cu to-hex so)] ['auth' (cu to-hex so)] - ['cryptoSuite' so] + ['cryptoSuite' (su dem)] ['breach' bo] == :: :- 'keys' %- pairs :~ ['life' s+(json-number life.keys.net)] - ['suite' s+`@t`suite.keys.net] + ['suite' s+(json-number suite.keys.net)] ['auth' (hex 32 auth.keys.net)] ['crypt' (hex 32 crypt.keys.net)] ==
remove extra define for __NR_getrandom and add some comments
@@ -319,9 +319,7 @@ static ssize_t sysctl_random(char *buf, size_t buflen) # define __NR_getrandom 352 # elif defined(__cris__) # define __NR_getrandom 356 -# elif defined(__aarch64__) -# define __NR_getrandom 278 -# else /* generic */ +# else /* generic (f.e. aarch64, loongarch, loongarch64) */ # define __NR_getrandom 278 # endif # endif
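These per-architecture __NR_getrandom fallbacks exist so the code can issue the system call directly on libcs that predate the getrandom(2) wrapper; the generic value 278 already covers aarch64 and the other listed targets, which is why the dedicated aarch64 branch became redundant. The usual raw invocation looks roughly like this sketch (getrandom_syscall is an illustrative name, and __NR_getrandom is assumed to be defined by the kernel headers or a fallback table like the one above):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>

static ssize_t getrandom_syscall(void *buf, size_t buflen)
{
    ssize_t ret;

    do {   /* retry if interrupted by a signal before any bytes arrive */
        ret = syscall(__NR_getrandom, buf, buflen, 0);
    } while (ret < 0 && errno == EINTR);
    return ret;
}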
ConnectionFTDI: fix missing error handling on Windows. Some error handling in BeginDataReading and BeginDataSending was done only on Linux.
@@ -444,7 +444,11 @@ int ConnectionFT601::BeginDataReading(char *buffer, uint32_t length, int ep) FT_STATUS ftStatus = FT_OK; ftStatus = FT_ReadPipe(mFTHandle, streamRdEp, (unsigned char*)buffer, length, &ulActual, &contexts[i].inOvLap); if (ftStatus != FT_IO_PENDING) + { + lime::error("ERROR BEGIN DATA READING %d", ftStatus); + contexts[i].used = false; return -1; + } #else libusb_transfer *tr = contexts[i].transfer; libusb_fill_bulk_transfer(tr, dev_handle, streamRdEp, (unsigned char*)buffer, length, callback_libusbtransfer, &contexts[i], 0); @@ -589,7 +593,11 @@ int ConnectionFT601::BeginDataSending(const char *buffer, uint32_t length, int e FT_InitializeOverlapped(mFTHandle, &contextsToSend[i].inOvLap); ftStatus = FT_WritePipe(mFTHandle, streamWrEp, (unsigned char*)buffer, length, &ulActualBytesSend, &contextsToSend[i].inOvLap); if (ftStatus != FT_IO_PENDING) + { + lime::error("ERROR BEGIN DATA SENDING %d", ftStatus); + contexts[i].used = false; return -1; + } #else libusb_transfer *tr = contextsToSend[i].transfer; contextsToSend[i].done = false;
bt: fix OS abstraction layer for correct critical section API usage
@@ -254,13 +254,21 @@ bool IRAM_ATTR btdm_queue_generic_deregister(btdm_queue_item_t *queue) static void IRAM_ATTR interrupt_disable(void) { + if (xPortInIsrContext()) { + portENTER_CRITICAL_ISR(&global_int_mux); + } else { portENTER_CRITICAL(&global_int_mux); } +} static void IRAM_ATTR interrupt_restore(void) { + if (xPortInIsrContext()) { + portEXIT_CRITICAL_ISR(&global_int_mux); + } else { portEXIT_CRITICAL(&global_int_mux); } +} static void IRAM_ATTR task_yield_from_isr(void) {
nimble/ll: Rename ble_ll_conn_tx_data_pdu. This sends all PDUs, not only data PDUs.
@@ -787,7 +787,7 @@ ble_ll_conn_adjust_pyld_len(struct ble_ll_conn_sm *connsm, uint16_t pyld_len) } static int -ble_ll_conn_tx_data_pdu(struct ble_ll_conn_sm *connsm) +ble_ll_conn_tx_pdu(struct ble_ll_conn_sm *connsm) { int rc; uint8_t md; @@ -1221,7 +1221,7 @@ ble_ll_conn_event_start_cb(struct ble_ll_sched_item *sch) ble_phy_encrypt_disable(); } #endif - rc = ble_ll_conn_tx_data_pdu(connsm); + rc = ble_ll_conn_tx_pdu(connsm); if (!rc) { rc = BLE_LL_SCHED_STATE_RUNNING; } else { @@ -3775,7 +3775,7 @@ chk_rx_terminate_ind: rx_pyld_len += BLE_LL_DATA_MIC_LEN; } if (reply && ble_ll_conn_can_send_next_pdu(connsm, begtime, add_usecs)) { - rc = ble_ll_conn_tx_data_pdu(connsm); + rc = ble_ll_conn_tx_pdu(connsm); } conn_exit:
Test: Add support for old versions of Google Test
@@ -38,8 +38,12 @@ if (ENABLE_TESTING) add_subdirectory (${googletest_SOURCE_DIR} ${googletest_BINARY_DIR}) + # Old versions of Google Test do not include Google Mock + if (TARGET gmock) set_property (TARGET gmock PROPERTY COMPILE_FLAGS "-Wno-undef -Wno-missing-field-initializers") set_property (TARGET gmock_main PROPERTY COMPILE_FLAGS "-Wno-undef") + endif (TARGET gmock) + set_property (TARGET gtest PROPERTY COMPILE_FLAGS "-Wno-undef -Wno-missing-field-initializers") set_property (TARGET gtest_main PROPERTY COMPILE_FLAGS "-Wno-undef") endif (ENABLE_TESTING)
Set copyright to 2020
@@ -23,7 +23,7 @@ subprocess.call('doxygen doxyfile.doxy', shell=True) # -- Project information ----------------------------------------------------- project = 'ESP-AT Lib' -copyright = '2019, Tilen MAJERLE' +copyright = '2020, Tilen MAJERLE' author = 'Tilen MAJERLE' # The full version, including alpha/beta/rc tags