message
stringlengths
6
474
diff
stringlengths
8
5.22k
makefile.unix,dist: use ascii for text output this prevents unknown escapes containing '-'s getting stripped on OS X when a tty targeted font is used
@@ -474,7 +474,7 @@ dist: all for m in man/[cdv]webp.1 man/gif2webp.1 man/webpmux.1 \ man/img2webp.1 man/webpinfo.1; do \ basenam=$$(basename $$m .1); \ - $(GROFF) -t -e -man -T utf8 $$m \ + $(GROFF) -t -e -man -T ascii $$m \ | $(COL) -bx >$(DESTDIR)/doc/$${basenam}.txt; \ $(GROFF) -t -e -man -T html $$m \ | $(COL) -bx >$(DESTDIR)/doc/$${basenam}.html; \
Don't crash if an unrecognised digest is used with dsa_paramgen_md
@@ -189,9 +189,15 @@ static int pkey_dsa_ctrl_str(EVP_PKEY_CTX *ctx, NULL); } if (strcmp(type, "dsa_paramgen_md") == 0) { + const EVP_MD *md = EVP_get_digestbyname(value); + + if (md == NULL) { + DSAerr(DSA_F_PKEY_DSA_CTRL_STR, DSA_R_INVALID_DIGEST_TYPE); + return 0; + } return EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_DSA, EVP_PKEY_OP_PARAMGEN, EVP_PKEY_CTRL_DSA_PARAMGEN_MD, 0, - (void *)EVP_get_digestbyname(value)); + (void *)md); } return -2; }
i2s: Fixed divide 0 issue while setting PDM down sampling issue Closes
@@ -1154,8 +1154,8 @@ esp_err_t i2s_set_pdm_rx_down_sample(i2s_port_t i2s_num, i2s_pdm_dsr_t downsampl ESP_RETURN_ON_FALSE((p_i2s[i2s_num]->mode == I2S_COMM_MODE_PDM), ESP_ERR_INVALID_ARG, TAG, "i2s mode is not PDM mode"); xSemaphoreTake(p_i2s[i2s_num]->rx->mux, portMAX_DELAY); i2s_stop(i2s_num); - i2s_pdm_rx_slot_config_t *slot_cfg = (i2s_pdm_rx_slot_config_t*)p_i2s[i2s_num]; - i2s_pdm_rx_clk_config_t *clk_cfg = (i2s_pdm_rx_clk_config_t*)p_i2s[i2s_num]; + i2s_pdm_rx_slot_config_t *slot_cfg = (i2s_pdm_rx_slot_config_t*)p_i2s[i2s_num]->slot_cfg; + i2s_pdm_rx_clk_config_t *clk_cfg = (i2s_pdm_rx_clk_config_t*)p_i2s[i2s_num]->clk_cfg; clk_cfg->dn_sample_mode = downsample; i2s_ll_rx_set_pdm_dsr(p_i2s[i2s_num]->hal.dev, downsample); i2s_start(i2s_num);
Handle TCP disconnect in remoteConfig().
@@ -176,10 +176,12 @@ remoteConfig() do { rc = g_fn.recv(g_cfg.cmdConn, buf, sizeof(buf), MSG_DONTWAIT); /* - * TODO: if we get an error, we don't get the whoile file + * TODO: if we get an error, we don't get the whole file * When we support ndjson look for new line as EOF */ if (rc <= 0) { + close(g_cfg.cmdConn); + g_cfg.cmdConn = -1; break; } @@ -2165,10 +2167,9 @@ periodic(void *arg) if (evtNeedsConnection(g_evt)) { evtConnect(g_evt); + g_cfg.cmdConn = evtConnection(g_evt); } - // From the config file - sleep(g_thread.interval); remoteConfig(); }
tcp_example error fix (signed /unsigned mismatch)
@@ -48,10 +48,10 @@ void heartbeat_callback(u16 sender_id, u8 len, u8 msg[], void *context) fprintf(stdout, "%s\n", __FUNCTION__); } -u32 socket_read(u8 *buff, u32 n, void *context) +s32 socket_read(u8 *buff, u32 n, void *context) { (void)context; - u32 result; + s32 result; result = read(socket_desc, buff, n); return result;
mostly style
@@ -2171,8 +2171,9 @@ FIO_FUNC inline uintptr_t fio_ct_if2(uintptr_t cond, uintptr_t a, uintptr_t b) { #endif /* Note: using BIG_ENDIAN invokes false positives on some systems */ -#if (defined(__BIG_ENDIAN__) && __BIG_ENDIAN__) || \ - (defined(__LITTLE_ENDIAN__) && !__LITTLE_ENDIAN__) || \ +#if !defined(__BIG_ENDIAN__) +/* nothing to do */ +#elif (defined(__LITTLE_ENDIAN__) && !__LITTLE_ENDIAN__) || \ (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)) #define __BIG_ENDIAN__ 1 #elif !defined(__BIG_ENDIAN__) && !defined(__BYTE_ORDER__) && \ @@ -2220,12 +2221,14 @@ FIO_FUNC inline uintptr_t fio_ct_if2(uintptr_t cond, uintptr_t a, uintptr_t b) { /** 32Bit right rotation, inlined. */ #define fio_rrot32(i, bits) \ (((uint32_t)(i) >> ((bits)&31UL)) | ((uint32_t)(i) << ((-(bits)) & 31UL))) + /** 64Bit left rotation, inlined. */ #define fio_lrot64(i, bits) \ (((uint64_t)(i) << ((bits)&63UL)) | ((uint64_t)(i) >> ((-(bits)) & 63UL))) /** 64Bit right rotation, inlined. */ #define fio_rrot64(i, bits) \ (((uint64_t)(i) >> ((bits)&63UL)) | ((uint64_t)(i) << ((-(bits)) & 63UL))) + /** unknown size element - left rotation, inlined. */ #define fio_lrot(i, bits) \ (((i) << ((bits) & ((sizeof((i)) << 3) - 1))) | \
Fix naming of L2 cache size item reported for Vortex
@@ -424,7 +424,7 @@ void get_cpuconfig(void) sysctlbyname("hw.l1dcachesize",&value,&length,NULL,0); printf("#define L1_DATA_SIZE %d \n",value); sysctlbyname("hw.l2dcachesize",&value,&length,NULL,0); - printf("#define L2_DATA_SIZE %d \n",value); + printf("#define L2_SIZE %d \n",value); break; #endif }
commands/focus: focus view inside container seat_get_focus_inactive_floating and seat_get_focus_inactive_tiling do not always return a view, so get the previously focused view from the container with seat_get_focus_inactive_view. This is the i3 behavior.
@@ -267,6 +267,11 @@ static struct cmd_results *focus_mode(struct sway_workspace *ws, new_focus = seat_get_focus_inactive_tiling(seat, ws); } if (new_focus) { + struct sway_container *new_focus_view = + seat_get_focus_inactive_view(seat, &new_focus->node); + if (new_focus_view) { + new_focus = new_focus_view; + } seat_set_focus_container(seat, new_focus); // If we're on the floating layer and the floating container area
misc: add assert to check the RTVM CPU affinity The current ACRN Configurator have no warning when the user set CPU zero to RTVM. This patch add an assert to check the above.
</xs:annotation> </xs:assert> + <xs:assert test="every $vm in /acrn-config/vm[vm_type = 'RTVM'] satisfies + count($vm//pcpu_id[./text() = '0']) = 0"> + <xs:annotation acrn:severity="error" acrn:report-on="$vm//cpu_affinity"> + <xs:documentation>The CPU 0 can't assign to RTVM {$vm/name/text()}.</xs:documentation> + </xs:annotation> + </xs:assert> + </xs:schema>
doc: improve CLion target description
@@ -153,21 +153,21 @@ as follows: The most thorough way to test your changes is to run all tests. Therefore navigate to your run-configurations (Run -> Edit Configurations...) and look for the entry `run_all`. There, `run_all` should be selected as `Executable`, -`kdb` as `Target`. Now you can execute this run configuration which will run -all enabled tests! Alternatively you can also run all tests using the terminal +`all` as `Target`. Now you can execute this run configuration which will run +all enabled tests. Alternatively you can also run all tests using the terminal by executing `make run_all` inside your build folder (e.g. /cmake-build-debug). -You can also run other specific tests by setting`Target` and `Executable` to +You can also run other specific tests by setting `Executable` to any of the `testmod_*` or `testkdb_*`targets. Additionally all tests using _Google Test_ (e.g. tests/kdb/\*) can be run directly using CLion by opening their source code and clicking on the green icon next to the class name. If you want to test various _kdb_ methods separately, you can create your own run configurations. Add a new one by clicking on the "+"-sign on the -top left of the "Edit Configurations..." dialog and name it. Here both -_Executable_ and _Target_ should have "kdb" selected. If you for example -want to test `kdb plugin-info dump`, write "info dump" next to _Program arguments_. -That's it, now you can just test this part of _kdb_. +top left of the "Edit Configurations..." dialog and name it. Here _Target_ +should be `all` and _Executable_ should have "kdb" selected. If you for example +want to test `kdb plugin-info dump`, write "plugin-info dump" next to +_Program arguments_. That's it, now you can just test this part of _kdb_. For further information please read [this](/doc/TESTING.md).
foomatic-rip: Allow a file with only "%!" as valid (zero-page) PostScript file.
@@ -542,7 +542,7 @@ int guess_file_type(const char *begin, size_t len, int *startpos) if (!p) return UNKNOWN_FILE; *startpos = p - begin; - if ((end - p) > 2 && !memcmp(p, "%!", 2)) + if ((end - p) >= 2 && !memcmp(p, "%!", 2)) return PS_FILE; else if ((end - p) > 7 && !memcmp(p, "%PDF-1.", 7)) return PDF_FILE; @@ -582,7 +582,7 @@ int print_file(const char *filename, int convert) } buf[n] = '\0'; type = guess_file_type(buf, n, &startpos); - /* We do not use any JCL preceeded to the inputr data, as it is simply + /* We do not use any JCL preceeded to the input data, as it is simply the PJL commands from the PPD file, and these commands we can also generate, end we even merge them with PJl from the driver */ /*if (startpos > 0) {
BUFR keys iterator memory leak (fixed)
@@ -146,6 +146,7 @@ static int next_attribute(bufr_keys_iterator* kiter) char* prefix=0; if (!kiter->prefix) return 0; if (!kiter->attributes[i_curr_attribute]) { + grib_context_free(kiter->current->context,kiter->prefix); kiter->prefix=0; return 0; } @@ -167,10 +168,6 @@ int codes_bufr_keys_iterator_next(bufr_keys_iterator* kiter) /* ECC-734: de-allocate last key name stored */ grib_context_free(kiter->handle->context, kiter->key_name); kiter->key_name = NULL; - /* See ECC-937 - grib_context_free(kiter->handle->context, kiter->prefix); - kiter->prefix = NULL; - */ if(kiter->at_start) {
some clean-ups
@@ -1437,7 +1437,7 @@ while(1) } zeiger = essidlistin +essidlistincount; } -fclose(fh_essidlistin); +if(fh_essidlistin != NULL) fclose(fh_essidlistin); qsort(essidlistin, essidlistincount, ESSIDLIST_SIZE, sort_essidlistin); qsort(hashlist, pmkideapolcount, HASHLIST_SIZE, sort_maclist_by_essidlen); @@ -1450,7 +1450,6 @@ if(pmkideapoloutname != NULL) return; } } - zeiger = essidlistin; zeigerhash = hashlist; o = 0; @@ -1469,7 +1468,6 @@ for(i = 0; i < essidlistincount; i++) o++; } } - if(fh_pmkideapol != NULL) fclose(fh_pmkideapol); if(pmkideapoloutname != NULL) { @@ -1478,7 +1476,7 @@ if(pmkideapoloutname != NULL) if(statinfo.st_size == 0) remove(pmkideapoloutname); } } -free(essidlistin); +if(essidlistin != NULL) free(essidlistin); return; } /*===========================================================================*/
Log the reason for settrlimit() failures (errno)
@@ -10152,6 +10152,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) if (daemon->group->cpu_time_limit > 0) { struct rlimit limit; int result = -1; + errno = ENOSYS; limit.rlim_cur = daemon->group->cpu_time_limit; @@ -10163,7 +10164,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) #endif if (result == -1) { - ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, + ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set CPU time " "limit of %d seconds for process '%s'.", getpid(), daemon->group->cpu_time_limit, @@ -10180,6 +10181,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) if (daemon->group->memory_limit > 0) { struct rlimit limit; int result = -1; + errno = ENOSYS; limit.rlim_cur = daemon->group->memory_limit; @@ -10190,7 +10192,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) #endif if (result == -1) { - ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, + ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set memory " "limit of %ld for process '%s'.", getpid(), (long)daemon->group->memory_limit, @@ -10207,6 +10209,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) if (daemon->group->virtual_memory_limit > 0) { struct rlimit limit; int result = -1; + errno = ENOSYS; limit.rlim_cur = daemon->group->virtual_memory_limit; @@ -10219,7 +10222,7 @@ static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) #endif if (result == -1) { - ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, + ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set virtual memory " "limit of %ld for process '%s'.", getpid(), (long)daemon->group->virtual_memory_limit,
VERSION bump to version 1.4.117
@@ -46,7 +46,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 1) set(SYSREPO_MINOR_VERSION 4) -set(SYSREPO_MICRO_VERSION 116) +set(SYSREPO_MICRO_VERSION 117) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
stake tweaks
@@ -696,8 +696,10 @@ void StakeMiner(CWallet *pwallet) fTryToSync = false; if (vNodes.size() < 3 || nBestHeight < GetNumBlocksOfPeers()) { + if (fDebug && GetBoolArg("-printcoinstake")) + printf("StakeMiner() vNodes.size() < 3 || nBestHeight < GetNumBlocksOfPeers()\n"); vnThreadsRunning[THREAD_STAKE_MINER]--; - MilliSleep(60000); + MilliSleep(5000); vnThreadsRunning[THREAD_STAKE_MINER]++; if (fShutdown) return; @@ -714,14 +716,15 @@ void StakeMiner(CWallet *pwallet) if (nMinStakeInterval > 0 && nTimeLastStake + (int64_t)nMinStakeInterval > GetTime()) { - if (fDebug) + if (fDebug && GetBoolArg("-printcoinstake")) printf("StakeMiner() Rate limited to 1 / %d seconds.\n", nMinStakeInterval); - MilliSleep(nMinStakeInterval * 500); // nMinStakeInterval / 2 seconds + MilliSleep(nMinStakeInterval * 1000); // nMinStakeInterval / 2 seconds continue; }; if (vecFortunastakes.size() == 0 || (mnCount > 0 && vecFortunastakes.size() < mnCount)) { + if (fDebug && GetBoolArg("-printcoinstake")) printf("StakeMiner() waiting for FS list."); vnThreadsRunning[THREAD_STAKE_MINER]--; MilliSleep(10000); vnThreadsRunning[THREAD_STAKE_MINER]++; @@ -732,24 +735,33 @@ void StakeMiner(CWallet *pwallet) // Create new block // int64_t nFees; + if (fDebug && GetBoolArg("-printcoinstake")) printf ("creating block. "); auto_ptr<CBlock> pblock(CreateNewBlock(pwallet, true, &nFees)); if (!pblock.get()) return; + if (fDebug && GetBoolArg("-printcoinstake")) printf ("signing block. "); // Trying to sign a block if (pblock->SignBlock(*pwallet, nFees)) { + if (fDebug && GetBoolArg("-printcoinstake")) printf ("checking stake. "); bool staked; SetThreadPriority(THREAD_PRIORITY_NORMAL); staked = CheckStake(pblock.get(), *pwallet); + if (staked && fDebug && GetBoolArg("-printcoinstake")) printf ("stake is good. \n"); SetThreadPriority(THREAD_PRIORITY_LOWEST); if (fShutdown) return; MilliSleep(nMinerSleep); - if (staked) MilliSleep(nMinerSleep*10); // sleep for a while after successfully staking + if (staked) { + nTimeLastStake = GetAdjustedTime(); + MilliSleep(nMinerSleep*3); // sleep for a while after successfully staking + } + else if (fDebug && GetBoolArg("-printcoinstake")) printf ("stake is bad. \n"); } else { + if (fDebug && GetBoolArg("-printcoinstake")) printf ("failed to sign.\n"); if (fShutdown) return; MilliSleep(nMinerSleep);
Fix `SyntaxWarning` on 'ipaddress'
@@ -1103,7 +1103,7 @@ class _BaseNetwork(_IPAddressBase): try: # Always false if one is v4 and the other is v6. if a._version != b._version: - raise TypeError("%s and %s are not of the same version" (a, b)) + raise TypeError("%s and %s are not of the same version" % (a, b)) return (b.network_address <= a.network_address and b.broadcast_address >= a.broadcast_address) except AttributeError:
zephyr/shim/include/zephyr_gpio_signal.h: Format with clang-format BRANCH=none TEST=none
(GPIO_SIGNAL_NAME_FROM_ORD(id##_ORD))) #define GPIO_SIGNAL(id) GPIO_SIGNAL_NAME(id) -#define GPIO_SIGNAL_WITH_COMMA(id) \ - GPIO_SIGNAL(id), +#define GPIO_SIGNAL_WITH_COMMA(id) GPIO_SIGNAL(id), enum gpio_signal { GPIO_UNIMPLEMENTED = -1, #if DT_NODE_EXISTS(DT_PATH(named_gpios)) @@ -118,8 +117,8 @@ BUILD_ASSERT(GPIO_COUNT < GPIO_LIMIT); */ struct gpio_dt_spec; -#define GPIO_DT_PTR_DECL(id) extern const struct gpio_dt_spec * const \ - GPIO_DT_NAME(GPIO_SIGNAL(id)); +#define GPIO_DT_PTR_DECL(id) \ + extern const struct gpio_dt_spec *const GPIO_DT_NAME(GPIO_SIGNAL(id)); DT_FOREACH_CHILD(DT_PATH(named_gpios), GPIO_DT_PTR_DECL) @@ -127,7 +126,6 @@ DT_FOREACH_CHILD(DT_PATH(named_gpios), GPIO_DT_PTR_DECL) #endif /* DT_NODE_EXISTS(DT_PATH(named_gpios)) */ - #define IOEXPANDER_ID_EXPAND(id) ioex_chip_##id #define IOEXPANDER_ID(id) IOEXPANDER_ID_EXPAND(id) #define IOEXPANDER_ID_FROM_INST_WITH_COMMA(id) IOEXPANDER_ID(id),
Remove win32 third-party tarball from ivy We have a high level goal to remove all ivy dependencies for gpdb6. kfw and pygresql were the only dependencies in this tarball. We are dropping both from ivy so we no longer need to pull the tarball here.
<conf name="rhel7_x86_64" visibility="public"/> <conf name="suse11_x86_64" visibility="public"/> <conf name="sles11_x86_64" visibility="public"/> - <conf name="win32" visibility="public"/> <conf name="aix7_ppc_64" visibility="public"/> </configurations> <dependency org="xerces" name="xerces-c" rev="3.1.1-p1" conf="rhel7_x86_64->rhel5_x86_64;rhel6_x86_64->rhel5_x86_64;suse11_x86_64->suse10_x86_64;sles11_x86_64->suse10_x86_64" /> <dependency org="OpenSSL" name="openssl" rev="1.0.2l" conf="suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64;aix7_ppc_64->aix7_ppc_64" /> <dependency org="gnu" name="libstdc" rev="6.0.22" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->suse11_x86_64;sles11_x86_64->suse11_x86_64" /> - <dependency org="third-party" name="ext" rev="1.1" conf="win32->win32" /> <dependency org="third-party" name="ext" rev="gpdb6_ext-4.2" conf="suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" /> <dependency org="Hyperic" name="sigar" rev="1.6.5" conf="rhel6_x86_64->rhel6_x86_64;rhel7_x86_64->rhel7_x86_64;suse11_x86_64->sles11_x86_64;sles11_x86_64->sles11_x86_64" /> <dependency org="R-Project" name="R" rev="3.1.0" conf="rhel7_x86_64->rhel6_x86_64;rhel6_x86_64->rhel6_x86_64;suse11_x86_64->suse10_x86_64;sles11_x86_64->suse10_x86_64" />
Remove suspicious friction override;
@@ -184,7 +184,6 @@ int lovrWorldCollide(World* world, Shape* a, Shape* b, float friction, float res contacts[c].surface.mode = 0; contacts[c].surface.mu = friction; contacts[c].surface.bounce = restitution; - contacts[c].surface.mu = dInfinity; if (restitution > 0) { contacts[c].surface.mode |= dContactBounce;
defination: fix the bug that esp_derive_local_mac() defination missing Closes:
@@ -149,7 +149,7 @@ esp_err_t esp_efuse_mac_get_default(uint8_t* mac) esp_err_t system_efuse_read_mac(uint8_t *mac) __attribute__((alias("esp_efuse_mac_get_default"))); esp_err_t esp_efuse_read_mac(uint8_t *mac) __attribute__((alias("esp_efuse_mac_get_default"))); -esp_err_t esp_derive_mac(uint8_t* local_mac, const uint8_t* universal_mac) +esp_err_t esp_derive_local_mac(uint8_t* local_mac, const uint8_t* universal_mac) { uint8_t idx; @@ -203,7 +203,7 @@ esp_err_t esp_read_mac(uint8_t* mac, esp_mac_type_t type) mac[5] += 1; } else if (UNIVERSAL_MAC_ADDR_NUM == TWO_UNIVERSAL_MAC_ADDR) { - esp_derive_mac(mac, efuse_mac); + esp_derive_local_mac(mac, efuse_mac); } break; case ESP_MAC_BT: @@ -222,7 +222,7 @@ esp_err_t esp_read_mac(uint8_t* mac, esp_mac_type_t type) } else if (UNIVERSAL_MAC_ADDR_NUM == TWO_UNIVERSAL_MAC_ADDR) { efuse_mac[5] += 1; - esp_derive_mac(mac, efuse_mac); + esp_derive_local_mac(mac, efuse_mac); } break; default:
driverkit: 1G align allocations so that mem serv can always handle the request
@@ -58,7 +58,7 @@ static void parse_namelist(char *in, struct hwmodel_name *names, int *conversion } #define ALLOC_WRAP_Q "state_get(S)," \ - "alloc_wrap(S, %zu, 21, %"PRIi32",%s, NewS)," \ + "alloc_wrap(S, %zu, %d, %"PRIi32",%s, NewS)," \ "state_set(NewS)." errval_t driverkit_hwmodel_ram_alloc(struct capref *dst, @@ -73,6 +73,8 @@ errval_t driverkit_hwmodel_ram_alloc(struct capref *dst, int bits = log2ceil(bytes); bytes = 1 << bits; + assert(bits >= 21); + // The PT configuration in the SKB is currently using 2M pages. #ifdef DISABLE_MODEL if (dstnode != driverkit_hwmodel_lookup_dram_node_id()) { @@ -85,8 +87,12 @@ errval_t driverkit_hwmodel_ram_alloc(struct capref *dst, format_nodelist(nodes, nodes_str); - debug_printf("Query: " ALLOC_WRAP_Q, bytes, dstnode, nodes_str); - err = skb_execute_query(ALLOC_WRAP_Q, bytes, dstnode, nodes_str); + int alloc_bits = 21; + //HACK: This should + alloc_bits = 30; + //ENDHACK + debug_printf("Query: " ALLOC_WRAP_Q "\n", bytes, alloc_bits, dstnode, nodes_str); + err = skb_execute_query(ALLOC_WRAP_Q, bytes, alloc_bits, dstnode, nodes_str); DEBUG_SKB_ERR(err, "alloc_wrap"); if(err_is_fail(err)){ @@ -108,6 +114,7 @@ errval_t driverkit_hwmodel_ram_alloc(struct capref *dst, struct mem_binding * b = get_mem_client(); debug_printf("Determined addr=0x%"PRIx64" as address for (nodeid=%d, size=%zu) request\n", names[0].address, dstnode, bytes); + err = b->rpc_tx_vtbl.allocate(b, bits, names[0].address, names[0].address + bytes, &msgerr, dst); if(err_is_fail(err)){ @@ -254,7 +261,7 @@ errval_t driverkit_hwmodel_vspace_alloc(struct capref frame, //int32_t mem_nodeid = id.pasid; int32_t mem_nodeid = driverkit_hwmodel_lookup_dram_node_id(); uint64_t mem_addr = id.base; - debug_printf("Query: " MAP_WRAP_Q, + debug_printf("Query: " MAP_WRAP_Q "\n", id.bytes, mem_nodeid, mem_addr, src_nodeid_str); err = skb_execute_query(MAP_WRAP_Q, id.bytes, mem_nodeid, mem_addr, src_nodeid_str);
align: add MCST LCC to compilers known to support __alignof__
HEDLEY_TI_CL7X_VERSION_CHECK(1,2,0) || \ HEDLEY_TI_CL430_VERSION_CHECK(16,9,0) || \ HEDLEY_TI_CLPRU_VERSION_CHECK(2,3,2) || \ + HEDLEY_MCST_LCC_VERSION_CHECK(1,25,10) || \ defined(__IBM__ALIGNOF__) || \ defined(__clang__) #define SIMDE_ALIGN_OF(Type) __alignof__(Type)
kubectl-gadget: Add sys_mod capability needed by python-based gadgets
@@ -291,6 +291,10 @@ spec: # Needed for gadgets that don't dumb the memory rlimit. # (Currently only applies to BCC python-based gadgets) - IPC_LOCK + + # Needed by BCC python-based gadgets to load the kheaders module: + # https://github.com/iovisor/bcc/blob/v0.24.0/src/cc/frontends/clang/kbuild_helper.cc#L158 + - SYS_MODULE volumeMounts: - name: host mountPath: /host
[GB] Implement ADD A, u8 instruction
@@ -829,6 +829,30 @@ impl CpuState { self.update_flag(FlagUpdate::HalfCarry(true)); Some(InstrInfo::seq(2, 2)) } + 0xc6 => { + // ADD A, u8 + // TODO(PT): Refactor with ADD A, Reg8? + let a = self.reg(RegisterName::A); + let val = self.mmu.read(self.get_pc() + 1); + + if debug { + println!("ADD {a}, {val:02x}"); + } + + let prev_a_val = a.read_u8(&self); + let (new_val, did_overflow) = prev_a_val.overflowing_add(val); + + a.write_u8(&self, new_val); + self.update_flag(FlagUpdate::Zero(new_val == 0)); + self.update_flag(FlagUpdate::Subtract(false)); + let half_carry_flag = + ((((prev_a_val as u16) & 0xf) + ((val as u16) & 0xf)) & 0x10) == 0x10; + self.update_flag(FlagUpdate::HalfCarry(half_carry_flag)); + self.update_flag(FlagUpdate::Carry(did_overflow)); + + // TODO(PT): Cycle count should be 2 for (HL) + Some(InstrInfo::seq(2, 2)) + } // Handled down below _ => None, }; @@ -2830,4 +2854,24 @@ mod tests { assert!(cpu.is_flag_set(Flag::Subtract)); assert!(cpu.is_flag_set(Flag::HalfCarry)); } + + /* ADD A, u8 */ + + #[test] + fn test_add_u8() { + // Given an ADD A, u8 instruction instruction + let gb = get_system(); + let mut cpu = gb.cpu.borrow_mut(); + + // And a value just after the instruction pointer + gb.mmu.write(1, 0xff); + cpu.reg(RegisterName::A).write_u8(&cpu, 0x3c); + + gb.run_opcode_with_expected_attrs(&mut cpu, 0xc6, 2, 2); + assert_eq!(cpu.reg(RegisterName::A).read_u8(&cpu), 0x3b); + assert!(!cpu.is_flag_set(Flag::Zero)); + assert!(cpu.is_flag_set(Flag::HalfCarry)); + assert!(!cpu.is_flag_set(Flag::Subtract)); + assert!(cpu.is_flag_set(Flag::Carry)); + } }
fix fedora-review tool errors
@@ -24,6 +24,7 @@ BuildRequires: systemd-rpm-macros BuildRequires: systemd BuildRequires: pybind11-devel BuildRequires: tbb-devel +BuildRequires: git %description Open Programmable Acceleration Engine (OPAE) is a software framework
appveyor: it was unexpected
@@ -104,7 +104,7 @@ install: - gem install rest-client --no-document -f - gem install listen --no-document -f - gem install zip --no-document -f - - gem install bundler --no-document -f + - gem install bundler --quiet --no-ri --no-rdoc -f - bundler --version before_test:
Update status badge link from Travis CI to Github Actions
-# SDL2 binding for Go [![Build Status](https://github.com/veandco/go-sdl2/actions/workflows/test-build.yml/badge.svg)](https://app.travis-ci.com/github/veandco/go-sdl2) [![Go Report Card](https://goreportcard.com/badge/github.com/veandco/go-sdl2)](https://goreportcard.com/report/github.com/veandco/go-sdl2) [![Reviewed by Hound](https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg)](https://houndci.com) [![Financial Contributors on Open Collective](https://opencollective.com/veandco/all/badge.svg?label=financial+contributors)](https://opencollective.com/veandco) +# SDL2 binding for Go [![Build Status](https://github.com/veandco/go-sdl2/actions/workflows/test-build.yml/badge.svg)](https://github.com/veandco/go-sdl2/actions/workflows/test-build.yml) [![Go Report Card](https://goreportcard.com/badge/github.com/veandco/go-sdl2)](https://goreportcard.com/report/github.com/veandco/go-sdl2) [![Reviewed by Hound](https://img.shields.io/badge/Reviewed_by-Hound-8E64B0.svg)](https://houndci.com) [![Financial Contributors on Open Collective](https://opencollective.com/veandco/all/badge.svg?label=financial+contributors)](https://opencollective.com/veandco) `go-sdl2` is SDL2 wrapped for Go users. It enables interoperability between Go and the SDL2 library which is written in C. That means the original SDL2 installation is required for this to work. Note that the first build may take several minutes on machines that are not powerful such as Raspberry Pi.
extmod/vfs: Support opening a file descriptor (int) with VfsPosix. Fixes issue
#include "extmod/vfs_fat.h" #endif +#if MICROPY_VFS_POSIX +#include "extmod/vfs_posix.h" +#endif + // For mp_vfs_proxy_call, the maximum number of additional args that can be passed. // A fixed maximum size is used to avoid the need for a costly variable array. #define PROXY_MAX_ARGS (2) @@ -264,6 +268,13 @@ mp_obj_t mp_vfs_open(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)]; mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args); + #if MICROPY_VFS_POSIX + // If the file is an integer then delegate straight to the POSIX handler + if (MP_OBJ_IS_SMALL_INT(args[ARG_file].u_obj)) { + return mp_vfs_posix_file_open(&mp_type_textio, args[ARG_file].u_obj, args[ARG_mode].u_obj); + } + #endif + mp_vfs_mount_t *vfs = lookup_path(args[ARG_file].u_obj, &args[ARG_file].u_obj); return mp_vfs_proxy_call(vfs, MP_QSTR_open, 2, (mp_obj_t*)&args); }
travis: removing cache_name
@@ -118,15 +118,15 @@ matrix: - language: objective-c os: osx - env: RHO_TARGET="iphone" RHO_APP="auto_common_spec" CACHE_NAME="RVM_OSX" + env: RHO_TARGET="iphone" RHO_APP="auto_common_spec" - language: objective-c os: osx - env: RHO_TARGET="iphone" RHO_APP="framework_spec" CACHE_NAME="RVM_OSX" + env: RHO_TARGET="iphone" RHO_APP="framework_spec" - language: android os: linux - env: RHO_TARGET="android" RHO_APP="auto_common_spec" CACHE_NAME="RVM_LINUX" + env: RHO_TARGET="android" RHO_APP="auto_common_spec" #- language: android # os: linux @@ -134,7 +134,7 @@ matrix: - language: android os: linux - env: RHO_TARGET="android" RHO_APP="framework_spec" CACHE_NAME="RVM_LINUX" + env: RHO_TARGET="android" RHO_APP="framework_spec" #- language: android # os: linux @@ -170,7 +170,7 @@ matrix: - language: objective-c os: osx - env: RHO_TARGET="rhosimulator_osx" CACHE_NAME="RVM_OSX" + env: RHO_TARGET="rhosimulator_osx" # - language: objective-c # os: osx
CBLK: No irqs and little higher cache read timeout
#include "snap_hls_if.h" #include "capiblock.h" -#undef CONFIG_WAIT_FOR_IRQ /* Not fully working */ +#undef CONFIG_WAIT_FOR_IRQ /* Not working */ #undef CONFIG_PRINT_STATUS /* health checking if needed */ #undef CONFIG_FIFO_SCHEDULING /* only root */ #undef CONFIG_PIN_COMPLETION_THREAD /* try to pin completion thread */ #define CONFIG_SLEEP_WHEN_IDLE /* Sleep when there is no work to be done */ #define CONFIG_REQUEST_TIMEOUT 5 -#define CONFIG_REQUEST_DURATION 100 /* usec */ +#define CONFIG_REQUEST_DURATION 1000 /* usec */ static int cblk_reqtimeout = CONFIG_REQUEST_TIMEOUT; static int cblk_prefetch = 0; @@ -1086,8 +1086,7 @@ chunk_id_t cblk_open(const char *path, unsigned int i, j; int timeout = ACTION_WAIT_TIME; unsigned long have_nvme = 0; - snap_action_flag_t attach_flags = - (SNAP_ACTION_DONE_IRQ | SNAP_ATTACH_IRQ); + snap_action_flag_t attach_flags = 0; struct cblk_dev *c = &chunk; block_trace("[%s] opening (%s) CBLK_REQTIMEOUT=%d CBLK_PREFETCH=%d\n", @@ -1095,6 +1094,9 @@ chunk_id_t cblk_open(const char *path, pthread_mutex_lock(&c->dev_lock); +#ifdef CONFIG_WAIT_FOR_IRQ + attach_flags |= (SNAP_ACTION_DONE_IRQ | SNAP_ATTACH_IRQ); +#endif if (flags & CBLK_OPN_VIRT_LUN) { fprintf(stderr, "err: Virtual luns not supported in capi stub\n"); goto out_err0;
feat(venachain):modify venachain solidity2c python file
@@ -755,7 +755,7 @@ class CFunctionGen(): inputs_len = len(inputs) input_str = '(' - input_str += 'BoatPlatoneTx *tx_ptr' + input_str += 'BoatVenachainTx *tx_ptr' if inputs_len != 0: input_str += ', ' @@ -851,7 +851,7 @@ class CFunctionGen(): # Set Nonce if not self.is_Change_Blockchain_State(abi_item): - func_body_str += ' boat_try(BoatPlatoneTxSetNonce(tx_ptr, BOAT_PLATONE_NONCE_AUTO));\n\n' + func_body_str += ' boat_try(BoatVenachainTxSetNonce(tx_ptr, BOAT_VENACHAIN_NONCE_AUTO));\n\n' @@ -1059,11 +1059,11 @@ class CFunctionGen(): if self.is_Change_Blockchain_State(abi_item): # for state-less funciton call - func_body_str += ' call_result_str = BoatPlatoneCallContractFunc(tx_ptr, data_field.field_ptr, data_field.field_len);\n\n' + func_body_str += ' call_result_str = BoatVenachainCallContractFunc(tx_ptr, data_field.field_ptr, data_field.field_len);\n\n' else: # for stateful transaction - func_body_str += ' boat_try(BoatPlatoneTxSetData(tx_ptr, &data_field));\n\n' - func_body_str += ' boat_try(BoatPlatoneTxSend(tx_ptr));\n\n' + func_body_str += ' boat_try(BoatVenachainTxSetData(tx_ptr, &data_field));\n\n' + func_body_str += ' boat_try(BoatVenachainTxSend(tx_ptr));\n\n' func_body_str += ' UtilityBinToHex(tx_hash_str, tx_ptr->tx_hash.field, tx_ptr->tx_hash.field_len, BIN2HEX_LEFTTRIM_UNFMTDATA, BIN2HEX_PREFIX_0x_YES, BOAT_FALSE);\n\n' # Cleanup Label
SetStatusBarModal: prevent flexShrink on mobile
@@ -77,9 +77,11 @@ export const SetStatusBarModal = (props) => { )} <Row {...rest} + flexShrink={0} onClick={() => setModalShown(true)}> <Text color='black' cursor='pointer' + flexShrink={0} fontSize={1}> Set Status </Text>
Oops... remove debug message
@@ -353,7 +353,6 @@ FIO_FUNC void *fio_hash_insert(fio_hash_s *hash, FIO_HASH_KEY_TYPE key, if (!info && !obj) return NULL; while (!info) { - fprintf(stderr, "no info\n"); fio_hash_rehash(hash); info = fio_hash_seek_pos_(hash, key); }
Fix - fix docstring.
@@ -2547,7 +2547,7 @@ JANET_CORE_FN(cfun_ev_thread, "to resume with. " "Unlike `ev/go`, this function will suspend the current fiber until the thread is complete. " "If you want to run the thread without waiting for a result, pass the `:n` flag to return nil immediately. " - "Otherwise, returns (a copy of) the final result from the fiber on the new thread. Available flags:\n\n" + "Otherwise, returns nil. Available flags:\n\n" "* `:n` - return immediately\n" "* `:a` - don't copy abstract registry to new thread (performance optimization)\n" "* `:c` - don't copy cfunction registry to new thread (performance optimization)") {
respect large pages for arena allocation
@@ -232,6 +232,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* for (size_t i = 0; i < MI_MAX_ARENAS; i++) { mi_arena_t* arena = (mi_arena_t*)mi_atomic_read_ptr_relaxed(mi_atomic_cast(void*, &mi_arenas[i])); if (arena==NULL) break; + if (*large || !arena->is_large) { // large OS pages allowed, or arena is not large OS pages size_t block_index = SIZE_MAX; void* p = mi_arena_alloc(arena, bcount, is_zero, &block_index); if (p != NULL) { @@ -250,6 +251,7 @@ void* _mi_arena_alloc_aligned(size_t size, size_t alignment, bool* commit, bool* } } } + } // fall back to the OS *is_zero = true;
include/sys/boardctl.h : Add macro of boardctl To reset the board through sw, we should call boardctl(BOARDIOC_RESET, EXIT_SUCCESS/FAILURE). For easy use, add macro as board_sw_reset()
****************************************************************************/ #include <tinyara/config.h> #include <stdint.h> +#include <stdlib.h> #include <tinyara/fs/ioctl.h> @@ -169,5 +170,9 @@ int boardctl(unsigned int cmd, uintptr_t arg); } #endif +#define board_sw_reset() do { \ + boardctl(BOARDIOC_RESET, EXIT_SUCCESS); \ + } while (0) + #endif /* CONFIG_LIB_BOARDCTL */ #endif /* __INCLUDE_SYS_BOARDCTL_H */
up grpc runtime to 1.43.2
@@ -984,8 +984,8 @@ when ($JAVA_PROTO_RUNTIME == "javalite") { otherwise { _JAVA_PROTO_OUT_ARG=--java_out= JAVA_PROTOBUF_PEERS=contrib/java/com/google/protobuf/protobuf-java/${JAVA_PROTO_RUNTIME_VERSION} - JAVA_GRPC_STUB=contrib/java/io/grpc/grpc-stub/1.26.0 - JAVA_GRPC_PROTOBUF=contrib/java/io/grpc/grpc-protobuf/1.26.0 + JAVA_GRPC_STUB=contrib/java/io/grpc/grpc-stub/1.43.2 + JAVA_GRPC_PROTOBUF=contrib/java/io/grpc/grpc-protobuf/1.43.2 } # tag:proto tag:java-specific
Make depth test more flexible;
@@ -471,13 +471,14 @@ int l_lovrGraphicsGetDepthTest(lua_State* L) { } int l_lovrGraphicsSetDepthTest(lua_State* L) { - if (lua_isnoneornil(L, 1) && lua_isnoneornil(L, 2)) { - lovrGraphicsSetDepthTest(COMPARE_NONE, false); - } else { - CompareMode mode = *(CompareMode*) luax_checkenum(L, 1, &CompareModes, "compare mode"); + CompareMode mode = COMPARE_NONE; + + if (lua_type(L, 1) == LUA_TSTRING) { + mode = *(CompareMode*) luax_checkenum(L, 1, &CompareModes, "compare mode"); + } + bool write = lua_isnoneornil(L, 2) ? true : lua_toboolean(L, 2); lovrGraphicsSetDepthTest(mode, write); - } return 0; }
Fix dealloc size to match virtio_scsi_request alloc size
@@ -158,7 +158,7 @@ closure_function(4, 1, void, virtio_scsi_request_complete, apply(bound(c), s, r); backed_heap contiguous = s->v->virtio_dev.contiguous; dealloc_unmap(contiguous, r, bound(r_phys), - pad(sizeof(*r) + r->alloc_len, contiguous->h.pagesize)); + sizeof(*r) + r->alloc_len); closure_finish(); }
xfpga: fix for cstr_dup Separate the malloc check from the copy check so that a successful allocation can be freed on copy error.
-// Copyright(c) 2017-2018, Intel Corporation +// Copyright(c) 2017-2019, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: @@ -1400,8 +1400,13 @@ STATIC char *cstr_dup(const char *str) { size_t s = strlen(str); char *p = malloc(s+1); + if (!p) { + FPGA_ERR("malloc failed"); + return NULL; + } if (strncpy_s(p, s+1, str, s)) { FPGA_ERR("Error copying string"); + free(p); return NULL; } p[s] = '\0';
fix gpload test fail It seams not to support join python3 string like f'{}'. We now join string by the formatting way.
@@ -444,11 +444,11 @@ def drop_tables(): name = i[1] match = re.search('ext_gpload',name) if match: - queryString = f'DROP EXTERNAL TABLE "{schema}"."{name}";' + queryString = 'DROP EXTERNAL TABLE "%s"."%s";'%(schema, name) db.query(queryString.encode('utf-8')) else: - queryString = f'DROP TABLE "{schema}"."{name}";' + queryString = 'DROP TABLE "%s"."%s";'%(schema, name) db.query(queryString.encode('utf-8')) class PSQLError(Exception):
options/posix: add IN_CLASS* macros in netinet/in.h
#include <sys/socket.h> // struct sockaddr #include <abi-bits/socket.h> #include <abi-bits/in.h> +#include <arpa/inet.h> #ifdef __cplusplus extern "C" { @@ -77,15 +78,30 @@ uint16_t ntohs(uint16_t); ((((const uint8_t *)(a))[1] & 0xf) == 0xe)); \ }) -#define IN_CLASSD(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xe0000000) -#define IN_MULTICAST(a) IN_CLASSD(a) - +#define IN_CLASSA(a) ((((in_addr_t)(a)) & 0x80000000) == 0) #define IN_CLASSA_NET 0xff000000 +#define IN_CLASSA_NSHIFT 24 +#define IN_CLASSA_HOST (0xffffffff & ~IN_CLASSA_NET) +#define IN_CLASSA_MAX 128 +#define IN_CLASSB(a) ((((in_addr_t)(a)) & 0xc0000000) == 0x80000000) #define IN_CLASSB_NET 0xffff0000 +#define IN_CLASSB_NSHIFT 16 +#define IN_CLASSB_HOST (0xffffffff & ~IN_CLASSB_NET) +#define IN_CLASSB_MAX 65536 +#define IN_CLASSC(a) ((((in_addr_t)(a)) & 0xe0000000) == 0xc0000000) #define IN_CLASSC_NET 0xffffff00 +#define IN_CLASSC_NSHIFT 8 +#define IN_CLASSC_HOST (0xffffffff & ~IN_CLASSC_NET) +#define IN_CLASSD(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xe0000000) +#define IN_MULTICAST(a) IN_CLASSD(a) +#define IN_EXPERIMENTAL(a) ((((in_addr_t)(a)) & 0xe0000000) == 0xe0000000) +#define IN_BADCLASS(a) ((((in_addr_t)(a)) & 0xf0000000) == 0xf0000000) #define IN_LOOPBACKNET 127 +#define MCAST_EXCLUDE 0 +#define MCAST_INCLUDE 1 + #ifdef __cplusplus } #endif
memif: map shared memory with clib_mem_vm_map_shared(...) Type: improvement
@@ -344,11 +344,11 @@ clib_error_t * memif_init_regions_and_queues (memif_if_t * mif) { vlib_main_t *vm = vlib_get_main (); + memif_socket_file_t *msf; memif_ring_t *ring = NULL; - int i, j; + int fd, i, j; u64 buffer_offset; memif_region_t *r; - clib_mem_vm_alloc_t alloc = { 0 }; clib_error_t *err; ASSERT (vec_len (mif->regions) == 0); @@ -364,16 +364,31 @@ memif_init_regions_and_queues (memif_if_t * mif) r->region_size += mif->run.buffer_size * (1 << mif->run.log2_ring_size) * (mif->run.num_s2m_rings + mif->run.num_m2s_rings); - alloc.name = "memif region"; - alloc.size = r->region_size; - alloc.flags = CLIB_MEM_VM_F_SHARED; + if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, "%U region 0", + format_memif_device_name, + mif->dev_instance)) == -1) + { + err = clib_mem_get_last_error (); + goto error; + } - err = clib_mem_vm_ext_alloc (&alloc); - if (err) + if ((ftruncate (fd, r->region_size)) == -1) + { + err = clib_error_return_unix (0, "ftruncate"); goto error; + } + + msf = pool_elt_at_index (memif_main.socket_files, mif->socket_file_index); + r->shm = clib_mem_vm_map_shared (0, r->region_size, fd, 0, "memif%lu/%lu:0", + msf->socket_id, mif->id); + + if (r->shm == CLIB_MEM_VM_MAP_FAILED) + { + err = clib_error_return_unix (0, "memif shared region map failed"); + goto error; + } - r->fd = alloc.fd; - r->shm = alloc.addr; + r->fd = fd; if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY) {
headers: whitespace fixes and grammar tweaks
@@ -4568,7 +4568,7 @@ unsigned int RESAMP(_get_num_output)(RESAMP() _q, \ \ /* Execute arbitrary resampler on a single input sample and store the */ \ /* resulting samples in the output array. The number of output samples */ \ -/* is dependent upon the resampling rate but will be at most */ \ +/* depends upon the resampling rate but will be at most */ \ /* \( \lceil{ r \rceil} \) samples. */ \ /* _q : resamp object */ \ /* _x : single input sample */ \ @@ -4581,7 +4581,7 @@ int RESAMP(_execute)(RESAMP() _q, \ \ /* Execute arbitrary resampler on a block of input samples and store */ \ /* the resulting samples in the output array. The number of output */ \ -/* samples is dependent upon the resampling rate and the number of input */ \ +/* samples depends upon the resampling rate and the number of input */ \ /* samples but will be at most \( \lceil{ r n_x \rceil} \) samples. */ \ /* _q : resamp object */ \ /* _x : input buffer, [size: _nx x 1] */ \ @@ -4739,8 +4739,8 @@ unsigned int MSRESAMP(_get_num_output)(MSRESAMP() _q, \ unsigned int _num_input); \ \ /* Execute multi-stage resampler on one or more input samples. */ \ -/* The number of output samples is dependent upon the resampling rate */ \ -/* and the number of input samples. In general it is good practice to */ \ +/* The number of output samples depends upon the resampling rate and */ \ +/* the number of input samples. In general it is good practice to */ \ /* allocate at least \( \lceil{ 1 + 2 r n_x \rceil} \) samples in the */ \ /* output array to avoid overflows. */ \ /* _q : msresamp object */ \
Added VENDOR_KONKE (0x1268)
#define VENDOR_1233 0x1233 // Used by Third Reality #define VENDOR_1234 0x1234 // Used by Xiaomi Mi #define VENDOR_SAMJIN 0x1241 +#define VENDOR_KONKE 0x1268 #define VENDOR_OSRAM_STACK 0xBBAA #define VENDOR_LEGRAND 0x1021 #define VENDOR_C2DF 0xC2DF
chip/mt8192_scp: add static for non-exposed functions BRANCH=none TEST=make BOARD=asurada_scp
@@ -116,14 +116,14 @@ void uart_tx_stop(void) enable_sleep(SLEEP_MASK_UART); } -void uart_process(void) +static void uart_process(void) { uart_process_input(); uart_process_output(); } #if (UARTN < SCP_UART_COUNT) -void irq_group12_handler(void) +static void irq_group12_handler(void) { extern volatile int ec_int;
travis: add GCC 9 build
@@ -17,15 +17,22 @@ env: jobs: include: - - name: "gcc amd64" - compiler: gcc - arch: amd64 - - - name: "clang amd64" - arch: amd64 - compiler: clang + - name: "gcc-9" + compiler: gcc-9 + env: + - C_COMPILER=gcc-9 + - CXX_COMPILER=g++-9 + - COMPILER_FLAGS='-fsanitize=address,undefined' + addons: + apt: + sources: + - sourceline: "ppa:ubuntu-toolchain-r/test" + key_url: https://keyserver.ubuntu.com/pks/lookup?op=get&search=0x60c317803a41ba51845e371a1e9377a2ba9ef27f + packages: + - gcc-9 + - g++-9 - - name: "clang-9 asan,ubsan" + - name: "clang-9" compiler: clang-9 env: - C_COMPILER=clang-9 @@ -107,6 +114,14 @@ jobs: env: - ARCH_FLAGS="" + - name: "gcc-7 amd64" + compiler: gcc + arch: amd64 + + - name: "clang-7 amd64" + arch: amd64 + compiler: clang + - name: icc compiler: icc env:
pppd: Fix PAP Request packet.
@@ -137,7 +137,8 @@ void pap_task(FAR struct ppp_context_s *ctx, FAR uint8_t * buffer) if (!(ctx->pap_state & PAP_TX_UP) && !(ctx->pap_state & PAP_TX_TIMEOUT)) { /* Do we need to send a PAP auth packet? Check if we have a request - * pending */ + * pending. + */ if ((ppp_arch_clock_seconds() - ctx->pap_prev_seconds) > PAP_TIMEOUT) { @@ -159,16 +160,25 @@ void pap_task(FAR struct ppp_context_s *ctx, FAR uint8_t * buffer) /* Write options */ + /* Write peer-ID length */ + t = strlen((char *)ctx->settings->pap_username); + *bptr++ = (uint8_t)t; - /* Write peer length */ + /* Write peer-ID */ - *bptr++ = (uint8_t)t; bptr = memcpy(bptr, ctx->settings->pap_username, t); + bptr += t; + + /* Write passwd length */ t = strlen((char *)ctx->settings->pap_password); *bptr++ = (uint8_t)t; + + /* Write passwd */ + bptr = memcpy(bptr, ctx->settings->pap_password, t); + bptr += t; /* Write length */
clarifying that sky130_nda key is now optional
@@ -6,9 +6,11 @@ vlsi.core.max_threads: 12 # Technology paths technology.sky130: - sky130A: "/path-to-/sky130A" - openram_lib: "/path-to-/sky130_sram_macros" - sky130_nda: "/path-to-/skywater-src-nda" + sky130A: "/path/to/sky130A" + openram_lib: "/path/to/sky130_sram_macros" + + # this key is OPTIONAL, no NDA files will be used if it does not point to a valid path + sky130_nda: "/path/to/skywater-src-nda" # General Hammer Inputs
feat(gui): LCUIWidget_ClearTrash() will return count
@@ -107,16 +107,20 @@ static void Widget_UpdateStatus(LCUI_Widget widget) } } -void LCUIWidget_ClearTrash(void) +size_t LCUIWidget_ClearTrash(void) { + size_t count; LinkedListNode *node; + node = LCUIWidget.trash.head.next; + count = LCUIWidget.trash.length; while (node) { LinkedListNode *next = node->next; LinkedList_Unlink(&LCUIWidget.trash, node); Widget_ExecDestroy(node->data); node = next; } + return count; } static void Widget_AddToTrash(LCUI_Widget w)
docs - update platform requirements to mention GPDB 6, not GPDB 6.0.0
<topic id="topic_eyc_l2h_zz"> <title>Extensions</title> <body> + <p>This table lists the versions of the Pivotal Greenplum Extensions that are compatible + with this release of Greenplum Database 6.</p> <table class="- topic/table " id="table_b1q_m2h_zz"> - <title class="- topic/title ">Pivotal Greenplum 6.0.0 Extensions Compatibility</title> + <title class="- topic/title ">Pivotal Greenplum 6 Extensions Compatibility</title> <tgroup cols="2" class="- topic/tgroup "> <colspec colnum="1" colname="col1" colwidth="297pt" class="- topic/colspec "/> <colspec colnum="2" colname="col3" colwidth="117.58pt" class="- topic/colspec "/> </row> <row> <entry>PL/Java</entry> - <entry>2.0.1</entry> + <entry>2.0.2</entry> </row> <row> <entry>PL/R<sup>2</sup></entry> <body> <p> <ul id="ul_ckf_sfc_hbb"> - <li>Greenplum Platform Extension Framework (PXF) v5.10.0 - PXF, integrated with Greenplum - Database 6, provides access to Hadoop, object store, and SQL external data stores. - Refer to <xref scope="peer" href="../admin_guide/external/pxf-overview.xml">Accessing - External Data with PXF</xref> in the <cite>Greenplum Database Administrator - Guide</cite> for PXF configuration and usage + <li>Greenplum Platform Extension Framework (PXF) v5.10.0 - PXF, integrated with + Greenplum Database 6, provides access to Hadoop, object store, and SQL external data + stores. Refer to <xref scope="peer" href="../admin_guide/external/pxf-overview.xml" + >Accessing External Data with PXF</xref> in the <cite>Greenplum Database + Administrator Guide</cite> for PXF configuration and usage information.<!--Also check OSS information in Hadoop section--></li> <li>Greenplum-Kafka Integration - The Pivotal Greenplum-Kafka Integration provides high speed, parallel data transfer from a Kafka cluster to a Pivotal Greenplum Database <p>PXF can use Cloudera, Hortonworks Data Platform, MapR, and generic Apache Hadoop distributions. PXF bundles all of the JAR files on which it depends, including the following Hadoop libraries:</p> - <table> <title>PXF Hadoop Supported Platforms</title> <tgroup cols="4">
net/tcp: TCP_WAITALL should use state flag The feature of MSG_WAITALL is broken since the wrong flag is used
@@ -402,7 +402,8 @@ static uint16_t tcp_recvhandler(FAR struct net_driver_s *dev, * next receive is performed. */ - if ((pstate->ir_recvlen > 0 && (flags & TCP_WAITALL) == 0) || + if ((pstate->ir_recvlen > 0 && + (pstate->ir_cb->flags & TCP_WAITALL) == 0) || pstate->ir_buflen == 0) { ninfo("TCP resume\n");
pem_read_bio_key_decoder: Avoid spurious error on unknown PEM data
@@ -55,11 +55,24 @@ static EVP_PKEY *pem_read_bio_key_decoder(BIO *bp, EVP_PKEY **x, if (!OSSL_DECODER_CTX_set_pem_password_cb(dctx, cb, u)) goto err; + ERR_set_mark(); while (!OSSL_DECODER_from_bio(dctx, bp) || pkey == NULL) - if (BIO_eof(bp) != 0 || (newpos = BIO_tell(bp)) < 0 || newpos <= pos) + if (BIO_eof(bp) != 0 || (newpos = BIO_tell(bp)) < 0 || newpos <= pos) { + ERR_clear_last_mark(); goto err; - else + } else { + if (ERR_GET_REASON(ERR_peek_error()) == ERR_R_UNSUPPORTED) { + /* unsupported PEM data, try again */ + ERR_pop_to_mark(); + ERR_set_mark(); + } else { + /* other error, bail out */ + ERR_clear_last_mark(); + goto err; + } pos = newpos; + } + ERR_pop_to_mark(); if (!evp_keymgmt_util_has(pkey, selection)) { EVP_PKEY_free(pkey);
network/netmgr: fix svace 637316 fix svace 637316
@@ -221,14 +221,14 @@ static struct addrinfo *_netdev_copy_addrinfo(struct addrinfo *src) memcpy(dst->ai_addr, tmp->ai_addr, sizeof(struct sockaddr)); if (tmp->ai_canonname) { - dst->ai_canonname = (char *)kumm_malloc(sizeof(tmp->ai_canonname)); + dst->ai_canonname = (char *)kumm_malloc(strlen(tmp->ai_canonname) + 1); if (!dst->ai_canonname) { ndbg("kumm_malloc failed\n"); kumm_free(dst->ai_addr); kumm_free(dst); break; } - memcpy(dst->ai_canonname, tmp->ai_canonname, sizeof(tmp->ai_canonname)); + memcpy(dst->ai_canonname, tmp->ai_canonname, strlen(tmp->ai_canonname) + 1); } else { dst->ai_canonname = NULL; }
build: upload windows binaries directly to gcp
@@ -153,3 +153,36 @@ jobs: - run: mingw32-make build/urbit - run: build/urbit -l -d -B ../../bin/solid.pill -F bus && curl -f --data '{"source":{"dojo":"+hood/exit"},"sink":{"app":"hood"}}' http://localhost:12321 + + - uses: actions/setup-python@v2 + if: ${{ env.DO_UPLOAD == 'true' }} + with: + python-version: 3.7 + + - uses: google-github-actions/[email protected] + if: ${{ env.DO_UPLOAD == 'true' }} + env: + # see https://github.com/google-github-actions/setup-gcloud/issues/100 + CLOUDSDK_PYTHON: ${{env.pythonLocation}}\python.exe + with: + service_account_key: ${{ secrets.GCS_SERVICE_ACCOUNT_KEY }} + project_id: ${{ secrets.GCS_PROJECT }} + export_default_credentials: true + + - name: upload binary to bootstrap.urbit.org + if: ${{ env.DO_UPLOAD == 'true' }} + env: + CLOUDSDK_PYTHON: ${{env.pythonLocation}}\python.exe + shell: bash + run: | + version="$(cat ./version)" + system="x86_64-windows" + target="gs://bootstrap.urbit.org/ci/vere/often/${GITHUB_SHA:0:9}/vere-v${version}-${system}.exe" + + gsutil cp -n ./build/urbit.exe "$target" + exitcode=$? + + test $exitcode -eq 0 && + echo "upload to $target complete." || + echo "upload to $target failed."; + exit $exitcode
ocvalidate: Added CryptexFixup.kext
@@ -30,6 +30,7 @@ KEXT_PRECEDENCE mKextPrecedence[] = { { "AirportBrcmFixup.kext", "Lilu.kext" }, { "BrightnessKeys.kext", "Lilu.kext" }, { "CpuTscSync.kext", "Lilu.kext" }, + { "CryptexFixup.kext", "Lilu.kext" }, { "CPUFriend.kext", "Lilu.kext" }, { "CPUFriendDataProvider.kext", "CPUFriend.kext" }, { "DebugEnhancer.kext", "Lilu.kext" }, @@ -68,6 +69,7 @@ KEXT_INFO mKextInfo[] = { { "AirportBrcmFixup.kext/Contents/PlugIns/AirPortBrcmNIC_Injector.kext", "", "Contents/Info.plist" }, { "BrightnessKeys.kext", "Contents/MacOS/BrightnessKeys", "Contents/Info.plist" }, { "CpuTscSync.kext", "Contents/MacOS/CpuTscSync", "Contents/Info.plist" }, + { "CryptexFixup.kext", "Contents/MacOS/CryptexFixup", "Contents/Info.plist" }, { "CPUFriend.kext", "Contents/MacOS/CPUFriend", "Contents/Info.plist" }, { "CPUFriendDataProvider.kext", "", "Contents/Info.plist" }, { "DebugEnhancer.kext", "Contents/MacOS/DebugEnhancer", "Contents/Info.plist" },
Make pressure reader work again.
@@ -36,6 +36,7 @@ func pollSensors() { // If it's not currently connected, try connecting to pressure sensor if globalSettings.Sensors_Enabled && !globalStatus.PressureSensorConnected { globalStatus.PressureSensorConnected = initPressureSensor() // I2C temperature and pressure altitude. + go tempAndPressureSender() } // If it's not currently connected, try connecting to IMU @@ -49,7 +50,6 @@ func initPressureSensor() (ok bool) { bmp, err := sensors.NewBMP280(&i2cbus, 100*time.Millisecond) if err == nil { myPressureReader = bmp - go tempAndPressureSender() log.Println("AHRS Info: Successfully initialized BMP280") return true }
markdownlinkconverter: sanitize array access with size check, close
@@ -46,7 +46,7 @@ const char * const linksToSrc[] = { ".h", ".c", ".cpp", ".hpp", ".cmake", ".ini" // both need to be terminated with an empty string // helpers -static void printTarget (FILE * output, char * target, char * inputFilename, int indexofElektraRoot, bool isMarkdown, int lineCount); +static void printTarget (FILE * output, char * target, size_t targetSize, char * inputFilename, int indexofElektraRoot, bool isMarkdown, int lineCount); static void printConvertedPath (FILE * output, char * path); static int getIndexofElektraRoot (char * cmakeCacheFilename); static void exitError (FILE * f1, FILE * f2, const char * mes); @@ -264,7 +264,8 @@ static void convertLinks (FILE * input, FILE * output, char * inputFilename, int --index; } // extract target - char target[len * sizeof (char) + 1]; + size_t targetSize = len * sizeof (char) + 1; + char target[targetSize]; if (fread (&target[0], sizeof (char), len, input) != len) exitError (input, NULL, "fread"); target[len] = '\0'; @@ -298,7 +299,7 @@ static void convertLinks (FILE * input, FILE * output, char * inputFilename, int // print target if (targetOK) { - printTarget (output, target, inputFilename, indexofElektraRoot, isMarkdown, lineCount); + printTarget (output, target, targetSize, inputFilename, indexofElektraRoot, isMarkdown, lineCount); } else fprintf (output, "%s", target); @@ -464,7 +465,7 @@ int main (int argc, char * argv[]) return EXIT_SUCCESS; } -static void printTarget (FILE * output, char * target, char * inputFilename, int indexofElektraRoot, bool isMarkdown, int lineCount) +static void printTarget (FILE * output, char * target, size_t targetSize, char * inputFilename, int indexofElektraRoot, bool isMarkdown, int lineCount) { char * backupTarget = target; char pathToLink[strlen (inputFilename) + strlen (target) + 11 + 1]; @@ -513,7 +514,7 @@ static void printTarget (FILE * output, char * target, char * inputFilename, int } if (S_ISDIR (st.st_mode)) { - if (target[strlen (target) - 1] == FOLDER_DELIMITER) + if (backupTarget[targetSize - 1] == FOLDER_DELIMITER) { strcpy (&pathToLink[strlen (pathToLink)], "README.md"); }
titanc_spec: assert.truthy -> assert
@@ -38,6 +38,6 @@ describe("Titanc", function() util.shell("./titanc --emit-c test.titan") util.shell("./titanc --emit-asm test.c") local s, err = util.get_file_contents("test.s") - assert.is_truthy(s) + assert(s, err) end) end)
update hrr tests
@@ -11313,14 +11313,13 @@ requires_config_enabled MBEDTLS_SSL_PROTO_TLS1_3 run_test "TLS 1.3: server: HRR check - mbedtls" \ "$P_SRV debug_level=4 force_version=tls13 curves=secp384r1" \ "$P_CLI debug_level=4 force_version=tls13 curves=secp256r1,secp384r1" \ - 1 \ + 0 \ -s "tls13 server state: MBEDTLS_SSL_CLIENT_HELLO" \ -s "tls13 server state: MBEDTLS_SSL_SERVER_HELLO" \ -s "tls13 server state: MBEDTLS_SSL_ENCRYPTED_EXTENSIONS" \ -s "tls13 server state: MBEDTLS_SSL_HELLO_RETRY_REQUEST" \ -c "client state: MBEDTLS_SSL_ENCRYPTED_EXTENSIONS" \ -s "selected_group: secp384r1" \ - -s "SSL - The requested feature is not available" \ -s "=> write hello retry request" \ -s "<= write hello retry request"
Adding comments for godoc
@@ -76,7 +76,9 @@ func filterNets(mixedCIDRs []string, ipVersion uint8) (filtered []string, filter return } -//FilterRuleToIPVersion: Filter rule based on the IPversion +//FilterRuleToIPVersion: If the rule applies to the give IP version, returns a copy of the rule +//excluding the CIDRs that are not for the given IP version. If the rule does not match the IP +//version, return nil func FilterRuleToIPVersion(ipVersion uint8, pRule *proto.Rule) *proto.Rule { // Filter the CIDRs to the IP version that we're rendering. In general, we should have an // explicit IP version in the rule and all CIDRs should match it (and calicoctl, for
doc: fix incomplete ping session initialization Thanks for reporting this issue. Closes
@@ -65,6 +65,11 @@ Example method to create a new ping session and register callbacks: void initialize_ping() { /* convert URL to IP address */ + ip_addr_t target_addr; + struct addrinfo hint; + struct addrinfo *res = NULL; + memset(&hint, 0, sizeof(hint)); + memset(&target_addr, 0, sizeof(target_addr)); getaddrinfo("www.espressif.com", NULL, &hint, &res) == 0); struct in_addr addr4 = ((struct sockaddr_in *) (res->ai_addr))->sin_addr; inet_addr_to_ip4addr(ip_2_ip4(&target_addr), &addr4);
CMSIS-Core(A): Add MMU section_normal_nc macro I added the macro definition for non-cache area.
@@ -1788,6 +1788,21 @@ typedef struct RegionStruct { region.sh_t = NON_SHARED; \ MMU_GetSectionDescriptor(&descriptor_l1, region); +//Sect_Normal_NC. Outer & inner non-cacheable, non-shareable, executable, rw, domain 0 +#define section_normal_nc(descriptor_l1, region) region.rg_t = SECTION; \ + region.domain = 0x0; \ + region.e_t = ECC_DISABLED; \ + region.g_t = GLOBAL; \ + region.inner_norm_t = NON_CACHEABLE; \ + region.outer_norm_t = NON_CACHEABLE; \ + region.mem_t = NORMAL; \ + region.sec_t = SECURE; \ + region.xn_t = EXECUTE; \ + region.priv_t = RW; \ + region.user_t = RW; \ + region.sh_t = NON_SHARED; \ + MMU_GetSectionDescriptor(&descriptor_l1, region); + //Sect_Normal_Cod. Outer & inner wb/wa, non-shareable, executable, ro, domain 0 #define section_normal_cod(descriptor_l1, region) region.rg_t = SECTION; \ region.domain = 0x0; \
test BUGFIX custom timeout for larger data
@@ -502,7 +502,7 @@ test_edit_item_create(struct test_state *state, struct timespec *ts_start, struc return r; } - if ((r = sr_apply_changes(state->sess, 0))) { + if ((r = sr_apply_changes(state->sess, state->count * 100))) { return r; } @@ -513,7 +513,7 @@ test_edit_item_create(struct test_state *state, struct timespec *ts_start, struc if ((r = sr_delete_item(state->sess, "/perf:cont/lst", 0))) { return r; } - if ((r = sr_apply_changes(state->sess, 0))) { + if ((r = sr_apply_changes(state->sess, state->count * 100))) { return r; } @@ -537,7 +537,7 @@ test_edit_batch_create(struct test_state *state, struct timespec *ts_start, stru } } - if ((r = sr_apply_changes(state->sess, 0))) { + if ((r = sr_apply_changes(state->sess, state->count * 100))) { return r; } @@ -546,7 +546,7 @@ test_edit_batch_create(struct test_state *state, struct timespec *ts_start, stru if ((r = sr_delete_item(state->sess, "/perf:cont/lst", 0))) { return r; } - if ((r = sr_apply_changes(state->sess, 0))) { + if ((r = sr_apply_changes(state->sess, state->count * 100))) { return r; }
Check if command input has at least 2 arguments
@@ -31,7 +31,7 @@ static struct cmd_handler input_handlers[] = { struct cmd_results *cmd_input(int argc, char **argv) { struct cmd_results *error = NULL; - if ((error = checkarg(argc, "input", EXPECTED_AT_LEAST, 1))) { + if ((error = checkarg(argc, "input", EXPECTED_AT_LEAST, 2))) { return error; }
fix incorrect tree count comparison
@@ -179,7 +179,7 @@ TFullModel DeserializeModel(const TString& serializedModel) { void TObliviousTrees::TruncateTrees(size_t begin, size_t end) { CB_ENSURE(begin <= end, "begin tree index should be not greater than end tree index."); - CB_ENSURE(end <= TreeSplits.size(), "end tree index should be not greater than tree count."); + CB_ENSURE(end <= TreeStartOffsets.size(), "end tree index should be not greater than tree count."); TObliviousTreeBuilder builder(FloatFeatures, CatFeatures, ApproxDimension); const auto& leafOffsets = MetaData->TreeFirstLeafOffsets; for (size_t treeIdx = begin; treeIdx < end; ++treeIdx) {
scrypt: free the MD reference correctly. The code was calling EVP_MD_meth_free which is incorrect. It should call EVP_MD_free. It happened to work but by luck rather than design.
@@ -75,7 +75,7 @@ static void kdf_scrypt_free(void *vctx) KDF_SCRYPT *ctx = (KDF_SCRYPT *)vctx; if (ctx != NULL) { - EVP_MD_meth_free(ctx->sha256); + EVP_MD_free(ctx->sha256); kdf_scrypt_reset(ctx); OPENSSL_free(ctx); }
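A hedged sketch of the correct pairing, assuming OpenSSL 3.x: a digest obtained with `EVP_MD_fetch()` is released with `EVP_MD_free()`, while `EVP_MD_meth_free()` is the counterpart of `EVP_MD_meth_new()` and is not meant for fetched digests.

```c
#include <stdio.h>
#include <openssl/evp.h>

int main(void)
{
    /* Fetch a provider-backed digest (OpenSSL 3.x API). */
    EVP_MD *sha256 = EVP_MD_fetch(NULL, "SHA256", NULL);
    if (sha256 == NULL) {
        fprintf(stderr, "EVP_MD_fetch failed\n");
        return 1;
    }
    printf("digest size: %d bytes\n", EVP_MD_get_size(sha256));
    EVP_MD_free(sha256);   /* correct release for a fetched digest */
    return 0;
}
```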
fixed qsort() issue
@@ -137,7 +137,10 @@ static int sort_maclist_by_time(const void *a, const void *b) { const maclist_t *ia = (const maclist_t *)a; const maclist_t *ib = (const maclist_t *)b; -return (ia->timestamp < ib->timestamp); + +if(ia->timestamp < ib->timestamp) return 1; +else if(ia->timestamp > ib->timestamp) return -1; +return 0; } /*===========================================================================*/ struct tags_s @@ -192,7 +195,10 @@ static int sort_handshakelist_by_time(const void *a, const void *b) { const handshakelist_t *ia = (const handshakelist_t *)a; const handshakelist_t *ib = (const handshakelist_t *)b; -return (ia->timestamp < ib->timestamp); + +if(ia->timestamp < ib->timestamp) return 1; +else if(ia->timestamp > ib->timestamp) return -1; +return 0; } /*===========================================================================*/ struct scanlist_s @@ -212,7 +218,10 @@ static int sort_scanlist_by_count(const void *a, const void *b) { const scanlist_t *ia = (const scanlist_t *)a; const scanlist_t *ib = (const scanlist_t *)b; -return (ia->count < ib->count); + +if(ia->count < ib->count) return 1; +else if(ia->count > ib->count) return -1; +return 0; } /*===========================================================================*/ struct filterlist_s @@ -226,10 +235,9 @@ static int sort_filterlist_by_mac(const void *a, const void *b) { const filterlist_t *ia = (const filterlist_t *)a; const filterlist_t *ib = (const filterlist_t *)b; -if(memcmp(ia->mac, ib->mac, 6) > 0) - return 1; -else if(memcmp(ia->mac, ib->mac, 6) < 0) - return -1; + +if(memcmp(ia->mac, ib->mac, 6) > 0) return 1; +else if(memcmp(ia->mac, ib->mac, 6) < 0) return -1; return 0; } /*===========================================================================*/
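A standalone sketch of the underlying rule (the struct here is made up, not the hcxtools one): a `qsort()` comparator must return negative, zero, or positive, so returning the boolean `(a < b)` loses the "greater than" case and yields an inconsistent ordering. Subtraction is also avoided because the timestamps are unsigned.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct { uint64_t timestamp; } rec_t;

/* Newest first, mirroring the descending order used in the fix above. */
static int cmp_by_time_desc(const void *a, const void *b)
{
    const rec_t *ra = (const rec_t *)a;
    const rec_t *rb = (const rec_t *)b;
    if (ra->timestamp < rb->timestamp) return 1;
    if (ra->timestamp > rb->timestamp) return -1;
    return 0;
}

int main(void)
{
    rec_t recs[] = { {10}, {30}, {20} };
    qsort(recs, 3, sizeof(recs[0]), cmp_by_time_desc);
    for (size_t i = 0; i < 3; i++) {
        printf("%llu\n", (unsigned long long)recs[i].timestamp);  /* 30 20 10 */
    }
    return 0;
}
```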
sandbox: Turn off wasm since it's making the sandbox misbehave.
@@ -6,7 +6,7 @@ SET(CMAKE_RANLIB emranlib) SET(MY_EXPORTS "'_get_parser', '_run_parser', '_get_parser_error'") SET(MY_EXPORTS "${MY_EXPORTS}, '_destroy_parser'") -SET(CMAKE_C_FLAGS "-O1 -Wno-warn-absolute-paths --memory-init-file 0 -s EXPORTED_FUNCTIONS=\"[${MY_EXPORTS}]\" -s EXTRA_EXPORTED_RUNTIME_METHODS='[\"cwrap\"]'") +SET(CMAKE_C_FLAGS "-O1 -Wno-warn-absolute-paths --memory-init-file 0 -s WASM=0 -s EXPORTED_FUNCTIONS=\"[${MY_EXPORTS}]\" -s EXTRA_EXPORTED_RUNTIME_METHODS='[\"cwrap\"]'") SET(CMAKE_CXX_FLAGS -g) SET(CMAKE_TOOLCHAIN_FILE "$EMSCRIPTEN/cmake/Platform/Emscripten.cmake") SET(CMAKE_EXECUTABLE_SUFFIX ".js")
Test recursive parameter entity
@@ -4065,6 +4065,12 @@ START_TEST(test_external_entity_values) NULL, XML_ERROR_PARTIAL_CHAR }, + { + "%e1;", + "Recursive parameter entity not faulted", + NULL, + XML_ERROR_RECURSIVE_ENTITY_REF + }, { NULL, NULL, NULL, XML_ERROR_NONE } }; int i;
Fix for new font API
@@ -76,7 +76,7 @@ void render(uint32_t time_ms) { // draw FPS meter & watermark screen.watermark(); screen.pen = Pen(255, 255, 255); - screen.text(std::to_string(ms_end - ms_start) + "ms/frame", &minimal_font[0][0], blit::Point(2, 240 - 10)); + screen.text(std::to_string(ms_end - ms_start) + "ms/frame", minimal_font, blit::Point(2, 240 - 10)); screen.pen = Pen(255, 0, 0); for (int i = 0; i < uint16_t(ms_end - ms_start); i++) { screen.pen = Pen(i * 5, 255 - (i * 5), 0);
docs: update CN translation for jtag-debugging
@@ -183,7 +183,6 @@ In case these issues occur, please remove the component. The figure below shows .. figure:: ../../../_static/esp32-devkitc-c15-location.png :align: center :alt: Location of C15 (colored yellow) on ESP32-DevKitC V4 board - :figclass: align-center :width: 30% Location of C15 (yellow) on ESP32-DevKitC V4 board
add note about when unicorn support was enabled in micropython
@@ -19,8 +19,8 @@ The table below shows the current state of compatibly and some notes to set expe |---|---|---|--- |[Pico Explorer Base](https://shop.pimoroni.com/products/pico-explorer-base)|Yes|Yes| |[Pico RGB Keypad](https://shop.pimoroni.com/products/pico-rgb-keypad-base)|Yes|Yes| -|[Pico Unicorn Pack](https://shop.pimoroni.com/products/pico-unicorn-pack)|Yes|Yes| -|[Pico Audio Pack](https://shop.pimoroni.com/products/pico-audio-pack)|Yes|No|Limited Support for MicroPython planned +|[Pico Unicorn Pack](https://shop.pimoroni.com/products/pico-unicorn-pack)|Yes|Yes|MicroPython support added in v0.0.3 Alpha +|[Pico Audio Pack](https://shop.pimoroni.com/products/pico-audio-pack)|Yes|No|Limited support for MicroPython planned |[Pico Scroll Pack](https://shop.pimoroni.com/products/pico-scroll-pack)|Yes|Yes| |[Pico Display Pack](https://shop.pimoroni.com/products/pico-display-pack)|Yes|Yes|
fix crash if zero blocks
@@ -632,8 +632,10 @@ namespace NKernel { numBlocks.x = (leafSize + blockSize * N - 1) / (blockSize * N); numBlocks.y = 1; numBlocks.z = 1; - - SplitAndMakeSequenceInSingleLeafImpl<N, blockSize><<<numBlocks, blockSize, 0, stream>>>(compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices); + if (numBlocks.x) { + SplitAndMakeSequenceInSingleLeafImpl<N, blockSize> << < numBlocks, blockSize, 0, stream >> + > (compressedIndex, loadIndices, parts, leafId, splitFeature, splitBin, splitFlags, indices); + } }
Clarify legal requirements for contributions
@@ -19,8 +19,6 @@ Making a Contribution 1. Write a test which shows that the bug was fixed or that the feature works as expected. 1. Send a pull request (PR) and work with us until it gets merged and published. Contributions may need some modifications, so a few rounds of review and fixing may be necessary. We will include your name in the ChangeLog :) 1. For quick merging, the contribution should be short, and concentrated on a single feature or topic. The larger the contribution is, the longer it would take to review it and merge it. -1. All new files should include the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) standard license header where possible. -1. Ensure that each commit has at least one `Signed-off-by:` line from the committer. If anyone else contributes to the commit, they should also add their own `Signed-off-by:` line. By adding this line, contributor(s) certify that the contribution is made under the terms of the [Developer Certificate of Origin](dco.txt). The contribution licensing is described in the [License section of the README](README.md#License). Backwards Compatibility ----------------------- @@ -79,3 +77,12 @@ Mbed TLS is well documented, but if you think documentation is needed, speak out 1. If needed, a Readme file is advised. 1. If a [Knowledge Base (KB)](https://tls.mbed.org/kb) article should be added, write this as a comment in the PR description. 1. A [ChangeLog](https://github.com/Mbed-TLS/mbedtls/blob/development/ChangeLog.d/00README.md) entry should be added for this contribution. + +License and Copyright +--------------------- + +All new files should include the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) standard license header where possible. For licensing details, please see the [License section of the README](README.md#License). + +The copyright on contributions is retained by the original authors of the code. Where possible for new files, this should be noted in a comment at the top of the file in the form: "Copyright The Mbed TLS Contributors". + +When contributing code to us, the committer and all authors are required to make the submission under the terms of the [Developer Certificate of Origin](dco.txt), confirming that the code submitted can (legally) become part of the project, and be subject to the same Apache 2.0 license. This is done by including the standard Git `Signed-off-by:` line in every commit message. If more than one person contributed to the commit, they should also add their own `Signed-off-by:` line.
Bump bbr2 to draft-02
@@ -673,7 +673,7 @@ static void bbr_start_probe_bw_down(ngtcp2_bbr2_cc *bbr, ngtcp2_tstamp ts) { bbr_start_round(bbr); bbr->state = NGTCP2_BBR2_STATE_PROBE_BW_DOWN; - bbr->pacing_gain = 0.75; + bbr->pacing_gain = 0.9; bbr->cwnd_gain = 2; } @@ -771,6 +771,8 @@ static void bbr_update_probe_bw_cycle_phase(ngtcp2_bbr2_cc *bbr, static int bbr_check_time_to_cruise(ngtcp2_bbr2_cc *bbr, ngtcp2_conn_stat *cstat, ngtcp2_tstamp ts) { + (void)ts; + if (cstat->bytes_in_flight > bbr_inflight_with_headroom(bbr, cstat)) { return 0; } @@ -779,7 +781,7 @@ static int bbr_check_time_to_cruise(ngtcp2_bbr2_cc *bbr, return 1; } - return bbr_has_elapsed_in_phase(bbr, bbr->min_rtt, ts); + return 0; } static int bbr_has_elapsed_in_phase(ngtcp2_bbr2_cc *bbr,
Rename sctp_dtrace_declare.h to sctp_kdtrace.h.
#ifdef __FreeBSD__ #include <sys/cdefs.h> -__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 310590 2016-12-26 11:06:41Z tuexen $"); +__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 353488 2019-10-14 13:02:49Z tuexen $"); #endif #include <netinet/sctp_os.h> @@ -50,8 +50,8 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 310590 2016-12-26 11:06 #include <netinet/sctp_timer.h> #include <netinet/sctp_auth.h> #include <netinet/sctp_asconf.h> -#if defined(__FreeBSD__) && __FreeBSD_version >= 803000 -#include <netinet/sctp_dtrace_declare.h> +#if defined(__FreeBSD__) +#include <netinet/sctp_kdtrace.h> #endif #define SHIFT_MPTCP_MULTI_N 40 @@ -2410,7 +2410,7 @@ sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, const struct sctp_cc_functions sctp_cc_functions[] = { { -#if (defined(__Windows__) || defined(__Userspace_os_Windows)) && !defined(__MINGW32__) +#if defined(__Windows__) || defined(__Userspace_os_Windows) sctp_set_initial_cc_param, sctp_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common, @@ -2431,7 +2431,7 @@ const struct sctp_cc_functions sctp_cc_functions[] = { #endif }, { -#if (defined(__Windows__) || defined(__Userspace_os_Windows)) && !defined(__MINGW32__) +#if defined(__Windows__) || defined(__Userspace_os_Windows) sctp_set_initial_cc_param, sctp_hs_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common, @@ -2452,7 +2452,7 @@ const struct sctp_cc_functions sctp_cc_functions[] = { #endif }, { -#if (defined(__Windows__) || defined(__Userspace_os_Windows)) && !defined(__MINGW32__) +#if defined(__Windows__) || defined(__Userspace_os_Windows) sctp_htcp_set_initial_cc_param, sctp_htcp_cwnd_update_after_sack, sctp_cwnd_update_exit_pf_common,
composite: do not require CDCACM driver to be set, composite interface is generic
# error "USB composite device support is not enabled (CONFIG_USBDEV_COMPOSITE)" #endif -#ifndef CONFIG_CDCACM -# error "USB CDC/ACM serial device support is not enabled (CONFIG_CDCACM)" -#endif - -#ifndef CONFIG_CDCACM_COMPOSITE -# error "USB CDC/ACM serial composite device support is not enabled (CONFIG_CDCACM_COMPOSITE)" -#endif - /* Trace initialization *****************************************************/ #ifndef CONFIG_USBDEV_TRACE_INITIALIDSET
tools/offcputime Filter out negative offcpu duration
@@ -146,8 +146,13 @@ int oncpu(struct pt_regs *ctx, struct task_struct *prev) { } // calculate current thread's delta time - u64 delta = bpf_ktime_get_ns() - *tsp; + u64 t_start = *tsp; + u64 t_end = bpf_ktime_get_ns(); start.delete(&pid); + if (t_start > t_end) { + return 0; + } + u64 delta = t_end - t_start; delta = delta / 1000; if ((delta < MINBLOCK_US) || (delta > MAXBLOCK_US)) { return 0;
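The guard itself as a small standalone C sketch (ordinary userspace code, not BPF): with unsigned nanosecond timestamps the ordering must be checked before subtracting, otherwise the difference silently underflows into a huge value.

```c
#include <stdio.h>
#include <stdint.h>

/* Returns 0 and fills delta_us, or -1 when the sample must be dropped. */
static int offcpu_delta_us(uint64_t t_start_ns, uint64_t t_end_ns, uint64_t *delta_us)
{
    if (t_start_ns > t_end_ns) {
        return -1;   /* stale or skewed record: discard instead of underflowing */
    }
    *delta_us = (t_end_ns - t_start_ns) / 1000;
    return 0;
}

int main(void)
{
    uint64_t d;
    if (offcpu_delta_us(1000000, 5000000, &d) == 0) {
        printf("delta = %llu us\n", (unsigned long long)d);
    }
    if (offcpu_delta_us(5000000, 1000000, &d) != 0) {
        printf("negative duration dropped\n");
    }
    return 0;
}
```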
ensure '-dv' isn't added multiple times to the command line. Change placement of ';' when creating the engineArgs string; fixes a parsing problem.
@@ -295,6 +295,11 @@ static bool EndsWith(const char *s, const char *suffix) * visit's components. Use double-quotes when re-surrounding instead of * single. * + * Kathleen Biagas, Mon Jul 17 15:11:09 MST 2017 + * Ensure '-dv' isn't added multiple times to the command line. + * Change placement of ';' when creating the engineArgs string for + * the command line. + * *****************************************************************************/ int @@ -503,6 +508,10 @@ VisItLauncherMain(int argc, char *argv[]) debugLaunch = true; componentArgs.push_back("-debuglaunch"); } + else if(ARG("-dv")) + { + continue; + } else if(ARG("-apitrace")) { apitrace_component = string(argv[i+1]); @@ -703,8 +712,9 @@ VisItLauncherMain(int argc, char *argv[]) string eArgs; for (size_t i = 0; i < engineArgs.size(); ++i) { - eArgs.append(";"); eArgs.append(engineArgs[i]); + if (i < engineArgs.size()-1) + eArgs.append(";"); } command.push_back(eArgs); }
http_server: metrics: prometheus: add fluentbit_build_info metric This PR adds the fluentbit_build_info metric to the Prometheus metrics endpoint. Added metric output: # HELP fluentbit_build_info Build version information. # TYPE fluentbit_build_info gauge fluentbit_build_info{version="1.5.0",edition="Community"} 1
#include <fluent-bit/flb_filter.h> #include <fluent-bit/flb_output.h> #include <fluent-bit/flb_sds.h> +#include <fluent-bit/flb_version.h> #include "metrics.h" #include <fluent-bit/flb_http_server.h> @@ -393,6 +394,25 @@ void cb_metrics_prometheus(mk_request_t *request, void *data) tmp_sds = flb_sds_cat(sds, "\n", 1); null_check(tmp_sds); + /* Attach fluentbit_build_info metric. */ + tmp_sds = flb_sds_cat(sds, "# HELP fluentbit_build_info Build version information.\n", 55); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "# TYPE fluentbit_build_info gauge\n", 34); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "fluentbit_build_info{version=\"", 30); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, FLB_VERSION_STR, sizeof(FLB_VERSION_STR) - 1); + null_check(tmp_sds); + tmp_sds = flb_sds_cat(sds, "\",edition=\"", 11); + null_check(tmp_sds); +#ifdef FLB_ENTERPRISE + tmp_sds = flb_sds_cat(sds, "Enterprise\"} 1\n", 15); + null_check(tmp_sds); +#else + tmp_sds = flb_sds_cat(sds, "Community\"} 1\n", 14); + null_check(tmp_sds); +#endif + msgpack_unpacked_destroy(&result); buf->users--;
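A small sketch of the exposition text only, built with plain `snprintf` rather than the fluent-bit `flb_sds` API: an "info"-style Prometheus gauge carries its data in labels and always reports the value 1. The version and edition strings here are placeholders.

```c
#include <stdio.h>

int main(void)
{
    const char *version = "1.5.0";      /* placeholder values */
    const char *edition = "Community";
    char buf[256];

    snprintf(buf, sizeof(buf),
             "# HELP fluentbit_build_info Build version information.\n"
             "# TYPE fluentbit_build_info gauge\n"
             "fluentbit_build_info{version=\"%s\",edition=\"%s\"} 1\n",
             version, edition);
    fputs(buf, stdout);
    return 0;
}
```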
remove unnecessary construct
@@ -108,21 +108,15 @@ private: TIntrusivePtr<Inner> Impl_; public: - template <EMemoryType Type> - explicit TCudaVec(ui64 size = 0) - : Impl_(new Inner(size, Type)) - { - } + TCudaVec(ui64 size, EMemoryType type) : Impl_(new Inner(size, type)) { } - template <EMemoryType Type> - explicit TCudaVec(TConstArrayRef<T> data) - : TCudaVec(data.size(), Type) - { + explicit TCudaVec(TConstArrayRef<T> data, EMemoryType type) + : TCudaVec(data.size(), type) { Write(data); }
VERSION bump to version 2.2.29
@@ -65,7 +65,7 @@ endif() # micro version is changed with a set of small changes or bugfixes anywhere in the project. set(SYSREPO_MAJOR_VERSION 2) set(SYSREPO_MINOR_VERSION 2) -set(SYSREPO_MICRO_VERSION 28) +set(SYSREPO_MICRO_VERSION 29) set(SYSREPO_VERSION ${SYSREPO_MAJOR_VERSION}.${SYSREPO_MINOR_VERSION}.${SYSREPO_MICRO_VERSION}) # Version of the library
Avoid warning in asm.c on windows.
@@ -252,8 +252,8 @@ static int32_t doarg_1( case JANET_NUMBER: { double y = janet_unwrap_number(x); - if (y >= INT32_MIN && y <= INT32_MAX) { - ret = y; + if (janet_checkintrange(y)) { + ret = (int32_t) y; } else { goto error; }
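A standalone sketch of the idea — the helper name is invented here, it is not Janet's `janet_checkintrange`: confirm that a double is an integral value within `int32_t` range before casting, so the cast is explicit and cannot truncate silently.

```c
#include <stdio.h>
#include <stdint.h>
#include <math.h>

/* Hypothetical helper: true when y is an exact integer representable in int32_t. */
static int check_int32_range(double y)
{
    return y >= INT32_MIN && y <= INT32_MAX && floor(y) == y;
}

static int to_int32(double y, int32_t *out)
{
    if (!check_int32_range(y)) {
        return -1;
    }
    *out = (int32_t)y;   /* explicit cast avoids implicit-conversion warnings */
    return 0;
}

int main(void)
{
    int32_t v = 0;
    printf("%d\n", to_int32(123.0, &v) == 0 ? v : -1);   /* 123 */
    printf("in range? %d\n", check_int32_range(1e12));   /* 0   */
    return 0;
}
```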
Use zip rather than enumerate After improving coding style, pylint suggested using enumerate, but zip is more appropriate to avoid indexing
@@ -183,13 +183,13 @@ def test_all_common(): "PSK-AES256-CBC-SHA", ] - for i, m_cipher in enumerate(m_ciphers): + for m, g_exp, o_exp in zip(m_ciphers, g_ciphers, o_ciphers): - g = translate_gnutls(m_cipher) - assert_equal(g, g_ciphers[i]) + g = translate_gnutls(m) + assert_equal(g, g_exp) - o = translate_ossl(m_cipher) - assert_equal(o, o_ciphers[i]) + o = translate_ossl(m) + assert_equal(o, o_exp) def test_mbedtls_ossl_common(): """ @@ -266,10 +266,10 @@ def test_mbedtls_ossl_common(): "DHE-PSK-CHACHA20-POLY1305", ] - for i, m_cipher in enumerate(m_ciphers): + for m, o_exp in zip(m_ciphers, o_ciphers): - o = translate_ossl(m_cipher) - assert_equal(o, o_ciphers[i]) + o = translate_ossl(m) + assert_equal(o, o_exp) def test_mbedtls_gnutls_common(): """ @@ -456,10 +456,10 @@ def test_mbedtls_gnutls_common(): "+RSA-PSK:+AES-128-GCM:+AEAD", ] - for i, m_ciphers in enumerate(m_ciphers): + for m, g_exp in zip(m_ciphers, g_ciphers): - g = translate_gnutls(m_ciphers) - assert_equal(g, g_ciphers[i]) + g = translate_gnutls(m) + assert_equal(g, g_exp) test_all_common() test_mbedtls_ossl_common()
Some comments refinement
@@ -717,7 +717,7 @@ static themis_status_t secure_comparator_bob_step2(secure_comparator_t *comp_ctx } /* Finally Bob sends to Alice on 2 step: - * G2b || G2b signature || G3b || G3b signature || Pb || Qb || Qb signature + * G2b || G2b signature || G3b || G3b signature || Pb || Qb || Pb + Qb signature * Bob proceeds 2 step, Alice responds on 3 step and if it's succeeded, * protocol continues with Bob's 4 step. */ @@ -736,7 +736,7 @@ static themis_status_t secure_comparator_alice_step3(secure_comparator_t *comp_c ge_p3 R; /* Input validation from Bob's 2 step (amount of group elements in brackets): - * G2b (1) || G2b signature (2) || G3b (1) || G3b signature (2) || Pb (1) || Qb (1) || Qb signature (3) + * G2b (1) || G2b signature (2) || G3b (1) || G3b signature (2) || Pb (1) || Qb (1) || Pb + Qb signature (3) * */ if (input_length < (11 * ED25519_GE_LENGTH)) { @@ -844,7 +844,7 @@ static themis_status_t secure_comparator_alice_step3(secure_comparator_t *comp_c } /* Finally Alice sends to Bob on 3 step: - * Pa || Qa || Qa signature || Ra || Ra signature + * Pa || Qa || Pa + Qa signature || Ra || Ra signature * Alice proceeds 3 step, Bob responds on 4 step and if it's succeeded, * protocol continues with Alice's 5 step. */
Add some libssl wrappers
@@ -935,7 +935,7 @@ GO(CRYPTO_strdup, pFppi) //GO(d2i_ASN1_VISIBLESTRING, //GO(d2i_AUTHORITY_INFO_ACCESS, //GO(d2i_AUTHORITY_KEYID, -//GO(d2i_AutoPrivateKey, +GO(d2i_AutoPrivateKey, pFppl) //GO(d2i_BASIC_CONSTRAINTS, //GO(d2i_CERTIFICATEPOLICIES, //GO(d2i_CMS_bio, @@ -1756,7 +1756,7 @@ GO(ERR_remove_thread_state, vFp) //GO(EVP_aes_256_cfb8, //GO(EVP_aes_256_ctr, //GO(EVP_aes_256_ecb, -//GO(EVP_aes_256_gcm, +GO(EVP_aes_256_gcm, pFv) //GO(EVP_aes_256_ofb, //GO(EVP_aes_256_wrap, //GO(EVP_aes_256_xts, @@ -1804,7 +1804,7 @@ GO(EVP_CIPHER_CTX_free, vFp) GO(EVP_CIPHER_CTX_init, vFp) //GO(EVP_CIPHER_CTX_iv_length, //GO(EVP_CIPHER_CTX_key_length, -//GO(EVP_CIPHER_CTX_new, +GO(EVP_CIPHER_CTX_new, pFv) //GO(EVP_CIPHER_CTX_nid, //GO(EVP_CIPHER_CTX_rand_key, //GO(EVP_CIPHER_CTX_set_app_data, @@ -1973,9 +1973,9 @@ GO(EVP_PKEY_CTX_set_app_data, vFpp) //GO(EVP_PKEY_decrypt_init, //GO(EVP_PKEY_decrypt_old, //GO(EVP_PKEY_delete_attr, -//GO(EVP_PKEY_derive, -//GO(EVP_PKEY_derive_init, -//GO(EVP_PKEY_derive_set_peer, +GO(EVP_PKEY_derive, iFppp) +GO(EVP_PKEY_derive_init, iFp) +GO(EVP_PKEY_derive_set_peer, iFppi) //GO(EVP_PKEY_encrypt, //GO(EVP_PKEY_encrypt_init, //GO(EVP_PKEY_encrypt_old, @@ -2030,13 +2030,13 @@ GO(EVP_PKEY_set1_RSA, iFpp) //GO(evp_pkey_set_cb_translate, //GO(EVP_PKEY_set_type, //GO(EVP_PKEY_set_type_str, -//GO(EVP_PKEY_sign, -//GO(EVP_PKEY_sign_init, +GO(EVP_PKEY_sign, iFppppp) +GO(EVP_PKEY_sign_init, iFp) //GO(EVP_PKEY_size, GO(EVP_PKEY_type, iFi) GO(EVP_PKEY_up_ref, iFp) -//GO(EVP_PKEY_verify, -//GO(EVP_PKEY_verify_init, +GO(EVP_PKEY_verify, iFppLpL) +GO(EVP_PKEY_verify_init, iFp) //GO(EVP_PKEY_verify_recover, //GO(EVP_PKEY_verify_recover_init, //GO(EVP_rc2_40_cbc,
Guard use of -march=nehalem to gcc >= 4.9 Fixes
@@ -260,7 +260,24 @@ build_target() { esac case $arch in - x86_64) add cc_flags -march=nehalem;; + x86_64) + # 'nehalem' tuning actually produces faster code for orca than later + # archs, for both gcc and clang, even if it's running on a later arch + # CPU. This is likely due to smaller emitted code size. gcc earlier than + # 4.9 does not recognize the arch flag for it it, though, and I haven't + # tested a compiler that old, so I don't know what optimization behavior + # we get with it is. Just leave it at default, in that case. + case $cc_id in + gcc) + if cc_vers_is_gte 4.9; then + add cc_flags -march=nehalem + fi + ;; + clang) + add cc_flags -march=nehalem + ;; + esac + ;; esac add source_files gbuffer.c field.c mark.c bank.c sim.c
enable CAddress nTime sharing with proto 33500
@@ -43,14 +43,14 @@ extern int MIN_MN_PROTO_VERSION; // nTime field added to CAddress, starting with this version; // if possible, avoid requesting addresses nodes older than this -static const int CADDR_TIME_VERSION = 31005; +static const int CADDR_TIME_VERSION = 33500; // start sharing node timeinfo with this proto version 33500 // only request blocks from nodes outside this range of versions static const int NOBLKS_VERSION_START = 70002; static const int NOBLKS_VERSION_END = 70006; // BIP 0031, pong message, is enabled for all versions AFTER this one -static const int BIP0031_VERSION = 21212; +static const int BIP0031_VERSION = 21212; // changed to 21212 from 60000 - start sending nonces to all clients. // "mempool" command, enhanced "getdata" behavior starts with this version: static const int MEMPOOL_GD_VERSION = 60002;
README: update mailing list Update the top-level README.md file to refer to the new mailing list.
@@ -58,7 +58,7 @@ For more information in the source, here are some pointers: Developers welcome! * Our developer mailing list: - http://lists.runtime.co/mailman/listinfo/dev-mcuboot_lists.runtime.co + https://groups.io/g/mcuboot * Our Slack channel: https://mcuboot.slack.com/ <br /> Get your invite [here!](https://join.slack.com/t/mcuboot/shared_invite/MjE2NDcwMTQ2MTYyLTE1MDA4MTIzNTAtYzgyZTU0NjFkMg) * Our IRC channel: http://irc.freenode.net, #mcuboot
BugID:19162920: remove the use of malloc.
@@ -61,7 +61,7 @@ int bt_mesh_conn_disconnect(bt_mesh_conn_t conn, uint8_t reason) struct svc_paire_node { struct bt_mesh_gatt_service *msvc; - struct bt_gatt_service *svc; + struct bt_gatt_service svc; } _svc_paire[SVC_ENTRY_MAX] = {{0}}; /* TODO: manage the services in linked list. */ @@ -85,15 +85,10 @@ int bt_mesh_gatt_service_register(struct bt_mesh_gatt_service *svc) } node->msvc = svc; - node->svc = (struct bt_gatt_service *)malloc(sizeof(struct bt_gatt_service)); - if (node->svc == NULL) { - return -1; - } - - node->svc->attrs = (struct bt_gatt_attr *)svc->attrs; - node->svc->attr_count = svc->attr_count; + node->svc.attrs = (struct bt_gatt_attr *)svc->attrs; + node->svc.attr_count = svc->attr_count; - return bt_gatt_service_register(node->svc); + return bt_gatt_service_register(&(node->svc)); } int bt_mesh_gatt_service_unregister(struct bt_mesh_gatt_service *svc) @@ -114,8 +109,7 @@ int bt_mesh_gatt_service_unregister(struct bt_mesh_gatt_service *svc) return 0; } - ret = bt_gatt_service_unregister(node->svc); - free(node->svc); + ret = bt_gatt_service_unregister(&(node->svc)); return ret; }
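A generic sketch of the refactoring pattern with made-up type names: embedding the member directly in a statically allocated table removes the `malloc`/`free` pair and the allocation-failure path that came with it.

```c
#include <stdio.h>

struct gatt_service { int attr_count; };    /* stand-in for the real service type */

struct svc_pair_node {
    const char *name;
    struct gatt_service svc;                /* embedded: no heap allocation needed */
};

#define SVC_ENTRY_MAX 4
static struct svc_pair_node svc_pairs[SVC_ENTRY_MAX];

static int register_service(int idx, const char *name, int attr_count)
{
    if (idx < 0 || idx >= SVC_ENTRY_MAX) {
        return -1;
    }
    svc_pairs[idx].name = name;
    svc_pairs[idx].svc.attr_count = attr_count;   /* fill in place, nothing to free later */
    return 0;
}

int main(void)
{
    register_service(0, "mesh-prov", 5);
    printf("%s: %d attrs\n", svc_pairs[0].name, svc_pairs[0].svc.attr_count);
    return 0;
}
```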
updated for RHEL: added RHEL 7.6 Beta information
- [Arch](#arch---aur) - [Gentoo](#gentoo---portage) - [openSUSE](#opensuse---binary) + - [RHEL](#redhat---binary) * [Source](#source) - [Debian](#debian---source) - [Ubuntu](#ubuntu---source) @@ -145,6 +146,13 @@ sudo zypper ref sudo zypper in bcc-tools bcc-examples ``` +## RHEL - Binary + +For Redhat 7.6 (Beta) bcc is already included in the official yum repository as bcc-tools. As part of the install the following dependencies are installed: bcc.x86_64 0:0.6.0-3.el7 ,llvm-private.x86_64 0:6.0.1-2.el7 ,python-bcc.x86_64 0:0.6.0-3.el7,python-netaddr.noarch 0:0.7.5-9.el7 + +``` +yum install bcc-tools +``` # Source
OcFileLib: Ensure that non-null terminated label is handled properly
@@ -39,6 +39,8 @@ GetVolumeLabel ( EFI_FILE_HANDLE Volume; EFI_FILE_SYSTEM_VOLUME_LABEL *VolumeInfo; + UINTN VolumeLabelSize; + CHAR16 *VolumeLabel; ASSERT (FileSystem != NULL); @@ -56,7 +58,7 @@ GetVolumeLabel ( Volume, &gEfiFileSystemVolumeLabelInfoIdGuid, sizeof (EFI_FILE_SYSTEM_VOLUME_LABEL), - NULL + &VolumeLabelSize ); Volume->Close (Volume); @@ -67,8 +69,18 @@ GetVolumeLabel ( ); if (VolumeInfo != NULL) { - if (VolumeInfo->VolumeLabel[0] != L'\0') { - return VolumeInfo->VolumeLabel; + if (VolumeInfo->VolumeLabel[0] != L'\0' + && VolumeLabelSize <= MAX_UINTN - sizeof (VolumeLabel[0])) { + // + // Some old HFS Plus drivers may not provide terminating \0 on volume label. + // + VolumeLabel = AllocatePool (VolumeLabelSize + sizeof (VolumeLabel[0])); + if (VolumeLabel != NULL) { + CopyMem (VolumeLabel, VolumeInfo->VolumeLabel, VolumeLabelSize); + VolumeLabel[VolumeLabelSize / sizeof (VolumeLabel[0])] = '\0'; + FreePool (VolumeInfo); + return VolumeLabel; + } } FreePool (VolumeInfo); }
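The same pattern in plain C (`char` and `malloc` standing in for `CHAR16` and `AllocatePool`): copy a buffer whose byte size is known but which may lack a terminator, and append the terminator yourself after an overflow check on the size.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Returns a freshly allocated, NUL-terminated copy of a possibly unterminated buffer. */
static char *copy_label(const char *raw, size_t raw_size)
{
    if (raw_size > SIZE_MAX - 1) {
        return NULL;                 /* guard the +1 below against overflow */
    }
    char *label = malloc(raw_size + 1);
    if (label == NULL) {
        return NULL;
    }
    memcpy(label, raw, raw_size);
    label[raw_size] = '\0';          /* the driver may not have provided one */
    return label;
}

int main(void)
{
    char raw[7] = {'M', 'a', 'c', 'i', 'n', 't', 'o'};   /* deliberately unterminated */
    char *label = copy_label(raw, sizeof(raw));
    if (label != NULL) {
        printf("%s\n", label);
        free(label);
    }
    return 0;
}
```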
Tests: address configuration tests reworked.
@@ -8,6 +8,15 @@ from unit.control import TestControl class TestConfiguration(TestControl): prerequisites = {'modules': {'python': 'any'}} + def try_addr(self, addr): + return self.conf( + { + "listeners": {addr: {"pass": "routes"}}, + "routes": [{"action": {"return": 200}}], + "applications": {}, + } + ) + def test_json_empty(self): assert 'error' in self.conf(''), 'empty' @@ -218,50 +227,20 @@ class TestConfiguration(TestControl): {"*:7080": {"pass": "applications/app"}}, 'listeners' ), 'listeners no app' - def test_listeners_wildcard(self): - assert 'success' in self.conf( - { - "listeners": {"*:7080": {"pass": "applications/app"}}, - "applications": { - "app": { - "type": "python", - "processes": {"spare": 0}, - "path": "/app", - "module": "wsgi", - } - }, - } - ), 'listeners wildcard' + def test_listeners_addr(self): + assert 'success' in self.try_addr("*:7080"), 'wildcard' + assert 'success' in self.try_addr("127.0.0.1:7081"), 'explicit' + assert 'success' in self.try_addr("[::1]:7082"), 'explicit ipv6' - def test_listeners_explicit(self): - assert 'success' in self.conf( - { - "listeners": {"127.0.0.1:7080": {"pass": "applications/app"}}, - "applications": { - "app": { - "type": "python", - "processes": {"spare": 0}, - "path": "/app", - "module": "wsgi", - } - }, - } - ), 'explicit' + def test_listeners_addr_error(self): + assert 'error' in self.try_addr("127.0.0.1"), 'no port' - def test_listeners_explicit_ipv6(self): - assert 'success' in self.conf( - { - "listeners": {"[::1]:7080": {"pass": "applications/app"}}, - "applications": { - "app": { - "type": "python", - "processes": {"spare": 0}, - "path": "/app", - "module": "wsgi", - } - }, - } - ), 'explicit ipv6' + def test_listeners_addr_error_2(self, skip_alert): + skip_alert(r'bind.*failed', r'failed to apply new conf') + + assert 'error' in self.try_addr( + "[f607:7403:1e4b:6c66:33b2:843f:2517:da27]:7080" + ) def test_listeners_port_release(self): for i in range(10): @@ -290,22 +269,6 @@ class TestConfiguration(TestControl): assert 'success' in resp, 'port release' - @pytest.mark.skip('not yet, unsafe') - def test_listeners_no_port(self): - assert 'error' in self.conf( - { - "listeners": {"127.0.0.1": {"pass": "applications/app"}}, - "applications": { - "app": { - "type": "python", - "processes": {"spare": 0}, - "path": "/app", - "module": "wsgi", - } - }, - } - ), 'no port' - def test_json_application_name_large(self): name = "X" * 1024 * 1024
Add protection for rfcomm close API in SPP
@@ -210,16 +210,18 @@ void port_release_port (tPORT *p_port) osi_mutex_global_lock(); RFCOMM_TRACE_DEBUG("port_release_port, p_port:%p", p_port); + if (p_port->rx.queue) { while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->rx.queue, 0)) != NULL) { osi_free (p_buf); } - + } p_port->rx.queue_size = 0; + if (p_port->tx.queue) { while ((p_buf = (BT_HDR *)fixed_queue_dequeue(p_port->tx.queue, 0)) != NULL) { osi_free (p_buf); } - + } p_port->tx.queue_size = 0; osi_mutex_global_unlock();
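A generic sketch of the guard with a toy queue (not the osi `fixed_queue` API): skip draining when the queue was never created, so a release path that runs early or twice does not dereference NULL.

```c
#include <stdio.h>
#include <stdlib.h>

struct node  { struct node *next; };
struct queue { struct node *head; };

static void *queue_dequeue(struct queue *q)
{
    struct node *n = q->head;
    if (n != NULL) {
        q->head = n->next;
    }
    return n;
}

static void drain_queue(struct queue *q)
{
    void *buf;
    if (q == NULL) {
        return;                       /* queue not allocated yet (or already torn down) */
    }
    while ((buf = queue_dequeue(q)) != NULL) {
        free(buf);
    }
}

int main(void)
{
    drain_queue(NULL);                /* safe no-op instead of a crash */
    printf("ok\n");
    return 0;
}
```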
libnetif_raw: adding k1om to build architectures
build library { target = "net_if_raw", cFiles = [ "interface_raw.c" ], addLibraries = libDeps [ "devif", "devif_backend_idc"], - architectures = ["armv7", "armv8"]} + architectures = ["armv7", "armv8", "k1om"]} ]
khan: docs, lift arg vase into (unit)
?. =(%nonexistent s.args) ~& >>> bad-name+s.args (expect !>(|)) - (expect-eq !>(~) t.args) + (expect-eq !>(`~) t.args) :: ++ test-khan-take-dud :: !! :: ++ test-khan-take-watch-fail
Design: Use list syntax for descriptions
@@ -267,9 +267,9 @@ with `KEY_TYPE`. Data structures start with a capital letter for every part of the word: - KDB ... Key Data Base Handle - KeySet ... Key Set - Key ... Key +- `KDB` ... Key Data Base Handle +- `KeySet` ... Key Set +- `Key` ... Key We use singular for all names.
Enable invalid param test for ecp
@@ -61,7 +61,7 @@ inline static int mbedtls_ecp_group_cmp( mbedtls_ecp_group *grp1, * END_DEPENDENCIES */ -/* BEGIN_CASE depends_on:NOT_DEFINED */ +/* BEGIN_CASE */ void ecp_invalid_param( ) { mbedtls_ecp_group grp;