message | diff
---|---
more cryptocurrency apis
* crypto apis
* tick fixes
* ICObench data api
* Zloader API
* Voltaire API
* Coin Ranking API
* remove spacing in coin rank
* remove voltaire | @@ -185,15 +185,18 @@ API | Description | Auth | HTTPS | CORS |
| [CoinLayer](https://coinlayer.com) | Real-time Crypto Currency Exchange Rates | `apiKey` | Yes | Unknown |
| [CoinMarketCap](https://coinmarketcap.com/api/) | Cryptocurrencies Prices | No | Yes | Unknown |
| [Coinpaprika](https://api.coinpaprika.com) | Cryptocurrencies prices, volume and more | No | Yes | Yes |
+| [CoinRanking](https://docs.coinranking.com/) | Live Cryptocurrency data | No | Yes | Unknown |
| [CryptoCompare](https://www.cryptocompare.com/api#) | Cryptocurrencies Comparison | No | Yes | Unknown |
| [Cryptonator](https://www.cryptonator.com/api/) | Cryptocurrencies Exchange Rates | No | Yes | Unknown |
| [Gemini](https://docs.gemini.com/rest-api/) | Cryptocurrencies Exchange | No | Yes | Unknown |
+| [ICObench](https://icobench.com/developers) | Various information on listing, ratings, stats, and more | `apiKey` | Yes | Unknown |
| [Livecoin](https://www.livecoin.net/api) | Cryptocurrency Exchange | No | Yes | Unknown |
| [MercadoBitcoin](https://www.mercadobitcoin.net/api-doc/) | Brazilian Cryptocurrency Information | No | Yes | Unknown |
| [Nexchange](https://nexchange2.docs.apiary.io/) | Automated cryptocurrency exchange service | No | No | Yes |
| [NiceHash](https://www.nicehash.com/doc-api) | Largest Crypto Mining Marketplace | `apiKey` | Yes | Unknown |
| [Poloniex](https://poloniex.com/support/api/) | US based digital asset exchange | `apiKey` | Yes | Unknown |
| [WorldCoinIndex](https://www.worldcoinindex.com/apiservice) | Cryptocurrencies Prices | `apiKey` | Yes | Unknown |
+| [Zloader](https://www.zloadr.com/cryptocurrency-developers.php) | Due diligence data platform | `apiKey` | Yes | Unknown |
### Currency Exchange
API | Description | Auth | HTTPS | CORS |
|
tests: don't fail when `convert` is missing
...instead, just skip as if the version is too old.
Closes | @@ -19,6 +19,7 @@ from libqtile import images
def get_imagemagick_version():
"Get the installed imagemagick version from the convert utility"
+ try:
p = sp.Popen(['convert', '-version'], stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
lines = stdout.decode().splitlines()
@@ -28,6 +29,9 @@ def get_imagemagick_version():
version = version.replace('-', '.')
vals = version.split('.')
return [int(x) for x in vals]
+ except FileNotFoundError:
+ # we don't have the `convert` binary
+ return (0, 0)
def should_skip():
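A minimal, self-contained sketch of the pattern added above: `subprocess.Popen` raises `FileNotFoundError` when the binary is absent, and the caller treats that the same as an unusably old version. The binary name below is illustrative.

```python
import subprocess as sp

def get_tool_version(binary="convert"):
    """Return the tool's version banner, or None when the binary is missing."""
    try:
        p = sp.Popen([binary, "-version"], stdout=sp.PIPE, stderr=sp.PIPE)
        stdout, _ = p.communicate()
        return stdout.decode().splitlines()[0] if stdout else None
    except FileNotFoundError:
        # The binary is not installed; callers can treat this like
        # "version too old" and skip the dependent tests.
        return None

print(get_tool_version("definitely-not-installed"))  # None
```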
|
llvm/execution: Drop redundant frozenset constructors
Rename additional_tags -> tags. | @@ -235,7 +235,7 @@ class CompExecution(CUDAExecution):
self.__bin_run_multi_func = None
self.__debug_env = debug_env
self.__frozen_vals = None
- self.__additional_tags = frozenset(additional_tags)
+ self.__tags = frozenset(additional_tags)
# TODO: Consolidate these
if len(execution_ids) > 1:
@@ -269,8 +269,8 @@ class CompExecution(CUDAExecution):
def _set_bin_node(self, node):
assert node in self._composition._all_nodes
wrapper = builder_context.LLVMBuilderContext.get_global().get_node_wrapper(self._composition, node)
- tags = frozenset(self.__additional_tags.union({"node_wrapper"}))
- self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(wrapper, tags=tags)
+ self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(
+ wrapper, tags=self.__tags.union({"node_wrapper"}))
@property
def _conditions(self):
@@ -472,9 +472,8 @@ class CompExecution(CUDAExecution):
@property
def _bin_exec_func(self):
if self.__bin_exec_func is None:
- tags=frozenset(self.__additional_tags)
self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj(
- self._composition, tags=tags)
+ self._composition, tags=self.__tags)
return self.__bin_exec_func
@@ -529,9 +528,8 @@ class CompExecution(CUDAExecution):
@property
def _bin_run_func(self):
if self.__bin_run_func is None:
- run_tags = frozenset({"run"}.union(self.__additional_tags))
self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj(
- self._composition, tags=run_tags)
+ self._composition, tags=self.__tags.union({"run"}))
return self.__bin_run_func
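A quick illustration of why the extra `frozenset(...)` wrapper was redundant: `frozenset.union()` already returns a new `frozenset`, so wrapping the result again only makes a copy. The tag value below is illustrative.

```python
tags = frozenset({"evaluate"})

extended = tags.union({"node_wrapper"})
print(type(extended))                   # <class 'frozenset'>
print(extended == frozenset(extended))  # True -- the extra wrapper adds nothing
```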
|
revent: Add add_listener()
This is a non-backwards compatible replacement for addListener(). Its
big new trick is that if you give it a function without an event, it
will attempt to infer the event name from the function name. | @@ -387,6 +387,24 @@ class EventMixin (object):
kw['byName'] = True
return self.addListener(*args,**kw)
+ def add_listener (self, handler, event_type=None, event_name=None,
+ once=False, weak=False, priority=None):
+ """
+ Add an event handler for an event triggered by this object (subscribe).
+
+ This is a replacement for addListener() (which is being deprecated).
+ """
+ assert not (event_type and event_name)
+ if (not event_type) and not (event_name):
+ if not handler.func_name.startswith("_handle_"):
+ raise RuntimeError("Could not infer event type")
+ event_name = handler.func_name[8:]
+ by_name = True if event_name else False
+ t = event_name if by_name else event_type
+
+ return self.addListener(t, handler, once=once, weak=weak, byName=by_name,
+ priority=priority)
+
def addListener (self, eventType, handler, once=False, weak=False,
priority=None, byName=False):
"""
|
Update visual.py
Typo | @@ -192,7 +192,7 @@ def get_vert_ngrams(
lower: bool = True,
from_sentence: bool = True,
) -> Iterator[str]:
- """Return all ngrams which are visually vertivally aligned with the Mention.
+ """Return all ngrams which are visually vertically aligned with the Mention.
Note that if a candidate is passed in, all of its Mentions will be searched.
|
Ignore "unsubscriptable-object"
This code has been tested, points is subscriptable. | @@ -2554,7 +2554,7 @@ class PDPlotter:
if self._dim == 2:
error = stable_marker_plot.error_y["array"]
points = np.append(x, [y, error]).reshape(3, -1).T
- points = points[points[:, 0].argsort()] # sort by composition
+ points = points[points[:, 0].argsort()] # sort by composition # pylint: disable=E1136
# these steps trace out the boundary pts of the uncertainty window
outline = points[:, :2].copy()
|
Add missing expected_regex parameter to TestCase.assertRaisesRegexp().
This makes it match the signature of assertRaisesRegex() which is the modern name for the same function. | @@ -197,11 +197,13 @@ class TestCase:
@overload
def assertRaisesRegexp(self, # type: ignore
exception: Union[Type[BaseException], Tuple[Type[BaseException], ...]],
+ expected_regex: Union[str, bytes, Pattern[str], Pattern[bytes]],
callable: Callable[..., Any] = ...,
*args: Any, **kwargs: Any) -> None: ...
@overload
def assertRaisesRegexp(self,
exception: Union[Type[_E], Tuple[Type[_E], ...]],
+ expected_regex: Union[str, bytes, Pattern[str], Pattern[bytes]],
msg: Any = ...) -> _AssertRaisesContext[_E]: ...
class FunctionTestCase(TestCase):
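A small usage sketch of the signature the stub now describes; `assertRaisesRegex` is the modern spelling, and `assertRaisesRegexp` (where the deprecated alias still exists) accepts the same arguments, with the regular expression as the second positional parameter.

```python
import unittest

class ParsingTest(unittest.TestCase):
    def test_rejects_garbage(self):
        # Exception class first, expected_regex second, then the callable and its args.
        self.assertRaisesRegex(ValueError, r"invalid literal", int, "not a number")

        # Equivalent context-manager form.
        with self.assertRaisesRegex(ValueError, r"invalid literal"):
            int("not a number")

if __name__ == "__main__":
    unittest.main()
```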
|
Update 'stock' icons for part table
Display on-order and building quantity as icons
Simplified display logic
Do not hide information in certain circumstances. | @@ -1460,46 +1460,42 @@ function loadPartTable(table, url, options={}) {
title: '{% trans "Stock" %}',
searchable: false,
formatter: function(value, row) {
- var link = '?display=part-stock';
- if (row.in_stock) {
- // There IS stock available for this part
+ var text = '';
- // Is stock "low" (below the 'minimum_stock' quantity)?
- if (row.minimum_stock && row.minimum_stock > row.in_stock) {
- value += `<span class='badge badge-right rounded-pill bg-warning'>{% trans "Low stock" %}</span>`;
- } else if (value == 0) {
- if (row.ordering) {
- // There is no available stock, but stock is on order
- value = `0<span class='badge badge-right rounded-pill bg-info'>{% trans "On Order" %}: ${row.ordering}</span>`;
- link = '?display=purchase-orders';
- } else if (row.building) {
- // There is no available stock, but stock is being built
- value = `0<span class='badge badge-right rounded-pill bg-info'>{% trans "Building" %}: ${row.building}</span>`;
- link = '?display=build-orders';
+ if (row.unallocated_stock != row.in_stock) {
+ text = `${row.unallocated_stock} / ${row.in_stock}`;
} else {
- // There is no available stock
- value = `0<span class='badge badge-right rounded-pill bg-warning'>{% trans "No stock available" %}</span>`;
+ text = `${row.in_stock}`;
}
+
+ // Construct extra informational badges
+ var badges = '';
+
+ if (row.in_stock == 0) {
+ badges += `<span class='fas fa-exclamation-circle icon-red float-right' title='{% trans "No stock" %}'></span>`;
+ } else if (row.in_stock < row.minimum_stock) {
+ badges += `<span class='fas fa-exclamation-circle icon-yellow float-right' title='{% trans "Low stock" %}'></span>`;
}
- } else {
- // There IS NO stock available for this part
- if (row.ordering) {
- // There is no stock, but stock is on order
- value = `0<span class='badge badge-right rounded-pill bg-info'>{% trans "On Order" %}: ${row.ordering}</span>`;
- link = '?display=purchase-orders';
- } else if (row.building) {
- // There is no stock, but stock is being built
- value = `0<span class='badge badge-right rounded-pill bg-info'>{% trans "Building" %}: ${row.building}</span>`;
- link = '?display=build-orders';
- } else {
- // There is no stock
- value = `0<span class='badge badge-right rounded-pill bg-danger'>{% trans "No Stock" %}</span>`;
+ if (row.ordering && row.ordering > 0) {
+ badges += renderLink(
+ `<span class='fas fa-shopping-cart float-right' title='{% trans "On Order" %}: ${row.ordering}'></span>`,
+ `/part/${row.pk}/?display=purchase-orders`
+ );
}
+
+ if (row.building && row.building > 0) {
+ badges += renderLink(
+ `<span class='fas fa-tools float-right' title='{% trans "Building" %}: ${row.building}'></span>`,
+ `/part/${row.pk}/?display=build-orders`
+ );
}
- return renderLink(value, `/part/${row.pk}/${link}`);
+ text = renderLink(text, `/part/${row.pk}/?display=part-stock`);
+ text += badges;
+
+ return text;
}
};
|
Always set the child index
Fix | @@ -451,10 +451,6 @@ def _in_flow_layout(context, box, index, child, new_children, page_is_empty,
fixed_boxes, adjoining_margins, discard)
if new_child is not None:
- # index in its non-laid-out parent, not in future new parent
- # May be used in find_earlier_page_break()
- new_child.index = index
-
# We need to do this after the child layout to have the
# used value for margin_top (eg. it might be a percentage.)
if not isinstance(new_child, (boxes.BlockBox, boxes.TableBox)):
@@ -545,6 +541,9 @@ def _in_flow_layout(context, box, index, child, new_children, page_is_empty,
abort, stop, resume_at, position_y, adjoining_margins, next_page,
new_children)
+ # index in its non-laid-out parent, not in future new parent
+ # May be used in find_earlier_page_break()
+ new_child.index = index
new_children.append(new_child)
if resume_at is not None:
resume_at = {index: resume_at}
|
Update parser.h
update parser.h for larger apps | #include "common/tiny-json.h"
-#define MAX_NUM_IO 4
-#define MAX_NUM_IO_TILES 8
+#define MAX_NUM_IO 16
+#define MAX_NUM_IO_TILES 16
#define BUFFER_SIZE 1024
#define MAX_NUM_KERNEL 16
-#define MAX_JSON_FIELDS 512
+#define MAX_JSON_FIELDS 2048
#define MAX_CONFIG 40
#define MAX_ADDR_GEN_LOOP 5
|
[Stress Tester XFails] Update XFails
has been fixed
on `Dollar.swift` was listed by mistake
on `Dollar.swift` was previously shadowed by
on `SceneDelegate.swift` is now shadowing on `Model.swift` | "issueUrl" : "https://bugs.swift.org/browse/SR-14545"
},
{
- "path" : "*\/DNS\/Sources\/DNS\/Integer+Data.swift",
+ "path" : "*\/Dollar\/Sources\/Dollar.swift",
+ "modification" : "concurrent-2318",
"issueDetail" : {
- "kind" : "codeComplete",
- "offset" : 194
+ "kind" : "conformingMethodList",
+ "offset" : 2320
},
"applicableConfigs" : [
"main"
],
- "issueUrl" : "https://bugs.swift.org/browse/SR-14546"
+ "issueUrl" : "https://bugs.swift.org/browse/SR-14545"
},
{
- "path" : "*\/Base64CoderSwiftUI\/Base64CoderSwiftUI\/Model.swift",
+ "path" : "*\/DNS\/Sources\/DNS\/Integer+Data.swift",
"issueDetail" : {
"kind" : "codeComplete",
- "offset" : 1909
+ "offset" : 194
},
"applicableConfigs" : [
"main"
],
- "issueUrl" : "https://bugs.swift.org/browse/SR-14547"
+ "issueUrl" : "https://bugs.swift.org/browse/SR-14546"
},
{
"path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/extensions\/View.swift",
"issueUrl" : "https://bugs.swift.org/browse/SR-14627"
},
{
- "path" : "*\/Dollar\/Sources\/Dollar.swift",
- "issueDetail" : {
- "kind" : "codeComplete",
- "offset" : 5654
- },
- "applicableConfigs" : [
- "main"
- ],
- "issueUrl" : "https://bugs.swift.org/browse/SR-14636"
- },
- {
- "path" : "*\/Dollar\/Sources\/Dollar.swift",
+ "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/SceneDelegate.swift",
"issueDetail" : {
- "kind" : "codeComplete",
- "offset" : 5654
+ "kind" : "semanticRefactoring",
+ "refactoring" : "Convert Function to Async",
+ "offset" : 355
},
"applicableConfigs" : [
"main"
"issueUrl" : "https://bugs.swift.org/browse/SR-14637"
},
{
- "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/SceneDelegate.swift",
+ "path" : "*\/Base64CoderSwiftUI\/Base64CoderSwiftUI\/Model.swift",
"issueDetail" : {
"kind" : "semanticRefactoring",
"refactoring" : "Convert Function to Async",
- "offset" : 355
+ "offset" : 4723
},
"applicableConfigs" : [
"main"
|
Fix episode termination reward in highway environment
Otherwise the agent just slows down in the end to prevent the episode
termination and keep earning rewards. | @@ -17,8 +17,9 @@ class HighwayEnv(AbstractEnv):
COLLISION_COST = 10
LANE_CHANGE_COST = 0.0
- RIGHT_LANE_REWARD = 0.5
+ RIGHT_LANE_REWARD = 0.4
HIGH_VELOCITY_REWARD = 1.0
+ EPISODE_SUCCESS = 10.0
def __init__(self):
road = Road.create_random_road(lanes_count=4, lane_width=4.0, vehicles_count=20, vehicles_type=IDMVehicle)
@@ -39,18 +40,20 @@ class HighwayEnv(AbstractEnv):
state_reward = \
- self.COLLISION_COST * self.vehicle.crashed \
+ self.RIGHT_LANE_REWARD * self.vehicle.lane_index \
- + self.HIGH_VELOCITY_REWARD * self.vehicle.speed_index()
+ + self.HIGH_VELOCITY_REWARD * self.vehicle.speed_index() \
+ + self.EPISODE_SUCCESS * self.all_vehicles_passed()
return action_reward[action] + state_reward
def is_terminal(self):
"""
The episode is over if the ego vehicle crashed or if it successfully passed all other vehicles.
"""
- is_first = len(self.road.vehicles) > 1 and (self.vehicle.position[0] > 50 + max(
- [o.position[0] for o in self.road.vehicles if o is not self.vehicle]))
-
- return self.vehicle.crashed or is_first
+ return self.vehicle.crashed or self.all_vehicles_passed()
def reset(self):
# TODO
pass
+
+ def all_vehicles_passed(self):
+ return len(self.road.vehicles) > 1 and (self.vehicle.position[0] > 50 + max(
+ [o.position[0] for o in self.road.vehicles if o is not self.vehicle]))
\ No newline at end of file
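A back-of-the-envelope check of why the success bonus changes the incentive, using the constants from the diff above; the lane index, speed index and success flag below are made-up values.

```python
COLLISION_COST = 10
RIGHT_LANE_REWARD = 0.4
HIGH_VELOCITY_REWARD = 1.0
EPISODE_SUCCESS = 10.0

crashed, lane_index, speed_index, all_passed = False, 3, 2, True
state_reward = (
    -COLLISION_COST * crashed
    + RIGHT_LANE_REWARD * lane_index
    + HIGH_VELOCITY_REWARD * speed_index
    + EPISODE_SUCCESS * all_passed
)
print(state_reward)  # 1.2 + 2.0 + 10.0 = 13.2 -- finishing now beats idling for small per-step rewards
```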
|
Removed filtering of directories that contain _X_
This is a strange undocumented hack that breaks experiments that contain X in their name. | @@ -222,9 +222,9 @@ def data_from_time(timestamp, folder=None):
def measurement_filename(directory=os.getcwd(), file_id=None, ext='hdf5'):
dirname = os.path.split(directory)[1]
if file_id is None:
- if dirname[6:9] == '_X_':
- fn = dirname[0:7] + dirname[9:] + '.' + ext
- else:
+ # if dirname[6:9] == '_X_':
+ # fn = dirname[0:7] + dirname[9:] + '.' + ext
+ # else:
fn = dirname + '.' + ext
if os.path.exists(os.path.join(directory, fn)):
return os.path.join(directory, fn)
|
Update README.md
Added Configure Indicator Threshold Parameters under Configure Anomali ThreatStream v3 on Cortex XSOAR to clarify how the DBotScore is generated from the thresholds (based on customer request) | @@ -26,6 +26,21 @@ If you are upgrading from a previous version of this integration, see [Breaking
| Create relationships | Create relationships between indicators as part of enrichment. | False |
4. Click **Test** to validate the URLs, token, and connection.
+
+### Configure Indicator Threshold Parameters
+Each indicator has a threshold parameter and an integer `confidence` value that impacts the indicator's DBotScore calculation.
+The indicator DBotScore is calculated as follows:
+- If you do not specify the threshold parameter value in your instance configuration (recommended):
+If the indicator `confidence` > 65, the DBotScore value is set to 3 (Malicious).
+If the indicator `confidence` is between 25 and 65, the DBotScore value is set to 2 (Suspicious).
+If the indicator `confidence` < 25, the DBotScore value is set to 1 (Good).
+For example,
+If the **IP threshold** value is not specified during configuration and the IP indicator `confidence` value is 45, the DBotScore value is set to 2 (Suspicious).
+- If you configure the threshold parameter value:
+If the indicator `confidence` value is above the threshold parameter value, the DBotScore is set to 3 (Malicious). Otherwise the DBotScore is set to 1 (Good).
+**Note:** You cannot define a threshold that sets the DBotScore to 2 (Suspicious).
+For example, if in the instance configuration you set **File threshold** to 10 and the `confidence` value is 15, the DBotScore is set to 3 (Malicious).
+
## Commands
You can execute these commands from the Cortex XSOAR CLI, as part of an automation, or in a playbook.
After you successfully execute a command, a DBot message appears in the War Room with the command details.
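A hedged sketch of the scoring rules described above, written as plain Python rather than the integration's actual code; the exact handling of the 25 and 65 boundaries is an assumption.

```python
def dbot_score(confidence, threshold=None):
    if threshold is not None:
        # With a configured threshold there is no Suspicious tier.
        return 3 if confidence > threshold else 1
    if confidence > 65:
        return 3  # Malicious
    if confidence >= 25:
        return 2  # Suspicious
    return 1      # Good

print(dbot_score(45))                # 2 -- Suspicious, matching the IP example above
print(dbot_score(15, threshold=10))  # 3 -- Malicious, matching the File example above
```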
|
Don't assume that each item in "pkgs" is a dict (as packed by
pkg.installed state).
Fixes
Also, corrects an issue where a trailing space was added to package
names when pkgs was passed as a dict (i.e. pkg.installed state) | @@ -46,6 +46,7 @@ import salt.utils.data
import salt.utils.functools
import salt.utils.path
import salt.utils.pkg
+from salt.ext.six import string_types
from salt.exceptions import CommandExecutionError
# Define the module's virtual name
@@ -332,7 +333,6 @@ def latest_version(name, **kwargs):
return ret
return ''
-
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
@@ -474,12 +474,16 @@ def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwa
pkg2inst = ''
if pkgs: # multiple packages specified
+ pkg2inst = []
for pkg in pkgs:
+ if getattr(pkg, 'items', False):
if list(pkg.items())[0][1]: # version specified
- pkg2inst += '{0}@{1} '.format(list(pkg.items())[0][0],
- list(pkg.items())[0][1])
+ pkg2inst.append('{0}@{1}'.format(list(pkg.items())[0][0],
+ list(pkg.items())[0][1]))
+ else:
+ pkg2inst.append(list(pkg.items())[0][0])
else:
- pkg2inst += '{0} '.format(list(pkg.items())[0][0])
+ pkg2inst.append("{0}".format(pkg))
log.debug('Installing these packages instead of %s: %s',
name, pkg2inst)
@@ -499,7 +503,10 @@ def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwa
# Install or upgrade the package
# If package is already installed
+ if isinstance(pkg2inst, string_types):
cmd.append(pkg2inst)
+ elif isinstance(pkg2inst, list):
+ cmd = cmd + pkg2inst
out = __salt__['cmd.run_all'](cmd, output_loglevel='trace')
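A standalone sketch of the two shapes `pkgs` can arrive in and how the loop above normalizes them; the package names and versions are illustrative.

```python
def to_install(pkgs):
    out = []
    for pkg in pkgs:
        if getattr(pkg, 'items', False):             # dict entry packed by pkg.installed
            name, version = list(pkg.items())[0]
            out.append('{0}@{1}'.format(name, version) if version else name)
        else:                                        # plain string entry
            out.append('{0}'.format(pkg))
    return out

print(to_install([{'vim': '8.2'}, {'curl': None}]))  # ['vim@8.2', 'curl']
print(to_install(['vim', 'curl']))                   # ['vim', 'curl']
```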
|
Make import sorting tool work on Windows too.
* Due to it changing the line endings of all files, it won't work
though. | @@ -41,7 +41,7 @@ sys.path.insert(
)
)
-from nuitka.tools.Basics import goHome, addPYTHONPATH # isort:skip
+from nuitka.tools.Basics import goHome, addPYTHONPATH, setupPATH # isort:skip
from nuitka.tools.ScanSources import scanTargets # isort:skip
@@ -57,6 +57,7 @@ def main():
target_files.append("nuitka/build/SingleExe.scons")
+ setupPATH()
subprocess.check_call(
[
"isort",
|
descriptors: Add function to clear pypsa output series data
Meant to be run before solving lopf to temporarily free up data for the solver. | @@ -284,6 +284,17 @@ def allocate_series_dataframes(network, series):
pnl[attr] = pnl[attr].reindex(columns=df.index,
fill_value=network.components[component]["attrs"].at[attr,"default"])
+def free_output_series_dataframes(network, components=None):
+ if components is None:
+ components = network.all_components
+
+ for component in components:
+ attrs = network.components[component]['attrs']
+ pnl = network.pnl(component)
+
+ for attr in attrs.index[attrs['varying'] & (attrs['status'] == 'Output')]:
+ pnl[attr] = pd.DataFrame(index=network.snapshots, columns=[])
+
def zsum(s, *args, **kwargs):
"""
pandas 0.21.0 changes sum() behavior so that the result of applying sum
|
Point users to logs when running 'mlflow ui' fails
The change attempts to address by pointing users to the logs of
trying to start the mlflow server instead of printing an exception
that does not explain the cause for failure and is slightly noisy. | @@ -14,6 +14,7 @@ import mlflow.sagemaker.cli
import mlflow.server
from mlflow.entities.experiment import Experiment
+from mlflow.utils.process import ShellCommandException
from mlflow import tracking
@@ -128,7 +129,12 @@ def ui(file_store, host, port):
The UI will be visible at http://localhost:5000 by default.
"""
# TODO: We eventually want to disable the write path in this version of the server.
+ try:
mlflow.server._run_server(file_store, file_store, host, port, 1)
+ except ShellCommandException:
+ print("Running the mlflow server failed. Please see the logs above for details.",
+ file=sys.stderr)
+ sys.exit(1)
@cli.command()
@@ -153,7 +159,12 @@ def server(file_store, artifact_root, host, port, workers):
the local machine. To let the server accept connections from other machines, you will need to
pass --host 0.0.0.0 to listen on all network interfaces (or a specific interface address).
"""
+ try:
mlflow.server._run_server(file_store, artifact_root, host, port, workers)
+ except ShellCommandException:
+ print("Running the mlflow server failed. Please see the logs above for details.",
+ file=sys.stderr)
+ sys.exit(1)
cli.add_command(mlflow.sklearn.commands)
|
PIN: jinja2 < 3.1
See sphinx-doc/sphinx#10291 | @@ -27,7 +27,7 @@ url = https://github.com/nipreps/mriqc
[options]
python_requires = >= 3.7
install_requires =
- jinja2
+ jinja2 < 3.1
markupsafe ~= 2.0.1 # jinja2 imports deprecated function removed in 2.1
matplotlib
mriqc-learn
|
Continue the job even if linters fail.
Current linter has many false positives and it seems not justify blocking PRs.
Please see also | @@ -65,6 +65,7 @@ jobs:
[ ! -s "py_test_files.txt" ] || cat py_test_files.txt | xargs -I {} python {}
- name: Lint with protolint
+ continue-on-error: true
env:
PROTOLINT_VERSION: 0.25.1
shell: bash
@@ -77,6 +78,7 @@ jobs:
[ ! -s "proto_files.txt" ] || cat proto_files.txt | xargs -I {} ./protolint {}
- name: Lint with pylint
+ continue-on-error: true
shell: bash
run: |
pip install pylint
|
use capitalize in currentUser in App.vue
Fixes:
```TypeError: Cannot read properties of undefined (reading 'charAt')
at Proxy.render (VM25715 App.vue:71:37)
at Vue._render (vue.runtime.esm.js?2b0e:2654:1)
at VueComponent.updateComponent (vue.runtime.esm.js?2b0e:3844:1)``` | @@ -49,7 +49,7 @@ limitations under the License.
Share
</v-btn>
<v-avatar color="grey lighten-1" size="25" class="ml-3">
- <span class="white--text">{{ currentUser.charAt(0).toUpperCase() }}</span>
+ <span class="white--text">{{ currentUser | capitalize }}</span>
</v-avatar>
<v-menu v-if="!isRootPage" offset-y>
<template v-slot:activator="{ on, attrs }">
|
Allow kwargs in _log_console_output
As a part of the scenario/manager.py stabilization tracked by
the below BP the patch adds kwargs argument for _log_console_output
method so that the consumers are able to pass additional
parameters if needed.
Implements: blueprint tempest-scenario-manager-stable | @@ -625,7 +625,7 @@ class ScenarioTest(tempest.test.BaseTestCase):
LOG.debug("image:%s", image['id'])
return image['id']
- def _log_console_output(self, servers=None, client=None):
+ def _log_console_output(self, servers=None, client=None, **kwargs):
"""Console log output"""
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
@@ -637,7 +637,7 @@ class ScenarioTest(tempest.test.BaseTestCase):
for server in servers:
try:
console_output = client.get_console_output(
- server['id'])['output']
+ server['id'], **kwargs)['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
except lib_exc.NotFound:
|
Replaced the Documentation Link
The original Documentation link to MDA no longer exists. A new link has been provided to a Government of Canada page. | Name: RADARSAT-1
Description: Developed and operated by the Canadian Space Agency, it is Canada's first commercial Earth observation satellite.
-Documentation: https://mdacorporation.com/geospatial/international/satellites/RADARSAT-1/
+Documentation: https://www.asc-csa.gc.ca/eng/satellites/radarsat1/what-is-radarsat1.asp
Contact: https://www.eodms-sgdot.nrcan-rncan.gc.ca
ManagedBy: "[Natural Resources Canada](https://nrcan.gc.ca/)"
UpdateFrequency: The initial product release includes the 36000 products originally ordered by the Government of Canada. Products are added on an adhoc basis driven by prioritized foreign repatriation efforts and new processing orders from the raw archive.
|
be stricter on broadcast message area validation
even if there is a json struct, make sure it actually contains polygons,
or we'll send to the entire country. | @@ -56,7 +56,7 @@ def _update_broadcast_message(broadcast_message, new_status, updating_user):
f'User {updating_user.id} cannot approve their own broadcast_message {broadcast_message.id}',
status_code=400
)
- elif not broadcast_message.areas:
+ elif len(broadcast_message.areas['simple_polygons']) == 0:
raise InvalidRequest(
f'broadcast_message {broadcast_message.id} has no selected areas and so cannot be broadcasted.',
status_code=400
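A tiny illustration, with made-up data, of why the old truthiness check was too weak: a JSON structure containing an empty polygon list is still truthy.

```python
areas = {"simple_polygons": []}   # structure exists, but no polygons

print(bool(areas))                         # True  -- old "not areas" check lets this through
print(len(areas["simple_polygons"]) == 0)  # True  -- new check rejects it
```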
|
Make host backward compatible with transmissionrpc
Code adapted from | @@ -55,10 +55,22 @@ class TransmissionBase:
def create_rpc_client(self, config):
user, password = config.get('username'), config.get('password')
+ urlo = urlparse(config['host'])
+
+ if urlo.scheme == '':
+ urlo = urlparse('http://' + config['host'])
+
+ protocol = urlo.scheme if urlo.scheme else 'http'
+ port = str(urlo.port) if urlo.port else config['port']
+ path = urlo.path.rstrip('rpc') if urlo.path else '/transmission/'
+
+ logger.debug('Connecting to {}://{}:{}{}', protocol, urlo.hostname, port, path)
try:
- cli = transmissionrpc.Client(host = config['host'],
- port = config['port'],
+ cli = transmissionrpc.Client(protocol = protocol,
+ host = urlo.hostname,
+ port = port,
+ path = path,
username = user,
password = password)
except TransmissionError as e:
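A hedged, standalone sketch of the host handling (the hostname and fallback port are illustrative): a bare host parses with an empty scheme, so it is re-parsed with `http://` prepended and the missing pieces fall back to the existing host/port settings.

```python
from urllib.parse import urlparse  # the plugin reaches this via six.moves

host = "torrents.example.org"      # illustrative config['host'] value
urlo = urlparse(host)
if urlo.scheme == '':
    urlo = urlparse('http://' + host)

protocol = urlo.scheme if urlo.scheme else 'http'
port = str(urlo.port) if urlo.port else '9091'                     # config['port'] fallback
path = urlo.path.rstrip('rpc') if urlo.path else '/transmission/'

print(protocol, urlo.hostname, port, path)
# http torrents.example.org 9091 /transmission/
```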
|
Skip flaky test
See | @@ -227,6 +227,7 @@ class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase):
for run_pairs in zip(non_daemon_runs, daemon_runs):
self.assertEqual(*(run.stdout_data for run in run_pairs))
+ @unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7622')
def test_pantsd_filesystem_invalidation(self):
"""Runs with pantsd enabled, in a loop, while another thread invalidates files."""
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
|
Update listeners.py
fix typo | @@ -35,7 +35,7 @@ class ListenerMixin:
"""
Create a listener from a decorated function.
- To be used as a deocrator:
+ To be used as a decorator:
.. code-block:: python
|
Changed to check if brcmEGL and brcmGLESv2 library files exist in
/opt/vc/lib instead of checking distribution version for rpi platform. | @@ -598,12 +598,20 @@ def determine_gl_flags():
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['library_dirs'] = ['/opt/vc/lib']
- from platform import linux_distribution
- dist = linux_distribution()
- if dist[0] == 'debian' and float(dist[1]) >= 9.1:
- flags['libraries'] = ['bcm_host', 'brcmEGL', 'brcmGLESv2']
+ brcm_lib_files = (
+ '/opt/vc/lib/libbrcmEGL.so',
+ '/opt/vc/lib/libbrcmGLESv2.so')
+ if all(exists(lib for lib in brcm_lib_files)):
+ print(
+ 'Found brcmEGL and brcmGLES library files'
+ 'for rpi platform at /opt/vc/lib/')
+ gl_libs = ['brcmEGL', 'brcmGLESv2']
else:
- flags['libraries'] = ['bcm_host', 'EGL', 'GLESv2']
+ print(
+ 'Failed to find brcmEGL and brcmGLESv2 library files'
+ 'for rpi platform, falling back to EGL and GLESv2.')
+ gl_libs = ['EGL', 'GLESv2']
+ flags['libraries'] = ['bcm_host'] + gl_libs
elif platform == 'mali':
flags['include_dirs'] = ['/usr/include/']
flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
|
Fix bib file
Author name was duplicated, causing the sphinx build to fail. | -@Book{2007:ritto,
- author = {Ritto, Thiago G.},
- author = {Sampaio, Rubens.},
+@Book{2007ritto,
+ author = {Ritto, Thiago G. and Sampaio, Rubens.},
title = {Rotor Dynamics with MATLAB - Finite Element Analysis},
publisher = {PUC-Rio},
year = {2007}
|
fix: 2FA qrcode generation error
Fixed decoding issues so as to facilitate proper comparison of strings during qrcode generation for 2FA. | @@ -5,7 +5,7 @@ from __future__ import unicode_literals
import frappe
from frappe import _
-from six.moves.urllib.parse import parse_qs
+from six.moves.urllib.parse import parse_qsl
from frappe.twofactor import get_qr_svg_code
def get_context(context):
@@ -15,10 +15,11 @@ def get_context(context):
def get_query_key():
'''Return query string arg.'''
query_string = frappe.local.request.query_string
- query = parse_qs(query_string)
+ query = dict(parse_qsl(query_string))
+ query = {key.decode(): val.decode() for key, val in query.items()}
if not 'k' in list(query):
frappe.throw(_('Not Permitted'),frappe.PermissionError)
- query = (query['k'][0]).strip()
+ query = (query['k']).strip()
if False in [i.isalpha() or i.isdigit() for i in query]:
frappe.throw(_('Not Permitted'),frappe.PermissionError)
return query
@@ -34,4 +35,4 @@ def get_user_svg_from_cache():
frappe.throw(_('Not Permitted'), frappe.PermissionError)
user = frappe.get_doc('User',user)
svg = get_qr_svg_code(totp_uri)
- return (user,svg)
+ return (user,svg.decode())
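A standalone sketch of the decoding issue the patch addresses (the query string below is illustrative): a WSGI query string arrives as bytes, so `parse_qsl` yields bytes pairs that must be decoded before plain string comparisons work.

```python
from urllib.parse import parse_qsl  # the patch imports this via six.moves

query_string = b"k=abc123"
query = dict(parse_qsl(query_string))
print(query)                # {b'k': b'abc123'} -- bytes keys and values

query = {key.decode(): val.decode() for key, val in query.items()}
print(query['k'].strip())   # abc123 -- now a plain str, as the later checks expect
```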
|
Couple fewer Telnet colons
"Telnet server started" already lost its colon to match the GDB server.
Drop two more colons for internal consistency. | @@ -330,7 +330,7 @@ class TelnetSemihostIOHandler(SemihostIOHandler):
while not self._shutdown_event.is_set():
self.connected = self._abstract_socket.connect()
if self.connected is not None:
- logging.debug("Telnet: client connected")
+ logging.debug("Telnet client connected")
break
if self._shutdown_event.is_set():
@@ -357,7 +357,7 @@ class TelnetSemihostIOHandler(SemihostIOHandler):
pass
finally:
self._abstract_socket.cleanup()
- logging.info("Telnet: server stopped")
+ logging.info("Telnet server stopped")
def write(self, fd, ptr, length):
# If nobody is connected, act like all data was written anyway.
|
chore: update OKD to latest 4.7
This commit updates the OKD version
to the current latest. | @@ -64,7 +64,7 @@ kubeinit_okd_registry_repository: "{{ kubeinit_okd_registry_repository_aux | rep
kubeinit_okd_registry_release_tag_aux: "{% if ( kubeinit_okd_openshift_deploy | default(False) ) %}
4.7.18
{% else %}
- 4.7.0-0.okd-2021-06-19-191547
+ 4.7.0-0.okd-2021-08-07-063045
{% endif %}"
kubeinit_okd_registry_release_tag: "{{ kubeinit_okd_registry_release_tag_aux | replace(' ','') }}"
|
Skip flaky test on Python 3
Even with the flaky decorator, this test sometimes passes and sometimes fails.
Let's skip it for now. | @@ -93,7 +93,7 @@ class TestSerializers(TestCase):
@skipIf(not yaml.available, SKIP_MESSAGE % 'yaml')
@skipIf(not yamlex.available, SKIP_MESSAGE % 'sls')
- @flaky
+ @skipIf(six.PY3, 'Flaky on Python 3.')
def test_compare_sls_vs_yaml_with_jinja(self):
tpl = '{{ data }}'
env = jinja2.Environment()
|
sql: Explain cop capabilities
Explain cop capabilities | @@ -97,7 +97,9 @@ mysql> EXPLAIN ANALYZE SELECT count(*) FROM trips WHERE start_date BETWEEN '2017
### Introduction to task
-Currently, the calculation task of TiDB contains two different tasks: cop task and root task. The cop task refers to the computing task that is pushed to the KV side and executed distributedly. The root task refers to the computing task that is executed at a single point in TiDB. One of the goals of SQL optimization is to push the calculation down to the KV side as much as possible.
+Currently, there are two types of task execution: cop tasks and root tasks. A cop task refers to a computing task that is executed using the coprocessor in TiKV. A root task refers to a computing task that is executed in TiDB.
+
+One of the goals of SQL optimization is to push the calculation down to TiKV as much as possible. The coprocessor is able to assist in execution of SQL functions (both aggregate and scalar), SQL `LIMIT` operations, index scans, and table scans. All join operations, however, will be performed as root tasks.
### Table data and index data
|
Update tests.py
Update tests for rgb2gray | @@ -3146,12 +3146,6 @@ def test_plantcv_rgb2gray():
pcv.params.debug_outdir = cache_dir
# Read in test data
img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
- # Test with debug = "print"
- pcv.params.debug = "print"
- _ = pcv.rgb2gray(rgb_img=img)
- # Test with debug = "plot"
- pcv.params.debug = "plot"
- _ = pcv.rgb2gray(rgb_img=img)
# Test with debug = None
pcv.params.debug = None
gray = pcv.rgb2gray(rgb_img=img)
|
Fix dbus-next notification bug
Some notification clients run "GetServerInformation" as part of
the notification process. The old code returned a tuple whereas
dbus requires an array (i.e. list). | @@ -79,7 +79,7 @@ if has_dbus:
@method()
def GetServerInformation(self) -> 'ssss': # type:ignore # noqa: N802, F821
- return ("qtile-notify-daemon", "qtile", "1.0", "1")
+ return ["qtile-notify-daemon", "qtile", "1.0", "1"]
class Notification:
def __init__(self, summary, body='', timeout=-1, hints=None, app_name='',
|
Update test_command_line_client.py
delete command line cp test | @@ -559,66 +559,6 @@ def test_command_get_recursive_and_query():
schedule_for_cleanup(new_paths[0])
-def test_command_copy():
- """Tests the 'synapse cp' function"""
-
- # Create a Project
- project_entity = syn.store(Project(name=str(uuid.uuid4())))
- schedule_for_cleanup(project_entity.id)
-
- # Create a Folder in Project
- folder_entity = syn.store(Folder(name=str(uuid.uuid4()),
- parent=project_entity))
- schedule_for_cleanup(folder_entity.id)
- # Create and upload a file in Folder
- repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
- annots = {'test': ['hello_world']}
- # Create, upload, and set annotations on a file in Folder
- filename = utils.make_bogus_data_file()
- schedule_for_cleanup(filename)
- file_entity = syn.store(File(filename, parent=folder_entity))
- externalURL_entity = syn.store(File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
- syn.setAnnotations(file_entity, annots)
- syn.setAnnotations(externalURL_entity, annots)
- schedule_for_cleanup(file_entity.id)
- schedule_for_cleanup(externalURL_entity.id)
-
- # Test cp function
- output = run('synapse', '--skip-checks', 'cp', file_entity.id, '--destinationId', project_entity.id)
- output_URL = run('synapse', '--skip-checks', 'cp', externalURL_entity.id, '--destinationId', project_entity.id)
-
- copied_id = parse(r'Copied syn\d+ to (syn\d+)', output)
- copied_URL_id = parse(r'Copied syn\d+ to (syn\d+)', output_URL)
-
- # Verify that our copied files are identical
- copied_ent = syn.get(copied_id)
- copied_URL_ent = syn.get(copied_URL_id, downloadFile=False)
- schedule_for_cleanup(copied_id)
- schedule_for_cleanup(copied_URL_id)
- copied_ent_annot = syn.getAnnotations(copied_id)
- copied_url_annot = syn.getAnnotations(copied_URL_id)
-
- copied_prov = syn.getProvenance(copied_id)['used'][0]['reference']['targetId']
- copied_url_prov = syn.getProvenance(copied_URL_id)['used'][0]['reference']['targetId']
-
- # Make sure copied files are the same
- assert_equals(copied_prov, file_entity.id)
- assert_equals(copied_ent_annot, annots)
- assert_equals(copied_ent.properties.dataFileHandleId, file_entity.properties.dataFileHandleId)
-
- # Make sure copied URLs are the same
- assert_equals(copied_url_prov, externalURL_entity.id)
- assert_equals(copied_url_annot, annots)
- assert_equals(copied_URL_ent.externalURL, repo_url)
- assert_equals(copied_URL_ent.name, 'rand')
- assert_equals(copied_URL_ent.properties.dataFileHandleId, externalURL_entity.properties.dataFileHandleId)
-
- # Verify that errors are being thrown when a
- # file is copied to a folder/project that has a file with the same filename
- assert_raises(ValueError, run, 'synapse', '--debug', '--skip-checks', 'cp', file_entity.id,
- '--destinationId', project_entity.id)
-
-
def test_command_line_using_paths():
# Create a Project
project_entity = syn.store(Project(name=str(uuid.uuid4())))
|
soft-deactivation/management: Rename variable to make more sense.
We rename variable user_ids_to_deactivate to users_to_deactivate
since it was storing UserProfiles anyway. | @@ -57,14 +57,14 @@ class Command(ZulipBaseCommand):
elif deactivate:
if user_emails:
print('Soft deactivating forcefully...')
- user_ids_to_deactivate = list(UserProfile.objects.filter(
+ users_to_deactivate = list(UserProfile.objects.filter(
realm=realm,
email__in=user_emails))
else:
- user_ids_to_deactivate = get_users_for_soft_deactivation(realm, int(options['inactive_for']))
+ users_to_deactivate = get_users_for_soft_deactivation(realm, int(options['inactive_for']))
- if user_ids_to_deactivate:
- do_soft_deactivate_users(user_ids_to_deactivate)
+ if users_to_deactivate:
+ do_soft_deactivate_users(users_to_deactivate)
else:
self.print_help("./manage.py", "soft_activate_deactivate_users")
sys.exit(1)
|
Cleans Up Voice Sync Tests
Cleans up the tests related to the voice sync/kick functions by adding a
helper method to simplify mocking. | @@ -291,6 +291,17 @@ class RescheduleTests(unittest.IsolatedAsyncioTestCase):
self.cog.notifier.add_channel.assert_not_called()
+def voice_sync_helper(function):
+ """Helper wrapper to test the sync and kick functions for voice channels."""
+ @autospec(silence.Silence, "_force_voice_sync", "_kick_voice_members", "_set_silence_overwrites")
+ async def inner(self, sync, kick, overwrites):
+ overwrites.return_value = True
+ await function(self, MockContext(),
+ sync, kick)
+
+ return inner
+
+
@autospec(silence.Silence, "previous_overwrites", "unsilence_timestamps", pass_mocks=False)
class SilenceTests(unittest.IsolatedAsyncioTestCase):
"""Tests for the silence command and its related helper methods."""
@@ -363,28 +374,40 @@ class SilenceTests(unittest.IsolatedAsyncioTestCase):
if target is not None and isinstance(target, MockTextChannel):
target.send.reset_mock()
- @mock.patch.object(silence.Silence, "_set_silence_overwrites", return_value=True)
- @mock.patch.object(silence.Silence, "_kick_voice_members")
- @mock.patch.object(silence.Silence, "_force_voice_sync")
- async def test_sync_or_kick_called(self, sync, kick, _):
- """Tests if silence command calls kick or sync on voice channels when appropriate."""
+ @voice_sync_helper
+ async def test_sync_called(self, ctx, sync, kick):
+ """Tests if silence command calls sync on a voice channel."""
channel = MockVoiceChannel()
- ctx = MockContext()
+ await self.cog.silence.callback(self.cog, ctx, 10, channel, kick=False)
+
+ sync.assert_called_once_with(self.cog, channel)
+ kick.assert_not_called()
- with mock.patch.object(self.cog, "bot") as bot_mock:
- bot_mock.get_channel.return_value = AsyncMock()
+ @voice_sync_helper
+ async def test_kick_called(self, ctx, sync, kick):
+ """Tests if silence command calls kick on a voice channel."""
+ channel = MockVoiceChannel()
+ await self.cog.silence.callback(self.cog, ctx, 10, channel, kick=True)
+
+ kick.assert_called_once_with(self.cog, channel)
+ sync.assert_not_called()
+
+ @voice_sync_helper
+ async def test_sync_not_called(self, ctx, sync, kick):
+ """Tests that silence command does not call sync on a text channel."""
+ channel = MockTextChannel()
+ await self.cog.silence.callback(self.cog, ctx, 10, channel, kick=False)
- with self.subTest("Test calls kick"):
- await self.cog.silence.callback(self.cog, ctx, 10, kick=True, channel=channel)
- kick.assert_called_once_with(channel)
sync.assert_not_called()
+ kick.assert_not_called()
- kick.reset_mock()
- sync.reset_mock()
+ @voice_sync_helper
+ async def test_kick_not_called(self, ctx, sync, kick):
+ """Tests that silence command does not call kick on a text channel."""
+ channel = MockTextChannel()
+ await self.cog.silence.callback(self.cog, ctx, 10, channel, kick=True)
- with self.subTest("Test calls sync"):
- await self.cog.silence.callback(self.cog, ctx, 10, kick=False, channel=channel)
- sync.assert_called_once_with(channel)
+ sync.assert_not_called()
kick.assert_not_called()
async def test_skipped_already_silenced(self):
|
Add basic path for image AutoML
* Add basic path for image AutoML
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see | @@ -38,6 +38,9 @@ except ImportError:
OUTPUT_DIR = "."
+AUTOML_DEFAULT_TABULAR_MODEL = "tabnet"
+AUTOML_DEFAULT_TEXT_ENCODER = "bert"
+AUTOML_DEFAULT_IMAGE_ENCODER = "stacked_cnn"
class AutoTrainResults:
@@ -217,7 +220,7 @@ def _model_select(
# tabular dataset heuristics
if len(fields) > 3:
- base_config = merge_dict(base_config, default_configs["combiner"]["tabnet"])
+ base_config = merge_dict(base_config, default_configs["combiner"][AUTOML_DEFAULT_TABULAR_MODEL])
# override combiner heuristic if explicitly provided by user
if user_config is not None:
@@ -230,10 +233,13 @@ def _model_select(
# default text encoder is bert
# TODO (ASN): add more robust heuristics
if input_feature["type"] == "text":
- input_feature["encoder"] = "bert"
- base_config = merge_dict(base_config, default_configs["text"]["bert"])
+ input_feature["encoder"] = AUTOML_DEFAULT_TEXT_ENCODER
+ base_config = merge_dict(base_config, default_configs["text"][AUTOML_DEFAULT_TEXT_ENCODER])
# TODO (ASN): add image heuristics
+ if input_feature["type"] == "image":
+ input_feature["encoder"] = AUTOML_DEFAULT_IMAGE_ENCODER
+ base_config = merge_dict(base_config, default_configs["combiner"]["concat"])
# override and constrain automl config based on user specified values
if user_config is not None:
|
mmctl heading formatting fixes
* mmctl heading formatting fixes
Documentation task:
Updated:
Self-Managed Admin Guide > Administration > mmctl Command Line Tool (Beta) > mmctl export
- Fixed formatting for child command links
* Heading formatting fixes for mmctl export | @@ -25,6 +25,8 @@ This feature was developed to a large extent by community contributions and we'd
- `mmctl docs`_ - Generates mmctl documentation
- `mmctl export`_ - Exports management
- `mmctl group`_ - Group management
+ - `mmctl group channel`_ - Channel group management
+ - `mmctl group team`_ - Team group management
- `mmctl import`_ - Import Management
- `mmctl integrity`_ - Database record integrity
- `mmctl ldap`_ - LDAP management
@@ -36,6 +38,7 @@ This feature was developed to a large extent by community contributions and we'd
- `mmctl roles`_ - Roles Management
- `mmctl system`_ - System Management
- `mmctl team`_ - Team Management
+ - `mmctl team users`_ - Team user management
- `mmctl token`_ - Token Management
- `mmctl user`_ - User Management
- `mmctl version`_ - Version Management
@@ -2035,7 +2038,7 @@ Child Commands
-h, --help help for group
mmctl export create
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~
**Description**
@@ -2066,7 +2069,7 @@ Create an export file.
--strict will only run commands if the mmctl version matches the server one
mmctl export delete
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~
**Description**
@@ -2102,7 +2105,7 @@ Delete an export file.
--strict will only run commands if the mmctl version matches the server one
mmctl export download
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~
**Description**
@@ -2143,7 +2146,7 @@ Download export files.
--strict will only run commands if the mmctl version matches the server one
mmctl export job
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~
**Description**
@@ -2167,7 +2170,7 @@ List and show export jobs.
--strict will only run commands if the mmctl version matches the server one
mmctl export job list
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~
**Description**
@@ -2206,7 +2209,7 @@ List export jobs.
--strict will only run commands if the mmctl version matches the server one
mmctl export job show
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~
**Description**
@@ -2242,7 +2245,7 @@ Show export job.
--strict will only run commands if the mmctl version matches the server one
mmctl export list
-^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~
**Description**
@@ -2271,7 +2274,6 @@ List export files.
--local allows communicating with the server through a unix socket
--strict will only run commands if the mmctl version matches the server one
-
mmctl group
-----------
|
dplay: check per episode if it's premium or not
fixes | @@ -43,7 +43,7 @@ class Dplay(Service):
yield ServiceError("Wrong username or password")
return
- what = self._playable(dataj, premium)
+ what = self._playable(dataj["data"][0], premium)
if what == 1:
yield ServiceError("Premium content")
return
@@ -137,13 +137,13 @@ class Dplay(Service):
return "sv"
def _playable(self, dataj, premium):
- if dataj["data"][0]["content_info"]["package_label"]["value"] == "Premium" and not premium:
+ if dataj["content_info"]["package_label"]["value"] == "Premium" and not premium:
return 1
- if dataj["data"][0]["video_metadata_drmid_playready"] != "none":
+ if dataj["video_metadata_drmid_playready"] != "none":
return 2
- if dataj["data"][0]["video_metadata_drmid_flashaccess"] != "none":
+ if dataj["video_metadata_drmid_flashaccess"] != "none":
return 2
return 0
@@ -166,7 +166,7 @@ class Dplay(Service):
data = self.http.request("get", "%s%s" % (url, page)).text
dataj = json.loads(data)
for i in dataj["data"]:
- what = self._playable(dataj, premium)
+ what = self._playable(i, premium)
if what == 0:
episodes.append(i["url"])
pages = dataj["total_pages"]
@@ -174,7 +174,7 @@ class Dplay(Service):
data = self.http.request("get", "%s%s" % (url, n)).text
dataj = json.loads(data)
for i in dataj["data"]:
- what = self._playable(dataj, premium)
+ what = self._playable(i, premium)
if what == 0:
episodes.append(i["url"])
if len(episodes) == 0:
|
Clarify Query Store naming
Clarify that Query Stores can be named and referenced individually. There can be multiple Query Stores. | @@ -18,11 +18,11 @@ Steps
A SqlAlchemy Query Store acts as a bridge that can query a SqlAlchemy-connected database and return the result of the query to be available for an evaluation parameter.
- Find the ``stores`` section in your ``great_expectations.yml`` file, and add the following configuration for a new store called "query_store".
+ Find the ``stores`` section in your ``great_expectations.yml`` file, and add the following configuration for a new store called "my_query_store". You can add and reference multiple Query Stores with different names.
.. code-block:: yaml
- query_store:
+ my_query_store:
class_name: SqlAlchemyQueryStore
credentials: ${rds_movies_db}
queries:
@@ -63,13 +63,13 @@ Steps
#. **Define an expectation that relies on a dynamic query**
- Great Expectations recognizes several types of :ref:`Evaluation Parameters <reference__core_concepts__evaluation_parameters>` that can use advanced features provided by the Data Context. To dynamically load data, we will be using a store-style URN, which starts with "urn:great_expectations:stores". The next component of the URN is the name of the store we configured above (``query_store``), and the final component is the name of the query we defined above (``current_genre_ids``):
+ Great Expectations recognizes several types of :ref:`Evaluation Parameters <reference__core_concepts__evaluation_parameters>` that can use advanced features provided by the Data Context. To dynamically load data, we will be using a store-style URN, which starts with "urn:great_expectations:stores". The next component of the URN is the name of the store we configured above (``my_query_store``), and the final component is the name of the query we defined above (``current_genre_ids``):
.. code-block:: python
batch.expect_column_values_to_be_in_set(
column="genre_id",
- value_set={"$PARAMETER": "urn:great_expectations:stores:query_store:current_genre_ids"}
+ value_set={"$PARAMETER": "urn:great_expectations:stores:my_query_store:current_genre_ids"}
)
The SqlAlchemyQueryStore that you configured above will execute the defined query and return the results as the value of the ``value_set`` parameter to evaluate your expectation:
|
Fix for issue
* Check for None to fix issue
* Update tenable/reports/nessusv2.py
This makes sense to me | @@ -54,6 +54,7 @@ class NessusReportv2(object):
elif name in ['cvss_base_score', 'cvss_temporal_score']:
# CVSS scores are floats, so lets return them as such.
+ if value:
return float(value)
elif name in ['first_found', 'last_found', 'plugin_modification_date',
|
Uninspired label, remapping to routing_instance
:man_facepalming: | @@ -112,7 +112,7 @@ class PrometheusTransport(TransportBase):
self.metrics[error] = Counter(
'napalm_logs_{error}'.format(error=error.lower()),
'Counter for {error} notifications'.format(error=error),
- ['host', 'instance', 'neighbor', 'peer_as']
+ ['host', 'routing_instance', 'neighbor', 'peer_as']
)
instance_name = list(msg['yang_message']['network-instances']['network-instance'].keys())[0]
instance_dict = msg['yang_message']['network-instances']['network-instance'][instance_name]
@@ -120,7 +120,7 @@ class PrometheusTransport(TransportBase):
neighbor = list(neigh_dict.keys())[0]
self.metrics[error].labels(
host=msg['host'],
- instance=instance_name,
+ routing_instance=instance_name,
neighbor=neighbor,
peer_as=neigh_dict[neighbor]['state']['peer_as']
).inc()
|
Make http staging test run with different configs, rather than fixed one
This is already the case for the FTP test. | -import pytest
-
import parsl
from parsl.app.app import App
from parsl.data_provider.files import File
-from parsl.tests.configs.local_threads import config
-
-parsl.clear()
-parsl.load(config)
@App('python')
@@ -19,7 +13,6 @@ def sort_strings(inputs=[], outputs=[]):
s.write(e)
[email protected]
def test_implicit_staging_https():
"""Test implicit staging for an ftp file
@@ -47,7 +40,6 @@ def sort_strings_kw(x=None, outputs=[]):
s.write(e)
[email protected]
def test_implicit_staging_https_kwargs():
# unsorted_file = File('https://testbed.petrel.host/test/public/unsorted.txt')
@@ -71,7 +63,6 @@ def sort_strings_arg(x, outputs=[]):
s.write(e)
[email protected]
def test_implicit_staging_https_args():
# unsorted_file = File('https://testbed.petrel.host/test/public/unsorted.txt')
@@ -95,7 +86,6 @@ def sort_strings_additional_executor(inputs=[], outputs=[]):
s.write(e)
[email protected]
def test_implicit_staging_https_additional_executor():
"""Test implicit staging for an ftp file
|
Fixed typo in Python setup (calling virtualenv binary directly) and added
needs: validate-content as a dependency. | @@ -103,6 +103,7 @@ jobs:
build-sources:
runs-on: ubuntu-latest
+ needs: validate-content
steps:
- name: Checkout Repo
uses: actions/checkout@v2
@@ -116,7 +117,7 @@ jobs:
run: |
rm -rf venv
- virtualenv --clear venv
+ python3 -m pip --clear venv
source venv/bin/activate
python3 -m pip install -q -r requirements.txt --ignore-installed simplejson
|
update spacing in readme
Addresses | @@ -24,7 +24,7 @@ Each instance is at least composed of:
|------------------|------------------------------------------------------------------------------------------------------------------|
| `prometheus_url` | A URL that points to the metric route (**Note:** must be unique) |
| `namespace` | This namespace is prepended to all metrics (to avoid metrics name collision) |
-| `metrics` | A list of metrics to retrieve as custom metrics in the form `- <METRIC_NAME>` or `- <METRIC_NAME:RENAME_METRIC>` |
+| `metrics` | A list of metrics to retrieve as custom metrics in the form `- <METRIC_NAME>` or `- <METRIC_NAME>: <RENAME_METRIC>` |
When listing metrics, it's possible to use the wildcard `*` like this `- <METRIC_NAME>*` to retrieve all matching metrics. **Note:** use wildcards with caution as it can potentially send a lot of custom metrics.
|
(airline-use-dsl-1) Use DSL for process_on_time_data
Summary: Behold the glory
Test Plan: Buildkite
Reviewers: max, natekupp, alangenfeld | PresetDefinition,
SolidInstance,
String,
+ composite_solid,
file_relative_path,
)
},
)
-process_on_time_data = CompositeSolidDefinition(
- name='process_on_time_data',
- solids=[s3_to_df, join_q2_data, load_data_to_database_from_spark],
- dependencies={
- SolidInstance('s3_to_df', alias='april_on_time_s3_to_df'): {},
- SolidInstance('s3_to_df', alias='may_on_time_s3_to_df'): {},
- SolidInstance('s3_to_df', alias='june_on_time_s3_to_df'): {},
- SolidInstance('s3_to_df', alias='master_cord_s3_to_df'): {},
- 'join_q2_data': {
- 'april_data': DependencyDefinition('april_on_time_s3_to_df'),
- 'may_data': DependencyDefinition('may_on_time_s3_to_df'),
- 'june_data': DependencyDefinition('june_on_time_s3_to_df'),
- 'master_cord_data': DependencyDefinition('master_cord_s3_to_df'),
- },
- SolidInstance('load_data_to_database_from_spark', alias='load_q2_on_time_data'): {
- 'data_frame': DependencyDefinition('join_q2_data')
- },
- },
- output_mappings=[
- OutputDefinition(
- name='table_name',
- dagster_type=String,
- description='''
- The name of table created during loading.
- ''',
- ).mapping_from(solid_name='load_q2_on_time_data', output_name='table_name')
- ],
+
+@composite_solid(outputs=[OutputDefinition(name='table_name', dagster_type=String)])
+def process_on_time_data(context):
+ return load_data_to_database_from_spark.alias('load_q2_on_time_data')(
+ data_frame=join_q2_data(
+ context,
+ april_data=s3_to_df.alias('april_on_time_s3_to_df')(),
+ may_data=s3_to_df.alias('may_on_time_s3_to_df')(),
+ june_data=s3_to_df.alias('june_on_time_s3_to_df')(),
+ master_cord_data=s3_to_df.alias('master_cord_s3_to_df')(),
+ )
)
+
sfo_weather_data = CompositeSolidDefinition(
name='sfo_weather_data',
solids=[
|
Duplicate entries
One entry removed for being a duplicate. Links copied to other entry. Date verified because there were conflicting dates attached. | @@ -13,11 +13,14 @@ A peaceful protest was dispersed with tear gas and flash bangs, with police shoo
### Congresswoman Joyce Beatty reportedly sprayed with "mace or pepper spray" | May 30th
-CNN reports that Joyce Beatty, an African American congresswoman from Ohio, was sprayed with mace or pepper spray at a protest in Columbus.
+Joyce Beatty, an African American congresswoman from Ohio, was sprayed with mace or pepper spray at a protest in Columbus.
**Links**
* https://www.cnn.com/2020/05/30/politics/joyce-beatty-ohio-pepper-sprayed-columbus-protest/index.html
+* https://twitter.com/politico/status/1266866982919516160
+* https://twitter.com/TimWCBD/status/1266787064735043591
+* https://twitter.com/KRobPhoto/status/1266796191469252610
### Police pepper-spray a medic | May 31st
@@ -40,14 +43,3 @@ Police drive their car towards protestors in order to disperse them
**Links**
* https://www.reddit.com/r/PublicFreakout/comments/gtq7i4/columbus_police_officer_trying_to_use_his_car_to/
-
-
-### Police pepper spray Congresswoman | May 31st
-
-Democratic Rep. Joyce Beatty was pepper sprayed by Columbus, Ohio, police during protests
-
-**Links**
-
-* https://twitter.com/politico/status/1266866982919516160
-* https://twitter.com/TimWCBD/status/1266787064735043591
-* https://twitter.com/KRobPhoto/status/1266796191469252610
|
Removed a line that did nothing
It was not necessary to reduce the loop index. The supposed fix did not
even do this. | @@ -359,9 +359,6 @@ class BlackrockRawIO(BaseRawIO):
if length < 2:
nb_empty_segments += 1
self.nsx_data.pop(data_bl)
- # TODO: CHECK WHAT HAPPENS HERE AND IF -1 IS CORRECT
- # Is this even needed? It should be
- data_bl -= 1
continue
if self.__nsx_data_header[self.nsx_to_load] is None:
t_start = 0.
|
Must detect/set js parser (when applicable)
Per Prettier v1.13, the default parser (babylon)
is no longer set. See <https://prettier.io/blog/2018/05/27/1.13.0.html#don-t-default-to-the-javascript-parser-4528-by-duailibe> | @@ -471,6 +471,11 @@ class JsPrettierCommand(sublime_plugin.TextCommand):
prettier_options.append('vue')
continue
+ if self.is_source_js(view):
+ prettier_options.append(cli_option_name)
+ prettier_options.append('babylon')
+ continue
+
if self.is_html(view):
prettier_options.append(cli_option_name)
prettier_options.append('parse5')
@@ -498,6 +503,10 @@ class JsPrettierCommand(sublime_plugin.TextCommand):
prettier_options.append('--ignore-path')
prettier_options.append(prettier_ignore_filepath)
+ # add the current file name to `--stdin-filepath`, only when
+ # the current file being edited is NOT html, and in order
+ # detect and format css/js selection(s) within html files:
+ if not self.is_html(view):
prettier_options.append('--stdin-filepath')
prettier_options.append(file_name)
@@ -584,8 +593,6 @@ class JsPrettierCommand(sublime_plugin.TextCommand):
scopename = view.scope_name(0)
if scopename.startswith('text.html.markdown') or scopename.startswith('text.html.vue'):
return False
- if scopename == 'text.html.basic':
- return True
if scopename.startswith('text.html') or filename.endswith('.html') or filename.endswith('.htm'):
return True
return False
|
Refresh README documentation
Refer to the new `Dockerfile.tmpl`
Remove outdated documentation about GPU layers/
Remove link to outdated external documentation. | [Kaggle Notebooks](https://www.kaggle.com/notebooks) allow users to run a Python Notebook in the cloud against our competitions and datasets without having to download data or set up their environment.
-This repository includes our Dockerfiles for building the [CPU-only](Dockerfile) and [GPU](gpu.Dockerfile) image that runs Python Notebooks on Kaggle.
+This repository includes the [Dockerfile](Dockerfile.tmpl) for building the CPU-only and GPU image that runs Python Notebooks on Kaggle.
-Our Python Docker images are stored on Google Container Registry at:
+Our Python Docker images are stored on the Google Container Registry at:
* CPU-only: [gcr.io/kaggle-images/python](https://gcr.io/kaggle-images/python)
* GPU: [gcr.io/kaggle-gpu-images/python](https://gcr.io/kaggle-gpu-images/python)
-Note: The base image for the GPU image is our CPU-only image. The [gpu.Dockerfile](gpu.Dockerfile) adds a few extra layers to install GPU related libraries and packages (cuda, libcudnn, pycuda etc.) and reinstall packages with specific GPU builds (torch, tensorflow and a few mores).
-
-## Getting started
-
-To get started with this image, read our [guide](https://medium.com/@kaggleteam/how-to-get-started-with-data-science-in-containers-6ed48cb08266) to using it yourself, or browse [Kaggle Notebooks](https://www.kaggle.com/notebooks) for ideas.
-
## Requesting new packages
First, evaluate whether installing the package yourself in your own notebooks suits your needs. See [guide](https://github.com/Kaggle/docker-python/wiki/Missing-Packages).
@@ -23,9 +17,7 @@ If you the first step above doesn't work for your use case, [open an issue](http
## Opening a pull request
-1. Update the *Dockerfile*
- 1. For changes specific to the GPU image, update the [gpu.Dockerfile](gpu.Dockerfile).
- 1. Otherwise, update the [Dockerfile](Dockerfile).
+1. Edit the [Dockerfile](Dockerfile.tmpl).
1. Follow the instructions below to build a new image.
1. Add tests for your new package. See this [example](https://github.com/Kaggle/docker-python/blob/main/tests/test_fastai.py).
1. Follow the instructions below to test the new image.
|
Change to XPath using button label text for Click Modal Button to cover
modals opened by a Quick Action which don't set a title on the button
element | @@ -10,7 +10,7 @@ lex_locators = {
"loading_box": "css: div.auraLoadingBox.oneLoadingBox",
"modal": {
"is_open": "css: div.DESKTOP.uiModal.forceModal.open.active",
- "button": "css: div.uiModal div.modal-footer button[title='{}']",
+ "button": "//div[contains(@class,'uiModal')]//div[contains(@class,'modal-footer')]//button[.//span[text()='{}']]",
"has_error": "css: div.pageLevelErrors",
"error_messages": "css: div.pageLevelErrors ul.errorsList li",
},
|
[Snippets/interpolatable] Use Hungarian algorithm from munkres or scipy when available
Fixes | @@ -77,8 +77,28 @@ def _matching_cost(G, matching):
def min_cost_perfect_bipartite_matching(G):
n = len(G)
- if n <= 8:
- # brute-force
+ try:
+ from scipy.optimize import linear_sum_assignment
+ rows, cols = linear_sum_assignment(G)
+ # This branch untested
+ assert rows == list(range(n))
+ return cols, _matching_cost(G, cols)
+ except ImportError:
+ pass
+
+ try:
+ from munkres import Munkres
+ cols = [None] * n
+ for row,col in Munkres().compute(G):
+ cols[row] = col
+ return cols, _matching_cost(G, cols)
+ except ImportError:
+ pass
+
+ if n > 6:
+ raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")
+
+ # Otherwise just brute-force
permutations = itertools.permutations(range(n))
best = list(next(permutations))
best_cost = _matching_cost(G, best)
@@ -87,26 +107,6 @@ def min_cost_perfect_bipartite_matching(G):
if cost < best_cost:
best, best_cost = list(p), cost
return best, best_cost
- else:
-
- # Set up current matching and inverse
- matching = list(range(n)) # identity matching
- matching_cost = _matching_cost(G, matching)
- reverse = list(matching)
-
- return matching, matching_cost
- # TODO implement real matching here
-
- # Set up cover
- cover0 = [max(c for c in row) for row in G]
- cover1 = [0] * n
- cover_weight = sum(cover0)
-
- while cover_weight < matching_cost:
- break
- NotImplemented
-
- return matching, matching_cost
def test(glyphsets, glyphs=None, names=None):
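For reference, a minimal sketch of the SciPy branch added above, assuming `scipy >= 0.17.0` is installed; the cost matrix here is a toy example, not glyph data.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

G = np.array([
    [4, 1, 3],
    [2, 0, 5],
    [3, 2, 2],
])

rows, cols = linear_sum_assignment(G)    # optimal column assignment per row
print(list(cols), G[rows, cols].sum())   # [1, 0, 2] 5  (min-cost perfect matching)
```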
|
Update test_reopt_url.py
correct expected error message for outage hours | @@ -143,7 +143,7 @@ class EntryResourceTest(ResourceTestCaseMixin, TestCase):
data['Scenario']['Site']['LoadProfile']['outage_end_hour'] = 0
response = self.get_response(data)
err_msg = str(json.loads(response.content)['messages']['input_errors'])
- self.assertTrue("LoadProfile outage_start_hour and outage_end_hour cannot be the same" in err_msg)
+ self.assertTrue("LoadProfile outage_start_hour must be less than outage_end_hour" in err_msg)
def test_valid_data_ranges(self):
|
util.commandline: StoreTarget: add separator kwarg to allow splitting input
Via a specified separator to enable things like splitting a
comma-separated string of args. | @@ -71,9 +71,12 @@ class StoreTarget(argparse._AppendAction):
def __init__(self, *args, **kwargs):
self.allow_sets = kwargs.pop('allow_sets', False)
self.allow_ebuild_paths = kwargs.pop('allow_ebuild_paths', False)
+ self.separator = kwargs.pop('separator', None)
super(StoreTarget, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
+ if self.separator is not None:
+ values = values.split(self.separator)
if self.allow_sets:
namespace.sets = []
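A simplified, hypothetical action illustrating the same idea (this is not pkgcore's actual `StoreTarget`): splitting the raw argument on a separator such as a comma before storing it.

```python
import argparse

class SplitAction(argparse.Action):
    def __init__(self, *args, separator=None, **kwargs):
        self.separator = separator
        super().__init__(*args, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        if self.separator is not None:
            values = values.split(self.separator)
        setattr(namespace, self.dest, values)

parser = argparse.ArgumentParser()
parser.add_argument("--targets", action=SplitAction, separator=",")
args = parser.parse_args(["--targets", "app-editors/vim,dev-lang/python"])
print(args.targets)   # ['app-editors/vim', 'dev-lang/python']
```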
|
remove error handling for exception that does not happen
no point complicating Repeater payload with error handling targeting ResourceNotFound
when that has only ever happened once in 2015 | @@ -168,17 +168,9 @@ class Repeater(QuickCachedDocumentMixin, Document, UnicodeMixIn):
generator = self.get_payload_generator(self.format_or_default_format())
return generator.get_payload(repeat_record, self.payload_doc(repeat_record))
- def get_payload_or_none(self, repeat_record):
- try:
- return self.get_payload(repeat_record)
- except ResourceNotFound:
- return None
-
def get_payload_and_save_exception(self, repeat_record):
try:
- return self.get_payload(repeat_record)
- except ResourceNotFound as e:
- repeat_record.handle_payload_exception(e)
+ self.get_payload(repeat_record)
except Exception as e:
repeat_record.handle_payload_exception(e)
raise
@@ -549,7 +541,7 @@ class RepeatRecord(Document):
return not self.succeeded
def get_payload(self):
- return self.repeater.get_payload_or_none(self)
+ return self.repeater.get_payload(self)
def handle_payload_exception(self, exception):
self.succeeded = False
|
CI: try to cache android apk builds
and build qml arm64 on every commit | @@ -174,39 +174,52 @@ task:
CIRRUS_DOCKER_CONTEXT: contrib/build-wine
task:
- name: Android build (Kivy arm64)
+ name: Android build (Kivy $APK_ARCH)
container:
dockerfile: contrib/android/Dockerfile
cpu: 2
memory: 2G
+ env:
+ APK_ARCH: arm64-v8a
+ p4a_cache:
+ folders:
+ - ".buildozer/android/platform/build-$APK_ARCH/packages"
+ - ".buildozer/android/platform/build-$APK_ARCH/build"
+ fingerprint_script:
+ # note: should *at least* depend on Dockerfile and p4a_recipes/, but contrib/android/ is simplest
+ - find contrib/android/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
+ - echo "kivy $APK_ARCH"
+ populate_script: mkdir -p ".buildozer/android/platform/$APK_ARCH"/{packages,build}
build_script:
- - ./contrib/android/make_apk.sh kivy arm64-v8a debug
+ - ./contrib/android/make_apk.sh kivy "$APK_ARCH" debug
binaries_artifacts:
path: "dist/*"
task:
- name: Android build (QML arm64)
+ name: Android build (QML $APK_ARCH)
container:
dockerfile: contrib/android/Dockerfile
cpu: 8
memory: 24G
- build_script:
- - ./contrib/android/make_apk.sh qml arm64-v8a debug
- binaries_artifacts:
- path: "dist/*"
+ matrix:
+ - env:
+ APK_ARCH: arm64-v8a
+ - env:
+ APK_ARCH: armeabi-v7a
only_if: $CIRRUS_TAG != '' || $CIRRUS_BRANCH == 'ci-qml-beta'
-
-task:
- name: Android build (QML arm32)
- container:
- dockerfile: contrib/android/Dockerfile
- cpu: 8
- memory: 24G
+ p4a_cache:
+ folders:
+ - ".buildozer/android/platform/build-$APK_ARCH/packages"
+ - ".buildozer/android/platform/build-$APK_ARCH/build"
+ fingerprint_script:
+ # note: should *at least* depend on Dockerfile and p4a_recipes/, but contrib/android/ is simplest
+ - find contrib/android/ -type f -print0 | sort -z | xargs -0 sha256sum | sha256sum
+ - echo "qml $APK_ARCH"
+ populate_script: mkdir -p ".buildozer/android/platform/$APK_ARCH"/{packages,build}
build_script:
- - ./contrib/android/make_apk.sh qml armeabi-v7a debug
+ - ./contrib/android/make_apk.sh qml "$APK_ARCH" debug
binaries_artifacts:
path: "dist/*"
- only_if: $CIRRUS_TAG != '' || $CIRRUS_BRANCH == 'ci-qml-beta'
task:
name: MacOS build
|
wmt should actually use specified tfds eval set
also remove redundant license and add a tokenizer comment. | -# Copyright 2020 The Flax Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
# Lint as: python3
# Copyright 2020 The Flax Authors.
#
@@ -90,7 +76,7 @@ def raw_wmt_datasets(dataset_name='wmt17_translate/de-en',
logging.info('Evaluating on TFDS dataset %s with split %s',
eval_dataset, eval_split + shard_spec)
eval_builder = tfds.builder(eval_dataset, data_dir=data_dir)
- eval_data = builder.as_dataset(split=eval_split + shard_spec,
+ eval_data = eval_builder.as_dataset(split=eval_split + shard_spec,
shuffle_files=False)
features_info = builder.info
@@ -150,6 +136,9 @@ def train_sentencepiece(dataset,
dataset: tf.dataset
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
+ character_coverage: amount of characters covered by the model, good
+ defaults are 0.9995 for languages with rich character set like Japanese
+ or Chinese and 1.0 for other languages with small character set.
model_path: str: path of model file to save vocab model to.
model_type: str: type of sentencepiece vocab to train.
data_keys: Tuple[str]: keys of dataset to use for training.
|
Add function `htmlEntities`
`htmlentities()` is a function which converts special characters. This allows you to display the string without the browser reading it as HTML. | return "{% url 'helpdesk:view' 1234 %}".replace(/1234/, row.id.toString());
}
+ function htmlEntities(str) {
+ return String(str).replace(/&/g, '&').replace(/</g, '<').replace(/>/g, '>').replace(/"/g, '"');
+ }
+
$(document).ready(function () {
// Ticket DataTable Initialization
$('#ticketTable').DataTable({
if (type === 'display') {
data = '<div class="tickettitle"><a href="' + get_url(row) + '" >' +
row.id + '. ' +
- row.title + '</a></div>';
+ htmlEntities(row.title) + '</a></div>';
}
return data
}
|
Add documentation about how to run the tests.
Improve documentation about the location of the installation output,
especially with respect to the example programs.
Most importantly, make installation actually build everything. | # Example programs
+After running ``source install.sh``, the executable versions of the
+example programs can be located in ``bazel-bin/nucleus/examples/``.
+For example, to run ``ascii_pileup``, you would actually run a command
+like
+
+```shell
+bazel-bin/nucleus/examples/ascii_pileup input.sam chr3:99393
+```
+
+If you would like to rebuild just the example programs -- after modifying
+one of them, perhaps -- this can be done with
+
+```shell
+bazel build -c opt $COPT_FLAGS nucleus/examples:all
+```
+
Here is a summary of the [example
programs](https://github.com/google/nucleus/blob/master/nucleus/examples)
included with Nucleus:
|
fix poor implementation
append caused the original list to be updated each time this was done | @@ -44,8 +44,7 @@ class UploadedTranslationsValidator(object):
:param for_type: type of sheet, module_and_forms, module, form
:return: list of errors messages if any
"""
- columns_to_compare = COLUMNS_TO_COMPARE[for_type]
- columns_to_compare.append(self.default_language_column)
+ columns_to_compare = COLUMNS_TO_COMPARE[for_type] + self.default_language_column
expected_rows = self.expected_rows[sheet_name]
msg = []
number_of_uploaded_rows = len(uploaded_rows)
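A toy reproduction of the aliasing bug the message describes (the names below are illustrative, not the real constants): `append` mutates the list stored in the shared dict, so every call grows it, while `+` builds a fresh list and leaves the constant untouched.

```python
COLUMNS_TO_COMPARE = {"module": ["name", "case_type"]}

def buggy(extra_column):
    cols = COLUMNS_TO_COMPARE["module"]
    cols.append(extra_column)                             # mutates the shared value in place
    return cols

def fixed(extra_column):
    return COLUMNS_TO_COMPARE["module"] + [extra_column]  # new list each call

buggy("default_en"); buggy("default_en")
print(COLUMNS_TO_COMPARE["module"])   # ['name', 'case_type', 'default_en', 'default_en']
```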
|
fix proforma wind and BESS tax incentives
wind and BESS bonus fraction cell references were switched with each other | @@ -1030,10 +1030,10 @@ def generate_proforma(scenariomodel, output_file_path):
col_idx += 1
ws['{}{}'.format(upper_case_letters[col_idx], current_row)] = batt.macrs_bonus_pct
- wind_bonus_fraction_cell = "\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[col_idx], current_row)
+ batt_bonus_fraction_cell = "\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[col_idx], current_row)
col_idx += 1
ws['{}{}'.format(upper_case_letters[col_idx], current_row)] = wind.macrs_bonus_pct
- batt_bonus_fraction_cell = "\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[col_idx], current_row)
+ wind_bonus_fraction_cell = "\'{}\'!{}{}".format(inandout_sheet_name, upper_case_letters[col_idx], current_row)
col_idx += 1
make_attribute_row(ws, current_row, length=col_idx)
col_idx += 1
|
Clarify docs on IAM role configuration
Closes | @@ -82,6 +82,8 @@ value is ``true``. If this value is ``false`` then chalice will try to load an
IAM policy from disk at ``.chalice/policy-<stage-name>.json`` instead of
auto-generating a policy from source code analysis. You can change the filename
by providing the ``iam_policy_file`` config option.
+See :ref:`iam-role-pol-examples` for examples of how to configure IAM roles
+and policies.
``environment_variables``
@@ -98,12 +100,28 @@ example.
``iam_policy_file``
~~~~~~~~~~~~~~~~~~~
-When ``autogen_policy`` is false, chalice will try to load an IAM policy from
-disk instead of auto-generating one based on source code analysis. The default
-location of this file is ``.chalice/policy-<stage-name>.json``, e.g
+When ``autogen_policy`` is ``false``, Chalice will try to load an IAM policy
+from disk instead of auto-generating one based on source code analysis. The
+default location of this file is ``.chalice/policy-<stage-name>.json``, e.g
``.chalice/policy-dev.json``, ``.chalice/policy-prod.json``, etc. You can
change the filename by providing this ``iam_policy_file`` config option. This
-filename is relative to the ``.chalice`` directory.
+filename is relative to the ``.chalice`` directory. For example, this config
+will create an IAM role using the file in ``.chalice/my-policy.json``::
+
+ {
+ "version": "2.0",
+ "app_name": "app",
+ "stages": {
+ "dev": {
+ "autogen_policy": false,
+ "iam_policy_file": "my-policy.json"
+ }
+ }
+ }
+
+
+See :ref:`iam-role-pol-examples` for more examples of how to configure IAM
+roles and policies.
``iam_role_arn``
@@ -112,6 +130,8 @@ filename is relative to the ``.chalice`` directory.
If ``manage_iam_role`` is ``false``, you must specify this value that indicates
which IAM role arn to use when configuration your application. This value is
only used if ``manage_iam_role`` is ``false``.
+See :ref:`iam-role-pol-examples` for examples of how to configure IAM roles
+and policies.
``lambda_memory_size``
@@ -144,10 +164,10 @@ per Lambda function. See `AWS Lambda Layers Configuration`_.
``automatic_layer``
~~~~~~~~~~~~~~~~~~~~
-Indicates whether chalice will automatically construct a single
-stage layer for all Lambda functions with requirements.txt libraries and
-vendored libraries. Boolean value defaults to ``false`` if not specified.
-See :ref:`package-3rd-party` for more information.
+A boolean value that indicates whether chalice will automatically construct a
+single stage layer for all Lambda functions with requirements.txt libraries and
+vendored libraries. Boolean value defaults to ``false`` if not specified. See
+:ref:`package-3rd-party` for more information.
.. _custom-domain-config-options:
@@ -442,6 +462,8 @@ with specified ``url_prefixes`` that should contain information about
If there is Websocket API ``websocket_api_custom_domain`` should be used
instead of ``api_gateway_custom_domain``.
+.. _iam-role-pol-examples:
+
IAM Roles and Policies
~~~~~~~~~~~~~~~~~~~~~~
|
Move iter_subtrees_topdown into standalone
Move the definition of `iter_subtrees_topdown` into the standalone template section, so it will be included in standalone parsers. | @@ -142,6 +142,20 @@ class Tree(Generic[_Leaf_T]):
del queue
return reversed(list(subtrees.values()))
+ def iter_subtrees_topdown(self):
+ """Breadth-first iteration.
+
+ Iterates over all the subtrees, return nodes in order like pretty() does.
+ """
+ stack = [self]
+ while stack:
+ node = stack.pop()
+ if not isinstance(node, Tree):
+ continue
+ yield node
+ for child in reversed(node.children):
+ stack.append(child)
+
def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]':
"""Returns all nodes of the tree that evaluate pred(node) as true."""
return filter(pred, self.iter_subtrees())
@@ -179,20 +193,6 @@ class Tree(Generic[_Leaf_T]):
if pred(c):
yield c
- def iter_subtrees_topdown(self):
- """Breadth-first iteration.
-
- Iterates over all the subtrees, return nodes in order like pretty() does.
- """
- stack = [self]
- while stack:
- node = stack.pop()
- if not isinstance(node, Tree):
- continue
- yield node
- for child in reversed(node.children):
- stack.append(child)
-
def __deepcopy__(self, memo):
return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta)
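A short usage sketch, assuming a lark release that exposes `iter_subtrees_topdown` (the grammar below is illustrative only): the method yields the root first and then each subtree in source order.

```python
from lark import Lark

parser = Lark("""
    start: pair+
    pair: WORD "=" WORD
    %import common.WORD
    %import common.WS
    %ignore WS
""")

tree = parser.parse("a=b c=d")
for subtree in tree.iter_subtrees_topdown():
    print(subtree.data)   # 'start', then 'pair' twice, in source order
```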
|
Fix for a broken method
method had moved to the renderer | @@ -46,7 +46,7 @@ class ImageViewAgg(ImageView.ImageViewBase):
def get_rgb_image_as_bytes(self, format='png', quality=90):
# TO BE DEPRECATED: DO NOT USE
- return self.get_surface_as_rgb_format_bytes(format=format,
+ return self.renderer.get_surface_as_rgb_format_bytes(format=format,
quality=quality)
def save_rgb_image_as_file(self, filepath, format='png', quality=90):
|
Bug when determining if all rows have the same length
Should be the length of each row in all rows (len(row)), not the number of rows (nrow). | @@ -117,7 +117,7 @@ def csv2st(csvfile, headers=False, stubs=False, title=None):
stubs = ()
nrows = len(rows)
ncols = len(rows[0])
- if any(nrows != ncols for row in rows):
+ if any(len(row) != ncols for row in rows):
raise IOError('All rows of CSV file must have same length.')
return SimpleTable(data=rows, headers=headers, stubs=stubs)
|
Adds initialization of self.gates_on_qubits to ProcessorSpec
Since this is needed for RB calculations - currently a little
rough. | @@ -84,6 +84,21 @@ class ProcessorSpec(object):
if len(self.compilations) > 0:
self.construct_compiler_costs()
+ if len(construct_models) > 0:
+ # Compute the gate labels that act on an entire set of qubits
+ self.gates_on_qubits = _collections.defaultdict(list)
+
+ model_to_use = construct_models[0]
+ if model_to_use == 'clifford' and len(construct_models) > 1:
+ model_to_use = construct_models[0] # avoid using the 'clifford' model if possible since it may be missing gates...
+ for gl in self.models[model_to_use].gates:
+ for p in _itertools.permutations(gl.qubits):
+ self.gates_on_qubits[p].append(gl)
+ else:
+ self.gates_on_qubits = None
+
+
+
return # done with __init__(...)
|
Remove typo in echo
partion -> partition | @@ -497,23 +497,23 @@ if [ "$1" = "format" ]; then
sudo parted /dev/${hdd} mkpart primary ext4 0% 100% 1>&2
sleep 6
sync
- # loop until the partion gets available
+ # loop until the partition gets available
loopdone=0
loopcount=0
while [ ${loopdone} -eq 0 ]
do
- >&2 echo "# waiting until the partion gets available"
+ >&2 echo "# waiting until the partition gets available"
sleep 2
sync
loopdone=$(lsblk -o NAME | grep -c ${hdd}1)
loopcount=$(($loopcount +1))
if [ ${loopcount} -gt 10 ]; then
- >&2 echo "# partion failed"
+ >&2 echo "# partition failed"
echo "error='partition failed'"
exit 1
fi
done
- >&2 echo "# partion available"
+ >&2 echo "# partition available"
fi
# make sure /mnt/hdd is unmounted before formatting
|
Update heart_anomaly_detection.py
* Update heart_anomaly_detection.py
Fix wrongly initialized node name and comment, organize imports
* Update heart_anomaly_detection.py
Fix indentations
* Fix style | # See the License for the specific language governing permissions and
# limitations under the License.
-import rospy
+import argparse
import torch
+
+import rospy
from vision_msgs.msg import Classification2D
-import argparse
from std_msgs.msg import Float32MultiArray
+
from opendr_bridge import ROSBridge
from opendr.perception.heart_anomaly_detection import GatedRecurrentUnitLearner, AttentionNeuralBagOfFeatureLearner
@@ -49,7 +51,6 @@ class HeartAnomalyNode:
self.channels = 1
self.series_length = 9000
- # Initialize the gesture recognition
if model == 'gru':
self.learner = GatedRecurrentUnitLearner(in_channels=self.channels, series_length=self.series_length,
n_class=4, device=device)
@@ -111,7 +112,8 @@ if __name__ == '__main__':
print("Using CPU")
device = "cpu"
- gesture_node = HeartAnomalyNode(input_ecg_topic=args.input_ecg_topic,
+ heart_anomaly_detection_node = HeartAnomalyNode(input_ecg_topic=args.input_ecg_topic,
output_heart_anomaly_topic=args.output_heart_anomaly_topic,
model=args.model, device=device)
- gesture_node.listen()
+
+ heart_anomaly_detection_node.listen()
|
update epochs_create()
to account for the problem of having insufficient number of samples in the first epoch or the last epoch | @@ -26,8 +26,7 @@ def epochs_create(data, events, sampling_rate=1000, epochs_duration=1, epochs_st
A list containing unique event identifiers. If `None`, will use the event index number.
event_conditions : list
An optional list containing, for each event, for example the trial category, group or experimental conditions.
- baseline_correction : bool
- If True, will substract the mean value of the baseline (until `epochs_start`).
+ baseline_correction :
Returns
@@ -59,9 +58,6 @@ def epochs_create(data, events, sampling_rate=1000, epochs_duration=1, epochs_st
>>> # Baseline correction
>>> epochs = nk.epochs_create(data, events, sampling_rate=200, epochs_duration=3, baseline_correction=True)
>>> nk.epochs_plot(epochs)
- >>>
- >>> epochs = nk.epochs_create(data, events, sampling_rate=200, epochs_duration=3, baseline_correction=[0, 1])
- >>> nk.epochs_plot(epochs)
"""
# Sanitize events input
if isinstance(events, dict) is False:
@@ -80,6 +76,18 @@ def epochs_create(data, events, sampling_rate=1000, epochs_duration=1, epochs_st
# Create epochs
parameters = listify(onset=event_onsets, label=event_labels, condition=event_conditions, start=epochs_start, duration=epochs_duration)
+
+ # Find the maximum numbers in an epoch
+ # Then extend data by the max samples in epochs * NaN
+ epoch_max_duration = int(max((i * sampling_rate
+ for i in parameters["duration"])))
+ extension = pd.DataFrame({"Signal": [np.nan] * (epoch_max_duration)})
+ data = data.append(extension, ignore_index=True)
+ data = extension.append(data, ignore_index=True)
+
+ # Adjust the Onset of the events
+ parameters["onset"] = [i + epoch_max_duration for i in parameters["onset"]]
+
epochs = {}
for i, label in enumerate(parameters["label"]):
@@ -94,9 +102,9 @@ def epochs_create(data, events, sampling_rate=1000, epochs_duration=1, epochs_st
epoch["Index"] = epoch.index.values
epoch.index = np.linspace(start=parameters["start"][i], stop=parameters["start"][i] + parameters["duration"][i], num=len(epoch), endpoint=True)
- # Baseline correction
- if baseline_correction is not False:
- epoch = _epochs_create_baseline(epoch, baseline_correction, epochs_duration=epochs_duration, epochs_start=epochs_start)
+ if baseline_correction is True:
+ baseline_end = 0 if epochs_start <= 0 else epochs_start
+ epoch = epoch - epoch.loc[:baseline_end].mean()
# Add additional
epoch["Label"] = parameters["label"][i]
@@ -107,35 +115,3 @@ def epochs_create(data, events, sampling_rate=1000, epochs_duration=1, epochs_st
epochs[label] = epoch
return epochs
-
-
-
-
-
-
-
-
-
-def _epochs_create_baseline(epoch, baseline_correction=False, epochs_duration=1, epochs_start=0):
- if isinstance(baseline_correction, list):
- if baseline_correction[0] is None:
- baseline_correction[0] = epochs_start
- if baseline_correction[1] is None:
- baseline_correction[1] = 0
- baseline = epoch.loc[baseline_correction[0]:baseline_correction[1]].mean()
-
- elif baseline_correction is True:
- if epochs_start <= 0:
- baseline = epoch.loc[:0].mean()
- else:
- baseline = epoch.loc[:epochs_start].mean()
-
- elif isinstance(baseline_correction, int):
- baseline = epoch.loc[baseline_correction].mean()
-
-
-
- else:
- baseline = 0
-
- return epoch - baseline
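A self-contained sketch of the padding trick described in the message, with made-up numbers: extending the signal with NaNs on both ends so that epochs whose onsets fall near the edges still contain the full number of samples.

```python
import numpy as np
import pandas as pd

sampling_rate, duration = 10, 1
signal = pd.Series(np.arange(15), name="Signal")
onsets = [2, 12]                       # the second onset is too close to the end

pad = int(duration * sampling_rate)
padded = pd.concat(
    [pd.Series([np.nan] * pad), signal, pd.Series([np.nan] * pad)],
    ignore_index=True,
)
onsets = [o + pad for o in onsets]     # shift onsets to the padded index

for onset in onsets:
    epoch = padded.iloc[onset:onset + pad]
    print(len(epoch))                  # always 10 samples; NaN where data was missing
```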
|
input-pill: Add copy override functionality for input pills.
When a pill is selected and you press copy, it will by default return
the value, but it can be overridden with the `onCopyReturn` function. | @@ -15,6 +15,7 @@ var input_pill = function ($parent) {
pills: [],
$parent: $parent,
$input: $parent.find(".input"),
+ copyReturnFunction: function (data) { return data.value; },
getKeyFunction: function () {},
validation: function () {},
lastUpdated: null,
@@ -110,6 +111,7 @@ var input_pill = function ($parent) {
if (!payload) {
return false;
}
+
var $pill = this.createPillElement(payload);
store.$input.before($pill);
@@ -217,6 +219,12 @@ var input_pill = function ($parent) {
});
},
+ getByID: function (id) {
+ return _.find(store.pills, function (pill) {
+ return pill.id === id;
+ });
+ },
+
// returns all hidden keys.
// IMPORTANT: this has a caching mechanism built in to check whether the
// store has changed since the keys/values were last retrieved so that
@@ -379,6 +387,13 @@ var input_pill = function ($parent) {
$(this).find(".input").focus();
}
});
+
+ store.$parent.on("copy", ".pill", function (e) {
+ var id = store.$parent.find(":focus").data("id");
+ var data = funcs.getByID(id);
+ e.originalEvent.clipboardData.setData("text/plain", store.copyReturnFunction(data));
+ e.preventDefault();
+ });
}());
// the external, user-accessible prototype.
@@ -401,6 +416,12 @@ var input_pill = function ($parent) {
store.removePillFunction = callback;
},
+ // this is for when a user copies a pill, you can choose in here what
+ // value to return.
+ onCopyReturn: function (callback) {
+ store.copyReturnFunction = callback;
+ },
+
validate: function (callback) {
store.validation = callback;
},
|
Fix doc for solver in LogisticRegression
Answers
Authors:
- Victor Lafargue (https://github.com/viclafargue)
Approvers:
- Dante Gama Dessavre (https://github.com/dantegd)
URL: | @@ -142,12 +142,11 @@ class LogisticRegression(UniversalBase,
See :ref:`verbosity-levels` for more info.
l1_ratio : float or None, optional (default=None)
The Elastic-Net mixing parameter, with `0 <= l1_ratio <= 1`
- solver : 'qn', 'lbfgs', 'owl' (default='qn').
+ solver : 'qn' (default='qn')
Algorithm to use in the optimization problem. Currently only `qn` is
supported, which automatically selects either L-BFGS or OWL-QN
depending on the conditions of the l1 regularization described
- above. Options 'lbfgs' and 'owl' are just convenience values that
- end up using the same solver following the same rules.
+ above.
handle : cuml.Handle
Specifies the cuml.handle that holds internal CUDA state for
computations in this model. Most importantly, this specifies the CUDA
|
subtitle: stpp support
this is used in dash | @@ -54,6 +54,8 @@ class subtitle:
data = self.wrstsegment(subdata)
if self.subtype == "raw":
data = self.raw(subdata)
+ if self.subtype == "stpp":
+ data = self.stpp(subdata)
if self.subfix:
if self.config.get("get_all_subtitles"):
@@ -80,9 +82,11 @@ class subtitle:
def tt(self, subdata):
i = 1
- data = ""
subs = subdata.text
+ return self._tt(subs, i)
+ def _tt(self, subs, i):
+ data = ""
subdata = re.sub(' xmlns="[^"]+"', "", subs, count=1)
tree = ET.XML(subdata)
xml = tree.find("body").find("div")
@@ -92,6 +96,7 @@ class subtitle:
if tag == "p" or tag == "span":
begin = node.attrib["begin"]
if not ("dur" in node.attrib):
+ if "end" not in node.attrib:
duration = node.attrib["duration"]
else:
duration = node.attrib["dur"]
@@ -333,6 +338,63 @@ class subtitle:
return string
+ def stpp(self, subdata):
+ nr = 1
+ entries = []
+
+ for i in self.kwargs["files"]:
+ res = self.http.get(i)
+ start = res.content.find(b"mdat") + 4
+ if start > 3:
+ _data = self._tt(res.content[start:].decode(), nr)
+ if _data:
+ entries.append(_data.split("\n\n"))
+ nr += 1
+
+ new_entries = []
+ for entry in entries:
+ for i in entry:
+ if i:
+ new_entries.append(i.split("\n"))
+
+ entries = new_entries
+ changed = True
+ while changed:
+ changed, entries = _resolv(entries)
+
+ nr = 1
+ data = ""
+ for entry in entries:
+ for item in entry:
+ data += f"{item}\n"
+ data += "\n"
+
+ return data
+
+
+def _resolv(entries):
+ skip = False
+ changed = False
+ new_entries = []
+ for nr, i in enumerate(entries):
+ if skip:
+ skip = False
+ continue
+ time_match = strdate(i[1].replace(",", "."))
+ time_match_next = None
+ if nr + 1 < len(entries):
+ time_match_next = strdate(entries[nr + 1][1].replace(",", "."))
+ left_time = time_match.group(1)
+ right_time = time_match.group(2)
+ if time_match_next and time_match.group(2) == time_match_next.group(1):
+ right_time = time_match_next.group(2)
+ skip = True
+ changed = True
+ next_entries = [nr + 1, f"{left_time} --> {right_time}"]
+ next_entries.extend(i[2:])
+ new_entries.append(next_entries)
+ return changed, new_entries
+
def timestr(msec):
"""
|
Remove prefetch_related query when fetching folder children metadata
[#PLAT-1116] | @@ -432,7 +432,7 @@ class OsfStorageFolder(OsfStorageFileNode, Folder):
@property
def is_preprint_primary(self):
if hasattr(self.target, 'preprint_file') and self.target.preprint_file:
- for child in self.children.all().prefetch_related('target'):
+ for child in self.children.all():
if getattr(child.target, 'preprint_file', None):
if child.is_preprint_primary:
return True
|
docs: fix Python extraction script
I needed to import `tarfile` rather than `tar`.
Closes | @@ -29,7 +29,7 @@ If you want to extract the distribution with Python, use the
.. code-block:: python
- import tar
+ import tarfile
import zstandard
with open("path/to/distribution.tar.zstd", "rb") as ifh:
|
[honggfuzz] version update
New day, new code updates - they improve coverage in some benchmarks | @@ -23,14 +23,14 @@ RUN apt-get update -y && \
libblocksruntime-dev \
liblzma-dev
-# Download honggfuz version 2.1 + 7c34b297d365d1f9fbfc230db7504c8aca384608
+# Download honggfuz version 2.1 + e0d47db4d0d775a3dc42120351a17c646b198beb
# Set CFLAGS use honggfuzz's defaults except for -mnative which can build CPU
# dependent code that may not work on the machines we actually fuzz on.
# Create an empty object file which will become the FUZZER_LIB lib (since
# honggfuzz doesn't need this when hfuzz-clang(++) is used).
RUN git clone https://github.com/google/honggfuzz.git /honggfuzz && \
cd /honggfuzz && \
- git checkout 7c34b297d365d1f9fbfc230db7504c8aca384608 && \
+ git checkout e0d47db4d0d775a3dc42120351a17c646b198beb && \
CFLAGS="-O3 -funroll-loops" make && \
touch empty_lib.c && \
cc -c -o empty_lib.o empty_lib.c
|
axis_pseudolandmarks
added small arrays with 0's into both x and y_axis_pseudolandmarks to cover if statement where len(value) == 0 | @@ -2072,6 +2072,7 @@ def test_plantcv_x_axis_pseudolandmarks():
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
+ _ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
@@ -2112,6 +2113,7 @@ def test_plantcv_y_axis_pseudolandmarks():
pcv.params.debug = "plot"
_ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
_ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
+ _ = pcv.y_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=mask, img=img)
# Test with debug = None
pcv.params.debug = None
left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
|
Fix Nested field root method
Makes root method in Nested field not return itself but None if the field isn't a schema | @@ -325,7 +325,7 @@ class Field(FieldABC):
ret = self
while hasattr(ret, 'parent') and ret.parent:
ret = ret.parent
- return ret
+ return ret if isinstance(ret, SchemaABC) else None
class Raw(Field):
"""Field that applies no formatting or validation."""
|
Add repeat option for Alerters.
Thanks for the patch.
Closes | @@ -12,6 +12,7 @@ class Alerter:
hostname = gethostname()
available = False
limit = 1
+ repeat = 0
days = range(0, 7)
times_type = "always"
@@ -34,6 +35,8 @@ class Alerter:
self.set_dependencies([x.strip() for x in config_options["depend"].split(",")])
if 'limit' in config_options:
self.limit = int(config_options["limit"])
+ if 'repeat' in config_options:
+ self.repeat = int(config_options["repeat"])
if 'times_type' in config_options:
times_type = config_options["times_type"]
if times_type == "always":
@@ -133,8 +136,8 @@ class Alerter:
return "catchup"
else:
return "failure"
- if monitor.virtual_fail_count() == self.limit:
- # This is the first time we've failed
+ if monitor.virtual_fail_count() == self.limit or (self.repeat and (monitor.virtual_fail_count() % self.limit == 0)):
+ # This is the first time or nth time we've failed
if out_of_hours:
if monitor.name not in self.ooh_failures:
self.ooh_failures.append(monitor.name)
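A worked example of the new trigger condition with toy values (this is not SimpleMonitor code): with `limit = 3`, an alert fires on the third consecutive failure, and with `repeat` set it fires again on every further multiple of three.

```python
limit, repeat = 3, 1

def should_alert(fail_count):
    return fail_count == limit or (repeat and fail_count % limit == 0)

fires = [count for count in range(1, 10) if should_alert(count)]
print(fires)   # [3, 6, 9] when repeat is truthy; just [3] when repeat is 0
```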
|
Add me to code owners
Add me to code owners. | # Generic rule for the repository. This pattern is actually the one that will
# apply unless specialized by a later rule
-* @ajavadia @ewinston @atilag @delapuente @diego-plan9
+* @ajavadia @ewinston @atilag @delapuente @diego-plan9 @nonhermitian
# Individual folders on root directory
/cmake @ajavadia @atilag @diego-plan9
|
Added unit tests for blacklist and whitelist
Closes-Bug: | # under the License.
import argparse
+import atexit
import os
import shutil
import subprocess
@@ -25,6 +26,7 @@ from tempest.cmd import run
from tempest.tests import base
DEVNULL = open(os.devnull, 'wb')
+atexit.register(DEVNULL.close)
class TestTempestRun(base.TestCase):
@@ -68,6 +70,34 @@ class TestTempestRun(base.TestCase):
self.assertEqual('i_am_a_fun_little_regex',
self.run_cmd._build_regex(args))
+ def test__build_whitelist_file(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, 'smoke', False)
+ setattr(args, 'regex', None)
+ self.tests = tempfile.NamedTemporaryFile(
+ prefix='whitelist', delete=False)
+ self.tests.write(b"volume \n compute")
+ self.tests.close()
+ setattr(args, 'whitelist_file', self.tests.name)
+ setattr(args, 'blacklist_file', None)
+ self.assertEqual("volume|compute",
+ self.run_cmd._build_regex(args))
+ os.unlink(self.tests.name)
+
+ def test__build_blacklist_file(self):
+ args = mock.Mock(spec=argparse.Namespace)
+ setattr(args, 'smoke', False)
+ setattr(args, 'regex', None)
+ self.tests = tempfile.NamedTemporaryFile(
+ prefix='blacklist', delete=False)
+ self.tests.write(b"volume \n compute")
+ self.tests.close()
+ setattr(args, 'whitelist_file', None)
+ setattr(args, 'blacklist_file', self.tests.name)
+ self.assertEqual("^((?!compute|volume).)*$",
+ self.run_cmd._build_regex(args))
+ os.unlink(self.tests.name)
+
class TestRunReturnCode(base.TestCase):
def setUp(self):
|
Update CovidCast
Fixes 3 & 4 mentioned in | @@ -6,8 +6,7 @@ nav_order: 1
# COVIDcast Epidata API
-This is the documentation for accessing the Delphi's COVID-19 Surveillance
-Streams (`covidcast`) endpoint of [Delphi](https://delphi.cmu.edu/)'s
+This is the documentation for accessing Delphi's COVID-19 indicators, an (`covidcast`) endpoint of [Delphi](https://delphi.cmu.edu/)'s
epidemiological data API. This API provides data on the spread and impact of the
COVID-19 pandemic across the United States, most of which is available at the
county level and updated daily. This data powers our public [COVIDcast
@@ -35,7 +34,7 @@ href="https://lists.andrew.cmu.edu/mailman/listinfo/delphi-covidcast-api">subscr
{:toc}
## Licensing
-Like all other Delphi Epidata datasets, our COVIDcast data is freely available to the public. However, our COVID-19 surveillance streams include data from many different sources, with data licensing handled separately for each source. For a summary of the licenses used and a list of the indicators each license applies to, we suggest users visit our [COVIDcast licensing](covidcast_licensing.md) page. Licensing information is also summarized on each indicator's details page.
+Like all other Delphi Epidata datasets, our COVIDcast data is freely available to the public. However, our COVID-19 indicators include data from many different sources, with data licensing handled separately for each source. For a summary of the licenses used and a list of the indicators each license applies to, we suggest users visit our [COVIDcast licensing](covidcast_licensing.md) page. Licensing information is also summarized on each indicator's details page.
We encourage academic users to [cite](README.md#citing) the data if they
use it in any publications. Our [data ingestion
code](https://github.com/cmu-delphi/covidcast-indicators) and [API server
@@ -184,7 +183,7 @@ requests for smaller time intervals.
### Facebook Survey CLI on 2020-04-06 to 2010-04-10 (county 06001)
-https://api.covidcast.cmu.edu/epidata/api.php?source=covidcast&data_source=fb-survey&signal=raw_cli&time_type=day&geo_type=county&time_values=20200406-20200410&geo_value=06001
+https://api.covidcast.cmu.edu/epidata/api.php?source=covidcast&data_source=fb-survey&signal=smoothed_cli&time_type=day&geo_type=county&time_values=20200406-20200410&geo_value=06001
```json
{
@@ -206,7 +205,7 @@ https://api.covidcast.cmu.edu/epidata/api.php?source=covidcast&data_source=fb-su
### Facebook Survey CLI on 2020-04-06 (all counties)
-https://api.covidcast.cmu.edu/epidata/api.php?source=covidcast&data_source=fb-survey&signal=raw_cli&time_type=day&geo_type=county&time_values=20200406&geo_value=*
+https://api.covidcast.cmu.edu/epidata/api.php?source=covidcast&data_source=fb-survey&signal=smoothed_cli&time_type=day&geo_type=county&time_values=20200406&geo_value=*
```json
{
|
provision: Bump provision version after clipboard package update.
The provision wasn't bumped in
which caused some js exceptions. | @@ -8,4 +8,4 @@ ZULIP_VERSION = "1.7.1+git"
# Typically, adding a dependency only requires a minor version bump, and
# removing a dependency requires a major version bump.
-PROVISION_VERSION = '17.10'
+PROVISION_VERSION = '17.11'
|
Use the version.json file generated by CircleCI
Remove the logic that pull the version tag from local git repo. This is
no longer needed now that the build pipeline is moved to pipeline v2. | @@ -36,13 +36,9 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LC_ALL en_US.UTF-8
+# version.json is overwritten by CircleCI (see circle.yml).
# The pipeline v2 standard requires the existence of /app/version.json
-# inside the docker image, although the content of the file comes from
-# circle.yml.
-# We realize that the content of version.json is written twice (once in
-# circle.yml, and the other in the end of this file), we should remove
-# the bits at the end of this file after we fully migrate to use pipeline
-# v2.
+# inside the docker image, thus it's copied there.
COPY version.json /app/version.json
COPY . /data/olympia
WORKDIR /data/olympia
@@ -70,15 +66,3 @@ RUN npm install \
RUN rm -f settings_local.py settings_local.pyc
-RUN [[ -z "$CIRCLE_TAG" ]] \
- && { \
- GITREF=$(git rev-parse HEAD) \
- GITTAG=$(git name-rev --tags --name-only $GITREF) \
- SOURCE='https://github.com/mozilla/addons-server' \
- && echo "{\"source\": \"$SOURCE\", \"version\": \"$GITTAG\", \"commit\": \"$GITREF\"}" > version.json ;\
- } || { \
- GITREF=$(git rev-parse HEAD) \
- GITTAG=$CIRCLE_TAG \
- SOURCE='https://github.com/mozilla/addons-server' \
- && echo "{\"source\": \"$SOURCE\", \"version\": \"$GITTAG\", \"commit\": \"$GITREF\"}" > version.json ;\
- }
|
Import and Export buttons grouped with Add and Remove buttons
Fixed issue where path was added after canceling "Select directory" dialog
Disable Remove button when editable rows is length 0
Remove Python 2.x consideration | # Local imports
from spyder.config.base import _
-from spyder.py3compat import PY2
from spyder.utils.icon_manager import ima
from spyder.utils.misc import getcwd_or_home
from spyder.utils.qthelpers import create_toolbutton
@@ -53,6 +52,7 @@ def __init__(self, parent, path=None, read_only_path=None,
self.moveup_button = None
self.movedown_button = None
self.movebottom_button = None
+ self.import_button = None
self.export_button = None
self.selection_widgets = []
self.top_toolbar_widgets = self._setup_top_toolbar()
@@ -168,8 +168,7 @@ def _setup_bottom_toolbar(self):
tip=_("Export to PYTHONPATH environment variable"),
text_beside_icon=True)
- self.selection_widgets.append(self.remove_button)
- return [self.add_button, self.remove_button, None, self.import_button,
+ return [self.add_button, self.remove_button, self.import_button,
self.export_button]
def _create_item(self, path):
@@ -193,9 +192,6 @@ def _create_item(self, path):
def editable_bottom_row(self):
"""Maximum bottom row count that is editable."""
read_only_count = len(self.read_only_path)
- if read_only_count == 0:
- max_row = self.listwidget.count() - 1
- else:
max_row = self.listwidget.count() - read_only_count - 1
return max_row
@@ -313,6 +309,8 @@ def refresh(self):
for widget in disable_widgets:
widget.setEnabled(False)
+ self.remove_button.setEnabled(self.listwidget.count()
+ - len(self.read_only_path))
self.export_button.setEnabled(self.listwidget.count() > 0)
# Ok button only enabled if actual changes occur
@@ -341,23 +339,7 @@ def add_path(self, directory=None):
directory = getexistingdirectory(self, _("Select directory"),
self.last_path)
self.redirect_stdio.emit(True)
-
- if PY2:
- is_unicode = False
- try:
- directory.decode('ascii')
- except (UnicodeEncodeError, UnicodeDecodeError):
- is_unicode = True
-
- if is_unicode:
- QMessageBox.warning(
- self,
- _("Add path"),
- _("You are using Python 2 and the selected path has "
- "Unicode characters."
- "<br> "
- "Therefore, this path will not be added."),
- QMessageBox.Ok)
+ if not directory:
return
directory = osp.abspath(directory)
|
email report enhancement
move summary (passed, failed, skipped...) to the top of the email report
add passed percentage to the subject of email report
fixes: | @@ -1024,7 +1024,7 @@ def add_squad_analysis_to_email(session, soup):
skipped_message = "--unknown--"
skipped["UNASSIGNED"].append((result.nodeid, skipped_message))
- # no failed or skipped tests - exist the function
+ # no failed or skipped tests - exit the function
if not failed and not skipped:
return
@@ -1044,6 +1044,7 @@ def add_squad_analysis_to_email(session, soup):
font-family: monospace;
background-color: #eee;
padding: 5px;
+ margin-top: 10px;
}
.squad-analysis h2 {
margin: 0px;
@@ -1124,11 +1125,41 @@ def add_squad_analysis_to_email(session, soup):
skips_ul_tag.append(skips_li_tag)
+def move_summary_to_top(soup):
+ """
+ Move summary to the top of the eamil report
+
+ """
+ summary = []
+ summary.append(soup.find("h2", text="Summary"))
+ for tag in summary[0].next_siblings:
+ if tag.name == "h2":
+ break
+ else:
+ summary.append(tag)
+ for tag in summary:
+ tag.extract()
+ main_header = soup.find("h1")
+ # because we are inserting the tags just after the header one by one, we
+ # have to insert them in reverse order
+ summary.reverse()
+ for tag in summary:
+ main_header.insert_after(tag)
+
+
def email_reports(session):
"""
Email results of test run
"""
+ # calculate percentage pass
+ reporter = session.config.pluginmanager.get_plugin("terminalreporter")
+ passed = len(reporter.stats.get("passed", []))
+ failed = len(reporter.stats.get("failed", []))
+ error = len(reporter.stats.get("error", []))
+ total = passed + failed + error
+ percentage_passed = (passed / total) * 100
+
build_id = get_ocs_build_number()
build_str = f"BUILD ID: {build_id} " if build_id else ""
mailids = config.RUN["cli_params"]["email"]
@@ -1140,6 +1171,7 @@ def email_reports(session):
f"ocs-ci results for {get_testrun_name()} "
f"({build_str}"
f"RUN ID: {config.RUN['run_id']}) "
+ f"Passed: {percentage_passed:.0f}%"
)
msg["From"] = sender
msg["To"] = ", ".join(recipients)
@@ -1152,6 +1184,7 @@ def email_reports(session):
parse_html_for_email(soup)
if config.RUN["cli_params"].get("squad_analysis"):
add_squad_analysis_to_email(session, soup)
+ move_summary_to_top(soup)
part1 = MIMEText(soup, "html")
msg.attach(part1)
try:
|
you flaky piece of shit
Got pwned by logging in to the test server. | @@ -101,7 +101,7 @@ def test_video_Movie_attrs(movies):
assert [i.tag for i in movie.directors] == ['Nina Paley']
assert movie.duration >= 160000
assert movie.fields == []
- assert sorted([i.tag for i in movie.genres]) == ['Animation', 'Comedy', 'Fantasy', 'Musical']
+ assert sorted([i.tag for i in movie.genres]) == ['Animation', 'Drama', 'Music', 'Romance']
assert movie.guid == 'com.plexapp.agents.imdb://tt1172203?lang=en'
assert utils.is_metadata(movie._initpath)
assert utils.is_metadata(movie.key)
|
Fix
Add parser argument "--no-daemonize" | @@ -69,10 +69,14 @@ def abort(message, colour=RED, stream=sys.stderr):
sys.exit(1)
-def start(configfile):
+def start(configfile, daemonize = True):
write("Starting ...")
args = SYNAPSE
+
+ if daemonize:
args.extend(["--daemonize", "-c", configfile])
+ else:
+ args.extend(["-c", configfile])
try:
subprocess.check_call(args)
@@ -143,12 +147,20 @@ def main():
help="start or stop all the workers in the given directory"
" and the main synapse process",
)
+ parser.add_argument(
+ "--no-daemonize",
+ action="store_false",
+ help="Run synapse in the foreground (for debugging)"
+ )
options = parser.parse_args()
if options.worker and options.all_processes:
write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr)
sys.exit(1)
+ if options.no_daemonize and options.all_processes:
+ write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr)
+ sys.exit(1)
configfile = options.configfile
@@ -276,7 +288,7 @@ def main():
# Check if synapse is already running
if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
abort("synapse.app.homeserver already running")
- start(configfile)
+ start(configfile, bool(options.no_daemonize))
for worker in workers:
env = os.environ.copy()
|
[GTK] Implement web-inspector debug support
Based on the latest multi-window feature. | @@ -93,7 +93,13 @@ class BrowserView:
self.webview.connect('notify::visible', self.on_webview_ready)
self.webview.connect('document-load-finished', self.on_load_finish)
self.webview.connect('status-bar-text-changed', self.on_status_change)
+
+ if debug:
+ self.webview.props.settings.props.enable_developer_extras = True
+ self.webview.get_inspector().connect('inspect-web-view', self.on_inspect_webview)
+ else:
self.webview.props.settings.props.enable_default_context_menu = False
+
self.webview.props.opacity = 0.0
scrolled_window.add(self.webview)
@@ -112,6 +118,12 @@ class BrowserView:
self.window.destroy()
del BrowserView.instances[self.uid]
+ try: # Close inspector if open
+ BrowserView.instances[self.uid + '-inspector'].window.destroy()
+ del BrowserView.instances[self.uid + '-inspector']
+ except KeyError:
+ pass
+
if BrowserView.instances == {}:
gtk.main_quit()
@@ -156,6 +168,15 @@ class BrowserView:
code = 'pywebview._bridge.return_val = "{0}";'.format(_escape_string(str(return_val)))
webview.execute_script(code)
+ def on_inspect_webview(self, inspector, webview):
+ title = 'Web Inspector - {}'.format(self.window.get_title())
+ uid = self.uid + '-inspector'
+
+ inspector = BrowserView(uid, title, '', 700, 500, True, False, (300,200),
+ False, '#fff', False, None, self.webview_ready)
+ inspector.show()
+ return inspector.webview
+
def show(self):
self.window.show_all()
|
SceneInspector : Fix transform decomposition
This was broken by the move to the imath module. | @@ -1498,7 +1498,8 @@ class __TransformSection( LocationSection ) :
return matrix
try :
- components = dict( zip( "shrt", matrix.extractSHRT() ) )
+ components = { x : imath.V3f() for x in "shrt" }
+ matrix.extractSHRT( components["s"], components["h"], components["r"], components["t"] )
except :
# decomposition can fail if we have 0 scale.
return "Unavailable"
|
Fix bug on participant roles table qtip
Qtip is now re-initialized every time a participant is edited | $('.js-reset-role-filter').toggleClass('disabled', isInitialState);
}
+ function initTooltip() {
+ $('.js-show-regforms').qtip({
+ content: {
+ text: function() {
+ return $(this).data('title');
+ }
+ },
+ hide: {
+ delay: 100,
+ fixed: true
+ }
+ });
+ }
+
global.setupEventPersonsList = function setupEventPersonsList(options) {
options = $.extend({
hasNoAccountFilter: false
});
}
- $('.js-show-regforms').qtip({
- content: {
- text: function() {
- return $(this).data('title');
- }
- },
- hide: {
- delay: 100,
- fixed: true
- }
- });
+ initTooltip();
var $personFilters = $('#person-filters');
// Sets background color of role filter items based on their colored squared color
$checkbox.prop('checked', !$checkbox.prop('checked')).trigger('change');
toggleResetBtn();
});
+
+ $('#event-participants-list').on('indico:htmlUpdated', initTooltip);
};
|
Fixes some bugs in build_crosstalk_free_model.
Bugs that caused the use of float and tuples (instead of error-dicts)
to not work correctly for describing the errors of the gates in a
crosstalk-free model. | @@ -1070,11 +1070,12 @@ def build_crosstalk_free_model(nQubits, gate_names, error_rates, nonstd_gate_uni
#tuple should have length 4^k-1 for a k-qubit gate (with dimension 4^k)
assert(len(errs) + 1 == gateMx.shape[0]), \
"Invalid number of Pauli stochastic rates: got %d but expected %d" % (len(errs), gateMx.shape[0] - 1)
- gate = _op.StochasticNoiseOp(len(errs) + 1, "pp", evotype, initial_rates=errs)
+ err = _op.StochasticNoiseOp(len(errs) + 1, "pp", evotype, initial_rates=errs)
+ gate = _op.ComposedOp([_op.StaticDenseOp(gateMx, evotype), err])
elif isinstance(errs, float):
#Make a depolarization operator:
- gate = _op.LindbladOp.from_operation_matrix(gateMx, ham_basis=None, nonham_basis="pp",
+ gate = _op.LindbladOp.from_operation_matrix(gateMx, gateMx, ham_basis=None, nonham_basis="pp",
param_mode="depol", nonham_mode="diagonal",
truncate=True, mxBasis="pp", evotype=evotype)
perPauliRate = errs / len(gate.errorgen.other_basis.labels)
|
Start network thread when actually running not loading config
Closes | @@ -117,8 +117,6 @@ class SimpleMonitor:
raise RuntimeError("Broken dependency configuration")
if not self.verify_alerting():
module_logger.critical("No alerters defined and no remote logger found")
- if self._network:
- self._start_network_thread()
def _start_network_thread(self) -> None:
if self._remote_listening_thread:
@@ -736,6 +734,7 @@ class SimpleMonitor:
def run(self) -> None:
self._create_pid_file()
+ self._start_network_thread()
module_logger.info(
"=== Starting... (loop runs every %ds) Hit ^C to stop", self.interval
)
@@ -755,6 +754,7 @@ class SimpleMonitor:
try:
module_logger.warning("Reloading configuration")
self._load_config()
+ self._start_network_thread()
self.hup_loggers()
self._need_hup = False
except Exception:
|
Bug fix: backup role does not provide the listShards privilege
Fixes | @@ -365,9 +365,10 @@ class Connector(threading.Thread):
else: # sharded cluster
while self.can_run:
-
- for shard_doc in retry_until_ok(self.main_conn.admin.command,
- 'listShards')['shards']:
+ # The backup role does not provide the listShards privilege,
+ # so use the config.shards collection instead.
+ for shard_doc in retry_until_ok(
+ lambda: list(self.main_conn.config.shards.find())):
shard_id = shard_doc['_id']
if shard_id in self.shard_set:
shard_thread = self.shard_set[shard_id]
|
Fix for .gro file output with too many atoms.
Addresses issue | @@ -100,7 +100,7 @@ class GromacsGroParser(object):
atom.name = "LMP_{0}".format(atom.name)
# .gro wraps at 100,0000, which is why the field is 5 width.
gro.write('{0:5d}{1:<5s}{2:5s}{3:5d}'.format(
- atom.residue_index, atom.residue_name, atom.name, (n + 1)%100000))
+ atom.residue_index%100000, atom.residue_name, atom.name, (n + 1)%100000))
for pos in atom.position:
gro.write('{0:17.12f}'.format(pos.value_in_unit(nanometers)))
if np.any(atom.velocity):
|
gatherkeys: ensure decoded strings are used when writing to a file
Resolves: rm#39489 | @@ -9,6 +9,7 @@ import time
from ceph_deploy import hosts
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
+from ceph_deploy.util import as_string
import ceph_deploy.util.paths.mon
LOG = logging.getLogger(__name__)
@@ -143,7 +144,7 @@ def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'w') as f:
for line in out:
- f.write(line + '\n')
+ f.write(as_string(line) + '\n')
return True
@@ -162,7 +163,7 @@ def gatherkeys_with_mon(args, host, dest_dir):
mon_name_local = keytype_path_to(args, "mon")
mon_path_local = os.path.join(dest_dir, mon_name_local)
with open(mon_path_local, 'w') as f:
- f.write(mon_key)
+ f.write(as_string(mon_key))
rlogger = logging.getLogger(host)
path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
out, err, code = remoto.process.check(
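An illustration of the underlying Python 3 issue, independent of ceph-deploy (the sample lines are made up): remote output arrives as `bytes`, and concatenating it with a `str` or writing it to a text-mode file raises `TypeError`, so each line has to be decoded first — which is what the imported `as_string` helper is assumed to do.

```python
import io

out = [b"[mon.]", b"key = AQD...=="]   # remote command output arrives as bytes
keyring = io.StringIO()                # stands in for a file opened in text mode

for line in out:
    # keyring.write(line + '\n') would raise TypeError (can't concat str to bytes)
    keyring.write(line.decode("utf-8") + "\n")

print(keyring.getvalue())
```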
|
dark-mode: Make language settings modal compatible.
This makes the language settings modal compatible with dark mode by
making the background color dark. | -<div id="default_language_modal" class="modal hide" tabindex="-1" role="dialog"
+<div id="default_language_modal" class="modal hide modal-bg" tabindex="-1" role="dialog"
aria-labelledby="default_language_modal_label" aria-hidden="true">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-label="{{t 'Close' }}"><span aria-hidden="true">×</span></button>
|
Update attrib_to_hide_files.yml
Adding dest | @@ -39,7 +39,7 @@ detect:
search: '| tstats `security_content_summariesonly` count min(_time) values(Processes.process)
as process max(_time) as lastTime from datamodel=Endpoint.Processes where
Processes.process_name=attrib.exe (Processes.process=*+h*) by Processes.parent_process
- Processes.process_name Processes.user | `drop_dm_object_name("Processes")`
+ Processes.process_name Processes.user Processes.dest | `drop_dm_object_name("Processes")`
| `security_content_ctime(firstTime)`|`security_content_ctime(lastTime)`| `attrib_to_hide_files_filter`'
suppress:
suppress_fields: dest, process
|