message | diff
---|---|
forms: Mark a bunch of error strings for translation.
These error messages weren't marked for translation.
DEACTIVATED_ACCOUNT_ERROR and PASSWORD_TOO_WEAK_ERROR are used in
several places and imported, so we can't move them to be in-line errors
and we keep them at top-level, marked with gettext_lazy. | @@ -14,6 +14,7 @@ from django.http import HttpRequest
from django.urls import reverse
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext as _
+from django.utils.translation import gettext_lazy
from markupsafe import Markup as mark_safe
from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm
from two_factor.utils import totp_digits
@@ -43,31 +44,20 @@ if settings.BILLING_ENABLED:
from corporate.lib.registration import check_spare_licenses_available_for_registering_new_user
from corporate.lib.stripe import LicenseLimitError
+# We don't mark this error for translation, because it's displayed
+# only to MIT users.
MIT_VALIDATION_ERROR = (
"That user does not exist at MIT or is a "
+ '<a href="https://ist.mit.edu/email-lists">mailing list</a>. '
+ "If you want to sign up an alias for Zulip, "
+ '<a href="mailto:[email protected]">contact us</a>.'
)
-WRONG_SUBDOMAIN_ERROR = (
- "Your Zulip account {username} is not a member of the "
- + "organization associated with this subdomain. "
- + "Please contact your organization administrator with any questions."
-)
-DEACTIVATED_ACCOUNT_ERROR = (
+
+DEACTIVATED_ACCOUNT_ERROR = gettext_lazy(
"Your account {username} has been deactivated. "
+ "Please contact your organization administrator to reactivate it."
)
-PASSWORD_RESET_NEEDED_ERROR = (
- "Your password has been disabled because it is too weak. "
- "Reset your password to create a new one."
-)
-PASSWORD_TOO_WEAK_ERROR = "The password is too weak."
-AUTHENTICATION_RATE_LIMITED_ERROR = (
- "You're making too many attempts to sign in. "
- + "Try again in {} seconds or contact your organization administrator "
- + "for help."
-)
+PASSWORD_TOO_WEAK_ERROR = gettext_lazy("The password is too weak.")
def email_is_not_mit_mailing_list(email: str) -> None:
@@ -428,13 +418,23 @@ class OurAuthenticationForm(AuthenticationForm):
except RateLimited as e:
assert e.secs_to_freedom is not None
secs_to_freedom = int(e.secs_to_freedom)
- raise ValidationError(AUTHENTICATION_RATE_LIMITED_ERROR.format(secs_to_freedom))
+ error_message = _(
+ "You're making too many attempts to sign in. "
+ + "Try again in {} seconds or contact your organization administrator "
+ + "for help."
+ )
+ raise ValidationError(error_message.format(secs_to_freedom))
if return_data.get("inactive_realm"):
raise AssertionError("Programming error: inactive realm in authentication form")
if return_data.get("password_reset_needed"):
- raise ValidationError(PASSWORD_RESET_NEEDED_ERROR)
+ raise ValidationError(
+ _(
+ "Your password has been disabled because it is too weak. "
+ "Reset your password to create a new one."
+ )
+ )
if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"):
# We exclude mirror dummy accounts here. They should be treated as the
@@ -447,7 +447,12 @@ class OurAuthenticationForm(AuthenticationForm):
logging.warning(
"User %s attempted password login to wrong subdomain %s", username, subdomain
)
- error_message = WRONG_SUBDOMAIN_ERROR.format(username=username)
+ error_message = _(
+ "Your Zulip account {username} is not a member of the "
+ + "organization associated with this subdomain. "
+ + "Please contact your organization administrator with any questions."
+ )
+ error_message = error_message.format(username=username)
raise ValidationError(error_message)
if self.user_cache is None:
|
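A side note on the pattern in this patch: a constant evaluated at module import time has to use `gettext_lazy`, because plain `gettext` would translate the string once at import, before any request's language is active. A minimal, hedged sketch of the difference (the standalone Django configuration here is only for illustration, not how Zulip sets itself up):

```python
import django
from django.conf import settings

settings.configure(USE_I18N=True, LANGUAGE_CODE="en")  # illustrative standalone setup
django.setup()

from django.utils.translation import gettext, gettext_lazy

EAGER = gettext("The password is too weak.")      # resolved immediately, at import time
LAZY = gettext_lazy("The password is too weak.")  # a lazy proxy, resolved when rendered

print(type(EAGER) is str)   # True: already a fixed string
print(type(LAZY) is str)    # False: still a proxy; str(LAZY) translates in the active language
```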
Port appears twice for localhost
* Port appears twice for localhost
Minor fix for bug that shows during start,
which may confuse student developers.
* Minor fixes | @@ -28,6 +28,8 @@ from qiita_core.qiita_settings import qiita_config, r_client
from qiita_ware.ebi import EBISubmission
from qiita_ware.commands import submit_EBI as _submit_EBI
+from re import search
+
try:
conn = qdb.sql_connection.SQLConnectionHandler()
@@ -306,7 +308,14 @@ def start(port, master):
else:
raise
base_url = qiita_config.base_url
- click.echo("Qiita started on %s:%d @ %s" % (base_url, port, ctime()))
+
+ if search(r":\d+$", base_url):
+ tmp = "Qiita started on %s @ %s" % (base_url, ctime())
+ else:
+ tmp = "Qiita started on %s:%d @ %s" % (base_url, port, ctime())
+
+ click.echo(tmp)
+
ioloop = tornado.ioloop.IOLoop.instance()
if master:
|
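The guard added above is a plain regex test on the configured base URL. A small, hedged reproduction of that check outside Qiita (the function name and hosts here are made up for illustration):

```python
from re import search
from time import ctime

def startup_message(base_url, port):
    # If the base URL already ends in an explicit port, don't append it a second time.
    if search(r":\d+$", base_url):
        return "Qiita started on %s @ %s" % (base_url, ctime())
    return "Qiita started on %s:%d @ %s" % (base_url, port, ctime())

print(startup_message("http://localhost:21174", 21174))     # port printed once
print(startup_message("https://qiita.example.org", 21174))  # port appended as before
```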
Changelog entry for PR
Reference Jenkins CI tests and set development status to Beta. | # Pending additions
- [#717](https://github.com/helmholtz-analytics/heat/pull/717) Switch CPU CI over to Jenkins and pre-commit to GitHub action.
- [#720](https://github.com/helmholtz-analytics/heat/pull/720) Ignore test files in codecov report and allow drops in code coverage.
+- [#736](https://github.com/helmholtz-analytics/heat/pull/736) Reference Jenkins CI tests and set development status to Beta.
## New features
- [#660](https://github.com/helmholtz-analytics/heat/pull/660) NN module for data parallel neural networks
|
fix: calculate ex ratio correctly
We had lost a negative sign accidentally in | @@ -781,7 +781,7 @@ def character_ratio(style, character):
ink_extents = ffi.new('PangoRectangle *')
pango.pango_layout_line_get_extents(line, ink_extents, ffi.NULL)
if character == 'x':
- measure = units_to_double(ink_extents.y)
+ measure = -units_to_double(ink_extents.y)
else:
measure = units_to_double(ink_extents.width)
ffi.release(ink_extents)
|
Selection Rectangle
Works. Missing the left directional cross select. | @@ -816,10 +816,32 @@ class RectSelectWidget(Widget):
return RESPONSE_CONSUME
elif event_type == 'move':
self.end_location = space_pos
+ elements.validate_bounds()
for obj in elements.elems():
- # r = Rect(self.start_location, self.end_location)
- # q = Rect(obj.bounds)
- pass
+ sx = self.start_location[0]
+ sy = self.start_location[1]
+ ex = self.end_location[0]
+ ey = self.end_location[1]
+ right_drag = sx <= ex and ey <= ey
+ if not right_drag:
+ ex = self.start_location[0]
+ ey = self.start_location[1]
+ sx = self.end_location[0]
+ sy = self.end_location[1]
+ q = obj.bbox(True)
+ xmin = q[0]
+ ymin = q[1]
+ xmax = q[2]
+ ymax = q[3]
+ if (
+ sx <= xmin <= ex and
+ sy <= ymin <= ey and
+ sx <= xmax <= ex and
+ sy <= ymax <= ey
+ ):
+ obj.emphasize()
+ else:
+ obj.unemphasize()
self.scene.device.signal('refresh_scene', 0)
return RESPONSE_CONSUME
return RESPONSE_DROP
|
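The containment test above can be expressed more compactly by normalising the drag rectangle first, which also covers drags in any direction. A hedged, self-contained sketch of the same idea using plain tuples rather than the widget's own types:

```python
def rect_contains_bbox(start, end, bbox):
    # Normalise so right-to-left and bottom-to-top drags behave like left-to-right ones.
    sx, ex = sorted((start[0], end[0]))
    sy, ey = sorted((start[1], end[1]))
    xmin, ymin, xmax, ymax = bbox
    # Keep only objects whose bounding box lies entirely inside the drag rectangle.
    return sx <= xmin <= xmax <= ex and sy <= ymin <= ymax <= ey

print(rect_contains_bbox((100, 100), (0, 0), (10, 10, 40, 40)))  # True: reversed drag still selects
print(rect_contains_bbox((0, 0), (30, 30), (10, 10, 40, 40)))    # False: box extends past the rectangle
```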
fix flake8 issues
unexpected spaces around keyword / parameter equals
continuation line unaligned for hanging indent
max line length 79
are incompatible with very long variable names... | @@ -94,11 +94,11 @@ def async_add_entities_config(hass, config, async_add_entities):
group_address_tunable_white = None
group_address_tunable_white_state = None
- group_address_color_temperature = None
- group_address_color_temperature_state = None
+ group_address_color_temp = None
+ group_address_color_temp_state = None
if config.get(CONF_COLOR_TEMP_MODE) == ColorTempModes.absolute:
- group_address_color_temperature = config.get(CONF_COLOR_TEMP_ADDRESS)
- group_address_color_temperature_state = \
+ group_address_color_temp = config.get(CONF_COLOR_TEMP_ADDRESS)
+ group_address_color_temp_state = \
config.get(CONF_COLOR_TEMP_STATE_ADDRESS)
elif config.get(CONF_COLOR_TEMP_MODE) == ColorTempModes.relative:
group_address_tunable_white = config.get(CONF_COLOR_TEMP_ADDRESS)
@@ -117,9 +117,8 @@ def async_add_entities_config(hass, config, async_add_entities):
group_address_color_state=config.get(CONF_COLOR_STATE_ADDRESS),
group_address_tunable_white=group_address_tunable_white,
group_address_tunable_white_state=group_address_tunable_white_state,
- group_address_color_temperature=group_address_color_temperature,
- group_address_color_temperature_state=\
- group_address_color_temperature_state,
+ group_address_color_temperature=group_address_color_temp,
+ group_address_color_temperature_state=group_address_color_temp_state,
min_kelvin=config.get(CONF_MIN_KELVIN),
max_kelvin=config.get(CONF_MAX_KELVIN))
hass.data[DATA_XKNX].xknx.devices.add(light)
|
WebUI: Fix RSE expression syntax
Additionally, the flow was modified so that the page may be correctly
loaded, even if either of the two requests fails. | * - Thomas Beermann, <[email protected]>, 2014-2015
* - Stefan Prenner, <[email protected]>, 2017-2018
* - Hannes Hansen, <[email protected]>, 2018
+ * - Dimitrios Christidis, <[email protected]>, 2019
*/
html_replicas_base = '<div id="t_replicas" class="columns panel">' +
@@ -877,33 +878,34 @@ $(document).ready(function(){
if ((storage.isSet('expiration') && Date.now() - storage.get('expiration') > 3600000) || !storage.isSet('expiration')) {
storage.set('expiration', Date.now());
r.list_rses({
- expression: 'browser_enabled=1&availability_read=1',
+ expression: 'browser_enabled=True&availability_read=True',
success: function(rses) {
browser_enabled_rses = [];
for(i = 0; i < rses.length; i++){
browser_enabled_rses[i] = rses[i]['rse'];
}
storage.set('browser_enabled_rses', browser_enabled_rses);
+ },
+ error: function(jqXHR, textStatus, errorThrown) {
+ console.log(jqXHR);
+ }
+ });
r.list_rses({
- expression: 'browser_enabled=1&availability_read=1&availability_write=1&type=SCRATCHDISK',
+ expression: 'browser_enabled=True&availability_read=True&availability_write=True&type=SCRATCHDISK',
success: function(rses) {
scratch_rses = [];
for(i = 0; i < rses.length; i++){
scratch_rses[i] = rses[i]['rse'];
}
storage.set('scratch_rses', scratch_rses);
- console.log("refreshed scratch storage");
- build_page();
},
error: function(jqXHR, textStatus, errorThrown) {
console.log(jqXHR);
}
});
}
- });
- } else {
- browser_enabled_rses = storage.get('browser_enabled_rses');
- scratch_rses = storage.get('scratch_rses');
+
+ browser_enabled_rses = storage.get('browser_enabled_rses') || [];
+ scratch_rses = storage.get('scratch_rses') || [];
build_page();
- }
});
|
DOC: added display tutorial
Added an instrument display tutorial, removed 2.x specific text and replaced it with a link to the pysat ecosystem wiki page. | @@ -35,45 +35,43 @@ up to four parameters
=============== ===================================
**Identifier** **Description**
--------------- -----------------------------------
- platform General platform instrument is on
+ platform Name of the platform supporting the instrument
name Name of the instrument
- tag Label for a subset of total data
+ tag Label for an instrument data product
inst_id Label for instrument sub-group
=============== ===================================
-All supported pysat Instruments for v2.x are stored in the pysat.instruments
-submodule. A listing of all currently supported instruments
-is available via help,
+In pysat v3.x+, Instruments are supported by separate instrument packages.
+Local instruments in pysat.instruments submodule have been developed for testing
+purposes. The current list and development status of different packages in
+the pysat ecosystem can be found on the
+`wiki <https://github.com/pysat/pysat/wiki/Pysat-Ecosystem-Status-Chart>`_.
-.. code:: python
-
- help(pysat.instruments)
-
-Each instrument listed will support one or more data sets for analysis. The
-submodules are named with the convention platform_name. To get
-a description of an instrument from a pysat package, you can use a searching
-function. This example uses the ``pysatMadrigal`` package.
+You can learn about the different Instruments in an instrument package using
+the utility display_available_instruments. When providing an Instrument
+submodule as input, this will list the Instrument module name, and all possible
+combinations of tags, inst_ids, and a description of the data (if available).
+This example with use the ``pysatMadrigal`` package.
.. code:: python
+ import pysat
import pysatMadrigal as pysatMad
- mad_inst = pysat.utils.generate_instrument_list(pysatMad.instruments)
- print(mad_inst['names'])
+ pysat.utils.display_available_instruments(pysatMad.instruments)
+You can see each listed instrument supports one or more data sets for analysis.
+The submodules are named with the convention platform_name. When supplying
+an Instrument submodule as input the display utility provides the submodule
+name instead of the platform and name because non-registered Instruments are
+instantiated using the ``inst_module`` keyword instead of the ``platform`` and
+``name`` keywords (jump to the :ref:`Instantiation` section below for more
+information). To use the ``platform`` and ``name`` keywords, the instrument
+must be registered. To display the registered instruments, no input is needed.
-Further, the dictionary::
-
- pysatMadrigal.instruments.dmsp_ivm.tags
-
-is keyed by ``tag`` with a description of each type of data
-the ``tag`` parameter selects. The dictionary::
-
- pysatMadrigal.instruments.dmsp_ivm.inst_ids
+.. code:: python
-indicates which instrument or satellite ids (``inst_id``) support which tag.
-The combination of ``tag`` and ``inst_id`` select the particular dataset
-a pysat.Instrument object will provide and interact with.
+ pysat.utils.display_available_instruments()
Instantiation
|
Update subscriber example in README to current patterns.
Closes | @@ -90,9 +90,9 @@ messages to it
.. code-block:: python
import os
- from google.cloud import pubsub
+ from google.cloud import pubsub_v1
- publisher = pubsub.PublisherClient()
+ publisher = pubsub_v1.PublisherClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id=os.getenv('GOOGLE_CLOUD_PROJECT'),
topic='MY_TOPIC_NAME', # Set this to something appropriate.
@@ -109,14 +109,14 @@ Subscribing
^^^^^^^^^^^
To subscribe to data in Cloud Pub/Sub, you create a subscription based on
-the topic, and subscribe to that.
+the topic, and subscribe to that, passing a callback function.
.. code-block:: python
import os
- from google.cloud import pubsub
+ from google.cloud import pubsub_v1
- subscriber = pubsub.SubscriberClient()
+ subscriber = pubsub_v1.SubscriberClient()
topic_name = 'projects/{project_id}/topics/{topic}'.format(
project_id=os.getenv('GOOGLE_CLOUD_PROJECT'),
topic='MY_TOPIC_NAME', # Set this to something appropriate.
@@ -127,17 +127,22 @@ the topic, and subscribe to that.
)
subscriber.create_subscription(
name=subscription_name, topic=topic_name)
- subscription = subscriber.subscribe(subscription_name)
-
-The subscription is opened asychronously, and messages are processed by
-use of a callback.
-
-.. code-block:: python
def callback(message):
print(message.data)
message.ack()
- subscription.open(callback)
+
+ future = subscriber.subscribe(subscription_name, callback)
+
+The future returned by the call to ``subscriber.subscribe`` can be used to
+block the current thread until a given condition obtains:
+
+.. code-block:: python
+
+ try:
+ future.result()
+ except KeyboardInterrupt:
+ future.cancel()
To learn more, consult the `subscriber documentation`_.
|
Update apt_gamaredon.txt
Chinese APT instead: | @@ -7637,16 +7637,6 @@ film.plazma.nagaimo.ru
luckily7.freebsdo.ru
released.luckily7.freebsdo.ru
-# Reference: https://twitter.com/ShadowChasing1/status/1506573766456864770
-# Reference: https://www.virustotal.com/gui/file/3001f0a05df31eee89d1bb3721b9cd060c1f20088d4e91bc1d0b243ba73e36f8/detection
-
-microtreely.com
-
-# Reference: https://twitter.com/ShadowChasing1/status/1506573768939909123
-# Reference: https://www.virustotal.com/gui/file/5b558c5fcbed8544cb100bd3db3c04a70dca02eec6fedffd5e3dcecb0b04fba0/detection
-
-miniboxmail.com
-
# Reference: https://www.virustotal.com/gui/ip-address/194.67.93.91/relations
aaa.windowsi.ru
|
stdlib/selectors: change timeout argument type to float
The Selector's code internally uses select.select and passes the
timeout argument to it. The documentation explicitly states the
timeout is a floating point number: | @@ -37,7 +37,7 @@ class BaseSelector(metaclass=ABCMeta):
def modify(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
@abstractmethod
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def close(self) -> None: ...
@@ -53,38 +53,38 @@ class BaseSelector(metaclass=ABCMeta):
class SelectSelector(BaseSelector):
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
class PollSelector(BaseSelector):
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
class EpollSelector(BaseSelector):
def fileno(self) -> int: ...
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
class DevpollSelector(BaseSelector):
def fileno(self) -> int: ...
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
class KqueueSelector(BaseSelector):
def fileno(self) -> int: ...
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
class DefaultSelector(BaseSelector):
def register(self, fileobj: _FileObject, events: _EventMask, data: Any = ...) -> SelectorKey: ...
def unregister(self, fileobj: _FileObject) -> SelectorKey: ...
- def select(self, timeout: Optional[int] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
+ def select(self, timeout: Optional[float] = ...) -> List[Tuple[SelectorKey, _EventMask]]: ...
def get_map(self) -> Mapping[_FileObject, SelectorKey]: ...
|
library/projections/associativeprojection: Explicitly set type of np.full
This used to always return array of np.float, but starting in
numpy-1.12[0] it returns the same type as fill value.
Set the type explicitly to preserve behaviour and avoid the warning.
[0] | @@ -519,11 +519,11 @@ def get_hetero_matrix(raw_hetero, size):
# similar to get_hetero_matrix() above
def get_auto_matrix(raw_auto, size):
if isinstance(raw_auto, numbers.Number):
- return np.diag(np.full(size, raw_auto))
+ return np.diag(np.full(size, raw_auto, dtype=np.float))
elif ((isinstance(raw_auto, np.ndarray) and raw_auto.ndim == 1) or
(isinstance(raw_auto, list) and np.array(raw_auto).ndim == 1)):
if len(raw_auto) == 1:
- return np.diag(np.full(size, raw_auto[0]))
+ return np.diag(np.full(size, raw_auto[0], dtype=np.float))
else:
if len(raw_auto) != size:
return None
|
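The numpy behaviour the commit describes is easy to see in isolation. This hedged snippet uses the builtin `float`, which is what the `np.float` alias in the patch referred to:

```python
import numpy as np

a = np.full(3, 2)                 # since numpy 1.12 the dtype follows the fill value
print(a.dtype)                    # an integer dtype, e.g. int64

b = np.full(3, 2, dtype=float)    # explicit dtype keeps the old float behaviour
print(b.dtype)                    # float64

print(np.diag(b))                 # 3x3 diagonal matrix of 2.0, as get_auto_matrix builds
```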
fix: raise a more appropriate exception if
TaskAdapter is missing a readable handler.
Also a better warning. | @@ -32,7 +32,7 @@ class TaskAdapter(logging.LoggerAdapter):
)
is_process_dummy = logger.name.endswith("_process")
if not is_readable and not is_process_dummy:
- warnings.warn("Task logger does not have ability to be read. Past history of the task cannot be utilized.")
+ warnings.warn(f"Logger '{logger.name}' for task '{self.task_name}' does not have ability to be read. Past history of the task cannot be utilized.")
def process(self, msg, kwargs):
""
@@ -82,8 +82,7 @@ class TaskAdapter(logging.LoggerAdapter):
yield from records
break
else:
- warnings.warn(f"Logger {self.logger.name} is not readable. Cannot get history.")
- return
+ raise AttributeError(f"Logger '{self.logger.name}' cannot be read. Missing readable handler.")
def get_latest(self, action:str=None) -> dict:
"""Get latest log record. Note that this
@@ -117,6 +116,9 @@ class TaskAdapter(logging.LoggerAdapter):
has_same_name = self.name == o.name
return is_same_type and has_same_logger and has_same_name
+ @property
+ def task_name(self):
+ return self.extra['task_name']
class TaskFilter(logging.Filter):
"""Filter only task related so one logger can be
|
Raise more specific errors when SSL handshake fails
Always raise CertificateError directly. | @@ -29,6 +29,12 @@ except ImportError:
class SSLError(socket.error):
pass
+try:
+ from ssl import CertificateError as _SSLCertificateError
+except ImportError:
+ class _SSLCertificateError(ValueError):
+ pass
+
from bson import DEFAULT_CODEC_OPTIONS
from bson.py3compat import imap, itervalues, _unicode, integer_types
@@ -252,7 +258,7 @@ else:
u'foo'.encode('idna')
-def _raise_connection_failure(address, error):
+def _raise_connection_failure(address, error, msg_prefix=None):
"""Convert a socket.error to ConnectionFailure and raise it."""
host, port = address
# If connecting to a Unix socket, port will be None.
@@ -260,6 +266,8 @@ def _raise_connection_failure(address, error):
msg = '%s:%d: %s' % (host, port, error)
else:
msg = '%s: %s' % (host, error)
+ if msg_prefix:
+ msg = msg_prefix + msg
if isinstance(error, socket.timeout):
raise NetworkTimeout(msg)
elif isinstance(error, SSLError) and 'timed out' in str(error):
@@ -833,9 +841,17 @@ def _configured_socket(address, options):
sock = ssl_context.wrap_socket(sock, server_hostname=host)
else:
sock = ssl_context.wrap_socket(sock)
+ except _SSLCertificateError:
+ sock.close()
+ # Raise CertificateError directly like we do after match_hostname
+ # below.
+ raise
except IOError as exc:
sock.close()
- raise ConnectionFailure("SSL handshake failed: %s" % (str(exc),))
+ # We raise AutoReconnect for transient and permanent SSL handshake
+ # failures alike. Permanent handshake failures, like protocol
+ # mismatch, will be turned into ServerSelectionTimeoutErrors later.
+ _raise_connection_failure(address, exc, "SSL handshake failed: ")
if (ssl_context.verify_mode and not
getattr(ssl_context, "check_hostname", False) and
options.ssl_match_hostname):
|
IECoreArnoldPreview : Remove unused lambda captures
Fixes OSX build:
src/GafferArnold/IECoreArnoldPreview/Renderer.cpp:2530:4: error: lambda capture 'this' is not used [-Werror,-Wunused-lambda-capture] | @@ -2527,7 +2527,7 @@ void LightFilterConnections::update()
parallel_for(
m_connections.range(),
- [this, &deregistered]( ConnectionsMap::range_type &range )
+ [&deregistered]( ConnectionsMap::range_type &range )
{
for( auto it = range.begin(); it != range.end(); ++it )
{
|
Predicate: minor refactoring
TN: | @@ -446,10 +446,10 @@ class Predicate(AbstractExpression):
])
# Append the debug image for the predicate
- closure_exprs.append(LiteralExpr('"{}.{}"'.format(
+ closure_exprs.append(untyped_literal_expr('"{}.{}"'.format(
self.pred_property.name.camel_with_underscores,
self.pred_property.struct.name().camel_with_underscores
- ), NoCompiledType))
+ )))
logic_var_exprs.append(
BasicExpr("Create ({})".format(", ".join(
|
Update setup-remote.md
Fixed link towards mumbai polygon scan | @@ -119,7 +119,7 @@ To get free (fake) MATIC on Mumbai:
2. Request funds for ADDRESS1
3. Request funds for ADDRESS2
-You can confirm receiving funds by going to the following url, and seeing your reported MATIC balance: `https://mumbai.polygonscan.com/<ADDRESS1 or ADDRESS2>`
+You can confirm receiving funds by going to the following url, and seeing your reported MATIC balance: `https://mumbai.polygonscan.com/address/<ADDRESS1 or ADDRESS2>`
## 4. Get (fake) OCEAN on Mumbai
|
Fix direct patches of methods in test_versionhandler.py
Direct patches of methods in unit tests may cause errors in other tests.
I use the decorator form to fix them. | import mock
import six
-from sahara.plugins.vanilla.hadoop2 import run_scripts as run
-from sahara.plugins.vanilla.hadoop2 import starting_scripts as s_scripts
from sahara.plugins.vanilla.v2_7_1.edp_engine import EdpOozieEngine
from sahara.plugins.vanilla.v2_7_1.edp_engine import EdpSparkEngine
from sahara.plugins.vanilla.v2_7_1 import versionhandler as v_h
@@ -72,25 +70,19 @@ class VersionHandlerTest(base.SaharaTestCase):
self.vh.configure_cluster(self.cluster)
configure_cluster.assert_called_once_with(self.vh.pctx, self.cluster)
+ @mock.patch(plugin_path + 'v2_7_1.versionhandler.run')
+ @mock.patch(plugin_path + 'v2_7_1.versionhandler.s_scripts')
@mock.patch('sahara.swift.swift_helper.install_ssl_certs')
@mock.patch(plugin_hadoop2_path + 'keypairs.provision_keypairs')
@mock.patch('sahara.plugins.utils.get_instances')
@mock.patch('sahara.utils.cluster.get_instances')
def test_start_cluster(self, c_get_instances, u_get_instances,
- provision_keypairs, install_ssl_certs):
+ provision_keypairs, install_ssl_certs,
+ s_scripts, run):
self.vh.pctx = mock.Mock()
instances = mock.Mock()
- s_scripts.start_namenode = mock.Mock()
- s_scripts.start_secondarynamenode = mock.Mock()
- s_scripts.start_resourcemanager = mock.Mock()
- s_scripts.start_historyserver = mock.Mock()
- s_scripts.start_oozie = mock.Mock()
- s_scripts.start_hiveserver = mock.Mock()
- s_scripts.start_spark = mock.Mock()
c_get_instances.return_value = instances
u_get_instances.return_value = instances
- run.await_datanodes = mock.Mock()
- run.start_dn_nm_processes = mock.Mock()
self.vh._set_cluster_info = mock.Mock()
self.vh.start_cluster(self.cluster)
provision_keypairs.assert_called_once_with(self.cluster)
|
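The underlying issue is general to `unittest.mock`: assigning a `Mock` directly onto an imported module or class attribute is never undone, while `mock.patch` restores the original when the test finishes. A hedged, self-contained illustration of the leak using a throwaway class:

```python
from unittest import mock

class Service:
    def fetch(self):
        return "real"

# Direct assignment, like the old test did with run/s_scripts: the mock leaks
# into every test that runs afterwards.
Service.fetch = mock.Mock(return_value="fake")
print(Service().fetch())  # fake

# mock.patch (as a decorator or context manager) undoes itself on exit.
with mock.patch.object(Service, "fetch", return_value="scoped"):
    print(Service().fetch())  # scoped
print(Service().fetch())      # still "fake": only the patch was reverted, the leak remains
```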
Added possibility to propagate collected variant
context.data["variant"] might be filled only by collect_batch_data, which should take precedence | @@ -22,9 +22,11 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
break
family = "workfile"
+ # context.data["variant"] might come only from collect_batch_data
+ variant = context.data.get("variant") or self.default_variant
subset = get_subset_name_with_asset_doc(
family,
- self.default_variant,
+ variant,
context.data["anatomyData"]["task"]["name"],
context.data["assetEntity"],
context.data["anatomyData"]["project"]["name"],
|
fix bbox format in flip_bbox
bbox is in range of [0, H] or [0, W] | @@ -26,13 +26,13 @@ def flip_bbox(bbox, size, y_flip=False, x_flip=False):
H, W = size
bbox = bbox.copy()
if y_flip:
- y_max = H - 1 - bbox[:, 0]
- y_min = H - 1 - bbox[:, 2]
+ y_max = H - bbox[:, 0]
+ y_min = H - bbox[:, 2]
bbox[:, 0] = y_min
bbox[:, 2] = y_max
if x_flip:
- x_max = W - 1 - bbox[:, 1]
- x_min = W - 1 - bbox[:, 3]
+ x_max = W - bbox[:, 1]
+ x_min = W - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
return bbox
|
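The off-by-one matters because these boxes live on continuous coordinates in [0, H] x [0, W] rather than on integer pixel indices, so a vertical flip is y -> H - y; with the old H - 1 - y form, a box spanning the full height [0, H] would flip to [-1, H - 1] and leak outside the image. A hedged sketch (a hypothetical helper, not the library function) of the corrected flip:

```python
import numpy as np

def flip_bbox_y(bbox, H):
    # Rows are (y_min, x_min, y_max, x_max) on the continuous [0, H] range,
    # matching the columns used in the diff above.
    out = bbox.copy()
    out[:, 0] = H - bbox[:, 2]   # new y_min comes from the old y_max
    out[:, 2] = H - bbox[:, 0]   # new y_max comes from the old y_min
    return out

bbox = np.array([[0.0, 10.0, 30.0, 50.0]])
flipped = flip_bbox_y(bbox, H=100.0)
print(flipped)                                         # [[ 70.  10. 100.  50.]]
print(np.allclose(flip_bbox_y(flipped, 100.0), bbox))  # True: flipping twice restores the box
```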
fix typo in 05_laplace.ipynb
fix typo in 05_laplace.ipynb | "metadata": {},
"source": [
"Ok, nice. Now, to re-create this example in Devito we need to look a little bit further under the hood. There are two things that make this different to the examples we covered so far:\n",
- "* We have no time dependence in the `p` field, but we still need to advance the state of p in between buffers. So, instead of using `TimeFunction` objects that provide multiple data buffers for timestepping schemes, we will use `Function` objects that have no time dimension and only allocate a single buffer according to the space dimensions. However, since we are still implementing a pseudo-timestepping loop, we will need to objects, say `p` and `pn`, to act as alternating buffers.\n",
+ "* We have no time dependence in the `p` field, but we still need to advance the state of p in between buffers. So, instead of using `TimeFunction` objects that provide multiple data buffers for timestepping schemes, we will use `Function` objects that have no time dimension and only allocate a single buffer according to the space dimensions. However, since we are still implementing a pseudo-timestepping loop, we will need two objects, say `p` and `pn`, to act as alternating buffers.\n",
"* If we're using two different symbols to denote our buffers, any operator we create will only perform a single timestep. This is desired though, since we need to check a convergence criteria outside of the main stencil update to determine when we stop iterating. As a result we will need to call the operator repeatedly after instantiating it outside the convergence loop.\n",
"\n",
"So, how do we make sure our operator doesn't accidentally overwrite values in the same buffer? Well, we can again let SymPy reorganise our Laplace equation based on `pn` to generate the stencil, but when we create the update expression, we set the LHS to our second buffer variable `p`."
|
Fix git sha detection in gunicorn_start
This script doesn't run within the git directory so we need to specify
the git path. | @@ -34,7 +34,7 @@ export PYTHONPATH=$APPS_ROOT:$PYTHONPATH
export NEW_RELIC_CONFIG_FILE="$REPO_ROOT/newrelic.ini"
export NEW_RELIC_ENVIRONMENT=$ENV
-export SOURCE_COMMIT_ID="$(git rev-parse HEAD)"
+export SOURCE_COMMIT_ID="$(git --git-dir="$REPO_ROOT/.git" rev-parse HEAD)"
echo "Starting $NAME"
echo "whoami: $(whoami)"
|
TreeSource: test removing another child
to confirm that we get a different index in the notification callback | @@ -666,7 +666,7 @@ class TreeSourceTests(TestCase):
listener = Mock()
source.add_listener(listener)
- # Remove the child element
+ # Remove "third.two"
node = source.remove(source[2][1])
self.assertEqual(len(source), 3)
@@ -674,6 +674,14 @@ class TreeSourceTests(TestCase):
listener.remove.assert_called_once_with(item=node, index=1, parent=source[2])
+ # Remove "third.one"
+ node = source.remove(source[2][0])
+
+ self.assertEqual(len(source), 3)
+ self.assertEqual(len(source[2]), 0)
+
+ listener.remove.assert_any_call(item=node, index=0, parent=source[2])
+
def test___setitem___for_root(self):
"A root can be set (changed) with __setitem__"
source = TreeSource(
|
Fixed getter for zoom sensitivity
There was a copy paste error with the zoom sensitivity | @@ -561,7 +561,7 @@ class OrbitCamera(Camera):
This property can also be set::
camera.zoom_sensitivity = 2.5
"""
- return self._mouse_sensitivity
+ return self._zoom_sensitivity
@zoom_sensitivity.setter
def zoom_sensitivity(self, value: float):
|
Unparsers: reject inconsistent postfix parsers for regular nodes
TN: | @@ -812,6 +812,11 @@ class RegularNodeUnparser(NodeUnparser):
other_inter
)
+ self.post_tokens.check_equivalence(
+ 'postfix tokens for {}'.format(self.node.dsl_name),
+ other.post_tokens
+ )
+
result = RegularNodeUnparser(self.node)
result.pre_tokens = self.pre_tokens
result.post_tokens = self.post_tokens
|
Added autospec=True to test_disks.py unit test
Added autospec=True to "patch" instructions
Extracted patches from "with" block and pre-defined them to improve
the code readability | @@ -117,9 +117,13 @@ class DisksGrainsTestCase(TestCase, LoaderModuleMockMixin):
"1",
"1",
]
- with patch("glob.glob", MagicMock(return_value=files)), patch(
- "salt.utils.path.readlink", MagicMock(side_effect=links)
- ), patch("salt.utils.files.fopen", mock_open(read_data=contents)):
+
+ patch_glob = patch("glob.glob", autospec=True, return_value=files)
+ patch_readlink = patch(
+ "salt.utils.path.readlink", autospec=True, side_effect=links
+ )
+ patch_fopen = patch("salt.utils.files.fopen", mock_open(read_data=contents))
+ with patch_glob, patch_readlink, patch_fopen:
ret = disks._linux_disks()
assert ret == {"disks": ["sda", "sdb", "vda"], "SSDs": []}, ret
|
Updated README.md:
Small Changes | @@ -4,7 +4,7 @@ Analyze FPGA tool performance (MHz, resources, runtime, etc)
## Setup environment
-fpga-tool-perf uses the Anaconda/Miniconda (conda) package manager to install and get all the required tools.
+fpga-tool-perf uses the Miniconda (conda) package manager to install and get all the required tools.
Currently, the following tools that are available in conda are:
- vtr
@@ -129,7 +129,7 @@ Its also possible to run a test against a single toolchain and/or project:
python3 exhaust.py --project blinky --toolchain vpr
```
-See build directory for output. Note in particular `all.json`.
+See `build` directory for output. Note in particular `all.json`.
## Project Structure
|
osd: validate devices variable input
Fail with a sane message if the devices or raw_journal_devices variables
are strings instead of lists during manual device assignment. | - not osd_auto_discovery
- devices|length == 0
+- name: make sure the devices variable is a list
+ fail:
+ msg: "devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
+ when:
+ - osd_group_name is defined
+ - osd_group_name in group_names
+ - not osd_auto_discovery
+ - devices is string
+
- name: verify journal devices have been provided
fail:
msg: "please provide devices and raw journal devices to your osd scenario"
- raw_journal_devices|length == 0
or devices|length == 0
+- name: make sure the raw_journal_devices variable is a list
+ fail:
+ msg: "raw_journal_devices: must be a list, not a string, i.e. [ \"/dev/sda\" ]"
+ when:
+ - osd_group_name is defined
+ - osd_group_name in group_names
+ - not containerized_deployment
+ - raw_multi_journal
+ - raw_journal_devices is string
+ - raw_journal_devices|length == 0
+ or devices|length == 0
+
- name: check if bluestore is supported by the selected ceph version
fail:
msg: "bluestore is not supported by the selected Ceph version, use Luminous or above."
|
Pin pytest-django to <3.2.0 while we're still not on Django 1.11
* pytest-django 3.2.0 dropped Django 1.8 support
Refs | @@ -33,7 +33,7 @@ pytest-cache==1.0 \
--hash=sha256:be7468edd4d3d83f1e844959fd6e3fd28e77a481440a7118d430130ea31b07a9
pytest-django==3.1.2 \
--hash=sha256:038ccc5a9daa1b1b0eb739ab7dce54e495811eca5ea3af4815a2a3ac45152309 \
- --hash=sha256:00995c2999b884a38ae9cd30a8c00ed32b3d38c1041250ea84caf18085589662
+ --hash=sha256:00995c2999b884a38ae9cd30a8c00ed32b3d38c1041250ea84caf18085589662 # pyup: <3.2.0
pytest-xdist==1.22.2 \
--hash=sha256:be2662264b035920ba740ed6efb1c816a83c8a22253df7766d129f6a7bfdbd35 \
--hash=sha256:e8f5744acc270b3e7d915bdb4d5f471670f049b6fbd163d4cbd52203b075d30f
|
Additional information
1. Link to details on what and how to include information in a .env file
2. Clarify how to activate the FORWARDED_ALLOW_IPS environment variable for those situations where the proxy is not at the default 127.0.0.1 (most often inside a Docker container). | +#For more information on .env files, their content and format: https://pypi.org/project/python-dotenv/
+
HOST=127.0.0.1
PORT=5000
-# uvicorn variable, allow https behind a proxy
+# uvicorn variable, uncomment to allow https behind a proxy
# FORWARDED_ALLOW_IPS="*"
DEBUG=false
|
bug fix - step selection query in tags
Summary:
fix `!(tag.key in ["dagster/is_resume_retry", "dagster/step_selection"])` in D3065
it would always be false
Test Plan:
`dagster/step_selection` doesn't get passed to a child run e.g. a full pipeline run
{F146367}
Reviewers: max, bengotow, prha | @@ -169,10 +169,14 @@ function getExecutionMetadata(
parentRunId: run.runId,
rootRunId: run.rootRunId ? run.rootRunId : run.runId,
tags: [
+ // Clean up tags related to run grouping once we decide its persistence
+ // https://github.com/dagster-io/dagster/issues/2495
...run.tags
.filter(
tag =>
- !(tag.key in ["dagster/is_resume_retry", "dagster/step_selection"])
+ !["dagster/is_resume_retry", "dagster/step_selection"].includes(
+ tag.key
+ )
)
.map(tag => ({
key: tag.key,
@@ -184,7 +188,6 @@ function getExecutionMetadata(
value: resumeRetry.toString()
},
// pass run group info via tags
- // https://github.com/dagster-io/dagster/issues/2495
{
key: "dagster/parent_run_id",
value: run.runId
|
Update calix_b6.py
Moved
def __init__(self, *args, **kwargs):
super(CalixB6SSH, self).__init__(*args, **kwargs)
from CalixB6Base to CalixB6SSH | @@ -21,7 +21,6 @@ class CalixB6Base(CiscoSSHConnection):
def __init__(self, *args, **kwargs):
default_enter = kwargs.get('default_enter')
kwargs['default_enter'] = '\r\n' if default_enter is None else default_enter
- super(CalixB6SSH, self).__init__(*args, **kwargs)
def session_preparation(self):
"""Prepare the session after the connection has been established."""
@@ -74,6 +73,9 @@ class CalixB6SSH(CalixB6Base):
To make it work, we have to override the SSHClient _auth method and manually handle
the username/password.
"""
+ def __init__(self, *args, **kwargs):
+ super(CalixB6SSH, self).__init__(*args, **kwargs)
+
def _build_ssh_client(self):
"""Prepare for Paramiko SSH connection."""
# Create instance of SSHClient object
|
fix: add cloudvolume.datasource.graphene to packages
docs: add Windows 10 Trove classifier | @@ -30,6 +30,7 @@ setuptools.setup(
'cloudvolume',
'cloudvolume.datasource',
'cloudvolume.datasource.boss',
+ 'cloudvolume.datasource.graphene',
'cloudvolume.datasource.precomputed',
'cloudvolume.frontends',
'cloudvolume.storage',
@@ -61,6 +62,7 @@ setuptools.setup(
"Intended Audience :: Science/Research",
"Operating System :: POSIX",
"Operating System :: MacOS",
+ "Operating System :: Microsoft :: Windows :: Windows 10",
"Topic :: Utilities",
],
)
|
doc: Path not provided for "check_message" and "do_send_message".
In the "Sending messages" documentation, the path for the `check_message` and `do_send_message` functions is not provided, so I added the path of both for future contributors. | @@ -70,12 +70,12 @@ number of purposes:
`apply_markdown` and `client_gravatar` features in our
[events API docs](https://zulip.com/api/register-queue)).
* Following our standard naming convention, input validation is done
- inside the `check_message` function, which is responsible for
+ inside the `check_message` function in `zerver/lib/actions.py`, which is responsible for
validating the user can send to the recipient,
[rendering the Markdown](../subsystems/markdown.md), etc. --
basically everything that can fail due to bad user input.
* The core `do_send_messages` function (which handles actually sending
- the message) is one of the most optimized and thus complex parts of
+ the message) in `zerver/lib/actions.py` is one of the most optimized and thus complex parts of
the system. But in short, its job is to atomically do a few key
things:
* Store a `Message` row in the database.
|
Don't confirm overwrite when appending to a .h5 file
For this, we need to use a Qt dialog, because the native dialog does not seem to emit filterSelected | @@ -146,6 +146,9 @@ class SaveAction(PlotAction):
SCATTER_FILTER_NXDATA = 'Scatter as NXdata (%s)' % _NEXUS_HDF5_EXT_STR
DEFAULT_SCATTER_FILTERS = (SCATTER_FILTER_NXDATA,)
+ DEFAULT_ALL_NXDATA_FILTERS = (CURVE_FILTER_NXDATA, IMAGE_FILTER_NXDATA,
+ SCATTER_FILTER_NXDATA)
+
def __init__(self, plot, parent=None):
self._filters = {
'all': OrderedDict(),
@@ -554,6 +557,7 @@ class SaveAction(PlotAction):
# Create and run File dialog
dialog = qt.QFileDialog(self.plot)
+ dialog.setOption(dialog.DontUseNativeDialog)
dialog.setWindowTitle("Output File Selection")
dialog.setModal(1)
dialog.setNameFilters(list(filters.keys()))
@@ -561,6 +565,16 @@ class SaveAction(PlotAction):
dialog.setFileMode(dialog.AnyFile)
dialog.setAcceptMode(dialog.AcceptSave)
+ def onFilterSelection(filt_):
+ # disable overwrite confirmation for NXdata types,
+ # because we append the data to existing files
+ if filt_ in self.DEFAULT_ALL_NXDATA_FILTERS:
+ dialog.setOption(dialog.DontConfirmOverwrite)
+ else:
+ dialog.setOption(dialog.DontConfirmOverwrite, False)
+
+ dialog.filterSelected.connect(onFilterSelection)
+
if not dialog.exec_():
return False
|
Update README.md
add `Known Issues` to the TOC. | @@ -10,6 +10,7 @@ This project consists of documentation, example files, a Python-based test harne
* [Prerequisites](#prerequisites)
* [Get Help](#get-help)
* [FAQ](#faq)
+* [Known Issues](#known-issues)
* [Contributions](#contributions)
# Why Connectors?
|
[ci] Always assume num executors == 1
This is true now and we've seen problems like This could have arisen from the EC2 user data script that is supposed to set up this env variable failing or something, but we don't really need it in the first place. | @@ -63,9 +63,6 @@ if __name__ == "__main__":
logging.info("===== sccache stats =====")
sh.run("sccache --show-stats")
- if "CI" in os.environ:
- executors = int(os.environ["CI_NUM_EXECUTORS"])
- else:
executors = int(os.environ.get("CI_NUM_EXECUTORS", 1))
nproc = multiprocessing.cpu_count()
|
Lexus: add FW for 2020 Lexus RX Hybrid
add: fingerprint for lexus rxh h 2020 | @@ -1823,15 +1823,18 @@ FW_VERSIONS = {
b'\x02348Y3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D14000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D16000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
+ b'\x02348X4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
b'F152648891\x00\x00\x00\x00\x00\x00',
b'F152648D00\x00\x00\x00\x00\x00\x00',
b'F152648D60\x00\x00\x00\x00\x00\x00',
+ b'F152648811\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
+ b'8965B48261\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
|
translates .c code
ops_translator running and generating CUDA | @@ -27,5 +27,6 @@ class CompilerOPS(configuration['compiler'].__class__):
c_file.close()
h_file.close()
+ # Calling OPS Translator
translator = '%s/../ops_translator/c/ops.py' % (self._ops_install_path)
- subprocess.run([translator,c_file.name])
\ No newline at end of file
+ subprocess.run([translator, c_file.name], cwd=self.get_jit_dir())
|
Update Dictionaries.md
Organizing the layout of the sections and updating the code. | @@ -58,43 +58,32 @@ Add "Jake" to the phonebook with the phone number 938273443, and remove Jill fro
Tutorial Code
-------------
+# write your code here
phonebook = {
"John" : 938477566,
"Jack" : 938377264,
"Jill" : 947662781
}
-# write your code here
-
-
-# testing code
-if "Jake" in phonebook:
- print("Jake is listed in the phonebook.")
-if "Jill" not in phonebook:
- print("Jill is not listed in the phonebook.")
-
-Expected Output
----------------
-
-test_output_contains("Jake is listed in the phonebook.")
-test_output_contains("Jill is not listed in the phonebook.")
-success_msg("Nice work!")
-
Solution
--------
phonebook = {
"John" : 938477566,
- "Jack" : 938377264,
- "Jill" : 947662781
+ "Jack" : 938273443,
}
-
-# write your code here
phonebook["Jake"] = 938273443
del phonebook["Jill"]
# testing code
-if "Jake" in phonebook:
+if "Jake" in phonebook and "Jill" not in phonebook:
print("Jake is listed in the phonebook.")
-if "Jill" not in phonebook:
print("Jill is not listed in the phonebook.")
+ print("Nice work!")
+
+Expected Output
+---------------
+
+>> Jake is listed in the phonebook.
+>> Jill is not listed in the phonebook.
+>> Nice work!
|
Update hist_scipy.py example
It seems like normed=True argument is no longer accepted. | @@ -18,7 +18,7 @@ def display_histogram_scipy(bench, mean, bins):
pylab.plot(values, fit, '-o', label='mean-stdev')
plt.legend(loc='upper right', shadow=True, fontsize='x-large')
- pylab.hist(values, bins=bins, normed=True)
+ pylab.hist(values, bins=bins)
pylab.show()
|
Add test for vertical bars
This was one of the suggestions in
We just replicate the init and configure test for widgets that
support vertical bars. | @@ -27,7 +27,7 @@ import libqtile.config
import libqtile.confreader
import libqtile.layout
import libqtile.widget as widgets
-from libqtile.widget.base import ORIENTATION_VERTICAL
+from libqtile.widget.base import ORIENTATION_BOTH, ORIENTATION_VERTICAL
from libqtile.widget.clock import Clock
from libqtile.widget.crashme import _CrashMe
from test.widgets.conftest import FakeBar
@@ -112,6 +112,40 @@ def test_widget_init_config(manager_nospawn, minimal_conf_noscreen, widget_class
assert i["widgets"][0]["name"] == widget.name
[email protected](
+ "widget_class,kwargs",
+ [
+ param
+ for param in parameters
+ if param[0]().orientations in [ORIENTATION_BOTH, ORIENTATION_VERTICAL]
+ ],
+)
+def test_widget_init_config_vertical_bar(
+ manager_nospawn, minimal_conf_noscreen, widget_class, kwargs
+):
+ if widget_class in exclusive_backend:
+ if exclusive_backend[widget_class] != manager_nospawn.backend.name:
+ pytest.skip("Unsupported backend")
+
+ widget = widget_class(**kwargs)
+ widget.draw = no_op
+
+ # If widget inits ok then kwargs will now be attributes
+ for k, v in kwargs.items():
+ assert getattr(widget, k) == v
+
+ # Test configuration
+ config = minimal_conf_noscreen
+ config.screens = [libqtile.config.Screen(left=libqtile.bar.Bar([widget], 10))]
+
+ manager_nospawn.start(config)
+
+ i = manager_nospawn.c.bar["left"].info()
+
+ # Check widget is registered by checking names of widgets in bar
+ assert i["widgets"][0]["name"] == widget.name
+
+
def test_incompatible_orientation(fake_qtile, fake_window):
clk1 = Clock()
clk1.orientations = ORIENTATION_VERTICAL
|
Correction: File solv01.py
Identifier 'max' changed to 'maxNumber', since 'max' is a function. | @@ -17,7 +17,7 @@ def isprime(no):
return False
return True
-max=0
+maxNumber = 0
n=int(input())
if(isprime(n)):
print n
@@ -31,8 +31,8 @@ else:
for i in range(3,n1,2):
if(n%i==0):
if(isprime(n/i)):
- max=n/i
+ maxNumber = n/i
break
elif(isprime(i)):
- max=i
- print max
+ maxNumber = i
+ print maxNumber
|
API documentation for mango execution stats
Adds basic documentation for the execution stats parameter
in Mango/Query. | *Optional*
:<json number skip: Skip the first 'n' results, where 'n' is the value
specified. *Optional*
- :<json array sort: JSON array following :ref:`sort syntax <find/sort>`.
+ :<json json sort: JSON array following :ref:`sort syntax <find/sort>`.
*Optional*
:<json array fields: JSON array specifying which fields of each object
should be returned. If it is omitted, the entire object is returned.
from a "stable" set of shards. *Optional*
:<json string stale: Combination of ``update=false`` and ``stable=true``
options. Possible options: ``"ok"``, ``false`` (default). *Optional*
+ :<json boolean execution_stats: Include
+ :ref:`execution statistics <find/statistics>` in the query response.
+ *Optional, default: ``false``*
:>header Content-Type: :mimetype:`application/json`
:>header Transfer-Encoding: ``chunked``
:>json object docs: Array of documents matching the search. In each matching
document, the fields specified in the ``fields`` part of the request
body are listed, along with their values.
+ :>json string warning: Execution warnings
+ :>json object execution_stats: Execution statistics
:code 200: Request completed successfully
:code 400: Invalid request
@@ -98,7 +103,8 @@ Example request body for finding documents using an index:
"fields": ["_id", "_rev", "year", "title"],
"sort": [{"year": "asc"}],
"limit": 2,
- "skip": 0
+ "skip": 0,
+ "execution_stats": true
}
**Response**:
@@ -128,7 +134,14 @@ Example response when finding documents using an index:
"year": 2011,
"title": "Drive"
}
- ]
+ ],
+ "execution_stats": {
+ "total_keys_examined": 0,
+ "total_docs_examined": 200,
+ "total_quorum_docs_examined": 0,
+ "results_returned": 2,
+ "execution_time_ms": 5.52
+ }
}
.. _find/selectors:
@@ -848,6 +861,45 @@ more results. You can to test whether you have reached the end of the
result set by comparing the number of results returned with the page
size requested - if results returned < `limit`, there are no more.
+.. _find/statistics:
+
+Execution Statistics
+====================
+
+Find can return basic execution statistics for a specific request. Combined with
+the :ref:`_explain <api/db/find/explain>` endpoint, this should provide some
+insight as to whether indexes are being used effectively.
+
+The execution statistics currently include:
+
++--------------------------------+--------------------------------------------+
+| Field | Description |
++================================+============================================+
+| ``total_keys_examined`` | Number of index keys examined. |
+| | Currently always 0. |
++--------------------------------+--------------------------------------------+
+| ``total_docs_examined`` | Number of documents fetched from the |
+| | database / index, equivalent to using |
+| | ``include_docs=true`` in a view. |
+| | These may then be filtered in-memory to |
+| | further narrow down the result set based |
+| | on the selector. |
++--------------------------------+--------------------------------------------+
+| ``total_quorum_docs_examined`` | Number of documents fetched from the |
+| | database using an out-of-band document |
+| | fetch. This is only non-zero when read |
+| | quorum > 1 is specified in the query |
+| | parameters. |
++--------------------------------+--------------------------------------------+
+| ``results_returned`` | Number of results returned from the query. |
+| | Ideally this should not be significantly |
+| | lower than the total documents / keys |
+| | examined. |
++--------------------------------+--------------------------------------------+
+| ``execution_time_ms`` | Total execution time in milliseconds as |
+| | measured by the database. |
++--------------------------------+--------------------------------------------+
+
.. _api/db/find/index:
================
|
Update terminology in element list
Subject to change | @@ -186,7 +186,7 @@ i (a: any, b: number) = a[b] # index
(a: any, b: [x, y, m]) = a[x:y:m] # index, xth to yth item step m items, needs wrapping index
j (a: any, b: any) = a.join(b) # join
k = * constant digraphs (see near the end of docs)
-l (a: any, b: number) = n-wise_group(a, b) # Cummulative grouping/pairing
+l (a: any, b: number) = subvectors of a of length b # Equivalent to b,/a in APL
(a: any, b: non-number) = length(a) == length(b) # Length of a and b are same?
m (a: any) = a + reversed(a) # mirror, mirrored, palindromised (Use vertical mirror below for ASCII art)
n = * context variable
|
Fix testLossLearnerDifferentDistStratDQN on GPUs.
Add 4 logical GPU devices for one physical GPU to test MirroredStrategy on 4
mini-batches. | @@ -43,14 +43,24 @@ class LearnerTest(test_utils.TestCase, parameterized.TestCase):
def setUp(self):
super(LearnerTest, self).setUp()
- devices = tf.config.list_physical_devices('CPU')
+ devices_cpu = tf.config.list_physical_devices('CPU')
+ devices_gpu = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_virtual_device_configuration(
- devices[0], [
+ devices_cpu[0], [
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration(),
tf.config.experimental.VirtualDeviceConfiguration()
])
+ # If there are GPU devices:
+ if devices_gpu:
+ tf.config.experimental.set_virtual_device_configuration(
+ devices_gpu[0], [
+ tf.config.LogicalDeviceConfiguration(memory_limit=10),
+ tf.config.LogicalDeviceConfiguration(memory_limit=10),
+ tf.config.LogicalDeviceConfiguration(memory_limit=10),
+ tf.config.LogicalDeviceConfiguration(memory_limit=10)
+ ])
def _build_learner_with_strategy(self,
create_agent_and_dataset_fn,
|
Serve static in production mode
This change is required to deliver a Galaxy application as a
standalone container, which by default does not require any
additional web server (e.g. nginx) to serve static files, so it can
run autonomously. | # along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.conf.urls import patterns, url
-from galaxy.main.views import RoleListView, RoleDetailView, NamespaceListView
from django.conf import settings
-from django.contrib.staticfiles.views import serve as serve_static
from django.views.decorators.cache import never_cache
+from django.contrib.staticfiles.views import serve as serve_staticfiles
+from django.views.static import serve as serve_static
+
+from galaxy.main.views import RoleListView, RoleDetailView, NamespaceListView
urlpatterns = patterns(
'galaxy.main.views',
@@ -51,5 +53,14 @@ urlpatterns = patterns(
url(r'^([\w\-._+]+)/([\w\-._+]+)/$', RoleDetailView.as_view(), name='role-detail'),
)
+# FIX
if settings.DEBUG:
- urlpatterns += patterns('', url(r'^static/(?P<path>.*)$', never_cache(serve_static)),)
+ urlpatterns += [
+ url(r'^static/(?P<path>.*)$',
+ never_cache(serve_staticfiles))
+ ]
+else:
+ urlpatterns += [
+ url(r'^static/(?P<path>.*)$', serve_static,
+ kwargs={'document_root': settings.STATIC_ROOT})
+ ]
|
tests: replace unnecessary `map()` call in test_live
caught by flake8 | @@ -184,14 +184,10 @@ def checkpoints_metric(show_results, metric_file, metric_name):
tmp.pop("workspace")
tmp = first(tmp.values())
tmp.pop("baseline")
- return list(
- map(
- lambda exp: exp["data"]["metrics"][metric_file]["data"][
- metric_name
- ],
- list(tmp.values()),
- )
- )
+ return [
+ exp["data"]["metrics"][metric_file]["data"][metric_name]
+ for exp in tmp.values()
+ ]
@pytest.mark.parametrize("typ", ("live", "live_no_cache"))
|
Disambiguate which executor blocks are launched on
Elaborate on x->y notation to be clear what the
x and y values are in the log message | @@ -177,7 +177,7 @@ class BlockProviderExecutor(ParslExecutor):
launch_cmd = self._get_launch_command(block_id)
job_id = self.provider.submit(launch_cmd, 1)
if job_id:
- logger.debug("Launched block {}->{}".format(block_id, job_id))
+ logger.debug(f"Launched block {block_id} on executor {self.label} with job ID {job_id}")
else:
raise ScalingFailed(self,
"Attempt to provision nodes did not return a job ID")
|
Add example for scipy stats.trim1 under docstring
initialize a as np.arange(20), apply trim, 50%, to the left, assign result to b and print b | @@ -3324,6 +3324,14 @@ def trim1(a, proportiontocut, tail='right', axis=0):
Trimmed version of array `a`. The order of the trimmed content is
undefined.
+ Examples
+ --------
+ >>> from scipy import stats
+ >>> a = np.arange(20)
+ >>> b = stats.trim1(a, 0.5, 'left')
+ >>> b
+ array([10, 11, 12, 13, 14, 16, 15, 17, 18, 19])
+
"""
a = np.asarray(a)
if axis is None:
|
ebuild.profiles: _load_and_invoke(): don't catch/rethrow all exceptions as ProfileErrors
Since non-ProfileError exceptions are generally internal errors and
should dump a traceback so as not to look like regular error cases. | @@ -113,12 +113,7 @@ def _load_and_invoke(func, filename, handler, fallback, read_func,
if handler:
data = handler(data)
return func(self, data)
- except IGNORED_EXCEPTIONS:
- raise
- except ProfileError:
- # no point in wrapping/throwing..
- raise
- except Exception as e:
+ except (ValueError, IndexError) as e:
raise ProfileError(profile_path, filename, e) from e
|
Add more temporary logging to Swarming to debug /poll timeouts.
Review-Url: | @@ -267,10 +267,7 @@ class _BotBaseHandler(_BotApiHandler):
REQUIRED_STATE_KEYS = {u'running_time', u'sleep_streak'}
def _process(self):
- """Returns True if the bot has invalid parameter and should be automatically
- quarantined.
-
- Does one DB synchronous GET.
+ """Fetches bot info and settings, does authorization and quarantine checks.
Returns:
_ProcessResult instance, see its fields for more info.
@@ -293,6 +290,7 @@ class _BotBaseHandler(_BotApiHandler):
lease_expiration_ts = None
machine_type = None
if bot_id:
+ logging.debug('Fetching bot info and settings')
bot_info, bot_settings = ndb.get_multi([
bot_management.get_info_key(bot_id),
bot_management.get_settings_key(bot_id)])
@@ -302,6 +300,7 @@ class _BotBaseHandler(_BotApiHandler):
# Make sure bot self-reported ID matches the authentication token. Raises
# auth.AuthorizationError if not.
+ logging.debug('Fetching bot group config')
bot_group_cfg = bot_auth.validate_bot_id_and_fetch_config(
bot_id, machine_type)
@@ -465,6 +464,7 @@ class BotPollHandler(_BotBaseHandler):
It makes recovery of the fleet in case of catastrophic failure much easier.
"""
+ logging.debug('Request started')
if config.settings().force_bots_to_sleep_and_not_run_task:
# Ignore everything, just sleep. Tell the bot it is quarantined to inform
# it that it won't be running anything anyway. Use a large streak so it
@@ -476,6 +476,8 @@ class BotPollHandler(_BotBaseHandler):
sleep_streak = res.state.get('sleep_streak', 0)
quarantined = bool(res.quarantined_msg)
+ logging.debug('Updating stats')
+
# Note bot existence at two places, one for stats at 1 minute resolution,
# the other for the list of known bots.
action = 'bot_inactive' if quarantined else 'bot_active'
@@ -492,6 +494,7 @@ class BotPollHandler(_BotBaseHandler):
# Bot version is host-specific because the host URL is embedded in
# swarming_bot.zip
+ logging.debug('Fetching bot code version')
expected_version, _ = bot_code.get_bot_version(
self.get_bot_contact_server())
if res.version != expected_version:
@@ -533,6 +536,7 @@ class BotPollHandler(_BotBaseHandler):
# The bot is in good shape. Try to grab a task.
try:
# This is a fairly complex function call, exceptions are expected.
+ logging.debug('Reaping task')
request, secret_bytes, run_result = task_scheduler.bot_reap_task(
res.dimensions, res.bot_id, res.version, res.lease_expiration_ts)
if not request:
|
Update release notes generator
Tested-by: Build Bot
Tested-by: Ellis Breen | @@ -25,7 +25,7 @@ soup = BeautifulSoup(relnotes_raw.text, 'html.parser')
content = soup.find("section", class_="aui-page-panel-content")
outputdir = os.path.join("build")
-date = datetime.date.today().strftime("%B {day} %Y").format(day=datetime.date.today().day)
+date = datetime.date.today().strftime("{day} %B %Y").format(day=datetime.date.today().day)
try:
os.makedirs(outputdir)
except:
@@ -56,7 +56,7 @@ with open(os.path.join(outputdir, "relnotes.adoc"), "w+") as outputfile:
result[section_type] += output
output = """
-== Python SDK {version} Release Notes ({date})
+== Version {version} ({date})
[source,bash]
----
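A hedged aside on the date change above: strftime("%d") zero-pads the day, so the day is injected separately via str.format. A small self-contained check (the date is illustrative, and the month name assumes the default C/English locale):

```python
import datetime

today = datetime.date(2019, 3, 5)
# "%d" would give "05"; injecting the day via str.format gives "5 March 2019".
date = today.strftime("{day} %B %Y").format(day=today.day)
assert date == "5 March 2019"
```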
|
chore(dcos-ui): update package to 1.10.0-rc.8
Update the DC/OS UI package to include the latest fixes and
improvements.
#close | "single_source" : {
"kind": "git",
"git": "https://github.com/dcos/dcos-ui.git",
- "ref": "8702cc371a8773a151ba4ab1545a763c0c24af93",
- "ref_origin": "v1.10.0-rc.7"
+ "ref": "03ba84aac9ac5a93f641e7bf54993ab6d76f4791",
+ "ref_origin": "v1.10.0-rc.8"
}
}
|
remove dependency on qtconsole for debian8:
Apparently this is not needed with recent ipython (>2) | @@ -25,7 +25,6 @@ Build-Depends: cython,
python-pyopencl-dbg,
python-mako,
ipython,
- ipython-qtconsole,
python-matplotlib,
python-matplotlib-dbg,
python-opengl,
@@ -50,7 +49,6 @@ Build-Depends: cython,
python3-pyopencl-dbg,
python3-mako,
ipython3,
- ipython3-qtconsole,
python3-matplotlib,
python3-matplotlib-dbg,
python3-opengl,
@@ -94,7 +92,6 @@ Depends: ${misc:Depends},
python-pyopencl,
python-mako,
ipython,
- ipython-qtconsole,
python-matplotlib,
python-opengl,
python-pyqt5,
@@ -123,7 +120,6 @@ Depends: ${misc:Depends},
python-pyopencl-dbg,
python-mako,
ipython,
- ipython-qtconsole,
python-matplotlib-dbg,
python-opengl,
python-pyqt5-dbg,
@@ -149,7 +145,6 @@ Depends: ${misc:Depends},
python3-pyopencl,
python3-mako,
ipython3,
- ipython3-qtconsole,
python3-matplotlib,
python3-opengl,
python3-pyqt5,
@@ -177,7 +172,6 @@ Depends: ${misc:Depends},
python3-pyopencl-dbg,
python3-mako,
ipython3,
- ipython3-qtconsole,
python3-matplotlib-dbg,
python3-opengl,
python3-pyqt5-dbg,
|
Add --nvidia command line option to spark-run
This will have the effect of running Spark driver docker container with extra parameters `--runtime=nvidia --env NVIDIA_VISIBLE_DEVICES=all`, which will allow deep learning libraries such as tensorflow-gpu running inside Spark driver to work. | @@ -164,6 +164,13 @@ def add_subparser(subparsers):
'spark.executor.cores=4".',
)
+ list_parser.add_argument(
+ '--nvidia',
+ help='Use nvidia docker runtime for Spark driver process (requires GPU)',
+ action='store_true',
+ default=False,
+ )
+
list_parser.add_argument(
'--mrjob',
help='Pass Spark arguments to invoked command in the format expected by mrjobs',
@@ -277,6 +284,7 @@ def get_docker_run_cmd(
env,
docker_img,
docker_cmd,
+ nvidia,
):
cmd = ['paasta_docker_wrapper', 'run']
cmd.append('--rm')
@@ -299,6 +307,10 @@ def get_docker_run_cmd(
cmd.append(k)
else:
cmd.append(f'{k}={v}')
+ if nvidia:
+ cmd.append('--env')
+ cmd.append('NVIDIA_VISIBLE_DEVICES=all')
+ cmd.append('--runtime=nvidia')
for volume in volumes:
cmd.append('--volume=%s' % volume)
cmd.append('%s' % docker_img)
@@ -587,6 +599,7 @@ def run_docker_container(
docker_img,
docker_cmd,
dry_run,
+ nvidia,
):
docker_run_args = dict(
container_name=container_name,
@@ -594,6 +607,7 @@ def run_docker_container(
env=environment,
docker_img=docker_img,
docker_cmd=docker_cmd,
+ nvidia=nvidia,
)
docker_run_cmd = get_docker_run_cmd(**docker_run_args)
@@ -681,6 +695,7 @@ def configure_and_run_docker_container(
docker_img=docker_img,
docker_cmd=docker_cmd,
dry_run=args.dry_run,
+ nvidia=args.nvidia,
)
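A simplified, hypothetical sketch of how the optional flag is threaded into the docker command line; the helper name and argument set below are invented, not the real paasta_docker_wrapper signature.

```python
def build_docker_run_cmd(docker_img, docker_cmd, nvidia=False):
    cmd = ['docker', 'run', '--rm']
    if nvidia:
        # expose all GPUs to the container and switch to the nvidia runtime
        cmd.append('--env')
        cmd.append('NVIDIA_VISIBLE_DEVICES=all')
        cmd.append('--runtime=nvidia')
    cmd.append(docker_img)
    cmd.extend(docker_cmd.split())
    return cmd

print(build_docker_run_cmd('spark-driver:latest', 'spark-submit job.py', nvidia=True))
```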
|
Add a flag in SAC to control backprop through log-prob
Summary: This detaches log_prob to be backward compatible. | @@ -84,6 +84,7 @@ class SACTrainer(RLTrainerMixin, ReAgentLightningModule):
action_embedding_mean: Optional[List[float]] = None,
action_embedding_variance: Optional[List[float]] = None,
crr_config: Optional[CRRWeightFn] = None,
+ backprop_through_log_prob: bool = True,
) -> None:
"""
Args:
@@ -94,6 +95,9 @@ class SACTrainer(RLTrainerMixin, ReAgentLightningModule):
from overestimation bias
value_network (optional): states -> value of state under actor
# alpha in the paper; controlling explore & exploit
+ backprop_through_log_prob: This is mostly for backward compatibility issue;
+ we used to have a bug that does this and it yields a better result in
+ some cases
# TODO: finish
"""
super().__init__()
@@ -139,6 +143,8 @@ class SACTrainer(RLTrainerMixin, ReAgentLightningModule):
if crr_config:
assert self.value_network is not None
+ self.backprop_through_log_prob = backprop_through_log_prob
+
def configure_optimizers(self):
optimizers = []
@@ -244,19 +250,22 @@ class SACTrainer(RLTrainerMixin, ReAgentLightningModule):
q2_actor_value = self.q2_network(*state_actor_action)
min_q_actor_value = torch.min(q1_actor_value, q2_actor_value)
+ actor_log_prob = actor_output.log_prob
+
+ if not self.backprop_through_log_prob:
+ actor_log_prob = actor_log_prob.detach()
+
if self.crr_config is not None:
cur_value = self.value_network(training_batch.state.float_features)
advantage = (min_q_actor_value - cur_value).detach()
# pyre-fixme[16]: `Optional` has no attribute `get_weight_from_advantage`.
crr_weight = self.crr_config.get_weight_from_advantage(advantage)
assert (
- actor_output.log_prob.shape == crr_weight.shape
- ), f"{actor_output.log_prob.shape} != {crr_weight.shape}"
- actor_loss = -(actor_output.log_prob * crr_weight.detach())
+ actor_log_prob.shape == crr_weight.shape
+ ), f"{actor_log_prob.shape} != {crr_weight.shape}"
+ actor_loss = -(actor_log_prob * crr_weight.detach())
else:
- actor_loss = (
- self.entropy_temperature * actor_output.log_prob - min_q_actor_value
- )
+ actor_loss = self.entropy_temperature * actor_log_prob - min_q_actor_value
# Do this in 2 steps so we can log histogram of actor loss
actor_loss_mean = actor_loss.mean()
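A toy, self-contained illustration (hypothetical scalars, not the SACTrainer API) of what detaching the log-prob does to the gradient of the actor loss:

```python
import torch

theta = torch.tensor([0.5], requires_grad=True)   # stand-in policy parameter
log_prob = theta * 2.0                            # pretend log pi(a|s) depends on theta
q_value = theta * 3.0                             # pretend Q(s, a) also depends on theta
entropy_temperature = 0.1

backprop_through_log_prob = False
used_log_prob = log_prob if backprop_through_log_prob else log_prob.detach()

actor_loss = (entropy_temperature * used_log_prob - q_value).mean()
actor_loss.backward()
# With the detach only the -q_value path contributes: d(loss)/d(theta) = -3.0;
# with backprop_through_log_prob=True it would be 0.1 * 2 - 3 = -2.8.
print(theta.grad)   # tensor([-3.])
```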
|
Add "just docker-build-doc" to justfile
This can be used to run the document build using docker in a manner
similar to Jenkinsfile. | @@ -35,6 +35,10 @@ features := '\
--no-default-features \
'
+docker-build-doc:
+ docker build . -f ci/sawtooth-build-docs -t sawtooth-build-docs
+ docker run --rm -v $(pwd):/project/sawtooth-core sawtooth-build-docs
+
build:
#!/usr/bin/env sh
set -e
|
Python API: override all comparison operators for Token
TN: | @@ -975,6 +975,15 @@ class Token(ctypes.Structure):
self._check_same_unit(other)
return self._identity_tuple < other._identity_tuple
+ def __le__(self, other):
+ return self == other or self < other
+
+ def __gt__(self, other):
+ return not (self <= other)
+
+ def __ge__(self, other):
+ return not (self < other)
+
def to_data(self):
"""
Return a dict representation of this Token.
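The same derivation pattern in a standalone sketch (an invented class, not the Libadalang Token type; functools.total_ordering is the stdlib shortcut for it): the remaining comparisons are built from __eq__ and __lt__ alone.

```python
class Version:
    def __init__(self, major, minor):
        self._key = (major, minor)

    def __eq__(self, other):
        return self._key == other._key

    def __lt__(self, other):
        return self._key < other._key

    # derived comparisons, exactly as in the diff above
    def __le__(self, other):
        return self == other or self < other

    def __gt__(self, other):
        return not (self <= other)

    def __ge__(self, other):
        return not (self < other)

assert Version(1, 2) < Version(1, 3)
assert Version(1, 3) >= Version(1, 3)
assert Version(2, 0) > Version(1, 9)
```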
|
nan error fix / name std_result
Thank you for your suggestion. "np.nan_to_num" is very effective because nan means zero mathematically in this calculation. To avoid confusion, a new name, "std_result", is defined instead of avg. Since avg is the return value, avg = std_result is added. | @@ -157,8 +157,8 @@ def _forward(args, index, config, data, variables, output_image=True):
elif e.repeat_evaluation_type == "std":
mux = np.array([s / e.num_evaluations for s in sum_mux])
muy = np.array([(s / e.num_evaluations)**2 for s in sum])
- val = mux - muy
- avg = np.sqrt(val)
+ std_result = [np.nan_to_num(np.sqrt(x / e.num_evaluations - (y / e.num_evaluations)**2)) for x, y in zip(sum_mux, sum)]
+ avg = std_result
else:
avg = [s / e.num_evaluations for s in sum]
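A hedged numeric check of the formula used above, with invented sample data; the mapping of sum/sum_mux to "sum of values" and "sum of squares" follows my reading of the diff.

```python
import numpy as np

samples = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # 3 repeated evaluations
num_evaluations = samples.shape[0]
sum_y = samples.sum(axis=0)           # running sum of values
sum_x = (samples ** 2).sum(axis=0)    # running sum of squared values
# std = sqrt(E[x^2] - E[x]^2); nan_to_num guards against tiny negative
# arguments produced by floating-point rounding.
std = np.nan_to_num(np.sqrt(sum_x / num_evaluations - (sum_y / num_evaluations) ** 2))
assert np.allclose(std, samples.std(axis=0))
```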
|
Update slot-filling docs
Adds a subsection in the documentation on maintaining values for slots in the same session. | @@ -606,7 +606,7 @@ This decorator replaces the need to define the ``@app.handle`` decorator. MindMe
- ``role`` (str, optional): The role of the entity.
- ``responses`` (list or str, optional): Message for prompting the user for missing entities.
- ``retry_response`` (list or str, optional): Message for re-prompting users. If not provided, defaults to ``responses``.
- - ``value`` (str, optional): The resolved value of the entity.
+ - ``value`` (str, optional): The resolved value of the entity. (Read :ref:`note <session_note>` for maintaining this value in the same session.)
- ``default_eval`` (bool, optional): Use system validation (default: True).
- ``hints`` (list, optional): Developer defined list of keywords to verify the user input against.
- ``custom_eval`` (func, optional): Custom validation function (should return either bool: validated or not) or a custom resolved value for the entity. If custom resolved value is returned, the slot response is considered to be valid. For this validation function, the developer is provided with the current turn's ``request`` object.
@@ -779,6 +779,33 @@ Alternatively, the standalone call to this feature can be called independently o
"Your {account} account balance is ${amount:.2f}"
)
+.. _session_note:
+
+Maintaining slot values in a session
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+When using the slot-filling flow repeatedly, the form is reset in every turn. In order to maintain values in the form across a session with multiple calls to this flow, the ``value`` attribute in the ``FormEntity`` object for the slot can be updated with a pre-defined value or a user-input value at an initial iteration of the form. As an example, if the user asks for their balance of a particular account type, the ``value`` attribute can be populated with this response. Henceforth for all balance check inquiries by the user the app will show details for the same account without reprompting:
+
+.. code:: python
+
+ balance_form = {
+ 'entities': [
+ FormEntity(
+ entity="account_type",
+ responses=["Sure. For which account - checkings, savings, or credit?"],
+ )
+ ],
+ 'max_retries': 1,
+ 'exit_keys': ['cancel', 'quit', 'exit'],
+ 'exit_msg': "Sorry I cannot help you. Please try again."
+ }
+
+ @app.auto_fill(intent="check_balances", form=balance_form)
+ def check_balance(request, responder):
+ for entity in request.entities:
+ if entity["type"] == "account_type":
+ balance_form['entities'][0].value = entity["value"][0]["cname"]
+ ...
+
.. note::
* The order of entities provided in the ``entities`` list in the form is important as the slots will be prompted in that order.
|
GradientOptimization: combine current_step_size and step_size parameter
- step_size now correctly updates | @@ -712,16 +712,16 @@ class GradientOptimization(OptimizationFunction):
return variable
# Update step_size
- if sample_num == 0:
- _current_step_size = self.parameters.step_size.get(execution_id)
- elif self.annealing_function:
- _current_step_size = call_with_pruned_args(self.annealing_function, self._current_step_size, sample_num, execution_id=execution_id)
+ step_size = self.parameters.step_size.get(execution_id)
+ if sample_num != 0 and self.annealing_function:
+ step_size = call_with_pruned_args(self.annealing_function, step_size, sample_num, execution_id=execution_id)
+ self.parameters.step_size.set(step_size, execution_id)
# Compute gradients with respect to current variable
_gradients = call_with_pruned_args(self.gradient_function, variable, execution_id=execution_id)
# Update variable based on new gradients
- return variable + self.parameters.direction.get(execution_id) * _current_step_size * np.array(_gradients)
+ return variable + self.parameters.direction.get(execution_id) * step_size * np.array(_gradients)
def _convergence_condition(self, variable, value, iteration, execution_id=None):
previous_variable = self.parameters.previous_variable.get(execution_id)
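A standalone sketch (invented helper, not the PsyNeuLink API) of the corrected flow: read the step size, anneal it on every sample after the first, carry the annealed value forward, then apply the update.

```python
import numpy as np

def annealed_gradient_step(variable, gradient, step_size, sample_num,
                           annealing=None, direction=+1):
    if sample_num != 0 and annealing is not None:
        step_size = annealing(step_size, sample_num)
    new_variable = variable + direction * step_size * np.array(gradient)
    return new_variable, step_size   # the annealed value is carried to the next call

x, step = np.array([0.0]), 1.0
for i in range(3):
    x, step = annealed_gradient_step(x, [2.0], step, i,
                                     annealing=lambda s, n: s * 0.5)
print(x, step)   # x == [3.5], step == 0.25 (halved on the 2nd and 3rd samples)
```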
|
Fix for form rendering of "required" fields with a default value
Force the "required" parameter to be set | @@ -153,6 +153,11 @@ class InvenTreeMetadata(SimpleMetadata):
if 'default' not in field_info and not field.default == empty:
field_info['default'] = field.get_default()
+ # Force non-nullable fields to read as "required"
+ # (even if there is a default value!)
+ if not field.allow_null and not (hasattr(field, 'allow_blank') and field.allow_blank):
+ field_info['required'] = True
+
# Introspect writable related fields
if field_info['type'] == 'field' and not field_info['read_only']:
|
fw/output: expose metadata
Expose result.metadata as a property of the output. | @@ -81,6 +81,12 @@ class Output(object):
return []
return self.result.events
+ @property
+ def metadata(self):
+ if self.result is None:
+ return {}
+ return self.result.metadata
+
def __init__(self, path):
self.basepath = path
self.result = None
|
better unauthorized handler
unauthorized_handler now displays the correct error message for login required vs admin required. Closes | -from flask import Flask
+from flask import Flask, flash, render_template, redirect, url_for, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
-from flask_login import LoginManager, AnonymousUserMixin
+from flask_login import LoginManager, AnonymousUserMixin, current_user
from flask_mail import Mail
from config import Config, populate_defaults
from app.elastic import Elastic
@@ -25,6 +25,17 @@ mail = Mail()
db = SQLAlchemy()
migrate = Migrate()
+@login.unauthorized_handler
+def unauthorized():
+ print("UNAUTHORIZED")
+ if current_user.is_anonymous:
+ print("CURRENT_USER IS ANONYMOUS")
+ flash("You must login to continue.", "warning")
+ return redirect(url_for('auth.login'))
+ if not current_user.is_admin:
+ flash("You must be an admin to access %s." % request.path, "warning")
+ return redirect(url_for('main.index'))
+
def create_app(config_class=Config, load_config=False):
app = Flask(__name__)
|
tools: Include `test_ui.py` to be checked by mypy.
This commit adds `test_ui.py` to the `type_consistent_testfiles`
list to check for type consistency with mypy. | @@ -79,7 +79,7 @@ repo_python_files['tests'] = []
# Added incrementally as newer test files are type-annotated.
type_consistent_testfiles = [
"test_run.py", "test_core.py", "test_emoji_data.py", "test_helper.py",
- "test_server_url.py"
+ "test_server_url.py", "test_ui.py"
]
for file_path in python_files:
|
sql: add feedback-probability configuration
Via: | @@ -31,7 +31,7 @@ ANALYZE TABLE TableName INDEX [IndexNameList]
For the `INSERT`, `DELETE`, or `UPDATE` statements, TiDB automatically updates the number of rows and updated rows. TiDB persists this information regularly and the update cycle is 5 * `stats-lease`. The default value of `stats-lease` is `3s`. If you specify the value as `0`, it does not update automatically.
-When the query is executed, TiDB collects feedback with the probability of `feedback-probability` and uses it to update the histogram and Count-Min Sketch.
+When the query is executed, TiDB collects feedback with the probability of `feedback-probability` and uses it to update the histogram and Count-Min Sketch. You can modify the value of `feedback-probability` in the configuration file. The default value is `0`.
### Control `ANALYZE` concurrency
|
Update setup-remote.md
Tweak: prettier printing of private key & address | @@ -89,11 +89,10 @@ from eth_account.account import Account
account1 = Account.create()
account2 = Account.create()
-print(f"REMOTE_TEST_PRIVATE_KEY1={account1.key.hex()}")
-print(f"REMOTE_TEST_PRIVATE_KEY2={account2.key.hex()}")
-
-print(f"ADDRESS1={account1.address}")
-print(f"ADDRESS2={account2.address}")
+print(f"""
+REMOTE_TEST_PRIVATE_KEY1={account1.key.hex()}, ADDRESS1={account1.address}
+REMOTE_TEST_PRIVATE_KEY2={account2.key.hex()}, ADDRESS2={account2.address}
+""")
```
Then, hit Ctrl-C to exit the Python console.
|
smoke test: fix meaning of bool values.
reboot_in_platform means rebooting via the platform, not from inside the node, so it
should use the platform feature to reboot the node. Change the logic to match
that purpose. | @@ -165,7 +165,7 @@ class Provisioning(TestSuite):
def verify_reboot_in_platform(
self, log: Logger, node: RemoteNode, log_path: Path
) -> None:
- self._smoke_test(log, node, log_path, reboot_in_platform=False)
+ self._smoke_test(log, node, log_path, reboot_in_platform=True)
@TestCaseMetadata(
description="""
@@ -186,7 +186,7 @@ class Provisioning(TestSuite):
log,
node,
log_path,
- reboot_in_platform=False,
+ reboot_in_platform=True,
is_restart=False,
)
@@ -195,7 +195,7 @@ class Provisioning(TestSuite):
log: Logger,
node: RemoteNode,
log_path: Path,
- reboot_in_platform: bool = True,
+ reboot_in_platform: bool = False,
wait: bool = True,
is_restart: bool = True,
) -> None:
@@ -220,14 +220,12 @@ class Provisioning(TestSuite):
# If successful, the node will be reboot.
# If failed, It distinguishes TCP and SSH errors by error messages.
if reboot_in_platform:
- node.reboot()
- else:
start_stop = node.features[StartStop]
- if not is_restart:
+ if is_restart:
+ start_stop.restart(wait=wait)
+ else:
start_stop.stop(wait=wait)
start_stop.start(wait=wait)
- else:
- start_stop.restart(wait=wait)
is_ready, tcp_error_code = wait_tcp_port_ready(
node.public_address,
node.public_port,
@@ -241,6 +239,8 @@ class Provisioning(TestSuite):
f"Cannot connect to [{node.public_address}:{node.public_port}],"
f" error code: {tcp_error_code}, no panic found in serial log"
)
+ else:
+ node.reboot()
log.info(f"node '{node.name}' rebooted in {timer}")
except Exception as identifier:
serial_console = node.features[SerialConsole]
|
Make util.py consistent with deploy.py
Don't ban 1025 if included in range elsewhere. | @@ -65,10 +65,10 @@ default_config = ConfigDict({
False,
# list of port ranges that should not be assigned to any instances
- # this bans the first ports 0-1025 and 4242 for shellinaboxd
+ # this bans the first ports 0-1024 and 4242 for shellinaboxd
"banned_ports": [{
"start": 0,
- "end": 1025
+ "end": 1024
}, {
"start": 4242,
"end": 4242
|
Enable post-copy by setting unprivileged_userfaultfd
The setting vm.unprivileged_userfaultfd = 1 is required to
make post-copy working for containerised libvirt.
Related: rhbz#2110556 | @@ -199,6 +199,8 @@ outputs:
value: {get_param: BridgeNfCallIp6Tables}
fs.inotify.max_user_instances:
value: {get_param: InotifyInstancesMax}
+ vm.unprivileged_userfaultfd:
+ value: 1
- if:
- fs_aio_max_number_set
- fs.aio-max-nr:
|
Handle project_data returning non-dict objects
Seen when enabling language servers per window with no project loaded | @@ -54,25 +54,35 @@ def is_in_workspace(window: 'Any', file_path: str) -> bool:
def enable_in_project(window, config_name: str) -> None:
- project_data = window.project_data() or dict()
+ project_data = window.project_data()
+ if isinstance(project_data, dict):
project_settings = project_data.setdefault('settings', dict())
project_lsp_settings = project_settings.setdefault('LSP', dict())
project_client_settings = project_lsp_settings.setdefault(config_name, dict())
project_client_settings['enabled'] = True
window.set_project_data(project_data)
+ else:
+ debug('non-dict returned in project_settings: ', project_data)
def disable_in_project(window, config_name: str) -> None:
- project_data = window.project_data() or dict()
+ project_data = window.project_data()
+ if isinstance(project_data, dict):
project_settings = project_data.setdefault('settings', dict())
project_lsp_settings = project_settings.setdefault('LSP', dict())
project_client_settings = project_lsp_settings.setdefault(config_name, dict())
project_client_settings['enabled'] = False
window.set_project_data(project_data)
+ else:
+ debug('non-dict returned in project_settings: ', project_data)
def get_project_config(window: 'Any') -> dict:
project_data = window.project_data() or dict()
+ if isinstance(project_data, dict):
project_settings = project_data.setdefault('settings', dict())
project_lsp_settings = project_settings.setdefault('LSP', dict())
return project_lsp_settings
+ else:
+ debug('non-dict returned in project_settings: ', project_data)
+ return dict()
|
Correct the msg ipv6 enable in system
net.ipv6.conf.default.disable_ipv6 = 1 means IPv6 is disabled.
We should correct it to be consistent with the code logic. | @@ -48,7 +48,7 @@ def is_enabled_and_bind_by_default():
LOG.info(_LI("IPv6 not present or configured not to bind to new "
"interfaces on this system. Please ensure IPv6 is "
"enabled and /proc/sys/net/ipv6/conf/default/"
- "disable_ipv6 is set to 1 to enable IPv6."))
+ "disable_ipv6 is set to 0 to enable IPv6."))
return _IS_IPV6_ENABLED
|
Update aqara.py
Change from 'click' to 'action' payload key. | @@ -51,7 +51,7 @@ class WXKG01LMLightController(LightController):
"""
Different states reported from the controller:
single, double, triple, quadruple,
- many, long, long_release
+ many, hold, release
"""
def get_z2m_actions_mapping(self) -> TypeActionsMapping:
@@ -61,8 +61,8 @@ class WXKG01LMLightController(LightController):
"triple": Light.ON_MIN_BRIGHTNESS,
"quadruple": Light.SET_HALF_BRIGHTNESS,
# "many": "", # Nothing
- "long": Light.HOLD_BRIGHTNESS_TOGGLE,
- "long_release": Light.RELEASE,
+ "hold": Light.HOLD_BRIGHTNESS_TOGGLE,
+ "release": Light.RELEASE,
}
def get_deconz_actions_mapping(self) -> TypeActionsMapping:
|
Fix //examples/text_embeddings_v2.
During refactor to public APIs, it ended up using the wrong filename. | @@ -127,7 +127,8 @@ class TextEmbeddingModel(tf.train.Checkpoint):
# Assign the table initializer to this instance to ensure the asset
# it depends on is saved with the SavedModel.
self._table_initializer = tf.lookup.TextFileInitializer(
- vocab_file_path, tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
+ write_vocabulary_file(self._vocabulary),
+ tf.string, tf.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.lookup.TextFileIndex.LINE_NUMBER)
self._table = tf.lookup.StaticVocabularyTable(
self._table_initializer, num_oov_buckets=oov_buckets)
|
Always allow play callbacks, also when using playlists
Fix broken addon action_controller services | @@ -94,6 +94,8 @@ def get_inputstream_listitem(videoid):
list_item.setContentLookup(False)
list_item.setMimeType('application/xml+dash')
list_item.setProperty('IsPlayable', 'true')
+ # Allows the add-on to always have play callbacks also when using the playlist (Kodi versions >= 20)
+ list_item.setProperty('ForceResolvePlugin', 'true')
try:
import inputstreamhelper
is_helper = inputstreamhelper.Helper('mpd', drm='widevine')
|
[dagit] Fix "Open in Playground" on schedule row
Summary: Just a path that I overlooked when changing to workspace URL namespacing.
Test Plan: View schedule, use flyout menu to open it in the playground. Verify successful navigation.
Reviewers: prha, dgibson | @@ -358,11 +358,14 @@ export const ScheduleRow: React.FC<{
icon="edit"
target="_blank"
disabled={!runConfigYaml}
- href={`/pipelines/${pipelineName}/playground/setup?${qs.stringify({
+ href={workspacePathFromAddress(
+ repoAddress,
+ `/pipelines/${pipelineName}/playground/setup?${qs.stringify({
mode,
solidSelection,
config: runConfigYaml,
- })}`}
+ })}`,
+ )}
/>
{schedule.partitionSet?.name ? (
@@ -370,7 +373,10 @@ export const ScheduleRow: React.FC<{
text="View Partition History..."
icon="multi-select"
target="_blank"
- href={`/pipelines/${pipelineName}/partitions`}
+ href={workspacePathFromAddress(
+ repoAddress,
+ `/pipelines/${pipelineName}/partitions`,
+ )}
/>
) : null}
</Menu>
|
Update task.py
Adds new kwarg functionality to example task | @@ -47,7 +47,16 @@ class ArithmeticTask(task.Task):
max_queries=5000,
)
- def evaluate_model(self, model):
+ def evaluate_model(self, model, max_examples=None, random_seed=None):
+ if random_seed:
+      np.random.seed(random_seed)
+ if max_examples:
+ trials = int(max_examples / 5)
+ if trials < 1:
+          raise IndexError(f"max_examples must be greater than or equal to 5, but was {max_examples}.")
+ else:
+ trials = self.num_trials
+
score_data = []
@@ -55,7 +64,7 @@ class ArithmeticTask(task.Task):
score = 0
inputs = []
targets = []
- for _ in range(self.num_trials):
+ for _ in range(trials):
a = np.random.randint(10 ** digits)
b = np.random.randint(10 ** digits)
correct = False
|
Use new secrets file for k8s endpoint
Existing users will need to do a user-environment login
and then transcribe the new tokens into a k8s secret
like this:
kubectl delete secret funcx-sdk-tokens
kubectl create secret generic funcx-sdk-tokens --from-file /root/.funcx/storage.db | @@ -4,7 +4,7 @@ mkdir ~/.funcx/$1
mkdir ~/.funcx/credentials
cp /funcx/config/config.py ~/.funcx
cp /funcx/$1/* ~/.funcx/$1
-cp /funcx/credentials/* ~/.funcx/credentials
+cp /funcx/credentials/storage.db ~/.funcx/
if [ -z "$2" ]; then
funcx-endpoint start $1
else
|
show container image in repo location display metadata
Summary: Forgot that the image is only available if you pull it from the location (not the origin)
Test Plan: test_workspace in BK, view docker example and see image
Reviewers: prha, sashank | @@ -190,7 +190,9 @@ def _load_location(self, origin):
repository_location=location,
load_error=error,
load_status=WorkspaceLocationLoadStatus.LOADED,
- display_metadata=origin.get_display_metadata() if origin else {},
+ display_metadata=location.get_display_metadata()
+ if location
+ else origin.get_display_metadata(),
update_timestamp=time.time(),
)
|
wallet: fix dscancel for "not all inputs ismine" case
fixes | @@ -1516,7 +1516,7 @@ class Abstract_Wallet(AddressSynchronizer, ABC):
# grab all ismine inputs
inputs = [txin for txin in tx.inputs()
if self.is_mine(self.get_txin_address(txin))]
- value = sum([txin.value_sats() for txin in tx.inputs()])
+ value = sum([txin.value_sats() for txin in inputs])
# figure out output address
old_change_addrs = [o.address for o in tx.outputs() if self.is_mine(o.address)]
out_address = (self.get_single_change_address_for_new_transaction(old_change_addrs)
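A toy illustration (made-up values, not the Electrum data model) of the one-line fix: sum the values of the wallet's own (ismine) inputs rather than of every input in the transaction.

```python
inputs = [
    {"value_sats": 5000, "ismine": True},
    {"value_sats": 7000, "ismine": False},   # someone else's input
    {"value_sats": 1000, "ismine": True},
]
mine = [txin for txin in inputs if txin["ismine"]]
value = sum(txin["value_sats"] for txin in mine)
assert value == 6000   # summing over all inputs would wrongly give 13000
```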
|
DEV: updated azure pipeline
[NEW] build source distribution for upload to pypi
[CHANGED] just test on 3.7 on Windows, it's soo slow otherwise | @@ -50,13 +50,10 @@ jobs:
steps:
- {task: UsePythonVersion@0, inputs: {versionSpec: '3.7', architecture: x86}}
- {task: UsePythonVersion@0, inputs: {versionSpec: '3.7', architecture: x64}}
- - {task: UsePythonVersion@0, inputs: {versionSpec: '3.6', architecture: x86}}
- - {task: UsePythonVersion@0, inputs: {versionSpec: '3.6', architecture: x64}}
- bash: |
python -m pip install --upgrade pip
python -m pip install .[dev] codecov
tox -e py37
- tox -e py36
codecov
- job: Linux_Build
@@ -67,6 +64,8 @@ jobs:
python -m pip install --upgrade pip
python -m pip install .[dev] cibuildwheel==0.11.1
cibuildwheel --output-dir wheelhouse .
+ python setup.py sdist
+ cp dist/*.gz wheelhouse/.
- task: PublishBuildArtifacts@1
inputs: {pathtoPublish: 'wheelhouse'}
- job: MacOS_Build
|
fluor_fdark_fail
add a line that will hit an if statement missed by the existing tests in the fluor_fvfm function | @@ -933,6 +933,7 @@ def test_plantcv_fluor_fvfm():
pcv.params.debug = "print"
outfile = os.path.join(cache_dir, TEST_INPUT_FMAX)
_ = pcv.fluor_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, filename=outfile, bins=1000)
+ _ = pcv.fluor_fvfm(fdark=fdark+3000, fmin=fmin, fmax=fmax, mask=fmask, filename=outfile, bins=1000)
# Test with debug = "plot"
pcv.params.debug = "plot"
_ = pcv.fluor_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, filename=False, bins=1000)
|
Update mekotio.txt
> lampion | @@ -394,12 +394,6 @@ http://185.101.92.241
51.120.2.28:3030
gamesstrond2.servebeer.com
-# Reference: https://twitter.com/noexceptcpp/status/1615832526466990080
-
-http://5.199.162.122
-anydeskkapdo.info
-casadosoftware.net
-
# Generic trail
/amorplus/brume.php
|
Update gcloud_setup.rst
rtd -> docs.studio.ml | @@ -77,7 +77,7 @@ In the config file (the one that you use with the ``--config`` flag, or, if you
use the default, in the ``studio/default_config.yaml``), go to the ``cloud``
section. Change projectId to the project id of the Google project for which
you enabled cloud computing. You can also modify the default instance
-parameters (see `Cloud computing for studio <http://studioml.readthedocs.io/en/latest/cloud.html>`__ for
+parameters (see `Cloud computing for studio <http://docs.studio.ml/en/latest/cloud.html>`__ for
limitations though).
Test
@@ -93,5 +93,5 @@ To test if things are set up correctly, go to
Then run ``studio`` locally, and watch the new experiment. In a little
while, it should change its status to "finished" and show the system
information (number of cpus, amount of ram / hdd) of a default instance.
-See `Cloud computing for studio <http://studioml.readthedocs.io/en/latest/cloud.html>`__ for more instructions on
+See `Cloud computing for studio <http://docs.studio.ml/en/latest/cloud.html>`__ for more instructions on
using an instance with specific hardware parameters.
|
ebuild.domain: find_repo(): don't depend on a sane ROOT value
Previously if ROOT was set in the environment to the empty string this
would infinitely loop. | @@ -652,12 +652,15 @@ class domain(config_domain):
repo = None
path = os.path.abspath(path)
with suppress_logging():
- while path != self.root:
+ while True:
try:
repo = self.add_repo(path, config=config, configure=configure)
break
except repo_errors.InvalidRepo:
- path = os.path.dirname(path)
+ parent = os.path.dirname(path)
+ if parent == path:
+ break
+ path = parent
return repo
def _configure_repo(self, repo):
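A standalone sketch of the termination trick used above: os.path.dirname() has the filesystem root as a fixed point, so comparing the parent to the current path ends the walk without relying on a sane ROOT value. The helper below is invented for illustration.

```python
import os

def find_upwards(path, predicate):
    path = os.path.abspath(path)
    while True:
        if predicate(path):
            return path
        parent = os.path.dirname(path)
        if parent == path:      # reached "/" (or a drive root): stop
            return None
        path = parent

# e.g. locate the first ancestor containing a "profiles" directory
print(find_upwards(".", lambda p: os.path.isdir(os.path.join(p, "profiles"))))
```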
|
Update `isconnected` utility
The isconnected function now tries several times to connect to a remote host before declaring it not connected.
This allows for a more robust check and more reliable tests | @@ -670,24 +670,28 @@ def show_options(file=sys.stdout): # noqa: C901
print(f"{k}: {v}", file=file)
-def isconnected(host="https://www.ifremer.fr"):
+def isconnected(host="https://www.ifremer.fr", maxtry=10):
""" check if we have a live internet connection
Parameters
----------
host: str
URL to use, 'https://www.ifremer.fr' by default
-
+ maxtry: int, default: 10
+ Maximum number of host connections to try before
Returns
-------
bool
"""
if split_protocol(host)[0] in ["http", "https", "ftp", "sftp"]:
+ it = 0
+ while it < maxtry:
try:
urllib.request.urlopen(host, timeout=1) # nosec B310 because host protocol already checked
- return True
+ result, it = True, maxtry
except Exception:
- return False
+ result, it = False, it+1
+ return result
else:
return os.path.exists(host)
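A simplified sketch of the retry behaviour, flattened into a for-loop; it has the same effect as the while/counter version in the diff but omits the protocol check.

```python
import urllib.request

def isconnected(host="https://www.ifremer.fr", maxtry=10, timeout=1):
    for _ in range(maxtry):
        try:
            urllib.request.urlopen(host, timeout=timeout)
            return True
        except Exception:
            continue   # try again, up to maxtry attempts
    return False
```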
|
message_feed: Remove unnecessary "user-select: none".
There is a "user-select: none" (cross-browser) that was put on
the #bottom_whitespace div, but the div doesn't actually have any
content that can be selected, and it also makes it difficult to
deselect selected text because when clicked over it will save the
current selection. | @@ -1961,12 +1961,6 @@ div.floating_recipient {
#bottom_whitespace {
display: block;
height: 300px;
- -webkit-touch-callout: none;
- -webkit-user-select: none;
- -khtml-user-select: none;
- -moz-user-select: none;
- -ms-user-select: none;
- user-select: none;
}
.loading_indicator_spinner {
|
Add bugzilla decorator to add_mds test case
Bug | @@ -7,7 +7,7 @@ import pytest
from ocs_ci.ocs import constants, defaults, ocp
from ocs_ci.framework import config
-from ocs_ci.framework.testlib import tier1, ManageTest
+from ocs_ci.framework.testlib import tier1, ManageTest, bugzilla
from ocs_ci.ocs.resources.ocs import OCS
log = logging.getLogger(__name__)
@@ -69,6 +69,7 @@ def verify_fs_exist(pod_count):
return False
+@bugzilla('1738833')
@tier1
@pytest.mark.usefixtures(
test_fixture.__name__,
|
Fix - Mumbai: ocean.create_data_nft() fails getting token_address from tx_receipt
Fix | @@ -45,7 +45,6 @@ def test_nonocean_tx(tmp_path):
assert bob_eth_after > bob_eth_before
-@pytest.mark.skip(reason="Don't skip once fixed #921")
def test_ocean_tx(tmp_path):
"""Do a (simple) Ocean tx on Mumbai"""
@@ -100,6 +99,7 @@ def _remote_config(tmp_path):
"""
[eth-network]
network = https://rpc-mumbai.maticvigil.com
+network_name = mumbai
address.file = ~/.ocean/ocean-contracts/artifacts/address.json
block_confirmations = 0
|
display: Also prefer <4K modes in the vertical dimension
Apparently 2:1 scaled modes like 1920x2160 are a thing? | @@ -26,6 +26,7 @@ static void display_choose_timing_mode(dcp_timing_mode_t *modes, int cnt, dcp_ti
for (int i = 1; i < cnt; i++) {
COMPARE(modes[i].valid, best->valid);
COMPARE(modes[i].width <= 1920, best->width <= 1920);
+ COMPARE(modes[i].height <= 1200, best->height <= 1200);
COMPARE(modes[i].fps <= 60 << 16, best->fps <= 60 << 16);
COMPARE(modes[i].width, best->width);
COMPARE(modes[i].height, best->height);
|
docs: add peer-to-peer communication
describe behaviour of the 'RouteEnvelope' function | @@ -114,6 +114,9 @@ It contains:
- `envelope`: the envelope to be forwarded, in byte representation;
- an `AgentRecord` (see above).
+## Overview of ACN
+
+TODO: add picture from paper
## ACN with direct connection
@@ -178,18 +181,98 @@ by an AEA's skill passes through:
In this section, we describe the interaction between peers.
+Assume an envelope arrives from an agent to peer `DHTPeer1`,
+i.e. `DHTPeer1` is the first hop
+of the routing.
+Let `Agent` be the local agent directly connected
+to `DHTPeer1`, and let `DHTPeer2` a direct peer
+of peer `DHTPeer1`.
+
+
+We may have different scenario:
+
+1) the field `sender` of the envelope
+ is not registered in any of the one connected
+ to the peer locally:
+ the message is considered invalid, and it is dropped.
<div class="mermaid">
sequenceDiagram
+ participant Agent
participant DHTPeer1
participant DHTPeer2
+ Agent->>DHTPeer1: AeaEnvelope
alt envelope sender not registered locally
note over DHTPeer1: stop, log error
end
+</div>
+
+2) the `target` of the envelope is
+ the local agent connected to the peer:
+ the envelope is routed to the local agent.
+
+<div class="mermaid">
+ sequenceDiagram
+ participant Agent
+ participant DHTPeer1
+ participant DHTPeer2
+ Agent->>DHTPeer1: AeaEnvelope
alt target == peer1.my_agent
- note over DHTPeer1: route envelope destinated to <br/>local agent, not routing
+ note over DHTPeer1: envelope destinated<br/> to local agent,<br/> not routing
+ loop agent not ready
+ note over DHTPeer1: sleep for 100ms
+ end
+ DHTPeer1->>Agent: AeaEnvelope
+ Agent->>DHTPeer1: Status(Success)
+ end
+</div>
+
+3) the `target` is a delegate client.
+ Send the envelope via TCP.
+
+<div class="mermaid">
+ sequenceDiagram
+ participant Delegate
+ participant DHTPeer1
+ participant DHTPeer2
+ Delegate->>DHTPeer1: AeaEnvelope
+ alt destination is a delegate
+ note over DHTPeer1: send envelope<br/> to delegate via TCP
+ DHTPeer1->>Delegate: AeaEnvelope
+ Delegate->>DHTPeer1: Status(Success)
end
+</div>
+
+4) Otherwise, look up the local DHT.
+ If an entry is found, use it;
+ otherwise, send a look-up request
+ to connected peers.
+<div class="mermaid">
+ sequenceDiagram
+ participant Agent
+ participant DHTPeer1
+ participant DHTPeer2
+ Agent->>DHTPeer1: AeaEnvelope
+ alt address found in DHT
+ note over DHTPeer1: destination is a relay client
+ else lookup address in DHT
+ note over DHTPeer1: send lookup request<br/> to all peers
+ DHTPeer1->>DHTPeer2: LookUpRequest
+ alt error
+ DHTPeer2->>DHTPeer1: Status(Error)
+ else success
+ DHTPeer2->>DHTPeer1: LookUpResponse
+ note over DHTPeer1: Check PoR
+ end
+ end
+ note over DHTPeer1,DHTPeer2: assume next peer is DHTPeer2
+ DHTPeer1->>DHTPeer2: AeaEnvelope
+ alt success
+ DHTPeer2->>DHTPeer1: Status(Success)
+ else error
+ DHTPeer2->>DHTPeer1: Status(Error)
+ end
</div>
### ACN Exit
|
Use rbt to expand the right-sizer review group.
This prevents spamming everyone in two big groups, and tailors the tickets to people who opt-in. It also prevents review-board from overriding any default groups attached to files. | @@ -127,6 +127,22 @@ def commit(filename, serv):
subprocess.check_call(('git', 'commit', '-n', '-m', message))
+def get_reviewers_in_group(group_name):
+ """Using rbt's target-groups argument overrides our configured default review groups.
+ So we'll expand the group into usernames and pass those users in the group individually.
+ """
+ rightsizer_reviewers = json.loads(
+ subprocess.check_output((
+ 'rbt',
+ 'api-get',
+ '--server', 'https://reviewboard.yelpcorp.com',
+ 'groups/{}/users/'.format(group_name),
+ )
+ ).decode('UTF-8')
+ )
+ return [user.get('username', '') for user in rightsizer_reviewers.get('users', {})]
+
+
def get_reviewers(filename):
recent_authors = set()
authors = subprocess.check_output((
@@ -142,15 +158,15 @@ def get_reviewers(filename):
def review(filename, description, provisioned_state, manual_rb):
- reviewers = ' '.join(get_reviewers(filename))
+ all_reviewers = get_reviewers(filename).union(get_reviewers_in_group('right-sizer'))
+ reviewers_arg = ' '.join(all_reviewers)
if manual_rb:
subprocess.check_call((
'review-branch',
'--summary=automatically updating {} for {}provisioned cpu'.format(filename, provisioned_state),
'--description="{}"'.format(description),
- '--reviewers', reviewers,
+ '--reviewers', reviewers_arg,
'--server', 'https://reviewboard.yelpcorp.com',
- '--target-groups', 'operations perf',
))
else:
subprocess.check_call((
@@ -158,9 +174,8 @@ def review(filename, description, provisioned_state, manual_rb):
'--summary=automatically updating {} for {}provisioned cpu'.format(filename, provisioned_state),
'--description="{}"'.format(description),
'-p',
- '--reviewers', reviewers,
+ '--reviewers', reviewers_arg,
'--server', 'https://reviewboard.yelpcorp.com',
- '--target-groups', 'operations perf',
))
@@ -224,9 +239,8 @@ def main(argv=None):
serv['state'] = provisioned_state
ticket_desc = (
"This ticket and CR have been auto-generated to help keep PaaSTA right-sized."
- "\nPERF will review this CR and give a shipit. After that shipit and your own sanity check of the new"
- " value, proceed to merge and push the branch that jenkins created. Open an issue with any concerns"
- " and someone from PERF will respond."
+ "\nPERF will review this CR and give a shipit. Then an ops deputy from your team should merge, if these values make sense for your service."
+ "\nOpen an issue with any concerns and someone from PERF will respond."
"\nWe suspect that {s}.{i} in {c} may have been {o}-provisioned"
" during the 1 week prior to {d}. It initially had {x} cpus, but based on the below dashboard,"
" we recommend {y} cpus."
|
Use correct function on list
At some point I guess we should add a test covering this code? I managed
to trigger it! | @@ -54,7 +54,7 @@ class LocalExiter(Exiter):
# Log the unrecognized exit code to the fatal exception log.
ExceptionSink.log_exception(run_tracker_msg)
# Ensure the unrecognized exit code message is also logged to the terminal.
- additional_messages.push(run_tracker_msg)
+ additional_messages.append(run_tracker_msg)
outcome = WorkUnit.FAILURE
self._run_tracker.set_root_outcome(outcome)
@@ -65,7 +65,7 @@ class LocalExiter(Exiter):
# so we just log that fact here and keep going.
exception_string = str(e)
ExceptionSink.log_exception(exception_string)
- additional_messages.push(exception_string)
+ additional_messages.append(exception_string)
finally:
if self._repro:
# TODO: Have Repro capture the 'after' state (as a diff) as well? (in reference to the below
|
Added ReceitaWS for CNPJ consultation in Brazil
Added ReceitaWS for CNPJ lookup in Brazil | @@ -730,6 +730,7 @@ API | Description | Auth | HTTPS | CORS |
| [BCLaws](http://www.bclaws.ca/civix/template/complete/api/index.html) | Access to the laws of British Columbia | No | No | Unknown |
| [Brazil](https://brasilapi.com.br/) | Community driven API for Brazil Public Data | No | Yes | Yes |
| [Brazil Central Bank Open Data](https://dadosabertos.bcb.gov.br/) | Brazil Central Bank Open Data | No | Yes | Unknown |
+| [Brazil Receita WS](https://www.receitaws.com.br/) | Consult companies by CNPJ for Brazilian companies | No | Yes | Unknown |
| [Brazilian Chamber of Deputies Open Data](https://dadosabertos.camara.leg.br/swagger/api.html) | Provides legislative information in Apis XML and JSON, as well as files in various formats | No | Yes | No |
| [BusinessUSA](https://business.usa.gov/developer) | Authoritative information on U.S. programs, events, services and more | `apiKey` | Yes | Unknown |
| [Census.gov](https://www.census.gov/data/developers/data-sets.html) | The US Census Bureau provides various APIs and data sets on demographics and businesses | No | Yes | Unknown |
|
realm logo: Fix realm logo unsupported file upload bug.
The bug where a realm logo could not be uploaded again after a file
input error was fixed by clearing `get_file_input()` with
`get_file_input().val('')` after the error.
The previous .clone() logic was preserved over many years but
apparently was also just wrong.
Fixes | @@ -136,8 +136,7 @@ exports.build_direct_upload_widget = function (
function clear() {
const control = get_file_input();
- const new_control = control.clone(true);
- control.replaceWith(new_control);
+ control.val('');
}
upload_button.on('drop', function (e) {
|
Trying to add js again
This one follows this suggestion: | @@ -106,6 +106,15 @@ try:
nbsphinx_execute = os.environ["NBSPHINX_EXECUTE"]
except KeyError:
nbsphinx_execute = "always"
+html_js_files = []
+nbsphinx_prolog = r"""
+.. raw:: html
+
+ <script src='http://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js'></script>
+ <script>require=requirejs;</script>
+
+
+"""
html_theme = "bootstrap"
htlm_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
@@ -282,6 +291,3 @@ epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_css_file("style.css")
- app.add_js_file(
- "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"
- )
|
Scons: Make sure we can always decode the compiler output.
* Worst case, fall back to backslash replace, which is still readable
and contains all the information.
* Without this, my German MSVC 2019 could crash when the other
guesses we make fail. | @@ -48,7 +48,10 @@ def _decode(data):
except UnicodeDecodeError:
import locale
+ try:
return data.decode(locale.getpreferredencoding())
+ except UnicodeDecodeError:
+ return data.decode("utf8", "backslashreplace")
def getArguments():
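The fallback chain as a standalone sketch (simplified and renamed; not the project's actual helper): try UTF-8, then the locale's preferred encoding, and as a last resort keep going with backslash escapes so the output is always representable.

```python
import locale

def decode_output(data: bytes) -> str:
    try:
        return data.decode("utf8")
    except UnicodeDecodeError:
        try:
            return data.decode(locale.getpreferredencoding())
        except UnicodeDecodeError:
            # last resort: never fail, keep undecodable bytes as \xNN escapes
            return data.decode("utf8", "backslashreplace")

print(decode_output(b"warnung: \xfcberschrieben"))
```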
|
fix invalid exit code (for
This problem occurs when the in-place option and the exit-code option
are specified and the result of modifying W391 is only blank lines. | @@ -3436,7 +3436,7 @@ def fix_file(filename, options=None, output=None, apply_config=False):
with open_with_encoding(filename, 'w', encoding=encoding) as fp:
fp.write(fixed_source)
return fixed_source
- return ''
+ return None
else:
if output:
output.write(fixed_source)
@@ -4060,9 +4060,11 @@ def fix_multiple_files(filenames, options, output=None):
ret = _fix_file((name, options, output))
if ret is None:
continue
- if options.diff or options.in_place:
+ if options.diff:
if ret != '':
results.append(ret)
+ elif options.in_place:
+ results.append(ret)
else:
original_source = readlines_from_file(name)
if "".join(original_source).splitlines() != ret.splitlines():
|
Fixed the HTTPS row
sorry, forgot to change the HTTPS column | @@ -224,9 +224,9 @@ API | Description | Auth | HTTPS | Link |
| Barchart OnDemand | Stock, Futures, and Forex Market Data | `apiKey` | Yes | [Go!](https://www.barchartondemand.com/free) |
| Blockchain | Bitcoin Payment, Wallet & Transaction Data | No | Yes | [Go!](https://www.blockchain.info/api) |
| CoinDesk | Bitcoin Price Index | No | No | [Go!](http://www.coindesk.com/api/) |
-| CoinMarketCap | Cryptocurrencies Prices | No | No | [Go!](https://coinmarketcap.com/api/) |
+| CoinMarketCap | Cryptocurrencies Prices | No | Yes | [Go!](https://coinmarketcap.com/api/) |
| Consumer Financial Protection Bureau | Financial services consumer complains data | `apiKey` | Yes | [Go!](https://data.consumerfinance.gov/resource/jhzv-w97w.json) |
-| CryptoCompare | Cryptocurrencies Comparison | No | No | [Go!](https://www.cryptocompare.com/api#) |
+| CryptoCompare | Cryptocurrencies Comparison | No | Yes | [Go!](https://www.cryptocompare.com/api#) |
| Czech National Bank | A collection of exchange rates | No | No | [Go!](https://www.cnb.cz/cs/financni_trhy/devizovy_trh/kurzy_devizoveho_trhu/denni_kurz.xml) |
| IEX | Stocks and Market Data | No | Yes | [Go!](https://iextrading.com/developer/) |
| Razorpay IFSC | Indian Financial Systems Code (Bank Branch Codes) | No | Yes | [Go!](https://ifsc.razorpay.com/) |
|