message | diff
---|---
MNT: add a RLock for RE state
And use it in the state machine. This will eventually have to cover
more things | @@ -81,14 +81,20 @@ class LoggingPropertyMachine(PropertyMachine):
super().__init__(machine_type)
def __set__(self, obj, value):
- old_value = self.__get__(obj)
+ own = type(obj)
+ old_value = self.__get__(obj, own)
+ with obj._state_lock:
super().__set__(obj, value)
- value = self.__get__(obj)
+ value = self.__get__(obj, own)
obj.log.info("Change state on %r from %r -> %r",
obj, old_value, value)
if obj.state_hook is not None:
obj.state_hook(value, old_value)
+ def __get__(self, instance, owner):
+ with instance._state_lock:
+ return super().__get__(instance, owner)
+
# See RunEngine.__call__.
_call_sig = Signature(
@@ -230,6 +236,7 @@ class RunEngine:
if loop is None:
loop = get_bluesky_event_loop()
self._th = _ensure_event_loop_running(loop)
+ self._state_lock = threading.RLock()
self._loop = loop
self._during_task = during_task
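
A minimal, self-contained sketch (names here are illustrative, not the bluesky API) of why a re-entrant lock is needed for this pattern: `__set__` acquires the lock and then calls `__get__`, which acquires it again on the same thread, so a plain `threading.Lock` would deadlock.

```python
import threading

class LockedAttr:
    """Descriptor that guards reads and writes with the owner's _state_lock."""

    def __set_name__(self, owner, name):
        self._name = "_" + name

    def __get__(self, obj, owner=None):
        with obj._state_lock:                    # also acquired from inside __set__
            return getattr(obj, self._name, None)

    def __set__(self, obj, value):
        with obj._state_lock:
            old = self.__get__(obj, type(obj))   # nested acquire -> needs RLock
            setattr(obj, self._name, value)
            print("state changed %r -> %r" % (old, value))

class Machine:
    state = LockedAttr()

    def __init__(self):
        self._state_lock = threading.RLock()     # re-entrant, unlike threading.Lock

m = Machine()
m.state = "running"   # prints: state changed None -> 'running'
```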
|
"Getting Started" : Add replacement strings for 3rd party tool paths
and versions | @@ -310,6 +310,20 @@ source_parsers = {
source_suffix = ['.rst', '.md']
+# Variables for string replacement functions
+
+arnold_version = '2018'
+arnold_path_linux = '/opt/solidangle/mtoa/{0}'.format( arnold_version )
+arnold_path_osx = '/opt/solidangle/mtoa/{0}'.format( arnold_version )
+
+delight_version = '8.5.1'
+delight_path_linux = '/opt/3delightForMaya-{0}/Linux-x86_64'.format( delight_version )
+delight_path_osx = '/opt/3delightForMaya-{0}/Darwin'.format( delight_version )
+
+tractor_version = '2.2'
+tractor_path_linux = '/opt/pixar/Tractor-{0}'.format( tractor_version )
+tractor_path_osx = '/Applications/pixar/Tractor-{0}'.format( tractor_version )
+
## \Todo See if the recommonmark folks would accept a patch with this
# functionality.
class GafferTransform( recommonmark.transform.AutoStructify ) :
@@ -377,6 +391,18 @@ def gafferSourceSubstitutions( app, docName, source ) :
source[0] = source[0].replace( "!GAFFER_MINOR_VERSION!", str( Gaffer.About.minorVersion() ) )
source[0] = source[0].replace( "!GAFFER_PATCH_VERSION!", str( Gaffer.About.patchVersion() ) )
+def thirdPartySourceSubtitutions( app, docName, source) :
+
+ source[0] = source[0].replace( "!ARNOLD_VERSION!", arnold_version )
+ source[0] = source[0].replace( "!ARNOLD_PATH_LINUX!", arnold_path_linux )
+ source[0] = source[0].replace( "!ARNOLD_PATH_OSX!", arnold_path_osx )
+ source[0] = source[0].replace( "!DELIGHT_VERSION!", delight_version )
+ source[0] = source[0].replace( "!DELIGHT_PATH_LINUX!", delight_path_linux )
+ source[0] = source[0].replace( "!DELIGHT_PATH_OSX!", delight_path_osx )
+ source[0] = source[0].replace( "!TRACTOR_VERSION!", tractor_version )
+ source[0] = source[0].replace( "!TRACTOR_PATH_LINUX!", tractor_path_linux )
+ source[0] = source[0].replace( "!TRACTOR_PATH_OSX!", tractor_path_osx )
+
def setup( app ) :
app.add_config_value(
@@ -392,6 +418,7 @@ def setup( app ) :
app.add_transform( GafferTransform )
app.connect( "source-read", gafferSourceSubstitutions )
+ app.connect( "source-read", thirdPartySourceSubtitutions )
# Add the custom stylesheet; used in all .md and .rst files in source
app.add_stylesheet( 'gaffer.css' )
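
A stand-alone sketch of the mechanism this change relies on: Sphinx's `source-read` event passes each handler a one-element list, so the handler mutates `source[0]` in place rather than returning a value. The handler and values below are illustrative, not the full Gaffer config.

```python
def third_party_substitutions(app, docname, source):
    # Sphinx passes a one-element list; assign back into it to change the page text.
    source[0] = source[0].replace("!ARNOLD_VERSION!", "2018")

# Simulate what Sphinx does when it fires the "source-read" event for one page.
source = ["Install Arnold !ARNOLD_VERSION! before building the docs."]
third_party_substitutions(app=None, docname="GettingStarted", source=source)
print(source[0])   # Install Arnold 2018 before building the docs.
```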
|
Fix logic predicate code generation when property uses entity info
TN: | 'Self.Field_{}'.format(i)
for i, _ in enumerate(args_types)
]
+ if prop.uses_entity_info:
+ args.append('Node_0.Info')
args_fmt = '({})'.format(', '.join(args)) if args else ''
%>
return ${prop.name} ${args_fmt};
|
Add a negative test for security_group api
Currently, test_security_groups_negative.py only prevents creating
a security group named "default", but sometimes a
user creates a security group named "test-123"
and then changes the name to "default",
so we should add a negative test as below. | @@ -175,6 +175,16 @@ class NegativeSecGroupTest(base.BaseSecGroupTest):
self.security_groups_client.create_security_group,
name=name)
+ @test.attr(type=['negative'])
+ @decorators.idempotent_id('966e2b96-023a-11e7-a9e4-fa163e4fa634')
+ def test_create_security_group_update_name_default(self):
+ # Update security group name to 'default', it should be failed.
+ group_create_body, _ = self._create_security_group()
+ self.assertRaises(lib_exc.Conflict,
+ self.security_groups_client.update_security_group,
+ group_create_body['security_group']['id'],
+ name="default")
+
@test.attr(type=['negative'])
@decorators.idempotent_id('8fde898f-ce88-493b-adc9-4e4692879fc5')
def test_create_duplicate_security_group_rule_fails(self):
|
Adjust Campo Formoso-BA spider
resolve okfn-brasil/querido-diario#788 | +from datetime import date
+
from gazette.spiders.base.doem import DoemGazetteSpider
class BaCampoFormosoSpider(DoemGazetteSpider):
TERRITORY_ID = "2906006"
name = "ba_campo_formoso"
+ start_date = date(2018, 1, 3) # edition_number 873
state_city_url_part = "ba/campoformoso"
|
Fix toy sensor
Summary: Missed a spot here.
Test Plan: Run dagit in root dir, no more sensor error
Reviewers: rexledesma, prha | @@ -52,7 +52,8 @@ def toy_file_sensor(_):
@sensor(pipeline_name="log_asset_pipeline")
def toy_asset_sensor(context):
- events = context.instance.events_for_asset_key(
+ with context.get_instance() as instance:
+ events = instance.events_for_asset_key(
AssetKey(["model"]), after_cursor=context.last_run_key, ascending=False, limit=1
)
|
Use only URL /sendgrid/ to send email
Remove the root URL setup part that may confuse readers
Revise the text | @@ -280,21 +280,14 @@ from django.contrib import admin
from .views import index
-urlpatterns = [
- url(r'^admin/', admin.site.urls),
- url(r'^$', index, name='sendgrid'),
-]
-```
-
-These paths allow the root URL to send the email. For a true Django app, you may want to move this code to another URL like so:
-
-```python
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^sendgrid/', index, name='sendgrid'),
]
```
+These paths allow the URL `/sendgrid/` to send the email.
+
We also assume that you have set up your development environment with your `SENDGRID_API_KEY`. If you have not done it yet, please do so. See the section [Setup Environment Variables](https://github.com/sendgrid/sendgrid-python#setup-environment-variables).
Now we should be able to send an email. Let's run our Django development server to test it.
|
Exclude /etc/openldap to avoid overriding ro file
Currently /etc/openldap is bind-mounted by keystone containers
with the ro flag so it should be excluded from config files generated
by puppet.
Closes-Bug: | @@ -103,6 +103,8 @@ if [ -z "$NO_ARCHIVE" ]; then
ro_files+="/etc/pki/ca-trust/source/anchors /etc/pki/tls/certs/ca-bundle.crt "
ro_files+="/etc/pki/tls/certs/ca-bundle.trust.crt /etc/pki/tls/cert.pem "
ro_files+="/etc/hosts /etc/localtime /etc/hostname "
+ # /etc/openldap is bind mounted with "ro" option in keystone containers.
+ ro_files+="/etc/openldap"
for ro in $ro_files; do
if [ -e "$ro" ]; then
exclude_files+=" --exclude=$ro"
|
Problem: No reason to check the BEP (+typo)
Solution: Say why there is a link to the BEP, and fix a typo. | @@ -75,10 +75,10 @@ configuration file as documented under
[Configuration Settings](configuration.html).
-## bigchaindb upsert-validator
-_[BEP#3](https://github.com/bigchaindb/BEPs/tree/master/3)_
+## bigchaindb upsert-validator (insecure)
+Add, update, or remove a validator from the validators set of the local node. The command implements [3/UPSERT-VALIDATORS](https://github.com/bigchaindb/BEPs/tree/master/3), check it out if you need more details on how this is orchestrated.
-Add/Update/Remove a validator from the validaors set of the local node. Below is the command line syntax,
+Below is the command line syntax,
```bash
$ bigchaindb upsert-validator PUBLIC_KEY_OF_VALIDATOR POWER
|
Remove format properties for campbell plot
This removes properties applied to the campbell plot since now we are
going to set those in the plotly_theme file. | @@ -808,49 +808,18 @@ class CampbellResults:
)
)
- fig.update_xaxes(
- title_text="<b>Frequency</b>",
- title_font=dict(family="Arial", size=20),
- tickfont=dict(size=16),
- range=[0, np.max(speed_range)],
- gridcolor="lightgray",
- showline=True,
- linewidth=2.5,
- linecolor="black",
- mirror=True,
- )
+ fig.update_xaxes(title_text="<b>Frequency</b>", range=[0, np.max(speed_range)])
fig.update_yaxes(
- title_text="<b>Damped Natural Frequencies</b>",
- title_font=dict(family="Arial", size=20),
- tickfont=dict(size=16),
- range=[0, 1.1 * np.max(wd)],
- gridcolor="lightgray",
- showline=True,
- linewidth=2.5,
- linecolor="black",
- mirror=True,
+ title_text="<b>Damped Natural Frequencies</b>", range=[0, 1.1 * np.max(wd)]
)
fig.update_layout(
- width=1200,
- height=900,
- plot_bgcolor="white",
- hoverlabel_align="right",
coloraxis=dict(
cmin=min(log_dec_map),
cmax=max(log_dec_map),
colorscale="rdbu",
- colorbar=dict(
- title=dict(text="<b>Log Dec</b>", side="right", font=dict(size=20)),
- tickfont=dict(size=16),
- ),
- ),
- legend=dict(
- itemsizing="constant",
- bgcolor="white",
- borderwidth=2,
- font=dict(size=14),
- orientation="h",
+ colorbar=dict(title=dict(text="<b>Log Dec</b>", side="right")),
),
+ legend=dict(itemsizing="constant", orientation="h"),
**kwargs,
)
|
Delete the IPSec before the router is deleted
The termination of the IPSec connection process requires router's
netns presence to execute successfully, so the IPSec connection
deletion should be executed before the router is deleted,
rather than after it.
Closes-Bug: | @@ -38,7 +38,7 @@ class VPNService(object):
registry.subscribe(
router_added_actions, resources.ROUTER, events.AFTER_CREATE)
registry.subscribe(
- router_removed_actions, resources.ROUTER, events.AFTER_DELETE)
+ router_removed_actions, resources.ROUTER, events.BEFORE_DELETE)
registry.subscribe(
router_updated_actions, resources.ROUTER, events.AFTER_UPDATE)
|
Use `find_packages` in azure-cli/setup.py
This will scour the azure-cli folder for packages, instead of relying on enumerating them.
Documentation on the find_packages function can be found here: | from __future__ import print_function
from codecs import open
-from setuptools import setup
+from setuptools import setup, find_packages
try:
from azure_bdist_wheel import cmdclass
@@ -134,10 +134,7 @@ setup(
'az.completion.sh',
'az.bat',
],
- packages=[
- 'azure',
- 'azure.cli',
- ],
+ packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=DEPENDENCIES,
cmdclass=cmdclass
)
|
Add CAR.LEXUS_RX various missing firmware
2018 RX350L DongleID/route 45b442be1779b307|2020-09-15--12-04-24 | @@ -1094,6 +1094,7 @@ FW_VERSIONS = {
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
b'\x018966348R8500\x00\x00\x00\x00',
+ b'\x018966348W1300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
@@ -1101,6 +1102,7 @@ FW_VERSIONS = {
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
b'F152648474\x00\x00\x00\x00\x00\x00',
+ b'F152648630\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
@@ -1111,10 +1113,12 @@ FW_VERSIONS = {
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48102\x00\x00\x00\x00\x00\x00',
+ b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
+ b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
@@ -1122,6 +1126,7 @@ FW_VERSIONS = {
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
+ b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
|
[resotocore][fix] Silence apscheduler logs
It uses the term `Job` which confuses people with resoto jobs. | @@ -248,6 +248,8 @@ def setup_process(args: Namespace, child_process: Optional[str] = None) -> None:
logging.getLogger("backoff").setLevel(logging.FATAL)
# transitions (fsm) creates a lot of log noise. Only show warnings.
logging.getLogger("transitions.core").setLevel(logging.WARNING)
+ # apscheduler uses the term Job when it triggers, which confuses people.
+ logging.getLogger("apscheduler.executors").setLevel(logging.WARNING)
# set/reset process creation method
reset_process_start_method()
|
Update cache.py
make D.feature([symbol], [Feature('close')], disk_cache=1) work correctly | @@ -471,7 +471,7 @@ class DatasetCache(BaseProviderCache):
not_space_fields = remove_fields_space(fields)
data = data.loc[:, not_space_fields]
# set features fields
- data.columns = list(fields)
+ data.columns = [str(i) for i in fields]
return data
@staticmethod
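
A small illustrative sketch of the failure mode being fixed, using a hypothetical stand-in for the real `Feature` class: when `fields` holds objects rather than strings, assigning them directly as column labels leaves non-string labels that later lookups by name miss, while `str(i)` normalizes them.

```python
import pandas as pd

class Feature:                       # stand-in only; not the real qlib class
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return "$" + self.name

fields = [Feature("close")]
data = pd.DataFrame([[1.0], [2.0]], columns=["$close"])

data.columns = list(fields)          # old behaviour: labels are Feature objects
# data["$close"]                     # would raise KeyError here

data.columns = [str(i) for i in fields]    # new behaviour: labels are plain strings
print(data["$close"].tolist())             # [1.0, 2.0]
```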
|
Update install.rst
Minor grammar and typo fixes | @@ -186,7 +186,7 @@ prevents ``pip`` from updating system packages. This has to be addressed to
successfully install Scrapy and its dependencies. Here are some proposed
solutions:
-* *(Recommended)* **Don't** use system python, install a new, updated version
+* *(Recommended)* **Don't** use system python. Install a new, updated version
that doesn't conflict with the rest of your system. Here's how to do it using
the `homebrew`_ package manager:
@@ -231,9 +231,9 @@ PyPy
We recommend using the latest PyPy version. The version tested is 5.9.0.
For PyPy3, only Linux installation was tested.
-Most scrapy dependencides now have binary wheels for CPython, but not for PyPy.
-This means that these dependecies will be built during installation.
-On OS X, you are likely to face an issue with building Cryptography dependency,
+Most scrapy dependencies now have binary wheels for CPython, but not for PyPy.
+This means that these dependencies will be built during installation.
+On OS X, you are likely to face an issue with building Cryptography dependency. The
solution to this problem is described
`here <https://github.com/pyca/cryptography/issues/2692#issuecomment-272773481>`_,
that is to ``brew install openssl`` and then export the flags that this command
|
Update address regex
Problem: Currently this regex matches only tz1 addresses. However there
are more supported curves and corresponding addresses.
Solution: Support tz1, tz2, tz3 addresses in the regex. | @@ -54,7 +54,7 @@ systemd_enable = {
ledger_regex = b"ledger:\/\/[\w\-]+\/[\w\-]+\/[\w']+\/[\w']+"
secret_key_regex = b"(encrypted|unencrypted):(?:\w{54}|\w{88})"
-address_regex = b"tz1\w{33}"
+address_regex = b"tz[123]\w{33}"
# Input validators
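
An illustrative check of the widened pattern, written against `str` patterns for brevity; the deployed pattern above is a bytes regex with the same character classes. Real addresses are base58 strings of this shape, so the samples below only exercise the prefix and length.

```python
import re

address_regex = r"tz[123]\w{33}"

samples = [
    "tz1" + "a" * 33,   # ed25519-derived address shape
    "tz2" + "b" * 33,   # secp256k1-derived
    "tz3" + "c" * 33,   # p256-derived
]
assert all(re.fullmatch(address_regex, s) for s in samples)
assert re.fullmatch(address_regex, "tz4" + "d" * 33) is None  # still rejected
```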
|
Gate boto_elb tests if proper version of moto isn't installed
For Python 2 tests, we can use an older version. But when running
these tests on Python 3, we need a newer version of moto that supports
Python 3.
This gates the tests if the expected version of moto is missing. | from __future__ import absolute_import
import logging
from copy import deepcopy
+import pkg_resources
# import Python Third Party Libs
# pylint: disable=import-error
@@ -45,8 +46,10 @@ except ImportError:
# Import Salt Libs
import salt.config
+import salt.ext.six as six
import salt.loader
import salt.modules.boto_elb as boto_elb
+import salt.utils.versions
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
@@ -63,11 +66,32 @@ conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key,
boto_conn_parameters = {'aws_access_key_id': access_key,
'aws_secret_access_key': secret_key}
instance_parameters = {'instance_type': 't1.micro'}
+required_moto = '0.3.7'
+required_moto_py3 = '1.0.1'
+
+
+def _has_required_moto():
+ '''
+ Returns True or False depending on if ``moto`` is installed and at the correct version,
+ depending on what version of Python is running these tests.
+ '''
+ if not HAS_MOTO:
+ return False
+ else:
+ moto_version = salt.utils.versions.LooseVersion(pkg_resources.get_distribution('moto').version)
+ if moto_version < required_moto:
+ return False
+ elif six.PY3 and moto_version < required_moto_py3:
+ return False
+
+ return True
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
+@skipIf(_has_required_moto() is False, 'The moto module must be >= to {0} for '
+ 'PY2 or {1} for PY3.'.format(required_moto, required_moto_py3))
class BotoElbTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.boto_elb module
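
A small sketch of the comparison the gate relies on, using the stdlib class that Salt's version helper is based on here (an assumption about the Salt internals of this era): `LooseVersion` compares sanely against plain version strings, so the thresholds can stay as the string constants defined above.

```python
from distutils.version import LooseVersion

required_moto = '0.3.7'
required_moto_py3 = '1.0.1'

installed = LooseVersion('0.4.31')
assert installed >= required_moto        # new enough for Python 2
assert installed < required_moto_py3     # but too old for Python 3
```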
|
tgz_mdf is misspelled
tgz_mdf should be changed to tgz_md5 to make it work | @@ -15,7 +15,7 @@ class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
- tgz_mdf = 'c58f30108f718f92721af3b95e74349a'
+ tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
|
README.md: Add link to bug report guidelines.
Fixes | @@ -103,7 +103,8 @@ discussion mailing list](#community) and uses [GitHub issues
][gh-issues]. There are also lists for the [Android][email-android]
and [iOS][email-ios] apps. Feel free to send any questions or
suggestions of areas where you'd love to see more documentation to the
-relevant list! Please report any security issues you discover to
+relevant list! Check out our [bug report guidelines][bug-report]
+before submitting. Please report any security issues you discover to
[email protected].
* **App codebases**. This repository is for the Zulip server and web
@@ -159,6 +160,7 @@ and posting on whether or not it worked is valuable.
[transifex]: https://zulip.readthedocs.io/en/latest/translating.html#testing-translations
[z-org]: https://github.com/zulip/zulip.github.io
[code-review]: https://chat.zulip.org/#narrow/stream/code.20review
+[bug-report]: http://zulip.readthedocs.io/en/latest/bug-reports.html
## Google Summer of Code
|
Removed useless Json file from zip savefile
_save_attributes is already stored in config, no need for json version
of it | @@ -90,6 +90,7 @@ class Serializable(object):
object_type, save_attributes = cls._load_pickle(zip_file, config_path).values()
loaded_object = object_type.__new__(object_type)
+ setattr(loaded_object, '_save_attributes', save_attributes)
for att, method in save_attributes.items():
method = method[:-1] if method[-1] is '!' else method
@@ -129,7 +130,7 @@ class Serializable(object):
"""
if not hasattr(self, '_save_attributes'):
- self._save_attributes = dict(_save_attributes='json')
+ self._save_attributes = dict()
self._save_attributes.update(attr_dict)
def _post_load(self):
|
Rename parameter: is_cum_func -> numeric_only
This uses a more general name for the argument.
It's suggested by at | @@ -91,7 +91,7 @@ class GroupBy(object, metaclass=ABCMeta):
return [s.spark.column for s in self._agg_columns]
@abstractmethod
- def _apply_series_op(self, op, should_resolve: bool = False, is_cum_func: bool = False):
+ def _apply_series_op(self, op, should_resolve: bool = False, numeric_only: bool = False):
pass
# TODO: Series support is not implemented yet.
@@ -744,7 +744,7 @@ class GroupBy(object, metaclass=ABCMeta):
return self._apply_series_op(
lambda sg: sg._kser._cum(F.max, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
- is_cum_func=True,
+ numeric_only=True,
)
def cummin(self):
@@ -793,7 +793,7 @@ class GroupBy(object, metaclass=ABCMeta):
return self._apply_series_op(
lambda sg: sg._kser._cum(F.min, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
- is_cum_func=True,
+ numeric_only=True,
)
def cumprod(self):
@@ -842,7 +842,7 @@ class GroupBy(object, metaclass=ABCMeta):
return self._apply_series_op(
lambda sg: sg._kser._cumprod(True, part_cols=sg._groupkeys_scols),
should_resolve=True,
- is_cum_func=True,
+ numeric_only=True,
)
def cumsum(self):
@@ -891,7 +891,7 @@ class GroupBy(object, metaclass=ABCMeta):
return self._apply_series_op(
lambda sg: sg._kser._cum(F.sum, True, part_cols=sg._groupkeys_scols),
should_resolve=True,
- is_cum_func=True,
+ numeric_only=True,
)
def apply(self, func, *args, **kwargs):
@@ -2386,11 +2386,11 @@ class DataFrameGroupBy(GroupBy):
agg_columns=item,
)
- def _apply_series_op(self, op, should_resolve: bool = False, is_cum_func: bool = False):
+ def _apply_series_op(self, op, should_resolve: bool = False, numeric_only: bool = False):
applied = []
for column in self._agg_columns:
applied.append(op(column.groupby(self._groupkeys)))
- if is_cum_func:
+ if numeric_only:
applied = [col for col in applied if isinstance(col.spark.data_type, NumericType)]
if not applied:
raise DataError("No numeric types to aggregate")
@@ -2523,8 +2523,8 @@ class SeriesGroupBy(GroupBy):
return partial(property_or_func, self)
raise AttributeError(item)
- def _apply_series_op(self, op, should_resolve: bool = False, is_cum_func: bool = False):
- if is_cum_func and not isinstance(self._agg_columns[0].spark.data_type, NumericType):
+ def _apply_series_op(self, op, should_resolve: bool = False, numeric_only: bool = False):
+ if numeric_only and not isinstance(self._agg_columns[0].spark.data_type, NumericType):
raise DataError("No numeric types to aggregate")
kser = op(self)
if should_resolve:
|
Allow the emailAddress field to be empty and don't supply a default value. Email address is an optional field
and makes DNs... interesting. | @@ -575,7 +575,7 @@ def create_ca(ca_name,
L='Salt Lake City',
O='SaltStack',
OU=None,
- emailAddress='[email protected]',
+ emailAddress=None,
fixmode=False,
cacert_path=None,
ca_filename=None,
@@ -605,7 +605,7 @@ def create_ca(ca_name,
OU
organizational unit, default is None
emailAddress
- email address for the CA owner, default is '[email protected]'
+ email address for the CA owner, default is None
cacert_path
absolute path to ca certificates root directory
ca_filename
@@ -698,6 +698,7 @@ def create_ca(ca_name,
if OU:
ca.get_subject().OU = OU
ca.get_subject().CN = CN
+ if emailAddress:
ca.get_subject().emailAddress = emailAddress
ca.gmtime_adj_notBefore(0)
@@ -854,7 +855,7 @@ def create_csr(ca_name,
L='Salt Lake City',
O='SaltStack',
OU=None,
- emailAddress='[email protected]',
+ emailAddress=None,
subjectAltName=None,
cacert_path=None,
ca_filename=None,
@@ -886,7 +887,7 @@ def create_csr(ca_name,
OU
organizational unit, default is None
emailAddress
- email address for the request, default is '[email protected]'
+ email address for the request, default is None
subjectAltName
valid subjectAltNames in full form, e.g. to add DNS entry you would call
this function with this value:
@@ -998,6 +999,7 @@ def create_csr(ca_name,
if OU:
req.get_subject().OU = OU
req.get_subject().CN = CN
+ if emailAddress:
req.get_subject().emailAddress = emailAddress
try:
@@ -1074,7 +1076,7 @@ def create_self_signed_cert(tls_dir='tls',
L='Salt Lake City',
O='SaltStack',
OU=None,
- emailAddress='[email protected]',
+ emailAddress=None,
cacert_path=None,
cert_filename=None,
digest='sha256',
@@ -1100,7 +1102,7 @@ def create_self_signed_cert(tls_dir='tls',
OU
organizational unit, default is None
emailAddress
- email address for the request, default is '[email protected]'
+ email address for the request, default is None
cacert_path
absolute path to ca certificates root directory
digest
@@ -1171,6 +1173,7 @@ def create_self_signed_cert(tls_dir='tls',
if OU:
cert.get_subject().OU = OU
cert.get_subject().CN = CN
+ if emailAddress:
cert.get_subject().emailAddress = emailAddress
cert.set_serial_number(_new_serial(tls_dir))
|
[4.0] Remove Python 2 related code from threadedhttp.py
reorder imports
remove UnicodeMixin | # -*- coding: utf-8 -*-
"""Http backend layer, formerly providing a httplib2 wrapper."""
#
-# (C) Pywikibot team, 2007-2019
+# (C) Pywikibot team, 2007-2020
#
# Distributed under the terms of the MIT license.
#
-from __future__ import absolute_import, division, unicode_literals
-
-
__docformat__ = 'epytext'
# standard python libraries
import codecs
import re
-import pywikibot
-from pywikibot.tools import deprecated, PY2, UnicodeMixin
-
-if not PY2:
from urllib.parse import urlparse
-else:
- from urlparse import urlparse
+
+import pywikibot
+from pywikibot.tools import deprecated
_logger = 'comm.threadedhttp'
-class HttpRequest(UnicodeMixin):
+class HttpRequest:
"""Object wrapper for HTTP requests that need to block origin thread.
self.data will be either:
- * a tuple of (dict, unicode) if the request was successful
+ * a tuple of (dict, str) if the request was successful
* an exception
"""
@@ -200,7 +194,7 @@ class HttpRequest(UnicodeMixin):
"""Return the response decoded by the detected encoding."""
return self.decode(self.encoding)
- def __unicode__(self):
+ def __str__(self):
"""Return the response decoded by the detected encoding."""
return self.text
|
Update markdown_widget.html
Updated URL to point to the new location in the MathJax documentation. | <script type="text/html" class="latex-info-text">
<p>
- {%- trans link_start='<a href="https://docs.mathjax.org/en/latest/tex.html#supported-latex-commands">'|safe,
+ {%- trans link_start='<a href="https://docs.mathjax.org/en/latest/input/tex/macros/index.html">'|safe,
link_end='</a>'|safe -%}
You may write formulae using {{ link_start }}LaTeX{{ link_end }} (math mode only) by surrounding
them in <code>$...$</code>.
<p>
{%- if field.use_mathjax %}
{%- trans markdown='<a href="http://commonmark.org/help/">Markdown</a>'|safe,
- latex='<a href="https://docs.mathjax.org/en/latest/tex.html#supported-latex-commands">LaTeX</a>'|safe -%}
+ latex='<a href="https://docs.mathjax.org/en/latest/input/tex/macros/index.html">LaTeX</a>'|safe -%}
You can use {{ markdown }} and {{ latex }} formulae.
{%- endtrans -%}
{% else %}
|
Add mount point to /etc/fstab so that PD is mounted after reboot.
The comment in the code could be better. Confirmed that it worked on my CentOS box; should probably check some others | @@ -434,6 +434,10 @@ class BaseLinuxMixin(virtual_machine.BaseOsMixin):
mnt_cmd = ('sudo mkdir -p {1};sudo mount -o discard {0} {1};'
'sudo chown -R $USER:$USER {1};').format(device_path, mount_path)
self.RemoteHostCommand(mnt_cmd)
+ # add to /etc/fstab to mount on reboot
+ mnt_cmd = ('echo "{0} {1} ext4 defaults" '
+ '| sudo tee -a /etc/fstab').format(device_path, mount_path)
+ self.RemoteHostCommand(mnt_cmd)
def RemoteCopy(self, file_path, remote_path='', copy_to=True):
self.RemoteHostCopy(file_path, remote_path, copy_to)
|
This provides a way to (optionally) add a tag to the DeepVariant docker image, instead of "latest".
.../build_docker_image.sh $project $tag
will make the images
gcr.io/google.com/brain-genomics/deepvariant{,_gpu,_runner}:$tag
The tag will be applied to the build as well as to the images,
in that context. | @@ -38,14 +38,14 @@ steps:
- name: 'gcr.io/cloud-builders/docker'
args:
- 'build'
- - '--tag=gcr.io/$PROJECT_ID/deepvariant'
+ - '--tag=gcr.io/$PROJECT_ID/deepvariant:$TAG_NAME'
- '.'
id: 'cpu'
dir: 'deepvariant/docker'
- name: 'gcr.io/cloud-builders/docker'
args:
- 'build'
- - '--tag=gcr.io/$PROJECT_ID/deepvariant_gpu'
+ - '--tag=gcr.io/$PROJECT_ID/deepvariant_gpu:$TAG_NAME'
- '--build-arg=FROM_IMAGE=nvidia/cuda:8.0-cudnn6-devel-ubuntu16.04'
- '--build-arg=DV_GPU_BUILD=1'
- '.'
@@ -54,12 +54,12 @@ steps:
- name: 'gcr.io/cloud-builders/docker'
args:
- 'build'
- - '--tag=gcr.io/$PROJECT_ID/deepvariant_runner'
+ - '--tag=gcr.io/$PROJECT_ID/deepvariant_runner:$TAG_NAME'
- '--file=Dockerfile.runner'
- '.'
id: 'runner'
dir: 'deepvariant/docker'
images:
- - 'gcr.io/$PROJECT_ID/deepvariant'
- - 'gcr.io/$PROJECT_ID/deepvariant_gpu'
- - 'gcr.io/$PROJECT_ID/deepvariant_runner'
+ - 'gcr.io/$PROJECT_ID/deepvariant:$TAG_NAME'
+ - 'gcr.io/$PROJECT_ID/deepvariant_gpu:$TAG_NAME'
+ - 'gcr.io/$PROJECT_ID/deepvariant_runner:$TAG_NAME'
|
Added simple invocation tests for render-function
TRAC#7301 | @@ -193,3 +193,17 @@ def test_wrong_upload_headers_rfc7233(rf: RequestFactory):
rf, upload_id, content, 0, 10)
post_request.META["HTTP_CONTENT_RANGE"] = "bytes 54343-3223/*"
assert isinstance(widget.handle_ajax(post_request), HttpResponseBadRequest)
+
+
+def test_render():
+ widget = AjaxUploadWidget(ajax_target_path="/ajax", multifile=True)
+ render_result = widget.render("some_name", None)
+ assert isinstance(render_result, str)
+ render_result = widget.render("some_name", (uuid.uuid4(), uuid.uuid4()))
+ assert isinstance(render_result, str)
+
+ widget = AjaxUploadWidget(ajax_target_path="/ajax", multifile=False)
+ render_result = widget.render("some_name", None)
+ assert isinstance(render_result, str)
+ render_result = widget.render("some_name", (uuid.uuid4(), uuid.uuid4()))
+ assert isinstance(render_result, str)
|
<fix>[installation]: clean useless zops cron after os upgrade
Resolves: ZSTAC-49156,ZSTAC-48956 | @@ -3821,9 +3821,17 @@ check_ha_need_upgrade()
}
add_zops_init_cronjob() {
+ echo_subtitle "Add zops cron job"
if [ ! -f /tmp/zops_init.sock ];then
touch /tmp/zops_init.sock
fi
+ # remove not in current os version cron job
+ ALL_ZOPS_CRON=`crontab -l |grep zops_init.py |grep -v ${ISO_ARCH}/${ISO_VER} | cut -d " " -f10`
+ CRON_ARR=(`echo $ALL_ZOPS_CRON | tr '\n' ' '`)
+ for job in $CRON_ARR;do
+ parseJob=${job//'/'/'\/'}
+ sed -i "/${parseJob}/d" /var/spool/cron/root
+ done
ZOPS_SERVER_INIT="python /opt/zstack-dvd/$BASEARCH/$ZSTACK_RELEASE/zops/zops_init.py"
COUNT=`crontab -l |grep "$ZOPS_SERVER_INIT" | grep -v "grep" |wc -l`
if [ "$COUNT" -eq 1 ];then
|
Update test_nonlinear.py
Fixing an assert failure if SCIP is compiled using quad precision | @@ -261,7 +261,7 @@ def test_gastrans():
if scip.getStatus() == 'timelimit':
pytest.skip()
- assert abs(scip.getPrimalbound() - 89.08584) < 1.0e-9
+ assert abs(scip.getPrimalbound() - 89.08584) < 1.0e-6
def test_quad_coeffs():
"""test coefficient access method for quadratic constraints"""
|
add regression test coverage for buildtest history query --output.
Also we add coverage for Tee class which was required to capture output from
buildtest build | +import os
+import shutil
+
import pytest
-from buildtest.cli.history import build_history, query_builds
+from buildtest.cli.build import BuildTest, Tee
+from buildtest.cli.history import BUILD_HISTORY_DIR, build_history, query_builds
+from buildtest.config import SiteConfiguration
+from buildtest.defaults import BUILDTEST_ROOT, VAR_DIR
+from buildtest.system import BuildTestSystem
-def test_build_history():
+def test_build_history_list():
class args:
history = "list"
terse = False
@@ -27,16 +34,55 @@ def test_build_history():
# 'buildtest build history list --terse --no-header'
build_history(args)
+
+def test_build_history_query():
+
+ fname = os.path.join(VAR_DIR, "output.txt")
+ configuration = SiteConfiguration()
+
+ configuration.detect_system()
+ configuration.validate()
+
+ system = BuildTestSystem()
+
+ with Tee(fname):
+ cmd = BuildTest(
+ buildspecs=[os.path.join(BUILDTEST_ROOT, "tutorials", "vars.yml")],
+ buildtest_system=system,
+ configuration=configuration,
+ )
+ cmd.build()
+
+ build_history_dir = cmd.get_build_history_dir()
+ shutil.move(fname, os.path.join(build_history_dir, "output.txt"))
+
+ num_ids = list(range(len(os.listdir(BUILD_HISTORY_DIR))))[-1]
+ print(num_ids)
+
class args:
history = "query"
- id = 0
+ id = num_ids
log = False
+ output = False
- # 'buildtest build history query 0'
+ # 'buildtest build history query <ID>'
+ build_history(args)
+
+ class args:
+ history = "query"
+ id = num_ids
+ log = False
+ output = True
+
+ # 'buildtest build history query --output <ID>'
build_history(args)
def test_invalid_buildid():
with pytest.raises(SystemExit):
- query_builds(build_id=-1, log_option=True)
+ query_builds(build_id=-1, log_option=True, output=False)
+
+ shutil.rmtree(BUILD_HISTORY_DIR)
+ with pytest.raises(SystemExit):
+ query_builds(build_id=0, log_option=False, output=False)
|
Fix run_in_executor call in user guide
Addresses | @@ -177,7 +177,7 @@ use `.IOLoop.run_in_executor`, which returns
``Futures`` that are compatible with coroutines::
async def call_blocking():
- await IOLoop.current().run_in_executor(blocking_func, args)
+ await IOLoop.current().run_in_executor(None, blocking_func, args)
Parallelism
^^^^^^^^^^^
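
For context, a minimal runnable sketch of the corrected call, with made-up function and argument names: the first parameter of `IOLoop.run_in_executor` selects the executor, with `None` meaning the loop's default `ThreadPoolExecutor`, and the blocking callable plus its arguments follow.

```python
import time
from tornado.ioloop import IOLoop

def blocking_func(seconds):
    time.sleep(seconds)        # stands in for any blocking library call
    return seconds

async def call_blocking():
    # None -> use the IOLoop's default ThreadPoolExecutor
    result = await IOLoop.current().run_in_executor(None, blocking_func, 0.1)
    return result

print(IOLoop.current().run_sync(call_blocking))   # 0.1
```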
|
update plexurl
It appears plex is doing away with the plexapp.com url.
It no longer has a valid SAN cert for that domain as it redirects to plex.tv now.
Update myplex.py to reflect new signin URL. | @@ -29,7 +29,7 @@ class MyPlexAccount(PlexObject):
timeout (int): timeout in seconds on initial connect to myplex (default config.TIMEOUT).
Attributes:
- SIGNIN (str): 'https://my.plexapp.com/users/sign_in.xml'
+ SIGNIN (str): 'https://plex.tv/users/sign_in.xml'
key (str): 'https://plex.tv/users/account'
authenticationToken (str): Unknown.
certificateVersion (str): Unknown.
@@ -67,7 +67,7 @@ class MyPlexAccount(PlexObject):
REMOVEINVITE = 'https://plex.tv/api/invites/requested/{userId}?friend=0&server=1&home=0' # delete
REQUESTED = 'https://plex.tv/api/invites/requested' # get
REQUESTS = 'https://plex.tv/api/invites/requests' # get
- SIGNIN = 'https://my.plexapp.com/users/sign_in.xml' # get with auth
+ SIGNIN = 'https://plex.tv/users/sign_in.xml' # get with auth
WEBHOOKS = 'https://plex.tv/api/v2/user/webhooks' # get, post with data
# Key may someday switch to the following url. For now the current value works.
# https://plex.tv/api/v2/user?X-Plex-Token={token}&X-Plex-Client-Identifier={clientId}
|
DOC: updated and fixed changelog
Updated the changelog for line length, trailing whitespace, and new changes. | @@ -11,17 +11,22 @@ This project adheres to [Semantic Versioning](http://semver.org/).
- Expanded Constellation utility by:
- Adding common properties: `empty`, `index`, `date`, `today`, `yesterday`,
`tomorrow`, and `variables` (#764)
- - Improving the printed output to inform user of the Constellation contents (#764)
+ - Improving the printed output to inform user of the Constellation contents
+ (#764)
- Added methods to download data and create a common time index. (#764)
- - Added utils.listify, a function that returns a list of whatever is input. (#766)
+ - Added utils.listify, a function that returns a list of whatever is input.
+ (#766)
- Added a warning for download requests that result in an empty date range.
- Deprecations
- Documentation
+ - Added missing information about date formatting to file listing docstrings.
- Bug Fix
- - Changed pysat.Instruments.orbits iteration to return a copy of the Instrument
- rather than the Instrument itself. Provides robustness against garbage collection. (#770)
+ - Changed pysat.Instruments.orbits iteration to return a copy of the
+ Instrument rather than the Instrument itself. Provides robustness against
+ garbage collection. (#770)
- Improved error messages for cases where slice of data may not exist (#761)
- Improved windows compatibility (#57, #790)
+ - Fixed Instrument.load bug that prevented use of instrument specific kwargs
- Maintenance
- Changed pysat.Instrument from treating all support functions as partial
functions to retaining the original form provided by developer
|
Update BeyondTrust_Password_Safe_description.md
Updated description. | -To Generate the API key, enter the UI, go to Configuration, under General go to API Registration
\ No newline at end of file
+To configure an integration instance, you need your BeyondTrust API key. The API key is generated after you configure an API Registration. For detailed instructions, see the [BeyondTrust Password Safe Admin Guide](https://www.beyondtrust.com/docs/password-safe/documents/6-9/ps-admin-6-9-0.pdf).
|
Simplify default calib file handling
If an empty string (`''`) is passed as the `calibration_file` value, a default
calib setting will be used by DepthAI. Message from the console:
```
depthai: Calibration file is not specified, will use default setting;
```
So, I think we can collapse the file handling a bit. | @@ -12,13 +12,9 @@ blob_config_fpath = './resources/nn/object_detection_4shave/object_detection.jso
if path.exists('./resources/depthai.calib'):
calib_fpath = './resources/depthai.calib'
- print("Using Calibration File: depthai.calib")
-
-elif path.exists('./resources/default.calib'):
- calib_fpath = './resources/default.calib'
- print("Using Calibration File: default.calib")
-
+ print("Using Custom Calibration File: depthai.calib")
else:
- print("ERROR: NO CALIBRATION FILE")
+ calib_fpath = ''
+ print("No calibration file. Using Calibration Defaults.")
pipeline_config_fpath = './resources/config.json'
|
Fix issue with STORAGE_ENGINE_CACHE_SIZE parameter
In the mongodb docker image, STORAGE_ENGINE_CACHE_SIZE
will always be set to '' (two quotation marks)
and verification will fail. So it's impossible to
run the image without --storage-engine-cache-size set.
This change removes the double quotes when assigning
the default value to STORAGE_ENGINE_CACHE_SIZE. | @@ -66,7 +66,7 @@ if [[ -z "${REPLICA_SET_NAME:?REPLICA_SET_NAME not specified. Exiting!}" || \
-z "${MONGODB_KEY_FILE_PATH:?MONGODB_KEY_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CA_FILE_PATH:?MONGODB_CA_FILE_PATH not specified. Exiting!}" || \
-z "${MONGODB_CRL_FILE_PATH:?MONGODB_CRL_FILE_PATH not specified. Exiting!}" || \
- -z "${STORAGE_ENGINE_CACHE_SIZE:=''}" ]] ; then
+ -z ${STORAGE_ENGINE_CACHE_SIZE:=''} ]] ; then
#-z "${MONGODB_KEY_FILE_PASSWORD:?MongoDB Key File Password not specified. Exiting!}" || \
exit 1
else
|
schema fix type converter errors on string input
If validation applies to an int value supplied as a string, a type
error occurs, because dataclass_json runs before the type is converted. Using the
right fields function fixes it. | @@ -607,12 +607,17 @@ class RemoteNode(Node):
type: str = constants.ENVIRONMENTS_NODES_REMOTE
address: str = ""
port: int = field(
- default=22, metadata=metadata(validate=validate.Range(min=1, max=65535))
+ default=22,
+ metadata=metadata(
+ field_function=fields.Int, validate=validate.Range(min=1, max=65535)
+ ),
)
public_address: str = ""
public_port: int = field(
default=22,
- metadata=metadata(validate=validate.Range(min=1, max=65535)),
+ metadata=metadata(
+ field_function=fields.Int, validate=validate.Range(min=1, max=65535)
+ ),
)
username: str = ""
password: str = ""
@@ -668,7 +673,7 @@ class Environment:
class EnvironmentRoot:
max_concurrency: int = field(
default=1,
- metadata=metadata(validate=validate.Range(min=1)),
+ metadata=metadata(field_function=fields.Int, validate=validate.Range(min=1)),
)
warn_as_error: bool = field(default=False)
environments: List[Environment] = field(default_factory=list)
@@ -790,9 +795,15 @@ class TestCase(BaseTestCaseFilter):
)
# run this group of test cases several times
# default is 1
- times: int = field(default=1, metadata=metadata(validate=validate.Range(min=1)))
+ times: int = field(
+ default=1,
+ metadata=metadata(field_function=fields.Int, validate=validate.Range(min=1)),
+ )
# retry times if fails. Default is 0, not to retry.
- retry: int = field(default=0, metadata=metadata(validate=validate.Range(min=0)))
+ retry: int = field(
+ default=0,
+ metadata=metadata(field_function=fields.Int, validate=validate.Range(min=0)),
+ )
# each case with this rule will be run in a new environment.
use_new_environment: bool = False
# Once it's set, failed test result will be rewrite to success
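
A minimal sketch (schema and field names are hypothetical) of why `field_function=fields.Int` matters: marshmallow's `Integer` field converts a string input to `int` during deserialization, before validators such as `Range` run, so a value like `"22"` coming from a variable no longer trips a type error.

```python
from marshmallow import Schema, fields, validate

class NodeSchema(Schema):                      # hypothetical schema, for illustration
    port = fields.Int(validate=validate.Range(min=1, max=65535))

# The string is converted to int before Range() runs, so validation passes.
print(NodeSchema().load({"port": "22"}))       # {'port': 22}
```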
|
Fix Bon Appetit
Time was manually set to 0 as it appears Bon Appetit no longer provides times
on their website. Please advise if a different workaround would be preferred. | @@ -12,10 +12,10 @@ class BonAppetit(AbstractScraper):
return self.soup.find('h1', {'itemprop': 'name'}).get_text()
def total_time(self):
- return get_minutes(self.soup.find('span', {'itemprop': 'totalTime'}))
+ return 0
def ingredients(self):
- ingredients_html = self.soup.findAll('span', {'itemprop': "ingredients"})
+ ingredients_html = self.soup.findAll('li', {'class': "ingredient"})
return [
normalize_string(ingredient.get_text())
@@ -23,7 +23,8 @@ class BonAppetit(AbstractScraper):
]
def instructions(self):
- instructions_html = self.soup.find('div', {'class': 'prep-steps'}).findAll('li')
+ instructions_html = self.soup.find('div', {'class':
+ 'steps-wrapper'}).findAll('li')
return '\n'.join([
normalize_string(instruction.get_text())
|
Add back missing bots/load handler.
This wasn't merged correctly into the final webapp2->Flask migration
CL. | @@ -161,6 +161,7 @@ handlers = [
('/add-external-user-permission', configuration.AddExternalUserPermission),
('/bots', bots.Handler),
('/bots/dead', bots.DeadBotsHandler),
+ ('/bots/load', bots.JsonHandler),
('/commit-range', commit_range.Handler),
('/commit-range/load', commit_range.JsonHandler),
('/configuration', configuration.Handler),
|
STY: MetaLabel defaults and docstring
Updated the MetaLabel default input for 'plot' and
the Instrument docstring. | @@ -76,29 +76,14 @@ class Instrument(object):
if True, the list of files found will be checked to
ensure the filesizes are greater than zero. Empty files are
removed from the stored list of files.
- units_label : str
- String used to label units in storage. Defaults to 'units'.
- name_label : str
- String used to label long_name in storage. Defaults to 'name'.
- notes_label : str
- label to use for notes in storage. Defaults to 'notes'
- desc_label : str
- label to use for variable descriptions in storage. Defaults to 'desc'
- plot_label : str
- label to use to label variables in plots. Defaults to 'label'
- axis_label : str
- label to use for axis on a plot. Defaults to 'axis'
- scale_label : str
- label to use for plot scaling type in storage. Defaults to 'scale'
- min_label : str
- label to use for typical variable value min limit in storage.
- Defaults to 'value_min'
- max_label : str
- label to use for typical variable value max limit in storage.
- Defaults to 'value_max'
- fill_label : str
- label to use for fill values. Defaults to 'fill' but some
- implementations will use 'FillVal'
+ labels : dict
+ Dict where keys are the label attribute names and the values are tuples
+ that have the label values and value types in that order.
+ (default={'units': ('units', str), 'name': ('long_name', str),
+ 'notes': ('notes', str), 'desc': ('desc', str),
+ 'plot': ('plot', str), 'axis': ('axis', str),
+ 'scale': ('scale', str), 'min_val': ('value_min', float),
+ 'max_val': ('value_max', float), 'fill_val': ('fill', float)})
Attributes
----------
@@ -116,6 +101,10 @@ class Instrument(object):
day of year for loaded data
files : pysat.Files
interface to instrument files
+ labels : pysat.MetaLabels
+ Class containing Meta data labels
+ meta_labels : dict
+ Dict containing defaults for new Meta data labels
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
@@ -127,11 +116,10 @@ class Instrument(object):
Note
----
- Pysat attempts to load the module platform_name.py located in
- the pysat/instruments directory. This module provides the underlying
- functionality to download, load, and clean instrument data.
- Alternatively, the module may be supplied directly
- using keyword inst_module.
+ Pysat attempts to load the module platform_name.py located in the
+ pysat/instruments directory. This module provides the underlying
+ functionality to download, load, and clean instrument data. Alternatively,
+ the module may be supplied directly using keyword inst_module.
Examples
--------
@@ -177,7 +165,7 @@ class Instrument(object):
ignore_empty_files=False,
labels={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
- 'plot': ('plot_label', str), 'axis': ('axis', str),
+ 'plot': ('plot', str), 'axis': ('axis', str),
'scale': ('scale', str),
'min_val': ('value_min', float),
'max_val': ('value_max', float),
|
home.py: move timezone to register_ret.
Moving the user_profile data section down into fetch_initial_state_data
so it entirely pulls from register_ret in | @@ -221,7 +221,6 @@ def home_real(request):
avatar_url = avatar_url(user_profile),
avatar_url_medium = avatar_url(user_profile, medium=True),
avatar_source = user_profile.avatar_source,
- timezone = user_profile.timezone,
# Stream message notification settings:
stream_desktop_notifications_enabled = user_profile.enable_stream_desktop_notifications,
@@ -310,6 +309,7 @@ def home_real(request):
'referrals',
'subscriptions',
'unsubscribed',
+ 'timezone',
'twenty_four_hour_time',
'zulip_version',
]
|
fix spelling error
constributions should be contributions | </div>
---
-MatchZoo is a toolkit for text matching. It was developed with a focus on facilitating the designing, comparing and sharing of deep text matching models. There are a number of deep matching methods, such as DRMM, MatchPyramid, MV-LSTM, aNMM, DUET, ARC-I, ARC-II, DSSM, and CDSSM, designed with a unified interface. Potential tasks related to MatchZoo include document retrieval, question answering, conversational response ranking, paraphrase identification, etc. We are always happy to receive any code constributions, suggestions, comments from all our MatchZoo users.
+MatchZoo is a toolkit for text matching. It was developed with a focus on facilitating the designing, comparing and sharing of deep text matching models. There are a number of deep matching methods, such as DRMM, MatchPyramid, MV-LSTM, aNMM, DUET, ARC-I, ARC-II, DSSM, and CDSSM, designed with a unified interface. Potential tasks related to MatchZoo include document retrieval, question answering, conversational response ranking, paraphrase identification, etc. We are always happy to receive any code contributions, suggestions, comments from all our MatchZoo users.
<table>
<tr>
|
Unparsers: don't emit anything for empty sequences of tokens
TN: | @@ -576,7 +576,9 @@ package body ${ada_lib_name}.Unparsing.Implementation is
(Result : in out Unparsing_Buffer;
First_Token, Last_Token : Token_Type) is
begin
- if First_Token = No_Token and then Last_Token = No_Token then
+ if (First_Token = No_Token and then Last_Token = No_Token)
+ or else Last_Token < First_Token
+ then
return;
end if;
pragma Assert (First_Token /= No_Token and then Last_Token /= No_Token);
|
Update publish script
1. Add quotes in script
2. Upgrade azure-storage-blob dependencies | set -e
unset AZURE_CLI_DIAGNOSTICS_TELEMETRY
-pip install azure-storage==0.36.0
+pip install azure-storage-blob==1.1.0
wd=`cd $(dirname $0); pwd`
-if [ -z $PUBLISH_STORAGE_SAS ] || [ -z $PUBLISH_STORAGE_ACCT ] || [ -z $PUBLISH_CONTAINER ]; then
+if [ -z "$PUBLISH_STORAGE_SAS" ] || [ -z "$PUBLISH_STORAGE_ACCT" ] || [ -z "$PUBLISH_CONTAINER" ]; then
echo 'Missing publish storage account credential. Skip publishing to store.'
exit 0
fi
@@ -16,13 +16,13 @@ echo 'Generate artifacts'
. $wd/artifacts.sh
echo 'Upload artifacts to store'
-python $wd/publish.py store -b $share_folder -c $PUBLISH_CONTAINER -a $PUBLISH_STORAGE_ACCT -s $PUBLISH_STORAGE_SAS
+python $wd/publish.py store -b $share_folder -c $PUBLISH_CONTAINER -a $PUBLISH_STORAGE_ACCT -s "$PUBLISH_STORAGE_SAS"
-if [ -z $EDGE_STORAGE_SAS ] || [ -z $EDGE_STORAGE_ACCT ] || [ -z $EDGE_CONTAINER ]; then
+if [ -z "$EDGE_STORAGE_SAS" ] || [ -z "$EDGE_STORAGE_ACCT" ] || [ -z "$EDGE_CONTAINER" ]; then
echo 'Missing edge storage account credential. Skip publishing the edge build.'
exit 0
fi
echo 'Upload artifacts to edge feed'
-python $wd/publish.py nightly -b $share_folder -c $EDGE_CONTAINER -a $EDGE_STORAGE_ACCT -s $EDGE_STORAGE_SAS
+python $wd/publish.py nightly -b $share_folder -c $EDGE_CONTAINER -a $EDGE_STORAGE_ACCT -s "$EDGE_STORAGE_SAS"
|
Python3 fixes for CentOS/RHEL
1) python3 should be python34
2) python34-pip does not exist, you must install python34-setuptools and then: easy_install-3.4 pip | @@ -20,7 +20,8 @@ Python 3:
```no-highlight
# yum install -y epel-release
-# yum install -y gcc python3 python3-devel python3-pip libxml2-devel libxslt-devel libffi-devel graphviz openssl-devel
+# yum install -y gcc python34 python34-devel python34-setuptools libxml2-devel libxslt-devel libffi-devel graphviz openssl-devel
+# easy_install-3.4 pip
```
Python 2:
|
Add from_bytes() methods to signing library
Add methods for creating keys from bytes to avoid unnecessary conversion to and
from hex. | @@ -47,11 +47,14 @@ class Secp256k1PrivateKey(PrivateKey):
def secp256k1_private_key(self):
return self._private_key
+ @staticmethod
+ def from_bytes(byte_str):
+ return Secp256k1PrivateKey(secp256k1.PrivateKey(byte_str, ctx=__CTX__))
+
@staticmethod
def from_hex(hex_str):
try:
- priv = binascii.unhexlify(hex_str)
- return Secp256k1PrivateKey(secp256k1.PrivateKey(priv, ctx=__CTX__))
+ return Secp256k1PrivateKey.from_bytes(binascii.unhexlify(hex_str))
except Exception as e:
raise ParseError('Unable to parse hex private key: {}'.format(e))
@@ -79,15 +82,17 @@ class Secp256k1PublicKey(PublicKey):
warnings.simplefilter('ignore')
return self._public_key.serialize()
+ @staticmethod
+ def from_bytes(byte_str):
+ public_key = __PK__.deserialize(byte_str)
+ return Secp256k1PublicKey(secp256k1.PublicKey(public_key, ctx=__CTX__))
+
@staticmethod
def from_hex(hex_str):
try:
- public_key = __PK__.deserialize(binascii.unhexlify(hex_str))
-
- return Secp256k1PublicKey(
- secp256k1.PublicKey(public_key, ctx=__CTX__))
+ return Secp256k1PublicKey.from_bytes(binascii.unhexlify(hex_str))
except Exception as e:
- raise ParseError('Unable to parse public key: {}'.format(e))
+ raise ParseError('Unable to parse hex public key: {}'.format(e))
class Secp256k1Context(Context):
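
A hypothetical usage sketch: a caller that already holds raw key bytes can now construct keys directly, while `from_hex` remains the equivalent path for hex strings. `os.urandom(32)` below stands in for a properly generated secp256k1 private scalar.

```python
import os

raw = os.urandom(32)                                      # stand-in for a real private key
priv = Secp256k1PrivateKey.from_bytes(raw)                # no hexlify/unhexlify round-trip
priv_via_hex = Secp256k1PrivateKey.from_hex(raw.hex())    # equivalent, via the old path
```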
|
Fixes taxonomy of R_uc observable
_process_tex wasn't set, so it was still set to the last value from the
loop above and hence the taxonomy was wrong.
The R_uc observable will now be listed under both Z->uu and Z->cc. | @@ -135,7 +135,8 @@ for _f in (_leptons, _uquarks, _dquarks):
_obs_name = "R_uc"
_obs = flavio.classes.Observable(_obs_name)
_obs.tex = r"$R_{uc}^0$"
-_obs.add_taxonomy(r'Process :: $Z^0$ decays :: Flavour conserving decays :: $' + _process_tex + r"$")
+for q in ('u', 'c'):
+ _obs.add_taxonomy(r"Process :: $Z^0$ decays :: Flavour conserving decays :: $Z^0\to {}\bar {}$".format(q, q))
_obs.set_description(r"Average ratio of $Z^0$ partial widths to $u$ or $c$ pairs vs. all hadrons")
flavio.classes.Prediction(_obs_name, Rq1q2('u', 'c'))
|
fix FeaturedTheme icon, set icon_src for all text_enabled plugins
Any plugin where `text_enabled=True` should
define an icon_src, but it's only used in the admin.
Just give them all a simple icon instead of a thumbnail | +from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from cms.extensions.extension_pool import extension_pool
-from sorl.thumbnail import get_thumbnail
-
from .models import (Project, Theme, FeaturedTheme, FeaturedProject,
ProjectList, NetworkGroup, NetworkGroupList, WorkingGroup,
SignupForm, SideBarExtension)
@@ -22,8 +21,7 @@ class FeaturedThemePlugin(CMSPluginBase):
return 'Theme: %s' % instance.theme.name
def icon_src(self, instance):
- im = get_thumbnail(instance.theme.picture, '50x50', quality=99)
- return im.url
+ return settings.STATIC_URL + "cms/img/icons/plugins/snippet.png"
def render(self, context, instance, placeholder):
context = super(FeaturedThemePlugin, self)\
@@ -102,6 +100,12 @@ class NetworkGroupFlagsPlugin(CMSPluginBase):
render_template = "organisation/networkgroup_flags.html"
text_enabled = True
+ def icon_alt(self, instance):
+ return 'Network Group Flags: %s' % instance.theme.name
+
+ def icon_src(self, instance):
+ return settings.STATIC_URL + "cms/img/icons/plugins/snippet.png"
+
def render(self, context, instance, placeholder):
context = super(NetworkGroupFlagsPlugin, self)\
.render(context, instance, placeholder)
@@ -123,6 +127,12 @@ class WorkingGroupPlugin(CMSPluginBase):
render_template = "organisation/workinggroup_shortlist.html"
text_enabled = True
+ def icon_alt(self, instance):
+ return 'Working Groups'
+
+ def icon_src(self, instance):
+ return settings.STATIC_URL + "cms/img/icons/plugins/snippet.png"
+
def render(self, context, instance, placeholder):
context = super(WorkingGroupPlugin, self)\
.render(context, instance, placeholder)
|
add node.nics indexerror assertion
node.nics get_nic_by_index doesn't catch indexerror currently
This makes for confusing output. Add a try/except to
give a better description of the error in this case. | @@ -185,7 +185,23 @@ class Nics(InitializableMixin):
# when there are more than one nic on the system
number_of_nics = len(self.get_upper_nics())
assert_that(number_of_nics).is_greater_than(0)
- return self.nics[self.get_upper_nics()[index]]
+ try:
+ nic_name = self.get_upper_nics()[index]
+ except IndexError:
+ raise LisaException(
+ f"Attempted get_upper_nics()[{index}], only "
+ f"{number_of_nics} nics are registered in node.nics. "
+ f"Had upper interfaces: {self.get_upper_nics()}"
+ )
+
+ try:
+ nic = self.nics[nic_name]
+ except KeyError:
+ raise LisaException(
+ f"NicInfo for interface {nic_name} not found! "
+ f"Had upper interfaces: {self.get_upper_nics()}"
+ )
+ return nic
def nic_info_is_present(self, nic_name: str) -> bool:
return nic_name in self.get_upper_nics() or nic_name in self.get_lower_nics()
|
run tests on github not self-hosted
the self-hosted runner was an experiment on duo that we are
not using (yet) on develop | @@ -19,7 +19,7 @@ on:
jobs:
docker-build:
name: Build Docker Image
- runs-on: self-hosted
+ runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- uses: actions/setup-python@v2
@@ -40,7 +40,7 @@ jobs:
docker-pytorch-end2end:
name: Docker Torch End2End
needs: docker-build
- runs-on: self-hosted
+ runs-on: ubuntu-18.04
steps:
- name: Run Tests Torch Docker
run: |
@@ -50,7 +50,7 @@ jobs:
docker-tf2-end2end:
name: Docker TF2 End2End
needs: docker-build
- runs-on: self-hosted
+ runs-on: ubuntu-18.04
steps:
- name: Run Tests TF2 Docker
run: |
@@ -60,7 +60,7 @@ jobs:
docker-deepspeech-end2end:
name: Docker Deepspeech End2End
needs: docker-build
- runs-on: self-hosted
+ runs-on: ubuntu-18.04
steps:
- name: Run Tests Deepspeech Docker
run: |
@@ -70,7 +70,7 @@ jobs:
docker-remove:
name: Remove Docker Images
needs: [docker-build, docker-pytorch-end2end, docker-tf2-end2end, docker-deepspeech-end2end]
- runs-on: self-hosted
+ runs-on: ubuntu-18.04
steps:
- name: Run Tests Torch Docker
run: |
|
Drop unneeded filter(None, ...) [ci skip]
(skipping build 'cause this is tiny, untested, and the last one took four
attempts to complete without timing out) | @@ -2539,7 +2539,7 @@ class EditInternalDomainInfoView(BaseInternalDomainSettingsView):
def send_handoff_email(self):
partner_contact = self.internal_settings_form.cleaned_data['partner_contact']
dimagi_contact = self.internal_settings_form.cleaned_data['dimagi_contact']
- recipients = filter(None, [partner_contact, dimagi_contact])
+ recipients = [partner_contact, dimagi_contact]
params = {'contact_name': CouchUser.get_by_username(dimagi_contact).human_friendly_name}
send_html_email_async.delay(
subject="Project Support Transition",
|
Fix forwarding of arguments into kernel function
Summary:
They should be forwarded by their actual type, not their rvalue reference.
This looked like perfect forwarding but actually wasn't.
Pull Request resolved:
ghstack-source-id: | @@ -11,7 +11,7 @@ namespace detail {
template<class FuncType, FuncType* kernel_func, class ReturnType, class... Parameters>
class WrapKernelFunction_<FuncType, kernel_func, ReturnType, guts::typelist::typelist<Parameters...>> final : public c10::OperatorKernel {
public:
- auto operator()(Parameters&&... args) -> decltype((*kernel_func)(std::forward<Parameters>(args)...)) {
+ auto operator()(Parameters... args) -> decltype((*kernel_func)(std::forward<Parameters>(args)...)) {
return (*kernel_func)(std::forward<Parameters>(args)...);
}
};
|
Snowfakery version 7.
Windows compatibility improved, especially in test cases.
Changed the syntax for CSV handling.
Added --output-folder command. | -version = "0.6.1"
+version = "0.7.0"
|
disable script detection by default
pytorch's CTC does not work for script detection as activations are too
peaked and don't capture the entire grapheme during recognition. | @@ -246,7 +246,7 @@ def binarize(threshold, zoom, escale, border, perc, range, low, high):
type=click.Choice(['horizontal-lr', 'horizontal-rl',
'vertical-lr', 'vertical-rl']),
help='Sets principal text direction')
[email protected]('-s/-n', '--script-detect/--no-script-detect', default=True,
[email protected]('-s/-n', '--script-detect/--no-script-detect', default=False,
show_default=True,
help='Enable script detection on segmenter output')
@click.option('-a', '--allowed-scripts', default=None, multiple=True,
|
tv4play: override thumbnail download
some videos don't have og:image | @@ -11,6 +11,7 @@ from svtplay_dl.fetcher.dash import dashparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import OpenGraphThumbMixin
from svtplay_dl.service import Service
+from svtplay_dl.utils.http import download_thumbnails
class Tv4play(Service, OpenGraphThumbMixin):
@@ -61,6 +62,7 @@ class Tv4play(Service, OpenGraphThumbMixin):
self.output["title"] = item["program_nid"]
self.output["episodename"] = item["title"]
self.output["id"] = str(vid)
+ self.output["episodethumbnailurl"] = item["image"]
if vid is None:
yield ServiceError("Cant find video id for the video")
@@ -192,6 +194,9 @@ class Tv4play(Service, OpenGraphThumbMixin):
moreData = False
return items
+ def get_thumbnail(self, options):
+ download_thumbnails(self.output, options, [(False, self.output["episodethumbnailurl"])])
+
class Tv4(Service, OpenGraphThumbMixin):
supported_domains = ["tv4.se"]
|
Remove user.admin checks from access_list handling
* Remove check for user.admin
Related to: | @@ -289,7 +289,6 @@ class QueryParameters:
.distinct("org_unit_id")
.values_list("org_unit_id", flat=True)
)
- if self.user.admin:
access_list.update(self.parameters.get("access").get(filter_key))
items = set(self.get_filter(filter_key) or [])
result = get_replacement_result(items, access_list, raise_exception, return_access=True)
@@ -301,7 +300,7 @@ class QueryParameters:
items = set(group_by.get(filter_key))
org_unit_access_list = self.access.get("aws.organizational_unit", {}).get("read", [])
org_unit_filter = filters.get("org_unit_id", [])
- if "org_unit_id" in filters and access_key == "aws.account" and self.user.admin:
+ if "org_unit_id" in filters and access_key == "aws.account":
access_list = self.parameters.get("access").get(filter_key)
if (
|
server: don't print exceptions for network errors during opening and closing handshakes
Previously, full tracebacks were printed if a connection was reset
during the handshake. Connection resets are to be expected, even in
controlled environments. Simply log them as info rather than warning,
and don't print a full traceback.
Closes | @@ -62,6 +62,9 @@ class WebSocketServerProtocol(WebSocketCommonProtocol):
path = yield from self.handshake(
origins=self.origins, subprotocols=self.subprotocols,
extra_headers=self.extra_headers)
+ except ConnectionError as exc:
+ logger.info('Connection error during opening handshake', exc_info=True)
+ raise
except Exception as exc:
if self._is_server_shutting_down(exc):
response = ('HTTP/1.1 503 Service Unavailable\r\n\r\n'
@@ -89,6 +92,11 @@ class WebSocketServerProtocol(WebSocketCommonProtocol):
try:
yield from self.close()
+ except ConnectionError as exc:
+ if self._is_server_shutting_down(exc):
+ pass
+ logger.info('Connection error in closing handshake', exc_info=True)
+ raise
except Exception as exc:
if self._is_server_shutting_down(exc):
pass
|
FilePage.download(): add revision parameter to download arbitrary revision
The parameter will default to the latest revision when unprovided,
so that this method remains backwards-compatible. The revision
(instance of FileInfo) will provide the url and sha1 for the download
and verification process. | @@ -2634,7 +2634,7 @@ class FilePage(Page):
return self.site.upload(self, source_filename=filename, source_url=url,
**kwargs)
- def download(self, filename=None, chunk_size=100 * 1024):
+ def download(self, filename=None, chunk_size=100 * 1024, revision=None):
"""
Download to filename file of FilePage.
@@ -2642,7 +2642,14 @@ class FilePage(Page):
None: self.title(as_filename=True, withNamespace=False)
will be used
str: provided filename will be used.
- @type None or str
+ @type filename: None or str
+ @param chunk_size: the size of each chunk to be received and
+ written to file.
+ @type chunk_size: int
+ @param revision: file revision to download:
+ None: self.latest_file_info will be used
+ FileInfo: provided revision will be used.
+ @type revision: None or FileInfo
@return: True if download is successful, False otherwise.
@raise: IOError if filename cannot be written for any reason.
"""
@@ -2651,7 +2658,10 @@ class FilePage(Page):
filename = os.path.expanduser(filename)
- req = http.fetch(self.latest_file_info.url, stream=True)
+ if revision is None:
+ revision = self.latest_file_info
+
+ req = http.fetch(revision.url, stream=True)
if req.status == 200:
try:
with open(filename, 'wb') as f:
@@ -2661,7 +2671,7 @@ class FilePage(Page):
raise e
sha1 = compute_file_hash(filename)
- return sha1 == self.latest_file_info.sha1
+ return sha1 == revision.sha1
else:
pywikibot.warning('Unsuccesfull request (%s): %s' % (req.status, req.uri))
return False
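Illustrative usage of the new parameter (not part of the commit). It assumes `FilePage.get_file_history()` is available to obtain `FileInfo` objects for older revisions; the site and file names are placeholders:

```python
import pywikibot

site = pywikibot.Site('commons', 'commons')
page = pywikibot.FilePage(site, 'File:Example.jpg')

# Assumed helper: get_file_history() maps timestamps to FileInfo objects.
history = page.get_file_history()
oldest = history[min(history)]

# Download that specific revision; omitting `revision` keeps the old behaviour
# of fetching the latest file. Returns True when the sha1 check passes.
ok = page.download(filename='Example_oldest.jpg', revision=oldest)
print('sha1 verified:', ok)
```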
|
Update README.md - enhance MacOS instructions
Added instructions for cloning flint2 and getting brew installed. Will update install.sh to compile and install flint2. | # chia-blockchain
Python 3.7 is used for this project. Make sure your python version is >=3.7 by typing python3.
-### Install
+### Install on Debian/Ubuntu
```bash
-# for Debian-based distros
-sudo apt-get install build-essential cmake python3-dev python3-venv --no-install-recommends mongodb-org=4.2.1
+sudo apt-get install build-essential cmake libgmp libboost libflint python3-dev python3-venv --no-install-recommends mongodb-org=4.2.1
+sh install.sh
+
+# Run mongo database
+mongod --fork --dbpath ./db/ --logpath mongod.log
-# for MacOS
-brew tap mongodb/brew
+# Start virtual environment
+python3 -m venv .venv
+. .venv/bin/activate
+```
+# Install on MacOS
+Make sure [brew](https://brew.sh/) is available before starting the setup.
+```brew tap mongodb/brew
brew install cmake boost gmp mpir mpfr [email protected]
git clone https://github.com/Chia-Network/chia-blockchain.git && cd chia-blockchain
-#or
-download chia-blockchain-master.zip from https://github.com/Chia-Network/chia-blockchain/, unzip, and cd chia-blockchain-master/
+
+git clone https://github.com/wbhart/flint2
+
sh install.sh
# Run mongo database
mongod --fork --dbpath ./db/ --logpath mongod.log
+
+# Start virtual environment
+python3 -m venv .venv
+. .venv/bin/activate
```
### Generate keys
|
[varLib.mutator] Fix mutating of ValueRecord objects
Part of fixing | @@ -740,6 +740,12 @@ def merge(merger, self, lst):
@MutatorMerger.merger(otBase.ValueRecord)
def merge(merger, self, lst):
+
+ # All other structs are merged with self pointing to a copy of base font,
+ # except for ValueRecords which are sometimes created later and initialized
+ # to have 0/None members. Hence the copy.
+ self.__dict__ = lst[0].__dict__.copy()
+
instancer = merger.instancer
# TODO Handle differing valueformats
for name, tableName in [('XAdvance','XAdvDevice'),
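A minimal standalone sketch (toy classes, not fontTools objects) of the `__dict__`-copy trick the added comment describes: an object created later starts from the base object's attribute values instead of 0/None members.

```python
class BaseRecord:
    def __init__(self):
        self.XAdvance = 120
        self.YAdvance = 0

class LateRecord:
    """Created later with no attributes set, like the 0/None ValueRecords."""
    pass

base = BaseRecord()
late = LateRecord()

# Shallow-copy the base object's attributes so merging starts from real values.
late.__dict__ = base.__dict__.copy()
print(late.XAdvance, late.YAdvance)   # 120 0
```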
|
navbar: Use `filter._sub` instead of calling `stream_data`.
Previously, in `make_tab_data()` we were using the stream name,
which we got from the filter, to call `stream_data.get_sub_by_name()`.
This commit switches to just using `filter._sub`, which is simpler and
better. | @@ -35,10 +35,7 @@ function make_tab_data(filter) {
// We can now be certain that the narrow
// involves a stream which exists and
// the current user can access.
- const stream = filter.operands("stream")[0];
- // We can rely on current_stream because if the stream doesn't
- // exist then the "question-circle-o" case would case early exit.
- const current_stream = stream_data.get_sub_by_name(stream);
+ const current_stream = filter._sub;
tab_data.rendered_narrow_description = current_stream.rendered_description;
tab_data.sub_count = get_sub_count(current_stream);
tab_data.formatted_sub_count = get_formatted_sub_count(current_stream);
|
fix: grid copy from excel with header, field label in user language
fix grid copy from excel with header row, support field label in user language | @@ -87,16 +87,17 @@ frappe.ui.form.ControlTable = class ControlTable extends frappe.ui.form.Control
}
get_field(field_name) {
let fieldname;
+ field_name = field_name.toLowerCase();
this.grid.meta.fields.some(field => {
if (frappe.model.no_value_type.includes(field.fieldtype)) {
return false;
}
- field_name = field_name.toLowerCase();
- const is_field_matching = field_name => {
+ const is_field_matching = () => {
return (
field.fieldname.toLowerCase() === field_name ||
- (field.label || '').toLowerCase() === field_name
+ (field.label || '').toLowerCase() === field_name ||
+ (__(field.label) || '').toLowerCase() === field_name
);
};
|
Adds the plugin names field to scala sources
This adds the previously-defined `ScalaConsumedPluginNamesField` to Scala source files, as implied by the previous state of the code | @@ -51,13 +51,17 @@ class ScalaDependenciesField(Dependencies):
class ScalaConsumedPluginNamesField(StringSequenceField):
- """The names of Scala plugins that this source file requires.
+ help = """The names of Scala plugins that this source file requires.
+
+ The plugin must be defined by a corresponding `scalac_plugin` AND `jvm_artifact` target,
+ and must be present in this target's resolve's lockfile.
If not specified, this will default to the plugins specified in `--scalac-plugins` for this
target's resolve.
"""
alias = "scalac_plugins"
+ required = False
@dataclass(frozen=True)
@@ -89,6 +93,7 @@ class ScalatestTestTarget(Target):
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
ScalatestTestSourceField,
+ ScalaConsumedPluginNamesField,
JvmResolveField,
JvmProvidesTypesField,
JvmJdkField,
@@ -112,6 +117,7 @@ class ScalatestTestsGeneratorTarget(TargetFilesGenerator):
copied_fields = (
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
+ ScalaConsumedPluginNamesField,
JvmJdkField,
)
moved_fields = (
@@ -140,6 +146,7 @@ class ScalaJunitTestTarget(Target):
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
ScalaJunitTestSourceField,
+ ScalaConsumedPluginNamesField,
JvmResolveField,
JvmProvidesTypesField,
JvmJdkField,
@@ -163,6 +170,7 @@ class ScalaJunitTestsGeneratorTarget(TargetFilesGenerator):
copied_fields = (
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
+ ScalaConsumedPluginNamesField,
JvmJdkField,
)
moved_fields = (
@@ -184,6 +192,7 @@ class ScalaSourceTarget(Target):
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
ScalaSourceField,
+ ScalaConsumedPluginNamesField,
JvmResolveField,
JvmProvidesTypesField,
JvmJdkField,
@@ -216,6 +225,7 @@ class ScalaSourcesGeneratorTarget(TargetFilesGenerator):
copied_fields = (
*COMMON_TARGET_FIELDS,
ScalaDependenciesField,
+ ScalaConsumedPluginNamesField,
JvmJdkField,
)
moved_fields = (
|
settings: Don't check mobile notifications if they are disabled.
Fixes | @@ -109,6 +109,12 @@ exports.set_up = function () {
exports.update_page = function () {
_.each(exports.notification_settings, function (setting) {
+ if (setting === 'enable_offline_push_notifications'
+ && !page_params.realm_push_notifications_enabled) {
+ // If push notifications are disabled at the realm level,
+ // we should just leave the checkbox always off.
+ return;
+ }
$("#" + setting).prop('checked', page_params[setting]);
});
};
|
[tox] Remove externals folder from exclude list
The externals folder was a pywikibot 2.0 folder
and is no longer needed with pywikibot 3.0 | @@ -106,7 +106,7 @@ deps =
ignore = D105,D211,D401,D413,D412,FI12,FI13,FI15,FI16,FI17,FI5,H101,H236,H301,H404,H405,H903,P101,P102,P103,W503
enable-extensions = H203,H204,H205
-exclude = .tox,.git,./*.egg,ez_setup.py,build,externals,user-config.py,./scripts/i18n/*,scripts/userscripts/*
+exclude = .tox,.git,./*.egg,ez_setup.py,build,user-config.py,./scripts/i18n/*,scripts/userscripts/*
min-version = 2.7
accept-encodings = utf-8
require-code = true
|
Lena Program 1 Bugfix
* Hebbian learning fix
simply changed it from error to warning.
* Removed no-targets error
removed error statement when system is not given targets for learning, because Hebbian doesn't need targets
* Typo Fixes
Fixing a problem discovered by Lena | @@ -916,7 +916,7 @@ def _validate_targets(object, targets, num_input_sets, context=None):
raise RunError("Length ({}) of target{} specified for run of {}"
" does not match expected target length of {}".
format(target_len, plural, append_type_to_name(object),
- np.size(object.target_mechanism.target)))
+ np.size(targetMechanism.input_states[TARGET].instance_defaults.variable)))
return
if object_type is PROCESS:
@@ -927,7 +927,7 @@ def _validate_targets(object, targets, num_input_sets, context=None):
target_len = np.size(target_array[0])
num_target_sets = np.size(target_array, 0)
- if target_len != np.size(object.target_mechanism.input_states[TARGET].instance_defaults.variable):
+ if target_len != np.size(object.target_mechanisms[0].input_states[TARGET].instance_defaults.variable):
if num_target_sets > 1:
plural = 's'
else:
@@ -935,7 +935,7 @@ def _validate_targets(object, targets, num_input_sets, context=None):
raise RunError("Length ({}) of target{} specified for run of {}"
" does not match expected target length of {}".
format(target_len, plural, append_type_to_name(object),
- np.size(object.target_mechanism.target)))
+ np.size(object.target_mechanisms[0].target)))
if any(np.size(target) != target_len for target in target_array):
raise RunError("Not all of the targets specified for {} are of the same length".
|
Rename 'version' arg to new_fulltext() as 'since'.
'since' is more consistent with other API calls. | @@ -642,13 +642,13 @@ class Zotero(object):
data=json.dumps(payload),
)
- def new_fulltext(self, version):
+ def new_fulltext(self, since):
"""
Retrieve list of full-text content items and versions which are newer
- than <version>
+ than <since>
"""
- query_string = "/{t}/{u}/fulltext?since={v}".format(
- t=self.library_type, u=self.library_id, v=version
+ query_string = "/{t}/{u}/fulltext?since={version}".format(
+ t=self.library_type, u=self.library_id, version=since
)
headers = self.default_headers()
self._check_backoff()
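A hedged usage sketch of the renamed keyword; the credentials and the version number are placeholders, and the return value is assumed to be roughly a mapping of item keys to versions.

```python
from pyzotero import zotero

zot = zotero.Zotero(library_id='123456', library_type='user', api_key='XXXXXXXX')

# Before: zot.new_fulltext(version=1234)
# After the rename, the keyword matches other "since"-style calls:
newer_fulltext = zot.new_fulltext(since=1234)
print(newer_fulltext)
```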
|
libmanage.py: minor style fix
TN: | @@ -481,7 +481,7 @@ class ManageScript(object):
raise
print >> sys.stderr, col('Errors, exiting', Colors.FAIL)
sys.exit(1)
- except Exception, e:
+ except Exception as e:
if parsed_args.debug:
raise
import traceback
|
if a trainable impl's fit returns None, use the trainable impl
as the trained impl | @@ -1304,6 +1304,10 @@ class TrainableIndividualOp(PlannedIndividualOp, TrainableOperator):
trained_impl = trainable_impl.fit(X, y)
else:
trained_impl = trainable_impl.fit(X, y, **filtered_fit_params)
+ # if the trainable fit method returns None, assume that
+ # the trainable should be used as the trained impl as well
+ if trained_impl is None:
+ trained_impl = trainable_impl
result = TrainedIndividualOp(self.name(), trained_impl, self._schemas)
result._hyperparams = self._hyperparams
self._trained = result
@@ -1323,6 +1327,8 @@ class TrainableIndividualOp(PlannedIndividualOp, TrainableOperator):
else:
trained_impl = trainable_impl.partial_fit(
X, y, **filtered_fit_params)
+ if trained_impl is None:
+ trained_impl = trainable_impl
result = TrainedIndividualOp(self.name(), trained_impl, self._schemas)
result._hyperparams = self._hyperparams
self._trained = result
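A standalone sketch (toy classes, not lale internals) of the defensive pattern both hunks add: if `fit` follows the convention of returning nothing, fall back to the object that was fitted.

```python
class ReturnsNothingFromFit:
    """Trains in place and returns None from fit(), as some impls do."""
    def fit(self, X, y):
        self.mean_ = sum(y) / len(y)   # toy "training"

def fit_and_resolve(trainable, X, y):
    trained = trainable.fit(X, y)
    if trained is None:          # fall back to the trainable itself
        trained = trainable
    return trained

model = fit_and_resolve(ReturnsNothingFromFit(), [[0], [1], [2]], [1, 2, 3])
print(model.mean_)   # 2.0
```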
|
renamed function
corrected documentation
added a function to get all attributes of multiple faces | @@ -768,7 +768,7 @@ class FaceAttributesManagement(object):
temp = list(zip(names, values))
return [[self.get_face_attribute(fkey, name, value) for name, value in temp] for fkey in fkeys]
- def get_all_face_attributes(self, key, data=False):
+ def get_face_all_attributes(self, key, data=True):
"""Get all attributes of one face.
Parameters
@@ -776,15 +776,15 @@ class FaceAttributesManagement(object):
key : hashable
The identifier of the face.
data : bool, optional
- Yield the attributes and their values.
- Default is ``False``.
+ Returns the attributes and their values.
+ Default is ``True``.
Returns
------
- attributes: list
- A list of all attribute keys.
dict
The dictionary containing all attributes and their values, if ``data=True``.
+ list
+ A list of all attribute keys, if ``data=False`.
See Also
--------
@@ -792,12 +792,42 @@ class FaceAttributesManagement(object):
* :meth:`get_face_attributes`
* :meth:`get_faces_attribute`
* :meth:`get_faces_attributes`
+ * :meth:'get_faces_all_attributes'
"""
if data:
return self.facedata[key]
else:
- return self.facedata[key].keys()
+ return list(self.facedata[key].keys())
+
+ def get_faces_all_attributes(self, keys, data=True):
+ """Get all attributes of multiple faces.
+
+ Parameters
+ ----------
+ keys : list of hashable
+ A list of face identifiers.
+ data : bool, optional
+ Returns the attributes and their values.
+ Default is ``True``.
+
+ Returns
+ ------
+ list of dict
+ A list of dictionaries containing all attributes and their values, if ``data=True``.
+ list of list
+ A list of all attribute keys for each specified face key, if ``data=False`.
+
+ See Also
+ --------
+ * :meth:`get_face_attribute`
+ * :meth:`get_face_attributes`
+ * :meth:`get_faces_attribute`
+ * :meth:`get_faces_attributes`
+ * :meth:'get_face_all_attributes'
+
+ """
+ return [self.get_face_all_attributes(key, data) for key in keys]
# ==============================================================================
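A tiny stand-in class (not the real datastructure) showing how the two methods above behave for `data=True` versus `data=False`; the face keys and attribute values are illustrative.

```python
class FaceAttributesDemo:
    """Minimal stand-in exposing only the two methods added above."""
    def __init__(self):
        self.facedata = {0: {'area': 2.5, 'color': 'red'}, 1: {'area': 4.0}}

    def get_face_all_attributes(self, key, data=True):
        return self.facedata[key] if data else list(self.facedata[key].keys())

    def get_faces_all_attributes(self, keys, data=True):
        return [self.get_face_all_attributes(key, data) for key in keys]

demo = FaceAttributesDemo()
print(demo.get_face_all_attributes(0, data=False))  # ['area', 'color']
print(demo.get_faces_all_attributes([0, 1]))        # [{'area': 2.5, 'color': 'red'}, {'area': 4.0}]
```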
|
Update README.md
Added latest German BERTs from DBMDZ | @@ -891,6 +891,8 @@ The table below shows the currently available model types and their models. You
| BERT | bert | bert-large-uncased-whole-word-masking-finetuned-squad | 24-layer, 1024-hidden, 16-heads, 340M parameters. <br>The bert-large-uncased-whole-word-masking model fine-tuned on SQuAD |
| BERT | bert | bert-large-cased-whole-word-masking-finetuned-squad | 24-layer, 1024-hidden, 16-heads, 340M parameters <br>The bert-large-cased-whole-word-masking model fine-tuned on SQuAD |
| BERT | bert | bert-base-cased-finetuned-mrpc | 12-layer, 768-hidden, 12-heads, 110M parameters. <br>The bert-base-cased model fine-tuned on MRPC |
+| BERT | bert | bert-base-german-dbmdz-cased | 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on cased German text by DBMDZ |
+| BERT | bert | bert-base-german-dbmdz-uncased | 12-layer, 768-hidden, 12-heads, 110M parameters. Trained on uncased German text by DBMDZ |
| XLNet | xlnet | xlnet-base-cased | 12-layer, 768-hidden, 12-heads, 110M parameters. <br>XLNet English model |
| XLNet | xlnet | xlnet-large-cased | 24-layer, 1024-hidden, 16-heads, 340M parameters. <br>XLNet Large English model |
| XLM | xlm | xlm-mlm-en-2048 | 12-layer, 2048-hidden, 16-heads <br>XLM English model |
|
Psycopg2Connector __init__ doesn't accept a pool
The Psycopg2Connector __init__ function does not have a pool
argument. A user-created pool can be used, but it's to be
passed to the open function. | @@ -76,7 +76,6 @@ class Psycopg2Connector(connector.BaseConnector):
*,
json_dumps: Optional[Callable] = None,
json_loads: Optional[Callable] = None,
- pool: Optional[psycopg2.pool.ThreadedConnectionPool] = None,
**kwargs: Any,
):
"""
|
purge: fix rbd mirror purge
the service launched isn't `[email protected]` anymore,
it's now `ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}` | - name: stop ceph rbd mirror with systemd
service:
- name: [email protected]
+ name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
state: stopped
failed_when: false
|
[tests] Only find one alternative script with script_tests.py
utils.execute_pwb imports pwb and pywikibot, and the config variable cannot
be set directly for that environment and will be ignored. To set the
pwb_close_matches variable use the global argument handling.
This partly reverts | @@ -10,14 +10,12 @@ import unittest
from contextlib import suppress
from pywikibot.tools import has_module
-from pywikibot import config
from tests import join_root_path, unittest_print
from tests.aspects import DefaultSiteTestCase, MetaTestCaseClass, PwbTestCase
from tests.utils import execute_pwb
-saved_pwb_close_matches = config.pwb_close_matches
scripts_path = join_root_path('scripts')
# These dependencies are not always the package name which is in setup.py.
@@ -45,7 +43,7 @@ failed_dep_script_set = {name for name in script_deps
if not check_script_deps(name)}
# scripts which cannot be tested
-unrunnable_script_set = {'commonscat'}
+unrunnable_script_set = set()
def list_scripts(path, exclude=None):
@@ -57,8 +55,7 @@ def list_scripts(path, exclude=None):
return scripts
-script_list = (['login']
- + list_scripts(scripts_path, 'login.py'))
+script_list = ['login'] + list_scripts(scripts_path, exclude='login.py')
script_input = {
'interwiki': 'Test page that should not exist\n',
@@ -196,9 +193,11 @@ class TestScriptMeta(MetaTestCaseClass):
.format(script_name))
def test_script(self):
- global_args = 'For global options use -help:global or run pwb'
+ global_args_msg = \
+ 'For global options use -help:global or run pwb'
+ global_args = ['-pwb_close_matches:1']
- cmd = [script_name] + args
+ cmd = global_args + [script_name] + args
data_in = script_input.get(script_name)
timeout = 5 if is_autorun else None
@@ -238,7 +237,7 @@ class TestScriptMeta(MetaTestCaseClass):
elif not is_autorun:
if not stderr_other:
- self.assertIn(global_args, out_result)
+ self.assertIn(global_args_msg, out_result)
else:
self.assertIn('Use -help for further information.',
stderr_other)
@@ -267,7 +266,7 @@ class TestScriptMeta(MetaTestCaseClass):
self.assertNotIn('deprecated', err_result.lower())
# If stdout doesn't include global help..
- if global_args not in out_result:
+ if global_args_msg not in out_result:
# Specifically look for deprecated
self.assertNotIn('deprecated', out_result.lower())
# But also complain if there is any stdout
@@ -368,16 +367,6 @@ class TestScriptSimulate(DefaultSiteTestCase, PwbTestCase,
_results = no_args_expected_results
-def setUpModule(): # noqa: N802
- """Only find one alternative script (T296204)."""
- config.pwb_close_matches = 1
-
-
-def tearDownModule(): # noqa: N802
- """Restore pwb_close_matches setting."""
- config.pwb_close_matches = saved_pwb_close_matches
-
-
if __name__ == '__main__': # pragma: no cover
with suppress(SystemExit):
unittest.main()
|
Update data-and-archives.rst
Updated "Contribute Code" URL to the updated Github page. | Import your data
================
-You can import data from other tools to use with Boards. `Contribute code <https://www.focalboard.com/contribute/getting-started>`_ to expand this.
+You can import data from other tools to use with Boards. `Contribute code <https://mattermost.github.io/focalboard/>`_ to expand this.
Import from Asana
-----------------
|
[ci] add delay to `gdbserver_session`
Force a delay to let gdbserver spawn correctly and start listening on the TCP port | @@ -10,6 +10,7 @@ import platform
import re
import subprocess
import tempfile
+import time
import unittest
import warnings
from typing import Dict, Iterable, List, Optional, Union
@@ -262,6 +263,7 @@ def gdbserver_session(*args, **kwargs):
port = kwargs.get("port", 0) or GDBSERVER_DEFAULT_PORT
sess = start_gdbserver(exe, port)
try:
+ time.sleep(0.5) # forced delay to allow gdbserver to start listening
yield sess
finally:
stop_gdbserver(sess)
|
Sort `sawtooth peer list` output
This makes the command's output easier to predict. | @@ -59,7 +59,7 @@ def do_peer(args):
def do_peer_list(args):
rest_client = RestClient(base_url=args.url)
- peers = rest_client.list_peers()
+ peers = sorted(rest_client.list_peers())
if args.format == 'csv' or args.format == 'default':
print(','.join(peers))
|
[ci] always handle multiple containers
I missed a few places where CI tries to get the logs of a service.
All of these can fail if the pod has multiple containers. I added
`--all-containers` to ensure it never fails for multi-container Pods. | @@ -740,7 +740,7 @@ set +e
kubectl -n {self.namespace} rollout status --timeout=1h {resource_type} {name} && \
{wait_cmd}
EC=$?
-kubectl -n {self.namespace} logs --tail=999999 -l app={name} | {pretty_print_log}
+kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}
set -e
(exit $EC)
'''
@@ -753,7 +753,7 @@ set +e
kubectl -n {self.namespace} wait --timeout=1h pod --for=condition=podscheduled {name} \
&& python3 wait-for.py {timeout} {self.namespace} Pod {name}
EC=$?
-kubectl -n {self.namespace} logs --tail=999999 {name} | {pretty_print_log}
+kubectl -n {self.namespace} logs --tail=999999 {name} --all-containers=true | {pretty_print_log}
set -e
(exit $EC)
'''
@@ -783,13 +783,13 @@ date
for w in self.wait:
name = w['name']
if w['kind'] == 'Deployment':
- script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} | {pretty_print_log}\n'
+ script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}\n'
elif w['kind'] == 'Service':
assert w['for'] == 'alive', w['for']
- script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} | {pretty_print_log}\n'
+ script += f'kubectl -n {self.namespace} logs --tail=999999 -l app={name} --all-containers=true | {pretty_print_log}\n'
else:
assert w['kind'] == 'Pod', w['kind']
- script += f'kubectl -n {self.namespace} logs --tail=999999 {name} | {pretty_print_log}\n'
+ script += f'kubectl -n {self.namespace} logs --tail=999999 {name} --all-containers=true | {pretty_print_log}\n'
script += 'date\n'
self.job = batch.create_job(CI_UTILS_IMAGE,
command=['bash', '-c', script],
|
travis: Drop patch element from python version
It's not used to select python installation. | @@ -12,9 +12,9 @@ dist: bionic
env:
jobs:
- - PYTHON=3.8.2
- - PYTHON=3.7.7
- - PYTHON=3.6.8
+ - PYTHON=3.8
+ - PYTHON=3.7
+ - PYTHON=3.6
global:
- PYTHONWARNINGS="ignore::DeprecationWarning"
- PIP_PROGRESS_BAR="off"
@@ -33,8 +33,8 @@ before_install:
# Install venv on Linux using Ubuntu distributed python
# distutils and lib2to3 are only needed for python3.8
# https://bugs.launchpad.net/ubuntu/+source/python3.8/+bug/1851684
- sudo apt-get install -y python${PYTHON%.*}-dev python${PYTHON%.*}-venv python3-distutils python3-lib2to3
- python${PYTHON%.*} -m venv $HOME/venv
+ sudo apt-get install -y python$PYTHON-dev python$PYTHON-venv python3-distutils python3-lib2to3
+ python$PYTHON -m venv $HOME/venv
# Provide fake xdg-open
echo "#!/bin/sh" > $HOME/venv/bin/xdg-open
chmod +x $HOME/venv/bin/xdg-open
|
Fix empty Total_LCA_operation file
`NONE` feedstock was being ignored, which resulted in merging with an empty dataframe | @@ -58,7 +58,9 @@ def lca_operation(locator):
# get the mean of all values for this
factors_resources_simple = [(name, values['GHG_kgCO2MJ'].mean()) for name, values in factors_resources.items()]
- factors_resources_simple = pd.DataFrame(factors_resources_simple, columns=['code', 'GHG_kgCO2MJ'])
+ factors_resources_simple = pd.DataFrame(factors_resources_simple, columns=['code', 'GHG_kgCO2MJ']).append(
+ # append NONE choice with zero values
+ {'code': 'NONE'}, ignore_index=True).fillna(0)
# local variables
Qhs_flag = Qww_flag = Qcs_flag = E_flag = True
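A small illustration of the same idea with made-up factor values. `DataFrame.append` (used in the commit) was removed in pandas 2.0, so this sketch uses `pd.concat`; `fillna(0)` turns the missing emission factor into zero rather than NaN.

```python
import pandas as pd

factors = pd.DataFrame([('OIL', 0.0911), ('COAL', 0.1320)],
                       columns=['code', 'GHG_kgCO2MJ'])   # illustrative values

none_row = pd.DataFrame([{'code': 'NONE'}])               # no factor columns
factors = pd.concat([factors, none_row], ignore_index=True).fillna(0)
print(factors)
#    code  GHG_kgCO2MJ
# 0   OIL       0.0911
# 1  COAL       0.1320
# 2  NONE       0.0000
```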
|
fix: Minor link fix in `CONTRIBUTING.md`
The link to the python SDK header in the contrib document was incorrect. | ## Overview
This guide is targeted at developers looking to contribute to Feast components in
the main Feast repository:
-- [Feast Python SDK / CLI](#feast-python-sdk-%2F-cli)
+- [Feast Python SDK / CLI](#feast-python-sdk--cli)
- [Feast Java Serving](#feast-java-serving)
- [Feast Go Client](#feast-go-client)
@@ -212,4 +212,3 @@ go test
* This is because github actions cannot pull the branch version for some reason so just find your PR number in your pull request header and hard code it into the `uses: actions/checkout@v2` section (i.e replace `refs/pull/${{ github.event.pull_request.number }}/merge` with `refs/pull/<pr number>/merge`)
* AWS/GCP workflow
* Currently still cannot test GCP/AWS workflow without setting up secrets in a forked repository.
-
|
llvm, functions/LinearCombination: Don't emit pow(x,1.0)
It's not easy to optimize it away. | @@ -946,9 +946,12 @@ class LinearCombination(
pow_f = ctx.get_builtin("pow", [ctx.float_ty])
for i in range(vi.type.pointee.count):
+ ptri = builder.gep(vi, [ctx.int32_ty(0), ctx.int32_ty(i), index])
+ in_val = builder.load(ptri)
+
# No exponent
if isinstance(exponent_type, pnlvm.ir.LiteralStructType):
- exponent = ctx.float_ty(1.0)
+ pass
# Vector exponent
elif isinstance(exponent_type, pnlvm.ir.ArrayType):
assert len(exponent_type) > 1
@@ -957,12 +960,10 @@ class LinearCombination(
exponent_index = builder.add(exponent_index, index)
exponent_ptr = builder.gep(exponent_param_ptr, [ctx.int32_ty(0), exponent_index])
exponent = builder.load(exponent_ptr)
+ in_val = builder.call(pow_f, [in_val, exponent])
# Scalar exponent
else:
exponent = builder.load(exponent_param_ptr)
-
- ptri = builder.gep(vi, [ctx.int32_ty(0), ctx.int32_ty(i), index])
- in_val = builder.load(ptri)
in_val = builder.call(pow_f, [in_val, exponent])
# No weights
|
remove always-on png forcing
only force the output format of binarization to png if the extension is jpeg or
similar. also prints a warning when forcing. | @@ -57,7 +57,11 @@ def binarizer(threshold, zoom, escale, border, perc, range, low, high, base_imag
try:
res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range,
low, high)
- res.save(output, format='png')
+ form = None
+ if os.path.splitext(output)[1] in ['.jpg', '.jpeg', '.JPG', '.JPEG']:
+ form = 'png'
+ logger.warning('jpeg does not support 1bpp images. Forcing to png.')
+ res.save(output, format=form)
except Exception:
message('\u2717', fg='red')
raise
|
[easy] remove unloadable toy schedule from examples
Test Plan: inspection
Reviewers: sashank, dgibson, yuhan | @@ -156,37 +156,4 @@ def get_toys_schedules():
run_config_fn=lambda _: {"intermediate_storage": {"filesystem": {}}},
execution_timezone=_toys_tz_info(),
),
- ScheduleDefinition(
- name="pandas_hello_world_hourly",
- cron_schedule="0 * * * *",
- pipeline_name="pandas_hello_world_pipeline",
- run_config_fn=lambda _: {
- "solids": {
- "mult_solid": {
- "inputs": {
- "num_df": {
- "csv": {
- "path": file_relative_path(
- __file__, "pandas_hello_world/data/num.csv"
- )
- }
- }
- }
- },
- "sum_solid": {
- "inputs": {
- "num_df": {
- "csv": {
- "path": file_relative_path(
- __file__, "pandas_hello_world/data/num.csv"
- )
- }
- }
- }
- },
- },
- "intermediate_storage": {"filesystem": {}},
- },
- execution_timezone=_toys_tz_info(),
- ),
]
|
Improve expectation docs
See | @@ -319,6 +319,13 @@ programs run on this QVM.
sample. The expectations returned from *different* ``expectation`` calls *will then
generally be different*.
+ To measure the expectation of a PauliSum, you probably want to
+ do something like this::
+
+ progs, coefs = hamiltonian.get_programs()
+ expect_coeffs = np.array(cxn.expectation(prep_program, operator_programs=progs))
+ return np.real_if_close(np.dot(coefs, expect_coeffs))
+
:param Program prep_prog: Quil program for state preparation.
:param list operator_programs: A list of Programs, each specifying an operator whose expectation to compute.
Default is a list containing only the empty Program.
|
link subevent to messages for survey SMSs
This will fix a bug where searching by phone number
doesn't pick up the events in the messaging event report
since the SMS isn't linked to the messaging event | @@ -59,6 +59,7 @@ def send_first_message(domain, recipient, phone_entry_or_number, session, respon
metadata = MessageMetadata(
workflow=workflow,
xforms_session_couch_id=session.couch_id,
+ messaging_subevent_id=logged_subevent.pk
)
if isinstance(phone_entry_or_number, PhoneNumber):
send_sms_to_verified_number(
@@ -111,16 +112,18 @@ def handle_due_survey_action(domain, contact_id, session_id):
# Resend the current question in the open survey to the contact
p = PhoneNumber.get_phone_number_for_owner(session.connection_id, session.phone_number)
if p:
+ subevent = session.related_subevent
metadata = MessageMetadata(
workflow=session.workflow,
xforms_session_couch_id=session._id,
+ messaging_subevent_id=subevent.pk if subevent else None
)
resp = FormplayerInterface(session.session_id, domain).current_question()
send_sms_to_verified_number(
p,
resp.event.text_prompt,
metadata,
- logged_subevent=session.related_subevent
+ logged_subevent=subevent
)
session.move_to_next_action()
|
Catch NotFound errors when trying to delete the invocation message when cleaning
This often happens during a raid, when an int e script is added to ban & clean messages. Since the invocation
message will be deleted on the first run, we should except subsequent NotFound errors. | @@ -3,7 +3,7 @@ import random
import re
from typing import Iterable, Optional
-from discord import Colour, Embed, Message, TextChannel, User
+from discord import Colour, Embed, Message, TextChannel, User, errors
from discord.ext import commands
from discord.ext.commands import Cog, Context, group, has_any_role
@@ -115,7 +115,11 @@ class Clean(Cog):
# Delete the invocation first
self.mod_log.ignore(Event.message_delete, ctx.message.id)
+ try:
await ctx.message.delete()
+ except errors.NotFound:
+ # Invocation message has already been deleted
+ log.info("Tried to delete invocation message, but it was already deleted.")
messages = []
message_ids = []
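The idempotent-delete pattern in isolation, as a hedged sketch for any discord.py cog; the helper name and logger are illustrative, not part of the project.

```python
import logging

from discord import errors

log = logging.getLogger(__name__)

async def delete_if_still_there(message):
    """Delete a message, treating 'already gone' as success."""
    try:
        await message.delete()
    except errors.NotFound:
        # Someone (or an earlier clean run) already removed it.
        log.info("Message %s was already deleted.", message.id)
```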
|
Updating README
Moving Travis logo down and using image reference to declutter. | Mininet: Rapid Prototyping for Software Defined Networks
========================================================
-[](https://travis-ci.org/mininet/mininet)
-
*The best way to emulate almost any network on your laptop!*
Mininet 2.3.0d1
+[![Build Status][1]](https://travis-ci.org/mininet/mininet)
+
### What is Mininet?
Mininet emulates a complete network of hosts, links, and switches
@@ -128,3 +128,5 @@ Mininet to change the networking world!
Bob Lantz
Mininet Core Team
+
+[1]: https://travis-ci.org/mininet/mininet.svg?branch=master
|
[bugfix] Make archivebot ignore headers in nowiki, pre, etc.
archivebot.py has been incorrectly searching for thread headers in tags
like nowiki, pre, source and in comments | @@ -109,7 +109,7 @@ import pywikibot
from pywikibot.date import apply_month_delta
from pywikibot import i18n
-from pywikibot.textlib import TimeStripper
+from pywikibot.textlib import TimeStripper, _get_regexes
from pywikibot.textlib import to_local_digits
from pywikibot.tools import issue_deprecation_warning, FrozenDict
@@ -453,12 +453,32 @@ class DiscussionPage(pywikibot.Page):
self.threads = []
self.archives = {}
self.archived_threads = 0
- lines = self.get().split('\n')
+ text = self.get()
+ # Replace text in following exceptions by spaces, but don't change line
+ # numbers
+ exceptions = ['comment', 'code', 'pre', 'source', 'nowiki']
+ exc_regexes = _get_regexes(exceptions, self.site)
+ stripped_text = text
+ for regex in exc_regexes:
+ for match in re.finditer(regex, stripped_text):
+ before = stripped_text[:match.start()]
+ restricted = stripped_text[match.start():match.end()]
+ after = stripped_text[match.end():]
+ restricted = re.sub(r'[^\n]', r'', restricted)
+ stripped_text = before + restricted + after
+ # Find thread headers in stripped text and return their line numbers
+ stripped_lines = stripped_text.split('\n')
+ thread_headers = []
+ for line_number, line in enumerate(stripped_lines, start=1):
+ if re.search(r'^== *[^=].*? *== *$', line):
+ thread_headers.append(line_number)
+ # Fill self by original thread headers on returned line numbers
+ lines = text.split('\n')
found = False # Reading header
cur_thread = None
- for line in lines:
+ for line_number, line in enumerate(lines, start=1):
+ if line_number in thread_headers:
thread_header = re.search('^== *([^=].*?) *== *$', line)
- if thread_header:
found = True # Reading threads now
if cur_thread:
self.threads.append(cur_thread)
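The core idea, blanking out protected regions without changing line numbers and then searching the stripped copy, in a self-contained sketch that uses a simple `<nowiki>` marker instead of the real wiki-markup regexes.

```python
import re

text = "== Real thread ==\n<nowiki>\n== not a thread ==\n</nowiki>\n== Another thread =="

# Blank everything inside <nowiki>...</nowiki>, keeping newlines so line numbers match.
stripped = re.sub(r'<nowiki>.*?</nowiki>',
                  lambda m: re.sub(r'[^\n]', '', m.group(0)),
                  text, flags=re.DOTALL)

header_lines = [i for i, line in enumerate(stripped.split('\n'), start=1)
                if re.search(r'^== *[^=].*? *== *$', line)]
print(header_lines)   # [1, 5] -- the header inside <nowiki> is ignored
```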
|
Review of domain_to_idna() to support more test cases
Please note this patch comes after an issue reported by
which I could reproduce in 1/3 computer. | @@ -1159,23 +1159,44 @@ def domain_to_idna(line):
"""
if not line.startswith('#'):
- for separator in ['\t', ' ']:
- comment = ''
+ tabs = '\t'
+ space = ' '
- if separator in line:
+ tabs_position, space_position = (line.find(tabs), line.find(space))
+
+ if tabs_position > -1 and space_position > -1:
+ if space_position < tabs_position:
+ separator = space
+ else:
+ separator = tabs
+ elif not tabs_position == -1:
+ separator = tabs
+ elif not space_position == -1:
+ separator = space
+ else:
+ separator = ''
+
+ if separator:
splited_line = line.split(separator)
- if '#' in splited_line[1]:
- index_comment = splited_line[1].find('#')
+
+ index = 1
+ while index < len(splited_line):
+ if splited_line[index]:
+ break
+ index += 1
+
+ if '#' in splited_line[index]:
+ index_comment = splited_line[index].find('#')
if index_comment > -1:
- comment = splited_line[1][index_comment:]
+ comment = splited_line[index][index_comment:]
- splited_line[1] = splited_line[1] \
+ splited_line[index] = splited_line[index] \
.split(comment)[0] \
.encode("IDNA").decode("UTF-8") + \
comment
- splited_line[1] = splited_line[1] \
+ splited_line[index] = splited_line[index] \
.encode("IDNA") \
.decode("UTF-8")
return separator.join(splited_line)
|
Update data-sources.rst
Update 3GPP data source doc | @@ -31,9 +31,21 @@ Ethical Considerations
-ListServ (3GPP)
+3GPP
=================
+The 3GPP mailing archive is managed using the LISTSERV software and is hosted at:
+https://list.etsi.org/scripts/wa.exe?HOME
+In order to be able to view most header fields and text bodies (there might still
+be some fields that are hidden) one needs to create an account here:
+https://list.etsi.org/scripts/wa.exe?GETPW1=&X=&Y=
+
+The entire archive reaches back to 1998 and is multiple GBs in size. Therefore,
+it takes a considerable amount of time to scrape the entire archive and strain on
+the server host if everyone of interest would scrape them individually. To
+resolve this issue we keep an up to date version of the archive.
+
+We are currently working on a improved version of the 'Data Access Permit Application'.
Usage
-------
@@ -43,7 +55,6 @@ Ethical Considerations
-
IETF DataTracker (RFC drafts, etc.)
========================================
|
Update test_platform_base.py
spelling correction | @@ -543,7 +543,7 @@ def test_multi_transition():
assert len(platform.transition_models) == 2
assert len(platform.transition_times) == 2
- # Check that platform states lenght increases as platform moves
+ # Check that platform states length increases as platform moves
assert len(platform) == 1
time = datetime.datetime.now()
time += datetime.timedelta(seconds=1)
|
utils/misc: remove unused to_identifier
This has been moved to devlib, but, it appears, never removed from WA. | @@ -290,14 +290,6 @@ class LoadSyntaxError(Exception):
RAND_MOD_NAME_LEN = 30
-BAD_CHARS = string.punctuation + string.whitespace
-TRANS_TABLE = string.maketrans(BAD_CHARS, '_' * len(BAD_CHARS))
-
-
-def to_identifier(text):
- """Converts text to a valid Python identifier by replacing all
- whitespace and punctuation."""
- return re.sub('_+', '_', text.translate(TRANS_TABLE))
def load_struct_from_python(filepath=None, text=None):
|
docs: add name and description to example SAML2 backend
these attributes are required to generate valid metadata in combination with required_attributes and/or optional_attributes | @@ -18,6 +18,8 @@ config:
enable_metadata_reload: no
sp_config:
+ name: "SP Name"
+ description: "SP Description"
key_file: backend.key
cert_file: backend.crt
organization: {display_name: Example Identities, name: Example Identities Org., url: 'http://www.example.com'}
|
docs: improved description of `virtualenvs.create` option
I tried to clarify how the `virtualenvs.create` config option works and
why it is a good idea to have virtual environments in docker containers
as well. | @@ -257,7 +257,23 @@ Use parallel execution when using the new (`>=1.1.0`) installer.
Create a new virtual environment if one doesn't already exist.
-If set to `false`, poetry will install dependencies into the current python environment.
+If set to `false`, Poetry will not create a new virtual environment. If it detects a virtual environment
+in `{cache-dir}/virtualenvs` or `{project-dir}/.venv` it will install dependencies into them, otherwise it will install
+dependencies into the systems python environment.
+
+{{% note %}}
+If Poetry detects it's running within an activated virtual environment, it will never create a new virtual environment,
+regardless of the value set for `virtualenvs.create`.
+{{% /note %}}
+
+{{% note %}}
+Be aware that installing dependencies into the system environment likely upgrade or uninstall existing packages and thus
+break other applications. Installing additional Python packages after installing the project might break the Poetry
+project in return.
+
+This is why it is recommended to always create a virtual environment. This is also true in Docker containers, as they
+might contain additional Python packages as well.
+{{% /note %}}
### `virtualenvs.in-project`
|
Update config.yml
update slim upload folder | @@ -204,7 +204,7 @@ jobs:
mv dist/escu DA-ESS-ContentUpdate
mv dist/mustang DA-ESS-ContentUpdate-Mustang
slim package -o upload DA-ESS-ContentUpdate
- slim package -o upload_mustang DA-ESS-ContentUpdate-Mustang
+ slim package -o upload DA-ESS-ContentUpdate-Mustang
cp upload/*.tar.gz DA-ESS-ContentUpdate-latest.tar.gz
cp upload_mustang/*.tar.gz DA-ESS-ContentUpdate-Mustang-latest.tar.gz
- store_artifacts:
|
[AAT] Do not assume that StructWithLength is always 32 bits long
Most AAT tables encode struct lengths into 32 bits but some use only 16.
Therefore, take the size of the encoded struct length from otData. | @@ -447,22 +447,24 @@ class StructWithLength(Struct):
table = self.tableClass()
table.decompile(reader, font)
reader.seek(pos + table.StructLength)
- del table.StructLength
return table
def write(self, writer, font, tableDict, value, repeatIndex=None):
- value.StructLength = 0xdeadbeef
+ for convIndex, conv in enumerate(value.getConverters()):
+ if conv.name == "StructLength":
+ break
+ lengthIndex = len(writer.items) + convIndex
+ deadbeef = {1:0xDE, 2:0xDEAD, 4:0xDEADBEEF}[conv.staticSize]
+
before = writer.getDataLength()
- i = len(writer.items)
+ value.StructLength = deadbeef
value.compile(writer, font)
length = writer.getDataLength() - before
- for j,conv in enumerate(value.getConverters()):
- if conv.name != 'StructLength':
- continue
- assert writer.items[i+j] == b"\xde\xad\xbe\xef"
- writer.items[i+j] = struct.pack(">L", length)
- break
- del value.StructLength
+ lengthWriter = writer.getSubWriter()
+ conv.write(lengthWriter, font, tableDict, length)
+ assert(writer.items[lengthIndex] ==
+ b"\xde\xad\xbe\xef"[:conv.staticSize])
+ writer.items[lengthIndex] = lengthWriter.getAllData()
class Table(Struct):
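The placeholder-then-patch technique in miniature, independent of fontTools: write a recognizable dummy of the right width, serialize the body, measure it, then overwrite the dummy with the real length. The packing helper and field widths are illustrative.

```python
import struct

DEADBEEF = {1: 0xDE, 2: 0xDEAD, 4: 0xDEADBEEF}

def pack_with_length(payload: bytes, length_size: int = 2) -> bytes:
    items = []
    # 1) reserve the slot with a recognizable placeholder of the right width
    items.append(struct.pack('>Q', DEADBEEF[length_size])[-length_size:])
    length_index = len(items) - 1
    # 2) "compile" the body
    items.append(payload)
    # 3) patch the placeholder with the measured total length
    total = sum(len(i) for i in items)
    assert items[length_index] == b'\xde\xad\xbe\xef'[:length_size]
    items[length_index] = struct.pack('>Q', total)[-length_size:]
    return b''.join(items)

print(pack_with_length(b'hello').hex())   # '0007' + '68656c6c6f'
```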
|
Fix formatting in 0.10.x upgrade instructions
Just a tiny bullet list fix | @@ -50,6 +50,7 @@ you will need to update that code to use the more precise name ``batch_kwargs_ge
For example, in the method ``DataContext.get_available_data_asset_names`` the parameter ``generator_names`` is now ``batch_kwargs_generator_names``.
If you are using ``BatchKwargsGenerators`` in your project config, follow these steps to upgrade your existing Great Expectations project:
+
* Edit your ``great_expectations.yml`` file and change the key ``generators`` to ``batch_kwargs_generators``.
* Run a simple command such as: ``great_expectations datasource list`` and ensure you see a list of datasources.
|
Add description for BQ tables and view relations.
Also make changes to table_options macro based on testing against a
real project. | {%- endmacro -%}
{% macro table_options() %}
- {%- set raw_persist_docs = config.get('persist_docs', {}) -%}
- {%- set raw_persist_relation_docs = raw_persist_docs.get('relation', false) -%}
-
- {%- if raw_persist_docs is {} -%}
- {{ return('') }}
- {% endif %}
+ {%- set raw_persist_docs = config.get('persist_docs', none) -%}
+ {%- if raw_persist_docs -%}
+ {%- set raw_relation = raw_persist_docs.get('relation', false) -%}
OPTIONS(
- {% if raw_persist_relation_docs -%}
+ {%- if raw_relation -%}
description={{ model.description | tojson }}
{% endif %}
)
+ {% endif %}
{%- endmacro -%}
{% macro bigquery__create_table_as(temporary, relation, sql) -%}
{% macro bigquery__create_view_as(relation, sql) -%}
- create or replace view {{ relation }} as (
+ create or replace view {{ relation }}
+ {{ table_options() }}
+ as (
{{ sql }}
);
{% endmacro %}
|