message | diff
---|---|
doc: Comment out "Consistent Snapshots" snippet
The text above the snippet explains the basic idea of "consistent
snapshots" and how to generate them with `write` and `writeall`.
The commands in the snippet just leave the repo in an inconsistent
state (see comment). | @@ -581,14 +581,24 @@ target file names specified in metadata do not contain digests in their names.)
The repository maintainer is responsible for the duration of multiple versions
of metadata and target files available on a repository. Generating consistent
metadata and target files on the repository is enabled by setting the
-`consistent_snapshot` argument of writeall() or write(). Note that changing the
-consistent_snapshot setting involves writing a new version of root.
+`consistent_snapshot` argument of `writeall()` or `write()` . Note that
+changing the consistent_snapshot setting involves writing a new version of
+root.
+
+<!--
+TODO: Integrate section with an updated consistent snapshot tutorial.
+As it is now, it just messes up the state of the repository, i.e. marks
+"root" as dirty, although all other metadata needs to be re-written with
+<VERSION> prefix and target files need to be re-written with <HASH> prefix in
+their filenames.
+
```Python
# ----- Tutorial Section: Consistent Snapshots
>>> repository.root.load_signing_key(private_root_key)
>>> repository.root.load_signing_key(private_root_key2)
>>> repository.writeall(consistent_snapshot=True)
```
+-->
## Delegate to Hashed Bins ##
Why use hashed bin delegations?
|
push notif: Drop irrelevant fields in `remove` payloads.
These fields don't make much sense in this case, and the client
doesn't look at them and never has. Stop including them. | @@ -723,7 +723,7 @@ class HandlePushNotificationTest(PushNotificationTest):
with self.settings(PUSH_NOTIFICATION_BOUNCER_URL=True), \
mock.patch('zerver.lib.push_notifications'
'.send_notifications_to_bouncer') as mock_send_android, \
- mock.patch('zerver.lib.push_notifications.get_common_payload',
+ mock.patch('zerver.lib.push_notifications.get_base_payload',
return_value={'gcm': True}):
handle_remove_push_notification(user_profile.id, message.id)
mock_send_android.assert_called_with(user_profile.id, {},
@@ -751,7 +751,7 @@ class HandlePushNotificationTest(PushNotificationTest):
with mock.patch('zerver.lib.push_notifications'
'.send_android_push_notification') as mock_send_android, \
- mock.patch('zerver.lib.push_notifications.get_common_payload',
+ mock.patch('zerver.lib.push_notifications.get_base_payload',
return_value={'gcm': True}):
handle_remove_push_notification(self.user_profile.id, message.id)
mock_send_android.assert_called_with(android_devices,
|
try each of the cuda imports individually
it may happen that the import of DynamicSourceModule fails even though it will
not be used later on; in that case the script should not fail | @@ -8,9 +8,15 @@ import numpy
#and run tests without pycuda installed
try:
import pycuda.driver as drv
- from pycuda.compiler import SourceModule, DynamicSourceModule
except ImportError:
drv = None
+try:
+ from pycuda.compiler import SourceModule
+except ImportError:
+ SourceModule = None
+try:
+ from pycuda.compiler import DynamicSourceModule
+except ImportError:
DynamicSourceModule = None
|
Fixed version generation string for deb package.
Previously it failed when trying to build a release version. | @@ -6,7 +6,14 @@ set -e
mkdir -p /tmp/syncplay/DEBIAN
echo "Package: syncplay
-Version: "$(sed -n -e "s/^.*version = //p" syncplay/__init__.py | sed "s/'//g")""$(git describe --exact-match --tags HEAD &>/dev/null && echo -git-$(date -u +%y%m%d%H%M))"
+Version: "$(
+ sed -n -e "s/^.*version = //p" syncplay/__init__.py | sed "s/'//g"
+)$(
+ if [[ $(git describe --exact-match --tags HEAD | wc -l) = '0' ]];
+ then
+ echo -git-$(date -u +%y%m%d%H%M)
+ fi
+)"
Architecture: all
Maintainer: <[email protected]>
Depends: python3 (>= 3.4), python3-pyside2.qtwidgets, python3-pyside2.qtcore, python3-twisted (>= 16.4.0), python3-certifi, mpv (>= 0.23) | vlc (>= 2.2.1)
|
Fix warnings if started without X
gbulb.install(gtk=True) will import `gi.repository.Gtk`, leading to
warnings if X is not available - which is detrimental for its use as
command line tool in ssh/tty environments. | @@ -123,7 +123,7 @@ class _EntryPoint:
def __init__(self, argv=None):
"""Parse command line options, read config and initialize members."""
- gbulb.install(gtk=True)
+ gbulb.install(gtk=_in_X and _has_Gtk)
# parse program options (retrieve log level and config file name):
args = docopt(self.usage, version='udiskie ' + self.version)
default_opts = self.option_defaults
|
Removed unnecessary console.logs | @@ -110,7 +110,6 @@ export class ReportingGridComponent implements OnInit, AfterViewInit {
refreshData(fullReport, report) {
this.reportData = [...report];
this.fullReport = fullReport;
- console.log(fullReport);
this.checkFilters();
}
@@ -188,7 +187,6 @@ export class ReportingGridComponent implements OnInit, AfterViewInit {
}
private checkMaxRight() {
- console.log('check');
if (this.reportData && this.reportData.length < 5) {
const arg = this.pageWrapper.nativeElement.offsetWidth - 15 +
this.pageWrapper.nativeElement.scrollLeft + 2 <= this.table._elementRef.nativeElement.offsetWidth;
|
portico: Add headings to /for/communities.
Add headings to break up the page.
Remove badly worded reference to /why-zulip.
Add buttons similar to other /for pages at top and bottom. | {% include 'zerver/landing_nav.html' %}
-<div class="portico-landing why-page">
- <div class="hero">
+<div class="portico-landing why-page solutions-page">
+ <div class="hero bg-education">
+ <div class="bg-dimmer"></div>
<h1 class="center">Zulip for communities</h1>
<p>Open-source projects, research collaborations, volunteer organizations.</p>
+ <div class="hero-text">
+ <a href="/plans">Zulip Standard</a> is discounted 85%+ for registered non-profits, and most communities are eligible for a discount. Contact <a href="mailto:[email protected]">[email protected]</a> to check whether your organization qualifies, or <a href="/accounts/go/?next=/upgrade%23sponsorship">request sponsorship</a> today.
+ </div>
+ <div class="hero-buttons center">
+ <a href="/new/" class="button">
+ {{ _('Create organization') }}
+ </a>
+ <a href="/accounts/go/?next=/upgrade%23sponsorship" class="button">
+ {{ _('Request sponsorship') }}
+ </a>
+ <a href="https://zulip.readthedocs.io/en/stable/production/install.html" class="button">
+ {{ _('Self-host Zulip') }}
+ </a>
+ </div>
</div>
<div class="main">
<div class="padded-content">
</div>
</div>
</div>
+ <div class="bottom-register-buttons">
+ <h1>
+ <a href="/plans">Zulip Standard</a> is discounted for most communities!
+ </h1>
+ <div class="hero-buttons center">
+ <a href="/new/" class="button">
+ {{ _('Create organization') }}
+ </a>
+ <a href="/accounts/go/?next=/upgrade%23sponsorship" class="button">
+ {{ _('Request sponsorship') }}
+ </a>
+ <a href="https://zulip.readthedocs.io/en/stable/production/install.html" class="button">
+ {{ _('Self-host Zulip') }}
+ </a>
+ </div>
+ </div>
</div>
{% endblock %}
|
Replace direct use of int32_t with an alias DeviceIndex
Summary:
Pull Request resolved:
It just makes the semantic meaning of the int32_t a little
bit clearer. | namespace c10 {
+/// An index representing a specific device; e.g., the 1 in GPU 1.
+/// A DeviceIndex is not independently meaningful without knowing
+/// the DeviceType it is associated; try to use Device rather than
+/// DeviceIndex directly.
+using DeviceIndex = int32_t;
+
/// Represents a a compute device on which a tensor is located. A device is
/// uniquely identified by a type, which specifies the type of machine it is
/// (e.g. CPU or CUDA GPU), and a device index or ordinal, which identifies the
@@ -26,7 +32,7 @@ struct C10_API Device {
/// Constructs a new `Device` from a `DeviceType` and an optional device
/// index.
- /* implicit */ Device(DeviceType type, int32_t index = -1)
+ /* implicit */ Device(DeviceType type, DeviceIndex index = -1)
: type_(type), index_(index) {
AT_CHECK(
index == -1 || index >= 0,
@@ -58,7 +64,7 @@ struct C10_API Device {
}
/// Sets the device index.
- void set_index(int32_t index) {
+ void set_index(DeviceIndex index) {
index_ = index;
}
@@ -68,7 +74,7 @@ struct C10_API Device {
}
/// Returns the optional index.
- const int32_t& index() const noexcept {
+ DeviceIndex index() const noexcept {
return index_;
}
@@ -89,7 +95,7 @@ struct C10_API Device {
private:
DeviceType type_;
- int32_t index_ = -1;
+ DeviceIndex index_ = -1;
};
C10_API std::ostream& operator<<(
|
[IMPR] use subTest for TestTranslate.test_localized
Also follow PEP 8 naming convention for this test class | @@ -35,19 +35,15 @@ class TestTranslate(TestCase):
self.msg_no_english = {'ja': 'test-no-english JA'}
super(TestTranslate, self).setUp()
- def testLocalized(self):
+ def test_localized(self):
"""Test fully localized translations."""
- self.assertEqual(i18n.translate('en', self.msg_localized,
- fallback=True),
- 'test-localized EN')
- self.assertEqual(i18n.translate('nl', self.msg_localized,
- fallback=True),
- 'test-localized NL')
- self.assertEqual(i18n.translate('fy', self.msg_localized,
+ for code, msg in self.msg_localized.items():
+ with self.subTest(code=code):
+ self.assertEqual(i18n.translate(code, self.msg_localized,
fallback=True),
- 'test-localized FY')
+ msg)
- def testSemiLocalized(self):
+ def test_semi_localized(self):
"""Test translate by fallback to an alternative language."""
self.assertEqual(i18n.translate('en', self.msg_semi_localized,
fallback=True),
@@ -58,7 +54,7 @@ class TestTranslate(TestCase):
fallback=True),
'test-semi-localized NL')
- def testNonLocalized(self):
+ def test_non_localized(self):
"""Test translate with missing localisation."""
for code in ('en', 'fy', 'nl', 'ru'):
with self.subTest(code=code):
|
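A minimal standalone sketch (hypothetical test data, not from pywikibot) of why `subTest` helps here: each failing parameter is reported separately instead of the whole test method stopping at the first failed assertion.

```Python
import unittest

class ExampleTest(unittest.TestCase):
    def test_doubles(self):
        # The last expectation is deliberately wrong to show per-case reporting.
        cases = {1: 2, 2: 4, 3: 7}
        for value, expected in cases.items():
            with self.subTest(value=value):
                self.assertEqual(value * 2, expected)

if __name__ == '__main__':
    unittest.main()
```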
Use page objects for the 'Validate Contact' keyword
Also added documentation for the keyword, to be a good robot citizen | @@ -41,9 +41,15 @@ Via UI
Validate Contact
[Arguments] ${contact_id} ${first_name} ${last_name}
+ [Documentation]
+ ... Given a contact id, validate that the contact has the
+ ... expected first and last name both through the detail page in
+ ... the UI and via the API.
+
# Validate via UI
- Go To Record Home ${contact_id}
+ Go to page Detail Contact ${contact_id}
Page Should Contain ${first_name} ${last_name}
+
# Validate via API
&{contact} = Salesforce Get Contact ${contact_id}
Should Be Equal ${first_name} &{contact}[FirstName]
|
Updated the Microsoft Graph API README
* Updated the Microsoft Graph API README
Added the authorization process commands - msgraph-api-auth-start, msgraph-api-auth-complete, msgraph-api-test
* Update Packs/MicrosoftGraphAPI/Integrations/MicrosoftGraphAPI/README.md | @@ -65,6 +65,20 @@ The integration supports only Application permission type, and does not support
## Commands
You can execute the command from the Cortex XSOAR CLI, as part of an automation, or in a playbook.
After you successfully execute a command, a DBot message appears in the War Room with the command details.
+
+### msgraph-api-auth-start
+***
+Run this command to start the authorization process and follow the instructions in the command results.
+
+### msgraph-api-auth-complete
+***
+Run this command to complete the authorization process.
+Should be used after running the ***msgraph-api-auth-start*** command.
+
+### msgraph-api-test
+***
+Tests connectivity to Microsoft when using Cortex XSOAR Azure app.
+
### msgraph-api-request
***
Run a Microsoft Graph API query.
|
Add explicit type annotation in adhoc example config
Without this type annotation, mypy will incorrectly infer a type that is too
tight for user_options - either that it contains strings or sequences of
strings (but not both). | @@ -2,7 +2,9 @@ from parsl.providers import AdHocProvider
from parsl.channels import SSHChannel
from parsl.executors import HighThroughputExecutor
from parsl.config import Config
+from typing import Any, Dict
+user_opts: Dict[str, Dict[str, Any]]
user_opts = {'adhoc':
{'username': 'YOUR_USERNAME',
'script_dir': 'YOUR_SCRIPT_DIR',
@@ -10,6 +12,7 @@ user_opts = {'adhoc':
}
}
+
config = Config(
executors=[
HighThroughputExecutor(
|
Update census.ipynb
Fixed incorrect getting started link in census.ipynb | "# Preprocessing data with TensorFlow Transform\n",
"***The Feature Engineering Component of TensorFlow Extended (TFX)***\n",
"\n",
- "This example colab notebook provides a somewhat more advanced example of how \u003ca target='_blank' href='https://www.tensorflow.org/tfx/transform/'\u003eTensorFlow Transform\u003c/a\u003e (`tf.Transform`) can be used to preprocess data using exactly the same code for both training a model and serving inferences in production.\n",
+ "This example colab notebook provides a somewhat more advanced example of how \u003ca target='_blank' href='https://www.tensorflow.org/tfx/transform/get_started'\u003eTensorFlow Transform\u003c/a\u003e (`tf.Transform`) can be used to preprocess data using exactly the same code for both training a model and serving inferences in production.\n",
"\n",
"TensorFlow Transform is a library for preprocessing input data for TensorFlow, including creating features that require a full pass over the training dataset. For example, using TensorFlow Transform you could:\n",
"\n",
|
issue 2.8 PlayContext.connection no longer contains connection name
Not clear what the intention is here. Either need to ferret it out of
some other location, or just stop preloading the connection class in the
top-level process. | @@ -40,6 +40,12 @@ import ansible_mitogen.process
import ansible
import ansible.executor.process.worker
+try:
+ # 2.8+ has a standardized "unset" object.
+ from ansible.utils.sentinel import Sentinel
+except ImportError:
+ Sentinel = None
+
ANSIBLE_VERSION_MIN = '2.3'
ANSIBLE_VERSION_MAX = '2.8'
@@ -261,14 +267,17 @@ class StrategyMixin(object):
name=task.action,
mod_type='',
)
- ansible_mitogen.loaders.connection_loader.get(
- name=play_context.connection,
- class_only=True,
- )
ansible_mitogen.loaders.action_loader.get(
name=task.action,
class_only=True,
)
+ if play_context.connection is not Sentinel:
+ # 2.8 appears to defer computing this value until it's inside the
+ # worker. TODO: figure out where this value has moved.
+ ansible_mitogen.loaders.connection_loader.get(
+ name=play_context.connection,
+ class_only=True,
+ )
return super(StrategyMixin, self)._queue_task(
host=host,
|
Update the query to only return the count from the table since that is
all we care about. | @@ -398,14 +398,14 @@ def insert_notification_history_delete_notifications(
select_to_use = select_into_temp_table_for_letters if notification_type == 'letter' else select_into_temp_table
db.session.execute(select_to_use, input_params)
- result = db.session.execute("select * from NOTIFICATION_ARCHIVE")
+ result = db.session.execute("select count(*) from NOTIFICATION_ARCHIVE").fetchone()[0]
db.session.execute(insert_query)
db.session.execute(delete_query)
db.session.execute("DROP TABLE NOTIFICATION_ARCHIVE")
- return result.rowcount
+ return result
def _move_notifications_to_notification_history(notification_type, service_id, day_to_delete_backwards_from, qry_limit):
|
Accept 204 responses from v0 metrics API
The v0 metrics API is returning 204s for some requests to /node. This
will be fixed later. | @@ -81,6 +81,10 @@ def test_metrics_node(dcos_api_session):
for agent in dcos_api_session.slaves:
response = dcos_api_session.metrics.get('/node', node=agent)
+ # If the response is empty, accept it and continue. To be fixed later.
+ if response.status_code == 204:
+ continue
+
assert response.status_code == 200, 'Status code: {}, Content {}'.format(
response.status_code, response.content)
assert expected_datapoint_response(response.json())
@@ -90,6 +94,10 @@ def test_metrics_node(dcos_api_session):
for agent in dcos_api_session.public_slaves:
response = dcos_api_session.metrics.get('/node', node=agent)
+ # If the response is empty, accept it and continue. To be fixed later.
+ if response.status_code == 204:
+ continue
+
assert response.status_code == 200, 'Status code: {}, Content {}'.format(
response.status_code, response.content)
assert expected_datapoint_response(response.json())
@@ -99,6 +107,10 @@ def test_metrics_node(dcos_api_session):
for master in dcos_api_session.masters:
response = dcos_api_session.metrics.get('/node', node=master)
+ # If the response is empty, accept it and continue. To be fixed later.
+ if response.status_code == 204:
+ continue
+
assert response.status_code == 200, 'Status code: {}, Content {}'.format(
response.status_code, response.content)
assert expected_datapoint_response(response.json())
|
fix: when stashing the singleton to sys.modules, use an actual module object.
At least this won't trip anyone who iterates through sys.modules and expects the values to be actual modules.
import pprint
import reprlib
import sys
+import types
import _thread
from coverage.misc import isolate_module
@@ -282,6 +283,7 @@ class DebugOutputFile: # pragma: debugging
self.write(f"New process: pid: {os.getpid()!r}, parent pid: {os.getppid()!r}\n")
SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
+ SINGLETON_ATTR = 'the_one_and_is_interim'
@classmethod
def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
@@ -310,7 +312,8 @@ class DebugOutputFile: # pragma: debugging
# this class can be defined more than once. But we really want
# a process-wide singleton. So stash it in sys.modules instead of
# on a class attribute. Yes, this is aggressively gross.
- the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True))
+ singleton_module = sys.modules.get(cls.SYS_MOD_NAME)
+ the_one, is_interim = getattr(singleton_module, cls.SINGLETON_ATTR, (None, True))
if the_one is None or is_interim:
if fileobj is None:
debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
@@ -321,7 +324,9 @@ class DebugOutputFile: # pragma: debugging
else:
fileobj = sys.stderr
the_one = cls(fileobj, show_process, filters)
- sys.modules[cls.SYS_MOD_NAME] = (the_one, interim)
+ singleton_module = types.ModuleType(cls.SYS_MOD_NAME)
+ setattr(singleton_module, cls.SINGLETON_ATTR, (the_one, interim))
+ sys.modules[cls.SYS_MOD_NAME] = singleton_module
return the_one
def write(self, text):
|
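A minimal sketch of the general pattern (hypothetical names, not coverage.py's API): keep a process-wide singleton on a synthetic module object so anything that walks `sys.modules` still only sees real module instances.

```Python
import sys
import types

_SLOT = '$example.singleton.slot'   # hypothetical sys.modules key
_ATTR = 'the_one'

def get_singleton(factory):
    mod = sys.modules.get(_SLOT)
    instance = getattr(mod, _ATTR, None)
    if instance is None:
        instance = factory()
        mod = types.ModuleType(_SLOT)
        setattr(mod, _ATTR, instance)
        sys.modules[_SLOT] = mod
    return instance

first = get_singleton(dict)
second = get_singleton(dict)
assert first is second  # same object on every lookup in this process
```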
Update import of ProxyFix due to deprecation
Reference:
Deprecated since version 0.15: ProxyFix has moved to
werkzeug.middleware.proxy_fix. All other code in this module is
deprecated and will be removed in version 1.0. | @@ -5,7 +5,7 @@ import logging
import traceback
from flask import Flask, jsonify, session
-from werkzeug.contrib.fixers import ProxyFix
+from werkzeug.middleware.proxy_fix import ProxyFix
# these have to come first to avoid circular import issues
from api.common import check, PicoException, validate # noqa
|
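A hedged usage sketch of the new import location (the proxy counts are illustrative and depend on the deployment, not on this project's config):

```Python
from flask import Flask
from werkzeug.middleware.proxy_fix import ProxyFix

app = Flask(__name__)
# Trust one reverse proxy for the client address and scheme headers.
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_proto=1)
```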
Fix catastrophic backtracking issue in header parsing regular expression.
The affected pattern is only used from a single non-public function,
which in turn is not actually used anywhere. It's in dead code. No
security issue. | @@ -3017,7 +3017,7 @@ def parse_range_header(header, maxlen=0):
#: Header tokenizer used by _parse_http_header()
-_hsplit = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
+_hsplit = re.compile('(?:(?:"((?:[^"\\\\]|\\\\.)*)")|([^;,=]+))([;,=]?)').findall
def _parse_http_header(h):
""" Parses a typical multi-valued and parametrised HTTP header (e.g. Accept headers) and returns a list of values
|
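A minimal sketch (not part of the patch) of why the one-character change matters: the old pattern repeats a group that itself contains `+`, so an unterminated quoted string can be split in exponentially many ways while the engine backtracks; the new pattern consumes one character per iteration and stays linear.

```Python
import re
import timeit

OLD = re.compile('(?:(?:"((?:[^"\\\\]+|\\\\.)*)")|([^;,=]+))([;,=]?)')
NEW = re.compile('(?:(?:"((?:[^"\\\\]|\\\\.)*)")|([^;,=]+))([;,=]?)')

# An opening quote with no closing quote triggers the backtracking.
attack = '"' + 'a' * 22

# Each extra character roughly doubles the time for the old pattern.
print(timeit.timeit(lambda: OLD.findall(attack), number=1))
print(timeit.timeit(lambda: NEW.findall(attack), number=1))
```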
Fix typo in `Washington DC.md`
There was an extra `#` in the header for the last entry in the file, which caused it to show up in the JSON with no title. | @@ -69,7 +69,7 @@ A DC resident discusses being accosted by officers when trying to enter his home
* https://twitter.com/suckmyunicornD/status/1267767217392934917
* https://dcist.com/story/20/06/02/dupont-dc-home-protest-rahul-dubey/
-#### Police charge peaceful crowd, beat them with shields |
+### Police charge peaceful crowd, beat them with shields |
Here, police are captured charging into a crowd of peaceful protestors and hitting them with their shields. One individual can be seen bleeding from the mouth after being struck, before being pushed to the ground.
|
[ci/hotfix] Fix race condition in pytest reporting
The AWS test seems to try to create the directory multiple times. | @@ -709,7 +709,7 @@ def append_short_test_summary(rep):
return
if not os.path.exists(summary_dir):
- os.makedirs(summary_dir)
+ os.makedirs(summary_dir, exist_ok=True)
test_name = rep.nodeid.replace(os.sep, "::")
@@ -720,10 +720,6 @@ def append_short_test_summary(rep):
# The test succeeded after failing, thus it is flaky.
# We do not want to annotate flaky tests just now, so remove report.
os.remove(summary_file)
-
- # If there is only the header file left, remove directory
- if len(os.listdir(summary_dir)) <= 1:
- shutil.rmtree(summary_dir)
return
# Only consider failed tests from now on
|
Randomize parallel test run order.
Oversubscribe test load by 1. | @@ -253,7 +253,7 @@ def expand_tests(requested_test_classes, excluded_test_classes,
total_tests = 0
sanity_tests = unittest.TestSuite()
single_tests = unittest.TestSuite()
- parallel_tests = unittest.TestSuite()
+ parallel_test_suites = []
for name, obj in inspect.getmembers(sys.modules[__name__]):
if not inspect.isclass(obj):
continue
@@ -270,7 +270,7 @@ def expand_tests(requested_test_classes, excluded_test_classes,
continue
print('adding test %s' % name)
test_suite = make_suite(
- obj, hw_config, root_tmpdir, ports_sock, multiprocessing.cpu_count())
+ obj, hw_config, root_tmpdir, ports_sock, multiprocessing.cpu_count() + 1)
if name.startswith('FaucetSanity'):
sanity_tests.addTest(test_suite)
else:
@@ -278,8 +278,12 @@ def expand_tests(requested_test_classes, excluded_test_classes,
single_tests.addTest(test_suite)
total_tests += 1
else:
- parallel_tests.addTest(test_suite)
+ parallel_test_suites.append(test_suite)
total_tests += 1
+ random.shuffle(parallel_test_suites)
+ parallel_tests = unittest.TestSuite()
+ for test_suite in parallel_test_suites:
+ parallel_tests.addTest(test_suite)
return (total_tests, sanity_tests, single_tests, parallel_tests)
|
Pull out the bitfield validation functions into their own routines
Mainly so we can monkeypatch them during testing | @@ -285,6 +285,32 @@ def validate_attestation_shard_block_root(attestation_data: AttestationData) ->
)
+def _validate_custody_bitfield(attestation: Attestation) -> None:
+ # NOTE: to be removed in phase 1.
+ empty_custody_bitfield = b'\x00' * len(attestation.custody_bitfield)
+ if attestation.custody_bitfield != empty_custody_bitfield:
+ raise ValidationError(
+ "Attestation custody bitfield is not empty.\n"
+ "\tFound: %s, Expected %s" %
+ (
+ attestation.custody_bitfield,
+ empty_custody_bitfield,
+ )
+ )
+
+
+def _validate_aggregation_bitfield(attestation: Attestation) -> None:
+ empty_aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
+ if attestation.aggregation_bitfield == empty_aggregation_bitfield:
+ raise ValidationError(
+ "Attestation aggregation bitfield is empty.\n"
+ "\tFound: %s, Expected some bits set." %
+ (
+ attestation.aggregation_bitfield,
+ )
+ )
+
+
def _validate_custody_bitfield_from_aggregation_bitfield(committee_size: int,
aggregation_bitfield: Bitfield,
custody_bitfield: Bitfield) -> None:
@@ -316,27 +342,9 @@ def validate_attestation_aggregate_signature(state: BeaconState,
All proof of custody bits are assumed to be 0 within the signed data.
This will change to reflect real proof of custody bits in the Phase 1.
"""
- # NOTE: to be removed in phase 1.
- empty_custody_bitfield = b'\x00' * len(attestation.custody_bitfield)
- if attestation.custody_bitfield != empty_custody_bitfield:
- raise ValidationError(
- "Attestation custody bitfield is not empty.\n"
- "\tFound: %s, Expected %s" %
- (
- attestation.custody_bitfield,
- empty_custody_bitfield,
- )
- )
+ _validate_custody_bitfield(attestation)
- empty_aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
- if attestation.aggregation_bitfield == empty_aggregation_bitfield:
- raise ValidationError(
- "Attestation aggregation bitfield is empty.\n"
- "\tFound: %s, Expected some bits set." %
- (
- attestation.aggregation_bitfield,
- )
- )
+ _validate_aggregation_bitfield(attestation)
committee = get_crosslink_committee_for_attestation(
state=state,
|
Quote --storage-dir value
Command will fail if the directory contains whitespace | @@ -27,7 +27,7 @@ class TargetAndroidNew(TargetAndroid):
'app', 'p4a.bootstrap', 'sdl2')
self.p4a_apk_cmd += self._p4a_bootstrap
color = 'always' if USE_COLOR else 'never'
- self.extra_p4a_args = ' --color={} --storage-dir={}'.format(
+ self.extra_p4a_args = ' --color={} --storage-dir="{}"'.format(
color, self._build_dir)
hook = self.buildozer.config.getdefault("app", "p4a.hook", None)
if hook is not None:
|
Admin Router: Force Nginx to honour logging setting during `init_by_lua` stage
More details here: | @@ -4,6 +4,10 @@ include common/main.conf;
http {
resolver 198.51.100.1:53 198.51.100.2:53 198.51.100.3:53 valid=60s;
+ # Check
+ # https://github.com/openresty/lua-nginx-module/issues/467#issuecomment-305529857
+ lua_shared_dict tmp 12k;
+
client_max_body_size 1024M;
# Name: DC/OS Diagnostics (3DT)
|
(doc) add update tz
add time zone for windows & update to 2 sections | @@ -288,14 +288,31 @@ Weight/Request error in logs happens when it encountered a warning or error and
### How do I resize my Hummingbot window without jumbling the text?
When resizing the window of your Hummingbot, text becomes unclear or at the same location as the previous size of the window. To do a refresh to the new window size, while inside Hummingbot press `CTRL + L` and it will refresh Hummingbot window panes. These command applies to all Hummingbot build.
-### How to change time or timezone of Hummingbot docker build?
-Hummingbot uses the time where its installed, its either on your local computer or cloud server. Sometimes docker build Hummingbot time is out of sync, follow these steps to fix it.
+## How to change time or timezone of Hummingbot?
+
+Hummingbot follows the same date/time and timezone on the machine where it is installed. Below are some steps you can follow to change the timezone depending on the operating system and installation type.
+
+**Docker**
+
+While docker `$instance_name` is runnning on background type in command line.
-While docker $instance_name is runnning on background type in command line.
```
docker exec -it $instance_name bash
dpkg-reconfigure tzdata
```
+
Configure geographic location and timezone by inputting the corresponding number, see example below:

+
+**Windows**
+
+You can change the timezone on a Windows computer by doing the following:
+
+1. Press **Win + R** shortcut to open the Run dialog box
+2. Enter `timedate.cpl` to open Date and Time settings
+3. Click **Change time zone**
+
+
+
+Alternatively, you can also follow these steps in Windows Support article: [How to set your time and time zone](https://support.microsoft.com/en-ph/help/4026213/windows-how-to-set-your-time-and-time-zone)
|
add: removed set_defaults from scheduler's maintainer
tasks. Maybe implement later; it did not work for now. | @@ -82,11 +82,6 @@ class Scheduler:
set_statement_defaults(self.shut_condition, scheduler=self)
- for maintain_task in self.maintain_tasks:
- #maintain_task.start_cond = set_statement_defaults(maintain_task.start_cond, scheduler=self)
- maintain_task.set_logger() # Resetting the logger as group changed
- maintain_task.is_maintenance = True
-
self.min_sleep = min_sleep
self.max_sleep = max_sleep
|
Update HAS_NETWORKX documentation
This documentation has been out of date; this updates it
to give the correct information. | @@ -93,10 +93,11 @@ External Python Libraries
be installed in order to use them.
* - .. py:data:: HAS_NETWORKX
- - Internally, Qiskit uses the high-performance `retworkx
- <https://github.com/Qiskit/retworkx>`__ library as a core dependency, but sometimes it can
- be convenient to convert things into the Python-only `NetworkX <https://networkx.org/>`__
- format. There are converter methods on :class:`.DAGCircuit` if NetworkX is present.
+ - No longer used by Terra. Internally, Qiskit now uses the high-performance `rustworkx
+ <https://github.com/Qiskit/rustworkx>`__ library as a core dependency, and during the
+ change-over period, it was sometimes convenient to convert things into the Python-only
+ `NetworkX <https://networkx.org/>`__ format. Some tests of application modules, such as
+ `Qiskit Nature <https://qiskit.org/documentation/nature/>`__ still use NetworkX.
* - .. py:data:: HAS_NLOPT
- `NLopt <https://nlopt.readthedocs.io/en/latest/>`__ is a nonlinear optimization library,
|
Apply suggestions from code review
excellent stuff, thanks. | @@ -362,18 +362,18 @@ JWT Authentication
authentication_handlers = {chttpd_auth, cookie_authentication_handler}, {chttpd_auth, jwt_authentication_handler}, {chttpd_auth, default_authentication_handler}
`JWT authentication` enables CouchDB to use externally generated JWT tokens
-instead of defining users or roles in the _users database.
+instead of defining users or roles in the ``_users`` database.
The JWT authentication handler requires that all JWT tokens are signed by a key that
CouchDB has been configured to trust (there is no support for JWT's "NONE" algorithm).
Additionally, CouchDB can be configured to reject JWT tokens that are missing a
-configurable set of claims (e.g, a CouchDB administrator could insist on the exp claim).
+configurable set of claims (e.g, a CouchDB administrator could insist on the ``exp`` claim).
All claims presented in a JWT token are validated if presented, regardless of whether they
are required.
-Two new sections of config have been introduced to configure JWT authentication;
+Two sections of config exist to configure JWT authentication;
.. code-block:: ini
@@ -381,11 +381,11 @@ Two new sections of config have been introduced to configure JWT authentication;
; List of claims to validate
; required_claims =
-The `required_claims` config setting is a comma-separate list of additional mandatory
-JWT claims that much be present in any presented JWT token. A 400 Bad Request is sent
+The `required_claims` config setting is a comma-separated list of additional mandatory
+JWT claims that must be present in any presented JWT token. A ``400 Bad Request`` is sent
if any are missing.
-The `alg` claim is mandatory as it used to lookup the correct key for verifying the
+The ``alg`` claim is mandatory as it used to lookup the correct key for verifying the
signature.
The `sub` claim is mandatory and is used as the CouchDB user's name if the JWT token
@@ -413,13 +413,13 @@ is valid.
; ec:bar = -----BEGIN PUBLIC KEY-----\nMHYwEAYHK...AzztRs\n-----END PUBLIC KEY-----\n
The `jwt_key` section lists all the keys that this CouchDB server trusts. You
-should ensure that all nodes of your cluster has the same list.
+should ensure that all nodes of your cluster have the same list.
JWT tokens that do not include a `kid` claim will be validated against the
`$alg:_default` key.
It is mandatory to specify the algorithm associated with every key for security
-reasons (notably presenting a HMAC signed token using a RSA or EC public key
+reasons (notably presenting a HMAC-signed token using an RSA or EC public key
that the server trusts:
https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
|
Update harvester_api.py
remove unnecessary time.time() | @@ -200,13 +200,13 @@ class HarvesterAPI:
time_taken = time.time() - start
if time_taken > 5:
self.harvester.log.warning(
- f"Looking up qualities on {filename} took: {time.time() - start}. This should be below 5 seconds "
+ f"Looking up qualities on {filename} took: {time_taken}. This should be below 5 seconds "
f"to minimize risk of losing rewards."
)
else:
pass
# If you want additional logs, uncomment the following line
- # self.harvester.log.debug(f"Looking up qualities on {filename} took: {time.time() - start}")
+ # self.harvester.log.debug(f"Looking up qualities on {filename} took: {time_taken}")
for response in sublist:
total_proofs_found += 1
msg = make_msg(ProtocolMessageTypes.new_proof_of_space, response)
|
Theme: Fix float-value rule completions
Completions of "Rule Keys" with floating point values used to suggest sequence values.
Before: "font.size": [11.0]
After: "font.size": 11.0 | { "trigger": "accent_tint_modifier\tproperty", "contents": "\"accent_tint_modifier\": [${0:0}]," },
// floats
- { "trigger": "line_selection_border_radius\tproperty", "contents": "\"line_selection_border_radius\": [${0:0.0}]," },
- { "trigger": "line_selection_border_width\tproperty", "contents": "\"line_selection_border_width\": [${0:0.0}]," },
- { "trigger": "indent\tproperty", "contents": "\"indent\": [${0:0.0}]," },
- { "trigger": "indent_offset\tproperty", "contents": "\"indent_offset\": [${0:0.0}]," },
- { "trigger": "opacity\tproperty", "contents": "\"opacity\": [${0:1.0}]," },
- { "trigger": "viewport_opacity\tproperty", "contents": "\"viewport_opacity\": [${0:0.0}]," },
- { "trigger": "hit_test_level\tproperty", "contents": "\"hit_test_level\": [${0:0.0}]," },
- { "trigger": "font.size\tproperty", "contents": "\"font.size\": [${0:11.0}]," },
+ { "trigger": "line_selection_border_radius\tproperty", "contents": "\"line_selection_border_radius\": ${0:0.0}," },
+ { "trigger": "line_selection_border_width\tproperty", "contents": "\"line_selection_border_width\": ${0:0.0}," },
+ { "trigger": "indent\tproperty", "contents": "\"indent\": ${0:0.0}," },
+ { "trigger": "indent_offset\tproperty", "contents": "\"indent_offset\": ${0:0.0}," },
+ { "trigger": "opacity\tproperty", "contents": "\"opacity\": ${0:1.0}," },
+ { "trigger": "viewport_opacity\tproperty", "contents": "\"viewport_opacity\": ${0:0.0}," },
+ { "trigger": "hit_test_level\tproperty", "contents": "\"hit_test_level\": ${0:0.0}," },
+ { "trigger": "font.size\tproperty", "contents": "\"font.size\": ${0:11.0}," },
// booleans TODO set opposite of defaults
{ "trigger": "dark_content\tproperty", "contents": "\"dark_content\": ${0:true}," },
|
Remove alternative names of name attributes
since they make signing policies less reliable. | @@ -144,18 +144,6 @@ NAME_OID = OrderedDict(
]
)
-NAME_ALT = {
- "CN": ["commonName"],
- "L": ["localityName"],
- "ST": ["SP", "stateOrProvinceName"],
- "O": ["organizationName"],
- "OU": ["organizationUnitName"],
- "GN": ["givenName"],
- "SN": ["surname"],
- "MAIL": ["Email", "emailAddress"],
- "SERIALNUMBER": ["serialNumber"],
-}
-
EXTENDED_KEY_USAGE_OID = {
"serverAuth": cx509.ObjectIdentifier("1.3.6.1.5.5.7.3.1"),
"clientAuth": cx509.ObjectIdentifier("1.3.6.1.5.5.7.3.2"),
@@ -1696,11 +1684,8 @@ def _get_dn(dn):
elif isinstance(dn, dict):
parsed = []
for name, oid in NAME_OID.items():
- for var in [name] + NAME_ALT.get(name, []):
- if var in dn:
- parsed.append(cx509.NameAttribute(oid, dn[var]))
- # only allow a single attribute of the same type for dicts
- break
+ if name in dn:
+ parsed.append(cx509.NameAttribute(oid, dn[name]))
return cx509.Name(parsed)
raise SaltInvocationError("Need string, list or dict to parse distinguished names")
|
pin development dependencies
* Using `pip freeze`, pin the currently installed version of unpinned
development dependencies. This includes:
- ipdb
- ipython
- pyflakes
- python-coveralls
- redis
* Update aiohttpretty to pull from COS's `develop` branch. | -r requirements.txt
-git+https://github.com/cslzchen/aiohttpretty.git@feature/aiohttp3
+git+https://github.com/CenterForOpenScience/aiohttpretty.git@develop
colorlog==2.5.0
flake8==3.0.4
-ipdb
-ipython
+ipdb==0.12.2
+ipython==7.8.0
mypy==0.580
pydevd==0.0.6
-pyflakes
+pyflakes==2.1.1
pytest==2.8.2
pytest-asyncio==0.3.0
pytest-cov==2.2.0
-python-coveralls
-redis
+python-coveralls==2.9.3
+redis==3.3.8
|
Track TP process response types
Adds a counter that tracks each TP process response with a tag for the
message type. | @@ -78,11 +78,20 @@ class TransactionExecutorThread(object):
self._invalid_observers = invalid_observers
self._open_futures = {}
+ self._tp_process_response_counters = {}
self._transaction_execution_count = COLLECTOR.counter(
'transaction_execution_count', instance=self)
self._in_process_transactions_count = COLLECTOR.counter(
'in_process_transactions_count', instance=self)
+ def _get_tp_process_response_counter(self, tag):
+ if tag not in self._tp_process_response_counters:
+ self._tp_process_response_counters[tag] = COLLECTOR.counter(
+ 'tp_process_response_count',
+ tags={'response_type': tag},
+ instance=self)
+ return self._tp_process_response_counters[tag]
+
def _future_done_callback(self, request, result):
"""
:param request (bytes):the serialized request
@@ -102,6 +111,9 @@ class TransactionExecutorThread(object):
result.connection_id).dec_occupancy()
self._processors.notify()
+ self._get_tp_process_response_counter(
+ response.Status.Name(response.status)).inc()
+
if result.connection_id in self._open_futures and \
req.signature in self._open_futures[result.connection_id]:
del self._open_futures[result.connection_id][req.signature]
|
Handle RemoteDisconnected in is_local check.
Fixes | """Common utilities."""
import hashlib
+import http.client
import os
import urllib.request
import urllib.error
@@ -50,6 +51,8 @@ def is_local():
_is_local = False
except urllib.error.URLError:
_is_local = True
+ except http.client.RemoteDisconnected:
+ _is_local = True
return _is_local
|
Update application.yaml
change model | @@ -142,7 +142,7 @@ cls_inference:
################### text task: punc; engine_type: python #######################
text_python:
task: punc
- model_type: 'ernie_linear_p3_wudao'
+ model_type: 'ernie_linear_p3_wudao_fast'
lang: 'zh'
sample_rate: 16000
cfg_path: # [optional]
|
Update generic.txt
All are ```luxnetrat``` instead: | @@ -10890,17 +10890,6 @@ sttsts.ru
linkedliqht.com
-# Reference: https://app.any.run/tasks/3f711d7e-b3b0-4bea-94ce-356db8aeb293/
-
-191.205.215.182:2334
-regedxasd.duckdns.org
-
-# Reference: https://app.any.run/tasks/96757b09-76f2-4e92-9bf0-21b5a3bc49c5/
-# Reference: https://www.virustotal.com/gui/file/1717f043b5ea0db5a43ef7bca9820a3c656dca8336139ccc499683c63ad0f1c3/detection
-
-191.205.215.182:4431
-windowsconnect.duckdns.org
-
# Generic
/newratexploitlink
|
Add C standard specification to flags
Add C standard specification "-std=c99" to Intel flags (without this ndarrays.c does not compile with icc). | @@ -61,6 +61,9 @@ def construct_flags(compiler,
if debug:
flags.append("-fcheck=bounds")
+ if compiler == "icc":
+ flags.append("-std=c99")
+
if compiler == "mpif90":
if debug:
flags.append("-fcheck=bounds")
|
[air] Use custom fsspec handler for GS
`gcsfs` complains about an invalid `create_parents` argument when using google cloud storage with cloud checkpoints. Thus we should use an alternative fs spec handler that omits this argument for gs.
The root issue will be fixed here: | @@ -3,14 +3,30 @@ from typing import Optional, Tuple
try:
import fsspec
+
except ImportError:
fsspec = None
try:
import pyarrow
import pyarrow.fs
+
+ # Todo(krfricke): Remove this once gcsfs > 2022.3.0 is released
+ # (and make sure to pin)
+ class _CustomGCSHandler(pyarrow.fs.FSSpecHandler):
+ """Custom FSSpecHandler that avoids a bug in gcsfs <= 2022.3.0."""
+
+ def create_dir(self, path, recursive):
+ try:
+ # GCSFS doesn't expose `create_parents` argument,
+ # so it is omitted here
+ self.fs.mkdir(path)
+ except FileExistsError:
+ pass
+
except (ImportError, ModuleNotFoundError):
pyarrow = None
+ _CustomGCSHandler = None
from ray import logger
@@ -100,7 +116,12 @@ def get_fs_and_path(
# Raised when protocol not known
return None, None
- fs = pyarrow.fs.PyFileSystem(pyarrow.fs.FSSpecHandler(fsspec_fs))
+ fsspec_handler = pyarrow.fs.FSSpecHandler
+ if parsed.scheme in ["gs", "gcs"]:
+ # GS doesn't support `create_parents` arg in `create_dir()`
+ fsspec_handler = _CustomGCSHandler
+
+ fs = pyarrow.fs.PyFileSystem(fsspec_handler(fsspec_fs))
_cached_fs[cache_key] = fs
return fs, path
|
Call SubResource correctly in list comprehension
Call SubResource(id=x) instead of SubResource(x) in list comprehension
since the latter introduced a type error and prevented az network lb
outbound-rule update from being used with the --frontend-ip-configs flag
This fixes | @@ -1984,7 +1984,7 @@ def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbou
_set_param(instance, 'backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
_set_param(instance, 'frontend_ip_configurations',
- [SubResource(x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
+ [SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
|
don't build linux/arm/v7 (32bit) docker image
Alpine Linux does not have the correct Rust version to build cryptography, and wheels are not available for 32bit. | @@ -113,7 +113,7 @@ jobs:
org.opencontainers.image.title=Maestral
org.opencontainers.image.url=${{ github.event.repository.html_url }}
org.opencontainers.image.version=${{ steps.prep.outputs.version }}
- platforms: linux/amd64,linux/arm64,linux/arm/v7
+ platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
- name: Move cache
|
i18n: Translate the whole text in stream deactivation modal.
This commit fixes the template of the stream deactivation modal
to tag all the text for translation. This commit also removes
the unnecessary span element. | -Archiving stream <strong>{{stream_name}}</strong> <span>will immediately unsubscribe everyone. This action cannot be undone.</span>
+{{#tr}}
+ Archiving stream <z-stream></z-stream> will immediately unsubscribe everyone. This action cannot be undone.
+ {{#*inline "z-stream"}}<strong>{{stream_name}}</strong>{{/inline}}
+{{/tr}}
<p><strong>{{t "Are you sure you want to archive this stream?" }}</strong></p>
|
Update create_instances.py
more lint fixes | @@ -33,7 +33,7 @@ def create_instance(ami_name):
instance_id = instance_file.read()
with open('image_id.txt', 'r') as image_id_file:
image_data = image_id_file.read()
- print('Image data is {}.format(image_data))
+ print('Image data is {}'.format(image_data))
with open("./Tests/images_data.txt", "a") as image_data_file:
image_data_file.write(
'{name} Image info is: {data}\n'.format(name=AMI_NAME_TO_READABLE[ami_name], data=image_data))
|
Free tag: link #how-to-get-help
This creates a clickable link in the response embed. Referencing the
category is no longer necessary. | **We have a new help channel system!**
-We recently moved to a new help channel system. You can now use any channel in the **<#691405807388196926>** category to ask your question.
+Please see <#704250143020417084> for further information.
-For more information, check out [our website](https://pythondiscord.com/pages/resources/guides/help-channels/).
+A more detailed guide can be found on [our website](https://pythondiscord.com/pages/resources/guides/help-channels/).
|
Added function "view_database_tables" to download_dataport.py
It will allow the user to see available tables in the dataport database. | @@ -129,6 +129,37 @@ def database_assert(database_table):
or database_table == 'electricity_egauge_seconds'
), "Table not compatible with NILMTK"
+def view_database_tables(database_username, database_password,
+ database_schema):
+
+
+ database_host = 'dataport.pecanstreet.org'
+ database_port = '5434'
+ database_name = 'postgres'
+
+ try:
+ conn = db.connect('host=' + database_host +
+ ' port=' + database_port +
+ ' dbname=' + database_name +
+ ' user=' + database_username +
+ ' password=' + database_password)
+ except:
+ print('Could not connect to remote database')
+ raise
+
+
+ #Loading university schemas
+ sql_query = ("SELECT table_name" +
+ " FROM information_schema.views" +
+ " WHERE table_schema ='" + database_schema + "'" +
+ " ORDER BY table_name")
+ database_tables=pd.read_sql(sql_query, conn)['table_name'].tolist()
+
+ df=pd.DataFrame({database_schema:database_tables})
+ #print(database_tables)
+ print(df)
+ conn.close()
+
def download_dataport(database_username, database_password,
hdf_filename, periods_to_load=None):
|
Grammar.add_rule: don't abort when unable to find the loc of kwarg
TN: | @@ -237,7 +237,7 @@ class Grammar(object):
rule.set_name(names.Name.from_lower(name))
rule.set_grammar(self)
- if loc:
+ if loc and name in keywords:
rule.set_location(Location(loc.file, keywords[name].lineno))
rule.is_root = True
|
Run sudo apt-get update before installing a package
As documented
[here](https://docs.github.com/en/actions/using-github-hosted-runners/customizing-github-hosted-runners#installing-software-on-ubuntu-runners)
and also discussed [here](https://github.com/actions/virtual-environments/issues/1757) | @@ -60,7 +60,9 @@ jobs:
components: rustfmt
- name: Install xmllint
- run: sudo apt-get install libxml2-utils
+ run: |
+ sudo apt-get update
+ sudo apt-get install libxml2-utils
- name: Create working crate
run: make crates
|
[core/output] Re-enable basic pango support
Re-enable pango as simple "pango" dict wherever a normal value (e.g.
prefix, suffix) can go. | @@ -23,7 +23,7 @@ class block(object):
__COMMON_THEME_FIELDS = [
'separator', 'separator-block-width', 'default-separators',
'border-top', 'border-left', 'border-right', 'border-bottom',
- 'pango', 'fg', 'bg', 'padding', 'prefix', 'suffix'
+ 'fg', 'bg', 'padding', 'prefix', 'suffix'
]
def __init__(self, theme, module, widget):
self.__attributes = {}
@@ -39,6 +39,26 @@ class block(object):
def set(self, key, value):
self.__attributes[key] = value
+ def is_pango(self, attr):
+ if isinstance(attr, dict) and 'pango' in attr:
+ return True
+ return False
+
+ def pangoize(self, text):
+ if not self.is_pango(text):
+ return text
+ self.__attributes['markup'] = 'pango'
+ attr = dict(text['pango'])
+ text = attr.get('full_text', '')
+ if 'full_text' in attr:
+ del attr['full_text']
+
+ result = '<span '
+ for key, value in attr.items():
+ result = '{} {}="{}"'.format(result, key, value)
+ result = '{}>{}</span>'.format(result, text)
+ return result
+
def dict(self):
result = {}
@@ -54,11 +74,12 @@ class block(object):
assign(self.__attributes, result, 'background', 'bg')
if 'full_text' in self.__attributes:
+ result['full_text'] = self.pangoize(result['full_text'])
result['full_text'] = self.__format(self.__attributes['full_text'])
for k in [
'name', 'instance', 'separator_block_width', 'border', 'border_top',
- 'border_bottom', 'border_left', 'border_right'
+ 'border_bottom', 'border_left', 'border_right', 'markup'
]:
assign(self.__attributes, result, k)
@@ -71,10 +92,12 @@ class block(object):
def __format(self, text):
if text is None: return None
+ prefix = self.pangoize(self.__attributes.get('prefix'))
+ suffix = self.pangoize(self.__attributes.get('suffix'))
return '{}{}{}'.format(
- self.__pad(self.__attributes.get('prefix')),
+ self.__pad(prefix),
text,
- self.__pad(self.__attributes.get('suffix'))
+ self.__pad(suffix)
)
class i3(object):
|
Improved German translation of the README file.
Fixed comment at the beginning of the file. | <!--
-*** Official Duino Coin README
-*** by revoxhere, 2019-2022
+*** Translated Duino Coin README (de_DE)
+*** by revoxhere and Techcrafter, 2019-2022
-->
<a href="https://duinocoin.com">
|
Fix pylint E1128 for backend.py
E1128: Assigning to function call which only returns None (assignment-from-none)
References: PyCQA/pylint#2332 | import time
from functools import cmp_to_key
+from abc import ABCMeta, abstractmethod
# Copyright 2007,, Frank Scholz <[email protected]>
from lxml import etree
@@ -77,6 +78,7 @@ class Backend(log.LogAble, Plugin):
class BackendStore(Backend):
""" the base class for all MediaServer backend stores
"""
+ __metaclass__ = ABCMeta
logCategory = 'backend_store'
@@ -153,6 +155,7 @@ class BackendStore(Backend):
items.append(child)
return items
+ @abstractmethod
def get_by_id(self, id):
""" called by the CDS or the MediaServer web
|
Added info about future fate of DB models docs
DB models docs will probably be moved to docstrings in every described
model. | @@ -4,7 +4,8 @@ This document includes details on implementation of AMY internals.
Table of contents:
-1. [Database models](./database_models.md)
+1. [Database models](./database_models.md) (to be moved to
+ docstrings in `workshops/models.py`)
2. Templates hierarchy
3. Views hierarchy
4. [Server infrastructure](./server_infrastructure.md)
|
Move tokeninfo call to user_data method
This allows custom endpoints that call do_auth directly to keep working | @@ -136,19 +136,20 @@ class GooglePlusAuth(BaseGoogleOAuth2API, BaseOAuth2):
*args, **kwargs)
elif 'id_token' in self.data: # Client-side workflow
token = self.data.get('id_token')
+ return self.do_auth(token, *args, **kwargs)
+ else:
+ raise AuthMissingParameter(self, 'access_token, id_token, or code')
+
+ def user_data(self, access_token, *args, **kwargs):
+ if 'id_token' not in self.data:
+ return super(GooglePlusAuth, self).user_data(access_token, *args,
+ **kwargs)
response = self.get_json(
'https://www.googleapis.com/oauth2/v3/tokeninfo',
- params={'id_token': token}
+ params={'id_token': access_token}
)
self.process_error(response)
- return self.do_auth(token, response=response, *args, **kwargs)
- else:
- raise AuthMissingParameter(self, 'access_token, id_token, or code')
-
- def user_data(self, *args, **kwargs):
- if 'id_token' in self.data:
- return kwargs['response']
- return super(GooglePlusAuth, self).user_data(*args, **kwargs)
+ return response
class GoogleOAuth(BaseGoogleAuth, BaseOAuth1):
|
api_docs: Add "Narrow" common component.
To facilitate re-use of the same parameters in other paths, this commit
stores the content of the parameter "narrow" in components. | @@ -2554,21 +2554,7 @@ paths:
items:
type: string
example: ['message']
- - name: narrow
- in: query
- description: |
- A JSON-encoded array of length 2 indicating the narrow for which you'd
- like to receive events. For instance, to receive events for the stream
- `Denmark`, you would specify `narrow=['stream', 'Denmark']`. Another
- example is `narrow=['is', 'private']` for private messages.
- schema:
- type: array
- items:
- anyOf:
- - type: string
- - type: integer
- default: []
- example: ['stream', 'Denmark']
+ - $ref: '#/components/parameters/Narrow'
responses:
'200':
description: Success.
@@ -3420,20 +3406,7 @@ paths:
description: |
(Ignored)
parameters:
- - name: narrow
- in: query
- description: |
- A JSON-encoded array of length 2 indicating the narrow for which you'd
- like to receive events for. For instance, to receive events for the
- stream `Denmark`, you would specify `narrow=['stream', 'Denmark']`.
- Another example is `narrow=['is', 'private']` for private messages.
- Default is `[]`.
- schema:
- type: array
- items:
- type: string
- example: narrow=['stream', 'Denmark']
- required: false
+ - $ref: '#/components/parameters/Narrow'
- $ref: '#/components/parameters/Event_types'
example: event_types=['message']
security:
@@ -3748,3 +3721,21 @@ components:
items:
type: string
required: false
+ Narrow:
+ name: narrow
+ in: query
+ description: |
+ A JSON-encoded array of length 2 indicating the narrow for which you'd
+ like to receive events for. For instance, to receive events for the
+ stream `Denmark`, you would specify `narrow=['stream', 'Denmark']`.
+ Another example is `narrow=['is', 'private']` for private messages.
+ Default is `[]`.
+ schema:
+ type: array
+ items:
+ anyOf:
+ - type: string
+ - type: integer
+ default: []
+ example: ['stream', 'Denmark']
+ required: false
|
Fix import path
Normalize the path to `_torchtext.so`
so that it is correctly found when imported from other directories. | @@ -14,9 +14,23 @@ __all__ = ['data',
def _init_extension():
+ import os
+ import importlib
import torch
- torch.ops.load_library('torchtext/_torchtext.so')
- torch.classes.load_library('torchtext/_torchtext.so')
+
+ # load the custom_op_library and register the custom ops
+ lib_dir = os.path.dirname(__file__)
+ loader_details = (
+ importlib.machinery.ExtensionFileLoader,
+ importlib.machinery.EXTENSION_SUFFIXES
+ )
+
+ extfinder = importlib.machinery.FileFinder(lib_dir, loader_details)
+ ext_specs = extfinder.find_spec("_torchtext")
+ if ext_specs is None:
+ raise ImportError
+ torch.ops.load_library(ext_specs.origin)
+ torch.classes.load_library(ext_specs.origin)
_init_extension()
|
CMake : Build config tweaks
disable warnings as errors
disable strict overflow
disable deprecated warning | @@ -49,12 +49,12 @@ endif()
IF ( "${CMAKE_BUILD_TYPE}" MATCHES "Debug" )
ADD_DEFINITIONS( -DDEBUG=1 -UNDEBUG )
IF ( NOT WINDOWS )
- ADD_DEFINITIONS( -pipe -Wall -O0 -Wno-unused-local-typedefs -Wno-strict-aliasing -Wno-maybe-uninitialized)
+ ADD_DEFINITIONS( -pipe -Wall -O0 -Wno-unused-local-typedefs -Wno-strict-aliasing -Wno-maybe-uninitialized -Wno-deprecated)
ENDIF()
ELSEIF ( "${CMAKE_BUILD_TYPE}" MATCHES "Release" )
ADD_DEFINITIONS( -DNDEBUG=1 -UDEBUG )
IF ( NOT WINDOWS )
- ADD_DEFINITIONS( -pipe -Wall -Werror -O3 -DNDEBUG -DBOOST_DISABLE_ASSERTS -Wno-unused-local-typedefs -Wno-strict-aliasing -Wno-maybe-uninitialized ) #-Wno-return-type )
+ ADD_DEFINITIONS( -pipe -Wall -O3 -DNDEBUG -DBOOST_DISABLE_ASSERTS -Wno-unused-local-typedefs -Wno-strict-aliasing -Wno-maybe-uninitialized -Wno-strict-overflow) #-Wno-return-type )
ENDIF()
ENDIF()
|
Update avcodecs.py
add hardware based scale filters for nvenc and qsv | @@ -795,15 +795,7 @@ class NVEncH264(H264Codec):
"""
codec_name = 'h264_nvenc'
ffmpeg_codec_name = 'h264_nvenc'
- scale_filter = 'npp_scale'
-
- def _codec_specific_parse_options(self, safe, stream=0):
- # NVENC doesn't support scaling
- if 'width' in safe:
- del(safe['width'])
- if 'height' in safe:
- del(safe['height'])
- return safe
+ scale_filter = 'scale_npp'
class VideotoolboxEncH264(H264Codec):
@@ -862,6 +854,7 @@ class H264QSV(H264Codec):
"""
codec_name = 'h264qsv'
ffmpeg_codec_name = 'h264_qsv'
+ scale_filter = 'scale_qsv'
def _codec_specific_produce_ffmpeg_list(self, safe, stream=0):
optlist = super(H264QSV, self)._codec_specific_produce_ffmpeg_list(safe, stream)
@@ -925,6 +918,7 @@ class HEVCQSV(H265Codec):
"""
codec_name = 'hevcqsv'
ffmpeg_codec_name = 'hevc_qsv'
+ scale_filter = 'scale_qsv'
class H265VAAPI(H265Codec):
@@ -967,15 +961,7 @@ class NVEncH265(H265Codec):
"""
codec_name = 'h265_nvenc'
ffmpeg_codec_name = 'hevc_nvenc'
- scale_filter = 'npp_scale'
-
- def _codec_specific_parse_options(self, safe, stream=0):
- # NVENC doesn't support scaling
- if 'width' in safe:
- del(safe['width'])
- if 'height' in safe:
- del(safe['height'])
- return safe
+ scale_filter = 'scale_npp'
class DivxCodec(VideoCodec):
|
Fix a pasto in untyped wrappers code generation
TN: | -- Untyped wrappers for ${cls.name()}
--
- % for prop in props:
+ % for prop in untyped_wrappers:
${prop.untyped_wrapper_decl}
% endfor
% endif
|
ENH: added Table.to_markdown(), direct method for generating markdown str
Note: the method was actually added by accident a couple of commits earlier; this
patch contains the tests. | @@ -1048,7 +1048,8 @@ class TableTests(TestCase):
"""Exercising the table markdown method"""
from cogent3.format.table import markdown
- markdown_table = markdown(self.t6_header, self.t6_rows, justify="crl")
+ table = make_table(self.t6_header, self.t6_rows, format="md")
+ markdown_table = table.to_markdown(justify="crl")
markdown_list = markdown_table.split("\n")
self.assertEqual(markdown_list[2].count(r"|"), 5)
# the pipe symbol should have been escaped
|
Add doc for custom lifetime of java actor
Custom lifetime of Java actors is already supported, but the related documentation was not updated | @@ -155,10 +155,9 @@ created with the specified arguments.
Actor Lifetimes
---------------
-.. tabbed:: Python
+Separately, actor lifetimes can be decoupled from the job, allowing an actor to persist even after the driver process of the job exits.
- Separately, actor lifetimes can be decoupled from the job, allowing an actor to
- persist even after the driver process of the job exits.
+.. tabbed:: Python
.. code-block:: python
@@ -179,7 +178,22 @@ Actor Lifetimes
.. tabbed:: Java
- Customizing lifetime of an actor hasn't been implemented in Java yet.
+ .. code-block:: java
+
+ System.setProperty("ray.job.namespace", "lifetime");
+ Ray.init();
+ ActorHandle<Counter> counter = Ray.actor(Counter::new).setName("some_name").setLifetime(ActorLifetime.DETACHED).remote();
+
+ The CounterActor will be kept alive even after the driver running above process
+ exits. Therefore it is possible to run the following code in a different
+ driver:
+
+ .. code-block:: java
+
+ System.setProperty("ray.job.namespace", "lifetime");
+ Ray.init();
+ Optional<ActorHandle<Counter>> counter = Ray.getActor("some_name");
+ Assert.assertTrue(counter.isPresent());
.. tabbed:: C++
|
bug: filter out display types without file output
don't process Renderman display types that do not produce any file output (like `d_it`)
if not enabled:
continue
+ # Skip display types not producing any file output.
+ # Is there a better way to do it?
+ if not display_types.get(display["driverNode"]["type"]):
+ continue
+
aov_name = name
if aov_name == "rmanDefaultDisplay":
aov_name = "beauty"
|
History: request_cancel_execution_initiated is not a task
TODO: check what we put in self._tasks... | @@ -492,7 +492,6 @@ class History(object):
}
if event.workflow_id not in self._external_workflows_canceling:
self._external_workflows_canceling[event.workflow_id] = workflow
- self._tasks.append(workflow)
else:
logger.warning("request_cancel_initiated again for workflow {} (initiated @{}, we're @{})".format(
event.workflow_id,
|
Update config.py
Flask cares about casing, and that one lowercase letter could have ruined everything. Or it could have been harmless, no idea, but best not to find out. | @@ -67,7 +67,7 @@ class Config(object):
MAIL_SERVER = os.environ.get("MAIL_SERVER", None)
MAIL_PORT = int(os.environ.get("MAIL_PORT", 587))
MAIL_USE_TLS = casted_bool(os.environ.get("MAIL_USE_TLS", True))
- MAIL_USE_SSl = casted_bool(os.environ.get("MAIL_USE_SSL", False))
+ MAIL_USE_SSL = casted_bool(os.environ.get("MAIL_USE_SSL", False))
MAIL_USERNAME = os.environ.get("MAIL_USERNAME", None)
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD", None)
MAIL_FROM = os.environ.get("MAIL_FROM", None)
|
Make Battery notification timeout configurable
Users may want a timeout other than the default 10 seconds.
Closes | @@ -330,6 +330,7 @@ class Battery(base.ThreadPoolText):
("update_interval", 60, "Seconds between status updates"),
("battery", 0, "Which battery should be monitored (battery number or name)"),
("notify_below", None, "Send a notification below this battery level."),
+ ("notification_timeout", 10, "Time in seconds to display notification. 0 for no expiry."),
]
def __init__(self, **config) -> None:
@@ -345,6 +346,7 @@ class Battery(base.ThreadPoolText):
self._battery = self._load_battery(**config)
self._has_notified = False
+ self.timeout = int(self.notification_timeout * 1000)
def _configure(self, qtile, bar):
if not self.low_background:
@@ -378,7 +380,12 @@ class Battery(base.ThreadPoolText):
percent = int(status.percent * 100)
if percent < self.notify_below:
if not self._has_notified:
- send_notification("Warning", "Battery at {0}%".format(percent), urgent=True)
+ send_notification(
+ "Warning",
+ "Battery at {0}%".format(percent),
+ urgent=True,
+ timeout=self.timeout,
+ )
self._has_notified = True
elif self._has_notified:
self._has_notified = False
|
Skipped Forescout instead of Forescout-Test
Added issue number to Athena
Removed Joe Security from skipped (quota supposed to be renewed) | "TestUptycs": "Issue 19750",
"InfoArmorVigilanteATITest": "Test issue 17358",
"calculate_severity_-_critical_assets_-_test": "Issue 17924",
- "Forescout-Test": "issue 17016",
"Lastline - testplaybook": "Checking the integration via Generic detonation playbooks, don't want to load the daily quota",
"entity_enrichment_generic_test": "Issue 16490",
"ArcSight Logger test": "Issue 19117",
"Zoom": "Issue 19832",
"MailListener - POP3": "Issue 18580",
"epo": "Issue 19896",
+ "Forescout": "Cannot run on AWS machines",
"_comment": "~~~ QUOTA ISSUES ~~~",
- "AWS - Athena - Beta": "Issue ",
- "Joe Security": "Monthly quota exceeded, remove from skipped on or after April 1st",
+ "AWS - Athena - Beta": "Issue 19834",
"VirusTotal - Private API": "reached api alloted quota.",
"Google Resource Manager": "Cannot create projects because have reached alloted quota.",
"Looker": "Warehouse 'DEMO_WH' cannot be resumed because resource monitor 'LIMITER' has exceeded its quota."
|
remove block_email_domains_from_hubspot from accounting forms
Note: removing the field on the BillingAccount model in a followup PR | @@ -160,13 +160,6 @@ class BillingAccountBasicForm(forms.Form):
help_text="Users in any projects connected to this account will not "
"have data sent to Hubspot",
)
- block_email_domains_from_hubspot = forms.CharField(
- label="Block Email Domains From Hubspot Data",
- required=False,
- help_text="(ex: dimagi.com, commcarehq.org) Anyone with a username or "
- "email matching an email-domain here, regardless of "
- "project membership, will not have data synced with Hubspot.",
- )
def __init__(self, account, *args, **kwargs):
self.account = account
@@ -188,7 +181,6 @@ class BillingAccountBasicForm(forms.Form):
'last_payment_method': account.last_payment_method,
'pre_or_post_pay': account.pre_or_post_pay,
'block_hubspot_data_for_all_users': account.block_hubspot_data_for_all_users,
- 'block_email_domains_from_hubspot': ', '.join(account.block_email_domains_from_hubspot),
}
else:
kwargs['initial'] = {
@@ -271,10 +263,6 @@ class BillingAccountBasicForm(forms.Form):
'block_hubspot_data_for_all_users',
),
),
- crispy.Field(
- 'block_email_domains_from_hubspot',
- css_class='input-xxlarge',
- ),
])
self.helper.layout = crispy.Layout(
crispy.Fieldset(
@@ -380,12 +368,6 @@ class BillingAccountBasicForm(forms.Form):
)
return transfer_subs
- def clean_block_email_domains_from_hubspot(self):
- email_domains = self.cleaned_data['block_email_domains_from_hubspot']
- if email_domains:
- return [e.strip() for e in email_domains.split(r',')]
- return [] # Do not return a list with an empty string
-
@transaction.atomic
def create_account(self):
name = self.cleaned_data['name']
@@ -421,7 +403,6 @@ class BillingAccountBasicForm(forms.Form):
account.enterprise_restricted_signup_domains = self.cleaned_data['enterprise_restricted_signup_domains']
account.invoicing_plan = self.cleaned_data['invoicing_plan']
account.block_hubspot_data_for_all_users = self.cleaned_data['block_hubspot_data_for_all_users']
- account.block_email_domains_from_hubspot = self.cleaned_data['block_email_domains_from_hubspot']
transfer_id = self.cleaned_data['active_accounts']
if transfer_id:
transfer_account = BillingAccount.objects.get(id=transfer_id)
|
[MetaSchedule] Allow Easy Logging Level Setting
This PR allows users to set the logging level without giving a logger config. The previous implementation hard-coded `logging.INFO` as the default logging level and required a logger config to change it. Now the logging level and handlers can be inherited from the current `tvm.meta_schedule` logger setting.
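
A minimal usage sketch of what this enables (only the `tvm.meta_schedule` logger name is taken from the patch; the rest is an assumed user-side snippet): verbosity can now be steered through the standard logging module before tuning, without building a `logger_config` dict.

```python
import logging

# Sketch only: TuneConfig now inherits level and handlers from this logger,
# so setting it up front is enough to see the full tuning log.
ms_logger = logging.getLogger("tvm.meta_schedule")
ms_logger.setLevel(logging.DEBUG)
ms_logger.addHandler(logging.StreamHandler())

# ...subsequent meta_schedule tuning calls pick this configuration up
# without an explicit logger_config argument.
```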
else:
config = self.logger_config
- global_logger_name = "tvm.meta_schedule"
config.setdefault("loggers", {})
config.setdefault("handlers", {})
config.setdefault("formatters", {})
+ global_logger_name = "tvm.meta_schedule"
+ global_logger = logging.getLogger(global_logger_name)
+ if global_logger.level is logging.NOTSET:
+ global_logger.setLevel(logging.INFO)
+
config["loggers"].setdefault(
global_logger_name,
{
- "level": "INFO",
- "handlers": [global_logger_name + ".console", global_logger_name + ".file"],
+ "level": logging._levelToName[ # pylint: disable=protected-access
+ global_logger.level
+ ],
+ "handlers": [handler.get_name() for handler in global_logger.handlers]
+ + [global_logger_name + ".console", global_logger_name + ".file"],
"propagate": False,
},
)
@@ -502,12 +509,11 @@ class TuneConfig(NamedTuple):
logging.config.dictConfig(p_config)
# check global logger
- global_logger = logging.getLogger(global_logger_name)
if global_logger.level not in [logging.DEBUG, logging.INFO]:
- global_logger.critical(
+ global_logger.warning(
"Logging level set to %s, please set to logging.INFO"
" or logging.DEBUG to view full log.",
- logging._levelToName[logger.level], # pylint: disable=protected-access
+ logging._levelToName[global_logger.level], # pylint: disable=protected-access
)
global_logger.info("Logging directory: %s", log_dir)
|
[mtiLib] Be more lenient in script block parsing
Fixes | @@ -103,6 +103,8 @@ def parseScriptList(lines, featureMap=None):
records = []
with lines.between('script table'):
for line in lines:
+ while len(line) < 4:
+ line.append('')
scriptTag, langSysTag, defaultFeature, features = line
log.debug("Adding script %s language-system %s", scriptTag, langSysTag)
|
Add description to policies in cells.py
blueprint policy-docs | @@ -26,21 +26,72 @@ cells_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
- policy.RuleDefault(
- name=POLICY_ROOT % 'update',
- check_str=base.RULE_ADMIN_API),
- policy.RuleDefault(
- name=POLICY_ROOT % 'create',
- check_str=base.RULE_ADMIN_API),
- policy.RuleDefault(
- name=BASE_POLICY_NAME,
- check_str=base.RULE_ADMIN_API),
- policy.RuleDefault(
- name=POLICY_ROOT % 'sync_instances',
- check_str=base.RULE_ADMIN_API),
- policy.RuleDefault(
- name=POLICY_ROOT % 'delete',
- check_str=base.RULE_ADMIN_API),
+ base.create_rule_default(
+ POLICY_ROOT % 'update',
+ base.RULE_ADMIN_API,
+ 'Update an existing cell',
+ [
+ {
+ 'method': 'PUT',
+ 'path': '/os-cells/{cell_id}'
+ }
+ ]),
+ base.create_rule_default(
+ POLICY_ROOT % 'create',
+ base.RULE_ADMIN_API,
+ 'Create a new cell',
+ [
+ {
+ 'method': 'POST',
+ 'path': '/os-cells'
+ }
+ ]),
+ base.create_rule_default(
+ BASE_POLICY_NAME,
+ base.RULE_ADMIN_API,
+ 'List and get detailed info of a given cell or all cells',
+ [
+ {
+ 'method': 'GET',
+ 'path': '/os-cells'
+ },
+ {
+ 'method': 'GET',
+ 'path': '/os-cells/detail'
+ },
+ {
+ 'method': 'GET',
+ 'path': '/os-cells/info'
+ },
+ {
+ 'method': 'GET',
+ 'path': '/os-cells/capacities'
+ },
+ {
+ 'method': 'GET',
+ 'path': '/os-cells/{cell_id}'
+ }
+ ]),
+ base.create_rule_default(
+ POLICY_ROOT % 'sync_instances',
+ base.RULE_ADMIN_API,
+ 'Sync instances info in all cells',
+ [
+ {
+ 'method': 'POST',
+ 'path': '/os-cells/sync_instances'
+ }
+ ]),
+ base.create_rule_default(
+ POLICY_ROOT % 'delete',
+ base.RULE_ADMIN_API,
+ 'Remove a cell',
+ [
+ {
+ 'method': 'DELETE',
+ 'path': '/os-cells/{cell_id}'
+ }
+ ])
]
|
Use Scala 2.12.4 for --scala-platform-version=2.12
### Problem
The default scala build toolchain is out-of-date and insecure(!). See for details.
### Solution
Bumped version numbers. | @@ -25,7 +25,7 @@ major_version_info = namedtuple('major_version_info', ['full_version'])
scala_build_info = {
'2.10': major_version_info(full_version='2.10.6'),
'2.11': major_version_info(full_version='2.11.11'),
- '2.12': major_version_info(full_version='2.12.2'),
+ '2.12': major_version_info(full_version='2.12.4'),
}
|
Drop 921100 for now
* Remove Content-Length from this rule
It is already handled by 920160.
* Remove Transfer-Encoding
It's possible to have multiple values as per rfc 7230, section 4. | @@ -19,42 +19,6 @@ SecRule TX:EXECUTING_PARANOIA_LEVEL "@lt 1" "id:921012,phase:2,pass,nolog,skipAf
# -= Paranoia Level 1 (default) =- (apply only when tx.executing_paranoia_level is sufficiently high: 1 or higher)
#
-#
-# -=[ HTTP Request Smuggling ]=-
-#
-# [ Rule Logic ]
-# This rule looks for a comma character in either the Content-Length or Transfer-Encoding
-# request headers. This character would indicate that there were more than one request header
-# with this same name. In these instances, Apache treats the data in a similar manner as
-# multiple cookie values.
-#
-# [ References ]
-# http://projects.webappsec.org/HTTP-Request-Smuggling
-# http://article.gmane.org/gmane.comp.apache.mod-security.user/3299
-#
-SecRule REQUEST_HEADERS:'/(?:Content-Length|Transfer-Encoding)/' "@rx ," \
- "id:921100,\
- phase:2,\
- block,\
- capture,\
- t:none,\
- msg:'HTTP Request Smuggling Attack.',\
- logdata:'Matched Data: %{TX.0} found within %{MATCHED_VAR_NAME}: %{MATCHED_VAR}',\
- tag:'application-multi',\
- tag:'language-multi',\
- tag:'platform-multi',\
- tag:'attack-protocol',\
- tag:'OWASP_CRS/WEB_ATTACK/REQUEST_SMUGGLING',\
- tag:'WASCTC/WASC-26',\
- tag:'OWASP_TOP_10/A1',\
- tag:'PCI/6.5.2',\
- ver:'OWASP_CRS/3.0.0',\
- severity:'CRITICAL',\
- setvar:'tx.msg=%{rule.msg}',\
- setvar:'tx.http_violation_score=+%{tx.critical_anomaly_score}',\
- setvar:'tx.anomaly_score_pl1=+%{tx.critical_anomaly_score}',\
- setvar:'tx.%{rule.id}-OWASP_CRS/WEB_ATTACK/REQUEST_SMUGGLING-%{matched_var_name}=%{tx.0}'"
-
#
# -=[ HTTP Request Smuggling ]=-
#
|
add GSFont.fontView
TODO: add documentation for GSFontViewController
masterIndex
currentText
tabs
+ fontView
currentTab
filepath
tool
@@ -2626,6 +2627,13 @@ GSFont.tabs = property(lambda self: FontTabsProxy(self))
:type: list'''
+GSFont.fontView = property(lambda self: self.parent.windowController().tabBarControl().tabItemAtIndex_(0))
+
+
+'''.. attribute:: fontView
+
+ :type GSFontViewController'''
+
def __GSFont__currentTab__(self):
return self.parent.windowController().activeEditViewController()
|
add back _stdvs_sq buffer to Standardize transform
Summary:
Pull Request resolved:
see title. | @@ -175,6 +175,7 @@ class Standardize(OutcomeTransform):
super().__init__()
self.register_buffer("means", torch.zeros(*batch_shape, 1, m))
self.register_buffer("stdvs", torch.zeros(*batch_shape, 1, m))
+ self.register_buffer("_stdvs_sq", torch.zeros(*batch_shape, 1, m))
self._outputs = normalize_indices(outputs, d=m)
self._m = m
self._batch_shape = batch_shape
|
Fix bug in pluginsystem
typo in untested feature | @@ -94,7 +94,7 @@ class Plugin(object):
return cfg
def emit(self, event, **kwargs):
- return self.env.pluginsystem.emit(self.id + "-" + event, **kwargs)
+ return self.env.plugin_controller.emit(self.id + "-" + event, **kwargs)
def to_json(self):
return {
|
Do not fix the configured requested_attributes
This is always done on use, ie, on client_base.py::create_authn_request | @@ -509,50 +509,6 @@ class SPConfig(Config):
return None
- def load(self, cnf, metadata_construction=False):
- super().load(cnf, metadata_construction=False)
- self.fix_requested_attributes()
- return self
-
- def fix_requested_attributes(self):
- """Add friendly_name or name if missing to the requested attributes"""
- requested_attrs = self.getattr('requested_attributes', 'sp')
-
- if not requested_attrs:
- return
-
- for attr in requested_attrs:
- friendly_name = attr.get('friendly_name')
- name = attr.get('name')
- name_format = attr.get('name_format')
-
- if not name and not friendly_name:
- raise ValueError(
- "Missing required attribute: '{}' or '{}'".format(
- 'name', 'friendly_name'))
-
- if not name:
- for converter in self.attribute_converters:
- try:
- attr['name'] = converter._to[friendly_name.lower()]
- except KeyError:
- continue
- else:
- if not name_format:
- attr['name_format'] = converter.name_format
- break
-
- if not friendly_name:
- for converter in self.attribute_converters:
- try:
- attr['friendly_name'] = converter._fro[name.lower()]
- except KeyError:
- continue
- else:
- if not name_format:
- attr['name_format'] = converter.name_format
- break
-
class IdPConfig(Config):
def_context = "idp"
|
Remove unnecessary whitespace removal in Helm templating
Summary: As the title.
Test Plan: integration
Reviewers: max | @@ -37,7 +37,7 @@ spec:
initContainers:
- name: check-db-ready
image: {{ include "image.name" .Values.postgresql.image | quote }}
- imagePullPolicy: {{- .Values.postgresql.image.pullPolicy -}}
+ imagePullPolicy: {{ .Values.postgresql.image.pullPolicy }}
command: ['sh', '-c',
'until pg_isready -h {{ include "dagster.postgresql.host" . }} -p {{ .Values.postgresql.service.port }}; do echo waiting for database; sleep 2; done;',
]
|
Improve undocumented TarFile method type hints
Add type hints for undocumented tarfile.TarFile file methods called via
_extract_member() when extract() is called. | @@ -144,6 +144,18 @@ class TarFile(Iterable[TarInfo]):
path: _Path = ...) -> None: ...
def extractfile(self,
member: Union[str, TarInfo]) -> Optional[IO[bytes]]: ...
+ def makedir(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def makefile(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def makeunknown(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def makefifo(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def makedev(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def makelink(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ if sys.version_info >= (3, 5):
+ def chown(self, tarinfo: TarInfo, targetpath: _Path, numeric_owner: bool) -> None: ... # undocumented
+ else:
+ def chown(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def chmod(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
+ def utime(self, tarinfo: TarInfo, targetpath: _Path) -> None: ... # undocumented
if sys.version_info >= (3, 7):
def add(self, name: str, arcname: Optional[str] = ...,
recursive: bool = ..., *,
|
Update uTorrentPostProcess.py
use param 5 for filename (not sure if this ever worked)
debug cleanup | @@ -55,7 +55,7 @@ settings = ReadSettings()
path = str(sys.argv[3])
label = sys.argv[1].lower().strip()
kind = sys.argv[4].lower().strip()
-filename = sys.argv[6].strip()
+filename = sys.argv[5].strip()
categories = [settings.uTorrent['cp'], settings.uTorrent['sb'], settings.uTorrent['sonarr'], settings.uTorrent['radarr'], settings.uTorrent['sr'], settings.uTorrent['bypass']]
torrent_hash = sys.argv[6]
try:
@@ -68,8 +68,8 @@ log.debug("Label: %s." % label)
log.debug("Categories: %s." % categories)
log.debug("Torrent hash: %s." % torrent_hash)
log.debug("Torrent name: %s." % name)
-log.debug("Kind: %s" % kind)
-log.debug("Filename: %s" % filename)
+log.debug("Kind: %s." % kind)
+log.debug("Filename: %s." % filename)
if label not in categories:
log.error("No valid label detected.")
@@ -135,7 +135,6 @@ if settings.uTorrent['convert']:
except:
log.exception("Error creating output sub directory.")
-
converter = MkvtoMp4(settings)
if kind == 'single':
@@ -168,7 +167,6 @@ if settings.uTorrent['convert']:
log.exception("Error converting file %s." % inputfile)
else:
log.debug("Ignoring file %s." % inputfile)
-
path = settings.output_dir
delete_dir = settings.output_dir
else:
|
Use re.escape to escape paths, before handing them to re.match
Addresses | @@ -4005,7 +4005,7 @@ def extract_hash(hash_fn,
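
A minimal sketch of why the full escape matters (the filename and hash below are made up, not from the patch): the old dot-only replacement leaves other regex metacharacters active, so a filename containing `+` or `[...]` can fail to match its own hash line, while `re.escape` neutralises everything.

```python
import re

file_name = "glibc-2.27+patch[1].tar.gz"
line = file_name + "  d41d8cd98f00b204e9800998ecf8427e"

# Dot-only escaping keeps "+" and "[1]" as regex operators, so the literal
# filename on the line is not matched:
assert re.match(file_name.replace(".", r"\."), line) is None

# re.escape() escapes every metacharacter, so the match succeeds:
assert re.match(re.escape(file_name) + r"\s+", line)
```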
hash_matched = True
except IndexError:
pass
- elif re.match(source_hash_name.replace('.', r'\.') + r'\s+',
+ elif re.match(re.escape(file_name) + r'\s+',
line):
_add_to_matches(found, line, 'source_hash_name',
source_hash_name, matched)
@@ -4023,7 +4023,7 @@ def extract_hash(hash_fn,
hash_matched = True
except IndexError:
pass
- elif re.match(file_name.replace('.', r'\.') + r'\s+', line):
+ elif re.match(re.escape(file_name) + r'\s+', line):
_add_to_matches(found, line, 'file_name',
file_name, matched)
hash_matched = True
|
Add test to make sure `toil clean` actually works
Before, the google job Store wasn't deleted when invoked this way.
This test should fail, and a fix will follow in the next commit. | @@ -30,6 +30,7 @@ from six.moves import xrange
from toil import resolveEntryPoint
from toil.batchSystems.parasolTestSupport import ParasolTestSupport
+from toil.common import Toil
from toil.job import Job, JobException
from toil.lib.bioio import getLogLevelString
from toil.batchSystems.mesos.test import MesosTestSupport
@@ -171,6 +172,10 @@ class SortTest(ToilTest, MesosTestSupport, ParasolTestSupport):
totalTrys += 1
finally:
subprocess.check_call([resolveEntryPoint('toil'), 'clean', jobStoreLocator])
+ # final test to make sure the jobStore was actually deleted
+ self.assertRaises(NoSuchJobStoreException, Toil.resumeJobStore, jobStoreLocator)
+
+
@needs_aws
def testAwsSingle(self):
|
ENH: moved mapping routine to utilities
Moved the dict/function mapping routine from inside Meta.rename to utils. | @@ -235,6 +235,35 @@ def load_netcdf4(fnames=None, strict_meta=False, file_format='NETCDF4',
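
A short usage sketch of the relocated helper (its two-branch logic is repeated inline so the snippet runs standalone; in the package it would be imported from the utilities module):

```python
def get_mapped_value(value, mapper):
    # Dict mapper: return the new name, or None to keep the old one.
    if isinstance(mapper, dict):
        return mapper[value] if value in mapper else None
    # Callable mapper: apply it to every name.
    return mapper(value)

print(get_mapped_value("dB", {"dB": "decibel"}))  # "decibel"
print(get_mapped_value("Ne", {"dB": "decibel"}))  # None -> name unchanged
print(get_mapped_value("Ne", str.lower))          # "ne"
```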
return data, meta
+def get_mapped_value(value, mapper):
+ """Adjust value using mapping dict or function.
+
+ Parameters
+ ----------
+ value : str
+ MetaData variable name to be adjusted
+ mapper : dict or function
+ Dictionary with old names as keys and new names as variables or
+ a function to apply to all names
+
+ Returns
+ -------
+ mapped_val : str or NoneType
+ Adjusted MetaData variable name or NoneType if input value
+ should stay the same
+
+ """
+ if isinstance(mapper, dict):
+ if value in mapper.keys():
+ mapped_val = mapper[value]
+ else:
+ mapped_val = None
+ else:
+ mapped_val = mapper(value)
+
+ return mapped_val
+
+
def fmt_output_in_cols(out_strs, ncols=3, max_num=6, lpad=None):
"""Format a string with desired output values in columns.
|
Update treeprettyprinter.py
Issue solved, i.e. pretty_print for tuples
for n, b in enumerate(a):
if not isinstance(b, Tree):
a[n] = len(sentence)
+ if type(b) == tuple:
+ b = '/'.join(b)
sentence.append('%s' % b)
self.nodes, self.coords, self.edges, self.highlight = self.nodecoords(
tree, sentence, highlight)
|
Improves Voice Chat Matching
Changes the way voice channels are matched with chat channels, to make
it less hardcoded. | @@ -33,6 +33,13 @@ MSG_UNSILENCE_SUCCESS = f"{constants.Emojis.check_mark} unsilenced current chann
TextOrVoiceChannel = Union[TextChannel, VoiceChannel]
+VOICE_CHANNELS = {
+ constants.Channels.code_help_voice_1: constants.Channels.code_help_chat_1,
+ constants.Channels.code_help_voice_2: constants.Channels.code_help_chat_2,
+ constants.Channels.general_voice: constants.Channels.voice_chat,
+ constants.Channels.staff_voice: constants.Channels.staff_voice_chat,
+}
+
class SilenceNotifier(tasks.Loop):
"""Loop notifier for posting notices to `alert_channel` containing added channels."""
@@ -106,20 +113,6 @@ class Silence(commands.Cog):
self.notifier = SilenceNotifier(self.bot.get_channel(constants.Channels.mod_log))
await self._reschedule()
- async def _get_related_text_channel(self, channel: VoiceChannel) -> Optional[TextChannel]:
- """Returns the text channel related to a voice channel."""
- # TODO: Figure out a dynamic way of doing this
- channels = {
- "off-topic": constants.Channels.voice_chat,
- "code/help 1": constants.Channels.code_help_voice,
- "code/help 2": constants.Channels.code_help_voice_2,
- "admin": constants.Channels.admins_voice,
- "staff": constants.Channels.staff_voice
- }
- for name in channels.keys():
- if name in channel.name.lower():
- return self.bot.get_channel(channels[name])
-
async def send_message(
self,
message: str,
@@ -137,9 +130,10 @@ class Silence(commands.Cog):
# Reply to target channel
if alert_target:
if isinstance(target_channel, VoiceChannel):
- voice_chat = await self._get_related_text_channel(target_channel)
+ voice_chat = self.bot.get_channel(VOICE_CHANNELS.get(target_channel.id))
if voice_chat and source_channel != voice_chat:
await voice_chat.send(message.replace("current channel", target_channel.mention))
+
elif source_channel != target_channel:
await target_channel.send(message)
|
Port dimod.bqm.common type definitions for cyDQM
In the future we should unify them with the ones in cyutilities | @@ -21,7 +21,6 @@ from libcpp.vector cimport vector
cimport numpy as np
from dimod.libcpp cimport cppBinaryQuadraticModel
-from dimod.bqm.common cimport Integral32plus, Numeric, Numeric32plus
ctypedef np.float64_t bias_type
ctypedef np.int32_t index_type
@@ -32,6 +31,28 @@ ctypedef fused Unsigned:
np.uint32_t
np.uint64_t
+ctypedef fused Integral32plus:
+ np.uint32_t
+ np.uint64_t
+ np.int32_t
+ np.int64_t
+
+ctypedef fused Numeric:
+ np.uint8_t
+ np.uint16_t
+ np.uint32_t
+ np.uint64_t
+ np.int8_t
+ np.int16_t
+ np.int32_t
+ np.int64_t
+ np.float32_t
+ np.float64_t
+
+ctypedef fused Numeric32plus:
+ Integral32plus
+ np.float32_t
+ np.float64_t
cdef class cyDiscreteQuadraticModel:
cdef cppBinaryQuadraticModel[bias_type, index_type] cppbqm
|
Update README.md
Added a step before git lfs commands | @@ -32,6 +32,7 @@ cd ~/catkin_ws/src
sudo apt-get install python-catkin-tools # If you don't have the package installed yet.
catkin_init_workspace
git clone --recurse-submodules https://github.com/utra-robosoccer/soccer_ws # To clone the repository
+cd soccer_ws # To get into the local repository and perform git lfs commands
git lfs init
git lfs pull
cd soccer_ws
|
llvm, composition: Rename __get_mech_index -> __get_node_index
It will be used for nested compositions as well.
data.append(nested_data)
return pnlvm._tupleize(data)
- def __get_mech_index(self, mechanism):
- if mechanism is self.input_CIM:
+ def __get_node_index(self, node):
+ if node is self.input_CIM:
return len(self.c_nodes)
- elif mechanism is self.output_CIM:
+ elif node is self.output_CIM:
return len(self.c_nodes) + 1
else:
- return self.c_nodes.index(mechanism)
+ return self.c_nodes.index(node)
def _get_node_wrapper(self, node):
if node not in self.__generated_wrappers:
@@ -2985,7 +2985,7 @@ class Composition(Composition_Base):
assert output_s in par_mech.output_states
if par_mech is self.input_CIM or par_mech is self.output_CIM \
or par_mech in self.c_nodes:
- par_idx = self.__get_mech_index(par_mech)
+ par_idx = self.__get_node_index(par_mech)
else:
comp = par_mech.composition
assert par_mech is comp.output_CIM
@@ -3029,7 +3029,7 @@ class Composition(Composition_Base):
builder.call(proj_function, [proj_params, proj_context, proj_in, proj_out])
- idx = ctx.int32_ty(self.__get_mech_index(mech))
+ idx = ctx.int32_ty(self.__get_node_index(mech))
zero = ctx.int32_ty(0)
m_params = builder.gep(params, [zero, zero, idx])
m_context = builder.gep(context, [zero, zero, idx])
@@ -3242,7 +3242,7 @@ class Composition(Composition_Base):
builder.call(exec_f, [context, params, data_in_ptr, data, cond])
# Extract output_CIM result
- idx = self.__get_mech_index(self.output_CIM)
+ idx = self.__get_node_index(self.output_CIM)
result_ptr = builder.gep(data, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(idx)])
output_ptr = builder.gep(data_out, [iters])
result = builder.load(result_ptr)
|
Fix devstack: replace deprecated screen functions
Since [1], systemd is the default process init. However, the
CK devstack plugin still uses the screen_it function instead of
the generic run_process function. This leads to strange behavior
of the plugin.
This patch fixes this problem.
[1]
Story:
Task: 4634 | @@ -194,10 +194,10 @@ function install_cloudkitty {
# start_cloudkitty() - Start running processes, including screen
function start_cloudkitty {
- screen_it ck-proc "cd $CLOUDKITTY_DIR; $CLOUDKITTY_BIN_DIR/cloudkitty-processor --config-file=$CLOUDKITTY_CONF"
- screen_it ck-api "cd $CLOUDKITTY_DIR; $CLOUDKITTY_BIN_DIR/cloudkitty-api --config-file=$CLOUDKITTY_CONF"
+ run_process ck-proc "$CLOUDKITTY_BIN_DIR/cloudkitty-processor --config-file=$CLOUDKITTY_CONF"
+ run_process ck-api "$CLOUDKITTY_BIN_DIR/cloudkitty-api --config-file=$CLOUDKITTY_CONF"
echo "Waiting for ck-api ($CLOUDKITTY_SERVICE_HOST:$CLOUDKITTY_SERVICE_PORT) to start..."
- if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$CLOUDKITTY_SERVICE_HOST:$CLOUDKITTY_SERVICE_PORT; do sleep 1; done"; then
+ if ! wait_for_service $SERVICE_TIMEOUT $CLOUDKITTY_SERVICE_PROTOCOL://$CLOUDKITTY_SERVICE_HOST:$CLOUDKITTY_SERVICE_PORT; then
die $LINENO "ck-api did not start"
fi
}
@@ -206,7 +206,7 @@ function start_cloudkitty {
function stop_cloudkitty {
# Kill the cloudkitty screen windows
for serv in ck-api ck-proc; do
- screen_stop $serv
+ stop_process $serv
done
}
|
Update pce.rst
import leave-one-out error function | @@ -8,7 +8,7 @@ The :class:`.PolynomialChaosExpansion` class is imported using the following com
Methods
"""""""
.. autoclass:: UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion
- :members: fit, predict, validation_error, get_moments
+ :members: fit, predict, validation_error, leaveoneout_error, get_moments
Attributes
""""""""""
|
rwalk default
Changed the default to `'rwalk'` instead of `'unif'`. While the maximal efficiency of `'rwalk'` is substantially lower than `'unif'`, it's more consistent overall and so should require less manual intervention by most users. | @@ -38,7 +38,7 @@ SQRTEPS = math.sqrt(float(np.finfo(np.float64).eps))
def NestedSampler(loglikelihood, prior_transform, ndim, nlive=500,
- bound='multi', sample='unif',
+ bound='multi', sample='rwalk',
update_interval=0.8, first_update=None,
npdim=None, rstate=None, queue_size=None, pool=None,
use_pool=None, live_points=None,
@@ -90,7 +90,7 @@ def NestedSampler(loglikelihood, prior_transform, ndim, nlive=500,
conditioned on the provided bounds. Choices are uniform
(`'unif'`), random walks (`'rwalk'`), multivariate slices (`'slice'`),
random slices (`'rslice'`), and random trajectories ("Hamiltonian
- slices"; `'hslice'`). Default is `'unif'`.
+ slices"; `'hslice'`). Default is `'rwalk'`.
update_interval : int or float, optional
If an integer is passed, only update the proposal distribution every
@@ -313,7 +313,7 @@ def NestedSampler(loglikelihood, prior_transform, ndim, nlive=500,
def DynamicNestedSampler(loglikelihood, prior_transform, ndim,
- bound='multi', sample='unif',
+ bound='multi', sample='rwalk',
update_interval=0.8, first_update=None,
npdim=None, rstate=None, queue_size=None, pool=None,
use_pool=None, logl_args=None, logl_kwargs=None,
|
Documentation - update hooks.py to wagtail_hooks.py
Fixes | @@ -27,7 +27,7 @@ class CustomSettingsForm(forms.ModelForm):
```
```python
-# hooks.py
+# wagtail_hooks.py
from wagtail.admin.views.account import BaseSettingsPanel
from wagtail import hooks
@@ -70,7 +70,7 @@ class CustomProfileSettingsForm(forms.ModelForm):
```
```python
-# hooks.py
+# wagtail_hooks.py
from wagtail.admin.views.account import BaseSettingsPanel
from wagtail import hooks
@@ -90,7 +90,7 @@ class CustomSettingsPanel(BaseSettingsPanel):
You can define a new tab using the `SettingsTab` class:
```python
-# hooks.py
+# wagtail_hooks.py
from wagtail.admin.views.account import BaseSettingsPanel, SettingsTab
from wagtail import hooks
@@ -118,7 +118,7 @@ class CustomSettingsPanel(BaseSettingsPanel):
You can provide a custom template for the panel by specifying a template name:
```python
-# hooks.py
+# wagtail_hooks.py
from wagtail.admin.views.account import BaseSettingsPanel
from wagtail import hooks
|
Avoid python dependency break for python-dateutil
python dateutil was updated to 2.7.0
botocore was updated to 1.8.9 and it breaks against
dateutil 2.7.0 | @@ -33,6 +33,9 @@ setup(
'release.storage',
'ssh'],
install_requires=[
+ # DCOS-21656 - `botocore`` requires less than 2.7.0 while
+ # `analytics-python` package installs 2.7.0 version
+ 'python-dateutil>=2.1,<2.7.0',
'aiohttp==0.22.5',
'analytics-python',
'coloredlogs',
@@ -48,8 +51,8 @@ setup(
'azure-storage==0.32.0',
'azure-mgmt-network==0.30.0rc4',
'azure-mgmt-resource==0.30.0rc4',
- 'boto3',
'botocore',
+ 'boto3',
'checksumdir',
'coloredlogs',
'docopt',
|
Standalone: Do not exclude idlelib from standard library, make it non-automatic
* Without this, it fails to import due to not being followed, but then
it also isn't included. | @@ -158,8 +158,6 @@ def scanStandardLibraryPath(stdlib_dir):
dirs.remove("dist-packages")
if "test" in dirs:
dirs.remove("test")
- if "idlelib" in dirs:
- dirs.remove("idlelib")
if "turtledemo" in dirs:
dirs.remove("turtledemo")
@@ -305,6 +303,7 @@ _stdlib_no_auto_inclusion_list = (
"ttk",
"tkFont",
"tkColorChooser",
+ "idlelib",
)
if not isWin32Windows():
|
Add more prompts to Qtech.QSW2800
HG--
branch : feature/microservices | @@ -19,8 +19,9 @@ class Profile(BaseProfile):
name = "Qtech.QSW2800"
pattern_more = [
(r"^ --More-- $", " "),
+ (r"^Confirm to overwrite current startup-config configuration [Y/N]:", "\nY\n"),
(r"^Confirm to overwrite current startup-config configuration", "\ny\n"),
- (r"^Confirm to overwrite the existed destination file?", "\ny\n")
+ (r"^Confirm to overwrite the existed destination file?", "\ny\n"),
]
pattern_unpriveleged_prompt = r"^\S+>"
pattern_syntax_error = r"% (?:Invalid input detected at '\^' marker|" \
|
Implements Channel Converter
Adds a converter that can decipher more forms of channel mentions, to
lay foundation for voice channel muting. | @@ -536,6 +536,46 @@ class FetchedUser(UserConverter):
raise BadArgument(f"User `{arg}` does not exist")
+class AnyChannelConverter(UserConverter):
+ """
+ Converts to a `discord.Channel` or, raises an error.
+
+ Unlike the default Channel Converter, this converter can handle channels given
+ in string, id, or mention formatting for both `TextChannel`s and `VoiceChannel`s.
+ Always returns 1 or fewer channels, errors if more than one match exists.
+
+ It is able to handle the following formats (caveats noted below:)
+ 1. Convert from ID - Example: 267631170882240512
+ 2. Convert from Explicit Mention - Example: #welcome
+ 3. Convert from ID Mention - Example: <#267631170882240512>
+ 4. Convert from Unmentioned Name: - Example: welcome
+
+ All the previous conversions are valid for both text and voice channels, but explicit
+ raw names (#4) do not work for non-unique channels, instead opting for an error.
+ Explicit mentions (#2) do not work for non-unique voice channels either.
+ """
+
+ async def convert(self, ctx: Context, arg: str) -> t.Union[discord.TextChannel, discord.VoiceChannel]:
+ """Convert the `arg` to a `TextChannel` or `VoiceChannel`."""
+ stripped = arg.strip().lstrip("<").lstrip("#").rstrip(">")
+
+ # Filter channels by name and ID
+ channels = [channel for channel in ctx.guild.channels if stripped in (channel.name, str(channel.id))]
+
+ if len(channels) == 0:
+ # Couldn't find a matching channel
+ log.debug(f"Could not convert `{arg}` to channel, no matches found.")
+ raise BadArgument("The provided argument returned no matches.")
+
+ elif len(channels) > 1:
+ # Couldn't discern the desired channel
+ log.debug(f"Could not convert `{arg}` to channel, {len(channels)} matches found.")
+ raise BadArgument(f"The provided argument returned too many matches ({len(channels)}).")
+
+ else:
+ return channels[0]
+
+
def _snowflake_from_regex(pattern: t.Pattern, arg: str) -> int:
"""
Extract the snowflake from `arg` using a regex `pattern` and return it as an int.
|
changed the assert statement
The test now compares the accuracy of the cuml and sklearn models instead of the predicted labels.
import numpy as np
from cuml.test.utils import get_handle
-from cuml.test.utils import array_equal
from sklearn.datasets import make_classification
from cuml.ensemble import RandomForestClassifier as curfc
from sklearn.ensemble import RandomForestClassifier as skrfc
+from sklearn.metrics import accuracy_score
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('use_handle', [True, False])
def test_rf_predict_numpy(datatype, use_handle):
- X, y = make_classification(n_samples=100, n_features=40,
- n_clusters_per_class=1, n_informative=30,
+ X, y = make_classification(n_samples=1000, n_features=100,
+ n_clusters_per_class=1, n_informative=80,
random_state=123, n_classes=5)
y = y.astype(np.int32)
- handle, stream = get_handle(use_handle)
- cuml_model = curfc(max_depth=-1, max_leaves=-1, max_features=1.0,
- n_bins=4, split_algo=0, min_rows_per_node=2,
- n_estimators=20, handle=handle)
- cuml_model.fit(X, y)
- cu_predict = cuml_model.predict(X)
+ X_train = np.asarray(X[0:900, :])
+ y_train = np.asarray(y[0:900, ])
+ X_test = np.asarray(X[900:, :])
+ y_test = np.asarray(y[900:, ])
- sk_model = skrfc(n_estimators=20, max_depth=None,
+ print("Calling fit_predict")
+ handle, stream = get_handle(use_handle)
+ cuml_model = curfc(max_features=1.0,
+ n_bins=4, split_algo=0, min_rows_per_node=2,
+ n_estimators=40, handle=handle, max_leaves=-1)
+ cuml_model.fit(X_train, y_train)
+ cu_predict = cuml_model.predict(X_test)
+ cu_acc = accuracy_score(y_test, cu_predict)
+ sk_model = skrfc(n_estimators=40, max_depth=None,
min_samples_split=2, max_features=1.0)
- sk_model.fit(X, y)
- sk_predict = sk_model.predict(X)
-
+ sk_model.fit(X_train, y_train)
+ sk_predict = sk_model.predict(X_test)
+ sk_acc = accuracy_score(y_test, sk_predict)
cuml_model.handle.sync()
-
- assert array_equal(sk_predict, cu_predict, 1e-1, with_sign=True)
+ assert cu_acc >= sk_acc
|
docs: Fixes incorrect configuration on Draft Action page
Fixes incorrect configuration on draft action page. | @@ -26,6 +26,6 @@ the pull request to a draft automatically since it's likely not ready to review.
pull_request_rules:
- name: convert to draft
conditions:
- - "#check-failed>0"
+ - "#check-failure>0"
actions:
draft:
|
Fixed the capitalization in _python_function_name_to_component_name
It now only changes the case of the first letter. | @@ -115,7 +115,8 @@ def set_default_base_image(image_or_factory: Union[str, Callable[[], str]]):
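
A small sketch of the behavioural difference (the example function name is made up): `str.capitalize()` lowercases everything after the first character, whereas the new slicing keeps the rest of the name intact.

```python
import re

# Helper as changed by this patch.
def _python_function_name_to_component_name(name):
    name_with_spaces = re.sub(' +', ' ', name.replace('_', ' ')).strip(' ')
    return name_with_spaces[0].upper() + name_with_spaces[1:]

print(_python_function_name_to_component_name('train_XGBoost_model'))
# 'Train XGBoost model' -- capitalize() would have given 'Train xgboost model'
```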
def _python_function_name_to_component_name(name):
import re
- return re.sub(' +', ' ', name.replace('_', ' ')).strip(' ').capitalize()
+ name_with_spaces = re.sub(' +', ' ', name.replace('_', ' ')).strip(' ')
+ return name_with_spaces[0].upper() + name_with_spaces[1:]
def _capture_function_code_using_cloudpickle(func, modules_to_capture: List[str] = None) -> str:
|
Update AUTHORS
It's been a real pleasure to work on Swift all these years with you
guys. You're doing an amazing job in the best mind. Don't change
anything! | @@ -32,6 +32,7 @@ Janie Richling ([email protected])
Michael Barton ([email protected])
Mahati Chamarthy ([email protected])
Samuel Merritt ([email protected])
+Romain Le Disez ([email protected])
Contributors
------------
@@ -355,7 +356,6 @@ Richard Hawkins ([email protected])
Robert Francis ([email protected])
Robin Naundorf ([email protected])
Romain de Joux ([email protected])
-Romain Le Disez ([email protected])
Russ Nelson ([email protected])
Russell Bryant ([email protected])
Sachin Patil ([email protected])
|
Fix potential race condition.
Instead of first checking if the channel.id exists and then checking
what it is, we just do a single API call, to prevent cases where
something unexpected might happen in between the first and the second call.
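
A minimal sketch of the pattern being removed versus the one adopted, using a dummy async cache (the `contains`/`get` names mirror the bot's cache API; the class itself is a stand-in):

```python
import asyncio

class DummyCache:
    """Stand-in for the bot's async cache (assumption, not the real class)."""
    def __init__(self):
        self._data = {1234: 1_600_000_000}

    async def contains(self, key):
        return key in self._data

    async def get(self, key):
        return self._data.get(key)

async def main():
    cache = DummyCache()
    channel_id = 1234

    # Racy: another task may delete the key between the two awaits, so
    # get() can return None even though contains() just returned True.
    if await cache.contains(channel_id):
        claimed = await cache.get(channel_id)

    # Fixed: a single call, then branch on the result.
    claimed = await cache.get(channel_id)
    if claimed is not None:
        print("claimed at", claimed)

asyncio.run(main())
```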
self.bot.stats.incr(f"help.dormant_calls.{caller}")
- if await self.claim_times.contains(channel.id):
claimed_timestamp = await self.claim_times.get(channel.id)
+ if claimed_timestamp:
claimed = datetime.fromtimestamp(claimed_timestamp)
in_use_time = datetime.utcnow() - claimed
self.bot.stats.timing("help.in_use_time", in_use_time)
- if await self.unanswered.contains(channel.id):
- if await self.unanswered.get(channel.id):
+ unanswered = await self.unanswered.get(channel.id)
+ if unanswered is not None:
+ if unanswered:
self.bot.stats.incr("help.sessions.unanswered")
else:
self.bot.stats.incr("help.sessions.answered")
log.trace(f"Position of #{channel} ({channel.id}) is actually {channel.position}.")
-
log.trace(f"Sending dormant message for #{channel} ({channel.id}).")
embed = discord.Embed(description=DORMANT_MSG)
await channel.send(embed=embed)
|
Fix watch
I changed the _download_name return type without realizing that
it was also used by the watch endpoint. This switches the endpoint
to go through get so that watches can be tracked just like downloads | @@ -82,8 +82,11 @@ class HostedEncryptedFile(resource.Resource):
request.setHeader("Content-Security-Policy", "sandbox")
if 'name' in request.args.keys():
if self.is_valid_request_name(request):
- d = self._api._download_name(request.args['name'][0])
- d.addCallback(lambda stream: self._make_stream_producer(request, stream))
+ name = request.args['name'][0]
+ d = self._api.jsonrpc_get({'name': name})
+ d.addCallback(lambda response: response['result']['stream_hash'])
+ d.addCallback(lambda sd_hash: self._api._get_lbry_file_by_sd_hash(sd_hash))
+ d.addCallback(lambda lbry_file: self._make_stream_producer(request, lbry_file))
elif request.args['name'][0] in self._api.waiting_on.keys():
request.redirect(conf.settings.UI_ADDRESS + "/?watch=" + request.args['name'][0])
request.finish()
|
Fix typo in StructBlock documentation
The StructValue class `LinkValue` is referenced as `LinkStructValue` in the Meta class.
from wagtail.core.blocks import StructValue
- class LinkValue(StructValue):
+ class LinkStructValue(StructValue):
def url(self):
external_url = self.get('external_url')
page = self.get('page')
|