message (string, length 13–484)
diff (string, length 38–4.63k)
Component: instantiate parameter classes in dependency order, ensuring functions will be instantiated by the time their dependent functions need to use them
@@ -483,8 +483,10 @@
 import copy
 import dill
 import functools
 import inspect
+import itertools
 import logging
 import numbers
+import toposort
 import types
 import warnings
@@ -2060,8 +2062,19 @@ class Component(JSONDumpable, metaclass=ComponentsMeta):
         """
         from psyneulink.core.components.shellclasses import Function

+        parameter_function_ordering = list(toposort.toposort({
+            p.name: p.dependencies for p in self.parameters if p.dependencies is not None
+        }))
+        parameter_function_ordering = list(itertools.chain.from_iterable(parameter_function_ordering))
+
+        def ordering(p):
+            try:
+                return parameter_function_ordering.index(p.name)
+            except ValueError:
+                return -1
+
         # (this originally occurred in _validate_params)
-        for p in self.parameters:
+        for p in sorted(self.parameters, key=ordering):
             if p.getter is None:
                 val = p._get(context)
                 if (
cli: fix variable name in update and get_master_server_id functions The variable previously defined is stack_name and not stack.
@@ -136,7 +136,7 @@ def update(args):
             temp_resources.extend(resources)
             if not resources.next_token:
                 break
-            resources = cfnconn.describe_stack_resources(stack, next_token=resources.next_token)
+            resources = cfnconn.describe_stack_resources(stack_name, next_token=resources.next_token)
     resources = temp_resources
     asg = [r for r in resources if r.logical_resource_id == 'ComputeFleet'][0].physical_resource_id
@@ -247,7 +247,7 @@ def get_master_server_id(stack_name, config):
             temp_resources.extend(resources)
            if not resources.next_token:
                 break
-            resources = cfnconn.describe_stack_resources(stack, next_token=resources.next_token)
+            resources = cfnconn.describe_stack_resources(stack_name, next_token=resources.next_token)
     return [r.physical_resource_id for r in resources if r.logical_resource_id == 'MasterServer'][0]
#AGENT-271 Do not fail to start agent when cannot contact server
@@ -394,9 +394,9 @@ class ScalyrAgent(object):
             if ping_result != 'success':
                 if 'badClientClockSkew' in ping_result:
                     # TODO: The server does not yet send this error message, but it will in the future.
-                    raise Exception('Sending request to the server failed due to bad clock skew. The system clock '
-                                    'on this host is too off from actual time. Please fix the clock and try to '
-                                    'restart the agent.')
+                    print >> sys.stderr, ('Sending request to the server failed due to bad clock skew. The system '
+                                          'clock on this host is too off from actual time. Scalyr agent will keep '
+                                          'trying to connect in the background.')
                 elif 'invalidApiKey' in ping_result:
                     # TODO: The server does not yet send this error message, but it will in the future.
                     raise Exception('Sending request to the server failed due to an invalid API key. This probably '
@@ -404,9 +404,11 @@
                                     'Please visit https://www.scalyr.com/keys and copy a Write Logs key into the '
                                     '\'api_key\' field in the configuration file' % self.__config.file_path)
                 else:
-                    raise Exception('Failed to send request to the server. The server address could be wrong, '
-                                    'there maybe a network connectivity issue, or the provided api_token could be '
-                                    'incorrect. You can disable this check with --no-check-remote-server.')
+                    print >> sys.stderr, ('Failed to send request to the server. The server address could be '
+                                          'wrong, there maybe a network connectivity issue, or the provided '
+                                          'api_token could be incorrect. You can disable this check with'
+                                          ' --no-check-remote-server. Scalyr agent will keep trying to connect in '
+                                          'the background.')
             finally:
                 client.close()
tests: fix `test_nfs_is_up` test. The data structure seems to have been modified in ceph@master (quincy). This commit updates the test accordingly.
@@ -38,9 +38,14 @@ class TestNFSs(object):
             cluster=cluster
         )
         output = host.check_output(cmd)
-        daemons = [i for i in json.loads(
-            output)["servicemap"]["services"]["rgw-nfs"]["daemons"]]
-        assert hostname in daemons
+        keys = [i for i in json.loads(
+            output)["servicemap"]["services"]["rgw-nfs"]["daemons"].keys()]
+        keys.remove('summary')
+        daemons = json.loads(output)["servicemap"]["services"]["rgw-nfs"]["daemons"]
+        hostnames = []
+        for key in keys:
+            hostnames.append(daemons[key]['metadata']['hostname'])
+

 # NOTE (guits): This check must be fixed. (Permission denied error)
 # @pytest.mark.no_docker
Update README.md Added logo
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Build Status](https://travis-ci.org/inventree/InvenTree.svg?branch=master)](https://travis-ci.org/inventree/InvenTree)
 [![Documentation Status](https://readthedocs.org/projects/inventree/badge/?version=latest)](https://inventree.readthedocs.io/en/latest/?badge=latest)
 [![Coverage Status](https://coveralls.io/repos/github/inventree/InvenTree/badge.svg)](https://coveralls.io/github/inventree/InvenTree)

+<img src="images/logo/inventree.png" alt="InvenTree" width="128"/>
+
 # InvenTree

 InvenTree is an open-source Inventory Management System which provides powerful low-level stock control and part tracking. The core of the InvenTree system is a Python/Django database backend which provides an admin interface (web-based) and a JSON API for interaction with external interfaces and applications.
Update Dockerfile to no longer fetch geoip database It's not needed for local envs. Geoip queries will fail, but that should be ok. Fixes
@@ -56,12 +56,6 @@ RUN apt-get update && apt-get -t stretch install -y \
     libmaxminddb-dev \
     && rm -rf /var/lib/apt/lists/*

-ADD http://geolite.maxmind.com/download/geoip/database/GeoLite2-Country.mmdb.gz /tmp
-
-RUN mkdir -p /usr/local/share/GeoIP \
-    && gunzip -c /tmp/GeoLite2-Country.mmdb.gz > /usr/local/share/GeoIP/GeoLite2-Country.mmdb \
-    && rm -f /tmp/GeoLite2-Country.mmdb.gz
-
 # Install `file` and `libmagic` from the `buster` repositories for an up-to-date
 # file-detection.
 RUN apt-get update && apt-get -t buster install -y \
Add a note to the ssh_known_hosts State doc. I tried to use this state with multiple hostname aliases in a single entry, and discovered it did not work. Hopefully this documentation tweak will help future users.
@@ -61,6 +61,9 @@ def present(
     name
         The name of the remote host (e.g. "github.com")

+        Note that only a single hostname is supported, if foo.example.com and
+        bar.example.com have the same host you will need two separate Salt
+        States to represent them.
     user
         The user who owns the ssh authorized keys file to modify
@@ -197,6 +200,9 @@ def absent(name, user=None, config=None):
     name
         The host name

+        Note that only single host names are supported. If foo.example.com
+        and bar.example.com are the same machine and you need to exclude both,
+        you will need one Salt state for each.
     user
         The user who owns the ssh authorized keys file to modify
Fix bad merge, get CouchUser by user_id ... because
@@ -15,7 +15,7 @@ from corehq.apps.domain.auth import (
 )
 from corehq.apps.users.models import CouchUser, InvalidUser, AnonymousCouchUser
 from corehq.apps.users.util import username_to_user_id
-from corehq.toggles import ANONYMOUS_WEB_APPS_USAGE, PUBLISH_CUSTOM_REPORTS
+from corehq.toggles import PUBLISH_CUSTOM_REPORTS

 SESSION_USER_KEY_PREFIX = "session_user_doc_%s"
@@ -49,9 +49,6 @@ class UsersMiddleware(MiddlewareMixin):
             request.domain = view_kwargs['domain']
         if 'org' in view_kwargs:
             request.org = view_kwargs['org']
-        if request.user.is_anonymous and 'domain' in view_kwargs:
-            if ANONYMOUS_WEB_APPS_USAGE.enabled(view_kwargs['domain']):
-                request.couch_user = CouchUser.get_anonymous_mobile_worker(request.domain)
         auth_type = determine_authtype_from_header(request, default='NONE')
         if auth_type in (BASIC, DIGEST, API_KEY) and 'domain' in view_kwargs:
             # User is not yet authenticated, but setting request.domain (above) and request.couch_user will allow
@@ -60,7 +57,8 @@
             username, _ = get_username_and_password_from_request(request)
             request.couch_user = CouchUser.get_by_username(username) or InvalidUser()
         if request.user and request.user.is_authenticated:
-            request.couch_user = CouchUser.get_by_username(request.user.username)
+            user_id = username_to_user_id(request.user.username)
+            request.couch_user = CouchUser.get_by_user_id(user_id)
         if not request.couch_user.analytics_enabled:
             request.analytics_enabled = False
         if 'domain' in view_kwargs:
[doc] Don't use Sphinx 5.0 There are problems with parameters which are shown with double colons.
 # This is a PIP requirements file for building Sphinx documentation of pywikibot
 # requirements.txt is also needed
-sphinx >= 4.1.0
\ No newline at end of file
+sphinx >= 4.1.0,!=5.0.0
\ No newline at end of file
Fix incorrect scope for logging in StatisticsGen executor. The "output_uri" variable may have been uninitialized at logging time.
@@ -84,4 +84,5 @@ class Executor(base_executor.BaseExecutor):
                 shard_name_template='',
                 coder=beam.coders.ProtoCoder(
                     statistics_pb2.DatasetFeatureStatisticsList)))
-      tf.logging.info('Statistics written to {}.'.format(output_uri))
+      tf.logging.info('Statistics for split {} written to {}.'.format(
+          split, output_uri))
remove cattrs pin dependency removal
-apache-airflow[gcp]==1.10.12
+apache-airflow[gcp]==1.10.14
 SQLAlchemy==1.3.23 # must be under 1.4 until at least Airflow 2.0 (check airflow setup.py for restrictions)
-cattrs==1.0.0 #this has to be explicitly pinned to 1.0.0 until airflow 1.10.13 when a fix should be pushed
 kubernetes==12.0.1
 scipy==1.4.1; python_version > '3.0'
 scipy==1.2.3; python_version < '3.0'
Ensure nano is the default editor The installation of `joe` will set it as the default editor. This is surprising for many users and is bad for `sudo visudo`. Flip to `nano`.
 - expect
 - pandoc # for `pip install pwntools`

+- name: Ensure nano is the default editor
+  alternatives:
+    name: editor
+    path: /bin/nano
+
 - name: Install common pip2 packages for CTF shell servers
   pip:
     name: "{{ item }}"
Minor fix of the histogram observer in FBL eval flows Summary: Pull Request resolved: Fix the bug in quantization eval workflow; Add mul_nets option in histogram observer pybind
@@ -22,14 +22,16 @@ PYBIND11_MODULE(dnnlowp_pybind11, m) {
   m.def(
       "ObserveHistogramOfOutput",
-      [](const string& out_file_name, int dump_freq) {
-        AddGlobalNetObserverCreator([out_file_name, dump_freq](NetBase* net) {
+      [](const string& out_file_name, int dump_freq, bool mul_nets) {
+        AddGlobalNetObserverCreator(
+            [out_file_name, dump_freq, mul_nets](NetBase* net) {
          return make_unique<HistogramNetObserver>(
-              net, out_file_name, 2048, dump_freq);
+              net, out_file_name, 2048, dump_freq, mul_nets);
        });
       },
       pybind11::arg("out_file_name"),
-      pybind11::arg("dump_freq") = -1);
+      pybind11::arg("dump_freq") = -1,
+      pybind11::arg("mul_nets") = false);

   m.def(
       "RegisterQuantizationParams",
2.7.2 Automatically generated by python-semantic-release
@@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers
 """
 from datetime import timedelta

-__version__ = "2.7.1"
+__version__ = "2.7.2"
 PROJECT_URL = "https://github.com/custom-components/alexa_media_player/"
 ISSUE_URL = "{}issues".format(PROJECT_URL)
Implement open_issues in githubapi Implemented the metric 'Open Issues' in githubapi. This implementation returns the number issues opened per day.
@@ -126,6 +126,36 @@ class GitHubAPI(object):
         # return the dataframe
         return df

+    @annotate(tag='open-issues')
+    def open_issues(self, owner, repo):
+        """
+        Timeseries of the number of issues opened per day.
+
+        :param owner: The username of the project owner.
+        :param repo: The name of the repository.
+        :return: DatFrame with number of issues opened per day.
+        """
+
+        url = 'https://api.github.com/repos/{}/{}/issues?state=all'.format(owner, repo)
+        issues = []
+
+        while True:
+            response = requests.get(url, auth=('user', self.GITHUB_API_KEY))
+            issues += response.json()
+
+            if 'next' not in response.links:
+                break
+
+            url = response.links['next']['url']
+
+        df = pd.DataFrame(issues, columns=['created_at'])
+        df['created_at'] = pd.to_datetime(df['created_at']).dt.normalize()
+        df = df.groupby('created_at').size().reset_index(name='count')
+
+        return df
+
+
+
 #####################################
 ### RISK ###
 #####################################
Fix (for valid Python code) Does not fix the original issue where for ex. a `if` statement is immediately followed by a `else` statement (not valid Python) To fix the original issue, `if ind(leading_text) == ind(prevtxt.rstrip()) and not prevtxt[-1] == ':':` works but is unnecessary in my opinion
@@ -3982,7 +3982,7 @@ def insert_text(event):
                     ind = lambda txt: len(txt)-len(txt.lstrip())
                     prevtxt = to_text_string(self.textCursor(
                                              ).block().previous().text())
-                    if ind(leading_text) == ind(prevtxt):
+                    if ind(leading_text) == ind(prevtxt.rstrip()):
                         self.unindent(force=True)
                     insert_text(event)
             elif key == Qt.Key_Space and not shift and not ctrl \
@@ -3993,7 +3993,7 @@ def insert_text(event):
                     ind = lambda txt: len(txt)-len(txt.lstrip())
                     prevtxt = to_text_string(self.textCursor(
                                              ).block().previous().text())
-                    if ind(leading_text) == ind(prevtxt):
+                    if ind(leading_text) == ind(prevtxt.rstrip()):
                         self.unindent(force=True)
                     insert_text(event)
             elif key == Qt.Key_Tab and not ctrl:
Give Python CSDK initialize_block default None arg This is specified by the interface.
@@ -80,7 +80,7 @@ class ZmqService(Service):

     # -- Block Creation --

-    def initialize_block(self, previous_id):
+    def initialize_block(self, previous_id=None):
         request = (
             consensus_pb2.ConsensusInitializeBlockRequest(
                 previous_id=previous_id)
Add test to check role unassignment Create a test that checks if a role gets deleted it will also get unassigned from the user
 from django.urls import reverse

 from .base import AuthenticatedAPITestCase
-from ..models import Role
+from ..models import Role, User
+

 class CreationTests(AuthenticatedAPITestCase):
@@ -35,6 +36,20 @@ class CreationTests(AuthenticatedAPITestCase):
             permissions=6,
             position=0,
         )
+        cls.role_to_delete = Role.objects.create(
+            id=7,
+            name="role to delete",
+            colour=7,
+            permissions=7,
+            position=0,
+        )
+        cls.role_unassigned_test_user = User.objects.create(
+            id=8,
+            name="role_unassigned_test_user",
+            discriminator="0000",
+            roles=[cls.role_to_delete.id],
+            in_guild=True
+        )

     def _validate_roledict(self, role_dict: dict) -> None:
         """Helper method to validate a dict representing a role."""
@@ -181,6 +196,11 @@ class CreationTests(AuthenticatedAPITestCase):
         response = self.client.delete(url)
         self.assertEqual(response.status_code, 204)

+    def test_role_delete_unassigned(self):
+        """Tests if the deleted Role gets unassigned from the user."""
+        self.role_to_delete.delete()
+        self.assertEqual(self.role_unassigned_test_user.roles, [])
+
     def test_role_detail_404_all_methods(self):
         """Tests detail view with non-existing ID."""
         url = reverse('api:bot:role-detail', args=(20190815,))
target_test.py: Use clean paths to avoid failures Currently some tests will fail if you run them twice, because there's no proper cleanup done.
@@ -19,6 +19,7 @@ from __future__ import print_function
 from helpers import unittest, skipOnTravis
 from mock import Mock
 import re
+import random

 import luigi.target
 import luigi.format
@@ -251,23 +252,25 @@ class FileSystemTargetTestMixin(object):
         # We're cheating and retrieving the fs from target.
         # TODO: maybe move to "filesystem_test.py" or something
         t = self.create_target()
+        other_path = t.path + '-' + str(random.randint(0, 999999999))
         t._touchz()
         fs = t.fs
         self.assertTrue(t.exists())
-        fs.move(t.path, t.path+"-yay")
+        fs.move(t.path, other_path)
         self.assertFalse(t.exists())

     def test_rename_dont_move_on_fs(self):
         # We're cheating and retrieving the fs from target.
         # TODO: maybe move to "filesystem_test.py" or something
         t = self.create_target()
+        other_path = t.path + '-' + str(random.randint(0, 999999999))
         t._touchz()
         fs = t.fs
         self.assertTrue(t.exists())
-        fs.rename_dont_move(t.path, t.path+"-yay")
+        fs.rename_dont_move(t.path, other_path)
         self.assertFalse(t.exists())
         self.assertRaises(luigi.target.FileAlreadyExists,
-                          lambda: fs.rename_dont_move(t.path, t.path+"-yay"))
+                          lambda: fs.rename_dont_move(t.path, other_path))


 class TemporaryPathTest(unittest.TestCase):
remove gh issue for cursor_res Summary: looking at and - not sure what exactly was meant to be done so putting up this diff Test Plan: bk Reviewers: sidkmenon
@@ -251,7 +251,6 @@ def watcher_thread(
             )
             try:
                 with engine.connect() as conn:
-                    # https://github.com/dagster-io/dagster/issues/3858
                     cursor_res = conn.execute(
                         db.select([SqlEventLogStorageTable.c.event]).where(
                             SqlEventLogStorageTable.c.id == index
shortened code using abs() and inplace ops

n = -n if n < 0 else n --> n = abs(n)
n = n // 10 --> n //= 10
@@ -14,11 +14,11 @@ def sum_of_digits(n: int) -> int:
     >>> sum_of_digits(0)
     0
     """
-    n = -n if n < 0 else n
+    n = abs(n)
     res = 0
     while n > 0:
         res += n % 10
-        n = n // 10
+        n //= 10
     return res


@@ -35,7 +35,7 @@ def sum_of_digits_recursion(n: int) -> int:
     >>> sum_of_digits_recursion(0)
     0
     """
-    n = -n if n < 0 else n
+    n = abs(n)
     return n if n < 10 else n % 10 + sum_of_digits(n // 10)
Used a shared cache db for sessions. This will make our stuff work when we have multiple app servers.
@@ -55,6 +55,9 @@ INSTALLED_APPS = (
     'rest_framework.authtoken',
 )

+SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
+
+
 MIDDLEWARE_CLASSES = (
     # 'django.middleware.cache.UpdateCacheMiddleware',
     'django.contrib.sessions.middleware.SessionMiddleware',
Update nf_core/lint_utils.py Change prettier syntax errors to issue a log instead of error out
@@ -72,7 +72,7 @@ def run_prettier_on_file(file):
         )
     except subprocess.CalledProcessError as e:
         if ": SyntaxError: " in e.stdout.decode():
-            raise ValueError(f"Can't format {file} because it has a synthax error.\n{e.stdout.decode()}") from e
+            log.critical(f"Can't format {file} because it has a syntax error.\n{e.stdout.decode()}")
         log.warning(
             "There was an error running the prettier pre-commit hook.\n"
             f"STDOUT: {e.stdout.decode()}\nSTDERR: {e.stderr.decode()}"
Add pre-processing step to dials.stills_process. In this program it's a no-op. We'll use it in XFEL though.
@@ -330,6 +330,7 @@ def run(self):
   # Wrapper function
   def do_work(i, item_list):
     processor = Processor(copy.deepcopy(params), composite_tag = "%04d"%i)
+
     for item in item_list:
       processor.process_datablock(item[0], item[1])
     processor.finalize()
@@ -457,6 +458,12 @@ def process_datablock(self, tag, datablock):
       dump.as_json(self.params.output.datablock_filename)

    # Do the processing
+    try:
+      self.pre_process(datablock)
+    except Exception as e:
+      print("Error in pre-process", tag, str(e))
+      if not self.params.dispatch.squash_errors: raise
+      return
     try:
       observed = self.find_spots(datablock)
     except Exception as e:
@@ -482,6 +489,10 @@ def process_datablock(self, tag, datablock):
       if not self.params.dispatch.squash_errors: raise
       return

+  def pre_process(self, datablock):
+    """ Add any pre-processing steps here """
+    pass
+
   def find_spots(self, datablock):
     from time import time
     from dials.array_family import flex
fix to logging in AA Summary: Pull Request resolved:
@@ -202,6 +202,10 @@ std::string AliasDb::toString() const {
   std::stringstream ss{};
   std::unordered_map<size_t, Element*> indexToElementMap;

+  for (const auto &ent : wildcardIndex_) {
+    indexToElementMap[ent.second->index] = ent.second;
+  }
+
   ss << "\n===1. GRAPH===\n";
   ss << graph_->toString();
@@ -232,7 +236,7 @@ std::string AliasDb::toString() const {
       ss << *node;
       ss << " ";
       for (const auto value : values) {
-        ss << indexToElementMap[value]->value->debugName() << ", ";
+        ss << getElementName(indexToElementMap[value]) << ", ";
       }
       ss << "\n";
     }
Fix fwaas v1 configuration doc Modify the fwaas v1 config about driver Closes-Bug:
@@ -17,7 +17,7 @@ FWaaS management options are also available in the Dashboard.

       service_provider = FIREWALL:Iptables:neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver:default

       [fwaas]
-      driver = neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
+      driver = iptables
       enabled = True

 .. note::
Avoid importlib-metadata conflict with tox importlib-metadata does not support installing versions after 2.0 on Python versions prior to py38. Also fix the path for ANSIBLE_CONFIG file
@@ -38,7 +38,7 @@ passenv =
     SSH_AUTH_SOCK
     TERM
 setenv =
-    ANSIBLE_CONFIG={toxinidir}/dev/null
+    ANSIBLE_CONFIG={toxinidir}/.ansible.cfg
     ANSIBLE_CALLABLE_WHITELIST={env:ANSIBLE_CALLABLE_WHITELIST:timer,profile_roles}
     ANSIBLE_DISPLAY_FAILED_STDERR=1
     ANSIBLE_VERBOSITY=1
@@ -51,6 +51,7 @@ deps =
     devel: ansible>=2.10.0a2,<2.11
     dockerfile: ansible>=2.9.12
     selinux
+    py{36,37}: importlib-metadata<2,>=0.12
 extras =
     docker
     lint
proofreadpage_tests: fix test_page_gen_redlink Recreated pages on en.wikisource.org and renamed pages in file.
@@ -651,11 +651,11 @@ class TestIndexPageMappingsRedlinks(IndexPageTestCase):

     cached = True

-    index_name = 'Index:Pywikibot test page 1'
-    page_names = ['Page:Pywikibot test page 1/1',
-                  'Page:Pywikibot test page 2/2',
+    index_name = 'Index:Pywikibot test page.djvu'
+    page_names = ['Page:Pywikibot test page.djvu/1',
+                  'Page:Pywikibot test page.djvu/2',
                   ]
-    missing_name = 'Page:Pywikibot test page 2/2'
+    missing_name = 'Page:Pywikibot test page.djvu/2'

     @classmethod
     def setUpClass(cls):
@@ -675,7 +675,6 @@ class TestIndexPageMappingsRedlinks(IndexPageTestCase):
         n = self.index.get_number(page)
         self.assertEqual(self.index.get_page(n), page)

-    @unittest.expectedFailure # T181697
     def test_page_gen_redlink(self):
         """Test Index page generator with redlinks."""
         # Check start/end limits.
Refactor code to follow project coding styling. Changes in css styles
@@ -178,12 +178,13 @@ div.languages {
     color: black;
     text-align: {{end}};
     float: right;
-    margin-right:5%;
+    padding-{{end}}: 20px;
 }

 div.languages img {
     vertical-align: text-top;
 }

+
 div.languages select {
     background: white;
 }
XFail Bow For failure with `overriding declarations in extensions is not supported`.
"project": "Bow.xcodeproj", "scheme": "Bow", "destination": "generic/platform=iOS", - "configuration": "Release" + "configuration": "Release", + "xfail": { + "issue": "https://bugs.swift.org/browse/SR-11740", + "branch": ["master"] + } }, { "action": "TestXcodeProjectScheme",
Update issue template Just until juju-crashdump gets fixed
@@ -25,13 +25,13 @@ Please attach tarball of **~/.cache/conjure-up**:
 tar cvzf conjure-up.tar.gz ~/.cache/conjure-up
 ```

-## Crashdump
+## Sosreport

-In order to better get an overall idea of your system setup please also attach a
-**juju-crashdump** with **sosreport** plugin enabled.
+Please attach a sosreport:

 ```
-sudo snap install juju-crashdump --classic && juju crashdump -a sosreport`
+sudo apt install sosreport
+sosreport
 ```

 The resulting output file can be attached to this issue.
l3 notifier should not be a set. An exception is raised when a gateway is set to a router because the l3 notifier is a set. Close-Bug:
@@ -95,9 +95,9 @@ class DFL3AgentlessRouterPlugin(service_base.ServicePluginBase,

     def _start_rpc_notifiers(self):
         """Initialization RPC notifiers for agents"""
-        self.agent_notifiers[const.AGENT_TYPE_L3] = {
+        self.agent_notifiers[const.AGENT_TYPE_L3] = (
             l3_rpc_agent_api.L3AgentNotifyAPI()
-        }
+        )

     def start_rpc_listeners(self):
         self.topic = topics.L3PLUGIN
drafts: Increase the duration of "Saved as draft" tooltip. Now that it's further away from the composebox, we probably want it to be visible for longer. Doubling it from 1.5 seconds to 3 seconds seems reasonable to start with, although we should tune it based on feedback.
@@ -153,7 +153,7 @@ function draft_notify() {
     function remove_instance() {
         instance.destroy();
     }
-    setTimeout(remove_instance, 1500);
+    setTimeout(remove_instance, 3000);
 }

 export function update_draft(opts = {}) {
Update `TFTapasEmbeddings` Update TFTapasEmbeddings
@@ -234,6 +234,16 @@ class TFTapasEmbeddings(tf.keras.layers.Layer):
             position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position)

         if input_ids is not None:
+            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
+            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
+            tf.debugging.assert_less(
+                input_ids,
+                tf.cast(self.vocab_size, dtype=input_ids.dtype),
+                message=(
+                    "input_ids must be smaller than the embedding layer's input dimension (got"
+                    f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})"
+                ),
+            )
             inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

         position_embeddings = tf.gather(self.position_embeddings, indices=position_ids)
Realign is_ganglia_enabled with upstream/develop is_ganglia_enabled now relies on the stack parameters and not config parameters
@@ -142,8 +142,10 @@ def create(args):
                         (event.get('ResourceType'), event.get('LogicalResourceId'),
                          event.get('ResourceStatusReason')))
            logger.info('')
-        outputs = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get('Outputs', [])
-        ganglia_enabled = is_ganglia_enabled(config.parameters)
+        result_stack = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0]
+        outputs = result_stack.get('Outputs', [])
+        parameters = result_stack.get('Parameters')
+        ganglia_enabled = is_ganglia_enabled(parameters)
         for output in outputs:
             if not ganglia_enabled and output.get('OutputKey').startswith('Ganglia'):
                 continue
@@ -167,13 +169,12 @@ def create(args):
         sys.exit(1)

 def is_ganglia_enabled(parameters):
-    if 'ExtraJson' in parameters:
     try:
-            extra_json = json.loads(parameters['ExtraJson'])
-            if 'cfncluster' in extra_json:
-                return not extra_json['cfncluster'].get('ganglia_enabled') == 'no'
-        except ValueError:
-            logger.warn('Invalid value for extra_json option in config')
+        extra_json = filter(lambda x: x.get('ParameterKey') == 'ExtraJson', parameters)[0].get('ParameterValue')
+        extra_json = json.loads(extra_json).get('cfncluster')
+        return not extra_json.get('ganglia_enabled') == 'no'
+    except:
+        pass
     return True

 def update(args):
Bugfix support str and byte streamed responses Both Werkzeug and Quart Responses can stream strings, which need to be converted to bytes before being sent to the ASGI server (using the response charset).
@@ -119,19 +119,21 @@ class ASGIHTTPConnection:

         if isinstance(response, WerkzeugResponse):
             for data in response.response:
+                body = data.encode(response.charset) if isinstance(data, str) else data
                 await send(
                     cast(
                         HTTPResponseBodyEvent,
-                        {"type": "http.response.body", "body": data, "more_body": True},
+                        {"type": "http.response.body", "body": body, "more_body": True},
                     )
                 )
         else:
-            async with response.response as body:
-                async for data in body:
+            async with response.response as response_body:
+                async for data in response_body:
+                    body = data.encode(response.charset) if isinstance(data, str) else data
                     await send(
                         cast(
                             HTTPResponseBodyEvent,
-                            {"type": "http.response.body", "body": data, "more_body": True},
+                            {"type": "http.response.body", "body": body, "more_body": True},
                         )
                     )
         await send(
Update cscs.py Remove leftovers from alternative config file with system Alps
@@ -980,7 +980,7 @@
         {
             'name': 'PrgEnv-aocc',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'PrgEnv-aocc'
@@ -989,7 +989,7 @@
         {
             'name': 'PrgEnv-cray',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'PrgEnv-cray'
@@ -998,7 +998,7 @@
         {
             'name': 'PrgEnv-gnu',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'PrgEnv-gnu'
@@ -1007,7 +1007,7 @@
         {
             'name': 'PrgEnv-intel',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'PrgEnv-intel'
@@ -1016,7 +1016,7 @@
         {
             'name': 'cpeAMD',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'cpeAMD'
@@ -1025,7 +1025,7 @@
         {
             'name': 'cpeCray',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'cpeCray'
@@ -1034,7 +1034,7 @@
         {
             'name': 'cpeGNU',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'cpeGNU'
@@ -1043,7 +1043,7 @@
         {
             'name': 'cpeIntel',
             'target_systems': [
-                'alps', 'eiger', 'pilatus'
+                'eiger', 'pilatus'
             ],
             'modules': [
                 'cpeIntel'
Update changelog last step before merging.
@@ -5,6 +5,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

 ## Unreleased
+- Update smoke tests to count skipped & disabled tests as "pass". Also update the command line results from a TDVT run with more details.
 - Refactor list command. list is used with suites and list-logical-configs is just for logical configs.

 ## [2.1.2] - 2019-12-05
tenant: Do not disable TLS when enable_agent_mtls = False The tenant should disable TLS only for the agent when enable_agent_mtls=False, keeping the TLS enabled for other services.
@@ -111,20 +111,22 @@ class Tenant:
                 logger.warning(
                     "Warning: agent mTLS is currently disabled, keys will be sent in the clear! This should only be used for testing."
                 )
-            else:
+            if not verify_server_cert:
                 logger.warning(
-                    "Warning: agent mTLS is enabled, but server certificate verification is disabled as 'trusted_server_ca' option is set to 'all'. This should only be used for testing."
+                    "Warning: server certificate verification is disabled as 'trusted_server_ca' option is set to 'all'. This should only be used for testing."
                 )
-        else:
-            if not os.path.isfile(cert):
-                logger.warning("Could not find file %s provided in 'client_cert'", cert)
-            if not os.path.isfile(key):
-                logger.warning("Could not find file %s provided in 'client_key'", key)
             if not trusted_ca:
                 logger.warning("No certificates provided in 'trusted_server_ca'")
-        else:
+
+        if cert and not os.path.isfile(cert):
+            logger.warning("Could not find file %s provided in 'client_cert'", cert)
+
+        if key and not os.path.isfile(key):
+            logger.warning("Could not find file %s provided in 'client_key'", key)
+
+        if all([(cert and os.path.isfile(cert)), (key and os.path.isfile(key))]):
             self.client_cert = cert
             self.client_key = key
             self.client_key_password = key_password
@@ -136,6 +138,8 @@ class Tenant:
             )

             logger.info("TLS is enabled.")
+        else:
+            logger.warning("TLS is disabled.")

     @property
     def verifier_base_url(self):
xfail the tag_sync test as well In virtualized environments, kvmclock provides a slightly lower time resolution than the host default tsc and therefore this test can fail on fast hosts where sync() is not adding enough wait time.
@@ -66,7 +66,12 @@ def test_etag_sync(tmpdir):

     new_etag = vdir.get_etag_from_file(fpath)

+    try:
         assert old_etag != new_etag
+    except AssertionError:
+        pytest.xfail(
+            "Do we need to sleep?"
+        )


 def test_etag_sleep(tmpdir, sleep_time):
ebd/ebuild.lib: drop unnecessary ORIG_VARS usage for variable exports This is now covered by PKGCORE_BASH_VARS and the other function specific appends to DONT_EXPORT_VARS.
 # readonly. This limits users, but also helps to ensure that reloaded envs from older portages don't
 # overwrite an internal ebd.sh function that has since changed.

-ORIG_VARS=( $(compgen -v) )
-
 DONT_EXPORT_VARS=(
	"${PKGCORE_BASH_VARS[@]}" BASH._* OLDPWD SANDBOX_.*
-	ORIG_VARS "CCACHE.*" "DISTCC.*" SYNC DIR FEATURES
+	"CCACHE.*" "DISTCC.*" SYNC DIR FEATURES
	"CONFIG_PROTECT.*" WORKDIR "RSYNC_.*" GENTOO_MIRRORS
	"(DIST|FILES|RPM|ECLASS)DIR" MUST_EXPORT_ENV QA_CONTROLLED_EXTERNALLY myarg
	"ACCEPT_(KEYWORDS|LICENSE)" "BUILD(_PREFIX|DIR)" T
@@ -533,8 +531,7 @@ __dump_metadata_keys() {
	__ebd_write_line "key DEFINED_PHASES=${phases:--}"
 }

-DONT_EXPORT_VARS+=( $(declare | __filter_env --print-vars | \
-	__regex_filter_input ${ORIG_VARS[@]} ${DONT_EXPORT_VARS[@]}) )
+DONT_EXPORT_VARS+=( $(compgen -v | __regex_filter_input ${DONT_EXPORT_VARS[@]}) )

 set +f
provision: Improve error messaging when attempting to use Ubuntu Trusty. As part of dropping support, we add appropriate error messaging when a user attempts to provision while using trusty. If the user is running in Vagrant we append information on how to proceed.
@@ -128,6 +128,12 @@ codename = distro_info['DISTRIB_CODENAME']
 family = distro_info['DISTRIB_FAMILY']
 if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
     logging.critical("Unsupported platform: {} {}".format(vendor, codename))
+    if codename == 'trusty':
+        print()
+        print("Ubuntu Trusty reached end-of-life upstream and is no longer a supported platform for Zulip")
+        if os.path.exists('/home/vagrant'):
+            print("To upgrade, run `vagrant destroy`, and then recreate the Vagrant guest.\n")
+            print("See: https://zulip.readthedocs.io/en/latest/development/setup-vagrant.html")
     sys.exit(1)

 POSTGRES_VERSION_MAP = {
[bugfix] Solve TypeError of RepeatingGenerator A generator is not reversible.
@@ -2185,7 +2185,7 @@ def RepeatingGenerator(generator, key_func=lambda x: x, sleep_duration=60,
                 break
         pywikibot.sleep(sleep_duration)

-        yield from reversed(filtered_generator())
+        yield from reversed(list(filtered_generator()))


 @deprecated_args(pageNumber='groupsize', step='groupsize', lookahead=None)
Python API: add docstring for Token.kind TN:
@@ -452,6 +452,7 @@ class Token(ctypes.Structure):

     @property
     def kind(self):
+        ${py_doc('langkit.token_kind', 8)}
         name = _token_kind_name(self._kind)
         # The _token_kind_name wrapper is already supposed to handle exceptions
         # so this should always return a non-null value.
Correct Arc by Sagitta. Keyerror with start, end, sagitta value circular arc definition.
@@ -3557,7 +3557,7 @@ class Arc(PathSegment):
             bulge = float(kwargs['bulge'])
             sagitta = bulge * self.start.distance_to(self.end) / 2.0
         elif 'sagitta' in kwargs:
-            sagitta = float(kwargs['bulge'])
+            sagitta = float(kwargs['sagitta'])
         if sagitta is not None:
             control = Point.towards(self.start, self.end, 0.5)
             angle = self.start.angle_to(self.end)
Update README.md mapping docker paths with spaces causes an error. using -v "$(pwd)":/scripts is more correct as per
@@ -76,7 +76,7 @@ Installing from Git is also supported (OS must have git installed).

 Move to the local directory which contains your script(s) and run the container

-`docker run -it --rm --name pyez -v $PWD:/scripts juniper/pyez sh`
+`docker run -it --rm --name pyez -v "$(pwd)":/scripts juniper/pyez sh`

 Your local scripts will be mounted to /scripts in the container
adopt: convert legacy grafana-server groupname early This is a follow up on PR cephadm-adopt.yml playbook is affected by the same bug Closes:
           invoking the playbook
       when: ireallymeanit != 'yes'

+  - name: import_role ceph-defaults
+    import_role:
+      name: ceph-defaults
+
+  - name: check if a legacy grafana-server group exists
+    import_role:
+      name: ceph-facts
+      tasks_from: convert_grafana_server_group_name.yml
+    when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0
+
 - name: gather facts and prepare system for cephadm
   hosts:
     - "{{ mon_group_name|default('mons') }}"
refactor: Extract out out root_dir and puppeteer_dir. The puppeteer_dir will use used for passing in the path to save the recording.
@@ -7,6 +7,9 @@ const puppeteer = require("puppeteer");

 const test_credentials = require("../../var/puppeteer/test_credentials.js").test_credentials;

+const root_dir = path.resolve(__dirname, "../../");
+const puppeteer_dir = path.join(root_dir, "var/puppeteer");
+
 class CommonUtils {
     constructor() {
         this.browser = null;
@@ -86,8 +89,7 @@ class CommonUtils {
             this.screenshot_id += 1;
         }

-        const root_dir = path.resolve(__dirname, "../../");
-        const screenshot_path = path.join(root_dir, "var/puppeteer", `${name}.png`);
+        const screenshot_path = path.join(puppeteer_dir, `${name}.png`);
         await page.screenshot({
             path: screenshot_path,
         });
[tune] Deflake test_tune_restore.py By switching to on_step_end and keeping track of the number of trials we avoid race conditions in this test suite.
@@ -169,15 +169,12 @@ class TuneFailResumeGridTest(unittest.TestCase):
     class FailureInjectorCallback(Callback):
         """Adds random failure injection to the TrialExecutor."""

-        def __init__(self, steps=20):
-            self._step = 0
-            self.steps = steps
-
-        def on_trial_start(self, trials, **info):
-            self._step += 1
-            if self._step >= self.steps:
-                print(f"Failing after step {self._step} with "
-                      f"{len(trials)} trials")
+        def __init__(self, num_trials=20):
+            self.num_trials = num_trials
+
+        def on_step_end(self, trials, **kwargs):
+            if len(trials) == self.num_trials:
+                print(f"Failing after {self.num_trials} trials.")
                 raise RuntimeError

     class CheckStateCallback(Callback):
Enable text wrapping for comment text editor This avoids expensive resizing of the editor pane.
@@ -585,6 +585,7 @@ Use -/= to move items up or down.</property>
                     <object class="GtkTextView" id="comment">
                       <property name="visible">True</property>
                       <property name="can_focus">True</property>
+                      <property name="wrap_mode">word</property>
                     </object>
                   </child>
                 </object>
minor fix in iterating values ah yes, let me optimize these for the other functions
@@ -65,7 +65,7 @@ def entropy_shannon(signal, base=2):

     if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1: # n-dimensional
         signal = _sanitize_multichannel(signal)
-        info["Values"] = np.full(len(signal), np.nan) # Initialize empty vector of values
+        info["Values"] = np.full(signal.shape[1], np.nan) # Initialize empty vector of values
         for i, colname in enumerate(signal):
             info["Values"][i] = _entropy_shannon(signal[colname])
         out = np.mean(info["Values"])
Don't try to guess fixed "sections". That would have way too many false positives.
@@ -114,5 +114,6 @@ if __name__ == '__main__':

     for lang_code in lang_codes:
         cfg = config.get_localized_config(lang_code)
+        if cfg.extract == 'snippet':
             compute_fixed_snippets(cfg)

     log.info('all done in %d seconds.' % (time.time() - start))
Host based ssh * Add a toggle for using host-based authentication Fixes * Always load host keys * Minor flake fixes * Set `host_auth` attribute This is required for `RepresentationMixin`.
@@ -10,6 +10,12 @@ from parsl.utils import RepresentationMixin

 logger = logging.getLogger(__name__)

+class HostAuthSSHClient(paramiko.SSHClient):
+    def _auth(self, username, *args):
+        self._transport.auth_none(username)
+        return
+
+
 class SSHChannel(Channel, RepresentationMixin):
     ''' SSH persistent channel. This enables remote execution on sites
     accessible via ssh. It is assumed that the user has setup host keys
@@ -20,7 +26,7 @@ class SSHChannel(Channel, RepresentationMixin):

     '''

-    def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, **kwargs):
+    def __init__(self, hostname, username=None, password=None, script_dir=None, envs=None, host_auth=False, **kwargs):
         ''' Initialize a persistent connection to the remote system.
         We should know at this point whether ssh connectivity is possible
@@ -42,7 +48,11 @@ class SSHChannel(Channel, RepresentationMixin):
         self.password = password
         self.kwargs = kwargs
         self.script_dir = script_dir
+        self.host_auth = host_auth

+        if host_auth:
+            self.ssh_client = HostAuthSSHClient()
+        else:
         self.ssh_client = paramiko.SSHClient()
         self.ssh_client.load_system_host_keys()
         self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
Do test discovery via unittest discover This will allow us to distinguish between test errors and test failures. Also, define travis-test in terms of a target dependency, rather than copying the target body.
-PYTEST_OPTS = --verbose
+UNITTEST_OPTS = --verbose

 .PHONY: lint
 lint:
@@ -46,18 +46,18 @@ api-docs:
	sphinx-build-3 -b html api api/_build/html

 dbus-tests:
-	py.test-3 ${PYTEST_OPTS} ./tests/whitebox/integration
+	python3 -m unittest discover ${UNITTEST_OPTS} --top-level-directory ./tests/whitebox --start-directory ./tests/whitebox/integration

 unittest-tests:
-	py.test-3 ${PYTEST_OPTS} ./tests/whitebox/unittest
+	python3 -m unittest discover ${UNITTEST_OPTS} --start-directory ./tests/whitebox/unittest

 .PHONY: coverage-no-html
 coverage-no-html:
	python3 -m coverage --version
-	python3 -m coverage run --timid --branch -m pytest ./tests/whitebox/integration
-	python3 -m coverage run --timid --branch -a -m pytest ./tests/whitebox/unittest
-	python3 -m coverage run --timid --branch -a -m pytest ./tests/whitebox/monkey_patching/test_keyboard_interrupt.py
-	python3 -m coverage run --timid --branch -a -m pytest ./tests/whitebox/monkey_patching/test_stratisd_version.py
+	python3 -m coverage run --timid --branch -m unittest discover --quiet --top-level-directory ./tests/whitebox --start-directory ./tests/whitebox/integration
+	python3 -m coverage run --timid --branch -a -m unittest discover --quiet --start-directory ./tests/whitebox/unittest
+	python3 -m coverage run --timid --branch -a -m unittest --quiet tests.whitebox.monkey_patching.test_keyboard_interrupt.KeyboardInterruptTestCase
+	python3 -m coverage run --timid --branch -a -m unittest --quiet tests.whitebox.monkey_patching.test_stratisd_version.StratisdVersionTestCase
	python3 -m coverage report -m --fail-under=100 --show-missing --include="./src/*"

 .PHONY: coverage
@@ -65,13 +65,12 @@ coverage: coverage-no-html
	python3 -m coverage html --include="./src/*"

 keyboard-interrupt-test:
-	py.test-3 ${PYTEST_OPTS} ./tests/whitebox/monkey_patching/test_keyboard_interrupt.py
+	python3 -m unittest ${UNITTEST_OPTS} tests.whitebox.monkey_patching.test_keyboard_interrupt.KeyboardInterruptTestCase

 stratisd-version-test:
-	py.test-3 ${PYTEST_OPTS} ./tests/whitebox/monkey_patching/test_stratisd_version.py
+	python3 -m unittest ${UNITTEST_OPTS} tests.whitebox.monkey_patching.test_stratisd_version.StratisdVersionTestCase

-test-travis:
-	py.test ${PYTEST_OPS} ./tests/whitebox/unittest
+test-travis: unittest-tests

 .PHONY: yamllint
 yamllint:
save reader always after first ckpt beginning fixes
@@ -83,20 +83,19 @@ def train_tensorflow(reader, train_data, test_data, dev_data, configuration: dic

     def side_effect(metrics, prev_metric):
         """Returns: a state (in this case a metric) that is used as input for the next call"""
+        if prev_metric is None: # store whole reader only at beginning of training
+            reader.store(save_dir)
         m = metrics[preferred_metric]
         if prev_metric is not None and m < prev_metric:
             reader.session.run(lr_decay_op)
             logger.info("Decayed learning rate to: %.5f" % reader.session.run(learning_rate))
         elif m > best_metric[0] and save_dir is not None:
             best_metric[0] = m
-            if prev_metric is None: # store whole reader_type only at beginning of training
-                reader.store(save_dir)
-            else:
                 reader.model_module.store(os.path.join(save_dir, "model_module"))
-            logger.info("Saving reader_type to: %s" % save_dir)
+            logger.info("Saving reader to: %s" % save_dir)
         return m

-    # this is the standard hook for the reader_type
+    # this is the standard hook for the reader
     hooks.append(readers.eval_hooks[reader_type](
         reader, dev_data, dev_batch_size, summary_writer=sw, side_effect=side_effect,
         iter_interval=validation_interval,
@@ -107,7 +106,7 @@ def train_tensorflow(reader, train_data, test_data, dev_data, configuration: dic
     reader.train(tf_optimizer, train_data, batch_size,
                  max_epochs=epochs, hooks=hooks,
                  l2=l2, clip=clip_value, clip_op=tf.clip_by_value, summary_writer=sw)

-    # Test final reader_type
+    # Test final reader
     if dev_data is not None and save_dir is not None:
         reader.load(save_dir)
         result_dict = evaluate_reader(reader, dev_data, batch_size)
fix and simplify code logic
@@ -235,8 +235,8 @@ class ExecuteTaFuncWithQueue(AbstractTAFunc):
     def get_splitter(self, D):
         y = D.data['Y_train'].ravel()
         train_size = 0.67
-        if not self.resampling_strategy_args and self.resampling_strategy_args.get('train_size'):
-            train_size = self.resampling_strategy_args.get('train_size')
+        if self.resampling_strategy_args:
+            train_size = self.resampling_strategy_args.get('train_size', train_size)
         test_size = 1 - train_size
         if D.info['task'] in CLASSIFICATION_TASKS and \
                 D.info['task'] != MULTILABEL_CLASSIFICATION:
Redirect new-style type params to old-style type params in search This allows users searching on new frontend to be able to switch to the old frontend and then back seamlessly without losing their search.
@@ -362,10 +362,16 @@ def search(request, tag_name=None):
         extra_params = {'sort': {'newest': 'created'}}
     else:
         extra_params = None
+
     fixed = fix_search_query(request.GET, extra_params=extra_params)
     if fixed is not request.GET:
-        return http.HttpResponsePermanentRedirect(urlparams(request.path,
-                                                            **fixed))
+        # We generally want a 301, except if it's a "type", because that's only
+        # here to support the new frontend, so a permanent redirect could mess
+        # things up when the user is going back and forth between the old and
+        # new frontend. https://github.com/mozilla/addons-server/issues/6846
+        status = 302 if 'type' in request.GET else 301
+        return http.HttpResponseRedirect(
+            urlparams(request.path, **fixed), status=status)

     facets = request.GET.copy()
@@ -571,6 +577,7 @@ def fix_search_query(query, extra_params=None):
     keys = {
         'lver': 'appver',
         'pid': 'platform',
+        'type': 'atype',
     }
     for old, new in keys.items():
         if old in query:
@@ -588,7 +595,9 @@ def fix_search_query(query, extra_params=None):
         },
         'platform': {
             str(p.id): p.shortname
-            for p in amo.PLATFORMS.values()}
+            for p in amo.PLATFORMS.values()
+        },
+        'atype': {k: str(v) for k, v in amo.ADDON_SEARCH_SLUGS.items()},
     }
     if extra_params:
         params.update(extra_params)
refactor loss calculation Now the loss calculation for 1D and 2D learners is configurable via a parameter to their constructors.
@@ -40,7 +40,7 @@ def areas(ip):
     return areas


-def _losses_per_triangle(ip):
+def _default_loss_per_triangle(ip):
     devs = deviations(ip)
     area_per_triangle = areas(ip)
     losses = np.sum([dev * area_per_triangle for dev in devs], axis=0)
@@ -58,6 +58,13 @@ class Learner2D(BaseLearner):
     bounds : list of 2-tuples
         A list ``[(a1, b1), (a2, b2)]`` containing bounds,
         one per dimension.
+    loss_per_triangle : callable, optional
+        A function that returns the loss for every triangle.
+        If not provided, then a default is used, which uses
+        the deviation from a linear estimate, as well as
+        triangle area, to determine the loss. See the notes
+        for more details.
+

     Attributes
     ----------
@@ -86,10 +93,21 @@ class Learner2D(BaseLearner):

     This sampling procedure is not extremely fast, so to benefit from
     it, your function needs to be slow enough to compute.
+
+    'loss_per_triangle' takes a single parameter, 'ip', which is a
+    `scipy.interpolate.LinearNDInterpolator`. You can use the
+    *undocumented* attributes 'tri' and 'values' of 'ip' to get a
+    `scipy.spatial.Delaunay` and a vector of function values.
+    These can be used to compute the loss. The functions
+    `adaptive.learner.learner2D.areas` and
+    `adaptive.learner.learner2D.deviations` to calculate the
+    areas and deviations from a linear interpolation
+    over each triangle.
     """

-    def __init__(self, function, bounds):
+    def __init__(self, function, bounds, loss_per_triangle=None):
         self.ndim = len(bounds)
+        self.loss_per_triangle = loss_per_triangle or _default_loss_per_triangle
         self._vdim = None
         if self.ndim != 2:
             raise ValueError("Only 2-D sampling supported.")
@@ -221,7 +239,7 @@ class Learner2D(BaseLearner):
         ip = self.ip_combined()
         tri = ip.tri

-        losses = _losses_per_triangle(ip)
+        losses = self.loss_per_triangle(ip)

         def point_exists(p):
             eps = np.finfo(float).eps * self.points_combined.ptp() * 100
test_retention: Delete redundant get_user_profile_by_email call. This does absolutely nothing and must be in the code accidentally.
@@ -34,7 +34,6 @@ from zerver.models import (
     get_realm,
     get_stream,
     get_system_bot,
-    get_user_profile_by_email,
 )

 # Class with helper functions useful for testing archiving of reactions:
@@ -134,7 +133,6 @@ class ArchiveMessagesTestingBase(RetentionTestingBase):
     def _send_cross_realm_personal_message(self) -> int:
         # Send message from bot to users from different realm.
         bot_email = "[email protected]"
-        get_user_profile_by_email(bot_email)
         zulip_user = self.example_user("hamlet")
         msg_id = internal_send_private_message(
             sender=get_system_bot(bot_email),
The incoming parameter is not effective. The image_driver parameter is not used, and it will not take effect when calling the function with image_driver. Closes-Bug:
@@ -124,15 +124,18 @@ def upload_image_data(context, image, image_tag, image_data,
     return img


-def delete_image(context, img_id, image_driver):
+def delete_image(context, img_id, image_driver=None):
+    if image_driver:
+        image_driver_list = [image_driver.lower()]
+    else:
         image_driver_list = CONF.image_driver_list
-    for driver in image_driver_list:
+    for driver_name in image_driver_list:
         try:
-            image_driver = load_image_driver(driver)
+            image_driver = load_image_driver(driver_name)
             image_driver.delete_image(context, img_id)
         except exception.ZunException:
-            LOG.exception('Unknown exception occurred while deleting image %s',
-                          img_id)
+            LOG.exception('Unknown exception occurred while deleting'
+                          'image %s', img_id)


 class ContainerImageDriver(object):
Space out printing garbled characters [ci skip] These sometimes run together, so stick a little spacing in there so you can see what's happening a little better
@@ -24,7 +24,7 @@ class Command(BaseCommand):
             old_source = form.source
             new_source = fix_form(old_source)
             if old_source != new_source:
-                if input("commit the above changes?\n[y/N] ") == 'y':
+                if input("\n\ncommit the above changes?\n[y/N] ") == 'y':
                     form.source = new_source
                     app.save()
                     print("saved")
@@ -66,7 +66,7 @@ def fix_form(source):
             if unicode_block:
                 transformed = latin_to_utf(unicode_block)
                 new_source += transformed
-                print("{} --> {}".format(unicode_block, transformed))
+                print("{} --> {}\n".format(unicode_block, transformed))
                 unicode_block = ""
         new_source += char
     return new_source
Check that '' is in sys.path in insight:main Make sure the the current directory is in the python path to recognize modules in the current dir.
@@ -2,6 +2,7 @@ from __future__ import print_function
 import logging
 import pkgutil
 import os
+import sys
 import yaml
 from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401
 from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401
@@ -250,6 +251,8 @@ def run(component=None, root=None, print_summary=False,


 def main():
+    if "" not in sys.path:
+        sys.path.insert(0, "")
     run(print_summary=True)
Update stale.yml More lenient closing times.
@@ -22,19 +22,19 @@ jobs:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-issue-message: This issue is stale because it has been open 30 days
            with no activity. Remove the stale label or add a comment, or this issue
-            will be closed in 5 days. You can always re-open if you still feel this
+            will be closed in 15 days. You can always re-open if you still feel this
            is still an issue. Tag @heynemann for more information.
-          stale-pr-message: This PR is stale because it has been open 45 days with
+          stale-pr-message: This PR is stale because it has been open 60 days with
            no activity. Remove the stale label or add a comment, or this PR will
-            be closed in 10 days. You can always re-open if you feel this is something
+            be closed in 30 days. You can always re-open if you feel this is something
            we should still keep working on. Tag @heynemann for more information.
          close-issue-message: This issue was closed because it has been stale for
-            5 days with no activity.
-          close-pr-message: This PR was closed because it has been stale for 10 days
+            15 days with no activity.
+          close-pr-message: This PR was closed because it has been stale for 30 days
            with no activity.
-          days-before-issue-stale: 15
-          days-before-pr-stale: 45
-          days-before-issue-close: 30
-          days-before-pr-close: 60
+          days-before-issue-stale: 30
+          days-before-pr-stale: 60
+          days-before-issue-close: 45
+          days-before-pr-close: 30
          exempt-draft-pr: true
          operations-per-run: 300
Core & Internals: directly fetch tokens from db. columns() doesn't accept strings (column names) anymore. It must be given a column object. However, here the code can be simplified to avoid using columns altogether.
@@ -496,15 +496,11 @@ def __delete_expired_tokens_account(account, *, session: "Session"):
     :param account: Account to delete expired tokens.
     :param session: The database session in use.
     """
-    stmt_select = select(models.Token) \
+    stmt_select = select(models.Token.token) \
         .where(and_(models.Token.expired_at < datetime.datetime.utcnow(),
                     models.Token.account == account)) \
         .with_for_update(skip_locked=True)
-    result_select = session.execute(stmt_select)
-
-    tokens = []
-    for t in result_select.columns('token'):
-        tokens.append(t.token)
+    tokens = session.execute(stmt_select).scalars().all()

     for t in chunks(tokens, 100):
         stmt_delete = delete(models.Token) \
Fix hardsigmoid/hardswish for proper device dispatch. Summary: make `hardsigmoid_backward` use tensoriterator but that can be done only after proper device dispatch. Pull Request resolved:
 - func: hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    CPU: hardsigmoid_backward
+    CUDA: hardsigmoid_backward

 - func: hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn

 - func: hardswish_backward(Tensor grad_output, Tensor self) -> Tensor
   use_c10_dispatcher: full
   python_module: nn
+  dispatch:
+    CPU: hardswish_backward
+    CUDA: hardswish_backward

 - func: leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
   python_module: nn
Consolidate bit_count / popCount methods Fixes
 __all__ = ['popCount']


-def popCount(v):
-	"""Return number of 1 bits (population count) of an integer.
+try:
+	bit_count = int.bit_count
+except AttributeError:
+	def bit_count(v):
+		return bin(v).count('1')

-	If the integer is negative, the number of 1 bits in the
-	twos-complement representation of the integer is returned. i.e.
-	``popCount(-30) == 28`` because -30 is::
+"""Return number of 1 bits (population count) of the absolute value of an integer.

-		1111 1111 1111 1111 1111 1111 1110 0010
-
-	Uses the algorithm from `HAKMEM item 169 <https://www.inwap.com/pdp10/hbaker/hakmem/hacks.html#item169>`_.
-
-	Args:
-		v (int): Value to count.
-
-	Returns:
-		Number of 1 bits in the binary representation of ``v``.
+See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
 """
-
-	if v > 0xFFFFFFFF:
-		return popCount(v >> 32) + popCount(v & 0xFFFFFFFF)
-
-	# HACKMEM 169
-	y = (v >> 1) & 0xDB6DB6DB
-	y = v - y - ((y >> 1) & 0xDB6DB6DB)
-	return (((y + (y >> 3)) & 0xC71C71C7) % 0x3F)
+popCount = bit_count
test: Split IAM template tests with parametrize See also:
"""Test IAM Policy templates are valid JSON.""" +import json + import jinja2 +import pytest from foremast.iam.construct_policy import render_policy_template from foremast.utils.templates import LOCAL_TEMPLATES @@ -18,6 +21,11 @@ def iam_templates(): yield iam_template_name [email protected](argnames='template_name', argvalues=iam_templates()) +def test_all_iam_templates(template_name): + """Verify all IAM templates render as proper JSON.""" + *_, service_json = template_name.split('/') + service, *_ = service_json.split('.') items = ['resource1', 'resource2'] @@ -27,6 +35,7 @@ def iam_templates(): 'resource2': 'user2', } + try: rendered = render_policy_template( account_number='', app='coreforrest', @@ -40,5 +49,7 @@ def iam_templates(): }, region='us-east-1', service=service) + except json.decoder.JSONDecodeError: + pytest.fail('Bad template: {0}'.format(template_name), pytrace=False) assert isinstance(rendered, list)
Fix UBSAN non-security crash type list The UBSAN_CRASH_TYPES_NON_SECURITY and UBSAN_CRASH_TYPES_SECURITY lists of crash_analyzer have overlapping elements. Since during the analysis, crash_type is compared to the security list first, the common elements can be removed from the non-security list.
@@ -74,13 +74,11 @@ UBSAN_CRASH_TYPES_NON_SECURITY = [
     'Integer-overflow',
     'Invalid-bool-value',
     'Invalid-builtin-use',
-    'Incorrect-function-pointer-type',
     'Invalid-enum-value',
     'Invalid-null-argument',
     'Invalid-null-return',
     'Misaligned-address',
     'No-return-value',
-    'Non-positive-vla-bound-value',
     'Pointer-overflow',
     'Potential-null-reference',
     'Undefined-shift',
Remove out-of-date TODO. Summary: Pull Request resolved:
@@ -41,7 +41,7 @@ inline void THTensor_maybe_zero_dim(THTensor *tensor, bool condition_when_zero_d
 }

 // [NOTE: nDimension vs nDimensionLegacyNoScalars vs nDimensionLegacyAll]
-// nDimension corresponds to the "true" ATen dimension. TODO: implement.
+// nDimension corresponds to the "true" ATen dimension.
 // nDimensionLegacyNoScalars correpsonds to the ATen dimension, except scalars are viewed as 1-dimensional tensors.
 // nDimensionLegacyAll corresponds to the ATen dimension, except scalars are viewed as 1-dimensional tensors
 // and tensors with a dimension of size zero are collapsed to 0-dimensional tensors.
conda_forge_yml.rst: Add `remote_ci_setup` option Users now have the option to override the `conda-forge-ci-setup` package by installing it from a remote channel.
@@ -41,6 +41,7 @@ Top-level fields * osx * provider * recipe_dir +* remote_ci_setup * skip_render * templates * test_on_native_only @@ -318,6 +319,16 @@ The relative path to the recipe directory. The default is: recipe_dir: recipe +remote_ci_setup +--------------- +This option can be used to override the default ``conda-forge-ci-setup`` package. +Can be given with ``${url or channel_alias}::package_name``, defaults to conda-forge +channel_alias if no prefix given. + +.. code-block:: yaml + + remote_ci_setup: "conda-forge-ci-setup=3" + skip_render ----------- This option specifies a list of files which conda smithy will skip rendering.
docs / minor change to note: minor change to markdown
@@ -34,7 +34,8 @@ Transactions from Hummingbot are normal transactions conducted on exchanges; the Hummingbot has the ability to send error logs to us. -!!! note Private keys and API keys are stored locally for the operation of the Hummingbot client only. At no point will private or API keys be shared to CoinAlpha or be used in any way other than to authorize transactions required for the operation of Hummingbot. +!!! note + Private keys and API keys are stored locally for the operation of the Hummingbot client only. At no point will private or API keys be shared to CoinAlpha or be used in any way other than to authorize transactions required for the operation of Hummingbot. ### Why would I want to send you my usage data? - **Get better support**: Granting access to your logs and client commands enables us to diagnose your issue more quickly and provide better support.
Add check for dependent packages of tuners before starting restful server * add module info for launcher check * update launcher.py Add packages check before starting restful server. * check sub-key * fix mistype * delete non-tuner in constants/ModuleName * Delete ModuleName to prevent double maintenance * only catch ModuleNotFoundError
import json import os +import sys import shutil import string -from subprocess import Popen, PIPE, call, check_output +from subprocess import Popen, PIPE, call, check_output, check_call import tempfile +from nni.constants import ModuleName from nni_annotation import * from .launcher_utils import validate_all_content from .rest_utils import rest_put, rest_post, check_rest_server, check_rest_server_quick, check_response @@ -282,6 +284,17 @@ def set_experiment(experiment_config, mode, port, config_file_name): def launch_experiment(args, experiment_config, mode, config_file_name, experiment_id=None): '''follow steps to start rest server and start experiment''' nni_config = Config(config_file_name) + + # check packages for tuner + if experiment_config.get('tuner') and experiment_config['tuner'].get('builtinTunerName'): + tuner_name = experiment_config['tuner']['builtinTunerName'] + module_name = ModuleName[tuner_name] + try: + check_call([sys.executable, '-c', 'import %s'%(module_name)]) + except ModuleNotFoundError as e: + print_error('The tuner %s should be installed through nnictl'%(tuner_name)) + exit(1) + # start rest server rest_process, start_time = start_rest_server(args.port, experiment_config['trainingServicePlatform'], mode, config_file_name, experiment_id) nni_config.set_config('restServerPid', rest_process.pid)
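The check added here boils down to asking whether a module can be imported in a fresh interpreter. A standalone sketch of that pattern follows; note that when the child process exits non-zero, `check_call` raises `CalledProcessError` in the parent, so that is what a generic helper would catch. The tuner-to-module mapping shown is hypothetical:

```python
import sys
from subprocess import DEVNULL, CalledProcessError, check_call


def module_is_importable(module_name):
    """Run 'python -c import <module>' in a child process so a missing
    dependency cannot pollute the current interpreter's import state."""
    try:
        check_call([sys.executable, "-c", "import %s" % module_name],
                   stdout=DEVNULL, stderr=DEVNULL)
        return True
    except CalledProcessError:
        return False


# Hypothetical mapping from built-in tuner names to the modules they need.
TUNER_MODULES = {"SMAC": "smac", "BOHB": "ConfigSpace"}
missing = [name for name, mod in TUNER_MODULES.items()
           if not module_is_importable(mod)]
print(missing)
```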
Save scheme/authority Since we go through the trouble of parsing it, might as well keep them. Especially if we want to be a "proxy", these might be important.
@@ -748,6 +748,9 @@ class HTTPRequest(object): return False path = b'%2F'.join(atoms) + if scheme is not EMPTY: + self.scheme = scheme + self.authority = authority self.path = path # Note that, like wsgiref and most other HTTP servers,
Move p2pd installation logic to install_p2pd.sh To remove the complexity in circleci.yml
@@ -72,7 +72,7 @@ geth_steps: &geth_steps sudo apt-get install -y build-essential; python -m geth.install $GETH_VERSION; fi - sudo ln -s /home/circleci/.py-geth/geth-$GETH_VERSION/bin/geth /usr/local/bin/geth + sudo ln -s $GETH_BINARY /usr/local/bin/geth geth version - run: name: run tox @@ -87,45 +87,19 @@ geth_steps: &geth_steps - ~/.py-geth key: cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} -p2pd_env: &p2pd_env - LIBP2P_DAEMON_VERSION: de7ca07 - GOPACKAGE: go1.11.5.linux-amd64.tar.gz - LIBP2P_DAEMON_REPO: github.com/libp2p/go-libp2p-daemon - p2pd_steps: &p2pd_steps steps: - checkout - - run: - name: generate a temporary file, just to get the checksum from LIBP2P_DAEMON_VERSION - command: echo "$LIBP2P_DAEMON_VERSION" > p2pd-version.txt - restore_cache: keys: - - cache-v1-{{ arch }}-p2pd-{{ checksum "p2pd-version.txt" }} + - cache-v1-{{ arch }}-p2pd-{{ checksum "./.circleci/install_p2pd.sh" }} - run: name: build libp2p daemon - command: | - P2PD_DIR=$HOME/.p2pd - P2PD_BINARY=$P2PD_DIR/p2pd-$LIBP2P_DAEMON_VERSION - if [ ! -e "$P2PD_BINARY" ]; then - wget https://dl.google.com/go/$GOPACKAGE - sudo tar -C /usr/local -xzf $GOPACKAGE - export GOPATH=$HOME/go - export GOROOT=/usr/local/go - export PATH=$GOROOT/bin:$GOPATH/bin:$PATH - go version - go get $LIBP2P_DAEMON_REPO - cd $GOPATH/src/$LIBP2P_DAEMON_REPO - git checkout $LIBP2P_DAEMON_VERSION - make bin - mkdir -p $P2PD_DIR - cp `which p2pd` $P2PD_BINARY - cd - - fi - sudo ln -s $P2PD_BINARY /usr/local/bin/p2pd + command: ./.circleci/install_p2pd.sh - save_cache: paths: - - ~/.p2pd - key: cache-v1-{{ arch }}-p2pd-{{ checksum "p2pd-version.txt" }} + - $HOME/.p2pd + key: cache-v1-{{ arch }}-p2pd-{{ checksum "./.circleci/install_p2pd.sh" }} - restore_cache: keys: - cache-v1-{{ arch }}-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} @@ -261,7 +235,6 @@ jobs: - image: circleci/python:3.6 environment: TOXENV: py36 - <<: *p2pd_env <<: *p2pd_steps py36-wheel-cli: <<: *common @@ -299,7 +272,6 @@ jobs: - image: circleci/python:3.7 environment: TOXENV: py37 - <<: *p2pd_env <<: *p2pd_steps py37-wheel-cli: <<: *common
refactor: Show versions from Installed Applications to show "real" versions synced with the site database
@@ -223,30 +223,31 @@ def install_app(context, apps): @click.command('list-apps') [email protected]('--only-apps', is_flag=True) @pass_context -def list_apps(context, only_apps): +def list_apps(context): "List apps in site" - import click - titled = False - - if len(context.sites) > 1: - titled = True for site in context.sites: frappe.init(site=site) frappe.connect() - apps = sorted(frappe.get_installed_apps()) + site_title = click.style(f"{site}", fg="green") if len(context.sites) > 1 else "" + apps = frappe.get_single("Installed Applications").installed_applications + + if apps: + name_len, ver_len, branch_len = [ + max([len(x.get(y)) for x in apps]) for y in ["app_name", "app_version", "git_branch"] + ] + template = "{{0:{0}}} {{1:{1}}} {{2}}".format(name_len, ver_len, branch_len) - if only_apps: - apps.remove("frappe") + installed_applications = [template.format(app.app_name, app.app_version, app.git_branch) for app in apps] + applications_summary = "\n".join(installed_applications) + summary = f"\n{site_title}\n{applications_summary}".strip() - if titled: - summary = "{}{}".format(click.style(site + ": ", fg="green"), ", ".join(apps)) else: - summary = "\n".join(apps) + applications_summary = "\n".join(frappe.get_installed_apps()) + summary = f"\n{site_title}\n{applications_summary}".strip() - if apps and summary.strip(): + if applications_summary and summary: print(summary) frappe.destroy()
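The formatting trick in this refactor, measuring the widest value per column and building a `str.format` template from those widths, works on any list of records. A small self-contained sketch with made-up app data:

```python
rows = [
    ("frappe", "14.12.0", "version-14"),
    ("erpnext", "14.11.1", "version-14"),
    ("hrms", "1.0.0", "develop"),
]

# Column widths come from the longest value in each column.
name_len, ver_len, branch_len = [max(len(row[i]) for row in rows) for i in range(3)]
template = "{{0:{0}}} {{1:{1}}} {{2}}".format(name_len, ver_len, branch_len)

for name, version, branch in rows:
    print(template.format(name, version, branch))
# frappe  14.12.0 version-14
# erpnext 14.11.1 version-14
# hrms    1.0.0   develop
```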
CustomPageXML parsing: allow ":" in value In situations where Regions have `:`, this would yield an error, such as with the Segmonto syntax:
```xml
<TextRegion id="region_1601885451429_143" custom="structure {type:NumberingZone:page;}">
```
This fixes parsing for this CustomPAGE convention.
@@ -106,8 +106,8 @@ def parse_page(filename): tag_vals = {} vals = [val.strip() for val in vals.split(';') if val.strip()] for val in vals: - key, val = val.split(':') - tag_vals[key] = val + key, *val = val.split(':') + tag_vals[key] = ":".join(val) o[tag.strip()] = tag_vals return o
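The fix works because starred unpacking keeps every piece after the first `:` and rejoining them restores the original value; `val.split(':', 1)` would behave the same way. A minimal sketch using the Segmonto value from the example above:

```python
def parse_custom_value(val):
    # Split on the first ':' only; everything after it is the value.
    key, *rest = val.split(':')
    return key.strip(), ":".join(rest)


print(parse_custom_value("type:NumberingZone:page"))
# ('type', 'NumberingZone:page')
```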
BUG: Fix nan error in degenerate euler axes For certain gimbal-locked cases, it appears that certain entries in `dcm_transformed` are greater than unit norm. That results in nan values when the arccos function is used.
@@ -57,6 +57,11 @@ def _compute_euler_from_dcm(dcm, seq, extrinsic=False): # Step 4 angles = np.empty((num_rotations, 3)) + # Ensure less than unit norm + positive_unity = dcm_transformed[:, 2, 2] > 1 + negative_unity = dcm_transformed[:, 2, 2] < -1 + dcm_transformed[positive_unity, 2, 2] = 1.0 + dcm_transformed[negative_unity, 2, 2] = -1.0 angles[:, 1] = np.arccos(dcm_transformed[:, 2, 2]) # Steps 5, 6
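The underlying issue is generic: floating-point round-off can push a cosine marginally outside [-1, 1], and `np.arccos` then returns nan. The patch clamps the offending entries with boolean masks; a common equivalent, shown here purely as an illustration rather than the library's code, is `np.clip`:

```python
import numpy as np

cos_vals = np.array([1.0 + 1e-12, -1.0 - 1e-12, 0.5])

print(np.arccos(cos_vals))
# [nan nan 1.04719755]  (numpy also emits an "invalid value" RuntimeWarning)

print(np.arccos(np.clip(cos_vals, -1.0, 1.0)))
# [0.         3.14159265 1.04719755]
```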
Remove excess indentation in broadcast alert In response to: [^1]. [^1]:
+import inspect from datetime import datetime from flask import current_app @@ -69,7 +70,7 @@ def _create_p1_zendesk_alert(broadcast_message): if broadcast_message.status != BroadcastStatusType.BROADCASTING: return - message = f""" + message = inspect.cleandoc(f""" Broadcast Sent https://www.notifications.service.gov.uk/services/{broadcast_message.service_id}/current-alerts/{broadcast_message.id} @@ -80,7 +81,7 @@ def _create_p1_zendesk_alert(broadcast_message): Follow the runbook to check the broadcast went out OK: https://docs.google.com/document/d/1J99yOlfp4nQz6et0w5oJVqi-KywtIXkxrEIyq_g2XUs/edit#heading=h.lzr9aq5b4wg - """.strip() + """) ticket = NotifySupportTicket( subject='Live broadcast sent',
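The difference between `str.strip()` and `inspect.cleandoc()` on a triple-quoted block is exactly the excess indentation this commit removes: `strip()` only trims the ends of the whole string, while `cleandoc()` also drops the indentation shared by every line. A tiny illustration with placeholder text:

```python
import inspect

raw = """
    Broadcast Sent

    Follow the runbook to check the broadcast went out OK.
"""

print(raw.strip())            # lines after the first keep their 4-space indent
print(inspect.cleandoc(raw))  # common leading whitespace removed from every line
```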
Adjusted unit tests for stride_tricks Fixed coverage of Raise-Error functions
@@ -13,8 +13,11 @@ class TestStrideTricks(unittest.TestCase): # invalid value ranges with self.assertRaises(ValueError): ht.core.stride_tricks.broadcast_shape((5, 4), (5,)) + with self.assertRaises(ValueError): ht.core.stride_tricks.broadcast_shape((5, 4), (2, 3)) + with self.assertRaises(ValueError): ht.core.stride_tricks.broadcast_shape((5, 2), (5, 2, 3)) + with self.assertRaises(ValueError): ht.core.stride_tricks.broadcast_shape((2, 1), (8, 4, 3)) def test_sanitize_axis(self): @@ -30,11 +33,13 @@ class TestStrideTricks(unittest.TestCase): # invalid types with self.assertRaises(TypeError): ht.core.stride_tricks.sanitize_axis((5, 4), 1.0) + with self.assertRaises(TypeError): ht.core.stride_tricks.sanitize_axis((5, 4), 'axis') # invalid value ranges with self.assertRaises(ValueError): ht.core.stride_tricks.sanitize_axis((5, 4), 2) + with self.assertRaises(ValueError): ht.core.stride_tricks.sanitize_axis((5, 4), -3) def test_sanitize_shape(self):
Adds launch bounds for CTC loss kernel Summary: Fixes Pull Request resolved:
@@ -254,7 +254,9 @@ std::tuple<Tensor, Tensor> ctc_loss_gpu_template(const Tensor& log_probs, const // The second (backward) half of the forward backward algorithm, (10) and (11). This is parallel to the // alpha kernel above. (As mentioned above, it might make sense do the calculation in the alpha kernel.) template<typename scalar_t, typename target_t> -__global__ void ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data, +__global__ void +__launch_bounds__((std::is_same<scalar_t, float>::value ? 1024 : 896), 1) +ctc_loss_backward_log_beta_gpu_kernel(scalar_t* __restrict__ log_beta_data, const scalar_t*log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const target_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, int64_t lp_input_stride, int64_t lp_batch_stride, int64_t lp_char_stride,
Use is_connected() instead of _connected in checks Was doing a falsy check on an Event object instead of using the (unused) is_connected() function.
@@ -261,7 +261,7 @@ class VoiceClient: Disconnects this voice client from voice. """ - if not force and not self._connected.is_set(): + if not force and not self.is_connected(): return self.stop() @@ -348,7 +348,7 @@ class VoiceClient: source is not a :class:`AudioSource` or after is not a callable. """ - if not self._connected: + if not self.is_connected(): raise ClientException('Not connected to voice.') if self.is_playing():
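The bug class here is easy to reproduce: an `Event` object defines no `__bool__`, so it is always truthy regardless of whether it has been set, which silently defeats `if not some_event:` style guards. A short demonstration with a plain `threading.Event`:

```python
import threading

connected = threading.Event()

print(bool(connected))      # True  -- the object is truthy even though the flag is unset
print(connected.is_set())   # False -- the state the check actually cares about

if not connected:           # never taken: this branch is dead code
    print("unreachable")
```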
Update CHANGELOG.md Added information about
## Other changes - [Rule Test] Fix issue related to --start/--end/--days params - [#424](https://github.com/jertel/elastalert2/pull/424), [#433](https://github.com/jertel/elastalert2/pull/433) - @thican +- [TheHive] Reduce risk of sourceRef collision for Hive Alerts by using full UUID -[#513](https://github.com/jertel/elastalert2/pull/513) - @fwalloe - Changed the wording of ElastAlert to ElastAlert 2 and Update FAQ -[#446](https://github.com/jertel/elastalert2/pull/446) - @nsano-rururu - Add missing show_ssl_warn and silence_qk_value params to docs - [#469](https://github.com/jertel/elastalert2/pull/469) - @jertel - [OpsGenie] Clarify documentation for URL endpoint to use in European region - [#475](https://github.com/jertel/elastalert2/pull/475) - @nsano-rururu
Update CONTRIBUTING.md Change Reddit to Google Groups Mailing List
@@ -8,10 +8,9 @@ If you encounter any issues installing or using NetBox, try one of the following Join the #netbox channel on [Freenode IRC](https://freenode.net/). You can connect to Freenode at irc.freenode.net using an IRC client, or you can use their [webchat client](https://webchat.freenode.net/). -### Reddit +### Mailing List -We have established [/r/netbox](https://www.reddit.com/r/netbox) on Reddit for NetBox issues and general discussion. -Reddit registration is free and does not require providing an email address (although it is encouraged). +We have established a Google Groups Mailing List for issues and general discussion. You can find us [here]( https://groups.google.com/forum/#!forum/netbox-discuss). ## Reporting Bugs @@ -24,7 +23,7 @@ click "add a reaction" in the top right corner of the issue and add a thumbs up comment describing how it's affecting your installation. This will allow us to prioritize bugs based on how many users are affected. -* If you haven't found an existing issue that describes your suspected bug, please inquire about it on IRC or Reddit. +* If you haven't found an existing issue that describes your suspected bug, please inquire about it on IRC or Google Groups. **Do not** file an issue until you have received confirmation that it is in fact a bug. Invalid issues are very distracting and slow the pace at which NetBox is developed.
Fix versionadded reference for new eauth token modularity The version should be `Oxygen`, not `2017.7.2`. Refs
@@ -529,7 +529,7 @@ def sync_roster(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): def sync_eauth_tokens(saltenv='base', extmod_whitelist=None, extmod_blacklist=None): ''' - .. versionadded:: 2017.7.2 + .. versionadded:: Oxygen Sync eauth token modules from ``salt://_tokens`` to the master
update test on cli gui search: one less package in local registry because we removed p2p_noise
@@ -141,7 +141,7 @@ def test_real_search(): assert response_list.status_code == 200 data = json.loads(response_list.get_data(as_text=True)) - assert len(data) == 13, data + assert len(data) == 12, data i = 0 assert data[i]["id"] == "fetchai/gym:0.1.0"
Upgrade django-allow-cidr to 0.3.0 Handles host header values with ports.
@@ -462,9 +462,9 @@ funcsigs==1.0.2 \ --hash=sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50 tzlocal==1.5.1 \ --hash=sha256:4ebeb848845ac898da6519b9b31879cf13b6626f7184c496037b818e238f2c4e -django-allow-cidr==0.1.0 \ - --hash=sha256:94b436b7ebf0bba9c1c4bddc28ada13aa86baa692c5c10c3012837bb8cf44a07 \ - --hash=sha256:6b4e6f28109aebf06645e0218212acb2ba4ac6e98ee370fe9da0a90a4c0cf2b0 +django-allow-cidr==0.3.0 \ + --hash=sha256:63436f3820dd788863ef35f4fad5499f5ad23063b52033b03afdb1df7929797f \ + --hash=sha256:f19558a27ea558ad2174d15874f8eaf36d9e50e16587c7558d2a3bdc69c0d3bb netaddr==0.7.19 \ --hash=sha256:56b3558bd71f3f6999e4c52e349f38660e54a7a8a9943335f73dfc96883e08ca \ --hash=sha256:38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd
undo some of my changes The case I was trying to cover was already covered but didn't work due to a bug; fixing the bugs in the next commit
@@ -294,9 +294,6 @@ class MeasurementControl(Instrument): return isinstance(obj, type) and issubclass(obj, test_obj) self.save_optimization_settings() self.adaptive_function = self.af_pars.pop('adaptive_function') - # Not sure where this line belongs, but for now is only used here - self.expects_scalar = is_subclass(self.adaptive_function, - SKOptLearnerND) if self.live_plot_enabled(): self.initialize_plot_monitor_adaptive() for sweep_function in self.sweep_functions: @@ -558,9 +555,6 @@ class MeasurementControl(Instrument): if hasattr(vals, '__iter__'): if len(vals) > 1: vals = vals[self.par_idx] - # return a scalar for optmizer learners - if self.expects_scalar: - vals = vals[0] return vals def finish(self, result):
Fix pybind11 warnings in python_rpc_handler.cpp Summary: Pull Request resolved: The warnings related to usage of the deprecated != operator. Instead of checking the member field on every function call, we can check it once, on construction of PythonRpcHandler. Test Plan: Imported from OSS
@@ -4,13 +4,27 @@ namespace torch { namespace distributed { namespace rpc { +namespace { + +py::object getFunction(const py::object& module, const char* name) { + py::object fn = module.attr(name); + TORCH_CHECK( + py::isinstance<py::function>(fn), + "attribute ", + name, + " is not a function"); + return fn; +} + +} // namespace + PythonRpcHandler::PythonRpcHandler() { AutoGIL ag; py::object module = py::module::import("torch.distributed.internal_rpc_utils"); - runUDFFunction_ = module.attr("run_python_udf_internal"); - loadResultFunction_ = module.attr("load_python_udf_result_internal"); - serializeFunction_ = module.attr("serialize"); + runUDFFunction_ = getFunction(module, "run_python_udf_internal"); + loadResultFunction_ = getFunction(module, "load_python_udf_result_internal"); + serializeFunction_ = getFunction(module, "serialize"); } PythonRpcHandler& PythonRpcHandler::getInstance() { @@ -24,7 +38,6 @@ std::vector<char> PythonRpcHandler::generatePythonUDFResult( std::vector<torch::Tensor>& responseTensorTable) { AutoGIL ag; auto pargs = py::bytes(pickledPayload.data(), pickledPayload.size()); - TORCH_CHECK(runUDFFunction_ != nullptr, "runUDFFunction_ is nullptr"); py::tuple pres = serializeFunction_(runUDFFunction_(pargs, requestTensorTable)); const auto& presStr = pres[0].cast<std::string>(); @@ -38,7 +51,6 @@ py::object PythonRpcHandler::loadPythonUDFResult( const std::vector<torch::Tensor>& tensorTable) { AutoGIL ag; auto pargs = py::bytes(pickledPayload.data(), pickledPayload.size()); - TORCH_CHECK(loadResultFunction_ != nullptr, "loadResultFunction_ is nullptr"); return loadResultFunction_(pargs, tensorTable); }
(buildkite 2/n) Update Buildkite medium queue Summary: Moves us to the new Buildkite queues again, now that AWS has increased our EC2 instance quotas in us-west-2 Depends on D5770 Test Plan: buildkite Reviewers: dgibson, alangenfeld
@@ -24,12 +24,8 @@ def wait_step(): class BuildkiteQueue(Enum): - """These are the Buildkite CloudFormation queues that we use. All queues with "-p" suffix are - provisioned by Pulumi. - """ - DOCKER = "docker-p" - MEDIUM = "medium-v4-3-2" + MEDIUM = "buildkite-medium-v5-0-1" WINDOWS = "windows-medium" @classmethod @@ -40,7 +36,6 @@ def contains(cls, value): class StepBuilder: def __init__(self, label, key=None): self._step = { - # use Pulumi-managed medium queue by default "agents": {"queue": BuildkiteQueue.MEDIUM.value}, "label": label, "timeout_in_minutes": TIMEOUT_IN_MIN,
Prepare 2.5.2rc2 [ci skip-rust]
See https://www.pantsbuild.org/v2.5/docs/release-notes-2-5 for an overview of the changes in this release series. +## 2.5.2rc2 (Aug 06, 2021) + +### Bug fixes + +* Resolve plugins using the PEX --python option. (cherrypick of #12500) ([#12505](https://github.com/pantsbuild/pants/pull/12505)) + ## 2.5.2rc1 (Jul 28, 2021) ### New Features
Add missing TORCH_CUDA_API annotation to throw_nccl_error Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -19,7 +19,7 @@ namespace nccl { // Don't use them outside of these files. namespace detail { -void throw_nccl_error(ncclResult_t status); +TORCH_CUDA_API void throw_nccl_error(ncclResult_t status); static inline void NCCL_CHECK(ncclResult_t status) { if (status != ncclSuccess) {
feat(device): z2m support for RDM001 (Philips) related to
@@ -115,6 +115,18 @@ class HueSmartButtonLightController(LightController): class Philips929003017102LightController(LightController): + def get_z2m_actions_mapping(self) -> DefaultActionsMapping: + return { + "left_press": Light.TOGGLE, + # "left_press_release": "", + "left_hold": Light.HOLD_BRIGHTNESS_TOGGLE, + "left_hold_release": Light.RELEASE, + "right_press": Light.TOGGLE, + # "right_press_release": "", + "right_hold": Light.HOLD_BRIGHTNESS_TOGGLE, + "right_hold_release": Light.RELEASE, + } + def get_deconz_actions_mapping(self) -> DefaultActionsMapping: return { # 1000: "", # Initial press
Do not render anything while training in Colab Fix
"\n", "env = load_environment(env_config)\n", "agent = load_agent(agent_config, env)\n", - "evaluation = Evaluation(env, agent, num_episodes=3000, display_env=False)\n", + "evaluation = Evaluation(env, agent, num_episodes=3000, display_env=False, display_agent=False)\n", "print(f\"Ready to train {agent} on {env}\")" ], "execution_count": null,
Fix --selective-upgrade compatibility with Python 2 `list.copy()` is equivalent to `list[:]`, but the former is only available in Python 3.
@@ -1840,7 +1840,7 @@ def do_install( # Support for --selective-upgrade. if selective_upgrade: - for i, package_name in enumerate(package_names.copy()): + for i, package_name in enumerate(package_names[:]): section = project.packages if not dev else project.dev_packages package = convert_deps_from_pip(package_name) package__name = list(package.keys())[0]
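Copying matters when the loop body mutates the list being iterated; `names[:]` (like `list.copy()`, which only exists on Python 3) freezes the sequence the loop walks. A minimal illustration with made-up package names:

```python
names = ["requests", "six", "flask"]

for i, name in enumerate(names[:]):   # iterate a snapshot; works on Python 2 and 3
    if name == "six":
        names.remove(name)            # safe: the snapshot being iterated is untouched

print(names)                          # ['requests', 'flask']
```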
Fix test that broke on prerelease builds The page I was using for the test apparently was updated to use lightning components, when the test requires non-lightning components.
@@ -81,25 +81,26 @@ Non-lightning based form - checkbox ... e.g.: <input type="checkbox"> [Setup] Run keywords - ... Go to page Home ServiceCrewMember + ... Go to page Home Campaign ... AND Click Object Button New - ... AND Wait for modal New ServiceCrewMember + ... AND Wait for modal New Campaign [Teardown] Click modal button Cancel # first, let's make sure that the keyword returns an element # that is a plain html input element - ${element}= Get webelement label:Leader + ${element}= Get webelement label:Active Should be equal ${element.tag_name} input + ... Expected to find an <input> element but did not. # next, set the checkbox and assert it is checked Input form data - ... Leader checked - Checkbox should be selected label:Leader + ... Active checked + Checkbox should be selected label:Active # finally, unset it and assert it is unchecked Input form data - ... Leader unchecked - Checkbox should not be selected label:Leader + ... Active unchecked + Checkbox should not be selected label:Active Lightning based form - radiobutton [Documentation]
ui: Hide loading indicators for non-existent narrows. We were still displaying the loading spinner even after displaying the error text, which was confusing as we do not try to fetch again. This fixes it.
@@ -248,6 +248,7 @@ exports.load_messages = function (opts) { // retry or display a connection error. // // FIXME: Warn the user when this has happened? + message_scroll.hide_indicators(); const data = { messages: [], };
Tweaks to robot.rst after a review Robot command-line options => Robot CLI options
@@ -683,8 +683,8 @@ The Robot Framework command-line test runner supports more than 50 <http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#command-line-options-for-test-execution>`_. To make the ``robot`` task simpler to use, we've only exposed a few of the command-line options at the task level. For example, the ``robot`` task -options ``include`` and ``exclude`` directly map to the Robot -command-line options ``--include`` and ``--exclude``. These options +options ``include`` and ``exclude`` directly map to the Robot CLI +options ``--include`` and ``--exclude``. These options are specified the same way as task options elsewhere in the CumulusCI framework, using either command-line options as shown above or by including them in the ``options`` section of a task configuration in ``cumulusci.yml``:: @@ -694,17 +694,17 @@ section of a task configuration in ``cumulusci.yml``:: options: include: <value> -Other Robot -command-line options, such as ``tagstatlink``, ``expandkeywords``, and +Other Robot CLI +options, such as ``tagstatlink``, ``expandkeywords``, and many others, have no direct task option counterpart. -There may be times when you want to use some of the Robot Framework +There may be times when you want to use some of the Robot CLI options which haven't been exposed as task options. We support that through an additional ``options`` section nested inside the typical task options in ``cumulusci.yml``. For example, one of the most common uses of this inner ``options`` section is to -use the Robot command line option ``--outputdir`` to specify where Robot should +use the Robot CLI option ``--outputdir`` to specify where Robot should write its report and log files. To configure this option for the task, you must remove the leading dashes from the option name and then place that option @@ -718,8 +718,8 @@ and value in a nested ``options`` section. options: outputdir: robot/my_project/results -Any Robot command-line option which takes a value can be specified -this way. For example, to use the robot option ``--name`` along with +Any Robot CLI option which takes a value can be specified +this way. For example, to use the Robot CLI option ``--name`` along with ``--outputdir``, your ``cumulusci.yml`` file should look like this: @@ -736,7 +736,7 @@ this: Configuring the ``libdoc`` task --------------------------- -If you have defined a robot resource file named MyProject.resource and +If you have defined a Robot resource file named MyProject.resource and placed it in the ``resources`` folder, you can add the following configuration to your cumulusci.yml file in order to enable the ``robot_libdoc`` task to generate documentation:
Update android_bankbot.txt Update for Reference section
@@ -86,8 +86,10 @@ vodafone5gapps.com http://218.187.103.198 27.255.64.95:8080 -# Reference: https://twitter.com/malwrhunterteam/status/1252287608274722817 +# Reference: https://twitter.com/malwrhunterteam/status/1252287608274722817 (# Android variation) # Reference: https://www.virustotal.com/gui/file/10cf5bdab95219661759bc58d572379953233ec44b30bf2f83a89f6058610f09/detection +# Reference: https://twitter.com/ninoseki/status/1253272702573395972 (# iOS variation) +# Reference: https://www.virustotal.com/gui/file/748b9f36e5a738665d082b347b5b1f4448d06a70906a32b52b77acd5aa70052e/detection 23.251.45.232:8080
llvm, composition: Do not switch to 'bin_execute = True' for nested compositions 'True' allows graceful fallback which is not what we want when using 'LLVM'
@@ -8905,8 +8905,6 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): # Compile all mechanism wrappers for m in mechanisms: _comp_ex._set_bin_node(m) - - bin_execute = True except Exception as e: if bin_execute is not True: raise e from None
Review and update of component: removed 'uniform' from lognormal, changed assertion to number_of_nodes, changed soil depth to 0.005 if negative.
@@ -314,7 +314,7 @@ class LandslideProbability(Component): size=self.n) self.Re /= 1000. # Convert mm to m # Lognormal Distribution - Uniform in space - elif self.groundwater__recharge_distribution == 'lognormal_uniform': + elif self.groundwater__recharge_distribution == 'lognormal': assert (groundwater__recharge_mean != None), ( 'Input mean of the distribution!') assert (groundwater__recharge_standard_deviation != None), ( @@ -332,10 +332,10 @@ class LandslideProbability(Component): # Lognormal Distribution - Variable in space elif self.groundwater__recharge_distribution == 'lognormal_spatial': assert (groundwater__recharge_mean.shape[0] == ( - self.grid.number_of_core_nodes)), ( + self.grid.number_of_nodes)), ( 'Input array should be of the length of grid.number_of_nodes!') assert (groundwater__recharge_standard_deviation.shape[0] == ( - self.grid.number_of_core_nodes)), ( + self.grid.number_of_nodes)), ( 'Input array should be of the length of grid.number_of_nodes!') self.recharge_mean = groundwater__recharge_mean self.recharge_stdev = groundwater__recharge_standard_deviation @@ -411,11 +411,12 @@ class LandslideProbability(Component): Tmax = self.Tmode+(0.1*self.Tmode) self.T = np.random.triangular(Tmin, self.Tmode, Tmax, size=self.n) # Cohesion - # if provide fields of min and max C, uncomment 2 lines below + # if don't provide fields of min and max C, uncomment 2 lines below # Cmin = self.Cmode-0.3*self.Cmode # Cmax = self.Cmode+0.3*self.Cmode self.C = np.random.triangular(self.Cmin, self.Cmode, self.Cmax, size=self.n) + # phi - internal angle of friction provided in degrees phi_min = self.phi_mode-0.18*self.phi_mode phi_max = self.phi_mode+0.32*self.phi_mode @@ -426,7 +427,7 @@ class LandslideProbability(Component): hs_max = self.hs_mode+0.1*self.hs_mode self.hs = np.random.triangular(hs_min, self.hs_mode, hs_max, size=self.n) - self.hs[self.hs <= 0.] = 0.0001 + self.hs[self.hs <= 0.] = 0.005 # calculate Factor of Safety for n number of times # calculate components of FS equation @@ -484,7 +485,6 @@ class LandslideProbability(Component): self.prob_fail[i] = self.landslide__probability_of_failure self.landslide__factor_of_safety_distribution[i] = ( self.FS_distribution) - # stores FS values from last loop (node) # replace unrealistic values in arrays self.mean_Relative_Wetness[ self.mean_Relative_Wetness < 0.] = 0. # so can't be negative
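The sampling idiom used throughout this component, a triangular distribution centered on a mode with a fractional spread plus a floor on non-physical values, is easy to sketch in isolation. The mode and spread below are made up:

```python
import numpy as np

n = 1000
hs_mode = 0.05                                    # hypothetical soil depth mode, in metres
hs = np.random.triangular(0.9 * hs_mode, hs_mode, 1.1 * hs_mode, size=n)
hs[hs <= 0.0] = 0.005                             # defensive floor, as in the patch (a no-op for these bounds)
print(hs.min(), hs.max())
```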