Dataset columns: message (string, lengths 13-484) and diff (string, lengths 38-4.63k)
When adding new item allocations, filter the available stock items:
- Must match the appropriate part
- Remove items that are already allocated
@@ -12,6 +12,7 @@ from django.forms import HiddenInput from part.models import Part from .models import Build, BuildItem +from stock.models import StockItem from .forms import EditBuildForm, EditBuildItemForm from InvenTree.views import AjaxView, AjaxUpdateView, AjaxCreateView @@ -145,7 +146,9 @@ class BuildItemCreate(AjaxCreateView): # If the Build object is specified, hide the input field. # We do not want the users to be able to move a BuildItem to a different build - if form['build'].value() is not None: + build_id = form['build'].value() + + if build_id is not None: form.fields['build'].widget = HiddenInput() # If the sub_part is supplied, limit to matching stock items @@ -153,9 +156,17 @@ class BuildItemCreate(AjaxCreateView): if part_id: try: - part = Part.objects.get(pk=part_id) + Part.objects.get(pk=part_id) + query = form.fields['stock_item'].queryset + + # Only allow StockItem objects which match the current part query = query.filter(part=part_id) + + if build_id is not None: + # Exclude StockItem objects which are already allocated to this build and part + query = query.exclude(id__in=[item.stock_item.id for item in BuildItem.objects.filter(build=build_id, stock_item__part=part_id)]) + form.fields['stock_item'].queryset = query except Part.DoesNotExist: pass
Update config.py format config with black
@@ -5,7 +5,6 @@ from core.constants import YETI_ROOT class Dictionary(dict): - def __getattr__(self, key): return self.get(key, None) @@ -14,10 +13,9 @@ class Dictionary(dict): class Config: - def __init__(self): config = ConfigParser(allow_no_value=True) - config.read(os.path.join(YETI_ROOT, "yeti.conf"), encoding='utf-8') + config.read(os.path.join(YETI_ROOT, "yeti.conf"), encoding="utf-8") for section in config.sections(): setattr(self, section, Dictionary()) @@ -50,16 +48,16 @@ class Config: yeti_config = Config() -yeti_config.set_default_value('auth', 'module', 'local') -yeti_config.set_default_value('auth', 'apache_variable', 'REMOTE_USER') -yeti_config.set_default_value('mongodb', 'host', '127.0.0.1') -yeti_config.set_default_value('mongodb', 'port', 27017) -yeti_config.set_default_value('mongodb', 'database', 'yeti') -yeti_config.set_default_value('mongodb', 'username', None) -yeti_config.set_default_value('mongodb', 'password', None) -yeti_config.set_default_value('redis', 'host', '127.0.0.1') -yeti_config.set_default_value('redis', 'port', 6379) -yeti_config.set_default_value('redis', 'database', 0) -yeti_config.set_default_value('proxy', 'http', None) -yeti_config.set_default_value('proxy', 'https', None) -yeti_config.set_default_value('logging', 'filename', '/var/log/yeti/user_activity.log') +yeti_config.set_default_value("auth", "module", "local") +yeti_config.set_default_value("auth", "apache_variable", "REMOTE_USER") +yeti_config.set_default_value("mongodb", "host", "127.0.0.1") +yeti_config.set_default_value("mongodb", "port", 27017) +yeti_config.set_default_value("mongodb", "database", "yeti") +yeti_config.set_default_value("mongodb", "username", None) +yeti_config.set_default_value("mongodb", "password", None) +yeti_config.set_default_value("redis", "host", "127.0.0.1") +yeti_config.set_default_value("redis", "port", 6379) +yeti_config.set_default_value("redis", "database", 0) +yeti_config.set_default_value("proxy", "http", None) +yeti_config.set_default_value("proxy", "https", None) +yeti_config.set_default_value("logging", "filename", "/var/log/yeti/user_activity.log")
[varLib] fix undefined name 'masters' Ouch!
@@ -272,6 +272,7 @@ def build(designspace_filename, master_finder=lambda s:s, axisMap=None): axes = ds['axes'] if 'axes' in ds else [] if 'sources' not in ds or not ds['sources']: raise VarLibError("no 'sources' defined in .designspace") + masters = ds['sources'] instances = ds['instances'] if 'instances' in ds else [] base_idx = None
(trivial) qt settings: fix a type hint (no change in behaviour)
@@ -192,8 +192,8 @@ class SettingsDialog(WindowModalDialog): msat_cb = QCheckBox(_("Show amounts with msat precision")) msat_cb.setChecked(bool(self.config.get('amt_precision_post_satoshi', False))) - def on_msat_checked(b: bool): - prec = 3 if b else 0 + def on_msat_checked(v): + prec = 3 if v == Qt.Checked else 0 if self.config.amt_precision_post_satoshi != prec: self.config.amt_precision_post_satoshi = prec self.config.set_key('amt_precision_post_satoshi', prec)
show 10 slowest core tests Summary: the dagster core tests are hitting 18m; we need to figure out which ones are worst Test Plan: bk Reviewers: dgibson, jordansanders, max Subscribers: rexledesma
@@ -21,13 +21,13 @@ commands = flake8 . --count --exclude=./.*,dagster/seven/__init__.py --select=E9,F63,F7,F82 --show-source --statistics echo -e "--- \033[0;32m:pytest: Running tox tests\033[0m" - api_tests: pytest -vv ./dagster_tests/api_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - cli_tests: pytest -vv ./dagster_tests/cli_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - core_tests: pytest -vv ./dagster_tests/core_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - daemon_tests: pytest -vv ./dagster_tests/daemon_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - scheduler_tests: pytest -vv ./dagster_tests/scheduler_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - scheduler_tests_old_pendulum: pytest -vv ./dagster_tests/scheduler_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} - general_tests: pytest -vv ./dagster_tests/general_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} {posargs} + api_tests: pytest -vv ./dagster_tests/api_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + cli_tests: pytest -vv ./dagster_tests/cli_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + core_tests: pytest -vv ./dagster_tests/core_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + daemon_tests: pytest -vv ./dagster_tests/daemon_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + scheduler_tests: pytest -vv ./dagster_tests/scheduler_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + scheduler_tests_old_pendulum: pytest -vv ./dagster_tests/scheduler_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} + general_tests: pytest -vv ./dagster_tests/general_tests --junitxml=test_results.xml {env:COVERAGE_ARGS} --durations 10 {posargs} !windows: coverage report --omit='.tox/*,**/test_*.py' --skip-covered !windows: coverage html --omit='.tox/*,**/test_*.py'
Include kubernetes pbft yaml in docs artifacts Preserve the kubernetes pbft yaml file in the docs build artifacts for publishing.
@@ -71,6 +71,7 @@ html: templates cli @cp $(SAWTOOTH)/docker/compose/sawtooth-default-pbft.yaml $(HTMLDIR)/app_developers_guide/sawtooth-default-pbft.yaml @cp $(SAWTOOTH)/docker/compose/sawtooth-default-poet.yaml $(HTMLDIR)/app_developers_guide/sawtooth-default-poet.yaml @cp $(SAWTOOTH)/docker/kubernetes/sawtooth-kubernetes-default.yaml $(HTMLDIR)/app_developers_guide/sawtooth-kubernetes-default.yaml + @cp $(SAWTOOTH)/docker/kubernetes/sawtooth-kubernetes-default-pbft.yaml $(HTMLDIR)/app_developers_guide/sawtooth-kubernetes-default-pbft.yaml @cp $(SAWTOOTH)/docker/kubernetes/sawtooth-kubernetes-default-poet.yaml $(HTMLDIR)/app_developers_guide/sawtooth-kubernetes-default-poet.yaml dirhtml: templates cli
fix: always use parameter fallback Even when the filesystem encoding and sys.argv encoding are the same, we should still fall back to trying UTF-8 when decoding command-line arguments.
@@ -119,6 +119,8 @@ class StringParamType(ParamType): value = value.decode(fs_enc) except UnicodeError: value = value.decode('utf-8', 'replace') + else: + value = value.decode('utf-8', 'replace') return value return value
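A minimal standalone sketch of the decode-with-fallback pattern being unified here (not click's exact control flow; the function name and sample value are illustrative): try the filesystem encoding first, and fall back to UTF-8 with replacement characters if that fails.

```python
import sys

def decode_argv_value(value: bytes) -> str:
    """Decode a command-line argument, always keeping a UTF-8 fallback."""
    fs_enc = sys.getfilesystemencoding()
    try:
        return value.decode(fs_enc)
    except UnicodeError:
        # Never give up on decoding: replace undecodable bytes instead.
        return value.decode("utf-8", "replace")

print(decode_argv_value("héllo".encode("utf-8")))
```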
Install tox as user Gets around issue where circleci docker images don't run as root
@@ -20,7 +20,7 @@ base_test_step: &base_test_step - run: name: install dependencies command: | - pip install --upgrade setuptools tox + pip install --user --upgrade setuptools tox tox --notest - save_cache:
source: df: Do not require features be passed to dataflow Fixes:
@@ -24,7 +24,8 @@ class DataFlowSourceConfig: dataflow: DataFlow = field("DataFlow to use for preprocessing") features: Features = field( "Features to pass as definitions to each context from each " - "record to be preprocessed" + "record to be preprocessed", + default=Features(), ) inputs: List[str] = field( "Other inputs to add under each ctx (record's key will "
raise GenericYetiError if not 200 ``` python tests/testfeeds.py X DEBUG:root:Scheduler started Running X... DEBUG:root:Running X (ID: X) DEBUG:urllib3.connectionpool:Starting new HTTP connection (1): X:80 DEBUG:urllib3.connectionpool:http://X:80 "GET /X.php HTTP/1.1" 404 293 X: success! ```
@@ -10,6 +10,7 @@ from lxml import etree from mongoengine import DoesNotExist from mongoengine import StringField +from core.errors import GenericYetiError from core.config.celeryctl import celery_app from core.config.config import yeti_config from core.scheduling import ScheduleEntry @@ -141,8 +142,7 @@ class Feed(ScheduleEntry): self.source, headers=headers, proxies=yeti_config.proxy) if r.status_code != 200: - yield "" - return + raise GenericYetiError("{} returns code: {}".format(self.source, r.status_code)) return self.parse_xml(r.content, main_node, children) @@ -185,8 +185,7 @@ class Feed(ScheduleEntry): self.source, headers=headers, proxies=yeti_config.proxy) if r.status_code != 200: - yield "" - return + raise GenericYetiError("{} returns code: {}".format(self.source, r.status_code)) feed = r.text.split('\n') @@ -223,8 +222,7 @@ class Feed(ScheduleEntry): self.source, headers=headers, proxies=yeti_config.proxy) if r.status_code != 200: - yield "" - return + raise GenericYetiError("{} returns code: {}".format(self.source, r.status_code)) feed = r.text.split('\n') reader = csv.reader( @@ -257,8 +255,7 @@ class Feed(ScheduleEntry): self.source, headers=headers, proxies=yeti_config.proxy, params=params) if r.status_code != 200: - yield "" - return + raise GenericYetiError("{} returns code: {}".format(self.source, r.status_code)) return r.json()
[IMPR] Wait until _putthread is done in BaseBot.exit() For asynchronous puts there is a pywikibot._putthread which holds the queue to be done. Wait in the BaseBot exit() method until all asynchronous puts were made and write statistics after it.
@@ -1289,28 +1289,36 @@ class BaseBot(OptionHandler): May be overridden by subclasses. """ self.teardown() + if hasattr(self, '_start_ts'): + read_delta = pywikibot.Timestamp.now() - self._start_ts + read_seconds = int(read_delta.total_seconds()) + + if pywikibot._putthread.is_alive(): + pywikibot._flush() + pywikibot.output('\n{} pages read' '\n{} pages written' '\n{} pages skipped' .format(self._treat_counter, self._save_counter, self._skip_counter)) + if hasattr(self, '_start_ts'): - delta = (pywikibot.Timestamp.now() - self._start_ts) - seconds = int(delta.total_seconds()) - if delta.days: + write_delta = pywikibot.Timestamp.now() - self._start_ts + write_seconds = int(write_delta.total_seconds()) + if write_delta.days: pywikibot.output( 'Execution time: {d.days} days, {d.seconds} seconds' - .format(d=delta)) + .format(d=write_delta)) else: pywikibot.output('Execution time: {} seconds' - .format(delta.seconds)) + .format(write_delta.seconds)) if self._treat_counter: pywikibot.output('Read operation time: {:.1f} seconds' - .format(seconds / self._treat_counter)) + .format(read_seconds / self._treat_counter)) if self._save_counter: pywikibot.output('Write operation time: {:.1f} seconds' - .format(seconds / self._save_counter)) + .format(write_seconds / self._save_counter)) # exc_info contains exception from self.run() while terminating exc_info = sys.exc_info()
Modify "Initialize the databse" command The command `sudo service postgresql initdb` doesn't work within CentOS 7.1. I have updated this to the command that I was able to use to initialize the database.
@@ -19,7 +19,7 @@ Installing PostgreSQL Database 5. Initialize the database. - ``sudo service postgresql initdb`` + ``sudo /usr/pgsql-9.4/bin/postgresql94-setup initdb`` 6. Set PostgreSQL to start on boot.
Fix handling of ad.Zero in _select_and_scatter_add_transpose. Fixes
@@ -5573,6 +5573,8 @@ def _select_and_scatter_add_transpose( t, source, operand, *, select_prim, window_dimensions, window_strides, padding): assert ad.is_undefined_primal(source) and not ad.is_undefined_primal(operand) + if type(t) is ad_util.Zero: + return [ad_util.Zero(source.aval), None] ones = (1,) * len(window_dimensions) source_t = _select_and_gather_add(t, operand, select_prim, window_dimensions, window_strides, padding, ones, ones)
BUG: Pickling class Windows compatibility Child processes cannot access the global dictionary in the master process. Moved storing of the function into the pickling class itself.
@@ -52,25 +52,15 @@ def get_rank(): else: process_name = multiprocessing.current_process().name if process_name is not "MainProcess": - rank = int(process_name[-1]) + rank = int(process_name.split("-")[-1]) return rank -# Helping ProcessPoolExecutor map unpicklable functions -_FUNCTIONS = {} - - class PicklableAndCallable: - def __init__(self, key): - self.key = key - self.func = None + def __init__(self, func): + self.func = func def __call__(self, *args, **kw): - if self.func is None: - try: - self.func = _FUNCTIONS[self.key] - except KeyError: - raise RuntimeError return self.func(*args, **kw) @@ -98,6 +88,7 @@ def imap(f, s, max_workers=None, use_mpi=False, if_serial="raise", chunksize=1): imap is a generator yielding result of f(s[i]), map returns the result series """ + if_serial = if_serial.lower() assert if_serial in ("ignore", "raise", "warn"), f"invalid choice '{if_serial}'" @@ -127,9 +118,7 @@ def imap(f, s, max_workers=None, use_mpi=False, if_serial="raise", chunksize=1): if not max_workers: max_workers = multiprocessing.cpu_count() - 1 assert max_workers < multiprocessing.cpu_count() - key = id(f) - _FUNCTIONS[key] = f - f = PicklableAndCallable(id(f)) + f = PicklableAndCallable(f) with concurrentfutures.ProcessPoolExecutor(max_workers) as executor: for result in executor.map(f, s, chunksize=chunksize): yield result
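A runnable sketch of the pattern adopted here, using only the standard library: storing the target function on the wrapper instance means the whole callable is pickled and shipped to each worker, which is what Windows' spawn start method needs (a module-level dict in the parent is never copied into child processes). `square` is a made-up example function, not part of the original code.

```python
import concurrent.futures as concurrentfutures

class PicklableAndCallable:
    def __init__(self, func):
        # The function travels inside the pickled wrapper instead of being
        # looked up in a parent-process global that spawned children never see.
        self.func = func

    def __call__(self, *args, **kw):
        return self.func(*args, **kw)

def square(x):
    return x * x

if __name__ == "__main__":
    f = PicklableAndCallable(square)
    with concurrentfutures.ProcessPoolExecutor(max_workers=2) as executor:
        print(list(executor.map(f, range(5))))  # [0, 1, 4, 9, 16]
```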
Remove --no-update-dependencies Summary: Absolutely no idea why this is needed. This should be a valid argument. Pull Request resolved:
@@ -724,7 +724,7 @@ binary_linux_test_and_upload: &binary_linux_test_and_upload # Install the package if [[ "$PACKAGE_TYPE" == conda ]]; then - conda install -y "$pkg" --offline --no-update-dependencies + conda install -y "$pkg" --offline else pip install "$pkg" fi @@ -862,7 +862,7 @@ binary_mac_build: &binary_mac_build # Install the package if [[ "$PACKAGE_TYPE" == conda ]]; then - conda install -y "$pkg" --offline --no-update-dependencies + conda install -y "$pkg" --offline else pip install "$pkg" --no-index --no-dependencies -v fi
notifications: Add tests for `relative_to_full_url()` function. Fixes:
from __future__ import absolute_import from __future__ import print_function +import os import random import re +import ujson from django.conf import settings from django.core import mail @@ -13,7 +15,8 @@ from mock import patch, MagicMock from six.moves import range from typing import Any, Dict, List, Text -from zerver.lib.notifications import handle_missedmessage_emails +from zerver.lib.notifications import handle_missedmessage_emails, \ + relative_to_full_url from zerver.lib.actions import do_update_message from zerver.lib.message import access_message from zerver.lib.test_classes import ZulipTestCase @@ -380,3 +383,39 @@ class TestMissedMessages(ZulipTestCase): self.assertEqual(mail.outbox[0].subject, subject) subject = 'Othello, the Moor of Venice sent you a message' self.assertEqual(mail.outbox[1].subject, subject) + + def test_relative_to_full_url(self): + # type: () -> None + # Run `relative_to_full_url()` function over test fixtures present in + # 'markdown_test_cases.json' and check that it converts all the relative + # URLs to absolute URLs. + fixtures_file = os.path.join(os.path.dirname( + os.path.dirname(__file__)), "fixtures", "markdown_test_cases.json") + fixtures = ujson.load(open(fixtures_file)) + test_fixtures = {} + for test in fixtures['regular_tests']: + test_fixtures[test['name']] = test + for test_name in test_fixtures: + test_data = test_fixtures[test_name]["expected_output"] + output_data = relative_to_full_url("http://example.com", test_data) + if re.search("(?<=\=['\"])/(?=[^<]+>)", output_data) is not None: + raise AssertionError("Relative URL present in email: " + output_data + + "\nFailed test case's name is: " + test_name + + "\nIt is present in markdown_test_cases.json") + + # Specific test cases. + test_data = "<p>Check out the file at: '/static/generated/emoji/images/emoji/'</p>" + actual_output = relative_to_full_url("http://example.com", test_data) + expected_output = "<p>Check out the file at: '/static/generated/emoji/images/emoji/'</p>" + self.assertEqual(actual_output, expected_output) + + test_data = '<a href="/user_uploads/2/1f/some_random_value">/user_uploads/2/1f/some_random_value</a>' + actual_output = relative_to_full_url("http://example.com", test_data) + expected_output = '<a href="http://example.com/user_uploads/2/1f/some_random_value">' + \ + '/user_uploads/2/1f/some_random_value</a>' + self.assertEqual(actual_output, expected_output) + + test_data = '<p>Set src="/avatar/[email protected]?s=30"</p>' + actual_output = relative_to_full_url("http://example.com", test_data) + expected_output = '<p>Set src="/avatar/[email protected]?s=30"</p>' + self.assertEqual(actual_output, expected_output)
Update graphql.yaml Reference:
@@ -46,6 +46,8 @@ requests: - "{{BaseURL}}/graph_cms" - "{{BaseURL}}/query-api" - "{{BaseURL}}/api/cask/graphql-playground" + - "{{BaseURL}}/altair" + - "{{BaseURL}}/playground" headers: Content-Type: application/json
WL: xwayland windows need to know which outputs they are on So that they can damage them when they need to show new content
@@ -1082,6 +1082,7 @@ class Static(base.Static, Window): else: self.surface.configure(x, y, self._width, self._height) self.paint_borders(bordercolor, borderwidth) + self._find_outputs() self.damage() def cmd_bring_to_front(self) -> None:
Update installing.rst Made it clear that we prefer pyqt5.
@@ -36,7 +36,10 @@ is used to provide an abstract interface to the two most widely used QT bindings * `pyqt5 <https://riverbankcomputing.com/software/pyqt/intro>`_ -- version 5 * `PySide2 <https://wiki.qt.io/Qt_for_Python>`_ -- version 5 -At least one of those bindings must be installed for the interative GUIs to work. +At least one of those bindings must be installed for the interative GUIs to work. DO NOT INSTALL BOTH, as these +two packages do not play nicely together. We strongly recommend that you go with pyqt5, unless you are attracted +to the more flexible licensing that PySide2. PySide2 can occasionally cause GUIs to crash because +of conflicts with other packages in your environment that use pyqt5. Developer-only items --------------------
Fix MatrixStore test fixture importer Previously we weren't restricting to import events of type "prescribing". This happened to be OK in the contexts where we were using it, but it breaks on the `one_month_of_measures` fixture.
@@ -48,7 +48,7 @@ def matrixstore_from_postgres(): This provides an easy way of using existing test fixtures with the MatrixStore. """ - latest_date = ImportLog.objects.latest("current_at").current_at + latest_date = ImportLog.objects.latest_in_category("prescribing").current_at end_date = str(latest_date)[:7] return matrixstore_from_data_factory( _DatabaseFixtures(), end_date=end_date, months=60
Catch Permission SSL error Give a suggestion on a possible fix for binding to port 443
@@ -28,7 +28,7 @@ This is the main file the defines what URLs get routed to what handlers import sys from setup import __version__ -from os import urandom, _exit +from os import urandom, _exit, path as os_path from modules.Menu import Menu from modules.Recaptcha import Recaptcha from modules.AppTheme import AppTheme @@ -277,7 +277,15 @@ def start_server(): ) else: server = HTTPServer(app, xheaders=options.x_headers) + try: sockets = netutil.bind_sockets(options.listen_port, options.listen_interface) + except PermissionError: + pypath = sys.executable + if os_path.islink(pypath): + pypath = os_path.realpath(pypath) + logging.error("Problem binding to port %s", str(options.listen_port)) + logging.error("Possible Fix: sudo setcap CAP_NET_BIND_SERVICE=+eip %s", pypath) + sys.exit() server.add_sockets(sockets) Scoreboard.now(app) try:
Update core.py This is a small bugfix to stop an error where None would be returned by metadata.findtext. Instead, an empty string is returned.
@@ -1215,7 +1215,7 @@ class SoCo(_SocoSingletonBase): metadata = XML.fromstring(really_utf8(metadata)) # Try parse trackinfo trackinfo = metadata.findtext('.//{urn:schemas-rinconnetworks-com:' - 'metadata-1-0/}streamContent') + 'metadata-1-0/}streamContent') or '' index = trackinfo.find(' - ') if index > -1:
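A small standalone illustration of the `or ''` guard, using plain xml.etree with a simplified tag (SoCo's real element carries a namespace): `findtext()` returns None when the element is missing, and the empty-string fallback keeps the later `.find(' - ')` call from raising.

```python
import xml.etree.ElementTree as ET

metadata = ET.fromstring("<item><title>Song</title></item>")

# No <streamContent> element, so findtext() alone would return None.
trackinfo = metadata.findtext("streamContent") or ""

# With the fallback this prints -1; without it, None.find() raises AttributeError.
print(trackinfo.find(" - "))
```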
fix docs mistakes in lr_scheduler.MultiplicativeLR Summary: This PR is referenced to an issue: [The docs of `MultiplicativeLR` use `LambdaLR` as example](https://github.com/pytorch/pytorch/issues/33752#issue-570374087) Pull Request resolved:
@@ -247,9 +247,8 @@ class MultiplicativeLR(_LRScheduler): last_epoch (int): The index of last epoch. Default: -1. Example: - >>> # Assuming optimizer has two groups. >>> lmbda = lambda epoch: 0.95 - >>> scheduler = LambdaLR(optimizer, lr_lambda=lmbda) + >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) >>> for epoch in range(100): >>> train(...) >>> validate(...)
Let.Expr: add types in the constructor's docstring TN: minor
@@ -1569,6 +1569,11 @@ class Let(AbstractExpression): pretty_class_name = 'Let' def __init__(self, vars, var_exprs, expr, abstract_expr=None): + """ + :type vars: list[VariableExpr] + :type vars_exprs: list[ResolvedExpression] + :type expr: ResolvedExpression + """ self.vars = vars self.var_exprs = var_exprs self.expr = expr
Do not install all azure packages Often azure breaks package dependencies and our CI becomes broken. Install only the azure packages that we are using.
@@ -52,8 +52,9 @@ setup( 'google-api-python-client==1.6.4', 'google-auth==1.2.1', 'google-auth-httplib2==0.0.3', - 'azure==2.0.0', - 'azure-mgmt-containerservice==3.0.1', + 'azure-common==1.1.9', + 'azure-mgmt-containerservice==3.0.0', + 'msrestazure==0.4.25', 'urllib3==1.22' ], setup_requires=[
Update abcd.py Better doc
@@ -283,9 +283,11 @@ class Matrix(object): return outputRay def largestDiameter(self): + """ Largest diameter of the element or group of elements """ return self.apertureDiameter def hasFiniteApertureDiameter(self): + """ True if the element or group of elements have a finite aperture size """ return self.apertureDiameter != float("+Inf") def transferMatrix(self, upTo=float('+Inf')): @@ -856,7 +858,7 @@ class MatrixGroup(Matrix): return rayTrace def hasFiniteApertureDiameter(self): - """ True if OpticalPath has at least one element of finite diameter """ + """ True if ImagingPath has at least one element of finite diameter """ for element in self.elements: if element.hasFiniteApertureDiameter(): return True
pipeline: unify object exporting Remove output.export and associated logic in pipeline.assemble. Instead, return output or None, and export only once in pipeline.run.
@@ -284,11 +284,11 @@ class Pipeline: return results, build_tree, tree - def assemble(self, object_store, build_tree, tree, monitor, libdir, output_directory): + def assemble(self, object_store, build_tree, tree, monitor, libdir): results = {"success": True} if not self.assembler: - return results + return results, None output = object_store.new() @@ -312,19 +312,16 @@ class Pipeline: if not r.success: output.cleanup() results["success"] = False - return results + return results, None if self.assembler.checkpoint: object_store.commit(output, self.assembler.id) - if output_directory: - output.export(output_directory) - output.cleanup() - return results + return results, output def run(self, store, monitor, libdir, output_directory): os.makedirs("/run/osbuild", exist_ok=True) - results = {} + results = {"success": True} monitor.begin(self) @@ -336,11 +333,7 @@ class Pipeline: # usually be needless overhead. obj = object_store.get(self.output_id) - if obj: - results = {"success": True} - obj.export(output_directory) - - else: + if not obj: results, build_tree, tree = self.build_stages(object_store, monitor, libdir) @@ -348,15 +341,19 @@ class Pipeline: if not results["success"]: return results - r = self.assemble(object_store, + r, obj = self.assemble(object_store, build_tree, tree, monitor, - libdir, - output_directory) + libdir) results.update(r) # This will also update 'success' + if obj: + if output_directory: + obj.export(output_directory) + obj.cleanup() + monitor.finish(results) return results
Update faq.md fixed a typo: "... by opening and issue," -> "... by opening an issue,"
@@ -133,7 +133,7 @@ The main Presidio modules (analyzer, anonymizer, image-redactor) can be used bot ### How can I contribute to Presidio? -First, review the [contribution guidelines](https://github.com/microsoft/presidio/blob/main/CONTRIBUTING.md), and feel free to reach out by opening and issue, posting a discussion or emailing us at [email protected] +First, review the [contribution guidelines](https://github.com/microsoft/presidio/blob/main/CONTRIBUTING.md), and feel free to reach out by opening an issue, posting a discussion or emailing us at [email protected] ### How can I report security vulnerabilities? Please see the [security information](https://github.com/microsoft/presidio/blob/main/SECURITY.md).
Optimization: Annotate bool type shape of dictionary "in" operations

* This was missing in contrast to generic dictionary operations.
* This should enable more optimization as it removes false exception annotations for conditions with it.
@@ -48,7 +48,12 @@ from .NodeMakingHelpers import ( makeStatementOnlyNodesFromExpressions, wrapExpressionWithSideEffects, ) -from .shapes.BuiltinTypeShapes import tshape_dict, tshape_list, tshape_none +from .shapes.BuiltinTypeShapes import ( + tshape_bool, + tshape_dict, + tshape_list, + tshape_none, +) from .shapes.StandardShapes import tshape_iterator @@ -1122,6 +1127,10 @@ class ExpressionDictOperationInNotInUncertainBase(ExpressionChildrenHavingBase): self.known_hashable_key = None + @staticmethod + def getTypeShape(): + return tshape_bool + def computeExpression(self, trace_collection): if self.known_hashable_key is None: self.known_hashable_key = self.subnode_key.isKnownToBeHashable() @@ -1148,6 +1157,10 @@ class ExpressionDictOperationInNotInUncertainBase(ExpressionChildrenHavingBase): or self.known_hashable_key is not True ) + @staticmethod + def mayRaiseExceptionBool(exception_type): + return False + def mayHaveSideEffects(self): return self.mayRaiseException(BaseException)
Update seq2seq_model.py replaced marian.prepare_translation_batch() with marian.prepare_seq2seq_batch()
@@ -844,7 +844,7 @@ class Seq2SeqModel: to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size) ]: if self.args.model_type == "marian": - input_ids = self.encoder_tokenizer.prepare_translation_batch( + input_ids = self.encoder_tokenizer.prepare_seq2seq_batch( batch, max_length=self.args.max_seq_length, padding="max_length",
Update python-app.yml Added pytest-cov
@@ -23,7 +23,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - pip install flake8 pytest + pip install flake8 pytest pytest-cov if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Lint with flake8 run: |
fix valid logic 0 is false, None check is safer
@@ -35,7 +35,7 @@ class OpenPypeVersion: self.prerelease = prerelease is_valid = True - if not major or not minor or not patch: + if major is None or minor is None or patch is None: is_valid = False self.is_valid = is_valid
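A quick standalone demonstration of why the truthiness check was wrong for version components: 0 is a perfectly valid major/minor/patch value but is falsy, so only an explicit None comparison distinguishes "missing" from "zero". The example version number is invented.

```python
major, minor, patch = 3, 0, 1  # a legitimate version such as 3.0.1

# Truthiness check: wrongly flags the version as invalid because minor == 0.
print(not major or not minor or not patch)              # True

# Explicit None check: only a genuinely missing component invalidates it.
print(major is None or minor is None or patch is None)  # False
```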
Update .travis.yml Attempt to include pyglow on build server
language: python python: - "2.7" - #- "3.3" - - "3.4" - "3.5" - "3.6" sudo: false @@ -38,8 +36,10 @@ install: # - conda install --yes -c dan_blanchard python-coveralls nose-cov - source activate test-environment - pip install coveralls - # pysatCDF installed via setup.py requirement - # - pip install pysatCDF + - git clone https://github.com/timduly4/pyglow.git + - cd pyglow + - ./pyglow_install.sh + - cd .. - "python setup.py install" # command to run tests script:
Update formbook.txt Removing some dups + cleaning.
# Reference: https://blog.talosintelligence.com/2018/06/my-little-formbook.html?m=1 -http://www.drylipc.com/em1/ -http://www.handanzhize.info/d5/ -http://www.bddxpso.info/d7/ -http://www.newraxz.com/as/ -http://www.atopgixn.info/de8/ -http://www.cretezzy.com/am/ -http://www.casiinoeuros.info/d3/ -http://www.newraxz.com/as/ -http://www.cretezzy.com/do/ -http://www.newraxz.com/as/ +drylipc.com/em1/ +handanzhize.info/d5/ +bddxpso.info/d7/ +atopgixn.info/de8/ +cretezzy.com/am/ +cretezzy.com/do/ +casiinoeuros.info/d3/ +newraxz.com/as/ # Reference: https://twitter.com/avman1995/status/1038285919219068928 # Reference: https://pastebin.com/6MRD35Pq
testing, ignore why is the link broken?
@@ -9,3 +9,12 @@ The Epidata API is built and maintained by the Carnegie Mellon University [Delphi research group](https://delphi.cmu.edu/). Explore one way in which Delphi is responding to the pandemic by visiting the [COVID-19 Survey page](covid_survey.md). + +# ignore + +gotta love testing in production. + +visiting the [COVID-19 Survey page](covid_survey.md). + +visiting the [COVID-19 Survey +page](covid_survey.md).
Provide a little more help in error case Hopefully never a problem in production, but helpful while building tests. We have the information, why not share it?
@@ -156,7 +156,9 @@ class EndpointInterchange: executor.endpoint_id = self.endpoint_id else: if not executor.endpoint_id == self.endpoint_id: - raise Exception("InconsistentEndpointId") + eep_id = f"Executor({executor.endpoint_id})" + sep_id = f"Interchange({self.endpoint_id})" + raise Exception(f"InconsistentEndpointId: {eep_id} != {sep_id}") self.executors[executor.label] = executor if executor.run_dir is None: executor.run_dir = self.logdir
add classname() and classnames() Try to provide a convenient and consistent method to retrieve ROOT class names as plain strings
@@ -107,6 +107,10 @@ class ROOTDirectory(object): out.__dict__.update(self.__dict__) return out + @classmethod + def classname(cls): + return cls._classname.decode('ascii') + @staticmethod def read(source, *args, **options): if len(args) == 0: @@ -300,6 +304,9 @@ class ROOTDirectory(object): def classes(self, recursive=False, filtername=nofilter, filterclass=nofilter): return list(self.iterclasses(recursive=recursive, filtername=filtername, filterclass=filterclass)) + def classnames(self): + return list(key._fClassName.decode('ascii') for key in self._keys) + def allkeys(self, filtername=nofilter, filterclass=nofilter): return self.keys(recursive=True, filtername=filtername, filterclass=filterclass) @@ -1295,6 +1302,10 @@ class ROOTStreamedObject(ROOTObject): return numpy.dtype(dtypesout) + @classmethod + def classname(cls): + return cls.__name__ + class TObject(ROOTStreamedObject): @classmethod def _recarray(cls):
Remove f-strings This fixes the Python 3.5 tests
@@ -270,7 +270,8 @@ def accepts(**arg_units): dimension = arg_units[arg_name] if not _has_units(arg_value, dimension): raise TypeError( - f"arg '{arg_name}={arg_value}' does not match {dimension}" + "arg '%s=%s' does not match %s" + % (arg_name, arg_value, dimension) ) return f(*args, **kwargs) @@ -337,7 +338,7 @@ def returns(r_unit): """ result = f(*args, **kwargs) if not _has_units(result, r_unit): - raise TypeError(f"result '{result}' does not match {r_unit}") + raise TypeError("result '%s' does not match %s" % (result, r_unit)) return result return new_f
component: pass --no-pager to git in log_since.py We don't need to use a pager in log_since.py
@@ -23,7 +23,9 @@ def get_logs(root, pseudo_revision, mergebase, start, end): if end is not None: end_ref += '~%d' % (pseudo_revision - end) refspec = '%s..%s' % (start_ref, end_ref) - cmd = ['git', 'log', refspec, '--date=short', '--format=%ad %ae %s'] + cmd = [ + 'git', '--no-pager', 'log', refspec, '--date=short', '--format=%ad %ae %s' + ] nb_commits = (end or pseudo_revision) - start try: log = subprocess.check_output(cmd, cwd=root)
Add __init__: save data_context, set self.custom_styles_directory if the directory is present
@@ -50,9 +50,16 @@ class DefaultJinjaView(object): * Vega-Lite 3.2.1 * Vega-Embed 4.0.0 """ - _template = NoOpTemplate + def __init__(self, data_context): + self.data_context = data_context + plugins_directory = data_context.plugins_directory + if os.path.isdir(os.path.join(plugins_directory, "custom_data_docs", "styles")): + self.custom_styles_directory = os.path.join(plugins_directory, "custom_data_docs/styles") + else: + self.custom_styles_directory = None + def render(self, document, template=None, **kwargs): self._validate_document(document)
Use a shared database cache. Fixes The default was a local memory cache, which was fast, but led to discrepancies in each app's cache.
@@ -77,6 +77,13 @@ INSTALLED_APPS = ( SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', + 'LOCATION': 'studio_db_cache', + } +} + MIDDLEWARE_CLASSES = ( # 'django.middleware.cache.UpdateCacheMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware',
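One detail worth remembering with this backend (a fact from Django's documentation, not shown in the diff): the database cache table must be created once before it can be used, e.g. with the `createcachetable` management command. A hedged sketch of the programmatic equivalent, assuming `DJANGO_SETTINGS_MODULE` is already configured:

```python
# Usual CLI form:  python manage.py createcachetable
import django
from django.core.management import call_command

django.setup()  # assumes DJANGO_SETTINGS_MODULE points at the project settings
call_command("createcachetable")  # creates the 'studio_db_cache' table from CACHES
```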
Minor typo fix requies -> requires
``std=c99`` added if compiler is named ``gcc`` ---------------------------------------------- -GCC before version 5 requies the ``-std=c99`` command line argument. Newer +GCC before version 5 requires the ``-std=c99`` command line argument. Newer compilers automatically turn on C99 mode. The compiler setup code will automatically add the code if the compiler name has ``gcc`` in it.
Implement suggestions Added links to mypy and PEP484 (the type hint pep). Removed the temp variable for clarity, maybe it's fine if we don't show that you can typehint normal variables?
**Type Hints** -A typehint indicates what type something should be. For example, +A type hint indicates what type something is expected to be. For example, ```python def add(a: int, b: int) -> int: - sum: int = a + b - return sum + return a + b ``` -In this case, `a` and `b` are expected to be ints, and the function returns an int. We also declare an intermediate variable `sum`, which we indicate to be an int. +In this case, we have a function,`add`, with parameters `a` and `b`. The type hints indicate that the parameters and return type are all integers. It's important to note these are just hints and have no runtime effect. For example, ```python -#uh oh +# Uh oh add("hello ", "world") ``` -This code will run without error, even though it doesn't follow the function's type hints. +This code won't error even though it doesn't follow the function's type hints. It will just concatenate the two strings. + +Third party tools like [mypy](http://mypy-lang.org/) can enforce your type hints. Mypy would error in the second example. + +For more info about type hints, check out [PEP 484](https://www.python.org/dev/peps/pep-0484/).
add failing test The distutils build_sass command runs the Manifest.build() method, not the Manifest.build_one() method. The former does not honor the strip_extension option.
@@ -635,7 +635,7 @@ class ManifestTestCase(BaseTestCase): ) -def test_manifest_strip_extension(tmpdir): +def test_manifest_build_one_strip_extension(tmpdir): src = tmpdir.join('test').ensure_dir() src.join('a.scss').write('a{b: c;}') @@ -645,6 +645,16 @@ def test_manifest_strip_extension(tmpdir): assert tmpdir.join('css/a.css').read() == 'a {\n b: c; }\n' +def test_manifest_build_strip_extension(tmpdir): + src = tmpdir.join('test').ensure_dir() + src.join('x.scss').write('a{b: c;}') + + m = Manifest(sass_path='test', css_path='css', strip_extension=True) + m.build(package_dir=str(tmpdir)) + + assert tmpdir.join('css/x.css').read() == 'a {\n b: c; }\n' + + class WsgiTestCase(BaseTestCase): @staticmethod
Added Channel Stat Created a stat to count how many channels are being saved by curators
@@ -4,6 +4,7 @@ import uuid import hashlib import functools import json +import newrelic.agent from django.conf import settings from django.contrib import admin from django.core.cache import cache @@ -256,6 +257,9 @@ class Channel(models.Model): if self.pk and Channel.objects.filter(pk=self.pk).exists(): original_node = Channel.objects.get(pk=self.pk) + if original_node is None: + newrelic.agent.record_custom_metric('Custom/ChannelStats/NumCreatedChannels', 1) + super(Channel, self).save(*args, **kwargs) # Check if original thumbnail is no longer referenced
Small questioning ... Wouldn't it be better to print both mAP values, since the evaluation already takes some time for some datasets? It will take twice this time if we want to compute these values separately... (this is happening to me right now and I'm really starting to find this annoying :) ).
@@ -108,7 +108,6 @@ def parse_args(args): parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800) parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333) parser.add_argument('--config', help='Path to a configuration parameters .ini file (only used with --convert-model).') - parser.add_argument('--weighted-average', help='Compute the mAP using the weighted average of precisions among classes.', action='store_true') return parser.parse_args(args) @@ -181,9 +180,7 @@ def main(args=None): print('No test instances found.') return - if args.weighted_average: - print('mAP: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances))) - else: + print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances))) print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))
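For readers unfamiliar with the two numbers being discussed, here is the arithmetic on a made-up three-class example: the weighted mAP weights each class's average precision by its number of ground-truth instances, while the plain mAP averages over classes that have at least one instance.

```python
# Hypothetical per-class evaluation results.
total_instances = [120, 30, 5]    # ground-truth boxes per class
precisions = [0.80, 0.60, 0.10]   # average precision per class

weighted_map = sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances)
plain_map = sum(precisions) / sum(x > 0 for x in total_instances)

print('weighted mAP: {:.4f}'.format(weighted_map))  # 0.7387
print('mAP: {:.4f}'.format(plain_map))              # 0.5000
```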
feat(spatial_index): reverse lookup for labels -> locations Requires querying the whole dataset.
+from collections import defaultdict import json import os @@ -6,7 +7,7 @@ import numpy as np from ...exceptions import SpatialIndexGapError from ...storage import Storage, SimpleStorage from ... import paths -from ...lib import Bbox, Vec, xyzrange, min2 +from ...lib import Bbox, Vec, xyzrange, min2, toiter class SpatialIndex(object): """ @@ -58,6 +59,44 @@ class SpatialIndex(object): return { res['filename']: res['content'] for res in results } + def index_file_paths_for_bbox(self, bbox): + bbox = bbox.expand_to_chunk_size(self.chunk_size, offset=self.bounds.minpt) + + if bbox.subvoxel(): + return [] + + index_files = [] + for pt in xyzrange(bbox.minpt, bbox.maxpt, self.chunk_size): + search = Bbox( pt, min2(pt + self.chunk_size, self.bounds.maxpt) ) + index_files.append(search.to_filename() + '.spatial') + + return index_files + + def file_locations_per_label(self, labels, allow_missing=False): + """ + Queries entire dataset to find which spatial index files the + given labels are located in. Can be expensive. + + Returns: { filename: [ labels... ], ... } + """ + labels = toiter(labels) + index_files = self.index_file_paths_for_bbox(self.bounds) + index_files = self.fetch_index_files(index_files) + locations = defaultdict(list) + for filename, content in index_files.items(): + if content is None: + if allow_missing: + continue + else: + raise SpatialIndexGapError(filename + " was not found.") + + segid_bbox_dict = json.loads(content) + for label in labels: + if str(label) in segid_bbox_dict: + locations[label].append(filename) + + return locations + def query(self, bbox, allow_missing=False): """ For the specified bounding box (or equivalent representation), @@ -75,11 +114,7 @@ class SpatialIndex(object): if bbox.subvoxel(): return [] - index_files = [] - for pt in xyzrange(bbox.minpt, bbox.maxpt, self.chunk_size): - search = Bbox( pt, min2(pt + self.chunk_size, self.bounds.maxpt) ) - index_files.append(search.to_filename() + '.spatial') - + index_files = self.index_file_paths_for_bbox(bbox) results = self.fetch_index_files(index_files) labels = set()
Change check_robustness dataset Since adversarial training uses the training data (although a random sample is taken here), it is more reasonable to use the eval data to check robustness.
@@ -576,7 +576,7 @@ def train_model(args): break if args.check_robustness: - samples_to_attack = list(zip(train_text, train_labels)) + samples_to_attack = list(zip(eval_text, eval_labels)) samples_to_attack = random.sample(samples_to_attack, 1000) adv_attack_results = _generate_adversarial_examples( model_wrapper, attack_class, samples_to_attack
[meta] remove 7.x branch from backport config 7.x branch has been removed as there won't be any 7.18 minor version.
{ - "upstream": "elastic/helm-charts", + "all": true, + "prFilter": "label:need-backport", + "sourcePRLabels": [ + "backported" + ], "targetBranchChoices": [ "6.8", - "7.17", - "7.x" + "7.17" ], - "all": true, - "prFilter": "label:need-backport", "targetPRLabels": [ "backport" ], - "sourcePRLabels": [ - "backported" - ] + "upstream": "elastic/helm-charts" }
Also inspect the path for the script tag Part of
Referer: "<script >alert(1);</script>" output: log_contains: id "941110" + - + test_title: 941110-5 + desc: XSS in URI / PATH_INFO going undetected - GH issue 1022 + stages: + - + stage: + input: + dest_addr: 127.0.0.1 + method: GET + port: 80 + uri: "/foo/bar%3C/script%3E%3Cscript%3Ealert(1)%3C/script%3E/" + headers: + Host: localhost + Accept: "*/*" + User-Agent: ModSecurity CRS 3 Tests + output: + log_contains: id "941110"
Update Google_News.py Added a comment to the function and updated the print output to use the new format syntax.
@@ -4,7 +4,9 @@ from bs4 import BeautifulSoup as soup from urllib.request import urlopen def news(xml_news_url): - + '''Print select details from a html response containing xml + @param xml_news_url: url to parse + ''' Client=urlopen(xml_news_url) xml_page=Client.read() Client.close() @@ -15,10 +17,10 @@ def news(xml_news_url): for news in news_list: - print(news.title.text) - print(news.link.text) - print(news.pubDate.text) - print("\n\n") + print(f'news title: {news.title.text}') + print(f'news link: {news.link.text}') + print(f'news pubDate: {news.pubDate.text}') + print("+-"*20,"\n\n")
New RandomApplyPreprocessor class: Randomly apply one preprocessor as a subpreprocessor with a certain probability.
@@ -3208,6 +3208,76 @@ class RepeatPreprocessor(Preprocessor): return dtypes +class RandomApplyPreprocessor(Preprocessor): + """Randomly apply a preprocessor with certain probability. + + This preprocessor takes a preprocessor as a subprocessor and apply the + subprocessor to features with certain probability. + + """ + + @classmethod + def Params(cls): + p = super(RandomApplyPreprocessor, cls).Params() + p.Define('prob', 1.0, 'The probability the subprocessor being executed.') + p.Define('subprocessor', None, 'Params for an input preprocessor.') + + return p + + @base_layer.initializer + def __init__(self, params): + super(RandomApplyPreprocessor, self).__init__(params) + p = self.params + if p.subprocessor is None: + raise ValueError('No subprocessor was specified for RepeatPreprocessor.') + if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float): + raise ValueError( + 'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob)) + + with tf.variable_scope(p.name): + self.CreateChild('subprocessor', p.subprocessor) + + self.choice = tf.random_uniform( + (), minval=0.0, maxval=1.0, seed=p.random_seed) < p.prob + + def TransformFeatures(self, features): + transformed_features = self.subprocessor.FPropDefaultTheta(features) + features = tf.cond(self.choice, lambda: transformed_features, + lambda: features) + + return features + + def TransformShapes(self, shapes): + shapes_transformed = self.subprocessor.TransformShapes(shapes) + + if not shapes.IsCompatible(shapes_transformed): + raise ValueError( + 'NestedMap structures are different between shapes and transformed' + 'shapes. Original shapes: {}. Transformed shapes: {}'.format( + shapes, shapes_transformed)) + + shapes_zipped = shapes.Pack( + zip(shapes.Flatten(), shapes_transformed.Flatten())) + shapes_compatible = shapes_zipped.Transform( + lambda xs: xs[0].is_compatible_with(xs[1])) + + if not all(shapes_compatible.Flatten()): + raise ValueError( + 'Shapes after transformation - {} are different from original ' + 'shapes - {}.'.format(shapes_transformed, shapes)) + + return shapes + + def TransformDTypes(self, dtypes): + transformed_dtypes = self.subprocessor.TransformDTypes(dtypes) + if transformed_dtypes != dtypes: + raise ValueError( + 'DTypes after transformation of preprocessor - {} should be ' + 'the same as {}, but get {}.'.format(self.params.subprocessor, dtypes, + transformed_dtypes)) + return dtypes + + class SparseSampler(Preprocessor): """Fused SparseCenterSelector and SparseCellGatherFeatures.
Fix initialization order in KafkaClient
@@ -201,10 +201,15 @@ class KafkaClient(object): if key in configs: self.config[key] = configs[key] + # these properties need to be set on top of the initialization pipeline + # because they are used when __del__ method is called + self._closed = False + self._wake_r, self._wake_w = socket.socketpair() + self._selector = self.config['selector']() + self.cluster = ClusterMetadata(**self.config) self._topics = set() # empty set will fetch all topic metadata self._metadata_refresh_in_progress = False - self._selector = self.config['selector']() self._conns = Dict() # object to support weakrefs self._api_versions = None self._connecting = set() @@ -212,7 +217,6 @@ class KafkaClient(object): self._refresh_on_disconnects = True self._last_bootstrap = 0 self._bootstrap_fails = 0 - self._wake_r, self._wake_w = socket.socketpair() self._wake_r.setblocking(False) self._wake_w.settimeout(self.config['wakeup_timeout_ms'] / 1000.0) self._wake_lock = threading.Lock() @@ -226,7 +230,6 @@ class KafkaClient(object): self._selector.register(self._wake_r, selectors.EVENT_READ) self._idle_expiry_manager = IdleConnectionManager(self.config['connections_max_idle_ms']) - self._closed = False self._sensors = None if self.config['metrics']: self._sensors = KafkaClientMetrics(self.config['metrics'],
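The underlying rule, illustrated with a toy class rather than kafka-python's code: anything `__del__` touches must be assigned before any later `__init__` step that might raise, because Python still calls `__del__` on a partially initialised object.

```python
class Resource:
    def __init__(self, fail=False):
        # Attributes used by __del__ go first, before anything that can raise.
        self._closed = False
        self._socket = None
        if fail:
            raise RuntimeError("constructor failed part-way through")
        self._socket = object()  # stand-in for an expensive resource

    def __del__(self):
        # Safe even if __init__ raised: the attributes above always exist.
        if not self._closed and self._socket is not None:
            self._closed = True

try:
    Resource(fail=True)
except RuntimeError:
    pass  # no AttributeError from __del__ on the half-built instance
```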
paraminfo_tests.py: Add 'text/x-collabkit' as a new content format test_content_format should expect 'text/x-collabkit' as a new content format when it finds CollaborationKit extension on a wiki.
@@ -127,6 +127,9 @@ class MediaWikiKnownTypesTestCase(KnownTypesTestBase, if isinstance(self.site, DataSite): # It is not clear when this format has been added, see T129281. base.append('application/vnd.php.serialized') + extensions = set(e['name'] for e in self.site.siteinfo['extensions']) + if 'CollaborationKit' in extensions: + base.append('text/x-collabkit') self._check_param_values(self.site, 'edit', 'contentformat', base) self._check_param_values(self.site, 'parse', 'contentformat', base)
[cluster autoscaler] expand logic around scaling cancelled resources

2 new rules for scaling cancelled resources (asg/sfr):
1. If any resource in a pool needs to scale up, ignore cancelled resources
2. Otherwise, if any resource (in a pool) is cancelled, ignore running resources
3. Else work as normal
@@ -17,6 +17,7 @@ import logging import math import os import time +from collections import defaultdict from collections import namedtuple from datetime import datetime from datetime import timedelta @@ -937,12 +938,14 @@ def autoscale_local_cluster(config_folder, dry_run=False, log_level=None): autoscaling_resources = system_config.get_cluster_autoscaling_resources() autoscaling_draining_enabled = system_config.get_cluster_autoscaling_draining_enabled() all_pool_settings = system_config.get_resource_pool_settings() - autoscaling_scalers = [] + autoscaling_scalers = defaultdict(list) utilization_errors = get_all_utilization_errors(autoscaling_resources, all_pool_settings) + any_cancelled_pool = defaultdict(lambda: False) + any_scale_up_pool = defaultdict(lambda: False) for identifier, resource in autoscaling_resources.items(): pool_settings = all_pool_settings.get(resource['pool'], {}) try: - autoscaling_scalers.append(get_scaler(resource['type'])( + scaler = get_scaler(resource['type'])( resource=resource, pool_settings=pool_settings, config_folder=config_folder, @@ -950,13 +953,28 @@ def autoscale_local_cluster(config_folder, dry_run=False, log_level=None): log_level=log_level, utilization_error=utilization_errors[(resource['region'], resource['pool'])], draining_enabled=autoscaling_draining_enabled, - )) + ) + autoscaling_scalers[resource['pool']].append(scaler) + any_scale_up_pool[resource['pool']] |= utilization_errors[(resource['region'], resource['pool'])] > 0 + any_cancelled_pool[resource['pool']] |= scaler.is_resource_cancelled() except KeyError: log.warning("Couldn't find a metric provider for resource of type: {}".format(resource['type'])) continue log.debug("Sleep 3s to throttle AWS API calls") time.sleep(3) - sorted_autoscaling_scalers = sorted(autoscaling_scalers, key=lambda x: x.is_resource_cancelled(), reverse=True) + filtered_autoscaling_scalers = [] + for pool, scalers in autoscaling_scalers.items(): + # if any resource in a pool is scaling up, don't look at cancelled resources + if any_scale_up_pool[pool]: + filtered_autoscaling_scalers += [s for s in scalers if not s.is_resource_cancelled()] + # if any resource in a pool is cancelled (and none are scaling up), only look at cancelled ones + elif any_cancelled_pool[pool]: + filtered_autoscaling_scalers += [s for s in scalers if s.is_resource_cancelled()] + else: + filtered_autoscaling_scalers += scalers + sorted_autoscaling_scalers = sorted( + filtered_autoscaling_scalers, key=lambda x: x.is_resource_cancelled(), reverse=True, + ) event_loop = asyncio.get_event_loop() event_loop.run_until_complete(run_parallel_scalers(sorted_autoscaling_scalers)) event_loop.close()
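The three rules distilled into a standalone helper — a simplified sketch, not PaaSTA's actual function; the only method assumed on a scaler object is `is_resource_cancelled()`.

```python
def pick_scalers_for_pool(scalers, any_scale_up, any_cancelled):
    # Rule 1: something in the pool needs to scale up -> ignore cancelled resources.
    if any_scale_up:
        return [s for s in scalers if not s.is_resource_cancelled()]
    # Rule 2: nothing scaling up but something is cancelled -> only cancelled ones.
    if any_cancelled:
        return [s for s in scalers if s.is_resource_cancelled()]
    # Rule 3: otherwise work as normal.
    return list(scalers)
```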
settings: Use id of the container to find status element. This commit changes the code to use the container id in the selector of the status element of the presence_enabled setting, so that the correct element is selected, because we will add another element with the same class in the realm-level presence_enabled setting.
@@ -694,7 +694,7 @@ export function set_up() { channel.patch, "/json/settings", data, - $(".privacy-setting-status").expectOne(), + $("#account-settings .privacy-setting-status").expectOne(), ); }); }
TST: added attributes to test class Added new and missing attributes to the attribute test list.
@@ -218,7 +218,8 @@ class TestConstellationFunc: self.ref_time = pysat.instruments.pysat_testing._test_dates[''][''] self.attrs = ["platforms", "names", "tags", "inst_ids", "instruments", "bounds", "empty", "empty_partial", "index_res", - "common_index"] + "common_index", "date", "yr", "doy", "yesterday", "today", + "tomorrow", "variables"] def teardown(self): """Clean up after each test
Remove newlines from added file lines New files created by a patch were being read in with newline characters, causing any patches that then change the new file to fail validation.
@@ -525,7 +525,7 @@ def _apply_file_unidiff(patched_file, files_under_test): assert len(patched_file) == 1 # Should be only one hunk assert patched_file[0].removed == 0 assert patched_file[0].target_start == 1 - files_under_test[patched_file_path] = [x.value for x in patched_file[0]] + files_under_test[patched_file_path] = [x.value.rstrip('\n') for x in patched_file[0]] elif patched_file.is_removed_file: # Remove lines to see if file to be removed matches patch _modify_file_lines(patched_file, files_under_test[patched_file_path])
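A tiny standalone illustration of the fix (the sample lines are invented): unidiff yields added lines with their trailing newline, which then never compare equal to the newline-free lines used elsewhere in validation.

```python
added_lines = ["first line\n", "second line\n"]   # as read from a patch hunk

stored = [line.rstrip('\n') for line in added_lines]

print(stored == ["first line", "second line"])       # True
print(added_lines == ["first line", "second line"])  # False -- the old bug
```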
Making LFP file detection automatic Removes lfp flag from __init__ params, sets lfp flag based on file name, also checks that input file is a .bin
@@ -13,14 +13,19 @@ class SpikeGLXRecordingExtractor(RecordingExtractor): mode = 'file' installation_mesg = "" # error message when not installed - def __init__(self, file_path, lfp=False, x_pitch=21, y_pitch=20): + def __init__(self, file_path, x_pitch=21, y_pitch=20): RecordingExtractor.__init__(self) self._npxfile = Path(file_path) self._basepath = self._npxfile.parents[0] # Gets file type: 'imec0.ap', 'imec0.lf' or 'nidq' + assert 'bin' in self._npxfile.name, "The 'npx_file should be either the 'ap' or the 'lf' bin file." if 'ap' in str(self._npxfile): + lfp = False self.is_filtered = True + else: + assert 'lf' in self._npxfile.name, "The 'npx_file should be either the 'ap' or the 'lf' file." + lfp = True aux = self._npxfile.stem.split('.')[-1] if aux == 'nidq': self._ftype = aux
encryptor: remove duplicate len() statements For some reason pylint does not like these.
@@ -183,6 +183,7 @@ class DecryptorFile(FileWrap): # else temporarily but we keep _decrypt_offset intact until we actually do a # read in case the caller just called seek in order to then immediately seek back self._decrypt_offset = None + self.offset = None self._reset() def _reset(self): @@ -260,8 +261,9 @@ class DecryptorFile(FileWrap): data = self._boundary_block[self.offset % AES_BLOCK_SIZE:self.offset % AES_BLOCK_SIZE + size] if self.offset % AES_BLOCK_SIZE + size == len(self._boundary_block): self._boundary_block = None - self.offset += len(data) - self._decrypt_offset += len(data) + data_len = len(data) + self.offset += data_len + self._decrypt_offset += data_len return data # Only serve multiples of AES_BLOCK_SIZE whenever possible to keep things simpler @@ -284,8 +286,9 @@ class DecryptorFile(FileWrap): if size < AES_BLOCK_SIZE: self._boundary_block = decrypted return self._read_block(size) - self.offset += len(decrypted) - self._decrypt_offset += len(decrypted) + decrypted_len = len(decrypted) + self.offset += decrypted_len + self._decrypt_offset += decrypted_len return decrypted def close(self):
WL: Pick up changed app_id in _on_map Without this, some apps (e.g. Alacritty) will not have app_id detected.
@@ -76,6 +76,9 @@ class XdgWindow(Window[XdgSurface]): def _on_map(self, _listener: Listener, _data: Any) -> None: logger.debug("Signal: xdgwindow map") + if not self._wm_class == self.surface.toplevel.app_id: + self._wm_class = self.surface.toplevel.app_id + if self in self.core.pending_windows: self.core.pending_windows.remove(self) self._wid = self.core.new_wid()
[tests/module/disk] Adapt to new input parameters Open application now defaults to xdg-open, replace with nautilus manually.
@@ -20,6 +20,7 @@ class TestDiskModule(unittest.TestCase): self._os = mock.patch("bumblebee.modules.disk.os") self.os = self._os.start() self.config.set("disk.path", "somepath") + self.config.set("disk.open", "nautilus") def tearDown(self): self._os.stop()
Update receive.py

+ existing code works fine in python 2.7
+ in python 3 it breaks
+ in python 3 there is a change in the chardet module and hence the error (convert-string-to-bytes-in-python-3)
+ this is also fixed in the v11-hotfix branch
@@ -481,7 +481,10 @@ class Email: """Detect chartset.""" charset = part.get_content_charset() if not charset: + if six.PY2: charset = chardet.detect(str(part))['encoding'] + else: + charset = chardet.detect(part.encode())['encoding'] return charset
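A minimal sketch of the Python 3 behaviour behind the fix, assuming the third-party chardet package is installed: `chardet.detect()` wants bytes, so a str payload has to be encoded first.

```python
import chardet

text = "déjà vu"

# Python 3: pass bytes, not str, to chardet.
result = chardet.detect(text.encode())
print(result["encoding"])  # chardet's best guess, e.g. 'utf-8'
```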
Update link to latest docs The docs-badge was pointing to old documentation containing references to model hosting, etc.
@@ -6,7 +6,7 @@ Cognite Python SDK ========================== [![build](https://github.com/cognitedata/cognite-sdk-python/workflows/release/badge.svg)](https://github.com/cognitedata/cognite-sdk-python/actions?query=workflow:release) [![codecov](https://codecov.io/gh/cognitedata/cognite-sdk-python/branch/master/graph/badge.svg)](https://codecov.io/gh/cognitedata/cognite-sdk-python) -[![Documentation Status](https://readthedocs.com/projects/cognite-sdk-python/badge/?version=latest)](https://cognite-docs.readthedocs-hosted.com/en/latest/) +[![Documentation Status](https://readthedocs.com/projects/cognite-sdk-python/badge/?version=latest)](https://cognite-docs.readthedocs-hosted.com/projects/cognite-sdk-python/en/latest/) [![PyPI version](https://badge.fury.io/py/cognite-sdk.svg)](https://pypi.org/project/cognite-sdk/) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black)
DEV: updated pinned versions to the latest [CHANGED] requirements.txt used for GitHub Actions; pin these to the latest versions
chardet==4.0.0 -pillow>=8.3.2 -psutil==5.8.0 -scipy==1.6.1; python_version > '3.6' -tox==3.23.1 -tox-gh-actions==2.5.0 -plotly +pillow==9.0.0 +psutil==5.9.0 +scipy==1.7.3 +tox==3.24.5 +tox-gh-actions==2.8.1 +plotly==5.5.0 .[dev]
fix files filtering Filter both expected and published folders and skip temporary files. Temporary files might be pulled into the expected folder when directly copying the result of one test run to the source zip file.
@@ -336,21 +336,31 @@ class PublishTest(ModuleUnitTest): glob.glob(expected_dir_base + "\\**", recursive=True) if f != expected_dir_base and os.path.exists(f)) - filtered_published = set() - for pub_path in published: - if skip_compare_folders: - if not any([re.search(val, pub_path) - for val in skip_compare_folders]): - filtered_published.add(pub_path) - else: - filtered_published.add(pub_path) + filtered_published = self._filter_files(published, + skip_compare_folders) + + # filter out temp files also in expected + # could be polluted by accident by copying 'output' to zip file + filtered_expected = self._filter_files(expected, skip_compare_folders) - not_matched = expected.symmetric_difference(filtered_published) - if not_matched: + not_mtched = filtered_expected.symmetric_difference(filtered_published) + if not_mtched: ModuleUnitTest.failed = True raise AssertionError("Missing {} files".format( - "\n".join(sorted(not_matched)))) + "\n".join(sorted(not_mtched)))) + + def _filter_files(self, source_files, skip_compare_folders): + """Filter list of files according to regex pattern.""" + filtered = set() + for file_path in source_files: + if skip_compare_folders: + if not any([re.search(val, file_path) + for val in skip_compare_folders]): + filtered.add(file_path) + else: + filtered.add(file_path) + return filtered class DeadlinePublishTest(PublishTest): @pytest.fixture(scope="module")
Update clang_format_ci.sh Summary: shellcheck led me astray! Pull Request resolved:
@@ -5,7 +5,7 @@ set -eux # Requires a single argument, which is the <commit> argument to git-clang-format # If you edit this whitelist, please edit the one in clang_format_all.py as well -find . -type f -print0 \ +find . -type f \ -path './torch/csrc/jit/*' -or \ -path './test/cpp/jit/*' -or \ -path './test/cpp/tensorexpr/*' \
Fix file extensions to remove leading '.' Fix incorrect variable reference.
@@ -141,7 +141,9 @@ def collect_local_artifacts(): def create_artifact_data(artifact_dir): for artifact in listdir(artifact_dir): filename, file_extension = os.path.splitext(artifact) - if file_extension in artifacts_dict: + # Remove leading '.' + file_extension = file_extension[1:] + if file_extension in file_manifest: data = {"name": artifact, "file_location": "%s/%s" % (artifact_dir, artifact)} data.update(file_manifest[file_extension])
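For reference, os.path.splitext() keeps the leading dot on the extension, which is why the key has to be sliced before looking it up in the manifest; a quick sketch with a hypothetical path:

```python
import os

filename, file_extension = os.path.splitext("build/artifact.rpm")  # hypothetical path
print(file_extension)      # '.rpm' -- splitext keeps the leading dot
print(file_extension[1:])  # 'rpm'  -- the form usable as a manifest key
```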
Fix memory leak in Dense.copy() Also had knock-on effects in add_dense() due to the call to Dense.copy(). Fix
@@ -120,6 +120,7 @@ cdef class Dense(base.Data): out.shape = self.shape out.data = ptr out.fortran = self.fortran + out._deallocate = True return out cdef void _fix_flags(self, object array, bint make_owner=False):
drafts: Rename two functions in puppeteer tests for better clarity. Soon there will be another way to restore a message draft, and this rename helps specify which kind of restoring is happening here.
@@ -110,7 +110,7 @@ async function test_previously_created_drafts_rendered(page: Page): Promise<void ); } -async function test_restore_message_draft(page: Page): Promise<void> { +async function test_restore_message_draft_via_draft_overlay(page: Page): Promise<void> { console.log("Restoring stream message draft"); await page.click("#drafts_table .message_row:not(.private-message) .restore-draft"); await wait_for_drafts_to_disappear(page); @@ -165,7 +165,7 @@ async function test_edited_draft_message(page: Page): Promise<void> { ); } -async function test_restore_private_message_draft(page: Page): Promise<void> { +async function test_restore_private_message_draft_via_draft_overlay(page: Page): Promise<void> { console.log("Restoring private message draft."); await page.click(".message_row.private-message .restore-draft"); await wait_for_drafts_to_disappear(page); @@ -248,11 +248,11 @@ async function drafts_test(page: Page): Promise<void> { await open_drafts_after_markdown_preview(page); await test_previously_created_drafts_rendered(page); - await test_restore_message_draft(page); + await test_restore_message_draft_via_draft_overlay(page); await edit_stream_message_draft(page); await test_edited_draft_message(page); - await test_restore_private_message_draft(page); + await test_restore_private_message_draft_via_draft_overlay(page); await test_delete_draft(page); await test_save_draft_by_reloading(page); }
Update quick_entry.js Add missing init_callback in constructor
@@ -13,7 +13,7 @@ frappe.ui.form.make_quick_entry = (doctype, after_insert, init_callback) => { }; frappe.ui.form.QuickEntryForm = Class.extend({ - init: function(doctype, after_insert){ + init: function(doctype, after_insert, init_callback){ this.doctype = doctype; this.after_insert = after_insert; this.init_callback = init_callback;
[output] Add support for pango markup Add a new parameter "output.markup" that allows a user to pass in a custom markup string (e.g. "pango"). Note: To make use of this, the user still has to use a Pango font, as well as use a bumblebee-status module that supports Pango output. fixes
@@ -164,6 +164,7 @@ class I3BarOutput(object): "align": self._theme.align(widget), "instance": widget.id, "name": module.id, + "markup": self._config.get("output.markup", "none"), }) def begin(self):
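A hedged sketch of what a single i3bar block looks like once pango markup is enabled (the field values here are made up; only the "markup" key is what the patch adds):

```python
block = {
    "full_text": "<span foreground='#ff0000'>CPU 42%</span>",  # pango-formatted text
    "name": "cpu",
    "instance": "cpu-0",
    "markup": "pango",  # tells i3bar to interpret the <span> tags instead of printing them
}
```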
Sets autosize=False for reports. Having reports resize all their plots whenever the window changes size is too expensive in most cases.
@@ -233,9 +233,9 @@ def _merge_template(qtys, templateFilename, outputFilename, auto_open, precision #print("DB: rendering ",key) if isinstance(val,_ws.WorkspaceTable): #supply precision argument - out = val.render("html", precision=precision, resizable=True, autosize=True) + out = val.render("html", precision=precision, resizable=True, autosize=False) elif isinstance(val,_ws.WorkspacePlot): - out = val.render("html", resizable=True, autosize=True) + out = val.render("html", resizable=True, autosize=False) else: #switchboards usually out = val.render("html")
WL: correctly check failed xwayland startup If XWayland can't be started up it raises a RuntimeError, it doesn't return None, so this needs to be handled with a catch.
@@ -241,14 +241,16 @@ class Core(base.Core, wlrq.HasListeners): self.foreign_toplevel_manager_v1 = ForeignToplevelManagerV1.create(self.display) # Set up XWayland + self._xwayland: xwayland.XWayland | None = None + try: self._xwayland = xwayland.XWayland(self.display, self.compositor, True) - if self._xwayland: + except RuntimeError: + logger.info("Failed to set up XWayland. Continuing without.") + else: os.environ["DISPLAY"] = self._xwayland.display_name or "" logger.info("Set up XWayland with DISPLAY=%s", os.environ["DISPLAY"]) self.add_listener(self._xwayland.ready_event, self._on_xwayland_ready) self.add_listener(self._xwayland.new_surface_event, self._on_xwayland_new_surface) - else: - logger.info("Failed to set up XWayland. Continuing without.") # Start self.backend.start()
Minor comment edits Minor comment edits to make clear what the default values are in the docstring.
@@ -183,8 +183,11 @@ def load_graphml(filepath, node_type=int, node_dtypes=None, edge_dtypes=None): convert node ids to this data type node_dtypes : dict of attribute name -> data type identifies additional is a numpy.dtype or Python type to cast one or more additional node attributes + defaults to {"elevation":float, "elevation_res":float, "lat":float, "lon":float, "x":float, "y":float} if None edge_dtypes : dict of attribute name -> data type - identifies additional is a numpy.dtype or Python type to cast one or more additional edge attributes + identifies additional is a numpy.dtype or Python type to cast one or more additional edge attributes. Defaults + to {"length": float, "grade": float, "grade_abs": float, "bearing": float, "speed_kph": float, + "travel_time": float} if None Returns ------- G : networkx.MultiDiGraph @@ -227,7 +230,7 @@ def _convert_node_attr_types(G, node_type, node_dtypes=None): convert node ID (osmid) to this type node_dtypes : dict of attribute name -> data type identifies additional is a numpy.dtype or Python type to cast one or more additional node attributes - defaults to {"elevation":float, "elevation_res":float, "lat":float, "lon":float, "x":float, "y":float} + defaults to {"elevation":float, "elevation_res":float, "lat":float, "lon":float, "x":float, "y":float} if None Returns ------- G : networkx.MultiDiGraph @@ -259,7 +262,7 @@ def _convert_edge_attr_types(G, node_type, edge_dtypes=None): edge_dtypes : dict of attribute name -> data type identifies additional is a numpy.dtype or Python type to cast one or more additional edge attributes. Defaults to {"length": float, "grade": float, "grade_abs": float, "bearing": float, "speed_kph": float, - "travel_time": float} + "travel_time": float} if None Returns ------- G : networkx.MultiDiGraph
Change cryptdev.mapping `immediate` map option Replace `delay_mapping` (default `True`) with `immediate` (default `False`). This is consistent with `unmapping`. See
@@ -44,7 +44,7 @@ def mapped(name, opts=None, config='/etc/crypttab', persist=True, - delay_mapping=True, + immediate=False, match_on='name'): ''' Verify that a device is mapped @@ -71,9 +71,10 @@ def mapped(name, persist Set if the map should be saved in the crypttab, Default is ``True`` - delay_mapping - Set if the device mapping should not be executed until the next boot. - Default is ``True``. + immediate + Set if the device mapping should be executed immediately. Note that + options cannot be passed through on the initial mapping. + Default is ``False``. match_on A name or list of crypttab properties on which this state should be applied. @@ -86,7 +87,7 @@ def mapped(name, 'result': True, 'comment': ''} - if not delay_mapping: + if immediate: # Get the active crypt mounts. If ours is listed already, no action is necessary. active = __salt__['cryptdev.active']() if name not in active.keys():
Problem: py-abci not upgraded Solution: Upgrade py-abci to the latest fix
@@ -84,7 +84,7 @@ install_requires = [ 'pyyaml~=3.12', 'aiohttp~=2.3', 'python-rapidjson-schema==0.1.1', - 'abci==0.4.3', + 'abci==0.4.4', 'setproctitle~=1.1.0', ] @@ -131,7 +131,7 @@ setup( ], }, install_requires=install_requires, - dependency_links=['git+https://github.com/kansi/py-abci.git@master#egg=abci-0.4.3'], + dependency_links=['git+https://github.com/kansi/py-abci.git@master#egg=abci-0.4.4'], setup_requires=['pytest-runner'], tests_require=tests_require, extras_require={
Update TODO with description of [skip ci]
todo ==== -* move Model classmethods (select/insert/update/delete) to meta-class? -* better schema-manager support for sequences (and views?) -* additional examples in example dir +* GitHub #1991 - two left-outer joins, the intervening model should probably + be set to NULL rather than an empty model, when there is no data present.
MAINT: fixed wording Fixed spelling and wording of docstrings.
@@ -519,7 +519,7 @@ class TestNetCDF4Integration(object): # Test the filtered output for mkey in mdict.keys(): if mkey not in fdict.keys(): - # Determine of the data is NaN + # Determine if the data is NaN try: is_nan = np.isnan(mdict[mkey]) except TypeError: @@ -552,7 +552,7 @@ class TestNetCDF4Integration(object): @pytest.mark.parametrize('missing', [True, False]) def test_add_netcdf4_standards_to_meta(self, caplog, missing): - """Test for SPDF ISTP/IACG NetCDF standards after update. + """Test for simplified SPDF ISTP/IACG NetCDF standards after update. Parameters ----------
Fix ModelFeatureConfig bug Summary: Didn't import all the subclasses prior to filling union
@@ -24,6 +24,14 @@ from reagent.workflow.result_registries import ( from reagent.workflow.tagged_union import TaggedUnion # noqa F401 +try: + from reagent.fb.models.model_feature_config_builder import ( # noqa + ConfigeratorModelFeatureConfigProvider, + ) +except ImportError: + pass + + @dataclass class Dataset: parquet_url: str
fix: Dereference symbolic links when creating tar file Dereference all symbolic links when packing source code and dependencies. Symbolic links are unlikely to have any meaning once deployed in the Docker instance.
@@ -331,7 +331,7 @@ def create_tar_file(source_files, target=None): else: _, filename = tempfile.mkstemp() - with tarfile.open(filename, mode="w:gz") as t: + with tarfile.open(filename, mode="w:gz", dereference=True) as t: for sf in source_files: # Add all files from the directory into the root of the directory structure of the tar t.add(sf, arcname=os.path.basename(sf))
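A minimal sketch of the effect of dereference=True, using hypothetical paths: the archive stores the files the symlinks point at rather than the links themselves, so it stays usable inside a Docker image where the link targets don't exist.

```python
import tarfile

with tarfile.open("sourcedir.tar.gz", mode="w:gz", dereference=True) as t:
    # Symlinked files under my_source_dir are stored as regular files.
    t.add("my_source_dir", arcname="my_source_dir")  # hypothetical directory
```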
raise exception when trying to create CompoundType with nested structured data type arrays
@@ -4795,11 +4795,12 @@ def _set_alignment(dt): for name in names: fmt = dt.fields[name][0] if fmt.kind == 'V': - if fmt.shape == () or fmt.subdtype[0].str[1] == 'V': - # nested scalar or array structured type + if fmt.shape == (): dtx = _set_alignment(dt.fields[name][0]) else: - # primitive data type + if fmt.subdtype[0].kind == 'V': # structured dtype + raise TypeError('nested structured dtype arrays not supported') + else: dtx = dt.fields[name][0] else: # primitive data type @@ -4851,12 +4852,13 @@ cdef _def_compound(grp, object dt, object dtype_name): nested_namstring,\ offset, xtype_tmp) _ensure_nc_success(ierr) - else: # array compound element + else: # nested array compound element + # the following does not work, disable for now... ndims = len(format.shape) dim_sizes = <int *>malloc(sizeof(int) * ndims) for n from 0 <= n < ndims: dim_sizes[n] = format.shape[n] - if format.subdtype[0].str[1] != 'V': # primitive type. + if format.subdtype[0].kind != 'V': # primitive type. try: xtype_tmp = _nptonctype[format.subdtype[0].str[1:]] except KeyError: @@ -4865,15 +4867,17 @@ cdef _def_compound(grp, object dt, object dtype_name): offset,xtype_tmp,ndims,dim_sizes) _ensure_nc_success(ierr) else: # nested array compound type. - # find this compound type in this group or it's parents. - xtype_tmp = _find_cmptype(grp, format.subdtype[0]) - bytestr = _strencode(name) - nested_namstring = bytestr - ierr = nc_insert_array_compound(grp._grpid,xtype,\ - nested_namstring,\ - offset,xtype_tmp,\ - ndims,dim_sizes) - _ensure_nc_success(ierr) + raise TypeError('nested structured dtype arrays not supported') + # this code is untested and probably does not work + # # find this compound type in this group or it's parents. + # xtype_tmp = _find_cmptype(grp, format.subdtype[0]) + # bytestr = _strencode(name) + # nested_namstring = bytestr + # ierr = nc_insert_array_compound(grp._grpid,xtype,\ + # nested_namstring,\ + # offset,xtype_tmp,\ + # ndims,dim_sizes) + # _ensure_nc_success(ierr) free(dim_sizes) return xtype
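To illustrate what "nested structured data type arrays" means here, a sketch of the kind of numpy dtype that now triggers the TypeError (the field names are made up):

```python
import numpy as np

inner = np.dtype([("x", "f8"), ("y", "f8")])   # an ordinary structured dtype
nested = np.dtype([("points", inner, (3,))])   # field 'points' is an *array* of it
# Creating a CompoundType from a dtype shaped like `nested` now raises
# TypeError('nested structured dtype arrays not supported').
```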
ebuild.domain: freeze jitted forced_use and stable_forced_use attrs To make sure they're immutable.
@@ -303,17 +303,19 @@ class domain(config_domain): @klass.jit_attr_none def forced_use(self): - c = ChunkedDataDict() - c.merge(getattr(self.profile, 'forced_use')) - c.add_bare_global((), (self.arch,)) - return c + use = ChunkedDataDict() + use.merge(getattr(self.profile, 'forced_use')) + use.add_bare_global((), (self.arch,)) + use.freeze() + return use @klass.jit_attr_none def stable_forced_use(self): - c = ChunkedDataDict() - c.merge(getattr(self.profile, 'stable_forced_use')) - c.add_bare_global((), (self.arch,)) - return c + use = ChunkedDataDict() + use.merge(getattr(self.profile, 'stable_forced_use')) + use.add_bare_global((), (self.arch,)) + use.freeze() + return use @load_property("package.mask", package_masks) def pkg_masks(self, data):
[mysql] fix version comparison operator Compare tuple against tuple, not string.
@@ -504,7 +504,7 @@ class MySql(AgentCheck): self.log.debug("Collecting Extra Status Metrics") metrics.update(OPTIONAL_STATUS_VARS) - if self._version_compatible(db, host, "5.6.6"): + if self._version_compatible(db, host, (5, 6, 6)): metrics.update(OPTIONAL_STATUS_VARS_5_6_6) if _is_affirmative(options.get('galera_cluster', False)): @@ -513,7 +513,7 @@ class MySql(AgentCheck): metrics.update(GALERA_VARS) performance_schema_enabled = self._get_variable_enabled(results, 'performance_schema') - above_560 = self._version_compatible(db, host, "5.6.0") + above_560 = self._version_compatible(db, host, (5, 6, 0)) if _is_affirmative(options.get('extra_performance_metrics', False)) and above_560 and \ performance_schema_enabled: # report avg query response time per schema to Datadog @@ -541,7 +541,7 @@ class MySql(AgentCheck): # MySQL 5.7.x might not have 'Slave_running'. See: https://bugs.mysql.com/bug.php?id=78544 # look at replica vars collected at the top of if-block - if self._version_compatible(db, host, "5.7.0"): + if self._version_compatible(db, host, (5, 7, 0)): slave_io_running = self._collect_string('Slave_IO_Running', results) slave_sql_running = self._collect_string('Slave_SQL_Running', results) if slave_io_running:
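The reason for comparing tuples rather than strings, as a quick sketch: string comparison is lexicographic and gives the wrong answer as soon as a version component reaches two digits.

```python
# Lexicographic string comparison: '1' < '6' decides the result, which is wrong.
print("5.10.0" >= "5.6.6")      # False
# Element-wise tuple comparison is numeric and correct.
print((5, 10, 0) >= (5, 6, 6))  # True
```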
README: Add Pyperclip dependency info in the "Configuration" section. This commit adds a "Copy to clipboard" sub-section in the Configuration section of the README. It explains the various utility packages required for copy/pasting operations via pyperclip on different OSes.
@@ -154,6 +154,26 @@ echo 'export ZT_NOTIFICATION_SOUND=Ping' >> ~/.zshenv source ~/.zshenv ``` +### Copy to clipboard + +Zulip Terminal allows users to copy certain texts to the clipboard via a Python module, [`Pyperclip`](https://pypi.org/project/pyperclip/). This module makes use of various system packages which may or may not come with the OS. +The "Copy to clipboard" feature is currently only available for copying Stream email, from the [Stream information popup](docs/hotkeys.md#stream-list-actions). + +#### Linux + +On Linux, this module makes use of `xclip` or `xsel` commands, which should already come with the OS. If none of these commands are installed on your system, then install any ONE using: +``` +sudo apt-get install xclip [Recommended] +``` +OR +``` +sudo apt-get install xsel +``` + +#### OSX and WSL + +No additional package is required to enable copying to clipboard. + ## Contributor Guidelines Zulip Terminal is being built by the awesome [Zulip](https://zulip.com/team) community.
Clarify cryptdev.rm_crypttab documentation See
@@ -136,7 +136,8 @@ def crypttab(config='/etc/crypttab'): def rm_crypttab(name, device, config='/etc/crypttab'): ''' - Remove the device point from the crypttab + Remove the device point from the crypttab. If the described entry does not + exist, nothing is changed, but the command succeeds. CLI Example:
Cleanup test_blueprint.py to use test fixtures Modify several tests to use the app and client test fixtures.
@@ -202,9 +202,7 @@ def test_templates_and_static(test_apps): assert flask.render_template('nested/nested.txt') == 'I\'m nested' -def test_default_static_cache_timeout(): - app = flask.Flask(__name__) - +def test_default_static_cache_timeout(app): class MyBlueprint(flask.Blueprint): def get_send_file_max_age(self, filename): return 100 @@ -660,8 +658,7 @@ def test_add_template_test_with_name_and_template(app, client): assert b'Success!' in rv.data -def test_context_processing(): - app = flask.Flask(__name__) +def test_context_processing(app, client): answer_bp = flask.Blueprint('answer_bp', __name__) template_string = lambda: flask.render_template_string( @@ -691,10 +688,8 @@ def test_context_processing(): # Register the blueprint app.register_blueprint(answer_bp) - c = app.test_client() - - app_page_bytes = c.get('/').data - answer_page_bytes = c.get('/bp').data + app_page_bytes = client.get('/').data + answer_page_bytes = client.get('/bp').data assert b'43' in app_page_bytes assert b'42' not in app_page_bytes @@ -703,8 +698,7 @@ def test_context_processing(): assert b'43' in answer_page_bytes -def test_template_global(): - app = flask.Flask(__name__) +def test_template_global(app): bp = flask.Blueprint('bp', __name__) @bp.app_template_global() @@ -724,8 +718,7 @@ def test_template_global(): rv = flask.render_template_string('{{ get_answer() }}') assert rv == '42' -def test_request_processing(): - app = flask.Flask(__name__) +def test_request_processing(app, client): bp = flask.Blueprint('bp', __name__) evts = [] @bp.before_request @@ -748,12 +741,11 @@ def test_request_processing(): app.register_blueprint(bp) assert evts == [] - rv = app.test_client().get('/bp') + rv = client.get('/bp') assert rv.data == b'request|after' assert evts == ['before', 'after', 'teardown'] -def test_app_request_processing(): - app = flask.Flask(__name__) +def test_app_request_processing(app, client): bp = flask.Blueprint('bp', __name__) evts = [] @@ -783,11 +775,11 @@ def test_app_request_processing(): assert evts == [] # first request - resp = app.test_client().get('/').data + resp = client.get('/').data assert resp == b'request|after' assert evts == ['first', 'before', 'after', 'teardown'] # second request - resp = app.test_client().get('/').data + resp = client.get('/').data assert resp == b'request|after' assert evts == ['first'] + ['before', 'after', 'teardown'] * 2
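For context, a hedged sketch of what the `app` and `client` fixtures used above typically look like; the real definitions live in the project's conftest.py, so treat the bodies here as assumptions:

```python
import flask
import pytest

@pytest.fixture
def app():
    # A bare application instance for each test.
    return flask.Flask(__name__)

@pytest.fixture
def client(app):
    # A test client bound to that application.
    return app.test_client()
```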
Corrected typo in rules.rst documentation Corrected typo from 'safed' to 'saved' in section *Defining groups for execution* in the rules documentation.
@@ -985,7 +985,7 @@ Defining groups for execution From Snakemake 5.0 on, it is possible to assign rules to groups. Such groups will be executed together in **cluster** or **cloud mode**, as a so-called **group job**, i.e., all jobs of a particular group will be submitted at once, to the same computing node. -By this, queueing and execution time can be safed, in particular if one or several short-running rules are involved. +By this, queueing and execution time can be saved, in particular if one or several short-running rules are involved. When executing locally, group definitions are ignored. Groups can be defined via the ``group`` keyword, e.g.,
get proxies: logging and performance improvement Improve logging and performance of the get_cluster_proxies function: change log level from info to debug, save loaded proxy configuration to the config object. related to
@@ -33,9 +33,12 @@ def get_cluster_proxies(): http_proxy = proxy_obj.get("spec", {}).get("httpProxy", "") https_proxy = proxy_obj.get("spec", {}).get("httpsProxy", "") no_proxy = proxy_obj.get("status", {}).get("noProxy", "") - logger.info("Using http_proxy: '%s'", http_proxy) - logger.info("Using https_proxy: '%s'", https_proxy) - logger.info("Using no_proxy: '%s'", no_proxy) + config.ENV_DATA["http_proxy"] = http_proxy + config.ENV_DATA["https_proxy"] = https_proxy + config.ENV_DATA["no_proxy"] = no_proxy + logger.debug("Using http_proxy: '%s'", http_proxy) + logger.debug("Using https_proxy: '%s'", https_proxy) + logger.debug("Using no_proxy: '%s'", no_proxy) return http_proxy, https_proxy, no_proxy
op-guide: update jmespath Via:
@@ -701,3 +701,11 @@ Python 2.7.5 (default, Nov 6 2016, 00:28:07) Type "help", "copyright", "credits" or "license" for more information. >>> import jmespath ``` + +If `import jmespath` still reports an error after the `python2-jmespath` package is installed, install the Python `jmespath` module using pip: + +``` +$ sudo yum -y install epel-release +$ sudo yum -y install python-pip +$ sudo pip install jmespath +``` \ No newline at end of file
Fixed moving item between mailboxes using impersonation Will now use delegation in this specific case.
@@ -96,7 +96,7 @@ script: logging.basicConfig(stream=log_stream, level=logging.DEBUG) from exchangelib.errors import ErrorItemNotFound, ResponseMessageError, TransportError, \ - ErrorFolderNotFound + ErrorFolderNotFound, ErrorToFolderNotFound from exchangelib.items import Item, Message from exchangelib.services import EWSService, EWSAccountService from exchangelib.util import create_element, add_xml_child @@ -844,7 +844,12 @@ script: destination_account = get_account(destination_mailbox or ACCOUNT_EMAIL) destination_folder = get_folder_by_path(destination_account, destination_folder_path) item = get_item_from_mailbox(source_account, item_id) + try: + source_account.bulk_move(ids=[item], to_folder=destination_folder) + except ErrorToFolderNotFound: + source_account = get_account(source_mailbox or ACCOUNT_EMAIL, access_type=DELEGATE) source_account.bulk_move(ids=[item], to_folder=destination_folder) + move_result = { MOVED_TO_MAILBOX: destination_mailbox, MOVED_TO_FOLDER: destination_folder_path, @@ -1753,3 +1758,4 @@ script: description: Move an item from one mailbox to another. dockerimage: demisto/py-ews:2.0 isfetch: true +releaseNotes: "Fixed error when moving and item between mailboxes using impersonation." \ No newline at end of file
move mixer offset correction setting to ro_lutman Half of the mixer corrections are already known by the lutman; it's only natural that the lutman controls all of it. Should also move the calibration routines here.
@@ -26,8 +26,16 @@ class Base_RO_LutMan(Base_LutMan): self.add_parameter('mixer_phi', vals=vals.Numbers(), unit='deg', parameter_class=ManualParameter, initial_value=0.0) + self.add_parameter('mixer_offs_I', unit='V', + parameter_class=ManualParameter, initial_value=0) + self.add_parameter('mixer_offs_Q', unit='V', + parameter_class=ManualParameter, initial_value=0) comb_msg = ( - 'Resonator combinations specifies blablab needs to be format like bla example ablabj ') + 'Resonator combinations specifies which pulses are uploaded to' + 'the device. Given as a list of lists:' + 'e.g. [[0], [1], [0, 1]] specifies that pulses for readout' + 'of resonator 0, 1, and a pulse for mux readout on both should be' + 'uploaded.') self.add_parameter('resonator_combinations', vals=vals.Lists(), parameter_class=ManualParameter, docstring=comb_msg, @@ -237,6 +245,11 @@ class UHFQC_RO_LutMan(Base_RO_LutMan): self.AWG.get_instr().awg_sequence_acquisition_and_DIO_triggered_pulse( I_waves, Q_waves, cases, self.acquisition_delay(), timeout=timeout) + def set_mixer_offsets(self): + UHFQC = self.AWG.get_instr() + UHFQC.sigouts_0_offset(self.mixer_offs_I()) + UHFQC.sigouts_1_offset(self.mixer_offs_Q()) + def load_waveforms_onto_AWG_lookuptable( self, regenerate_waveforms: bool=True, stop_start: bool = True):
Fix ndb test to use ipv6 localhost instead of ipv4. ipv6 setup stolen from https://groups.google.com/a/google.com/forum/#!msg/python-users/xwO_0_LK_oM/mnBEVBbfCgAJ All ndb tests pass with and without the --run_under=//tools/test:forge_ipv6_only flag.
@@ -573,8 +573,8 @@ class ContextTestMixin(object): self.assertEqual(bar.name, 'updated-bar') def start_test_server(self): - host = '127.0.0.1' - s = socket.socket() + host = 'localhost' + s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) for i in range(10): port = random.randrange(32768, 60000)
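A minimal sketch of the IPv6 listener this test now builds (the surrounding retry loop is omitted): socket.socket() with no arguments defaults to AF_INET, which fails on IPv6-only hosts, so the family has to be given explicitly.

```python
import socket

s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("localhost", 0))   # 'localhost' resolves to ::1 for AF_INET6; port 0 = any free port
s.listen(1)
print("listening on", s.getsockname()[:2])
s.close()
```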
Add copy logic for LibTorch to avoid issues on Windows Summary: This should work both on VS and Ninja. Pull Request resolved:
@@ -39,6 +39,18 @@ this: target_link_libraries(example-app "${TORCH_LIBRARIES}") set_property(TARGET example-app PROPERTY CXX_STANDARD 11) + # The following code block is suggested to be used on Windows. + # According to https://github.com/pytorch/pytorch/issues/25457, + # the DLLs need to be copied to avoid memory errors. + if (MSVC) + file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll") + add_custom_command(TARGET example-app + POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${TORCH_DLLS} + $<TARGET_FILE_DIR:example-app>) + endif (MSVC) + The implementation of our example will simply create a new `torch::Tensor` and print it:
Support newer Python graphviz module Python graphviz 0.19 or later has _repr_image_svg_xml() instead of _repr_svg_().
@@ -453,8 +453,16 @@ def block_to_svg(block=None, split_state=True, maintain_arg_order=False): """ try: from graphviz import Source - return Source(block_to_graphviz_string(block, split_state=split_state, - maintain_arg_order=maintain_arg_order))._repr_svg_() + src = Source(block_to_graphviz_string(block, split_state=split_state, + maintain_arg_order=maintain_arg_order)) + try: + svg = src._repr_image_svg_xml() + except AttributeError: + # py-graphviz 0.18.3 or earlier + return src._repr_svg_() + else: + # py-graphviz 0.19 or later + return svg except ImportError: raise PyrtlError('need graphviz installed (try "pip install graphviz")')
fix dtype of SwapInflateTake with empty results The `evaluable.SwapInflateTake` evaluable generates two `list` objects with indices, `newtake` and `newinflate`, and returns them as `numpy.array(newtake)` and `numpy.array(newinflate)`. If the lists are empty, numpy assumes the dtype is `float`, which is wrong in this case. This patch forces the dtype to `int`.
@@ -2644,7 +2644,7 @@ class SwapInflateTake(Evaluable): for j in [subinflate[k]] if uniqueinflate else numpy.equal(inflateidx.ravel(), n).nonzero()[0]: newinflate.append(i) newtake.append(j) - return numpy.array(newtake), numpy.array(newinflate), numpy.array(len(newtake)) + return numpy.array(newtake, dtype=int), numpy.array(newinflate, dtype=int), numpy.array(len(newtake)) class Diagonalize(Array):
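The underlying numpy behaviour, as a short sketch: an empty list produces a float64 array unless a dtype is forced, and float arrays cannot be used as indices.

```python
import numpy as np

print(np.array([]).dtype)             # float64 -- numpy's default for empty input
print(np.array([], dtype=int).dtype)  # an integer dtype, safe for indexing
arr = np.arange(5)
print(arr[np.array([], dtype=int)])   # [] -- works; a float64 index array would raise
```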
Fix x11 set wallpaper bug The `set_wallpaper` function can result in an unhandled `ConnectionException` when retrieving the root pixmap. This PR catches that exception and ensures a new pixmap is generated. Fixes
@@ -698,9 +698,13 @@ class Painter: width = max((win.x + win.width for win in root_windows)) height = max((win.y + win.height for win in root_windows)) + try: root_pixmap = self.default_screen.root.get_property( "_XROOTPMAP_ID", xcffib.xproto.Atom.PIXMAP, int ) + except xcffib.ConnectionException: + root_pixmap = None + if not root_pixmap: root_pixmap = self.default_screen.root.get_property( "ESETROOT_PMAP_ID", xcffib.xproto.Atom.PIXMAP, int
Add Google PAIR-code Facets [Google PAIR-code Facets](https://github.com/pair-code/facets)
@@ -331,6 +331,9 @@ RUN pip install --upgrade mpld3 && \ pip install git+https://github.com/dvaida/hallucinate.git && \ pip install scikit-surprise && \ pip install pymongo && \ + # Add google PAIR-code Facets + cd /opt/ && git clone https://github.com/PAIR-code/facets && cd facets/ && jupyter nbextension install facets-dist/ --user && \ + export PYTHONPATH=$PYTHONPATH:/opt/facets/facets_overview/python/ && \ ##### ^^^^ Add new contributions above here # clean up pip cache rm -rf /root/.cache/pip/* && \
Remove wrong commit "Fix course scraping issue" This reverts commit
@@ -8,7 +8,6 @@ from functools import partial BASE_COURSE_URL = 'https://my.uq.edu.au/programs-courses/course.html?course_code=' BASE_ASSESSMENT_URL = 'https://www.courses.uq.edu.au/student_section_report.php?report=assessment&profileIds=' # noqa BASE_CALENDAR_URL = 'http://www.uq.edu.au/events/calendar_view.php?category_id=16&year=' -SEMESTER_CODE = '&offer=53544c554332494e' # appended to BASE_COURSE_URL to highlight the current semester class DateSyntaxException(Exception): @@ -58,18 +57,17 @@ def get_course_profile_url(course_name): ''' Returns the URL to the latest course profile for the given course. ''' - course_url = BASE_COURSE_URL + course_name + SEMESTER_CODE + course_url = BASE_COURSE_URL + course_name http_response = requests.get(course_url) if http_response.status_code != requests.codes.ok: raise HttpException(course_url, http_response.status_code) html = BeautifulSoup(http_response.content, 'html.parser') if html.find(id='course-notfound'): raise CourseNotFoundException(course_name) - profile = html.find('tr', class_='current') - url = profile.find('a', class_='profile-available') - if url is None: + profile = html.find('a', class_='profile-available') + if profile is None: raise ProfileNotFoundException(course_name) - return url.get('href') + return profile.get('href') def get_course_profile_id(course_name):
don't anti-alias box-type graph widgets The "box" graph type is all square shapes, like a bar chart, with no curves, so anti-aliasing makes it look worse instead of giving the sharply defined edges that align with the pixels anyway.
@@ -75,6 +75,10 @@ class _Graph(base._Widget): self.oldtime = time.time() self.lag_cycles = 0 + def _configure(self, qtile, bar): + super()._configure(qtile, bar) + self.drawer.ctx.set_antialias(cairocffi.ANTIALIAS_NONE) + def timer_setup(self): self.timeout_add(self.frequency, self.update)