Columns: message (string, lengths 13 to 484), diff (string, lengths 38 to 4.63k)
Add evm tx to tx_perf

On my macbook pro 2.9 GHz Intel Core i7

(venv) zgw@mac:~/projects/pyquarkchain/quarkchain/experimental$ python tx_perf.py --evm=true
Creating 10 identities
Creating 5000 transactions...
Creations PS: 4087.09
Verifying transactions
Verifications PS: 4222.17

(venv) zgw@mac:~/projects/pyquarkchain/quarkchain/experimental$ python tx_perf.py
Creating 10 identities
Creating 5000 transactions...
Creations PS: 5875.79
Verifying transactions
Verifications PS: 9028.78
from quarkchain.tests.test_utils import create_random_test_transaction from quarkchain.core import Identity, Address +from quarkchain.evm.transactions import Transaction as EvmTransaction import argparse import random import time @@ -48,13 +49,64 @@ def test_perf(): print("Verifications PS: %.2f" % (N / duration)) +def test_perf_evm(): + N = 5000 + IDN = 10 + print("Creating %d identities" % IDN) + idList = [] + for i in range(IDN): + idList.append(Identity.createRandomIdentity()) + + accList = [] + for i in range(IDN): + accList.append(Address.createFromIdentity(idList[i])) + + print("Creating %d transactions..." % N) + startTime = time.time() + txList = [] + fromList = [] + for i in range(N): + fromId = idList[random.randint(0, IDN - 1)] + toAddr = accList[random.randint(0, IDN - 1)] + evmTx = EvmTransaction( + branchValue=1, + nonce=0, + gasprice=1, + startgas=2, + to=toAddr.recipient, + value=3, + data=b'', + withdrawSign=1, + withdraw=0, + withdrawTo=b'') + evmTx.sign( + key=fromId.getKey(), + network_id=1) + txList.append(evmTx) + fromList.append(fromId.getRecipient()) + duration = time.time() - startTime + print("Creations PS: %.2f" % (N / duration)) + + print("Verifying transactions") + startTime = time.time() + for i in range(N): + txList[i]._sender = None + assert(txList[i].sender == fromList[i]) + duration = time.time() - startTime + print("Verifications PS: %.2f" % (N / duration)) + + def main(): parser = argparse.ArgumentParser() parser.add_argument("--profile", default=False) + parser.add_argument("--evm", default=False) args = parser.parse_args() if args.profile: profile.run('test_perf()') + else: + if args.evm: + test_perf_evm() else: test_perf()
switch-to-containers: do not fail when stopping the ceph-mgr daemon If we are working with a jewel cluster, ceph-mgr does not exist and this makes the playbook fail.
become: true pre_tasks: + # failed_when: false is here because if we're + # working with a jewel cluster then ceph mgr + # will not exist - name: stop non-containerized ceph mgr(s) service: name: "ceph-mgr@{{ ansible_hostname }}" state: stopped enabled: no + failed_when: false - set_fact: ceph_uid: 64045
Make URLs test insensitive to parameter order The order is generally based on the order in which they come out of a dictionary which varies between Python versions and makes our tests brittle.
# edited by hand! from collections import defaultdict +from urllib import parse from django.contrib.humanize.templatetags.humanize import intcomma from django.core.management import call_command @@ -62,9 +63,9 @@ class MeasuresTests(SeleniumTestCase): element = base_element.find_element_by_css_selector(css_selector) a_element = element.find_element_by_tag_name("a") self.assertEqual(a_element.text, exp_text) - self.assertEqual( - a_element.get_attribute("href"), self.live_server_url + exp_path - ) + href = _normalize_url(a_element.get_attribute("href")) + expected_href = _normalize_url(self.live_server_url + exp_path) + self.assertEqual(href, expected_href) def _verify_num_elements(self, base_element, css_selector, exp_num): self.assertEqual( @@ -2009,3 +2010,20 @@ def _get_cost_savings(measure_values, rollup_by=None, target_percentile=50): if mean_percentile > target_percentile: total_savings += sum(all_savings[rollup_id]) return total_savings + + +def _normalize_url(url): + """ + Return the URL with query parameters (including query parameters set in the + fragment) in lexical order by key + """ + parsed = parse.urlparse(url) + if parsed.query: + parsed_query = parse.parse_qsl(parsed.query) + query = parse.urlencode(sorted(parsed_query)) + parsed = parsed._replace(query=query) + if "=" in parsed.fragment and "&" in parsed.fragment: + parsed_fragment = parse.parse_qsl(parsed.fragment) + fragment = parse.urlencode(sorted(parsed_fragment)) + parsed = parsed._replace(fragment=fragment) + return parse.urlunparse(parsed)
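A minimal standalone sketch of the same normalization idea, using only the standard library (the example URL is hypothetical, not from the test suite): two URLs that differ only in query-parameter order normalize to the same string.

from urllib import parse

def normalize_url(url):
    # Sort query parameters by key so dictionary-ordering differences
    # between Python versions no longer matter.
    parsed = parse.urlparse(url)
    if parsed.query:
        query = parse.urlencode(sorted(parse.parse_qsl(parsed.query)))
        parsed = parsed._replace(query=query)
    return parse.urlunparse(parsed)

a = normalize_url("http://example.com/measure/?org=foo&tags=bar")
b = normalize_url("http://example.com/measure/?tags=bar&org=foo")
assert a == b  # both become .../measure/?org=foo&tags=bar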
Diagram tools should also work for LinePresentation Like they do for DiagramLine.
@@ -23,6 +23,7 @@ from gaphas.tool import ( from gi.repository import Gdk from gi.repository import Gtk +from gaphor.UML.presentation import LinePresentation from gaphor.core import Transaction, transactional from gaphor.diagram.diagramline import DiagramLine from gaphor.diagram.elementitem import ElementItem @@ -39,7 +40,7 @@ OUT_CURSOR_TYPE = Gdk.CursorType.CROSSHAIR log = logging.getLogger(__name__) [email protected]_type(DiagramLine) [email protected]_type(DiagramLine, LinePresentation) class DiagramItemConnector(ItemConnector): """ Handle Tool (acts on item handles) that uses the IConnect protocol @@ -96,7 +97,7 @@ class DiagramItemConnector(ItemConnector): @transactional def disconnect(self): - super(DiagramItemConnector, self).disconnect() + super().disconnect() class DisconnectHandle:
Move Git SHA defining at end of Dockerfile to re-enable caching Defining SHA at the beginning of build breaks caching, so this should be avoided.
FROM python:3.8-slim -# Define Git SHA build argument -ARG git_sha="development" - # Set pip to have cleaner logs and no saved cache ENV PIP_NO_CACHE_DIR=false \ PIPENV_HIDE_EMOJIS=1 \ PIPENV_IGNORE_VIRTUALENVS=1 \ - PIPENV_NOSPIN=1 \ - GIT_SHA=$git_sha + PIPENV_NOSPIN=1 RUN apt-get -y update \ && apt-get install -y \ @@ -25,6 +21,12 @@ WORKDIR /bot COPY Pipfile* ./ RUN pipenv install --system --deploy +# Define Git SHA build argument +ARG git_sha="development" + +# Set Git SHA environment variable here to enable caching +ENV GIT_SHA=$git_sha + # Copy the source code in last to optimize rebuilding the image COPY . .
Add logic to process the case where the consensus of a leader complaint fails even though it gets all votes of reps. Increase the round if the consensus of a leader complaint fails even though it gets all votes of reps.
@@ -727,6 +727,11 @@ class BlockManager: if elected_leader: self.__channel_service.reset_leader(elected_leader, complained=True) self.__channel_service.reset_leader_complain_timer() + elif elected_leader is False: + util.logger.warning(f"Fail to elect the next leader on {self.epoch.round} round.") + # In this case, a new leader can't be elected by the consensus of leader complaint. + # That's why the leader of current `round` is set to the next `round` again. + self.epoch.new_round(self.epoch.leader_id) elif self.epoch.height < block_height: self.__channel_service.state_machine.block_sync()
Initialize telegram variable Prevent errors when telegram is not set.
@@ -296,12 +296,14 @@ class Notification: tweeted = False pushed = False + telegram = False if PUSHBULLET: pushed = self.pbpush() if TWITTER: tweeted = self.tweet() + if TELEGRAM: telegram = self.sendToTelegram()
Corrected the name to Median Corrected the name to Median
 {
-    "word": "Modulate",
+    "word": "Median",
     "definitions": [
         "denoting or relating to a value or quantity lying at the midpoint of a frequency distribution of observed values or quantities, such that there is an equal probability of falling above or below it",
         "situated in the middle, especially of the body"
Fixed: Color loss after re-editing under certain circumstances None values in style string from old node are not considered anymore. Resolves
@@ -1190,7 +1190,8 @@ class SvgElement(object): # Fetch the part of the source dict which is interesting for colorization src_style_dict = ss.parseStyle(src_style_string) color_style_dict = {key: value for key, value in src_style_dict.items() if - key in ["fill", "stroke", "opacity", "stroke-opacity", "fill-opacity"]} + key.lower() in ["fill", "stroke", "opacity", "stroke-opacity", + "fill-opacity"] and value.lower() != "none"} # Iterate over all nodes of self._node and apply the imported color style for dest_node in self._node.getiterator():
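A minimal standalone sketch of the filtering logic on a hypothetical style dict (not taken from the project), showing that unrelated keys and "none" values are dropped while mixed-case keys are kept:

src_style_dict = {"fill": "#ff0000", "stroke": "none", "Opacity": "0.5", "font-size": "12px"}
color_style_dict = {key: value for key, value in src_style_dict.items()
                    if key.lower() in ["fill", "stroke", "opacity", "stroke-opacity", "fill-opacity"]
                    and value.lower() != "none"}
print(color_style_dict)  # {'fill': '#ff0000', 'Opacity': '0.5'}; 'stroke: none' is ignored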
Handle missing data from the API The following ensures that we handle missing data returned back from the new API. Although the new API isn't turned on in this branch, we should at least handle the case where it could be.
@@ -325,7 +325,7 @@ class AddApplicationChange(ChangeInfo): charm_url=charm, application=self.application, series=self.series, - config=self.options, + config=options, constraints=self.constraints, endpoint_bindings=self.endpoint_bindings, resources=resources, @@ -337,10 +337,10 @@ class AddApplicationChange(ChangeInfo): def __str__(self): series = "" - if self.series != "": + if self.series is not None and self.series != "": series = " on {}".format(self.series) units_info = "" - if self.num_units > 0: + if self.num_units is not None: plural = "" if self.num_units > 1: plural = "s" @@ -413,7 +413,7 @@ class AddCharmChange(ChangeInfo): def __str__(self): series = "" channel = "" - if self.series != "": + if self.series is not None and self.series != "": series = " for series {}".format(self.series) if self.channel is not None: channel = " from channel {}".format(self.channel) @@ -681,9 +681,12 @@ class CreateOfferChange(ChangeInfo): await context.model.create_offer(ep, offer_name=self.offer_name, application_name=application) def __str__(self): + endpoints = "" + if self.endpoints is not None: + endpoints = self.endpoints.join(",") return "create offer {offer_name} using {application}:{endpoints}".format(offer_name=self.offer_name, application=self.application, - endpoints=self.endpoints.join(",")) + endpoints=endpoints) class ConsumeOfferChange(ChangeInfo):
Update pi18.py fix first response (drop extra char)
@@ -168,7 +168,7 @@ class pi18(AbstractProtocol):
             return ["NAK"]
 
         # Drop ^Dxxx from first response
-        responses[0] = responses[0][4:]
+        responses[0] = responses[0][5:]
         # Remove CRC of last response
         responses[-1] = responses[-1][:-3]
         return responses
python3 compatibility for raise python3 compatibility for raise
@@ -1027,7 +1027,7 @@ class Collections(PlexObject): 'showItems': '2'} key = mode_dict.get(mode) if mode is None: - raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, mode_dict.key())) + raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, list(mode_dict.key()))) part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, key) return self._server.query(part, method=self._server._session.put) @@ -1047,7 +1047,7 @@ class Collections(PlexObject): 'alpha': '1'} key = sort_dict.get(sort) if key is None: - raise BadRequest('Unknown sort dir: %s. Options: %s' % (sort, sort_dict.keys())) + raise BadRequest('Unknown sort dir: %s. Options: %s' % (sort, list(sort_dict.keys()))) part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, key) return self._server.query(part, method=self._server._session.put)
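The reason for the change, shown in isolation: in Python 3, dict.keys() returns a view object, so interpolating it into an error message prints dict_keys([...]); wrapping it in list() gives readable options. (The first hunk also still calls mode_dict.key(), which looks like a pre-existing typo the diff leaves untouched.) A small sketch with hypothetical values:

sort_dict = {'release': 'titleSort', 'alpha': '1'}
print('Options: %s' % sort_dict.keys())        # Python 3: Options: dict_keys(['release', 'alpha'])
print('Options: %s' % list(sort_dict.keys()))  # Options: ['release', 'alpha']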
Change BlockValidationAborted to BlockValidationError This exception is raised by helper functions, and it's not their job to declare that a block should be aborted.
@@ -32,12 +32,10 @@ from sawtooth_validator.state.merkle import INIT_ROOT_KEY LOGGER = logging.getLogger(__name__) -class BlockValidationAborted(Exception): +class BlockValidationError(Exception): """ - Indication that the validation of this fork has terminated for an - expected(handled) case and that the processing should exit. + Indication that an error has occurred during block validation. """ - pass class ChainHeadUpdated(Exception): @@ -387,7 +385,7 @@ class BlockValidator(object): last block in the shorter chain. Ordered newest to oldest. Raises: - BlockValidationAborted + BlockValidationError The block is missing a predecessor. Note that normally this shouldn't happen because of the completer.""" fork_diff = [] @@ -411,7 +409,7 @@ class BlockValidator(object): # as invalid. for blk in fork_diff: blk.status = BlockStatus.Invalid - raise BlockValidationAborted() + raise BlockValidationError() return blk, fork_diff @@ -430,7 +428,7 @@ class BlockValidator(object): cur_blkw, new_blkw) for b in new_chain: b.status = BlockStatus.Invalid - raise BlockValidationAborted() + raise BlockValidationError() new_chain.append(new_blkw) try: @@ -442,7 +440,7 @@ class BlockValidator(object): new_blkw.previous_block_id) for b in new_chain: b.status = BlockStatus.Invalid - raise BlockValidationAborted() + raise BlockValidationError() cur_chain.append(cur_blkw) cur_blkw = self._block_cache[cur_blkw.previous_block_id] @@ -582,7 +580,7 @@ class BlockValidator(object): callback(commit_new_chain, result) LOGGER.info("Finished block validation of: %s", block) - except BlockValidationAborted: + except BlockValidationError: callback(False, result) return except ChainHeadUpdated:
Fix CurrentLayout widget for default layouts The widget defaults to showing layout at index 0 when it loads but if a user has set a default layout for the group, the current layout index will be different. Fixes
@@ -46,7 +46,8 @@ class CurrentLayout(base._TextBox): def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) - self.text = self.bar.screen.group.layouts[0].name + layout_id = self.bar.screen.group.current_layout + self.text = self.bar.screen.group.layouts[layout_id].name self.setup_hooks() self.add_callbacks({ @@ -110,7 +111,8 @@ class CurrentLayoutIcon(base._TextBox): def _configure(self, qtile, bar): base._TextBox._configure(self, qtile, bar) - self.text = self.bar.screen.group.layouts[0].name + layout_id = self.bar.screen.group.current_layout + self.text = self.bar.screen.group.layouts[layout_id].name self.current_layout = self.text self.icons_loaded = False self.icon_paths = []
pytorch_to_onnx.py: allow specifying multiple model paths The intended use for this is to enable the use of custom model creation modules placed in the model config directory, for cases when a model can't just be instantiated with a constructor with simple arguments.
import argparse import importlib +import os import sys from pathlib import Path @@ -48,7 +49,7 @@ def parse_args(): help='Shape of the input blob') parser.add_argument('--output-file', type=Path, required=True, help='Path to the output ONNX model') - parser.add_argument('--model-path', type=str, + parser.add_argument('--model-path', type=str, action='append', dest='model_paths', help='Path to PyTorch model\'s source code') parser.add_argument('--import-module', type=str, required=True, help='Name of module, which contains model\'s constructor') @@ -61,19 +62,20 @@ def parse_args(): return parser.parse_args() -def load_model(model_name, weights, model_path, module_name, model_params): +def load_model(model_name, weights, model_paths, module_name, model_params): """Import model and load pretrained weights""" - if model_path: - sys.path.append(model_path) + if model_paths: + sys.path.extend(model_paths) try: module = importlib.import_module(module_name) creator = getattr(module, model_name) model = creator(**model_params) except ImportError as err: - if model_path: - print('Module {} in {} doesn\'t exist. Check import path and name'.format(model_name, model_path)) + if model_paths: + print('Module {} in {} doesn\'t exist. Check import path and name'.format( + model_name, os.pathsep.join(model_paths))) else: print('Module {} doesn\'t exist. Check if it is installed'.format(model_name)) sys.exit(err) @@ -123,7 +125,7 @@ def convert_to_onnx(model, input_shape, output_file, input_names, output_names): def main(): args = parse_args() model = load_model(args.model_name, args.weights, - args.model_path, args.import_module, dict(args.model_param)) + args.model_paths, args.import_module, dict(args.model_param)) convert_to_onnx(model, args.input_shape, args.output_file, args.input_names, args.output_names)
Allow pre-release versions (dev, alpha, etc.) of documentation requirements in the docs build, so long as they meet the minimum requirements. This will be needed especially for development/testing of our own packages, such as sphinx-astropy. Fixes [skip ci]
@@ -51,7 +51,7 @@ for line in importlib_metadata.requires('astropy'):
     except importlib_metadata.PackageNotFoundError:
         missing_requirements[req_package] = req_specifier
 
-    if version not in SpecifierSet(req_specifier):
+    if version not in SpecifierSet(req_specifier, prereleases=True):
         missing_requirements[req_package] = req_specifier
 
 if missing_requirements:
Add settings to schema.yaml chatwork_proxy chatwork_proxy_login chatwork_proxy_pass dingtalk_proxy dingtalk_proxy_login dingtalk_proxy_pass
@@ -305,6 +305,9 @@ properties: ### Chatwork chatwork_apikey: {type: string} chatwork_room_id: {type: string} + chatwork_proxy: {type: string} + chatwork_proxy_login: {type: string} + chatwork_proxy_pass: {type: string} ### Command command: *arrayOfString @@ -322,6 +325,9 @@ properties: dingtalk_single_title: {type: string} dingtalk_single_url: {type: string} dingtalk_btn_orientation: {type: string} + dingtalk_proxy: {type: string} + dingtalk_proxy_login: {type: string} + dingtalk_proxy_pass: {type: string} ## Discord discord_webhook_url: {type: string}
Optimization: Slightly faster float digit checks * Avoid recalculating float digit boundary for every call, that can only be slow.
@@ -350,7 +350,7 @@ def isDebugPython(): return hasattr(sys, "gettotalrefcount") -def isPythonValidDigitValue(value): +def _getFloatDigitBoundaryValue(): if python_version < 0x270: bits_per_digit = 15 elif python_version < 0x300: @@ -358,10 +358,19 @@ def isPythonValidDigitValue(value): else: bits_per_digit = sys.int_info.bits_per_digit - boundary = (2**bits_per_digit) - 1 + return (2**bits_per_digit) - 1 + + +_float_digit_boundary = _getFloatDigitBoundaryValue() + + +def isPythonValidDigitValue(value): + """Does the given value fit into a float digit. + + Note: Digits in long objects do not use 2-complement, but a boolean sign. + """ - # Note:Digits in long objects do not use 2-complement, but a boolean sign. - return -boundary <= value <= boundary + return -_float_digit_boundary <= value <= _float_digit_boundary sizeof_clong = ctypes.sizeof(ctypes.c_long)
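A minimal sketch of the optimization on Python 3, where the boundary comes from sys.int_info.bits_per_digit (typically 30 or 15 depending on the build); computing it once at import time is the whole point:

import sys

_float_digit_boundary = (2 ** sys.int_info.bits_per_digit) - 1  # computed once, reused on every call

def is_valid_digit_value(value):
    # Digits in CPython long objects use a separate sign, not two's complement.
    return -_float_digit_boundary <= value <= _float_digit_boundary

print(is_valid_digit_value(_float_digit_boundary), is_valid_digit_value(_float_digit_boundary + 1))  # True False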
[setup] add pathlib2 to PY2 dependencies version.package_version() uses pathlib, which is a Python 3 library, so pathlib2 is required for Python 2.7.
@@ -110,6 +110,10 @@ if PY2: # ipaddr 2.1.10+ is distributed with Debian and Fedora. See T105443. dependencies.append('ipaddr>=2.1.10') + # version.package_version() uses pathlib which is a python 3 library. + # pathlib2 is required for python 2.7 + dependencies.append('pathlib2') + if (2, 7, 6) < PYTHON_VERSION < (2, 7, 9): # Python versions before 2.7.9 will cause urllib3 to trigger # InsecurePlatformWarning warnings for all HTTPS requests. By
is_visible_from: remove obsolete TODO TN:
@@ -287,10 +287,6 @@ def is_visible_from(referenced_env, base_env): Expression that will return whether an env's associated compilation unit is visible from another env's compilation unit. - TODO: This is mainly exposed on envs because the AnalysisUnit type is not - exposed in the DSL yet. We might want to change that eventually if there - are other compelling reasons to do it. - :param AbstractExpression base_env: The environment from which we want to check visibility. :param AbstractExpression referenced_env: The environment referenced
Add extra tests from upstream Add analogues of new upstream tests, with somewhat different behaviour because metadata loading is different.
@@ -2977,6 +2977,21 @@ class PhoneNumberUtilTest(TestMetadataTestCase): # Python version extra test: check with bogus region self.assertFalse(phonenumbers.is_mobile_number_portable_region("XY")) + def testGetMetadataForRegionForNonGeoEntity_shouldBeNull(self): + self.assertTrue(PhoneMetadata.metadata_for_region("001") is None) + + def testGetMetadataForRegionForUnknownRegion_shouldBeNull(self): + self.assertTrue(PhoneMetadata.metadata_for_region("ZZ") is None) + + def testGetMetadataForNonGeographicalRegionForGeoRegion_shouldBeNull(self): + self.assertTrue(PhoneMetadata.metadata_for_nongeo_region(country_code=1) is None) + + def testGetMetadataForRegionForMissingMetadata(self): + self.assertTrue(PhoneMetadata.metadata_for_region("YYZ") is None) + + def testGetMetadataForNonGeographicalRegionForMissingMetadata(self): + self.assertTrue(PhoneMetadata.metadata_for_nongeo_region("800000") is None) + def testMetadataEquality(self): # Python version extra tests for equality against other types desc1 = PhoneNumberDesc(national_number_pattern="\\d{4,8}")
Add documentation for terminate mutation in graphql Summary: Resolves Test Plan: View docs page Reviewers: yuhan, sashank
@@ -264,3 +264,28 @@ If you want to use a preset instead of defining the run config, use the `preset` } } } + +### Terminate a running pipeline + +If you want to stop execution of a pipeline that's currently running, use the `terminatePipelineExecution` mutation. The only required argument for this mutation is the ID of the run. + + mutation TerminatePipeline($runId: String!) { + terminatePipelineExecution(runId: $runId){ + __typename + ... on TerminatePipelineExecutionSuccess{ + run { + runId + } + } + ... on TerminatePipelineExecutionFailure { + message + } + ... on PipelineRunNotFoundError { + runId + } + ... on PythonError { + message + stack + } + } + }
docs: remove lfs_criterion_us_states Remove it from the default installation for now until its ported to Django 1.10.
@@ -149,7 +149,6 @@ execute following steps:
        ["lfs.criteria.models.WeightCriterion", _(u"Weight")],
        ["lfs.criteria.models.ShippingMethodCriterion", _(u"Shipping Method")],
        ["lfs.criteria.models.PaymentMethodCriterion", _(u"Payment Method")],
-       ["lfs_criterion_us_states.models.USStatesCriterion", _(u"US State")],
    ]

    LFS_ORDER_NUMBER_GENERATOR = "lfs_order_numbers.models.OrderNumberGenerator"
Update CODE_OF_CONDUCT.md Updated contact e-mail address for code of conduct issues.
@@ -55,7 +55,7 @@ further defined and clarified by project maintainers.
 ## Enforcement
 
 Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at william.usher{THATSIGN}ouce.ox.ac.uk. All
+reported by contacting the project team at wusher{THATSIGN}kth.se. All
 complaints will be reviewed and investigated and will result in a response that
 is deemed necessary and appropriate to the circumstances. The project team is
 obligated to maintain confidentiality with regard to the reporter of an incident.
Fix update method in handler It should not be an alias for assign. It should be used only for files.
# from typing import TYPE_CHECKING, Union, Iterable +from neptune.new.attributes import File from neptune.new.attributes.file_set import FileSet from neptune.new.attributes.series import FileSeries from neptune.new.attributes.series.float_series import FloatSeries @@ -23,7 +24,7 @@ from neptune.new.attributes.sets.string_set import StringSet from neptune.new.internal.utils import verify_type, is_collection, verify_collection_type, is_float, is_string, \ is_float_like, is_string_like from neptune.new.internal.utils.paths import join_paths, parse_path -from neptune.new.types.atoms.file import File +from neptune.new.types.atoms.file import File as FileVal if TYPE_CHECKING: from neptune.new.experiment import Experiment @@ -64,7 +65,16 @@ class Handler: self._experiment.define(self._path, value, wait) def upload(self, value, wait: bool = False) -> None: - self.assign(File.create_from(value), wait) + value = FileVal.create_from(value) + + with self._experiment.lock(): + attr = self._experiment.get_attribute(self._path) + if not attr: + attr = File(self._experiment, parse_path(self._path)) + attr.upload(value, wait) + self._experiment.set_attribute(self._path, attr) + else: + attr.upload(value, wait) def upload_files(self, value: Union[str, Iterable[str]], wait: bool = False) -> None: if is_collection(value): @@ -82,7 +92,7 @@ class Handler: attr.upload_files(value, wait) def log(self, - value: Union[int, float, str, File, Iterable[int], Iterable[float], Iterable[str], Iterable[File]], + value: Union[int, float, str, FileVal, Iterable[int], Iterable[float], Iterable[str], Iterable[FileVal]], step=None, timestamp=None, wait: bool = False) -> None: @@ -104,7 +114,7 @@ class Handler: attr = FloatSeries(self._experiment, parse_path(self._path)) elif is_string(first_value): attr = StringSeries(self._experiment, parse_path(self._path)) - elif isinstance(first_value, File): + elif isinstance(first_value, FileVal): attr = FileSeries(self._experiment, parse_path(self._path)) elif is_float_like(first_value): attr = FloatSeries(self._experiment, parse_path(self._path))
igw: stop tcmu-runner on iscsi purge When the iscsi purge playbook is run we stop the gw and api daemons but not tcmu-runner which I forgot on the previous PR. Fixes Red Hat BZ:
igw_purge: mode="disks" when: igw_purge_type == 'all' - - name: stop and disable rbd-target-api daemon + - name: stop and disable daemons service: - name: rbd-target-api - state: stopped - enabled: no - when: igw_purge_type == 'all' - - - name: stop and disable rbd-target-gw daemon - service: - name: rbd-target-gw + name: "{{ item }}" state: stopped enabled: no when: igw_purge_type == 'all' + with_items: + - rbd-target-api + - rbd-target-gw + - tcmu-runner - name: restart rbd-target-gw daemons service: name=rbd-target-gw state=restarted
asset store bug fix Test Plan: bk Reviewers: sandyryza, cdecarolis, schrockn
@@ -42,7 +42,7 @@ def asset_pipeline():
 
 
 def test_result_output():
     with seven.TemporaryDirectory() as tmpdir_path:
-        asset_store = default_filesystem_asset_store.configured({"base_dir": tmpdir_path})
+        asset_store = fs_asset_store.configured({"base_dir": tmpdir_path})
         pipeline_def = define_asset_pipeline(asset_store, {})
         result = execute_pipeline(pipeline_def)
lint: Check for occurrences of `.includes` except in `frontend_tests/`. Adds a custom check to js_rules in `/tools/lint/lib/custom_check.py`.
@@ -188,6 +188,9 @@ def build_custom_checkers(by_lang): 'description': 'Do not concatenate i18n strings'}, {'pattern': '\+.*i18n\.t\(.+\)', 'description': 'Do not concatenate i18n strings'}, + {'pattern': '[.]includes[(]', + 'exclude': ['frontend_tests/'], + 'description': '.includes() is incompatible with Internet Explorer. Use .indexOf() !== -1 instead.'}, {'pattern': '[.]html[(]', 'exclude_pattern': '[.]html[(]("|\'|templates|html|message.content|sub.rendered_description|i18n.t|rendered_|$|[)]|error_text|widget_elem|[$]error|[$][(]"<p>"[)])', 'exclude': ['static/js/portico', 'static/js/lightbox.js', 'static/js/ui_report.js',
modify comments of rgb and lab conversion * modify comments of rgb and lab conversion * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
@@ -17,7 +17,7 @@ def rgb_to_lab(image: torch.Tensor) -> torch.Tensor: .. image:: _static/img/rgb_to_lab.png - The image data is assumed to be in the range of :math:`[0, 1]`. Lab + The input RGB image is assumed to be in the range of :math:`[0, 1]`. Lab color is computed using the D65 illuminant and Observer 2. Args: @@ -25,7 +25,7 @@ def rgb_to_lab(image: torch.Tensor) -> torch.Tensor: Returns: Lab version of the image with shape :math:`(*, 3, H, W)`. - The L channel values are in the range 0..100. a and b are in the range -127..127. + The L channel values are in the range 0..100. a and b are in the range -128..127. Example: >>> input = torch.rand(2, 3, 4, 5) @@ -67,12 +67,16 @@ def rgb_to_lab(image: torch.Tensor) -> torch.Tensor: def lab_to_rgb(image: torch.Tensor, clip: bool = True) -> torch.Tensor: r"""Convert a Lab image to RGB. + The L channel is assumed to be in the range of :math:`[0, 100]`. + a and b channels are in the range of :math:`[-128, 127]`. + Args: image: Lab image to be converted to RGB with shape :math:`(*, 3, H, W)`. clip: Whether to apply clipping to insure output RGB values in range :math:`[0, 1]`. Returns: Lab version of the image with shape :math:`(*, 3, H, W)`. + The output RGB image are in the range of :math:`[0, 1]`. Example: >>> input = torch.rand(2, 3, 4, 5)
Fix parameter interpretation of 'with_latest_from' The following instructions are not correctly interpreted, because the result selector gets ignored: s1.with_latest_from([s2], lambda v1, v2: v1+v2) Observable.with_latest_from([s2], lambda v1, v2: v1+v2)
@@ -23,13 +23,14 @@ def with_latest_from(self, *args): elements of the sources using the specified result selector function. """ - args = list(args) if args and isinstance(args[0], list): - args = args[0] - - args.insert(0, self) + children = args[0] + result_selector = args[1] + args_ = [self] + children + [result_selector] + else: + args_ = [self] + list(args) - return Observable.with_latest_from(*args) + return Observable.with_latest_from(*args_) @extensionclassmethod(Observable) @@ -50,11 +51,12 @@ def with_latest_from(cls, *args): """ if args and isinstance(args[0], list): - args = args[0] + observables = args[0] + result_selector = args[1] else: - args = list(args) + observables = list(args[:-1]) + result_selector = args[-1] - result_selector = args.pop() NO_VALUE = object() def subscribe(observer): @@ -90,5 +92,5 @@ def with_latest_from(cls, *args): *(subscribe_child(*a) for a in enumerate(children)) ) - return CompositeDisposable(subscribe_all(*args)) + return CompositeDisposable(subscribe_all(*observables)) return AnonymousObservable(subscribe)
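The fix is purely about how the argument list is split into observables and a result selector. A standalone sketch of that parsing, independent of RxPY (placeholder strings stand in for observables):

def split_args(*args):
    # Either ([obs1, obs2, ...], selector) or (obs1, obs2, ..., selector).
    if args and isinstance(args[0], list):
        observables, result_selector = args[0], args[1]
    else:
        observables, result_selector = list(args[:-1]), args[-1]
    return observables, result_selector

selector = lambda v1, v2: v1 + v2
assert split_args(["s2"], selector) == (["s2"], selector)
assert split_args("s2", selector) == (["s2"], selector)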
Update data.rst Update csv format according to feedback
@@ -72,13 +72,18 @@ Converting CSV Format into Qlib Format ``Qlib`` has provided the script ``scripts/dump_bin.py`` to convert **any** data in CSV format into `.bin` files (``Qlib`` format) as long as they are in the correct format. -Users can download the 1 day demo china-stock data in CSV format as follows for reference to the CSV format. +Besides downloading the prepared demo data, users could download demo data directly from the Collector as follows for reference to the CSV format. +Here are some example: +for daily data: .. code-block:: bash python scripts/get_data.py csv_data_cn --target_dir ~/.qlib/csv_data/cn_data -For 1min demo, please refer to the script `here <https://github.com/microsoft/qlib/issues/434>`_. +for 1min data: + .. code-block:: bash + + python scripts/data_collector/yahoo/collector.py download_data --source_dir ~/.qlib/stock_data/source/cn_1min --region CN --start 2021-05-20 --end 2021-05-23 --delay 0.1 --interval 1min --limit_nums 10 Users can also provide their own data in CSV format. However, the CSV data **must satisfies** following criterions:
[tests] Fix site_tests.TestLogPages tests test_logpages and test_list_namespace are sometimes failing due to autoblock removal entries. Check for this case.
@@ -1353,6 +1353,7 @@ class TestLogPages(DefaultSiteTestCase, DeprecationTestCase): self.assertLessEqual(len(le), 10) for entry in le: self.assertIsInstance(entry, tuple) + if not isinstance(entry[0], int): # autoblock removal entry self.assertIsInstance(entry[0], pywikibot.Page) self.assertIsInstance(entry[1], basestring) self.assertIsInstance( @@ -1363,6 +1364,8 @@ class TestLogPages(DefaultSiteTestCase, DeprecationTestCase): """Test the deprecated site.logpages() when namespace is a list.""" le = list(self.site.logpages(namespace=[2, 3], number=10)) for entry in le: + if isinstance(entry[0], int): # autoblock removal entry + continue try: self.assertIn(entry[0].namespace(), [2, 3]) except HiddenKeyError as e:
llvm, functions/ContentAddressableMemory: Cleanup Reuse existing pointers. Use 'None' instance to zero out output memory location.
@@ -771,14 +771,9 @@ class ContentAddressableMemory(MemoryFunction): # ----------------------------- var_val_ptr = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) # Zero output + builder.store(arg_out.type.pointee(None), arg_out) out_key_ptr = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) out_val_ptr = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(1)]) - with pnlvm.helpers.array_ptr_loop(builder, out_key_ptr, "zero_key") as (b, i): - out_ptr = b.gep(out_key_ptr, [ctx.int32_ty(0), i]) - b.store(out_ptr.type.pointee(0), out_ptr) - with pnlvm.helpers.array_ptr_loop(builder, out_val_ptr, "zero_val") as (b, i): - out_ptr = b.gep(out_val_ptr, [ctx.int32_ty(0), i]) - b.store(out_ptr.type.pointee(0), out_ptr) # Check retrieval probability retr_ptr = builder.alloca(pnlvm.ir.IntType(1)) @@ -832,7 +827,7 @@ class ContentAddressableMemory(MemoryFunction): # ----------------------------- builder.store(ctx.int32_ty(0), selected_idx_ptr) with pnlvm.helpers.for_loop_zero_inc(builder, entries, "distance_loop") as (b,idx): selection_val = b.load(b.gep(selection_arg_out, [ctx.int32_ty(0), idx])) - non_zero = b.fcmp_ordered('!=', selection_val, ctx.float_ty(0)) + non_zero = b.fcmp_ordered('!=', selection_val, selection_val.type(0)) with b.if_then(non_zero): b.store(idx, selected_idx_ptr) @@ -841,8 +836,8 @@ class ContentAddressableMemory(MemoryFunction): # ----------------------------- selected_idx])) selected_val = builder.load(builder.gep(vals_ptr, [ctx.int32_ty(0), selected_idx])) - builder.store(selected_key, builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)])) - builder.store(selected_val, builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(1)])) + builder.store(selected_key, out_key_ptr) + builder.store(selected_val, out_val_ptr) # Check storage probability store_ptr = builder.alloca(pnlvm.ir.IntType(1))
[skip ci][CI][Fix] Fixing lint A linting issue was introduced in fixing this up.
@@ -59,7 +59,9 @@ def conv2d_transpose_nchw(cfg, data, kernel, stride, padding, out_dtype, output_ stride_height, stride_width = stride outpad_height, outpad_width = output_padding assert outpad_height < stride_height and outpad_width < stride_width - assert inp_channels % groups == 0, f"input channels {inp_channels} must divide group size {groups}" + assert ( + inp_channels % groups == 0 + ), f"input channels {inp_channels} must divide group size {groups}" cfg.stride = stride pad_top, pad_left, pad_bottom, pad_right = nn.get_pad_tuple( padding, (kernel_height, kernel_width) @@ -112,14 +114,14 @@ def conv2d_transpose_nchw(cfg, data, kernel, stride, padding, out_dtype, output_ data_out = te.compute( (batch, out_channels * groups, out_height, out_width), lambda b, c, h, w: te.sum( - data[ - b, c // out_channels * (inp_channels // groups) + dc, h + dh, w + dw - ].astype(out_dtype) + data[b, c // out_channels * (inp_channels // groups) + dc, h + dh, w + dw].astype( + out_dtype + ) * kernel[ c // out_channels * (inp_channels // groups) + dc, c % out_channels, kernel_height - 1 - dh, - kernel_width - 1 - dw + kernel_width - 1 - dw, ].astype(out_dtype), axis=[dc, dh, dw], ),
Remove regularization_weight from hpo_default since it belongs to Regularizer
@@ -23,8 +23,7 @@ class ComplEx(BaseModule):
     """An implementation of ComplEx [trouillon2016]_."""
 
     hpo_default = dict(
-        embedding_dim=dict(type=int, low=50, high=300, q=50),
-        regularization_weight=dict(type=float, low=0.0, high=0.1, scale='log'),
+        embedding_dim=dict(type=int, low=50, high=300, q=50)
     )
 
     loss_default = SoftplusLoss
Shortcut syntax for choices with same label & data This would allow for a convenient shortcut syntax for entering choices. If label and data are both the same, you can do ['One', 'Two', 'Three'] instead of [('One', 'One'), ('Two', 'Two'), ('Three', 'Three')]
@@ -522,7 +522,12 @@ class SelectField(SelectFieldBase): self.validate_choice = validate_choice def iter_choices(self): - for value, label in self.choices: + if isinstance(self.choices[0], (list, tuple)): + choices = self.choices + else: + choices = zip(self.choices, self.choices) + + for value, label in choices: yield (value, label, self.coerce(value) == self.data) def process_data(self, value):
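The normalization can be seen in isolation; this sketch reproduces the branch on a plain list without WTForms, so both spellings yield the same (value, label) pairs:

def normalize_choices(choices):
    # If the first entry is not already a (value, label) pair, pair each item with itself.
    if isinstance(choices[0], (list, tuple)):
        return list(choices)
    return list(zip(choices, choices))

assert normalize_choices(['One', 'Two', 'Three']) == [('One', 'One'), ('Two', 'Two'), ('Three', 'Three')]
assert normalize_choices([('a', 'Label A')]) == [('a', 'Label A')]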
Correctly handle invalid titles in harvest_template.py Neither of those constructors raises that exception.
@@ -182,14 +182,15 @@ class HarvestRobot(WikidataBot): def _template_link_target(self, item, link_text): link = pywikibot.Link(link_text) - try: linked_page = pywikibot.Page(link) + try: + exists = linked_page.exists() except pywikibot.exceptions.InvalidTitle: - pywikibot.error('%s is not a valid title so it cannot be linked. ' + pywikibot.error('"%s" is not a valid title so it cannot be linked.' ' Skipping.' % link_text) return None - if not linked_page.exists(): + if not exists: pywikibot.output('%s does not exist so it cannot be linked. ' 'Skipping.' % (linked_page)) return None
Fix open file not working when using open command and double click files * When using the open command or double clicking a file on macOS, a file URL with a trailing slash is sent, which causes opening the file to fail. Removing the trailing slash solves the problem.
@@ -241,6 +241,7 @@ class DocumentApp(App): fileURL (str): The URL/path to the file to add as a document. """ # Convert a cocoa fileURL to a file path. + fileURL = fileURL.strip('/') path = unquote(urlparse(fileURL).path) extension = os.path.splitext(path)[1][1:]
Link directly to 7.0.0 release notes for migration guide The previous link is to the releases page, so requires a bit of hunting to find the actual migration guide. This PR links directly to the release (7.0.0) which lists the main breaking changes for Thumbor 7 and how to migrate.
@@ -32,7 +32,7 @@ more details):: .. warning:: Release 7.0.0 introduces a major breaking change due to the migration to python 3 and the modernization of our codebase. Please read the - `release notes <https://github.com/thumbor/thumbor/releases>`_ + `release notes <https://github.com/thumbor/thumbor/releases/tag/7.0.0>`_ for details on how to upgrade. Contents
Update translation *.po/*.mo files Regenerate the django.po/django.mo files and translate them. Some of the old translations weren't showing up.
Binary files a/taggit/locale/ar/LC_MESSAGES/django.mo and b/taggit/locale/ar/LC_MESSAGES/django.mo differ
Use eval() to replace globals() & fix spelling mistake `globals()` returns the whole current-scope variable dict, which makes the code a bit chaotic. Luckily, the `FX` dict value is also the name of the function to call, so `eval()` can be a little helper to handle this dirty work properly.
@@ -158,8 +158,9 @@ while run: ''') choice = input() try: - fx = FX[int(choice)] - run = globals()[fx]() + # Via eval() let `str expression` to `function` + fx = eval(FX[int(choice)]) + run = fx() except KeyError: system('clear') if count <= 5: @@ -167,5 +168,5 @@ while run: print("----------enter proper key-------------") else: system('clear') - print("You have attemted 5 times , try again later") + print("You have attempted 5 times , try again later") run = False
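An alternative worth noting (not what this commit does): the dispatch-table idea works without eval() if the menu keys map directly to function objects. A small sketch with hypothetical functions:

def send_message():
    print("sending...")
    return True

def quit_app():
    return False

FX = {1: send_message, 2: quit_app}  # map choices straight to callables

choice = 1  # stands in for input()
run = FX[int(choice)]()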
[circleci] Tolerate 20 DRC/ LVS errors before erroring out
@@ -251,7 +251,7 @@ jobs:
             pip install align[test] -f ./dist
             filter="<<parameters.design>>"
             filter="<<parameters.pdk>>${filter:+ and $filter}"
-            pytest -vv --runnightly --maxerrors=10 --timeout=<<parameters.timeout>> -k "$filter" -- tests/integration
+            pytest -vv --runnightly --maxerrors=20 --timeout=<<parameters.timeout>> -k "$filter" -- tests/integration
             mkdir test-reports && cp -r junit.xml LOG test-reports
             find $ALIGN_WORK_DIR -name *.json -exec cp --parents \{\} test-reports \;
test_home: Fix wrong bot references in test_people. These are all referring to email_gateway_bot, when they're supposed to refer to the notification and welcome bots, respectively. The values are the same though, so the tests were passing anyway.
@@ -615,7 +615,7 @@ class HomeTest(ZulipTestCase): is_guest=False, ), dict( - avatar_version=email_gateway_bot.avatar_version, + avatar_version=notification_bot.avatar_version, bot_owner_id=None, bot_type=1, email=notification_bot.email, @@ -629,7 +629,7 @@ class HomeTest(ZulipTestCase): is_guest=False, ), dict( - avatar_version=email_gateway_bot.avatar_version, + avatar_version=welcome_bot.avatar_version, bot_owner_id=None, bot_type=1, email=welcome_bot.email,
Install pyOpenSSL from pip for chromium images. Chromium's install-build-deps.sh installs 16.04's version, which is incompatible with google cloud SDK.
@@ -37,7 +37,10 @@ RUN apt-get update && \ nodejs-legacy \ pulseaudio \ xdotool \ - xvfb + xvfb && \ + # 16.04's pyOpenSSL (installed by install-build-deps.sh) is too old for + # Google Cloud SDK. + sudo pip install pyOpenSSL==19.0.0 # Needed for older versions of Chrome. RUN ln -s /usr/lib/x86_64-linux-gnu/libudev.so /usr/lib/x86_64-linux-gnu/libudev.so.0
Address comments on CONTRIBUTING Added a couple more bullets to naming convention Linked to Google's guide Removed Additional Style subsection Added 15 lines of example code. It obviously leaves much uncovered, but perhaps captures the essence of the style.
@@ -141,37 +141,48 @@ C++ code should be compatible with standard C++11. Naming ~~~~~~ -* File names should be lowercase with underscores and end with ``.cpp`` or ``.hpp``. -* Type names should be PascalCase. i.e. :code:`AdjArrayBQM` + +* File names should be lowercase with underscores or dashes and end with ``.cpp`` or ``.hpp``. +* Namespaces should be lowercase with underscores. +* Class names should be PascalCase. i.e. :code:`AdjArrayBQM`. +* Type aliases may follow other naming coventions to be more like the standard library. i.e. :code:`MyVector::value_type` * Function names should be lowercase with underscores. i.e. :code:`num_variables()`. -* Variables names should be lowercase with underscores. Private data members should have a trailing underscore. +* Variable names should be lowercase with underscores. Private data members should have a trailing underscore. +* Global variable names should be ``ALL_CAPS_WITH_UNDERSCORES``. * Macros should be ``ALL_CAPS_WITH_UNDERSCORES``. Format ~~~~~~ -When starting a new C++ project, use clang-format with the .clang-format file included here. - -Additional Style -~~~~~~~~~~~~~~~~ +* When starting a new C++ project, use clang-format with the .clang-format file included here. +* Our format is based on `Google C++ style guide <https://google.github.io/styleguide/cppguide.html>`_ with some exceptions: -Favor the use of the optional braces for single-line control statements, which enhance consistency and extensibility. + - The naming conventions are as stated above. + - Column width is limited to 120 characters. + - Indent widths are doubled so the base indent level is 4 spaces, line continuations indent 8 spaces, and access modifiers indent 2 spaces. -Example: - -Use the following format +Example Code +~~~~~~~~~~~~ .. code-block:: c++ - if (a) { - return; - } + // example_project/src/my_class.cpp + namespace example_project { + int GLOBAL_MATRIX_OF_INTS[2][2] = {{1, 2}, // Arrays representing matricies may be formatted as such. + {3, 4}}; -as opposed to + template <typename T, typename IntType = bool> + class MyClass { + public: + using value_type = T; + value_type* y = nullptr; + value_type& find_element(int& argument) { return *(y + GLOBAL_MATRIX_OF_INTS[x_][argument++]); } -.. code-block:: c++ + private: + IntType x_; + }; + } // namespace example_project - if (a) return; Versioning Scheme -----------------
deploy: update component label value for recovery Adds new component label "app-recovery" for the recovery endpoint
@@ -269,7 +269,7 @@ parameters: value: "quay-component" displayName: quay app selector label - name: QUAY_APP_COMPONENT_LABEL_VALUE - value: "app" + value: "app-recovery" displayName: quay app selector label value - name: LOADBALANCER_SERVICE_PORT value: "443"
Fix collision in workunit affecting patches. and collided, and a test broke on master.
@@ -651,7 +651,9 @@ class StreamingWorkunitTests(unittest.TestCase, SchedulerTestBase): scheduler.product_request(Output, subjects=[0]) finished = list(itertools.chain.from_iterable(tracker.finished_workunit_chunks)) - workunit = next(item for item in finished if item["name"] == "a_rule") + workunit = next( + item for item in finished if item["name"] == "pants.engine.internals.engine_test.a_rule" + ) artifacts = workunit["artifacts"] assert artifacts["some_arbitrary_key"] == EMPTY_DIGEST
Implement Twilio Copilot Alerting Maintain compatibility with Twilio SMS API, validate new settings conditionally using the 'twilio_use_copilot' flag. Throws EAException in case of an incorrect combination of settings.
@@ -1483,10 +1483,20 @@ class TwilioAlerter(Alerter): client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token) try: + if self.twilio_use_copilot: + if self.twilio_message_service_sid == None: + raise EAException("Twilio Copilot requires the 'twilio_message_service_sid' option") + client.messages.create(body=self.rule['name'], to=self.twilio_to_number, - from_=self.twilio_from_number) + messaging_service_sid=self.twilio_message_service_sid) + else: + if self.twilio_from_number == None: + raise EAException("Twilio SMS requires the 'twilio_from_number' option") + client.messages.create(body=self.rule['name'], + to=self.twilio_to_number, + from_=self.twilio_from_number) except TwilioRestException as e: raise EAException("Error posting to twilio: %s" % e)
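A standalone sketch of the conditional validation, with illustrative names rather than the real ElastAlert/Twilio API: Copilot needs a messaging service SID, plain SMS needs a from number, and mixing them up raises an error.

def build_send_kwargs(use_copilot, from_number=None, messaging_service_sid=None):
    # Mirrors the either/or validation: each mode requires its own setting.
    if use_copilot:
        if messaging_service_sid is None:
            raise ValueError("Copilot requires a messaging_service_sid")
        return {"messaging_service_sid": messaging_service_sid}
    if from_number is None:
        raise ValueError("SMS requires a from_number")
    return {"from_": from_number}

print(build_send_kwargs(True, messaging_service_sid="MG..."))   # {'messaging_service_sid': 'MG...'}
print(build_send_kwargs(False, from_number="+15550001111"))     # {'from_': '+15550001111'}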
Update ac_train.sh change the folder name for train.rules.json & entity_rules.json from DATA_FOLDER to ORACLE_FOLDER
@@ -39,8 +39,8 @@ else # Copy variables that we will need for standalone cp $DATA_FOLDER/dict.* ${MODEL_FOLDER}-seed${seed}/ - cp $DATA_FOLDER/train.rules.json ${MODEL_FOLDER}-seed${seed}/ - cp $DATA_FOLDER/entity_rules.json ${MODEL_FOLDER}-seed${seed}/ + cp $ORACLE_FOLDER/train.rules.json ${MODEL_FOLDER}-seed${seed}/ + cp $ORACLE_FOLDER/entity_rules.json ${MODEL_FOLDER}-seed${seed}/ # if [[ $arch == "transformer_tgt_pointer" ]]; then if [[ $arch != *"graph"* ]]; then
Apply flake8 and black formatting Use helper to retrieve schemas
@@ -280,13 +280,15 @@ class TestComponents: class TestPlugin(BasePlugin): def init_spec(self, spec): spec.components.schema( - "TestSchema", {"properties": {"key": {"type": "string"}}, "type": "object"} + "TestSchema", + {"properties": {"key": {"type": "string"}}, "type": "object"}, ) - spec = APISpec("Test API", version="0.0.1", openapi_version="2.0", plugins=[TestPlugin()]) - metadata = spec.to_dict() - assert metadata["definitions"] == { - 'TestSchema': {'properties': {'key': {'type': 'string'}}, 'type': 'object'} + spec = APISpec( + "Test API", version="0.0.1", openapi_version="2.0", plugins=[TestPlugin()] + ) + assert get_schemas(spec) == { + "TestSchema": {"properties": {"key": {"type": "string"}}, "type": "object"} }
[air] Update to use more verbose default config for trainers. Internal user feedback showing that more detailed logging is preferred:
@@ -169,4 +169,4 @@ class RunConfig: stop: Optional[Union[Mapping, "Stopper", Callable[[str, Mapping], bool]]] = None failure: Optional[FailureConfig] = None sync_config: Optional[SyncConfig] = None - verbose: Union[int, Verbosity] = Verbosity.V2_TRIAL_NORM + verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS
purge-container-cluster: always prune force Since podman 2.x, there's now a confirmation when running podman container prune command.
- name: remove stopped/exited containers command: > - {{ container_binary }} container prune{% if container_binary == 'docker' %} -f{% endif %} + {{ container_binary }} container prune -f changed_when: false - name: show container list on all the nodes (should be empty)
Remove unused methods on ParseContext `ParseContext.create_object_if_not_exists` and `Storage.add_if_not_exists` are no longer invoked anywhere in pants. This commit removes these methods.
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -import functools import threading @@ -20,14 +19,6 @@ class Storage(threading.local): self.objects_by_name[name] = obj self.objects.append(obj) - def add_if_not_exists(self, name, obj_creator): - if name is None: - raise ValueError("Method requires a `name`d object.") - obj = self.objects_by_name.get(name) - if obj is None: - obj = self.objects_by_name[name] = obj_creator() - return obj - class ParseContext: """The build file context that context aware objects - aka BUILD macros - operate against. @@ -66,24 +57,6 @@ class ParseContext: raise KeyError("There is no type registered for alias {0}".format(alias)) return object_type(*args, **kwargs) - def create_object_if_not_exists(self, alias, name=None, *args, **kwargs): - """Constructs the type with the given alias using the given args and kwargs. - - NB: aliases may be the alias' object type itself if that type is known. - - :API: public - - :param alias: Either the type alias or the type itself. - :type alias: string|type - :param *args: These pass through to the underlying callable object. - :param **kwargs: These pass through to the underlying callable object. - :returns: The created object, or an existing object with the same `name`. - """ - if name is None: - raise ValueError("Method requires an object `name`.") - obj_creator = functools.partial(self.create_object, alias, name=name, *args, **kwargs) - return self._storage.add_if_not_exists(name, obj_creator) - @property def rel_path(self): """Relative path from the build root to the BUILD file the context aware object is called
build: Bump isort from 5.11.4 to 5.12.0 New version requires python >=3.8 but that should be ok now with the refactored requirements files.
 # (We are not so interested in the specific versions of the tools: the versions
 # are pinned to prevent unexpected linting failures when tools update)
 black==22.12.0
-isort==5.11.4
+isort==5.12.0
 pylint==2.16.1
 mypy==0.991
 bandit==1.7.4
[docs] Deprecate RTD -- add meta refresh to redirect to new site Summary: {F141658} Test Plan: bk Reviewers: sashank, schrockn
.. title:: Home \ No newline at end of file - -.. toctree:: - :maxdepth: 3 - :includehidden: - :name: Documentation - - Install <sections/install/index> - Tutorial <sections/tutorial/index> - Learn <sections/learn/index> - API Docs <sections/api/index> - Deploying <sections/deploying/index> - Community <sections/community/index>
Clean up settings overlay "Your Account" tab display. This enforces a max-width of 1024px on the #settings overlay. This commit also cleans up the "Your Account" tab to display correctly without the avatar bleeding over to the next line.
@@ -39,6 +39,16 @@ label { min-width: 200px; } +.new-style .grid .user-name-section label { + min-width: 120px; +} + +.new-style .grid .user-name-section .warning { + display: block; + width: calc(100% - 20px - 5px); + text-align: right; +} + .new-style .grid .warning { display: inline-block; vertical-align: top; @@ -70,6 +80,11 @@ label { padding: 20px; } +.realm-icon-section { + float: none; + display: inline-block; +} + .user-avatar-section .inline-block, .realm-icon-section .inline-block { display: block; @@ -562,7 +577,7 @@ input[type=checkbox].inline-block { .bots_list .bot-information-box { position: relative; display: inline-block; - width: calc(33.3% - 10px); + width: calc(50% - 10px); max-height: 180px; margin: 5px; @@ -693,13 +708,20 @@ input[type=checkbox].inline-block { position: inherit; } -#alert_words_list, #attachments_list { list-style-type: none; margin: auto; background-color: #fff; } +#alert_words_list { + margin: 0; +} + +#alert_words_list li { + list-style-type: none; +} + #alert_words_list li.alert-word-item:last-child { background: none; margin-top: 8px; @@ -749,7 +771,8 @@ input[type=checkbox].inline-block { #settings_page { height: 95vh; width: 97vw; - margin: 2.5vh 1.5vw; + max-width: 1024px; + margin: 2.5vh auto; background-color: #fff; overflow: hidden; border-radius: 4px; @@ -833,6 +856,7 @@ input[type=checkbox].inline-block { #settings_page input.search { font-size: 0.9rem; + margin: 0px 0px 20px 0px; } #settings_page .form-sidebar { @@ -1040,23 +1064,7 @@ input[type=text]#settings_search { bottom: 0px; } /* -- end new settings overlay -- */ -@media (min-width: 1850px) { - .bots_list .bot-information-box { - width: calc(20% - 10px); - } -} - -@media (min-width: 1500px) and (max-width: 1850px) { - .bots_list .bot-information-box { - width: calc(25% - 10px); - } -} - @media (max-width: 1215px) { - .bots_list .bot-information-box { - width: calc(50% - 10px); - } - .user-avatar-section, .realm-icon-section { float: none;
Add bucket suffixes Based on what I found and other reports, I added some suffixes like prod, production, staging, etc.
@@ -22,7 +22,7 @@ class sfp_s3bucket(SpiderFootPlugin): # Default options opts = { "endpoints": "s3.amazonaws.com,s3-external-1.amazonaws.com,s3-us-west-1.amazonaws.com,s3-us-west-2.amazonaws.com,s3.ap-south-1.amazonaws.com,s3-ap-south-1.amazonaws.com,s3.ap-northeast-2.amazonaws.com,s3-ap-northeast-2.amazonaws.com,s3-ap-southeast-1.amazonaws.com,s3-ap-southeast-2.amazonaws.com,s3-ap-northeast-1.amazonaws.com,s3.eu-central-1.amazonaws.com,s3-eu-central-1.amazonaws.com,s3-eu-west-1.amazonaws.com,s3-sa-east-1.amazonaws.com", - "suffixes": "test,dev,web,beta,bucket,space,files,content,data,-test,-dev,-web,-beta,-bucket,-space,-files,-content,-data", + "suffixes": "test,dev,web,beta,bucket,space,files,content,data,prod,staging,production,stage,app,media,development,-test,-dev,-web,-beta,-bucket,-space,-files,-content,-data,-prod,-staging,-production,-stage,-app,-media,-development", "_maxthreads": 20 }
Add Guild.get_channel_or_thread helper method The name might change in the future, unsure.
@@ -599,6 +599,24 @@ class Guild(Hashable): return self._channels.get(id) or self._threads.get(id) + def get_channel_or_thread(self, channel_id: int, /) -> Optional[Union[Thread, GuildChannel]]: + """Returns a channel or thread with the given ID. + + .. versionadded:: 2.0 + + Parameters + ----------- + channel_id: :class:`int` + The ID to search for. + + Returns + -------- + Optional[Union[:class:`Thread`, :class:`.abc.GuildChannel`]] + The returned channel or thread or ``None`` if not found. + """ + return self._channels.get(channel_id) or self._threads.get(channel_id) + + def get_channel(self, channel_id: int, /) -> Optional[GuildChannel]: """Returns a channel with the given ID.
Document how to use tox This is probably a good idea, so that developers can test with the same set of tools and environments that the CI server uses.
@@ -52,34 +52,63 @@ performance by running the following command. python tests/benchmarks.py -Makefile Utility ----------------- +Running all the tests +--------------------- -Makefiles are a simple way to perform code compilation on ``Linux platforms``. +You can run all of ChatterBot's tests with a single command: ``tox``. -We often forgot to build docs, run nosetes or Django tests whenever we make any change in existing files, -and when we create a pull request for the same,it fails the build giving the explanation : -`Some checks were not successful` +Tox is a tool for managing virtual environments and running tests. -To avoid all your problems with the Travis CI, use the ``MAKEFILE``. It will help you with the code to avoid problems, -failing the build by Travis CI. +Installing tox +++++++++++++++ -To see the list of avaliable commands with MAKEFILE: +You can install ``tox`` with ``pip``. -.. sourcecode:: sh +.. code-block:: bash - make help + pip install tox -To run all tests: +Using tox ++++++++++ -.. sourcecode:: sh +When you run the ``tox`` command from within the root directory of +the ``ChatterBot`` repository it will run the following tests: - make all +1. Tests for ChatterBot's core files. +2. Tests for ChatterBot's integration with multiple versions of Django. +3. Tests for each of ChatterBot's example files. +4. Tests to make sure ChatterBot's documentation builds. +5. Code style and validation checks (linting). +6. Benchmarking tests for performance. -To clean your workspace with un-versioned files +You can run specific tox environments using the ``-e`` flag. +A few examples include: -.. sourcecode:: sh +.. code-block:: bash + + # Run the documentation tests + tox -e docs + +.. code-block:: bash + + # Run the tests with Django 1.10 + tox -e django110 + +.. code-block:: bash + + # Run the code linting scripts + tox -e lint + +To see the list of all available environments that you can run tests for: + +.. code-block:: bash + + tox -l + +To run tests for all environments: + +.. code-block:: bash - make clean + tox .. _`nose documentation`: https://nose.readthedocs.org/en/latest/
Add register_manager docs * Add register_manager docs Fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
@@ -142,6 +142,16 @@ class BaseModelClass(metaclass=BaseModelMetaClass): def register_manager(cls, adata_manager: AnnDataManager): """ Registers an :class:`~scvi.data.AnnDataManager` instance with this model class. + + Stores the :class:`~scvi.data.AnnDataManager` reference in a class-specific manager store. + Intended for use in the ``setup_anndata()`` class method followed up by retrieval of the + :class:`~scvi.data.AnnDataManager` via the ``_get_most_recent_anndata_manager()`` method in + the model init method. + + Notes + ----- + Subsequent calls to this method with an :class:`~scvi.data.AnnDataManager` instance referring to the same + underlying AnnData object will overwrite the reference to previous :class:`~scvi.data.AnnDataManager`. """ adata_id = adata_manager.adata_uuid cls._setup_adata_manager_store[adata_id] = adata_manager
Changes travis max-line-length to 80 Fixes
@@ -12,8 +12,8 @@ install: before_script: # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics - # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + # exit-zero treats all errors as warnings. Texar limits lines to a maximum of 80 chars. + - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=80 --statistics script: # units test
Update Sauter link This appears to be the active one. FAO
@@ -102,7 +102,7 @@ Matthew Webber. .. _`CCP4`: http://www.ccp4.ac.uk/ .. _`Diamond Light Source`: http://www.diamond.ac.uk/Home.html .. _`Dr Gwyndaf Evans`: http://www.diamond.ac.uk/Beamlines/Mx/VMXm/Staff/Evans.html -.. _`Dr Nicholas Sauter`: http://pbd.lbl.gov/scientists/nicholas-sauter/ +.. _`Dr Nicholas Sauter`: https://biosciences.lbl.gov/profiles/nicholas-sauter/ .. _`Lawrence Berkeley National Laboratory`: http://www.lbl.gov/ .. _`National Institutes of Health`: http://www.nih.gov/ .. _`National Institute of General Medical Sciences`: http://www.nigms.nih.gov/
fix: Clear user cache on doctype insert No need to do Settings -> Reload after creating a new doctype
from __future__ import unicode_literals import re, copy, os, shutil import json +from frappe.cache_manager import clear_user_cache # imports - third party imports import six @@ -103,6 +104,10 @@ class DocType(Document): self.owner = 'Administrator' self.modified_by = 'Administrator' + def after_insert(self): + # clear user cache so that on the next reload this doctype is included in boot + clear_user_cache(frappe.session.user) + def set_default_in_list_view(self): '''Set default in-list-view for first 4 mandatory fields''' if not [d.fieldname for d in self.fields if d.in_list_view]:
Rely on types.UpdateChatPinnedMessage for chat unpins Fixes probably.
@@ -38,6 +38,10 @@ class ChatAction(EventBuilder): return cls.Event(types.PeerChannel(update.channel_id), unpin=True) + elif isinstance(update, types.UpdateChatPinnedMessage) and update.id == 0: + return cls.Event(types.PeerChat(update.chat_id), + unpin=True) + elif isinstance(update, types.UpdateChatParticipantAdd): return cls.Event(types.PeerChat(update.chat_id), added_by=update.inviter_id or True, @@ -104,8 +108,9 @@ class ChatAction(EventBuilder): return cls.Event(msg, users=msg.from_id, new_photo=True) - elif isinstance(action, types.MessageActionPinMessage): - # Telegram always sends this service message for new pins + elif isinstance(action, types.MessageActionPinMessage) and msg.reply_to_msg_id: + # Seems to not be reliable on unpins, but when pinning + # we prefer this because we know who caused it. return cls.Event(msg, users=msg.from_id, new_pin=msg.reply_to_msg_id)
enable vector test for clang 4.0 fix
@@ -185,10 +185,6 @@ class BaseTestCompleter(object): # Verify that we got the expected completions back. self.assertIsNotNone(completions) - if platform.system() == "Windows": - # disable the windows tests for now until AppVeyor fixes things - self.tear_down() - return expected = ['begin\titerator begin()', 'begin()'] self.assertIn(expected, completions) self.tear_down_completer()
create settings method for LibrarySection to return the current library settings
@@ -4,6 +4,7 @@ from plexapi.base import PlexObject from plexapi.compat import unquote, urlencode, quote_plus from plexapi.media import MediaTag from plexapi.exceptions import BadRequest, NotFound +from plexapi.settings import Setting class Library(PlexObject): @@ -401,6 +402,12 @@ class LibrarySection(PlexObject): key = '/library/sections/%s/all%s' % (self.key, sortStr) return self.fetchItems(key, **kwargs) + def settings(self): + """ Returns a list of all library settings. """ + key = '/library/sections/%s/prefs' % self.key + data = self._server.query(key) + return self.findItems(data, cls=Setting) + def onDeck(self): """ Returns a list of media items on deck from this library section. """ key = '/library/sections/%s/onDeck' % self.key
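A possible way to exercise the new method (a sketch, not from the commit): the base URL, token and section name are placeholders and assume a reachable Plex server.

from plexapi.server import PlexServer

plex = PlexServer('http://localhost:32400', token='PLEX_TOKEN')   # placeholders
movies = plex.library.section('Movies')
for setting in movies.settings():
    # each item is a plexapi.settings.Setting parsed from /library/sections/<key>/prefs
    print(setting.id, setting.value)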
Fix ReplyKeyboardMarkup's add method Previously, when the add method was given multiple buttons, it placed a single button on a new row and only then grouped the remaining items into rows according to row_width
@@ -41,7 +41,7 @@ class ReplyKeyboardMarkup(base.TelegramObject): :rtype: :obj:`types.ReplyKeyboardMarkup` """ row = [] - for index, button in enumerate(args): + for index, button in enumerate(args, start=1): row.append(button) if index % self.row_width == 0: self.keyboard.append(row)
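A standalone illustration (plain Python, no aiogram needed) of why starting enumerate at 1 fixes the grouping: with row_width=2, five buttons now split into rows of 2, 2 and 1 instead of a lone button landing on the first row.

buttons = ['A', 'B', 'C', 'D', 'E']
row_width = 2

keyboard, row = [], []
for index, button in enumerate(buttons, start=1):
    row.append(button)
    if index % row_width == 0:   # with the old start of 0 this fired on the very first button
        keyboard.append(row)
        row = []
if row:                          # keep any trailing partial row
    keyboard.append(row)

print(keyboard)   # [['A', 'B'], ['C', 'D'], ['E']]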
vocabulary raises a warning when loading from a nonexistent dataset series fixes
@@ -123,6 +123,10 @@ def from_dataset(datasets: List[Dataset], series_ids: List[str], max_size: int, warn("Inferring vocabulary from lazy dataset!") for series_id in series_ids: + if not dataset.has_series(series_id): + warn("Data series '{}' not present in the dataset" + .format(series_id)) + series = dataset.get_series(series_id, allow_none=True) if series: vocabulary.add_tokenized_text( @@ -143,7 +147,6 @@ def from_dataset(datasets: List[Dataset], series_ids: List[str], max_size: int, directory = os.path.dirname(save_file) if not os.path.exists(directory): os.makedirs(directory) - vocabulary.save_to_file(save_file, overwrite) return vocabulary
MNT: remove dead code This got moved out to __read_and_stash_a_motor
@@ -1051,19 +1051,6 @@ def reset_positions_wrapper(plan, devices=None): else: coupled_parents = set() - def read_and_stash_a_motor(obj): - try: - cur_pos = obj.position - except AttributeError: - reading = yield Msg('read', obj) - if reading is None: - # this plan may be being list-ified - cur_pos = 0 - else: - k = list(reading.keys())[0] - cur_pos = reading[k]['value'] - initial_positions[obj] = cur_pos - def insert_reads(msg): eligible = devices is None or msg.obj in devices seen = msg.obj in initial_positions
Update task.json Fixes typo: missing closing quotes
{ "input": "In which direction should one look to see the Sun in the morning?", "target_scores": { - "East: 1, + "East": 1, "North": 0, "South": 0, "West": 0, { "input": "In which direction should one look to see the Sun in the evening?", "target_scores": { - "East: 1, + "East": 1, "North": 0, "South": 0, "West": 0,
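A quick standalone check of why the missing closing quote mattered: the old entry is not valid JSON and is rejected by the parser, while the corrected form loads fine.

import json

broken = '{"East: 1, "North": 0}'   # old form: the quote after East is never closed
fixed = '{"East": 1, "North": 0}'

print(json.loads(fixed))            # {'East': 1, 'North': 0}
try:
    json.loads(broken)
except json.JSONDecodeError as exc:
    print("rejected:", exc)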
Reinstate 'counter' function in subdoc. Tested-by: Ellis Breen Tested-by: Build Bot
@@ -2,7 +2,7 @@ from typing import * from couchbase_core import subdocument as SD import couchbase_core.priv_constants as _P from .options import OptionBlockTimeOut -from couchbase_core.subdocument import array_addunique, array_append, array_insert, array_prepend, insert, remove, replace, upsert, Spec +from couchbase_core.subdocument import array_addunique, array_append, array_insert, array_prepend, insert, remove, replace, upsert, counter, Spec try: from abc import abstractmethod
docstrings and better handling Added docstrings and better handling of the parameters.
@@ -1008,18 +1008,47 @@ class Collections(PlexObject): part = '/library/metadata/%s' % self.ratingKey return self._server.query(part, method=self._server._session.delete) - def modeUpdate(self, mode=['default', 'hide', 'hideItems', 'showItems']): + def modeUpdate(self, mode=None): + """ Update Collection Mode + + Parameters: + mode: default (Library default) + hide (Hide Collection) + hideItems (Hide Items in this Collection) + showItems (Show this Collection and its Items) + Example: + + colleciton = 'plexapi.library.Collections' + collection.updateMode(mode="hide") + """ mode_dict = {'default': '-2', 'hide': '0', 'hideItems': '1', 'showItems': '2'} - part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, mode_dict[mode]) + key = mode_dict.get(mode) + if mode is None: + raise BadRequest('Unknown collection mode : %s. Options %s' % (mode, mode_dict.key())) + part = '/library/metadata/%s/prefs?collectionMode=%s' % (self.ratingKey, key) return self._server.query(part, method=self._server._session.put) - def sortUpdate(self, sort=['release', 'alpha']): + def sortUpdate(self, sort=None): + """ Update Collection Sorting + + Parameters: + mode: realease (Order Collection by realease dates) + alpha (Order Collection Alphabetically) + + Example: + + colleciton = 'plexapi.library.Collections' + collection.updateSort(mode="alpha") + """ sort_dict = {'release': '0', 'alpha': '1'} - part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, sort_dict[sort]) + key = sort_dict.get(sort) + if key is None: + raise BadRequest('Unknown sort dir: %s. Options: %s' % (sort, sort_dict.keys())) + part = '/library/metadata/%s/prefs?collectionSort=%s' % (self.ratingKey, key) return self._server.query(part, method=self._server._session.put) # def edit(self, **kwargs):
Remove non-sequitur Jenkins configuration How to set up and use sudoers has nothing to do with this function.
@@ -186,22 +186,6 @@ def send( salt-call event.send myco/mytag foo=Foo bar=Bar salt-call event.send 'myco/mytag' '{foo: Foo, bar: Bar}' - A convenient way to allow Jenkins to execute ``salt-call`` is via sudo. The - following rule in sudoers will allow the ``jenkins`` user to run only the - following command. - - ``/etc/sudoers`` (allow preserving the environment): - - .. code-block:: text - - jenkins ALL=(ALL) NOPASSWD:SETENV: /usr/bin/salt-call event.send* - - Call Jenkins via sudo (preserve the environment): - - .. code-block:: bash - - sudo -E salt-call event.send myco/jenkins/build/success with_env=[BUILD_ID, BUILD_URL, GIT_BRANCH, GIT_COMMIT] - """ data_dict = {}
Incorrect Stripping of Hashtags Comment on Hashtags method
@@ -433,8 +433,8 @@ def menu_comment(): hashtag = input("what?").strip() else: hashtag = random.choice(bot.read_list_from_file(hashtag_file)) - for hashtags in hashtag: - bot.comment_hashtag(hashtags) + #for hashtags in hashtag: + bot.comment_hashtag(hashtag) elif ans == "2": print("""
Fixing new_project fixture We need to ensure we switch to the default project
@@ -138,7 +138,10 @@ def create_project(request): """ Delete the project """ - class_instance.project_obj.delete(resource_name=class_instance.namespace) + ocp.switch_to_default_rook_cluster_project() + class_instance.project_obj.delete( + resource_name=class_instance.namespace + ) class_instance.project_obj.wait_for_delete(class_instance.namespace) request.addfinalizer(finalizer)
enhancement: solid voxelization add methods for solid voxelization, call show(solid_mode=True) for visualization
@@ -203,6 +203,14 @@ class VoxelMesh(Voxel): self._cache['origin'] = origin return voxels + @util.cache_decorator + def sparse_solid(self): + voxels, origin = voxelize_subdivide_solid(mesh=self._data['mesh'], + pitch=self._data['pitch'], + max_iter=self._data['max_iter'][0]) + self._cache['origin'] = origin + return voxels + @util.cache_decorator def as_boxes(self): """ @@ -217,11 +225,28 @@ class VoxelMesh(Voxel): mesh = multibox(centers=centers, pitch=self.pitch) return mesh - def show(self): + @util.cache_decorator + def as_boxes_solid(self): + """ + A rough Trimesh representation of the voxels with a box for each filled voxel. + + Returns + --------- + mesh: Trimesh object representing the current voxel object. + """ + centers = (self.sparse_solid * + self.pitch).astype(np.float64) + self.origin + mesh = multibox(centers=centers, pitch=self.pitch) + return mesh + + def show(self,solid_mode=False): """ Convert the current set of voxels into a trimesh for visualization and show that via its built- in preview method. """ + if solid_mode: + self.as_boxes_solid.show() + else: self.as_boxes.show() @@ -266,6 +291,62 @@ def voxelize_subdivide(mesh, pitch, max_iter=10): return voxels_sparse, origin_position +def voxelize_subdivide_solid(mesh,pitch,max_iter=10): + voxels_sparse,origin_position=voxelize_subdivide(mesh,pitch,max_iter) + #create grid and mark inner voxels + max=voxels_sparse.max()+1 + max=max+2 #enlarge grid to ensure that the voxels of the bound are empty + grid = [[[0 for k in range(max)] for j in range(max)] for i in range(max)] + for v in voxels_sparse: + grid[v[0]+1][v[1]+1][v[2]+1]=1 + + for i in range(max): + check_dir2=False + for j in range(0,max-1): + idx=[] + #find transitions first + for k in range(1,max-1): + if grid[i][j][k]!=grid[i][j][k-1]: + idx.append(k) + + c=len(idx) + check_dir2=(c%4)>0 + if c<4: + continue + + for s in range(0,c-c%4,4): + for k in range(idx[s],idx[s+3]): + grid[i][j][k]=1 + + if not check_dir2: + continue + + #check another direction for robustness + for k in range(0,max-1): + idx=[] + #find transitions first + for j in range(1,max-1): + if grid[i][j][k]!=grid[i][j-1][k]: + idx.append(k) + + c=len(idx) + if c<4: + continue + + for s in range(0,c-c%4,4): + for j in range(idx[s],idx[s+3]): + grid[i][j][k]=1 + + #gen new voxels + new_voxels=[] + for i in range(max): + for j in range(max): + for k in range(max): + if grid[i][j][k]==1: + new_voxels.append([i-1,j-1,k-1]) + + new_voxels=np.array(new_voxels) + return new_voxels,origin_position def matrix_to_points(matrix, pitch, origin): """
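A hedged sketch of the intended call pattern; the mesh path is a placeholder and it assumes a trimesh version where Trimesh.voxelized returns this VoxelMesh class (solid filling only makes sense for a watertight mesh).

import trimesh

mesh = trimesh.load('model.stl')      # placeholder path to a watertight mesh
voxels = mesh.voxelized(pitch=0.1)    # VoxelMesh carrying the new solid helpers
voxels.show(solid_mode=True)          # render the filled (solid) voxels as boxes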
[IMPR] Add support for translated dates/times This implementation just combines date, year and time from pywikibot.date lookup. It could be more precise if MonthFormat would have a day_years_format entry but most of the current year_formats entries are default which just append the year to the month.
@@ -43,8 +43,10 @@ and override its `callback` method. Here is a sample: from typing import Union import pywikibot + from pywikibot import i18n from pywikibot.bot import OptionHandler +from pywikibot.date import format_date, formatYear from pywikibot.exceptions import APIError, Error from pywikibot.tools.formatter import color_format @@ -92,6 +94,16 @@ class BaseRevertBot(OptionHandler): """Callback function.""" return 'top' in item + def local_timestamp(self, ts) -> str: + """Convert Timestamp to a localized timestamp string. + + .. versionadded:: 7.0 + """ + year = formatYear(self.site.lang, ts.year) + date = format_date(ts.month, ts.day, self.site) + *_, time = str(ts).strip('Z').partition('T') + return ' '.join((date, year, time)) + def revert(self, item) -> Union[str, bool]: """Revert a single item.""" page = pywikibot.Page(self.site, item['title']) @@ -110,7 +122,7 @@ class BaseRevertBot(OptionHandler): self.site, 'revertbot-revert', {'revid': rev.revid, 'author': rev.user, - 'timestamp': rev.timestamp}) + 'timestamp': self.local_timestamp(rev.timestamp)}) if self.opt.comment: comment += ': ' + self.opt.comment
Update RELEASE.md Add upstream note
* Update version constants (find them by running `git grep [VERSION_NUMBER]`) * Create changelog entry (edit CHANGELOG.md with a one-liner for each closed issue going in the release) * Commit and push changes to master with the message: "Version Bump to v[VERSION_NUMBER]" -* Push tag and PyPi `fab release:[VERSION_NUMBER]`. Before you do this, make sure you have fabric installed (`pip install fabric`) and also make sure that you have pip set up with your PyPi user credentials. The easiest way to do that is to create a file at `~/.pypirc` with the following contents: +* Push tag and PyPi `fab release:[VERSION_NUMBER]`. Before you do this, make sure you have the organization repository set up as upstream remote & fabric installed (`pip install fabric`), also make sure that you have pip set up with your PyPi user credentials. The easiest way to do that is to create a file at `~/.pypirc` with the following contents: ``` [server-login]
Add converters to and from dictionary Implement PWInput.as_dict() and PWInput.from_dict(pwinput_dict) methods to the PWInput class.
@@ -173,6 +173,44 @@ class PWInput(object): out.append(" %f %f %f" % (vec[0], vec[1], vec[2])) return "\n".join(out) + def as_dict(self): + """ + Create a dictionary representation of a PWInput object + + Returns: + dict + """ + pwinput_dict = {'structure': self.structure.as_dict(), + 'pseudo': self.pseudo, + 'sections': self.sections, + 'kpoints_mode': self.kpoints_mode, + 'kpoints_grid': self.kpoints_grid, + 'kpoints_shift': self.kpoints_shift} + return pwinput_dict + + @classmethod + def from_dict(cls, pwinput_dict): + """ + Load a PWInput object from a dictionary. + + Args: + pwinput_dict (dict): dictionary with PWInput data + + Returns: + PWInput object + """ + pwinput = cls(structure=Structure.from_dict(pwinput_dict['structure']), + pseudo=pwinput_dict['pseudo'], + control=pwinput_dict['sections']['control'], + system=pwinput_dict['sections']['system'], + electrons=pwinput_dict['sections']['electrons'], + ions=pwinput_dict['sections']['ions'], + cell=pwinput_dict['sections']['cell'], + kpoints_mode=pwinput_dict['kpoints_mode'], + kpoints_grid=pwinput_dict['kpoints_grid'], + kpoints_shift=pwinput_dict['kpoints_shift']) + return pwinput + def write_file(self, filename): """ Write the PWSCF input file.
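A hedged round-trip sketch: the silicon structure and pseudopotential file name are placeholders, and the imports follow the current pymatgen layout rather than anything stated in the commit.

from pymatgen.core import Lattice, Structure
from pymatgen.io.pwscf import PWInput

structure = Structure(Lattice.cubic(5.43), ["Si", "Si"],
                      [[0, 0, 0], [0.25, 0.25, 0.25]])
pwin = PWInput(structure, pseudo={"Si": "Si.pbe-n-rrkjus_psl.1.0.0.UPF"})

d = pwin.as_dict()                    # plain dict, e.g. for JSON serialization
restored = PWInput.from_dict(d)       # rebuild an equivalent PWInput object
print(sorted(d))                      # the six keys stored by as_dict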
Standalone: Do not include "site" module as compiled * The "site" module should not be included, but if it is, do not compile via C code, as some modern ".pth" files insist on Python frame stacks.
@@ -1365,6 +1365,7 @@ class NuitkaPluginPopularImplicitImports(NuitkaPluginBase): "telethon.tl.types", # Not performance relevant and slow C compile "importlib_metadata", # Not performance relevant and slow C compile "comtypes.gen", # Not performance relevant and slow C compile + "site", # Not performance relevant and problems with .pth files ) def decideCompilation(self, module_name, source_ref):
[benchmarks][libxml2-v2.9.2] Fix broken build Fix broken build by ignoring git's unhelpful conversion of CRLF to LF.
@@ -33,7 +33,15 @@ build_lib() { ) } -get_git_tag https://gitlab.gnome.org/GNOME/libxml2.git v2.9.2 SRC +git clone https://gitlab.gnome.org/GNOME/libxml2.git SRC +cd SRC + +# Git is converting CRLF to LF automatically and causing issues when checking +# out the branch. So use -f to ignore the complaint about lost changes that we +# don't even want. +git checkout -f v2.9.2 +cd - + build_lib $CXX $CXXFLAGS -std=c++11 $SCRIPT_DIR/target.cc -I BUILD/include BUILD/.libs/libxml2.a $FUZZER_LIB -o $FUZZ_TARGET
Build and install VMAF Fixes
@@ -210,18 +210,33 @@ RUN \ rm -vf /etc/ssh/ssh_host_* && \ curl -sSL https://github.com/xiph/rd_tool/tarball/master | tar zxf - -C ${RD_TOOL_DIR} --strip-components=1 +# install meson +RUN \ + apt-get install -y python3 python3-pip python3-setuptools python3-wheel ninja-build && \ + pip3 install meson + # install dav1d and dependencies ENV \ DAV1D_DIR=/opt/dav1d RUN \ - apt-get install -y meson && \ git clone https://code.videolan.org/videolan/dav1d.git ${DAV1D_DIR} && \ cd ${DAV1D_DIR} && \ mkdir build && cd build && \ meson .. && \ ninja +# install VMAF +ENV \ + VMAF_DIR=/opt/vmaf + +RUN \ + git clone https://github.com/Netflix/vmaf.git ${VMAF_DIR} && \ + cd ${VMAF_DIR}/libvmaf && \ + meson build --buildtype release && \ + ninja -C build && \ + ninja -C build install + # clear package manager cache RUN \ apt-get clean && \
Simplify nonnull_count computation in PandasDataset Accommodates error raised in pandas 0.21 (plus it's simpler).
@@ -192,7 +192,9 @@ class MetaPandasDataset(Dataset): element_count = int(len(series)) nonnull_values = series[null_indexes == False] - nonnull_count = int((null_indexes == False).sum()) + # Simplify this expression because the old version fails under pandas 0.21 (but only that version) + # nonnull_count = int((null_indexes == False).sum()) + nonnull_count = len(nonnull_values) null_count = element_count - nonnull_count evaluation_result = func(self, nonnull_values, *args, **kwargs)
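A standalone check (sketch, not from the repository) that the simplified count agrees with the old expression on a small series containing missing values.

import numpy as np
import pandas as pd

series = pd.Series([1.0, None, 3.0, np.nan, 5.0])
null_indexes = series.isnull()

nonnull_values = series[null_indexes == False]
old_count = int((null_indexes == False).sum())   # expression the commit replaces
new_count = len(nonnull_values)                  # simplified replacement

print(old_count, new_count)   # 3 3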
Update JARVIS.py Added "open" to "open {x program}" in all app-opening processes; added github to website opener; added discord to app-opening process; changed some grammar; added a clip and record/stop recording function
@@ -15,7 +15,8 @@ import subprocess # subprocess module allows you to spawn new processes import pyjokes import requests import json - +#for 30 seconds clip "Jarvis, clip that!" and discord ctrl+k quick-move (might not come to fruition) +from pynut import keyboard # ======= from playsound import * #for sound output # master @@ -136,26 +137,29 @@ def get_app(Q): elif Q=="news": speak_news() - elif Q == "notepad": + elif Q == "open notepad": subprocess.call(['Notepad.exe']) - elif Q == "calculator": + elif Q == "open calculator": subprocess.call(['calc.exe']) - elif Q == "stikynot": + elif Q == "open stikynot": subprocess.call(['StikyNot.exe']) - elif Q == "shell": + elif Q == "open shell": subprocess.call(['powershell.exe']) - elif Q == "paint": + elif Q == "open paint": subprocess.call(['mspaint.exe']) - elif Q == "cmd": + elif Q == "open cmd": subprocess.call(['cmd.exe']) - elif Q == "browser": + elif Q == "open discord": + subprocess.call(['discord.exe']) + elif Q == "open browser": subprocess.call(['C:\Program Files\Internet Explorer\iexplore.exe']) # patch-1 elif Q == "open youtube": webbrowser.open("https://www.youtube.com/") # open youtube elif Q == "open google": - webbrowser.open("https://www.google.com") # open google - + webbrowser.open("https://www.google.com/") # open google + elif Q == "open github": + webbrowser.open elif Q == "email to other": # here you want to change and input your mail and password whenver you implement try: speak("What should I say?") @@ -169,7 +173,7 @@ def get_app(Q): speak("Email has been sent!") except Exception as e: print(e) - speak("Sorray i am not send this mail") + speak("Sorry, I can't send the email.") # ======= # master elif Q=="Take screenshot": @@ -182,7 +186,25 @@ def get_app(Q): snapshot.save(folder_to_save_files) elif Q=="Jokes": - print(pyjokes.get_joke()) + speak(pyjokes.get_joke()) + + elif Q=="start recording": + current.add('Win', 'Alt', 'r') + speak("Started recording. just say stop recording to stop.") + + elif Q=="stop recording": + current.add('Win', 'Alt', 'r') + speak("Stopped recording. check your game bar folder for the video") + + elif Q=="clip that": + current.add('Win', 'Alt', 'g') + speak("Clipped. check you game bar file for the video") + with keyboard.Listener(on_press=on_press, on_release=on_release) as listener: + listener.join() + + + + # master else:
Added to complete cfg for MM1 serial port change. Linked with previous commit.
@@ -176,6 +176,13 @@ MM1_MAX_FORWARD = 2000 # Max throttle to go fowrward. The bigger the fa MM1_STOPPED_PWM = 1500 MM1_MAX_REVERSE = 1000 # Max throttle to go reverse. The smaller the faster MM1_SHOW_STEERING_VALUE = False +# Serial port -- Default Pi: '/dev/ttyS0' +# -- Jetson Nano: '/dev/ttyTHS1' +# -- Google coral: '/dev/ttymxc0' +# -- Windows: 'COM3', Arduino: '/dev/ttyACM0' +# -- MacOS/Linux:please use 'ls /dev/tty.*' to find the correct serial port for mm1 +# eg.'/dev/tty.usbmodemXXXXXX' and replace the port accordingly +MM1_SERIAL_PORT = '/dev/ttyS0' # Serial Port for reading and sending MM1 data. #RECORD OPTIONS RECORD_DURING_AI = False #normally we do not record during ai mode. Set this to true to get image and steering records for your Ai. Be careful not to use them to train.
Need to quote python versions. Also added 3.11 because why not.
@@ -7,7 +7,7 @@ jobs: strategy: matrix: os: [ubuntu-18.04, ubuntu-20.04, ubuntu22.04, macos-12, windows-2022] - python-version: [3.9, 3.10] + python-version: ['3.9', '3.10', '3.11'] runs-on: ${{ matrix.os }} steps:
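A quick illustration (standalone sketch, requires the PyYAML package) of why the quotes matter: an unquoted 3.10 is parsed as the float 3.1, the same kind of coercion that makes unquoted versions unreliable in workflow files.

import yaml

print(yaml.safe_load("python-version: [3.9, 3.10]"))
# {'python-version': [3.9, 3.1]}   <- 3.10 collapsed to 3.1

print(yaml.safe_load("python-version: ['3.9', '3.10', '3.11']"))
# {'python-version': ['3.9', '3.10', '3.11']}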
Typo on line 942 'interactive' should be 'interaction'
@@ -939,7 +939,7 @@ calculation so that it can be used later with new values. JobConnections -------------- Larger pyQuil programs can take longer than 30 seconds to run. These jobs can be posted into the -cloud job queue using a different connection object. The mode of interactive with the API is +cloud job queue using a different connection object. The mode of interaction with the API is asynchronous. This means that there is a seperate query to post a job and to get the result. ::
fix: VM Resize with StopStart The VM Resize testcase with StopStart leaves the VM in a stopped state if Resize raises an exception. This PR starts the VM before raising the exception
@@ -154,9 +154,10 @@ class VmResize(TestSuite): else: raise identifier time.sleep(1) - assert expected_vm_capability, "fail to find proper vm size" + finally: if not hot_resize: start_stop.start() + assert expected_vm_capability, "fail to find proper vm size" test_result.information["final_vm_size"] = final_vm_size test_result.information["origin_vm_size"] = origin_vm_size
Add GitLab CI trigger for Conan package Build Conan package as part of CI Upload to Artifactory for commits to "main": - use "PACKAGE/VERSION@xed/stable" reference when tagged with "vVERSION" - use "PACKAGE/SHA@xed/ci" reference for unversioned commits Update alias reference "PACKAGE/latest@xed/ci"
# .gitlab-ci.yml +variables: + PACKAGE_NAME: xed-common + build: #image: ubuntu:18.04 #image: xed-testing-container @@ -7,3 +10,26 @@ build: stage: build script: - python3 ci-internal.py + +build-conan: + image: amr-registry.caas.intel.com/syssim/teamcity-agent:2020.1.5-21ww05 + stage: build + script: + - virtualenv --python="$(which python3)" ~/.syssim-virtualenv + - source ~/.syssim-virtualenv/bin/activate + - pip install conan pylint astroid yapf + - conan config install https://gitlab.devtools.intel.com/syssim/conan-config.git + - |- + if [[ $CI_COMMIT_REF_NAME == main && $CI_COMMIT_TAG == v* ]]; then + PACKAGE_REF=$PACKAGE_NAME/${CI_COMMIT_TAG#v*}@xed/stable + else + PACKAGE_REF=$PACKAGE_NAME/$CI_COMMIT_SHA@xed/ci + fi + conan create . $PACKAGE_REF --build=missing --profile=gcc9-native + if [[ $CI_COMMIT_REF_NAME == main ]]; then + conan user -r syssim-public "$CONAN_USERNAME" -p "$CONAN_PASSWORD" + conan upload $PACKAGE_REF -r syssim-public --force + LATEST_REF=$PACKAGE_NAME/latest@xed/ci + conan alias $LATEST_REF $PACKAGE_REF + conan upload $LATEST_REF -r syssim-public --force + fi
Use single part as default The data stored in artifact storage are usually small. Using multi-part is not strictly a requirement. Change the default to true to better support more platforms out of the box.
@@ -281,7 +281,7 @@ func initMinioClient(initConnectionTimeout time.Duration) storage.ObjectStoreInt accessKey := getStringConfig("ObjectStoreConfig.AccessKey") secretKey := getStringConfig("ObjectStoreConfig.SecretAccessKey") bucketName := getStringConfig("ObjectStoreConfig.BucketName") - disableMultipart := getBoolConfigWithDefault("ObjectStoreConfig.Multipart.Disable", false) + disableMultipart := getBoolConfigWithDefault("ObjectStoreConfig.Multipart.Disable", true) minioClient := client.CreateMinioClientOrFatal(minioServiceHost, minioServicePort, accessKey, secretKey, initConnectionTimeout)
Python 3 support #catalyst/curate/poloniex.py: Change `print url` to `print(url)`
@@ -129,7 +129,7 @@ class PoloniexCurator(object): start = str(newstart), end = str(end) ) - print url + print(url) attempts = 0 success = 0
Tidy up ClusterNodeGenerator docstring Added notes on the valid range for `lam` and the specific criterion that `q` must evenly divide the number of clusters, which might otherwise be unclear/non-obvious to the end-user; added information on default values of each parameter; minor formatting fixes. See
@@ -34,7 +34,7 @@ from .base import Generator class ClusterNodeGenerator(Generator): """ - A data generator for use with ClusterGCN models on homogeneous graphs, [1]. + A data generator for use with ClusterGCN models on homogeneous graphs, see [1]. The supplied graph G should be a StellarGraph object with node features. Use the :meth:`flow` method supplying the nodes and (optionally) targets @@ -49,13 +49,15 @@ class ClusterNodeGenerator(Generator): Args: G (StellarGraph): a machine-learning StellarGraph-type graph - clusters (int or list): If int then it indicates the number of clusters (default is 1 that is the given graph). - If clusters is greater than 1, then nodes are uniformly at random assigned to a cluster. If list, - then it should be a list of lists of node IDs such that each list corresponds to a cluster of nodes - in G. The clusters should be non-overlapping. - q (float): The number of clusters to combine for each mini-batch. The default is 1. - lam (float): The mixture coefficient for adjacency matrix normalisation. - name (str): an optional name of the generator + clusters (int or list, optional): If int, it indicates the number of clusters (default is 1, corresponding to the entire graph). + If `clusters` is greater than 1, then nodes are randomly assigned to a cluster. + If list, then it should be a list of lists of node IDs, such that each list corresponds to a cluster of nodes + in `G`. The clusters should be non-overlapping. + q (int, optional): The number of clusters to combine for each mini-batch (default is 1). + The total number of clusters must be divisible by `q`. + lam (float, optional): The mixture coefficient for adjacency matrix normalisation (default is 0.1). + Valid values are in the interval [0, 1]. + name (str, optional): Name for the node generator. """ def __init__(self, G, clusters=1, q=1, lam=0.1, name=None): @@ -157,8 +159,8 @@ class ClusterNodeGenerator(Generator): name (str, optional): An optional name for the returned generator object. Returns: - A ClusterNodeSequence object to use with ClusterGCN in Keras - methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict` + A :class:`ClusterNodeSequence` object to use with :class:`ClusterGCN` in Keras + methods :meth:`fit`, :meth:`evaluate`, and :meth:`predict`. """ if targets is not None: @@ -194,9 +196,9 @@ class ClusterNodeSequence(Sequence): A Keras-compatible data generator for node inference using ClusterGCN model. Use this class with the Keras methods :meth:`keras.Model.fit`, :meth:`keras.Model.evaluate`, and - :meth:`keras.Model.predict`, + :meth:`keras.Model.predict`. - This class should be created using the `.flow(...)` method of + This class should be created using the :meth:`flow` method of :class:`ClusterNodeGenerator`. Args:
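A hedged usage sketch consistent with the documented constraints (10 clusters is divisible by q=2, and lam lies in [0, 1]); it assumes the stellargraph package and its downloadable Cora demo dataset.

from stellargraph import datasets
from stellargraph.mapper import ClusterNodeGenerator

G, node_subjects = datasets.Cora().load()           # demo graph with node features

generator = ClusterNodeGenerator(G, clusters=10, q=2, lam=0.1)
flow = generator.flow(node_subjects.index, name="all-nodes")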
Fix type annotations for `dd.from_pandas` and `dd.from_delayed` * remove union * Revert "remove union" This reverts commit * add overloads * match overload args * ignore pd.series overload * set overload args back to original ones
@@ -6,7 +6,7 @@ from functools import partial from math import ceil from operator import getitem from threading import Lock -from typing import TYPE_CHECKING, Iterable, Literal +from typing import TYPE_CHECKING, Iterable, Literal, overload import numpy as np import pandas as pd @@ -153,6 +153,30 @@ def from_array(x, chunksize=50000, columns=None, meta=None): return new_dd_object(dsk, name, meta, divisions) +@overload +def from_pandas( + data: pd.DataFrame, + npartitions: int | None = None, + chunksize: int | None = None, + sort: bool = True, + name: str | None = None, +) -> DataFrame: + ... + + +# We ignore this overload for now until pandas-stubs can be added. +# See https://github.com/dask/dask/issues/9220 +@overload +def from_pandas( # type: ignore + data: pd.Series, + npartitions: int | None = None, + chunksize: int | None = None, + sort: bool = True, + name: str | None = None, +) -> Series: + ... + + def from_pandas( data: pd.DataFrame | pd.Series, npartitions: int | None = None,
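A short runnable illustration of the behaviour the overloads encode: a pandas DataFrame maps to a Dask DataFrame and a pandas Series to a Dask Series, which type checkers can now infer from the argument type.

import pandas as pd
import dask.dataframe as dd

ddf = dd.from_pandas(pd.DataFrame({"x": range(10)}), npartitions=2)
dseries = dd.from_pandas(pd.Series(range(10)), npartitions=2)

print(type(ddf).__name__, type(dseries).__name__)   # DataFrame Series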
fix horizontal line breaks linter issue fix
@@ -545,7 +545,7 @@ class md026(mddef): else: mr = re.match(self.ratx, title) title = mr.group(2) - if title[-1] in self.settings: + if len(title) > 0 and title[-1] in self.settings: ret[s] = '%s found' % repr(title[-1]) return ret
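A tiny standalone repro of why the length guard is needed: indexing the last character of an empty title would raise IndexError, and the short-circuiting `and` now skips that lookup (the punctuation string below is a hypothetical stand-in for the rule's settings).

settings = ".,;:!?"

for title in ["Heading.", ""]:
    if len(title) > 0 and title[-1] in settings:
        print(repr(title), "-> trailing punctuation found")
    else:
        print(repr(title), "-> ok")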
Replace self._execution.signal with schedule_task This let us have ExternalWorkflowExecutionSignaled events in the history, so we don't resend signals multiple times.
@@ -1148,6 +1148,8 @@ class Executor(executor.Executor): known_workflows_ids = frozenset(known_workflows_ids) + signals_scheduled = False + for signal in history.signals.values(): input = signal['input'] if not isinstance(input, dict): # foreign signal: don't try processing it @@ -1157,12 +1159,8 @@ class Executor(executor.Executor): continue name = signal['name'] - input = { - 'args': input.get('args'), - 'kwargs': input.get('kwargs'), - '__propagate': propagate, - } - + args = input.get('args', ()) + kwargs = input.get('kwargs', {}) sender = ( signal['external_workflow_id'], signal['external_run_id'] @@ -1171,16 +1169,21 @@ class Executor(executor.Executor): (w['workflow_id'], w['run_id']) for w in history.signaled_workflows[name] ) not_signaled_workflows_ids = list(known_workflows_ids - signaled_workflows_ids - {sender}) + extra_input = {'__propagate': propagate} for workflow_id, run_id in not_signaled_workflows_ids: - try: - self._execution.signal( - signal_name=name, - input=input, - workflow_id=workflow_id, - run_id=run_id, - ) - except swf.models.workflow.WorkflowExecutionDoesNotExist: - logger.info('Workflow {} {} disappeared'.format(workflow_id, run_id)) + self.schedule_task(SignalTask( + name, + workflow_id, + run_id, + None, + extra_input, + *args, + **kwargs + )) + signals_scheduled = True + if signals_scheduled: + self._append_timer = True + raise exceptions.ExecutionBlocked() def record_marker(self, name, details=None): return MarkerTask(name, details)
Adds some documentation changes Specifying which models are discrete or continuous. At time of commit, all are discrete (I think).
@@ -188,8 +188,9 @@ class LinearGaussianTimeInvariantTransitionModel(LinearGaussianTransitionModel, class ConstantNthDerivative(LinearGaussianTransitionModel, TimeVariantModel): - r"""Discrete model based on the Nth derivative with respect to time being constant, - to set derivative use keyword argument :attr:`constant_derivative` + r"""Discrete model based on the Nth derivative with respect to time being + constant, to set derivative use keyword argument + :attr:`constant_derivative` The model is described by the following SDEs: @@ -381,8 +382,8 @@ class ConstantAcceleration(ConstantNthDerivative): class NthDerivativeDecay(LinearGaussianTransitionModel, TimeVariantModel): - r"""Discrete model based on the Nth derivative with respect to time decaying to 0 - exponentially, to set derivative use keyword argument + r"""Discrete model based on the Nth derivative with respect to time + decaying to 0 exponentially, to set derivative use keyword argument :attr:`decay_derivative` The model is described by the following SDEs:
EditScopeUI : Don't prune if EditScope isn't being viewed This would lead to a terribly confusing experience where things were being pruned in a downstream node without any feedback in the Viewer.
@@ -62,6 +62,13 @@ def __pruningKeyPress( viewer, event ) : # that all its descendants are selected? return True + viewedNode = viewer.view()["in"].getInput().node() + if editScope != viewedNode and editScope not in Gaffer.NodeAlgo.upstreamNodes( viewedNode ) : + # Spare folks from deleting things in a downstream EditScope. + ## \todo When we have a nice Viewer notification system we + # should emit a warning here. + return True + sceneGadget = viewer.view().viewportGadget().getPrimaryChild() selection = sceneGadget.getSelection() if not selection.isEmpty() :
Check for run when resolving schedule attempt Summary: Fixes Test Plan: Delete run, load scheduler page, verify there is no error Reviewers: alangenfeld
@@ -148,6 +148,7 @@ def resolve_attempts(self, graphene_info, **kwargs): ): status = DauphinScheduleAttemptStatus.SUCCESS run_id = json_result['run']['runId'] + if graphene_info.context.instance.has_run(run_id): run = graphene_info.schema.type_named('PipelineRun')( graphene_info.context.instance.get_run_by_id(run_id) )
Fixes and improvements for tiles Added automatic tile number selection; fixed tiles displacement
@@ -69,7 +69,8 @@ class Tiles: a range between ``low`` and ``high`` for each dimension. Args: - n_tilings (int): number of tilings; + n_tilings (int): number of tilings, or -1 to compute the number + automatically; n_tiles (list): number of tiles for each tilings for each dimension; low (np.ndarray): lowest value for each dimension; high (np.ndarray): highest value for each dimension. @@ -84,6 +85,13 @@ class Tiles: """ assert len(n_tiles) == len(low) == len(high) + assert n_tilings > 0 or n_tilings == -1 + + if n_tilings == -1: + d = np.size(low) # space-dim + m = np.max([np.ceil(np.log(4 * d) / np.log(2)), + np.ceil(np.log(n_tilings) / np.log(2))]) + n_tilings = m**2 # Min, max coord., side length of the state-space low = np.array(low, dtype=np.float) @@ -91,16 +99,10 @@ class Tiles: L = high - low # Unit shift displacement vector - shift = 1 if uniform else 2 * np.arange(len(low)) + 1 # Miller, Glanz (1996) - - # N tilings and N_mod, useful for Miller, Glanz tilings (1996) - d = np.size(low) # space-dim - m = np.max([np.ceil(np.log(4 * d) / np.log(2)), - np.ceil(np.log(n_tilings) / np.log(2))]) - N_mod = n_tilings if uniform else 2 ** m # Miller, Glanz (1996) + shift = 1 if uniform else 2 * np.arange(len(low)) + 1 # Length of the sides of the tiles, l - be = (N_mod - 1) / N_mod + be = (n_tilings - 1) / n_tilings l = L / (np.array(n_tiles) - be) # Generate the list of tilings @@ -108,10 +110,10 @@ class Tiles: for i in range(n_tilings): # Shift vector - v = (i * shift) % N_mod + v = (i * shift) % n_tilings # Min, max of the coordinates of the i-th tiling - x_min = low + (-N_mod + 1 + v) / N_mod * l + x_min = low + (-n_tilings + 1 + v) / n_tilings * l x_max = x_min + l * n_tiles # Rearrange x_min, x_max and append new tiling to the list
Fixed image_format typo in doc Closes-Bug:
@@ -1663,7 +1663,7 @@ select a format from the set that the Glance service supports. This supported set can be seen by querying the ``/v2/schemas/images`` resource. An operator can add or remove disk formats to the supported set. This is done by setting the ``disk_formats`` parameter which is found in the -``[image_formats]`` section of ``glance-api.conf``. +``[image_format]`` section of ``glance-api.conf``. ``disk_formats=<Comma separated list of disk formats>`` Optional. Default: ``ami,ari,aki,vhd,vhdx,vmdk,raw,qcow2,vdi,iso,ploop``