Columns: message (string, lengths 13 to 484) and diff (string, lengths 38 to 4.63k)
Modify custom models to take state copies I don't fully understand why this is needed, but it prevents some bugs when rerunning a platform simulation. Since the model maker is only called once per platform, having a deepcopy shouldn't slow things down much. [ci skip]
@@ -132,12 +132,12 @@ def create_smooth_transition_models(initial_state, x_coords, y_coords, times, tu if d > 0: # if platform is not already at target coord, add linear acceleration model try: - accel_model = Point2PointConstantAcceleration(state=state, + accel_model = Point2PointConstantAcceleration(state=deepcopy(state), destination=(x_coord, y_coord), duration=timedelta(seconds=t2)) except OvershootError: # if linear accel leads to overshoot, apply model to stop at target coord instead - accel_model = Point2PointStop(state=state, + accel_model = Point2PointStop(state=deepcopy(state), destination=(x_coord, y_coord)) state.state_vector = accel_model.function(state=state, time_interval=timedelta(seconds=t2))
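A minimal, self-contained sketch of the failure mode this commit guards against (the `State` class below is a stand-in, not Stone Soup's): when a model keeps a reference to a mutable state object that the simulation later mutates in place, rerunning the simulation starts from a corrupted "initial" state, whereas `deepcopy` gives each model its own snapshot.

```python
from copy import deepcopy


class State:
    """Stand-in for a platform state with a mutable state vector."""
    def __init__(self, vector):
        self.state_vector = list(vector)


def make_model(state):
    # The model remembers the state it was constructed from.
    return {"initial_state": state}


shared = State([0.0, 0.0])
model = make_model(shared)            # no copy: the model aliases `shared`
shared.state_vector = [5.0, 2.0]      # the simulation advances the platform in place
print(model["initial_state"].state_vector)   # [5.0, 2.0] -- the model's start point drifted

shared = State([0.0, 0.0])
model = make_model(deepcopy(shared))  # with a copy, the model keeps its own snapshot
shared.state_vector = [5.0, 2.0]
print(model["initial_state"].state_vector)   # [0.0, 0.0] -- safe to rerun
```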
Fix wrong production name in the test Love when tests just encode the erroneous data!
@@ -80,7 +80,7 @@ class GlyphDataTest(unittest.TestCase): self.assertEqual(prod("brevecomb_acutecomb"), "uni03060301") self.assertEqual(prod("vaphalaa-malayalam"), "uni0D030D35.1") self.assertEqual(prod("onethird"), "uni2153") - self.assertEqual(prod("Jacute"), "uni00A40301") + self.assertEqual(prod("Jacute"), "uni004A0301") def test_unicode(self): def uni(n):
Refactor Token_Start/End to rely on Node.Token TN:
@@ -2445,10 +2445,7 @@ package body ${ada_lib_name}.Analysis is function Token_Start (Node : access ${root_node_value_type}'Class) return Token_Type - is - ((TDH => Token_Data (Node.Unit), - Token => Node.Token_Start, - Trivia => No_Token_Index)); + is (Node.Token (Node.Token_Start)); --------------- -- Token_End -- @@ -2459,10 +2456,8 @@ package body ${ada_lib_name}.Analysis is return Token_Type is (if Node.Token_End = No_Token_Index - then Token_Start (Node) - else (TDH => Token_Data (Node.Unit), - Token => Node.Token_End, - Trivia => No_Token_Index)); + then Node.Token_Start + else Node.Token (Node.Token_End)); ----------- -- Token --
[main] Offset periodic tasks and add random jitter. This prevents fetching profile pics and checking for updates immediately on startup.
@@ -14,6 +14,7 @@ import shutil import logging.handlers from collections import deque import asyncio +import random from concurrent.futures import ThreadPoolExecutor from typing import Union, List, Iterator, Dict, Optional, Deque, Any @@ -237,9 +238,8 @@ class Maestral: thread_name_prefix="maestral-thread-pool", max_workers=2, ) - self._refresh_task = self._loop.create_task( - self._periodic_refresh(), - ) + self._refresh_info_task = self._loop.create_task(self._periodic_refresh_info()) + self._update_task = self._loop.create_task(self._period_update_check()) # create a future which will return once `shutdown_daemon` is called # can be used by an event loop wait until maestral has been stopped @@ -1322,7 +1322,8 @@ class Maestral: self.stop_sync() - self._refresh_task.cancel() + self._refresh_info_task.cancel() + self._update_task.cancel() self._thread_pool.shutdown(wait=False) if self._loop.is_running(): @@ -1410,7 +1411,9 @@ class Maestral: batch_op.drop_constraint(constraint_name=name, type_="unique") - async def _periodic_refresh(self) -> None: + async def _periodic_refresh_info(self) -> None: + + await asyncio.sleep(60 * 5) while True: # update account info @@ -1425,6 +1428,13 @@ class Maestral: self._thread_pool, self.get_profile_pic ) + await asyncio.sleep(60 * (44.5 + random.random())) # (45 +/- 1) min + + async def _period_update_check(self) -> None: + + await asyncio.sleep(60 * 3) + + while True: # check for maestral updates res = await self._loop.run_in_executor( self._thread_pool, self.check_for_updates @@ -1433,7 +1443,7 @@ class Maestral: if not res["error"]: self._state.set("app", "latest_release", res["latest_release"]) - await asyncio.sleep(60 * 60) # 60 min + await asyncio.sleep(60 * (59.5 + random.random())) # (60 +/- 1) min def __repr__(self) -> str:
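The pattern in this diff, reduced to a generic hedged sketch (the function and parameter names here are illustrative, not Maestral's): delay the first run so startup stays fast, then sleep a jittered interval so separate periodic tasks do not fire in lockstep.

```python
import asyncio
import random


async def run_periodically(job, initial_delay, base_interval, jitter):
    # Skip the work at startup; run it for the first time after `initial_delay` seconds.
    await asyncio.sleep(initial_delay)
    while True:
        await job()
        # Sleep base_interval +/- jitter seconds so repeated tasks spread out over time.
        await asyncio.sleep(base_interval + random.uniform(-jitter, jitter))


async def check_for_updates():
    print("checking for updates")


async def main():
    task = asyncio.create_task(
        run_periodically(check_for_updates, initial_delay=1, base_interval=5, jitter=1)
    )
    await asyncio.sleep(12)   # let it fire a couple of times
    task.cancel()             # on shutdown, cancel the periodic task
    try:
        await task
    except asyncio.CancelledError:
        pass


asyncio.run(main())
```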
Rename PathManipulationTests to PathManipulationTestBase for consistency This base class has no tests, and all other non-concrete test case base classes in this file use a *TestBase convention.
@@ -3958,12 +3958,12 @@ class ResolvePathTest(FakeFileOpenTestBase): self.assertEqual('!!foo!bar!baz', self.filesystem.ResolvePath('!!foo!bar!baz!!')) -class PathManipulationTests(TestCase): +class PathManipulationTestBase(TestCase): def setUp(self): self.filesystem = fake_filesystem.FakeFilesystem(path_separator='|') -class CollapsePathPipeSeparatorTest(PathManipulationTests): +class CollapsePathPipeSeparatorTest(PathManipulationTestBase): """Tests CollapsePath (mimics os.path.normpath) using | as path separator.""" def testEmptyPathBecomesDotPath(self): @@ -4015,7 +4015,7 @@ class CollapsePathPipeSeparatorTest(PathManipulationTests): 'bar', self.filesystem.CollapsePath('foo|..|yes|..|no|..|bar')) -class SplitPathTest(PathManipulationTests): +class SplitPathTest(PathManipulationTestBase): """Tests SplitPath (which mimics os.path.split) using | as path separator.""" def testEmptyPath(self): @@ -4047,7 +4047,7 @@ class SplitPathTest(PathManipulationTests): self.assertEqual(('|a||b', 'c'), self.filesystem.SplitPath('|a||b||c')) -class JoinPathTest(PathManipulationTests): +class JoinPathTest(PathManipulationTestBase): """Tests JoinPath (which mimics os.path.join) using | as path separator.""" def testOneEmptyComponent(self):
Re-add validate_unique. Cannot save the object otherwise; the view's form_valid always returns None, even when saving with commit None.
@@ -71,11 +71,21 @@ class BaseDeterminationForm(forms.ModelForm): return cleaned_data + def validate_unique(self): + # Update the instance data + # form_valid on the View does not return the determination instance so we have to do this here. + self.instance.submission = self.submission + self.instance.author = self.request.user + self.instance.data = {key: value for key, value in self.cleaned_data.items() + if key not in ['outcome', 'message']} + + try: + self.instance.validate_unique() + except ValidationError as e: + self._update_errors(e) def save(self, commit=True): self.instance.outcome = int(self.cleaned_data['outcome']) self.instance.message = self.cleaned_data['message'] - self.instance.data = {key: value for key, value in self.cleaned_data.items() - if key not in ['outcome', 'message']} self.instance.is_draft = self.draft_button_name in self.data if self.transition and not self.instance.is_draft:
Created helper function `get_webhook` and added property in `News`. `News.get_webhook` fetches a discord.Webhook by the ID provided in the config; `self.webhook` uses the webhook it gets from this function.
+import discord from discord.ext.commands import Cog +from bot import constants from bot.bot import Bot MAIL_LISTS = [ @@ -15,10 +17,11 @@ class News(Cog): def __init__(self, bot: Bot): self.bot = bot self.bot.loop.create_task(self.sync_maillists()) + self.webhook = self.bot.loop.create_task(self.get_webhook()) async def sync_maillists(self) -> None: """Sync currently in-use maillists with API.""" - # Wait until guild is available to avoid running before API is ready + # Wait until guild is available to avoid running before everything is ready await self.bot.wait_until_guild_available() response = await self.bot.api_client.get("bot/bot-settings/news") @@ -32,6 +35,10 @@ class News(Cog): await self.bot.api_client.put("bot/bot-settings/news", json=response) + async def get_webhook(self) -> discord.Webhook: + """Get #python-news channel webhook.""" + return await self.bot.fetch_webhook(constants.Webhooks.python_news) + def setup(bot: Bot) -> None: """Add `News` cog."""
Set the BUILD_ENVIRONMENT variable before installing sccache. Summary: Set the build environment before installing sccache in order to make sure the docker images have the links set up. Pull Request resolved:
@@ -5,6 +5,10 @@ ARG EC2 ADD ./install_base.sh install_base.sh RUN bash ./install_base.sh && rm install_base.sh +# Include BUILD_ENVIRONMENT environment variable in image +ARG BUILD_ENVIRONMENT +ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT} + # Install Python ARG PYTHON_VERSION ADD ./install_python.sh install_python.sh @@ -70,7 +74,3 @@ ARG JENKINS_GID ADD ./add_jenkins_user.sh add_jenkins_user.sh RUN if [ -n "${JENKINS}" ]; then bash ./add_jenkins_user.sh ${JENKINS_UID} ${JENKINS_GID}; fi RUN rm add_jenkins_user.sh - -# Include BUILD_ENVIRONMENT environment variable in image -ARG BUILD_ENVIRONMENT -ENV BUILD_ENVIRONMENT ${BUILD_ENVIRONMENT}
Updated README.md for nginx. The default parameter for logs in conftest.py is modsec2-apache, which causes ModSecurity to always look for [modsec2-apache] in config.ini. This causes problems with the nginx server, as its logging regexes are different. This is solved by changing the default parameter from modsec2-apache to modsec3-nginx in conftest.py.
@@ -17,7 +17,7 @@ Requirements ============ There are Three requirements for running the OWASP CRS regressions. -1. You must have ModSecurity specify the location of your error.log, this is done in the config.ini file.If you are using nginx you need to specify ModSecurity to look for [modsec3-nginx] in config.ini, this is done in conftest.py(need to change default parameter from modsec2-apache to modsec3-nginx) +1. You must have ModSecurity specify the location of your error.log, this is done in the config.ini file.If you are using nginx you need to change default parameter from modsec2-apache to modsec3-nginx in conftest.py 2. ModSecurity must be in DetectionOnly (or anomaly scoring) mode 3. You must disable IP blocking based on previous events
Fix get_all_prefixes() call. context.jsonld prefixes are modelled in two different ways at the moment: as the 'old' `prefix = string URL` tag-values or as `'<prefix>' = dict(@id, tag-value)`. This is probably a LinkML bug(?), but for now, to permit validation, we fix the unit test here.
@@ -125,7 +125,11 @@ class Validator(object): """ if not jsonld: jsonld = get_jsonld_context() - prefixes: Set = set(k for k, v in jsonld.items() if isinstance(v, str)) # type: ignore + prefixes: Set = set( + k for k, v in jsonld.items() + if isinstance(v, str) or + (isinstance(v, dict) and v.setdefault('@prefix', False)) + ) # @type: ignored if 'biolink' not in prefixes: prefixes.add('biolink') return prefixes
Small fix in printing download location * Revert "Replaced with ThrowException fn (#290)" This reverts commit * Minor fix in logging
@@ -1062,7 +1062,7 @@ Function Get-LISAv2Tools($XMLSecretFile) $WebClient.DownloadFile("$toolFileAccessLocation/$_","$CurrentDirectory\Tools\$_") # Successfully downloaded files - LogMsg "File $_ successfully downloaded in Tools folder: $_." + LogMsg "File $_ successfully downloaded in Tools folder: $CurrentDirectory\Tools." } } } \ No newline at end of file
Replace outdated link with rationale for pinning Closes Refs (supersedes)
@@ -5,7 +5,7 @@ pip-tools = pip-compile + pip-sync ================================== A set of command line tools to help you keep your ``pip``-based packages fresh, -even when you've pinned them. `You do pin them, right?`_ +even when you've pinned them. You do pin them, right? (In building your Python application and its dependencies for production, you want to make sure that your builds are predictable and deterministic.) .. image:: https://github.com/jazzband/pip-tools/raw/master/img/pip-tools-overview.png :alt: pip-tools overview for phase II
boost: corrected check for key. My test used a modified conandata.yml, so I did not test in real conditions.
@@ -140,7 +140,7 @@ class BoostConan(ConanFile): def source(self): tools.get(**self.conan_data["sources"][self.version]) - if self.conan_data["patches"][self.version]: + if self.version in self.conan_data["patches"]: for patch in self.conan_data["patches"][self.version]: tools.patch(**patch)
Updates ObjectMapper hash to build against 4.2 This new hash passes project_precommit_check when built against Xcode 10 Beta 3's compiler.
"maintainer": "[email protected]", "compatibility": [ { - "version": "3.0", - "commit": "eef27bfcfd201036a12992b6988e64a088fe7354" + "version": "4.2", + "commit": "ed1caa237b9742135996fefe3682b834bb394a6a" } ], "platforms": [ "workspace": "ObjectMapper.xcworkspace", "scheme": "ObjectMapper-iOS", "destination": "generic/platform=iOS", - "configuration": "Release", - "xfail": { - "compatibility": { - "3.0": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-6690", - "swift-4.2-branch": "https://bugs.swift.org/browse/SR-6690" - } - } - } - } + "configuration": "Release" }, { "action": "BuildXcodeWorkspaceScheme", "workspace": "ObjectMapper.xcworkspace", "scheme": "ObjectMapper-Mac", "destination": "generic/platform=macOS", - "configuration": "Release", - "xfail": { - "compatibility": { - "3.0": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-6690", - "swift-4.2-branch": "https://bugs.swift.org/browse/SR-6690" - } - } - } - } + "configuration": "Release" }, { "action": "BuildXcodeWorkspaceScheme", "workspace": "ObjectMapper.xcworkspace", "scheme": "ObjectMapper-tvOS", "destination": "generic/platform=tvOS", - "configuration": "Release", - "xfail": { - "compatibility": { - "3.0": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-6690", - "swift-4.2-branch": "https://bugs.swift.org/browse/SR-6690" - } - } - } - } + "configuration": "Release" }, { "action": "BuildXcodeWorkspaceScheme", "workspace": "ObjectMapper.xcworkspace", "scheme": "ObjectMapper-watchOS", "destination": "generic/platform=watchOS", - "configuration": "Release", - "xfail": { - "compatibility": { - "3.0": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-6690", - "swift-4.2-branch": "https://bugs.swift.org/browse/SR-6690" - } - } - } - } + "configuration": "Release" } ] },
astdoc.py: rename "is_inherit" to "is_inherited" TN:
@@ -118,11 +118,11 @@ def print_field(context, file, struct, field): )), )) - is_inherit = not field.struct == struct + is_inherited = field.struct != struct inherit_note = ( ' [inherited from {}]'.format(field_ref(field)) - if is_inherit else '' + if is_inherited else '' ) print('<div class="node_wrapper">', file=file) @@ -139,7 +139,7 @@ def print_field(context, file, struct, field): field.type.name().camel ), inherit_note=inherit_note, - inherit_class='class="inherited" ' if is_inherit else "" + inherit_class='class="inherited" ' if is_inherited else "" ), file=file )
bump to 3.6 Anaconda is now at P3.6
@@ -4,12 +4,12 @@ language: python python: - 2.7 - - 3.5 + - 3.6 matrix: include: - - python: 3.5 + - python: 3.6 env: CC=clang CXX=clang++ - - python: 3.5 + - python: 3.6 env: NOMKL=1 addons: apt: @@ -58,6 +58,6 @@ script: - nosetests --verbosity=2 --with-coverage --cover-package=qutip qutip after_success: - - if [[ $TRAVIS_PYTHON_VERSION == '3.5' ]] && [[ $CC == 'gcc' ]]; then + - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]] && [[ $CC == 'gcc' ]]; then coveralls; fi \ No newline at end of file
data/stubconfig/repos.conf: use tar-based repo instead of sqfs To avoid requiring namespace and sqfs mounting support for external usage (e.g. the github pkgcheck action).
@@ -6,8 +6,5 @@ location = ../stubrepo [gentoo] location = /var/db/repos/gentoo -repo-type = sqfs-v1 -# distfiles.gentoo.org certs aren't actually valid, defeating the purpose of https; thus -# forcing http. -sync-uri = http://distfiles.gentoo.org/snapshots/squashfs/gentoo-current.lzo.sqfs -sync-type = sqfs +sync-uri = https://github.com/gentoo-mirror/gentoo/archive/stable.tar.gz +sync-type = tar
Fix help for `--process-total-child-memory-usage` and `--process-per-child-memory-usage` In particular, the `--process-total-child-memory-usage` flag was setting `default_help_repr="1GiB",`, even though it did not have a `default`. Additionally, add some clarifications around which processes are impacted, and what happens when `--process-total-child-memory-usage` is not set. [ci skip-rust] [ci skip-build-wheels]
@@ -460,6 +460,9 @@ class LocalStoreOptions: ) +_PER_CHILD_MEMORY_USAGE = "512MiB" + + DEFAULT_EXECUTION_OPTIONS = ExecutionOptions( # Remote execution strategy. remote_execution=False, @@ -470,7 +473,7 @@ DEFAULT_EXECUTION_OPTIONS = ExecutionOptions( remote_ca_certs_path=None, # Process execution setup. process_total_child_memory_usage=None, - process_per_child_memory_usage=memory_size("512MiB"), + process_per_child_memory_usage=memory_size(_PER_CHILD_MEMORY_USAGE), process_execution_local_parallelism=CPU_COUNT, process_execution_remote_parallelism=128, process_execution_cache_namespace=None, @@ -1083,23 +1086,28 @@ class BootstrapOptions: """ ), ) + + _process_total_child_memory_usage = "--process-total-child-memory-usage" + _process_per_child_memory_usage_flag = "--process-per-child-memory-usage" process_total_child_memory_usage = MemorySizeOption( - "--process-total-child-memory-usage", + _process_total_child_memory_usage, advanced=True, default=None, - default_help_repr="1GiB", help=softwrap( - """ - The maximum memory usage for all child processes. + f""" + The maximum memory usage for all "pooled" child processes. + + When set, this value participates in precomputing the pool size of child processes + used by Pants (pooling is currently used only for the JVM). When not set, Pants will + default to spawning `2 * {_process_execution_local_parallelism_flag}` pooled processes. - This value participates in precomputing the pool size of child processes used by - `pantsd`. A high value would result in a high number of child processes spawned, - potentially overconsuming your resources and triggering the OS' OOM killer. A low - value would mean a low number of child processes launched and therefore less - paralellism for the tasks that need those processes. + A high value would result in a high number of child processes spawned, potentially + overconsuming your resources and triggering the OS' OOM killer. A low value would + mean a low number of child processes launched and therefore less parallelism for the + tasks that need those processes. - If setting this value, consider also setting a value for the `process-per-child-memory-usage` - option too. + If setting this value, consider also adjusting the value of the + `{_process_per_child_memory_usage_flag}` option. You can suffix with `GiB`, `MiB`, `KiB`, or `B` to indicate the unit, e.g. `2GiB` or `2.12GiB`. A bare number will be in bytes. @@ -1107,15 +1115,15 @@ class BootstrapOptions: ), ) process_per_child_memory_usage = MemorySizeOption( - "--process-per-child-memory-usage", + _process_per_child_memory_usage_flag, advanced=True, default=DEFAULT_EXECUTION_OPTIONS.process_per_child_memory_usage, - default_help_repr="512MiB", + default_help_repr=_PER_CHILD_MEMORY_USAGE, help=softwrap( - """ - The default memory usage for a child process. + f""" + The default memory usage for a single "pooled" child process. - Check the documentation for the `process-total-child-memory-usage` for advice on + Check the documentation for the `{_process_total_child_memory_usage}` for advice on how to choose an appropriate value for this option. You can suffix with `GiB`, `MiB`, `KiB`, or `B` to indicate the unit, e.g.
DOC: updated changelog Updated changelog with description of things changed in the PR.
@@ -42,6 +42,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - Added .zenodo.json file, to improve specification of authors in citation - Improved __str__ and __repr__ functions for basic classes - Improved docstring readability and consistency + - Added Travis-CI testing for the documentation - Bug Fix - Fixed custom instrument attribute persistence upon load - Improved string handling robustness when writing netCDF4 files in Python 3
creates_node handles Opt parser Also, adding a non-transitive mode to use in the pp pass.
@@ -1437,7 +1437,7 @@ class NodeToParsersPass(): self.compute(c) -def creates_node(p): +def creates_node(p, follow_refs=True): """ Predicate that is true on parsers that create a node directly, or are just a reference to one or several parsers that creates nodes, without @@ -1449,12 +1449,17 @@ def creates_node(p): Row(a, b, c) # <- False Pick(";", "lol", c) # <- False """ - if isinstance(p, Or): + if isinstance(p, Or) and follow_refs: return all(creates_node(c) for c in p.children()) + if isinstance(p, Defer) and follow_refs: + return p.get_type().matches(ASTNode) + return ( - isinstance(p, Transform) or isinstance(p, List) - or (isinstance(p, Defer) and p.get_type().matches(ASTNode)) + isinstance(p, Transform) + or isinstance(p, List) + or (isinstance(p, Opt) + and p._booleanize and p._booleanize[0].matches(ASTNode)) )
Add Puerto Rico data source I forgot to do this in
@@ -128,6 +128,7 @@ Real-time electricity data is obtained using [parsers](https://github.com/tmrowc - New England: [NEISO](https://www.iso-ne.com/isoexpress/) - New York: [NYISO](http://www.nyiso.com/public/markets_operations/market_data/graphs/index.jsp) - PJM: [PJM](http://www.pjm.com/markets-and-operations.aspx) + - Puerto Rico: [AEEPR](https://aeepr.com/en-us/Pages/Generaci%C3%B3n.aspx) - Southwest Power Pool: [SPP](https://marketplace.spp.org/pages/generation-mix) - Southwest Variable Energy Resource Initiative: [SVERI](https://sveri.energy.arizona.edu/#generation-by-fuel-type) - Texas: [ERCOT](http://www.ercot.com/content/cdr/html/real_time_system_conditions.html)
[modules/datetime] Add encoding for locale. When creating the date/time string, use the locale's preferred encoding to format the string. Hopefully, this fixes
@@ -35,6 +35,7 @@ class Module(bumblebee.engine.Module): locale.setlocale(locale.LC_TIME, lcl.split(".")) def get_time(self, widget): - return datetime.datetime.now().strftime(self._fmt) + enc = locale.getpreferredencoding() + return datetime.datetime.now().strftime(self._fmt).decode(enc) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
datapaths: Allow pcap_switch without pxpcap pcap_switch has virtual ports which don't require pxpcap, so we no longer strictly require pxpcap.
@@ -291,9 +291,6 @@ def launch (address = '127.0.0.1', port = 6633, max_retry_delay = 16, Launches a switch """ - if not pxpcap.enabled: - raise RuntimeError("You need PXPCap to use this component") - if ctl_port: if ctl_port is True: ctl_port = DEFAULT_CTL_PORT @@ -479,6 +476,9 @@ class PCapSwitch (ExpireMixin, SoftwareSwitchBase): if isinstance(virtual, str): px.channel = virtual else: + if not pxpcap.enabled: + on_error("Not adding port %s because PXPCap is not available", name) + return devs = pxpcap.PCap.get_devices() if name not in devs: on_error("Device %s not available -- ignoring", name)
pin away bad recent grpcio version. Did not track down the lock, but local processes and tests lock up when using 1.48.0. ### How I Tested These Changes bk
@@ -67,7 +67,9 @@ def get_version() -> str: # alembic 1.7.0 is a breaking change "alembic>=1.2.1,!=1.6.3,<1.7.0", "croniter>=0.3.34", - "grpcio>=1.32.0", # ensure version we require is >= that with which we generated the grpc code (set in dev-requirements) + # ensure grpcio version we require is >= that with which we generated the grpc code (set in dev-requirements) + # https://github.com/dagster-io/dagster/issues/9099 + "grpcio>=1.32.0,<1.48.0", "grpcio-health-checking>=1.32.0,<1.44.0", "packaging>=20.9", "pendulum",
remove role names as str, snapshot order. This commit removes the role names as strings. Also makes a slight change to the snapshot ordering for clarity.
@@ -357,11 +357,11 @@ class TestRefresh(unittest.TestCase): self._assert_version_equals(Timestamp.type, 99999) # repo add new timestamp keys and recovers the timestamp version - self.sim.root.roles["timestamp"].keyids.clear() - self.sim.signers["timestamp"].clear() + self.sim.root.roles[Timestamp.type].keyids.clear() + self.sim.signers[Timestamp.type].clear() key, signer = self.sim.create_key() - self.sim.root.add_key("timestamp", key) - self.sim.add_signer("timestamp", signer) + self.sim.root.add_key(Timestamp.type, key) + self.sim.add_signer(Timestamp.type, signer) self.sim.root.version += 1 self.sim.publish_root() self.sim.timestamp.version = 1 @@ -445,20 +445,20 @@ class TestRefresh(unittest.TestCase): self._assert_version_equals(Snapshot.type, 99999) # repo add new snapshot and timestamp keys and recovers snapshot version - self.sim.root.roles["snapshot"].keyids.clear() - self.sim.signers["snapshot"].clear() - self.sim.root.roles["timestamp"].keyids.clear() - self.sim.signers["timestamp"].clear() + self.sim.root.roles[Snapshot.type].keyids.clear() + self.sim.signers[Snapshot.type].clear() + self.sim.root.roles[Timestamp.type].keyids.clear() + self.sim.signers[Timestamp.type].clear() snapshot_key, snapshot_signer = self.sim.create_key() - self.sim.root.add_key("snapshot", snapshot_key) - self.sim.add_signer("snapshot", snapshot_signer) + self.sim.root.add_key(Snapshot.type, snapshot_key) + self.sim.add_signer(Snapshot.type, snapshot_signer) timestamp_key, timestamp_signer = self.sim.create_key() - self.sim.root.add_key("timestamp", timestamp_key) - self.sim.add_signer("timestamp", timestamp_signer) - self.sim.snapshot.version = 1 + self.sim.root.add_key(Timestamp.type, timestamp_key) + self.sim.add_signer(Timestamp.type, timestamp_signer) self.sim.root.version += 1 self.sim.publish_root() + self.sim.snapshot.version = 1 self.sim.update_timestamp() # client refresh the metadata and see the initial snapshot version @@ -530,15 +530,15 @@ class TestRefresh(unittest.TestCase): self.sim.compute_metafile_hashes_length = True self.sim.update_snapshot() self._run_refresh() - self._assert_version_equals("timestamp", 2) - self._assert_version_equals("snapshot", 2) + self._assert_version_equals(Timestamp.type, 2) + self._assert_version_equals(Snapshot.type, 2) self.sim.compute_metafile_hashes_length = False self.sim.update_snapshot() self._run_refresh() - self._assert_version_equals("timestamp", 3) - self._assert_version_equals("snapshot", 3) + self._assert_version_equals(Timestamp.type, 3) + self._assert_version_equals(Snapshot.type, 3) if __name__ == "__main__":
Fix wrongly converted assert * Fix wrongly converted assert Seems like this assertion was replaced by an exception but the condition got wrongly converted. * Update src/datasets/search.py
@@ -305,7 +305,7 @@ class FaissIndex(BaseIndex): scores (`List[List[float]`): The retrieval scores of the retrieved examples. indices (`List[List[int]]`): The indices of the retrieved examples. """ - if len(query.shape) != 1 or (len(query.shape) == 2 and query.shape[0] != 1): + if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1): raise ValueError("Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)") queries = query.reshape(1, -1)
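To see why the original condition was wrong: the intent is "reject anything that is neither a 1-D vector nor a (1, N) array", and negating that with De Morgan's law gives the `and`/`or` form in the fix. A small illustrative check (not datasets' actual code):

```python
import numpy as np


def query_is_valid(query):
    # Accept a 1-D vector or a 2-D array with exactly one row.
    return len(query.shape) == 1 or (len(query.shape) == 2 and query.shape[0] == 1)


assert query_is_valid(np.zeros(8))           # 1-D vector: accepted
assert query_is_valid(np.zeros((1, 8)))      # shape (1, N): accepted
assert not query_is_valid(np.zeros((3, 8)))  # several rows: rejected

# The guard in the fix is simply `not query_is_valid(query)`:
# len(shape) != 1 and (len(shape) != 2 or shape[0] != 1)
```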
[CI] Chaos tests for dataset random shuffle 1tb. Add chaos tests for dataset random shuffle 1tb: both simple shuffle and push-based shuffle. Mark dataset_shuffle_push_based_random_shuffle_1tb as stable.
test_name: dataset_shuffle_push_based_random_shuffle_1tb test_suite: dataset_test - stable: false - frequency: nightly team: core cluster: num_nodes: 20 type: sdk_command file_manager: sdk + +- name: chaos_dataset_shuffle_random_shuffle_1tb + group: core-dataset-tests + working_dir: nightly_tests + legacy: + test_name: chaos_dataset_shuffle_random_shuffle_1tb + test_suite: chaos_test + + stable: false + + frequency: nightly + team: core + cluster: + cluster_env: shuffle/shuffle_app_config.yaml + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + prepare: ' python setup_chaos.py --node-kill-interval 600 --max-nodes-to-kill 2' + script: python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 20 + type: sdk_command + file_manager: sdk + +- name: chaos_dataset_shuffle_push_based_random_shuffle_1tb + group: core-dataset-tests + working_dir: nightly_tests + legacy: + test_name: chaos_dataset_shuffle_push_based_random_shuffle_1tb + test_suite: chaos_test + + stable: false + + frequency: nightly + team: core + cluster: + cluster_env: shuffle/shuffle_app_config.yaml + cluster_compute: shuffle/datasets_large_scale_compute_small_instances.yaml + + run: + timeout: 7200 + prepare: ' python setup_chaos.py --node-kill-interval 600 --max-nodes-to-kill 2' + script: RAY_DATASET_PUSH_BASED_SHUFFLE=1 python dataset/sort.py --num-partitions=1000 --partition-size=1e9 --shuffle + wait_for_nodes: + num_nodes: 20 + type: sdk_command + file_manager: sdk
Combine nginx federation server blocks I'm pretty sure there's no technical reason these have to be distinct server blocks, so collapse into one and go with the more terse location block.
@@ -38,6 +38,11 @@ the reverse proxy and the homeserver. server { listen 443 ssl; listen [::]:443 ssl; + + # For the federation port + listen 8448 ssl default_server; + listen [::]:8448 ssl default_server; + server_name matrix.example.com; location /_matrix { @@ -48,17 +53,6 @@ server { client_max_body_size 10M; } } - -server { - listen 8448 ssl default_server; - listen [::]:8448 ssl default_server; - server_name example.com; - - location / { - proxy_pass http://localhost:8008; - proxy_set_header X-Forwarded-For $remote_addr; - } -} ``` **NOTE**: Do not add a path after the port in `proxy_pass`, otherwise nginx will
ubuiltins: Fix next. MICROPY_PY_BUILTINS_NEXT2 is not enabled.
@@ -746,21 +746,9 @@ def min(*args): """ -@overload def next(iterator: Iterator) -> Any: - ... - - -@overload -def next(iterator: Iterator, default: Any) -> Any: - ... - - -def next(*args): """ Retrieves the next item from the iterator by calling its ``__next__()`` method. - If ``default`` is given, it is returned if the iterator is exhausted, - otherwise ``StopIteration`` is raised. """
Quickfix: Motorconversion. Linked to
@@ -22,8 +22,11 @@ def derive_oldMotor(obj): if oldProps in obj.keys(): if oldProps == 'motor/type' and obj[oldProps] == 'PID': new_motor.update({newProps: 'generic_bldc'}) + elif oldProps == 'motor/type' and obj[oldProps] == 'DC': + new_motor.update({newProps: 'generic_dc'}) else: new_motor.update({newProps: obj[oldProps]}) + if not 'motor/name' in new_motor.keys(): new_motor.update({'motor/name': obj.name + '_Motor'})
fix: typo in role advanced_dns_server Fixes:
service: name: "{{ item }}" state: restarted - loop: "{{ dns_server_services_to_start }}" + loop: "{{ advanced_dns_server_services_to_start }}" when: - "'service' not in ansible_skip_tags" - (start_services | bool)
Fix create default experiment error. Fix This adds an additional condition test to properly handle experiment data stored as a unicode data type.
@@ -181,7 +181,7 @@ class SqlAlchemyStore(AbstractStore): default_experiment = { SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID), SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME, - SqlExperiment.artifact_location.name: self._get_artifact_location(0), + SqlExperiment.artifact_location.name: str(self._get_artifact_location(0)), SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE }
Updates IMAG_TOL from 1e-8 to 1e-7, as 1e-8 was causing machine-precision errors with 2Q-GST germ selection.
@@ -17,7 +17,7 @@ from ..tools import basis as _basis from . import gaugegroup as _gaugegroup from .protectedarray import ProtectedArray as _ProtectedArray -IMAG_TOL = 1e-8 #tolerance for imaginary part being considered zero +IMAG_TOL = 1e-7 #tolerance for imaginary part being considered zero def optimize_gate(gateToOptimize, targetGate): """
accept new param 'for' as request user This allows us to exactly match the request with what would actually happen if a user was making the request.
@@ -185,12 +185,14 @@ def formplayer_as_user_auth(view): @wraps(view) def _inner(request, *args, **kwargs): with mutable_querydict(request.GET): - as_user = request.GET.pop('as', None) + request_user = request.GET.pop('for', None) + if not request_user: + request_user = request.GET.pop('as', None) - if not as_user: + if not request_user: return HttpResponse('User required', status=401) - couch_user = CouchUser.get_by_username(as_user[-1]) + couch_user = CouchUser.get_by_username(request_user[-1]) if not couch_user: return HttpResponse('Unknown user', status=401)
Small adjustment to formatting of doctest Trying this as a quick fix to . If this doesn't work, I recommend we comment it out, since we have this function covered elsewhere.
@@ -75,9 +75,9 @@ def _check_latest_data(lang): def tag_ner(lang, input_text, output_type=list): """Run NER for chosen language. Choosing output_type=list, returns a list of tuples: + >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list) [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] - """ _check_latest_data(lang)
Fix Eltex.MES get_spanning_tree HG-- branch : feature/dcs
@@ -38,6 +38,7 @@ class Script(BaseScript): PORT_ROLE = { "altn": "alternate", "back": "backup", + "bkup": "backup", "boun": "master", "desg": "designated", "dsbl": "disabled",
docs: Remove dead link to citizencodeofconduct.org. This website has been defunct for years, and seems unlikely to return. I have removed the dead link to citizencodeofconduct.org that consistently breaks the `tools/test-documentation` test tool.
@@ -98,7 +98,6 @@ community members. ## License and attribution This Code of Conduct is adapted from the -[Citizen Code of Conduct](http://citizencodeofconduct.org/) and the [Django Code of Conduct](https://www.djangoproject.com/conduct/), and is under a [Creative Commons BY-SA](https://creativecommons.org/licenses/by-sa/4.0/)
fix how 'connected_cb' works in the processprotocol Conflicts: test/test_torconfig.py txtorcon/controller.py
@@ -1313,8 +1313,8 @@ ControlPort Port''') trans.signalProcess = Mock(side_effect=error.ProcessExitedAlready) trans.loseConnection = Mock() + # obsolete? conflict from cherry-pick 37bc60f tpp.timeout_expired() - self.assertTrue(tpp.transport.loseConnection.called) @defer.inlineCallbacks @@ -1691,7 +1691,8 @@ ControlPort Port''') process.status_client( 'STATUS_CLIENT BOOTSTRAP PROGRESS=100 TAG=foo SUMMARY=cabbage' ) - self.assertEqual(None, process.connected_cb) + # XXX why this assert? + self.assertEqual(None, process._connected_cb) class Value(object): exitCode = 123
generate-fixtures: Extract zulip_test_template creation as a function. This will be helpful in the upcoming changes, which will make use of this extracted function to re-create zulip_test_template after migrating the zulip_test db, so that we have the latest schema in tests.
@@ -3,6 +3,14 @@ set -e export DJANGO_SETTINGS_MODULE=zproject.test_settings +create_template_database() +{ + psql -h localhost postgres zulip_test << EOF +DROP DATABASE IF EXISTS zulip_test_template; +CREATE DATABASE zulip_test_template TEMPLATE zulip_test; +EOF +} + if [ "$1" != "--force" ]; then "$(dirname "$0")/../../scripts/setup/terminate-psql-sessions" zulip zulip_test zulip_test_base zulip_test_template psql -h localhost postgres zulip_test << EOF @@ -36,7 +44,4 @@ sh "$(dirname "$0")/../../scripts/setup/flush-memcached" zerver.DefaultStream > zerver/tests/fixtures/messages.json # create pristine template database, for fast fixture restoration after tests are run. -psql -h localhost postgres zulip_test << EOF -DROP DATABASE IF EXISTS zulip_test_template; -CREATE DATABASE zulip_test_template TEMPLATE zulip_test; -EOF +create_template_database
Update issue matching regex. Fixes it being unable to get issue numbers larger than 9. Limits it somewhat length-wise and character-wise to the actual GitHub limits.
@@ -51,7 +51,8 @@ CODE_BLOCK_RE = re.compile( MAXIMUM_ISSUES = 5 # Regex used when looking for automatic linking in messages -AUTOMATIC_REGEX = re.compile(r"((?P<org>.+?)\/)?(?P<repo>.+?)#(?P<number>.+?)") +# regex101 of current regex https://regex101.com/r/V2ji8M/6 +AUTOMATIC_REGEX = re.compile(r"((?P<org>[a-zA-Z0-9][a-zA-Z0-9\-]{1,39})\/)?(?P<repo>[\w\-\.]{1,100})#(?P<number>[0-9]+)") @dataclass
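A quick hedged demo of the tightened pattern from the diff (the sample strings are made up); the `org` group is optional and comes back as `None` when only `repo#number` is given.

```python
import re

AUTOMATIC_REGEX = re.compile(
    r"((?P<org>[a-zA-Z0-9][a-zA-Z0-9\-]{1,39})\/)?(?P<repo>[\w\-\.]{1,100})#(?P<number>[0-9]+)"
)

for text in ("see python-discord/bot#1234 for details", "tracked in sir-lancebot#17"):
    match = AUTOMATIC_REGEX.search(text)
    print(match.group("org"), match.group("repo"), match.group("number"))
# first line prints:  python-discord bot 1234
# second line prints: None sir-lancebot 17
```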
Uses MULTIPART_MISSING_SEMICOLON instead of MULTIPART_SEMICOLON_MISSING. MULTIPART_SEMICOLON_MISSING does not exist in SecLang or in ModSec.
@@ -176,7 +176,7 @@ SecRule MULTIPART_STRICT_ERROR "!@eq 0" \ DA %{MULTIPART_DATA_AFTER},\ HF %{MULTIPART_HEADER_FOLDING},\ LF %{MULTIPART_LF_LINE},\ - SM %{MULTIPART_SEMICOLON_MISSING},\ + SM %{MULTIPART_MISSING_SEMICOLON},\ IQ %{MULTIPART_INVALID_QUOTING},\ IH %{MULTIPART_INVALID_HEADER_FOLDING},\ FLE %{MULTIPART_FILE_LIMIT_EXCEEDED}',\
modified: tests/test_ansible_roll_over.py ... Added config params mixed_lvm_configs and device_to_add, which are needed when adding a new OSD node. device_to_add --> adds a device to the existing OSD node for cluster expansion. Added 'limit' option for config roll over. Fixed a typo 'shotrtname'.
@@ -61,6 +61,8 @@ def run(ceph_cluster, **kw): ubuntu_repo = config.get('ubuntu_repo', None) base_url = config.get('base_url', None) installer_url = config.get('installer_url', None) + mixed_lvm_configs = config.get('is_mixed_lvm_configs', None) + device_to_add = config.get('device', None) ceph_cluster.ansible_config = config['ansi_config'] ceph_cluster.use_cdn = config.get('use_cdn') @@ -88,7 +90,7 @@ def run(ceph_cluster, **kw): if len(osds_required) > len(free_volumes): raise RuntimeError( 'Insufficient volumes on the {node_name} node. Rquired: {required} - Found: {found}'.format( - node_name=matched_ceph_node.shotrtname, required=len(osds_required), + node_name=matched_ceph_node.shortname, required=len(osds_required), found=len(free_volumes))) log.debug('osds_required: {}'.format(osds_required)) log.debug('matched_ceph_node.shortname: {}'.format(matched_ceph_node.shortname)) @@ -109,7 +111,8 @@ def run(ceph_cluster, **kw): ceph_installer.install_ceph_ansible(build) - hosts_file = ceph_cluster.generate_ansible_inventory(bluestore) + hosts_file = ceph_cluster.generate_ansible_inventory( + device_to_add, mixed_lvm_configs, bluestore=True if bluestore else False) ceph_installer.write_inventory_file(hosts_file) if config.get('docker-insecure-registry'): @@ -127,8 +130,8 @@ def run(ceph_cluster, **kw): log.info("Ceph versions " + ceph_installer.get_installed_ceph_versions()) out, rc = ceph_installer.exec_command( - cmd='cd {} ; ANSIBLE_STDOUT_CALLBACK=debug; ansible-playbook -vv -i hosts site.yml'.format(ansible_dir), - long_running=True) + cmd='cd {} ; ANSIBLE_STDOUT_CALLBACK=debug; ansible-playbook -vv -i hosts site.yml --limit {daemon}'.format( + ansible_dir, daemon=demon + 's'), long_running=True) # manually handle client creation in a containerized deployment (temporary) if ceph_cluster.containerized:
When possible, disable unparsing code generation in testcases This improves the time it takes to run most testcases. TN:
@@ -194,6 +194,7 @@ class Emitter(object): self.generate_ada_api = generate_ada_api or bool(main_programs) self.generate_astdoc = generate_astdoc self.generate_gdb_hook = generate_gdb_hook + self.generate_unparser = context.generate_unparser self.pretty_print = pretty_print self.post_process_ada = post_process_ada self.post_process_cpp = post_process_cpp @@ -344,7 +345,7 @@ class Emitter(object): class Unit(object): def __init__(self, template_base_name, rel_qual_name, - has_body=True, ada_api=False): + has_body=True, ada_api=False, unparser=False): """ :param str template_base_name: Common prefix for the name of the templates to use in order to generate spec/body sources @@ -356,6 +357,9 @@ class Emitter(object): :param bool ada_api: Whether we can avoid generating this unit if the Ada API is disabled. + :param bool unparser: Whether we can avoid generating this unit + if unparsing is disabled. + :param bool has_body: Whether this unit has a body (otherwise, it's just a spec). """ @@ -365,6 +369,7 @@ class Emitter(object): if rel_qual_name else [] ) self.ada_api = ada_api + self.unparser = unparser self.has_body = has_body for u in [ @@ -392,13 +397,15 @@ class Emitter(object): Unit('pkg_private_converters', 'Private_Converters', has_body=False), # Unit for AST rewriting primitives - Unit('pkg_rewriting', 'Rewriting', ada_api=True), + Unit('pkg_rewriting', 'Rewriting', ada_api=True, unparser=True), # Unit for AST rewriting implementation - Unit('pkg_rewriting_impl', 'Rewriting_Implementation'), + Unit('pkg_rewriting_impl', 'Rewriting_Implementation', + unparser=True), # Unit for AST unparsing primitives - Unit('pkg_unparsing', 'Unparsing', ada_api=True), + Unit('pkg_unparsing', 'Unparsing', ada_api=True, unparser=True), # Unit for AST implementation of unparsing primitives - Unit('pkg_unparsing_impl', 'Unparsing_Implementation'), + Unit('pkg_unparsing_impl', 'Unparsing_Implementation', + unparser=True), # Unit for all parsers Unit('parsers/pkg_main', 'Parsers'), # Units for the lexer @@ -409,7 +416,10 @@ class Emitter(object): # Unit for debug helpers Unit('pkg_debug', 'Debug'), ]: - if not self.generate_ada_api and u.ada_api: + if ( + (not self.generate_ada_api and u.ada_api) or + (not self.generate_unparser and u.unparser) + ): continue self.write_ada_module(self.src_path, u.template_base_name, u.qual_name, u.has_body, in_library=True)
use existing PipelineIndex when possible Summary: Observed in {F359602} that when loading dagit pages we end up spending a lot of time creating pipeline snapshot ids, because of the callsite modified here. Test Plan: before {F359677} after {F359679} Reviewers: schrockn, prha, dgibson
@@ -83,6 +83,7 @@ def get_full_external_pipeline(self, pipeline_name): return ExternalPipeline( self.external_repository_data.get_external_pipeline_data(pipeline_name), repository_handle=self.handle, + pipeline_index=self.get_pipeline_index(pipeline_name), ) def get_all_external_pipelines(self): @@ -116,16 +117,18 @@ class ExternalPipeline(RepresentedPipeline): objects such as these to interact with user-defined artifacts. """ - def __init__(self, external_pipeline_data, repository_handle): + def __init__(self, external_pipeline_data, repository_handle, pipeline_index=None): check.inst_param(repository_handle, "repository_handle", RepositoryHandle) check.inst_param(external_pipeline_data, "external_pipeline_data", ExternalPipelineData) + check.opt_inst_param(pipeline_index, "pipeline_index", PipelineIndex) - super(ExternalPipeline, self).__init__( + if pipeline_index is None: pipeline_index = PipelineIndex( external_pipeline_data.pipeline_snapshot, external_pipeline_data.parent_pipeline_snapshot, ) - ) + + super(ExternalPipeline, self).__init__(pipeline_index=pipeline_index) self._external_pipeline_data = external_pipeline_data self._repository_handle = repository_handle self._active_preset_dict = {ap.name: ap for ap in external_pipeline_data.active_presets}
Update distributed training doc for RC * update distributed_CN add the catalog * Update index_en.rst * Update index_en.rst * Update index_en.rst
Distributed Training ###################### -docs about distributed training +For more distributed training features and practices, please follow: + +- `fleetx docs <https://fleet-x.readthedocs.io/en/latest/index.html>`_ : including quickstart guide, parallel computing setups, on-cloud training practice, etc.
Fix, make more certain that the binary directory remains valid. * Keeping an extra reference should protect better against in-place changes.
@@ -1746,6 +1746,8 @@ static PyObject *getBinaryDirectoryObject() { static PyObject *binary_directory = NULL; if (binary_directory != NULL) { + CHECK_OBJECT(binary_directory); + return binary_directory; } @@ -1766,6 +1768,9 @@ static PyObject *getBinaryDirectoryObject() { abort(); } + // Make sure it's usable for caching. + Py_INCREF(binary_directory); + return binary_directory; }
Adds missing instructions in docs. The docs were missing a `cd ..` command, which caused installation to fail. Fixes
@@ -91,6 +91,7 @@ To set up Portia for development use the commands below:: npm install && bower install cd node_modules/ember-cli && npm install && cd ../../ ember build + cd .. docker build . -t portia You can run it using::
Include commits from tags in level tree. Some commits may only be referenced by a tag; we should still keep track of these.
@@ -368,6 +368,7 @@ class Operator(): } for tag in repo.tags: + commits.add(tag.commit) commit_hash = tag.commit.hexsha tree['tags'][tag.name] = { 'target': commit_hash,
Add FIXME To solve the C_D issue in the future
@@ -145,7 +145,7 @@ class EarthSatellite: if atmosphere is not None and A_over_m is not None: perturbations[atmospheric_drag_model] = { "R": Earth.R.to(u.km).value, - "C_D": 2.2, # dimensionless (any value would do) + "C_D": 2.2, # FIXME, add C_D as a parameter of the EarthSatellite object "A_over_m": A_over_m, "model": atmosphere, }
Make DC options static. Some datacenters don't allow calling GetConfigRequest; this way the options can both be reused and such calls omitted.
@@ -53,6 +53,9 @@ class TelegramBareClient: # Current TelegramClient version __version__ = '0.13.3' + # TODO Make this thread-safe, all connections share the same DC + _dc_options = None + # region Initialization def __init__(self, session, api_id, api_hash, @@ -85,7 +88,6 @@ class TelegramBareClient: self.updates = UpdateState(process_updates) # These will be set later - self.dc_options = None self._sender = None # endregion @@ -147,7 +149,7 @@ class TelegramBareClient: elif initial_query: return self._init_connection(initial_query) else: - self.dc_options = \ + TelegramBareClient._dc_options = \ self._init_connection(GetConfigRequest()).dc_options else: # TODO Avoid duplicated code @@ -157,8 +159,9 @@ class TelegramBareClient: )) elif initial_query: return self(initial_query) - if not self.dc_options: - self.dc_options = self(GetConfigRequest()).dc_options + if TelegramBareClient._dc_options is None: + TelegramBareClient._dc_options = \ + self(GetConfigRequest()).dc_options return True @@ -221,7 +224,7 @@ class TelegramBareClient: def _get_dc(self, dc_id, ipv6=False, cdn=False): """Gets the Data Center (DC) associated to 'dc_id'""" - if not self.dc_options: + if TelegramBareClient._dc_options is None: raise ConnectionError( 'Cannot determine the required data center IP address. ' 'Stabilise a successful initial connection first.') @@ -233,15 +236,15 @@ class TelegramBareClient: rsa.add_key(pk.public_key) return next( - dc for dc in self.dc_options if dc.id == dc_id and - bool(dc.ipv6) == ipv6 and bool(dc.cdn) == cdn + dc for dc in TelegramBareClient._dc_options if dc.id == dc_id + and bool(dc.ipv6) == ipv6 and bool(dc.cdn) == cdn ) except StopIteration: if not cdn: raise # New configuration, perhaps a new CDN was added? - self.dc_options = self(GetConfigRequest()).dc_options + TelegramBareClient._dc_options = self(GetConfigRequest()).dc_options return self._get_dc(dc_id, ipv6=ipv6, cdn=cdn) def _get_exported_client(self, dc_id,
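The mechanism the commit relies on, shown as a generic hedged sketch (the class below is illustrative, not Telethon's): storing the cached value on the class rather than on `self` means every instance shares it, so once one client has fetched the configuration the others skip the call.

```python
class Client:
    _dc_options = None   # class-level: shared by every Client instance

    def ensure_dc_options(self, fetch_config):
        if Client._dc_options is None:
            Client._dc_options = fetch_config()
        return Client._dc_options


calls = []

def fake_fetch():
    calls.append(1)
    return ["dc1", "dc2"]


a, b = Client(), Client()
a.ensure_dc_options(fake_fetch)
b.ensure_dc_options(fake_fetch)      # reuses the cached value
print(len(calls))                    # 1 -- the config request ran only once
```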
docs(style): block highlighting. * docs(style): block highlighting: fixed target block highlighting. * docs(style): style and inherit: explicit background-color and added back background inherit; fixed spacing.
@@ -56,6 +56,9 @@ code.xref.docutils.literal { div.viewcode-block:target { background: inherit; + background-color: #dadada; + border-radius: 5px; + padding: 5px; } a:hover, div.sphinxsidebar a:hover, a.reference:hover, a.reference.internal:hover code {
docs(introduction.py): update dates in introduction.py
@@ -1775,8 +1775,8 @@ if __name__ == "__main__": symbol="01611", period="1", adjust="", - start_date="2022-06-02 09:30:00", - end_date="2022-06-02 18:32:00", + start_date="2022-10-02 09:30:00", + end_date="2022-10-29 18:32:00", ) print(stock_hk_hist_min_em_df)
Enable contiguous gradients with Z1+MoE MoE training with zero stage 1 only works with `contiguous gradients=True`.
@@ -1376,7 +1376,6 @@ class DeepSpeedEngine(Module): # Overlap and contiguous grads are meaningless in stage 1 and are ignored if zero_stage == ZeroStageEnum.optimizer_states: overlap_comm = False - contiguous_gradients = False round_robin_gradients = False if isinstance(self.module, PipelineModule):
Updating Pennsylvania folder. Removed redundant link to the May 30th Philadelphia incident where police beat an individual, and corrected the date on the initial one.
@@ -22,7 +22,7 @@ Protesters try to help someone stand up; police wait until the person is halfway ## Philadelphia -### Police beat down man | May 31st +### Police beat down man | May 30th The journalist was trying to get a closer look at the police, while they were beating an individual. @@ -66,10 +66,4 @@ Three protestors kneeling on the ground with their hands on their heads/covering * https://twitter.com/d0wnrrrrr/status/1267691766188310528 -### Police swat reporter with baton | May 30th -Police pin young black man to the ground with their knees, swat UR reporter with a baton for filming the scene. - -**Links** - -* https://twitter.com/UR_Ninja/status/1266913490301792257
Adjust count to be for all AbstractNode targets, which includes Registrations and QuickFileNodes [#PLAT-1024]
@@ -8,7 +8,7 @@ from dateutil.parser import parse from datetime import datetime, timedelta from django.utils import timezone -from osf.models import Node, QuickFilesNode +from osf.models import AbstractNode from website.app import init_app from scripts.analytics.base import SummaryAnalytics @@ -31,21 +31,16 @@ class FileSummary(SummaryAnalytics): timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=timezone.utc) file_qs = OsfStorageFile.objects - node_content_type = ContentType.objects.get_for_model(Node) - - quickfiles_query = Q( - target_object_id__in=QuickFilesNode.objects.values('id'), - target_content_type=ContentType.objects.get_for_model(QuickFilesNode) - ) + abstract_node_content_type = ContentType.objects.get_for_model(AbstractNode) public_query = Q( - target_object_id__in=Node.objects.filter(is_public=True).values('id'), - target_content_type=node_content_type + target_object_id__in=AbstractNode.objects.filter(is_public=True).values('id'), + target_content_type=abstract_node_content_type ) private_query = Q( - target_object_id__in=Node.objects.filter(is_public=False).values('id'), - target_content_type=node_content_type + target_object_id__in=AbstractNode.objects.filter(is_public=False).values('id'), + target_content_type=abstract_node_content_type ) daily_query = Q(created__gte=timestamp_datetime) @@ -57,10 +52,10 @@ class FileSummary(SummaryAnalytics): # OsfStorageFiles - the number of files on OsfStorage 'osfstorage_files_including_quickfiles': { 'total': file_qs.count(), - 'public': file_qs.filter(public_query).count() + file_qs.filter(quickfiles_query).count(), + 'public': file_qs.filter(public_query).count(), 'private': file_qs.filter(private_query).count(), 'total_daily': file_qs.filter(daily_query).count(), - 'public_daily': file_qs.filter(public_query & daily_query).count() + file_qs.filter(quickfiles_query & daily_query).count(), + 'public_daily': file_qs.filter(public_query & daily_query).count(), 'private_daily': file_qs.filter(private_query & daily_query).count(), }, }
Update vxvault_url.py. Small correction in your notes and error msg.
@@ -24,7 +24,7 @@ class VXVaultUrl(Feed): self.analyze(line) # don't need to do much here; want to add the information - # and tag it with 'phish' + # and tag it with 'malware' def analyze(self, data): if data.startswith('http'): tags = ['malware'] @@ -35,4 +35,4 @@ class VXVaultUrl(Feed): url.add_source(self.name) url.tag(tags) except ObservableValidationError as e: - logging.error(e) + logging.error('While processing the following line we hit an error {}, {}'.format(data, e))
Remove extraneous copies in StabilizerStateChForm JSON parsing As pointed out in
@@ -73,12 +73,12 @@ class StabilizerStateChForm: def _from_json_dict_(cls, n, G, F, M, gamma, v, s, omega, **kwargs): copy = StabilizerStateChForm(n) - copy.G = np.array(G.copy()) - copy.F = np.array(F.copy()) - copy.M = np.array(M.copy()) - copy.gamma = np.array(gamma.copy()) - copy.v = np.array(v.copy()) - copy.s = np.array(s.copy()) + copy.G = np.array(G) + copy.F = np.array(F) + copy.M = np.array(M) + copy.gamma = np.array(gamma) + copy.v = np.array(v) + copy.s = np.array(s) copy.omega = omega return copy
Fix locking in swift-recon-cron. The previous locking method would leave the lock dir lying around if the process died unexpectedly, preventing other swift-recon-cron processes from running successfully and requiring a manual cleanup.
@@ -19,9 +19,10 @@ swift-recon-cron.py import os import sys -from gettext import gettext as _ +from eventlet import Timeout -from swift.common.utils import get_logger, dump_recon_cache, readconf +from swift.common.utils import get_logger, dump_recon_cache, readconf, \ + lock_path from swift.obj.diskfile import ASYNCDIR_BASE @@ -62,21 +63,14 @@ def main(): conf['log_name'] = conf.get('log_name', 'recon-cron') logger = get_logger(conf, log_route='recon-cron') try: - os.mkdir(lock_dir) - except OSError as e: - logger.critical(str(e)) - print(str(e)) - sys.exit(1) - try: + with lock_path(lock_dir): asyncs = get_async_count(device_dir, logger) dump_recon_cache({'async_pending': asyncs}, cache_file, logger) - except Exception: - logger.exception( - _('Exception during recon-cron while accessing devices')) - try: - os.rmdir(lock_dir) - except Exception: - logger.exception(_('Exception remove cronjob lock')) + except (Exception, Timeout) as err: + msg = 'Exception during recon-cron while accessing devices' + logger.exception(msg) + print('%s: %s' % (msg, err)) + sys.exit(1) if __name__ == '__main__': main()
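Why a lock held on an open file descriptor is more robust than a lock directory, as a rough POSIX-only sketch (this is not Swift's actual `lock_path` implementation): the kernel releases an `flock` when the process exits, even on a crash, whereas a leftover directory from `os.mkdir` blocks every later run until someone deletes it by hand.

```python
import fcntl
import os
from contextlib import contextmanager


@contextmanager
def lock_path(directory):
    os.makedirs(directory, exist_ok=True)
    fd = os.open(os.path.join(directory, ".lock"), os.O_WRONLY | os.O_CREAT)
    try:
        # Non-blocking: raises BlockingIOError if another process holds the lock.
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        yield
    finally:
        # Closing the fd drops the lock; if the process dies first, the kernel drops it anyway.
        os.close(fd)


with lock_path("/tmp/recon-cron-demo"):
    print("doing the cron work while holding the lock")
```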
Tests: When deleting really fails on Windows, try again next test run. * This gives an extra chance for it to become deleted before the next deletion attempt. * With this, spurious errors on Windows will hopefully become less frequent.
@@ -584,6 +584,9 @@ Exit codes {exit_cpython:d} (CPython) != {exit_nuitka:d} (Nuitka)""".format( # It appears there is a tiny lock race that we randomly cause, # likely because --run spawns a subprocess that might still # be doing the cleanup work. + if os.path.exists(nuitka_cmd2[0]+".away"): + os.unlink(nuitka_cmd2[0]+".away") + for _i in range(10): try: os.rename(nuitka_cmd2[0], nuitka_cmd2[0]+".away") @@ -600,8 +603,6 @@ Exit codes {exit_cpython:d} (CPython) != {exit_nuitka:d} (Nuitka)""".format( else: break - assert not os.path.exists(nuitka_cmd2[0]+".away") - if os.path.exists(pdb_filename): os.unlink(pdb_filename) else:
Fix Jigsaw not only depending on its own RNG. Jigsaw previously behaved non-deterministically, as it depended on its own local RNG and additionally the global RNG. This patch fixes that issue.
@@ -5604,7 +5604,9 @@ class Jigsaw(meta.Augmenter): for i in np.arange(len(samples.destinations)): padder = size_lib.CenterPadToMultiplesOf( width_multiple=samples.nb_cols[i], - height_multiple=samples.nb_rows[i]) + height_multiple=samples.nb_rows[i], + seed=random_state + ) row = batch.subselect_rows_by_indices([i]) row = padder.augment_batch_(row, parents=parents + [self], hooks=hooks)
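The underlying determinism issue, in a generic hedged sketch that is not imgaug's API: a component that builds children without passing its own seed ends up depending on global randomness, so two runs configured with the same seed can still diverge.

```python
import random


class Child:
    def __init__(self, seed=None):
        # Without an explicit seed, fall back to an unpredictable global-entropy seed.
        self.rng = random.Random(seed) if seed is not None else random.Random()

    def draw(self):
        return self.rng.random()


class Parent:
    def __init__(self, seed):
        self.seed = seed

    def run(self, pass_seed):
        # If the child does not receive the parent's seed, the result is not reproducible.
        child = Child(self.seed if pass_seed else None)
        return child.draw()


print(Parent(42).run(pass_seed=True) == Parent(42).run(pass_seed=True))    # True: deterministic
print(Parent(42).run(pass_seed=False) == Parent(42).run(pass_seed=False))  # False (almost surely)
```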
Performance improvements. Regex can potentially be very slow, so I removed as many regex checks as possible. The only one that's left in is the mentions regex, as that's impossible to do without using regex.
@@ -12,7 +12,7 @@ import re import time import datetime -BUCKET_RE = r"(\d{17,18})-(\d{17,18})-\d+" +MENTION_RE = re.compile("<@[!&]?\\d+>") class ViolationException(Exception): @@ -53,13 +53,15 @@ class AntiSpam(BaseCog): await self.violate(ex) async def process_message(self, ctx: Message): - # Use the discord's message timestamp to hopefully not trigger false positives - msg_time = int(ctx.created_at.timestamp()) * 1000 - + # print(f'{datetime.datetime.now().isoformat()} - Processing message') if self.is_exempt(ctx.guild.id, ctx.author): return + # Use the discord's message timestamp to hopefully not trigger false positives + msg_time = int(ctx.created_at.timestamp()) * 1000 + async def check_bucket(check, friendly_text, amount): + print(f"{check} - {amount}") if amount == 0: return bucket = self.get_bucket(ctx.guild.id, check) @@ -70,8 +72,10 @@ class AntiSpam(BaseCog): ctx.channel, await bucket.get(ctx.author.id, msg_time, expire=False)) await check_bucket("max_messages", Translator.translate('spam_max_messages', ctx), 1) - await check_bucket("max_newlines", Translator.translate('spam_max_newlines', ctx), len(re.split("\\r\\n|\\r|\\n", ctx.content))) - await check_bucket("max_mentions", Translator.translate('spam_max_mentions', ctx), len(re.findall("<@[!&]?\\d+>", ctx.content))) + await check_bucket("max_newlines", Translator.translate('spam_max_newlines', ctx), + len(ctx.content.split("\n"))) + await check_bucket("max_mentions", Translator.translate('spam_max_mentions', ctx), + len(MENTION_RE.findall(ctx.content))) await self.check_duplicates(ctx) async def check_duplicates(self, ctx: Message): @@ -102,7 +106,8 @@ class AntiSpam(BaseCog): duration = cfg.get("PUNISHMENT_DURATION", 0) until = time.time() + duration - reason = Translator.translate('spam_infraction_reason', ex.guild, channel=f"#{ex.channel}", friendly=ex.friendly) + reason = Translator.translate('spam_infraction_reason', ex.guild, channel=f"#{ex.channel}", + friendly=ex.friendly) GearbotLogging.log_to(ex.guild.id, 'spam_violate', user=Utils.clean_user(ex.member), user_id=ex.member.id, check=ex.check.upper(), friendly=ex.friendly, channel=ex.channel.mention) if punishment == "kick": @@ -188,11 +193,10 @@ class AntiSpam(BaseCog): @staticmethod def _process_bucket_entries(entries): def extract_re(key): - matches = re.findall(BUCKET_RE, key) - if len(matches) > 0: - return matches[0][0], matches[0][1] - else: + parts = key.split("-") + if len(parts) != 3: return None + return parts[0], parts[1] return set(filter(lambda x: x is not None, map(extract_re, entries)))
[ci] Fix runtime env tests This fixes failing tests from
@@ -36,6 +36,9 @@ def test_get_wheel_filename(): ray_version = "3.0.0.dev0" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38", "39"]: + if sys_platform == "win32" and py_version == "36": + # Windows wheels are not built for py3.6 anymore + continue filename = get_wheel_filename(sys_platform, ray_version, py_version) prefix = "https://s3-us-west-2.amazonaws.com/ray-wheels/latest/" url = f"{prefix}{filename}" @@ -44,9 +47,12 @@ def test_get_wheel_filename(): def test_get_master_wheel_url(): ray_version = "3.0.0.dev0" - test_commit = "58a73821fbfefbf53a19b6c7ffd71e70ccf258c7" + test_commit = "c3ac6fcf3fcc8cfe6930c9a820add0e187bff579" for sys_platform in ["darwin", "linux", "win32"]: for py_version in ["36", "37", "38", "39"]: + if sys_platform == "win32" and py_version == "36": + # Windows wheels are not built for py3.6 anymore + continue url = get_master_wheel_url( test_commit, sys_platform, ray_version, py_version )
Update Maceio-AL spider Updates Maceio-AL spider. It sorts imports, replaces MUNICIPALITY_ID (deprecated field) with TERRITORY_ID, replaces some insecure selectors and reorganizes pagination requests
-from dateparser import parse from datetime import datetime import scrapy +from dateparser import parse from gazette.items import Gazette from gazette.spiders.base import BaseGazetteSpider class AlMaceioSpider(BaseGazetteSpider): - MUNICIPALITY_ID = "2704302" + TERRITORY_ID = "2704302" + name = "al_maceio" allowed_domains = ["maceio.al.gov.br"] start_urls = ["http://www.maceio.al.gov.br/noticias/diario-oficial/"] - page_number = 1 def parse(self, response): - gazettes = list(response.xpath("//article")) + gazettes = response.xpath("//article") for gazette in gazettes: - url = gazette.xpath("a/@href").extract_first() - + url = gazette.xpath("a/@href").get() if not url: # In some cases the href attr is empty, e.g. 24-11-2015 continue - date_str = gazette.xpath("time/text()").extract_first() - date = parse(date_str, languages=["pt"]) - title = gazette.xpath("a/@title").extract_first() - is_extra_edition = "suplemento" in (title.lower()) + gazette_date = gazette.xpath("time/text()").get() + date = parse(gazette_date, languages=["pt"]).date() + + title = gazette.xpath("a/@title").get() + is_extra_edition = "suplemento" in title.lower() if "wp-content/uploads" in url: gazette = self.create_gazette(date, url, is_extra_edition) yield gazette else: - request = scrapy.Request(url, self.parse_additional_page) - request.meta["date"] = date - request.meta["is_extra_edition"] = is_extra_edition - yield request - - """ - This condition is necessary to stop crawling when there are no more gazettes - """ - if gazettes: - self.page_number += 1 yield scrapy.Request( - "{0}/page/{1}".format(self.start_urls[0], str(self.page_number)) + url, + callback=self.parse_additional_page, + meta={ + "date": date, + "is_extra_edition": is_extra_edition, + }, ) + next_pages = response.css(".envolve-content nav a::attr(href)").getall() + for next_page_url in next_pages: + yield scrapy.Request(next_page_url) + def parse_additional_page(self, response): - url = response.xpath('//p[@class="attachment"]/a/@href').extract_first() + url = response.css("p.attachment a::attr(href)").get() gazette = self.create_gazette( response.meta["date"], url, response.meta["is_extra_edition"] ) @@ -57,7 +56,7 @@ class AlMaceioSpider(BaseGazetteSpider): date=date, file_urls=[url], is_extra_edition=is_extra_edition, - municipality_id=self.MUNICIPALITY_ID, + territory_id=self.TERRITORY_ID, power="executive_legislature", scraped_at=datetime.utcnow(), )
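A hedged sketch of the request/callback pattern the rewritten spider uses; the URL, CSS selector and field names are made up, it only demonstrates carrying data to a callback via meta.

import scrapy

class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["http://example.com/list"]

    def parse(self, response):
        for href in response.css("a.item::attr(href)").getall():
            yield scrapy.Request(
                response.urljoin(href),
                callback=self.parse_item,
                meta={"source_page": response.url},  # carried to the callback
            )

    def parse_item(self, response):
        yield {
            "url": response.url,
            "source_page": response.meta["source_page"],
        }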
TRAC#7497 Fix integration tests The pages are now removed from the admin views, so skip these tests. Keep them around until this is deployed in production and we know that the new app works well.
import re from random import choice, randint +import pytest from bs4 import BeautifulSoup from django.conf import settings from django.contrib import admin @@ -583,6 +584,7 @@ class ViewsTest(ComicframeworkTestCase): self._test_page_can_be_viewed(user, testpage1) self._test_page_can_be_viewed(self.root, testpage1) + @pytest.mark.skip # Deprecated functionality def test_page_permissions_view(self): """ Test that the permissions page in admin does not crash: for root https://github.com/comic/comic-django/issues/180 @@ -600,6 +602,7 @@ class ViewsTest(ComicframeworkTestCase): otheruser = self._create_random_user("other_") self._test_url_can_not_be_viewed(otheruser, url) + @pytest.mark.skip # Deprecated functionality def test_page_change_view(self): """ Root can in admin see a page another user created while another regular user can not @@ -1482,12 +1485,8 @@ class AdminTest(ComicframeworkTestCase): self._check_project_admin_view(self.testproject, "admin:comicmodels_comicsite_changelist") - def test_project_admin_views(self): - """ Is javascript being included on admin pages correctly? - """ - - self._check_project_admin_view(self.testproject, "admin:index") - + @pytest.mark.skip # Deprecated functionality + def test_project_page_views(self): # check page add view self._check_project_admin_view(self.testproject, "admin:comicmodels_page_add") @@ -1508,6 +1507,23 @@ class AdminTest(ComicframeworkTestCase): self._check_project_admin_view(self.testproject, "admin:comicmodels_page_changelist") + # see if adding a page crashes the admin + create_page_in_projectadmin(self.testproject, + "test_project_admin_page_add") + + # Projectadminsite has the special feature that any 'comicsite' field in a form is automatically + # set to the project this projectadmin is for. Test this by creating a + # page without a project. + create_page_in_projectadmin(self.testproject, + "test_project_admin_page_add_without_comicsite", + comicsite_for_page=None) + + def test_project_admin_views(self): + """ Is javascript being included on admin pages correctly? + """ + + self._check_project_admin_view(self.testproject, "admin:index") + # Do the same for registration requests: check of standard views do not crash # Create some registrationrequests @@ -1535,15 +1551,4 @@ class AdminTest(ComicframeworkTestCase): "admin:comicmodels_registrationrequest_changelist", user=self.root) - # see if adding a page crashes the admin - create_page_in_projectadmin(self.testproject, - "test_project_admin_page_add") - - # Projectadminsite has the special feature that any 'comicsite' field in a form is automatically - # set to the project this projectadmin is for. Test this by creating a - # page without a project. - create_page_in_projectadmin(self.testproject, - "test_project_admin_page_add_without_comicsite", - comicsite_for_page=None) - # check that expected links are present in main admin page
[IMPR] Improve PropertyGenerator._update_old_result_dict: only add str, int or list values to old_dict; raise a ValueError instead of an AssertionError if there is an unexpected type; print the unexpected type instead of the value
@@ -763,14 +763,14 @@ class PropertyGenerator(QueryGenerator): def _update_old_result_dict(old_dict, new_dict) -> None: """Update old result dict with new_dict.""" for k, v in new_dict.items(): - if k not in old_dict: - old_dict[k] = v - continue - if isinstance(v, list): - old_dict[k].extend(v) - continue - assert isinstance(v, (str, int)), ( - 'continued API result had an unexpected type: {}'.format(v)) + if isinstance(v, (str, int)): + old_dict.setdefault(k, v) + elif isinstance(v, list): + old_dict.setdefault(k, []).extend(v) + else: + raise ValueError( + 'continued API result had an unexpected type: {}' + .format(type(v).__name__)) class ListGenerator(QueryGenerator):
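The merge rule from the patch above, extracted into a stand-alone illustration: scalars are kept from the first result, lists are extended, and anything else is rejected.

def merge_continued(old, new):
    for key, value in new.items():
        if isinstance(value, (str, int)):
            old.setdefault(key, value)       # keep the first scalar seen
        elif isinstance(value, list):
            old.setdefault(key, []).extend(value)
        else:
            raise ValueError(
                'continued API result had an unexpected type: '
                + type(value).__name__)

page = {"title": "Foo", "links": ["A"]}
merge_continued(page, {"title": "Foo", "links": ["B", "C"]})
print(page)  # {'title': 'Foo', 'links': ['A', 'B', 'C']}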
Update 1_getting_started.rst Fixes "We can verify the that the data": one *the* is enough :)
@@ -59,7 +59,7 @@ You should see a CSV version of the data dumped into your terminal. All csvkit t ``data.csv`` will now contain a CSV version of our original file. If you aren't familiar with the ``>`` syntax, it means "redirect standard out to a file". If that's hard to remember it may be more convenient to think of it as "save to". -We can verify the that the data is saved to the new file by using the ``cat`` command to print it: +We can verify that the data is saved to the new file by using the ``cat`` command to print it: .. code-block:: bash
context_processors: Enable platform detection in templates. This enables the ability to detect the platform in a template.
@@ -95,6 +95,11 @@ def zulip_default_context(request): settings_path = "/etc/zulip/settings.py" settings_comments_path = "/etc/zulip/settings.py" + if hasattr(request, "client") and request.client.name == "ZulipElectron": + platform = "ZulipElectron" + else: + platform = "ZulipWeb" + return { 'root_domain_landing_page': settings.ROOT_DOMAIN_LANDING_PAGE, 'custom_logo_url': settings.CUSTOM_LOGO_URL, @@ -137,6 +142,7 @@ def zulip_default_context(request): 'settings_path': settings_path, 'secrets_path': secrets_path, 'settings_comments_path': settings_comments_path, + 'platform': platform, }
Removed unused parameter from log std policy The useless parameter was removed; it had been carried over from the plain Gaussian state-dependent std policy.
@@ -273,7 +273,7 @@ class StateLogStdGaussianPolicy(ParametricPolicy): This policy is similar to the State std gaussian policy, but here the regressor represents the logarithm of the standard deviation """ - def __init__(self, mu, log_std, eps=1e-6): + def __init__(self, mu, log_std): """ Constructor. @@ -285,11 +285,9 @@ class StateLogStdGaussianPolicy(ParametricPolicy): dimensionality of the regressor must be equal to the action dimensionality """ - assert(eps > 0) self._mu_approximator = mu self._log_std_approximator = log_std - self._eps = eps def __call__(self, state, action): mu, sigma = self._compute_multivariate_gaussian(state)
[IMPR] fix typos in logging.py Fix typos in docstring.
@@ -35,6 +35,7 @@ from typing import Any from pywikibot.backports import Callable, List from pywikibot.tools import deprecated_args, issue_deprecation_warning + STDOUT = 16 #: VERBOSE = 18 #: INPUT = 25 #: @@ -87,7 +88,7 @@ def logoutput(msg: Any, the log message to include an exception traceback. :param msg: The message to be printed. - :param args: Not used yet; prevents positinal arguments except `msg`. + :param args: Not used yet; prevents positional arguments except `msg`. :param level: The logging level; supported by :func:`logoutput` only. :keyword newline: If newline is True (default), a line feed will be added after printing the msg. @@ -95,7 +96,7 @@ def logoutput(msg: Any, :keyword layer: Suffix of the logger name separated by dot. By default no suffix is used. :type layer: str - :keyword decoder: If msg is bytes, this decoder is used to deccode. + :keyword decoder: If msg is bytes, this decoder is used to decode. Default is 'utf-8', fallback is 'iso8859-1' :type decoder: str :param kwargs: For the other keyword arguments refer @@ -170,7 +171,7 @@ def info(msg: Any = '', *args: Any, **kwargs: Any) -> None: output = info -"""Synomym for :func:`info` for backward compatibility. The arguments +"""Synonym for :func:`info` for backward compatibility. The arguments are interpreted as for :func:`logoutput`. .. versionchanged:: 7.2 @@ -189,13 +190,13 @@ def stdout(msg: Any = '', *args: Any, **kwargs: Any) -> None: ``msg`` will be sent to standard output (stdout) via :mod:`pywikibot.userinterfaces`, so that it can be piped to another - process. All other functions will sent to stderr. + process. All other functions will send to stderr. `msg` may be omitted and a newline is printed in that case. The arguments are interpreted as for :func:`logoutput`. .. versionchanged:: 7.2 - `text`was renamed to `msg`; `msg` paramerer may be omitted; + `text`was renamed to `msg`; `msg` parameter may be omitted; only keyword arguments are allowed except for `msg`. .. seealso:: - :python:`Logger.log()<library/logging.html#logging.Logger.log>`
MAINT: _lib: Fix a build warning. Cast pos to size_t when comparing to nread. Fixes this warning: gcc: scipy/_lib/messagestream.c scipy/_lib/messagestream.c:2050:35: warning: comparison of integers of different signs: 'size_t' (aka 'unsigned long') and 'long' [-Wsign-compare] __pyx_t_1 = ((__pyx_v_nread != __pyx_v_pos) != 0); ~~~~~~~~~~~~~ ^ ~~~~~~~~~~~ 1 warning generated.
@@ -64,7 +64,7 @@ cdef class MessageStream: try: stdio.rewind(self.handle) nread = stdio.fread(buf, 1, pos, self.handle) - if nread != pos: + if nread != <size_t>pos: raise IOError("failed to read messages from buffer") obj = PyBytes_FromStringAndSize(buf, nread)
Update noaa-gefs.yaml Removed duplicate email reference
@@ -4,7 +4,7 @@ Documentation: https://github.com/awslabs/open-data-docs/tree/main/docs/noaa/noa Contact: | For questions regarding data content or quality, visit [the NOAA GEFS site](http://www.emc.ncep.noaa.gov/index.php?branch=GEFS). <br/> For any questions regarding data delivery not associated with this platform or any general questions regarding the NOAA Big Data Program, email [email protected]. - <br /> We also seek to identify case studies on how NOAA data is being used and will be featuring those stories in joint publications and in upcoming events. If you are interested in seeing your story highlighted, please share it with the NOAA BDP team here: [email protected]@noaa.gov. + <br /> We also seek to identify case studies on how NOAA data is being used and will be featuring those stories in joint publications and in upcoming events. If you are interested in seeing your story highlighted, please share it with the NOAA BDP team here: [email protected] ManagedBy: "[NOAA](http://www.noaa.gov/)" UpdateFrequency: 4 times a day, every 6 hours starting at midnight. Collabs:
Update _version.py Add Naoya as a maintainer
@@ -3,8 +3,8 @@ import subprocess __all__ = ['__author__', '__author_email__', '__version__', '__git_uri__', '__dependencies__', '__optional_dependencies__'] -__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop" -__author_email__ = "[email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected]" +__author__ = "Erik Ritter (maintainer), Serena Jiang, John Bodley, Bill Ulammandakh, Naoya Kanai, Robert Chang, Dan Frank, Chetan Sharma, Matthew Wardrop" +__author_email__ = "[email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected]" __version__ = "0.9.0" try: with open(os.devnull, 'w') as devnull:
Update overview.rst Missing whitespace causes it to be broken.
@@ -50,7 +50,7 @@ and use that number to determine which of these installation tutorials you shoul follow to complete your installation. If you don't see your number, choose the closest that you can. -#. `Big Sur (11.5.1)<https://openmined.github.io/PySyft/install_tutorials/osx_11_5_1.html#>`__ +#. `Big Sur (11.5.1) <https://openmined.github.io/PySyft/install_tutorials/osx_11_5_1.html#>`__. Linux Tutorials ~~~~~~~~~~~~~~~ @@ -68,7 +68,7 @@ Which should print something like the following: See where this image says "20.04.3"? Figure out what number yours says in that place -#. `Ubuntu (20.04.3 - Focal Fossa)<https://openmined.github.io/PySyft/install_tutorials/linux.html##>`__ +#. `Ubuntu (20.04.3 - Focal Fossa) <https://openmined.github.io/PySyft/install_tutorials/linux.html##>`__. Windows Tutorials ~~~~~~~~~~~~~~~~~ @@ -89,7 +89,7 @@ and use those number to determine which of these installation tutorials you shou follow to complete your installation. If you don't see one of your numbers, choose the closest that you can. -#. `Windows 10 (20H2)<https://openmined.github.io/PySyft/install_tutorials/windows.html>`__ +#. `Windows 10 (20H2) <https://openmined.github.io/PySyft/install_tutorials/windows.html>`__. Best of luck on your journey!
Fix typo in dense.py
@@ -336,7 +336,7 @@ class EmbeddingRetriever(BaseRetriever): from sentence_transformers import SentenceTransformer except ImportError: raise ImportError("Can't find package `sentence-transformers` \n" - "You can install it via `pip install sentece-transformers` \n" + "You can install it via `pip install sentence-transformers` \n" "For details see https://github.com/UKPLab/sentence-transformers ") # pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models # e.g. 'roberta-base-nli-stsb-mean-tokens'
Add logging for boto3 calls Adds default logging for all boto3 calls performed through the Boto3Client and Boto3Resource classes. Logging level will be changed to info once we disable printing logging output to stdout
@@ -83,11 +83,21 @@ class AWSExceptionHandler: return wrapper +def _log_boto3_calls(params, **kwargs): + service = kwargs["event_name"].split(".")[-2] + operation = kwargs["event_name"].split(".")[-1] + region = kwargs["context"].get("client_region", boto3.session.Session().region_name) + LOGGER.debug( # TODO: change this to info level once we disable printing logging output to stdout + "Executing boto3 call: region=%s, service=%s, operation=%s, params=%s", region, service, operation, params + ) + + class Boto3Client(ABC): """Abstract Boto3 client.""" def __init__(self, client_name: str): self._client = boto3.client(client_name) + self._client.meta.events.register("provide-client-params.*.*", _log_boto3_calls) def _paginate_results(self, method, **kwargs): """ @@ -108,3 +118,4 @@ class Boto3Resource(ABC): def __init__(self, resource_name: str): self._resource = boto3.resource(resource_name) + self._resource.meta.client.meta.events.register("provide-client-params.*.*", _log_boto3_calls)
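A minimal sketch of boto3's event-hook mechanism used above; the "s3" client, the logger name and the final call are illustrative, and the call assumes valid AWS credentials.

import logging
import boto3

logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger("boto3-calls")

def log_call(params, **kwargs):
    # event_name looks like "provide-client-params.s3.ListBuckets"
    operation = kwargs["event_name"].split(".")[-1]
    LOGGER.debug("boto3 call: operation=%s params=%s", operation, params)

client = boto3.client("s3")
client.meta.events.register("provide-client-params.*.*", log_call)
client.list_buckets()  # the handler fires before the request is sent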
When calling a submenu command from a higher level, strip off the first argument (which enters the submenu) and pass the rest on to the submenu
@@ -672,8 +672,9 @@ class AddSubmenu(object): for sub_attr, par_attr in self.shared_attributes.items(): setattr(submenu, sub_attr, getattr(parent_cmd, par_attr)) - if line: - # Execute the command + if line.parsed.args: + # Remove the menu argument and execute the command in the submenu + line = submenu.parser_manager.parsed(line.parsed.args) submenu.precmd(line) ret = submenu.onecmd(line) submenu.postcmd(ret, line)
Make collecting of the logs more resilient to internal errors Fixes:
@@ -387,7 +387,10 @@ def pytest_runtest_makereport(item, call): ): test_case_name = item.name mcg = True if any(x in item.location[0] for x in ['mcg', 'ecosystem']) else False + try: collect_ocs_logs(dir_name=test_case_name, mcg=mcg) + except Exception as ex: + log.error(f"Failed to collect OCS logs. Error: {ex}") # Collect Prometheus metrics if specified in gather_metrics_on_fail marker if ( @@ -396,12 +399,15 @@ def pytest_runtest_makereport(item, call): and item.get_closest_marker('gather_metrics_on_fail') ): metrics = item.get_closest_marker('gather_metrics_on_fail').args + try: collect_prometheus_metrics( metrics, f'{item.name}-{call.when}', call.start, call.stop ) + except Exception as ex: + log.error(f"Failed to collect prometheus metrics. Error: {ex}") # Get the performance metrics when tests fails for scale or performance tag from tests.helpers import collect_performance_stats @@ -411,4 +417,7 @@ def pytest_runtest_makereport(item, call): and (item.get_closest_marker('scale') or item.get_closest_marker('performance')) ): test_case_name = item.name + try: collect_performance_stats(test_case_name) + except Exception as ex: + log.error(f"Failed to collect performance stats. Error: {ex}")
Plugins: Enhanced PySide2 workaround even further * Copy the exact signature and strip annotations to provide the exact number of args, since PySide2 seems to allow some of them to be omitted, which star arguments cannot express.
@@ -585,19 +585,35 @@ def nuitka_wrap(cls): wrapper_count += 1 wrapper_name = "_wrapped_function_%s_%d" % (attr, wrapper_count) + signature = inspect.signature(value) + + # Remove annotations junk that cannot be executed. + signature = signature.replace( + return_annotation = inspect.Signature.empty, + parameters=[ + parameter.replace(annotation=inspect.Signature.empty) + for parameter in + signature.parameters.values() + ] + ) + + v = r''' +def %(wrapper_name)s%(signature)s: + return %(wrapper_name)s.func(%(parameters)s) + ''' % { + "signature": signature, + "parameters": ",".join(signature.parameters), + "wrapper_name": wrapper_name + } + # TODO: Nuitka does not currently statically optimize this, might change! exec( - ''' -def %(wrapper_name)s(self, *args, **kwargs): - return %(wrapper_name)s.func(self, *args, **kwargs) - ''' - % {"wrapper_name": wrapper_name}, + v, globals(), ) - func = value wrapper = globals()[wrapper_name] - wrapper.func = func + wrapper.func = value setattr(cls, attr, wrapper)
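The annotation-stripping trick from the patch, shown on a toy function instead of a PySide2 slot; this only illustrates the inspect.signature API, not the plugin code itself.

import inspect

def slot(self, index: int, text: str = "x") -> None:
    pass

sig = inspect.signature(slot)
# Drop the return annotation and every parameter annotation so the result
# can be embedded in generated source without importing the annotation types.
sig = sig.replace(
    return_annotation=inspect.Signature.empty,
    parameters=[p.replace(annotation=inspect.Signature.empty)
                for p in sig.parameters.values()],
)
print(sig)                       # (self, index, text='x')
print(",".join(sig.parameters))  # self,index,text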
[GodvilleData] ability to remove apikey from config unhide apikey cmd
@@ -122,7 +122,7 @@ class GodvilleData: finaltext += chat.box(times) await self.bot.say(finaltext) - @godville.command(pass_context=True, hidden=True) + @godville.group(pass_context=True, invoke_without_command=True) async def apikey(self, ctx: commands.Context, apikey: str, *, godname: str): """Set apikey for your character. Only one character per user""" @@ -131,6 +131,13 @@ class GodvilleData: dataIO.save_json(self.config_file, self.config) await self.bot.say("Your name and apikey has been saved") + @apikey.command(pass_context=True) + async def remove(self, ctx: commands.Context): + """Remove your apikey and godname from bot's data""" + del self.config[ctx.message.author.id] + dataIO.save_json(self.config_file, self.config) + await self.bot.say("Your key removed from database") + class GodvilleUser(object): """Godville API wrapper"""
Update mediaprocessor.py self reference
@@ -54,10 +54,10 @@ class MediaProcessor: # QTFS if self.settings.relocate_moov: - converter.QTFS(output['output']) + self.QTFS(output['output']) # Copy to additional locations - output_files = converter.replicate(output['output']) + output_files = self.replicate(output['output']) # Run any post process scripts if self.settings.postprocess:
IE config : Locate Qt.py This is needed to build the documentation.
@@ -71,6 +71,7 @@ oiioVersion = gafferReg["OpenImageIO"] ocioVersion = gafferReg["OpenColorIO"] oslVersion = gafferReg["OpenShadingLanguage"] vdbVersion = gafferReg.get( "OpenVDB", "3.0.0" ) +qtPyVersion = gafferReg.get( "qtPyVersion", "1.0.0.b3" ) if targetApp : @@ -236,6 +237,7 @@ LOCATE_DEPENDENCY_PYTHONPATH = [ os.path.join( IEEnv.Environment.rootPath(), "tools", "python", pythonVersion, "noarch" ), os.path.join( sphinxRoot, "lib", "python" + pythonVersion, "site-packages" ), os.path.join( sphinxRoot, "lib64", "python" + pythonVersion, "site-packages" ), + os.path.join( IEEnv.Environment.rootPath(), "apps", "Qt.py", qtPyVersion ), ]
r1.2.1: connection functions initialize to None Yeah, having a null function was a bad idea. It seemed like a good idea at the time.
@@ -40,12 +40,9 @@ class HsDev(object): self.part = '' - def null_func(self): - pass - - self.on_connected = null_func - self.on_disconnected = null_func - self.on_reconnect = null_func + self.on_connected = None + self.on_disconnected = None + self.on_reconnect = None def __del__(self): self.close()
Support Python 3.9 (Fix This issue was already raised on adding a TODO to consider going back to Generic[T] once this is solved.
@@ -205,8 +205,10 @@ class DecoderComparer: return f"<DecoderComparer {self.value}:{self.value.priority()}>" -class CrackResult(NamedTuple, Generic[T]): - value: T +class CrackResult(NamedTuple): + # TODO consider using Generic[T] again for value's type once + # https://bugs.python.org/issue36517 is resolved + value: Any key_info: Optional[str] = None misc_info: Optional[str] = None
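A hedged, self-contained illustration of the workaround: at the time of the patch a NamedTuple could not also inherit Generic[T] (the CPython issue cited in the TODO), so the field type falls back to Any. The sample values are made up.

from typing import Any, NamedTuple, Optional

class CrackResult(NamedTuple):
    value: Any                       # ideally generic T; blocked by the upstream bug
    key_info: Optional[str] = None
    misc_info: Optional[str] = None

result = CrackResult(value="hello world", key_info="caesar, shift 3")
print(result.value, result.key_info)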
TST: fixed parametrize bug Fixed a bug in parametrized function, as it turns out that input should not be provided as a dictionary. Dictionary input only works sometimes, and may cause the dict values to be cast as lists.
@@ -63,23 +63,27 @@ class TestBasics(): with pytest.raises(ValueError): self.meta = pysat.Meta(metadata='Not a Panda') - @pytest.mark.parametrize("set_dict", - [({}), ({'units': 'V', 'long_name': 'Longgggg'})]) - def test_inst_data_assign_meta(self, set_dict): + @pytest.mark.parametrize("labels,vals", + [([], []), + (['units', 'long_name'], ['V', 'Longgggg'])]) + def test_inst_data_assign_meta(self, labels, vals): """ Test Meta initialization with data """ - # Update the testing data - for skey in set_dict: - if skey in self.default_name: - self.default_name.pop(self.default_name.index(skey)) - elif skey in self.default_nan: - self.default_nan.pop(self.default_nan.index(skey)) - self.default_val[skey] = set_dict[skey] - - # Initialize the Meta data + # Initialize the instrument self.testInst.load(*self.stime) self.dval = 'test_inst_data_assign_meta' - set_dict['data'] = self.testInst['mlt'] + + # Update the testing data and set the new data dictionary + set_dict = {'data': self.testInst['mlt']} + for i, slabel in enumerate(labels): + if slabel in self.default_name: + self.default_name.pop(self.default_name.index(slabel)) + elif slabel in self.default_nan: + self.default_nan.pop(self.default_nan.index(slabel)) + self.default_val[slabel] = vals[i] + set_dict[slabel] = vals[i] + + # Initialize the Meta data self.testInst[self.dval] = set_dict self.meta = self.testInst.meta @@ -1196,7 +1200,7 @@ class TestBasics(): assert 'extra_check' in f['test_nan_variable'].ncattrs() -class TestBasicsImmuatble(TestBasics): +class TestBasicsImmutable(TestBasics): def setup(self): """Runs before every method to create a clean testing setup """
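A stand-alone example of the parametrization style the fix adopts (the test body is made up): parallel tuples of labels and values rather than a dict input, so each value keeps its original type.

import pytest

@pytest.mark.parametrize("labels,vals",
                         [([], []),
                          (["units", "long_name"], ["V", "Longgggg"])])
def test_labels_match_values(labels, vals):
    # Each parametrized case supplies both lists together.
    assert len(labels) == len(vals)
    for label, val in zip(labels, vals):
        assert isinstance(label, str)
        assert isinstance(val, str)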
Remove 12.04 builds Remove 12.04-i386 and 12.04-amd64 builds from Jenkinsfile.
@@ -67,21 +67,6 @@ stage 'Build' deleteDir() } }, - "12.04-amd64" : { - node('master'){ - deleteDir() - sh """ - commit_hash=\"${env.commit_hash}\" - mkdir \$commit_hash - working_dir=\$(pwd) - docker run -v \$working_dir/\$commit_hash:/\$commit_hash --rm dnanexus/dx-toolkit:12.04 /bin/bash -xc \"git clone https://github.com/dnanexus/dx-toolkit.git; cd dx-toolkit; \\ - git checkout \$commit_hash; build/package.sh ubuntu-12.04-amd64; \\ - mv dx-toolkit-*.tar.gz /\$commit_hash/\" - """ - archive "${env.commit_hash}/dx-toolkit-*.tar.gz" - deleteDir() - } - }, "centos-amd64" : { node('master'){ deleteDir() @@ -114,22 +99,6 @@ stage 'Build' deleteDir() } }, - "12.04-i386" : { - node('master'){ - deleteDir() - sh """ - commit_hash=\"${env.commit_hash}\" - mkdir \$commit_hash - working_dir=\$(pwd) - docker run -v \$working_dir/\$commit_hash:/\$commit_hash --rm dnanexus/dx-toolkit:12.04-i386 \\ - /bin/bash -xc \"git clone https://github.com/dnanexus/dx-toolkit.git; \\ - cd dx-toolkit; git checkout \$commit_hash; build/package.sh ubuntu-12.04-i386; \\ - mv dx-toolkit-*.tar.gz /\$commit_hash/\" - """ - archive "${env.commit_hash}/dx-toolkit-*.tar.gz" - deleteDir() - } - }, "precise-deb" : { node('master'){ deleteDir()
Update serializer with explicit fields value Without it, DRF raises an error (since 3.3.0)
@@ -16,5 +16,6 @@ To accept tags through a `REST` API call we need to add the following to our `Se class Meta: model = YourModel + fields = '__all__' And you're done, so now you can add tags to your model.
Scons: Do not suggest using "clang-cl.exe" as it doesn't work on its own yet.
@@ -580,15 +580,11 @@ c) Install MinGW64 to "C:\\MinGW64" or "\\MinGW", where then it is automatically proper variant (32/64 bits, your Python arch is %r), or else cryptic errors will be shown. -d) Set the environment variable "CC" to the *full* path of either "gcc.exe" or - to "clang-cl.exe". Also be sure to head proper architecture or else errors - will occur. - Normal MinGW will not work! MinGW64 does not mean 64 bits, just better Windows compatibility. Cygwin based gcc will not work. MSYS2 based gcc will not work. - +clang-cl will only work if MSVC already worked. """ % (sys.exec_prefix, target_arch)) else: sys.exit("Error, cannot locate suitable C compiler.")
Tests RBD: Delete csi-rbdplugin while PVC creation, Pod creation and IO are progressing CEPHFS: Delete csi-cephfsplugin while PVC creation, Pod creation and IO are progressing
import logging from concurrent.futures import ThreadPoolExecutor import pytest +from functools import partial from ocs_ci.framework.testlib import ManageTest, tier4 from ocs_ci.ocs import constants from ocs_ci.ocs.resources.pod import ( - get_mds_pods, get_mon_pods, get_mgr_pods, get_osd_pods + get_mds_pods, get_mon_pods, get_mgr_pods, get_osd_pods, get_plugin_pods ) from ocs_ci.utility.utils import TimeoutSampler from tests import helpers, disruption_helpers @@ -44,8 +45,17 @@ log = logging.getLogger(__name__) pytest.param( *[constants.CEPHFILESYSTEM, 'mds'], marks=pytest.mark.polarion_id("OCS-741") + ), + pytest.param( + *[constants.CEPHFILESYSTEM, 'cephfsplugin'], + marks=pytest.mark.polarion_id("OCS-1011") + ), + pytest.param( + *[constants.CEPHBLOCKPOOL, 'rbdplugin'], + marks=[pytest.mark.polarion_id("OCS-1010"), pytest.mark.bugzilla( + '1752487' + )] ) - ] ) class TestResourceDeletionDuringCreationOperations(ManageTest): @@ -114,7 +124,7 @@ class TestResourceDeletionDuringCreationOperations(ManageTest): resource=pod_obj, state=constants.STATUS_RUNNING ) pod_obj.reload() - log.info(f"Created {len(io_pods)} for running IO.") + log.info(f"Created {len(io_pods)} pods for running IO.") return pvc_objs, io_pods, pvc_objs_new_pods, access_modes @@ -158,8 +168,10 @@ class TestResourceDeletionDuringCreationOperations(ManageTest): storageclass = pvc_objs[0].storageclass pod_functions = { - 'mds': get_mds_pods, 'mon': get_mon_pods, 'mgr': get_mgr_pods, - 'osd': get_osd_pods + 'mds': partial(get_mds_pods), 'mon': partial(get_mon_pods), + 'mgr': partial(get_mgr_pods), 'osd': partial(get_osd_pods), + 'rbdplugin': partial(get_plugin_pods, interface=interface), + 'cephfsplugin': partial(get_plugin_pods, interface=interface) } executor = ThreadPoolExecutor(max_workers=len(io_pods))
[modules/traffic] Recreate widget list during each iteration To avoid "stray" devices being kept in the list, empty the widgets list during each iteration and re-populate it from the list of available interfaces. fixes
@@ -44,9 +44,6 @@ class Module(bumblebee.engine.Module): self._update_widgets(widgets) def create_widget(self, widgets, name, txt=None, attributes={}): - widget = self.widget(name) - if widget: return widget - widget = bumblebee.output.Widget(name=name) widget.full_text(txt) widgets.append(widget) @@ -69,6 +66,8 @@ class Module(bumblebee.engine.Module): def _update_widgets(self, widgets): interfaces = [ i for i in netifaces.interfaces() if not i.startswith(self._exclude) ] + del widgets[:] + counters = psutil.net_io_counters(pernic=True) for interface in interfaces: if not interface: interface = "lo"
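A rough sketch of the per-iteration data gathering behind the module above; the exclusion prefixes are illustrative choices, not the module's configuration.

import netifaces
import psutil

exclude = ("lo", "virbr", "docker")
interfaces = [i for i in netifaces.interfaces() if not i.startswith(exclude)]
counters = psutil.net_io_counters(pernic=True)
for name in interfaces:
    if name in counters:
        stats = counters[name]
        # One widget per currently present interface, rebuilt every pass.
        print(name, stats.bytes_sent, stats.bytes_recv)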
Use cgi instead of html module The html module is only available for python3. The cgi module provides almost identical functionality and is present for both python2 and python3.
@@ -8,7 +8,7 @@ Bindings normally consists of three parts: """ import base64 -import html +import cgi import logging import saml2 @@ -87,15 +87,15 @@ def http_form_post_message(message, location, relay_state="", _msg = _msg.decode('ascii') saml_response_input = HTML_INPUT_ELEMENT_SPEC.format( - name=html.escape(typ), - val=html.escape(_msg), + name=cgi.escape(typ), + val=cgi.escape(_msg), type='hidden') relay_state_input = "" if relay_state: relay_state_input = HTML_INPUT_ELEMENT_SPEC.format( name='RelayState', - val=html.escape(relay_state), + val=cgi.escape(relay_state), type='hidden') response = HTML_FORM_SPEC.format(
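A small illustration of the two escape helpers discussed above; note cgi.escape was later deprecated (3.2) and removed (3.8), so the sketch guards the import when run on modern Python. The sample string is made up.

import html

value = '<saml:Response attr="1 & 2">'
print(html.escape(value))  # &lt;saml:Response attr=&quot;1 &amp; 2&quot;&gt;

try:
    from cgi import escape as cgi_escape  # gone on Python 3.8+
except ImportError:
    cgi_escape = None
if cgi_escape is not None:
    print(cgi_escape(value, quote=True))   # same escaped output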
Disable strictPropertyInitialization This is not easily compatible with code that initialises properties outside of the constructor (used in Stimulus)
"noUnusedLocals": true, "noUnusedParameters": true, "strictNullChecks": true, - "strictPropertyInitialization": true, // Requires `--strictNullChecks` be enabled in order to take effect + "strictPropertyInitialization": false, "target": "ES2021" // Since lowest browser support is for Safari 14 }, "files": [
Qt change_password_dialog: fix deadlock in hww case if device unplugged fixes
@@ -2562,25 +2562,27 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): if not ok: return - try: - hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption() - except UserCancelled: - return - except BaseException as e: - self.logger.exception('') - self.show_error(repr(e)) - return + def on_password(hw_dev_pw): old_password = hw_dev_pw if self.wallet.has_password() else None new_password = hw_dev_pw if encrypt_file else None + self._update_wallet_password( + old_password=old_password, new_password=new_password, encrypt_storage=encrypt_file) + + self.thread.add( + self.wallet.keystore.get_password_for_storage_encryption, + on_success=on_password) else: from .password_dialog import ChangePasswordDialogForSW d = ChangePasswordDialogForSW(self, self.wallet) ok, old_password, new_password, encrypt_file = d.run() - if not ok: return + self._update_wallet_password( + old_password=old_password, new_password=new_password, encrypt_storage=encrypt_file) + + def _update_wallet_password(self, *, old_password, new_password, encrypt_storage: bool): try: - self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file) + self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_storage) except InvalidPassword as e: self.show_error(str(e)) return
LandBOSSE second integration Use the default project data in the library.
@@ -5,7 +5,10 @@ with warnings.catch_warnings(): warnings.filterwarnings("ignore", message="numpy.ufunc size changed") import pandas as pd -from .OpenMDAOFileOperations import OpenMDAOFileOperations + +# The library path is where to find the default input data for LandBOSSE. +library_path = '../../library/landbosse' + class OpenMDAODataframeCache: """ @@ -56,7 +59,7 @@ class OpenMDAODataframeCache: xlsx_path : str The path from which to read the .xlsx file. This parameter - has the default value of + has the default value of the library path variable above. Returns ------- @@ -69,10 +72,8 @@ class OpenMDAODataframeCache: original = cls._cache[xlsx_basename] return cls.copy_dataframes(original) - file_ops = OpenMDAOFileOperations() - if xlsx_path is None: - xlsx_filename = os.path.join(file_ops.landbosse_input_dir(), 'project_data', f'{xlsx_basename}.xlsx') + xlsx_filename = os.path.join(library_path, f'{xlsx_basename}.xlsx') else: xlsx_filename = os.path.join(xlsx_path, f'{xlsx_basename}.xlsx')
Update exception message This was incorrectly suggesting the user needed to create an instance of RedisCache, when in fact it is the parent that needs to be instantiated.
@@ -128,7 +128,10 @@ class RedisCache: raise RuntimeError("RedisCache must be a class attribute.") if instance is None: - raise RuntimeError("You must create an instance of RedisCache to use it.") + raise RuntimeError( + "You must access the RedisCache instance through the cog instance " + "before accessing it using the cog's class object." + ) for attribute in vars(instance).values(): if isinstance(attribute, Bot):
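A toy model of the access pattern the reworded error describes, using a hypothetical descriptor and cog class; it only shows why class-level access fails while instance access works.

class CacheDescriptor:
    def __set_name__(self, owner, name):
        self._name = name

    def __get__(self, instance, owner):
        if instance is None:
            raise RuntimeError(
                "You must access the cache instance through the cog instance "
                "before accessing it using the cog's class object.")
        # Store one dict per owning instance under a private attribute.
        return instance.__dict__.setdefault("_" + self._name, {})

class Cog:
    cache = CacheDescriptor()

cog = Cog()
cog.cache["answer"] = 42      # fine: accessed through an instance
try:
    Cog.cache                 # class-level access: helpful error instead of confusion
except RuntimeError as exc:
    print(exc)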
Ram 1500: remove harness Update values.py
@@ -56,7 +56,7 @@ CAR_INFO: Dict[str, Optional[Union[ChryslerCarInfo, List[ChryslerCarInfo]]]] = { ], CAR.JEEP_CHEROKEE: ChryslerCarInfo("Jeep Grand Cherokee 2016-18", video_link="https://www.youtube.com/watch?v=eLR9o2JkuRk"), CAR.JEEP_CHEROKEE_2019: ChryslerCarInfo("Jeep Grand Cherokee 2019-21", video_link="https://www.youtube.com/watch?v=jBe4lWnRSu4"), - CAR.RAM_1500: ChryslerCarInfo("Ram 1500 2019-22"), + CAR.RAM_1500: ChryslerCarInfo("Ram 1500 2019-22", harness=Harness.none), } # Unique CAN messages:
Remove script tag from reports/filters_js.html, which is redundant These are the pages where filters_js.html is included, which all descend from either GenericReportView or BaseDownloadExportView, which both use corehq/apps/export/templates/export/download_export.html corehq/apps/reports/templates/reports/standard/base_template.html corehq/apps/hqadmin/templates/hqadmin/faceted_report.html corehq/apps/reports_core/templates/reports_core/base_template_new.html
{# This file may be compressed; it should contain only script tags #} -<script src="{% static 'select2-3.4.5-legacy/select2.min.js' %}"></script> <script src="{% static 'reports/js/filters/button_group.js' %}"></script> <script src="{% static 'reports/js/filters/select2s.js' %}"></script> <script src="{% static 'reports/js/filters/phone_number.js' %}"></script>
Skip slow quantized tests under ASAN Summary: Skip tests that normally finish in under a second but take 20+ min under ASAN Pull Request resolved: Test Plan: CI
@@ -1455,6 +1455,7 @@ class TestQuantizedOps(TestCase): quantize_ref = torch.quantize_per_tensor(float_ref, Y_scale, Y_zero_point, dtype_x) self.assertEqual(qy.int_repr().numpy(), quantize_ref.int_repr().numpy()) + @unittest.skipIf(TEST_WITH_UBSAN, "Takes 20+ min to finish with ASAN") @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=4, max_dims=5, min_side=1, max_side=32), qparams=hu.qparams()), @@ -1490,6 +1491,7 @@ class TestQuantizedOps(TestCase): quantize_ref = torch.quantize_per_tensor(torch.from_numpy(float_ref_relu), Y_scale, Y_zero_point, dtype_x) self.assertEqual(qy.int_repr().numpy(), quantize_ref.int_repr().numpy()) + @unittest.skipIf(TEST_WITH_UBSAN, "Takes 20+ min to finish with ASAN") @given(X=hu.tensor(shapes=hu.array_shapes(min_dims=5, max_dims=5, min_side=1, max_side=32), qparams=hu.qparams()),
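The generic shape of the guard added above; the environment flag, threshold wording and test body are illustrative, not the actual PyTorch test configuration.

import os
import unittest

RUNNING_UNDER_ASAN = os.environ.get("ASAN", "0") == "1"

class SlowQuantizedOps(unittest.TestCase):
    @unittest.skipIf(RUNNING_UNDER_ASAN, "Takes 20+ min to finish with ASAN")
    def test_heavy_op(self):
        # Stand-in for a test that is cheap normally but very slow under sanitizers.
        self.assertEqual(sum(range(10)), 45)

if __name__ == "__main__":
    unittest.main()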
Allow struct types to be exposed in public APIs TN:
@@ -2017,7 +2017,7 @@ class CompileCtx(object): This also emits non-blocking errors for all types that are exposed in the public API whereas they should not. """ - from langkit.compiled_types import ArrayType, Field + from langkit.compiled_types import ArrayType, Field, StructType def expose(t, to_internal, for_field, type_use, traceback): """ @@ -2043,6 +2043,11 @@ class CompileCtx(object): # but we still need to set the converter flags below. pass + elif t.is_entity_type: + # Allow all entity types to be exposed, and don't try to expose + # internals, unlike for regular structs. + pass + elif isinstance(t, ArrayType): # Don't allow public arrays of arrays check( @@ -2059,9 +2064,16 @@ class CompileCtx(object): expose(t.element_type, to_internal, for_field, 'element type', traceback + ['array of {}'.format(t.dsl_name)]) - elif t.is_entity_type: - # Allow all entity types to be exposed - pass + elif isinstance(t, StructType): + # Expose all record fields + for f in t.get_fields(): + # Reject public arrays of bare AST nodes + check( + not f.type.is_ast_node, + '{}, a bare AST node struct field'.format(f.qualname) + ) + expose(f.type, to_internal, for_field, 'field type', + traceback + ['{} structures'.format(t.dsl_name)]) else: # Only array and struct types have their "_exposed" attribute
Fix, helper was wrongly named. * This could lead to linker errors for some code that does int < int comparisons with type certainty.
@@ -1222,7 +1222,7 @@ int RICH_COMPARE_BOOL_GTE_OBJECT_INT(PyObject *operand1, PyObject *operand2) { return MY_RICHCOMPARE_BOOL(operand1, operand2, Py_GE); } -int RICH_COMPARE_LT_BOOL_INT_INT(PyObject *operand1, PyObject *operand2) { +int RICH_COMPARE_BOOL_LT_INT_INT(PyObject *operand1, PyObject *operand2) { assert(PyInt_CheckExact(operand1)); assert(PyInt_CheckExact(operand2));
ac3 6 channel fixed regression
@@ -715,7 +715,7 @@ class Ac3Codec(AudioCodec): if 'channels' in opt: c = opt['channels'] if c > 6: - opt['channels'] = 8 + opt['channels'] = 6 return super(Ac3Codec, self).parse_options(opt, stream)