message (stringlengths 13-484) | diff (stringlengths 38-4.63k)
Switch tag truncation logic to be more consistent with the codebase. The previous implementation using `textwrap` is replaced with simpler string formatting, similar to the solution used in `_parallel_coordinate.py`.
-import textwrap from typing import Any from typing import Callable from typing import Dict @@ -233,8 +232,9 @@ class MLflowCallback(object): # see https://github.com/mlflow/mlflow/issues/2931 for key, value in tags.items(): value = str(value) # make sure it is a string - if len(value) > mlflow.utils.validation.MAX_TAG_VAL_LENGTH: - tags[key] = textwrap.shorten(value, mlflow.utils.validation.MAX_TAG_VAL_LENGTH) + max_val_length = mlflow.utils.validation.MAX_TAG_VAL_LENGTH + if len(value) > max_val_length: + tags[key] = "{}...".format(value[: max_val_length - 3]) # This sets the tags for MLflow. mlflow.set_tags(tags)
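A minimal, self-contained sketch of the truncation pattern used in the diff above; `MAX_TAG_VAL_LENGTH` here is a stand-in constant, not the real `mlflow.utils.validation` value.

```python
# Stand-in for mlflow.utils.validation.MAX_TAG_VAL_LENGTH (assumed value).
MAX_TAG_VAL_LENGTH = 16

def truncate_tag(value: str, max_len: int = MAX_TAG_VAL_LENGTH) -> str:
    """Shorten value to max_len characters, marking the cut with '...'."""
    if len(value) > max_len:
        return "{}...".format(value[: max_len - 3])
    return value

print(truncate_tag("short"))                       # unchanged
print(truncate_tag("a very long tag value here"))  # 'a very long t...'
```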
Update README.md Due to the unavailability of the CI server, temporarily remove the images.
# Downloads [![Downloads](https://pepy.tech/badge/pony)](https://pepy.tech/project/pony) [![Downloads](https://pepy.tech/badge/pony/month)](https://pepy.tech/project/pony/month) [![Downloads](https://pepy.tech/badge/pony/week)](https://pepy.tech/project/pony/week) -# Tests - -#### PostgreSQL -Python 2 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python2postgres&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python2postgres)/statusIcon"/> -</a> -Python 3 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python3postgres&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python3postgres)/statusIcon"/> -</a> - -#### SQLite -Python 2 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python2sqlite&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python2sqlite)/statusIcon"/> -</a> -Python 3 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python3sqlite&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python3sqlite)/statusIcon"/> -</a> - -#### CockroachDB -Python 2 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python2cockroach&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python2cockroach)/statusIcon"/> -</a> -Python 3 <a href="http://jenkins.agilecode.io:8111/viewType.html?buildTypeId=GithubPonyORMCi_Python3cockroach&guest=1"> -<img src="http://jenkins.agilecode.io:8111/app/rest/builds/buildType:(id:GithubPonyORMCi_Python3cockroach)/statusIcon"/> -</a> - Pony Object-Relational Mapper =============================
LocalDispatcher : Terminate entire process tree on Windows When run via `subprocess`, the Gaffer wrapper is a child process of a Windows `cmd.exe` process. In that case, `os.killpg` does not terminate all child processes. Using Windows' `TASKKILL /T` seems to be the only reliable option without adding modules like `psutil`.
@@ -291,6 +291,9 @@ class LocalDispatcher( GafferDispatch.Dispatcher ) : while process.poll() is None : if batch.blindData().get( "killed" ) : + if os.name == "nt" : + subprocess.check_call( [ "TASKKILL", "/F", "/PID", str( process.pid ), "/T" ] ) + else : os.killpg( process.pid, signal.SIGTERM ) self.__reportKilled( batch ) return False
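A standalone sketch of the same cross-platform idea, outside Gaffer: `TASKKILL /T` walks the child tree on Windows, while the POSIX branch assumes the process was started as its own process group leader (e.g. via `preexec_fn=os.setsid`).

```python
import os
import signal
import subprocess

def terminate_process_tree(pid: int) -> None:
    """Terminate a process and its children on Windows or POSIX."""
    if os.name == "nt":
        # /T terminates the whole tree rooted at PID, /F forces it.
        subprocess.check_call(["TASKKILL", "/F", "/PID", str(pid), "/T"])
    else:
        # killpg needs a process group id; this assumes pid leads its own group.
        os.killpg(pid, signal.SIGTERM)
```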
Copy FlowControlMixin locally Apparently it should be considered private.
@@ -4,7 +4,6 @@ An ``asyncio.Protocol`` subclass for lower level IO handling. import asyncio import re import ssl -from asyncio.streams import FlowControlMixin from typing import Callable, Optional, cast from .compat import start_tls @@ -27,14 +26,76 @@ LINE_ENDINGS_REGEX = re.compile(rb"(?:\r\n|\n|\r(?!\n))") PERIOD_REGEX = re.compile(rb"(?m)^\.") +class FlowControlMixin(asyncio.Protocol): + """ + Reusable flow control logic for StreamWriter.drain(). + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + StreamWriter.drain() must wait for _drain_helper() coroutine. + + Copied from stdlib as per recommendation: https://bugs.python.org/msg343685. + Logging and asserts removed, type annotations added. + """ + + def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None): + if loop is None: + self._loop = asyncio.get_event_loop() + else: + self._loop = loop + self._paused = False + self._drain_waiter = None # type: Optional[asyncio.Future[None]] + self._connection_lost = False + + def pause_writing(self) -> None: + self._paused = True + + def resume_writing(self) -> None: + self._paused = False + + waiter = self._drain_waiter + if waiter is not None: + self._drain_waiter = None + if not waiter.done(): + waiter.set_result(None) + + def connection_lost(self, exc) -> None: + self._connection_lost = True + # Wake up the writer if currently paused. + if not self._paused: + return + waiter = self._drain_waiter + if waiter is None: + return + self._drain_waiter = None + if waiter.done(): + return + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + async def _drain_helper(self) -> None: + if self._connection_lost: + raise ConnectionResetError("Connection lost") + if not self._paused: + return + waiter = self._drain_waiter + waiter = self._loop.create_future() + self._drain_waiter = waiter + await waiter + + def _get_close_waiter(self, stream: asyncio.StreamWriter) -> asyncio.Future: + raise NotImplementedError + + class SMTPProtocol(FlowControlMixin, asyncio.Protocol): def __init__( self, loop: Optional[asyncio.AbstractEventLoop] = None, connection_lost_callback: Optional[Callable] = None, ) -> None: - self._loop = loop or asyncio.get_event_loop() - super().__init__() + super().__init__(loop=loop) self._over_ssl = False self._buffer = bytearray() self._response_waiter = None # type: Optional[asyncio.Future[SMTPResponse]] @@ -43,6 +104,7 @@ class SMTPProtocol(FlowControlMixin, asyncio.Protocol): self.transport = None # type: Optional[asyncio.Transport] self._command_lock = None # type: Optional[asyncio.Lock] + self._closed = self._loop.create_future() # type: asyncio.Future[None] def __del__(self): waiters = (self._response_waiter, self._connection_lost_waiter) @@ -51,6 +113,9 @@ class SMTPProtocol(FlowControlMixin, asyncio.Protocol): # Avoid 'Future exception was never retrieved' warnings waiter.exception() + def _get_close_waiter(self, stream: asyncio.StreamWriter) -> asyncio.Future: + return self._closed + @property def is_connected(self) -> bool: """
Update utils.py Fixed a bad copy-paste in tests/utils for the QEMU session manager.
@@ -281,11 +281,11 @@ def stop_qemuuser(process: subprocess.Popen) -> None: def qemuuser_session(*args, **kwargs): exe = kwargs.get("exe", "") or _target("default") port = kwargs.get("port", 0) or GDBSERVER_DEFAULT_PORT - sess = start_gdbserver(exe, port) + sess = start_qemuuser(exe, port) try: yield sess finally: - stop_gdbserver(sess) + stop_qemuuser(sess)
Update face_recognition_learner.py Changed torch.load to load correctly when the device has only a CPU.
@@ -799,7 +799,7 @@ class FaceRecognitionLearner(Learner): if os.path.exists(os.path.join(path, 'backbone_' + self.backbone + '.pth')): self.__create_model(num_class=0) self.backbone_model.load_state_dict(torch.load( - os.path.join(path, 'backbone_' + self.backbone + '.pth'))) + os.path.join(path, 'backbone_' + self.backbone + '.pth'), map_location=torch.device(self.device))) self._model = {self.backbone_model, self.network_head_model} else: raise UserWarning('No backbone_' + self.backbone + '.pth found. Please have a check')
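For context, a minimal sketch (hypothetical checkpoint path) of the `map_location` pattern the diff applies: it lets a checkpoint saved on a GPU machine load on a CPU-only one.

```python
import torch

def load_checkpoint_any_device(path: str) -> dict:
    """Load a state dict saved on GPU onto whatever device is available."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # map_location remaps stored CUDA tensors to the chosen device.
    return torch.load(path, map_location=device)
```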
remove .encode(__salt_system_encoding__)) .encode(__salt_system_encoding__)) is not necessary in Python 3
@@ -1305,9 +1305,7 @@ class AESFuncs(object): with salt.utils.files.fopen(cpath, mode) as fp_: if load['loc']: fp_.seek(load['loc']) - if six.PY3: - fp_.write(load['data'].encode(__salt_system_encoding__)) - else: + fp_.write(load['data']) return True
Update test_critical_load_bau.py more sustained time steps now that outage is inclusive of outage_end_time_step
@@ -89,7 +89,7 @@ class CriticalLoadBAUTests(ResourceTestCaseMixin, TestCase): d_expected['total_energy_cost_bau'] = 53967.58 d_expected['year_one_energy_cost_bau'] = 7434.65 d_expected['resilience_check_flag'] = True - d_expected['bau_sustained_time_steps'] = 4 + d_expected['bau_sustained_time_steps'] = 5 try: check_common_outputs(self, c, d_expected)
fix bug in parsers.py A variable change within build_parser_createdb hadn't been carried through to the function prototype.
@@ -215,7 +215,7 @@ def build_parser_index(subps, parents=None): parser.set_defaults(func=subcommands.subcmd_index) -def build_parser_createdb(subparsers, parents=None): +def build_parser_createdb(subps, parents=None): """Returns a command-line parser for the createdb subcommand """ parser = subps.add_parser('createdb', parents=parents,
Change empty list literal compiler error to match actual builtin name Summary: I changed the name of this builtin to match Python's native style, but forgot to change the compiler error to match. Pull Request resolved:
@@ -1380,7 +1380,8 @@ private: auto values = getValues(ll.inputs(), /*maybe_unpack=*/true, identity); if (values.size() == 0) { throw ErrorReport(tree) << "Empty list literals not allowed. " - << "Use _constructEmptyFooList() instead"; + << "Use _construct_empty_foo_list() instead. " + << "`foo` can be `int`, `float` or `tensor`"; } const auto elem_type = values.at(0)->type(); for (auto v : values) {
docs/expensive-migrations: Revise a bit for clarity. Borrows some language from the paragraph in the changelog that points to it.
# Running expensive migrations early -If you'd like to run the major database migrations included in the -Zulip 1.7 release early, before you start the upgrade process, you can -do the following: +Zulip 1.7 contains some significant database migrations that can take +several minutes to run. -* Log into your zulip server as the `zulip` user (or as `root` and +The upgrade process automatically minimizes disruption by running +these first, before beginning the user-facing downtime. However, if +you'd like to watch the downtime phase of the upgrade closely, you +can run them manually before starting the upgrade: + +1. Log into your Zulip server as the `zulip` user (or as `root` and then run `su zulip` to drop privileges), and `cd /home/zulip/deployments/current` -* Run `./manage.py dbshell`. This will open a shell connected to the +2. Run `./manage.py dbshell`. This will open a shell connected to the Postgres database. -* In the postgres shell, run the following commands: +3. In the postgres shell, run the following commands: -``` CREATE INDEX CONCURRENTLY zerver_usermessage_mentioned_message_id ON zerver_usermessage (user_profile_id, message_id) @@ -30,19 +33,20 @@ do the following: CREATE INDEX CONCURRENTLY zerver_usermessage_wildcard_mentioned_message_id ON zerver_usermessage (user_profile_id, message_id) - WHERE (flags & 8) != 0 OR (FLAGS & 16) != 0; + WHERE (flags & 8) != 0 OR (flags & 16) != 0; CREATE INDEX CONCURRENTLY zerver_usermessage_unread_message_id ON zerver_usermessage (user_profile_id, message_id) WHERE (flags & 1) = 0; -``` - -Once these have finished, you can proceed with installing zulip 1.7. -To help you estimate how long these will take on your server, creating -the first 4 indexes took about 1 minute each with chat.zulip.org's 75M -UserMessage rows (from `select COUNT(*) from zerver_usermessage;` in -the `manage.py dbshell`), with no user-facing service disruption. The -final, "unread_message" index took more like 10 minutes. +4. These will take some time to run, during which the server will + continue to serve user traffic as usual with no disruption. Once + they finish, you can proceed with installing Zulip 1.7. +To help you estimate how long these will take on your server: count +the number of UserMessage rows, with `select COUNT(*) from zerver_usermessage;` +at the `./manage.py dbshell` prompt. At the time these migrations +were run on chat.zulip.org, it had 75M UserMessage rows; the first 4 +indexes took about 1 minute each to create, and the final, +"unread_message" index took more like 10 minutes.
updated documentation added a default value so a runtime exception does not occur
@@ -326,7 +326,7 @@ containers for different environments. See the example below. .. code-block:: python def determine_scope(fixture_name, config): - if config.getoption("--keep-containers"): + if config.getoption("--keep-containers", None): return "session" return "function"
Move copyright to top At some point during the Python 3 migration process the copyright text was moved by mistake.
# Licensed under the MIT license # http://opensource.org/licenses/mit-license.php +# Copyright 2007, Frank Scholz <[email protected]> + import time from operator import attrgetter from abc import ABCMeta, abstractmethod -# Copyright 2007,, Frank Scholz <[email protected]> from lxml import etree import coherence.extern.louie as louie
Update jira-detect.yaml extracts the version number.
@@ -2,7 +2,7 @@ id: jira-detect info: name: Detect Jira Issue Management Software - author: pdteam + author: pdteam & philippedelteil severity: info tags: panel @@ -19,3 +19,9 @@ requests: - type: word words: - "Project Management Software" + extractors: + - type: regex + part: body + group: 1 + regex: + - 'title="JiraVersion" value="([0-9.]+)'
Update analytics README A bit more info on environments.
@@ -25,14 +25,17 @@ Beyond the general infrastructure, there's a JavaScript module for each of the m New tests need to be added to [ab_tests](https://github.com/dimagi/commcare-hq/blob/master/corehq/apps/analytics/ab_tests.py). Typically, view code will check the test's `version` and set a corresponding flag in the template context, which will then use that flag to deliver the appropriate content for the user's test group. -### Debugging +### Handling different environments and debugging -Useful localsettings when working with analytics: -- `ANALYTICS_IDS`: Analytics code doesn't run if the relevant API key isn't provided. For most purposes, setting the key to a dummy value is sufficient. We have test API keys for Google Analytics and Kissmetrics; you can pull these from the [staging vault](https://github.com/dimagi/commcare-cloud/tree/master/src/commcare_cloud/ansible/README.md#managing-secrets-with-vault). +In production, analytics are tracked only\* on SaaS servers - that is, on [www.commcarehq.org](http://www.commcarehq.org). This is controlled by the `isEnabled` property in [global.html](https://github.com/dimagi/commcare-hq/blob/master/corehq/apps/analytics/templates/analytics/initial/global.html). All other servers bypass the setup code. -- `ANALYTICS_CONFIG.DEBUG`: Analytics code isn't run on every server. Set `DEBUG` to `True` to bypass these checks (you still need to set the API keys, too). +Analytics run in staging via a debug flag. This debug flag, along with the necessary API keys, can be set in localsettings to enable analytics on your local server: +- `ANALYTICS_IDS`: Analytics code doesn't run if the relevant API key isn't provided. For most purposes, setting the key to a dummy value is sufficient. We have test API keys for Google Analytics and Kissmetrics; you can pull these from the [staging vault](https://github.com/dimagi/commcare-cloud/tree/master/src/commcare_cloud/ansible/README.md#managing-secrets-with-vault). +- `ANALYTICS_CONFIG.DEBUG`: Set `DEBUG` to `True` to enable analytics, overriding the server-specific checks (you still need to set the API keys, too). - `ANALYTICS_CONFIG.LOG_LEVEL`: Controls the client-side logging. Turning it up to `verbose` can help debug. +\* ICDS also tracks some analytics, but this happens outside of the main analytics framework described in these docs. + ## Individual Services ### Google Analytics
Fix incorrect lookup name. Many apologies.
@@ -8,7 +8,7 @@ import sal.plugin plugin_q = Q(pluginscriptsubmission__plugin='Encryption') # THe name got changed from Filevault to FileVault. Support both. -name_q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name__iexect='Filevault') +name_q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_name__iexact='Filevault') enabled_q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_data='Enabled') disabled_q = Q(pluginscriptsubmission__pluginscriptrow__pluginscript_data='Disabled') portable_q = Q(machine_model__contains='Book')
Remove workaround for parsing negative numbers This was fixed upstream and bumps the version to include the fix
@@ -115,17 +115,6 @@ class EntityProtoDecoder: model._set_attributes(deserialized_props) - @staticmethod - def _get_safe_int64_value(v: ProtoPropertyValue) -> int: - """ - This exists to work around https://github.com/googleapis/python-ndb/issues/590 - TODO: remove if/when that issue is resolved - """ - result = v.int64value() - if result >= (1 << 63): - result -= 1 << 64 - return result - @staticmethod def _get_prop_value(v: ProtoPropertyValue, p: ProtoProperty) -> Any: # rougly based on https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/model.py#L2647 @@ -151,7 +140,7 @@ class EntityProtoDecoder: pass return sval elif v.has_int64value(): - ival = EntityProtoDecoder._get_safe_int64_value(v) + ival = v.int64value() if p.meaning() == ProtoProperty.GD_WHEN: try: return EPOCH + datetime.timedelta(microseconds=ival)
Update analyze_nir_intensity.py Update analyze_nir_intensity based on the updated pcv.visualize.histogram
@@ -10,7 +10,7 @@ from plantcv.plantcv import plot_image from plantcv.plantcv.threshold import binary as binary_threshold from plantcv.plantcv import params from plantcv.plantcv import outputs - +from plantcv.plantcv.visualize import histogram def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False, label="default"): """This function calculates the intensity of each pixel associated with the plant and writes the values out to @@ -51,7 +51,7 @@ def analyze_nir_intensity(gray_img, mask, bins=256, histplot=False, label="defau # Calculate histogram fig_hist, hist_data = histogram(gray_img, mask=mask, bins=bins, lower_bound=0, upper_bound=maxval, title=None) - bin_labels, hist_nir, hist_percent = hist_data['pixel intensity'].tolist(), hist_data['intensity'].tolist(), \ + bin_labels, hist_nir, hist_percent = hist_data['pixel intensity'].tolist(), hist_data['hist_count'].tolist(), \ hist_data['proportion of pixels (%)'].tolist() masked1 = cv2.bitwise_and(rgbimg, rgbimg, mask=mask)
Fix typo in consumers documentation A note in the consumers documentation contradicted the correct phrasing in the database documentation. One does not need to use `database_sync_to_async` in synchronous code.
@@ -112,7 +112,7 @@ callable into an asynchronous coroutine. .. important:: If you want to call the Django ORM from an ``AsyncConsumer`` (or any other - synchronous code), you should use the ``database_sync_to_async`` adapter + asynchronous code), you should use the ``database_sync_to_async`` adapter instead. See :doc:`/topics/databases` for more.
Fix redundancy in the documentation template fixes
@@ -17,7 +17,7 @@ assignees: '' Copy the section link here. --> -(Insert the permalink of the section heading in the Wagtail docs. Choose the most specific section / lowest-level heading to which your suggested change would apply. (In the docs, hover over the the heading to see the permalink icon.)) +(Insert the permalink of the section heading in the Wagtail docs. Choose the most specific section / lowest-level heading to which your suggested change would apply. (In the docs, hover over the heading to see the permalink icon.)) ### Details
tests/library: remove duplicate parameter Remove duplicate fake_params parameter as it's already defined later as a dict (instead of an empty list).
@@ -8,19 +8,18 @@ fake_container_image = 'docker.io/ceph/daemon:latest' class TestCephDashboardUserModule(object): def setup_method(self): - self.fake_params = [] self.fake_binary = 'ceph' self.fake_cluster = 'ceph' self.fake_name = 'foo' self.fake_user = 'foo' self.fake_password = 'bar' - self.fake_module = MagicMock() - self.fake_module.params = self.fake_params self.fake_roles = ['read-only', 'block-manager'] self.fake_params = {'cluster': self.fake_cluster, 'name': self.fake_user, 'password': self.fake_password, 'roles': self.fake_roles} + self.fake_module = MagicMock() + self.fake_module.params = self.fake_params def test_create_user(self): self.fake_module.params = self.fake_params
Better explain what we are setting in these files for new users. Also note where the variable server comes from.
@@ -199,7 +199,7 @@ To force the mine data to update immediately run: salt '*' mine.update - +Setup the :py:mod:`salt.states.file.managed` state in :file:`/srv/salt/haproxy.sls`: .. code-block:: yaml @@ -210,7 +210,7 @@ To force the mine data to update immediately run: - source: salt://haproxy_config - template: jinja -:file:`/srv/salt/haproxy_config`: +Create the Jinja template in :file:`/srv/salt/haproxy_config`: .. code-block:: yaml @@ -222,6 +222,8 @@ To force the mine data to update immediately run: <...file contents snipped...> +In the above example, ``server`` will be expanded to the ``minion_id``. + .. note:: The expr_form argument will be renamed to ``tgt_type`` in the Nitrogen release of Salt.
Updated test case Fixed a typo: `name` was written instead of `__name__`.
@@ -12,7 +12,7 @@ def knapsack(W, wt, val, n): dp[i][w] = dp[i-1][w] return dp[n][w] -if name == "__main__": +if __name__ == "__main__": val = [3,2,4,4] wt = [4,3,2,3] W = 6
Suppress third-party warnings and error on ClassMovedWarning 1. We don't need to know about every `invalid escape sequence` in every library. 2. We added `ClassMovedWarning` in but we only throw an error in the integration tests, so we add it here.
@@ -14,6 +14,12 @@ addopts = "-p cumulusci.tests.pytest_plugins.pytest_typeguard -p cumulusci.test markers = [ "metadeploy: mark a test that interacts with the MetaDeploy REST API", ] +filterwarnings = [ + "error:ClassMovedWarning", + "ignore::DeprecationWarning:.*.rflint", + "ignore::DeprecationWarning:.*.selenium", + "ignore::SyntaxWarning:.*.selenium", +] [tool.isort] profile = "black"
Update scene_gltf2.rst Mention shape key sampling requirements.
@@ -382,6 +382,11 @@ option is selected (on by default). All glTF animations are imported as NLA Stri If option is disabled, Blender NLA strip actions will be ignored. Only active action of each objects will be taken into account, and merged into a single glTF animation. +.. note:: + + In order for shape key animations controlled by drivers using bone transformations to be sampled, + they must be on a mesh object which is a child of the bones' armature. + Custom Properties -----------------
sqlalchemy table webhooks unique constraint removed Shouldn't be unique, as different tenants can use the same url and regex. Also, this column type is not supported on some systems.
@@ -16,7 +16,7 @@ import json from oslo_db.sqlalchemy import models from sqlalchemy import Column, DateTime, INTEGER, String, \ - SmallInteger, BigInteger, Index, Boolean, UniqueConstraint + SmallInteger, BigInteger, Index, Boolean from sqlalchemy.ext.declarative import declarative_base import sqlalchemy.types as types @@ -172,9 +172,6 @@ class Webhooks(Base): url = Column(String(256), nullable=False) headers = Column(String(1024)) regex_filter = Column(String(512)) - constraint = UniqueConstraint('url', 'regex_filter') - - __table_args__ = (UniqueConstraint('url', 'regex_filter'),) def __repr__(self): return \
github-actions: Enable python 3.8 on osx Python 3.8 on windows has problems installing GPy.
@@ -6,15 +6,12 @@ jobs: build: runs-on: ${{ matrix.os }} strategy: - max-parallel: 7 matrix: python-version: [3.6, 3.7, 3.8] os: [ubuntu-latest, macos-latest, windows-latest] exclude: - os: windows-latest python-version: 3.8 - - os: macos-latest - python-version: 3.8 steps: - uses: actions/checkout@v1 @@ -34,15 +31,14 @@ jobs: run: | choco install --no-progress -y graphviz python -m pip install --upgrade pip - pip install torch -f https://download.pytorch.org/whl/cpu/torch_stable.html + if ($Env:PY_VER -ne '3.8') { pip install torch -f https://download.pytorch.org/whl/cpu/torch_stable.html } if: matrix.os == 'windows-latest' + env: + PY_VER: ${{ matrix.python-version }} - name: Python3.8 dependencies setup run: | - sed -i.tmp "/torch/d" requirements.txt - pip install cython llvmlite==0.26 + sed "-i.tmp" "/torch/d" requirements.txt if: matrix.python-version == '3.8' - env: - LLVM_CONFIG: llvm-config-6.0 - name: Shared dependencies shell: bash run: |
[docs][Adreno] Remove unnecessary compilation flag Flag `-DUSE_MICRO=OFF` was added to avoid this issue. In these changes were reverted. Removed the unnecessary compilation flag to avoid confusing users.
@@ -94,7 +94,7 @@ folder of TVM: mkdir build_android cd build_android - cmake .. -DUSE_OPENCL=ON -DUSE_MICRO=OFF -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_NATIVE_API_LEVEL=android-28 -DCMAKE_FIND_ROOT_PATH_MODE_PACKAGE=ON -DANDROID_STL=c++_static -DUSE_CPP_RPC=ON + cmake .. -DUSE_OPENCL=ON -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_HOME}/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_NATIVE_API_LEVEL=android-28 -DCMAKE_FIND_ROOT_PATH_MODE_PACKAGE=ON -DANDROID_STL=c++_static -DUSE_CPP_RPC=ON make -jN tvm_runtime tvm_rpc where **N** is the number of cores available on your *CPU*.
tpm_main: close file descriptor for aik handle The file descriptor was never closed.
@@ -645,7 +645,7 @@ class tpm(tpm_abstract.AbstractTPM): if self.tools_version in ["4.0", "4.2"]: # ok lets write out the key now secdir = secure_mount.mount() # confirm that storage is still securely mounted - _, secpath = tempfile.mkstemp(dir=secdir) + secfd, secpath = tempfile.mkstemp(dir=secdir) if self.tools_version == "3.2": command = ["tpm2_getpubak", "-E", hex(ek_handle), "-k", "0x81010008", @@ -657,6 +657,8 @@ class tpm(tpm_abstract.AbstractTPM): "-G", asym_alg, "-g", hash_alg, "-s", sign_alg, "-u", akpubfile.name, "-p", aik_pw, "-P", owner_pw] retDict = self.__run(command, outputpaths=akpubfile.name) + if secfd >= 0: + os.close(secfd) retout = retDict['retout'] reterr = retDict['reterr'] code = retDict['code']
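A small, generic reminder of why the fix is needed (not Keylime code): `tempfile.mkstemp` returns an already-open file descriptor that the caller must close explicitly.

```python
import os
import tempfile

fd, path = tempfile.mkstemp()
try:
    os.write(fd, b"scratch data")
finally:
    os.close(fd)    # mkstemp leaves the descriptor open; close it ourselves
    os.remove(path)
```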
Document {% include %} Mention that jinja includes need to use the full path and cannot use relative paths.
@@ -88,6 +88,13 @@ the context into the included file is required: {% from 'lib.sls' import test with context %} +Includes must use full paths, like so: + +.. code-block:: jinja + :caption: spam/eggs.jinja + + {% include 'spam/foobar.jinja' %} + Including Context During Include/Import ---------------------------------------
Fixes "not()" reported by newer flake8 Not the one in my system
@@ -596,10 +596,10 @@ def manf_code_qtypart(subpart): num_format = re.compile(r"^\s*[\-\+]?\s*[0-9]*\s*[\.\/]*\s*?[0-9]*\s*$") string0_test = re.match(num_format, strings[0]) string1_test = re.match(num_format, strings[1]) - if string0_test and not(string1_test): + if string0_test and not string1_test: qty = strings[0].strip() part = strings[1].strip() - elif not(string0_test) and string1_test: + elif not string0_test and string1_test: qty = strings[1].strip() part = strings[0].strip() elif string0_test and string1_test:
[ci] Update Jenkins readme to match new directory structure Update Jenkins readme to match new directory structure
@@ -25,7 +25,7 @@ Jenkins runs all of the linux-based TVM CI-enabled regression tests. This includ ## GitHub Actions -GitHub Actions is used to run Windows jobs, MacOS jobs, and various on-GitHub automations. These are defined in [`.github/workflows`](../.github/workflows/). These automations include bots to: +GitHub Actions is used to run Windows jobs, MacOS jobs, and various on-GitHub automations. These are defined in [`.github/workflows`](../../.github/workflows/). These automations include bots to: * [cc people based on subscribed teams/topics](https://github.com/apache/tvm/issues/10317) * [allow non-committers to merge approved / CI passing PRs](https://discuss.tvm.apache.org/t/rfc-allow-merging-via-pr-comments/12220) * [add cc-ed people as reviewers on GitHub](https://discuss.tvm.apache.org/t/rfc-remove-codeowners/12095) @@ -39,19 +39,19 @@ https://github.com/apache/tvm/actions has the logs for each of these workflows. TVM uses Jenkins for running Linux continuous integration (CI) tests on [branches](https://ci.tlcpack.ai/job/tvm/) and [pull requests](https://ci.tlcpack.ai/job/tvm/view/change-requests/) through a -build configuration specified in a [`Jenkinsfile`](../Jenkinsfile). +build configuration specified in a [`Jenkinsfile`](../../Jenkinsfile). Other jobs run in GitHub Actions for Windows and MacOS jobs. ## `Jenkinsfile` -The template files in this directory are used to generate the [`Jenkinsfile`](../Jenkinsfile) used by Jenkins to run CI jobs for each commit to PRs and branches. +The template files in this directory are used to generate the [`Jenkinsfile`](../../Jenkinsfile) used by Jenkins to run CI jobs for each commit to PRs and branches. To regenerate the `Jenkinsfile`, run ```bash python3 -mvenv _venv -_venv/bin/pip3 install -r jenkins/requirements.txt -_venv/bin/python3 jenkins/generate.py +_venv/bin/pip3 install -r ci/jenkins/requirements.txt +_venv/bin/python3 ci/jenkins/generate.py ``` # Infrastructure
All TrainingRequests templ: paste GET params to form action query string This complements changes in
{% block content %} {% if requests %} - <form role="form" class="form-horizontal" method="post"> + <form role="form" class="form-horizontal" method="post" action="{% url 'all_trainingrequests' %}?{{ request.GET.urlencode }}"> {% if form.errors.requests or match_form.errors.requests %} <div class="alert alert-danger" role="alert">You didn't select any request.</div> {% elif form.errors or match_form.errors %}
[IMPR] Don't use a list for lookup Searching in a list is O(n) whereas searching in a set is O(1). Use a set for the seen lookup table and improve the speed a lot.
@@ -18,7 +18,7 @@ Command line options: """ # # (C) Daniel Herding, 2005 -# (C) Pywikibot team, 2005-2017 +# (C) Pywikibot team, 2005-2018 # # Distributed under the terms of the MIT license. # @@ -59,16 +59,15 @@ def refresh_all(sysop=False): """Reload watchlists for all wikis where a watchlist is already present.""" cache_path = CachedRequest._get_cache_dir() files = os.listdir(cache_path) - seen = [] + seen = set() for filename in files: entry = CacheEntry(cache_path, filename) entry._load_cache() entry.parse_key() entry._rebuild() - if entry.site not in seen: - if entry._data.get('watchlistraw'): + if entry.site not in seen and 'watchlistraw' in entry._data: refresh(entry.site, sysop) - seen.append(entry.site) + seen.add(entry.site) def refresh_new(sysop=False):
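A quick illustration (unrelated to the Pywikibot code) of the complexity difference the message describes: membership tests are O(n) on a list and O(1) on average on a set.

```python
import timeit

items = list(range(100_000))
as_list = items
as_set = set(items)

# Look up an element near the end of the collection many times.
print(timeit.timeit(lambda: 99_999 in as_list, number=1_000))  # linear scan each time
print(timeit.timeit(lambda: 99_999 in as_set, number=1_000))   # constant-time hash lookup
```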
Change delivery tracking to a dict This allows handling the multiple flag a bit better Fixes
@@ -255,13 +255,29 @@ class ExamplePublisher(object): """ confirmation_type = method_frame.method.NAME.split('.')[1].lower() - LOGGER.info('Received %s for delivery tag: %i', confirmation_type, - method_frame.method.delivery_tag) + ack_multiple = method_frame.method.multiple + delivery_tag = method_frame.method.delivery_tag + + LOGGER.info('Received %s for delivery tag: %i (multiple: %s)', confirmation_type, delivery_tag, ack_multiple) + if confirmation_type == 'ack': self._acked += 1 elif confirmation_type == 'nack': self._nacked += 1 - self._deliveries.remove(method_frame.method.delivery_tag) + + del self._deliveries[delivery_tag] + + if ack_multiple: + for tmp_tag in list(self._deliveries.keys()): + if tmp_tag <= delivery_tag: + self._acked += 1 + del self._deliveries[tmp_tag] + + """ + NOTE: at some point you would check self._deliveries for stale + entries and decide to attempt re-delivery + """ + LOGGER.info( 'Published %i messages, %i have yet to be confirmed, ' '%i were acked and %i were nacked', self._message_number, @@ -304,7 +320,7 @@ class ExamplePublisher(object): json.dumps(message, ensure_ascii=False), properties) self._message_number += 1 - self._deliveries.append(self._message_number) + self._deliveries[self._message_number] = True LOGGER.info('Published message # %i', self._message_number) self.schedule_next_message() @@ -314,7 +330,7 @@ class ExamplePublisher(object): """ while not self._stopping: self._connection = None - self._deliveries = [] + self._deliveries = {} self._acked = 0 self._nacked = 0 self._message_number = 0 @@ -324,9 +340,7 @@ class ExamplePublisher(object): self._connection.ioloop.start() except KeyboardInterrupt: self.stop() - if (self._connection is not None and - not self._connection.is_closed): - # Finish closing + if (self._connection is not None and not self._connection.is_closed): self._connection.ioloop.start() LOGGER.info('Stopped')
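A standalone sketch of the bookkeeping pattern above, without pika: unconfirmed deliveries live in a dict, and a confirmation with `multiple=True` settles every outstanding tag up to and including the one received.

```python
deliveries = {}  # delivery_tag -> payload still awaiting confirmation

def publish(tag: int, payload: str) -> None:
    deliveries[tag] = payload

def on_confirm(delivery_tag: int, multiple: bool) -> list:
    """Drop confirmed tags from the dict and return their payloads."""
    tags = [t for t in list(deliveries) if t <= delivery_tag] if multiple else [delivery_tag]
    return [deliveries.pop(t) for t in tags if t in deliveries]

publish(1, "a"); publish(2, "b"); publish(3, "c")
print(on_confirm(2, multiple=True))   # ['a', 'b']
print(on_confirm(3, multiple=False))  # ['c']
```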
Ctrl+g as equivalent to the big Go button Thanks to for the suggestion.
@@ -101,6 +101,7 @@ DEFAULT_KEYMAP = { "ctrl+f": ("", "dialog_fill", "control Fill",), "ctrl+i": ("element* select^",), "ctrl+d": ("element copy",), + "ctrl+g": ("planz clear copy preprocess validate blob preopt optimize spool0",), "ctrl+o": ("", "outline",), "ctrl+r": ("rect 0 0 1000 1000",), "ctrl+s": ("", "dialog_stroke", "control Stroke",),
Catching exception in actor writing summaries for non-scalar metrics. Actor assumes all metrics produce scalar results but the metric API does not restrict it to a scalar.
@@ -160,17 +160,26 @@ class Actor(object): # Generate summaries against the train_step for m in self._metrics: tag = m.name + try: tf.summary.scalar( name=os.path.join("Metrics/", self._name, tag), data=m.result(), step=self._train_step) + except ValueError: + logging.error("Scalar summary could not be written for metric %s", + m) # Generate summaries against the reference_metrics for reference_metric in self._reference_metrics: tag = "Metrics/{}/{}".format(m.name, reference_metric.name) + try: tf.summary.scalar( name=os.path.join(self._name, tag), data=m.result(), step=reference_metric.result()) + except ValueError: + logging.error( + "Scalar summary could not be written for reference_metric %s", + m) def log_metrics(self): """Logs metric results to stdout."""
Skip race condition tests Until we've made them less flaky.
@@ -245,6 +245,9 @@ def test_task_definition_registration( assert len(ecs.list_task_definitions()["taskDefinitionArns"]) == len(task_definitions) + 1 [email protected]( + "https://buildkite.com/dagster/dagster/builds/42816#018530eb-8d74-4934-bbbc-4acb6db1cdaf" +) def test_task_definition_registration_race_condition(ecs, instance, workspace, run): initial_task_definitions = ecs.list_task_definitions()["taskDefinitionArns"] initial_tasks = ecs.list_tasks()["taskArns"]
[IMPR] use float for statistics Show float values for statistical information. Read operations are often faster than 1/s and the printed value is just 0, which looks faulty. Use a float value instead. Use str.format instead of the modulo operator.
@@ -1311,16 +1311,18 @@ class BaseBot(OptionHandler): delta = (pywikibot.Timestamp.now() - self._start_ts) seconds = int(delta.total_seconds()) if delta.days: - pywikibot.output('Execution time: %d days, %d seconds' - % (delta.days, delta.seconds)) + pywikibot.output( + 'Execution time: {d.days} days, {d.seconds} seconds' + .format(d=delta)) else: - pywikibot.output('Execution time: %d seconds' % delta.seconds) + pywikibot.output('Execution time: {} seconds' + .format(delta.seconds)) if self._treat_counter: - pywikibot.output('Read operation time: %d seconds' - % (seconds / self._treat_counter)) + pywikibot.output('Read operation time: {:.1f} seconds' + .format(seconds / self._treat_counter)) if self._save_counter: - pywikibot.output('Write operation time: %d seconds' - % (seconds / self._save_counter)) + pywikibot.output('Write operation time: {:.1f} seconds' + .format(seconds / self._save_counter)) # exc_info contains exception from self.run() while terminating exc_info = sys.exc_info()
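A tiny, standalone illustration of the formatting change: `%d` truncates a sub-second rate to 0, while `'{:.1f}'.format(...)` keeps one decimal.

```python
seconds, operations = 42, 137  # made-up numbers: 137 reads finished in 42 s

print('Read operation time: %d seconds' % (seconds / operations))          # -> 0 seconds
print('Read operation time: {:.1f} seconds'.format(seconds / operations))  # -> 0.3 seconds
```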
Pin pycodestyle until flake8 supports it. See and for more details. Closes
@@ -22,7 +22,7 @@ mccabe==0.6.1 \ # pycodestyle is required by flake8 pycodestyle==2.3.1 \ --hash=sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9 \ - --hash=sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766 + --hash=sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766 # pyup: <2.4.0 # pyflakes is required by flake8 pyflakes==2.0.0 \ --hash=sha256:f661252913bc1dbe7fcfcbf0af0db3f42ab65aabd1a6ca68fe5d466bace94dae \
Add icy.tools to Cryptocurrency resolve
@@ -440,6 +440,7 @@ API | Description | Auth | HTTPS | CORS | | [Gateio](https://www.gate.io/api2) | API provides spot, margin and futures trading operations | `apiKey` | Yes | Unknown | | [Gemini](https://docs.gemini.com/rest-api/) | Cryptocurrencies Exchange | No | Yes | Unknown | | [Huobi](https://huobiapi.github.io/docs/spot/v1/en/) | Seychelles based cryptocurrency exchange | `apiKey` | Yes | Unknown | +| [icy.tools](https://developers.icy.tools/) | GraphQL based NFT API | `apiKey` | Yes | Unknown | | [Indodax](https://github.com/btcid/indodax-official-api-docs) | Trade your Bitcoin and other assets with rupiah | `apiKey` | Yes | Unknown | | [INFURA Ethereum](https://infura.io/product/ethereum) | Interaction with the Ethereum mainnet and several testnets | `apiKey` | Yes | Yes | | [Kraken](https://docs.kraken.com/rest/) | Cryptocurrencies Exchange | `apiKey` | Yes | Unknown |
fix pylint error for s3 variable name Test Plan: ran lint Reviewers: schrockn
def create_s3_session(signed=True, region_name=None, endpoint_url=None): - s3 = boto3.resource( + s3 = boto3.resource( # pylint:disable=C0103 's3', region_name=region_name, endpoint_url=endpoint_url - ).meta.client # pylint:disable=C0103 + ).meta.client if not signed: s3.meta.events.register('choose-signer.s3.*', disable_signing) return s3
lisa runner: fix limits on deploy concurrency. The original logic assigned all fitting cases to an environment. If all cases are assigned, the remaining tasks are not used. This fix assigns one case to the deploy task. Other test results can trigger more deployment tasks.
@@ -135,7 +135,7 @@ class LisaRunner(BaseRunner): return self._generate_task( task_method=self._deploy_environment_task, environment=environment, - test_results=can_run_results, + test_results=can_run_results[:1], ) # run on deployed environment
Create cached fibonacci algorithm * feat: Add `fib_recursive_cached` func * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see * doc: Show difference in time when caching algorithm
@@ -16,6 +16,7 @@ fib_memoization runtime: 0.0107 ms fib_binet runtime: 0.0174 ms """ +from functools import lru_cache from math import sqrt from time import time @@ -92,6 +93,39 @@ def fib_recursive(n: int) -> list[int]: return [fib_recursive_term(i) for i in range(n + 1)] +def fib_recursive_cached(n: int) -> list[int]: + """ + Calculates the first n (0-indexed) Fibonacci numbers using recursion + >>> fib_iterative(0) + [0] + >>> fib_iterative(1) + [0, 1] + >>> fib_iterative(5) + [0, 1, 1, 2, 3, 5] + >>> fib_iterative(10) + [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] + >>> fib_iterative(-1) + Traceback (most recent call last): + ... + Exception: n is negative + """ + + @lru_cache(maxsize=None) + def fib_recursive_term(i: int) -> int: + """ + Calculates the i-th (0-indexed) Fibonacci number using recursion + """ + if i < 0: + raise Exception("n is negative") + if i < 2: + return i + return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) + + if n < 0: + raise Exception("n is negative") + return [fib_recursive_term(i) for i in range(n + 1)] + + def fib_memoization(n: int) -> list[int]: """ Calculates the first n (0-indexed) Fibonacci numbers using memoization @@ -163,8 +197,9 @@ def fib_binet(n: int) -> list[int]: if __name__ == "__main__": - num = 20 + num = 30 time_func(fib_iterative, num) - time_func(fib_recursive, num) + time_func(fib_recursive, num) # Around 3s runtime + time_func(fib_recursive_cached, num) # Around 0ms runtime time_func(fib_memoization, num) time_func(fib_binet, num)
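The core of the new variant boils down to `functools.lru_cache`; a minimal standalone version (not the repository's exact code):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n: int) -> int:
    """Naive recursion made linear-time by memoising every computed term."""
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, effectively instant thanks to the cache
print(fib.cache_info())  # hit/miss statistics collected by lru_cache
```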
Fix error handling for room photo upload When posting a room picture there is now cleaner error handling for invalid file types or files with non-RGB mode.
@@ -11,12 +11,14 @@ from io import BytesIO from flask import jsonify, request, session from marshmallow import missing +from PIL import Image from sqlalchemy.orm import joinedload from webargs import fields from webargs.flaskparser import abort from werkzeug.exceptions import Forbidden, NotFound from indico.core.db import db +from indico.core.errors import UserValueError from indico.modules.rb import logger, rb_settings from indico.modules.rb.controllers import RHRoomBookingBase from indico.modules.rb.controllers.backend.rooms import RHRoomsPermissions @@ -373,8 +375,19 @@ class RHRoomPhoto(RHRoomAdminBase): return '', 204 def _process_POST(self): - photo = request.files['photo'].read() - self.room.photo = Photo(data=photo) + f = request.files['photo'] + try: + photo = Image.open(f) + except IOError: + raise UserValueError(_('You cannot upload this file as a room picture.')) + if photo.format.lower() not in {'jpeg', 'png', 'gif'}: + raise UserValueError(_('The file has an invalid format ({format}).').format(format=photo.format)) + if photo.mode != 'RGB': + photo = photo.convert('RGB') + image_bytes = BytesIO() + photo.save(image_bytes, 'JPEG') + image_bytes.seek(0) + self.room.photo = Photo(data=image_bytes.read()) token = build_rooms_spritesheet() return jsonify(rooms_sprite_token=unicode(token))
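The same validation idea, sketched outside Flask/Indico with plain Pillow calls; the exception type and allowed formats are assumptions mirroring the diff.

```python
from io import BytesIO
from PIL import Image

ALLOWED_FORMATS = {'jpeg', 'png', 'gif'}

def normalise_photo(raw: bytes) -> bytes:
    """Validate an uploaded picture and re-encode it as an RGB JPEG."""
    try:
        photo = Image.open(BytesIO(raw))
    except IOError:
        raise ValueError('not a valid image')
    if photo.format.lower() not in ALLOWED_FORMATS:
        raise ValueError('invalid format: {}'.format(photo.format))
    if photo.mode != 'RGB':
        photo = photo.convert('RGB')
    out = BytesIO()
    photo.save(out, 'JPEG')
    return out.getvalue()
```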
Disabling the EIS tests for now The EIS tests are going to continue to fail until the old files are replaced by the new files at the SDC and SPDF, so I'm temporarily disabling the EIS tests so we can check for other (unknown) regressions
@@ -55,7 +55,7 @@ jobs: run: | coverage run -a -m pyspedas.mms.tests.load_routine_tests coverage run -a -m pyspedas.mms.tests.feeps - coverage run -a -m pyspedas.mms.tests.eis + # coverage run -a -m pyspedas.mms.tests.eis coverage run -a -m pyspedas.mms.tests.file_filter coverage run -a -m pyspedas.mms.tests.data_rate_segments coverage run -a -m pyspedas.dscovr.tests.tests
Move old style exception to new style exceptions as old style exceptions are deprecated in Python 2 and removed from Python 3 because they are ambiguous. Addresses pull request from cclauss@.
@@ -176,7 +176,7 @@ class SamReaderTests(parameterized.TestCase): self.assertEqual(reads[0].aligned_sequence, 'CCC') self.assertEqual(reads[0].alignment.position.reference_name, 'chr1') self.assertEqual(reads[0].alignment.position.position, 0) - except ValueError, e: + except ValueError as e: if 'Failed to parse SAM record' not in str(e): self.fail('Parsing failed but unexpected exception was seen: ' + str(e))
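For reference, the only form accepted on Python 3 (a trivial standalone example, not the test file itself):

```python
try:
    int("not a number")
except ValueError as e:  # Python 3 syntax; `except ValueError, e` is Python 2 only
    print("caught:", e)
```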
Fix typo in TransformerEncoder and TransformerEncoderLayer documentation Summary: Fixes a few small typos in the documentation, changing "endocder" to "encoder" and "sequnce" to "sequence" Pull Request resolved:
@@ -159,7 +159,7 @@ class TransformerEncoder(Module): self.norm = norm def forward(self, src, mask=None, src_key_padding_mask=None): - r"""Pass the input through the endocder layers in turn. + r"""Pass the input through the encoder layers in turn. Args: src: the sequnce to the encoder (required). @@ -269,7 +269,7 @@ class TransformerEncoderLayer(Module): self.activation = _get_activation_fn(activation) def forward(self, src, src_mask=None, src_key_padding_mask=None): - r"""Pass the input through the endocder layer. + r"""Pass the input through the encoder layer. Args: src: the sequnce to the encoder layer (required).
explicitly check for first-party modules Modules under pwd could be identified as first-party instead of the default configuration.
@@ -187,6 +187,8 @@ class PathFinder(BaseFinder): return sections.THIRDPARTY if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix): return sections.STDLIB + if os.getcwd() in package_path: + return sections.FIRSTPARTY return self.config.default_section return None
Update flake8_diff.sh Remove the temporary remote if it was previously added, regardless of other conditions.
@@ -95,13 +95,14 @@ if [[ -z "$COMMIT_RANGE" ]]; then echo -e '\nRunning flake8 on the diff in the range'\ "$DIFF_RANGE ($(git rev-list $REV_RANGE | wc -l) commit(s)):" echo '--------------------------------------------------------------------------------' +else + echo "Got the commit range from Travis: $COMMIT_RANGE" +fi +#Remove temporary remote only if it was previously added. if [[ -n "$TMP_REMOTE" ]]; then git remote remove $TMP_REMOTE fi -else - echo "Got the commit range from Travis: $COMMIT_RANGE" -fi # We ignore files from doc/sphintext. Unfortunately there is no # way to do it with flake8 directly (the --exclude does not seem to
Remove unused set_stream_logger from mpi worker pool This comes after brief discussion on PR which makes a change to a different copy of set_stream_logger.
@@ -439,33 +439,6 @@ def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_ logger.addHandler(handler) -def set_stream_logger(name='parsl', level=logging.DEBUG, format_string=None, stream=None): - """Add a stream log handler. - - Args: - - name (string) : Set the logger name. - - level (logging.LEVEL) : Set to logging.DEBUG by default. - - format_string (sting) : Set to None by default. - - stream (io.TextIOWrapper) : Specify sys.stdout or sys.stderr for stream. - If not specified, the default stream for logging.StreamHandler is used. - - Returns: - - None - """ - if format_string is None: - format_string = "%(asctime)s %(name)s [%(levelname)s] Thread:%(thread)d %(message)s" - # format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s" - - global logger - logger = logging.getLogger(name) - logger.setLevel(logging.DEBUG) - handler = logging.StreamHandler(stream) - handler.setLevel(level) - formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S') - handler.setFormatter(formatter) - logger.addHandler(handler) - - if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -492,7 +465,6 @@ if __name__ == "__main__": os.makedirs(args.logdir, exist_ok=True) - # set_stream_logger() try: if rank == 0: start_file_logger('{}/manager.mpi_rank_{}.log'.format(args.logdir, rank),
Update readSettings.py symlink
@@ -19,9 +19,8 @@ class ReadSettings: def __init__(self, directory=None, filename="autoProcess.ini", logger=None): if not directory: - directory = os.path.dirname(sys.argv[0]) - - directory = os.path.realpath(directory) + directory = os.path.realpath(sys.argv[0]) + directory = os.path.dirname(directory) # Setup logging if logger:
Allow !role for any staff role Closes
@@ -77,7 +77,7 @@ class Information(Cog): channel_type_list = sorted(channel_type_list) return "\n".join(channel_type_list) - @has_any_role(*constants.MODERATION_ROLES) + @has_any_role(*constants.STAFF_ROLES) @command(name="roles") async def roles_info(self, ctx: Context) -> None: """Returns a list of all roles and their corresponding IDs.""" @@ -97,7 +97,7 @@ class Information(Cog): await LinePaginator.paginate(role_list, ctx, embed, empty=False) - @has_any_role(*constants.MODERATION_ROLES) + @has_any_role(*constants.STAFF_ROLES) @command(name="role") async def role_info(self, ctx: Context, *roles: Union[Role, str]) -> None: """
skip failing dagstermill test in buildkite Test Plan: bk Reviewers: dgibson, max
@@ -299,6 +299,8 @@ def test_resources_notebook(): assert msgs[3] == "Hello, notebook!" +# https://github.com/dagster-io/dagster/issues/3722 [email protected] @pytest.mark.notebook_test def test_resources_notebook_with_exception(): result = None
Change predecessor tree nodes to use sets for readers The order of readers doesn't matter and duplicates aren't needed, so use sets instead of lists.
@@ -188,9 +188,9 @@ class PredecessorTree: def add_reader(self, address, reader): def updater(data): if data is None: - return Predecessors(readers=[reader], writer=None) + return Predecessors(readers={reader}, writer=None) - data.readers.append(reader) + data.readers.add(reader) return data @@ -199,7 +199,7 @@ class PredecessorTree: def set_writer(self, address, writer): def updater(data): if data is None: - return Predecessors(readers=[], writer=writer) + return Predecessors(readers=set(), writer=writer) data.writer = writer data.readers.clear()
modeld: read frame_id if sm.update(0)>0 * read frameid if sm.update(0)>0 * move position to same line
@@ -181,7 +181,7 @@ int main(int argc, char **argv) { cl_mem yuv_cl; VisionBuf yuv_ion = visionbuf_allocate_cl(buf_info.buf_len, device_id, context, &yuv_cl); - uint32_t last_vipc_frame_id = 0; + uint32_t frame_id = 0, last_vipc_frame_id = 0; double last = 0; int desire = -1; while (!do_exit) { @@ -202,6 +202,7 @@ int main(int argc, char **argv) { if (sm.update(0) > 0){ // TODO: path planner timeout? desire = ((int)sm["pathPlan"].getPathPlan().getDesire()) - 1; + frame_id = sm["frame"].getFrame().getFrameId(); } double mt1 = 0, mt2 = 0; @@ -212,7 +213,6 @@ int main(int argc, char **argv) { } mat3 model_transform = matmul3(yuv_transform, transform); - uint32_t frame_id = sm["frame"].getFrame().getFrameId(); mt1 = millis_since_boot();
remove unused method Custom resolution of a schema class can now be done directly in a custom schema_name_resolver function
@@ -20,7 +20,6 @@ from marshmallow.orderedset import OrderedSet from apispec.compat import RegexType, iteritems from apispec.utils import OpenAPIVersion, build_reference from .common import ( - resolve_schema_cls, get_fields, make_schema_key, resolve_schema_instance, @@ -721,11 +720,3 @@ class OpenAPIConverter(object): return schema return self.resolve_nested_schema(schema) - - def resolve_schema_class(self, schema): - """Return schema class for given schema (instance or class) - - :param type|Schema|str: instance, class or class name of marshmallow.Schema - :return: schema class of given schema (instance or class) - """ - return resolve_schema_cls(schema)
2.5.5 Automatically generated by python-semantic-release
@@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers """ from datetime import timedelta -__version__ = "2.5.4" +__version__ = "2.5.5" PROJECT_URL = "https://github.com/custom-components/alexa_media_player/" ISSUE_URL = "{}issues".format(PROJECT_URL)
`socket`: `sendmsg` can accept `None` for the address Per the Python docs and socketmodule.c source, the address argument can be None.
@@ -624,7 +624,7 @@ class socket: __buffers: Iterable[ReadableBuffer], __ancdata: Iterable[_CMSGArg] = ..., __flags: int = ..., - __address: _Address = ..., + __address: _Address | None = ..., ) -> int: ... if sys.platform == "linux": def sendmsg_afalg(
Add to changelog Looks like a changelog entry was missed when the PR was merged.
@@ -13,6 +13,8 @@ Next Release (TBD) (`#958 <https://github.com/aws/chalice/pull/958>`__) * Log internal exceptions as errors (`#254 <https://github.com/aws/chalice/issues/254>`__) +* Generate swagger documentation from docstrings + (`#574 <https://github.com/aws/chalice/issues/574>`__) 1.6.1
Add brief note on normalization to convolution kernel docs [ci skip]
@@ -32,6 +32,8 @@ class Gaussian1DKernel(Kernel1D): The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- stddev : number @@ -95,6 +97,8 @@ class Gaussian2DKernel(Kernel2D): The Gaussian filter is a filter with great smoothing properties. It is isotropic and does not produce artifacts. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- x_stddev : float @@ -168,7 +172,9 @@ class Box1DKernel(Kernel1D): 1D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic - and can produce artifacts, when applied repeatedly to the same data. + and can produce artifacts when applied repeatedly to the same data. + + The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by @@ -236,7 +242,9 @@ class Box2DKernel(Kernel2D): 2D Box filter kernel. The Box filter or running mean is a smoothing filter. It is not isotropic - and can produce artifact, when applied repeatedly to the same data. + and can produce artifacts when applied repeatedly to the same data. + + The generated kernel is normalized so that it integrates to 1. By default the Box kernel uses the ``linear_interp`` discretization mode, which allows non-shifting, even-sized kernels. This is achieved by @@ -308,6 +316,8 @@ class Tophat2DKernel(Kernel2D): The Tophat filter is an isotropic smoothing filter. It can produce artifacts when applied repeatedly on the same data. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- radius : int @@ -366,6 +376,8 @@ class Ring2DKernel(Kernel2D): The Ring filter kernel is the difference between two Tophat kernels of different width. This kernel is useful for, e.g., background estimation. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- radius_in : number @@ -423,6 +435,8 @@ class Trapezoid1DKernel(Kernel1D): """ 1D trapezoid kernel. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- width : number @@ -481,6 +495,8 @@ class TrapezoidDisk2DKernel(Kernel2D): """ 2D trapezoid kernel. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- radius : number @@ -696,8 +712,9 @@ class AiryDisk2DKernel(Kernel2D): """ 2D Airy disk kernel. - This kernel models the diffraction pattern of a circular aperture. This - kernel is normalized so that it sums to 1. + This kernel models the diffraction pattern of a circular aperture. + + The generated kernel is normalized so that it integrates to 1. Parameters ---------- @@ -761,6 +778,8 @@ class Moffat2DKernel(Kernel2D): This kernel is a typical model for a seeing limited PSF. + The generated kernel is normalized so that it integrates to 1. + Parameters ---------- gamma : float
Debian: Make it harder to confuse the key * This also makes it easier for me to know if a given key is the right one, and not just go by suffix.
@@ -201,7 +201,7 @@ Codename: %(codename)s Architectures: i386 amd64 armel armhf powerpc Components: main Description: Apt repository for project Nuitka %(codename)s -SignWith: 2912B99C +SignWith: D96ADCA1377F1CEB6B5103F11BFC33752912B99C """ % {"codename": codename} )
Stop quoting JSON keys when parsing We chose to be strict about JSON to be nice and consistent.
@@ -152,10 +152,7 @@ class EnergySystem: empty = types.SimpleNamespace() empty.read = lambda *xs, **ks: () empty.headers = () - parse = lambda s: (json.loads(re.sub( - r'([{,] *)([^,:"{} ]*) *:', - r'\1"\2":', - s)) if s else {}) + parse = lambda s: (json.loads(s) if s else {}) data = {} listify = lambda x, n=None: (x if type(x) is list
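A minimal demonstration of the strictness this opts into: the standard `json` module rejects unquoted keys, which the removed regex used to paper over (example values are made up).

```python
import json

print(json.loads('{"unit": "kW"}'))  # valid JSON: keys are quoted
try:
    json.loads('{unit: "kW"}')       # unquoted key: the strict parser refuses it
except json.JSONDecodeError as exc:
    print('rejected:', exc)
```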
[internal] java: register dependency inference rules Register the dependency inference rules in the `java` backend. [ci skip-rust] [ci skip-build-wheels]
from pants.backend.java import tailor from pants.backend.java import util_rules as java_util_rules from pants.backend.java.compile import javac +from pants.backend.java.dependency_inference import ( + import_parser, + java_parser, + java_parser_launcher, + package_mapper, +) +from pants.backend.java.dependency_inference import rules as dependency_inference_rules from pants.backend.java.target_types import ( JavaSourcesGeneratorTarget, JavaSourceTarget, @@ -36,6 +43,11 @@ def rules(): *coursier.rules(), *coursier_fetch.rules(), *coursier_setup.rules(), + *import_parser.rules(), + *java_parser.rules(), + *java_parser_launcher.rules(), + *package_mapper.rules(), + *dependency_inference_rules.rules(), *tailor.rules(), *jvm_util_rules.rules(), *java_util_rules.rules(),
Bump up waiting for MongoDB from 15s to 20s I've noticed that `mongo --eval 'db.version()'` has been failing fairly regularly in the last few weeks. Hopefully that extra 5s is enough.
@@ -39,7 +39,7 @@ matrix: before_install: - bash .install_mongodb_on_travis.sh -- sleep 15 # https://docs.travis-ci.com/user/database-setup/#MongoDB-does-not-immediately-accept-connections +- sleep 20 # https://docs.travis-ci.com/user/database-setup/#mongodb-does-not-immediately-accept-connections - mongo --eval 'db.version();' install:
doc: update xcode install instructions to match Node's BUILDING PR-URL:
@@ -35,8 +35,7 @@ You will also need to install: * `python` (`v2.7` recommended, `v3.x.x` is __*not*__ supported) (already installed on macOS) * [Xcode](https://developer.apple.com/xcode/download/) - * You also need to install the `Command Line Tools` via Xcode. You can find this under the menu `Xcode -> Preferences -> Locations` (or by running `xcode-select --install` in your Terminal) - * This step will install `gcc` and the related toolchain containing `make` + * You also need to install the `XCode Command Line Tools` by running `xcode-select --install`. Alternatively, if you already have the full Xcode installed, you can find them under the menu `Xcode -> Open Developer Tool -> More Developer Tools...`. This step will install `clang`, `clang++`, and `make`. ### On Windows
Remove explicit dependency on py4j The `hyperopt` package now lists `py4j` as a dependency: See: b/206631257
@@ -240,8 +240,6 @@ RUN pip install mpld3 && \ pip install haversine && \ pip install toolz cytoolz && \ pip install plotly && \ - # b/206631257: hyperopt requires py4j >= 0.2.6 requires py4j but doesn't list it as a dependency. Remove once hyperopt properly list it. - pip install py4j && \ pip install hyperopt && \ pip install fitter && \ pip install langid && \
Force update_webext_descriptions to run in the 'addons' queue (Trying to understand the issue with that task being spawned too many times - the command that spawns it uses chaining, so we want to see how forcing it to use a specific queue changes the behaviour)
@@ -1114,6 +1114,7 @@ CELERY_ROUTES = { 'olympia.addons.tasks.update_incompatible_appversions': { 'queue': 'addons'}, 'olympia.addons.tasks.version_changed': {'queue': 'addons'}, + 'olympia.files.tasks.update_webext_descriptions': {'queue': 'addons'}, # API 'olympia.api.tasks.process_results': {'queue': 'api'},
[CI][DOC] Fix incorrect commands in docs/readme.md Fix incorrect commands in docs/readme.md
@@ -79,14 +79,14 @@ the path that matches the regular expression pattern. For example, to only build tutorials under `/vta/tutorials`, run ```bash -python tests/scripts/ci.py docs --tutorials=/vta/tutorials +python tests/scripts/ci.py docs --tutorial-pattern=/vta/tutorials ``` To only build one specific file, do ```bash # The slash \ is used to get . in regular expression -python tests/scripts/ci.py docs --tutorials=file_name\.py +python tests/scripts/ci.py docs --tutorial-pattern=file_name\.py ``` ## Helper Scripts @@ -95,14 +95,14 @@ You can run the following script to reproduce the CI sphinx pre-check stage. This script skips the tutorial executions and is useful to quickly check the content. ```bash -python tests/scripts/ci.py docs --precheck +tests/scripts/task_python_docs.sh ``` The following script runs the full build which includes tutorial executions. You will need a GPU CI environment. ```bash -python tests/scripts/ci.py --precheck --full +python tests/scripts/ci.py docs --full ``` ## Define the Order of Tutorials
Update Models.py Fix a bug. The argument ``lengths`` is not passed. Hence, the hidden states with PAD as input are also included in the attention models, which is not correct.
@@ -549,7 +549,8 @@ class NMTModel(nn.Module): enc_state = self.decoder.init_decoder_state(src, context, enc_hidden) out, dec_state, attns = self.decoder(tgt, context, enc_state if dec_state is None - else dec_state) + else dec_state, + context_lengths=lengths) if self.multigpu: # Not yet supported on multi-gpu dec_state = None
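To illustrate why passing the source lengths matters, here is a generic masking sketch (not OpenNMT-py's internal code, and the tensor shapes are illustrative): without a length-based mask, PAD positions receive non-zero attention weight.

```python
import torch

scores = torch.randn(2, 5)                # attention scores over 5 source steps
lengths = torch.tensor([5, 3])            # second sequence has 2 PAD steps

mask = torch.arange(5).unsqueeze(0) >= lengths.unsqueeze(1)
masked = scores.masked_fill(mask, float("-inf"))
attn = torch.softmax(masked, dim=-1)      # PAD positions now get zero weight
print(attn[1, 3:])                        # tensor([0., 0.])
```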
Update request.js make Server Error Title translatable
@@ -492,7 +492,7 @@ frappe.request.report_error = function(xhr, request_opts) { if (!frappe.error_dialog) { frappe.error_dialog = new frappe.ui.Dialog({ - title: 'Server Error', + title: __('Server Error'), primary_action_label: __('Report'), primary_action: () => { if (error_report_email) {
chore: fix .editorconfig for Makefile & markdown A Makefile must use tabs for indentation, while in Markdown files two trailing spaces mean a line break.
+# EditorConfig helps developers define and maintain consistent coding styles between different editors and IDEs. +# See: http://editorconfig.org root = true [*] @@ -7,3 +9,9 @@ indent_size = 4 indent_style = space insert_final_newline = true trim_trailing_whitespace = true + +[*.md] +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab
Update android_anubis.txt ```android_hydra``` instead:
@@ -1521,10 +1521,6 @@ www-ecimer-destek.com safepage.cloud -# Reference: https://twitter.com/bl4ckh0l3z/status/1280477627137101826 - -dontworryman.club - # Reference: https://twitter.com/ReBensk/status/1280121056141901829 # Reference: https://twitter.com/malwrhunterteam/status/1301141047679094784 # Reference: https://www.virustotal.com/gui/ip-address/85.95.240.191/relations
Bump timeouts for GKE cluster operations to 3 hours The previous PR test timed out after 30 minutes even though it normally takes only around 10 minutes. The cluster was actually created in the end but it was delayed due to current GCP issues.
@@ -25,8 +25,8 @@ resource "google_container_cluster" "cluster" { } timeouts { - create = "30m" - delete = "30m" - update = "30m" + create = "180m" + delete = "180m" + update = "180m" } }
Adds handling for daemon not sending `initial_target_state` Currently, if the daemon doesn't send `initial_target_state` for new pool_wallets, it will cause a `KeyError`. This commit makes it give a more informative error, so less users are confused by this.
@@ -604,6 +604,9 @@ class WalletRpcApi: } elif request["wallet_type"] == "pool_wallet": if request["mode"] == "new": + if "initial_target_state" not in request: + raise AttributeError("Daemon didn't send `initial_target_state`. Try updating the daemon.") + owner_puzzle_hash: bytes32 = await self.service.wallet_state_manager.main_wallet.get_puzzle_hash(True) from chia.pools.pool_wallet_info import initial_pool_state_from_dict
added ParameterEnsemble.from_parfiles() classmethod. needs a test, just too lazy today
@@ -2,6 +2,7 @@ from __future__ import print_function, division import os from datetime import datetime import copy +import warnings import math import numpy as np import pandas as pd @@ -1328,6 +1329,65 @@ class ParameterEnsemble(Ensemble): self.loc[pfile] = df.loc[:,'parval1'] self.loc[:,:] = self.loc[:,:].astype(np.float64) + @classmethod + def from_parfiles(cls,pst,parfile_names,real_names=None): + """ create a parameter ensemble from parfiles. Accepts parfiles with less than the + parameters in the control (get NaNs in the ensemble) or extra parameters in the + parfiles (get dropped) + + Parameters: + pst : pyemu.Pst + + parfile_names : list of str + par file names + + real_names : str + optional list of realization names. If None, a single integer counter is used + + Returns: + pyemu.ParameterEnsemble + + + """ + if isinstance(pst,str): + pst = pyemu.Pst(pst) + dfs = {} + if real_names is not None: + assert len(real_names) == len(parfile_names) + else: + real_names = np.arange(len(parfile_names)) + + for rname,pfile in zip(real_names,parfile_names): + assert os.path.exists(pfile), "ParameterEnsemble.read_parfiles() error: " + \ + "file: {0} not found".format(pfile) + df = read_parfile(pfile) + dfs[rname] = df.parval1.values + + df_all = pd.DataFrame(data=dfs).T + df_all.columns = df.index + + + + if len(pst.par_names) != df_all.shape[1]: + #if len(pst.par_names) < df_all.shape[1]: + # raise Exception("pst is not compatible with par files") + pset = set(pst.par_names) + dset = set(df_all.columns) + diff = pset.difference(dset) + warnings.warn("the following parameters are not in the par files (getting NaNs) :{0}". + format(','.join(diff))) + blank_df = pd.DataFrame(index=df_all.index,columns=diff) + + df_all = pd.concat([df_all,blank_df],axis=1) + + diff = dset.difference(pset) + if len(diff) > 0: + warnings.warn("the following par file parameters are not in the control (being dropped):{0}". + format(','.join(diff))) + df_all = df_all.loc[:, pst.par_names] + + return ParameterEnsemble.from_dataframe(df=df_all,pst=pst) + def to_csv(self,*args,**kwargs): """overload of pandas.DataFrame.to_csv() to account
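A usage sketch for the new classmethod, based only on the signature added above; the control-file and par-file names are hypothetical:

```python
import glob
import pyemu

pst = pyemu.Pst("model.pst")                     # hypothetical control file
parfiles = sorted(glob.glob("model_real*.par"))  # hypothetical par files

pe = pyemu.ParameterEnsemble.from_parfiles(
    pst, parfiles,
    real_names=["real_{0}".format(i) for i in range(len(parfiles))],
)
pe.to_csv("prior_ensemble.csv")
```

Per the warnings added in the diff, parameters missing from a par file come back as NaN and extra par-file parameters are dropped.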
Pick machine list based on machine type * Pick machine list based on machine type Fixes
@@ -829,10 +829,11 @@ def delete_and_create_osd_node_ipi(osd_node_name): drain_nodes([osd_node_name]) log.info("Getting machine name from specified node name") machine_name = machine.get_machine_from_node_name(osd_node_name) + machine_type = machine.get_machine_type(machine_name) log.info(f"Node {osd_node_name} associated machine is {machine_name}") log.info(f"Deleting machine {machine_name} and waiting for new machine to come up") machine.delete_machine_and_check_state_of_new_spinned_machine(machine_name) - new_machine_list = machine.get_machines() + new_machine_list = machine.get_machines(machine_type=machine_type) for machines in new_machine_list: # Trimming is done to get just machine name # eg:- machine_name:- prsurve-40-ocs-43-kbrvf-worker-us-east-2b-nlgkr @@ -843,9 +844,12 @@ def delete_and_create_osd_node_ipi(osd_node_name): log.info("Waiting for new worker node to be in ready state") machine.wait_for_new_node_to_be_ready(machineset_name) new_node_name = get_node_from_machine_name(new_machine_name) + if not is_node_labeled(new_node_name): log.info("Adding ocs label to newly created worker node") node_obj = ocp.OCP(kind="node") - node_obj.add_label(resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL) + node_obj.add_label( + resource_name=new_node_name, label=constants.OPERATOR_NODE_LABEL + ) log.info(f"Successfully labeled {new_node_name} with OCS storage label") return new_node_name
Fixed lint Adding NOQA comments to ignore stupid error which can't be fixed.
# Made by Maxim Iliouchenko (https://github.com/maxily1) # Importing Libraries -from moviepy.editor import * +from moviepy.editor import * # noqa: F403, E261 import argparse # Creating an argument parser @@ -25,11 +25,11 @@ color_choice = args.c font_size = args.s # Start of code -clip = VideoFileClip(file_name, audio=True) +clip = VideoFileClip(file_name, audio=True) # noqa: F405, E261 w, h = clip.size # A clip with a text, and semi-opaque bg -text = TextClip( +text = TextClip( # noqa: F405, E261 watermark_text, font=font_choice, color=color_choice, fontsize=font_size ) @@ -38,9 +38,7 @@ text_col = text.on_color( color=(0, 0, 0), pos=(6, 'center'), col_opacity=0.6 ) - - # Save the file -final_clip = CompositeVideoClip([clip, text_col]) +final_clip = CompositeVideoClip([clip, text_col]) # noqa: F405, E261 final_clip.duration = clip.duration final_clip.write_videofile("Output.mp4", fps=24, codec='libx264')
[Core] Unset RAY_RAYLET_NODE_ID for long running nightly tests For long running tests that have ray stop as part of the prepare steps, the node started in the test will re-use the node id. More details in this
run: timeout: 86400 prepare: ray stop - script: python workloads/actor_deaths.py + script: RAY_RAYLET_NODE_ID="" python workloads/actor_deaths.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/many_actor_tasks.py + script: RAY_RAYLET_NODE_ID="" python workloads/many_actor_tasks.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/many_drivers.py --iteration-num=4000 + script: RAY_RAYLET_NODE_ID="" python workloads/many_drivers.py --iteration-num=4000 long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/many_tasks.py + script: RAY_RAYLET_NODE_ID="" python workloads/many_tasks.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/many_tasks_serialized_ids.py + script: RAY_RAYLET_NODE_ID="" python workloads/many_tasks_serialized_ids.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/node_failures.py + script: RAY_RAYLET_NODE_ID="" python workloads/node_failures.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/pbt.py + script: RAY_RAYLET_NODE_ID="" python workloads/pbt.py long_running: true type: sdk_command file_manager: sdk run: timeout: 86400 prepare: ray stop - script: python workloads/serve.py + script: RAY_RAYLET_NODE_ID="" python workloads/serve.py long_running: true type: sdk_command file_manager: job run: timeout: 86400 prepare: ray stop - script: python workloads/serve_failure.py + script: RAY_RAYLET_NODE_ID="" python workloads/serve_failure.py long_running: true type: sdk_command file_manager: job
Update wind capacity for Norway Some new values based on numbers from NVE. Expect it to increase by another 900MW in the coming months.
"nuclear": 0, "oil": 0, "solar": 0, - "wind": 1749 + "wind": 2203 }, "contributors": [ "https://github.com/corradio" "nuclear": 0, "oil": 0, "solar": 0, - "wind": 627 + "wind": 651 }, "parsers": { "consumption": "ENTSOE.fetch_consumption", "nuclear": 0, "oil": 0, "solar": 0, - "wind": 643 + "wind": 998 }, "parsers": { "consumption": "ENTSOE.fetch_consumption", "nuclear": 0, "oil": 0, "solar": 0, - "wind": 313 + "wind": 389 }, "parsers": { "consumption": "ENTSOE.fetch_consumption",
Upgrade RSA Security Analytics integration Review Fixes
@@ -1203,7 +1203,7 @@ script: }); md.push(toPush); ctx["Netwitness.Event"].push(toPush); - return [{Type: entryTypes.note, Contents: event, ContentsFormat: formats.json, HumanReadable: tableToMarkdown(command, md), ReadableContentsFormat: formats.markdown, EntryContext: ctx}, content]; + return {Type: entryTypes.note, Contents: event, ContentsFormat: formats.json, HumanReadable: tableToMarkdown(command, md), ReadableContentsFormat: formats.markdown, EntryContext: ctx}; } function buildRetValGetAlerts (alerts) { @@ -1236,6 +1236,9 @@ script: function buildRetValGetAlertOrig (alertOrig) { var md = []; var ctx = {"Netwitness.Event": []} + if(!alertOrig.events) { + return {"ContentsFormat": formats["markdown"], "Type": entryTypes["error"], "Contents": "Received an error from NetWitness Please ensure that the referred alert Id exist in NetWitness"}; + } alertOrig.events.forEach(function (evnt) { for (var key in evnt) { if (key === 'time') { @@ -1348,9 +1351,18 @@ script: case 'netwitness-im-update-incident': var availableAssignees = getAvailableAssignees(sessionId, args); var update_incident = updateIncident(sessionId, args, incidentManagementId, availableAssignees); - results = {"ContentsFormat": formats["text"], "Type": entryTypes["note"], "Contents": "Incident updated successfully."}; - if(!(update_incident.success==true && update_incident.data >= 1)){ - results = {"ContentsFormat": formats["markdown"], "Type": entryTypes["error"], "Contents": "Received an error from NetWitness Please ensure that the referred incidents exist in NetWitness.\n" + tblToMd("Data returned:", update_incident)}; + if( update_incident.success != true) { + results = {"ContentsFormat": formats["markdown"], "Type": entryTypes["error"], "Contents": "Didn't succed to update incident.\n" + tableToMarkdown("Data returned:", update_incident)}; + } + else if (update_incident.success == true && update_incident.data < 1) { + results = {"ContentsFormat": formats["markdown"], "Type": entryTypes["error"], "Contents": "Received an error from NetWitness Please ensure that the referred incidents exist in NetWitness. Incidents count = " + update_incident.data}; + } + else { + results = { + "ContentsFormat": formats["text"], + "Type": entryTypes["note"], + "Contents": "Incident updated successfully." + }; } break; default:
Updated the report link in README.md Fixes
@@ -86,7 +86,7 @@ pip install -e . * [Documentation](https://texar.readthedocs.io) ### Reference -If you use Texar, please cite the [report](https://arxiv.org/pdf/1809.00794.pdf) with the following BibTex entry: +If you use Texar, please cite the [report](https://arxiv.org/abs/1809.00794) with the following BibTex entry: ``` Texar: A Modularized, Versatile, and Extensible Toolkit for Text Generation Zhiting Hu, Haoran Shi, Zichao Yang, Bowen Tan, Tiancheng Zhao, Junxian He, Wentao Wang, Xingjiang Yu, Lianhui Qin, Di Wang, Xuezhe Ma, Hector Liu, Xiaodan Liang, Wanrong Zhu, Devendra Singh Sachan, Eric P. Xing
Check only dependencies that are in the Content folder We ignore native dependencies and dependencies from plugins
@@ -33,6 +33,7 @@ class ValidateNoDependencies(pyblish.api.InstancePlugin): )) if dependencies: for dep in dependencies: + if str(dep).startswith("/Game/"): all_dependencies.append(str(dep)) if all_dependencies:
Update files.py Minor corrections / clarifications.
@@ -170,7 +170,7 @@ class FileMod(CoreModule): ('file:txtref', {}, [ ('file', {'ptype': 'file:bytes', 'doc': 'The guid of the file containing the reference.', 'ro': 1}), ('xref', {'ptype': 'propvalu', - 'doc': 'The form=valu of the object referenced in the fiole, e.g., inet:fqdn=foo.com.', 'ro': 1}), + 'doc': 'The form=valu of the object referenced in the file, e.g., inet:fqdn=foo.com.', 'ro': 1}), ('xref:prop', {'ptype': 'str', 'doc': 'The property (form) of the referenced object, as specified by the propvalu.', 'ro': 1}), ('xref:intval', {'ptype': 'int', @@ -181,7 +181,7 @@ class FileMod(CoreModule): ('file:path', {}, ( ('dir', {'ptype': 'file:path', - 'doc': 'The parent directory of the file path. Can be the directory path (if the file:path ends in a file name) or the parent directory (if the file:path is itself a directory).', 'ro': 1}), + 'doc': 'The parent directory of the file path.', 'ro': 1}), ('ext', {'ptype': 'str:lwr', 'doc': 'The file extension of the file name, (if present); for example: exe, bat, py, docx.', 'ro': 1}), ('base', {'ptype': 'file:base',
Fix libevent pinning Version 2.0.22 has introduced new symbols, so we cannot allow conda to install a lower version
@@ -53,7 +53,7 @@ pinned = { 'icu': 'icu 58.*', # 54.1 'jpeg': 'jpeg 9*', # 9b (but defaults is probably using 8d) 'libblitz': 'libblitz 0.10|0.10.*', # NA - 'libevent': 'libevent 2.0.*', # 2.0.22 + 'libevent': 'libevent 2.0.22', # 2.0.22 'libmatio': 'libmatio 1.5.*', # NA 'libnetcdf': 'libnetcdf 4.4.*', # 4.4.1 'libpng': 'libpng >=1.6.28,<1.7', # 1.6.27
chore(refactor): Changing name of sample region for Job product * chore(refactor): Changing name of sample region for Job product * Revert "chore(refactor): Changing name of sample region for Job product" This reverts commit * Changing only conflicting region names
@@ -24,7 +24,7 @@ parent = 'projects/' + os.environ['GOOGLE_CLOUD_PROJECT'] # [END instantiate] -# [START batch_job_create] +# [START job_discovery_batch_job_create] def batch_job_create(client_service, company_name): import base_job_sample created_jobs = [] @@ -55,10 +55,10 @@ def batch_job_create(client_service, company_name): batch.execute() return created_jobs -# [END batch_job_create] +# [END job_discovery_batch_job_create] -# [START batch_job_update] +# [START job_discovery_batch_job_update] def batch_job_update(client_service, jobs_to_be_updated): updated_jobs = [] @@ -91,10 +91,10 @@ def batch_job_update(client_service, jobs_to_be_updated): batch.execute() return updated_jobs -# [END batch_job_update] +# [END job_discovery_batch_job_update] -# [START batch_job_delete] +# [START job_discovery_batch_job_delete] def batch_job_delete(client_service, jobs_to_be_deleted): def job_delete_callback(request_id, response, exception): @@ -113,7 +113,7 @@ def batch_job_delete(client_service, jobs_to_be_deleted): callback=job_delete_callback) batch.execute() -# [END batch_job_delete] +# [END job_discovery_batch_job_delete] def run_sample():
tests: update docker image tag used in ooo job ceph-ansible@master isn't intended to deploy luminous. Let's use latest-master on ceph-ansible@master branch
@@ -10,7 +10,7 @@ all: ceph_mgr_docker_extra_env: '-e MGR_DASHBOARD=0' cluster: mycluster ceph_docker_image: ceph/daemon - ceph_docker_image_tag: v3.0.3-stable-3.0-luminous-centos-7-x86_64 + ceph_docker_image_tag: latest-master ceph_docker_registry: docker.io ceph_origin: repository ceph_repository: community
Add arguments to have different masses for x and y The mass provided to the element can be different on the x and y direction (e.g. different support inertia for x and y directions).
This module defines the PointMass class which will be used to link elements. """ import numpy as np -from .element import Element +from ross.element import Element __all__ = ["PointMass"] @@ -13,26 +13,50 @@ class PointMass(Element): This class will create a point mass element. This element can be used to link other elements in the analysis. + The mass provided to the element can be different on the x and y direction + (e.g. different support inertia for x and y directions). Parameters ---------- n: int Node which the bearing will be located in. - m: float + m: float, optional Mass for the element. - """ + mx: float, optional + Mass for the element on the x direction. + my: float, optional + Mass for the element on the y direction. - def __init__(self, n=None, m=None): + Examples + -------- + >>> p0 = PointMass(n=0, m=2) + >>> p0.M() + array([[2, 0], + [0, 2]]) + >>> p1 = PointMass(n=0, mx=2, my=3) + >>> p1.M() + array([[2, 0], + [0, 3]]) + """ + def __init__(self, n=None, m=None, mx=None, my=None): self.n = n self.m = m + if mx is None and my is None: + mx = m + my = m + + self.mx = mx + self.my = my + def M(self): """Mass matrix.""" - m = self.m + mx = self.mx + my = self.my # fmt: off - M = np.array([[m, 0], - [0, m]]) + M = np.array([[mx, 0], + [0, my]]) # fmt: on return M
Update README: tweak: header Make this header's importance on par with the other headers
@@ -72,7 +72,7 @@ Here are flows to try out, from simple to specific detailed variants. - [Learn about off-chain services](READMEs/services.md) - Ocean Provider for data services, Aquarius metadata store - [Learn about wallets](READMEs/wallets.md) - on generating, storing, and accessing private keys -## Specialty flows +### Specialty flows - [Key-value database](READMEs/key-value-flow.md) - use data NFTs to store arbitrary key-value pairs on-chain. - [Profile NFTs](READMEs/profile-nfts-flow.md) - enable "login with Web3" where Dapp can access private user profile data.
Get rid of dead Makefile targets There are no longer any tests left over that can be run on tox. All tests are run as D-Bus tests. Remove similar coverage target.
@@ -4,14 +4,6 @@ TOX=tox lint: $(TOX) -c tox.ini -e lint -.PHONY: coverage -coverage: - $(TOX) -c tox.ini -e coverage - -.PHONY: test -test: - $(TOX) -c tox.ini -e test - .PHONY: fmt fmt: yapf --style pep8 --recursive --in-place check.py setup.py src tests
pylint: Correct 'dangerous-default-value' W0102: Dangerous default value [] as argument Arrays are mutable objects, so these should not be used for default assignment, as the same object will be used for every function call, not a unique instance as is expected.
@@ -89,7 +89,7 @@ class VLANHost(FaucetHost): vlans = None vlan_intfs = None - def config(self, vlans=[100], **params): # pylint: disable=arguments-differ + def config(self, vlans=None, **params): # pylint: disable=arguments-differ """Configure VLANHost according to (optional) parameters: vlans (list): List of VLAN IDs (for the VLANs the host is configured to have) @@ -97,6 +97,8 @@ class VLANHost(FaucetHost): vlan_intfs (dict): Dictionary of interface IP addresses keyed by VLAN indices """ super_config = super().config(**params) + if vlans is None: + vlans = [100] self.vlans = vlans self.vlan_intfs = {} cmds = []
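A self-contained illustration of the W0102 pitfall described above (function names are hypothetical, not from the FAUCET codebase): the default list is created once at definition time and shared across calls, while the `None` sentinel used in the fix creates a fresh list per call.

```python
def append_bad(item, bucket=[]):      # one list object reused for every call
    bucket.append(item)
    return bucket

def append_good(item, bucket=None):   # fresh list per call, as in the fix above
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket

print(append_bad(1))   # [1]
print(append_bad(2))   # [1, 2]  <- carry-over from the previous call
print(append_good(1))  # [1]
print(append_good(2))  # [2]
```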
Add some methods on ASTList And correct an error on Array
@@ -15,9 +15,11 @@ package body Liblktlang.Implementation.Extensions is "@builtin enum Bool (false, true) {}" & ASCII.LF & "@builtin generic[T] struct Array {" & ASCII.LF & " fun __call__(index : Int): T" & ASCII.LF & - " fun length(): int" & ASCII.LF & + " fun length(): Int" & ASCII.LF & "}" & ASCII.LF & "@builtin generic[T] struct ASTList {" & ASCII.LF & + " fun __call__(index : Int): T" & ASCII.LF & + " fun length(): Int" & ASCII.LF & "}" & ASCII.LF & "";
Implemented a number of properties & error checking Implemented the function, frequency & voltage (amplitude, offset, high & low) properties, and implemented error checking
# THE SOFTWARE. # +import logging +log = logging.getLogger(__name__) +log.addHandler(logging.NullHandler()) + from pymeasure.instruments import Instrument +from pymeasure.instruments.validators import strict_discrete_set, strict_range class Agilent33220A(Instrument): - pass + function = Instrument.control( + "FUNC?", "FUNC %s", + """ A string property that controls the output waveform. Can be set to: + SIN<USOID>, SQU<ARE>, RAMP, PULS<E>, NOIS<E>, DC, USER. """, + validator=strict_discrete_set, + values=["SINUSOID", "SIN", "SQUARE", "SQU", "RAMP", + "PULSE", "PULS", "NOISE", "NOIS", "DC", "USER"], + ) + + frequency = Instrument.control( + "FREQ?", "FREQ %s", + """ A floating point property that controls the frequency of the output + waveform in Hz, from ... to .... Can be set. """, + # validator=strict_range, + # values=[..., ...] + ) + + voltage = Instrument.control( + "VOLT?", "VOLT %f", + """ A floating point property that controls the voltage amplitude of the output + waveform in V, from ... to .... Can be set. """, + # validator=strict_range, + # values=[..., ...] + ) + + voltage_offset = Instrument.control( + "VOLT:OFFS?", "VOLT:OFFS %f", + """ A floating point property that controls the voltage offset of the output + waveform in V, from ... to .... Can be set. """, + # validator=strict_range, + # values=[..., ...] + ) + + voltage_high = Instrument.control( + "VOLT:HIGH?", "VOLT:HIGH %f", + """ A floating point property that controls the upper voltage of the output + waveform in V, from ... to .... Can be set. """, + # validator=strict_range, + # values=[..., ...] + ) + + voltage_low = Instrument.control( + "VOLT:LOW?", "VOLT:LOW %f", + """ A floating point property that controls the lower voltage of the output + waveform in V, from ... to .... Can be set. """, + # validator=strict_range, + # values=[..., ...] + ) + +# FUCNtion:SQUare:DCYCLe +# FUNCtion:RAMP:SYMMetry +# FUNCtion:PULSe:WIDTh <seconds> +# FUNCtion:PULSe:DCYCLe <percent> +# FUNCtion:PULSe:TRANsition <seconds> +# FUNCtion:PULSe: +# OUTPut +# OUTPut:LOAD ??? +# OUTPut:POLarity {NORMal / INVerted} + +# BURSt:MODE {TRIGgered / GATed} +# BURSt:NCYCLes + +# TRIG +# OUTput:TRIGger {OFF / ON} + +# SYSTem:LOCal +# SYSTem:REMote + + def check_errors(self): + """ Read all errors from the instrument.""" + while True: + err = self.values("SYST:ERR?") + if int(err[0]) != 0: + errmsg = "Agilent 33220A: %s: %s" % (err[0], err[1]) + log.error(errmsg + '\n') + else: + break
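A usage sketch for the new properties, assuming the driver is exposed as `pymeasure.instruments.agilent.Agilent33220A`; the VISA resource string is hypothetical:

```python
from pymeasure.instruments.agilent import Agilent33220A

fg = Agilent33220A("USB0::0x0957::0x0407::MY44012345::INSTR")  # hypothetical address

fg.function = "SIN"       # validated against the discrete set above
fg.frequency = 1e3        # Hz
fg.voltage = 0.5          # amplitude in V
fg.voltage_offset = 0.0   # V

fg.check_errors()         # drains the SYST:ERR? queue added in this commit
```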
chore: drop flake8 from project dependencies It is incompatible with Sphinx due to importlib-metadata requirements, and a better option is to run via pre-commit anyways. See also:
@@ -41,7 +41,7 @@ include = [ python = "^3.7" uvloop = { version = "^0.14", optional = true } -sphinx = { version = ">=5,<6", optional = true } +sphinx = { version = "^5.3.0", optional = true } sphinx_autodoc_typehints = { version = "^1.7.0", optional = true } [tool.poetry.dev-dependencies] @@ -53,9 +53,6 @@ hypothesis = "^6.56" aiosmtpd = "^1.4.2" pytest-xdist = "^3.0.0" mypy = "^0.982" -flake8 = "^5.0" -flake8-bugbear = "^22.10.27" -flake8-typing-imports = "^1.14.0" trustme = "^0.9.0" black = "22.10.0"
tests: Verify error logs emitted by the log_and_report function. This commit verifies the error logs printed while testing the log_and_report function using assertLogs, without printing them in the test output and hence avoiding spam.
@@ -1305,8 +1305,11 @@ class TestEmailMirrorLogAndReport(ZulipTestCase): incoming_valid_message['Subject'] = "Test Subject" incoming_valid_message['From'] = self.example_email('hamlet') incoming_valid_message['To'] = stream_to_address - + with self.assertLogs('zerver.lib.email_mirror', 'ERROR') as error_log: log_and_report(incoming_valid_message, "test error message", stream_to_address) + self.assertEqual(error_log.output, [ + 'ERROR:zerver.lib.email_mirror:Sender: [email protected]\nTo: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX@testserver <Address to stream id: 1>\ntest error message' + ]) message = most_recent_message(user_profile) self.assertEqual("email mirror error", message.topic_name()) @@ -1317,7 +1320,12 @@ class TestEmailMirrorLogAndReport(ZulipTestCase): stream.id) self.assertEqual(msg_content, expected_content) + with self.assertLogs('zerver.lib.email_mirror', 'ERROR') as error_log: log_and_report(incoming_valid_message, "test error message", None) + self.assertEqual(error_log.output, [ + 'ERROR:zerver.lib.email_mirror:Sender: [email protected]\nTo: No recipient found\ntest error message' + ]) + message = most_recent_message(user_profile) self.assertEqual("email mirror error", message.topic_name()) msg_content = message.content.strip('~').strip()
Update generate_lm.py Modify scripts/generate_lm.py since the project uses a list to store targets by default.
@@ -26,7 +26,7 @@ class GenerateLm(torch.nn.Module): super(GenerateLm, self).__init__() self.embedding = str2embedding[args.embedding](args, len(args.tokenizer.vocab)) self.encoder = str2encoder[args.encoder](args) - self.target = str2target[args.target](args, len(args.tokenizer.vocab)) + self.target = LmTarget(args, len(args.tokenizer.vocab)) def forward(self, src, seg): emb = self.embedding(src, seg)
Mistake in MSELoss documentation Summary: Replaced `sum` with `mean` in [line 392](https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/loss.py#L392) Pull Request resolved:
@@ -389,7 +389,7 @@ class MSELoss(_Loss): :math:`x` and :math:`y` are tensors of arbitrary shapes with a total of :math:`n` elements each. - The sum operation still operates over all the elements, and divides by :math:`n`. + The mean operation still operates over all the elements, and divides by :math:`n`. The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
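A quick check of the corrected statement: with the default reduction, the loss is the summed squared error divided by the total number of elements n.

```python
import torch
import torch.nn as nn

x = torch.randn(4, 3)
y = torch.randn(4, 3)

mean_loss = nn.MSELoss()(x, y)                   # default reduction='mean'
sum_loss = nn.MSELoss(reduction='sum')(x, y)

assert torch.allclose(mean_loss, sum_loss / x.numel())
assert torch.allclose(mean_loss, ((x - y) ** 2).mean())
```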
fetch latest_block_header to extract highest_block_number when using eth protocol
@@ -20,6 +20,7 @@ from async_generator import ( ) from cancel_token import CancelToken, OperationCancelled from eth_typing import ( + BlockIdentifier, BlockNumber, Hash32, ) @@ -706,6 +707,7 @@ class HeaderMeatSyncer(BaseService, PeerSubscriber, Generic[TChainPeer]): raise ValidationError( f"Can't request {length} headers, because peer maximum is {peer.max_headers_fetch}" ) + headers = await self._request_headers( peer, BlockNumber(parent_header.block_number + 1), @@ -759,7 +761,8 @@ class HeaderMeatSyncer(BaseService, PeerSubscriber, Generic[TChainPeer]): return headers async def _request_headers( - self, peer: TChainPeer, start_at: BlockNumber, length: int) -> Tuple[BlockHeader, ...]: + self, peer: TChainPeer, start_at: BlockIdentifier, length: int + ) -> Tuple[BlockHeader, ...]: self.logger.debug("Requesting %d headers from %s", length, peer) try: return await peer.chain_api.get_block_headers(start_at, length, skip=0, reverse=False) @@ -780,10 +783,19 @@ class HeaderMeatSyncer(BaseService, PeerSubscriber, Generic[TChainPeer]): raise async def _init_sync_progress(self, parent_header: BlockHeader, peer: TChainPeer) -> None: + try: + latest_block_number = peer.head_info.head_number + except AttributeError: + headers = await self._request_headers(peer, peer.head_info.head_hash, 1) + if headers: + latest_block_number = headers[0].block_number + else: + return + self.sync_progress = SyncProgress( parent_header.block_number, parent_header.block_number, - peer.head_info.head_number, + latest_block_number, )
raise when jit.load-ing a folder Summary: Very similar to but handling directories. Stoked to contribute! Pull Request resolved:
@@ -216,6 +216,8 @@ def load(f, map_location=None, _extra_files=DEFAULT_EXTRA_FILES_MAP): if isinstance(f, string_classes): if not os.path.exists(f): raise ValueError("The provided filename {} does not exist".format(f)) + if os.path.isdir(f): + raise ValueError("The provided filename {} is a directory".format(f)) if isinstance(map_location, string_classes): map_location = torch.device(map_location) elif not (map_location is None or
dse: Change semantics of graph.extract This in turn impacts the clusterizer
@@ -148,7 +148,7 @@ class TemporariesGraph(OrderedDict): def trace(self, key): """ - Extract the tree computing the temporary ``key``. + Return the sequence of operations required to compute the temporary ``key``. """ if key not in self: return [] @@ -202,55 +202,51 @@ class TemporariesGraph(OrderedDict): """ if key not in self: return False + seen = set() queue = [self[key]] while queue: item = queue.pop(0) + seen.add(item) if any(key in i.atoms() for i in retrieve_indexed(item)): # /key/ appears amongst the indices of /item/ return True else: - queue.extend(self.extract(item.lhs, readby=True)) + queue.extend([i for i in self.extract(item.lhs, readby=True) + if i not in seen]) return False def extract(self, key, readby=False): """ - Return the list of temporaries appearing in ``key.reads`` that *preceed* - ``key`` in the TemporariesGraph (ie, in program order). - - If ``readby`` is passed as True, return instead the list of temporaries - appearing in ``key.readby`` *after* ``key`` in the TemporariesGraph - (ie, in program order). + Return the list of nodes appearing in ``key.reads``, in program order + (ie, based on the order in which the temporaries appear in ``self``). If + ``readby is True``, then return instead the list of nodes appearing + ``key.readby``, in program order. Examples ======== Given the following sequence of operations: :: - t0 = ... t1 = ... - u[i, j] = ... + t0 = ... + u[i, j] = ... v ... u[3, j] = ... v = t0 + t1 + u[z, k] - u[i, 5] = ... + t2 = ... - Assuming ``key == v`` and ``readby`` set to False as by default, return + Assuming ``key == v`` and ``readby is False`` (as by default), return the following list of :class:`Temporary` objects: :: - [t0, t1, u[i, j], u[3, j]] + [t1, t0, u[i, j], u[3, j]] - If ``readby`` is set to True, return: :: + If ``readby is True``, return: :: - [u[i, 5]] + [v, t2] """ if key not in self: return [] - if readby is False: - match = self[key].reads - section = self[:key] - else: - match = self[key].readby - section = self[key::1] + match = self[key].reads if readby is False else self[key].readby found = [] - for k, v in section.items(): + for k, v in self.items(): if k in match: found.append(v) return found