message — string, lengths 13 to 484
diff — string, lengths 38 to 4.63k
Update paper.bib fix bug in one ref
Journal = {Frontiers in Marine Science, section Ocean Observation}, Title = {{Argo} 1999-2019: two million temperature-salinity profiles and subsurface velocity observations from a global array of profiling floats}, Volume = {in press}, - Doi = {10.3389/fmars.2020.00700} + Doi = {10.3389/fmars.2020.00700}, Year = {2020}} @misc{scoop,
alert-words: Fix broken alert word UI. This fixes the alert word UI in settings by updating the CSS property referenced in the alert_word_settings_item.handlebars file. Fixes
</div> </div> {{else}} - <div class="alert-word-information-box list-container"> + <div class="alert-word-information-box grey-bg"> <div class="alert_word_listing"> <span class="value">{{word}}</span> </div>
webpack: Restart webpack-dev-server on config file changes. This should make the run-dev.py user experience a lot nicer when switching branches away from a branch that is at least as new as this commit, since we won't need to manually restart run-dev.py to restart webpack. Fixes
@@ -4,6 +4,8 @@ import argparse import os import sys import json +import subprocess +import pyinotify if False: from typing import NoReturn @@ -33,7 +35,7 @@ def build_for_prod_or_casper(quiet): os.execvp(webpack_args[0], webpack_args) def build_for_dev_server(host, port, minify, disable_host_check): - # type: (str, str, bool, bool) -> NoReturn + # type: (str, str, bool, bool) -> None """watches and rebuilds on changes, serving files from memory via webpack-dev-server""" # This is our most dynamic configuration, which we use for our @@ -55,7 +57,28 @@ def build_for_dev_server(host, port, minify, disable_host_check): webpack_args.append('--optimize-minimize') if disable_host_check: webpack_args.append('--disable-host-check') - os.execvp(webpack_args[0], webpack_args) + + webpack_process = subprocess.Popen(webpack_args) + + class WebpackConfigFileChangeHandler(pyinotify.ProcessEvent): + def process_default(self, event): + # type: (pyinotify.Event) -> None + nonlocal webpack_process + print('Restarting webpack-dev-server due to config changes...') + webpack_process.terminate() + webpack_process.wait() + webpack_process = subprocess.Popen(webpack_args) + + try: + watch_manager = pyinotify.WatchManager() + event_notifier = pyinotify.Notifier(watch_manager, WebpackConfigFileChangeHandler()) + for file in ['webpack.config.ts', 'webpack-helpers.ts', 'webpack.assets.json']: + filepath = os.path.join(os.path.dirname(__file__), file) + watch_manager.add_watch(filepath, pyinotify.IN_MODIFY) + event_notifier.loop() + finally: + webpack_process.terminate() + webpack_process.wait() def build_for_most_tests(): # type: () -> None
Improve cache iteration speed. getitem-based iteration included operations that aren't necessary when iterating over the cache continuously. Adding an iter method to the class seems to have improved iteration speed by several orders of magnitude.
@@ -170,6 +170,16 @@ class MessageCache: else: raise TypeError(f"cache indices must be integers or slices, not {type(item)}") + def __iter__(self) -> t.Iterator[Message]: + if self._is_empty(): + return + + if self._start < self._end: + yield from self._messages[self._start:self._end] + else: + yield from self._messages[self._start:] + yield from self._messages[:self._end] + def __len__(self): """Get the number of non-empty cells in the cache.""" if self._is_empty():
Fix typo in Subject sidebar view [#OSF-8284]
{% if perms.osf.view_metrics %} <li><a href="{% url 'metrics:metrics' %}"><i class='fa fa-link'></i> <span>OSF Metrics</span></a></li> {% endif %} - {% if perms.osf.view_subjects%} + {% if perms.osf.view_subject%} <li><a href="{% url 'subjects:list' %}"><i class='fa fa-link'></i> <span>OSF Subjects</span></a></li> {% endif %} </ul><!-- /.sidebar-menu -->
DOC: Make sure tutorial images are included. Notebooks that include local images were converted into html with missing images because: 1. plotnine-examples did not include tutorial/images as part of the package data. 2. The link_to_tutorials function did not search for images to include. Both have been resolved.
@@ -462,19 +462,30 @@ numpydoc_xref_ignore = {'type', 'optional', 'default'} def link_to_tutorials(): # Linking to the directory does not work well with # nbsphinx. We link to the files themselves - from glob import glob + from pathlib import Path, PurePath from plotnine_examples.tutorials import TUTPATH - dest_dir = os.path.join(CUR_PATH, 'tutorials') + tut_dir = Path(TUTPATH) + dest_dir = Path(CUR_PATH) / 'tutorials' + + tut_image_dir = tut_dir / 'images' + dest_image_dir = dest_dir / 'images' # Unlink files from previous build - for old_file in glob(dest_dir + '/*.ipynb'): + for old_file in dest_dir.glob('*.ipynb'): os.unlink(old_file) # Link files for this build - for file in glob(TUTPATH + '/*.ipynb'): - basename = os.path.basename(file) - dest = os.path.join(dest_dir, basename) + for file in tut_dir.glob('*.ipynb'): + basename = PurePath(file).name + dest = dest_dir / basename + os.symlink(file, dest) + + if tut_image_dir.is_dir(): + dest_image_dir.mkdir(exist_ok=True) + for file in tut_image_dir.glob('*.png'): + basename = PurePath(file).name + dest = dest_image_dir / basename os.symlink(file, dest)
[App] Fixing race condition while setting servers to be free for next batch in the Loadbalancer. Race condition fix when setting server to be free for next request
@@ -188,12 +188,6 @@ class _LoadBalancer(LightningWork): timeout=self._timeout_inference_request, headers=headers, ) as response: - # resetting the server status so other requests can be - # scheduled on this node - if server_url in self._server_status: - # TODO - if the server returns an error, track that so - # we don't send more requests to it - self._server_status[server_url] = True if response.status == 408: raise HTTPException(408, "Request timed out") response.raise_for_status() @@ -207,6 +201,11 @@ class _LoadBalancer(LightningWork): result = {request[0]: ex for request in batch} self._responses.update(result) finally: + # resetting the server status so other requests can be + # scheduled on this node + if server_url in self._server_status: + # TODO - if the server returns an error, track that so + # we don't send more requests to it self._server_status[server_url] = True def _find_free_server(self) -> Optional[str]:
Prepare 2.2.1rc2 [ci skip-rust] [ci skip-build-wheels]
@@ -6,6 +6,12 @@ This is the first release to require having a Python 3.7 or 3.8 interpreter to r https://raw.githubusercontent.com/pantsbuild/setup/2f079cbe4fc6a1d9d87decba51f19d7689aee69e/pants` to update your `./pants` script to choose the correct interpreter. +## 2.2.1rc2 (Mar 17, 2021) + +### Bug fixes + +* Upgrade to Pex 2.1.34 to pull in a fix for an import race. (cherrypick of #11711) ([#11714](https://github.com/pantsbuild/pants/pull/11714)) + ## 2.2.1rc1 (Mar 12, 2021) ### Bug fixes
Don't apply no_completion_scopes behaviour unless enabled Also for
@@ -199,6 +199,10 @@ class CompletionHandler(sublime_plugin.ViewEventListener): debug('could not find completion item for inserted "{}"'.format(inserted)) def on_query_completions(self, prefix, locations): + if not self.initialized: + self.initialize() + + if self.enabled: if prefix != "" and self.view.match_selector(locations[0], NO_COMPLETION_SCOPES): # debug('discarding completion because no completion scope with prefix {}'.format(prefix)) return ( @@ -207,10 +211,6 @@ class CompletionHandler(sublime_plugin.ViewEventListener): else sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS ) - if not self.initialized: - self.initialize() - - if self.enabled: reuse_completion = self.is_same_completion(prefix, locations) if self.state == CompletionState.IDLE: if not reuse_completion:
Going back to listing service in app.config. The listing service was moved to Flask 'g' but that didn't work correctly. In particular, it worked during tests but not when actually running the app and making requests through the browser.
@@ -33,8 +33,7 @@ def create_web_app() -> Flask: Base(app) app.register_blueprint(ui.blueprint) - with app.app_context(): - g.listing_service = FakeListingFilesService() + app.config['listing_service'] = FakeListingFilesService() ct_url_for = partial(create_ct_url, app.config.get( 'CLICKTHROUGH_SECRET'), url_for)
fix dd arguments. count=0 and seek=big doesn't work on all platforms and can result in a 0-sized file. Switch to using /dev/zero and a big block size, count=1
@@ -128,7 +128,7 @@ def sriov_vf_connection_test( dest_ssh.enable_public_key(source_ssh.generate_key_pairs()) # generate 200Mb file - source_node.execute("dd if=/dev/urandom of=large_file bs=100 count=0 seek=2M") + source_node.execute("dd if=/dev/zero of=large_file bs=200M count=1") max_retry_times = 10 for _, source_nic_info in vm_nics[source_node.name].items(): matched_dest_nic_name = ""
Prevent filename glob expansion in _msg_opts in rosbash The `find` argument glob is not properly quoted resulting in bash filename expansion. This leads to incorrect `find` calls. This fixes issue
@@ -369,7 +369,7 @@ function _msg_opts { else path=$(rospack find ${pkgname}) if [ -d ${path}/msg ]; then - echo $(find -L ${path}/msg -maxdepth 1 -mindepth 1 -name *.msg ! -regex ".*/[.][^./].*" -print0 | tr '\000' '\n' | sed -e "s/.*\/\(.*\)\.msg/${pkgname}\/\1/g") + echo $(find -L ${path}/msg -maxdepth 1 -mindepth 1 -name '*.msg' ! -regex ".*/[.][^./].*" -print0 | tr '\000' '\n' | sed -e "s/.*\/\(.*\)\.msg/${pkgname}\/\1/g") fi fi } @@ -883,7 +883,7 @@ function _msg_opts { path=$(rospack find ${pkgname} 2> /dev/null) if [ $? -eq 0 ] && [ -d ${path}/msg ]; then - echo $(find -L ${path}/msg -maxdepth 1 -mindepth 1 -name *.msg ! -regex ".*/[.][^./].*" -print0 | tr '\000' '\n' | sed -e "s/.*\/\(.*\)\.msg/${pkgname}\/\1/g") + echo $(find -L ${path}/msg -maxdepth 1 -mindepth 1 -name '*.msg' ! -regex ".*/[.][^./].*" -print0 | tr '\000' '\n' | sed -e "s/.*\/\(.*\)\.msg/${pkgname}\/\1/g") fi }
add python3 instruction to README Summary: add python3 instruction to README
@@ -20,7 +20,11 @@ For mac users, we recommend using [Anaconda](https://www.continuum.io/downloads) BlueWhale runs on any platform that supports caffe2. To install caffe2, follow this tutorial: [Installing Caffe2](https://caffe2.ai/docs/getting-started.html). -You may need to override caffe2's cmake defaults to use homebrew's protoc instead of Anaconda's protoc and to use Anaconda's Python instead of system Python. +You may need to override caffe2's cmake defaults to use homebrew's protoc instead of Anaconda's protoc and to use Anaconda's Python instead of system Python. Also add the following switch when running cmake to make sure caffe2 uses python3: + +``` +cmake -DPYTHON_EXECUTABLE=`which python3` +``` ### Thrift
Update source.py Added a check for self.tess_mag. This is not necessarily a list, so it raised a TypeError if it was only an int. It is only subscripted if it is a list now.
@@ -211,8 +211,9 @@ class Source(object): assert False, ("Source: one of the following keywords must be given: " "tic, gaia, coords, fn.") - + if isinstance(self.tess_mag,list): self.tess_mag = self.tess_mag[0] + self.locate_on_tess() self.tesscut_size = 31
added zone config zone attribute check - updated default.rb to check for the zone param in the zone_config class if zone config is enabled. This prevents an empty zone file from being created if there is no zone set
@@ -19,6 +19,10 @@ region = node['bcpc']['cloud']['region'] zone_config = ZoneConfig.new(node, region, method(:data_bag_item)) if zone_config.enabled? && worknode? + if zone_config.zone.nil? + raise 'zones are enabled but this node is not configured to be in a zone' + end + unless File.file?(zone_config.state_file) FileUtils.mkdir_p File.dirname(zone_config.state_file) File.write(zone_config.state_file, "#{zone_config.zone}\n")
Fixed a recent break in Cloud Redis resource initialization. REVID=187532484
@@ -52,7 +52,7 @@ def GetCloudRedisClass(cloud): Raises: Exception: An invalid cloud was provided """ - resource.GetResourceClass(BaseCloudRedis, CLOUD=cloud) + return resource.GetResourceClass(BaseCloudRedis, CLOUD=cloud) class BaseCloudRedis(resource.BaseResource):
[docs] update CN docs * update * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
from typing import Any, cast, Dict, List, Optional, Tuple, Union + import torch from torch.nn.functional import pad
set correct url for rosunit repo I suppose this got moved at some point or that part of the package.xml was copied over? Either way, I just spent 5 minutes looking for this package in the wrong repository...
<license>BSD</license> <url type="website">http://ros.org/wiki/rosunit</url> - <url type="bugtracker">https://github.com/ros/ros_comm/issues</url> - <url type="repository">https://github.com/ros/ros_comm</url> + <url type="bugtracker">https://github.com/ros/ros/issues</url> + <url type="repository">https://github.com/ros/ros</url> <author>Ken Conley</author> <buildtool_depend version_gte="0.5.78">catkin</buildtool_depend>
Fix `test_security_multistream` By passing initiator keypairs to node.
import asyncio -import multiaddr import pytest from libp2p import new_node from libp2p.crypto.rsa import create_new_key_pair -from libp2p.peer.peerinfo import info_from_p2p_addr from libp2p.protocol_muxer.multiselect_client import MultiselectClientError from libp2p.security.insecure.transport import InsecureSession, InsecureTransport from libp2p.security.simple.transport import SimpleSecurityTransport +from tests.configs import LISTEN_MADDR from tests.utils import cleanup, connect # TODO: Add tests for multiple streams being opened on different @@ -16,9 +15,7 @@ from tests.utils import cleanup, connect def peer_id_for_node(node): - addr = node.get_addrs()[0] - info = info_from_p2p_addr(addr) - return info.peer_id + return node.get_id() initiator_key_pair = create_new_key_pair() @@ -35,14 +32,16 @@ async def perform_simple_test( # TODO: implement -- note we need to introduce the notion of communicating over a raw connection # for testing, we do NOT want to communicate over a stream so we can't just create two nodes # and use their conn because our mplex will internally relay messages to a stream - sec_opt1 = transports_for_initiator - sec_opt2 = transports_for_noninitiator - node1 = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"], sec_opt=sec_opt1) - node2 = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"], sec_opt=sec_opt2) + node1 = await new_node( + key_pair=initiator_key_pair, sec_opt=transports_for_initiator + ) + node2 = await new_node( + key_pair=noninitiator_key_pair, sec_opt=transports_for_noninitiator + ) - await node1.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) - await node2.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0")) + await node1.get_network().listen(LISTEN_MADDR) + await node2.get_network().listen(LISTEN_MADDR) await connect(node1, node2)
handle different error in test_invalid_origin_value In Python 3.9.0 the error message for an invalid value contains the outer class name. This patch handles the new error message.
@@ -76,7 +76,7 @@ class TransformationsFilterTest(object): def test_invalid_origin_value(self): with pytest.raises(ValueError) as excinfo: TransformationsFilter(Origin=5) - excinfo.match("is not a valid Origin") + excinfo.match("is not a valid (TransformationsFilter\.)?Origin") def test_empty_glyph(self, font): filter_ = TransformationsFilter(OffsetY=51, include={"space"})
TcpClient: Catch ConnectionError instead of its particular cases That can be more reliable, especially in the case of using PySocks.
@@ -113,7 +113,7 @@ class TcpClient: self._socket.sendall(data) except socket.timeout as e: raise TimeoutError() from e - except (BrokenPipeError, ConnectionAbortedError): + except ConnectionError: self._raise_connection_reset() except OSError as e: if e.errno == errno.EBADF: @@ -139,10 +139,7 @@ class TcpClient: partial = self._socket.recv(bytes_left) except socket.timeout as e: raise TimeoutError() from e - except ConnectionAbortedError: - # ConnectionAbortedError: [WinError 10053] - # An established connection was aborted by - # the software in your host machine. + except ConnectionError: self._raise_connection_reset() except OSError as e: if e.errno == errno.EBADF or e.errno == errno.ENOTSOCK:
Use correct config for hourly-scheduled pandas_hello_world Summary: The config was incorrect for this schedule Test Plan: Load schedule into config editor and execute Reviewers: #ft, sashank, alangenfeld
@@ -26,10 +26,10 @@ def define_scheduler(): "solids": { "sum_solid": { "inputs": { - "num": { + "num_df": { "csv": { "path": file_relative_path( - __file__, "../pandas_hello_world/data/num.csv" + __file__, "pandas_hello_world/data/num.csv" ) } }
Fixes to local cache update process - Make sure to source the shared_functions - git -C is a 1.8.5 feature and pre-1.8.5 git doesn't support it
@@ -7,6 +7,8 @@ set -e if [[ ! -z "$BOOTSTRAP_HTTP_PROXY_URL" ]] || [[ ! -z "$BOOTSTRAP_HTTPS_PROXY_URL" ]] ; then echo "Testing configured proxies..." source "$REPO_ROOT/bootstrap/shared/shared_proxy_setup.sh" +else + source "$REPO_ROOT/bootstrap/shared/shared_functions.sh" fi REQUIRED_VARS=( BOOTSTRAP_CACHE_DIR REPO_ROOT ) @@ -100,9 +102,11 @@ clone_repo() { version="$3" if [[ -d "$BOOTSTRAP_CACHE_DIR/$local_dir/.git" ]]; then - git -C "$BOOTSTRAP_CACHE_DIR/$local_dir" log --pretty=format:'%H' | \ + pushd "$BOOTSTRAP_CACHE_DIR/$local_dir" + git log --pretty=format:'%H' | \ grep -q "$version" || \ - git -C "$BOOTSTRAP_CACHE_DIR/$local_dir" pull + git pull + popd else git clone "$repo_url" "$BOOTSTRAP_CACHE_DIR/$local_dir" fi
Fix initialization of shared_storage_options The shared_storage_options is overwritten in all cases except EBS, where it represents a list of shared directories. The old code would generate the shared directory list as ["None","/shared",...]
@@ -84,7 +84,7 @@ class ClusterCdkStack(core.Stack): self.instance_profiles = {} self.compute_security_groups = {} self.shared_storage_mappings = {storage_type: [] for storage_type in SharedStorageType} - self.shared_storage_options = {storage_type: "NONE" for storage_type in SharedStorageType} + self.shared_storage_options = {storage_type: "" for storage_type in SharedStorageType} self._add_resources() self._add_outputs()
input events: use original request Since the view class has the original request in its object scope, there is no need to create new request objects for input events.
@@ -617,13 +617,8 @@ class ViewRuntime: payload[0], ) - request = Request( - view_runtime=self, - connection=connection, - ) - input_event = InputEvent( - request=request, + request=self.request, payload=payload, document=self.document, connection=connection,
Add kwargs argument to reactor caller local_salt_call: caller.cmd.run: - args: - "mkdir test" - kwargs: cwd: /tmp
@@ -348,10 +348,11 @@ class ReactWrap(object): ''' log.debug("in caller with fun {0} args {1} kwargs {2}".format(fun, args, kwargs)) args = kwargs.get('args', []) + kwargs = kwargs.get('kwargs', {}) if 'caller' not in self.client_cache: self.client_cache['caller'] = salt.client.Caller(self.opts['conf_file']) try: - self.client_cache['caller'].function(fun, *args) + self.client_cache['caller'].cmd(fun, *args, **kwargs) except SystemExit: log.warning('Attempt to exit reactor. Ignored.') except Exception as exc:
dvc: do not check for isdir on recursive out collect The thing is we create dirs as needed, so if outs are not checked out, a directory containing nothing but outs may be absent.
@@ -399,14 +399,13 @@ class Repo(object): abs_path = os.path.abspath(path) path_info = PathInfo(abs_path) - is_dir = self.tree.isdir(abs_path) match = path_info.__eq__ if strict else path_info.isin_or_eq def func(out): if out.scheme == "local" and match(out.path_info): return True - if is_dir and recursive and out.path_info.isin(path_info): + if recursive and out.path_info.isin(path_info): return True return False
Add param subset Handle None output_col
@@ -227,7 +227,7 @@ def rows(self): return df @staticmethod - def tag_duplicated(keep="first", output_col=None): + def tag_duplicated(keep="first", subset=None, output_col=None): """ Find the rows that have null values @@ -238,7 +238,10 @@ def rows(self): df = self - df[output_col] = df.duplicated(keep=keep) + if output_col is None: + output_col = "__duplicated__" + + df[output_col] = df.duplicated(keep=keep, subset=subset) return df
TypeRepo: add a defer_root_node property TN:
@@ -2833,6 +2833,10 @@ class TypeRepo(object): """ return StructMetaclass.root_grammar_class + @property + def defer_root_node(self): + return self.Defer(lambda: self.root_node) + @property def env_md(self): """
Diagnostics: add a shortcut for check_source_language to emit warnings TN:
@@ -239,6 +239,16 @@ def check_source_language(predicate, message, severity=Severity.error): Diagnostics.has_pending_error = True +def warn_if(predicate, message): + """ + Shortcut for check_source_language with severity=Severity.warning. + + Note that the predicated is negated: the warning is emitted if predicate is + False. + """ + return check_source_language(not predicate, message, Severity.warning) + + def check_multiple(predicates_and_messages, severity=Severity.error): """ Helper around check_source_language, check multiple predicates at once.
TST: added line to register modules Added a line to register Instrument modules when these modules are needed.
@@ -671,6 +671,11 @@ class TestAvailableInst(TestWithRegistration): plat_flag): """Test display_available_instruments options """ + # If using the pysat registry, make sure there is something registered + if inst_loc is None: + pysat.utils.registry.register(self.module_names) + + # Initialize the STDOUT stream new_stdout = StringIO() with contextlib.redirect_stdout(new_stdout):
Update create-new-project.mdx fix a typing mistake.
@@ -38,7 +38,7 @@ Inside of the directory `PROJECT_NAME/`, the following files and directories are | `PROJECT_NAME/jobs/` | A Python package that contains JobDefinitions, which are built up from ops | | `PROJECT_NAME/schedules/` | A Python package that contains ScheduleDefinitions, to trigger recurring job runs based on time | | `PROJECT_NAME/sensors/` | A Python package that contains SensorDefinitions, to trigger job runs based on external state | -| `PROJECT_NAME/repository.py` | A Python module that contains a RepositoryDefinition, to specify which jbos, schedules, and sensors are available in your repository | +| `PROJECT_NAME/repository.py` | A Python module that contains a RepositoryDefinition, to specify which jobs, schedules, and sensors are available in your repository | This file structure is a good starting point and suitable for most Dagster projects. As you build more and more jobs, you may eventually find your own way of structuring your code that works best for you.
[swarming] Be tolerant to inconsistent index This is to handle new logging code in
@@ -646,7 +646,16 @@ def cron_delete_old_bot_events(): if not first_ts: # Fetch the very first entity to get an idea of the range being # processed. - first_ts = keys[0].get().ts + while keys: + # It's possible that the query returns ndb.Key for entities that do + # not exist anymore due to an inconsistent index. Handle this + # explicitly. + e = keys[0].get() + if not e: + keys = keys[1:] + continue + first_ts = e.ts + break ndb.delete_multi(keys) count += len(keys) if utils.utcnow() >= time_to_stop:
Use the controller for topic metadata requests Closes
@@ -473,7 +473,7 @@ class KafkaAdminClient(object): return response - def _get_cluster_metadata(self, topics=None, auto_topic_creation=False): + def _get_cluster_metadata(self, topics=None, auto_topic_creation=False, use_controller=False): """ topics == None means "get all topics" """ @@ -492,6 +492,9 @@ class KafkaAdminClient(object): allow_auto_topic_creation=auto_topic_creation ) + if use_controller: + future = self._send_request_to_controller(request) + else: future = self._send_request_to_node( self._client.least_loaded_node(), request @@ -505,7 +508,7 @@ class KafkaAdminClient(object): return [t['topic'] for t in obj['topics']] def describe_topics(self, topics=None): - metadata = self._get_cluster_metadata(topics=topics) + metadata = self._get_cluster_metadata(topics=topics, use_controller=True) obj = metadata.to_object() return obj['topics']
Specify output file encoding to be utf-8. On Windows the file encoding does not default to utf-8
@@ -670,7 +670,7 @@ async def build_set(session, set_name, language): json_ready = await apply_set_config_options(set_name, cards_holder) print('BuildSet: Generated JSON for {}'.format(set_stat)) - with (OUTPUT_DIR / '{}.json'.format(set_output)).open('w') as fp: + with (OUTPUT_DIR / '{}.json'.format(set_output)).open('w', encoding='utf-8') as fp: json.dump(json_ready, fp, indent=4, sort_keys=True, ensure_ascii=False) print('BuildSet: JSON written for {0} ({1})'.format(set_stat, set_name[1]))
Fix CORS configuration `CORS_ORIGIN_ALLOW_ALL` was renamed to `CORS_ALLOW_ALL_ORIGINS` More info:
@@ -174,7 +174,8 @@ TEMPLATES = [ # CORS -CORS_ORIGIN_ALLOW_ALL = True +# ------------------------------------------------------------------------------ +CORS_ALLOW_ALL_ORIGINS = True CORS_ALLOW_HEADERS = list(default_cors_headers) + [ "if-match", "if-modified-since",
Update pyproject.toml Add --force-sugar flag
[tool.pytest.ini_options] python_files = 'test_*.py' testpaths = 'test' # space seperated list of paths from root e.g test tests doc/testing -addopts = '--cov=git --cov-report=term --maxfail=10 --disable-warnings' +addopts = '--cov=git --cov-report=term --maxfail=10 --force-sugar --disable-warnings' filterwarnings = 'ignore::DeprecationWarning' # --cov coverage # --cov-report term # send report to terminal term-missing -> terminal with line numbers html xml
Update docker_install.txt update cli
@@ -15,10 +15,10 @@ pip3 install pyproj==2.2.1 cd /PyRate && python3 setup.py install # Run workflow -pyrate converttogeotiff input_parameters.conf -pyrate prepifg input_parameters.conf -pyrate process input_parameters.conf -c 3 -r 4 -pyrate postprocess input_parameters.conf -c 3 -r 4 +pyrate converttogeotiff -f input_parameters.conf +pyrate prepifg -f input_parameters.conf +pyrate process -f input_parameters.conf -c 3 -r 4 +pyrate postprocess -f input_parameters.conf -c 3 -r 4 # Build Sphinx docs cd /PyRate/docs && make html
run_isolated: leave TODO to take isolated package/tag from luci-config This is spawned from crrev.com/c/1940395/8/client/run_isolated.py#764
@@ -109,8 +109,10 @@ ISOLATED_OUT_DIR = u'io' ISOLATED_TMP_DIR = u'it' ISOLATED_CLIENT_DIR = u'ic' +# TODO(tikuta): take these parameter from luci-config? # Take revision from # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console +ISOLATED_PACKAGE = 'infra/tools/luci/isolated/${platform}' ISOLATED_REVISION = 'git_revision:2ee27ca739de90c29d46eb3af3371a42fec3ebff' # Keep synced with task_request.py @@ -1018,9 +1020,8 @@ def install_client_and_packages(run_dir, packages, service_url, packages) # Install isolated client to |isolated_dir|. - _install_packages( - isolated_dir, cipd_cache_dir, client, - [('', 'infra/tools/luci/isolated/${platform}', ISOLATED_REVISION)]) + _install_packages(isolated_dir, cipd_cache_dir, client, + [('', ISOLATED_PACKAGE, ISOLATED_REVISION)]) file_path.make_tree_files_read_only(run_dir)
Add policy required for test purposes Policy is required to be able to read from the test bucket (e.g. to test pre/post_install scripts)
@@ -264,6 +264,26 @@ Resources: Version: '2012-10-17' Type: AWS::IAM::Role + ### INTEG-TESTS POLICIES + + IntegTestsPolicy: + Type: AWS::IAM::ManagedPolicy + Properties: + Roles: + - !Ref HeadNodeRoleSlurm + - !Ref ComputeNodeRoleSlurm + - !Ref HeadNodeRoleBatch + PolicyDocument: + Version: '2012-10-17' + Statement: + # Required to use test bucket (e.g. to test pre/post_install scripts) + - Action: + - s3:Get* + - s3:List* + Resource: + - !Sub arn:${AWS::Partition}:s3:::aws-parallelcluster-* + Effect: Allow + Outputs: HeadNodeRoleSlurm: Value: !GetAtt HeadNodeRoleSlurm.Arn
Switched GridView back to old CompositeView style These docs say v4 CollectionView supports the same behavior as v2 CompositeView, but earlier commits already replaced our v2 CompositeViews for the v3 upgrade. This reverts to the previous code, just replacing the CompositeView with a CollectionView.
@@ -48,24 +48,10 @@ hqDefine("cloudcare/js/formplayer/apps/views", function() { }, }; - GridContainerView = Marionette.CollectionView.extend({ + GridView = Marionette.CollectionView.extend({ + template: _.template($("#grid-template").html() || ""), childView: GridItem, childViewContainer: ".js-application-container", - }); - - GridView = Marionette.View.extend({ - template: _.template($("#grid-template").html() || ""), - - regions: { - body: { - el: '.js-application-container', - }, - }, - onRender: function () { - this.getRegion('body').show(new GridContainerView({ - collection: this.collection, - })); - }, events: _.extend(BaseAppView.events), incompleteSessionsClick: _.extend(BaseAppView.incompleteSessionsClick),
Fix random redeploy failure during certificate extraction During the extraction of the local certificate, the ansible task uses the output of an unregistered variable, so it passes based on a random input. Closes-Bug:
@@ -117,7 +117,8 @@ outputs: test -e ${ca_pem} && openssl x509 -checkend 0 -noout -in ${ca_pem} retries: 5 delay: 1 - until: result.rc == 0 + register: local_ca_extract_result + until: local_ca_extract_result.rc == 0 when: certmonger_ca != 'IPA' and (ipa_realm is not defined) - include_role: name: linux-system-roles.certificate
FIX Set temp dir for pytest Fixes A100 testing errors of `OSError: could not create numbered dir with prefix pytest- in /tmp/pytest-of-jenkins after 10 tries`
@@ -120,9 +120,9 @@ GTEST_OUTPUT="xml:${WORKSPACE}/test-results/libcuml_cpp/" ./test/ml logger "Python pytest for cuml..." cd $WORKSPACE/python -pytest --cache-clear --junitxml=${WORKSPACE}/junit-cuml.xml -v -s -m "not memleak" --durations=50 --timeout=300 --ignore=cuml/test/dask --ignore=cuml/raft +pytest --cache-clear --basetemp=${WORKSPACE}/cuml-cuda-tmp --junitxml=${WORKSPACE}/junit-cuml.xml -v -s -m "not memleak" --durations=50 --timeout=300 --ignore=cuml/test/dask --ignore=cuml/raft -timeout 7200 sh -c "pytest cuml/test/dask --cache-clear --junitxml=${WORKSPACE}/junit-cuml-mg.xml -v -s -m 'not memleak' --durations=50 --timeout=300" +timeout 7200 sh -c "pytest cuml/test/dask --cache-clear --basetemp=${WORKSPACE}/cuml-mg-cuda-tmp --junitxml=${WORKSPACE}/junit-cuml-mg.xml -v -s -m 'not memleak' --durations=50 --timeout=300" ################################################################################
undo image_embeddings changes rm space
"source": [ "import collections\n", "\n", - "\n", "def generate_fiftyone_classification(embedding, collection_name=\"mnist\"):\n", " search_results = client.search(\n", " collection_name=collection_name,\n",
Add ISWAP_INV to zoo Had to wait for to be done before adding to zoo (chicken, meet egg. Egg, chicken)
}, "outputs": [], "source": [ - "display_gates(\"CX\", \"CZ\", \"SWAP\", \"ISWAP\", \"SQRT_ISWAP\", \"SQRT_ISWAP_INV\")" + "display_gates(\"CX\", \"CZ\", \"SWAP\", \"ISWAP\", \"ISWAP_INV\", \"SQRT_ISWAP\", \"SQRT_ISWAP_INV\")" ] }, {
Delete ec datapool during cleanup only when it exists Modified: tests/rbd/rbd_utils.py
@@ -83,6 +83,7 @@ class Rbd: self.exec_cmd(cmd='rm -rf {}'.format(kw.get('dir_name'))) if kw.get('pools'): pool_list = kw.get('pools') + if self.datapool: pool_list.append(self.datapool) for pool in pool_list: self.exec_cmd(cmd='ceph osd pool delete {pool} {pool} '
sources: curl max_workers 2 * num_cpus This changes the curl source to use the number of cpus times two for its thread count. A conservative number but a commonly used default.
@@ -84,7 +84,7 @@ SCHEMA = """ class CurlSource(sources.SourceService): content_type = "org.osbuild.files" - max_workers = 4 + max_workers = 2 * os.cpu_count() def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
Corrected Mis-Formatted Reference Links The in-line style was breaking link display on the website.
- Python provides a wide range of [ways to modify `lists`][ways to modify `lists`]. -[common sequence operations](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range) -[constructed](https://docs.python.org/3/library/stdtypes.html#list) -[iterate over a list in python](https://www.geeksforgeeks.org/iterate-over-a-list-in-python/) -[return](https://www.w3schools.com/python/ref_keyword_return.asp) -[ways to modify `lists`](https://realpython.com/python-lists-tuples/#lists-are-mutable) +[common sequence operations]: https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range +[constructed]: https://docs.python.org/3/library/stdtypes.html#list +[iterate over a list in python]: https://www.geeksforgeeks.org/iterate-over-a-list-in-python/ +[return]: https://www.w3schools.com/python/ref_keyword_return.asp +[ways to modify `lists`]: https://realpython.com/python-lists-tuples/#lists-are-mutable
style: make static code analysis happy flake8: E302 expected 2 blank lines, found 1 E305 expected 2 blank lines after class or function definition, found 1 ansible-lint: [206] Variables should have spaces before and after: {{ var_name }}
@@ -4,6 +4,7 @@ import sys from prometheus_client.core import GaugeMetricFamily, REGISTRY from prometheus_client import start_http_server + class CustomCollector(object): def __init__(self): pass @@ -21,6 +22,7 @@ class CustomCollector(object): g.add_metric(["nhc_exit_code"], retcode) yield g + if __name__ == '__main__': start_http_server(8777) REGISTRY.register(CustomCollector())
GDB helpers: enhance GNAT encodings matching for env getter printer TN:
@@ -228,9 +228,12 @@ class EnvGetterPrinter(BasePrinter): return '<EnvGetter dynamic>' else: # With GNAT encodings, GDB exposes the variant part as a field that - # is an union. + # is an union. Sometimes it's half-decoded... + try: union = self.value['dynamic___XVN'] variant = union['O'] + except gdb.error: + variant = self.value['S'] return str(variant['env'])
Travis py37 scipy0.19.1 fix Remove python-3.7, scipy-0.19.1 build from test matrix (since scipy-0.19.1 only claims support for python 2.7-3.6). This fixes issue
@@ -28,6 +28,12 @@ env: - SCIPY=scipy SLYCOT= # default, w/out slycot - SCIPY="scipy==0.19.1" SLYCOT= # legacy support, w/out slycot +# Exclude combinations that are very unlikely (and don't work) +matrix: + exclude: + - python: "3.7" # python3.7 should use latest scipy + env: SCIPY="scipy==0.19.1" SLYCOT= + # install required system libraries before_install: # Install gfortran for testing slycot; use apt-get instead of conda in
fix: disable react query cache Seems to be a weird bug with the experimental plugin where user sensitive data like messages and friends are not purged on logout
@@ -5,9 +5,7 @@ import { UseQueryOptions, UseQueryResult, } from "react-query"; -import { createLocalStoragePersistor } from "react-query/createLocalStoragePersistor-experimental"; import { ReactQueryDevtools } from "react-query/devtools"; -import { persistQueryClient } from "react-query/persistQueryClient-experimental"; import { reactQueryRetries } from "./constants"; @@ -22,13 +20,6 @@ export const queryClient = new QueryClient({ }, }); -const persistor = createLocalStoragePersistor(); - -persistQueryClient({ - maxAge: 14 * 24 * 60 * 60 * 1000, - persistor, - queryClient, -}); interface ReactQueryClientProviderProps { children: React.ReactNode; }
Bug fix Wrong namespace on uniform call.
@@ -1295,7 +1295,7 @@ class Worker: @staticmethod async def random_sleep(minimum=10.1, maximum=14): """Sleeps for a bit""" - await sleep(random.uniform(minimum, maximum), loop=LOOP) + await sleep(uniform(minimum, maximum), loop=LOOP) @property def status(self):
Use bytes instead of a string Summary: - Fixes a bug where we were trying to concatenate a string to bytes.
@@ -153,7 +153,7 @@ class ConsoleCommandSession(SSHCommandSession): def _send_clearline(self): self.send(b'\x15\r\n') - def _send_newline(self, end="\n"): + def _send_newline(self, end=b"\n"): self.send(b'\r', end) async def _setup_connection(self):
Fix stop_watcher function Apache should be reloaded after watcher-api is disabled.
@@ -318,6 +318,7 @@ function start_watcher { function stop_watcher { if [[ "$WATCHER_USE_MOD_WSGI" == "True" ]]; then disable_apache_site watcher-api + restart_apache_server else stop_process watcher-api fi
Add step to publish package on PyPI Note, this uses a feature of PyPI. Instead of - I use the recent commit hash from the gh-action-pypi-publish repo to make the action more stable. The secret used in ${{ secrets.PYPI_API_TOKEN }} needs to be created on the settings page of the mpmath project.
@@ -31,3 +31,9 @@ jobs: codecov --required - name: Make packages run: python setup.py sdist bdist_wheel + - name: Publish package on PyPI + if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@f91f98d65eb3eb032447201d64f2c25d67c28efe + with: + user: __token__ + password: ${{ secrets.PYPI_API_TOKEN }}
Turn log.exception into log.error Also, refactor error messages to be consistent and DRY throughout the file.
@@ -130,21 +130,24 @@ class RedisCache: async def _validate_cache(self) -> None: """Validate that the RedisCache is ready to be used.""" if self.bot is None: - log.exception("Attempt to use RedisCache with no `Bot` instance.") - raise RuntimeError( + error_message = ( "Critical error: RedisCache has no `Bot` instance. " "This happens when the class RedisCache was created in doesn't " "have a Bot instance. Please make sure that you're instantiating " "the RedisCache inside a class that has a Bot instance " "class attribute." ) + log.error(error_message) + raise RuntimeError(error_message) if self._namespace is None: - log.exception("Attempt to use RedisCache with no namespace.") - raise RuntimeError( + error_message = ( "Critical error: RedisCache has no namespace. " "Did you initialize this object as a class attribute?" ) + log.error(error_message) + raise RuntimeError(error_message) + await self.bot.redis_ready.wait() def __set_name__(self, owner: Any, attribute_name: str) -> None: @@ -176,18 +179,17 @@ class RedisCache: return self if self._namespace is None: - log.exception("RedisCache must be a class attribute.") - raise RuntimeError("RedisCache must be a class attribute.") + error_message = "RedisCache must be a class attribute." + log.error(error_message) + raise RuntimeError(error_message) if instance is None: - log.exception( - "Attempt to access RedisCache instance through the cog's class object " - "before accessing it through the cog instance." - ) - raise RuntimeError( + error_message = ( "You must access the RedisCache instance through the cog instance " "before accessing it using the cog's class object." ) + log.error(error_message) + raise RuntimeError(error_message) for attribute in vars(instance).values(): if isinstance(attribute, Bot): @@ -195,14 +197,15 @@ class RedisCache: self._redis = self.bot.redis_session return self else: - log.exception("Attempt to use RedisCache with no `Bot` instance.") - raise RuntimeError( + error_message = ( "Critical error: RedisCache has no `Bot` instance. " "This happens when the class RedisCache was created in doesn't " "have a Bot instance. Please make sure that you're instantiating " "the RedisCache inside a class that has a Bot instance " "class attribute." ) + log.error(error_message) + raise RuntimeError(error_message) def __repr__(self) -> str: """Return a beautiful representation of this object instance.""" @@ -340,16 +343,18 @@ class RedisCache: # Can't increment a non-existing value if value is None: - log.exception("Attempt to increment/decrement value for non-existent key.") - raise KeyError("The provided key does not exist!") + error_message = "The provided key does not exist!" + log.error(error_message) + raise KeyError(error_message) # If it does exist, and it's an int or a float, increment and set it. if isinstance(value, int) or isinstance(value, float): value += amount await self.set(key, value) else: - log.exception("Attempt to increment/decrement non-numerical value.") - raise TypeError("You may only increment or decrement values that are integers or floats.") + error_message = "You may only increment or decrement values that are integers or floats." + log.error(error_message) + raise TypeError(error_message) async def decrement(self, key: RedisType, amount: Optional[int, float] = 1) -> None: """
get interactive command properly working Summary: There are instances where the devices are prefixing the prompts with extra characters followed by '\r' (e.g. 'show lldb interface | xml' on nexus). This change makes sure that we can ignore these characters
@@ -131,7 +131,8 @@ class DeviceVendor(ServiceObj): # reduces the probability of this matching some random text in the # output. Not that we are matching at end of the text, not at the end of # each line in text (re.M is not specified) - return re.compile(b"^(?P<prompt>" + b"|".join(all_prompts) + b")\s*" + + return re.compile(b"(?<=[\n\r])(?P<prompt>" + + b"|".join(all_prompts) + b")\s*" + trailer + b"$", re.M)
Verification: set 'tasks_running' to 0 on suspicious 403s Prevent the tasks from starting again if the bot restarts.
@@ -307,6 +307,7 @@ class Verification(Cog): await request(member) except StopExecution as stop_execution: await self._alert_admins(stop_execution.reason) + await self.task_cache.set("tasks_running", 0) self._stop_tasks(gracefully=True) # Gracefully finish current iteration, then stop break except discord.HTTPException as http_exc:
tests/EpisodicMemoryMechanism: Use 'size' instead of 'content_size' in construction The latter is deprecated.
@@ -48,7 +48,7 @@ names = [ @pytest.mark.parametrize('variable, func, params, expected', test_data, ids=names) def test_with_dictionary_memory(variable, func, params, expected, benchmark, mech_mode): f = func(seed=0, **params) - m = EpisodicMemoryMechanism(content_size=len(variable[0]), assoc_size=len(variable[1]), function=f) + m = EpisodicMemoryMechanism(size=len(variable[0]), assoc_size=len(variable[1]), function=f) EX = pytest.helpers.get_mech_execution(m, mech_mode) EX(variable)
realm_logo: Fix incorrect display of realm logo delete button. This commit fixes the bug of incorrectly showing/hiding the realm logo delete button by using realm_night_logo_source for checking the source of night mode logo instead of previously used realm_logo_source for both day and night logos.
exports.build_realm_logo_widget = function (upload_function, is_night) { let logo_section_id = '#day-logo-section'; + let logo_source = page_params.realm_logo_source; + if (is_night) { logo_section_id = '#night-logo-section'; + logo_source = page_params.realm_night_logo_source; } const delete_button_elem = $(logo_section_id + " .realm-logo-delete-button"); @@ -17,7 +20,7 @@ exports.build_realm_logo_widget = function (upload_function, is_night) { return; } - if (page_params.realm_logo_source === 'D') { + if (logo_source === 'D') { delete_button_elem.hide(); } else { delete_button_elem.show();
Update pytests.yml add concurrency grouping
@@ -8,6 +8,10 @@ on: branches-ignore: - 'dependabot*' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + jobs: detect-ci-trigger: @@ -235,7 +239,7 @@ jobs: fail_ci_if_error: true env_vars: RUNNER_OS,PYTHON_VERSION - free: + free-all: name: Env. all free - Py ${{matrix.python-version}} - ${{ matrix.os }} needs: free-core
Bug fix. Need a stills-specific version of this function for the case that treat_single_image_as_still=True. In that case, we don't want to run the close-to-spindle test etc., but we can't detect this as a still as the experiment contains a Scan and a Goniometer.
@@ -887,6 +887,25 @@ class StillsReflectionManager(ReflectionManager): _weighting_strategy = weighting_strategies.StillsWeightingStrategy() + def _id_refs_to_keep(self, obs_data): + """Create a selection of observations that pass certain conditions. + + Stills-specific version removes checks relevant only to experiments + with a rotation axis.""" + + # first exclude reflections with miller index set to 0,0,0 + sel1 = obs_data["miller_index"] != (0, 0, 0) + + # exclude reflections with overloads, as these have worse centroids + sel2 = ~obs_data.get_flags(obs_data.flags.overloaded) + + # combine selections + sel = sel1 & sel2 + inc = flex.size_t_range(len(obs_data)).select(sel) + obs_data = obs_data.select(sel) + + return inc + def print_stats_on_matches(self): """Print some basic statistics on the matches"""
fix: Disable submit button while doing network request to avoid duplicate entries
@@ -115,6 +115,7 @@ frappe.ui.form.Review = class Review { label: __('Reason') }], primary_action: (values) => { + review_dialog.disable_primary_action(); if (values.points > this.points.review_points) { return frappe.msgprint(__('You do not have enough points')); } @@ -133,6 +134,8 @@ frappe.ui.form.Review = class Review { this.frm.get_docinfo().energy_point_logs.unshift(review); this.frm.timeline.refresh(); this.update_reviewers(); + }).finally(() => { + review_dialog.enable_primary_action(); }); }, primary_action_label: __('Submit')
Clarify limitations & understanding with fixed_rows & table-layout: fixed Including related issues:
@@ -559,7 +559,7 @@ layout = html.Div( '''), - rc.Markdown("## Individual Column Widths"), + rc.Markdown("## Setting Column Widths"), rc.Markdown( ''' @@ -567,8 +567,7 @@ layout = html.Div( The widths of individual columns can be supplied through the `style_cell_conditional` property. These widths can be specified as - percentages or fixed pixels. You can supply the widths for _all_ of the - columns or just a few of them. + percentages or fixed pixels. ''' ), @@ -586,6 +585,74 @@ layout = html.Div( ) '''), + rc.Markdown( + ''' + By default, the column width is the maximum of the percentage given + and the width of the content. So, if the content in the column is wide, + the column may be wider than the percentage given. This prevents overflow. + + In the example below, note the first column is actually wider than 10%; + if it were shorter, the text "New York City" would overflow. + ''' + ), + + Display( + ''' + html.Div([ + html.Div('10%', style={'backgroundColor': 'hotpink', 'color': 'white', 'width': '10%'}), + dash_table.DataTable( + data=df.to_dict('records'), + columns=[{'id': c, 'name': c} for c in df.columns if c != 'Date'], + style_cell_conditional=[ + {'if': {'column_id': 'Region'}, + 'width': '10%'} + ] + ) + ]) + '''), + + rc.Markdown( + ''' + To force columns to be a certain width (even if that causes overflow) + use `table-layout: fixed`. + + ### Percentage Based Widths and `table-layout: fixed` + If you want all columns to have the same percentage-based width, + use `style_data` and `table-layout: fixed`. + ''' + ), + + Display( + ''' + dash_table.DataTable( + data=df.to_dict('records'), + columns=[{'id': c, 'name': c} for c in df.columns], + + css=[{'selector': 'table', 'rule': 'table-layout: fixed'}], + style_cell={ + 'width': '{}%'.format(len(df.columns)), + 'textOverflow': 'ellipsis', + 'overflow': 'hidden' + } + ) + '''), + + rc.Markdown( + ''' + Setting consistent percentage-based widths is a good option if you are using + `virtualization`, sorting (`sort_action`), or `filtering` (`filter_action`). + Without fixed column widths, the table will dynamically resize the + columns depending on the width of the data that is displayed. + + **Limitations** + + 1. Percentage-based widths is not available with `fixed_rows` & `table-layout: fixed`. + See [plotly/dash-table#745](https://github.com/plotly/dash-table/issues/748) + 2. Percentage-based widths with `fixed_rows` and without `table-layout: fixed` + has some issues when resizing the window. See [plotly/dash-table#747](https://github.com/plotly/dash-table/issues/747) + ''' + ), + rc.Markdown( ''' ### Individual Column Widths with Pixels
[GCB] Fix image tagging Don't add two tags in a single command. One is sufficient and two is an error.
@@ -31,10 +31,10 @@ steps: # Use two tags so that the image builds properly and we can push it to the # correct location. '--tag', - 'gcr.io/fuzzbench/builders/coverage:${_EXPERIMENT}:${_EXPERIMENT}', + 'gcr.io/fuzzbench/builders/coverage:${_EXPERIMENT}', '--tag', - '${_REPO}/builders/coverage:${_EXPERIMENT}:${_EXPERIMENT}', + '${_REPO}/builders/coverage:${_EXPERIMENT}', '--cache-from', '${_REPO}/builders/coverage',
Adding a comment about NodeJS example, refs Thanks
@@ -128,6 +128,9 @@ request.post({ url: 'http://api-adresse.data.gouv.fr/search/csv/', formData: formData }).then(function (text) { + // You might want to use fs.writeFile instead because writeFileSync + // blocks the event loop. See section fs.writeFileSync() at + // http://www.daveeddy.com/2013/03/26/synchronous-file-io-in-nodejs/ fs.writeFileSync('./out.csv', text); }) .catch(function (err) {
Prevent division by zero Problem: `rnd.getrandbits` can result in 0, so that `b` could equal 0. This fix makes it so that b is a pseudorandom number close, but not equal, to zero.
@@ -253,7 +253,7 @@ def shanks(ctx, seq, table=None, randomized=False): b = row[j-1] - table[i-1][j-1] if not b: if randomized: - b = rnd.getrandbits(10)*eps + b = (1 + rnd.getrandbits(10))*eps elif i & 1: return table[:-1] else:
fix: Now ScriptTask main_func can be determined in __init__ (so init from config is nicer).
@@ -14,10 +14,10 @@ class ScriptTask(Task): ScriptTask("folder/subfolder/main.py") ScriptTask("folder/subfolder/mytask.py") """ - main_func = "main" - def __init__(self, path, **kwargs): + def __init__(self, path, main_func=None, **kwargs): self.path = path + self.main_func = "main" if main_func is None else main_func super().__init__(**kwargs) def execute_action(self, **params):
[NixIO] Test skipping: {setUp,tearDown}Class methods Class methods are not skipped when module is missing (by the unittest decorator), so we need the checks to avoid failing when there is no NIX module.
@@ -903,6 +903,7 @@ class NixIOReadTest(NixIOTest): @classmethod def setUpClass(cls): + if HAVE_NIX: cls.nixfile = cls.create_full_nix_file(cls.filename) def setUp(self): @@ -912,6 +913,7 @@ class NixIOReadTest(NixIOTest): @classmethod def tearDownClass(cls): + if HAVE_NIX: cls.nixfile.close() os.remove(cls.filename) @@ -1109,6 +1111,7 @@ class NixIOPartialWriteTest(NixIOTest): @classmethod def setUpClass(cls): + if HAVE_NIX: cls.nixfile = cls.create_full_nix_file(cls.filename) def setUp(self): @@ -1119,6 +1122,7 @@ class NixIOPartialWriteTest(NixIOTest): @classmethod def tearDownClass(cls): + if HAVE_NIX: cls.nixfile.close() os.remove(cls.filename)
Fixed message truncation bug The length of the utf-8 encoded body may be different than the unicode length of the string. This causes message truncation on the response. This patch fixes the issue.
@@ -576,6 +576,8 @@ class ChaliceRequestHandler(BaseHTTPRequestHandler): def _send_http_response_with_body(self, code, headers, body): # type: (int, HeaderType, Union[str,bytes]) -> None self.send_response(code) + if not isinstance(body, bytes): + body = body.encode('utf-8') self.send_header('Content-Length', str(len(body))) content_type = headers.pop( 'Content-Type', 'application/json') @@ -583,8 +585,6 @@ class ChaliceRequestHandler(BaseHTTPRequestHandler): for header_name, header_value in headers.items(): self.send_header(header_name, header_value) self.end_headers() - if not isinstance(body, bytes): - body = body.encode('utf-8') self.wfile.write(body) do_GET = do_PUT = do_POST = do_HEAD = do_DELETE = do_PATCH = do_OPTIONS = \
Updated GoDjango video url GoDjango site does not exist anymore, so the URL for the screencast was not working.
@@ -30,7 +30,7 @@ Getting Started The easiest way to figure out what Django Extensions are all about is to watch the `excellent screencast by Eric Holscher`__ (`watch the video on vimeo`__). In a couple minutes Eric walks you through a half a dozen command extensions. There is also a -`short screencast on GoDjango`__ to help show you even more. +`short screencast on GoDjango's Youtube Channel`__ to help show you even more. Requirements @@ -123,4 +123,4 @@ between putting food on the table, family, this project and the rest of life :-) __ http://ericholscher.com/blog/2008/sep/12/screencast-django-command-extensions/ __ http://vimeo.com/1720508 -__ https://godjango.com/39-be-more-productive-with-django_extensions/ +__ https://www.youtube.com/watch?v=1F6G3ONhr4k
[CI] Fix android build by constraining numpy version Temporarily constrain the version of numpy to work around the deprecated value used in mxnet. See
@@ -251,7 +251,8 @@ CONSTRAINTS = [ ("h5py", "==2.10.0"), ("image", None), ("matplotlib", None), - ("numpy", None), + # Workaround, see https://github.com/apache/tvm/issues/13647 + ("numpy", "<=1.23.*"), ("onnx", None), ("onnxoptimizer", None), ("onnxruntime", None),
remove spurious mention of conda As pointed out by at
@@ -228,10 +228,9 @@ To view the documentation, and then navigate your web browser to the ``docs/_build/html/`` subdirectory. -To test out the *code*, you can either use conda (as described above), -or, if you change to the ``python/`` subdirectory, -run ``make`` to compile the C code, -and execute ``python`` from this subdirectory, +To test out changes to the *code*, you can change to the ``python/`` subdirectory, +and run ``make`` to compile the C code. +If you then execute ``python`` from this subdirectory (and only this one!), it will use the modified version of the package. (For instance, you might want to open an interactive ``python`` shell from the ``python/`` subdirectory,
Fix issue If the input parameter type to a traced model is tensor.cuda(), ct.convert fails with the below error TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
@@ -225,7 +225,7 @@ def _list_select(shape_var, index): def _construct_constant(val, name): # Converter cannot handle torch tensors. if isinstance(val, torch.Tensor): - val = val.numpy() + val = val.cpu().numpy() # MIL casts ints to int32, which can't represent the 64 bit magic number. # So we instead represent it with None, and any ops that might get the
Fix matrix_to_marching_cubes offset The previous code produces an incorrect offset for the mesh produced by marching cubes, as illustrated by the below code. import numpy as np from trimesh.voxel import matrix_to_marching_cubes voxels = np.ones((3,3,3), dtype=np.bool) mesh = matrix_to_marching_cubes(voxels, 1.0, np.zeros(3)) print(mesh.bounds) mesh = matrix_to_marching_cubes(voxels, 3.0, np.zeros(3)) print(mesh.bounds)
@@ -338,7 +338,7 @@ def matrix_to_marching_cubes(matrix, pitch, origin): vertices, faces, normals, vals = meshed # Return to the origin, add in the pad_width - vertices = np.subtract(np.add(vertices, origin), pad_width) + vertices = np.subtract(np.add(vertices, origin), pad_width*pitch) mesh = Trimesh(vertices=vertices, faces=faces) return mesh
Added incident report Added incident report: Philadelphia Police disperse crowd with batons | June 1st
@@ -67,6 +67,15 @@ Three protestors kneeling on the ground with their hands on their heads/covering * https://twitter.com/d0wnrrrrr/status/1267691766188310528 +### Philadelphia Police disperse crowd with batons | June 1st + +Police officers strike several unarmed protesters and one parked vehicle with batons. + +**Links** + +* https://twitter.com/Peopledelphia/status/1267588991655784448 + + ## Pittsburgh ### Officer pepper-sprays a woman who is on her knees with her hands up
Update AZ Primary and Quinary Screenshots Changing second await page.waitForDelay() from 20000 to 30000. More reliable - tested locally!
@@ -11,7 +11,7 @@ primary: page.manualWait(); await page.waitForDelay(10000); page.mouse.click(615, 1100); - await page.waitForDelay(20000); + await page.waitForDelay(30000); page.done(); message: clicking on cases for AZ primary @@ -723,7 +723,7 @@ quinary: page.manualWait(); await page.waitForDelay(10000); page.mouse.click(880, 1100); - await page.waitForDelay(20000); + await page.waitForDelay(30000); page.done(); message: clicking on deaths for AZ quinary
[bugfix] enable "old" logentries tests: use wowwiki:hu because cs is very small; skip tests if there aren't any entries; add tests for wowwiki:en
@@ -44,9 +44,14 @@ class TestLogentriesBase(TestCase): 'code': 'de', 'target': 'Hauptseite', }, + 'enwow': { + 'family': 'wowwiki', + 'code': 'en', + 'target': None, + }, 'old': { 'family': 'wowwiki', - 'code': 'cs', + 'code': 'hu', 'target': None, } } @@ -58,7 +63,11 @@ class TestLogentriesBase(TestCase): # MW versions and otherwise it might not be visible that the test # isn't run on an older wiki. self.assertLess(self.site.mw_version, '1.20') - return next(iter(self.site.logevents(logtype=logtype, total=1))) + try: + le = next(iter(self.site.logevents(logtype=logtype, total=1))) + except StopIteration: + self.skipTest('No entry found for {!r}'.format(logtype)) + return le def _test_logevent(self, logtype): """Test a single logtype entry."""
Fix - Harmony 21.1 messed up Javascript Qt API

Removed leftover debug logging.
@@ -337,7 +337,6 @@ function start() { var host = '127.0.0.1'; /** port of the server */ var port = parseInt(System.getenv('AVALON_HARMONY_PORT')); - MessageLog.trace("port " + port.toString()); // Attach the client to the QApplication to preserve. var app = QCoreApplication.instance(); @@ -350,7 +349,6 @@ function start() { var widgets = QApplication.topLevelWidgets(); for (var i = 0 ; i < widgets.length; i++) { if (widgets[i] instanceof QMainWindow){ - MessageLog.trace('(DEBUG): START Main window '); mainWindow = widgets[i]; } }
fix typo the `ExportContainer` was broken before version 1183.
@@ -43,7 +43,7 @@ def exportAllInstances(): ''' possible keys: - ExportContiner: "woff", "woff2", "eot" + ExportContainer: "woff", "woff2", "eot" Destination: NSURL autoHint: bool (default = true) removeOverlap: bool (default = true)
Update Match typing to include re.Pattern Some `Match` properties support `str | re.Pattern | None` so let's type accordingly. Also updates the docstring to clarify this a bit.
@@ -37,6 +37,7 @@ from libqtile.command.base import CommandObject, expose_command from libqtile.log_utils import logger if TYPE_CHECKING: + import re from typing import Any, Callable, Iterable from libqtile.backend import base @@ -758,11 +759,9 @@ class Match: """ Match for dynamic groups or auto-floating windows. - It can match by title, wm_class, role, wm_type, wm_instance_class or net_wm_pid. - - :class:`Match` supports both regular expression objects (i.e. the result of - ``re.compile()``) or strings (match as an "include"-match). If a window matches all - specified values, it is considered a match. + For some properties, :class:`Match` supports both regular expression objects (i.e. + the result of ``re.compile()``) or strings (match as an "include"-match). If a + window matches all specified values, it is considered a match. Parameters ========== @@ -788,11 +787,11 @@ class Match: def __init__( self, - title: str | None = None, - wm_class: str | None = None, - role: str | None = None, - wm_type: str | None = None, - wm_instance_class: str | None = None, + title: str | re.Pattern | None = None, + wm_class: str | re.Pattern | None = None, + role: str | re.Pattern | None = None, + wm_type: str | re.Pattern | None = None, + wm_instance_class: str | re.Pattern | None = None, net_wm_pid: int | None = None, func: Callable[[base.Window], bool] | None = None, wid: int | None = None,
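Editor's note: a rough sketch of what the widened str | re.Pattern typing allows, not Qtile's actual matching code. property_matches is a hypothetical helper, and treating plain strings as substring matches is an assumption based on the "include"-match wording in the docstring.

```python
import re

def property_matches(rule, value: str) -> bool:
    # None matches anything, compiled patterns use regex matching,
    # plain strings behave as "include" (substring) matches.
    if rule is None:
        return True
    if isinstance(rule, re.Pattern):
        return rule.match(value) is not None
    return rule in value

print(property_matches("fox", "firefox"))                 # True  (substring)
print(property_matches(re.compile(r"^fire"), "firefox"))  # True  (regex)
print(property_matches(re.compile(r"^fox"), "firefox"))   # False
```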
Fix addition of js file

The previous code is used when we build documentation using readthedocs, which is not the case for ROSS.
@@ -108,9 +108,6 @@ except KeyError: nbsphinx_execute = "always" html_theme = "bootstrap" htlm_theme_path = sphinx_bootstrap_theme.get_html_theme_path() -html_js_files = [ - "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" -] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -285,3 +282,6 @@ epub_exclude_files = ["search.html"] # -- Extension configuration ------------------------------------------------- def setup(app): app.add_css_file("style.css") + app.add_js_file( + "https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" + )
Update CONTRIBUTING.md Fixed section links
@@ -6,16 +6,16 @@ We will document examples of excessive force being used by law enforcement offic Our goal in doing this is to assist journalists, politicians, prosecutors, activists and concerned citizens who can use the evidence accumulated here for political campaigns, news reporting, public education and prosecution of criminal police officers. -### [Frequently Asked Questions](#Frequently-Asked-Questions) -### [Code of Conduct](#Code-of-Conduct) -### [How to Contribute](#How-to-Contribute) -### [Incident Report Guidelines](#Incident-Report-Guidelines) +* ### [Frequently Asked Questions](#Frequently-Asked-Questions-1) +* ### [Code of Conduct](#Code-of-Conduct-1) +* ### [How to Contribute](#How-to-Contribute-1) +* ### [Incident Report Guidelines](#Incident-Report-Guidelines-1) ## Frequently Asked Questions ### How can I help? -Check out [How to Contribute](#How-to-Contribute). +Check out [How to Contribute](#How-to-Contribute-1). ### Where can I find evidence? @@ -68,9 +68,9 @@ If you are here to bring awareness to what you see as inexcusable behavior by th There are many ways you can contribute to this effort. -We need people to identify and research incidents, ensure that the current repository abides by the [incident report guidelines](#Incident-Report-Guidelines) and share the evidence accumulated here. +We need people to identify and research incidents, ensure that the current repository abides by the [incident report guidelines](#Incident-Report-Guidelines-1) and share the evidence accumulated here. -Before you start, please read our [code of conduct](#Code-of-Conduct). +Before you start, please read our [code of conduct](#Code-of-Conduct-1). ### Adding & Updating Incident Reports @@ -102,7 +102,7 @@ If you find the same incident in this repository, check the following: **If the incident has not been reported** -If you have identified a new incident, ensure that it meets our [content standards](#Incident-Report-Guidelines). +If you have identified a new incident, ensure that it meets our [content standards](#Incident-Report-Guidelines-1). #### 3. Share it! @@ -111,7 +111,7 @@ Send it to your friends and bring attention to what you find. ### Maintaining the Repository -If you would like to help maintain the repository, please read our [incident report guidelines](#Incident-Report-Guidelines). +If you would like to help maintain the repository, please read our [incident report guidelines](#Incident-Report-Guidelines-1). #### Cleanup misinformation and poorly documented reports
Project: make sure the issue template tells people where to get the develop version.

* This is in response to people asking the obvious question of how to do it.
* Pointing to the download page, which probably should do a better job of distinguishing stable and develop versions at the top level.
@@ -4,6 +4,9 @@ Before submitting an Issue, please review the [Issue Guidelines](https://github. * Please check out if the develop version of Nuitka works better for you. + Download source, packages [from here](http://nuitka.net/pages/download.html) + where you will also find instructions how to do it via PyPI. + If you want to post a problem/bug, to help us understand and resolve your issue please check that you have provided at least the information below, and discard up to here:
Fixed some 404 end links

Fixed end links
@@ -297,7 +297,7 @@ The `create` subcommand makes a new workflow using the nf-core base template. With a given pipeline name, description and author, it makes a starter pipeline which follows nf-core best practices. After creating the files, the command initialises the folder as a git repository and makes an initial commit. This first "vanilla" commit which is identical to the output from the templating tool is important, as it allows us to keep your pipeline in sync with the base template in the future. -See the [nf-core syncing docs](http://nf-co.re/sync) for more information. +See the [nf-core syncing docs](https://nf-co.re/developers/sync) for more information. ```console $ nf-core create @@ -332,7 +332,7 @@ git push --set-upstream origin master You can then continue to edit, commit and push normally as you build your pipeline. -Please see the [nf-core documentation](https://nf-co.re/adding_pipelines) for a full walkthrough of how to create a new nf-core workflow. +Please see the [nf-core documentation](https://nf-co.re/developers/adding_pipelines) for a full walkthrough of how to create a new nf-core workflow. Note that if the required arguments for `nf-core create` are not given, it will interactively prompt for them. If you prefer, you can supply them as command line arguments. See `nf-core create --help` for more information.
fix(stock_a_ttm_lyr): fix stock_a_ttm_lyr interface fix stock_a_ttm_lyr interface
@@ -328,18 +328,20 @@ def stock_a_ttm_lyr() -> pd.DataFrame: """ url = "https://www.legulegu.com/api/stock-data/market-ttm-lyr" params = { - 'marketId': '5', + "marketId": "5", "token": token, } r = requests.get(url, params=params) data_json = r.json() temp_df = pd.DataFrame(data_json["data"]) - temp_df['date'] = pd.to_datetime( - temp_df["date"], unit="ms", utc=True).dt.tz_convert("Asia/Shanghai").dt.date - del temp_df['marketId'] + temp_df["date"] = ( + pd.to_datetime(temp_df["date"], unit="ms", utc=True) + .dt.tz_convert("Asia/Shanghai") + .dt.date + ) return temp_df -if __name__ == '__main__': +if __name__ == "__main__": stock_a_ttm_lyr_df = stock_a_ttm_lyr() print(stock_a_ttm_lyr_df)
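Editor's note: the interesting piece of this interface is the timestamp handling — millisecond epochs converted to Shanghai-local dates. A stand-alone sketch with made-up epoch values (assumes pandas is installed):

```python
import pandas as pd

frame = pd.DataFrame({"date": [1609459200000, 1612137600000]})  # ms since epoch
frame["date"] = (
    pd.to_datetime(frame["date"], unit="ms", utc=True)
    .dt.tz_convert("Asia/Shanghai")
    .dt.date
)
print(frame)  # dates rendered in Shanghai local time, e.g. 2021-01-01
```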
Update CHANGELOG for 1.8 Also fix line endings
+1.8: + * REMOVED SUPPORT FOR Python 2.6 + * LAST RELEASE TO SUPPORT 2.7 + * CHANGED REMOTE MONITOR PROTOCOL (security fix) + * Support Python 3 + * Add JSON logger + * Add 46elks SMS alerter + * Add PushBullet alerter + * Add Telegram alerter + * Add Notification Center alerter (for macOS) + * Add systemd unit monitor + * Add Home Automation monitor + * Add MQTT logger + * Improve Slack alerter's configurability + * Add basic HTTP Auth and timeouts to HTTP monitor + * Verify SSL certificates by defaults + * Add notification groups + * Add support for environment variables in config values and section names + * Add tests + * Add sample docker configurations + * Use Pipenv for requirements management + * Added example startup scripts including a Windows Service + * Improved logging output (to stdout, not the Logger class) + * IPv6 support for network Logger + * DB Loggers now auto-create the database/table as needed, and can update schema version + * Email Logger now supports multiple addresses + * DNS Monitor now supports multivalue responses + * Use JSON format for remote monitor protocol; more secure than pickle 1.7: + Add Slack alerter + Add Command monitor
Update passive_dns.py finished review
@@ -36,16 +36,19 @@ class PassiveDNS(Feed): context_domain = dict(source=self.name) context_ip = dict(source=self.name) - domain_name = Hostname.get_or_create(value=item["Domain Name"]) + domain_name = Hostname.get_or_create(value=item["Domain name"]) ip = Ip.get_or_create(value=item["Current IP address"]) infos_ip = pdns.get_reverse(item["Current IP address"]) - infos_domain = pdns.get_records(item["Domain Name"]) + infos_domain = pdns.get_records(item["Domain name"]) - company = Company.get_or_create(value=infos_domain["ip"]["organization"]) - context_ip["ISP"] = infos_ip["ip"]["ISP"] - context_ip["country"] = infos_ip["ip"]["country"] - context_ip["last_updated"] = infos_ip["last_updated_at"] - context_ip["first_updated"] = infos_ip["first_updated_at"] + infos = list(filter(lambda x:x['domain_name'] == item["Domain name"] , + infos_ip['resolution_list']))[0] + + company = Company.get_or_create(name=infos_domain["ip"]["location"]["organization"]) + context_ip["ISP"] = infos_domain["ip"]["location"]["ISP"] + context_ip["country"] = infos_domain["ip"]["location"]["country"] + context_ip["last_updated"] = infos["last_updated_at"] + context_ip["first_updated"] = infos["first_updated_at"] ip.active_link_to(company, "compagny", self.name) @@ -53,10 +56,10 @@ class PassiveDNS(Feed): context_domain["last updated"] = "%s : %s" % ( ip.value, - infos_ip["last_updated_at"], + infos["last_updated_at"], ) - ns_servers = list(filter(lambda x: x["type"] == "NS", infos_domain["records"])) + ns_servers = filter(lambda x: x["type"] == "NS", infos_domain["dn"]["records"]) for name_server in ns_servers: ns_serv = Hostname.get_or_create(value=name_server["target"]) ns_serv.active_link_to(domain_name, "NS", self.name, clean_old=False)
ScorerModel is no longer there

Update for consistency with the module documentation.
@@ -12,11 +12,11 @@ Wikipedia. Using a scorer_model to score a revision:: ``` import mwapi - from revscoring import ScorerModel + from revscoring import Model from revscoring.extractors.api.extractor import Extractor with open("models/enwiki.damaging.linear_svc.model") as f: - scorer_model = ScorerModel.load(f) + scorer_model = Model.load(f) extractor = Extractor(mwapi.Session(host="https://en.wikipedia.org", user_agent="revscoring demo"))
Python 3 fix for flask-bcrypt

Python 3 uses unicode strings, which need to be encoded for bcrypt.hashpw.
@@ -116,7 +116,7 @@ def hash_password(password): Secure hash of password. """ - return bcrypt.hashpw(password, bcrypt.gensalt(8)) + return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt(8)) def get_team(uid=None): """
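Editor's note: a minimal reproduction of the Python 3 behaviour this patch addresses, using the standalone bcrypt package (which exposes the same hashpw/gensalt calls); this is an illustrative sketch, not the platform's own code.

```python
import bcrypt

password = "s3cret"  # a str in Python 3; bcrypt.hashpw expects bytes
hashed = bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))

# Passing the str directly would raise a TypeError on Python 3,
# which is why the call site now encodes first.
print(bcrypt.checkpw(password.encode("utf-8"), hashed))  # True
```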
Address typos and layout

Resolve some minor typos and line up the long-line wrap point.
-For this project to work well in your pipeline, a commit convention -must be followed. +For this project to work well in your pipeline, a commit convention must be followed. -By default commitizen uses the known [conventional commits][conventional_commits], but you can create -your own following the docs information over [customization][customization]. +By default commitizen uses the known [conventional commits][conventional_commits], but +you can create your own following the docs information over at +[customization][customization]. ## Conventional commits If you are using [conventional commits][conventional_commits], the most important -thing to know is that you must begin your commits with at least one of these tags: `fix`, `feat`. And if you introduce a breaking change, then, you must +thing to know is that you must begin your commits with at least one of these tags: +`fix`, `feat`. And if you introduce a breaking change, then, you must add to your commit body the following `BREAKING CHANGE`. Using these 3 keywords will allow the proper identification of the semantic version. Of course, there are other keywords, but I'll leave it to the reader to explore them. ## Writing commits -Not to the important part, when writing commits, it's important to think about: +Now to the important part, when writing commits, it's important to think about: - Your future self - Your colleagues @@ -27,8 +28,10 @@ understand what happened. - **Keep the message short**: Makes the list of commits more readable (~50 chars). - **Talk imperative**: Follow this rule: `If applied, this commit will <commit message>` - **Think about the CHANGELOG**: Your commits will probably end up in the changelog - so try writing for it, but also keep in mind that you can skip sending commits to the CHANGELOG by using different keywords (like `build`). -- **Use a commit per new feature**: if you introduce multiple things related to the same commit, squash them. This is useful for auto-generating CHANGELOG. + so try writing for it, but also keep in mind that you can skip sending commits to the + CHANGELOG by using different keywords (like `build`). +- **Use a commit per new feature**: if you introduce multiple things related to the same + commit, squash them. This is useful for auto-generating CHANGELOG. | Do's | Don'ts | | ---- | ------ |
Wallet save

Adding a stake transaction failed to update the wallet files.
@@ -510,6 +510,8 @@ def post_block_logic(): f.send_stake_reveal_one() if chain.mining_address not in [s[0] for s in chain.next_stake_list_get()]: f.send_st_to_peers(chain.CreateStakeTransaction()) + wallet.f_save_winfo() + return
FIX: tweak the teleporter class decorator There were subtle bugs with multiply decorated sub-classes
@@ -24,21 +24,19 @@ def _maybe_use_teleporter(cls): @functools.wraps(orig_init) def __init__(self, *args, maybe_use_teleporter=True, **kwargs): orig_init(self, *args, **kwargs) - self._orig_init = orig_init - self._orig_call = orig_call - if Teleporter is not None and maybe_use_teleporter: + _orig_call = orig_call if not hasattr(self, '__teleporters'): self.__teleporters = {} + if Teleporter is not None and maybe_use_teleporter: + def inner_func(name, doc, *, target=self): - self._orig_call(target, name, doc) + _orig_call(target, name, doc) teleporter = Teleporter() teleporter.name_doc.connect(inner_func) self.__teleporters[cls_name] = teleporter - else: - self.__teleporter = None @functools.wraps(orig_call) def __call__(self, name, doc):
pkg_analysis_body_ada.mako: restore wrongly deleted comment line TN:
@@ -641,6 +641,7 @@ package body ${ada_lib_name}.Analysis is declare Unit : constant Analysis_Unit := Element (Cur); begin + -- As unloading a unit can change how any AST node property in the -- whole analysis context behaves, we have to invalidate caches. This -- is likely overkill, but kill all caches here as it's easy to do. Reset_Caches (Unit.Context);
Fix dagster_home in dagster-aws CLI

Test Plan: manually tested dagster-aws init

Reviewers: sashank
from botocore.exceptions import ClientError +from dagster import DagsterInvariantViolationError +from dagster.utils import dagster_home_dir + from .config import HostConfig from .term import Spinner, Term def get_dagster_home(): '''Ensures that the user has set a valid DAGSTER_HOME in environment and that it exists ''' - dagster_home = os.getenv('DAGSTER_HOME') - if not dagster_home: + try: + dagster_home = dagster_home_dir() + except DagsterInvariantViolationError: Term.fatal( '''DAGSTER_HOME is not set! Before continuing, set with e.g.: @@ -45,7 +49,7 @@ def get_dagster_home(): You may want to add this line to your .bashrc or .zshrc file. ''' ) - else: + Term.info('Found DAGSTER_HOME in environment at: %s\n' % dagster_home) if not os.path.isdir(dagster_home):
fix: filter git diff from commit message

When running `git commit --verbose` with commitizen as a pre-commit hook, the diff that git appends is incorrectly included in the message being checked. Everything that is auto-generated by git and normally excluded is now filtered out.

See issue:
@@ -98,8 +98,35 @@ class Check: # Get commit messages from git log (--rev-range) return git.get_commits(end=self.rev_range) - def _filter_comments(self, msg: str) -> str: - lines = [line for line in msg.split("\n") if not line.startswith("#")] + @staticmethod + def _filter_comments(msg: str) -> str: + """Filter the commit message by removing comments. + + When using `git commit --verbose`, we exclude the diff that is going to + generated, like the following example: + + ```bash + ... + # ------------------------ >8 ------------------------ + # Do not modify or remove the line above. + # Everything below it will be ignored. + diff --git a/... b/... + ... + ``` + + Args: + msg: The commit message to filter. + + Returns: + The filtered commit message without comments. + """ + + lines = [] + for line in msg.split("\n"): + if "# ------------------------ >8 ------------------------" in line: + break + if not line.startswith("#"): + lines.append(line) return "\n".join(lines) def validate_commit_message(self, commit_msg: str, pattern: str) -> bool:
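Editor's note: a quick usage sketch of the filtering behaviour described above, re-implemented stand-alone so it can be run without commitizen:

```python
SCISSORS = "# ------------------------ >8 ------------------------"

def filter_comments(msg: str) -> str:
    # Drop git's comment lines and everything after the scissors marker,
    # which is where `git commit --verbose` appends the staged diff.
    lines = []
    for line in msg.split("\n"):
        if SCISSORS in line:
            break
        if not line.startswith("#"):
            lines.append(line)
    return "\n".join(lines)

raw = "\n".join([
    "fix: parse config",
    "# Please enter the commit message for your changes.",
    SCISSORS,
    "# Do not modify or remove the line above.",
    "diff --git a/setup.py b/setup.py",
])
print(filter_comments(raw))  # -> "fix: parse config"
```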
Change AuoGOAL to AutoGOAL Fix small typo
@@ -153,7 +153,7 @@ These are our consistency rules: This documentation is available online at [autogoal.github.io](https://autogoal.github.io). Check the following sections: -- [**User Guide**](https://autogoal.github.io/guide/): Step-by-step showcase of everything you need to know to use AuoGOAL. +- [**User Guide**](https://autogoal.github.io/guide/): Step-by-step showcase of everything you need to know to use AutoGOAL. - [**Examples**](https://autogoal.github.io/examples/): The best way to learn how to use AutoGOAL by practice. - [**API**](https://autogoal.github.io/api/autogoal): Details about the public API for AutoGOAL.
only use stdin if it has a value Closes
@@ -86,7 +86,7 @@ def main(*args): ) print_help() raise SystemExit(1) - + if stdin_raw_text: import_path = save_stdin_source(stdin_raw_text) ### Handle ingesting urls from a remote file/feed
Remove unused type ignores The latest mypy/typeshed is more accurate and hence these warnings can be removed.
@@ -248,13 +248,13 @@ def find_package(name: str) -> Tuple[Optional[Path], Path]: package_path = Path.cwd() else: if hasattr(loader, "get_filename"): - filename = loader.get_filename(module) # type: ignore + filename = loader.get_filename(module) else: __import__(name) filename = sys.modules[name].__file__ package_path = Path(filename).resolve().parent if hasattr(loader, "is_package"): - is_package = loader.is_package(module) # type: ignore + is_package = loader.is_package(module) if is_package: package_path = Path(package_path).resolve().parent sys_prefix = Path(sys.prefix).resolve() @@ -389,7 +389,7 @@ def _split_blueprint_path(name: str) -> List[str]: return bps -def abort(code: int, *args: Any, **kwargs: Any) -> NoReturn: # type: ignore[misc] +def abort(code: int, *args: Any, **kwargs: Any) -> NoReturn: """Raise an HTTPException for the given status code.""" if current_app: current_app.aborter(code, *args, **kwargs)