message | diff
---|---|
Fix and clarify upgrade process
Fix package name to remove and clarify upgrade process | @@ -261,7 +261,8 @@ This project was forked from bread-and-pepper/django-userena v2.0.1.
To migrate from this project you just need to install the package
and update a key which was changed in django-guardian:
-1. Remove django-userena-ce from your installation
+1. Remove `django-userena` from your installation `pip uninstall django-userena`
2. `pip install django-userena-ce==3.1.0`
-3. Update your django settings, remove `ANONYMOUS_USER_ID` and set `ANONYMOUS_USER_NAME`
+3. Replace `django-userena` to `django-userena-ce` in your `INSTALLED_APPS`
+4. Update your django settings, remove `ANONYMOUS_USER_ID` and set `ANONYMOUS_USER_NAME`
|
Update dynamic_domain.txt
Based on the ```Subdomains``` section of the VT link, this appears to be dynamic DNS. | @@ -1945,3 +1945,9 @@ x443.pw
# Reference: https://www.virustotal.com/gui/domain/dnsabr.com/relations
dnsabr.com
+
+# Reference: https://twitter.com/P3pperP0tts/status/1133897358402564096
+# Reference: https://app.any.run/tasks/6306d7be-e338-4838-a863-9cd58c880c07/
+# Reference: https://www.virustotal.com/gui/domain/yahoodns.net/relations
+
+yahoodns.net
|
[CI] Install xgboost in Hexagon image
Needed for
cc | @@ -83,3 +83,7 @@ RUN bash /install/ubuntu_install_tflite.sh
# Install ONNX
COPY install/ubuntu_install_onnx.sh /install/ubuntu_install_onnx.sh
RUN bash /install/ubuntu_install_onnx.sh
+
+# xgboost (for tuning)
+COPY install/ubuntu_install_redis.sh /install/ubuntu_install_redis.sh
+RUN bash /install/ubuntu_install_redis.sh
|
MAINT: Improve error message dtype appearance
This changes the string conversion of an expected
dtype in an error message from e.g.
"<class 'numpy.float64'>" to "float64". | raise ValueError('Supplied output array is not contiguous, writable or aligned.')
if out_array.dtype != dtype:
raise TypeError('Supplied output array has the wrong type. '
- 'Expected {0}, got {1}'.format(dtype, out_array.dtype))
+ 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype))
if size is not None:
try:
tup_size = tuple(size)
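A minimal standalone check of the formatting difference the patch relies on (assumes only NumPy): wrapping the scalar type in `np.dtype()` prints the short dtype name rather than the class repr.
```python
# Illustrates why the error message above now wraps the expected dtype in np.dtype().
import numpy as np

print(str(np.float64))            # <class 'numpy.float64'>
print(str(np.dtype(np.float64)))  # float64
```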
|
Fixes Use LDAP groups to find permissions
When AUTH_LDAP_FIND_GROUP_PERMS is set to true, the filter used to find the
user's permissions is extended to search for all permissions assigned to
groups the LDAP user belongs to. | @@ -11,7 +11,7 @@ from users.models import ObjectPermission
from utilities.permissions import permission_is_exempt, resolve_permission, resolve_permission_ct
-class ObjectPermissionBackend(ModelBackend):
+class ObjectPermissionMixin():
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous:
@@ -20,13 +20,16 @@ class ObjectPermissionBackend(ModelBackend):
user_obj._object_perm_cache = self.get_object_permissions(user_obj)
return user_obj._object_perm_cache
+ def get_permission_filter(self, user_obj):
+ return Q(users=user_obj) | Q(groups__user=user_obj)
+
def get_object_permissions(self, user_obj):
"""
Return all permissions granted to the user by an ObjectPermission.
"""
# Retrieve all assigned and enabled ObjectPermissions
object_permissions = ObjectPermission.objects.filter(
- Q(users=user_obj) | Q(groups__user=user_obj),
+ self.get_permission_filter(user_obj),
enabled=True
).prefetch_related('object_types')
@@ -86,6 +89,10 @@ class ObjectPermissionBackend(ModelBackend):
return model.objects.filter(constraints, pk=obj.pk).exists()
+class ObjectPermissionBackend(ObjectPermissionMixin, ModelBackend):
+ pass
+
+
class RemoteUserBackend(_RemoteUserBackend):
"""
Custom implementation of Django's RemoteUserBackend which provides configuration hooks for basic customization.
@@ -163,8 +170,15 @@ class LDAPBackend:
"Required parameter AUTH_LDAP_SERVER_URI is missing from ldap_config.py."
)
- # Create a new instance of django-auth-ldap's LDAPBackend
- obj = LDAPBackend_()
+ # Create a new instance of django-auth-ldap's LDAPBackend with our own ObjectPermissions
+ class NBLDAPBackend(ObjectPermissionMixin, LDAPBackend_):
+ def get_permission_filter(self, user_obj):
+ permission_filter = Q(users=user_obj) | Q(groups__user=user_obj)
+ if self.settings.FIND_GROUP_PERMS:
+ permission_filter = permission_filter | Q(groups__name__in=user_obj.ldap_user.group_names)
+ return permission_filter
+
+ obj = NBLDAPBackend()
# Read LDAP configuration parameters from ldap_config.py instead of settings.py
settings = LDAPSettings()
|
Redundant word in error message
original error message: Result type can only be one of or an array of one of the following types **types**...
proposed error message: Result type can only be one of or an array of one of the following types... | @@ -812,7 +812,7 @@ def spark_udf(spark, model_uri, result_type="double"):
if not any([isinstance(elem_type, x) for x in supported_types]):
raise MlflowException(
message="Invalid result_type '{}'. Result type can only be one of or an array of one "
- "of the following types types: {}".format(str(elem_type), str(supported_types)),
+ "of the following types: {}".format(str(elem_type), str(supported_types)),
error_code=INVALID_PARAMETER_VALUE,
)
|
Fix test_raising_repr test
Closes
Python <3.11 versions depend on `exceptiongroup>=1.0.0rc8`, and version `1.0.1`,
released 6 days ago (2022/11/03), changed the output of exceptions as a side effect. | @@ -1664,15 +1664,7 @@ def test_raise_assertion_error_raising_repr(pytester: Pytester) -> None:
"""
)
result = pytester.runpytest()
- if sys.version_info >= (3, 11):
- # python 3.11 has native support for un-str-able exceptions
- result.stdout.fnmatch_lines(
- ["E AssertionError: <exception str() failed>"]
- )
- else:
- result.stdout.fnmatch_lines(
- ["E AssertionError: <unprintable AssertionError object>"]
- )
+ result.stdout.fnmatch_lines(["E AssertionError: <exception str() failed>"])
def test_issue_1944(pytester: Pytester) -> None:
|
Update model.py
Fix lint indentation | @@ -1798,12 +1798,10 @@ class Model:
metadata = utils.get_local_charm_metadata(charm_dir)
charm_series = charm_series or await get_charm_series(metadata,
self)
- charm_origin.base = utils.get_local_charm_base(charm_series, channel, metadata, charm_dir, client.Base)
- base = utils.get_local_charm_base(charm_series,
- channel,
- metadata,
- charm_dir,
- client.Base)
+ charm_origin.base = utils.get_local_charm_base(
+ charm_series, channel, metadata, charm_dir, client.Base)
+ base = utils.get_local_charm_base(
+ charm_series, channel, metadata, charm_dir, client.Base)
charm_origin.base = base
if not application_name:
application_name = metadata['name']
|
Fixed Typo in Code.gov description
Corrected the description from "Open Sourse" to "Open Source". | @@ -497,7 +497,7 @@ API | Description | Auth | HTTPS | CORS |
| [City, Lyon Opendata](https://data.beta.grandlyon.com/fr/accueil) | Lyon(FR) City Open Data | `apiKey` | Yes | Unknown |
| [City, Nantes Opendata](https://data.nantesmetropole.fr/pages/home/) | Nantes(FR) City Open Data | `apiKey` | Yes | Unknown |
| [City, Prague Opendata](http://opendata.praha.eu/en) | Prague(CZ) City Open Data | No | No | Unknown |
-| [Code.gov](https://code.gov) | The primary platform for Open Sourse and code sharing for the U.S. Federal Government | `apiKey` | Yes | Unknown |
+| [Code.gov](https://code.gov) | The primary platform for Open Source and code sharing for the U.S. Federal Government | `apiKey` | Yes | Unknown |
| [Colorado Data Engine](http://codataengine.org/) | Formatted and geolocated Colorado public data | No | Yes | Unknown |
| [Colorado Information Marketplace](https://data.colorado.gov/) | Colorado State Government Open Data | No | Yes | Unknown |
| [Data USA](https://datausa.io/about/api/) | US Public Data | No | Yes | Unknown |
|
Add `bytes` as legal type for `RawConfigParser.read`.
Add `bytes` as legal type for `RawConfigParser.read` in 3.7.
Closes | @@ -18,7 +18,9 @@ _converter = Callable[[str], Any]
_converters = Dict[str, _converter]
_T = TypeVar('_T')
-if sys.version_info >= (3, 6):
+if sys.version_info >= (3, 7):
+ _Path = Union[str, bytes, PathLike[str]]
+elif sys.version_info >= (3, 6):
_Path = Union[str, PathLike[str]]
else:
_Path = str
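For illustration, a small hedged usage sketch of what the widened stub permits on Python 3.7+: passing a bytes path to `RawConfigParser.read` (the throwaway ini file here is just an example).
```python
# Sketch only: writes a temporary ini file and reads it back via a bytes path,
# which the updated _Path alias now covers for Python >= 3.7.
import configparser
import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as f:
    f.write("[section]\nkey = value\n")
    path = f.name

parser = configparser.RawConfigParser()
parser.read(os.fsencode(path))       # bytes path
print(parser.get("section", "key"))  # value
os.unlink(path)
```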
|
Don't always subscript venv names when searching
Causes a bug on windows (verbally reported to me at work)
/cc if you can provide a log when you get a sec | @@ -263,17 +263,17 @@ class Project(object):
# Check for different capitalization of the same project.
from pipenv.patched.pew.pew import lsenvs
for env in lsenvs():
- env_name = env[:-9]
- if not (env[-9] != '-' and
- env[-8:].isalpha() and
- env_name.lower() != name.lower()):
+ try:
+ env_name, hash_ = env.rsplit('-', 1)
+ except ValueError:
+ continue
+ if len(hash_) != 8 or env_name.lower() != name.lower():
continue
return get_name(env_name, self.pipfile_location.replace(name, env_name))
# Use the default if no matching env exists.
return clean_name, encoded_hash
-
@property
def virtualenv_name(self):
sanitized, encoded_hash = self._get_virtualenv_hash(self.name)
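As a standalone illustration (not pipenv itself), the new matching logic boils down to splitting on the last dash and requiring an 8-character hash suffix:
```python
# Hypothetical helper mirroring the rsplit-based check added above; the 8-character
# suffix length comes from the diff.
def matches_project(env, name):
    try:
        env_name, hash_ = env.rsplit('-', 1)
    except ValueError:
        return False  # no dash at all, e.g. plain "myproject"
    return len(hash_) == 8 and env_name.lower() == name.lower()

print(matches_project("MyProject-AbCd1234", "myproject"))  # True
print(matches_project("myproject", "myproject"))           # False
print(matches_project("other-AbCd1234", "myproject"))      # False
```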
|
Hide the source code upload form by default
Fixes | @@ -82,6 +82,11 @@ ol.submit-addon-progress.unlisted {
margin-bottom: 1em;
}
+.addon-submission-process #option_yes_source,
+.addon-submission-process #option_no_source {
+ display: none;
+}
+
div.done-next-steps {
p {
margin-bottom: .3em;
|
Update javascript-env.yaml
Fix FP | @@ -2,7 +2,7 @@ id: javascript-env
info:
name: JavaScript Environment Config
- author: pdp
+ author: pdp,geeknik
severity: low
description: Detects common JavaScript environment configuration files.
tags: javascript,config,exposure
@@ -42,3 +42,12 @@ requests:
- "PASSWORD"
- "VERSION"
condition: or
+
+ - type: word
+ part: body
+ words:
+ - "Bootstrap"
+ - "jQuery"
+ - "CSS TRANSITION SUPPORT"
+ negative: true
+ condition: or
|
Update README.md
replace non working link | @@ -69,7 +69,7 @@ Or to configure an ESP32 to run as a Wifi access point:
meshtastic --set wifi_ap_mode true --set wifi_ssid mywifissid --set wifi_password mywifipsw
```
-For a full list of preferences which can be set (and their documentation) see [here](https://github.com/meshtastic/Meshtastic-protobufs/blob/master/docs/docs.md#.RadioConfig.UserPreferences).
+For a full list of preferences which can be set (and their documentation) see [here](https://meshtastic.org/docs/developers/protobufs/api#radioconfiguserpreferences).
### Changing channel settings
|
Cell metadata from ipynb on markdown cells as well
Compare cell content up to blank lines, and restore markdown cell metadata from ipynb | """Combine source and outputs from two notebooks
"""
+import re
from .cell_metadata import _IGNORE_METADATA
+_BLANK_LINE = re.compile(r'^\s*$')
+
+
+def same_content(ref, test):
+ """Is the content of two cells the same, except for blank lines?"""
+ ref = [line for line in ref.splitlines() if not _BLANK_LINE.match(line)]
+ test = [line for line in test.splitlines() if not _BLANK_LINE.match(line)]
+ return ref == test
+
def combine_inputs_with_outputs(nb_source, nb_outputs):
- '''Copy outputs of the second notebook into
- the first one, for cells that have matching inputs'''
+ """Copy outputs of the second notebook into
+ the first one, for cells that have matching inputs"""
- remaining_output_cells = nb_outputs.cells
- for cell in nb_source.cells:
- if cell.cell_type != 'code':
- continue
+ output_code_cells = [cell for cell in nb_outputs.cells if cell.cell_type == 'code']
+ output_other_cells = [cell for cell in nb_outputs.cells if cell.cell_type != 'code']
- # Remove outputs to warranty that trust of returned
- # notebook is that of second notebook
+ for cell in nb_source.cells:
+ # Remove outputs to warranty that trust of returned notebook is that of second notebook
+ if cell.cell_type == 'code':
cell.execution_count = None
cell.outputs = []
- # Fill outputs with that of second notebook
- for i, ocell in enumerate(remaining_output_cells):
- if ocell.cell_type == 'code' and cell.source == ocell.source:
+ for i, ocell in enumerate(output_code_cells):
+ if same_content(cell.source, ocell.source):
cell.execution_count = ocell.execution_count
cell.outputs = ocell.outputs
ometadata = ocell.metadata
- cell.metadata.update({k: ometadata[k] for k in ometadata
- if k in _IGNORE_METADATA})
- remaining_output_cells = remaining_output_cells[(i + 1):]
+ cell.metadata.update({k: ometadata[k] for k in ometadata if k in _IGNORE_METADATA})
+ output_code_cells = output_code_cells[(i + 1):]
+ break
+ else:
+ # Fill outputs with that of second notebook
+ for i, ocell in enumerate(output_other_cells):
+ if cell.cell_type == ocell.cell_type and same_content(cell.source, ocell.source):
+ ometadata = ocell.metadata
+ cell.metadata.update({k: ometadata[k] for k in ometadata if k in _IGNORE_METADATA})
+
+ output_other_cells = output_other_cells[(i + 1):]
break
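A quick self-contained check of the blank-line-insensitive comparison introduced above (the helper is restated here so the snippet runs on its own):
```python
import re

_BLANK_LINE = re.compile(r'^\s*$')

def same_content(ref, test):
    """Is the content of two cells the same, except for blank lines?"""
    ref = [line for line in ref.splitlines() if not _BLANK_LINE.match(line)]
    test = [line for line in test.splitlines() if not _BLANK_LINE.match(line)]
    return ref == test

print(same_content("a = 1\n\nb = 2", "a = 1\nb = 2"))  # True
print(same_content("a = 1", "a = 2"))                  # False
```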
|
Adding Fedora instructions
Updated to include Fedora Linux instructions | @@ -68,6 +68,32 @@ Ubuntu
3. When docker is done fetching the image, open ``http://localhost:8065/``
in your browser.
+Fedora
+^^^^^^
+
+1. Install and run the image in Docker using the following commands or use the full documentation here for extensive
+ installation instructions https://docs.docker.com/engine/installation/linux/fedora/
+
+ .. code:: bash
+
+ sudo dnf -y install dnf-plugins-core
+ sudo dnf config-manager \
+ --add-repo \
+ https://download.docker.com/linux/fedora/docker-ce.repo
+ sudo dnf install docker-ce docker-compose git # Accepting the new docker repository key
+ sudo usermod -aG docker <username>
+ sudo systemctl start docker
+ cd mattermost-docker
+ sudo docker-compose build
+
+ 2. Start the container:
+
+ .. code:: bash
+
+ sudo docker-compose up -d
+
+ 3. When the startup is complete, visit https://localhost/ in your browser.
+
Arch
^^^^
|
Fixed "create_attach_volumes" salt-cloud action for GCP
Fixes
- "create_attach_volumes" salt-cloud works with mandatory and
default arguments as mentioned in docstring of the function. | @@ -2400,9 +2400,10 @@ def create_attach_volumes(name, kwargs, call=None):
'-a or --action.'
)
- volumes = kwargs['volumes']
+ volumes = literal_eval(kwargs['volumes'])
node = kwargs['node']
- node_data = _expand_node(node)
+ conn = get_conn()
+ node_data = _expand_node(conn.ex_get_node(node))
letter = ord('a') - 1
for idx, volume in enumerate(volumes):
@@ -2412,9 +2413,9 @@ def create_attach_volumes(name, kwargs, call=None):
'disk_name': volume_name,
'location': node_data['extra']['zone']['name'],
'size': volume['size'],
- 'type': volume['type'],
- 'image': volume['image'],
- 'snapshot': volume['snapshot']
+ 'type': volume.get('type', 'pd-standard'),
+ 'image': volume.get('image', None),
+ 'snapshot': volume.get('snapshot', None)
}
create_disk(volume_dict, 'function')
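A minimal sketch of why the volumes argument goes through `literal_eval` and why `.get()` defaults are used (the volume list here is made up):
```python
# The action receives its kwargs as strings, so the list of volume dicts has to be
# parsed back; optional keys then fall back to defaults as in the patch.
from ast import literal_eval

volumes = literal_eval("[{'size': 10}, {'size': 20, 'type': 'pd-ssd'}]")
for volume in volumes:
    print(volume['size'], volume.get('type', 'pd-standard'), volume.get('image'))
```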
|
fix random plot styles for plot styles not in dictionary
random was not imported, so plot generation was failing for new models.
Additionally, I made the randomly chosen plot style deterministic based on the model family name, so that it is consistent across plots. | @@ -13,6 +13,7 @@ from bigbench.api import results
import numpy as np
import dataclasses
import os
+import random
sns.set(font_scale=1.2)
sns.set_style("whitegrid")
@@ -99,9 +100,12 @@ def plot_task(
model_plot_settings = plot_settings.get(model_family, DEFAULT_PLOT_SETTINGS)
- palette = model_plot_settings.palette or random.choice(PALETTES)
- linestyle = model_plot_settings.linestyle or random.choice(LINESTYLES)
- marker = model_plot_settings.marker or random.choice(MARKERS)
+ # deterministic plot style for each model family
+ rnd = random.Random(model_family)
+
+ palette = model_plot_settings.palette or rnd.choice(PALETTES)
+ linestyle = model_plot_settings.linestyle or rnd.choice(LINESTYLES)
+ marker = model_plot_settings.marker or rnd.choice(MARKERS)
model_label = model_plot_settings.label or model_family
for shot, color in zip(shots, sns.color_palette(palette, len(shots))):
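The deterministic choice relies on seeding a private `random.Random` with the family name; a tiny standalone demonstration (palette names are placeholders):
```python
import random

PALETTES = ["viridis", "magma", "plasma", "cividis"]

# Same seed -> same pick, on every run and across plots.
print(random.Random("my-model-family").choice(PALETTES))
print(random.Random("my-model-family").choice(PALETTES))
```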
|
Add a marker to the generated C code if the NumPy declarations came from Cython/Includes/numpy/.
See | @@ -23,6 +23,15 @@ from cpython.object cimport PyObject, PyTypeObject
from cpython.type cimport type
cimport libc.stdio as stdio
+
+cdef extern from *:
+ # Leave a marker that the NumPy declarations came from Cython and not from NumPy itself.
+ # See https://github.com/cython/cython/issues/3573
+ """
+ /* Using NumPy API declarations from "Cython/Includes/numpy/" */
+ """
+
+
cdef extern from "Python.h":
ctypedef int Py_intptr_t
|
removing 'generated on' comment
Since the file is not auto-generated and we're changing it in PRs, I found this comment misleading. | // Karma configuration
-// Generated on Thu Feb 11 2016 12:59:11 GMT-0800 (PST)
var RewirePlugin = require("rewire-webpack");
var _ = require("lodash");
var webpack_config = _.clone(require("../frontend_build/src/webpack.config.base"));
|
BUG: distutils, place fortranobject files in subfolder
Placing them all under the same name in the top level folder breaks when
using the parallel extension compilation option of python 3.5. | @@ -569,14 +569,14 @@ def f2py_sources(self, sources, extension):
if not os.path.isfile(target_file):
raise DistutilsError("f2py target file %r not generated" % (target_file,))
- target_c = os.path.join(self.build_src, 'fortranobject.c')
- target_h = os.path.join(self.build_src, 'fortranobject.h')
+ build_dir = os.path.join(self.build_src, target_dir)
+ target_c = os.path.join(build_dir, 'fortranobject.c')
+ target_h = os.path.join(build_dir, 'fortranobject.h')
log.info(" adding '%s' to sources." % (target_c))
new_sources.append(target_c)
- if self.build_src not in extension.include_dirs:
- log.info(" adding '%s' to include_dirs." \
- % (self.build_src))
- extension.include_dirs.append(self.build_src)
+ if build_dir not in extension.include_dirs:
+ log.info(" adding '%s' to include_dirs." % (build_dir))
+ extension.include_dirs.append(build_dir)
if not skip_f2py:
import numpy.f2py
|
Switch to raw urls on Readme
In hopes they will show up on package control | @@ -20,11 +20,11 @@ Tested against language servers for javascript/typescript, python, c/c++ (clangd
Rich hover support from js/ts language server
-
+
Cycle through diagnostics shown in output panel with `F4`. Code actions supplied by tslint language server plugin, applied by `super+.`
-
+
## Installing
|
Update indexing.rst
Grammar error | @@ -371,7 +371,7 @@ Vectorized indexing also works with ``isel``, ``loc``, and ``sel``:
ind = xr.DataArray([['a', 'b'], ['b', 'a']], dims=['a', 'b'])
da.loc[:, ind] # same as da.sel(y=ind)
-These methods may and also be applied to ``Dataset`` objects
+These methods may also be applied to ``Dataset`` objects
.. ipython:: python
|
[Fix]: Improve documentation
Fix an error in training_step_end() documentation | @@ -273,8 +273,8 @@ In this case, implement the `training_step_end` method
return {'loss': loss, 'pred': pred}
def training_step_end(self, batch_parts):
- gpu_0_prediction = batch_parts.pred[0]['pred']
- gpu_1_prediction = batch_parts.pred[1]['pred']
+ gpu_0_prediction = batch_parts[0]['pred']
+ gpu_1_prediction = batch_parts[1]['pred']
# do something with both outputs
return (batch_parts[0]['loss'] + batch_parts[1]['loss']) / 2
|
Update changelog entry for
Per | @@ -21,7 +21,7 @@ Features
Bugfixes
--------
-- Prevent device names from appearing in device list updates when `allow_device_name_lookup_over_federation` is `false`. ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
+- Prevent device names from appearing in device list updates in some situations when `allow_device_name_lookup_over_federation` is `false`. (This is not comprehensive: see [\#13114](https://github.com/matrix-org/synapse/issues/13114).) ([\#10015](https://github.com/matrix-org/synapse/issues/10015))
- Fix a long-standing bug where redactions were not being sent over federation if we did not have the original event. ([\#13813](https://github.com/matrix-org/synapse/issues/13813))
- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled or have their new content applied. ([\#14034](https://github.com/matrix-org/synapse/issues/14034))
- Fix a bug introduced in Synapse 1.53.0 when querying `/publicRooms` with both a `room_type` filter and a `third_party_instance_id`. ([\#14053](https://github.com/matrix-org/synapse/issues/14053))
|
Add logic to ensure the working dir is not a subdir of imagery dir.
This is to make sure that when we do the traversal to find the NTF
imagery we don't include output files from the computation pipeline, if
the script is restarted after some of these files are already present. | @@ -18,16 +18,28 @@ import fit_dtm
import orthorectify
-def create_working_dir(working_dir):
+def create_working_dir(working_dir, imagery_dir):
"""
Create working directory for running algorithms
All files generated by the system are written to this directory.
+
+ :param working_dir: Directory to create for work. Cannot be a subdirectory of `imagery_dir`.
+ This is to avoid adding the work images to the pipeline when traversing the `imagery_dir`.
+ :type working_dir: str
+
+ :param imagery_dir: Directory where imagery is stored.
+ :type imagery_dir: str
+
+ :raises ValueError: If `working_dir` is a subdirectory of `imagery_dir`.
"""
if not working_dir:
date_str = str(datetime.datetime.now().timestamp())
working_dir = 'danesfield-' + date_str.split('.')[0]
if not os.path.isdir(working_dir):
os.mkdir(working_dir)
+ if os.path.realpath(imagery_dir) in os.path.realpath(working_dir):
+ raise ValueError('The working directory ({}) is a subdirectory of the imagery directory '
+ '({}).'.format(working_dir, imagery_dir))
return working_dir
@@ -91,7 +103,8 @@ def main(config_fpath):
# This either parses the working directory from the configuration file and passes it to
# create the working directory or passes None and so some default working directory is
# created (based on the time of creation)
- working_dir = create_working_dir(config['paths'].get('work_dir'))
+ working_dir = create_working_dir(config['paths'].get('work_dir'),
+ config['paths']['imagery_dir'])
aoi_name = config['aoi']['name']
aoi_bounds = map(int, config['aoi']['bounds'].split(' '))
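The guard is a simple realpath containment check; a standalone illustration with made-up paths:
```python
import os

imagery_dir = "/data/aoi/imagery"
working_dir = "/data/aoi/imagery/work"

# True here, so the patched create_working_dir would raise ValueError.
print(os.path.realpath(imagery_dir) in os.path.realpath(working_dir))
```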
|
Bind: enforce logic variable operands for left operands
For GitHub issue
TN: | @@ -36,8 +36,8 @@ def untyped_literal_expr(expr_str, operands=[]):
@dsl_document
class Bind(AbstractExpression):
"""
- Bind the two logic variables `from_expr` and `to_expr`, through a property
- call.
+ Bind the two logic variables `from_expr` and `to_expr`, or one logic
+ variable `from_expr` and an entity `to_expr`, through a property call.
If provided, `conv_prop` must be a property that takes no argument and that
return any ``ASTNode`` subclass. It is used to convert `from_expr` into a
@@ -236,38 +236,36 @@ class Bind(AbstractExpression):
else:
pred_func = untyped_literal_expr('No_Logic_Converter_Default')
- def construct_operand(op):
- from langkit.expressions import Cast, make_as_entity
- expr = construct(op)
-
- if expr.type.matches(T.root_node):
- expr = make_as_entity(expr)
- expr.create_result_var('Ent')
-
+ # Left operand must be a logic variable. Make sure the resulting
+ # equation will work on a clean logic variable.
+ lhs = ResetLogicVar(construct(self.from_expr, T.LogicVarType))
+
+ # Second one can be either a logic variable or an entity (or an AST
+ # node that is promoted to an entity).
+ rhs = construct(self.to_expr)
+
+ if rhs.type.matches(T.LogicVarType):
+ # For this operand too, make sure it will work on a clean logic
+ # variable.
+ rhs = ResetLogicVar(rhs)
+ elif rhs.type.matches(T.root_node):
+ from langkit.expressions import make_as_entity
+ rhs = make_as_entity(rhs)
+ else:
check_source_language(
- expr.type == T.LogicVarType
- or expr.type.matches(T.root_node.entity),
-
- 'Operands to a logic bind operator should be either'
- ' a logic variable or an entity, got {}'.format(expr.type)
+ rhs.type.matches(T.root_node.entity),
+ 'Right operand must be either a logic variable or an entity,'
+ ' got {}'.format(rhs.type.dsl_name)
)
# Because of Ada OOP typing rules, for code generation to work
- # properly, make sure the type of `expr` is the root node entity.
+ # properly, make sure the type of `rhs` is the root node entity.
if (
- expr.type.matches(T.root_node.entity)
- and expr.type is not T.root_node.entity
+ rhs.type.matches(T.root_node.entity)
+ and rhs.type is not T.root_node.entity
):
- expr = Cast.Expr(expr, T.root_node.entity)
-
- # Make sure this equation will work on a clean logic variable
- if expr.type.matches(T.LogicVarType):
- expr = ResetLogicVar(expr)
-
- return expr
-
- lhs = construct_operand(self.from_expr)
- rhs = construct_operand(self.to_expr)
+ from langkit.expressions import Cast
+ rhs = Cast.Expr(rhs, T.root_node.entity)
return Bind.Expr(self.conv_prop, self.eq_prop, cprop_uid, eprop_uid,
lhs, rhs, pred_func, abstract_expr=self)
|
[ENH] Proximity forest faster test param settings
This PR changes test parameters of `ProximityForest` and `ProximityTree` to faster settings (reduced recursion depth), for lower runtime.
It leaves the special "unit test" parameters unchanged. | @@ -1251,7 +1251,7 @@ class ProximityTree(BaseClassifier):
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`.
"""
- return {"max_depth": 2, "n_stump_evaluations": 1}
+ return {"max_depth": 1, "n_stump_evaluations": 1}
class ProximityForest(BaseClassifier):
@@ -1572,7 +1572,7 @@ class ProximityForest(BaseClassifier):
if parameter_set == "results_comparison":
return {"n_estimators": 3, "max_depth": 2, "n_stump_evaluations": 2}
else:
- return {"n_estimators": 2, "max_depth": 2, "n_stump_evaluations": 1}
+ return {"n_estimators": 2, "max_depth": 1, "n_stump_evaluations": 1}
# start of util functions
|
Fix `search` when the result is empty
Return an empty result instead of raising | @@ -828,6 +828,9 @@ class Client(object):
)
result = j["payload"]["search_snippets"][query]
+ if not result:
+ return {}
+
if fetch_messages:
search_method = self.searchForMessages
else:
|
BUG: Complex input gives complex result(fix
Complex input for either the matrix A or the vector b should give
a complex vector as a result. This fixes | @@ -108,6 +108,11 @@ def spsolve(A, b, permc_spec=None, use_umfpack=True):
A.sort_indices()
A = A.asfptype() # upcast to a floating point format
+ result_dtype = np.promote_types(A.dtype, b.dtype)
+ if A.dtype != result_dtype:
+ A = A.astype(result_dtype)
+ if b.dtype != result_dtype:
+ b = b.astype(result_dtype)
# validate input shapes
M, N = A.shape
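The fix hinges on `np.promote_types` picking a common dtype; for example:
```python
import numpy as np

# A real matrix with a complex right-hand side must be solved in complex arithmetic.
print(np.promote_types(np.float64, np.complex128))  # complex128
print(np.promote_types(np.float32, np.float64))     # float64
```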
|
fix introduced VAR-related error
Error was introduced by changing a signature in var_model.py. (Errors started to show up in test_var.py) Now the signature has been changed again to support not only the parameters needed by the VECM framework, but to also work again with the tests for VAR. | @@ -194,7 +194,7 @@ def mse(ma_coefs, sigma_u, steps):
return forc_covs
-def forecast(y, coefs, trend_coefs, steps, exog):
+def forecast(y, coefs, trend_coefs, steps, exog=None):
"""
Produce linear minimum MSE forecast
|
templates/pkg_analysis_body_ada.mako: minor reformatting
TN: | @@ -470,9 +470,7 @@ package body ${ada_lib_name}.Analysis is
-- Remove --
------------
- procedure Remove (Context : Analysis_Context;
- File_Name : String)
- is
+ procedure Remove (Context : Analysis_Context; File_Name : String) is
use Units_Maps;
Cur : Cursor := Context.Units.Find
|
Add test for do_deactivate_realm exit if deactivated.
Rename existing test_do_deactivate_realm to indicate test purpose
is to confirm user realm cache clears when a realm is deactivated. | @@ -128,7 +128,7 @@ class RealmTest(ZulipTestCase):
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
- def test_do_deactivate_realm(self):
+ def test_do_deactivate_realm_clears_user_realm_cache(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
@@ -141,6 +141,18 @@ class RealmTest(ZulipTestCase):
user = self.example_user('hamlet')
self.assertTrue(user.realm.deactivated)
+ def test_do_deactivate_realm_on_deactived_realm(self):
+ # type: () -> None
+ """Ensure early exit is working in realm deactivation"""
+ realm = get_realm('zulip')
+ self.assertFalse(realm.deactivated)
+
+ do_deactivate_realm(realm)
+ self.assertTrue(realm.deactivated)
+
+ do_deactivate_realm(realm)
+ self.assertTrue(realm.deactivated)
+
def test_change_realm_default_language(self):
# type: () -> None
new_lang = "de"
|
A couple of LSF fixes
- bsub needs to have input piped from file
- `-n` seems to be more supported than --nnodes | import functools
import re
+import subprocess
import time
import reframe.core.runtime as rt
@@ -31,14 +32,11 @@ class LsfJobScheduler(PbsJobScheduler):
)
def emit_preamble(self, job):
- num_tasks_per_node = job.num_tasks_per_node or 1
- num_nodes = job.num_tasks // num_tasks_per_node
-
preamble = [
self._format_option(f'-J {job.name}'),
self._format_option(f'-o {job.stdout}'),
self._format_option(f'-e {job.stderr}'),
- self._format_option(f'-nnodes {num_nodes}')
+ self._format_option(f'-n {job.num_tasks}')
]
# add job time limit in minutes
@@ -61,10 +59,17 @@ class LsfJobScheduler(PbsJobScheduler):
return preamble
def submit(self, job):
- cmd = f'bsub {job.script_filename}'
- completed = _run_strict(cmd, timeout=self._submit_timeout)
+ #cmd = f'bsub < {job.script_filename}'
+ cmd = f'bsub < {job.script_filename}'
+ with open(job.script_filename, 'r') as f:
+ #completed = subprocess.Popen(args='bsub', stdin=f, stdout=subprocess.PIPE)
+ completed = subprocess.run(args='bsub', stdin=f, capture_output=True)
+ print(f'stdout: {completed.stdout}')
+ print(f'stderr: {completed.stderr}')
+
+ #completed = _run_strict(cmd, timeout=self._submit_timeout, shell=True)
jobid_match = re.search(r'^Job <(?P<jobid>\S+)> is submitted',
- completed.stdout)
+ completed.stdout.decode('utf-8'))
if not jobid_match:
raise JobSchedulerError('could not retrieve the job id '
'of the submitted job')
|
todo_tools_1,3:
[x] todo_tools_1: provide example for `tools.get_full_data_path`
[x] todo_tools_3: provide examples for `tools.get_pythainlp_path` | @@ -50,6 +50,16 @@ def get_pythainlp_data_path() -> str:
def get_pythainlp_path() -> str:
"""
- Return full path of PyThaiNLP code
+ This function returns full path of PyThaiNLP code
+
+ :return: full path of :mod:`pythainlp` code
+ :rtype: str
+
+ :Example:
+
+ >>> from pythainlp.tools import get_pythainlp_path
+ >>>
+ >>> get_pythainlp_path()
+ '/usr/local/lib/python3.6/dist-packages/pythainlp'
"""
return os.path.dirname(pythainlp.__file__)
|
Don't check /dev/kvm access, only check existence
This avoids a race condition between swarming_bot and udev, which is responsible
for setting the correct ACL of /dev/kvm. | @@ -215,9 +215,13 @@ def get_ssd():
return ()
[email protected]
def get_kvm():
- """Check whether KVM is supported."""
- return os.path.exists('/dev/kvm') and os.access("/dev/kvm", os.R_OK|os.W_OK)
+ """Check whether KVM is available."""
+ # We only check the file existence, not whether we can access it. This avoids
+ # the race condition between swarming_bot and udev which is responsible for
+ # setting the correct ACL of /dev/kvm.
+ return os.path.exists('/dev/kvm')
## Mutating code.
|
use conda/environment.yml when building the package
test the package in a new environment after the upload | @@ -23,6 +23,7 @@ jobs:
python-version: ${{ matrix.python-version }}
activate-environment: matchms
auto-activate-base: false
+ environment-file: conda/environment.yml
channels: conda-forge,bioconda,nlesc
- name: Install required packages
shell: bash -l {0}
@@ -48,3 +49,22 @@ jobs:
run: |
export BUILD_FOLDER=./output
anaconda -t ${{ secrets.ANACONDA_TOKEN }} upload -u nlesc --force $BUILD_FOLDER/noarch/*.tar.bz2
+ - uses: goanpeca/setup-miniconda@v1
+ with:
+ miniconda-version: 'latest'
+ auto-update-conda: true
+ python-version: ${{ matrix.python-version }}
+ activate-environment: matchms_test
+ auto-activate-base: false
+ channels: conda-forge,bioconda,nlesc
+ - name: Install the conda package from anaconda repository
+ shell: bash -l {0}
+ run: |
+ conda install conda build -c bioconda -c conda-forge -c nlesc matchms
+ - name: Show conda config
+ shell: bash -l {0}
+ run: |
+ conda info
+ conda list
+ conda config --show-sources
+ conda config --show
|
add more cache-control headers to gcs
Make sure cache-control is private, so there are no intervening downstream caches. | @@ -111,7 +111,7 @@ class GoogleCloudStorage(Storage):
# set a max-age of 5 if we're uploading to content/databases
if self.is_database_file(name):
- blob.cache_control = 'public, max-age={}'.format(CONTENT_DATABASES_MAX_AGE)
+ blob.cache_control = 'private, max-age={}, no-transform'.format(CONTENT_DATABASES_MAX_AGE)
blob.upload_from_file(
fobj,
|
Fix writing LP to file
Before, the conversion to a byte string was performed twice. This caused a Python error. | @@ -3919,7 +3919,7 @@ cdef class Model:
:param filename: file name (Default value = "LP.lp")
"""
absfile = str_conversion(abspath(filename))
- PY_SCIP_CALL( SCIPwriteLP(self._scip, str_conversion(absfile)) )
+ PY_SCIP_CALL( SCIPwriteLP(self._scip, absfile) )
def createSol(self, Heur heur = None):
"""Create a new primal solution.
|
Use minio server again
* Revert "Run minio in nas mode (#2026)"
This reverts commit
* Use latest minio release | @@ -29,7 +29,7 @@ services:
command: >-
-c "
mkdir -p /data/grand-challenge-private/
- && minio --compat gateway nas /data
+ && minio --compat server /data
"
restart: always
@@ -46,7 +46,7 @@ services:
command: >-
-c "
mkdir -p /data/grand-challenge-protected/
- && minio --compat gateway nas --address :9081 /data
+ && minio --compat server --address :9081 /data
"
restart: always
@@ -62,7 +62,7 @@ services:
command: >-
-c "
mkdir -p /data/grand-challenge-public/
- && minio --compat gateway nas /data
+ && minio --compat server /data
"
restart: always
|
portico: Hide realm details when registering new realm.
If there was a realm on the base URL, its logo and name were being
displayed when registering a new realm (i.e. the page where realm details
are entered, after confirming email). This commit prevents the realm
details from being displayed.
Fixes | @@ -27,7 +27,7 @@ Form is validated both client-side using jquery-validate (see signup.js) and ser
{{ csrf_input }}
<section class="user-registration">
- {% if realm_name %}
+ {% if realm_name and not creating_new_team %}
<img class="avatar inline-block" src="{{ realm_icon }}" alt="" />
<div class="info-box inline-block">
<div class="organization-name">{{ realm_name }}</div>
|
Fix tutorial completion
Set tutorial_state to an empty list if it's not found. | @@ -229,7 +229,7 @@ class Worker:
raise ex.BannedAccountException
player_data = get_player['player_data']
- tutorial_state = player_data['tutorial_state']
+ tutorial_state = player_data.get('tutorial_state', [])
self.item_capacity = player_data['max_item_storage']
if 'created' not in self.account:
self.account['created'] = player_data['creation_timestamp_ms'] / 1000
|
[plot][CurveStatsWidget] Manage the case we try to set a dict of ROI.
This was missing; it is needed to keep getRois / setRois coherent. | @@ -572,6 +572,9 @@ class ROITable(TableWidget):
# backward compatibility since 0.10.0
if isinstance(rois, dict):
for roiName, roi in rois.items():
+ if isinstance(roi, ROI):
+ _roi = roi
+ else:
roi['name'] = roiName
_roi = ROI._fromDict(roi)
self.addRoi(_roi)
|
Let's store job storage in a file again!
Avoids a bug where we have race conditions with an in-memory DB. | @@ -15,6 +15,7 @@ import requests
from django.core.management import call_command
from django.http import Http404
from django.utils.translation import ugettext as _
+from django.conf import settings
from kolibri.content.models import ChannelMetadataCache
from kolibri.content.utils.channels import get_mounted_drives_with_channel_info
from kolibri.content.utils.paths import get_content_database_file_url
@@ -28,7 +29,7 @@ from .permissions import IsDeviceOwnerOnly
logging = logger.getLogger(__name__)
-client = SimpleClient(app="kolibri")
+client = SimpleClient(app="kolibri", storage_path=settings.QUEUE_JOB_STORAGE_PATH)
# all tasks are marked as remote imports for nwo
TASKTYPE = "remoteimport"
|
integrations: Use phrase_match instead of fuzzysearch.
Fixes
fuzzysearch matched the query if the query letters appeared in sequence.
Here we use the extracted phrase_match to match the query against the prefixes
of words. | -import fuzzysearch from 'fuzzysearch';
import blueslip from './../blueslip';
import { path_parts } from './landing-page';
@@ -117,8 +116,9 @@ var update_integrations = _.debounce(function () {
}
if (!$integration.hasClass('integration-create-your-own')) {
+ var display_name = INTEGRATIONS[$integration.data('name')];
var display =
- fuzzysearch(state.query, $integration.data('name').toLowerCase()) &&
+ common.phrase_match(state.query, display_name) &&
($integration.data('categories').indexOf(CATEGORIES[state.category]) !== -1 ||
state.category === 'all');
|
SDK - Client - Fixed client on Windows
Also fixed the client for systems that do not have $HOME env var. | @@ -28,7 +28,7 @@ import json
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
-LOCAL_KFP_CREDENTIAL = os.path.join(os.environ['HOME'], '.config/kfp/credentials.json')
+LOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')
def get_gcp_access_token():
"""Get and return GCP access token for the current Application Default
|
addresses: Add IPAddr.is_broadcast
This sort of completes the picture that .is_multicast starts. We
should probably add a similar property for EthAddr and IPv6 addresses. | @@ -364,6 +364,10 @@ class IPAddr (object):
netmask = cidr_to_netmask(prefix).unsigned_h
return (IPAddr(self.unsigned_h & netmask, networkOrder=False),prefix)
+ @property
+ def is_broadcast (self):
+ return self == IP_BROADCAST
+
@property
def is_multicast (self):
return ((self.toSigned(networkOrder = False) >> 24) & 0xe0) == 0xe0
|
[Test] Fix flaky OSX shuffle
Seems like the last RPC is failing after shuffle succeeds. Adding retry to fix the issue. | @@ -311,7 +311,17 @@ def run(
time.sleep(0.5)
print()
- print(ray._private.internal_api.memory_summary(stats_only=True))
+
+ summary = None
+ for i in range(5):
+ try:
+ summary = ray._private.internal_api.memory_summary(stats_only=True)
+ except Exception:
+ time.sleep(1)
+ pass
+ if summary:
+ break
+ print(summary)
print()
print(
"Shuffled", int(sum(output_sizes) / (1024 * 1024)), "MiB in", delta, "seconds"
|
fix the description of create_shared
The type of parameter options should be a str list. | @@ -22,8 +22,8 @@ def create_shared(output,
objects : list
List of object files.
- options : str
- The additional options.
+ options : list
+ The list of additional options string.
cc : str, optional
The compile string.
|
Loosen type annotations around hook decorators
Summary: Mypy 0.812 breaks our hook decorator type annotations. This is because mypy cannot infer that pipeline defs are only used with hooks when the hook being invoked is already a hook definition.
Test Plan: mypy
Reviewers: alangenfeld, max | @@ -36,7 +36,7 @@ def __init__(
required_resource_keys, "required_resource_keys"
)
- def __call__(self, fn: Callable[["HookContext", List["DagsterEvent"]], Any]) -> HookDefinition:
+ def __call__(self, fn) -> HookDefinition:
check.callable_param(fn, "fn")
@@ -48,7 +48,7 @@ def __call__(self, fn: Callable[["HookContext", List["DagsterEvent"]], Any]) ->
_validate_hook_fn_params(fn, expected_positionals)
hook_def = HookDefinition(
- name=self.name,
+ name=self.name or "",
hook_fn=fn,
required_resource_keys=self.required_resource_keys,
)
@@ -107,10 +107,7 @@ def slack_on_materializations(context, event_list):
def success_hook(
name: Union[Optional[str], Callable[..., Any]] = None,
required_resource_keys: Optional[AbstractSet[str]] = None,
-) -> Union[
- Union[HookDefinition, _Hook],
- Callable[[Callable[["HookContext"], Any]], Union[HookDefinition, _Hook]],
-]:
+) -> Union[Union[HookDefinition, _Hook], Callable[..., Union[HookDefinition, _Hook]],]:
"""Create a hook on step success events with the specified parameters from the decorated function.
Args:
@@ -134,7 +131,7 @@ def do_something_on_success(context):
"""
- def wrapper(fn: Callable[["HookContext"], Any]) -> Union[HookDefinition, _Hook]:
+ def wrapper(fn) -> Union[HookDefinition, _Hook]:
check.callable_param(fn, "fn")
|
hv: Use hv_write_hcr() for initial configuration
This might make things work if GXF is already enabled, maybe. | @@ -31,7 +31,7 @@ void hv_init(void)
hv_pt_init();
// Configure hypervisor defaults
- msr(HCR_EL2, HCR_API | // Allow PAuth instructions
+ hv_write_hcr(HCR_API | // Allow PAuth instructions
HCR_APK | // Allow PAuth key registers
HCR_TEA | // Trap external aborts
HCR_E2H | // VHE mode (forced)
|
Update code owners of gcs and workflow
Update the code owners to include people working on this module. | # ==== Ray core ====
# API compatibility
-/src/ray/protobuf/common.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen
-/src/ray/protobuf/gcs.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen
-/src/ray/protobuf/gcs_service.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen
+/src/ray/protobuf/common.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 @mwtian
+/src/ray/protobuf/gcs.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 @mwtian
+/src/ray/protobuf/gcs_service.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 @mwtian
/dashboard/modules/snapshot @wuisawesome @ijrsvt @edoakes @alanwguo @architkulkarni
/python/ray/autoscaler/_private/monitor.py @wuisawesome @DmitriGekhtman
# ==== Libraries and frameworks ====
-# Ray tune.
-/python/ray/tune/ @ray-project/ray-tune
-
# Ray data.
/python/ray/data/ @ericl @scv119 @clarkzinzow @jjyao
/doc/source/data/ @ericl @scv119 @clarkzinzow @jjyao
# Ray workflows.
-/python/ray/workflow/ @ericl @iycheng
-/doc/source/workflows/ @ericl @iycheng
+/python/ray/workflow/ @ericl @iycheng @stephanie-wang @suquark
+/doc/source/workflows/ @ericl @iycheng @stephanie-wang @suquark
# RLlib.
/rllib/ @sven1977 @gjoliver @avnishn
|
Fix pre-commit hook for python 3.x.
sh() return value is a string due to Popen(universal_newlines=True).
Traceback (most recent call last):
File ".git/hooks/pre-commit", line 118, in <module>
main()
File ".git/hooks/pre-commit", line 78, in main
py_files = [x for x in out.split(b'\n') if x.endswith(b'.py') and
TypeError: must be str or None, not bytes | @@ -75,7 +75,7 @@ def sh(cmd):
def main():
out = sh("git diff --cached --name-only")
- py_files = [x for x in out.split(b'\n') if x.endswith(b'.py') and
+ py_files = [x for x in out.split('\n') if x.endswith('.py') and
os.path.exists(x)]
lineno = 0
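A small reproduction of the underlying type mismatch (assumes a POSIX `echo`): with `universal_newlines=True` the captured output is `str`, so splitting on `b'\n'` raises the `TypeError` quoted above.
```python
import subprocess

out = subprocess.Popen(["echo", "a.py"], stdout=subprocess.PIPE,
                       universal_newlines=True).communicate()[0]
print(type(out))                                          # <class 'str'>
print([x for x in out.split('\n') if x.endswith('.py')])  # ['a.py']
```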
|
Make a new epoch after block sync, and allow adding a complain when my epoch
has the same height as the complain message. If the epoch height is lower than
the message's, execute block sync. | @@ -603,9 +603,13 @@ class BlockManager:
util.logger.debug(f"block_manager:block_height_sync is complete.")
next_leader = self.__current_last_block().header.next_leader
leader_peer = self.__channel_service.peer_manager.get_peer(next_leader.hex_hx()) if next_leader else None
+
+ if self.__channel_service.block_manager.epoch.height < my_height:
+ self.epoch = Epoch.new_epoch()
+
if leader_peer:
self.__channel_service.peer_manager.set_leader_peer(leader_peer, None)
- self.__channel_service.block_manager.epoch.set_epoch_leader(leader_peer.peer_id)
+ self.epoch.set_epoch_leader(leader_peer.peer_id)
self.__channel_service.state_machine.complete_sync()
else:
logging.warning(f"it's not completed block height synchronization in once ...\n"
@@ -689,12 +693,15 @@ class BlockManager:
util.logger.info(f"Complained new leader is current leader({new_leader_id})")
return
+ if self.epoch.height == block_height:
self.epoch.add_complain(complained_leader_id, new_leader_id, block_height, peer_id, group_id)
elected_leader = self.epoch.complain_result()
if elected_leader:
self.__channel_service.reset_leader(elected_leader, complained=True)
self.__channel_service.reset_leader_complain_timer()
+ elif self.epoch.height < block_height:
+ self.__channel_service.state_machine.block_sync()
def leader_complain(self):
# util.logger.notice(f"do leader complain.")
|
Disable cuda_distributions_test and converter_nomigraph_test on Windows.
Summary:
Pull Request resolved:
See and
Test Plan: Imported from OSS | @@ -6,17 +6,22 @@ test_api.exe --gtest_filter="-IntegrationTest.MNIST*"
if errorlevel 1 exit /b 1
-cd %TMP_DIR_WIN%\build\torch\test
for /r "." %%a in (*.exe) do (
- echo Running "%%~fa"
- if "%%~na" == "c10_Metaprogramming_test" (
- echo Skipping "%%~fa" because it is broken
- ) else (
- if "%%~na" == "module_test" (
- echo Skipping "%%~fa" because it is broken
- ) else (
- call "%%~fa"
- if errorlevel 1 exit /b 1
- )
- )
+ call :libtorch_check "%%~na" "%%~fa"
)
+
+goto :eof
+
+:libtorch_check
+if "%~1" == "c10_metaprogramming_test" goto :eof
+if "%~1" == "module_test" goto :eof
+rem See https://github.com/pytorch/pytorch/issues/25304
+if "%~1" == "cuda_distributions_test" goto :eof
+rem See https://github.com/pytorch/pytorch/issues/25312
+if "%~1" == "converter_nomigraph_test" goto :eof
+
+echo Running "%~2"
+call "%~2"
+if errorlevel 1 exit /b 1
+
+goto :eof
|
Fix Buildkite integration tests
Test Plan: integration
Reviewers: alangenfeld, dgibson, johann | @@ -39,7 +39,7 @@ def integration_steps():
).get_tox_build_steps()
integration_suites_root = os.path.join(
- SCRIPT_PATH, "..", "..", "..", "integration_tests", "test_suites"
+ SCRIPT_PATH, "..", "..", "..", "..", "integration_tests", "test_suites"
)
integration_suites = [
os.path.join("integration_tests", "test_suites", suite)
|
[Java] Shade jackson to avoid conflict.
Jackson is a widely-used utility. A user from Ant reports that the Jackson classes conflict between the Ray jar and the user's jar.
This PR shades Jackson in the Ray jar to avoid the conflict. | rule com.google.common.** io.ray.shaded.com.google.common.@1
rule com.google.protobuf.** io.ray.shaded.com.google.protobuf.@1
rule com.google.thirdparty.** io.ray.shaded.com.google.thirdparty.@1
+# jackson jar is introduced in `de.ruedigermoeller:fst`. It's easy to be
+# conflict with users'.
+rule com.fasterxml.jackson.** io.ray.shaded.com.fasterxml.jackson.@1
|
Update build_hardware.md
fix roll your own link | @@ -48,7 +48,7 @@ For more detail and other options, follow the link to: [supported cars](/support
## Roll Your Own Car
-Alternatively If you know RC or need something the standard Donkey does not support, you can roll your own. Here is a quick reference to help you along the way. [Roll Your Own](/roll_your_own/)
+Alternatively If you know RC or need something the standard Donkey does not support, you can roll your own. Here is a quick reference to help you along the way. [Roll Your Own](/roll_your_own)
## Video Overview of Hardware Assembly
|
Add schema migration for uri search changes
See commit for original additions
This also retires the enforced check for unification changes. | @@ -124,12 +124,12 @@ def schema_is_latest(engine):
"""
Is the schema up-to-date?
"""
- is_unification = _pg_exists(engine, schema_qualified('dataset_type'))
- is_updated = not _pg_exists(engine, schema_qualified('uq_dataset_source_dataset_ref'))
-
# We may have versioned schema in the future.
- # For now, we know updates ahve been applied if the dataset_type table exists,
- return is_unification and is_updated
+ # For now, we know updates have been applied if certain objects exist,
+
+ has_dataset_source_update = not _pg_exists(engine, schema_qualified('uq_dataset_source_dataset_ref'))
+ has_uri_searches = _pg_exists(engine, schema_qualified('ix_agdc_dataset_location_dataset_ref'))
+ return has_dataset_source_update and has_uri_searches
def update_schema(engine):
@@ -137,7 +137,7 @@ def update_schema(engine):
if not is_unification:
raise ValueError('Pre-unification database cannot be updated.')
- # Remove surrogate key from dataset_source: it makes the table larger for no benefit.
+ # Removal of surrogate key from dataset_source: it makes the table larger for no benefit.
if _pg_exists(engine, schema_qualified('uq_dataset_source_dataset_ref')):
_LOG.info('Applying surrogate-key update')
engine.execute("""
@@ -150,9 +150,25 @@ def update_schema(engine):
""")
_LOG.info('Completed surrogate-key update')
+ # float8range is needed if the user uses the double-range field type.
if not engine.execute("SELECT 1 FROM pg_type WHERE typname = 'float8range'").scalar():
engine.execute(TYPES_INIT_SQL)
+ # Update uri indexes to allow dataset search-by-uri.
+ if not _pg_exists(engine, schema_qualified('ix_agdc_dataset_location_dataset_ref')):
+ _LOG.info('Applying uri-search update')
+ engine.execute("""
+ begin;
+ -- Add a separate index by dataset.
+ create index ix_agdc_dataset_location_dataset_ref on agdc.dataset_location (dataset_ref);
+
+ -- Replace (dataset, uri) index with (uri, dataset) index.
+ alter table agdc.dataset_location add constraint uq_dataset_location_uri_scheme unique (uri_scheme, uri_body, dataset_ref);
+ alter table agdc.dataset_location drop constraint uq_dataset_location_dataset_ref;
+ commit;
+ """)
+ _LOG.info('Completed uri-search update')
+
def _ensure_role(engine, name, inherits_from=None, add_user=False, create_db=False):
if has_role(engine, name):
|
Updating mkdocs to automatically adjust theme
Automatically adjusts documentation theme between default/slate based on users preference for dark mode. | @@ -8,11 +8,13 @@ theme:
icon:
repo: fontawesome/brands/github
palette:
- - scheme: default
+ - media: "(prefers-color-scheme: light)"
+ scheme: default
toggle:
icon: material/lightbulb-outline
name: Switch to Dark Mode
- - scheme: slate
+ - media: "(prefers-color-scheme: dark)"
+ scheme: slate
toggle:
icon: material/lightbulb
name: Switch to Light Mode
|
Attempt to stream text/html content-types
Somewhat fixes | @@ -85,8 +85,8 @@ class Playlist(EventEmitter, Serializable):
raise ExtractionError("Invalid content type \"%s\" for url %s" % (content_type, song_url))
elif content_type.startswith('text/html'):
- log.warning("Got text/html for content-type, this might be a stream")
- pass # TODO: Check for shoutcast/icecast
+ log.warning("Got text/html for content-type, this might be a stream. Attempting to stream.")
+ return await self.add_stream_entry(song_url, info=info, **meta) # TODO: Check for shoutcast/icecast
elif not content_type.startswith(('audio/', 'video/')):
log.warning("Questionable content-type \"{}\" for url {}".format(content_type, song_url))
|
Apply suggestions from code review
minor nit | @@ -161,13 +161,13 @@ class Snapshot(models.Model):
class ArchiveResult(models.Model):
snapshot = models.ForeignKey(Snapshot, on_delete=models.CASCADE)
cmd = models.CharField(max_length=500)
- pwd = models.CharField(max_length=200)
+ pwd = models.CharField(max_length=256)
cmd_version = models.CharField(max_length=32)
- output = models.CharField(max_length=500)
+ output = models.CharField(max_length=512)
start_ts = models.DateTimeField()
end_ts = models.DateTimeField()
status = models.CharField(max_length=16, choices=STATUS_CHOICES)
- extractor = models.CharField(choices=EXTRACTORS, blank=False, max_length=32)
+ extractor = models.CharField(choices=EXTRACTORS, max_length=32)
def __str__(self):
return self.extractor
|
version="4.3.25"
version="4.3.25" | @@ -24,7 +24,7 @@ with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='donkeycar',
- version="4.3.24",
+ version="4.3.25",
long_description=long_description,
description='Self driving library for python.',
url='https://github.com/autorope/donkeycar',
|
Fix default Tiberius icon load in GTK
The default icon is not loading because it is missing the extension.
This leads to an error when trying to run toga_demo from a local clone.
This patch fixes the icon filename by adding the default extension from
the GTK icon class. | +import os
+
from gi.repository import Gtk, GdkPixbuf
@@ -17,8 +19,14 @@ class Icon:
try:
return self.native[size]
except KeyError:
+ valid_icon_extensions = ('.png', '.ico', self.EXTENSION)
+ file_path, file_extension = os.path.splitext(self.interface.filename)
+ if file_extension not in valid_icon_extensions:
+ file_extension = self.EXTENSION
+
self.native[size] = Gtk.Image.new_from_pixbuf(
- GdkPixbuf.Pixbuf.new_from_file(self.interface.filename).scale_simple(size, size, GdkPixbuf.InterpType.BILINEAR)
+ GdkPixbuf.Pixbuf.new_from_file(file_path + file_extension).scale_simple(
+ size, size, GdkPixbuf.InterpType.BILINEAR)
)
return self.native[size]
|
Update scalyr query utility script so the query can be passed in either
as a single string argument or as multiple arguments.
This allows us to split long queries over multiple lines without too
much bash / string concatenation. | @@ -26,7 +26,7 @@ SLEEP_DELAY=${SLEEP_DELAY:-"15"}
# Script will fail if query doesn't return at least this number of results / lines
MINIMUM_RESULTS=${MINIMUM_RESULTS:-"1"}
+if [ $# -eq 1 ]; then
+ # Query passed in as a single string argument
+ echo "Query passed in as a single string argument"
SCALYR_TOOL_QUERY=$1
+else
+ # Query passed in as multiple arguments
+ echo "Query passed in as multiple arguments"
+ SCALYR_TOOL_QUERY="$*"
+fi
echo_with_date() {
date +"[%Y-%m-%d %H:%M:%S] $*"
|
Re-enabling Airflow integration tests.
Airflow's (very quick!) fix: | @@ -8,8 +8,6 @@ Having your project tested here is the most sure way to keep those regressions f
"""
from subprocess import check_call
-import pytest
-
from isort.main import main
@@ -96,9 +94,6 @@ def test_websockets(tmpdir):
)
-# TODO Re-enable when airflow updates their isort config
-# Reference: https://github.com/apache/airflow/pull/10543#issuecomment-680231216
[email protected](reason="Airflow isort config currently broken")
def test_airflow(tmpdir):
check_call(
["git", "clone", "--depth", "1", "https://github.com/apache/airflow.git", str(tmpdir)]
|
Fix `reset_parameters` in `DimeNet`
I've fixed the duplicated for-loop by specifying the correct variable
(see issue | @@ -275,7 +275,7 @@ class InteractionPPBlock(torch.nn.Module):
res_layer.reset_parameters()
glorot_orthogonal(self.lin.weight, scale=2.0)
self.lin.bias.data.fill_(0)
- for res_layer in self.layers_before_skip:
+ for res_layer in self.layers_after_skip:
res_layer.reset_parameters()
def forward(self, x: Tensor, rbf: Tensor, sbf: Tensor, idx_kj: Tensor,
|
Add transfer capacities for Norway
Values are based on max reported to ENTSOE and will obviously be lower in summer due to thermal limits. | "rotation": 0
},
"NO-NO1->NO-NO2": {
+ "capacity": [
+ -2200,
+ 3500
+ ],
"lonlat": [
9.3,
59.844429
"rotation": -110
},
"NO-NO1->NO-NO3": {
+ "capacity": [
+ -500,
+ 500
+ ],
"lonlat": [
9.533218,
61.731596
"rotation": -45
},
"NO-NO1->NO-NO5": {
+ "capacity": [
+ -600,
+ 3900
+ ],
"lonlat": [
8.3,
61.132896
"rotation": 90
},
"NO-NO1->SE-SE3": {
+ "capacity": [
+ -2100,
+ 2150
+ ],
"parsers": {
"exchangeForecast": "ENTSOE.fetch_exchange_forecast"
}
},
"NO-NO2->NO-NO5": {
+ "capacity": [
+ -500,
+ 600
+ ],
"lonlat": [
7.461,
60.0
"rotation": -10
},
"NO-NO3->NO-NO4": {
+ "capacity": [
+ -1200,
+ 400
+ ],
"lonlat": [
12.465132,
64.745638
"rotation": 30
},
"NO-NO3->NO-NO5": {
+ "capacity": [
+ -500,
+ 500
+ ],
"lonlat": [
7.1,
61.194118
"rotation": 90
},
"NO-NO3->SE-SE2": {
+ "capacity": [
+ -1000,
+ 600
+ ],
"parsers": {
"exchangeForecast": "ENTSOE.fetch_exchange_forecast"
}
"rotation": 130
},
"NO-NO4->SE-SE2": {
+ "capacity": [
+ -300,
+ 250
+ ],
"parsers": {
"exchangeForecast": "ENTSOE.fetch_exchange_forecast"
}
},
"NO-NO4->SE-SE1": {
+ "capacity": [
+ -600,
+ 700
+ ],
"parsers": {
"exchangeForecast": "ENTSOE.fetch_exchange_forecast"
}
|
Delete validatingwebhookconfigurations and mutatingwebhookconfiguration
in make undeploy and make undeploy-dev | @@ -33,9 +33,13 @@ deploy-dev: manifests
undeploy:
kustomize build config/default | kubectl delete -f -
+ kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io kfservice.serving.kubeflow.org
+ kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io kfservice.serving.kubeflow.org
undeploy-dev:
kustomize build config/overlays/development | kubectl delete -f -
+ kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io kfservice.serving.kubeflow.org
+ kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io kfservice.serving.kubeflow.org
# Generate manifests e.g. CRD, RBAC etc.
manifests:
|
shutdown haproxy conn on check failed
HG--
branch : feature/dcs | @@ -21,7 +21,7 @@ backend postgres-patroni
http-check expect status 200
default-server inter 3s fall 3 rise 2
{% for n in groups['svc-patroni'] %}
- server cluster-pgsql-{{loop.index}} {{hostvars[n].ansible_host}}:5432 maxconn {{postgres_max_connections}} check port {{patroni_listen}}
+ server cluster-pgsql-{{loop.index}} {{hostvars[n].ansible_host}}:5432 maxconn {{postgres_max_connections}} check port {{patroni_listen}} on-marked-down shutdown-sessions
{% endfor %}
{%endif%}
\ No newline at end of file
|
tests/function/LinearCombination: Run dummy benchmark in skipped configurations
Fixes unused benchmark fixture warnings | @@ -85,6 +85,8 @@ RAND3_S = np.random.rand()
def linear_combination_function(variable, operation, exponents, weights, scale, offset, bin_execute, benchmark):
if weights is not None and not np.isscalar(weights) and len(variable) != len(weights):
+ benchmark.disabled = True
+ benchmark(lambda _:0,0)
pytest.skip("variable/weights mismatch")
f = pnl.core.components.functions.combinationfunctions.LinearCombination(default_variable=variable,
|
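A rough pytest sketch of the pattern this diff applies: touch the pytest-benchmark fixture once before skipping, so it does not warn about an unused fixture. The test name and parametrization below are invented for illustration only.

import pytest

@pytest.mark.parametrize("size", [1, 3])
def test_sum(benchmark, size):
    if size == 1:
        # Exercise the fixture with a no-op so pytest-benchmark sees it was
        # used, then skip this configuration.
        benchmark.disabled = True
        benchmark(lambda _: 0, 0)
        pytest.skip("size 1 is not benchmarked")
    benchmark(sum, range(size))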
Fix escaping for special characters
Issue | @@ -584,7 +584,7 @@ def quote_identifier(identifier, for_grants=False):
'''
if for_grants:
return '`' + identifier.replace('`', '``').replace('_', r'\_') \
- .replace('%', r'\%%') + '`'
+ .replace('%', r'%%') + '`'
else:
return '`' + identifier.replace('`', '``').replace('%', '%%') + '`'
|
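A small standalone sketch of the mechanics behind the doubled percent sign (not the Salt module itself; the identifier is made up). The quoted name is typically embedded in a statement that a pyformat-style driver later runs through Python %-substitution, which collapses '%%' back to a single literal '%'.

quoted = "`" + "db%name".replace("`", "``").replace("%", "%%") + "`"
query = "GRANT SELECT ON " + quoted + ".* TO %(user)s"
# The substitution pass turns '%%' into '%', so the identifier survives as
# `db%name`; with the old r'\%%' it would come out as `db\%name` instead.
print(query % {"user": "'bob'@'localhost'"})
# GRANT SELECT ON `db%name`.* TO 'bob'@'localhost'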
fix: remove non-existing user from CODEOWNERS
Removes user that does not exist, or seems to have been renamed to | # /data_structures/ @cclauss # TODO: Uncomment this line after Hacktoberfest
-/digital_image_processing/ @mateuszz0000
+# /digital_image_processing/
# /divide_and_conquer/
# /searches/
-/sorts/ @mateuszz0000
+# /sorts/
# /strings/ @cclauss # TODO: Uncomment this line after Hacktoberfest
|
Modifying the parameters
Interface has changed in zun/image/driver.py:
delete_image(context, img_id, image_driver=None) | @@ -908,7 +908,7 @@ class Manager(periodic_task.PeriodicTasks):
LOG.exception("Unexpected exception while uploading image: %s",
six.text_type(e))
image_driver.delete_image(context, snapshot_image.id,
- glance.GlanceDriver())
+ 'glance')
self.driver.delete_image(container_image_id)
raise
@@ -935,7 +935,7 @@ class Manager(periodic_task.PeriodicTasks):
LOG.error("Error occurred while calling docker commit API: %s",
six.text_type(e))
image_driver.delete_image(context, snapshot_image.id,
- glance.GlanceDriver())
+ 'glance')
raise
finally:
if unpause:
|
cuDNN 7.1 fix.
Output from cudnnGetFilterNdDescriptor has changed in cuDNN 7.1 this fix will be forward and backward compatible. | @@ -443,11 +443,10 @@ namespace {
// (same for the hh weights, and the ih and hh biases).
// Since we're storing all the weights in a single tensor anyway,
// might as well merge the CUDNN ones into a single tensor as well
+ int mat_numel = *filter_dim_a.prod().data<int>();
if (linear_id == 0 || linear_id == num_linear_layers / 2) {
- AT_ASSERT(*filter_dim_a.prod().data<int>() == *filter_dim_a[0].data<int>(), "filter_dim_a.prod() == filter_dim_a[0]");
std::initializer_list<int64_t> size = {
- *filter_dim_a[0].data<int>() * num_linear_layers / 2,
- *filter_dim_a[2].data<int>()};
+ mat_numel * num_linear_layers / 2, 1};
// Generate a new parameter tensor which is a view into the
// weight_buf.
Tensor param = weight_buf.type().tensor().set_(*weight_buf.storage(), offset, size);
@@ -456,7 +455,7 @@ namespace {
} else {
AT_ASSERT(cur_offset == offset, "cur_offset == offset");
}
- cur_offset = offset + *filter_dim_a[0].data<int>();
+ cur_offset = offset + mat_numel;
}
} // for cudnn_method
if (layer == 0) {
|
Switch doc to NGO rather than 1+1
* switch doc to Shiwa rather than 1+1
* Shiwa --> NGO
ok then I switch to NGO which is already quite better than OnePlusOne and which is supposed to stay for eternity.
* fix typo | @@ -15,7 +15,7 @@ More installation options, including windows installation, and complete instruct
You can join Nevergrad users Facebook group [here](https://www.facebook.com/groups/nevergradusers/).
-Minimizing a function using an optimizer (here `OnePlusOne`) is straightforward:
+Minimizing a function using an optimizer (here `NGO`) is straightforward:
```python
import nevergrad as ng
@@ -23,7 +23,7 @@ import nevergrad as ng
def square(x):
return sum((x - .5)**2)
-optimizer = ng.optimizers.OnePlusOne(parametrization=2, budget=100)
+optimizer = ng.optimizers.NGO(parametrization=2, budget=100)
recommendation = optimizer.minimize(square)
print(recommendation.value) # recommended value
>>> [0.49971112 0.5002944]
@@ -50,7 +50,7 @@ parametrization = ng.p.Instrumentation(
architecture=ng.p.Choice(["conv", "fc"])
)
-optimizer = ng.optimizers.OnePlusOne(parametrization=parametrization, budget=100)
+optimizer = ng.optimizers.NGO(parametrization=parametrization, budget=100)
recommendation = optimizer.minimize(fake_training)
# show the recommended keyword arguments of the function
|
Forcing error file download on link click.
Rather than opening file in new window.
Adding optional prop to k-external-link.
Adding default file name. | <template>
<!-- no extra whitespace inside link -->
- <a :class="buttonClasses" :href="href">{{ text }}</a>
+ <a
+ :class="buttonClasses"
+ :href="href"
+ :download="download"
+ >
+ {{ text }}
+ </a>
</template>
type: String,
required: true,
},
+ /**
+ * URL string
+ */
+ href: {
+ type: String,
+ required: true,
+ },
/**
* Link appearance: 'raised-button', 'flat-button', or 'basic-link'
*/
default: false,
},
/**
- * URL string
+ * Specifies that the file is meant to be downloaded, not displayed in a separate tab.
*/
- href: {
+ download: {
type: String,
- required: true,
+ required: false,
},
},
};
|
Remove pass keyword
Pass keyword is not brought up in the tutorial so far and would therefore be confusing to new students, replaced with empty returns.
# Modify this function to return a list of strings as defined above
def list_benefits():
- pass
+ return []
# Modify this function to concatenate to each benefit - " is a benefit of functions!"
def build_sentence(benefit):
- pass
+ return ""
def name_the_benefits_of_functions():
list_of_benefits = list_benefits()
|
Update _params.py
Fix typo, specify Compute Gallery instead of Image Gallery. | @@ -163,7 +163,7 @@ def load_arguments(self, _):
c.argument('disk_mbps_read_only', type=int, help='The total throughput (MBps) that will be allowed across all VMs mounting the shared disk as ReadOnly. MBps means millions of bytes per second - MB here uses the ISO notation, of powers of 10')
c.argument('image_reference', help='ID or URN (publisher:offer:sku:version) of the image from which to create a disk')
c.argument('image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
- c.argument('gallery_image_reference', help='ID of the shared galley image version from which to create a disk')
+ c.argument('gallery_image_reference', help='ID of the Compute Gallery image version from which to create a disk')
c.argument('gallery_image_reference_lun', type=int, help='If the disk is created from an image\'s data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null')
c.argument('logical_sector_size', type=int, help='Logical sector size in bytes for Ultra disks. Supported values are 512 ad 4096. 4096 is the default.')
c.argument('tier', help='Performance tier of the disk (e.g, P4, S10) as described here: https://azure.microsoft.com/pricing/details/managed-disks/. Does not apply to Ultra disks.')
|
synctl warns when no process is stopped and avoids start
* If an error occurs when stopping a process synctl now logs a warning.
* During a restart, synctl will avoid attempting to start Synapse if an error
occurs during stopping Synapse. | @@ -142,12 +142,23 @@ def start_worker(app: str, configfile: str, worker_configfile: str) -> bool:
return False
-def stop(pidfile, app):
+def stop(pidfile: str, app: str) -> bool:
+ """Attempts to kill a synapse worker from the pidfile.
+ Args:
+ pidfile: path to file containing worker's pid
+ app: name of the worker's appservice
+
+ Returns:
+ True if the process stopped successfully
+ False if process was already stopped or an error occured
+ """
+
if os.path.exists(pidfile):
pid = int(open(pidfile).read())
try:
os.kill(pid, signal.SIGTERM)
write("stopped %s" % (app,), colour=GREEN)
+ return True
except OSError as err:
if err.errno == errno.ESRCH:
write("%s not running" % (app,), colour=YELLOW)
@@ -155,6 +166,14 @@ def stop(pidfile, app):
abort("Cannot stop %s: Operation not permitted" % (app,))
else:
abort("Cannot stop %s: Unknown error" % (app,))
+ return False
+ else:
+ write(
+ "No running worker of %s found (from %s)\nThe process might be managed by another controller (e.g. systemd)"
+ % (app, pidfile),
+ colour=YELLOW,
+ )
+ return False
Worker = collections.namedtuple(
@@ -300,11 +319,17 @@ def main():
action = options.action
if action == "stop" or action == "restart":
+ has_stopped = True
for worker in workers:
- stop(worker.pidfile, worker.app)
+ if not stop(worker.pidfile, worker.app):
+ # A worker could not be stopped.
+ has_stopped = False
if start_stop_synapse:
- stop(pidfile, "synapse.app.homeserver")
+ if not stop(pidfile, "synapse.app.homeserver"):
+ has_stopped = False
+ if not has_stopped:
+ sys.exit(1)
# Wait for synapse to actually shutdown before starting it again
if action == "restart":
|
On-the-fly gamma alteration
Fix | @@ -272,7 +272,8 @@ def get_channel_value(color, channel, space='hsl'):
def Colormap(*args, extend='both',
xi=None, xf=None, # optionally truncate color range by these indices
ratios=1, resample=True, reverse=False,
- name=None, register=True, save=False, N=None, **kwargs):
+ gamma=None, gamma1=None, gamma2=None,
+ name=None, register=False, save=False, N=None, **kwargs):
"""
Convenience function for generating colormaps in a variety of ways.
The 'extend' property will be used to resample LinearSegmentedColormap
@@ -311,22 +312,31 @@ def Colormap(*args, extend='both',
raise ValueError('Function requires at least 1 positional arg.')
for cmap in args:
# Retrieve Colormap instance
+ if isinstance(cmap,str) and cmap in mcm.cmap_d:
+ cmap = mcm.cmap_d[cmap]
if isinstance(cmap, mcolors.Colormap):
- # Do nothing
- pass
- elif type(cmap) is dict:
+ # Allow gamma override, otherwise do nothing
+ if isinstance(cmap, PerceptuallyUniformColormap):
+ if gamma1 or gamma2:
+ segmentdata = cmap._segmentdata.copy()
+ if gamma1:
+ segmentdata['gamma1'] = gamma1
+ if gamma2:
+ segmentdata['gamma2'] = gamma2
+ cmap = type(cmap)(cmap.name, segmentdata, space=cmap.space, mask=cmap.mask)
+ elif isinstance(cmap, mcolors.LinearSegmentedColormap):
+ if gamma:
+ cmap._gamma = gamma
+ cmap._init()
+ elif isinstance(cmap, dict):
# Dictionary of hue/sat/luminance values or 2-tuples representing linear transition
cmap = PerceptuallyUniformColormap.from_hsl(name, **cmap)
- elif type(cmap) is not str:
+ elif not isinstance(cmap, str):
# List of colors
cmap = mcolors.ListedColormap(cmap, name=name)
- elif cmap in mcm.cmap_d:
- # Map name or color for generating monochrome gradiation
- cmap = mcm.cmap_d[cmap] # get the instance
else:
- # Parse extra options
+ # Monochrome colormap based from input color (i.e. single hue)
light = True # by default
- cmap_kw = kwargs.copy() # may be different for each cmap in *args
regex = '([0-9].)$'
match = re.search(regex, cmap) # declare options with _[flags]
cmap = re.sub(regex, '', cmap) # remove options
@@ -398,6 +408,7 @@ def Colormap(*args, extend='both',
# Optionally register a colormap
if name and register:
+ print(name, 'Registering')
if name.lower() in [cat_cmap.lower() for cat,cat_cmaps in _categories_default.items()
for cat_cmap in cat_cmaps if 'PubPlot' not in cat]:
print(f'Warning: Overwriting existing colormap "{name}".')
|
help: Rename back button text.
Renamed back button text to Zulip instead of Home. | <div class="content">
<h1><a href="{{ doc_root }}" class="no-underline">Index</a></h1>
{{ render_markdown_path(sidebar_index, api_uri_context) }}
- <h1 class="home-link"><a href="/" class="no-underline">Back to Home</a></h1>
+ <h1 class="home-link"><a href="/" class="no-underline">Back to Zulip</a></h1>
</div>
</div>
|
Update sentinel-2-l2a-cogs.yaml
Hi! I just added my beginner tutorial on how to access Landsat and Sentinel data on AWS with Python. Many of the other tutorials are outdated and not working, btw. | @@ -51,6 +51,9 @@ Resources:
Type: SNS Topic
DataAtWork:
Tutorials:
+ - Title: How to Work with Landsat and Sentinel-2 on AWS with Python
+ URL: https://www.matecdev.com/posts/landsat-sentinel-aws-s3-python.html
+ AuthorName: Martin D. Maas
- Title: Intake-STAC with sat-search
URL: https://github.com/intake/intake-stac/blob/master/examples/aws-earth-search.ipynb
AuthorName: Scott Henderson
|
textlib_tests.py: split test_regexes
Divide test_regexes() into:
test_etp_regex()
test_nested_template_regex() | @@ -587,8 +587,8 @@ class TestTemplateParams(TestCase):
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
- def test_regexes(self):
- """Test _ETP_REGEX and NESTED_TEMPLATE_REGEX."""
+ def test_etp_regex(self):
+ """Test _ETP_REGEX."""
func = textlib._ETP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
@@ -620,6 +620,8 @@ class TestTemplateParams(TestCase):
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
+ def test_nested_template_regex(self):
+ """Test NESTED_TEMPLATE_REGEX."""
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
|
Fix "NoneType object is not callable" exception when using a tracing function that returns None
CPython uses the return value of tracing function to assign it to frame.f_trace field and then it uses it for the frame instead of tstate.c_tracefunc. If it is None, the context shouldn't be traced line by line. | PyThreadState *tstate; \
PyGILState_STATE state = PyGILState_Ensure(); \
tstate = PyThreadState_GET(); \
- if (unlikely(tstate->use_tracing && tstate->c_tracefunc)) { \
+ if (unlikely(tstate->use_tracing && __pyx_frame->f_trace != Py_None)) { \
ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
} \
PyGILState_Release(state); \
} \
} else { \
PyThreadState* tstate = PyThreadState_GET(); \
- if (unlikely(tstate->use_tracing && tstate->c_tracefunc)) { \
+ if (unlikely(tstate->use_tracing && __pyx_frame->f_trace != Py_None)) { \
int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
if (unlikely(ret)) goto_error; \
} \
#define __Pyx_TraceLine(lineno, nogil, goto_error) \
if (likely(!__Pyx_use_tracing)); else { \
PyThreadState* tstate = PyThreadState_GET(); \
- if (unlikely(tstate->use_tracing && tstate->c_tracefunc)) { \
+ if (unlikely(tstate->use_tracing && __pyx_frame->f_trace != Py_None)) { \
int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
if (unlikely(ret)) goto_error; \
} \
|
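The CPython behaviour the message relies on can be reproduced in plain Python, independent of Cython: the value returned by the global trace function becomes the frame's local trace function, and returning None disables per-line tracing for that frame.

import sys

def global_trace(frame, event, arg):
    print("call into", frame.f_code.co_name)
    return None  # no local trace function, so no 'line' events in this frame

def work():
    x = 1  # this line is never reported as a 'line' event
    return x

sys.settrace(global_trace)
work()
sys.settrace(None)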
feat: minor disclaimer on nfs server role readme
* feat: minor disclaimer on nfs server role readme
Add a note in nfs-server readme about exported directories that must be created before applying the role
* Update readme.rst | @@ -27,6 +27,8 @@ All configuration is done in *group_vars/all/general_settings/nfs.yml*:
This role will not modify default nfs server configuration (number of threads, nfs v4.2 force, etc).
+Note that exported paths will not be created by the role. They must be created by the administrator before running this role.
+
Input
^^^^^
|
Bubbles up "algorithm" arg to create_nqubit_sequences.
So now you can specify "greedy" (the default) or "sequential" (if
you don't have time to wait for greedy). | @@ -1530,7 +1530,7 @@ def get_candidates_for_core(gateset, core_qubits, candidate_counts, seedStart):
def create_nqubit_sequences(nQubits, maxLengths, geometry, cnot_edges, maxIdleWeight=1, maxhops=0,
extraWeight1Hops=0, extraGateWeight=0, sparse=False, verbosity=0,
- cache=None, idleOnly=False):
+ cache=None, idleOnly=False, algorithm="greedy"):
"""
TODO: docstring
@@ -1586,7 +1586,7 @@ def create_nqubit_sequences(nQubits, maxLengths, geometry, cnot_edges, maxIdleWe
idle_gateset = build_nqnoise_gateset(maxIdleWeight, 'line', [], maxIdleWeight, maxhops,
extraWeight1Hops, extraGateWeight, sparse, verbosity=printer-5,
sim_type="termorder:1", parameterization="H+S terms")
- idle_params = idle_gateset['Gi'].gpindices # these are the params we want to amplify at first...
+ idle_params = idle_gateset.gates['Gi'].gpindices # these are the params we want to amplify at first...
if maxIdleWeight in cache['Idle gatename fidpair lists']:
printer.log("Getting cached sequences needed for max-weight=%d errors on the idle gate" % maxIdleWeight)
@@ -1598,7 +1598,7 @@ def create_nqubit_sequences(nQubits, maxLengths, geometry, cnot_edges, maxIdleWe
find_amped_polys_for_syntheticidle(list(range(maxIdleWeight)),
idleGateStr, idle_gateset, singleQfiducials,
prepLbl, None, wrtParams=idle_params,
- verbosity=printer-1)
+ algorithm=algorithm, verbosity=printer-1)
#ampedJ, ampedJ_rank, idle_maxwt_gatename_fidpair_lists = None,0,[] # DEBUG GRAPH ISO
cache['Idle gatename fidpair lists'][maxIdleWeight] = idle_maxwt_gatename_fidpair_lists
@@ -1637,11 +1637,12 @@ def create_nqubit_sequences(nQubits, maxLengths, geometry, cnot_edges, maxIdleWe
sidle_gateset = build_nqnoise_gateset(maxSyntheticIdleWt, 'line', [], maxIdleWeight, maxhops,
extraWeight1Hops, extraGateWeight, sparse, verbosity=printer-5,
sim_type="termorder:1", parameterization="H+S terms")
- idle_params = sidle_gateset['Gi'].gpindices # these are the params we want to amplify...
+ idle_params = sidle_gateset.gates['Gi'].gpindices # these are the params we want to amplify...
_, _, idle_gatename_fidpair_lists = find_amped_polys_for_syntheticidle(
list(range(maxSyntheticIdleWt)), idleGateStr, sidle_gateset,
- singleQfiducials, prepLbl, None, wrtParams=idle_params, verbosity=printer-1)
+ singleQfiducials, prepLbl, None, wrtParams=idle_params,
+ algorithm=algorithm, verbosity=printer-1)
#idle_gatename_fidpair_lists = [] # DEBUG GRAPH ISO
cache['Idle gatename fidpair lists'][maxSyntheticIdleWt] = idle_gatename_fidpair_lists
|
Prepare the 1.24.1rc1 release.
Work towards
[ci skip-rust-tests]
[ci skip-jvm-tests] | This document describes releases leading up to the ``1.24.x`` ``stable`` series.
+1.24.1rc1 (6/16/2020)
+---------------------
+
+N.B.: No further releases are expected in the ``1.24.x`` ``stable`` series. This ``.1rc1``
+release is for those upgrading through stable versions who wish to retain pytest console
+coverage support in Pants "v1".
+
+Bugfixes
+~~~~~~~~
+
+* Restore pytest coverage console report. (#10019)
+ `PR #10019 <https://github.com/pantsbuild/pants/pull/10019>`_
+
1.24.1rc0 (02/06/2020)
----------------------
|
Move the property that enables Intel HPC to the right level of the dna.json
In the cookbook recipes we are searching for the "enable_intel_hpc_platform"
key in the "cfncluster" dictionary of the dna.json file. | ]
}
]
- }
},
"enable_intel_hpc_platform": {
"Ref": "IntelHPCPlatform"
+ }
},
"run_list": {
"Fn::Sub": "recipe[aws-parallelcluster::${Scheduler}_config]"
|
Lexical envs: reword Is_Cache_Valid's comment
For AdaCore/libadalang#45
(no-tn-check) | @@ -11,7 +11,7 @@ with Langkit_Support.Images; use Langkit_Support.Images;
package body Langkit_Support.Lexical_Env is
function Is_Cache_Valid (Env : Lexical_Env) return Boolean;
- -- Returns whether Env's cache is valid or not. This will check every
+ -- Return whether Env's lookup cache is valid. This will check every
-- Cache_Valid flag up Env's parent chain.
function Wrap
|
devstack: redis on opensuse needs to have default config
this patch adds a default config and uses template version of
unit-file to restart redis | @@ -96,8 +96,15 @@ function _ceilometer_install_redis {
else
# This will fail (correctly) where a redis package is unavailable
install_package redis
+ if is_suse; then
+ # opensuse intsall multi-instance version of redis
+ # and admin is expected to install the required conf
+ cp /etc/redis/default.conf.example /etc/redis/default.conf
+ restart_service redis@default
+ else
restart_service redis
fi
+ fi
pip_install_gr redis
}
|
Fix Dataset add single file
Fix get project name from parent dataset if not specified | @@ -589,15 +589,16 @@ class Dataset(object):
return [f.relative_path for f in matching_errors if f is not None]
@classmethod
- def create(cls, dataset_project, dataset_name, parent_datasets=None):
- # type: (str, str, Optional[Sequence[Union[str, Dataset]]]) -> Dataset
+ def create(cls, dataset_name, dataset_project=None, parent_datasets=None):
+ # type: (str, Optional[str], Optional[Sequence[Union[str, Dataset]]]) -> Dataset
"""
Create a new dataset. Multiple dataset parents are supported.
Merging of parent datasets is done based on the order,
where each one can override overlapping files in the previous parent
- :param dataset_project: Project containing the dataset
:param dataset_name: Naming the new dataset
+ :param dataset_project: Project containing the dataset.
+ If not specified, infer project name form parent datasets
:param parent_datasets: Expand a parent dataset by adding/removing files
:return: Newly created Dataset object
"""
@@ -605,6 +606,13 @@ class Dataset(object):
if any(not p.is_final() for p in parent_datasets):
raise ValueError("Cannot inherit from a parent that was not finalized/closed")
+ # get project name
+ if not dataset_project:
+ if not parent_datasets:
+ raise ValueError("Missing dataset project name. Could not infer project name from parent dataset.")
+ # get project name from parent dataset
+ dataset_project = parent_datasets[-1]._task.get_project_name()
+
# merge datasets according to order
dataset_file_entries = {}
dependency_graph = {}
@@ -847,7 +855,7 @@ class Dataset(object):
if path.is_file():
file_entry = self._calc_file_hash(
FileEntry(local_path=path.absolute().as_posix(),
- relative_path=Path(dataset_path or '.') / path.relative_to(local_base_folder),
+ relative_path=(Path(dataset_path or '.') / path.relative_to(local_base_folder)).as_posix(),
parent_dataset_id=self._id))
file_entries = [file_entry]
else:
@@ -1242,3 +1250,12 @@ class Dataset(object):
config_type='read-only',
config_text=dataset_details
)
+
+ def is_dirty(self):
+ # type: () -> bool
+ """
+ Return True if the dataset has pending uploads (i.e. we cannot finalize it)
+
+ :return: Return True means dataset has pending uploads, call 'upload' to start an upload process.
+ """
+ return self._dirty
|
python_api/module_py.mako: fix Unicode handling in get_from_provider
TN: | @@ -515,8 +515,7 @@ class AnalysisContext(object):
def get_from_provider(self, name, kind, charset=None, reparse=False):
${py_doc('langkit.get_unit_from_provider', 8)}
- name = _py2to3.text_to_bytes(name)
- name_text = _py2to3.bytes_to_text(name)
+ name = _py2to3.bytes_to_text(name)
charset = _py2to3.text_to_bytes(charset or '')
_name = _text._unwrap(name)
@@ -528,7 +527,7 @@ class AnalysisContext(object):
return AnalysisUnit._wrap(c_value)
else:
raise InvalidUnitNameError('Invalid unit name: {} ({})'.format(
- repr(name_text), kind
+ repr(name), kind
))
def discard_errors_in_populate_lexical_env(self, discard):
|
Restore use of Version constructor with defined constants
packaging does not allow extracting minor versions, so use regular old
string parsing there. | """
General constants.
"""
+# isort: THIRDPARTY
+from packaging.specifiers import Version
+
SERVICE = "org.storage.stratis3"
TOP_OBJECT = "/org/storage/stratis3"
@@ -21,6 +24,7 @@ SECTOR_SIZE = 512
MAXIMUM_STRATISD_VERSION = "4.0.0"
MINIMUM_STRATISD_VERSION = "3.0.0"
+assert Version(MINIMUM_STRATISD_VERSION) < Version(MAXIMUM_STRATISD_VERSION)
REVISION = "r%s" % MINIMUM_STRATISD_VERSION.split(".")[1]
|
update command line scripts
the command "configure" has been split into "set-dir" and "set-excluded"
configuration management has been renamed from "env" to "config" | @@ -81,11 +81,21 @@ def gui():
@main.command()
@with_config_opt
-def configure():
- """Runs the command line configuration wizard."""
+def set_dir():
+ """Set or change the location of your Dropbox folder."""
+ if is_linked():
from maestral.main import Maestral
m = Maestral(run=False)
m.move_dropbox_directory()
+
+
[email protected]()
+@with_config_opt
+def set_excluded():
+ """Select folders to exclude from sync."""
+ if is_linked():
+ from maestral.main import Maestral
+ m = Maestral(run=False)
m.select_excluded_folders()
@@ -93,6 +103,7 @@ def configure():
@with_config_opt
def unlink():
"""Unlinks your Dropbox account."""
+ if is_linked():
from maestral.main import Maestral
m = Maestral(run=False)
m.unlink()
@@ -210,11 +221,11 @@ def list_configs():
@main.group()
-def env():
+def config():
"""Manage different Maestral configuration environments."""
[email protected]()
[email protected]()
@click.argument("name")
def new(name: str):
"""Set up and activate a fresh Maestral configuration."""
@@ -227,7 +238,7 @@ def new(name: str):
click.echo("Created configuration '{0}'.".format(name))
[email protected](name='list')
[email protected](name='list')
def env_list():
"""List all Maestral configurations."""
click.echo("Available Maestral configurations:")
@@ -235,7 +246,7 @@ def env_list():
click.echo(' ' + c)
[email protected]()
[email protected]()
@click.argument("name")
def delete(name: str):
"""Remove a Maestral configuration."""
|
simplify loops of length one
An `evaluable.LoopSum` or `evaluable.LoopConcatenate` can be omitted if the
length of the loop is one. This patch makes this happen by letting the
`evaluable.LoopIndex` simplify to a constant if the loop has length one. | @@ -3457,6 +3457,10 @@ class _LoopIndex(Argument):
lower_length, upper_length = self.length._intbounds
return 0, max(0, upper_length - 1)
+ def _simplified(self):
+ if equalindex(self.length, 1):
+ return Zeros((), int)
+
class LoopSum(Array):
__cache__ = '_serialized'
|
[C2] Optimize MulGradient Operator when inner_size is 1
Summary:
Pull Request resolved:
Add a simpler implementation of the MulGradient cuda kernel for when inner_size==1, inner loop is eliminated. | @@ -63,7 +63,43 @@ __global__ void ComputeMulGradientCUDAKernel(
__syncthreads();
}
}
-
+template <typename TGrad, typename TIn, int D>
+__global__ void ComputeMulGradientOuterCUDAKernel(
+ const int outer_size,
+ const SimpleArray<FixedDivisor<int>, D> Y_dims,
+ const SimpleArray<int, D> Y_strides,
+ const SimpleArray<int, D> W_strides,
+ const SimpleArray<FixedDivisor<int>, D> X_dims,
+ const TGrad* dY,
+ const TIn* W,
+ TGrad* dX) {
+ CUDA_1D_KERNEL_LOOP(i, outer_size) {
+ TGrad sum = 0;
+ const int X_index = i;
+ int Y_index = 0;
+ int X_index_val = X_index;
+#pragma unroll
+ for (int d = D - 1; d >= 0; --d) {
+ int r;
+ X_dims.data[d].DivMod(X_index_val, &X_index_val, &r);
+ Y_index += r * Y_strides.data[d];
+ }
+ int W_index = 0;
+ int Y_index_val = Y_index;
+#pragma unroll
+ for (int d = D - 1; d >= 0; --d) {
+ int r;
+ Y_dims.data[d].DivMod(Y_index_val, &Y_index_val, &r);
+ W_index += r * W_strides.data[d];
+ }
+#if __CUDA_ARCH__ >= 350
+ sum += __ldg(dY + Y_index) * __ldg(W + W_index);
+#else
+ sum += dY[Y_index] * W[W_index];
+#endif
+ dX[i] = sum;
+ }
+}
template <typename TGrad, typename TIn, int D>
void ComputeMulGradientCUDAImpl(
const int outer_size,
@@ -89,6 +125,21 @@ void ComputeMulGradientCUDAImpl(
W_strides_arr.data[i] = W_dims[i] == 1 ? 0 : cur_stride;
cur_stride *= W_dims[i];
}
+ if (inner_size == 1) {
+ ComputeMulGradientOuterCUDAKernel<TGrad, TIn, D>
+ <<<CAFFE_MAXIMUM_NUM_BLOCKS,
+ CAFFE_CUDA_NUM_THREADS,
+ 0,
+ context->cuda_stream()>>>(
+ outer_size,
+ Y_dims_arr,
+ Y_strides_arr,
+ W_strides_arr,
+ X_dims_arr,
+ dY,
+ W,
+ dX);
+ } else {
int threads = std::min(inner_size, CAFFE_CUDA_NUM_THREADS);
ComputeMulGradientCUDAKernel<TGrad, TIn, D>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
@@ -105,6 +156,7 @@ void ComputeMulGradientCUDAImpl(
W,
dX);
}
+}
template <typename TGrad, typename TIn>
void ComputeMulGradientCUDA(
|
Update show.py
Correctly show up/down for all types of nodes | @@ -229,7 +229,7 @@ async def show_async(args, parser):
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
- f"{mb_down:7.1f}|{mb_up:<7.1f}"
+ f"{mb_up:7.1f}|{mb_down:<7.1f}"
)
print(con_str)
# if called together with state, leave a blank line
|
Fall back when consensus is not set
When the on-chain consensus settings aren't set: fall back to PoET if
PoET is configured, otherwise fall back to devmode. | @@ -374,4 +374,18 @@ def get_configured_engine(block, settings_view_factory):
conf_version = settings_view.get_setting(
'sawtooth.consensus.algorithm.version')
- return conf_name, conf_version
+ # Fallback to devmode if nothing else is set
+ name = "Devmode"
+ version = "0.1"
+
+ # If name and version settings aren't set, check for PoET
+ if conf_name is None or conf_version is None:
+ algorithm = settings_view.get_setting('sawtooth.consensus.algorithm')
+ if algorithm and (algorithm.lower() == 'poet'):
+ name = "PoET"
+ # Otherwise use name and version settings
+ else:
+ name = conf_name
+ version = conf_version
+
+ return name, version
|
ui: fix extra newline in dvc version
Additionally don't try to detect cache fs type unless cache dir exists. | @@ -27,23 +27,13 @@ class CmdVersion(CmdBaseNoRepo):
def run(self):
from dvc.repo import Repo
- dvc_version = __version__
- python_version = platform.python_version()
- platform_type = platform.platform()
- binary = is_binary()
- info = (
- "DVC version: {dvc_version}\n"
- "Python version: {python_version}\n"
- "Platform: {platform_type}\n"
- "Binary: {binary}\n"
- "Package: {package}\n"
- ).format(
- dvc_version=dvc_version,
- python_version=python_version,
- platform_type=platform_type,
- binary=binary,
- package=PKG,
- )
+ info = [
+ "DVC version: {}".format(__version__),
+ "Python version: {}".format(platform.python_version()),
+ "Platform: {}".format(platform.platform()),
+ "Binary: {}".format(is_binary()),
+ "Package: {}".format(PKG),
+ ]
try:
repo = Repo()
@@ -54,8 +44,13 @@ class CmdVersion(CmdBaseNoRepo):
# later decides to enable shared cache mode with
# `dvc config cache.shared group`.
if os.path.exists(repo.cache.local.cache_dir):
- info += "Cache: {cache}\n".format(
- cache=self.get_linktype_support_info(repo)
+ info.append(
+ "Cache: {}".format(self.get_linktype_support_info(repo))
+ )
+ if psutil:
+ fs_type = self.get_fs_type(repo.cache.local.cache_dir)
+ info.append(
+ "Filesystem type (cache directory): {}".format(fs_type)
)
else:
logger.warning(
@@ -66,19 +61,14 @@ class CmdVersion(CmdBaseNoRepo):
"check.".format(relpath(repo.cache.local.cache_dir))
)
- if psutil:
- info += (
- "Filesystem type (cache directory): {fs_cache}\n"
- ).format(fs_cache=self.get_fs_type(repo.cache.local.cache_dir))
except NotDvcRepoError:
root_directory = os.getcwd()
if psutil:
- info += ("Filesystem type (workspace): {fs_root}").format(
fs_root = self.get_fs_type(os.path.abspath(root_directory))
- )
+ info.append("Filesystem type (workspace): {}".format(fs_root))
- logger.info(info)
+ logger.info("\n".join(info))
return 0
@staticmethod
|