message | diff
---|---|
Remove easy-thumbnail dependency following refactor
See | @@ -305,9 +305,6 @@ dockerflow==2022.1.0 \
drf-nested-routers==0.93.4 \
--hash=sha256:996b77f3f4dfaf64569e7b8f04e3919945f90f95366838ca5b8bed9dd709d6c5 \
--hash=sha256:01aa556b8c08608bb74fb34f6ca065a5183f2cda4dc0478192cc17a2581d71b0
-easy-thumbnails==2.8.1 \
- --hash=sha256:5f59f722508469d482d8d76b99c7b9e6c1abbce19aefd5932dc6e1b9eacd2473 \
- --hash=sha256:1a283fe8a3569c3feab4605e8279929d75c85c1151b2fd627f95b1863b5fc6c2
# elasticsearch is required by elasticsearch-dsl
elasticsearch==6.8.2 \
--hash=sha256:1aedf00b73f5d1e77cb4df70fec58f2efb664be4ce2686374239aa6c0373c65c \
|
Update Pennsylvania.md
typos | @@ -123,7 +123,7 @@ id: pa-philadelphia-5
Police shove a group of protestors. A particularly aggressive police officer in a white shirt runs and shoves someone, then grabs another man. He is then approached by a young man with long hair whom he strikes with his baton, at full strength, in the head before jumping on him and cuffing him. He has assistance from another officer who presses the man's face to the pavement using his knee on his neck. Twitter accounts misreport the individual as female; later accounts in media report the individual is a male Temple University engineering student.
-tags: shove, batonm strike, beat
+tags: shove, baton, strike, beat
id: pa-philadelphia-7
|
fix: Fix export customisation
convert `sync_on_migrate` and `with_permissions` from string to Int | @@ -8,6 +8,7 @@ from __future__ import unicode_literals, print_function
import frappe, os, json
import frappe.utils
from frappe import _
+from frappe.utils import cint
def export_module_json(doc, is_standard, module):
"""Make a folder for the given doc and add its json file (make it a standard
@@ -39,11 +40,15 @@ def get_doc_module(module, doctype, name):
def export_customizations(module, doctype, sync_on_migrate=0, with_permissions=0):
"""Export Custom Field and Property Setter for the current document to the app folder.
This will be synced with bench migrate"""
+
+ sync_on_migrate = cint(sync_on_migrate)
+ with_permissions = cint(with_permissions)
+
if not frappe.get_conf().developer_mode:
raise Exception('Not developer mode')
custom = {'custom_fields': [], 'property_setters': [], 'custom_perms': [],
- 'doctype': doctype, 'sync_on_migrate': 1}
+ 'doctype': doctype, 'sync_on_migrate': sync_on_migrate}
def add(_doctype):
custom['custom_fields'] += frappe.get_all('Custom Field',
|
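A minimal sketch of what the `cint` coercion in this patch accomplishes: values submitted through a web form typically arrive as strings ("0"/"1"), so they are normalised to integers before being written into the exported JSON. The helper below only imitates the fall-back-to-zero behaviour of `frappe.utils.cint` and is not Frappe's actual implementation.

```python
def cint(value, default=0):
    """Best-effort conversion to int: "1" -> 1, "" or None -> 0."""
    try:
        return int(float(value))
    except (TypeError, ValueError):
        return default


# Values coming from an HTTP request are usually strings:
sync_on_migrate = cint("1")    # -> 1
with_permissions = cint("")    # -> 0
custom = {"doctype": "ToDo", "sync_on_migrate": sync_on_migrate}
```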
Removed removal from app:select:menus
This was clearing the selections when the user changes pages. | @@ -152,7 +152,6 @@ hqDefine("cloudcare/js/formplayer/menus/api", function () {
FormplayerFrontend.getChannel().reply("app:select:menus", function (options) {
if (sessionStorage.selectedValues !== undefined) {
options.selectedValues = sessionStorage.selectedValues.split(',');
- sessionStorage.removeItem('selectedValues');
}
if (!options.endpointId) {
return API.queryFormplayer(options, options.isInitial ? "navigate_menu_start" : "navigate_menu");
|
Avoid calling tensor.data.set_() in DDP
Summary: Pull Request resolved: | @@ -245,12 +245,8 @@ class DistributedDataParallel(Module):
else:
self._module_copies = [self.module]
- self.modules_params_data = [[] for _ in range(len(self.device_ids))]
- self.modules_buffers_data = [[] for _ in range(len(self.device_ids))]
-
- for dev_idx, module in enumerate(self._module_copies):
- self.modules_params_data[dev_idx] = [p.data for p in module.parameters()]
- self.modules_buffers_data[dev_idx] = [b.data for b in module.buffers()]
+ self.modules_params = [list(m.parameters()) for m in self._module_copies]
+ self.modules_buffers = [list(m.buffers()) for m in self._module_copies]
# This is a triply-nested list where the "dimensions" are: devices, buckets, bucket_elems
param_buckets = []
@@ -386,29 +382,31 @@ class DistributedDataParallel(Module):
dist._dist_broadcast_coalesced(self.process_group, tensors, buffer_size, False)
def _sync_params(self):
+ with torch.no_grad():
if len(self.device_ids) > 1:
# intra-node parameter sync
- result = broadcast_coalesced(self.modules_params_data[0],
+ result = broadcast_coalesced(self.modules_params[0],
self.device_ids,
self.broadcast_bucket_size)
- for tensors, module_params_data in zip(result[1:], self.modules_params_data[1:]):
- for tensor, param_data in zip(tensors, module_params_data):
- param_data.set_(tensor)
+ for tensors, module_params in zip(result[1:],
+ self.modules_params[1:]):
+ for tensor, param in zip(tensors, module_params):
+ param.set_(tensor)
# module buffer sync
- if self.broadcast_buffers:
- if len(self.modules_buffers_data[0]) > 0:
+ if self.broadcast_buffers and len(self.modules_buffers[0]) > 0:
# cross-node buffer sync
- self._dist_broadcast_coalesced(self.modules_buffers_data[0],
+ self._dist_broadcast_coalesced(self.modules_buffers[0],
self.broadcast_bucket_size)
if len(self.device_ids) > 1:
# intra-node buffer sync
- result = broadcast_coalesced(self.modules_buffers_data[0],
+ result = broadcast_coalesced(self.modules_buffers[0],
self.device_ids,
self.broadcast_bucket_size)
- for tensors, module_buffers_data in zip(result[1:], self.modules_buffers_data[1:]):
- for tensor, buffer_data in zip(tensors, module_buffers_data):
- buffer_data.set_(tensor)
+ for tensors, module_buffers in zip(result[1:],
+ self.modules_buffers[1:]):
+ for tensor, buffer in zip(tensors, module_buffers):
+ buffer.set_(tensor)
def _passing_sync_batchnorm_handle(self, module_copies):
for dev_idx, module in enumerate(module_copies):
@@ -447,7 +445,8 @@ class DistributedDataParallel(Module):
# We can flush these and save memory for replicas
if device_idx > 0:
param.grad = None
- param.data.set_()
+ with torch.no_grad():
+ param.set_()
# Current device's bucket is full
if self.buckets_ready_size[bucket_idx][device_idx] == self.bucket_sizes[bucket_idx]:
|
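The essence of the PyTorch change is to stop mutating tensors through `.data` and instead perform the in-place `set_()` inside a `torch.no_grad()` block. A self-contained sketch of that idiom, detached from the DDP internals:

```python
import torch

param = torch.nn.Parameter(torch.zeros(3))
broadcast_values = torch.ones(3)

# Previously: param.data.set_(broadcast_values)
# Now: run the in-place update with autograd disabled instead.
with torch.no_grad():
    param.set_(broadcast_values)

print(param)  # tensor([1., 1., 1.], requires_grad=True)
```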
Fix typo (Feature*s*Dict)
was FeatureDict, should be FeaturesDict | @@ -105,7 +105,7 @@ class MyDataset(tfds.core.GeneratorBasedBuilder):
"""Dataset metadata (homepage, citation,...)."""
return tfds.core.DatasetInfo(
builder=self,
- features=tfds.features.FeatureDict({
+ features=tfds.features.FeaturesDict({
'image': tfds.features.Image(shape=(256, 256, 3)),
'label': tfds.features.ClassLabel(names=['no', 'yes']),
}),
@@ -421,7 +421,7 @@ Version can refer to two different meaning:
* The "external" original data version: e.g. COCO v2019, v2017,...
* The "internal" TFDS code version: e.g. rename a feature in
- `tfds.features.FeatureDict`, fix a bug in `_generate_examples`
+ `tfds.features.FeaturesDict`, fix a bug in `_generate_examples`
To update a dataset:
|
remove volume term in Src
This makes sensitivity function smooth (need to double check why .. ) | @@ -23,7 +23,7 @@ class StreamingCurrents(Src.BaseSrc):
raise Exception("SP source requires mesh")
self.mesh.setCellGradBC("neumann")
# self.Div = -sdiag(self.mesh.vol)*self.mesh.cellGrad.T
- self.Div = -sdiag(self.mesh.vol)*self.mesh.cellGrad.T
+ self.Div = -self.mesh.cellGrad.T
def eval(self, prob):
"""
@@ -40,11 +40,11 @@ class StreamingCurrents(Src.BaseSrc):
"""
if prob._formulation == 'HJ':
if self.modelType == "Head":
- q = -prob.Div*self.MfLiI*prob.Grad*prob.h
+ q = -self.Div*self.MfLiI*prob.Grad*prob.h
elif self.modelType == "CurrentSource":
q = prob.q
elif self.modelType == "CurrentDensity":
- q = -prob.Div*prob.mesh.aveF2CCV.T*np.r_[prob.jsx, prob.jsy, prob.jsz]
+ q = -self.Div*prob.mesh.aveF2CCV.T*np.r_[prob.jsx, prob.jsy, prob.jsz]
else:
raise NotImplementedError()
elif prob._formulation == 'EB':
|
Export with Always Sample Animations by default
Export more reliably correct animation playback for skinned meshes | @@ -247,7 +247,7 @@ class ExportGLTF2_Base:
export_force_sampling = BoolProperty(
name='Always Sample Animations',
description='Apply sampling to all animations',
- default=False
+ default=True
)
export_nla_strips = BoolProperty(
|
change "The NumPy community" to "NumPy Developers"
change "community" to "Developers" based on | @@ -108,7 +108,7 @@ class PyTypeObject(ctypes.Structure):
# General substitutions.
project = 'NumPy'
-copyright = '2008-2022, The NumPy community'
+copyright = '2008-2022, NumPy Developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
|
fix: failing tutorial translations should be ignored
Exclude only the names, instead of full paths. This means it will
effectively exclude translated titles too. | @@ -29,7 +29,7 @@ translated_notebooks = [
# Exclude all translated basic tutorials that are also
# excluded in their original version.
excluded_translated_notebooks = [
- nb for part in ["10", "13b", "13c"] for nb in translated_notebooks if part in nb
+ Path(nb).name for part in ["10", "13b", "13c"] for nb in translated_notebooks if part in nb
]
# buggy notebooks with explanation what does not work
|
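A small illustration, using hypothetical notebook paths, of why `Path(nb).name` is taken: the exclusion list then holds bare file names, so translated copies match regardless of their (possibly translated) directory or title prefix.

```python
from pathlib import Path

translated_notebooks = [
    "tutorials/ja/10_intro.ipynb",     # hypothetical paths
    "tutorials/ja/20_advanced.ipynb",
]

excluded = [
    Path(nb).name
    for part in ["10", "13b", "13c"]
    for nb in translated_notebooks
    if part in nb
]
print(excluded)  # ['10_intro.ipynb'] -- names only, not full paths
```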
[cleanup] Desupport Page.getVersionHistory() method
getVersionHistory has multiple breaking changes against compat
and is deprecated for 5 years. Throw a FutureWarning and announce
their removal.
Also desupport Revision.hist_entry method and Revision.HistEntry
class attribute which are only used for getVersionHistory. | @@ -1770,7 +1770,7 @@ class BasePage(UnicodeMixin, ComparableMixin):
# (revid, timestamp, user, comment, size, tags)
#
# timestamp is a pywikibot.Timestamp, not a MediaWiki timestamp string
- @deprecated('Page.revisions()', since='20150206')
+ @deprecated('Page.revisions()', since='20150206', future_warning=True)
@deprecated_args(forceReload=None, revCount='total', step=None,
getAll=None, reverseOrder='reverse')
def getVersionHistory(self, reverse=False, total=None):
@@ -1784,9 +1784,13 @@ class BasePage(UnicodeMixin, ComparableMixin):
@param total: iterate no more than this number of revisions in total
"""
- return [rev.hist_entry()
+ with suppress_warnings(
+ 'pywikibot.page.Revision.hist_entry is deprecated'):
+ revisions = [
+ rev.hist_entry()
for rev in self.revisions(reverse=reverse, total=total)
]
+ return revisions
@deprecated_args(forceReload=None, reverseOrder='reverse', step=None)
def getVersionHistoryTable(self, reverse=False, total=None):
@@ -5799,7 +5803,7 @@ class Revision(DotReadableDict):
"""A structure holding information about a single revision of a Page."""
- HistEntry = namedtuple('HistEntry', ['revid',
+ _HistEntry = namedtuple('HistEntry', ['revid',
'timestamp',
'user',
'comment'])
@@ -5857,6 +5861,12 @@ class Revision(DotReadableDict):
self._sha1 = sha1
self.slots = slots
+ @classproperty
+ @deprecated(since='20200329', future_warning=True)
+ def HistEntry(cls):
+ """Class property which returns deprecated class attribute."""
+ return cls._HistEntry
+
@classproperty
@deprecated(since='20200329', future_warning=True)
def FullHistEntry(cls):
@@ -5941,10 +5951,14 @@ class Revision(DotReadableDict):
return self._sha1
+ @deprecated(since='20200329', future_warning=True)
def hist_entry(self):
"""Return a namedtuple with a Page history record."""
- return Revision.HistEntry(self.revid, self.timestamp, self.user,
+ with suppress_warnings(
+ 'pywikibot.page.Revision.HistEntry is deprecated'):
+ entry = Revision.HistEntry(self.revid, self.timestamp, self.user,
self.comment)
+ return entry
@deprecated(since='20200329', future_warning=True)
def full_hist_entry(self):
|
ebuild.domain: _make_keywords_filter(): force usage of immutable type for default keys
To fix unhashable warning. | @@ -451,7 +451,7 @@ class domain(config_domain):
"""Generates a restrict that matches iff the keywords are allowed."""
if not accept_keywords and not self.profile.keywords:
return packages.PackageRestriction(
- "keywords", values.ContainmentMatch2(default_keys))
+ "keywords", values.ContainmentMatch2(frozenset(default_keys)))
if self.unstable_arch not in default_keys:
# stable; thus empty entries == ~arch
|
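For context on the "unhashable" warning this fixes: a mutable set or list cannot be hashed, so it cannot serve as a dictionary or cache key, whereas a `frozenset` of the same values can. A standalone illustration, unrelated to pkgcore's own classes:

```python
default_keys = ["x86", "~x86"]

try:
    hash(set(default_keys))
except TypeError as exc:
    print(exc)                    # unhashable type: 'set'

cache = {}
cache[frozenset(default_keys)] = "keywords filter"   # hashable, works as a key
```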
Type-annotate resolve_composes result
To avoid confusion about the plugin result in the future, give it a
TypedDict annotation.
Note that this will not actually catch incorrect usage of the plugin
result, because the type info gets lost when saving the result in
workflow.data.plugin_results. It is mainly for "documentation". | @@ -8,6 +8,7 @@ of the BSD license. See the LICENSE file for details.
from collections import defaultdict
from copy import deepcopy
from datetime import datetime, timedelta
+from typing import TypedDict, List, Dict, Any, Optional
from osbs.repo_utils import ModuleSpec
@@ -28,6 +29,14 @@ UNPUBLISHED_REPOS = 'include_unpublished_pulp_repos'
IGNORE_ABSENT_REPOS = 'ignore_absent_pulp_repos'
+class ResolveComposesResult(TypedDict):
+ composes: List[Dict[str, Any]]
+ yum_repourls: Dict[str, List[str]]
+ include_koji_repo: bool
+ signing_intent: Optional[str]
+ signing_intent_overridden: bool
+
+
class ResolveComposesPlugin(Plugin):
"""Request a new, or use existing, ODCS compose
@@ -83,7 +92,7 @@ class ResolveComposesPlugin(Plugin):
self.yum_repourls = defaultdict(list)
self.architectures = get_platforms(self.workflow.data)
- def run(self):
+ def run(self) -> ResolveComposesResult:
if self.allow_inheritance():
self.adjust_for_inherit()
self.workflow.data.all_yum_repourls = self.repourls
@@ -355,13 +364,14 @@ class ResolveComposesPlugin(Plugin):
if not self.has_complete_repos:
self.include_koji_repo = True
- def make_result(self):
+ def make_result(self) -> ResolveComposesResult:
signing_intent = None
signing_intent_overridden = False
if self.compose_config:
signing_intent = self.compose_config.signing_intent['name']
signing_intent_overridden = self.compose_config.has_signing_intent_changed()
- result = {
+
+ result: ResolveComposesResult = {
'composes': self.composes_info,
'yum_repourls': self.yum_repourls,
'include_koji_repo': self.include_koji_repo,
|
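A stripped-down sketch of the pattern the plugin adopts: a `TypedDict` documents the exact keys of the return value, and annotating the dict literal lets a type checker validate it even though nothing is enforced at runtime. The field names mirror the diff; the rest is illustrative.

```python
from typing import Any, Dict, List, Optional, TypedDict


class ResolveComposesResult(TypedDict):
    composes: List[Dict[str, Any]]
    yum_repourls: Dict[str, List[str]]
    include_koji_repo: bool
    signing_intent: Optional[str]
    signing_intent_overridden: bool


def make_result() -> ResolveComposesResult:
    result: ResolveComposesResult = {
        "composes": [],
        "yum_repourls": {},
        "include_koji_repo": False,
        "signing_intent": None,
        "signing_intent_overridden": False,
    }
    return result
```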
Fix Travis-CI build button on GitHub
Fix build button status and link, which were broken after migration from travis-ci.org to travis-ci.com | @@ -223,10 +223,10 @@ Contributing
TODO
-.. |build-status| image:: https://travis-ci.org/pyccel/pyccel.svg?branch=master
+.. |build-status| image:: https://travis-ci.com/pyccel/pyccel.svg?branch=master
:alt: build status
:scale: 100%
- :target: https://travis-ci.org/pyccel/pyccel
+ :target: https://travis-ci.com/pyccel/pyccel
.. |docs| image:: https://readthedocs.org/projects/pyccel/badge/?version=latest
:alt: Documentation Status
|
[IMPR] parse global args before local args
detached from | @@ -446,11 +446,13 @@ def main(*args):
recentchanges = False
newpages = False
repeat = False
- gen_factory = pagegenerators.GeneratorFactory()
options = {}
# Parse command line arguments
- for arg in pywikibot.handle_args(args):
+ local_args = pywikibot.handle_args(args)
+ site = pywikibot.Site()
+ gen_factory = pagegenerators.GeneratorFactory(site)
+ for arg in local_args:
if arg.startswith('-ask'):
options['ask'] = True
elif arg.startswith('-autopatroluserns'):
@@ -475,9 +477,6 @@ def main(*args):
m = arg.split(':')
options[m[0]] = m[1]
- site = pywikibot.Site()
- site.login()
-
if usercontribs:
user = pywikibot.User(site, usercontribs)
if user.isAnonymous() or user.isRegistered():
|
Update validation.rst
Making a wrong thing right. | @@ -88,7 +88,7 @@ This is especially powerful when combined with great_expectations's command line
.. code-block:: bash
- $ validate tests/examples/titanic.csv \
+ $ great_expectations validate tests/examples/titanic.csv \
tests/examples/titanic_expectations.json
{
"results" : [
|
refactor(routing): caches: change function definition order
This patch groups all caching functions together to make the code more
readable. | @@ -118,30 +118,33 @@ class Router:
)
# caches ##################################################################
+ # name
def resize_name_cache(self, max_size):
self._name_lru_cache = lru_cache(max_size)(self._get_route)
- def resize_resolve_cache(self, max_size):
- self._resolve_lru_cache = lru_cache(max_size)(self._resolve)
-
- def resize_reverse_cache(self, max_size):
- self._reverse_lru_cache = lru_cache(max_size)(self._reverse)
-
def get_name_cache_info(self):
return self._name_lru_cache.cache_info()
- def get_resolve_cache_info(self):
- return self._resolve_lru_cache.cache_info()
-
- def get_reverse_cache_info(self):
- return self._reverse_lru_cache.cache_info()
-
def clear_name_cache_info(self):
return self._name_lru_cache.cache_clear()
+ # resolve
+ def resize_resolve_cache(self, max_size):
+ self._resolve_lru_cache = lru_cache(max_size)(self._resolve)
+
+ def get_resolve_cache_info(self):
+ return self._resolve_lru_cache.cache_info()
+
def clear_resolve_cache_info(self):
return self._resolve_lru_cache.cache_clear()
+ # reverse
+ def resize_reverse_cache(self, max_size):
+ self._reverse_lru_cache = lru_cache(max_size)(self._reverse)
+
+ def get_reverse_cache_info(self):
+ return self._reverse_lru_cache.cache_info()
+
def clear_reverse_cache_info(self):
return self._reverse_lru_cache.cache_clear()
|
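The resize methods in this refactor rely on the fact that `functools.lru_cache` cannot be resized in place: the cache is rebuilt by wrapping the undecorated method again with the new `maxsize`. A minimal sketch of that pattern outside the router code:

```python
from functools import lru_cache


class Router:
    def __init__(self):
        self._resolve_lru_cache = lru_cache(128)(self._resolve)

    def _resolve(self, path):
        return path.strip("/").split("/")   # stand-in for an expensive lookup

    def resize_resolve_cache(self, max_size):
        # no resize API on lru_cache, so re-wrap the raw method
        self._resolve_lru_cache = lru_cache(max_size)(self._resolve)

    def get_resolve_cache_info(self):
        return self._resolve_lru_cache.cache_info()


router = Router()
router.resize_resolve_cache(16)
router._resolve_lru_cache("/users/42/")
print(router.get_resolve_cache_info())
```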
STY: fake operating system
Changed the fake operating system to something more interesting. | @@ -26,7 +26,7 @@ your test configuration
- Test B
**Test Configuration**:
-* Operating system: iOs
+* Operating system: Hal
* Version number: Python 3.X
* Any details about your local setup that are relevant
|
Fix typo
This commit fixes typo. | @@ -107,4 +107,4 @@ we use:
We have made this [dataset available along with the original raw data](https://github.com/allenai/genia-dependency-trees).
* **[word2vec word vectors](http://bio.nlplab.org/#word-vectors)** trained on the Pubmed Central Open Access Subset.
* **[The MedMentions Entity Linking dataset](https://github.com/chanzuckerberg/MedMentions)**, used for training a mention detector.
-* **[Ontonotes 5.0](https://catalog.ldc.upenn.edu/LDC2013T19)** to make the parser and tagger more robust to non-biomedical text. Unfortunately this is not publically available.
+* **[Ontonotes 5.0](https://catalog.ldc.upenn.edu/LDC2013T19)** to make the parser and tagger more robust to non-biomedical text. Unfortunately this is not publicly available.
|
BUG: Fix `_resolve_dtypes_and_context` refcounting error returns
Reusing `result` doesn't work with a single "finish" goto, since
result must be NULL on error then.
This copies the result over for continuation, which is maybe also a bit
awkward, but at least not buggy... | @@ -6370,6 +6370,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context,
* state (or mind us messing with it).
*/
PyObject *result = NULL;
+ PyObject *result_dtype_tuple = NULL;
PyArrayObject *dummy_arrays[NPY_MAXARGS] = {NULL};
PyArray_DTypeMeta *DTypes[NPY_MAXARGS] = {NULL};
@@ -6527,6 +6528,9 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context,
if (result == NULL || !return_context) {
goto finish;
}
+ /* Result will be (dtype_tuple, call_info), so move it and clear result */
+ result_dtype_tuple = result;
+ result = NULL;
/* We may have to return the context: */
ufunc_call_info *call_info;
@@ -6559,12 +6563,13 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context,
context->descriptors[i] = operation_descrs[i];
}
- Py_SETREF(result, PyTuple_Pack(2, result, capsule));
+ result = PyTuple_Pack(2, result_dtype_tuple, capsule);
Py_DECREF(capsule);
finish:
npy_promotion_state = original_promotion_state;
+ Py_XDECREF(result_dtype_tuple);
for (int i = 0; i < ufunc->nargs; i++) {
Py_XDECREF(signature[i]);
Py_XDECREF(dummy_arrays[i]);
|
Drop dead code in dup_root_upper_bound()
Should be removed in | @@ -82,9 +82,6 @@ def dup_root_upper_bound(f, K):
q = t[j] + a - K.log(f[j], 2)
QL.append([q // (j - i), j])
- if not QL:
- continue
-
q = min(QL)
t[q[1]] = t[q[1]] + 1
|
Update setup.py
Remove py2 references and add pycryptodome as a deps.
fixes | @@ -8,15 +8,10 @@ import svtplay_dl
deps = []
-if sys.version_info[0] == 2 and sys.version_info[1] <= 7 and sys.version_info[2] < 9:
+
deps.append("requests>=2.0.0")
deps.append("PySocks")
- deps.append("pyOpenSSL")
- deps.append("ndg-httpsclient")
- deps.append("pyasn1")
-else:
- deps.append(["requests>=2.0.0"])
- deps.append("PySocks")
+deps.append("pycryptodome")
setup(
name="svtplay-dl",
@@ -36,11 +31,10 @@ setup(
"Environment :: Console",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
- "Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
- "Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Multimedia :: Sound/Audio",
"Topic :: Multimedia :: Video",
|
Fixing typos in Coremltools specification website
radar: rdar://problem/53137702 | @@ -188,7 +188,7 @@ message ModelDescription {
*
* 3 : iOS 12, macOS 10.14, tvOS 12, watchOS 5 (Core ML 2)
* - Flexible shapes and image sizes
- * - Categorical squences
+ * - Categorical sequences
* - Core ML Vision Feature Print, Text Classifier, Word Tagger
* - Non Max Suppression
* - Crop and Resize Bilinear NN layers
@@ -200,7 +200,7 @@ message ModelDescription {
* - Large expansion of supported neural network layers
* - Generalized operations
* - Control flow
- * - Dynmaic layers
+ * - Dynamic layers
* - See NeuralNetwork.proto
* - Nearest Neighbor Classifier
* - Sound Analysis Prepreocessing
|
css: Use more consistent visuals for edit bot form.
* Use more consistent font style, both within the form and with the
rest of the app.
* Use more consistent spacing.
Fixed | @@ -818,17 +818,19 @@ input[type="checkbox"] {
}
}
+.edit-bot-name {
+ margin-bottom: 20px;
+}
+
+.avatar-section {
+ margin-bottom: 20px;
+}
+
.edit_bot_form {
font-size: 100%;
margin: 0;
padding: 0;
- label {
- font-weight: 600;
- color: hsl(0, 0%, 67%);
- margin-top: 5px;
- }
-
.buttons {
margin: 10px 0 5px;
}
@@ -836,13 +838,11 @@ input[type="checkbox"] {
.edit_bot_email {
font-weight: 400;
- font-size: 18px;
text-align: left;
margin-top: 0;
- margin-bottom: 10px;
+ margin-bottom: 20px;
overflow: hidden;
- max-height: 1.1em;
text-overflow: ellipsis;
white-space: pre;
}
|
CHANGES.md doc of dagster_spark and dagster_aws updates
Test Plan: none
Reviewers: nate | **Breaking Changes**
-- `Path` is no longer as built-in dagster type.
+- `Path` is no longer a built-in dagster type.
- The CLI option `--celery-base-priority` is no longer available for the command:
`dagster pipeline backfill`. Use the tags option to specify the celery priority, (e.g.
`dagster pipeline backfill my_pipeline --tags '{ "dagster-celery/run_priority": 3 }'`
- `dagit-cli` has been removed and `dagit` is now the only console entrypoint.
- All `PartitionSetDefinitions` must have unique names with a `RepositoryDefinition`, including those from `PartitionScheduleDefinition`
- The partitioned schedule decorators now generate names for their `PartitionSetDefinition` using the schedule name, suffixed with `_partitions`.
+- The AWS CLI has been removed.
+- `dagster_aws.EmrRunJobFlowSolidDefinition` has been removed.
+- `dagster_spark.SparkSolidDefinition` has been removed - use `create_spark_solid` instead.
+- The `SparkRDD` Dagster type, which only worked with an in-memory engine, has been removed.
**New**
- The partitioned schedule decorators now support optional `end_time`.
+- `dagster_spark.create_spark_solid` now accepts a `required_resource_keys` argument, which enables
+ setting up a step launcher for Spark solids, like the `emr_pyspark_step_launcher`.
## 0.7.15 (Latest)
|
Explicitly pass through environ to test run commands
Needed in this case to ensure that COLUMNS is provided in cases where
it's defined in the newly created shell. | @@ -898,6 +898,7 @@ def _run(cmd, quiet=False, ignore=None, timeout=60, cut=None):
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
preexec_fn=os.setsid,
+ env=os.environ,
)
with _kill_after(p, timeout):
out, err = p.communicate()
|
Normalization derivative should be real (not a bug, but just to clean up) | @@ -541,7 +541,7 @@ def optimize_orthogonal(
normalization = np.zeros(nwf - 1)
total_energy = 0
#energy_derivative = np.zeros(len(parameters))
- N_derivative = np.zeros(len(parameters), dtype=dtype)
+ N_derivative = np.zeros(len(parameters))
condition = np.zeros((len(parameters), len(parameters)))
overlaps = np.zeros(nwf - 1, dtype=dtype)
overlap_derivatives = np.zeros((nwf - 1, len(parameters)), dtype=dtype)
|
Transfers: set 1.27 compatibility attribute on request
Otherwise the 1.27 submitters can pick the intermediate transfer
and start working on it. | @@ -391,6 +391,11 @@ def __create_missing_replicas_and_requests(
logger(logging.ERROR, '%s: Problem adding replicas on %s : %s', initial_request_id, rws.dest_rse, str(error))
rws.attributes['is_intermediate_hop'] = True
+ # next_hop_request_id and initial_request_id are not used anymore in rucio >=1.28, but are needed
+ # for running at the same time 1.27 and 1.28 on the same database.
+ # TODO: remove following two rows
+ rws.attributes['next_hop_request_id'] = transfer_path[i + 1].rws.request_id
+ rws.attributes['initial_request_id'] = initial_request_id
rws.attributes['source_replica_expression'] = hop.src.rse.name
new_req = queue_requests(requests=[{'dest_rse_id': rws.dest_rse.id,
'scope': rws.scope,
|
Workaround for variables binding on xs:assert statement
- A fix on variables of static context will be needed for the
next minor release of elementpath | @@ -62,6 +62,7 @@ class XsdAssert(XsdComponent, ElementPathMixin):
return self.token is not None and (self.base_type.parent is None or self.base_type.built)
def parse_xpath_test(self):
+ # FIXME: parser's variables filled with XSD type with next elementpath minor release
if not self.base_type.has_simple_content():
variables = {'value': XSD_BUILTIN_TYPES['anyType'].value}
else:
@@ -85,17 +86,22 @@ class XsdAssert(XsdComponent, ElementPathMixin):
except ElementPathError as err:
self.parse_error(err, elem=self.elem)
self.token = self.parser.parse('true()')
+ finally:
+ self.parser.variables.clear()
def __call__(self, elem, value=None, source=None, namespaces=None, **kwargs):
- if value is not None:
- self.parser.variables['value'] = self.base_type.text_decode(value)
if not self.parser.is_schema_bound():
self.parser.schema.bind_parser(self.parser)
+ if value is not None:
+ variables = {'value': self.base_type.text_decode(value)}
+ else:
+ variables = {'value': ''}
+
if source is None:
- context = XPathContext(root=elem)
+ context = XPathContext(root=elem, variables=variables)
else:
- context = XPathContext(root=source.root, item=elem)
+ context = XPathContext(root=source.root, item=elem, variables=variables)
default_namespace = self.parser.namespaces['']
if namespaces and '' in namespaces:
|
Allow Mapping in the typing for response values
Whilst strictly only `dict`s are valid, Mapping is typically used to
mean the same and helps mypy understand that things like TypedDicts
are valid response values. | @@ -14,6 +14,7 @@ from typing import (
Dict,
Generator,
List,
+ Mapping,
Optional,
Tuple,
Type,
@@ -49,7 +50,7 @@ ResponseValue = Union[
"Response",
"WerkzeugResponse",
AnyStr,
- Dict[str, Any], # any jsonify-able dict
+ Mapping[str, Any], # any jsonify-able dict
List[Any], # any jsonify-able list
AsyncGenerator[AnyStr, None],
Generator[AnyStr, None, None],
|
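Why `Mapping` rather than `Dict` matters to mypy: a `TypedDict` is accepted where a `Mapping[str, Any]` is expected but rejected where a `Dict[str, Any]` is required, so widening the union lets typed response dictionaries pass the checker. A standalone illustration (the names below are made up, not the framework's):

```python
from typing import Any, Dict, Mapping, TypedDict


class Payload(TypedDict):
    name: str
    count: int


def accepts_mapping(value: Mapping[str, Any]) -> None: ...
def accepts_dict(value: Dict[str, Any]) -> None: ...


payload: Payload = {"name": "widget", "count": 3}
accepts_mapping(payload)   # OK: TypedDict is compatible with Mapping[str, Any]
accepts_dict(payload)      # mypy error: TypedDict is not a Dict[str, Any]
```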
Avoid strict aliasing warning in float/half conversions.
Verified that at least for GCC 4.47 this generates identical code. | @@ -77,15 +77,17 @@ float TH_half2float(THHalf h)
}
int temp = ((sign << 31) | (exponent << 23) | mantissa);
-
- return *((float*)((void*)&temp));
+ float x;
+ memcpy(&x,&temp,sizeof(float));
+ return x;
}
THHalf TH_float2half(float f)
{
THHalf ret;
- unsigned x = *((int*)(void*)(&f));
+ unsigned x;
+ memcpy(&x,&f,sizeof(f));
unsigned u = (x & 0x7fffffff), remainder, shift, lsb, lsb_s1, lsb_m1;
unsigned sign, exponent, mantissa;
|
Update pua.txt
Minus dups. | @@ -1825,6 +1825,11 @@ non-block.net/wpad.dat
nonblock.net/wpad.dat
nonblocks.com/wpad.dat
none-stop.net/wpad.dat
+no-stop.net/wpad.dat
+notblocked.net/wpad.dat
+no-blocked.com/wpad.dat
+nonblock.net/wpad.dat
+none-stop.net/wpad.dat
noneblock.com/wpad.dat
nostopped.net/wpad.dat
stoppblock.biz/wpad.dat
@@ -1845,16 +1850,6 @@ unstops.net/wpad.dat
unstopweb.com/wpad.dat
unstopweb.net/wpad.dat
unstopweb.org/wpad.dat
-no-stop.net/wpad.dat
-notblocked.net/wpad.dat
-no-blocked.com/wpad.dat
-no-blocked.com/wpad.dat
-no-blocked.com/wpad.dat
-no-blocked.com/wpad.dat
-nonblock.net/wpad.dat
-none-stop.net/wpad.dat
-stoppblock.com/wpad.dat
-unstopp.me/wpad.dat
webunstop.org/wpad.dat
# Reference: https://twitter.com/James_inthe_box/status/1125543661259771904
|
Prevent ST from locking up when a server doesn't respond to an initialize request
We wait for at most 3 seconds for a server to respond to an initialize request. | @@ -4,16 +4,20 @@ from .transports import start_tcp_transport, start_tcp_listener, TCPTransport, T
from .rpc import Client, attach_stdio_client, Response
from .process import start_server
from .logging import debug
+from contextlib import contextmanager
import os
import threading
from .protocol import completion_item_kinds, symbol_kinds, WorkspaceFolder
try:
- from typing import Callable, Dict, Any, Optional, List, Tuple
- assert Callable and Dict and Any and Optional and Transport and List and WorkspaceFolder and Tuple
+ from typing import Callable, Dict, Any, Optional, List, Tuple, Generator
+ assert Callable and Dict and Any and Optional and Transport and List and WorkspaceFolder and Tuple and Generator
except ImportError:
pass
+ACQUIRE_READY_LOCK_TIMEOUT = 3
+
+
def create_session(config: ClientConfig,
workspace_folders: 'List[WorkspaceFolder]',
env: dict,
@@ -164,6 +168,14 @@ def diff_folders(old: 'List[WorkspaceFolder]',
return added, removed
+@contextmanager
+def acquire_timeout(lock: threading.Lock, timeout: float) -> 'Generator[bool, None, None]':
+ result = lock.acquire(True, timeout)
+ yield result
+ if result:
+ lock.release()
+
+
class Session(object):
def __init__(self,
config: ClientConfig,
@@ -193,7 +205,10 @@ class Session(object):
def handles_path(self, file_path: 'Optional[str]') -> bool:
if not file_path:
return False
- with self.ready_lock:
+ with acquire_timeout(self.ready_lock, ACQUIRE_READY_LOCK_TIMEOUT) as acquired:
+ if not acquired:
+ raise TimeoutError("{} did not respond to the initialize request within {} seconds".format(
+ self.config.name, ACQUIRE_READY_LOCK_TIMEOUT))
# If we're in a window with no folders, or we're a multi-folder session, then we handle any path.
if not self._workspace_folders or self._unsafe_supports_workspace_folders():
return True
|
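A self-contained version of the lock-with-timeout context manager this patch introduces; here a `try/finally` guarantees the release even if the body raises. The constant and function names follow the diff, the rest is illustrative.

```python
import threading
from contextlib import contextmanager

ACQUIRE_READY_LOCK_TIMEOUT = 3


@contextmanager
def acquire_timeout(lock, timeout):
    acquired = lock.acquire(True, timeout)
    try:
        yield acquired
    finally:
        if acquired:
            lock.release()


ready_lock = threading.Lock()
with acquire_timeout(ready_lock, ACQUIRE_READY_LOCK_TIMEOUT) as acquired:
    if not acquired:
        raise TimeoutError("server did not respond to the initialize request in time")
    print("lock held, safe to inspect session state")
```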
examples/pup/hubs/hub_primehub: fix button
Corrected Bluetooth button name (from BT to BLUETOOTH) | @@ -23,7 +23,7 @@ if Button.LEFT in pressed:
hub.display.image(Icon.ARROW_LEFT_DOWN)
elif Button.RIGHT in pressed:
hub.display.image(Icon.ARROW_RIGHT_DOWN)
-elif Button.BT in pressed:
+elif Button.BLUETOOTH in pressed:
hub.display.image(Icon.ARROW_RIGHT_UP)
wait(3000)
|
Update magentocore.txt
More domains from mentioned ```124.156.210.169``` | @@ -965,16 +965,30 @@ vamberlo.com
# Reference: https://www.rapidspike.com/blog/multiple-hacking-groups-attempt-to-skim-credit-cards-from-perricone-md/
# Reference: https://twitter.com/BreachMessenger/status/1057394505266151425
+# Reference: https://www.virustotal.com/gui/ip-address/124.156.210.169/relations
+a4c.cloud
ajaxstatic.com
+apipack.host
+authorizeplus.com
autojspack.com
cdndeskpro.com
+cdnpack.net
+cdnpack.site
+dusk.net.in
+faceapiget.com
fbpixelget.com
gstaticapi.com
-js-react.com
jspack.pro
kegland.top
lightgetjs.com
+listrakjs.com
+olarkcdn.com
perriconemd.me.uk
+priceapigate.com
rackapijs.com
section.ws
+sectionget.com
+sectionio.com
+topapigate.com
+worx.top
|
Record queries for helpful failure msg in bounds test
This will let us actually see what extra queries are being
executed.
Related-Bug: | @@ -6536,10 +6536,10 @@ class DbOperationBoundMixin(object):
def setUp(self, *args, **kwargs):
super(DbOperationBoundMixin, self).setUp(*args, **kwargs)
- self._db_execute_count = 0
+ self._recorded_statements = []
- def _event_incrementer(*args, **kwargs):
- self._db_execute_count += 1
+ def _event_incrementer(conn, clauseelement, *args, **kwargs):
+ self._recorded_statements.append(str(clauseelement))
engine = db_api.context_manager.writer.get_engine()
db_api.sqla_listen(engine, 'after_execute', _event_incrementer)
@@ -6553,7 +6553,7 @@ class DbOperationBoundMixin(object):
context_ = self._get_context()
return {'set_context': True, 'tenant_id': context_.tenant}
- def _list_and_count_queries(self, resource, query_params=None):
+ def _list_and_record_queries(self, resource, query_params=None):
kwargs = {'neutron_context': self._get_context()}
if query_params:
kwargs['query_params'] = query_params
@@ -6561,23 +6561,28 @@ class DbOperationBoundMixin(object):
# otherwise the first list after a create will be different than
# a subsequent list with no create.
self._list(resource, **kwargs)
- self._db_execute_count = 0
+ self._recorded_statements = []
self.assertNotEqual([], self._list(resource, **kwargs))
- query_count = self._db_execute_count
# sanity check to make sure queries are being observed
- self.assertNotEqual(0, query_count)
- return query_count
+ self.assertNotEqual(0, len(self._recorded_statements))
+ return list(self._recorded_statements)
def _assert_object_list_queries_constant(self, obj_creator, plural,
filters=None):
obj_creator()
- before_count = self._list_and_count_queries(plural)
+ before_queries = self._list_and_record_queries(plural)
# one more thing shouldn't change the db query count
obj = list(obj_creator().values())[0]
- after_count = self._list_and_count_queries(plural)
- self.assertEqual(before_count, after_count)
+ after_queries = self._list_and_record_queries(plural)
+ self.assertEqual(len(before_queries), len(after_queries),
+ self._qry_fail_msg(before_queries, after_queries))
# using filters shouldn't change the count either
if filters:
query_params = "&".join(["%s=%s" % (f, obj[f]) for f in filters])
- after_count = self._list_and_count_queries(plural, query_params)
- self.assertEqual(before_count, after_count)
+ after_queries = self._list_and_record_queries(plural, query_params)
+ self.assertEqual(len(before_queries), len(after_queries),
+ self._qry_fail_msg(before_queries, after_queries))
+
+ def _qry_fail_msg(self, before, after):
+ return "\n".join(["queries before:"] + before +
+ ["queries after:"] + after)
|
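The test helper now records each SQL statement through an `after_execute` engine event rather than merely counting them, so a failed bound check can show the offending queries. A minimal standalone version of that hook in plain SQLAlchemy (the in-memory engine URL is illustrative):

```python
from sqlalchemy import create_engine, event, text

engine = create_engine("sqlite://")
recorded_statements = []


@event.listens_for(engine, "after_execute")
def record_statement(conn, clauseelement, *args, **kwargs):
    recorded_statements.append(str(clauseelement))


with engine.connect() as conn:
    conn.execute(text("SELECT 1"))

print(recorded_statements)   # ['SELECT 1']
```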
Add note about escape_markdown in Message.clean_content
Fix | @@ -406,6 +406,12 @@ class Message:
This will also transform @everyone and @here mentions into
non-mentions.
+
+ .. note::
+
+ This *does not* escape markdown. If you want to escape
+ markdown then use :func:`utils.escape_markdown` along
+ with this function.
"""
transformations = {
|
Updates QubitProcessorSpec serialization to be more backward compatible.
Allows loading (from json) older-version processor specs that don't have
SPAM & instrument members and adds a warning to update and resave them. | @@ -13,8 +13,10 @@ Defines the QubitProcessorSpec class and supporting functionality.
import numpy as _np
import itertools as _itertools
import collections as _collections
+import warnings as _warnings
from functools import lru_cache
+
from pygsti.tools import internalgates as _itgs
from pygsti.tools import symplectic as _symplectic
from pygsti.tools import optools as _ot
@@ -298,9 +300,9 @@ class QuditProcessorSpec(ProcessorSpec):
elif isinstance(obj, dict): return {k: _unserialize_instrument_member(v) for k, v in obj.items()}
raise ValueError("Cannot unserialize Instrument specifier of type %s!" % str(type(obj)))
- nonstd_preps = {k: _unserialize_state(obj) for k, obj in state['nonstd_preps'].items()}
- nonstd_povms = {k: _unserialize_povm(obj) for k, obj in state['nonstd_povms'].items()}
- nonstd_instruments = {k: _unserialize_instrument(obj) for k, obj in state['nonstd_instruments'].items()}
+ nonstd_preps = {k: _unserialize_state(obj) for k, obj in state.get('nonstd_preps', {}).items()}
+ nonstd_povms = {k: _unserialize_povm(obj) for k, obj in state.get('nonstd_povms', {}).items()}
+ nonstd_instruments = {k: _unserialize_instrument(obj) for k, obj in state.get('nonstd_instruments', {}).items()}
return nonstd_gate_unitaries, nonstd_preps, nonstd_povms, nonstd_instruments
@@ -853,9 +855,15 @@ class QubitProcessorSpec(QuditProcessorSpec):
availability = {k: _tuplize(v) for k, v in state['availability'].items()}
geometry = _qgraph.QubitGraph.from_nice_serialization(state['geometry'])
+ if 'prep_names' not in state:
+ _warnings.warn(("Loading an old-format QubitProcessorSpec that doesn't contain SPAM information."
+ " You should check to make sure you don't want/need to add this information and"
+ " then re-save this processor spec."))
+
return cls(len(state['qubit_labels']), state['gate_names'], nonstd_gate_unitaries, availability,
- geometry, state['qubit_labels'], symplectic_reps, state['prep_names'], state['povm_names'],
- state['instrument_names'], nonstd_preps, nonstd_povms, nonstd_instruments, state['aux_info'])
+ geometry, state['qubit_labels'], symplectic_reps, state.get('prep_names', []),
+ state.get('povm_names', []), state.get('instrument_names', []), nonstd_preps, nonstd_povms,
+ nonstd_instruments, state['aux_info'])
@property
def qubit_labels(self):
|
Removed some unneeded skips for real FS after fix
documented the remaining issues
see | @@ -401,7 +401,7 @@ class FakeCopyFileTest(RealFsTestCase):
def testRaisesIfDestDirIsNotWritableUnderPosix(self):
self.checkPosixOnly()
- self.skipRealFsFailure(skipPython3=False)
+ self.skipRealFsFailure(skipLinux=False, skipPython3=False)
src_file = self.makePath('xyzzy')
dst_dir = self.makePath('tmp', 'foo')
dst_file = os.path.join(dst_dir, 'xyzzy')
@@ -411,6 +411,7 @@ class FakeCopyFileTest(RealFsTestCase):
os.chmod(dst_dir, 0o555)
self.assertTrue(os.path.exists(src_file))
self.assertTrue(os.path.exists(dst_dir))
+ # FIXME: raises IOError (permission denied) under MacOS
self.assertRaises(OSError, shutil.copyfile, src_file, dst_file)
def testRaisesIfSrcDoesntExist(self):
|
Change bootstrap theme to lumen
This changes the bootstrap theme to lumen and the navbar style. | @@ -158,7 +158,7 @@ html_theme_options = {
"globaltoc_includehidden": "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
- "navbar_class": "navbar navbar-inverse",
+ "navbar_class": "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
"navbar_fixed_top": "true",
@@ -176,7 +176,7 @@ html_theme_options = {
# Currently, the supported themes are:
# - Bootstrap 2: https://bootswatch.com/2
# - Bootstrap 3: https://bootswatch.com/3
- "bootswatch_theme": "united",
+ "bootswatch_theme": "lumen",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
"bootstrap_version": "3",
|
Remove unnecessary Worker param
* Remove unnecessary Worker param
when in test, it will throw warning log. like following
[2021-02-02 18:02:49,547] [WARNING] [args.py:239:parse_worker_args] Unknown arguments: ['--worker_id', '0']
* Update worker_ps_interaction_test.py | @@ -71,8 +71,6 @@ class WorkerPSInteractionTest(unittest.TestCase):
tf.keras.backend.clear_session()
tf.random.set_seed(22)
arguments = [
- "--worker_id",
- i,
"--job_type",
elasticai_api_pb2.TRAINING,
"--minibatch_size",
@@ -156,8 +154,6 @@ class WorkerPSInteractionTest(unittest.TestCase):
model_def = "mnist.mnist_functional_api.custom_model"
self._create_pserver(model_def, 2)
arguments = [
- "--worker_id",
- 0,
"--job_type",
elasticai_api_pb2.TRAINING,
"--minibatch_size",
@@ -210,8 +206,6 @@ class WorkerPSInteractionTest(unittest.TestCase):
images, labels = get_random_batch(self._batch_size)
# TODO(yunjian.lmh): test optimizer wrapper
arguments = [
- "--worker_id",
- 0,
"--job_type",
elasticai_api_pb2.TRAINING,
"--minibatch_size",
@@ -371,8 +365,6 @@ class WorkerPSInteractionTest(unittest.TestCase):
for w in range(2):
self._reset_pserver()
arguments = [
- "--worker_id",
- 0,
"--job_type",
elasticai_api_pb2.TRAINING,
"--minibatch_size",
|
Typo and grammar corrections
Two minor edits:
* Corrected a typo: <``text_align`` value of ``center``> -> <``alignment`` value of ``center``>
* Changed all instances of <a ``alignment``> to <an ``alignment``> | @@ -356,14 +356,14 @@ specifying the allocated width and allocated height.
The extra height for a child is defined as the difference between the
parent elements final height and the child's full height.
- If the parent element has a ``alignment`` value of ``top``, the
+ If the parent element has an ``alignment`` value of ``top``, the
vertical position of the child is set to 0, relative to the parent.
- If the parent element has a ``alignment`` value of ``bottom``, the
+ If the parent element has an ``alignment`` value of ``bottom``, the
vertical position of the child is set to the extra height, relative to
the parent.
- If the parent element has a ``alignment`` value of ``center``, the
+ If the parent element has an ``alignment`` value of ``center``, the
vertical position of the child is set to 1/2 of the extra height,
relative to the parent.
@@ -438,13 +438,13 @@ specifying the allocated width and allocated height.
The extra width for a child is defined as the difference between the
parent element's final width and the child's full width.
- If the parent element has a ``alignment`` value of ``left``, the
+ If the parent element has an ``alignment`` value of ``left``, the
horizontal position of the child is set to 0, relative to the parent.
- If the parent element has a ``alignment`` value of ``right``, the
+ If the parent element has an ``alignment`` value of ``right``, the
horizontal position of the child is set to the extra width, relative to
the parent.
- If the parent element has a ``text_align`` value of ``center``, the
+ If the parent element has an ``alignment`` value of ``center``, the
horizontal position of the child is set to 1/2 of the extra width,
relative to the parent.
|
Add sys.breakpointhook() and sys.__breakpointhook__
Closes | @@ -188,6 +188,10 @@ def intern(string: str) -> str: ...
if sys.version_info >= (3, 5):
def is_finalizing() -> bool: ...
+if sys.version_info >= (3, 7):
+ __breakpointhook__: Any # contains the original value of breakpointhook
+ def breakpointhook(*args: Any, **kwargs: Any) -> Any: ...
+
def setcheckinterval(interval: int) -> None: ... # deprecated
def setdlopenflags(n: int) -> None: ... # Linux only
def setrecursionlimit(limit: int) -> None: ...
|
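For context on what the new stubs describe (Python 3.7+): `breakpoint()` forwards its arguments to `sys.breakpointhook`, and the original hook remains available as `sys.__breakpointhook__` so it can be restored. A small illustrative use:

```python
import sys


def quiet_hook(*args, **kwargs):
    print("breakpoint reached; continuing without a debugger")


sys.breakpointhook = quiet_hook
breakpoint()                                  # calls quiet_hook instead of pdb
sys.breakpointhook = sys.__breakpointhook__   # restore the default behaviour
```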
added a `clean` argument to piccolo migrations backwards
It will remove the migration files after the backwards migration completes. | from __future__ import annotations
+import os
import sys
from piccolo.apps.migrations.auto import MigrationManager
@@ -9,11 +10,16 @@ from piccolo.utils.sync import run_sync
class BackwardsMigrationManager(BaseMigrationManager):
def __init__(
- self, app_name: str, migration_id: str, auto_agree: bool = False
+ self,
+ app_name: str,
+ migration_id: str,
+ auto_agree: bool = False,
+ clean: bool = False,
):
self.migration_id = migration_id
self.app_name = app_name
self.auto_agree = auto_agree
+ self.clean = clean
super().__init__()
def run(self):
@@ -87,12 +93,19 @@ class BackwardsMigrationManager(BaseMigrationManager):
Migration.delete().where(
Migration.name == migration_id
).run_sync()
+
+ if self.clean:
+ os.unlink(migration_module.__file__)
+
else: # pragma: no cover
sys.exit("Not proceeding.")
def backwards(
- app_name: str, migration_id: str = "1", auto_agree: bool = False
+ app_name: str,
+ migration_id: str = "1",
+ auto_agree: bool = False,
+ clean: bool = False,
):
"""
Undo migrations up to a specific migration.
@@ -106,6 +119,9 @@ def backwards(
value of '1' to undo the most recent migration.
:param auto_agree:
Automatically agree to any input prompts.
+ :param clean:
+ If true, the migration files which have been run backwards are deleted
+ from the disk after completing.
"""
if app_name == "all":
@@ -133,6 +149,9 @@ def backwards(
manager.run()
else:
manager = BackwardsMigrationManager(
- app_name=app_name, migration_id=migration_id, auto_agree=auto_agree
+ app_name=app_name,
+ migration_id=migration_id,
+ auto_agree=auto_agree,
+ clean=clean,
)
manager.run()
|
Update mppcommand.py
encode self.response | @@ -70,7 +70,7 @@ class mppCommand(object):
def __str__(self):
# TODO: fix so print(class) provides the the decription and help etc??
- result = "{}\n{}\n{}\n{}\n{}".format(self.name, self.description, self.help, self.response.encode('utf-8'), self.response_dict)
+ result = "{}\n{}\n{}\n{}\n{}".format(self.name, self.description, self.help, self.response, self.response_dict)
return result
def __init__(self, name, description, command_type, response_definition, test_responses=[], regex="", value=None, help=""):
@@ -100,7 +100,10 @@ class mppCommand(object):
self.response = None
def set_response(self, response):
+ if(response is None):
self.response = response
+ return
+ self.response = response.encode('utf-8')
self.valid_response = self.is_response_valid(response)
if self.valid_response:
self.response_dict = self.get_response_dict()
|
Save distributions in class
Distributions are needed for estimation of higher statistical moments | @@ -15,7 +15,7 @@ class PolynomialBasis(metaclass=NoPublicConstructor):
def __init__(self, inputs_number: int,
polynomials_number,
multi_index_set,
- polynomials):
+ polynomials, distributions):
"""
Create polynomial basis for a given multi index set.
"""
@@ -23,6 +23,7 @@ class PolynomialBasis(metaclass=NoPublicConstructor):
self.multi_index_set = multi_index_set
self.polynomials_number = polynomials_number
self.inputs_number = inputs_number
+ self.distributions= distributions
@classmethod
def create_total_degree_basis(cls, distributions, max_degree, hyperbolic=1):
@@ -42,7 +43,7 @@ class PolynomialBasis(metaclass=NoPublicConstructor):
mask = np.round(np.sum(multi_index_set**hyperbolic, axis=1)**(1/hyperbolic), 4) <= max_degree
multi_index_set=multi_index_set[mask]
polynomials = PolynomialBasis.construct_arbitrary_basis(inputs_number, distributions, multi_index_set)
- return cls._create(inputs_number, len(multi_index_set), multi_index_set, polynomials)
+ return cls._create(inputs_number, len(multi_index_set), multi_index_set, polynomials, distributions)
@classmethod
def create_tensor_product_basis(cls, distributions, max_degree):
|
Don't show full path in tests
Now that path display is coupled to hierarchical ordering, these tests
were breaking. The simplest fix is to keep the name ordering by turning
off show_full_path. | @@ -126,7 +126,7 @@ class LocationChoiceProviderTest(ChoiceProviderTestMixin, LocationHierarchyTestC
choice_tuples = [
(location.name, SearchableChoice(
location.location_id,
- location.get_path_display(),
+ location.display_name,
searchable_text=[location.site_code, location.name]
))
for location in six.itervalues(cls.locations)
@@ -137,7 +137,7 @@ class LocationChoiceProviderTest(ChoiceProviderTestMixin, LocationHierarchyTestC
cls.choice_provider = LocationChoiceProvider(report, None)
cls.choice_provider.configure({
"include_descendants": False,
- "show_full_path": True,
+ "show_full_path": False,
})
cls.static_choice_provider = StaticChoiceProvider(choices)
cls.choice_query_context = partial(ChoiceQueryContext, user=cls.web_user)
@@ -177,7 +177,7 @@ class LocationChoiceProviderTest(ChoiceProviderTestMixin, LocationHierarchyTestC
scoped_choices = [
SearchableChoice(
location.location_id,
- location.get_path_display(),
+ location.display_name,
searchable_text=[location.site_code, location.name]
)
for location in [
|
[fix]: updated the sql output
Github Issue: | CASE WHEN last_referral_date BETWEEN %(start_date)s AND %(end_date)s
THEN 1 ELSE 0 END
) as cases_person_referred
- FROM "ucr_icds-cas_static-person_cases_v3_2ae0879a" ucr INNER JOIN
+ FROM "ucr_icds-cas_static-person_cases_v3_2ae0879a" ucr LEFT JOIN
"icds_dashboard_migration_forms" agg_migration ON (
ucr.doc_id = agg_migration.person_case_id AND
agg_migration.month = %(start_date)s AND
ucr.supervisor_id = adolescent_girls_table.supervisor_id AND
adolescent_girls_table.month=%(start_date)s
)
- INNER JOIN
+ LEFT JOIN
"icds_dashboard_migration_forms" agg_migration ON (
ucr.doc_id = agg_migration.person_case_id AND
agg_migration.month = %(start_date)s AND
|
ignore examples for now
hopefully will get fixed once is merged | @@ -74,7 +74,7 @@ templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ["_build"]
+exclude_patterns = ["_build", "examples"]
# Ignore duplicated sections warning
suppress_warnings = ["epub.duplicated_toc_entry"]
|
Update virustotal_apiv3.py
add subject of rule in tags | import logging
import re
-import json
from datetime import timedelta, datetime
from core import Feed
@@ -60,7 +59,7 @@ class VirusTotalPriv(Feed):
sha256 = Hash.get_or_create(value=sha2)
f_vt3.active_link_to(sha256, "sha256", self.name)
tags.append(tags2)
-
+ tags.append(subject)
context["date_added"] = date_string
context["snippet"] = item["attributes"]["snippet"]
# context['source_country'] = item["attributes"]['source_country']
|
Hotfix: Stroke color of inkscape colored nodes not kept
Fixes | @@ -794,8 +794,9 @@ class TexTextElement(inkex.Group):
def import_group_color_style(self, src_svg_ele):
"""
- Extracts the color relevant style attributes of src_svg_ele (of class SVGElement) and applies them to all items
- of self._node. Ensures that non color relevant style attributes are not overwritten.
+ Extracts the color relevant style attributes of src_svg_ele (of class TexTextElement) and
+ applies them to all items of self. Ensures that non color relevant style
+ attributes are not overwritten.
"""
# Take the top level style information which is set when coloring the group in Inkscape
@@ -811,10 +812,16 @@ class TexTextElement(inkex.Group):
for it in self.iter():
# Update style
it.style.update(color_style_dict)
+
+ # Ensure that simple strokes are also colored if the the group has a fill color
+ if "stroke" in it.style and "fill" in color_style_dict:
+ it.style.update({"stroke": color_style_dict["fill"]})
+
# Remove style-duplicating attributes
for prop in ("stroke", "fill"):
if prop in style:
it.pop(prop)
+
# Avoid unintentional bolded letters
if "stroke-width" not in it.style:
it.style["stroke-width"] = "0"
|
Add region when creating a new version of an osfstorage file
Also add __unicode__ method to make it easier to identify regions in the shell | @@ -291,8 +291,9 @@ class OsfStorageFile(OsfStorageFileNode, File):
return latest_version
if metadata:
- version.update_metadata(metadata)
+ version.update_metadata(metadata, save=False)
+ version.region = self.target.osfstorage_region
version._find_matching_archive(save=False)
version.save()
@@ -449,6 +450,9 @@ class Region(models.Model):
mfr_url = models.URLField(default=website_settings.MFR_SERVER_URL)
waterbutler_settings = DateTimeAwareJSONField(default=dict)
+ def __unicode__(self):
+ return '{}'.format(self.name)
+
def get_absolute_url(self):
return '{}regions/{}'.format(self.absolute_api_v2_url, self._id)
|
fix_segment_redundancy.py edited online with Bitbucket
HG--
branch : pnpwin/fix_segment_redundancypy-edited-online-w-1493278995873 | @@ -15,7 +15,7 @@ from noc.sa.models.managedobject import ManagedObject
def fix():
uplinks = dict(
- (d["_id"], d["uplinks"])
+ (d["_id"], d.get("uplinks", []))
for d in ObjectData._get_collection().find()
)
seg_status = defaultdict(lambda: False)
|
Swaps CSS loading order in init_notebook_mode(...).
Now pygsti_dataviz.css is loaded *after* jQuery's "smoothness" theme
CSS so that the former can override behavior (such as fixed-length
dropdown boxes) in the latter. | @@ -382,13 +382,13 @@ class Workspace(object):
script += _merge.insert_resource(connected, None, "pygsti_plotly_ex.js")
script += "<script type='text/javascript'> window.plotman = new PlotManager(); </script>"
- # Load style sheets for displaying tables
- script += _merge.insert_resource(connected, None, "pygsti_dataviz.css")
-
#jQueryUI_CSS = "https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css"
jQueryUI_CSS = "https://code.jquery.com/ui/1.12.1/themes/smoothness/jquery-ui.css"
script += _merge.insert_resource(connected, jQueryUI_CSS, "smoothness-jquery-ui.css")
+ # Load style sheets for displaying tables
+ script += _merge.insert_resource(connected, None, "pygsti_dataviz.css")
+
#To fix the UI tooltips within Jupyter (b/c they use an old/custom JQueryUI css file)
if connected:
imgURL = "https://code.jquery.com/ui/1.12.1/themes/smoothness/images/ui-icons_222222_256x240.png"
|
set boundary groups of demo mesh with withgroups
The demo mesh overrides the `boundary` attribute of the `UnstructuredTopology`
instance, essentially to set groups. Use `withgroups(boundary=...)` instead.
This is necessary for the pending change to make topologies immutable. | @@ -373,8 +373,7 @@ def demo(xmin=0, xmax=1, ymin=0, ymax=1):
topo = topology.UnstructuredTopology(2, elems)
belems = [elem.edge(0) for elem in elems[:12]]
- btopos = [topology.UnstructuredTopology(1, subbelems) for subbelems in (belems[0:3], belems[3:6], belems[6:9], belems[9:12])]
- topo.boundary = topology.UnionTopology(btopos, ['top','left','bottom','right'])
+ topo = topo.withboundary(**{name: topology.UnstructuredTopology(1, subbelems) for name, subbelems in (('top',belems[0:3]), ('left',belems[3:6]), ('bottom',belems[6:9]), ('right',belems[9:12]))})
geom = [.5*(xmin+xmax),.5*(ymin+ymax)] \
+ [.5*(xmax-xmin),.5*(ymax-ymin)] * function.rootcoords(2)
|
Replace deprecated 'nocapitalize' in interwiki.py
While processing pages in Wiktionaries, when langlinks differed
from page title only in capitalization, a DeprecationWarning was
issued regarding the use of obsolete property BaseSite.nocapitalize. | @@ -1125,8 +1125,8 @@ class Subject(interwiki_graph.Subject):
% (page, self.originPage))
return True
elif (page.title() != self.originPage.title() and
- self.originPage.site.nocapitalize and
- page.site.nocapitalize):
+ self.originPage.namespace().case == 'case-sensitive' and
+ page.namespace().case == 'case-sensitive'):
pywikibot.output(
u"NOTE: Ignoring %s for %s in wiktionary mode because both "
u"languages are uncapitalized."
|
Add browserstack logo
Closes | @@ -68,7 +68,8 @@ This will give a shell, at which you can start Django, specifying the ``0.0.0.0`
python manage.py runserver_plus 0.0.0.0:8000 --settings=openprescribing.settings.local
-The application should then be accessible at ``http://localhost:8000/`` from a web-browser on the host computer.
+The application should then be accessible at ``http://localhost:8000/`` from a web-
+on the host computer.
## On bare metal
@@ -160,6 +161,8 @@ If required, you can run individual Django tests as follows:
We support IE8 and above. We have a free account for testing across
multiple browsers, thanks to [BrowserStack](www.browserstack.com).
+
+
Note that tests are run using the settings in
`openprescribing/settings/test.py`; this happens automatically
|
Documentation: clarify the state of multiple context managers
Clarify that the backslash & paren-wrapping formatting for multiple
context managers aren't yet implemented. | @@ -19,7 +19,7 @@ with make_context_manager1() as cm1, make_context_manager2() as cm2, make_contex
... # nothing to split on - line too long
```
-So _Black_ will eventually format it like this:
+So _Black_ will, when we implement this, format it like this:
```py3
with \
@@ -31,8 +31,8 @@ with \
... # backslashes and an ugly stranded colon
```
-Although when the target version is Python 3.9 or higher, _Black_ will use parentheses
-instead since they're allowed in Python 3.9 and higher.
+Although when the target version is Python 3.9 or higher, _Black_ will, when we
+implement this, use parentheses instead since they're allowed in Python 3.9 and higher.
An alternative to consider if the backslashes in the above formatting are undesirable is
to use {external:py:obj}`contextlib.ExitStack` to combine context managers in the
|
delete validation error cluster
story:
task: 42465 | @@ -127,9 +127,8 @@ def clusters_update(cluster_id, data):
def clusters_delete(cluster_id):
data = u.request_data()
force = data.get('force', False)
- stack_name = api.get_cluster(cluster_id).get(
- 'extra', {}).get(
- 'heat_stack_name', None)
+ extra = api.get_cluster(cluster_id).get('extra', {})
+ stack_name = extra.get('heat_stack_name', None) if extra else None
api.terminate_cluster(cluster_id, force=force)
if force:
return u.render({"stack_name": stack_name}, status=200)
|
Update url for the "Linehaul project" repository
The old one was archived and replaced with this one. | @@ -10,7 +10,7 @@ Download Statistics Table
The download statistics table allows you learn more about downloads patterns of
packages hosted on PyPI. This table is populated through the `Linehaul
-project <https://github.com/pypa/linehaul>`_ by streaming download logs from PyPI
+project <https://github.com/pypa/linehaul-cloud-function/>`_ by streaming download logs from PyPI
to BigQuery. For more information on analyzing PyPI package downloads, see the `Python
Package Guide <https://packaging.python.org/guides/analyzing-pypi-package-downloads/>`_
|
Update black and jupyterbook version in pre-commit
* update black and jupyterbook version in pre-commit
* [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see | @@ -24,7 +24,7 @@ repos:
args: ["--profile", "black", "--filter-files"]
- repo: https://github.com/psf/black
- rev: 21.12b0
+ rev: 22.3.0
hooks:
- id: black
@@ -34,7 +34,7 @@ repos:
- id: flake8
- repo: https://github.com/executablebooks/jupyter-book
- rev: v0.12.1
+ rev: v0.12.3
hooks:
- id: jb-to-sphinx
args: ["docs/source"]
|
Language settings not saved after session end
Fixes | @@ -607,6 +607,8 @@ if type(EXTRA_URL_SCHEMES) not in [list]: # pragma: no cover
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = get_setting('INVENTREE_LANGUAGE', 'language', 'en-us')
+# Store language settings for 30 days
+LANGUAGE_COOKIE_AGE = 2592000
# If a new language translation is supported, it must be added here
LANGUAGES = [
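
As a quick sanity check on the constant above, 30 days expressed in seconds is exactly 2592000:

```python
# 30 days * 24 hours * 60 minutes * 60 seconds = LANGUAGE_COOKIE_AGE
print(30 * 24 * 60 * 60)  # 2592000
```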
|
fix serialization problem
thanks and for the .tolist() suggestion | @@ -122,16 +122,16 @@ def _fid_json(raw, unit, orient, manufacturer, fname):
FIFF.FIFFV_POINT_CARDINAL}
if fids:
if FIFF.FIFFV_POINT_NASION in fids:
- coords['NAS'] = list(fids[FIFF.FIFFV_POINT_NASION]['r'])
+ coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist()
if FIFF.FIFFV_POINT_LPA in fids:
- coords['LPA'] = list(fids[FIFF.FIFFV_POINT_LPA]['r'])
+ coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist()
if FIFF.FIFFV_POINT_RPA in fids:
- coords['RPA'] = list(fids[FIFF.FIFFV_POINT_RPA]['r'])
+ coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist()
hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}
if hpi:
for ident in hpi.keys():
- coords['coil%d' % ident] = list(hpi[ident]['r'])
+ coords['coil%d' % ident] = hpi[ident]['r'].tolist()
coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))])
if len(coord_frame) > 1:
@@ -143,7 +143,7 @@ def _fid_json(raw, unit, orient, manufacturer, fname):
'MEGCoordinateUnits': unit, # XXX validate that this is correct
'CoilCoordinates': coords,
'CoilCoordinateSystem': orient,
- 'CoilCoordinateUnits': unit, # XXX validate that this is correct too
+ 'CoilCoordinateUnits': unit # XXX validate that this is correct too
}
json_output = json.dumps(fid_json)
open(fname, 'w').write(json_output)
@@ -186,7 +186,7 @@ def _meg_json(raw, task, manufacturer, fname):
'TriggerChannelCount': n_stimchan,
}
json_output = json.dumps(meg_json)
- open(fname, 'w').write(meg_output)
+ open(fname, 'w').write(json_output)
return fname
@@ -274,7 +274,7 @@ def raw_to_bids(subject_id, run, task, input_fname, output_path,
_meg_json(raw, task, manufacturer, meg_fname)
_channel_tsv(raw, channels_fname)
if events_fname:
- events = mne.read_events(events_fname).astype(int)
+ events = read_events(events_fname).astype(int)
else:
events = mne.find_events(raw, min_duration=0.001)
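
For context, the `.tolist()` change matters because `json.dumps` cannot serialize NumPy arrays directly. A small sketch (assuming NumPy is installed; the coordinates are made up) of the failure and the fix:

```python
import json
import numpy as np

coords = {"NAS": np.array([0.0, 0.085, 0.0])}

try:
    json.dumps(coords)
except TypeError as err:
    print("fails:", err)  # Object of type ndarray is not JSON serializable

# Converting to a plain Python list first makes the payload serializable.
coords["NAS"] = coords["NAS"].tolist()
print(json.dumps(coords))  # {"NAS": [0.0, 0.085, 0.0]}
```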
|
Correct the StackUnderflowException
StackUnderflowException was being raised even though there were enough elements in the stack. | @@ -394,7 +394,7 @@ class MachineState:
def pop(self, amount=1) -> Union[BitVec, List[BitVec]]:
""" Pops amount elements from the stack"""
- if amount >= len(self.stack):
+ if amount > len(self.stack):
raise StackUnderflowException
values = self.stack[-amount:][::-1]
del self.stack[-amount:]
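
The off-by-one is easiest to see with a stripped-down stack (a sketch, not the project's real class): popping exactly as many items as the stack holds is legal, so the guard must only fire when `amount` exceeds the length:

```python
class StackUnderflowException(Exception):
    pass

def pop(stack, amount=1):
    # Correct guard: underflow only when asking for more items than exist.
    if amount > len(stack):
        raise StackUnderflowException
    values = stack[-amount:][::-1]
    del stack[-amount:]
    return values

stack = [1, 2, 3]
print(pop(stack, 3))  # [3, 2, 1] -- allowed; the old `>=` check wrongly raised here
print(stack)          # []
```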
|
add extra info to report subcommand log
This commit also slightly restructures the call to write
tabular output | @@ -481,41 +481,50 @@ def subcmd_report(args, logger):
formats = list(set(formats)) # remove duplicates
logger.info("Creating output in formats: %s", formats)
+ # Declare which database is being used
+ logger.info("Using database: %s", args.dbpath)
+
# Report runs in the database
if args.show_runs:
+ outfname = os.path.join(args.outdir, "runs")
+ logger.info("Writing table of pyani runs from the database to %s.*",
+ outfname)
data = pyani_db.get_all_runs(args.dbpath)
headers = ['run ID', 'method', 'date run', 'command-line']
- pyani_report.write_dbtable(data, headers,
- os.path.join(args.outdir, "runs"),
- formats, index='run ID')
+ pyani_report.write_dbtable(data, headers, outfname, formats,
+ index='run ID')
# Report genomes in the database
if args.show_genomes:
+ outfname = os.path.join(args.outdir, "genomes")
+ logger.info("Writing table of genomes from the database to %s.*",
+ outfname)
data = pyani_db.get_all_genomes(args.dbpath)
headers = ['genome ID', 'description', 'path',
'MD5 hash', 'genome length']
- pyani_report.write_dbtable(data, headers,
- os.path.join(args.outdir, "genomes"),
- formats, index='genome ID')
+ pyani_report.write_dbtable(data, headers, outfname, formats,
+ index='genome ID')
# Report table of all genomes used for each run
if args.show_runs_genomes:
+ outfname = os.path.join(args.outdir, "runs_genomes")
+ logger.info("Writing table of pyani runs, with associated genomes " +
+ "to %s.*", outfname)
data = pyani_db.get_runs_by_genomes(args.dbpath)
headers = ['run ID', 'method', 'date run',
'genome ID', 'description', 'path', 'MD5 hash',
'genome length']
- pyani_report.write_dbtable(data, headers,
- os.path.join(args.outdir, "runs_genomes"),
- formats)
+ pyani_report.write_dbtable(data, headers, outfname, formats)
# Report table of all runs in which a genome is involved
if args.show_genomes_runs:
+ outfname = os.path.join(args.outdir, "genomes_runs")
+ logger.info("Writing table of genomes, with associated pyani runs" +
+ "to %s.*", outfname)
data = pyani_db.get_genomes_by_runs(args.dbpath)
headers = ['genome ID', 'description', 'path', 'MD5 hash',
'genome length', 'run ID', 'method', 'date run']
- pyani_report.write_dbtable(data, headers,
- os.path.join(args.outdir, "genomes_runs"),
- formats)
+ pyani_report.write_dbtable(data, headers, outfname, formats)
# Classify input genomes on basis of ANI coverage and identity output
|
Fix workstation __str__
Closes | @@ -90,7 +90,8 @@ class Workstation(UUIDModel, TitleSlugDescriptionModel):
)
def __str__(self):
- return f"Workstation {self.title}" + " (Public)" if self.public else ""
+ public = " (Public)" if self.public else ""
+ return f"Workstation {self.title}{public}"
def get_absolute_url(self):
return reverse("workstations:detail", kwargs={"slug": self.slug})
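
The original bug is a Python operator-precedence pitfall worth isolating in a standalone sketch: the conditional expression binds more loosely than `+`, so the whole concatenation is discarded for non-public workstations:

```python
title, public = "CT Viewer", False  # made-up example values

# Buggy form: parsed as (f"Workstation {title}" + " (Public)") if public else ""
buggy = f"Workstation {title}" + " (Public)" if public else ""
print(repr(buggy))  # '' -- the title vanishes entirely

# Fixed form: build the optional suffix first, then interpolate it.
public_suffix = " (Public)" if public else ""
fixed = f"Workstation {title}{public_suffix}"
print(repr(fixed))  # 'Workstation CT Viewer'
```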
|
Update info.json
`[p]cleverbot` -> `[p]cleverbotset`
include `[p]cleverbotset ioapikey` | "description" : "Allows for interaction with cleverbot.com through mention/command",
"disabled" : false,
"hidden" : false,
- "install_msg" : "Needs to be setup with an API key first. See `[p]cleverbot apikey`\n[p]cleverbot <text>` to talk with cleverbot.\n`@Mention <text>` works too.\n`[p]cleverbot toggle` disables replies by mention.",
+ "install_msg" : "Needs to be setup with an API key first. See `[p]cleverbotset apikey` / `[p]cleverbotset ioapikey`\n[p]cleverbot <text>` to talk with cleverbot.\n`@Mention <text>` works too.\n`[p]cleverbotset toggle` toggles replies by mention.",
"min_python_version" : [
3,
6,
|
ci: Run zulip backend test suite for Debian bullseye.
This also verifies the Zulip codebase's Python 3.9 support. | @@ -34,6 +34,15 @@ jobs:
is_focal: true
include_frontend_tests: false
+ # This docker image was created by a generated Dockerfile at:
+ # tools/ci/images/focal/Dockerfile
+ # Bullseye ships with Python 3.9.2.
+ - docker_image: zulip/ci:bullseye
+ name: Debian 11 Bullseye (Python 3.9, backend)
+ os: bullseye
+ is_bullseye: true
+ include_frontend_tests: false
+
runs-on: ubuntu-latest
name: ${{ matrix.name }}
container: ${{ matrix.docker_image }}
@@ -231,7 +240,7 @@ jobs:
retention-days: 60
- name: Check development database build
- if: ${{ matrix.is_focal }}
+ if: ${{ matrix.is_focal || matrix.is_bullseye }}
run: ./tools/ci/setup-backend
- name: Report status
|
[cli] added `revs` and `restore` commands
see
see | @@ -409,7 +409,7 @@ def excluded():
"""View and manage excluded folders."""
[email protected](cls=SpecialHelpOrder, help_priority=17)
[email protected](cls=SpecialHelpOrder, help_priority=18)
def notify():
"""Manage Desktop notifications."""
@@ -896,6 +896,48 @@ def rebuild_index(config_name: str):
@main.command(help_priority=16)
[email protected]('dropbox_path', type=click.Path())
+@existing_config_option
+@catch_maestral_errors
+def revs(dropbox_path: str, config_name: str):
+ """Lists old revisions of a file."""
+ from datetime import datetime, timezone
+ from maestral.daemon import MaestralProxy
+
+ with MaestralProxy(config_name, fallback=True) as m:
+
+ entries = m.list_revisions(dropbox_path)
+
+ rev = []
+ last_modified = []
+
+ for e in entries:
+
+ rev.append(e['rev'])
+
+ dt = datetime.strptime(e['client_modified'], '%Y-%m-%dT%H:%M:%SZ')
+ dt = dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
+ last_modified.append(dt.strftime('%d %b %Y %H:%M'))
+
+ click.echo(format_table(columns=[rev, last_modified]))
+
+
[email protected](help_priority=17)
[email protected]('dropbox_path', type=click.Path())
[email protected]('rev')
+@existing_config_option
+@catch_maestral_errors
+def restore(dropbox_path: str, rev: str, config_name: str):
+ """Restores an old revision of a file to the given path."""
+ from maestral.daemon import MaestralProxy
+
+ with MaestralProxy(config_name, fallback=True) as m:
+ m.restore(dropbox_path, rev)
+
+ click.echo(f'Restored {rev} to "{dropbox_path}"')
+
+
[email protected](help_priority=18)
def configs():
"""Lists all configured Dropbox accounts."""
from maestral.daemon import is_running
@@ -917,7 +959,7 @@ def configs():
click.echo('')
[email protected](help_priority=18)
[email protected](help_priority=20)
@click.option('--yes', '-Y', is_flag=True, default=False)
@click.option('--no', '-N', is_flag=True, default=False)
@existing_config_option
@@ -951,7 +993,7 @@ def analytics(yes: bool, no: bool, config_name: str):
click.echo(f'Automatic error reports are {enabled_str}.')
[email protected](help_priority=20)
[email protected](help_priority=22)
@existing_config_option
def account_info(config_name: str):
"""Shows your Dropbox account information."""
@@ -973,7 +1015,7 @@ def account_info(config_name: str):
click.echo('')
[email protected](help_priority=21)
[email protected](help_priority=23)
def about():
"""Returns the version number and other information."""
import time
|
TFM Package: Update to TFM package URL
TFM package for LPC55S69 got its first release.
{
"version": "v1.0-beta",
- "URL": "https://github.com/RT-Thread-packages/trusted-firmware-m/archive/lpc-v1.0.zip",
+ "URL": "https://github.com/RT-Thread-packages/trusted-firmware-m/archive/v1.0-beta.zip",
"filename": "trusted-firmware-m-1.0-beta.zip"
},
{
|
[easy] make dagster-example-tmpl pass isort out of the box
Summary: As title
Test Plan:
create example, run make isort, no changes
Reviewers: max, prha, sashank, nate | -from ..repo import my_pipeline
-
from dagster import execute_pipeline
+from ..repo import my_pipeline
+
def test_{{EXAMPLE_NAME}}():
assert execute_pipeline(my_pipeline).success
|
backward compatibility for old scope["surt"]
and make sure to store ssurt as string in rethinkdb | @@ -183,14 +183,24 @@ class Site(doublethink.Document, ElapsedMixIn):
self.last_claimed = brozzler.EPOCH_UTC
if not "scope" in self:
self.scope = {}
+
+ # backward compatibility
+ if "surt" in self.scope:
+ if not "accepts" in self.scope:
+ self.scope["accepts"] = []
+ self.scope["accepts"].append({"surt": self.scope["surt"]})
+ del self.scope["surt"]
+
+ # backward compatibility
if ("max_hops_off_surt" in self.scope
and not "max_hops_off" in self.scope):
self.scope["max_hops_off"] = self.scope["max_hops_off_surt"]
if "max_hops_off_surt" in self.scope:
del self.scope["max_hops_off_surt"]
+
if self.seed:
self._accept_ssurt_if_not_redundant(
- brozzler.site_surt_canon(self.seed).ssurt())
+ brozzler.site_surt_canon(self.seed).ssurt().decode('ascii'))
if not "starts_and_stops" in self:
if self.get("start_time"): # backward compatibility
@@ -219,7 +229,7 @@ class Site(doublethink.Document, ElapsedMixIn):
def note_seed_redirect(self, url):
self._accept_ssurt_if_not_redundant(
- brozzler.site_surt_canon(url).ssurt())
+ brozzler.site_surt_canon(self.seed).ssurt().decode('ascii'))
def extra_headers(self):
hdrs = {}
|
Update noaa-gefs.yaml
Updating to include SNS Topic. | @@ -20,6 +20,10 @@ Resources:
ARN: arn:aws:s3:::noaa-gefs-pds
Region: us-east-1
Type: S3 Bucket
+ - Description: New data notifications for GFS, only Lambda and SQS protocols allowed
+ ARN: arn:aws:sns:us-east-1:123901341784:NewGEFSObject
+ Region: us-east-1
+ Type: SNS Topic
DataAtWork:
Tutorials:
Tools & Applications:
|
Correct python-requests link
Updated with correct link | @@ -54,7 +54,7 @@ Alternative, this envrionmental variable can be set via the `os` module in-line
return vault_client
-.. _documented in the advanced usage section for requests: http://docs.python-requests.org/en/master/user/advanced/
+.. _documented in the advanced usage section for requests: https://2.python-requests.org/en/master/user/advanced/#id4
Custom Requests / HTTP Adapter
------------------------------
|
Add documentation for expected scores.
Also add links from the `model` field to the objective and the expected scores fields. | @@ -13,7 +13,7 @@ An identifier for the experiment that will be used to name the report and all :r
model
"""""
-The machine learner you want to use to build the scoring model. Possible values include :ref:`built-in linear regression models <builtin_models>` as well as all of the regressors available via `SKLL <http://skll.readthedocs.io/en/latest/run_experiment.html#learners>`_.
+The machine learner you want to use to build the scoring model. Possible values include :ref:`built-in linear regression models <builtin_models>` as well as all of the learners available via `SKLL <http://skll.readthedocs.io/en/latest/run_experiment.html#learners>`_. With SKLL learners, you can customize the :ref:`tuning objective <skll_objective>` and also :ref:`compute expected scores as predictions <predict_expected_scores>`.
train_file
""""""""""
@@ -23,10 +23,18 @@ test_file
"""""""""
The path to the evaluation data feature file in one of the :ref:`supported formats <input_file_format>`. Each row should correspond to a single response and contain numeric feature values extracted for this response. In addition, there should be a column with a unique identifier (ID) for each response and a column with the human score for each response. The path can be absolute or relative to the location of config file.
+.. _skll_objective:
+
skll_objective *(Optional)*
"""""""""""""""""""""""""""
The tuning objective to use if a SKLL model is chosen to build the scoring model. Possible values are the objectives available via `SKLL <http://skll.readthedocs.io/en/latest/run_experiment.html#objectives>`_. Defaults to ``neg_mean_squared_error`` for SKLL regressors and ``f1_score_micro`` for SKLL classifiers. Note that if this option is specified with the :ref:`built-in linear regression models <builtin_models>`, it will simply be ignored.
+.. _predict_expected_scores:
+
+predict_expected_scores *(Optional)*
+""""""""""""""""""""""""""""""""""""
+If a probablistic SKLL classifier is chosen to build the scoring model, then *expected scores* --- probability-weighted averages over all score points --- can be generated as the machine predictions instead of the most likely score point, which would be the default for a classifier. Set this field to ``true`` to compute expected scores as predictions. Defaults to ``false``.
+
description *(Optional)*
""""""""""""""""""""""""
A brief description of the experiment. This will be included in the report. The description can contain spaces and punctuation. It's blank by default.
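
The "expected score" described above is simply the probability-weighted average over the score points. A minimal sketch with made-up class probabilities (not SKLL's actual API):

```python
# Hypothetical per-response class probabilities for score points 1..4
score_points = [1, 2, 3, 4]
probabilities = [0.05, 0.20, 0.60, 0.15]

# Most likely score point -- what a plain classifier would predict
argmax_score = score_points[probabilities.index(max(probabilities))]

# Expected score: probability-weighted average over all score points
expected_score = sum(p * s for p, s in zip(probabilities, score_points))

print(argmax_score)    # 3
print(expected_score)  # 2.85
```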
|
Remove duplicative mkdir_p
Summary: This function exists in dagster.utils
Test Plan: Buildkite
Reviewers: natekupp, alangenfeld | -import errno
-import os
-
import sqlalchemy
-def mkdir_p(newdir, mode=0o777):
- """The missing mkdir -p functionality in os."""
- try:
- os.makedirs(newdir, mode)
- except OSError as err:
- # Reraise the error unless it's about an already existing directory
- if err.errno != errno.EEXIST or not os.path.isdir(newdir):
- raise
-
-
def create_redshift_db_url(username, password, hostname, db_name, jdbc=True):
if jdbc:
db_url = (
|
Update note on PoET SGX support to include 1.2
Also removed statement about PoET-SGX availability in Sawtooth 1.1. | @@ -4,9 +4,8 @@ Using Sawtooth with PoET-SGX
.. note::
- PoET-SGX is currently not compatible with Sawtooth 1.1. Users looking to
- leverage PoET-SGX should remain on Sawtooth 1.0. PoET-SGX is being upgraded
- to be made compatible with 1.1 and will be released before the end of 2018.
+ PoET-SGX is currently not compatible with Sawtooth 1.1 or later.
+ Users looking to leverage PoET-SGX should remain on Sawtooth 1.0.
This procedure describes how to install, configure, and run Hyperledger Sawtooth
with PoET simulator consensus on a system with |Intel (R)| Software Guard
|
Fixed symbols.Symbol.suggest function: suggest only name without
qualifier | @@ -577,7 +577,7 @@ class Symbol(SymbolId):
def suggest(self):
""" Returns suggestion for this declaration """
- return ('{0}\t{1}'.format(self.scope_name(), self.module.name), self.scope_name())
+ return ('{0}\t{1}'.format(self.scope_name(), self.module.name), self.name)
def brief(self, _short=False):
""" Brief information, just a name by default """
@@ -682,7 +682,7 @@ class Function(Symbol):
self.type,
self.module.name
)),
- self.scope_name()
+ self.name
)
@unicode_operators
@@ -715,7 +715,7 @@ class TypeBase(Symbol):
' '.join(self.args),
self.module.name
)),
- self.scope_name()
+ self.name
)
@unicode_operators
@@ -809,7 +809,7 @@ class UnknownSymbol(Symbol):
self.module.name
)
),
- self.scope_name()
+ self.name
)
@unicode_operators
|
Fix up Travis-CI AWS credentials
Store them in Travis environment variables instead of the .travis.yml
configuration file.
They'll be easier to maintain and rotate there. | @@ -91,9 +91,6 @@ deploy:
# For tags, the object will be datacube/datacube-1.4.2.tar.gz
# For develop, the object will be datacube/datacube-1.4.1+91.g43bd4e12.tar.gz
- provider: s3
- access_key_id: "AKIAJMZN4F5L5KXPQKVQ"
- secret_access_key:
- secure: owxrrQc4i1jfAwvASFewdMzNdi93zpwnoEhsTJQS/f3SkD083XpefUr4H7Mg8cZiq5grT6NpoYkG6QDoxz30Kn5IbDIHewRy3I11uwz02JGUkzxv+/7hLOnoM/LM993WWXA+YQJmrO4HvncDy3Hgbw98xkKOeqsQ1XlYR8Lp5NUQnwJtLkouOJYfJSU/ZuIyetYC3bO2JkF/smtEPmVPJ8ZJjAQEqCdiUJsvMWKDlDh+cLOKM6EBTorE8GTECfMY+HUy74nH7xdkJ2xQm6PjO2wJVtcgCW377Jfn7IJqMhmlhA6fnkguyb58S5NkwwSynFizm7GidtbAxTeaIHimB3B6ImWUQu/maiWrWpMZGyqoC+l3+zQ/jg5Pam9Crtff8odFNT/tvwVGMTxuVkUxYRZ87XGM+G4D29xV/FGs0oqyn3Jw4SPXmg1bBymJOwmL+YogYFvJAR77MmbBUcf6SCogJzvI4Z2IFe9afMCbZaNeSD1GGh+EYCjJIKrpc9sY2ZxON1Mgv1bQ9FnSrDxyCUYEsshh8x9AaAFcYdGRGc6V0IT/5gopH3gf+XC+Q2OQHGC5oiaJ0+R4BoT3YuxEYfpBHvrpWk30Ug8C9ZgxoGK2xjabKiJ/2LlKe+xHoe77ZWZCkc1RocZnSHFBaFgaQrzppWsty04WGHl5fIVgH1Q=
bucket: "datacube-core-deployment"
region: "ap-southeast-2"
local_dir: dist
|
Capture exception as close as possible to error
To get the line number. | @@ -287,7 +287,13 @@ class StructuredValue(FieldDefinition):
self.filename,
self.line_num,
)
+
+ try:
value = evaluate_function(func, self.args, self.kwargs, context)
+ except DataGenError:
+ raise
+ except Exception as e:
+ raise DataGenError(str(e), self.filename, self.line_num)
return value
|
tools/run_on_bots: Enable giving a hash instead of script.
This allows you to specify an isolated hash instead of giving a script.
This makes it easier to test an existing uploaded test.
(Also fix description for extra arguments.) | @@ -15,10 +15,11 @@ __version__ = '0.2'
import json
import os
-import tempfile
import shutil
+import string
import subprocess
import sys
+import tempfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding()))))
@@ -114,6 +115,8 @@ def run_serial(
cmd.extend(('-d', k, v))
for k, v in env:
cmd.extend(('--env', k, v))
+ if args:
+ cmd.append('--')
cmd.extend(args)
r = subprocess.call(cmd, cwd=ROOT_DIR)
result = max(r, result)
@@ -143,7 +146,9 @@ def run_parallel(
def main():
parser = parallel_execution.OptionParser(
- usage='%prog [options] script.py', version=__version__)
+ usage='%prog [options] (script.py|isolated hash) '
+ '-- [script.py arguments]',
+ version=__version__)
parser.add_option(
'--serial', action='store_true',
help='Runs the task serially, to be used when debugging problems since '
@@ -152,6 +157,9 @@ def main():
'--repeat', type='int', default=1,
help='Runs the task multiple time on each bot, meant to be used as a '
'load test')
+ parser.add_option(
+ '--name',
+ help='Name to use when providing an isolated hash')
options, args = parser.parse_args()
if len(args) < 1:
@@ -164,7 +172,31 @@ def main():
'so the task completes as fast as possible, or an high number so the\n'
'task only runs when the bot is idle.')
- # 1. Query the bots list.
+ # 1. Archive the script to run.
+ if not os.path.exists(args[0]):
+ if not options.name:
+ parser.error(
+ 'Please provide --name when using an isolated hash.')
+ if len(args[0]) not in (40, 64):
+ parser.error(
+ 'Hash wrong length %d (%r)' % (len(args.hash), args[0]))
+ for i, c in enumerate(args[0]):
+ if c not in string.hexdigits:
+ parser.error(
+ 'Hash character invalid\n'
+ ' %s\n' % args[0] +
+ ' '+'-'*i+'^\n'
+ )
+
+ isolated_hash = args[0]
+ name = options.name
+ else:
+ isolated_hash = archive(options.isolate_server, args[0])
+ name = os.path.basename(args[0])
+
+ print('Running %s' % isolated_hash)
+
+ # 2. Query the bots list.
bots, quarantined_bots, dead_bots = get_bot_list(
options.swarming, options.dimensions)
print('Found %d bots to process' % len(bots))
@@ -175,12 +207,7 @@ def main():
if not bots:
return 1
- # 2. Archive the script to run.
- isolated_hash = archive(options.isolate_server, args[0])
- print('Running %s' % isolated_hash)
-
# 3. Trigger the tasks.
- name = os.path.basename(args[0])
if options.serial:
return run_serial(
options.swarming,
|
Remove "test" flag from tox environment names.
Astropy uses this, but it isn't necessary. Bad things happen if you
forget the tag when running tests manually, so it is better to just not
have it. | @@ -24,27 +24,27 @@ jobs:
- name: Python 3.9 with minimal dependencies
os: ubuntu-latest
python: 3.9
- toxenv: py39-test
+ toxenv: py39
- name: Python 3.9 with full coverage
os: ubuntu-latest
python: 3.9
- toxenv: py39-test-cov
+ toxenv: py39-cov
- name: Python 3.6 with oldest supported version of all dependencies
os: ubuntu-latest
python: 3.6
- toxenv: py36-test-oldestdeps
+ toxenv: py36-oldestdeps
- name: Python 3.7 with all optional dependencies (MacOS X)
os: macos-latest
python: 3.7
- toxenv: py37-test-alldeps
+ toxenv: py37-alldeps
- name: Python 3.8 with all optional dependencies (Windows)
os: windows-latest
python: 3.8
- toxenv: py38-test-alldeps
+ toxenv: py38-alldeps
steps:
- name: Checkout code
|
Update phishing.txt
Generalizing by detecting the entire ```4-16.icu``` domain.
ppporous.co.kr
# Reference: https://twitter.com/rpsanch/status/1123060265975787521
+# Reference: https://twitter.com/PhishingAi/status/1123192408739647488
-ziraatbank.tr.4-16.icu
+4-16.icu
# Reference: https://twitter.com/PhishingAi/status/1123050989651521536
@@ -2638,7 +2639,3 @@ ripple-infogiveaway.com
# Reference: https://twitter.com/PhishingAi/status/1123192411801423872
gob-mx.icu
-
-# Reference: https://twitter.com/PhishingAi/status/1123192408739647488
-
-4-16.icu
|
Adds quotation marks to escape the codeversion. Without the quotations, the codeversion may be interpreted as a number, which can yield errors.
Updates the comments to reflect the usage of the variables. | var uniqueId = "{{ uniqueId }}"; // a unique string identifying the worker/task
var condition = {{ condition }}; // the condition number
var counterbalance = {{ counterbalance }}; // a number indexing counterbalancing conditions
- var codeversion = {{ codeversion }}; // a number indexing counterbalancing conditions
- var contact_address = "{{ contact_address }}"; // a number indexing counterbalancing conditions
+ var codeversion = "{{ codeversion }}"; // a string representing the current experiment version
+ var contact_address = "{{ contact_address }}"; // your email address
var adServerLoc = "{{ adServerLoc }}"; // the location of your ad (so you can send user back at end of experiment)
var mode = "{{ mode }}";
</script>
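
To see why the quotes matter, here is a small sketch (using Python's `string.Template` rather than psiTurk's real templating) showing that an unquoted substitution becomes a bare numeric literal while the quoted form stays a string:

```python
from string import Template

version = "1.6.10"  # hypothetical codeversion value

unquoted = Template("var codeversion = $v;").substitute(v=version)
quoted = Template('var codeversion = "$v";').substitute(v=version)

print(unquoted)  # var codeversion = 1.6.10;   -> not even a valid JS number literal
print(quoted)    # var codeversion = "1.6.10"; -> an ordinary string, always safe
```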
|
Make RogersEnvironment a Source.
Proportion is now a property of the RogersEnvironment. | @@ -142,16 +142,34 @@ class RogersAgent(Agent):
return self.infos(type=LearningGene)[0]
-class RogersEnvironment(Environment):
+class RogersEnvironment(Source):
"""The Rogers environment."""
__mapper_args__ = {"polymorphic_identity": "rogers_environment"}
- def create_state(self, proportion):
- """Create an environmental state."""
+ @hybrid_property
+ def proportion(self):
+ """Convert property1 to propoertion."""
+ return float(self.property1)
+
+ @proportion.setter
+ def proportion(self, proportion):
+ """Make proportion settable."""
+ self.property1 = repr(proportion)
+
+ @proportion.expression
+ def proportion(self):
+ """Make proportion queryable."""
+ return cast(self.property1, Float)
+
+ def _info_type(self):
+ return State
+
+ def _contents(self):
if random.random() < 0.5:
- proportion = 1 - proportion
- State(origin=self, contents=proportion)
+ return self.proportion
+ else:
+ return 1- self.proportion
def step(self):
"""Prompt the environment to change."""
|
FetchInfo.re_fetch_result has no reason to be public
And when using the API interactively, having it show up as public is
confusing. | @@ -208,7 +208,7 @@ class FetchInfo(object):
NEW_TAG, NEW_HEAD, HEAD_UPTODATE, TAG_UPDATE, REJECTED, FORCED_UPDATE, \
FAST_FORWARD, ERROR = [1 << x for x in range(8)]
- re_fetch_result = re.compile(r'^\s*(.) (\[?[\w\s\.$@]+\]?)\s+(.+) -> ([^\s]+)( \(.*\)?$)?')
+ _re_fetch_result = re.compile(r'^\s*(.) (\[?[\w\s\.$@]+\]?)\s+(.+) -> ([^\s]+)( \(.*\)?$)?')
_flag_map = {'!': ERROR,
'+': FORCED_UPDATE,
@@ -263,7 +263,7 @@ class FetchInfo(object):
fetch line is the corresponding line from FETCH_HEAD, like
acb0fa8b94ef421ad60c8507b634759a472cd56c not-for-merge branch '0.1.7RC' of /tmp/tmpya0vairemote_repo"""
- match = cls.re_fetch_result.match(line)
+ match = cls._re_fetch_result.match(line)
if match is None:
raise ValueError("Failed to parse line: %r" % line)
|
update html [skip ci]
remove inline styling
remove legends
remove redundant container and row classes | {% initial_page_data 'appsProfiles' apps_profiles %}
{% registerurl 'toggle_release_restriction_by_app_profile' domain '---'%}
{% registerurl "paginate_releases" domain '---' %}
-<div class="container">
- <div class="row">
- <ul class="nav nav-tabs sticky-tabs" style="margin-bottom: 10px;">
+ <ul class="nav nav-tabs sticky-tabs">
<li><a data-toggle="tab" href="#search-form" id="search-tab">{% trans "Search" %}</a></li>
<li><a data-toggle="tab" href="#create-form" id="create-tab">{% trans "Add" %}</a></li>
</ul>
+ <div class="spacer"></div>
<div class="tab-content">
<div id="search-form" class="tab-pane">
<form class="form form-horizontal" method="post" id="search-manage-app-releases">
- <legend>{% trans "Search" %}</legend>
{% crispy search_form %}
</form>
</div>
<div id="create-form" class="tab-pane">
<form class="form form-horizontal disable-on-submit" method="post" id="create-manage-app-releases">
- <legend>{% trans "Add" %}</legend>
{% crispy create_form %}
</form>
</div>
</div>
- </div>
- <div class="row">
+
{% if app_releases_by_app_profile %}
<table id="managed-releases" class="table">
<thead>
</tbody>
</table>
{% endif %}
- </div>
-</div>
+
{% endblock %}
|
Update playbook-URL_Enrichment_-_Generic.yml
Fixed typo | @@ -261,7 +261,7 @@ tasks:
task:
id: 551589fe-87f8-4b47-8865-304283d81363
version: -1
- name: Take Screenshits
+ name: Take Screenshots
description: ""
type: title
iscommand: false
|
Print warning about an issue with mapped network drives on Windows // Issue
Starting with Python 3.8 paths to mapped network drives are resolved
to their real path in the system, e.g.: "Z:\path" becomes "\\path" which
causes weird errors in the default terminal with a message that UNC
paths are not supported | @@ -28,7 +28,7 @@ from SCons.Script import DefaultEnvironment # pylint: disable=import-error
from SCons.Script import Import # pylint: disable=import-error
from SCons.Script import Variables # pylint: disable=import-error
-from platformio import fs
+from platformio import compat, fs
from platformio.compat import dump_json_to_unicode
from platformio.managers.platform import PlatformBase
from platformio.proc import get_pythonexe_path
@@ -120,6 +120,16 @@ env.Replace(
],
)
+if (
+ compat.WINDOWS
+ and sys.version_info >= (3, 8)
+ and env["PROJECT_DIR"].startswith("\\\\")
+):
+ click.secho(
+ "There is a known issue with Python 3.8+ and mapped network drives on "
+ "Windows.\nPlease downgrade Python to the latest 3.7. More details at:\n"
+ "https://github.com/platformio/platformio-core/issues/3417", fg="yellow")
+
if env.subst("$BUILD_CACHE_DIR"):
if not isdir(env.subst("$BUILD_CACHE_DIR")):
makedirs(env.subst("$BUILD_CACHE_DIR"))
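
A hedged sketch (assuming a Windows machine where `Z:` is a mapped network share) of the behavior the warning targets: on Python 3.8+, resolving a path on a mapped drive yields its UNC form, which is exactly what the `startswith("\\\\")` check above detects:

```python
import sys
from pathlib import Path

# Hypothetical project path on a mapped network drive (assumes Z: -> \\server\share)
project_dir = str(Path(r"Z:\projects\blink").resolve())

# On Python 3.8+ and Windows, resolve() returns the UNC path,
# e.g. \\server\share\projects\blink, which triggers the new warning.
if sys.version_info >= (3, 8) and project_dir.startswith("\\\\"):
    print("project resolved to a UNC path:", project_dir)
```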
|
ArrayLiteral: fix type names in error message
TN: | @@ -2475,9 +2475,9 @@ class ArrayLiteral(AbstractExpression):
else:
check_source_language(
self.element_type == el.static_type,
- "In Array literal, expected element of type {},"
- " got {}".format(self.element_type,
- el.static_type)
+ 'In Array literal, expected element of type {},'
+ ' got {}'.format(self.element_type.dsl_name,
+ el.static_type.dsl_name)
)
self.array_type = resolve_type(self.element_type).array
|
docs: ext: consoletest: Prepend dffml runner to path
This ensures that we always run the correct venv of Python | @@ -14,6 +14,7 @@ import atexit
import shutil
import asyncio
import pathlib
+import inspect
import tempfile
import functools
import traceback
@@ -124,11 +125,12 @@ class ActivateVirtualEnvCommand(ConsoletestCommand):
)
async def run(self, ctx):
+ tempdir = ctx["stack"].enter_context(tempfile.TemporaryDirectory())
self.old_virtual_env = os.environ.get("VIRTUAL_ENV", None)
self.old_path = os.environ.get("PATH", None)
env_path = os.path.abspath(os.path.join(ctx["cwd"], self.directory))
os.environ["PATH"] = ":".join(
- [os.path.join(env_path, "bin")]
+ [os.path.abspath(tempdir), os.path.join(env_path, "bin")]
+ os.environ.get("PATH", "").split(":")
)
# conda
@@ -151,6 +153,29 @@ class ActivateVirtualEnvCommand(ConsoletestCommand):
print("VIRTUAL_ENV")
os.environ["VIRTUAL_ENV"] = env_path
+ # Find full path
+ for pathdir in os.environ.get("PATH", "").split(":"):
+ check_path = pathlib.Path(pathdir, "python")
+ if check_path.is_file():
+ python_path = str(check_path.resolve())
+ break
+ # Prepend a dffml command to the path to ensure the correct
+ # version of dffml always runs
+ # Write out the file
+ dffml_path = pathlib.Path(os.path.abspath(tempdir), "dffml")
+ dffml_path.write_text(
+ inspect.cleandoc(
+ f"""
+ #!{python_path}
+ import os
+ import sys
+
+ os.execv("{python_path}", ["{python_path}", "-m", "dffml", *sys.argv[1:]])
+ """
+ )
+ )
+ dffml_path.chmod(0o755)
+
return
# TODO Related to the coverage issue
@@ -256,7 +281,7 @@ class DFFMLProcess:
async def run_dffml_command(cmd, ctx, kwargs):
# Run the DFFML command if its not the http server
- if cmd[2:6] != ["dffml", "service", "http", "server"]:
+ if cmd[:4] != ["dffml", "service", "http", "server"]:
# Run the command
print()
print("Running", cmd)
@@ -418,11 +443,6 @@ async def run_commands(
if "2>&1" in cmd:
kwargs["stderr"] = subprocess.STDOUT
cmd.remove("2>&1")
- # Make sure dffml commands are run with python -m
- # TODO XXX Raise issue with command that don't run this way
- if cmd[0] == "dffml":
- cmd.insert(0, "-m")
- cmd.insert(0, "python")
# If not in venv ensure correct Python
if (
"VIRTUAL_ENV" not in os.environ
@@ -432,7 +452,8 @@ async def run_commands(
cmd[0] = sys.executable
# Handle temporary environment variables prepended to command
with tmpenv(cmd) as cmd:
- if cmd[2:3] == ["dffml"]:
+ # Run the command
+ if cmd[0] == "dffml":
# Run dffml command through Python so that we capture coverage info
proc = await run_dffml_command(cmd, ctx, kwargs)
else:
@@ -1034,7 +1055,7 @@ class ConsoleTestBuilder(DocTestBuilder):
tempdir = stack.enter_context(tempfile.TemporaryDirectory())
venvdir = stack.enter_context(tempfile.TemporaryDirectory())
- ctx = {"cwd": tempdir}
+ ctx = {"cwd": tempdir, "stack": stack}
venvdir = os.path.abspath(venvdir)
# Create a virtualenv for every document
|
DOC: Update for return value of np.ptp()
* Update documentation for return value of np.ptp()
* Update integer value to scalar to accomodate other numeric data types
* Add examples to np.char.isdigit()
* Remove extra space and example
Closes | @@ -2647,9 +2647,9 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
Returns
-------
- ptp : ndarray
- A new array holding the result, unless `out` was
- specified, in which case a reference to `out` is returned.
+ ptp : ndarray or scalar
+ The range of a given array - `scalar` if array is one-dimensional
+ or a new array holding the result along the given axis
Examples
--------
|
Changes.md : Add missing bugfixes
I forgot these when merging 0.61_maintenance after the release of 0.61.14.7. | @@ -8,6 +8,14 @@ Improvements
- Added `maintainReferencePosition` plug. This allows the constraint to maintain the original position of the object at a specifed reference time.
- Improved performance for UV constraints where the target has static geometry but an animated transform. One such benchmark shows a greater than 40x speedup.
+Fixes
+-----
+
+- Shape : Fixed unnecessary serialisation of internal connection.
+- ScaleTool :
+ - Fixed oversensitive scaling when zoomed out in the Viewer.
+ - Prevented uniform scaling from becoming negative.
+
1.0.5.1 (relative to 1.0.5.0)
=======
|
More accurate jerk limits
* More accurate jerk limits
* Min is not - max
For example max_curvature_rate can be negative.
* reduce diff | @@ -122,7 +122,7 @@ def get_lag_adjusted_curvature(CP, v_ego, psis, curvatures, curvature_rates):
# This is the "desired rate of the setpoint" not an actual desired rate
desired_curvature_rate = curvature_rates[0]
- max_curvature_rate = MAX_LATERAL_JERK / (v_ego**2)
+ max_curvature_rate = MAX_LATERAL_JERK / (v_ego**2) # inexact calculation, check https://github.com/commaai/openpilot/pull/24755
safe_desired_curvature_rate = clip(desired_curvature_rate,
-max_curvature_rate,
max_curvature_rate)
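
The relation behind the limit: lateral acceleration is a_lat = v^2 * kappa, so ignoring the dv/dt term, lateral jerk is approximately v^2 * d(kappa)/dt and the allowed curvature rate is jerk / v^2; this simplification is why the added comment calls the calculation inexact. A small sketch of the clamp with illustrative numbers only:

```python
MAX_LATERAL_JERK = 5.0  # m/s^3, illustrative value, not necessarily openpilot's constant

def clip(x, lo, hi):
    return max(lo, min(hi, x))

v_ego = 25.0                    # m/s
desired_curvature_rate = 0.02   # 1/m per second, made-up value

# a_lat = v^2 * kappa  =>  d(a_lat)/dt ~= v^2 * d(kappa)/dt  (dv/dt term ignored)
max_curvature_rate = MAX_LATERAL_JERK / (v_ego ** 2)

safe_rate = clip(desired_curvature_rate, -max_curvature_rate, max_curvature_rate)
print(max_curvature_rate)  # 0.008
print(safe_rate)           # 0.008 -- the desired 0.02 gets clamped
```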
|
migrations: Deprecate migration 0064 into a noop.
This ancient migration imports boto, which interferes
with our upgrade to boto3.
> git name-rev
tags/1.6.0~1924
We can safely assume nobody is upgrading from a server on <1.6.0,
since we have no supported platforms in common with those releases. | # Generated by Django 1.10.5 on 2017-03-18 12:38
-import os
-from typing import Optional
-
-from boto.s3.connection import S3Connection
-from boto.s3.key import Key
-from django.conf import settings
from django.db import migrations
-from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
-from django.db.migrations.state import StateApps
-
-
-class MissingUploadFileException(Exception):
- pass
-
-def get_file_size_local(path_id: str) -> int:
- file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
- try:
- size = os.path.getsize(file_path)
- except OSError:
- raise MissingUploadFileException
- return size
-
-def sync_filesizes(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
- attachments = apps.get_model('zerver', 'Attachment')
- if settings.LOCAL_UPLOADS_DIR is not None:
- for attachment in attachments.objects.all():
- if attachment.size is None:
- try:
- new_size = get_file_size_local(attachment.path_id)
- except MissingUploadFileException:
- new_size = 0
- attachment.size = new_size
- attachment.save(update_fields=["size"])
- else:
- conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
- bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
- bucket = conn.get_bucket(bucket_name, validate=False)
- for attachment in attachments.objects.all():
- if attachment.size is None:
- file_key: Optional[Key] = bucket.get_key(attachment.path_id)
- if file_key is None:
- new_size = 0
- else:
- new_size = file_key.size
- attachment.size = new_size
- attachment.save(update_fields=["size"])
-
-def reverse_sync_filesizes(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
- """Does nothing"""
- return None
class Migration(migrations.Migration):
dependencies = [
('zerver', '0063_realm_description'),
]
-
- operations = [
- migrations.RunPython(sync_filesizes, reverse_sync_filesizes),
- ]
|
Lower severity of some log messages during update handling
Some people were complaining that their logs were being spammed by it. | @@ -275,7 +275,7 @@ class UpdateMethods:
get_diff = self._message_box.get_difference()
if get_diff:
- self._log[__name__].info('Getting difference for account updates')
+ self._log[__name__].debug('Getting difference for account updates')
try:
diff = await self(get_diff)
except (errors.ServerError, ValueError) as e:
@@ -289,12 +289,15 @@ class UpdateMethods:
self._message_box.end_difference()
continue
updates, users, chats = self._message_box.apply_difference(diff, self._mb_entity_cache)
+ if updates:
+ self._log[__name__].info('Got difference for account updates')
+
updates_to_dispatch.extend(self._preprocess_updates(updates, users, chats))
continue
get_diff = self._message_box.get_channel_difference(self._mb_entity_cache)
if get_diff:
- self._log[__name__].info('Getting difference for channel %s updates', get_diff.channel.channel_id)
+ self._log[__name__].debug('Getting difference for channel %s updates', get_diff.channel.channel_id)
try:
diff = await self(get_diff)
except (
@@ -348,6 +351,9 @@ class UpdateMethods:
continue
updates, users, chats = self._message_box.apply_channel_difference(get_diff, diff, self._mb_entity_cache)
+ if updates:
+ self._log[__name__].info('Got difference for channel %d updates', get_diff.channel.channel_id)
+
updates_to_dispatch.extend(self._preprocess_updates(updates, users, chats))
continue
@@ -358,7 +364,7 @@ class UpdateMethods:
try:
updates = await asyncio.wait_for(self._updates_queue.get(), deadline_delay)
except asyncio.TimeoutError:
- self._log[__name__].info('Timeout waiting for updates expired')
+ self._log[__name__].debug('Timeout waiting for updates expired')
continue
else:
continue
|
[change] series CLI - Hide unseen episodes
Episodes which have never been seen by FlexGet are now hidden when
using the `series show <show_name>` subcommand. This includes the
`begin` episode if that episode was never seen by FlexGet. The `begin`
episode is still listed below the table, if it is set. | @@ -204,6 +204,8 @@ def display_details(options):
table_data = [header]
entities = get_all_entities(series, session=session)
for entity in entities:
+ if not entity.releases:
+ continue
if entity.identifier is None:
identifier = colorize(ERROR_COLOR, 'MISSING')
age = ''
|
consistent vertical spacing using p tags
fixes | </template>
<template v-slot:abovechannels>
+ <p>
<KButton
appearance="basic-link"
:text="multipleMode ? $tr('selectTopicsAndResources') : $tr('selectEntireChannels')"
@click="toggleMultipleMode"
/>
- <section
- v-if="showUnlistedChannels"
- class="unlisted-channels"
- >
+ </p>
+ <p v-if="showUnlistedChannels">
<KButton
class="token-button"
:text="$tr('channelTokenButtonLabel')"
name="showtokenmodal"
@click="showTokenModal=true"
/>
- </section>
-
+ </p>
+ <p>
<UiAlert
v-show="notEnoughFreeSpace"
:dismissible="false"
>
{{ $tr('notEnoughSpaceForChannelsWarning') }}
</UiAlert>
+ </p>
</template>
{{ $tr('noChannelsAvailable') }}
</p>
- <div v-else>
+ <p v-else>
<ChannelPanel
v-for="channel in allChannels"
v-show="showItem(channel) && !channelIsBeingDeleted(channel.id)"
@clickselect="goToSelectContentPageForChannel(channel)"
@checkboxchange="handleChange"
/>
- </div>
+ </p>
</template>
</FilteredChannelListContainer>
margin-left: 0;
}
- .unlisted-channels {
- padding: 16px 0;
- }
-
</style>
|
flavors: Always leverage multiqueue
VirtI/O multi-queue allows VMs to scale their network
throughput with vCPU count quite well; build all flavors
with this feature by default. | @@ -43,7 +43,8 @@ node['bcpc']['openstack']['flavors'].each do |flavor, spec|
openstack flavor create "#{flavor}" \
--vcpus #{spec['vcpus']} \
--ram #{spec['ram']} \
- --disk #{spec['disk']}
+ --disk #{spec['disk']} \
+ --property hw:vif_multiqueue_enabled=true
DOC
not_if { node.run_state['os_flavors'].include? flavor }
end
|
Remove some tests
CI complications | @@ -54,13 +54,6 @@ class ValidatorTest(TestCase):
class TestHelpers(TestCase):
""" Tests for InvenTree helper functions """
- def test_is_image(self):
- img = os.path.abspath(os.path.join(STATIC_ROOT, 'img/blank_image.png'))
- self.assertTrue(helpers.TestIfImage(img))
-
- css = os.path.abspath(os.path.join(STATIC_ROOT, 'css/inventree.css'))
- self.assertFalse(helpers.TestIfImage(css))
-
def test_image_url(self):
""" Test if a filename looks like an image """
|