message | diff
---|---|
Update cscs.py
Add TDS to the description of Pilatus and remove the collection flag for cpe modules | @@ -712,7 +712,7 @@ site_configuration = {
},
{
'name': 'pilatus',
- 'descr': 'Alps Cray EX Supercomputer',
+ 'descr': 'Alps Cray EX Supercomputer TDS',
'hostnames': [
'pilatus'
],
@@ -957,8 +957,8 @@ site_configuration = {
'eiger', 'pilatus'
],
'modules': [
- {'name': 'cpeAMD', 'collection': False}
- ]
+ 'cpeAMD'
+ ],
},
{
'name': 'cpeCray',
@@ -966,8 +966,8 @@ site_configuration = {
'eiger', 'pilatus'
],
'modules': [
- {'name': 'cpeCray', 'collection': False}
- ]
+ 'cpeCray'
+ ],
},
{
'name': 'cpeGNU',
@@ -975,8 +975,8 @@ site_configuration = {
'eiger', 'pilatus'
],
'modules': [
- {'name': 'cpeGNU', 'collection': False}
- ]
+ 'cpeGNU'
+ ],
},
{
'name': 'cpeIntel',
@@ -984,8 +984,8 @@ site_configuration = {
'pilatus'
],
'modules': [
- {'name': 'cpeIntel', 'collection': False}
- ]
+ 'cpeIntel'
+ ],
},
{
'name': 'PrgEnv-cray',
|
Update validators.py
adding deprecation message | @@ -519,6 +519,7 @@ class ValidateNestedInput:
output["Deprecations"] = [
"The sustain_hours output will be deprecated soon in favor of bau_sustained_time_steps.",
"outage_start_hour and outage_end_hour will be deprecated soon in favor of outage_start_time_step and outage_end_time_step",
+ "Avoided outage costs will be deprecated soon from the /results endpoint, but retained from the /resilience_stats endpoint"
]
return output
|
Make .to(other) return the result for a value with all ones.
And fix .items() to directly return a tuple, for nicer interactive viewing. | @@ -9,7 +9,7 @@ import operator
import numpy as np
-from .core import Unit, UnitBase
+from .core import Unit, UnitBase, UNITY
__all__ = ['StructuredUnit']
@@ -167,7 +167,7 @@ class StructuredUnit:
return self._units.dtype.names
def items(self):
- return zip(self._units.dtype.names, self._units.item())
+ return tuple(zip(self._units.dtype.names, self._units.item()))
def __iter__(self):
yield from self._units.dtype.names
@@ -203,11 +203,16 @@ class StructuredUnit:
The routine does presume that the type of the first tuple is
representative of the rest. Used in ``_get_converter``.
+ For the special value of ``UNITY``, all fields are assumed to be 1.0,
+ and hence this will return an all-float dtype.
+
"""
if enter_lists:
while isinstance(value, list):
value = value[0]
- if not isinstance(value, tuple) or len(self) != len(value):
+ if value is UNITY:
+ value = (UNITY,) * len(self)
+ elif not isinstance(value, tuple) or len(self) != len(value):
raise ValueError(f"cannot interpret value {value} for unit {self}.")
descr = []
for (name, unit), part in zip(self.items(), value):
@@ -311,11 +316,12 @@ class StructuredUnit:
result = np.empty_like(value)
for name, converter_ in zip(result.dtype.names, converters):
result[name] = converter_(value[name])
- return result
+ # Index with empty tuple to decay array scalars to numpy void.
+ return result if result.shape else result[()]
return converter
- def to(self, other, value, equivalencies=[]):
+ def to(self, other, value=np._NoValue, equivalencies=[]):
"""Return values converted to the specified unit.
Parameters
@@ -323,11 +329,12 @@ class StructuredUnit:
other : `~astropy.units.StructuredUnit`
The unit to convert to. If necessary, will be converted to
a `~astropy.units.StructuredUnit` using the dtype of ``value``.
- value : array-like
+ value : array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If a sequence, the first element must have
entries of the correct type to represent all elements (i.e.,
not have, e.g., a ``float`` where other elements have ``complex``).
+ If not given, assumed to have 1. in all fields.
equivalencies : list of tuple, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
@@ -345,6 +352,10 @@ class StructuredUnit:
UnitsError
If units are inconsistent
"""
+ if value is np._NoValue:
+ # We do not have UNITY as a default, since then the docstring
+ # would list 1.0 as default, yet one could not pass that in.
+ value = UNITY
return self._get_converter(other, equivalencies=equivalencies)(value)
def to_string(self, format='generic'):
|
Use index instead of element when searching for a volume
`range` copies the elements by value. Avoid using it when searching for
volumes. This should improve overall performance when
`AppendVolumeIfNotExists` is used. | @@ -67,8 +67,8 @@ func IncludesArg(slice []string, arg string) bool {
}
func AppendVolumeIfNotExists(slice []v1.Volume, volume v1.Volume) []v1.Volume {
- for _, ele := range slice {
- if ele.Name == volume.Name {
+ for i := range slice {
+ if slice[i].Name == volume.Name {
return slice
}
}
|
Locking RAFT hash for 0.19
Authors:
- Corey J. Nolet (https://github.com/cjnolet)
Approvers:
- Dante Gama Dessavre (https://github.com/dantegd)
- John Zedlewski (https://github.com/JohnZed)
URL: | @@ -39,7 +39,7 @@ else(DEFINED ENV{RAFT_PATH})
ExternalProject_Add(raft
GIT_REPOSITORY https://github.com/rapidsai/raft.git
- GIT_TAG d1fd927bc4ec67bfd765620b5fa93f17c54cfa70
+ GIT_TAG f0cd81fb49638eaddc9bf18998cc894f292bc293
PREFIX ${RAFT_DIR}
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
|
remove subrefs
This patch removes the MosaicReference object's subrefs attribute and rewrites
its points method in terms of simplices. | @@ -1050,7 +1050,7 @@ class MosaicReference(Reference):
'triangulation'
__slots__ = 'baseref', '_edge_refs', '_midpoint', 'edge_refs', 'edge_transforms', 'vertices', '_imidpoint'
- __cache__ = 'subrefs', 'simplices'
+ __cache__ = 'simplices'
@types.apply_annotations
def __init__(self, baseref, edge_refs: tuple, midpoint: types.arraydata):
@@ -1158,22 +1158,16 @@ class MosaicReference(Reference):
indices.append([self._imidpoint, *index] if not etrans.isflipped else [index[0], self._imidpoint, *index[1:]])
return types.frozenarray(indices, dtype=int)
- @property
- def subrefs(self):
- return tuple(ref.cone(trans, self._midpoint) for trans, ref in zip(self.baseref.edge_transforms, self._edge_refs) if ref)
-
def getpoints(self, ischeme, degree):
if ischeme == 'vertex':
return self.baseref.getpoints(ischeme, degree)
- if ischeme == '_centroid':
- return super().getpoints(ischeme, degree)
- subpoints = [subvol.getpoints(ischeme, degree) for subvol in self.subrefs]
+ elif ischeme in ('gauss', 'uniform', 'bezier'):
+ simplexpoints = getsimplex(self.ndims).getpoints(ischeme, degree)
+ subpoints = [points.TransformPoints(simplexpoints, strans) for strans in self.simplex_transforms]
dups = points.find_duplicates(subpoints) if ischeme == 'bezier' else ()
- # NOTE We could consider postprocessing gauss1 to a single point scheme,
- # rather than a concatenation, but for that we would have to verify that
- # the centroid is contained in the element. We leave this optimization for
- # later, to be combined with a reduction of gauss schemes of any degree.
return points.ConcatPoints(subpoints, dups)
+ else:
+ return super().getpoints(ischeme, degree)
def get_ndofs(self, degree):
return self.baseref.get_ndofs(degree)
|
Fix OTB test on Fedora
The metrics and float roundings seem to be slightly different on different
platforms. Using a lower line-height value should fix the problem everywhere.
Fix | @@ -845,7 +845,7 @@ def test_otb_font(assert_pixels):
color: red;
font-family: weasyprint-otb;
font-size: 4px;
- line-height: 1;
+ line-height: 0.8;
}
</style>
AaA''')
|
Adding string_encoding of utf-8 for MAVEN CDFs
Should fix the regression with EUV files not loading | @@ -263,11 +263,11 @@ def load_data(filenames=None,
# Loop through CDF files
desc = l2_regex.match(os.path.basename(f)).group("description")
if desc != '' and suffix == '':
- created_vars = pytplot.cdf_to_tplot(f, varformat=varformat, varnames=varnames,
+ created_vars = pytplot.cdf_to_tplot(f, varformat=varformat, varnames=varnames, string_encoding='utf-8',
get_support_data=get_support_data, prefix=prefix,
suffix=desc, merge=True)
else:
- created_vars = pytplot.cdf_to_tplot(f, varformat=varformat, varnames=varnames,
+ created_vars = pytplot.cdf_to_tplot(f, varformat=varformat, varnames=varnames, string_encoding='utf-8',
get_support_data=get_support_data, prefix=prefix,
suffix=suffix, merge=True)
|
Workaround to compare some Floats
This will restore evaluation of some comparisons
after e.g. Float('+inf') < pi or exp(-3) < Float('+inf'). | @@ -456,7 +456,9 @@ def _eval_is_irrational(self):
def _eval_is_positive(self):
if self.is_number:
- return super(Add, self)._eval_is_positive()
+ n = super(Add, self)._eval_is_positive()
+ if n is not None:
+ return n
if any(a.is_infinite for a in self.args):
args = [a for a in self.args if not a.is_finite]
@@ -483,7 +485,9 @@ def _eval_is_positive(self):
def _eval_is_negative(self):
if self.is_number:
- return super(Add, self)._eval_is_negative()
+ n = super(Add, self)._eval_is_negative()
+ if n is not None:
+ return n
if any(a.is_infinite for a in self.args):
args = [a for a in self.args if not a.is_finite]
|
[JIT] Add more ops to 'removableGuard' in guard elimination pass.
Summary: Pull Request resolved:
Test Plan: Imported from OSS | @@ -221,7 +221,23 @@ private:
case aten::div:
case aten::t:
case aten::sigmoid:
+ case aten::sin:
+ case aten::cos:
+ case aten::tan:
+ case aten::sinh:
+ case aten::cosh:
case aten::tanh:
+ case aten::asin:
+ case aten::acos:
+ case aten::atan:
+ case aten::atan2:
+ case aten::floor:
+ case aten::fmod:
+ case aten::ceil:
+ case aten::trunc:
+ case aten::sqrt:
+ case aten::rsqrt:
+ case aten::remainder:
case aten::mm:
case aten::min:
case aten::max:
@@ -246,6 +262,16 @@ private:
case aten::rand_like:
case aten::erf:
case aten::erfc:
+ case aten::exp:
+ case aten::expm1:
+ case aten::log:
+ case aten::log2:
+ case aten::log10:
+ case aten::frac:
+ case aten::lerp:
+ case aten::lgamma:
+ case aten::reciprocal:
+ case aten::addcmul:
return checkInputs(n, no_exceptions);
case aten::slice:
return !n->input(0)->type()->expect<TensorType>()->isSummarized() &&
|
Open dashboard window asynchronously
Modified `dallinger debug` to open the dashboard asynchronously. | @@ -406,7 +406,12 @@ class DebugDeployment(HerokuLocalDeployment):
dashboard_url = self.with_proxy_port("{}/dashboard/".format(base_url))
self.display_dashboard_access_details(dashboard_url)
if not self.no_browsers:
- self.open_dashboard(dashboard_url)
+ self.async_open_dashboard(dashboard_url)
+
+ # A little delay here ensures that the experiment window always opens
+ # after the dashboard window.
+ time.sleep(0.1)
+
self.heroku = heroku
self.out.log(
"Monitoring the Heroku Local server for recruitment or completion..."
@@ -443,6 +448,11 @@ class DebugDeployment(HerokuLocalDeployment):
)
)
+ def async_open_dashboard(self, url):
+ threading.Thread(
+ target=self.open_dashboard, name="Open dashboard", kwargs={"url": url}
+ ).start()
+
def open_dashboard(self, url):
config = get_config()
self.out.log("Opening dashboard")
|
Remove HTTPClient.last_request and last_response
Closes | @@ -27,10 +27,6 @@ class HTTPClient(Client):
# Make use of Requests' sessions feature
self.session = Session()
self.session.headers.update(self.DEFAULT_HEADERS)
- # Keep last request and response - don't use, will be removed in next
- # major release
- self.last_request = None
- self.last_response = None
def prepare_request(self, request, **kwargs):
"""
@@ -61,12 +57,8 @@ class HTTPClient(Client):
Raised by the requests module in the event of a communications
error.
"""
- # Keep last request - don't use, will be removed in next major release
- self.last_request = request
# Send the message with Requests, passing any final config options
response = self.session.send(request.prepped, **kwargs)
- # Keep last response - don't use, will be removed in next major release
- self.last_response = response
# Give some extra information to include in the response log entry
return self.process_response(
response.text,
|
Update avcodecs.py
quotes | @@ -406,7 +406,7 @@ class VideoCodec(BaseCodec):
for line in vf:
vfstring = "%s:%s" % (line, vfstring)
- optlist.extend(['-vf', vfstring[:-1]])
+ optlist.extend(['-vf', "\'%s\'" % vfstring[:-1]])
return optlist
|
Add BiggerPockets.com
Add in BiggerPockets.com forum - a popular real estate forum
Examples:
Claimed: - returns a status code of 200
Unclaimed: - returns a status code of 404 | "username_claimed": "Jackson",
"username_unclaimed": "ktobysietaknazwalnawb69"
},
+ "BiggerPockets": {
+ "errorType": "status_code",
+ "url": "https://www.biggerpockets.com/users/{}",
+ "urlMain": "https://www.biggerpockets.com/",
+ "username_claimed": "blue",
+ "username_unclaimed": "noonewouldeverusethis7"
+ },
"Bikemap": {
"errorType": "status_code",
"url": "https://www.bikemap.net/en/u/{}/routes/created/",
|
fix: Flickering tooltip on the Give Up button
fix: | -<span ng-if="solutionIsAvailable()" tooltip="<['I18N_PLAYER_GIVE_UP_TOOLTIP' | translate]>">
+<span ng-if="solutionIsAvailable()" tooltip="<['I18N_PLAYER_GIVE_UP_TOOLTIP' | translate]>" tooltip-placement="left">
<md-button class="solution-button protractor-test-view-solution"
ng-click="onClickSolutionButton()"
aria-label="Give up?">
|
search aggregations for fields
* search aggregations for fields
fixes
* remove comment | @@ -7,7 +7,7 @@ from hail.utils.linkedlist import LinkedList
from hail.genetics import Locus, Interval, Call
from hail.typecheck import *
from collections import Mapping, Sequence, OrderedDict
-
+import itertools
class Indices(object):
@typecheck_method(source=anytype, axes=setof(str))
@@ -325,7 +325,7 @@ def unify_all(*exprs):
from collections import defaultdict
sources = defaultdict(lambda: [])
for e in exprs:
- for name, inds in e._refs:
+ for name, inds in itertools.chain(e._refs, *(a.refs for a in e._aggregations)):
sources[inds.source].append(str(name))
raise ExpressionException("Cannot combine expressions from different source objects."
"\n Found fields from {n} objects:{fields}".format(
|
Fix to use . to source script files
Refer to ``Code conventions`` at [1] for details.
When you have to source a script file, for example, a credentials file
to gain access to user-only or admin-only CLI commands,
use . instead of source.
[1] | @@ -56,7 +56,7 @@ With `nose`
You can use `nose`_ to run individual tests, as well as use for debugging
portions of your code::
- source .venv/bin/activate
+ . .venv/bin/activate
pip install nose
nosetests
|
Update install.sh
Add testing for 'yum' && fedora-release. In a later step one could test 'dnf' and fedora separately. | @@ -46,13 +46,13 @@ if [ "$(uname)" = "Linux" ]; then
# Arch Linux
echo "Installing on Arch Linux"
sudo pacman -S --needed python git
- elif type yum && [ ! -f "/etc/redhat-release" ] && [ ! -f "/etc/centos-release" ]; then
+ elif type yum && [ ! -f "/etc/redhat-release" ] && [ ! -f "/etc/centos-release" ] && [ ! -f "/etc/fedora-release" ]; then
# AMZN 2
echo "Installing on Amazon Linux 2"
sudo yum install -y python3 git
- elif type yum && [ -f /etc/redhat-release ] || [ -f /etc/centos-release ]; then
- # CentOS or Redhat
- echo "Installing on CentOS/Redhat"
+ elif type yum && [ -f "/etc/redhat-release" ] || [ -f "/etc/centos-release" ] || [ -f "/etc/fedora-release" ]; then
+ # CentOS or Redhat or Fedora
+ echo "Installing on CentOS/Redhat/Fedora"
fi
elif [ "$(uname)" = "Darwin" ] && ! type brew >/dev/null 2>&1; then
echo "Installation currently requires brew on MacOS - https://brew.sh/"
|
fix(account adapter): render_to_string takes self.request
This adds the request as available context to everything using render_to_string: emails, messages, etc. | @@ -111,8 +111,9 @@ class DefaultAccountAdapter(object):
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
- bodies[ext] = render_to_string(template_name,
- context).strip()
+ bodies[ext] = render_to_string(
+ template_name, context, self.request,
+ ).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
# We need at least one body
@@ -311,8 +312,9 @@ class DefaultAccountAdapter(object):
try:
if message_context is None:
message_context = {}
- message = render_to_string(message_template,
- message_context).strip()
+ message = render_to_string(
+ message_template, message_context, self.request,
+ ).strip()
if message:
messages.add_message(request, level, message,
extra_tags=extra_tags)
|
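A brief illustrative sketch (not part of the commit above; the template name is a hypothetical placeholder): Django's `render_to_string` only runs context processors, and therefore only exposes `request`, `user`, and friends to the template, when a request object is passed in, which is what the change above enables.

```python
from django.template.loader import render_to_string


def build_email_body(request, activate_url):
    context = {"activate_url": activate_url}
    # Without `request`, the template renders with a plain Context and context
    # processors never run; with it, a RequestContext is built so the template
    # can also use {{ request }}, {{ user }}, etc.
    return render_to_string(
        "account/email/confirmation_message.txt",  # hypothetical template name
        context,
        request,
    ).strip()
```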
Update sentinel-2-l2a-cogs.yaml
Added reference from CVPR2022 | @@ -82,6 +82,9 @@ DataAtWork:
- Title: STAC and Sentinel-2 COGs (ESIP Summer Meeting 2020)
URL: https://docs.google.com/presentation/d/14NsKFZ3UF2Swwx_9L7sPMX9ccFUK1ruQyZXWK9Cz4L4/edit?usp=sharing
AuthorName: Matthew Hanson
+ - Title: OpenSentinelMap: A Large-Scale Land Use Dataset using OpenStreetMap and Sentinel-2 Imagery
+ URL: https://openaccess.thecvf.com/content/CVPR2022W/EarthVision/papers/Johnson_OpenSentinelMap_A_Large-Scale_Land_Use_Dataset_Using_OpenStreetMap_and_Sentinel-2_CVPRW_2022_paper.pdf
+ AuthorName: Noah Johnson, Wayne Treible, Daniel Crispell
Tools & Applications:
- Title: Fire detection Twitter bot using satellite imagery
URL: https://twitter.com/FirewatchBot
|
Update CHANGELOG.rst for 0.5.5
* Update CHANGELOG.rst
Bump unreleased changes to 0.5.5
* Update CHANGELOG.rst
Updating links
* Update CHANGELOG.rst
Extra newline for separating Unreleased changes section.
* Moving `Remove OpenQuantumCompiler` to unreleased. | @@ -18,6 +18,23 @@ The format is based on `Keep a Changelog`_.
`UNRELEASED`_
=============
+Added
+-----
+
+Changed
+-------
+
+Removed
+-------
+- Remove OpenQuantumCompiler (#610).
+
+Fixed
+-----
+
+
+`0.5.5`_ - 2018-07-02
+=====================
+
Added
-----
- Retrieve IBM Q jobs from server (#563, #585).
@@ -36,7 +53,6 @@ Changed
Removed
-------
-- Remove OpenQuantumCompiler (#610).
- Remove Clifford simulator from default available_backends, until its stable
release (#555).
- Remove ProjectQ simulators for moving to new repository (#553).
@@ -491,7 +507,8 @@ Fixed
- Correct operator precedence when parsing expressions (#190).
- Fix "math domain error" in mapping (#111, #151).
-.. _UNRELEASED: https://github.com/QISKit/qiskit-core/compare/0.5.4...HEAD
+.. _UNRELEASED: https://github.com/QISKit/qiskit-core/compare/0.5.5...HEAD
+.. _0.5.5: https://github.com/QISKit/qiskit-core/compare/0.5.4...0.5.5
.. _0.5.4: https://github.com/QISKit/qiskit-core/compare/0.5.3...0.5.4
.. _0.5.3: https://github.com/QISKit/qiskit-core/compare/0.5.2...0.5.3
.. _0.5.2: https://github.com/QISKit/qiskit-core/compare/0.5.1...0.5.2
|
Add zoom instructions for the UML diagram
closes | .. _schemas:
An automatically generated UML diagram of the current schema can be seen below.
-Please open it a new tab for details. Containments are indicated by orange lines, whereas Id-references are indicated by dashed green lines.
+Please open it a new tab for details. Containments are indicated by orange lines, whereas Id-references are indicated by dashed green lines. Click the image below to open it in a separate browser window. You should be able to zoom in (command+ or control+ on most browsers) and scroll around the image to see the details.
.. image:: /_build/generated_images/schema_uml.svg
:target: ../_images/schema_uml.svg
|
Update methodology.html
Added link to Localization section | <li class="toctree-l1"><a class="reference internal" href="motivation.html">Motivation</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Methodology</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#carbon-intensity">Carbon Intensity</a></li>
+<li class="toctree-l2"><a class="reference internal" href="#locaization">Localization</a></li>
<li class="toctree-l2"><a class="reference internal" href="#power-usage">Power Usage</a></li>
<li class="toctree-l2"><a class="reference internal" href="#references">References</a></li>
</ul>
|
Update `CUDA` Flags [skip ci]
This PR updates the flags so that certain warnings are not treated as errors. This is a necessary hotfix to get DLFW builds to complete successfully. | @@ -28,7 +28,7 @@ list(APPEND CUML_CUDA_FLAGS --expt-extended-lambda --expt-relaxed-constexpr)
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 11.2.0)
list(APPEND CUML_CUDA_FLAGS -Werror=all-warnings)
endif()
-list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations)
+list(APPEND CUML_CUDA_FLAGS -Xcompiler=-Wall,-Werror,-Wno-error=deprecated-declarations,-Wno-error=sign-compare)
if(DISABLE_DEPRECATION_WARNING)
list(APPEND CUML_CXX_FLAGS -Wno-deprecated-declarations)
|
Add --short flag to dagster-release version (for automation)
Test Plan: Manual
Reviewers: dgibson, alangenfeld, prha | @@ -152,7 +152,8 @@ def release(ver, dry_run):
@cli.command()
-def version():
[email protected]("--short", is_flag=True)
+def version(short):
"""Gets the most recent tagged version."""
dmp = DagsterModulePublisher()
@@ -170,6 +171,9 @@ def version():
git_tag=git_tag, versions=format_module_versions(module_versions)
)
)
+ else:
+ if short:
+ click.echo(git_tag)
else:
click.echo(
"All modules in lockstep with most recent tagged version: {git_tag}".format(
|
Better error message in exception of deploy_done
We were just raising an exception with the path of the script rather than the
actual error. | @@ -11,7 +11,7 @@ def wait_for_applications(script, msg_cb):
""" Processes a 00_deploy-done to verify if applications are available
Arguments:
- script: script to run (00_deploy-done.sh)
+ script: script to run (00_deploy-done)
msg_cb: message callback
"""
if os.path.isfile(script) \
@@ -25,7 +25,9 @@ def wait_for_applications(script, msg_cb):
if sh.returncode != 0:
app.log.error("error running {}:\n{}".format(script,
sh.stderr))
- raise Exception("Error running {}".format(script))
+ raise Exception(
+ "Error in waiting for deployment to finish: "
+ "{}".format(sh.stderr.decode()))
try:
lines = sh.stdout.decode('utf8').splitlines()
|
Update savedmodel_test.py
I added a cast to `float32`. This is needed because of an obscure bug in JAX | @@ -83,7 +83,7 @@ class SavedModelTest(tf_test_util.JaxToTfTestCase):
x, = primals
x_dot, = tangents
primal_out = f_jax(x)
- tangent_out = 3. * x * x_dot
+ tangent_out = np.float32(3.) * x * x_dot
return primal_out, tangent_out
model = tf.Module()
|
Remove support for async script loading, as Firefox sets async
on all dynamically injected script tags.
Prevent repeated loading of the same tags. | @@ -23,6 +23,8 @@ export const runScriptTypes = [
];
export default function replaceScript($script, callback) {
+ if (!$script.loaded) {
+ $script.loaded = true;
const s = document.createElement('script');
s.type = 'text/javascript';
[].forEach.call($script.attributes, attribute => {
@@ -33,7 +35,6 @@ export default function replaceScript($script, callback) {
}
});
if ($script.src) {
- if (!$script.async) {
const cb = () => {
// Clean up onload and onerror handlers
// after they have been triggered to avoid
@@ -44,8 +45,8 @@ export default function replaceScript($script, callback) {
};
s.onload = cb;
s.onerror = cb;
- }
s.src = $script.src;
+ s.async = false;
} else {
s.innerHTML = $script.innerHTML;
}
@@ -65,7 +66,8 @@ export default function replaceScript($script, callback) {
}
// run the callback immediately for inline scripts
- if (!$script.src || $script.async) {
+ if (!$script.src) {
callback();
}
}
+}
|
Update extensions.py
dvd/dvb to mks | @@ -4,6 +4,9 @@ subtitle_codec_extensions = {'srt': 'srt',
'webvtt': 'vtt',
'ass': 'ass',
'pgs': 'sup',
- 'hdmv_pgs_subtitle': 'sup'}
+ 'hdmv_pgs_subtitle': 'sup',
+ 'dvdsub': 'mks',
+ 'dvb_subtitle': 'mks',
+ 'dvd_subtitle': 'mks'}
bad_post_files = ['resources', '.DS_Store']
bad_post_extensions = ['.txt', '.log', '.pyc']
|
MAINT: simplify flow in np.require
Move the possible_flags dictionary to a global value so it is not
re-constructed on each call.
+POSSIBLE_FLAGS = {
+ 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+ 'A': 'A', 'ALIGNED': 'A',
+ 'W': 'W', 'WRITEABLE': 'W',
+ 'O': 'O', 'OWNDATA': 'O',
+ 'E': 'E', 'ENSUREARRAY': 'E'
+}
+
def _require_dispatcher(a, dtype=None, requirements=None, *, like=None):
return (like,)
@@ -97,16 +106,10 @@ def require(a, dtype=None, requirements=None, *, like=None):
like=like,
)
- possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
- 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
- 'A': 'A', 'ALIGNED': 'A',
- 'W': 'W', 'WRITEABLE': 'W',
- 'O': 'O', 'OWNDATA': 'O',
- 'E': 'E', 'ENSUREARRAY': 'E'}
if not requirements:
return asanyarray(a, dtype=dtype)
- else:
- requirements = {possible_flags[x.upper()] for x in requirements}
+
+ requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
if 'E' in requirements:
requirements.remove('E')
@@ -128,8 +131,7 @@ def require(a, dtype=None, requirements=None, *, like=None):
for prop in requirements:
if not arr.flags[prop]:
- arr = arr.copy(order)
- break
+ return arr.copy(order)
return arr
|
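For context (not from the diff above; the array shapes are illustrative only), a short sketch of what the flag aliases in that mapping mean when `np.require` is called:

```python
import numpy as np

# A non-contiguous, read-only view to exercise the flag handling.
base = np.arange(6, dtype=np.float64).reshape(2, 3)
view = base[:, ::2]
view.flags.writeable = False

# 'C' and 'W' are the short aliases that POSSIBLE_FLAGS resolves from
# 'C_CONTIGUOUS'/'CONTIGUOUS' and 'WRITEABLE'; require() copies only if needed.
out = np.require(view, requirements=['C', 'W'])

print(view.flags['C_CONTIGUOUS'], view.flags['WRITEABLE'])  # False False
print(out.flags['C_CONTIGUOUS'], out.flags['WRITEABLE'])    # True True
```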
feat(device): add WXKG02LMSwitchController
related to | -from cx_const import Light, TypeActionsMapping
-from cx_core import LightController
+from cx_const import Light, Switch, TypeActionsMapping
+from cx_core import LightController, SwitchController
class WXKG02LMLightController(LightController):
- """
- This controller allows click, double click, hold and release for
- both, left and the right button. All action will do the same for both, left
- and right. Then from the apps.yaml the needed actions can be included and create
- different instances for different lights.
- """
-
- # Different states reported from the controller:
- # both, both_double, both_long, right, right_double
- # right_long, left, left_double, left_long
-
def get_z2m_actions_mapping(self) -> TypeActionsMapping:
return {
"single_both": Light.TOGGLE,
@@ -41,6 +30,22 @@ class WXKG02LMLightController(LightController):
}
+class WXKG02LMSwitchController(SwitchController):
+ def get_z2m_actions_mapping(self) -> TypeActionsMapping:
+ return {
+ "single_both": Switch.TOGGLE,
+ "single_left": Switch.TOGGLE,
+ "single_right": Switch.TOGGLE,
+ }
+
+ def get_deconz_actions_mapping(self) -> TypeActionsMapping:
+ return {
+ 1002: Switch.TOGGLE,
+ 2002: Switch.TOGGLE,
+ 3002: Switch.TOGGLE,
+ }
+
+
class WXKG01LMLightController(LightController):
"""
Different states reported from the controller:
|
alias import
Somehow `connections` was referring to the `corehq.sql_db.connections` module | from django.apps import apps
from django.conf import settings
from django.core import checks
-from django.db import connections, DEFAULT_DB_ALIAS, router
+from django.db import connections as django_connections, DEFAULT_DB_ALIAS, router
from corehq.sql_db.exceptions import PartitionValidationError
@@ -122,7 +122,7 @@ def check_db_tables(app_configs, **kwargs):
def _check_model(model_class, using=None):
db = using or router.db_for_read(model_class)
try:
- with connections[db].cursor() as cursor:
+ with django_connections[db].cursor() as cursor:
cursor.execute("SELECT %s::regclass", [model_class._meta.db_table])
except Exception as e:
errors.append(checks.Error('checks.Error querying model on database "{}": "{}.{}": {}.{}({})'.format(
|
Screengrab app : Rename `-scriptEditor` argument to `-pythonEditor`
Breaking Change :
Screengrab app : Renamed `-scriptEditor` argument to `-pythonEditor` | @@ -109,12 +109,12 @@ class screengrab( Gaffer.Application ) :
),
IECore.CompoundParameter(
- name = "scriptEditor",
- description = "Parameters that configure ScriptEditors.",
+ name = "pythonEditor",
+ description = "Parameters that configure PythonEditors.",
members = [
IECore.StringParameter(
name = "execute",
- description = "Some python code to execute in the script editor.",
+ description = "Some python code to execute in the editor.",
defaultValue = "",
),
]
@@ -297,14 +297,14 @@ class screengrab( Gaffer.Application ) :
grabWidget = GafferUI.PlugValueWidget.acquire( script.descendant( args["nodeEditor"]["grab"].value ) )
self.setGrabWidget( grabWidget )
- # Set up the ScriptEditors as requested.
+ # Set up the PythonEditors as requested.
- for scriptEditor in scriptWindow.getLayout().editors( GafferUI.ScriptEditor ) :
+ for pythonEditor in scriptWindow.getLayout().editors( GafferUI.PythonEditor ) :
- if args["scriptEditor"]["execute"].value :
- scriptEditor.inputWidget().setText( args["scriptEditor"]["execute"].value )
- scriptEditor.inputWidget()._qtWidget().selectAll()
- scriptEditor.execute()
+ if args["pythonEditor"]["execute"].value :
+ pythonEditor.inputWidget().setText( args["pythonEditor"]["execute"].value )
+ pythonEditor.inputWidget()._qtWidget().selectAll()
+ pythonEditor.execute()
# Set up the Viewers as requested.
|
components: Add basic styling component for guest avatar marker.
Fixes | background-color: hsl(0, 0%, 100%);
border: 1px solid hsl(0, 0%, 87%);
}
+
+.guest-avatar {
+ position: relative;
+ background-size: 100%;
+ width: 100%;
+ height: 100%;
+ overflow: hidden;
+
+ &::after {
+ content: " ";
+ background-color: hsl(0, 0%, 47%);
+ position: absolute;
+ bottom: -30%;
+ right: -30%;
+ width: 50%;
+ height: 50%;
+ -webkit-transform: rotate(45deg);
+ -moz-transform: rotate(45deg);
+ transform: rotate(45deg);
+ }
+}
|
output_dir fix
Fixed error where output_dir was not appropriately set when using
NZBGetPostProcess | @@ -59,10 +59,9 @@ output_dir = None
if 'NZBPO_OUTPUT_DIR' in os.environ:
output_dir = os.environ['NZBPO_OUTPUT_DIR'].strip()
if len(output_dir) > 0:
- output_dir = os.environ['NZBPO_MP4_FOLDER'].strip()
- output_dir = MP4folder.replace('"', '')
- output_dir = MP4folder.replace("'", "")
- output_dir = MP4folder.replace("\\", "/")
+ output_dir = output_dir.replace('"', '')
+ output_dir = output_dir.replace("'", "")
+ output_dir = output_dir.replace("\\", "/")
if not(output_dir.endswith("/")):
output_dir += "/"
#DEBUG#print Overriding output directory
|
Suggest git submodule update --init --recursive
Summary:
We now have submodules that have submodules
Pull Request resolved: | @@ -457,7 +457,7 @@ class build_deps(PytorchCommand):
def check_file(f):
if not os.path.exists(f):
print("Could not find {}".format(f))
- print("Did you run 'git submodule update --init'?")
+ print("Did you run 'git submodule update --init --recursive'?")
sys.exit(1)
check_file(os.path.join(third_party_path, "gloo", "CMakeLists.txt"))
|
Update doc intro section; mention pywin32 [ci skip]
Slight wording change to program locations.
Move requirements paragraph to last in section, and
mention pywin32 is recommended on Windows. | @@ -214,9 +214,9 @@ that you want to use to build your target files
are not in standard system locations,
&scons;
will not find them unless
-you explicitly set the &scons;
-<envar>PATH</envar> in the internal environment
-to include those locations.
+you explicitly include the locations into the value of
+<varname>PATH</varname> in the <varname>ENV</varname>
+variable in the internal &consenv;.
Whenever you create a &consenv;,
you can propagate the value of <envar>PATH</envar>
from your external environment as follows:</para>
@@ -436,11 +436,6 @@ command-line options. The
option is useful to prevent multiple builds
from trying to update the cache simultaneously.</para>
-<para>&scons;
-requires Python 3.5 or higher.
-There should be no other dependencies or requirements to run &scons;.
-</para>
-
<!-- The following paragraph reflects the default tool search orders -->
<!-- currently in SCons/Tool/__init__.py. If any of those search orders -->
<!-- change, this documentation should change, too. -->
@@ -479,6 +474,13 @@ and the Intel compiler tools.
These default values may be overridden
by appropriate setting of &consvars;.</para>
+<para>&scons;
+requires Python 3.5 or higher.
+There should be no other dependencies or requirements to run &scons;,
+although the <package>pywin32</package> Python package is
+strongly recommended if running on Windows systems.
+</para>
+
</refsect1>
<refsect1 id='options'>
|
MAINT: Fix test failures on travis CI merge.
Disable "-Wsign-compare" and "-Wunused-result" gcc warnings.
These seem to have been enabled by default recently for wheel and
sdist builds. | @@ -138,6 +138,8 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
# ensure that the pip / setuptools versions deployed inside
# the venv are recent enough
$PIP install -U virtualenv
+ # ensure some warnings are not issued
+ export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
$PYTHON setup.py bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
@@ -151,6 +153,8 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# use an up-to-date pip / setuptools inside the venv
$PIP install -U virtualenv
+ # ensure some warnings are not issued
+ export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
$PYTHON setup.py sdist
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
|
Add an intermediate class in the error class hierarchy
Add a class between StratisCliUserError and
StratisCliOverprovisionChangeError. Call it
StratisCliNoPropertyChangeError. | @@ -41,10 +41,10 @@ class StratisCliUserError(StratisCliRuntimeError):
"""
-class StratisCliOverprovisionChangeError(StratisCliUserError):
+class StratisCliNoPropertyChangeError(StratisCliUserError):
"""
- Raised when the user requests the same overprovision state that the pool
- already has. May in future be generalized to a no state change error.
+ Raised when the user requests that a property be changed to its existing
+ value.
"""
def __init__(self, value):
@@ -56,6 +56,13 @@ class StratisCliOverprovisionChangeError(StratisCliUserError):
# pylint: disable=super-init-not-called
self.value = value
+
+class StratisCliOverprovisionChangeError(StratisCliNoPropertyChangeError):
+ """
+ Raised when the user requests the same overprovision state that the pool
+ already has.
+ """
+
def __str__(self):
return f"Pool's overprovision mode is already set to {str(self.value).lower()}"
|
io: Remove stale csv files
When exporting into an already existing directory, make sure to clean left-over
files from previous exports that are not overwritten. | @@ -111,11 +111,24 @@ def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standar
#first do static attributes
-
+ filename = os.path.join(csv_folder_name,list_name+".csv")
df.index.name = "name"
if df.empty:
- logger.warning("No {} to export".format(list_name))
+ logger.info("No {} to export".format(list_name))
+ if os.path.exists(filename):
+ os.unlink(filename)
+
+ fns = [os.path.basename(filename)]
+ for attr in attrs.index[attrs.varying]:
+ fn = os.path.join(csv_folder_name,list_name+'-'+attr+'.csv')
+ if os.path.exists(fn):
+ os.unlink(fn)
+ fns.append(os.path.basename(fn))
+
+ logger.warning("Stale csv file(s) {} removed".format(', '.join(fns)))
+
continue
+
col_export = []
for col in df.columns:
#do not export derived attributes
@@ -130,7 +143,7 @@ def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standar
col_export.append(col)
- df[col_export].to_csv(os.path.join(csv_folder_name,list_name+".csv"),encoding=encoding)
+ df[col_export].to_csv(filename,encoding=encoding)
#now do varying attributes
@@ -145,8 +158,14 @@ def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standar
else:
col_export = pnl[attr].columns[(pnl[attr] != default).any()]
+ filename = os.path.join(csv_folder_name,list_name+"-" + attr + ".csv")
if len(col_export) > 0:
- pnl[attr].loc[:,col_export].to_csv(os.path.join(csv_folder_name,list_name+"-" + attr + ".csv"),encoding=encoding)
+ pnl[attr].loc[:,col_export].to_csv(filename,encoding=encoding)
+ else:
+ if os.path.exists(filename):
+ os.unlink(filename)
+ logger.warning("Stale csv file {} removed"
+ .format(os.path.basename(filename)))
|
Add level/severity threshold for diagnostics panel opening
auto_show_diagnostics_panel_level, defaulted to 3 (info) | // depending on available diagnostics.
"auto_show_diagnostics_panel": true,
+ // Open the diagnostics panel automatically
+ // when diagnostics level is equal to or less than:
+ // error: 1
+ // warning: 2
+ // info: 3
+ // hint: 4
+ "auto_show_diagnostics_panel_level": 3,
+
// Show in-line diagnostics using phantoms for unchanged files.
"show_diagnostics_phantoms": false,
|
[config] Update proto/Makefile
Update proto/Makefile to use compile_proto.py instead of protoc directly. | @@ -7,4 +7,4 @@ compile:
# Move the root of proto compilation to //appengine/components.
# This is consistent with other components assuming that "components"
# is in the import path.
- cd ../../../ && protoc --python_out=. components/config/proto/*.proto
+ cd ../../../ && tools/compile_proto.py components/config/proto
|
Correct Bug in move_to_laser
The math requires that the bounds be those of the whole group of data so that rotations are unaffected. | @@ -3813,15 +3813,14 @@ class Elemental(Modifier):
ty = input_driver.current_y
except AttributeError:
ty = 0
- m = Matrix("translate(%f,%f)" % (tx, ty))
try:
- for e in data:
- otx = e.transform.value_trans_x()
- oty = e.transform.value_trans_y()
+ bounds = Group.union_bbox([abs(e) for e in data])
+ otx = bounds[0]
+ oty = bounds[1]
ntx = tx - otx
nty = ty - oty
- m = Matrix("translate(%f,%f)" % (ntx, nty))
- e *= m
+ for e in data:
+ e.transform.post_translate(ntx, nty)
if hasattr(e, "node"):
e.node.modified()
except ValueError:
|
Updated the doc to settle the confusion regarding Gregorian and Julian.
Updated BaseRepresentation.represent_as to to_cartesian since the former mentions an example that is not there.
The former line was recursive, which caused confusion since the docstring belonged exclusively to "astropy.coordinates.BaseRepresentation.represent_as"; adding to_cartesian makes it clearer.
[ci skip] | @@ -836,7 +836,7 @@ class BaseRepresentation(BaseRepresentationOrDifferential):
By default, conversion is done via Cartesian coordinates.
Also note that orientation information at the origin is *not* preserved by
conversions through Cartesian coordinates. See the docstring for
- `~astropy.coordinates.BaseRepresentation.represent_as()` for an example.
+ `astropy.coordinates.BaseRepresentationOrDifferential.to_cartesian()` for an example.
Parameters
----------
|
"nested_path" needs to be specified in the sorting block as of elasticsearch 2.4
In v1.7, this field was automatically determined based on the closest inherited nested field | @@ -155,6 +155,7 @@ class ApplicationStatusReport(GetParamsMixin, PaginatedReportMixin, DeploymentsR
sort_dict = {
sort_prop: {
"order": sort_dir,
+ "nested_path": "reporting_metadata.last_submissions",
"nested_filter": {
"term": {
self.sort_filter: self.selected_app_id
|
Add consensus message signature verification test
Add a unit test to verify this functions correctly | @@ -25,6 +25,9 @@ from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, \
Transaction
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block
+from sawtooth_validator.protobuf.consensus_pb2 import ConsensusPeerMessage
+from sawtooth_validator.protobuf.consensus_pb2 import \
+ ConsensusPeerMessageHeader
from sawtooth_validator.gossip import signature_verifier as verifier
from sawtooth_validator.gossip import structure_verifier
@@ -161,6 +164,30 @@ class TestMessageValidation(unittest.TestCase):
return block_list
+ def _create_consensus_message(self, valid=True):
+ name, version = "test", "1.0"
+ content = b"123"
+ message_type = "test"
+ header_bytes = ConsensusPeerMessageHeader(
+ signer_id=bytes.fromhex(self.public_key),
+ content_sha512=hashlib.sha512(content).digest(),
+ message_type=message_type,
+ name=name,
+ version=version,
+ ).SerializeToString()
+
+ if valid:
+ signature = bytes.fromhex(self.signer.sign(header_bytes))
+ else:
+ signature = b"bad_signature"
+
+ message = ConsensusPeerMessage(
+ header=header_bytes,
+ content=content,
+ header_signature=signature)
+
+ return message
+
def test_valid_transaction(self):
txn_list = self._create_transactions(1)
txn = txn_list[0]
@@ -228,3 +255,11 @@ class TestMessageValidation(unittest.TestCase):
block = block_list[0]
valid = verifier.is_valid_block(block)
self.assertFalse(valid)
+
+ def test_valid_consensus_message(self):
+ message = self._create_consensus_message()
+ self.assertTrue(verifier.is_valid_consensus_message(message))
+
+ def test_invalid_consensus_message(self):
+ message = self._create_consensus_message(valid=False)
+ self.assertFalse(verifier.is_valid_consensus_message(message))
|
Navigation: Change + reorder links, add deprecation badges
This commit adds a link to a non-existing view, `all_workshoprequests`,
which triggers an error on all (or almost all) AMY pages. The view will
be added shortly. | {% navbar_element "Training requests" "all_trainingrequests" True %}
{% navbar_element "Bulk upload training request scores" "bulk_upload_training_request_scores" True %}
<div class="dropdown-divider"></div>
- {% navbar_element "Workshop requests" "all_eventrequests" True %}
- {% navbar_element "Workshop submissions" "all_eventsubmissions" True %}
- {% navbar_element "DC self-organized workshop requests" "all_dcselforganizedeventrequests" True %}
+ {% navbar_element "Workshop requests" "all_workshoprequests" True False %}
+ {% navbar_element "SWC/DC Event requests" "all_eventrequests" True False %}
+ {% navbar_element "DC self-organized workshop requests" "all_dcselforganizedeventrequests" True False %}
<div class="dropdown-divider"></div>
- {% navbar_element "Profile update requests" "all_profileupdaterequests" True %}
- {% navbar_element "Invoice requests" "all_invoicerequests" True %}
+ {% navbar_element "Workshop submissions" "all_eventsubmissions" True True %}
+ {% navbar_element "Profile update requests" "all_profileupdaterequests" True True %}
+ {% navbar_element "Invoice requests" "all_invoicerequests" True True %}
</div>
</li>
</ul>
|
Fix for sed in write_bifrost_clouds_yaml function
Fixed sed replacement line. | @@ -627,7 +627,7 @@ function write_bifrost_clouds_yaml {
if [[ ! -f ~/.config/openstack/clouds.yaml ]]; then
mkdir -p ~/.config/openstack
scp stack@$SEED_IP:/home/stack/.config/openstack/clouds.yaml ~/.config/openstack/clouds.yaml
- sed -i 's|/home/stack/.config/openstack/bifrost.crt|~/.config/bifrost/bifrost.crt|g' ~/.config/openstack/clouds.yaml
+ sed -i 's|/home/stack/.config/openstack/bifrost.crt|~/.config/openstack/bifrost.crt|g' ~/.config/openstack/clouds.yaml
else
echo "Not updating clouds.yaml file because it already exists at $HOME/.config/openstack/clouds.yaml. Try removing it if authentication against Bifrost fails."
fi
|
convert weights using torch.as_tensor to avoid warning
Summary:
Minor change which fixes
Pull Request resolved: | @@ -116,7 +116,7 @@ class WeightedRandomSampler(Sampler):
if not isinstance(replacement, bool):
raise ValueError("replacement should be a boolean value, but got "
"replacement={}".format(replacement))
- self.weights = torch.tensor(weights, dtype=torch.double)
+ self.weights = torch.as_tensor(weights, dtype=torch.double)
self.num_samples = num_samples
self.replacement = replacement
|
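A small standalone sketch (illustrative, not from the PR) of the warning being avoided: `torch.tensor` always copy-constructs and warns when handed an existing tensor, while `torch.as_tensor` reuses the existing storage when the dtype and device already match.

```python
import torch

weights = torch.tensor([0.1, 0.9, 0.4, 0.7], dtype=torch.double)

# torch.tensor() copy-constructs and emits a UserWarning suggesting
# clone().detach() when its input is already a tensor.
copied = torch.tensor(weights, dtype=torch.double)

# torch.as_tensor() returns the input itself here because the dtype already
# matches, so there is no warning and no extra allocation.
shared = torch.as_tensor(weights, dtype=torch.double)

print(copied.data_ptr() == weights.data_ptr())  # False: new storage
print(shared.data_ptr() == weights.data_ptr())  # True: same storage
```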
Fixes incorrect kind
Issues:
Fixes
Problem:
The kind in the SDK did not match the kind returned by iWorkflow
Analysis:
This patch fixes it
Tests: | @@ -32,12 +32,12 @@ class Iapps(Collection):
def __init__(self, templates):
super(Iapps, self).__init__(templates)
self._meta_data['required_json_kind'] = \
- 'cm:cloud:templates:iapp:iapptemplatecollectionworkerstate'
+ 'cm:cloud:templates:iapp:templatesiappcollectionworkerstate'
self._meta_data['allowed_lazy_attributes'] = [
Iapp
]
self._meta_data['attribute_registry'] = {
- 'cm:cloud:templates:iapp:iapptemplateitemstate': Iapp
+ 'cm:cloud:templates:iapp:templatesiappitemstate': Iapp
}
@@ -45,5 +45,5 @@ class Iapp(Resource):
def __init__(self, iapps):
super(Iapp, self).__init__(iapps)
self._meta_data['required_json_kind'] = \
- 'cm:cloud:templates:iapp:iapptemplateitemstate'
+ 'cm:cloud:templates:iapp:templatesiappitemstate'
self._meta_data['required_load_parameters'] = {'name', }
|
Remove autodiscovery and dmcrypt from cluster updates
remove osd autodiscovery from cluster shrink and expand
remove dmcrypt from cluster shrink and expand | @@ -702,8 +702,7 @@ tests:
ceph_stable_release: luminous
ceph_repository: rhcs
osd_scenario: collocated
- dmcrypt: True
- osd_auto_discovery: True
+ osd_auto_discovery: False
journal_size: 1024
ceph_stable: True
ceph_stable_rh_storage: True
@@ -745,8 +744,7 @@ tests:
ceph_stable_release: luminous
ceph_repository: rhcs
osd_scenario: collocated
- dmcrypt: True
- osd_auto_discovery: True
+ osd_auto_discovery: False
journal_size: 1024
ceph_stable: True
ceph_stable_rh_storage: True
@@ -791,8 +789,7 @@ tests:
ceph_stable_release: luminous
ceph_repository: rhcs
osd_scenario: collocated
- dmcrypt: True
- osd_auto_discovery: True
+ osd_auto_discovery: False
journal_size: 1024
ceph_stable: True
ceph_stable_rh_storage: True
@@ -838,8 +835,7 @@ tests:
ceph_stable_release: luminous
ceph_repository: rhcs
osd_scenario: collocated
- dmcrypt: True
- osd_auto_discovery: True
+ osd_auto_discovery: False
journal_size: 1024
ceph_stable: True
ceph_stable_rh_storage: True
|
Force pushing > multiple PRs (for the same issue)
Suggestions by | @@ -146,6 +146,7 @@ When working on your own changes, fork this repository, create a branch and subm
Give the pull request any name you like and submit it.
If there's already an open issue for your pull request, link it by including the line `fixes #[issue_id]` in the body of the pull request.
If you're not sure what to say, don't feel obligated to write more than you think you need to.
+Try to keep it to one PR at most for each issue you tackle. Don't worry about force pushing if you want to rebase your commits! As far as this project is concerned, that pre-existing branch is the definitive source of the work done to solve, so keeping it all in the same PR is best.
I'll be able to see the code and tell what issue you're trying to solve.
I'm excited to get any contributions and look forward to seeing your code integrated into my repo and published on PyPI!
Good luck!
|
Unit-test to verify behavior of chunked+insert_many.
Refs | @@ -159,6 +159,17 @@ class TestModelAPIs(ModelTestCase):
self.assertEqual(pd2['content'], 'p2')
self.assertEqual(pd2['timestamp'], ts2)
+ @requires_models(User)
+ def test_insert_many(self):
+ data = [('u%02d' % i,) for i in range(100)]
+ with self.database.atomic():
+ for chunk in chunked(data, 10):
+ User.insert_many(chunk).execute()
+
+ self.assertEqual(User.select().count(), 100)
+ names = [u.username for u in User.select().order_by(User.username)]
+ self.assertEqual(names, ['u%02d' % i for i in range(100)])
+
@requires_models(User, Tweet)
def test_create(self):
with self.assertQueryCount(1):
|
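As a quick aside (the import path is an assumption, not shown in the test above), `chunked` simply splits an iterable into lists of at most n items, which is what keeps each `insert_many` batch in the test small:

```python
from peewee import chunked  # assumed import; the test calls chunked() directly

data = [('u%02d' % i,) for i in range(25)]

batches = list(chunked(data, 10))
print([len(batch) for batch in batches])  # [10, 10, 5]
```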
Add params.* to Jenkins file parameters
* Prefix all parameters with params.* so that it checks
whether parameters exist before using them
* This is a follow-up fix on so that existing PRs work
without being re-triggered manually twice | @@ -125,13 +125,13 @@ cancel_previous_build()
stage('Prepare') {
node('CPU') {
// When something is provided in ci_*_param, use it, otherwise default with ci_*
- ci_lint = ci_lint_param ?: ci_lint
- ci_cpu = ci_cpu_param ?: ci_cpu
- ci_gpu = ci_gpu_param ?: ci_gpu
- ci_wasm = ci_wasm_param ?: ci_wasm
- ci_i386 = ci_i386_param ?: ci_i386
- ci_qemu = ci_qemu_param ?: ci_qemu
- ci_arm = ci_arm_param ?: ci_arm
+ ci_lint = params.ci_lint_param ?: ci_lint
+ ci_cpu = params.ci_cpu_param ?: ci_cpu
+ ci_gpu = params.ci_gpu_param ?: ci_gpu
+ ci_wasm = params.ci_wasm_param ?: ci_wasm
+ ci_i386 = params.ci_i386_param ?: ci_i386
+ ci_qemu = params.ci_qemu_param ?: ci_qemu
+ ci_arm = params.ci_arm_param ?: ci_arm
sh """
echo "Docker images being used in this build:"
|
Don't call get..Queue on a non-existing queue,
it failed after the latest updates | @@ -47,10 +47,6 @@ nn2.setBlobPath(str((Path(__file__).parent / Path('text-recognition-0012.blob'))
manip.out.link(nn2.input)
manip.out.link(manip_xout.input)
-#nn2_in = pipeline.createXLinkIn()
-#nn2_in.setStreamName("in_recognition")
-#nn2_in.out.link(nn2.input)
-
nn2_xout = pipeline.createXLinkOut()
nn2_xout.setStreamName("recognitions")
nn2.out.link(nn2_xout.input)
@@ -67,7 +63,6 @@ def to_tensor_result(packet):
q_prev = device.getOutputQueue("preview")
# This should be set to block, but would get to some extreme queuing/latency!
q_det = device.getOutputQueue("detections", 1, blocking=False)
-q_rec_in = device.getInputQueue("in_recognition")
q_rec = device.getOutputQueue("recognitions")
q_manip_img = device.getInputQueue("manip_img")
|
5xx: Change min-height to reflect new footer.
The min-height for the error pages was not updated to reflect the
height of the new footer, so this updates the value and makes it a
non-scrolling page in most browsers again. | @@ -1752,7 +1752,8 @@ input.new-organization-button {
}
.error_page {
- min-height: calc(100vh - 64px);
+ padding: 20px 0px;
+ min-height: calc(100vh - 290px);
background-color: #c9e9e0;
font-family: 'Source Sans Pro', Helvetica, Arial, sans-serif;
}
|
Guard against zero-length permutations in IndexedArray
Fixes | @@ -43,6 +43,8 @@ class IndexedArray(awkward.array.base.AwkwardArrayWithContent):
@classmethod
def invert(cls, permutation):
+ if permutation.size == 0:
+ return cls.numpy.zeros(0, dtype=cls.IndexedArray.fget(None).INDEXTYPE)
permutation = permutation.reshape(-1)
out = cls.numpy.zeros(permutation.max() + 1, dtype=cls.IndexedArray.fget(None).INDEXTYPE)
identity = cls.numpy.arange(len(permutation))
|
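For context (not part of the patch), the failure mode the guard avoids: NumPy's maximum reduction has no identity, so `permutation.max()` raises on a zero-length input, and the guard short-circuits with an empty index array instead.

```python
import numpy as np

empty = np.array([], dtype=np.int64)

try:
    empty.max()
except ValueError as exc:
    # "zero-size array to reduction operation maximum which has no identity"
    print(exc)

print(np.zeros(0, dtype=np.int64))  # the guarded return value: array([], dtype=int64)
```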
Reorganize orchestration docs
This change organizes the orchestration docs by topic rather than
letting autodoc organize methods by the order they appear in the
_proxy.py file. | @@ -13,5 +13,38 @@ The orchestration high-level interface is available through the
object. The ``orchestration`` member will only be added if the service
is detected.
+Stack Operations
+^^^^^^^^^^^^^^^^
+
+.. autoclass:: openstack.orchestration.v1._proxy.Proxy
+
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.create_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.check_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.update_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.delete_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.find_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.get_stack
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.stacks
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.validate_template
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.resources
+
+Software Configuration Operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
.. autoclass:: openstack.orchestration.v1._proxy.Proxy
- :members:
+
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.create_software_config
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.delete_software_config
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.get_software_config
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.software_configs
+
+Software Deployment Operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: openstack.orchestration.v1._proxy.Proxy
+
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.create_software_deployment
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.update_software_deployment
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.delete_software_deployment
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.get_software_deployment
+ .. automethod:: openstack.orchestration.v1._proxy.Proxy.software_deployments
|
fixing value error and documentation in blob.py
Closing this. should cover the click issues. | @@ -57,7 +57,7 @@ class BlobDetector(SpotFinderAlgorithmBase):
measurement_type : str ['max', 'mean']
name of the function used to calculate the intensity for each identified spot area
detector_method: str ['blob_dog', 'blob_doh', 'blob_log']
- name of the type of detection method used from skimage.feature
+ name of the type of detection method used from skimage.feature, default: blob_log
Notes
-----
@@ -81,7 +81,7 @@ class BlobDetector(SpotFinderAlgorithmBase):
try:
self.detector_method = blob_detectors[detector_method]
except ValueError:
- "Detector method must be one of {blob_log, blob_dog, blob_doh}"
+ raise ValueError("Detector method must be one of {blob_log, blob_dog, blob_doh}")
def image_to_spots(self, data_image: Union[np.ndarray, xr.DataArray]) -> SpotAttributes:
"""
@@ -178,7 +178,7 @@ class BlobDetector(SpotFinderAlgorithmBase):
@click.option(
"--detector_method", default='blob_log',
help="str ['blob_dog', 'blob_doh', 'blob_log'] name of the type of "
- "detection method used from skimage.feature"
+ "detection method used from skimage.feature. Default: blob_log"
)
@click.pass_context
def _cli(ctx, min_sigma, max_sigma, num_sigma, threshold, overlap, show, detector_method):
|
Only send articles to user
Added the "kind": "article" parameter to the API request. This should
only return articles to the user instead of courses, lessons
or quizzes.
Also updated the "Here are the top x results" message to handle a
variable number of articles.
[Ticket: python-discord#828] | @@ -33,7 +33,7 @@ class RealPython(commands.Cog):
@commands.cooldown(1, 10, commands.cooldowns.BucketType.user)
async def realpython(self, ctx: commands.Context, *, user_search: str) -> None:
"""Send 5 articles that match the user's search terms."""
- params = {"q": user_search, "limit": 5}
+ params = {"q": user_search, "limit": 5, "kind": "article"}
async with self.bot.http_session.get(url=API_ROOT, params=params) as response:
if response.status != 200:
logger.error(
@@ -56,7 +56,7 @@ class RealPython(commands.Cog):
article_embed = Embed(
title="Search results - Real Python",
url=SEARCH_URL.format(user_search=quote_plus(user_search)),
- description="Here are the top 5 results:",
+ description=f"Here are the top {max(5, len(articles))} results:",
color=Colours.orange,
)
|
scripts: Don't terminate current session in terminate-psql-sessions.
This is a prep commit. Running the terminate-psql-sessions command on
docker-zulip results in the script exiting with non-zero exit status
2. This is because the current session also gets terminated while
running the terminate-psql-sessions command. To prevent that from happening,
we don't terminate the session created by terminate-psql-sessions. | @@ -32,5 +32,6 @@ SELECT pg_terminate_backend(s.pid)
WHERE
s.datname IN ($tables)
AND r.rolname = CURRENT_USER
- AND (s.usename = r.rolname OR r.rolsuper = 't');
+ AND (s.usename = r.rolname OR r.rolsuper = 't')
+ AND s.pid <> pg_backend_pid();
EOF
|
Update Dockerfile to include mysqlclient
cc: | FROM python:3.9-slim-buster as builder
COPY requirements.txt /tmp
-RUN apt update && apt install -y build-essential libpq-dev
+RUN apt update && apt install -y build-essential libpq-dev libmariadb-dev
RUN \
if [ `dpkg --print-architecture` = "armhf" ]; then \
printf "[global]\nextra-index-url=https://www.piwheels.org/simple\n" > /etc/pip.conf ; \
fi
-RUN pip wheel --wheel-dir /wheels apprise uwsgi -r /tmp/requirements.txt
+RUN pip wheel --wheel-dir /wheels apprise uwsgi mysqlclient -r /tmp/requirements.txt
FROM python:3.9-slim-buster
@@ -20,7 +20,7 @@ COPY --from=builder /wheels /wheels
RUN \
apt update && \
- apt install -y libpq5 && \
+ apt install -y libpq5 libmariadb3 && \
rm -rf /var/apt/cache
RUN pip install --no-cache /wheels/*
|
Tidy up an error message
`Sorry` is an exception, not a `logging` method. With `logging` methods, you can pass a tuple of strings and `strings[1:]` are evaluated to populate any string substitutions in `strings[0]`. Exception values don't do that. | @@ -299,9 +299,9 @@ def prepare_input(params, experiments, reflections):
"""The experiments have different space groups:
space group numbers found: %s
Please reanalyse the data so that space groups are consistent,
- (consider using dials.reindex, dials.symmetry or dials.cosym)
- or remove incompatible experiments (using the option exclude_datasets=)""",
- set(sgs),
+ (consider using dials.reindex, dials.symmetry or dials.cosym) or
+ remove incompatible experiments (using the option exclude_datasets=)"""
+ % ", ".join(map(str, set(sgs)))
)
logger.info(
"Space group being used during scaling is %s",
|
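A minimal sketch (illustrative values only) of the distinction that message draws: `logging` calls defer %-substitution until the record is emitted, while an exception's message must be formatted before the exception is constructed.

```python
import logging

logger = logging.getLogger(__name__)
sgs = {19, 4}

# Logging methods accept lazy %-style arguments: the extra positional args
# fill the placeholders in the first argument when the record is emitted.
logger.info("space group numbers found: %s", ", ".join(map(str, sgs)))

# An exception value does no such substitution, so the string has to be
# formatted up front.
error = ValueError("space group numbers found: %s" % ", ".join(map(str, sgs)))
```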
hiero: otio p3 compatibility issue - metadata on effect use update
rather than __setter__ | @@ -132,7 +132,7 @@ def create_time_effects(otio_clip, track_item):
otio_effect = otio.schema.TimeEffect()
otio_effect.name = name
otio_effect.effect_name = effect_name
- otio_effect.metadata = metadata
+ otio_effect.metadata.update(metadata)
# add otio effect to clip effects
otio_clip.effects.append(otio_effect)
|
Document let option for aggregate
Document $out/$merge usage for aggregate | @@ -547,6 +547,11 @@ class AgnosticDatabase(AgnosticBaseProperties):
returning aggregate results using a cursor.
- `collation` (optional): An instance of
:class:`~pymongo.collation.Collation`.
+ - `let` (dict): A dict of parameter names and values. Values must be
+ constant or closed expressions that do not reference document
+ fields. Parameters can then be accessed as variables in an
+ aggregate expression context (e.g. ``"$$var"``). This option is
+ only supported on MongoDB >= 5.0.
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
@@ -827,6 +832,26 @@ class AgnosticCollection(AgnosticBaseProperties):
:meth:`~MotorClient.start_session`.
- `**kwargs`: send arbitrary parameters to the aggregate command
+ All optional `aggregate command`_ parameters should be passed as
+ keyword arguments to this method. Valid options include, but are not
+ limited to:
+
+ - `allowDiskUse` (bool): Enables writing to temporary files. When set
+ to True, aggregation stages can write data to the _tmp subdirectory
+ of the --dbpath directory. The default is False.
+ - `maxTimeMS` (int): The maximum amount of time to allow the operation
+ to run in milliseconds.
+ - `batchSize` (int): The maximum number of documents to return per
+ batch. Ignored if the connected mongod or mongos does not support
+ returning aggregate results using a cursor.
+ - `collation` (optional): An instance of
+ :class:`~pymongo.collation.Collation`.
+ - `let` (dict): A dict of parameter names and values. Values must be
+ constant or closed expressions that do not reference document
+ fields. Parameters can then be accessed as variables in an
+ aggregate expression context (e.g. ``"$$var"``). This option is
+ only supported on MongoDB >= 5.0.
+
Returns a :class:`MotorCommandCursor` that can be iterated like a
cursor from :meth:`find`::
@@ -835,6 +860,20 @@ class AgnosticCollection(AgnosticBaseProperties):
async for doc in collection.aggregate(pipeline):
print(doc)
+ Note that this method returns a :class:`MotorCommandCursor` which
+ lazily runs the aggregate command when first iterated. In order to run
+ an aggregation with ``$out`` or ``$merge`` the application needs to
+ iterate the cursor, for example::
+
+ cursor = motor_coll.aggregate([{'$out': 'out'}])
+ # Iterate the cursor to run the $out (or $merge) operation.
+ await cursor.to_list(length=None)
+ # Or more succinctly:
+ await motor_coll.aggregate([{'$out': 'out'}]).to_list(length=None)
+ # Or:
+ async for _ in motor_coll.aggregate([{'$out': 'out'}]):
+ pass
+
:class:`MotorCommandCursor` does not allow the ``explain`` option. To
explain MongoDB's query plan for the aggregation, use
:meth:`MotorDatabase.command`::
|
2.3.2
Automatically generated by python-semantic-release | @@ -9,7 +9,7 @@ https://community.home-assistant.io/t/echo-devices-alexa-as-media-player-testers
"""
from datetime import timedelta
-__version__ = '2.3.1'
+__version__ = '2.3.2'
PROJECT_URL = "https://github.com/custom-components/alexa_media_player/"
ISSUE_URL = "{}issues".format(PROJECT_URL)
|
Add metric conversion to bearing elements from table
The conversion to metric when bearings were instantiated from a table was
left out by mistake. I'm adding it now. | @@ -208,11 +208,21 @@ def read_table_file(file, element, sheet_name=0, n=0, sheet_type="Model"):
parameters['material'] = new_material
if convert_to_metric:
for i in range(0, df.shape[0]):
+ if element == 'bearing':
+ parameters['kxx'][i] = parameters['kxx'][i] * 175.1268369864
+ parameters['cxx'][i] = parameters['cxx'][i] * 175.1268369864
+ parameters['kyy'][i] = parameters['kyy'][i] * 175.1268369864
+ parameters['kxy'][i] = parameters['kxy'][i] * 175.1268369864
+ parameters['kyx'][i] = parameters['kyx'][i] * 175.1268369864
+ parameters['cyy'][i] = parameters['cyy'][i] * 175.1268369864
+ parameters['cxy'][i] = parameters['cxy'][i] * 175.1268369864
+ parameters['cyx'][i] = parameters['cyx'][i] * 175.1268369864
+ parameters['w'][i] = parameters['w'][i] * 0.1047197551197
if element == 'shaft':
parameters['L'][i] = parameters['L'][i] * 0.0254
parameters['i_d'][i] = parameters['i_d'][i] * 0.0254
parameters['o_d'][i] = parameters['o_d'][i] * 0.0254
- parameters['axial_force'][i] = parameters['axial_force'][i] * 4.44822161
+ parameters['axial_force'][i] = parameters['axial_force'][i] * 4.448221615255
elif element == 'disk':
parameters['m'][i] = parameters['m'][i] * 0.45359237
parameters['Id'][i] = parameters['Id'][i] * 0.0002926397
|
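A short worked check of where the constants in the diff above come from, assuming the table columns are in imperial units (lbf/in for stiffness and damping coefficients, RPM for speed, lbf for force):

import math

LBF_TO_NEWTON = 4.448221615255   # 1 lbf in newtons
INCH_TO_METRE = 0.0254           # 1 inch in metres

stiffness_factor = LBF_TO_NEWTON / INCH_TO_METRE   # lbf/in -> N/m, ~175.1268
speed_factor = 2 * math.pi / 60                    # RPM -> rad/s, ~0.1047197551197

print(round(stiffness_factor, 4), round(speed_factor, 13))

These agree, to the precision shown, with the factors applied to the stiffness/damping coefficients and to `w` in the diff, and 4.448221615255 is the same force factor used for `axial_force`.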
Changelog for 0.7.3
Summary:
---
Pull Request resolved: | The release log for BoTorch.
+## [0.7.3] - Nov 10, 2022
+
+### Highlights
+* #1454 fixes a critical bug that affected multi-output `BatchedMultiOutputGPyTorchModel`s that were using a `Normalize` or `InputStandardize` input transform and trained using `fit_gpytorch_model/mll` with `sequential=True` (which was the default until 0.7.3). The input transform buffers would be reset after model training, leading to the model being trained on normalized input data but evaluated on raw inputs. This bug had been affecting model fits since the 0.6.5 release.
+* #1479 changes the inheritance structure of `Model`s in a backwards-incompatible way. If your code relies on `isinstance` checks with BoTorch `Model`s, especially `SingleTaskGP`, you should revisit these checks to make sure they still work as expected.
+
+#### Compatibility
+* Require linear_operator == 0.2.0 (#1491).
+
+#### New Features
+* Introduce `bvn`, `MVNXPB`, `TruncatedMultivariateNormal`, and `UnifiedSkewNormal` classes / methods (#1394, #1408).
+* Introduce `AffineInputTransform` (#1461).
+* Introduce a `subset_transform` decorator to consolidate subsetting of inputs in input transforms (#1468).
+
+#### Other Changes
+* Add a warning when using float dtype (#1193).
+* Let Pyre know that `AcquisitionFunction.model` is a `Model` (#1216).
+* Remove custom `BlockDiagLazyTensor` logic when using `Standardize` (#1414).
+* Expose `_aug_batch_shape` in `SaasFullyBayesianSingleTaskGP` (#1448).
+* Adjust `PairwiseGP` `ScaleKernel` prior (#1460).
+* Pull out `fantasize` method into a `FantasizeMixin` class, so it isn't so widely inherited (#1462, #1479).
+* Don't use Pyro JIT by default , since it was causing a memory leak (#1474).
+* Use `get_default_partitioning_alpha` for NEHVI input constructor (#1481).
+
+#### Bug Fixes
+* Fix `batch_shape` property of `ModelListGPyTorchModel` (#1441).
+* Tutorial fixes (#1446, #1475).
+* Bug-fix for Proximal acquisition function wrapper for negative base acquisition functions (#1447).
+* Handle `RuntimeError` due to constraint violation while sampling from priors (#1451).
+* Fix bug in model list with output indices (#1453).
+* Fix input transform bug when sequentially training a `BatchedMultiOutputGPyTorchModel` (#1454).
+* Fix a bug in `_fit_multioutput_independent` that failed mll comparison (#1455).
+* Fix box decomposition behavior with empty or None `Y` (#1489).
+
+
## [0.7.2] - Sep 27, 2022
#### New Features
|
bugfix: avoid division by zero
If the points are coplanar, points.std(axis=0) will result in a zero value in points_std | @@ -249,6 +249,7 @@ def k_means(points, k, **kwargs):
points = np.asanyarray(points, dtype=np.float64)
points_std = points.std(axis=0)
+ points_std[points_std == 0] = 1
whitened = points / points_std
centroids_whitened, distortion = kmeans(whitened, k, **kwargs)
centroids = centroids_whitened * points_std
|
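A tiny reproduction of the failure mode described in the commit above, with made-up coplanar points, and the same guard applied before whitening:

import numpy as np

points = np.array([[0.0, 0.0, 1.0],
                   [1.0, 0.0, 1.0],
                   [0.0, 1.0, 1.0]])   # every z coordinate is 1.0

points_std = points.std(axis=0)        # -> [0.471..., 0.471..., 0.0]
points_std[points_std == 0] = 1        # avoid dividing by zero when whitening
whitened = points / points_std         # safe now; this is what gets clustered
print(points_std, whitened, sep="\n")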
Improve type annotations related to TransformerChains
With this, mypy will raise an error if the code attempts to use __mul__ when the
left-hand side does not produce a Tree | -from typing import TypeVar, Tuple, List, Callable, Generic, Type, Union, Optional
+from typing import TypeVar, Tuple, List, Callable, Generic, Type, Union, Optional, cast
from abc import ABC
from functools import wraps
@@ -11,7 +11,9 @@ from .lexer import Token
from inspect import getmembers, getmro
_Return_T = TypeVar('_Return_T')
+_Return_V = TypeVar('_Return_V')
_Leaf_T = TypeVar('_Leaf_T')
+_Leaf_U = TypeVar('_Leaf_U')
_R = TypeVar('_R')
_FUNC = Callable[..., _Return_T]
_DECORATED = Union[_FUNC, type]
@@ -136,7 +138,7 @@ class Transformer(_Decoratable, ABC, Generic[_Return_T, _Leaf_T]):
"Transform the given tree, and return the final result"
return self._transform_tree(tree)
- def __mul__(self, other: 'Transformer[_Return_T, _Leaf_T]') -> 'TransformerChain[_Return_T, _Leaf_T]':
+ def __mul__(self: 'Transformer[Tree[_Leaf_U], _Leaf_T]', other: 'Transformer[_Return_V, _Leaf_U]') -> 'TransformerChain[_Return_V, _Leaf_T]':
"""Chain two transformers together, returning a new transformer.
"""
return TransformerChain(self, other)
@@ -158,17 +160,17 @@ class Transformer(_Decoratable, ABC, Generic[_Return_T, _Leaf_T]):
class TransformerChain(Generic[_Return_T, _Leaf_T]):
- transformers: Tuple[Transformer[_Return_T, _Leaf_T], ...]
+ transformers: Tuple[Transformer, ...]
- def __init__(self, *transformers: Transformer[_Return_T, _Leaf_T]) -> None:
+ def __init__(self, *transformers: Transformer) -> None:
self.transformers = transformers
def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:
for t in self.transformers:
tree = t.transform(tree)
- return tree
+ return cast(_Return_T, tree)
- def __mul__(self, other: Transformer[_Return_T, _Leaf_T]) -> 'TransformerChain[_Return_T, _Leaf_T]':
+ def __mul__(self: 'TransformerChain[Tree[_Leaf_U], _Leaf_T]', other: Transformer[_Return_V, _Leaf_U]) -> 'TransformerChain[_Return_V, _Leaf_T]':
return TransformerChain(*self.transformers + (other,))
|
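The commit above constrains `__mul__` so that mypy rejects chains whose left-hand side does not produce a `Tree`. A stripped-down illustration of how mypy matches chained input/output types (a toy `Step` class, not Lark's real API):

from typing import Generic, TypeVar

A = TypeVar("A")
B = TypeVar("B")
C = TypeVar("C")

class Step(Generic[A, B]):
    """Consumes values of type A and produces values of type B."""

    def __mul__(self, other: "Step[B, C]") -> "Step[A, C]":
        # chaining only type-checks when this step's output type B matches
        # the next step's input type
        return Step()

int_to_str: Step[int, str] = Step()
str_to_bytes: Step[str, bytes] = Step()

ok = int_to_str * str_to_bytes       # inferred as Step[int, bytes]
# bad = str_to_bytes * str_to_bytes  # mypy error: output bytes != input str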
Update Dockerfile to use pip-installed Grow. Note that the git URL
should be pinned to a release; however, it is left unpinned for now to test
the Docker workflow. | FROM ubuntu
MAINTAINER Grow SDK Authors <[email protected]>
-
-RUN apt-get update && apt-get install -y python python-pip git curl nodejs npm
+RUN apt-get update && \
+ apt-get install -y \
+ python \
+ python-pip \
+ libyaml-dev \
+ git \
+ nodejs \
+ npm
+RUN ln -s /usr/bin/nodejs /usr/bin/node
RUN npm install -g bower
RUN npm install -g gulp
-RUN \
- URL=https://raw.github.com/grow/grow/master/install.py \
- && scratch=$(mktemp -d -t tmp.0.0.46.XXXXXXXXX) || exit \
- && script_file=$scratch/install_growsdk.py \
- && curl -fsSL $URL > $script_file || exit \
- && chmod 775 $script_file \
- && python $script_file --force
+RUN echo "{ \"allow_root\": true }" > $HOME/.bowerrc
+RUN pip install git+git://github.com/grow/grow.git
+ENTRYPOINT ["grow"]
|
Update v_generate_tbl_ddl.sql
updated History | @@ -42,6 +42,7 @@ History:
2016-05-24 chriz-bigdata Added support for BACKUP NO tables
2017-05-03 pvbouwel Change table & schemaname of Foreign key constraints to allow for filters
2018-01-15 pvbouwel Add QUOTE_IDENT for identifiers (schema,table and column names)
+2018-05-30 adedotua Add table_id column
**********************************************************************************************/
CREATE OR REPLACE VIEW admin.v_generate_tbl_ddl
AS
|
Split build and run of Docker container.
Split commands to get separate logs in CI. | @@ -22,6 +22,9 @@ jobs:
steps:
- uses: actions/checkout@v2
+ - name: Build Docker
+ run: |
+ make docker-qa-build PYTHON_VERSION=${{matrix.python-version}}
- name: Run Docker
run: |
.github/bump_version ./ minor > atlassian/VERSION
|
Logging: add explicit admin email sending
Use email backend specified in settings.
Send HTML, too. | @@ -464,6 +464,12 @@ LOGGING = {
'null': {
'class': 'logging.NullHandler',
},
+ 'mail_admins': {
+ 'level': 'ERROR',
+ 'class': 'django.utils.log.AdminEmailHandler',
+ 'email_backend': EMAIL_BACKEND,
+ 'include_html': True,
+ },
},
'loggers': {
# disable "Invalid HTTP_HOST" notifications
|
EmbeddingComposite accepts a find_embedding function
It also accepts an embedding_parameters dict to provide missing
arguments to find_embedding | # See the License for the specific language governing permissions and
# limitations under the License.
#
-# ================================================================================================
+# =============================================================================
import unittest
import warnings
@@ -153,6 +153,43 @@ class TestEmbeddingComposite(unittest.TestCase):
__, kwargs = mock_unembed.call_args
self.assertEqual(kwargs['chain_break_method'], chain_breaks.discard)
+ def test_find_embedding_kwarg(self):
+ child = dimod.StructureComposite(dimod.NullSampler(), [0, 1], [(0, 1)])
+
+ def my_find_embedding(S, T):
+ # does nothing
+ return {v: [v] for v in set().union(*S)}
+
+ sampler = EmbeddingComposite(child, find_embedding=my_find_embedding)
+
+ # nothing breaks
+ sampler.sample_ising({0: -1}, {})
+
+ def test_embedding_parameters_construction(self):
+ child = dimod.StructureComposite(dimod.NullSampler(), [0, 1], [(0, 1)])
+
+ def my_find_embedding(S, T, a):
+ assert a == -1
+ return {v: [v] for v in set().union(*S)}
+
+ sampler = EmbeddingComposite(child, find_embedding=my_find_embedding,
+ embedding_parameters={'a': -1})
+
+ # nothing breaks
+ sampler.sample_ising({0: -1}, {})
+
+ def test_embedding_parameters_sample(self):
+ child = dimod.StructureComposite(dimod.NullSampler(), [0, 1], [(0, 1)])
+
+ def my_find_embedding(S, T, a):
+ assert a == -1
+ return {v: [v] for v in set().union(*S)}
+
+ sampler = EmbeddingComposite(child, find_embedding=my_find_embedding)
+
+ # nothing breaks
+ sampler.sample_ising({0: -1}, {}, embedding_parameters={'a': -1})
+
class TestFixedEmbeddingComposite(unittest.TestCase):
def test_without_embedding_and_adjacency(self):
|
Added error handling
Used try/except to add error handling for invalid file names | -"""Get the number of each character in any given text.
+"""Get the number of each character in any given text.
Inputs:
A txt file -- You will be asked for an input file. Simply input the name
of the txt file in which you have the desired text.
-
"""
import pprint
@@ -13,12 +12,16 @@ import collections
def main():
file_input = input('File Name: ')
-
+ try:
with open(file_input, 'r') as info:
count = collections.Counter(info.read().upper())
+ except FileNotFoundError:
+ print("Please enter a valid file name.")
+ main()
value = pprint.pformat(count)
print(value)
+ exit()
if __name__ == "__main__":
|
[Stress Tester XFails] Update XFails
was previously shadowed.
was introduced by | "main"
],
"issueUrl" : "https://bugs.swift.org/browse/SR-14627"
+ },
+ {
+ "path" : "*\/Dollar\/Sources\/Dollar.swift",
+ "issueDetail" : {
+ "kind" : "codeComplete",
+ "offset" : 5654
+ },
+ "applicableConfigs" : [
+ "main"
+ ],
+ "issueUrl" : "https://bugs.swift.org/browse/SR-14636"
+ },
+ {
+ "path" : "*\/Dollar\/Sources\/Dollar.swift",
+ "issueDetail" : {
+ "kind" : "codeComplete",
+ "offset" : 5654
+ },
+ "applicableConfigs" : [
+ "main"
+ ],
+ "issueUrl" : "https://bugs.swift.org/browse/SR-14637"
+ },
+ {
+ "path" : "*\/ACHNBrowserUI\/ACHNBrowserUI\/ACHNBrowserUI\/SceneDelegate.swift",
+ "issueDetail" : {
+ "kind" : "semanticRefactoring",
+ "refactoring" : "Convert Function to Async",
+ "offset" : 355
+ },
+ "applicableConfigs" : [
+ "main"
+ ],
+ "issueUrl" : "https://bugs.swift.org/browse/SR-14637"
}
]
|
Update __init__.py
version bump | @@ -5,7 +5,7 @@ from pathlib import Path
bl_info = {
"name": "Source Engine model(.mdl, .vvd, .vtx)",
"author": "RED_EYE",
- "version": (3, 3),
+ "version": (3, 3, 4),
"blender": (2, 80, 0),
"location": "File > Import-Export > SourceEngine MDL (.mdl, .vvd, .vtx) ",
"description": "Addon allows to import Source Engine models",
|
Ensure trio-based components don't get a double SIGINT
Our trio-based components are still running in the same process group
as the main process, but we were also attempting to forcibly kill
them, so they'd get a double SIGINT, causing them to raise a
KeyboardInterrupt when handling the first SIGINT. This fixes that. | @@ -7,8 +7,7 @@ from async_service import background_trio_service
from lahja import EndpointAPI
-from trinity._utils.ipc import kill_process_gracefully
-from trinity._utils.logging import child_process_logging, get_logger
+from trinity._utils.logging import child_process_logging
from trinity._utils.mp import ctx
from trinity._utils.profiling import profiler
from trinity.boot_info import BootInfo
@@ -42,10 +41,14 @@ class TrioIsolatedComponent(BaseIsolatedComponent):
try:
await loop.run_in_executor(None, process.join)
finally:
- kill_process_gracefully(
- process,
- get_logger('trinity.extensibility.TrioIsolatedComponent'),
- )
+ # XXX: Disabling this for now as our trio-based components currently run in the same
+ # process group (see comment above) as the main process and because of that they
+ # already get a SIGINT from the terminal.
+ # kill_process_gracefully(
+ # process,
+ # get_logger('trinity.extensibility.TrioIsolatedComponent'),
+ # )
+ pass
@classmethod
def run_process(cls, boot_info: BootInfo) -> None:
|
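A standalone illustration of the signal behaviour the commit above relies on, assuming a Unix-like system with a `sleep` binary (this is not Trinity code):

import signal
import subprocess

# A child started normally stays in the parent's process group, so a Ctrl-C
# in the terminal already delivers SIGINT to it; forwarding the signal again
# from the parent would make it arrive twice.
same_group = subprocess.Popen(["sleep", "30"])

# A child placed in its own session does not receive the terminal's SIGINT,
# so the parent must forward it explicitly.
own_session = subprocess.Popen(["sleep", "30"], start_new_session=True)
own_session.send_signal(signal.SIGINT)

print("own_session exit code:", own_session.wait())  # negative => killed by signal
same_group.terminate()
same_group.wait()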
Add Urdu to languages for testing purposes.
Amend Changelog. | @@ -5,6 +5,12 @@ Release Notes
Changes are ordered reverse-chronologically.
+0.6
+---
+
+ - Add support for RTL languages
+
+
0.5
---
|
Arnold ShaderMenu: Improve naming of nodes and light locations
Follow the camel-case convention for other newly-created nodes.
Name light locations to match the type of the light. | @@ -63,6 +63,7 @@ def appendShaders( menuDefinition, prefix="/Arnold" ) :
nodeEntry = arnold.AiNodeEntryIteratorGetNext( it )
shaderName = arnold.AiNodeEntryGetName( nodeEntry )
displayName = " ".join( [ IECore.CamelCase.toSpaced( x ) for x in shaderName.split( "_" ) ] )
+ nodeName = displayName.replace( " ", "" )
category = __aiMetadataGetStr( nodeEntry, "", "gaffer.nodeMenu.category" )
if category == "" :
@@ -71,13 +72,13 @@ def appendShaders( menuDefinition, prefix="/Arnold" ) :
if arnold.AiNodeEntryGetType( nodeEntry ) == arnold.AI_NODE_SHADER :
menuPath = "Shader"
if shaderName == "light_blocker" :
- nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLightFilter )
+ nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLightFilter, nodeName )
else :
- nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldShader )
+ nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldShader, nodeName )
else :
menuPath = "Light"
if shaderName != "mesh_light" :
- nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLight )
+ nodeCreator = functools.partial( __shaderCreator, shaderName, GafferArnold.ArnoldLight, nodeName )
else :
nodeCreator = GafferArnold.ArnoldMeshLight
@@ -112,11 +113,15 @@ def appendShaders( menuDefinition, prefix="/Arnold" ) :
}
)
-def __shaderCreator( name, nodeType ) :
+def __shaderCreator( shaderName, nodeType, nodeName ) :
- shader = nodeType( name )
- shader.loadShader( name )
- return shader
+ node = nodeType( nodeName )
+ node.loadShader( shaderName )
+
+ if isinstance( node, GafferArnold.ArnoldLight ) :
+ node["name"].setValue( nodeName[:1].lower() + nodeName[1:] )
+
+ return node
def __aiMetadataGetStr( nodeEntry, paramName, name ) :
|
Removes unused queryset argument from base method.
Stops shadowing the built-in filter function. | @@ -54,8 +54,8 @@ class BasePermissions(object):
"Override `user_can_delete_object` in your permission class before you use it."
)
- def readable_by_user_filter(self, user, queryset):
- """Applies a filter to the provided queryset, only returning items for which the user has read permission."""
+ def readable_by_user_filter(self, user):
+ """Returns a Q object that defines a filter for objects readable by this user."""
raise NotImplementedError(
"Override `readable_by_user_filter` in your permission class before you use it."
)
@@ -297,10 +297,12 @@ class PermissionsFromAll(BasePermissions):
def readable_by_user_filter(self, user):
# call each of the children permissions instances in turn, conjoining each filter
- filter = Q()
+ intersection_filter = Q()
for perm in self.perms:
- filter = filter & perm.readable_by_user_filter(user)
- return filter
+ intersection_filter = intersection_filter & perm.readable_by_user_filter(
+ user
+ )
+ return intersection_filter
# helper functions
|
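A minimal sketch of the Q-object conjunction used above; the field lookups are invented and Django must be installed, but no database access is needed to build the filter:

from django.db.models import Q

per_permission_filters = [Q(owner__username="alice"), Q(collection__public=True)]

combined = Q()
for f in per_permission_filters:
    combined = combined & f   # an object must satisfy every permission's filter

# `combined` can later be handed to SomeModel.objects.filter(combined).
print(combined)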
Update README.md
Fixed a couple typos. | @@ -12,14 +12,14 @@ remote.
## What's its purpose?
gitfs was designed to bring the full powers of git to everyone, no matter how
-little they know about versioning. A user can mount any repository and all the
-his changes will be automatically converted into commits. gitfs will also expose
+little they know about versioning. A user can mount any repository and all their
+changes will be automatically converted into commits. gitfs will also expose
the history of the branch you're currently working on by simulating snapshots of
every commit.
gitfs is useful in places where you want to keep track of all your files, but at
the same time you don't have the possibility of organizing everything into
-commits yourself. A FUSE filesystem for git repositories, with local cache
+commits yourself. A FUSE filesystem for git repositories, with local cache.
## Features
* Automatically commits changes: create, delete, update files and their metadata
|
Add max_weight parameter to CRR
Summary: Exposes the upper bound clip limit for action weights in CRR as a max_weight parameter | # Note: this files is modeled after td3_trainer.py
-import copy
import logging
from typing import List, Tuple
@@ -59,6 +58,7 @@ class DiscreteCRRTrainer(DQNTrainerBaseLightning):
beta: float = 1.0,
entropy_coeff: float = 0.0,
clip_limit: float = 10.0,
+ max_weight: float = 20.0,
) -> None:
"""
Args:
@@ -85,6 +85,7 @@ class DiscreteCRRTrainer(DQNTrainerBaseLightning):
entropy_coeff: coefficient for entropy regularization
clip_limit: threshold for importance sampling when compute entropy
regularization using offline samples
+ max_weight: the maximum possible action weight in the actor loss
Explaination of entropy regularization:
Entropy regularization punishes deterministic policy and encourages
@@ -143,6 +144,7 @@ class DiscreteCRRTrainer(DQNTrainerBaseLightning):
self.beta = beta
self.entropy_coeff = entropy_coeff
self.clip_limit = clip_limit
+ self.max_weight = max_weight
@property
def q_network(self):
@@ -253,7 +255,7 @@ class DiscreteCRRTrainer(DQNTrainerBaseLightning):
weight = torch.clamp(
((1 / self.beta) * (advantages * action).sum(dim=1, keepdim=True)).exp(),
0,
- 20.0,
+ self.max_weight,
)
# Remember: training_batch.action is in the one-hot format
logged_action_idxs = torch.argmax(action, dim=1, keepdim=True)
|
added __len__ function
* added __len__ function
Added a function to count the number of nodes in the linked list
* Updated __len__ method
used snake_case instead of camel case
* Add tests to __len__() | @@ -107,6 +107,35 @@ class LinkedList:
current = current.next
current.data = data
+ def __len__(self):
+ """
+ Return length of linked list i.e. number of nodes
+ >>> linked_list = LinkedList()
+ >>> len(linked_list)
+ 0
+ >>> linked_list.insert_tail("head")
+ >>> len(linked_list)
+ 1
+ >>> linked_list.insert_head("head")
+ >>> len(linked_list)
+ 2
+ >>> _ = linked_list.delete_tail()
+ >>> len(linked_list)
+ 1
+ >>> _ = linked_list.delete_head()
+ >>> len(linked_list)
+ 0
+ """
+ if not self.head:
+ return 0
+
+ count = 0
+ cur_node = self.head
+ while cur_node.next:
+ count += 1
+ cur_node = cur_node.next
+ return count + 1
+
def main():
A = LinkedList()
@@ -135,6 +164,7 @@ def main():
A[1] = input("Enter New Value: ").strip()
print("New list:")
print(A)
+ print(f"length of A is : {len(A)}")
if __name__ == "__main__":
|
add failing testcase
Parametrizes the app fixture to supply apps with and without extra-flags. Not the smallest possible testcase but it does the trick for now | @@ -159,17 +159,20 @@ def output_path(tmp_path):
return output_path
[email protected](params=[None, ["FLAG"]])
+def flag(request):
+ return request.param
+
@pytest.fixture
-def app(output_path, project_path):
+def app(output_path, project_path, flag):
project = Project.from_path(project_path)
env = Environment(project, load_plugins=False)
- lektor_info = LektorInfo(env, output_path)
+ lektor_info = LektorInfo(env, output_path, extra_flags=flag)
app = LektorApp(lektor_info)
app.register_blueprint(serve.bp, url_prefix="/")
app.add_url_rule("/ADMIN/EDIT", "url.edit", build_only=True)
return app
-
################################################################
|
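The fixture-parametrization pattern used in the commit above, reduced to a self-contained sketch with made-up names (every test that depends on `app` runs once per `flag` value):

import pytest

@pytest.fixture(params=[None, ["FLAG"]])
def flag(request):
    return request.param

@pytest.fixture
def app(flag):
    # stand-in for building the real application with optional extra flags
    return {"extra_flags": flag}

def test_app_flags(app):
    assert app["extra_flags"] in (None, ["FLAG"])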
Change Alpine install command
Use gdk-pixbuf-dev instead of gdk-pixbuf | @@ -159,7 +159,7 @@ For Alpine Linux 3.6 or newer:
.. code-block:: sh
- apk --update --upgrade add gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf
+ apk --update --upgrade add gcc musl-dev jpeg-dev zlib-dev libffi-dev cairo-dev pango-dev gdk-pixbuf-dev
.. _macos:
|
Display model file name and path in header
Just like Glade does. | @@ -37,6 +37,8 @@ from gaphor.ui.toolbox import Toolbox
log = logging.getLogger(__name__)
+HOME = str(Path.home())
+
class RecentFilesMenu(Gio.Menu):
def __init__(self, recent_manager):
@@ -50,11 +52,10 @@ class RecentFilesMenu(Gio.Menu):
def _on_recent_manager_changed(self, recent_manager):
self.remove_all()
- home = str(Path.home())
for item in recent_manager.get_items():
if APPLICATION_ID in item.get_applications():
menu_item = Gio.MenuItem.new(
- item.get_uri_display().replace(home, "~"), None
+ item.get_uri_display().replace(HOME, "~"), None
)
self.append_item(menu_item)
if self.get_n_items() > 9:
@@ -283,11 +284,17 @@ class MainWindow(Service, ActionProvider):
Sets the window title.
"""
if self.window:
+ if self.filename:
+ p = Path(self.filename)
+ title = p.name
+ subtitle = str(p.parent).replace(HOME, "~")
+ else:
title = self.title
+ subtitle = ""
if self.model_changed:
- title += " [edited]"
+ title += _(" [edited]")
self.window.set_title(title)
- self.window.get_titlebar().set_subtitle(self.filename or None)
+ self.window.get_titlebar().set_subtitle(subtitle)
# Signal callbacks:
|
[ci] Add second bazel mirror
Builds are currently failing because `mirror.bazel.build`'s SSL certificate expired. This PR adds another bazel mirror to avoid this problem.
Builds are still failing because explicitly lists `mirror.bazel.build`. | @@ -37,7 +37,7 @@ def auto_http_archive(
If strip_prefix == True , it is auto-deduced.
"""
DOUBLE_SUFFIXES_LOWERCASE = [("tar", "bz2"), ("tar", "gz"), ("tar", "xz")]
- mirror_prefixes = ["https://mirror.bazel.build/"]
+ mirror_prefixes = ["https://mirror.bazel.build/", "https://storage.googleapis.com/bazel-mirror"]
canonical_url = url if url != None else urls[0]
url_parts = urlsplit(canonical_url)
|
Update EPS_Screen.kv
Adjustment of Value/Units definition. | ##----------------------------------------------------------------------
Label:
id: angle_label
- pos_hint: {"center_x": 0.5, "center_y": 0.17}
+ pos_hint: {"center_x": 0.5, "center_y": 0.19}
text: 'Angle = deg'
markup: True
color: 1,1,1
font_size: 20
Label:
id: current_label
- pos_hint: {"center_x": 0.5, "center_y": 0.12}
+ pos_hint: {"center_x": 0.5, "center_y": 0.14}
text: 'Current = A'
markup: True
color: 1,1,1
font_size: 20
Label:
id: voltage_label
- pos_hint: {"center_x": 0.5, "center_y": 0.05}
+ pos_hint: {"center_x": 0.5, "center_y": 0.09}
text: 'Voltage = V'
markup: True
color: 1,1,1
|
Fixes bug introduced in last commit.
In particular, we shouldn't call bulk_probs for the non-parametric
bootstrap mode. | @@ -86,7 +86,9 @@ def create_bootstrap_dataset(input_data_set, generation_method, input_model=None
simDS = _obj.DataSet(outcome_labels=outcome_labels,
collision_action=input_data_set.collisionAction)
circuit_list = list(input_data_set.keys())
- probs = input_model.sim.bulk_probs(circuit_list)
+ probs = input_model.sim.bulk_probs(circuit_list) \
+ if generation_method == 'parametric' else None
+
for s in circuit_list:
nSamples = input_data_set[s].total
if generation_method == 'parametric':
|
Add large experiment for all UM fuzzers
Adding new experiment for all UM fuzzers | # Please add new experiment requests towards the top of this file.
#
+- experiment: 2022-10-06-um-full
+ description: "UM fuzzer experiment"
+ fuzzers:
+ - aflplusplus
+ - aflplusplus_um_parallel
+ - aflplusplus
+ - libfuzzer_um_prioritize
+ - libfuzzer_um_random
+ - libfuzzer_um_parallel
+ - libfuzzer
+ - afl_um_prioritize
+ - afl_um_random
+ - afl_um_parallel
+ - afl
+ - eclipser_um_prioritize
+ - eclipser_um_random
+ - eclipser_um_parallel
+ - eclipser
+ - honggfuzz_um_prioritize
+ - honggfuzz_um_random
+ - honggfuzz_um_parallel
+ - honggfuzz
+
- experiment: 2022-10-05-um-3
description: "UM fuzzer experiment"
fuzzers:
|
fix display of figures in GitHub
Figure specification for example circuits was using a wildcard
extension, which is allowed for Sphinx, but doesn't work for GitHub. | @@ -524,7 +524,7 @@ uses a pair of :math:`\pi/2`-pulses.
Quantum teleportation
---------------------
-.. figure:: _static/teleport.*
+.. figure:: _static/teleport.png
:name: teleport
Example of quantum teleportation. Qubit q[0] is prepared by
@@ -537,7 +537,7 @@ outcomes.
Quantum Fourier transform
-------------------------
-.. figure:: _static/fft4q.*
+.. figure:: _static/fft4q.png
:name: fft4q
Example of a 4-qubit quantum Fourier transform. The circuit applies
@@ -553,7 +553,7 @@ the computational basis.
Inverse QFT followed by measurement
-----------------------------------
-.. figure:: _static/ifft4q.*
+.. figure:: _static/ifft4q.png
:name: ifft4q
Example of a 4-qubit inverse quantum Fourier transform followed by
@@ -575,7 +575,7 @@ example is shown in :numref:`ifft4q`.
Ripple-carry adder
------------------
-.. figure:: _static/ripple_adder.*
+.. figure:: _static/ripple_adder.png
:name: ripple_adder
Example of a quantum ripple-carry adder from [CDKM04]_. This circuit
@@ -588,7 +588,7 @@ The ripple-carry adder [CDKM04]_ (Cuccaro et al. 2004) shown in
Randomized benchmarking
-----------------------
-.. figure:: _static/rb2q.*
+.. figure:: _static/rb2q.png
:name: rb2q
Example of a two-qubit randomized benchmarking (RB) sequence over
@@ -614,7 +614,7 @@ a result of subsequent transformations.
Quantum process tomography
--------------------------
-.. figure:: _static/tomography1q.*
+.. figure:: _static/tomography1q.png
:name: tomography1q
Example of a single-qubit quantum process tomography circuit. The
@@ -640,7 +640,7 @@ textbook QPT, the pre and post gates are both taken from the set
Quantum error-correction
------------------------
-.. figure:: _static/bit_flip_repetition_code.*
+.. figure:: _static/bit_flip_repetition_code.png
:name: bit_flip_repetition_code
Example of a quantum bit-flip repetition code. The circuit begins
|
Make Sandbox Tutorial more Intuitive
Added a code cell to display the populated datasets on a worker
Changed the initial search from boston housing tags to custom tags
Added the boston housing dataset search at a later stage | "bob"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "You can view the pre-populated datasets on a given worker by doing the following:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "bob._objects"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
"metadata": {},
"outputs": [],
"source": [
- "x = torch.tensor([1,2,3,4,5]).tag(\"#fun\", \"#boston\", \"#housing\").describe(\"The input datapoints to the boston housing dataset.\")\n",
- "y = torch.tensor([1,2,3,4,5]).tag(\"#fun\", \"#boston\", \"#housing\").describe(\"The input datapoints to the boston housing dataset.\")\n",
+ "x = torch.tensor([1,2,3,4,5]).tag(\"#radio\", \"#hospital1\").describe(\"The input datapoints to the hospital1 dataset.\")\n",
+ "y = torch.tensor([5,4,3,2,1]).tag(\"#radio\", \"#hospital2\").describe(\"The input datapoints to the hospital2 dataset.\")\n",
"z = torch.tensor([1,2,3,4,5]).tag(\"#fun\", \"#mnist\",).describe(\"The images in the MNIST training dataset.\")"
]
},
"z = z.send(bob)\n",
"\n",
"# this searches for exact match within a tag or within the description\n",
- "results = bob.search([\"#boston\", \"#housing\"])"
+ "results = bob.search([\"#radio\"])"
]
},
{
"print(results[0].description)"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Similarly, you can also search for datasets that are pre-populated on the sandbox workers."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "boston_housing_results = bob.search([\"#boston\", \"#housing\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "boston_housing_results"
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {},
|
Reintroduce error handling for pipeline export
The error handling for the case of no runtimes when exporting a
pipeline was removed in an earlier PR; this reintroduces it without reverting
the solution in that PR | @@ -587,15 +587,17 @@ const PipelineWrapper: React.FC<IProps> = ({
allowLocal: actionType === 'run'
});
- let title = `${actionType} pipeline`;
- if (type !== undefined) {
- title = `${actionType} pipeline for ${runtimeDisplayName}`;
+ let title =
+ type !== undefined
+ ? `${actionType} pipeline for ${runtimeDisplayName}`
+ : `${actionType} pipeline`;
+ if (actionType === 'export' || type !== undefined) {
if (!isRuntimeTypeAvailable(runtimeData, type)) {
const res = await RequestErrors.noMetadataError(
'runtime',
`${actionType} pipeline.`,
- runtimeDisplayName
+ type !== undefined ? runtimeDisplayName : undefined
);
if (res.button.label.includes(RUNTIMES_SCHEMASPACE)) {
|
If indices of refraction don't match between elements, it is an error
Except in the case of Space(), we raise an error if indices don't match when appending to MatrixGroup. | @@ -36,8 +36,14 @@ class MatrixGroup(Matrix):
if len(self.elements) != 0:
lastElement = self.elements[-1]
if lastElement.backIndex != matrix.frontIndex:
- msg = "Mismatch of indices between element {0} and appended {1}".format(lastElement, matrix)
+ if isinstance(matrix, Space): # For Space(), we fix it
+ msg = "Fixing mismatched indices between last element and appended Space(). Use Space(d=someDistance, n=someIndex)."
warnings.warn(msg, UserWarning)
+ matrix.frontIndex = lastElement.backIndex
+ matrix.backIndex = matrix.frontIndex
+ else:
+ msg = "Mismatch of indices between last element and appended element"
+ raise ValueError(msg)
self.elements.append(matrix)
transferMatrix = self.transferMatrix()
|
Updating the updater to use pipenv to perform grow updates.
Fixes | @@ -16,8 +16,7 @@ from grow.sdk import sdk_utils
RELEASES_API = 'https://api.github.com/repos/grow/grow/releases'
TAGS_URL_FORMAT = 'https://github.com/grow/grow/releases/tag/{}'
-INSTALLER_COMMAND = ('/usr/bin/python -c "$(curl -fsSL '
- 'https://raw.github.com/grow/grow/master/install.py)"')
+INSTALLER_COMMAND = 'pipenv install grow=={version}'
class Error(Exception):
@@ -98,6 +97,7 @@ class Updater(object):
colors.stylize(str(sem_current), colors.EMPHASIS),
colors.stylize(str(sem_latest), colors.EMPHASIS)))
+ install_command = INSTALLER_COMMAND.format(version=sem_latest)
if auto_update_prompt:
use_auto_update = grow_rc_config.get('update.always', False)
@@ -116,7 +116,7 @@ class Updater(object):
grow_rc_config.set('update.always', True)
grow_rc_config.write()
- if subprocess.call(INSTALLER_COMMAND, shell=True) == 0:
+ if subprocess.call((install_command), shell=True) == 0:
logging.info('Restarting...')
try:
# Restart on successful install.
@@ -126,13 +126,11 @@ class Updater(object):
'Unable to restart. Please manually restart grow.')
sys.exit(-1)
else:
- text = (
- 'In-place update failed. Update manually or use:\n'
- ' curl https://install.grow.io | bash')
+ text = 'In-place update failed. Update manually or use:\n {}'
+ text = text.format(install_command)
logging.error(text)
sys.exit(-1)
else:
- install_command = colors.stylize('pip install --upgrade grow', colors.CAUTION)
logging.info(' Update using: {}'.format(install_command))
logging.info('')
return True
|
downloader: hash files while they're downloading
This avoids rereading the entire file from disk once it's written. | @@ -36,12 +36,15 @@ CHUNK_SIZE = 1 << 15 if sys.stdout.isatty() else 1 << 20
def process_download(reporter, chunk_iterable, size, file):
start_time = time.monotonic()
progress_size = 0
+ hasher = hashlib.sha256()
try:
for chunk in chunk_iterable:
if chunk:
duration = time.monotonic() - start_time
progress_size += len(chunk)
+ hasher.update(chunk)
+
if duration != 0:
speed = int(progress_size / (1024 * duration))
percent = str(progress_size * 100 // size)
@@ -56,7 +59,7 @@ def process_download(reporter, chunk_iterable, size, file):
if progress_size > size:
break
- return progress_size
+ return progress_size, hasher.digest()
finally:
reporter.end_progress()
@@ -71,35 +74,29 @@ def try_download(reporter, file, num_attempts, start_download, size):
chunk_iterable = start_download()
file.seek(0)
file.truncate()
- actual_size = process_download(reporter, chunk_iterable, size, file)
+ actual_size, hash = process_download(reporter, chunk_iterable, size, file)
if actual_size > size:
reporter.log_error("Remote file is longer than expected ({} B), download aborted", size)
# no sense in retrying - if the file is longer, there's no way it'll fix itself
- return False
+ return None
elif actual_size < size:
reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)",
actual_size, size)
# it's possible that we got disconnected before receiving the full file,
# so try again
else:
- return True
+ return hash
except (requests.exceptions.RequestException, ssl.SSLError):
reporter.log_error("Download failed", exc_info=True)
- return False
-
-def verify_hash(reporter, file, expected_hash, path):
- actual_hash = hashlib.sha256()
- while True:
- chunk = file.read(1 << 20)
- if not chunk: break
- actual_hash.update(chunk)
+ return None
- if actual_hash.digest() != bytes.fromhex(expected_hash):
+def verify_hash(reporter, actual_hash, expected_hash, path):
+ if actual_hash != bytes.fromhex(expected_hash):
reporter.log_error('Hash mismatch for "{}"', path)
reporter.log_details('Expected: {}', expected_hash)
- reporter.log_details('Actual: {}', actual_hash.hexdigest())
+ reporter.log_details('Actual: {}', actual_hash.hex())
return False
return True
@@ -174,9 +171,9 @@ def try_retrieve(reporter, destination, model_file, cache, num_attempts, start_d
success = False
with destination.open('w+b') as f:
- if try_download(reporter, f, num_attempts, start_download, model_file.size):
- f.seek(0)
- if verify_hash(reporter, f, model_file.sha256, destination, name):
+ actual_hash = try_download(reporter, f, num_attempts, start_download, model_file.size)
+
+ if actual_hash and verify_hash(reporter, actual_hash, model_file.sha256, destination):
try_update_cache(reporter, cache, model_file.sha256, destination)
success = True
|
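The essence of the change above, as a free-standing helper: hash each chunk as it is written instead of re-reading the finished file (names are illustrative):

import hashlib
import io

def write_and_hash(chunks, out_file):
    """Write chunks to out_file while keeping a running SHA-256 of the bytes."""
    hasher = hashlib.sha256()
    size = 0
    for chunk in chunks:
        out_file.write(chunk)
        hasher.update(chunk)   # single pass: no second read of the file
        size += len(chunk)
    return size, hasher.digest()

buf = io.BytesIO()
size, digest = write_and_hash([b"abc", b"def"], buf)
assert size == 6 and digest == hashlib.sha256(b"abcdef").digest()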
GDB helpers: attach GDB frames to State instances
TN: | @@ -21,7 +21,14 @@ class State(object):
Holder for the execution state of a property.
"""
- def __init__(self, line_no, prop):
+ def __init__(self, frame, line_no, prop):
+ self.frame = frame
+ """
+ :type: gdb.Frame
+
+ The GDB frame from which this state was decoded.
+ """
+
self.property = prop
"""
:type: langkit.gdb.debug_info.Property
@@ -104,7 +111,7 @@ class State(object):
return None
# Create the result, add the property root scope
- result = cls(line_no, prop)
+ result = cls(frame, line_no, prop)
root_scope_state = ScopeState(result, None, prop)
result.scopes.append(root_scope_state)
|
Fix the link text
Probably a copy-and-paste error; Pascal is not relevant here. | @@ -17,7 +17,7 @@ Welcome to Sample Programs in Perl!
## References
- [Perl Wiki][4]
-- [Pascal Docs][5]
+- [Perl Docs][5]
- [Online Perl Interpreter][6]
[1]: https://therenegadecoder.com/code/hello-world-in-perl
|