message | diff
---|---|
Patch spectrum.js for jQuery 3 compatibility.
This includes removing deprecated functions
(namely bind/unbind/delegate) and fixing an aspect
of event handling that breaks in the new jQuery. | }
}
- offsetElement.bind("click.spectrum touchstart.spectrum", function (e) {
+ offsetElement.on("click.spectrum touchstart.spectrum", function (e) {
toggle();
e.stopPropagation();
// Handle user typed input
textInput.change(setFromTextInput);
- textInput.bind("paste", function () {
+ textInput.on("paste", function () {
setTimeout(setFromTextInput, 1);
});
textInput.keydown(function (e) { if (e.keyCode == 13) { setFromTextInput(); } });
- cancelButton.bind("click.spectrum", function (e) {
+ cancelButton.on("click.spectrum", function (e) {
e.stopPropagation();
e.preventDefault();
hide("cancel");
});
- chooseButton.bind("click.spectrum", function (e) {
+ chooseButton.on("click.spectrum", function (e) {
e.stopPropagation();
e.preventDefault();
}
var paletteEvent = IE ? "mousedown.spectrum" : "click.spectrum touchstart.spectrum";
- paletteContainer.delegate(".sp-thumb-el", paletteEvent, palletElementClick);
- initialColorContainer.delegate(".sp-thumb-el::nth-child(1)", paletteEvent, { ignore: true }, palletElementClick);
+ paletteContainer.on(paletteEvent, ".sp-thumb-el", palletElementClick);
+ initialColorContainer.on(paletteEvent, ".sp-thumb-el:nth-child(1)", { ignore: true }, palletElementClick);
}
function addColorToSelectionPalette(color) {
if (showSelectionPalette) {
hideAll();
visible = true;
- $(doc).bind("click.spectrum", hide);
- $(window).bind("resize.spectrum", resize);
+ $(doc).on("click.spectrum", hide);
+ $(window).on("resize.spectrum", resize);
replacer.addClass("sp-active");
container.show();
if (!visible || flat) { return; }
visible = false;
- $(doc).unbind("click.spectrum", hide);
- $(window).unbind("resize.spectrum", resize);
+ $(doc).off("click.spectrum", hide);
+ $(window).off("resize.spectrum", resize);
replacer.removeClass("sp-active");
container.hide();
function destroy() {
boundElement.show();
- offsetElement.unbind("click.spectrum touchstart.spectrum");
+ offsetElement.off("click.spectrum touchstart.spectrum");
container.remove();
replacer.remove();
spectrums[spect.id] = null;
maxWidth = $(element).width();
offset = $(element).offset();
- $(doc).bind(duringDragEvents);
+ $(doc).on(duringDragEvents);
$(doc.body).addClass("sp-dragging");
if (!hasTouch) {
}
function stop() {
if (dragging) {
- $(doc).unbind(duringDragEvents);
+ $(doc).off(duringDragEvents);
$(doc.body).removeClass("sp-dragging");
onstop.apply(element, arguments);
}
dragging = false;
}
-
- $(element).bind(hasTouch ? "touchstart" : "mousedown", start);
+ $(element).on(hasTouch ? "touchstart" : "mousedown", start);
}
function throttle(func, wait, debounce) {
|
Update table_info.sql
Fixed columns "rows", pct_stats_off, and pct_unsorted to be correct for DISTSTYLE ALL. | @@ -20,6 +20,7 @@ Notes:
History:
2015-02-16 ericfe created
2017-03-23 thiyagu Added percentage encoded column metric (pct_enc) and fixes
+2017-10-01 mscaer Fixed columns "rows", pct_stats_off, and pct_unsorted to be correct for DISTSTYLE ALL.
**********************************************************************************************/
SELECT TRIM(pgn.nspname) AS SCHEMA,
@@ -36,7 +37,7 @@ SELECT TRIM(pgn.nspname) AS SCHEMA,
) AS Skew,
det.head_sort AS "SortKey",
det.n_sortkeys AS "#SKs",
- a.rows,
+ CASE WHEN pgc.reldiststyle = 8 THEN a.rows_all_dist ELSE a.rows END AS rows,
b.mbytes,
decode(det.max_enc,
0,'N',
@@ -47,16 +48,21 @@ SELECT TRIM(pgn.nspname) AS SCHEMA,
0,0,
((b.mbytes/part.total::DECIMAL)*100)::DECIMAL(20,2)
) AS pct_of_total,
- (CASE WHEN a.rows = 0 THEN NULL ELSE ((a.rows - pgc.reltuples)::DECIMAL(20,3) / a.rows::DECIMAL(20,3)*100)::DECIMAL(20,2) END) AS pct_stats_off,
- decode( det.n_sortkeys,
- 0, NULL ,
- DECODE( a.rows,0,0, (a.unsorted_rows::DECIMAL(32)/a.rows)*100)
- ) ::DECIMAL(20,2) AS pct_unsorted
+ (CASE WHEN a.rows = 0 THEN NULL ELSE
+ CASE WHEN pgc.reldiststyle = 8 THEN ((a.rows_all_dist - pgc.reltuples)::DECIMAL(20,3) / a.rows_all_dist::DECIMAL(20,3)*100)::DECIMAL(20,2)
+ ELSE ((a.rows - pgc.reltuples)::DECIMAL(20,3) / a.rows::DECIMAL(20,3)*100)::DECIMAL(20,2) END END
+ ) AS pct_stats_off,
+ CASE WHEN pgc.reldiststyle = 8
+ THEN decode( det.n_sortkeys,0, NULL,DECODE( a.rows_all_dist,0,0, (a.unsorted_rows_all_dist::DECIMAL(32)/a.rows_all_dist)*100))::DECIMAL(20,2)
+ ELSE decode( det.n_sortkeys,0, NULL,DECODE( a.rows,0,0, (a.unsorted_rows::DECIMAL(32)/a.rows)*100))::DECIMAL(20,2) END
+ AS pct_unsorted
FROM (SELECT db_id,
id,
name,
SUM(ROWS) AS ROWS,
- SUM(ROWS) - SUM(sorted_rows) AS unsorted_rows
+ MAX(ROWS) AS rows_all_dist,
+ SUM(ROWS) - SUM(sorted_rows) AS unsorted_rows,
+ MAX(ROWS) - MAX(sorted_rows) AS unsorted_rows_all_dist
FROM stv_tbl_perm a
GROUP BY db_id,
id,
|
Fix "Load more" not shown in Windows
Fixes a bug where the "Load more" button wouldn't appear on browsers in Windows | @@ -610,7 +610,7 @@ class ExperimentRunsTableCompactView extends React.Component {
scrollTop: grid.state.scrollTop,
};
const isRunsListShort = scrollHeight < clientHeight;
- const isAtScrollBottom = isRunsListShort || (clientHeight + scrollTop === scrollHeight);
+ const isAtScrollBottom = isRunsListShort || (clientHeight + scrollTop >= scrollHeight);
this.setState({ isAtScrollBottom });
}, 100);
}
|
Fix issue 724
Vertical_raster is saved in the test file, but setting vertical_raster is not possible there. To rectify, we go ahead and also catch the AttributeError raised by the setattr.
key,
type_v(element.values[key]),
)
- except (ValueError, KeyError):
+ except (ValueError, KeyError, AttributeError):
pass
elif type_v == bool:
+ try:
setattr(
op.settings,
key,
str(element.values[key]).lower()
in ("true", "1"),
)
+ except (ValueError, KeyError, AttributeError):
+ pass
elements_modifier.add_op(op)
except KeyError:
pass
|
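Not part of the patch: a minimal, hypothetical sketch of one way `setattr` can raise `AttributeError` (a read-only property), which is the case the widened `except` clause above now tolerates.

```python
# Hypothetical stand-in for op.settings; the real object comes from the SVG loader.
class Settings:
    @property
    def vertical_raster(self):
        return True  # computed attribute with no setter


op_settings = Settings()

try:
    # Mirrors the bool branch of the loader: "true"/"1" strings become True.
    setattr(op_settings, "vertical_raster", "True".lower() in ("true", "1"))
except (ValueError, KeyError, AttributeError):
    # AttributeError ("can't set attribute") is now swallowed like the other lookup errors.
    pass
```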
Fix crash when clicking diagnostics hover "Code Action" link
Fixes following traceback:
Traceback (most recent call last):
File "C:\Apps\Sublime\Data\Packages\LSP\main.py", line 1965, in <lambda>
on_navigate=lambda href: self.on_diagnostics_navigate(self, href, point, diagnostics))
TypeError: on_diagnostics_navigate() takes 4 positional arguments but 5 were given | @@ -1967,7 +1967,7 @@ class HoverHandler(sublime_plugin.ViewEventListener):
location=point,
wrapper_class="lsp_hover",
max_width=800,
- on_navigate=lambda href: self.on_diagnostics_navigate(self, href, point, diagnostics))
+ on_navigate=lambda href: self.on_diagnostics_navigate(href, point, diagnostics))
def on_diagnostics_navigate(self, href, point, diagnostics):
# TODO: don't mess with the user's cursor.
|
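A standalone illustration of the Python behaviour behind the traceback (no Sublime API involved): `self.on_diagnostics_navigate` is already a bound method, so passing `self` again supplies a fifth positional argument to a four-argument method.

```python
class HoverHandler:
    def on_diagnostics_navigate(self, href, point, diagnostics):
        return (href, point, diagnostics)


handler = HoverHandler()

# What the broken lambda effectively did: the bound method already carries `self`.
try:
    handler.on_diagnostics_navigate(handler, "code-actions", 42, [])
except TypeError as exc:
    print(exc)  # ...takes 4 positional arguments but 5 were given

# What the fixed lambda does.
print(handler.on_diagnostics_navigate("code-actions", 42, []))
```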
[Jira] Update jira.py
* Update jira.py
added update filter method
* fixed spaces
* fixed spaces
* added return in docstring | @@ -590,6 +590,22 @@ class Jira(AtlassianRestAPI):
url = "{base_url}/{id}".format(base_url=base_url, id=filter_id)
return self.get(url)
+ def update_filter(self, filter_id, jql, **kwargs):
+ """
+ :param filter_id: int
+ :param jql: str
+ :param kwargs: dict, Optional (name, description, favourite)
+ :return:
+ """
+ allowed_fields = ("name", "description", "favourite")
+ data = {"jql": jql}
+ for k, v in kwargs.items():
+ if k in allowed_fields:
+ data.update({k: v})
+ base_url = self.resource_url("filter")
+ url = "{base_url}/{id}".format(base_url=base_url, id=filter_id)
+ return self.put(url, data=data)
+
def delete_filter(self, filter_id):
"""
Deletes a filter that has the given id.
|
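A hedged usage sketch of the new method; the URL, credentials, filter id and JQL below are placeholders, and only keyword arguments from `allowed_fields` make it into the request body.

```python
from atlassian import Jira  # atlassian-python-api

jira = Jira(url="https://jira.example.com", username="user", password="api-token")

# Rewrite the JQL of filter 10042, rename it and mark it as a favourite;
# any other keyword argument is silently ignored by update_filter.
jira.update_filter(
    10042,
    "project = DEMO ORDER BY created DESC",
    name="Recent DEMO issues",
    favourite=True,
)
```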
Add PyUnicode_AsUTF8AndSize to cpython imports [0.29.x]
Backport of | @@ -314,9 +314,25 @@ cdef extern from *:
# raised by the codec.
bytes PyUnicode_EncodeUTF8(Py_UNICODE *s, Py_ssize_t size, char *errors)
- # Encode a Unicode objects using UTF-8 and return the result as Python string object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
+ # Encode a Unicode objects using UTF-8 and return the result as Python bytes object. Error handling is ``strict''. Return NULL if an exception was raised by the codec.
bytes PyUnicode_AsUTF8String(object unicode)
+
+ # Return a pointer to the UTF-8 encoding of the Unicode object,
+ # and store the size of the encoded representation (in bytes) in size.
+ # The size argument can be NULL; in this case no size will be stored.
+ # The returned buffer always has an extra null byte appended
+ # (not included in size), regardless of whether there are any
+ # other null code points.
+
+ # In the case of an error, NULL is returned with an exception set and
+ # no size is stored.
+
+ # This caches the UTF-8 representation of the string in the Unicode
+ # object, and subsequent calls will return a pointer to the same buffer.
+ # The caller is not responsible for deallocating the buffer
+ const char* PyUnicode_AsUTF8AndSize(object unicode, Py_ssize_t *size)
+
# These are the UTF-16 codec APIs:
# Decode length bytes from a UTF-16 encoded buffer string and
|
Update torchvision_tutorial.rst
* Update torchvision_tutorial.rst
Fixes link to ipynb notebook for this article.
* Update torchvision_tutorial.rst | @@ -3,7 +3,7 @@ TorchVision Object Detection Finetuning Tutorial
.. tip::
To get the most of this tutorial, we suggest using this
- `Colab Version <https://colab.research.google.com/github/pytorch/vision/blob/temp-tutorial/tutorials/torchvision_finetuning_instance_segmentation.ipynb>`__.
+ `Colab Version <https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/torchvision_finetuning_instance_segmentation.ipynb>`__.
This will allow you to experiment with the information presented below.
For this tutorial, we will be finetuning a pre-trained `Mask
|
Fix lint issues
Summary: Pull Request resolved: | @@ -173,8 +173,10 @@ module_tests = [
module_name='Softplus',
constructor_args=(2, -100),
input_size=(10, 20),
- reference_fn=(lambda i, *_: ((i * 2) > -100).type_as(i) * i +
- ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))),
+ reference_fn=(
+ lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))
+ ),
desc='beta_threshold',
),
dict(
@@ -2220,8 +2222,10 @@ new_module_tests = [
module_name='Softplus',
constructor_args=(2, -100),
input_size=(),
- reference_fn=(lambda i, *_: ((i * 2) > -100).type_as(i) * i +
- ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log(1 + torch.exp(2 * i))),
+ reference_fn=(
+ lambda i, *_: ((i * 2) > -100).type_as(i) * i
+ + ((i * 2) <= -100).type_as(i) * 1.0 / 2.0 * torch.log(1 + torch.exp(2 * i))
+ ),
desc='beta_threshold_scalar',
),
dict(
|
Fixed logging message
Added file documentation | # remove all releaseNotes from files in: Itegrations, Playbooks, Reports and Scripts.
-# Note: using yaml will destroy the file structures so filtering as regular text-file.
+# Note: using yaml will destroy the file structures so filtering as regular text-file.\
+# Note2: file must be run from root directory with 4 sub-directories: Integration, Playbook, Reports, Scripts
+# Usage: python release_notes_clear.py
import os
import glob
@@ -86,7 +88,6 @@ def remove_releaseNotes_folder(folder_path, files_extension):
scan folder and remove all references to release notes
:param folder_path: path of the folder
:param files_extension: type of file to look for (json or yml)
- :return:
'''
scan_files = glob.glob(os.path.join(folder_path, files_extension))
@@ -95,7 +96,7 @@ def remove_releaseNotes_folder(folder_path, files_extension):
if FILE_EXTRACTER_DICT[files_extension](path):
count += 1
- return count
+ print '--> Changed %d out of %d files' % (count, len(scan_files), )
def main(root_dir):
@@ -103,14 +104,12 @@ def main(root_dir):
json_folders_to_scan = ['Reports', ] # json
for folder in yml_folders_to_scan:
- print 'scanning directory: %s' % (folder, ),
- changed = remove_releaseNotes_folder(os.path.join(root_dir, folder), '*.yml')
- print 'Done: removed %d from files' % (changed, )
+ print 'Scanning directory: "%s"' % (folder, )
+ remove_releaseNotes_folder(os.path.join(root_dir, folder), '*.yml')
for folder in json_folders_to_scan:
- print 'scanning directory: %s' % (folder, ),
- changed = remove_releaseNotes_folder(os.path.join(root_dir, folder), '*.json')
- print 'Done: removed %d from files' % (changed,)
+ print 'Scanning directory: "%s"' % (folder, )
+ remove_releaseNotes_folder(os.path.join(root_dir, folder), '*.json')
if __name__ == '__main__':
|
[DOCS] Update deployment_google_cloud_composer.rst
* Update deployment_google_cloud_composer.rst
Updated based on comment from user
* Update deployment_google_cloud_composer.rst | @@ -39,6 +39,8 @@ Note: These steps are basically following the :ref:`Deploying Great Expectations
Note: You may want to reference our :ref:`Configuring metadata stores <how_to_guides__configuring_metadata_stores>` and :ref:`Configuring Data Docs <how_to_guides__configuring_data_docs>` how-to guides. All of the stores in the below example are configured to use GCS, however you can use whichever store is applicable to your infrastructure.
+ Important: If your Composer workflow includes spinning up/tearing down Composer environments and deleting the associated GCS bucket, you need to configure a separate bucket to persist your Great Expectations assets.
+
.. code-block:: python
project_config = DataContextConfig(
|
Update CODEOWNERS
Summary:
teng-li is passing the baton to mrshenli. Thanks for all your work on distributed teng-li!! :tada:
Pull Request resolved: | /docs/cpp @goldsborough @ebetica @yf225
/torch/csrc/api/ @ebetica @goldsborough @yf225
/test/cpp/api/ @ebetica @goldsborough @yf225
-/torch/lib/c10d/ @apaszke @pietern @teng-li
-/torch/csrc/distributed/ @apaszke @pietern @teng-li
-/torch/distributed/ @apaszke @pietern @teng-li
-/test/test_c10d.py @apaszke @pietern @teng-li
+/torch/lib/c10d/ @apaszke @pietern @mrshenli
+/torch/csrc/distributed/ @apaszke @pietern @mrshenli
+/torch/distributed/ @apaszke @pietern @mrshenli
+/test/test_c10d.py @apaszke @pietern @mrshenli
/torch/utils/cpp_extension.py @goldsborough @fmassa @apaszke @soumith @ezyang
|
Add members marker and unsafe_name to pkg_resources.Requirement
For now specify Requirement.marker as Optional[Any] (as suggested by
as we can't import packaging.markers (pkg_resource does that via runtime magic
in pkg_resources.external)
Closes | @@ -68,10 +68,14 @@ class Environment:
def parse_requirements(strs: Union[str, Iterable[str]]) -> Generator[Requirement, None, None]: ...
class Requirement:
+ unsafe_name: str
project_name: str
key: str
extras: Tuple[str, ...]
specs: List[Tuple[str, str]]
+ # TODO: change this to Optional[packaging.markers.Marker] once we can import
+ # packaging.markers
+ marker: Optional[Any]
@staticmethod
def parse(s: Union[str, Iterable[str]]) -> Requirement: ...
def __contains__(self, item: Union[Distribution, str, Tuple[str, ...]]) -> bool: ...
|
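A small runtime sketch of the attributes the stub now declares (output shapes assume a reasonably recent setuptools):

```python
from pkg_resources import Requirement

req = Requirement.parse('requests[security]>=2.0; python_version >= "3.7"')
print(req.unsafe_name)    # 'requests'
print(req.project_name)   # normalized project name
print(req.extras)         # ('security',)
print(req.specs)          # [('>=', '2.0')]
print(req.marker)         # packaging marker object, or None when no marker is given
```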
see also: groupby in resample doc and vice-versa
groupby is like a resample on non-contiguous data.
DataArray.groupby
core.groupby.DatasetGroupBy
pandas.DataFrame.groupby
+ Dataset.resample
+ DataArray.resample
"""
from xarray.core.groupby import DatasetGroupBy
@@ -9210,6 +9212,8 @@ class Dataset(
DataArray.resample
pandas.Series.resample
pandas.DataFrame.resample
+ Dataset.groupby
+ DataArray.groupby
References
----------
|
Poetry: update Windows install instructions, as noted in their github repo
Check
out. The old instructions failed big time for me on a second Windows
box I just tried to install it to. This new one works with the last
release just fine. | @@ -82,9 +82,9 @@ poetry install
Enter the PowerShell command prompt and execute,
```powershell
-(Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py -UseBasicParsing).Content | python -
-# the path can be added to system, so it applies to every terminal.
-$env:PATH += ";$env:USERPROFILE\.poetry\bin"
+(Invoke-WebRequest -Uri https://raw.githubusercontent.com/python-poetry/poetry/master/install-poetry.py -UseBasicParsing).Content | python -
+# Add to PATH Poetry's binary location (either this or via Window's global env. vars. menu):
+$env:PATH += ";$env:APPDATA\Python\Scripts"
poetry install
```
|
[air] Do not use gzip for checkpoint dict conversion
Gzipping binary data is inefficient and slows down data transfer significantly. | @@ -479,7 +479,7 @@ def _temporary_checkpoint_dir() -> str:
def _pack(path: str) -> bytes:
"""Pack directory in ``path`` into an archive, return as bytes string."""
stream = io.BytesIO()
- with tarfile.open(fileobj=stream, mode="w:gz", format=tarfile.PAX_FORMAT) as tar:
+ with tarfile.open(fileobj=stream, mode="w", format=tarfile.PAX_FORMAT) as tar:
tar.add(path, arcname="")
return stream.getvalue()
|
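A quick, self-contained comparison of the two tar modes used above; random bytes stand in for already-compressed checkpoint data, where gzip adds CPU time without shrinking the archive much.

```python
import io
import os
import tarfile
import tempfile

payload = os.urandom(1 << 20)  # incompressible, like many binary checkpoint blobs

with tempfile.TemporaryDirectory() as path:
    with open(os.path.join(path, "weights.bin"), "wb") as f:
        f.write(payload)
    for mode in ("w", "w:gz"):
        stream = io.BytesIO()
        with tarfile.open(fileobj=stream, mode=mode, format=tarfile.PAX_FORMAT) as tar:
            tar.add(path, arcname="")
        print(mode, len(stream.getvalue()))  # sizes end up nearly identical
```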
Update install.rst
Added command for installation all the prerequisites if user is on Ubuntu 17.04. | @@ -21,6 +21,12 @@ all the prerequisites using the following command:
sudo apt install git python-pip nodejs-legacy npm postgresql postgresql-server-dev-9.5 postgresql-contrib-9.5 libxml2-dev libxslt1-dev python-dev libmemcached-dev virtualenv
+If you're on Ubuntu 17.04, you can install all the prerequisites using the following command:
+
+ .. code-block:: bash
+
+ sudo apt install git python-pip nodejs-legacy npm postgresql postgresql-server-dev-9.6 postgresql-contrib-9.6 libxml2-dev libxslt1-dev python-dev libmemcached-dev virtualenv
+
.. _Git: https://git-scm.com/
.. _Python 2.7: https://www.python.org/
.. _pip: https://pip.pypa.io/en/stable/
|
Removed new privilege check
It isn't strictly necessary for this PR, and it breaks a bunch of tests
that would need to have domain_has_privilege patched. | @@ -1706,9 +1706,6 @@ class CommCareUser(CouchUser, SingleMembershipMixin, CommCareMobileContactMixin)
return self.user_data.pop(key, default)
def get_user_data_profile(self, profile_id):
- if not domain_has_privilege(self.domain, privileges.APP_USER_PROFILES):
- return None
-
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
from corehq.apps.custom_data_fields.models import CustomDataFieldsProfile
if not profile_id:
|
Update 5-minutes-to-zfit article.
Correct some typos and phrasings.
Update some code snippets that either don't work anymore or are no longer best practice.
Add a paragraph explaining that Parameters are mutable. | The zfit library provides a simple model fitting and sampling framework for a broad list of applications. This section is designed to give an overview of the main concepts and features in the context of likelihood fits in a *crash course* manner. The simplest example is to generate, fit and plot a Gaussian distribution.
-The first step is to naturally import ``zfit`` and verify if the installation has been done successfully:
+The first step is to import ``zfit`` and verify if the installation has been done successfully:
.. code-block:: pycon
@@ -37,7 +37,7 @@ With these parameters we can instantiate the Gaussian PDF from the library
It is recommended to pass the arguments of the PDF as keyword arguments.
-The next stage is to create a dataset to be fitted. There are several ways of producing this within the zfit framework (see the :ref:`Data <data-section>` section). In this case, for simplicity we simply produce it using numpy and the :func:`Data.from_numpy <zfit.Data.from_numpy>` method:
+The next stage is to create a dataset to be fitted. There are several ways of producing this within the zfit framework (see the :ref:`Data <data-section>` section). For simplicity we simply produce it using numpy and the :func:`Data.from_numpy <zfit.Data.from_numpy>` method:
.. code-block:: pycon
@@ -55,7 +55,7 @@ Now we have all the ingredients in order to perform a maximum likelihood fit. Co
>>> # Stage 1: create an unbinned likelihood with the given PDF and dataset
>>> nll = zfit.loss.UnbinnedNLL(model=gauss, data=data)
- >>> # Stage 2: instantiate a minimiser (in this case a basic minuit
+ >>> # Stage 2: instantiate a minimiser (in this case a basic minuit)
>>> minimizer = zfit.minimize.Minuit()
>>> # Stage 3: minimise the given negative likelihood
@@ -125,8 +125,8 @@ As already mentioned, there is no dedicated plotting feature within zfit. Howeve
>>> # Some simple matplotlib configurations
>>> import matplotlib.pyplot as plt
- >>> lower, upper = obs.limits
- >>> data_np = zfit.run(data)
+ >>> lower, upper = obs.rect_limits
+ >>> data_np = zfit.run(data.value())
>>> counts, bin_edges = np.histogram(data_np, 80, range=(lower[-1][0], upper[0][0]))
>>> bin_centres = (bin_edges[:-1] + bin_edges[1:])/2.
>>> err = np.sqrt(counts)
@@ -135,7 +135,7 @@ As already mentioned, there is no dedicated plotting feature within zfit. Howeve
>>> x_plot = np.linspace(lower[-1][0], upper[0][0], num=1000)
>>> y_plot = zfit.run(gauss.pdf(x_plot, norm_range=obs))
- >>> plt.plot(x_plot, y_plot*data_np.shape[0]/80*obs.area(), color='xkcd:blue')
+ >>> plt.plot(x_plot, y_plot*data_np.shape[0]/80*obs.rect_area(), color='xkcd:blue')
>>> plt.show()
.. image:: ../images/Gaussian.png
@@ -144,6 +144,12 @@ The plotting example above presents a distinctive feature that had not been show
While actions like ``minimize`` or ``sample`` return Python objects (including numpy arrays or scalars), functions like ``pdf`` or ``integrate`` return TensorFlow graphs, which are lazy-evaluated.
To obtain the value of these PDFs, we need to execute the graph by using ``zfit.run``.
+Also note that we were able to plot the fitting result with the model gaussian without extracting the loss
+minimizing parameters from ``result``. This is possible because parameters are mutable. This means that the
+minimizer can directly manipulate the value of the floating parameter. So when you call the ``minimizer.minimize()``
+method the value of ``mu`` changes during the optimisation. ``gauss.pdf()`` then uses this new value to calculate the
+pdf.
+
What did just happen?
---------------------
|
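A short sketch of the mutability described in the new paragraph, reusing the `mu`, `nll` and `minimizer` objects built earlier in the tutorial; `zfit.run` evaluates the lazy value as explained above.

```python
print(zfit.run(mu.value()))       # the value mu was given at construction
result = minimizer.minimize(nll)  # the minimizer mutates mu in place
print(zfit.run(mu.value()))       # the same Parameter object now holds the fitted value
```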
Update tox.ini
Ignore the numpy security issue, as numpy is not used in the packaged application and is needed for tests only
[testenv:py37-safety]
deps=
-rrequirements/build_style_tools.txt
- numpy==1.16.4
marshmallow==3.0.0rc6
whitelist_externals=flake8
-commands=safety check
+commands=safety check -i 36810
[testenv:pywin]
deps =-rrequirements/build_windows.txt
|
Make it possible to ignore specific resolved expression variables
This will make it possible during resolved expression construction to
introduce dummy variables that will not interfere with the "unused
bindings" warning.
TN: | @@ -1099,6 +1099,7 @@ class AbstractVariable(AbstractExpression):
self.static_type = assert_type(type, CompiledType)
self.name = name
self.abstract_var = abstract_var
+ self._ignored = False
super(AbstractVariable.Expr, self).__init__(
skippable_refcount=True, abstract_expr=abstract_expr
@@ -1125,7 +1126,15 @@ class AbstractVariable(AbstractExpression):
If this comes from the language specification, return whether it is
supposed to be ignored. Return False otherwise.
"""
- return self.abstract_var.ignored if self.abstract_var else False
+ return self._ignored or (self.abstract_var.ignored
+ if self.abstract_var else False)
+
+ def set_ignored(self):
+ """
+ Ignore this resolved variable in the context of the unused bindings
+ warning machinery.
+ """
+ self._ignored = True
def __repr__(self):
src_name = self.source_name
|
Some additional clarity to the proposal after lengthy discussions about scheduling.
+ Make port an external linkage via extern call | @@ -126,7 +126,7 @@ a global scope to all identifiers in order to declare values shared across all `
// Defined within `cal`, so it may not leak back out to the enclosing blocks scope
float new_freq = 5.2e9;
// declare global port
- port d0 = getport("drive", $0);
+ extern port d0;
// reference `freq` variable from enclosing blocks scope
frame d0f = newframe(d0, freq, 0.0);
@@ -199,8 +199,8 @@ existing ``include`` mechanism.
extern drag(complex[size] amp, duration l, duration sigma, float[size] beta) -> waveform;
extern gaussian_square(complex[size] amp, duration l, duration square_width, duration sigma) -> waveform;
- port q0 = getport("q", $0);
- port q1 = getport("q", $1);
+ extern port q0;
+ extern port q1;
frame q0_frame = newframe(q0, q0_freq, 0);
frame q1_frame = newframe(q1, q1_freq, 0);
|
pager: catch startup failures on Windows
If the user's pager settings are broken, display an error message
rather than crash to avoid confusing them.
Tested-by: Mike Frysinger | @@ -56,8 +56,11 @@ def _PipePager(pager):
global pager_process, old_stdout, old_stderr
assert pager_process is None, "Only one active pager process at a time"
# Create pager process, piping stdout/err into its stdin
+ try:
pager_process = subprocess.Popen([pager], stdin=subprocess.PIPE, stdout=sys.stdout,
stderr=sys.stderr)
+ except FileNotFoundError:
+ sys.exit(f'fatal: cannot start pager "{pager}"')
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = pager_process.stdin
|
Update noaa-ufs-shortrangeweather.yaml
Added SNS topic | @@ -33,3 +33,7 @@ Resources:
Type: S3 Bucket
Explore:
- '[Browse Bucket](https://noaa-ufs-srw-pds.s3.amazonaws.com/index.html)'
+ - Description: New data notifications for UFS Short-Range Weather data, only Lambda and SQS protocols allowed
+ ARN: arn:aws:sns:us-east-1:709902155096:NewUFSSRWObject
+ Region: us-east-1
+ Type: SNS Topic
|
Clarify message when `remote_dir` exists
If `remote_dir` exists, SshChannel.push_file complains that
pushing failed. This is a bit confusing for users because pushing
did not actually fail; it was just not necessary to create
`remote_dir`. | @@ -133,14 +133,14 @@ class SshChannel ():
try:
self.sftp_client.mkdir(remote_dir)
except IOError as e:
+ if e.errno is None:
+ logger.info("Copying {0} into existing directory {1}".format(local_source, remote_dir))
+ else:
logger.error("Pushing {0} to {1} failed".format(local_source, remote_dir))
if e.errno == 2:
raise BadScriptPath(e, self.hostname)
elif e.errno == 13:
raise BadPermsScriptPath(e, self.hostname)
- elif e.errno == None:
- # Directory already exists. Nothing to do
- pass
else :
logger.error("File push failed due to SFTP client failure")
raise FileCopyException(e, self.hostname)
|
Fix playing audio streams from ffpyplayer
Everything in the title - try playing a webradio such as `http://www.tms-radio.com/radio.ogg` and it will fail because duration turns from `None` to `0.0`, which doesn't trigger this condition. | @@ -126,7 +126,7 @@ class SoundFFPy(Sound):
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
s = time.perf_counter()
- while ((not player.get_metadata()['duration']) and
+ while (player.get_metadata()['duration'] is None and
not self.quitted and time.perf_counter() - s < 10.):
time.sleep(0.005)
|
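A tiny illustration of the truthiness trap behind the fix: `None` (metadata not loaded yet) and `0.0` (a live stream with no fixed duration) are both falsy, so the old check kept waiting for streams that will never report a duration.

```python
for duration in (None, 0.0, 183.4):
    old_keeps_waiting = not duration        # True for both None and 0.0
    new_keeps_waiting = duration is None    # True only while metadata is missing
    print(duration, old_keeps_waiting, new_keeps_waiting)
```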
Moves 'algorithms' test package from "default" to "drivers" TravisCI case.
Since the "default" case is timing out (now that we can't use
multiprocessing due to the "Darwin" bug) on TravisCI, this attempts
to load balance the TravisCI cases by moving the 'algorithms'
tests out of the "default" and into "drivers" case. | @@ -34,10 +34,10 @@ elif doReportB == 'True':
# Removed: 'testFigureFormatter.py',
elif doDrivers == 'True':
- tests = ['drivers']
+ tests = ['drivers', 'algorithms']
elif doDefault == 'True':
- tests = ['objects', 'tools', 'iotest', 'optimize', 'algorithms', 'construction','extras']
+ tests = ['objects', 'tools', 'iotest', 'optimize', 'construction','extras']
parallel = False #multiprocessing bug in darwin (and apparently TravisCI) causes segfault if used.
elif doMPI == 'True':
|
[IMPR] some improvements for data_ingestion.py
use try..else instead of continue inside exception
use contextlib.closing to close a file | @@ -285,13 +285,11 @@ def main(*args):
filename = os.path.join(csv_dir, configuration['csvFile'])
try:
-
f = codecs.open(filename, 'r', configuration['csvEncoding'])
except (IOError, OSError) as e:
pywikibot.error('%s could not be opened: %s' % (filename, e))
- continue
-
- try:
+ else:
+ with f:
files = CSVReader(f, urlcolumn='url',
site=config_page.site,
dialect=configuration['csvDialect'],
@@ -301,10 +299,7 @@ def main(*args):
configuration['titleFormat'],
configuration['formattingTemplate'],
site=None)
-
bot.run()
- finally:
- f.close()
if __name__ == "__main__":
|
Adjust package version requirements
Allow django-ipware to have a newer version in the future
Update Python version requirement to match package specifiers | @@ -35,8 +35,8 @@ setup(
package_dir={"axes": "axes"},
use_scm_version=True,
setup_requires=["setuptools_scm"],
- python_requires="~=3.6",
- install_requires=["django>=3.2", "django-ipware>=3,<5", "setuptools"],
+ python_requires=">=3.7",
+ install_requires=["django>=3.2", "django-ipware>=3", "setuptools"],
include_package_data=True,
packages=find_packages(exclude=["tests"]),
classifiers=[
|
Update handleAdd to add node at dropped location
Now when a user drags and drops a notebook into the pipeline editor,
it will add it at the location where it is dropped.
This includes some extra code to handle the original usage.
Fixes | @@ -317,8 +317,17 @@ class Pipeline extends React.Component<Pipeline.Props, Pipeline.State> {
this.widgetContext.model.fromJSON(this.canvasController.getPipelineFlow());
}
- handleAdd() {
+ handleAdd(x?: number, y?: number) {
let failedAdd = 0;
+ let position = 0;
+ let missingXY = !(x && y);
+
+ // if either x or y is undefined use the default coordinates
+ if (missingXY) {
+ position = this.position;
+ x = 75;
+ y = 85;
+ }
toArray(this.browserFactory.defaultBrowser.selectedItems()).map(
item => {
@@ -327,13 +336,12 @@ class Pipeline extends React.Component<Pipeline.Props, Pipeline.State> {
//add each selected notebook
console.log('Adding ==> ' + item.path );
- this.position += 20;
const nodeTemplate = this.canvasController.getPaletteNode('execute-notebook-node');
if (nodeTemplate) {
const data = {
'editType': 'createNode',
- 'offsetX': 75 + this.position,
- 'offsetY': 85 + this.position,
+ 'offsetX': x + position,
+ 'offsetY': y + position,
'nodeTemplate': this.canvasController.convertNodeTemplate(nodeTemplate)
};
@@ -343,12 +351,19 @@ class Pipeline extends React.Component<Pipeline.Props, Pipeline.State> {
data.nodeTemplate.app_data['image'] = this.propertiesInfo.parameterDef.current_parameters.image;
this.canvasController.editActionHandler(data);
+
+ position += 20;
}
} else {
failedAdd++;
}
}
- )
+ );
+
+ // update position if the default coordinates were used
+ if (missingXY) {
+ this.position = position;
+ }
if (failedAdd) {
return showDialog({
@@ -526,24 +541,20 @@ class Pipeline extends React.Component<Pipeline.Props, Pipeline.State> {
handleEvent(event: Event): void {
switch (event.type) {
case 'dragenter':
- console.log('dragenter');
event.preventDefault();
break;
case 'dragover':
- console.log('dragover');
event.preventDefault();
break;
case 'p-dragover':
- console.log('p-dragover');
event.preventDefault();
event.stopPropagation();
(event as IDragEvent).dropAction = (event as IDragEvent).proposedAction;
break;
case 'p-drop':
- console.log('p-drop');
event.preventDefault();
event.stopPropagation();
- this.handleAdd();
+ this.handleAdd((event as IDragEvent).offsetX, (event as IDragEvent).offsetY);
break;
default:
break;
|
[TR] DATA_SOURCES: show real URL
change URL in documentation to show the realtime graph (not a login page) | @@ -121,7 +121,7 @@ Real-time electricity data is obtained using [parsers](https://github.com/tmrowc
- Saudi Arabia: [GCCIA](https://www.gccia.com.sa/)
- Switzerland: [ENTSOE](https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html)
- Taiwan: [TAIPOWER](http://www.taipower.com.tw/d006/loadGraph/loadGraph/genshx_.html)
-- Turkey: [ytbs](https://ytbs.teias.gov.tr/ytbs/frm_login.jsf)
+- Turkey: [ytbs](https://ytbsbilgi.teias.gov.tr/ytbsbilgi/frm_istatistikler.jsf)
- Ukraine: [UKRENERGO](https://ua.energy/activity/dispatch-information/ues-operation/)
- United Arab Emirates: [GCCIA](https://www.gccia.com.sa/)
- United States of America
|
Remove GNU specific "tm" struct fields "tm_zone" and "tm_gmtoff" from libc/time.pxd because they get in the way of automatic struct conversions.
See | @@ -20,8 +20,9 @@ cdef extern from "<time.h>" nogil:
int tm_wday
int tm_yday
int tm_isdst
- char *tm_zone
- long tm_gmtoff
+ # GNU specific extensions
+ #char *tm_zone
+ #long tm_gmtoff
int daylight # global state
long timezone
|
[Doc] Added md5sum info for OGB-LSC dataset
* Added md5sum for the large dataset files
md5sum helps in validating the correctness of large dataset files once downloaded.
Refer: | - [Node Classification with MAG240M](https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/mag240m_kddcup2021.zip)
- [Link Prediction with WikiKG90M](https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/wikikg90m_kddcup2021.zip)
- [Graph Classification with PCQM4M](https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/pcqm4m_kddcup2021.zip)
+
+
+# checksum md5sum of the files
+mag240m_kddcup2021.zip : ```bd61c9446f557fbe4430d9a7ce108b34```
+
+wikikg90m_kddcup2021.zip : ```73d4f5dde29d78669330b4db4c12fc9c```
+
+pcqm4m_kddcup2021.zip. : ```5144ebaa7c67d24da1a2acbe41f57f6a```
|
AUTO: Enable statsd exporter on staging
We want to do some load testing so we want to use the Prometheus
metrics for observing the system
Roll out the statsd exporter work to staging too | @@ -69,7 +69,7 @@ applications:
AWS_ACCESS_KEY_ID: '{{ AWS_ACCESS_KEY_ID }}'
AWS_SECRET_ACCESS_KEY: '{{ AWS_SECRET_ACCESS_KEY }}'
- {% if environment == 'preview' %}
+ {% if environment in ['preview', 'staging'] %}
STATSD_HOST: "statsd.notify.tools"
STATSD_PREFIX: ""
{% else %}
|
Remove %s in formatting
Previously %s was used to display the variable, but the string is formatted with .format(), so the placeholder is never substituted.
instruction = state.get_current_instruction()
if instruction['opcode'] == "ORIGIN":
- description = "Function %s retrieves the transaction origin (tx.origin) using the ORIGIN opcode. " \
+ description = "The function `{}` retrieves the transaction origin (tx.origin) using the ORIGIN opcode. " \
"Use msg.sender instead.\nSee also: " \
"https://solidity.readthedocs.io/en/develop/security-considerations.html#tx-origin".format(node.function_name)
|
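A one-liner demonstrating the mismatch the patch fixes: `%s` is never substituted when the string is rendered with `str.format`, while `{}` is.

```python
old = "Function %s retrieves the transaction origin".format("withdraw")
new = "The function `{}` retrieves the transaction origin".format("withdraw")
print(old)  # 'Function %s retrieves the transaction origin' -- placeholder left literal
print(new)  # 'The function `withdraw` retrieves the transaction origin'
```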
Fix disable_redirect_output to fix
We constantly disable and re-enable logging/redirection. Apparently we are
disabling it in the wrong order, which in GDB 8.1 causes issues. | @@ -1160,8 +1160,8 @@ def enable_redirect_output(to_file="/dev/null"):
def disable_redirect_output():
"""Disable the output redirection, if any."""
- gdb.execute("set logging redirect off")
gdb.execute("set logging off")
+ gdb.execute("set logging redirect off")
return
|
Update version 0.8.0 -> 0.8.1
New Features
* `BatchReverseComposite` and `ReverseAdvanceComposite`
* Failover support in `DWaveSampler`
* Embedding composites now return QPU `problem_id` in sampleset's info
field and additional embedding context/parameters
* Warnings generation (during embedding/unembedding) | # =============================================================================
__all__ = ['__version__', '__author__', '__authoremail__', '__description__']
-__version__ = '0.8.0'
+__version__ = '0.8.1'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = '[email protected]'
__description__ = 'All things D-Wave System.'
|
Minor fix getting novel info
logger = logging.getLogger(__name__)
chapter_info_url = 'https://www.wattpad.com/v4/parts/%s?fields=id,title,pages,text_url&_=%d'
+story_info_url = 'https://www.wattpad.com/api/v3/stories/%s'
class WattpadCrawler(Crawler):
base_url = [
@@ -21,21 +22,24 @@ class WattpadCrawler(Crawler):
def read_novel_info(self):
'''Get novel title, autor, cover etc'''
- logger.debug('Visiting %s', self.novel_url)
- soup = self.get_soup(self.novel_url)
+ search_id = re.compile(r'\d{9,}')
+ id_no = search_id.search(self.novel_url)
+ story_url = story_info_url % (id_no.group())
- self.novel_title = soup.select_one('.story-info__title').get_text().strip()
+ logger.debug('Visiting %s', story_url)
+ story_info = self.get_json(story_url)
+
+ self.novel_title = story_info['title']
logger.info('Novel title: %s', self.novel_title)
self.novel_cover = self.absolute_url(
- soup.select_one('.story-cover img')['src'])
+ story_info['cover'])
logger.info('Novel cover: %s', self.novel_cover)
- self.novel_author = soup.select_one(
- '.author-info__username').get_text()
+ self.novel_author = story_info['user']['name']
logger.info('Novel author: %s', self.novel_author)
- chapters = soup.select_one('.story-parts').select('ul li a')
+ chapters = story_info['parts']
vols = set([])
for a in chapters:
@@ -45,8 +49,8 @@ class WattpadCrawler(Crawler):
self.chapters.append({
'id': chap_id,
'volume': vol_id,
- 'url': self.absolute_url(a['href']),
- 'title': a.text.strip() or ('Chapter %d' % chap_id),
+ 'url': self.absolute_url(a['url']),
+ 'title': a['title'] or ('Chapter %d' % chap_id),
})
# end for
self.volumes = [{'id': i} for i in vols]
|
remove NameMC field
NameMC now seems to be using some sort of obfuscation | @@ -36,7 +36,7 @@ class MinecraftData:
await self.bot.say(chat.error("This player not found"))
return
em = discord.Embed(timestamp=ctx.message.timestamp)
- em.add_field(name="NameMC profile", value="[{}](https://namemc.com/profile/{})".format(nickname, uuid))
+ # em.add_field(name="NameMC profile", value="[{}](https://namemc.com/profile/{})".format(nickname, uuid))
em.set_author(name=nickname,
icon_url="https://crafatar.com/renders/head/{}{}".format(uuid, "?overlay" if helm_layer else ""),
url="https://crafatar.com/skins/{}".format(uuid))
|
Update telnetconsole.rst
Change spelling of bellow to below. | @@ -45,7 +45,7 @@ the console you need to type::
>>>
By default Username is ``scrapy`` and Password is autogenerated. The
-autogenerated Password can be seen on scrapy logs like the example bellow::
+autogenerated Password can be seen on scrapy logs like the example below::
2018-10-16 14:35:21 [scrapy.extensions.telnet] INFO: Telnet Password: 16f92501e8a59326
|
Orphan: don't create a new lexical env. if input is already an orphan
TN: | @@ -879,13 +879,16 @@ package body Langkit_Support.Lexical_Env is
function Orphan (Self : Lexical_Env) return Lexical_Env is
begin
+ -- If Self is already an orphan, don't create yet another lexical env
+ -- wrapper: just return Self itself.
Inc_Ref (Self);
- return Wrap
- (new Lexical_Env_Type'
+ return (if Self.Kind = Orphaned
+ then Self
+ else Wrap (new Lexical_Env_Type'
(Kind => Orphaned,
Ref_Count => <>,
Orphaned_Env => Self),
- Owner => Self.Owner);
+ Owner => Self.Owner));
end Orphan;
-----------
|
doc: readme: remove travis badge
CI is now implemented in GitHub actions | .. image:: https://img.shields.io/pypi/l/lona.svg
:alt: pypi.org
:target: https://pypi.org/project/lona
-.. image:: https://img.shields.io/travis/com/lona-web-org/lona/master.svg
- :alt: Travis branch
- :target: https://travis-ci.com/lona-web-org/lona
.. image:: https://img.shields.io/pypi/pyversions/lona.svg
:alt: pypi.org
:target: https://pypi.org/project/lona
|
run tests with node 8
[nodeploy] | @@ -16,7 +16,7 @@ env:
- JOB=JSUNIT
- JOB=LINT
before_install:
-- nvm install 6.0.0
+- nvm install 8.0.0
install:
- travis_retry ./ops/travis/travis-install.sh $JOB
- export PYTHONPATH=${PYTHONPATH}:${HOME}/google_appengine
|
arp_table: Undo strange broadcast behavior
I'm not actually sure what the old behavior was getting at; probably
some hack for DHCP. I think the current behavior is much more
reasonable in general. | @@ -111,10 +111,10 @@ class ARPTable (object):
return
if ipp.dstip == pkt.IPV4.IP_BROADCAST:
# Would be nice to do this for any multicast...
- ipp.dstip = router_ip
- #eth_packet.dst = pkt.ETHERNET.ETHER_BROADCAST
- #send_function(eth_packet.pack())
- #return
+ #ipp.dstip = router_ip # Not sure what this was about
+ eth_packet.dst = pkt.ETHERNET.ETHER_BROADCAST
+ send_function(eth_packet.pack())
+ return
if src_ip is None: src_ip = ipp.srcip
if src_eth is None: src_eth = eth_packet.src
|
adopters: add China Telecom BestPay
Via: | @@ -22,6 +22,7 @@ This is a list of TiDB adopters in various industries.
- [Yuanfudao (EdTech)](https://www.crunchbase.com/organization/yuanfudao)
- [ZuoZhu Financial (FinTech)](http://www.zuozh.com/)
- [360 Financial (FinTech)](https://jinrong.360jie.com.cn/)
+- [China Telecom BestPay (FinTech)](https://www.bestpay.com.cn/global/oig/index.html)
- [GAEA (Gaming)](http://gaea.com/en)
- [YOOZOO GAMES (Gaming)](http://www.yoozoo.com/en)
- [FUNYOURS JAPAN (Gaming)](http://company.funyours.co.jp/)
|
Increase hard_timeout and io_timeout to seven days.
We have stress tests that take up to seven days.
# Maximum allowed timeout for I/O and hard timeouts.
#
-# Three days in seconds. Includes an additional 10s to account for small jitter.
-MAX_TIMEOUT_SECS = 3 * 24 * 60 * 60 + 10
+# Seven days in seconds. Includes an additional 10s to account for small jitter.
+MAX_TIMEOUT_SECS = 7 * 24 * 60 * 60 + 10
# Maximum allowed expiration for a pending task.
#
@@ -353,7 +353,7 @@ def _validate_hard_timeout(prop, value):
# 0 is tolerated for termination task, but we don't advertize that, that's
# an internal detail.
raise datastore_errors.BadValueError(
- '%s (%ds) must be between %ds and three days' %
+ '%s (%ds) must be between %ds and seven days' %
(prop._name, value, _MIN_TIMEOUT_SECS))
@@ -362,7 +362,7 @@ def _validate_io_timeout(prop, value):
# pylint: disable=protected-access
if value and not (_MIN_TIMEOUT_SECS <= value <= MAX_TIMEOUT_SECS):
raise datastore_errors.BadValueError(
- '%s (%ds) must be 0 or between %ds and three days' %
+ '%s (%ds) must be 0 or between %ds and seven days' %
(prop._name, value, _MIN_TIMEOUT_SECS))
|
Fix typo in preprocessing.md
Remove a 'The' | @@ -16,8 +16,6 @@ preprocessing stack and easy ways to implement your own preprocessors.
Usage
-----
-The
-
Each preprocessor implements three methods:
1. The constructor (`__init__`) for parameter initialization
|
update co2_cost_escalation_pct
Now assuming inflation = default O&M cost escalation rate (2.5%) to convert from real to nominal co2 cost escalation | @@ -468,7 +468,7 @@ nested_input_definitions = {
"type": "float",
"min": -1.0,
"max": 1.0,
- "default": 0.040173,
+ "default": 0.042173,
"description": "Annual nominal Social Cost of CO2 escalation rate (as a decimal)."
},
"nox_cost_escalation_pct": {
|
Do not refer to transit server in Dockerfile
removed the transit server,
breaking the Dockerfile.
This fixes the Dockerfile to run just the rendezvous server. | @@ -62,9 +62,8 @@ ENV WORMHOLE_USER_NAME="wormhole"
RUN adduser --uid 1000 --disabled-password --gecos "" "${WORMHOLE_USER_NAME}"
# Facilitate network connections to the application. The rendezvous server
-# listens on 4000 by default. The transit relay server on 4001.
+# listens on 4000 by default.
EXPOSE 4000
-EXPOSE 4001
# Put the source somewhere pip will be able to see it.
ADD . /magic-wormhole
@@ -87,4 +86,4 @@ ENTRYPOINT ["/app/env/bin/wormhole-server", "start", "--no-daemon"]
# By default, start up a pretty reasonable server. This can easily be
# overridden by another command which will get added to the entrypoint.
-CMD ["--rendezvous", "tcp:4000", "--transit", "tcp:4001"]
+CMD ["--rendezvous", "tcp:4000"]
|
Update phishing.txt
Moving to ```android_roamingmantis.txt``` with trail optimization\generalization. | @@ -11557,14 +11557,11 @@ downloadontheapple.live
# Reference: https://twitter.com/yumy222/status/1147753081762750464
# Reference: https://www.virustotal.com/gui/ip-address/107.179.40.165/relations
-# Reference: https://twitter.com/NaomiSuzuki_/status/1147844385448456197
epos-k.com
epos-l.com
epos-s.com
epos-z.com
-mydocomo-ic.com
-mydocomo-iy.com
# Reference: https://twitter.com/PhishStats/status/1147840004959457280
|
Add a default description for TheHive alerts if one isn't provided
Use the default alert body created by ElastAlert, in line with the other
alerters | @@ -1992,11 +1992,12 @@ class HiveAlerter(Alerter):
alert_config = {
'artifacts': artifacts,
- 'sourceRef': str(uuid.uuid4())[0:6],
- 'customFields': {},
'caseTemplate': None,
+ 'customFields': {},
+ 'date': int(time.time()) * 1000,
+ 'description': self.create_alert_body(matches),
+ 'sourceRef': str(uuid.uuid4())[0:6],
'title': '{rule[index]}_{rule[name]}'.format(**context),
- 'date': int(time.time()) * 1000
}
alert_config.update(self.rule.get('hive_alert_config', {}))
custom_fields = {}
|
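Not from ElastAlert itself, just a sketch of the override behaviour the new default relies on: because `alert_config.update(...)` runs after the defaults are assembled, a `description` supplied via `hive_alert_config` still wins over the generated body.

```python
def build_alert_config(matches, rule):
    defaults = {
        # stand-in for self.create_alert_body(matches)
        "description": "generated body for %d match(es)" % len(matches),
        "title": "{}_{}".format(rule.get("index", "idx"), rule.get("name", "rule")),
    }
    defaults.update(rule.get("hive_alert_config", {}))
    return defaults


print(build_alert_config([{}], {"name": "my rule"}))
print(build_alert_config([{}], {"name": "my rule",
                                "hive_alert_config": {"description": "custom text"}}))
```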
Update table_info.sql
Remove unnecessary nested case. | @@ -48,9 +48,9 @@ SELECT TRIM(pgn.nspname) AS SCHEMA,
0,0,
((b.mbytes/part.total::DECIMAL)*100)::DECIMAL(20,2)
) AS pct_of_total,
- (CASE WHEN a.rows = 0 THEN NULL ELSE
- CASE WHEN pgc.reldiststyle = 8 THEN ((a.rows_all_dist - pgc.reltuples)::DECIMAL(20,3) / a.rows_all_dist::DECIMAL(20,3)*100)::DECIMAL(20,2)
- ELSE ((a.rows - pgc.reltuples)::DECIMAL(20,3) / a.rows::DECIMAL(20,3)*100)::DECIMAL(20,2) END END
+ (CASE WHEN a.rows = 0 THEN NULL
+ WHEN pgc.reldiststyle = 8 THEN ((a.rows_all_dist - pgc.reltuples)::DECIMAL(20,3) / a.rows_all_dist::DECIMAL(20,3)*100)::DECIMAL(20,2)
+ ELSE ((a.rows - pgc.reltuples)::DECIMAL(20,3) / a.rows::DECIMAL(20,3)*100)::DECIMAL(20,2) END
) AS pct_stats_off,
CASE WHEN pgc.reldiststyle = 8
THEN decode( det.n_sortkeys,0, NULL,DECODE( a.rows_all_dist,0,0, (a.unsorted_rows_all_dist::DECIMAL(32)/a.rows_all_dist)*100))::DECIMAL(20,2)
|
Get notebook auth token from the JUPYTERHUB_API_TOKEN environment
variable if it is not present in the server info | @@ -543,10 +543,11 @@ class ScriptInfo(object):
data={'_xsrf': cookies['_xsrf'], 'password': password})
cookies.update(r.cookies)
+ auth_token = server_info.get('token') or os.getenv('JUPYTERHUB_API_TOKEN') or ''
try:
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
- headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
+ headers={'Authorization': 'token {}'.format(auth_token), })
except requests.exceptions.SSLError:
# disable SSL check warning
from urllib3.exceptions import InsecureRequestWarning
@@ -555,7 +556,7 @@ class ScriptInfo(object):
# fire request
r = requests.get(
url=server_info['url'] + 'api/sessions', cookies=cookies,
- headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
+ headers={'Authorization': 'token {}'.format(auth_token), }, verify=False)
# enable SSL check warning
import warnings
warnings.simplefilter('default', InsecureRequestWarning)
|
GDB helpers: look for 'character' instead of 'char' type
The former is more likely to refer to what GDB expects for strings.
TN: | @@ -85,7 +85,7 @@ class Token(object):
text_addr = (src_buffer['P_ARRAY'].cast(uint32_t) +
(first - src_buffer['P_BOUNDS']['LB0']))
- char = gdb.lookup_type('char').pointer()
+ char = gdb.lookup_type('character').pointer()
return (text_addr.cast(char)
.string('latin-1', length=4 * length)
.decode('utf32'))
|
Grammer_fixes
Implemented grammatical changes. | @@ -346,8 +346,8 @@ You can expand any of the messages that matches the filter to see the full stack
## SRE Recipes
-SRE Recipes is our [Chaos Engineering](https://en.wikipedia.org/wiki/Chaos_engineering) tool to test your sandbox environment. It helps users to familiarize with finding the root cause of a breakage using Cloud Ops monitoring.
-Each 'recipe' simulate a different scenario, there are several recipes that you can run and you can also [contribute your own.](https://github.com/GoogleCloudPlatform/cloud-ops-sandbox/tree/master/sre-recipes#contributing)
+SRE Recipes is our [Chaos Engineering](https://en.wikipedia.org/wiki/Chaos_engineering) tool to test your sandbox environment. It helps users to familiarize themselves with finding the root cause of a breakage using Cloud Operations suite of tools.
+Each 'recipe' simulates a different scenario of real life problems that can occur to the production system. There are several recipes that you can run and you can also [contribute your own.](https://github.com/GoogleCloudPlatform/cloud-ops-sandbox/tree/master/sre-recipes#contributing)
```
$ sandboxctl sre-recipes
@@ -355,7 +355,7 @@ $ sandboxctl sre-recipes
### Running an example SRE Recipe
-> **Note:** Recipe's names are not explicit by design as we don't want allude to the problem.
+> **Note:** Recipe's names are not explicit by design as we don't want to allude to the problem.
1. Run the recipe to manufacture errors in the demo cluster
@@ -370,13 +370,13 @@ $ sandboxctl sre-recipes break recipe0
$ sandboxctl sre-recipes hint recipe0
```
-3. Verify you hypothesis using command line tool
+3. Verify your hypothesis on what could be wrong with the demo app by using command line tool
```
$ sandboxctl sre-recipes verify recipe0
```
-4. After you discovered the problem, you can restore the cluster to it's original state.
+4. After you discover the problem, you can restore the cluster to its original state.
```
$ sandboxctl sre-recipes restore recipe0
|
FIX: be forgiving of higher-numbered mouse buttons coming in
For example the "back" button which reports as mouse 8. | @@ -1125,9 +1125,11 @@ class BackendMatplotlibQt(FigureCanvasQTAgg, BackendMatplotlib):
_MPL_TO_PLOT_BUTTONS = {1: 'left', 2: 'middle', 3: 'right'}
def _onMousePress(self, event):
+ button = self._MPL_TO_PLOT_BUTTONS.get(event.button, None)
+ if button is not None:
self._plot.onMousePress(
event.x, self._mplQtYAxisCoordConversion(event.y),
- self._MPL_TO_PLOT_BUTTONS[event.button])
+ button)
def _onMouseMove(self, event):
if self._graphCursor:
@@ -1148,9 +1150,11 @@ class BackendMatplotlibQt(FigureCanvasQTAgg, BackendMatplotlib):
event.x, self._mplQtYAxisCoordConversion(event.y))
def _onMouseRelease(self, event):
+ button = self._MPL_TO_PLOT_BUTTONS.get(event.button, None)
+ if button is not None:
self._plot.onMouseRelease(
event.x, self._mplQtYAxisCoordConversion(event.y),
- self._MPL_TO_PLOT_BUTTONS[event.button])
+ button)
def _onMouseWheel(self, event):
self._plot.onMouseWheel(
|
Fix test of big message
Since RabbitMQ 3.8.0 max size of message is 128MB. | @@ -1071,7 +1071,7 @@ class TestCase(BaseTestCase):
await queue.bind(exchange, routing_key)
- body = bytes(shortuuid.uuid(), 'utf-8') * 9999999
+ body = bytes(shortuuid.uuid(), 'utf-8') * 6000000
await exchange.publish(
Message(
|
Switch to SafeLoader when loading yaml files
We don't need the functionality of a FullLoader. | @@ -1125,7 +1125,7 @@ class YAMLTemplateSerializer(TemplateSerializer):
try:
return yaml.load(
file_contents,
- Loader=yaml.FullLoader,
+ Loader=yaml.SafeLoader,
)
except ScannerError:
raise RuntimeError(
|
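A generic PyYAML sketch (not project code): SafeLoader builds only plain Python types, which is all a configuration-style template needs, so dropping FullLoader loses nothing here.

```python
import yaml

template = """
service: demo
provider:
  name: aws
  runtime: python3.9
"""

data = yaml.load(template, Loader=yaml.SafeLoader)  # same call shape as the patched code
print(data["provider"]["runtime"])
```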
tests/mechanisms/LCControlMechanism: Only run benchmark if enabled
Cleanup | @@ -86,7 +86,9 @@ class TestLCControlMechanism:
default_variable = 10.0
)
if mode == 'Python':
- EX = LC.execute
+ def EX(variable):
+ LC.execute(variable)
+ return LC.output_values
elif mode == 'LLVM':
e = pnlvm.execution.MechExecution(LC)
EX = e.execute
@@ -95,20 +97,15 @@ class TestLCControlMechanism:
EX = e.cuda_execute
val = EX([10.0])
-
- # LLVM returns combination of all output ports so let's do that for
- # Python as well
- if mode == 'Python':
- val = [s.value for s in LC.output_ports]
-
- benchmark(EX, [10.0])
-
# All values are the same because LCControlMechanism assigns all of its ControlSignals to the same value
# (the 1st item of its function's value).
# FIX: 6/6/19 - Python returns 3d array but LLVM returns 2d array
# (np.allclose bizarrely passes for LLVM because all the values are the same)
assert np.allclose(val, [[[3.00139776]], [[3.00139776]], [[3.00139776]], [[3.00139776]]])
+ if benchmark.enabled:
+ benchmark(EX, [10.0])
+
def test_lc_control_modulated_mechanisms_all(self):
T_1 = pnl.TransferMechanism(name='T_1')
|
Refactor pyDAPAccess transfer response checking.
Factor out transfer response checking into a new method.
Add a description to the exception for unexpected ACK values. | @@ -312,6 +312,36 @@ class _Command(object):
write_pos += 1
return buf
+ def _check_response(self, response):
+ """! @brief Check the response status byte from CMSIS-DAP transfer commands.
+
+ The ACK bits [2:0] and the protocol error bit are checked. If any error is indicated,
+ the appropriate exception is raised. An exception is also raised for unrecognised ACK
+ values.
+
+ @param self
+ @param response The "Transfer Response" byte from a DAP_Transfer or DAP_TransferBlock
+ command.
+
+ @exception DAPAccessIntf.TransferFaultError Raised for the ACK_FAULT response.
+ @exception DAPAccessIntf.TransferTimeoutError Raised for ACK_WAIT response.
+ @exception DAPAccessIntf.TransferError Raised for other, less common errors, including No
+ ACK, SWD protocol error, and an unknown ACK value. A descriptive error message is used
+ to indicate which of these errors was the cause.
+ """
+ ack = response & DAPTransferResponse.ACK_MASK
+ if ack != DAPTransferResponse.ACK_OK:
+ if ack == DAPTransferResponse.ACK_FAULT:
+ raise DAPAccessIntf.TransferFaultError()
+ elif ack == DAPTransferResponse.ACK_WAIT:
+ raise DAPAccessIntf.TransferTimeoutError()
+ elif ack == DAPTransferResponse.ACK_NO_ACK:
+ raise DAPAccessIntf.TransferError("No ACK received")
+ else:
+ raise DAPAccessIntf.TransferError("Unexpected ACK value (%d) returned by probe" % ack)
+ elif (response & DAPTransferResponse.PROTOCOL_ERROR_MASK) != 0:
+ raise DAPAccessIntf.TransferError("SWD protocol error")
+
def _decode_transfer_data(self, data):
"""! @brief Take a byte array and extract the data from it
@@ -322,17 +352,8 @@ class _Command(object):
if data[0] != Command.DAP_TRANSFER:
raise ValueError('DAP_TRANSFER response error')
- ack = data[2] & DAPTransferResponse.ACK_MASK
- if ack != DAPTransferResponse.ACK_OK:
- if ack == DAPTransferResponse.ACK_FAULT:
- raise DAPAccessIntf.TransferFaultError()
- elif ack == DAPTransferResponse.ACK_WAIT:
- raise DAPAccessIntf.TransferTimeoutError()
- elif ack == DAPTransferResponse.ACK_NO_ACK:
- raise DAPAccessIntf.TransferError("No ACK received")
- raise DAPAccessIntf.TransferError()
- elif (data[2] & DAPTransferResponse.PROTOCOL_ERROR_MASK) != 0:
- raise DAPAccessIntf.TransferError("SWD protocol error")
+ # Check response and raise an exception on errors.
+ self._check_response(data[2])
# Check for count mismatch after checking for DAP_TRANSFER_FAULT
# This allows TransferFaultError or TransferTimeoutError to get
@@ -391,17 +412,8 @@ class _Command(object):
if data[0] != Command.DAP_TRANSFER_BLOCK:
raise ValueError('DAP_TRANSFER_BLOCK response error')
- ack = data[3] & DAPTransferResponse.ACK_MASK
- if ack != DAPTransferResponse.ACK_OK:
- if ack == DAPTransferResponse.ACK_FAULT:
- raise DAPAccessIntf.TransferFaultError()
- elif ack == DAPTransferResponse.ACK_WAIT:
- raise DAPAccessIntf.TransferTimeoutError()
- elif ack == DAPTransferResponse.ACK_NO_ACK:
- raise DAPAccessIntf.TransferError("No ACK received")
- raise DAPAccessIntf.TransferError()
- elif (data[3] & DAPTransferResponse.PROTOCOL_ERROR_MASK) != 0:
- raise DAPAccessIntf.TransferError("SWD protocol error")
+ # Check response and raise an exception on errors.
+ self._check_response(data[3])
# Check for count mismatch after checking for DAP_TRANSFER_FAULT
# This allows TransferFaultError or TransferTimeoutError to get
|
Update ee-install.rst
Found another file with ubuntu 14.04 | @@ -11,13 +11,12 @@ Installing Enterprise Edition
To install Mattermost Enterprise Edition directly please use one of the following guides:
-1. `Production Enterprise Edition on Ubuntu 14.04 <https://docs.mattermost.com/install/install-ubuntu-1404.html>`__
-2. `Production Enterprise Edition on Ubuntu 16.04 <https://docs.mattermost.com/install/install-ubuntu-1604.html>`__
-3. `Production Enterprise Edition on Ubuntu 18.04 <https://docs.mattermost.com/install/install-ubuntu-1804.html>`__
-4. `Production Enterprise Edition on RHEL 7.1 <https://docs.mattermost.com/install/install-rhel-71.html>`__
-5. `Production Enterprise Edition on RHEL 6.6 <https://docs.mattermost.com/install/install-rhel-66.html>`__
-6. `Production Enterprise Edition on Debian Jessie <https://docs.mattermost.com/install/install-debian-88.html>`__
-7. `Production Docker Deployment using Docker Compose <https://docs.mattermost.com/install/prod-docker.html>`__
+1. `Production Enterprise Edition on Ubuntu 16.04 <https://docs.mattermost.com/install/install-ubuntu-1604.html>`__
+2. `Production Enterprise Edition on Ubuntu 18.04 <https://docs.mattermost.com/install/install-ubuntu-1804.html>`__
+3. `Production Enterprise Edition on RHEL 7.1 <https://docs.mattermost.com/install/install-rhel-71.html>`__
+4. `Production Enterprise Edition on RHEL 6.6 <https://docs.mattermost.com/install/install-rhel-66.html>`__
+5. `Production Enterprise Edition on Debian Jessie <https://docs.mattermost.com/install/install-debian-88.html>`__
+6. `Production Docker Deployment using Docker Compose <https://docs.mattermost.com/install/prod-docker.html>`__
Upgrading to Mattermost Enterprise Edition
-------------------------------------------------
|
Add exclusion for owncloud 10.6.0
To PUT (upload) a file, owncloud 10.6.0 uses `/remote.php/dav/...` instead of `/remote.php/webdav/...` | @@ -88,7 +88,7 @@ SecRule REQUEST_METHOD "@streq PUT" \
nolog,\
ver:'OWASP_CRS/3.4.0-dev',\
chain"
- SecRule REQUEST_FILENAME "@contains /remote.php/webdav" \
+ SecRule REQUEST_FILENAME "@rx /remote\.php/(?:webdav|dav)" \
"t:none,\
ctl:ruleRemoveById=920000-920999,\
ctl:ruleRemoveById=932000-932999,\
|
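A quick illustration of what the widened exclusion in the diff above matches. This uses Python's `re` module rather than ModSecurity's regex engine, and the sample request paths are made up:

```python
import re

# The updated pattern matches both the classic WebDAV endpoint and the newer
# DAV endpoint that ownCloud 10.6.0 uses for PUT uploads.
pattern = re.compile(r"/remote\.php/(?:webdav|dav)")

for path in ("/remote.php/webdav/docs/a.txt",
             "/remote.php/dav/files/admin/a.txt",
             "/index.php/apps/files"):
    print(path, bool(pattern.search(path)))
# expected: True, True, False
```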
ScriptNode : Add comment
This should have been included with | @@ -728,6 +728,10 @@ bool ScriptNode::load( bool continueOnError)
void ScriptNode::save() const
{
+ // Caution : `FileMenu.save()` currently contains a duplicate of this code,
+ // so that `serialiseToFile()` can be done in a background task, and the
+ // plug edit can be made on the UI thread. If editing this function, make
+ // sure FileMenu stays in sync.
serialiseToFile( fileNamePlug()->getValue() );
UndoScope undoDisabled( const_cast<ScriptNode *>( this ), UndoScope::Disabled );
const_cast<BoolPlug *>( unsavedChangesPlug() )->setValue( false );
|
as_array: handle frozen input abstract expressions
TN: | @@ -365,7 +365,7 @@ def as_array(self, list_expr):
:param AbstractExpression list_expr: The AST list to convert.
:rtype: ResolvedExpression
"""
- abstract_result = list_expr.map(lambda x: x)
+ abstract_result = Map(list_expr, expr=collection_expr_identity)
abstract_result.prepare()
result = construct(abstract_result)
root_list_type = get_context().root_grammar_class.list_type()
|
Fix ASSERT_ANY_THROW.
Summary:
Pull Request resolved:
ghimport-source-id: | ASSERT_NE(std::string(e.what()).find(substring), std::string::npos); \
}
#define ASSERT_ANY_THROW(statement) \
+ { \
bool threw = false; \
try { \
(void)statement; \
} catch (const std::exception& e) { \
threw = true; \
} \
- ASSERT_TRUE(threw);
+ ASSERT_TRUE(threw); \
+ }
#endif // defined(USE_GTEST)
|
Update generic.txt
Something with no explicit name. | @@ -5812,3 +5812,9 @@ http://220.158.216.134
# Reference: https://www.virustotal.com/gui/domain/tomx.xyz/relations
tomx.xyz
+
+# Reference: https://twitter.com/SecSome/status/1169972222439690241
+# Reference: https://app.any.run/tasks/21339218-b4fd-4084-95d5-5c42fed4c71d/
+
+204.152.219.82:9008
+jobmalawi.com
|
Fixes a bug in gauge-opt jacobian when Instruments are used.
Adds derivatives of Instrument-elements to the gauge optimization
jacobian. Their absence was caught b/c gauge optimization would
fail to find a good final gateset when the 'ls' (least-squares)
method was used but would work fine when BFGS was used. | @@ -401,6 +401,12 @@ def _create_objective_fn(gateset, targetGateset, itemWeights=None,
dS = _np.rollaxis(dS, 2) # shape (n, d1, d2)
assert(dS.shape == (n,d,d))
+ # --- NOTE: ordering here, with running `start` index MUST
+ # correspond to those in GateSet.residuals, which in turn
+ # must correspond to those in GateCalc.residuals - which
+ # currently orders as: gates, compiled_gates, preps, effects.
+
+
# -- Gate terms
# -------------------------
for lbl, G in gs_pre.gates.items():
@@ -414,6 +420,21 @@ def _create_objective_fn(gateset, targetGateset, itemWeights=None,
my_jacMx[start:start+d**2] = wt * result
start += d**2
+
+ # -- Instrument terms
+ # -------------------------
+ for ilbl, Inst in gs_pre.instruments.items():
+ wt = itemWeights.get(ilbl, gateWeight)
+ for lbl, G in Inst.items():
+ # same calculation as for gate terms
+ left = -1 * _np.dot(dS, gs_post.instruments[ilbl][lbl]) # shape (n,d1,d2)
+ right = _np.swapaxes(_np.dot(G, dS), 0,1) # shape (d1, n, d2) -> (n,d1,d2)
+ result = _np.swapaxes(_np.dot(S_inv, left + right), 1,2) # shape (d1, d2, n)
+ result = result.reshape( (d**2, n) ) #must copy b/c non-contiguous
+ my_jacMx[start:start+d**2] = wt * result
+ start += d**2
+
+
# -- prep terms
# -------------------------
for lbl, rho in gs_post.preps.items():
|
dispatch: Remove a stale comment.
This stopped being true in | @@ -453,7 +453,6 @@ export function dispatch_normal_event(event) {
case "stream":
switch (event.op) {
case "update":
- // Legacy: Stream properties are still managed by stream_settings_ui.js on the client side.
stream_events.update_property(event.stream_id, event.property, event.value, {
rendered_description: event.rendered_description,
history_public_to_subscribers: event.history_public_to_subscribers,
|
{App Service} Fix az webapp delete: Fix the command help message
Fixes | @@ -208,7 +208,7 @@ def load_arguments(self, _):
c.argument('logs', options_list=['--logs', '-l'], action='store_true',
help='Enable viewing the log stream immediately after launching the web app')
with self.argument_context('webapp delete') as c:
- c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=None)
+ c.argument('name', arg_type=webapp_name_arg_type, local_context_attribute=None, help='The name of the webapp')
c.argument('keep_empty_plan', action='store_true', help='keep empty app service plan')
c.argument('keep_metrics', action='store_true', help='keep app metrics')
c.argument('keep_dns_registration', action='store_true', help='keep DNS registration',
|
Improve `webbrowser` stubs
`BaseBrowser.open` is an abstract method that should be overridden in all subclasses.
`UnixBrowser.open` only accepts 0, 1 or 2 for the `new` parameter. | import sys
+from abc import abstractmethod
from typing import Callable, Sequence
+from typing_extensions import Literal
class Error(Exception): ...
@@ -23,16 +25,19 @@ class BaseBrowser:
name: str
basename: str
def __init__(self, name: str = ...) -> None: ...
+ @abstractmethod
def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
def open_new(self, url: str) -> bool: ...
def open_new_tab(self, url: str) -> bool: ...
class GenericBrowser(BaseBrowser):
def __init__(self, name: str | Sequence[str]) -> None: ...
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
class BackgroundBrowser(GenericBrowser): ...
class UnixBrowser(BaseBrowser):
+ def open(self, url: str, new: Literal[0, 1, 2] = ..., autoraise: bool = ...) -> bool: ... # type: ignore[override]
raise_opts: list[str] | None
background: bool
redirect_stdout: bool
@@ -49,12 +54,20 @@ class Galeon(UnixBrowser):
class Chrome(UnixBrowser): ...
class Opera(UnixBrowser): ...
class Elinks(UnixBrowser): ...
-class Konqueror(BaseBrowser): ...
-class Grail(BaseBrowser): ...
+
+class Konqueror(BaseBrowser):
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
+
+class Grail(BaseBrowser):
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
if sys.platform == "win32":
- class WindowsDefault(BaseBrowser): ...
+ class WindowsDefault(BaseBrowser):
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
if sys.platform == "darwin":
- class MacOSX(BaseBrowser): ...
- class MacOSXOSAScript(BaseBrowser): ... # In runtime this class does not have `name` and `basename`
+ class MacOSX(BaseBrowser):
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
+
+ class MacOSXOSAScript(BaseBrowser): # In runtime this class does not have `name` and `basename`
+ def open(self, url: str, new: int = ..., autoraise: bool = ...) -> bool: ...
|
Create missing gfortran symbolic link on macOS
Further, the `--oversubscribe` flag is passed to `mpirun` on the macOS platform. | @@ -7,4 +7,6 @@ runs:
run: |
brew install open-mpi
brew install libomp
+ ln -s /usr/local/bin/gfortran-8 /usr/local/bin/gfortran
+ echo "MPI_OPTS=--oversubscribe" >> $GITHUB_ENV
shell: bash
|
Log a meaningful error if Okta connector isn't invoked correctly
Removed the connectors_config.accessed_keys loop; the check is now a container value comparison against the JSON object | @@ -324,9 +324,8 @@ class ConfigLoader(object):
options = {}
connectors_config = self.get_directory_connector_configs()
- if connectors_config is not None:
- for accessed_keys_type in connectors_config.accessed_keys:
- if (accessed_keys_type =='okta') and (connector_name == 'ldap'):
+ if ( 'okta' in connectors_config.value) and (connector_name == 'ldap'):
+ if connectors_config.accessed_keys is not None:
raise AssertionException("Failed to match request : for Okta Connector type receiving LDAP as input type from commnand line")
if connectors_config is not None:
|
Update README.md
Use smaller domain in example | @@ -38,8 +38,8 @@ Init the fetcher:
```
and then, request data for a domain:
```python
- argo_loader.region([-85,-45,10.,20.,0,1000.]).to_xarray()
- argo_loader.region([-85,-45,10.,20.,0,1000.,'2012-01','2014-12']).to_xarray()
+ argo_loader.region([-85,-45,10.,20.,0,100.]).to_xarray()
+ argo_loader.region([-85,-45,10.,20.,0,100.,'2012-01','2014-12']).to_xarray()
```
for profiles of a given float:
```python
@@ -71,7 +71,7 @@ df = ds.to_dataframe()
and to export it to files:
```python
-ds = argo_loader.region([-85,-45,10.,20.,0,1000.]).to_xarray()
+ds = argo_loader.region([-85,-45,10.,20.,0,100.]).to_xarray()
ds.to_netcdf('my_selection.nc')
# or by profiles:
ds.argo.point2profile().to_netcdf('my_selection.nc')
|
921110 add track and patch and 921150 lowercase X
rule 921110 added track and patch
rule 921150 lowercase x | @@ -67,7 +67,7 @@ SecRule REQUEST_HEADERS:'/(?:Content-Length|Transfer-Encoding)/' "@rx ," \
# [ References ]
# http://projects.webappsec.org/HTTP-Request-Smuggling
#
-SecRule ARGS_NAMES|ARGS|XML:/* "@rx (?:\n|\r)+(?:get|post|head|options|connect|put|delete|trace|propfind|propatch|mkcol|copy|move|lock|unlock)\s+" \
+SecRule ARGS_NAMES|ARGS|XML:/* "@rx (?:\n|\r)+(?:get|post|head|options|connect|put|delete|trace|track|patch|propfind|propatch|mkcol|copy|move|lock|unlock)\s+" \
"id:921110,\
phase:2,\
block,\
@@ -205,7 +205,7 @@ SecRule ARGS_NAMES "@rx [\n\r]" \
setvar:'tx.%{rule.id}-OWASP_CRS/WEB_ATTACK/HEADER_INJECTION-%{matched_var_name}=%{tx.0}'"
-SecRule ARGS_NAMES|ARGS|XML:/* "@rx (?:\n|\r)+(?:\s+|location|refresh|(?:set-)?cookie|(?:X-)?(?:forwarded-(?:for|host|server)|host|via|remote-ip|remote-addr|originating-IP))\s*:" \
+SecRule ARGS_NAMES|ARGS|XML:/* "@rx (?:\n|\r)+(?:\s+|location|refresh|(?:set-)?cookie|(?:x-)?(?:forwarded-(?:for|host|server)|host|via|remote-ip|remote-addr|originating-IP))\s*:" \
"id:921160,\
phase:2,\
block,\
|
Remove notes about Client.messages
Since `Client.messages` no longer exists, I think we should remove the note about the cache being named that. | @@ -216,7 +216,7 @@ to handle it, which defaults to print a traceback and ignoring the exception.
.. function:: on_message_delete(message)
Called when a message is deleted. If the message is not found in the
- :attr:`Client.messages` cache, then these events will not be called. This
+ internal message cache, then these events will not be called. This
happens if the message is too old or the client is participating in high
traffic guilds. To fix this, increase the ``max_messages`` option of
:class:`Client`.
@@ -242,7 +242,7 @@ to handle it, which defaults to print a traceback and ignoring the exception.
.. function:: on_message_edit(before, after)
Called when a :class:`Message` receives an update event. If the message is not found
- in the :attr:`Client.messages` cache, then these events will not be called.
+ in the internal message cache, then these events will not be called.
This happens if the message is too old or the client is participating in high
traffic guilds. To fix this, increase the ``max_messages`` option of :class:`Client`.
@@ -278,7 +278,7 @@ to handle it, which defaults to print a traceback and ignoring the exception.
.. function:: on_reaction_add(reaction, user)
Called when a message has a reaction added to it. Similar to on_message_edit,
- if the message is not found in the :attr:`Client.messages` cache, then this
+ if the message is not found in the internal message cache, then this
event will not be called.
.. note::
@@ -299,7 +299,7 @@ to handle it, which defaults to print a traceback and ignoring the exception.
.. function:: on_reaction_remove(reaction, user)
Called when a message has a reaction removed from it. Similar to on_message_edit,
- if the message is not found in the :attr:`Client.messages` cache, then this event
+ if the message is not found in the internal message cache, then this event
will not be called.
.. note::
@@ -320,7 +320,7 @@ to handle it, which defaults to print a traceback and ignoring the exception.
.. function:: on_reaction_clear(message, reactions)
Called when a message has all its reactions removed from it. Similar to :func:`on_message_edit`,
- if the message is not found in the :attr:`Client.messages` cache, then this event
+ if the message is not found in the internal message cache, then this event
will not be called.
:param message: The :class:`Message` that had its reactions cleared.
|
Update monolith-repo link in faq
monolith-repo link went to stable which 404'd. Replace with latest | @@ -126,7 +126,7 @@ up and ``ANSIBLE_ROLES_PATH`` is set accordingly. See `this page`_ for more
information.
.. _`monorepo`: https://en.wikipedia.org/wiki/Monorepo
-.. _`this page`: https://molecule.readthedocs.io/en/stable/examples.html#monolith-repo
+.. _`this page`: https://molecule.readthedocs.io/en/latest/examples.html#monolith-repo
How can I add development/testing-only dependencies?
=====================================================
|
GDB helpers: robustify system.address lookup to work in C mode
TN: | @@ -3,7 +3,7 @@ from __future__ import absolute_import, division, print_function
import gdb
-system_address = gdb.lookup_type('system.address')
+system_address = gdb.lookup_type('system__address')
def ptr_to_int(ptr_value):
|
Update install.md
Clarify that logger needs to be in config, and Home Assistant needs to be restarted, before you can add HACS to the config file. | **NB!: If you move from [`custom_updater`](https://github.com/custom-components/custom_updater) to this see the special note at the bottom here.**
-**NB!: You need to have added `logger:` to your `configuration.yaml` for this to work.**
+**NB!: You need to have added `logger:` to your `configuration.yaml` for this to work. You must add it _and_ restart before trying to add HACS to your `configuration.yaml`.**
This integration requires **a lot** of files.
|
Track exit status from all builds and end with exit status 0 for
pass and 1 for any failures | @@ -8,8 +8,15 @@ git reset --hard $HEROKU_TEST_RUN_COMMIT_VERSION
cd /app
mv CumulusCI/.git .
+failed=0
+
# Run the CumulusCI Unit Tests
nosetests --with-tap --tap-stream --with-coverage --cover-package=cumulusci
+exit_status=$?
+if [ "$exit_status" == "0" ]; then
+ failed=1
+fi
+
# If the last commit message contains [skip CumulusCI-Test], skip running any test flows
git log -n 1 | grep '\[skip CumulusCI-Test\]' > /dev/null
@@ -17,8 +24,7 @@ exit_status=$?
if [ "$exit_status" == "0" ]; then
echo "Found [skip CumulusCI-Test] in the commit message, skipping cci flow test runs"
coveralls
- exit
-
+ exit $failed
fi
# For feature branches, skip running the CumulusCI-Test flows if there is not an open PR unless the last commit message contains [run CumulusCI-Test]
@@ -31,7 +37,7 @@ if [ "$HEROKU_TEST_RUN_BRANCH" != "master" ] &&\
if [ "$pr" == ""] && [ "$exit_status" != "0"]; then
# If there is not an open PR, don't run the CumulusCI-Test flows
coveralls
- exit
+ exit $failed
fi
fi
@@ -56,6 +62,7 @@ if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ] ||\
echo "ok 1 - Successfully ran ci_feature"
else
echo "not ok 1 - Failed ci_feature: `tail -1 cci.log`"
+ failed=1
fi
# Run ci_beta
@@ -65,6 +72,7 @@ if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ] ||\
echo "ok 4 - Successfully ran ci_beta"
else
echo "not ok 4 - Failed ci_beta: `tail -1 cci.log`"
+ failed=1
fi
# Run ci_master
@@ -74,6 +82,7 @@ if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ] ||\
echo "ok 2 - Successfully ran ci_master"
else
echo "not ok 2 - Failed ci_master: `tail -1 cci.log`"
+ failed=1
fi
# Run release_beta
@@ -83,6 +92,7 @@ if [ "$HEROKU_TEST_RUN_BRANCH" == "master" ] ||\
echo "ok 3 - Successfully ran release_beta"
else
echo "not ok 3 - Failed release_beta: `tail -1 cci.log`"
+ failed=1
fi
fi
@@ -94,3 +104,5 @@ coverage combine .coverage CumulusCI-Test/.coverage
# Record to coveralls.io
coveralls
+
+exit $failed
|
tests: clear DB transactions before all db calls
Because of repeatable read isolation,
changes from externally executed commands don't reflect until the transaction is ended. | @@ -405,23 +405,26 @@ class TestCommands(BaseTestCommands):
def test_set_password(self):
from frappe.utils.password import check_password
+ self.assertEqual(check_password("Administrator", "am"), "Administrator")
self.execute("bench --site {site} set-password Administrator test1")
self.assertEqual(self.returncode, 0)
self.assertEqual(check_password("Administrator", "test1"), "Administrator")
# to release the lock taken by check_password
- frappe.db.commit()
+ frappe.db.rollback()
self.execute("bench --site {site} set-admin-password test2")
self.assertEqual(self.returncode, 0)
+ frappe.db.rollback()
self.assertEqual(check_password("Administrator", "test2"), "Administrator")
- frappe.db.commit()
+ frappe.db.rollback()
# Reset it back to original password
original_password = frappe.conf.admin_password or "admin"
self.execute("bench --site {site} set-admin-password %s" % original_password)
self.assertEqual(self.returncode, 0)
+ frappe.db.rollback()
self.assertEqual(check_password("Administrator", original_password), "Administrator")
- frappe.db.commit()
+ frappe.db.rollback()
@skipIf(
not (
|
add support for scaling the grads in pytorch
If DDP is being used, the gradients are going to
get averaged over the world_size and that's not
necessarily what we want to happen. Add a function
to apply a scalar | @@ -216,6 +216,12 @@ class OptimizerManager:
self.current_lr = self.update_lr()
self.global_step += 1
+ def scale_grads(self, scalar):
+ for param_group in self.optimizer.param_groups:
+ for p in param_group['params']:
+ if p.grad is not None:
+ p.grad.data.mul_(scalar)
+
def zero_grad(self):
self.optimizer.zero_grad()
|
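The diff above adds `scale_grads` so that DDP's 1/world_size gradient averaging can be undone before the optimizer step. A standalone sketch of the same idea, with a made-up model, optimizer and scale factor rather than the repo's `OptimizerManager` setup:

```python
import torch

# Multiply every parameter gradient by a scalar (e.g. the DDP world_size)
# to restore the summed gradient after DDP's averaging. Purely illustrative.
def scale_grads(optimizer, scalar):
    for param_group in optimizer.param_groups:
        for p in param_group["params"]:
            if p.grad is not None:
                p.grad.data.mul_(scalar)

model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
loss = model(torch.randn(2, 4)).sum()
loss.backward()
scale_grads(opt, 8)  # e.g. world_size == 8, chosen arbitrarily here
opt.step()
```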
Support for CHM detection
Adding support for detecting ```*.chm``` files. CHM is also a rather popular way to deliver malware.
MIME for CHM:
IANA: | @@ -77,8 +77,8 @@ BAD_TRAIL_PREFIXES = ("127.", "192.168.", "localhost")
LOCALHOST_IP = { 4: "127.0.0.1", 6: "::1" }
IGNORE_DNS_QUERY_SUFFIXES = set(("arpa", "local", "guest", "intranet", "int"))
VALID_DNS_CHARS = string.letters + string.digits + '-' + '.' # Reference: http://stackoverflow.com/a/3523068
-SUSPICIOUS_CONTENT_TYPES = ("application/x-bsh", "application/x-sh", "application/x-shellscript", "application/hta", "text/x-scriptlet", "text/x-sh", "text/x-shellscript")
-SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS = set((".apk", ".egg", ".exe", ".hta", ".hwp", ".ps1", ".scr"))
+SUSPICIOUS_CONTENT_TYPES = ("application/vnd.ms-htmlhelp", "application/x-bsh", "application/x-chm", "application/x-sh", "application/x-shellscript", "application/hta", "text/x-scriptlet", "text/x-sh", "text/x-shellscript")
+SUSPICIOUS_DIRECT_DOWNLOAD_EXTENSIONS = set((".apk", ".chm", ".egg", ".exe", ".hta", ".hwp", ".ps1", ".scr"))
WHITELIST_DIRECT_DOWNLOAD_KEYWORDS = ("cgi", "/scripts/", "/_vti_bin/", "/bin/", "/pub/softpaq/", "/bios/", "/pc-axis/")
SUSPICIOUS_HTTP_REQUEST_REGEXES = (
("potential sql injection", r"information_schema|sysdatabases|sysusers|floor\(rand\(|ORDER BY \d+|\bUNION\s+(ALL\s+)?SELECT\b|\b(UPDATEXML|EXTRACTVALUE)\(|\bCASE[^\w]+WHEN.*THEN\b|\bWAITFOR[^\w]+DELAY\b|\bCONVERT\(|VARCHAR\(|\bCOUNT\(\*\)|\b(pg_)?sleep\(|\bSELECT\b.*\bFROM\b.*\b(WHERE|GROUP|ORDER)\b|\bSELECT \w+ FROM \w+|\b(AND|OR|SELECT)\b.*/\*.*\*/|/\*.*\*/.*\b(AND|OR|SELECT)\b|\b(AND|OR)[^\w]+\d+['\") ]?[=><]['\"( ]?\d+|ODBC;DRIVER|\bINTO\s+(OUT|DUMP)FILE"),
|
Add test for text wrapping
Missed a test. | @@ -185,3 +185,13 @@ def test_table_stringify_booleans(sample_data):
instance = CliTable(data, bool_cols=["Configured"])
assert CHECKMARK in instance.table.table_data[1]
assert CliTable.PICTOGRAM_FALSE in instance.table.table_data[2]
+
+
[email protected]("terminaltables.SingleTable.column_max_width")
+def test_table_wrap_cols(max_width, sample_data):
+ width = 80
+ max_width.return_value = width
+ data = sample_data["service_list"]
+ data[1][1] = data[1][1] + "a" * 256
+ instance = CliTable(data, wrap_cols=["Description"])
+ assert all((len(line) for line in instance.table.table_data[1][1].split("\n")))
|
Make default split work for hidden files and paths with ./..
Fixes | @@ -18,11 +18,11 @@ facilitate pickling.
"""
import torch
import regex
-import pathlib
import unicodedata
import bidi.algorithm as bd
-from os import extsep
+from os import extsep, PathLike
+from pathlib import Path
from PIL import Image
from PIL.Image import Resampling
@@ -91,9 +91,12 @@ def text_reorder(text: str, base_dir: Optional[str] = None) -> str:
return bd.get_display(text, base_dir=base_dir)
-def default_split(x: Union[pathlib.Path, str]) -> str:
- return str(x).split(extsep, 1)[0]
+def default_split(x: Union[PathLike, str]) -> str:
+ x = Path(x)
+ while x.suffixes:
+ x = x.with_suffix('')
+ return str(x)
-def suffix_split(x: Union[pathlib.Path, str], split: Callable[[Union[pathlib.Path, str]], str], suffix: str) -> str:
+def suffix_split(x: Union[PathLike, str], split: Callable[[Union[PathLike, str]], str], suffix: str) -> str:
return split(x) + suffix
|
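A minimal restatement of the new `default_split` behaviour from the diff above, showing why the pathlib version handles hidden files where the old `str(x).split(extsep, 1)[0]` did not; the sample filenames are made up:

```python
from pathlib import Path

# Strip every suffix with pathlib instead of cutting at the first os.extsep.
def default_split(x):
    x = Path(x)
    while x.suffixes:
        x = x.with_suffix("")
    return str(x)

print(default_split("page.0001.png"))  # -> "page"
print(default_split(".hidden.png"))    # -> ".hidden" (the old split returned "")
```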
Re-ordering the if statement so that q>0 is evaluated instead of q==0
Also putting the qs_in calculation in the part of the if statement that
results in it changing. | @@ -34,21 +34,24 @@ np.ndarray[DTYPE_INT_t, ndim=1] flow_receivers,
# choose the node id
node_id = stack_flip_ud[i]
- # if q at the current node is zero, set qs at that node is zero.
- if q[node_id] == 0:
- qs[node_id] = 0
-
- # otherwise, calculate qs based on a local analytical solution. This
- # local analytical solution depends on qs_in, the sediment flux coming
- # into the node from upstream (hence the upstream to downstream node
- # ordering).
+ # If q at current node is greather than zero, calculate qs based on a
+ # local analytical solution. This local analytical solution depends on
+ # qs_in, the sediment flux coming into the node from upstream (hence
+ # the upstream to downstream node ordering).
# Because calculation of qs requires qs_in, this operation must be done
# in an upstream to downstream loop, and cannot be vectorized.
- else:
+
+ if q[node_id] > 0:
qs[node_id] = ((((Es[node_id]) + (1-F_f) * Er[node_id]) / (v_s / q[node_id])) *
(1.0 - exp(-link_lengths[node_id] * v_s / q[node_id])) +
(qs_in[node_id] * exp(-link_lengths[node_id] * v_s / q[node_id])))
# finally, add this nodes qs to recieiving nodes qs_in.
+ # if qs[node_id] == 0, then there is no need for this line to be
+ # evaluated.
qs_in[flow_receivers[node_id]] += qs[node_id]
+
+ else:
+ # if q at the current node is zero, set qs at that node is zero.
+ qs[node_id] = 0
\ No newline at end of file
|
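For reference, the per-node analytical solution used in the diff above can be written as a small standalone function; the parameter values below are made up purely to show the calculation:

```python
from math import exp

# Standalone restatement of the per-node analytical solution from the diff:
# qs = ((Es + (1-F_f)*Er) / (v_s/q)) * (1 - exp(-dx*v_s/q)) + qs_in*exp(-dx*v_s/q)
def node_qs(Es, Er, F_f, v_s, q, dx, qs_in):
    if q > 0:
        return (((Es + (1 - F_f) * Er) / (v_s / q))
                * (1.0 - exp(-dx * v_s / q))
                + qs_in * exp(-dx * v_s / q))
    return 0.0  # no water discharge, no sediment flux

# Illustrative numbers only, not taken from the repository's tests.
print(node_qs(Es=1e-4, Er=5e-5, F_f=0.1, v_s=0.01, q=2.0, dx=100.0, qs_in=0.0))
```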
Update README.md
Add description for dynamic_quantize and thread_tune args | @@ -3279,6 +3279,13 @@ Set a manual seed if necessary for reproducible results.
#### *encoding*
Specify an encoding to be used when reading text files.
+#### *dynamic_quantize*
+Set to True during inference on CPU/GPUs to obtain higher-through put.
+
+#### *thread_tune*
+Set to True during inference if you want pytorch to avoid using multiple-cores per process. this helps process avoid competing for the same limited resources (physical cores) and helps scale ML service with multiple workers.
+
+
#### *config*
A dictionary containing configuration options that should be overriden in a model's config.
|
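A hypothetical sketch of how the two options documented above might be set in a model args dict; the surrounding model construction is omitted and the values are illustrative only, not taken from the repository:

```python
# Assumed arg names follow the documentation added in the diff above.
model_args = {
    "dynamic_quantize": True,  # higher throughput for CPU/GPU inference
    "thread_tune": True,       # keep each worker process from competing for cores
}
print(model_args)
```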
[bugfix] Fix AttributeError on sock.close() on toolforge
On toolforge an old pymysql release is installed.
The AttributeError bug is solved with pymysql >= 0.7.11.
Backport this fix but deprecate the old package. | -"""Miscellaneous helper functions for mysql queries."""
+"""Miscellaneous helper functions for mysql queries.
+
+.. deprecated:: 7.0
+ Support of pymysql < 0.7.11
+"""
#
# (C) Pywikibot team, 2016-2021
#
# Distributed under the terms of the MIT license.
#
+import struct
from typing import Optional
import pkg_resources
import pywikibot
from pywikibot import config
+from pywikibot.backports import removesuffix
+from pywikibot.tools import issue_deprecation_warning
try:
@@ -18,6 +25,34 @@ except ImportError:
raise ImportError('MySQL python module not found. Please install PyMySQL.')
+COM_QUIT = 0x01
+
+
+class _OldConnection(pymysql.connections.Connection):
+
+ """Representation of a socket with a mysql server.
+
+ This class is used to patch close() method for pymysql<0.7.11 (T216741).
+
+ .. versionadded:: 7.0
+ .. deprecated:: 7.0
+ Update your pymysql package
+ """
+
+ def close(self):
+ """Send the quit message and close the socket."""
+ if self._closed or self._sock is None:
+ super().close()
+
+ send_data = struct.pack('<iB', 1, COM_QUIT)
+ try:
+ self._write_bytes(send_data)
+ except Exception:
+ pass
+ finally:
+ self._force_close()
+
+
def mysql_query(query: str, params=None,
dbname: Optional[str] = None,
verbose: Optional[bool] = None):
@@ -54,14 +89,23 @@ def mysql_query(query: str, params=None,
else:
credentials = {'read_default_file': config.db_connect_file}
- connection = pymysql.connect(host=config.db_hostname_format.format(dbname),
- database=config.db_name_format.format(dbname),
- port=config.db_port,
- charset='utf8',
- defer_connect=query == 'test', # for tests
- **credentials)
-
- pymysql_version = pkg_resources.parse_version(pymysql.__version__)
+ pymysql_version = pkg_resources.parse_version(
+ removesuffix(pymysql.__version__, '.None'))
+ args = {
+ 'host': config.db_hostname_format.format(dbname),
+ 'database': config.db_name_format.format(dbname),
+ 'port': config.db_port,
+ 'charset': 'utf8',
+ 'defer_connect': query == 'test', # for tests
+ }
+
+ if pymysql_version < pkg_resources.parse_version('0.7.11'):
+ issue_deprecation_warning(
+ 'pymysql package release {}'.format(pymysql_version),
+ instead='pymysql >= 0.7.11', since='7.0.0')
+ connection = _OldConnection(**args, **credentials)
+ else:
+ connection = pymysql.connect(**args, **credentials)
if pymysql_version < pkg_resources.parse_version('1.0.0'):
from contextlib import closing
|
Fix like_comment and unlike_comment
Added missing fields in
like_comment and unlike_comment | @@ -638,12 +638,20 @@ class API(object):
return self.send_request(url)
def like_comment(self, comment_id):
- data = self.json_data()
+ data = self.json_data({
+ "is_carousel_bumped_post": "false",
+ "container_module": "comments_v2",
+ "feed_position": "0"
+ })
url = 'media/{comment_id}/comment_like/'.format(comment_id=comment_id)
return self.send_request(url, data)
def unlike_comment(self, comment_id):
- data = self.json_data()
+ data = self.json_data({
+ "is_carousel_bumped_post": "false",
+ "container_module": "comments_v2",
+ "feed_position": "0"
+ })
url = 'media/{comment_id}/comment_unlike/'.format(comment_id=comment_id)
return self.send_request(url, data)
|
Fix race condition with interrupts
Summary:
For some reason this just popped in now on py36.
Test Plan: , was previously failing about 25% of the time now for some reason | @@ -68,10 +68,6 @@ def raise_interrupts_as(error_cls):
yield
return
- if _received_interrupt["received"]:
- _received_interrupt["received"] = False
- raise error_cls()
-
original_signal_handler = signal.getsignal(signal.SIGINT)
def _new_signal_handler(signo, _):
@@ -82,6 +78,12 @@ def _new_signal_handler(signo, _):
try:
_replace_interrupt_signal(_new_signal_handler)
signal_replaced = True
+
+ # Raise if the previous signal handler received anything
+ if _received_interrupt["received"]:
+ _received_interrupt["received"] = False
+ raise error_cls()
+
yield
finally:
if signal_replaced:
|
Updated methodology of timestamp entry
Updated the methodology used to generate timestamp (using $(command)
rather than `command`) | @@ -18,6 +18,7 @@ mkdir -p "$LOGPATH"
# shellcheck disable=SC2129
echo "*** Start config parameters ****" >> "$LOG"
echo "Timestamp: [`date`]" >> "$LOG"
+echo -e "\tTimestamp: $(date -R)" >> "$LOG"
# shellcheck disable=SC2002
cat "$ARM_CONFIG"|sed '/^[#;].*$/d;/^$/d;/if/d;/^ /d;/^else/d;/^fi/d;/KEY=/d;/PASSWORD/d' >> "$LOG"
echo "*** End config parameters ****" >> "$LOG"
|
Fix test
I guess defining it as a variable is needed | @@ -1183,7 +1183,7 @@ class MPHSEBSTest(PymatgenTest):
self.assertEqual(len(vis.kpoints.kpts), 180)
with pytest.warns(BadInputSetWarning, match=r"Hybrid functionals"):
- MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, user_incar_settings={"ALGO": "Fast"})
+ vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, user_incar_settings={"ALGO": "Fast"})
def test_override_from_prev_calc(self):
prev_run = self.TEST_FILES_DIR / "static_silicon"
|
fix for rendering single file from AE in DL for sequence
Solves an issue with rendering a single-frame sequence, e.g. with 00000 in its file name. | @@ -6,6 +6,7 @@ import pyblish.api
from avalon import api
from openpype.lib import env_value_to_bool
+from openpype.lib.delivery import collect_frames
from openpype_modules.deadline import abstract_submit_deadline
from openpype_modules.deadline.abstract_submit_deadline import DeadlineJobInfo
@@ -102,24 +103,18 @@ class AfterEffectsSubmitDeadline(
def get_plugin_info(self):
deadline_plugin_info = DeadlinePluginInfo()
- context = self._instance.context
- script_path = context.data["currentFile"]
render_path = self._instance.data["expectedFiles"][0]
- if len(self._instance.data["expectedFiles"]) > 1:
+ file_name, frame = list(collect_frames([render_path]).items())[0]
+ if frame:
# replace frame ('000001') with Deadline's required '[#######]'
# expects filename in format project_asset_subset_version.FRAME.ext
render_dir = os.path.dirname(render_path)
file_name = os.path.basename(render_path)
- arr = file_name.split('.')
- assert len(arr) == 3, \
- "Unable to parse frames from {}".format(file_name)
- hashed = '[{}]'.format(len(arr[1]) * "#")
-
- render_path = os.path.join(render_dir,
- '{}.{}.{}'.format(arr[0], hashed,
- arr[2]))
+ hashed = '[{}]'.format(len(frame) * "#")
+ file_name = file_name.replace(frame, hashed)
+ render_path = os.path.join(render_dir, file_name)
deadline_plugin_info.Comp = self._instance.data["comp_name"]
deadline_plugin_info.Version = self._instance.data["app_version"]
|
Documenting release workflow
* version doc
* dev to release nb
* Redirected release procedure to wiki
To avoid duplication and risk of divergence. | @@ -269,16 +269,5 @@ reviewers will be notified when you add them.
Versioning
----------
-Versioning uses the following convention: MAJOR.MINOR.PATCH, where:
-
-PATCH version when there are backwards-compatible bug fixes or
-enhancements, without alteration to Python's modules or data/binaries.
-MINOR version when there are minor API changes or new functionality in a
-backwards-compatible manner, or when there are alteration to Python's
-modules or data/binaries (which requires to re-run installer for people
-working on the dev version), MAJOR version when there are major
-incompatible API changes, Beta releases follow the following convention:
-
-MAJOR.MINOR.PATCH-beta.x (with x = 0, 1, 2, etc.) Stable version is
-indicated in the file version.txt. For development version (on master),
-the version is "dev".
+The procedure for creating a new ``ivadomed`` release is described
+`here <https://github.com/ivadomed/ivadomed/wiki/create-new-release>`_.
|
Fedora: Fixup for Python 3.7.0 as in a not-updated Fedora 29
* Mostly needed for OBS which apparently does not update the minor
version and has no yum or dnf installed to do it manually | @@ -309,10 +309,15 @@ typedef long Py_hash_t;
* function that does it instead.
*
* TODO: Make it work for Win32 Python <= 3.7 too.
+ * TODO: The Python 3.7.0 on Linux doesn't work this way either, was a bad
+ * CPython release apparently.
*/
#if (defined(_WIN32) || defined(__MSYS__)) && PYTHON_VERSION < 0x380
#define Nuitka_GC_Track PyObject_GC_Track
#define Nuitka_GC_UnTrack PyObject_GC_UnTrack
+#elif PYTHON_VERSION == 0x370
+#define Nuitka_GC_Track PyObject_GC_Track
+#define Nuitka_GC_UnTrack PyObject_GC_UnTrack
#else
#define Nuitka_GC_Track _PyObject_GC_TRACK
#define Nuitka_GC_UnTrack _PyObject_GC_UNTRACK
|
tests/models/Botvinick: reinitialize binary structures
Add results test. | @@ -190,10 +190,17 @@ def test_botvinick_model(benchmark, mode):
words_hidden_layer.reinitialize([[0,0,0]])
response_layer.reinitialize([[0,0]])
task_layer.reinitialize([[0,0]])
+ comp.reinitialize()
return results
res = benchmark(run, mode=='LLVM')
+ assert np.allclose(res[0], [0.05330691, 0.05330691, 0.03453411])
+ assert np.allclose(res[1], [0.20351701, 0.11078586, 0.04995664])
+ assert np.allclose(res[2], [0.05330691, 0.05330691, 0.03453411])
+ assert np.allclose(res[3], [0.11168014, 0.20204928, 0.04996308])
+ assert np.allclose(res[4], [0.05330691, 0.05330691, 0.03453411])
+ assert np.allclose(res[5], [0.11327619, 0.11238362, 0.09399782])
if mode == 'LLVM':
return
|
Update decoder name for bart
"bart-large" is ""facebook/bart-large" on huggingface model hub | @@ -1997,7 +1997,7 @@ model_args = {
# Initialize model
model = Seq2SeqModel(
encoder_decoder_type="bart",
- encoder_decoder_name="bart-large",
+ encoder_decoder_name="facebook/bart-large",
args=model_args,
)
|
Make _winapi.SetNamedPipeHandleState args Optional
As can be seen here: the arguments can be Optional (and are used as such in CPython). | @@ -64,7 +64,7 @@ def GetVersion() -> int: ...
def OpenProcess(desired_access: int, inherit_handle: bool, process_id: int) -> int: ...
def PeekNamedPipe(handle: int, size: int = ...) -> Union[Tuple[int, int], Tuple[bytes, int, int]]: ...
def ReadFile(handle: int, size: int, overlapped: Union[int, bool] = ...) -> Tuple[bytes, int]: ...
-def SetNamedPipeHandleState(named_pipe: int, mode: int, max_collection_count: int, collect_data_timeout: int) -> None: ...
+def SetNamedPipeHandleState(named_pipe: int, mode: Optional[int], max_collection_count: Optional[int], collect_data_timeout: Optional[int]) -> None: ...
def TerminateProcess(handle: int, exit_code: int) -> None: ...
def WaitForMultipleObjects(handle_seq: Sequence[int], wait_flag: bool, milliseconds: int = ...) -> int: ...
def WaitForSingleObject(handle: int, milliseconds: int) -> int: ...
|
[ci] Fix doc deploy folder
This was unpacking into `tvm-site/docs` instead of just `docs` at the top level | @@ -746,10 +746,10 @@ def deploy_docs() {
git status
git checkout -B $DOCS_DEPLOY_BRANCH
- rm -rf tvm-site/docs
- mkdir -p tvm-site/docs
- tar xf ../docs.tgz -C tvm-site/docs
- COMMIT=$(cat tvm-site/docs/commit_hash)
+ rm -rf docs
+ mkdir -p docs
+ tar xf ../docs.tgz -C docs
+ COMMIT=$(cat docs/commit_hash)
git add .
git config user.name tvm-bot
git config user.email [email protected]
|
fix: do not add/change image-field if user is not allowed
Do not show the add/change image-field (dropdown) if the user is not allowed to change it | @@ -65,6 +65,7 @@ frappe.ui.form.setup_user_image_event = function(frm) {
});
}
+ if (frm.fields_dict[frm.meta.image_field].df.read_only == 0) {
frm.sidebar.image_wrapper.on('click', ':not(.sidebar-image-actions)', (e) => {
let $target = $(e.currentTarget);
if ($target.is('a.dropdown-toggle, .dropdown')) {
@@ -74,6 +75,7 @@ frappe.ui.form.setup_user_image_event = function(frm) {
dropdown.toggleClass('open');
e.stopPropagation();
});
+ }
// bind click on image_wrapper
frm.sidebar.image_wrapper.on('click', '.sidebar-image-change, .sidebar-image-remove', function(e) {
|
fix typo
Fixing a (glaring) typo. | @@ -37,7 +37,7 @@ Citations
=========
If you find `dynesty` useful in your research, please cite
-`Speagle (2019) <https://arxiv.org/abs/1904.02180>_`. You are
+**`Speagle (2019) <https://arxiv.org/abs/1904.02180>`_**. You are
also encouraged to cite:
* Nested Sampling:
|