message (string, 13-484 chars) | diff (string, 38-4.63k chars)
---|---|
Perform passes -> Add Passes
It's actually adding that many passes rather than performing them: adding 1 pass makes 2 passes, it does not perform just 1 pass. | @@ -2093,9 +2093,11 @@ class RootNode(list):
menu_op.Enable(False)
menu.AppendSubMenu(operation_convert_submenu, _("Convert Operation"))
duplicate_menu = wx.Menu()
+ gui.Bind(wx.EVT_MENU, self.menu_passes(node, 1),
+ duplicate_menu.Append(wx.ID_ANY, _("Add 1 pass."), "", wx.ITEM_NORMAL))
for i in range(2, 10):
gui.Bind(wx.EVT_MENU, self.menu_passes(node, i),
- duplicate_menu.Append(wx.ID_ANY, _("Perform %d passes.") % i, "", wx.ITEM_NORMAL))
+ duplicate_menu.Append(wx.ID_ANY, _("Add %d passes.") % i, "", wx.ITEM_NORMAL))
menu.AppendSubMenu(duplicate_menu, _("Passes"))
if isinstance(node.object, RasterOperation):
raster_step_menu = wx.Menu()
|
Update Pushgateway to 1.3.1
[ENHANCEMENT] Web UI: Improved metrics text alignment.
[BUGFIX] Web UI: Fix deletion of groups with empty label values. | @@ -164,7 +164,7 @@ packages:
<<: *default_context
static:
<<: *default_static_context
- version: 1.3.0
+ version: 1.3.1
license: ASL 2.0
URL: https://github.com/prometheus/pushgateway
summary: Prometheus push acceptor for ephemeral and batch jobs.
|
Increase fio ssh timeout
FIO takes time to write data on an NFS system with synthetic networking | @@ -74,7 +74,7 @@ class Fio(Tool):
iodepth: int,
numjob: int,
time: int = 120,
- ssh_timeout: int = 600,
+ ssh_timeout: int = 1800,
block_size: str = "4K",
size_gb: int = 0,
direct: bool = True,
|
Update get-started.rst
fix -- dash | @@ -22,7 +22,7 @@ This will pop up a GUI window showing the full range of widgets available
to an application using Toga.
There is a known issue with the current build on some Mac OS distributions. If you are
-running Mac OS Sierra or higher, use the following installation command instead:
+running Mac OS Sierra or higher, use the following installation command instead::
$ pip install toga-demo --pre
|
Port.to_conf() serialises VLAN names correctly
Resolves | @@ -218,10 +218,14 @@ class Port(Conf):
def to_conf(self):
result = super(Port, self).to_conf()
+ if result is not None:
if 'stack' in result and result['stack'] is not None:
result['stack'] = {}
for stack_config in list(self.stack_defaults_types.keys()):
result['stack'][stack_config] = self.stack[stack_config]
+ if self.native_vlan is not None:
+ result['native_vlan'] = self.native_vlan.name
+ result['tagged_vlans'] = [vlan.name for vlan in self.tagged_vlans]
return result
def vlans(self):
|
Fix dispatching of backwards kernel for ROCm.
Summary:
Use WARP_SIZE consistently also for the dispatch dimensions.
Pull Request resolved: | @@ -324,7 +324,7 @@ Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indice
}
dim3 grid(THCCeilDiv(num_indices, (int64_t) 4), THCCeilDiv(stride, (int64_t) 128));
- dim3 block(32, 4);
+ dim3 block(WARP_SIZE, 4);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad.scalar_type(), "embedding_backward", [&] {
embedding_backward_kernel<<<grid, block, 0, stream>>>(
|
DOC: added use of end_date & start_date in live mode.
issues & | @@ -176,6 +176,13 @@ Here is the breakdown of the new arguments:
- ``simulate_orders``: Enables the paper trading mode, in which orders are
simulated in Catalyst instead of processed on the exchange. It defaults to
``True``.
+- ``end_date``: When setting the end_date to a time in the **future**,
+ it will schedule the live algo to finish gracefully at the specified date.
+- ``start_date``: (**Will be implemented in the future**)
+ The live algo starts by default in the present, as mentioned above.
+ by setting the start_date to a time in the future, the algorithm would
+ essentially sleep and when the predefined time comes, it would start executing.
+
Here is a complete algorithm for reference:
`Buy Low and Sell High <https://github.com/enigmampc/catalyst/blob/master/catalyst/examples/buy_low_sell_high_live.py>`_
|
Extract type of TClonesArray objects from streamer
This enables reading of (split) fTracks branch in the ROOT's Event
example.
We need to attach proper streamers to the branches corresponding to the
class members of objects saved in TClonesArray. The object class name is
available in the title of the corresponding streamer, e.g.
TStreamerObjectPointer. | @@ -174,6 +174,7 @@ class TTreeMethods(object):
_copycontext = True
_vector_regex = re.compile(b"^vector<(.+)>$")
+ _objectpointer_regex = re.compile(b"\((.*)\)")
def _attachstreamer(self, branch, streamer, streamerinfosmap, isTClonesArray):
if streamer is None:
@@ -227,6 +228,9 @@ class TTreeMethods(object):
elif isinstance(streamer, digDeeperTypes):
typename = streamer._fTypeName.rstrip(b"*")
if typename in streamerinfosmap:
+ m = self._objectpointer_regex.search(streamer._fTitle)
+ if typename == b'TClonesArray' and m is not None:
+ typename = m.group(1)
members = streamerinfosmap[typename].members
elif isinstance(streamer, uproot.rootio.TStreamerSTL):
try:
|
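As a quick illustration of the title-based lookup described in the commit message above, here is a minimal, hypothetical Python sketch; the title string is made up, only the regex mirrors the one added in the diff.

```python
import re

# Mirror of the regex added above: grab the element class name from the
# parenthesised part of a TStreamerObjectPointer title.
objectpointer_regex = re.compile(rb"\((.*)\)")

title = b"fTracks: pointer to array of tracks (Track)"  # hypothetical streamer title
typename = b"TClonesArray"
m = objectpointer_regex.search(title)
if m is not None:
    typename = m.group(1)
print(typename)  # b'Track'
```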
Fix Cisco.SMB.get_interfaces script
HG--
branch : feature/microservices | @@ -30,6 +30,7 @@ class Script(BaseScript):
"Po": "aggregated", # Port-channel/Portgroup
"Tu": "tunnel", # Tunnel
"Vl": "SVI", # Vlan
+ "oo": "management", # oob
}
def execute(self):
|
Failed to format message
File "/usr/lib/python2.7/site-packages/oslo_privsep/daemon.py", line 204, in remote_call
raise exc_type(*result[2])
File "/usr/lib/python2.7/site-packages/zun/common/exception.py", line 204, in __init__
self.message = str(self.message) % kwargs
ValueError: unsupported format character ')' (0x29) at index 345 | @@ -201,7 +201,7 @@ class ZunException(Exception):
self.message = message
try:
- self.message = str(self.message) % kwargs
+ self.message = self.message % kwargs
except KeyError:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
|
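A minimal reproduction of the formatting failure shown in the traceback above; the message text is invented, but any message containing a stray '%' that is not a valid conversion specifier fails the same way.

```python
# %-formatting raises ValueError when it hits "%" followed by a character
# that is not a conversion specifier, e.g. the ")" in "95%)".
msg = "No space left on device (disk 95%) for container %(uuid)s"
try:
    msg % {"uuid": "abc123"}
except ValueError as err:
    print(err)  # unsupported format character ')' (0x29) at index ...
```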
Update howto-transparent-vms.md for newer versions
Update howto-transparent-vms.md for newer versions | @@ -14,9 +14,13 @@ Internal Network* setup can be applied to other setups.
## 1. Configure Proxy VM
-On the proxy machine, **eth0** is connected to the internet. **eth1** is
-connected to the internal network that will be proxified and configured
-to use a static ip (192.168.3.1).
+First, we have to find out under which name Ubuntu has mapped our network interfaces. You can find this information with:
+
+{{< highlight bash >}}
+ip link
+{{< / highlight >}}
+
+Usually with Ubuntu and Virtualbox, **eth0** or **enp0s3** (Ubuntu 15.10 and newer) is connected to the internet and **eth1** or **enp0s8** (Ubuntu 15.10 and newer) is connected to the internal network that will be proxified and configured to use a static ip (192.168.3.1). If the names differ, use the ones you got from the *ip link* command.
### VirtualBox configuration
@@ -65,6 +69,7 @@ Replace **/etc/dnsmasq.conf** with the following configuration:
{{< highlight none >}}
# Listen for DNS requests on the internal network
interface=eth1
+bind-interfaces
# Act as a DHCP server, assign IP addresses to clients
dhcp-range=192.168.3.10,192.168.3.100,96h
# Broadcast gateway and dns server information
@@ -93,10 +98,11 @@ IP address via DHCP:
## 3. Redirect traffic to mitmproxy
-To redirect traffic to mitmproxy, we need to add two iptables
+To redirect traffic to mitmproxy, we need to enable IP forwarding and add two iptables
rules:
{{< highlight bash >}}
+sudo sysctl -w net.ipv4.ip_forward=1
sudo iptables -t nat -A PREROUTING -i eth1 -p tcp --dport 80 -j REDIRECT --to-port 8080
sudo iptables -t nat -A PREROUTING -i eth1 -p tcp --dport 443 -j REDIRECT --to-port 8080
{{< / highlight >}}
|
Replace str with bytes for imaplib append and ParseFlags
Last Argument of APPEND
You can see it's parsed with the bytes regex MapCRLF re.compile(br'\r\n|\r|\n')
You can see it's parsed with the bytes regex Flags
re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)') | @@ -55,7 +55,7 @@ class IMAP4:
def socket(self) -> _socket: ...
def recent(self) -> _CommandResults: ...
def response(self, code: str) -> _CommandResults: ...
- def append(self, mailbox: str, flags: str, date_time: str, message: str) -> str: ...
+ def append(self, mailbox: str, flags: str, date_time: str, message: bytes) -> str: ...
def authenticate(self, mechanism: str, authobject: Callable[[bytes], bytes | None]) -> tuple[str, str]: ...
def capability(self) -> _CommandResults: ...
def check(self) -> _CommandResults: ...
@@ -171,5 +171,5 @@ class _Authenticator:
def Internaldate2tuple(resp: bytes) -> time.struct_time: ...
def Int2AP(num: int) -> str: ...
-def ParseFlags(resp: str) -> tuple[str, ...]: ...
+def ParseFlags(resp: bytes) -> tuple[bytes, ...]: ...
def Time2Internaldate(date_time: float | time.struct_time | str) -> str: ...
|
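A small sanity check of the bytes-based parsing referenced above; the FLAGS response line is a made-up example, but ParseFlags itself is the real imaplib helper.

```python
import imaplib

# ParseFlags matches a bytes regex internally, so it expects a bytes response
# line and returns each flag as bytes.
resp = b'1 (FLAGS (\\Seen \\Answered) UID 42)'
print(imaplib.ParseFlags(resp))  # (b'\\Seen', b'\\Answered')
```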
Fix getting chapter list for jpmtl
Fix getting chapter list for jpmtl | @@ -42,26 +42,24 @@ class JpmtlCrawler(Crawler):
logger.info('Novel author: %s', self.novel_author)
toc_url = chapters_url % self.novel_id
- chapters = self.get_json(toc_url)
- toc = (chapters[0]['chapters'])
- #for a in soup.select('ol.book-volume__chapters li a'):
- for chapter in toc:
- chap_id = len(self.chapters) + 1
- if len(self.chapters) % 100 == 0:
- vol_id = chap_id//100 + 1
- vol_title = 'Volume ' + str(vol_id)
+
+ toc = self.get_json(toc_url)
+ print(toc)
+ for volume in toc:
self.volumes.append({
- 'id': vol_id,
- 'title': vol_title,
+ 'id': volume['volume'],
+ 'title': volume['volume_title'],
})
- # end if
+ for chapter in volume['chapters']:
+ chap_id = len(self.chapters) + 1
self.chapters.append({
'id': chap_id,
- 'volume': vol_id,
+ 'volume': volume['volume'],
'url': self.absolute_url(self.novel_url+'/'+str(chapter['id'])),
'title': chapter['title'] or ('Chapter %d' % chap_id),
})
# end for
+ #end for
# end def
def download_chapter_body(self, chapter):
|
Remove par_file argument in SubmititLauncher
submitit doesn't use this argument anymore | @@ -13,12 +13,11 @@ log = logging.getLogger(__name__)
class SubmititLauncher(Launcher):
- def __init__(self, queue, folder, queue_parameters, conda_file=None, par_file=None):
+ def __init__(self, queue, folder, queue_parameters, conda_file=None):
self.queue = queue
self.queue_parameters = queue_parameters
self.folder = folder
self.conda_file = conda_file
- self.par_file = par_file
self.config = None
self.task_function = None
self.verbose = None
@@ -67,13 +66,13 @@ class SubmititLauncher(Launcher):
self.config.hydra.job.num_jobs = num_jobs
if self.queue == "auto":
executor = submitit.AutoExecutor(
- folder=self.folder, conda_file=self.conda_file, par_file=self.par_file
+ folder=self.folder, conda_file=self.conda_file
)
elif self.queue == "slurm":
executor = submitit.SlurmExecutor(folder=self.folder)
elif self.queue == "chronos":
executor = submitit.ChronosExecutor(
- folder=self.folder, conda_file=self.conda_file, par_file=self.par_file
+ folder=self.folder, conda_file=self.conda_file
)
elif self.queue == "local":
executor = submitit.LocalExecutor(folder=self.folder)
|
Fix "then then" typos in docstrings.
They were both added in commit | @@ -138,7 +138,7 @@ def chunked(iterable, n, strict=False):
To use a fill-in value instead, see the :func:`grouper` recipe.
If the length of *iterable* is not divisible by *n* and *strict* is
- ``True``, then then ``ValueError`` will be raised before the last
+ ``True``, then ``ValueError`` will be raised before the last
list is yielded.
"""
@@ -1138,7 +1138,7 @@ def sliced(seq, n, strict=False):
[(1, 2, 3), (4, 5, 6), (7, 8)]
If the length of *seq* is not divisible by *n* and *strict* is
- ``True``, then then ``ValueError`` will be raised before the last
+ ``True``, then ``ValueError`` will be raised before the last
slice is yielded.
This function will only work for iterables that support slicing.
|
Mark test of cleanup behavior as local
This informs pytest not to try to load its own configuration, but rather
run what is loaded locally in the test. | +import pytest
+
import parsl
from parsl.app.app import App
-from parsl.tests.configs.local_threads import config
+from parsl.tests.configs.local_ipp import config
+
+parsl.clear()
+dfk = parsl.load(config)
@App('python')
@@ -9,14 +14,12 @@ def slow_double(x, dur=0.1):
time.sleep(dur)
return x * 5
-
+@pytest.mark.local
def test_cleanup_behavior_221():
""" A1 A2 A3 -> cleanup
B1 B2 B3
"""
- parsl.clear()
- dfk = parsl.load(config)
round_1 = []
for i in range(0, 10):
|
MacOS: For CPython on MacOS linking against libpython appears to be
necessary.
* It's not for Anaconda, so let's try and avoid going into that branch
by checking for static Python.
* Also finally have a "macosx_target" to check for, instead of using
darwin checks everywhere. | @@ -124,6 +124,8 @@ static_libpython = getBoolOption("static_libpython", False)
# no longer cross compile this way.
win_target = os.name == "nt"
+macosx_target = sys.platform == "darwin"
+
# Windows subsystem mode: Disable console for windows builds.
win_disable_console = getBoolOption("win_disable_console", False)
@@ -135,7 +137,7 @@ unstripped_mode = getBoolOption("unstripped_mode", False)
# Clang compiler mode, forced on MacOS X and FreeBSD, optional on Linux.
clang_mode = getBoolOption("clang_mode", False)
-if sys.platform == "darwin" or "freebsd" in sys.platform:
+if macosx_target or "freebsd" in sys.platform:
clang_mode = True
# MinGW compiler mode, optional and interesting to Windows only.
@@ -1234,7 +1236,7 @@ if win_target:
env.Append(LIBPATH = [win_lib_path])
env.Append(LIBS = [win_lib_name])
-elif not module_mode:
+elif not module_mode or (macosx_target and not static_libpython):
# Add the python library path to the library path
python_lib_path = os.path.join(python_prefix, "lib")
env.Append(LIBPATH = [python_lib_path])
@@ -1244,7 +1246,9 @@ elif not module_mode:
"libpython" + python_abi_version + ".a"
)
- if static_libpython and os.path.exists(static_lib_filename):
+ if not module_mode and \
+ static_libpython and \
+ os.path.exists(static_lib_filename):
env.Append(LIBS = [File(static_lib_filename)]) # @UndefinedVariable
else:
# Debian and Ubuntu distinguish the system libraries like this.
@@ -1319,7 +1323,7 @@ if debug_mode or unstripped_mode:
)
else:
if gcc_mode:
- if sys.platform == "darwin":
+ if macosx_target:
env.Append(
LINKFLAGS = ["-Wno-deprecated-declarations"]
)
@@ -1361,7 +1365,7 @@ if win_target and target_arch == "x86_64" and gcc_mode:
)
# Set load libpython from binary directory default
-if gcc_mode and sys.platform != "darwin" and not win_target and not module_mode:
+if gcc_mode and not macosx_target and not win_target and not module_mode:
if standalone_mode:
rpath = "$$ORIGIN"
else:
@@ -1928,7 +1932,7 @@ if uninstalled_python:
getWindowsPythonDLLPath(),
os.path.dirname(result_basepath) or '.'
)
- elif sys.platform == "darwin" and not module_mode:
+ elif macosx_target and not module_mode:
# TODO: Move this to where we do it for Linux already.
python_dll_filename = "libpython" + python_abi_version + ".dylib"
|
Update index.md
updated to sound classification | @@ -209,9 +209,9 @@ The task of action recognition is to predict action that is being performed on a
| ------------------------- | ---------------| -------------- | ------ | ------- |
| RGB-I3D, pretrained on ImageNet\* | [TensorFlow\*](./i3d-rgb-tf/i3d-rgb-tf.md) | i3d-rgb-tf | | |
-## Audio Classification
+## Sound Classification
-The task of audio classification is to predict what sounds in an audio fragment.
+The task of sound classification is to predict what sounds are in an audio fragment.
| Model Name | Implementation | OMZ Model Name | GFlops | mParams |
| ------------------------- | ---------------| -------------- | ------ | ------- |
|
Restricting apple-clang to version 10.0 or greater
No support for std::optional before version 10.0 | @@ -34,7 +34,7 @@ class CppTaskflowConan(ConanFile):
# Exclude compilers not supported by cpp-taskflow
if (compiler == "gcc" and compiler_version < "7.3") or \
(compiler == "clang" and compiler_version < "6") or \
- (compiler == "apple-clang" and compiler_version < "9") or \
+ (compiler == "apple-clang" and compiler_version < "10.0") or \
(compiler == "Visual Studio" and compiler_version < "15"):
raise ConanInvalidConfiguration("cpp-taskflow requires C++17 standard or higher. {} {} is not supported.".format(compiler, compiler.version))
|
Minor whitespace tweaks to shorten a few really long lines.
The code which initializes the parser is still really complex and ugly.
But now it is limited to lines no more than 120 characters ;-)
I'm not sure if this is an improvement in readability, but I tried. | @@ -784,9 +784,9 @@ class Cmd(cmd.Cmd):
do_not_parse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString
after_elements = \
pyparsing.Optional(pipe + pyparsing.SkipTo(output_parser ^ string_end, ignore=do_not_parse)('pipeTo')) + \
- pyparsing.Optional(
- output_parser + pyparsing.SkipTo(string_end, ignore=do_not_parse).setParseAction(lambda x: x[0].strip())(
- 'outputTo'))
+ pyparsing.Optional(output_parser +
+ pyparsing.SkipTo(string_end,
+ ignore=do_not_parse).setParseAction(lambda x: x[0].strip())('outputTo'))
if self.case_insensitive:
self.multilineCommand.setParseAction(lambda x: x[0].lower())
oneline_command.setParseAction(lambda x: x[0].lower())
@@ -796,15 +796,19 @@ class Cmd(cmd.Cmd):
self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')
self.blankLineTerminator.setResultsName('terminator')
self.blankLineTerminationParser = ((self.multilineCommand ^ oneline_command) +
- pyparsing.SkipTo(self.blankLineTerminator, ignore=do_not_parse).setParseAction(
- lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')
- self.multilineParser = (((self.multilineCommand ^ oneline_command) + pyparsing.SkipTo(terminator_parser,
+ pyparsing.SkipTo(self.blankLineTerminator,
+ ignore=do_not_parse).setParseAction(
+ lambda x: x[0].strip())('args') +
+ self.blankLineTerminator)('statement')
+ self.multilineParser = (((self.multilineCommand ^ oneline_command) +
+ pyparsing.SkipTo(terminator_parser,
ignore=do_not_parse).setParseAction(
lambda x: x[0].strip())('args') + terminator_parser)('statement') +
pyparsing.SkipTo(output_parser ^ pipe ^ string_end, ignore=do_not_parse).setParseAction(
lambda x: x[0].strip())('suffix') + after_elements)
self.multilineParser.ignore(self.commentInProgress)
- self.singleLineParser = ((oneline_command + pyparsing.SkipTo(terminator_parser ^ string_end ^ pipe ^ output_parser,
+ self.singleLineParser = ((oneline_command +
+ pyparsing.SkipTo(terminator_parser ^ string_end ^ pipe ^ output_parser,
ignore=do_not_parse).setParseAction(
lambda x: x[0].strip())('args'))('statement') +
pyparsing.Optional(terminator_parser) + after_elements)
@@ -2056,8 +2060,8 @@ class Cmd2TestCase(unittest.TestCase):
line_num += 1
command = ''.join(command)
# Send the command into the application and capture the resulting output
- stop = self.cmdapp.onecmd_plus_hooks(command)
- # TODO: should act on ``stop``
+ # TODO: Should we get the return value and act if stop == True?
+ self.cmdapp.onecmd_plus_hooks(command)
result = self.outputTrap.read()
# Read the expected result from transcript
if line.startswith(self.cmdapp.prompt):
@@ -2089,7 +2093,6 @@ class Cmd2TestCase(unittest.TestCase):
self.outputTrap.tear_down()
-#noinspection PyClassHasNoInit
class CmdResult(namedtuple('CmdResult', ['out', 'err', 'war'])):
"""Derive a class to store results from a named tuple so we can tweak dunder methods for convenience.
|
Update README.md
Removed extra empty line. | @@ -73,7 +73,6 @@ https://wiki.fulmo.org/downloads/raspiblitz-2018-10-20.img.gz (or [build your ow
2. Write the SD-Card image to your SD Card - if you need details, see here:
https://www.raspberrypi.org/documentation/installation/installing-images/README.md
-
## Boot your RaspiBlitz
Connect all hardware like on photo and boot it up by connecting the power.
|
Properly log RpcError with no parent request
This should get rid of the unexpected BufferError traceback. | @@ -592,6 +592,14 @@ class MTProtoSender:
# However receiving a File() with empty bytes is "common".
# See #658, #759 and #958. They seem to happen in a container
# which contain the real response right after.
+ #
+ # But, it might also happen that we get an *error* for no parent request.
+ # If that's the case attempting to read from body which is None would fail with:
+ # "BufferError: No more data left to read (need 4, got 0: b''); last read None".
+ # This seems to be particularly common for "RpcError(error_code=-500, error_message='No workers running')".
+ if rpc_result.error:
+ self._log.info('Received error without parent request: %s', rpc_result.error)
+ else:
try:
with BinaryReader(rpc_result.body) as reader:
if not isinstance(reader.tgread_object(), upload.File):
|
add model to admin
to be able to view domain-less logs in the long run
to support QA as well in the short term | @@ -4,7 +4,7 @@ from django.contrib.auth.models import User
from django_digest.models import PartialDigest, UserNonce
-from .models import DomainPermissionsMirror, HQApiKey
+from .models import DomainPermissionsMirror, HQApiKey, UserHistory
class DDUserNonceAdmin(admin.ModelAdmin):
@@ -45,3 +45,21 @@ class HQApiKeyAdmin(admin.ModelAdmin):
admin.site.register(HQApiKey, HQApiKeyAdmin)
+
+
+class UserHistoryAdmin(admin.ModelAdmin):
+ list_display = ['changed_at', 'domain', 'user_id', 'changed_by', 'action', 'message']
+ list_filter = ['domain', 'action']
+ sortable_by = []
+
+ def has_add_permission(self, request):
+ return False
+
+ def has_change_permission(self, request, obj=None):
+ return False
+
+ def has_delete_permission(self, request, obj=None):
+ return False
+
+
+admin.site.register(UserHistory, UserHistoryAdmin)
|
Update transformer.py
one mistake in the docstring of transformer_prepare_decoder | @@ -625,7 +625,7 @@ def transformer_prepare_decoder(targets, hparams, features=None):
Returns:
decoder_input: a Tensor, bottom of decoder stack
- decoder_self_attention_bias: a bias tensor for use in encoder self-attention
+ decoder_self_attention_bias: a bias tensor for use in decoder self-attention
"""
if hparams.prepend_mode == "prepend_inputs_full_attention":
decoder_self_attention_bias = (
|
take last element instead of popping the transaction
Other components doing analysis will otherwise have a mistaken
view of the final globalstate of a transaction | @@ -308,9 +308,9 @@ class LaserEVM:
return [new_global_state], op_code
except TransactionEndSignal as end_signal:
- transaction, return_global_state = (
- end_signal.global_state.transaction_stack.pop()
- )
+ transaction, return_global_state = end_signal.global_state.transaction_stack[
+ -1
+ ]
if return_global_state is None:
if (
|
Parameterize the pyplot "clear_figure" tests
In `st.pyplot`, the logic for clearing the "global" pyplot figure is different from the logic for clearing a specified pyplot figure. This PR makes the distinction clearer in our tests. | """st.pyplot unit tests."""
+from typing import Optional
from unittest.mock import patch
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
+from parameterized import parameterized
import streamlit as st
from streamlit.web.server.server import MEDIA_ENDPOINT
@@ -31,6 +33,11 @@ class PyplotTest(DeltaGeneratorTestCase):
if matplotlib.get_backend().lower() != "agg":
plt.switch_backend("agg")
+ def tearDown(self):
+ # Clear the global pyplot figure between tests
+ plt.clf()
+ super().tearDown()
+
def test_st_pyplot(self):
"""Test st.pyplot.
@@ -55,33 +62,30 @@ class PyplotTest(DeltaGeneratorTestCase):
self.assertEqual(el.imgs.imgs[0].caption, "")
self.assertTrue(el.imgs.imgs[0].url.startswith(MEDIA_ENDPOINT))
- def test_st_pyplot_clear_figure(self):
- """st.pyplot should clear the passed-in figure."""
- # Assert that plt.clf() is called by st.pyplot() only if
- # clear_fig is True
- for clear_figure in [True, False, None]:
+ @parameterized.expand([("true", True), ("false", False), ("none", None)])
+ def test_st_pyplot_clear_global_figure(self, _, clear_figure: Optional[bool]):
+ """st.pyplot should clear the global figure if `clear_figure` is
+ True *or* None.
+ """
plt.hist(np.random.normal(1, 1, size=100), bins=20)
with patch.object(plt, "clf", wraps=plt.clf, autospec=True) as plt_clf:
st.pyplot(clear_figure=clear_figure)
- if clear_figure is False:
- plt_clf.assert_not_called()
- else:
+ if clear_figure in (True, None):
plt_clf.assert_called_once()
+ else:
+ plt_clf.assert_not_called()
- # Manually clear for the next loop iteration
- plt.clf()
-
- # Assert that fig.clf() is called by st.pyplot(fig) only if
- # clear_figure is True
- for clear_figure in [True, False, None]:
+ @parameterized.expand([("true", True), ("false", False), ("none", None)])
+ def test_st_pyplot_clear_figure(self, _, clear_figure: Optional[bool]):
+ """st.pyplot should clear the passed-in figure if `clear_figure` is True."""
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.hist(np.random.normal(1, 1, size=100), bins=20)
with patch.object(fig, "clf", wraps=fig.clf, autospec=True) as fig_clf:
st.pyplot(fig, clear_figure=clear_figure)
- if clear_figure:
+ if clear_figure is True:
fig_clf.assert_called_once()
else:
fig_clf.assert_not_called()
|
Onefile: Fix, LTO mode was always enabled for onefile compilation.
* The Windows gcc doesn't currently support LTO; support
for it is coming in a future release.
* This makes sure LTO usage is identical between backend and
onefile compilation. | @@ -381,7 +381,7 @@ if "clang" in env.the_cc_name:
lto_mode = enableLtoSettings(
env=env,
lto_mode=lto_mode,
- pgo_mode=False, # TODO: Have this here too, decompression might benefit.
+ pgo_mode="no", # TODO: Have this here too, decompression might benefit.
msvc_mode=msvc_mode,
gcc_mode=gcc_mode,
clang_mode=clang_mode,
@@ -427,9 +427,6 @@ version for lto mode (>= 4.6)."""
% (env["CXX"], env["CXXVERSION"])
)
- if lto_mode and mingw_mode:
- sys.exit("""The gcc compiler on Windows, doesn't currently support lto mode.""")
-
# For gcc 4.6 there are some new interesting functions.
if gcc_version >= (4, 6):
env.Append(CCFLAGS=["-fpartial-inlining"])
|
fix intro.md references in versioned_docs
The new docusaurus version should have been generated
_after_ the update to the source intro.md
So manually correct this here. | @@ -27,7 +27,8 @@ Use the version switcher in the top bar to switch between documentation versions
| | Version | Release notes | Python Versions |
| -------|---------------------------|-------------------------------------------------------------------------------------| -------------------|
-| ►| 1.1 (Stable) | [Release notes](https://github.com/facebookresearch/hydra/releases/tag/v1.1.1) | **3.6 - 3.9** |
+| ►| 1.2 (Stable) | [Release notes](https://github.com/facebookresearch/hydra/releases/tag/v1.2.0) | **3.6 - 3.10** |
+| | 1.1 | [Release notes](https://github.com/facebookresearch/hydra/releases/tag/v1.1.1) | **3.6 - 3.9** |
| | 1.0 | [Release notes](https://github.com/facebookresearch/hydra/releases/tag/v1.0.7) | **3.6 - 3.8** |
| | 0.11 | [Release notes](https://github.com/facebookresearch/hydra/releases/tag/v0.11.3) | **2.7, 3.5 - 3.8** |
|
Correct SensitivityDemandSimulateTool
Fix to pass output_parameters as `'A' 'B' 'C'` instead of `['A', 'B',
'C']` | @@ -1368,11 +1368,9 @@ class SensitivityDemandSimulateTool(object):
# output_parameters
output_parameters = parameters[6].valueAsText.split(';')
- args = [None, 'sensitivity-demand-simulate', '--scenario-path', scenario_path, '--weather-path', weather_path,
+ run_cli(None, 'sensitivity-demand-simulate', '--scenario-path', scenario_path, '--weather-path', weather_path,
'--samples-folder', samples_folder, '--simulation-folder', simulation_folder, '--num-simulations',
- num_simulations, '--sample-index', sample_index, '--output-parameters', output_parameters]
-
- run_cli(*args)
+ num_simulations, '--sample-index', sample_index, '--output-parameters', *output_parameters)
class SensitivityDemandAnalyzeTool(object):
def __init__(self):
|
Remove cooldown on !module command
This allows for other bots to run !module disable X and then !module
disable Y quickly in succession. | @@ -278,4 +278,6 @@ class AdminCommandsModule(BaseModule):
self.commands["unsilence"] = Command.raw_command(self.cmd_unsilence, level=500, description="Unsilence the bot")
self.commands["unmute"] = self.commands["unsilence"]
- self.commands["module"] = Command.raw_command(self.cmd_module, level=500, description="Modify module")
+ self.commands["module"] = Command.raw_command(
+ self.cmd_module, level=500, description="Modify module", delay_all=0, delay_user=0
+ )
|
Fix wrk2 docker file
It looks like commit overwrote
the content of the wrk2 workload with the one from wrk. Fixing that
here. | FROM REPLACE_NULLWORKLOAD_UBUNTU
-# apache-install-pm
+# nodejs-install-pm
RUN apt-get update
-RUN apt-get install -y apache2
-# service_stop_disable apache2
-# apache-install-pm
+RUN apt-get install -y nodejs
+# service_stop_disable nodejs
+# nodejs-install-pm
-# wrk-ARCHx86_64-install-man
-RUN /bin/true; cd /home/REPLACE_USERNAME/; git clone https://github.com/wg/wrk.git; cd /home/REPLACE_USERNAME/wrk; make all
-# wrk-ARCHx86_64-install-man
+# wrk2-ARCHx86_64-install-man
+RUN /bin/true; cd /home/REPLACE_USERNAME/; git clone https://github.com/giltene/wrk2; cd /home/REPLACE_USERNAME/wrk2; make all
+# wrk2-ARCHx86_64-install-man
-# wrk-ARCHppc64le-install-man
-RUN /bin/true; cd /home/REPLACE_USERNAME/; git clone https://github.com/wg/wrk.git; cd /home/REPLACE_USERNAME/wrk; make all; /bin/true
-# wrk-ARCHppc64le-install-man
+# wrk2-ARCHppc64le-install-man
+RUN /bin/true; cd /home/REPLACE_USERNAME/; git clone https://github.com/giltene/wrk2; cd /home/REPLACE_USERNAME/wrk2; make all; /bin/true
+# wrk2-ARCHppc64le-install-man
RUN chown -R REPLACE_USERNAME:REPLACE_USERNAME /home/REPLACE_USERNAME
|
interfaceio: Start IO loop on demand
It used to start up as part of module initialization. Now it is
started when it's actually going to be used. | @@ -611,9 +611,10 @@ class TapInterface (Interface, EventMixin):
RXData,
])
- io_loop = TapIO()
+ io_loop = None
def __init__ (self, name="", tun=False):
+ self._start_io_loop()
self.tap = None
Interface.__init__(self, name)
EventMixin.__init__(self)
@@ -621,6 +622,12 @@ class TapInterface (Interface, EventMixin):
if not name: self._name = self.tap.name
self.io_loop.add(self)
+ @classmethod
+ def _start_io_loop (self):
+ if TapInterface.io_loop: return
+ TapInterface.io_loop = TapIO()
+ return TapInterface.io_loop
+
@property
def is_tap (self):
return self.tap.is_tap
|
Add 'just fix' command
This runs 'cargo fix' across the project to fix rustc warnings. | @@ -62,6 +62,20 @@ clean:
$cmd
done
+fix:
+ #!/usr/bin/env sh
+ set -e
+ for crate in $(echo {{crates}})
+ do
+ for feature in $(echo {{features}})
+ do
+ cmd="cargo fix --manifest-path=$crate/Cargo.toml $feature"
+ echo "\033[1m$cmd\033[0m"
+ $cmd
+ done
+ done
+ echo "\n\033[92mFix Success\033[0m\n"
+
lint:
#!/usr/bin/env sh
set -e
|
swarming: add log when expired task slice is going to run
This is followup for
No functional change. | @@ -1279,7 +1279,20 @@ def bot_reap_task(bot_dimensions, bot_version):
t = request.task_slice(i)
limit += datetime.timedelta(seconds=t.expiration_secs)
- if limit < utils.utcnow():
+ slice_expiration = to_run.created_ts
+ slice_index = task_to_run.task_to_run_key_slice_index(to_run.key)
+ for i in range(slice_index + 1):
+ t = request.task_slice(i)
+ slice_expiration += datetime.timedelta(seconds=t.expiration_secs)
+
+ now = utils.utcnow()
+ if now <= slice_expiration and limit < now:
+ logging.info('Task slice is expired, but task is not expired; '
+ 'slice index: %d, slice expiration: %s, '
+ 'task expiration: %s',
+ slice_index, slice_expiration, limit)
+
+ if limit < now:
if expired >= 5:
# Do not try to expire too many tasks in one poll request, as this
# kills the polling performance in case of degenerate queue: this
|
Update Nameserver for INWX
I've added all nameservers of INWX | @@ -11,7 +11,7 @@ except ImportError:
LOGGER = logging.getLogger(__name__)
-NAMESERVER_DOMAINS = ["inwx.com"]
+NAMESERVER_DOMAINS = ["ns.inwx.de", "ns2.inwx.de", "ns3.inwx.eu", "ns4.inwx.com", "ns5.inwx.net", "ns.domrobot.com", "ns.domrobot.net", "ns.domrobot.org", "ns.domrobot.info", "ns.domrobot.biz"]
def provider_parser(subparser):
|
GDB helpers: add an option to print gen. variable names in "*state" cmd
TN: | @@ -21,12 +21,30 @@ class BaseCommand(gdb.Command):
class StateCommand(BaseCommand):
- """Display the state of the currently running property."""
+ """Display the state of the currently running property.
+
+This command may be followed by a "/X" flag, where X is one or several of:
+
+ * s: to print the name of the Ada variables that hold DSL values.
+"""
def __init__(self, context):
super(StateCommand, self).__init__(context, 'state', gdb.COMMAND_DATA)
def invoke(self, arg, from_tty):
+ with_locs = False
+
+ if arg:
+ if not arg.startswith('/'):
+ print('Invalid argument')
+ return
+
+ for c in arg[1:]:
+ if c == 's':
+ with_locs = True
+ else:
+ print('Invalid flag: {}'.format(repr(c)))
+
frame = gdb.selected_frame()
state = State.decode(self.context, frame)
if state is None:
@@ -44,7 +62,11 @@ class StateCommand(BaseCommand):
# lower-case is required since GDB ignores case insentivity
# for Ada from the Python API.
value = frame.read_var(b.gen_name.lower())
- print('{} = {}'.format(b.dsl_name, value))
+ print('{}{} = {}'.format(
+ b.dsl_name,
+ ' ({})'.format(b.gen_name) if with_locs else '',
+ value
+ ))
class BreakCommand(BaseCommand):
|
Handle None values passed to compare_dicts utility
compare_dicts utility in ocs_ci/utility/environment_check.py needs
to handle None values passed to it. This is to avoid Errors in
ocs-ci tests. This PR addresses | @@ -44,7 +44,12 @@ def compare_dicts(before, after):
Returns:
list: List of 2 lists - ('added' and 'removed' are lists)
+ None: If both parameters are None
"""
+ if not before and not after:
+ log.debug("compare_dicts: both before and after are None")
+ return None
+
added = []
removed = []
uid_before = [
@@ -186,6 +191,8 @@ def get_status_after_execution(exclude_labels=None):
leftovers = {"Leftovers added": [], "Leftovers removed": []}
for kind, kind_diff in diffs_dict.items():
+ if not kind_diff:
+ continue
if kind_diff[0]:
leftovers["Leftovers added"].append({f"***{kind}***": kind_diff[0]})
leftover_detected = True
|
feat: update harbor-detect template
Added version detection
Change the path
Updated the body matchers | @@ -2,22 +2,37 @@ id: harbor-detect
info:
name: Harbor Detect
- author: pikpikcu
+ author: pikpikcu,daffainfo
severity: info
+ description: Harbor is an open source trusted cloud native registry project that stores, signs, and scans content.
+ reference:
+ - https://github.com/goharbor/harbor
+ metadata:
+ verified: true
+ shodan-query: http.favicon.hash:657337228
tags: tech,harbor
requests:
- method: GET
path:
- - '{{BaseURL}}'
+ - '{{BaseURL}}/api/v2.0/systeminfo'
matchers-condition: and
matchers:
- type: word
- words:
- - "<title>Harbor</title>"
part: body
+ words:
+ - '"auth_mode"'
+ - '"harbor_version"'
+ - '"harbor_version"'
+ condition: and
- type: status
status:
- 200
+
+ extractors:
+ - type: regex
+ group: 1
+ regex:
+ - '(?m)"harbor_version":"([0-9.]+)",'
|
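A quick check of the version extractor added in the template above, run against a made-up /api/v2.0/systeminfo response body:

```python
import re

# Same pattern as the template's extractor; the JSON body below is fabricated.
body = '{"auth_mode":"db_auth","harbor_version":"2.5.1","registry_url":"https://harbor.local"}'
m = re.search(r'(?m)"harbor_version":"([0-9.]+)",', body)
print(m.group(1) if m else None)  # 2.5.1
```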
Expand an error diagnosis to include the wider range of possibilities
As we have seen, we do not have, at this time, any way to disambiguate
between stratisd not running at all and stratisd running but no D-Bus
connection established.
It would be possible to do better, by actually checking. | @@ -98,10 +98,13 @@ def interpret_errors(errors):
error.get_dbus_name() == \
'org.freedesktop.DBus.Error.AccessDenied':
return "Most likely stratis has insufficient permissions for the action requested."
+ # We have observed two causes of this problem. The first is that
+ # stratisd is not running at all. The second is that stratisd has not
+ # yet established its D-Bus service.
if isinstance(error, dbus.exceptions.DBusException) and \
error.get_dbus_name() == \
'org.freedesktop.DBus.Error.NameHasNoOwner':
- return "Most likely the Stratis daemon, stratisd, is not running."
+ return "Most likely stratis is unable to connect to the stratisd D-Bus service."
return None
# pylint: disable=broad-except
|
Windows: Fixup for older gcc as used by Anaconda.
* Seems that one doesn't like the "\" in the filename, but likes
the "/" only, despite being on Windows. | @@ -1759,6 +1759,10 @@ if gcc_mode:
with open(tmp_linker_filename, "w") as tmpfile:
for filename in source_files:
filename = ".".join(filename.split(".")[:-1]) + ".o"
+
+ if os.name == "nt":
+ filename = filename.replace(os.path.sep, "/")
+
tmpfile.write('"%s"\n' % filename)
tmpfile.write(env.subst("$SOURCES"))
|
Update All Rescan Endpoint
Modify the Rescan Endpoints to support the movement to multiprocess.
Next steps are to provide a method for interprocess communication via
interprocess Pipes. These pipes will provide an interface for the stop
query interface. | @@ -358,9 +358,11 @@ class SpiderFootWebUi:
# Start running a new scan
newId = sf.genScanInstanceGUID(scanname)
- t = SpiderFootScanner(scanname, scantarget, targetType, newId,
+ p = mp.Process(target=SpiderFootScanner, args=(scanname, scantarget, targetType, newId
modlist, cfg, modopts)
- t.start()
+ )
+ p.start()
+
# Wait until the scan has initialized
while globalScanStatus.getStatus(newId) == None:
@@ -403,8 +405,9 @@ class SpiderFootWebUi:
# Start running a new scan
newId = sf.genScanInstanceGUID(scanname)
- t = SpiderFootScanner(scanname, scantarget.lower(), targetType, newId, modlist, cfg, modopts)
- t.start()
+ p = mp.Process(target=SpiderFootScanner, args=(scanname, scantarget.lower(),
+ targetType, newId, modlist, cfg, modopts))
+ p.start()
# Wait until the scan has initialized
while globalScanStatus.getStatus(newId) == None:
@@ -816,7 +819,8 @@ class SpiderFootWebUi:
if targetType is None:
if not cli:
return self.error("Invalid target type. Could not recognize it as " + \
- "a human name, IP address, IP subnet, ASN, domain name or host name.")
+ "a human name, IP address, IP subnet, ASN, domain " + \
+ "name or host name.")
else:
return json.dumps(["ERROR", "Unrecognised target type."])
|
TST: Additional tests of counts_per_seq
[CHANGED] Made API consistent across all cases of counts_per_seq. Existing test methods extended to check explicitly for exclude_unobserved=True in test_counts_per_seq.
[NEW] Created a test_entropy_excluding_unobserved to make sure entropy calculations remain the same. | @@ -2155,7 +2155,7 @@ class AlignmentBaseTests(SequenceCollectionBaseTests):
def test_counts_per_seq(self):
"""SequenceCollection.counts_per_seq handles motif length, allow_gaps etc.."""
- data = {"a": "AAAA??????", "b": "CCCGGG--NN"}
+ data = {"a": "AAAA??????", "b": "CCCGGG--NN", "c": "CCGGTTCCAA"}
coll = self.Class(data=data, moltype=DNA)
got = coll.counts_per_seq()
self.assertEqual(got["a", "A"], 4)
@@ -2170,6 +2170,10 @@ class AlignmentBaseTests(SequenceCollectionBaseTests):
self.assertEqual(len(got.motifs), 16)
self.assertEqual(got["a", "AA"], 2)
self.assertEqual(got["b", "GG"], 1)
+ got = coll.counts_per_seq(exclude_unobserved=True)
+ self.assertEqual(
+ got["c"].to_dict(),{"C":4, "G": 2, "T": 2, "A": 2}
+ )
def test_counts_per_pos(self):
"""correctly count motifs"""
@@ -2222,6 +2226,13 @@ class AlignmentBaseTests(SequenceCollectionBaseTests):
entropy = a.entropy_per_pos()
assert_allclose(entropy, [numpy.nan, numpy.nan, numpy.nan])
+ def test_entropy_excluding_unobserved(self):
+ """omitting unobserved motifs should not affect entropy calculation"""
+ a = self.Class(dict(a="ACAGGG", b="AGACCC", c="GGCCTA"), moltype=DNA)
+ entropy_excluded = a.entropy_per_seq(exclude_unobserved=True)
+ entropy_unexcluded = a.entropy_per_seq(exclude_unobserved=False)
+ self.assertEqual(entropy_excluded, entropy_unexcluded)
+
def test_distance_matrix(self):
"""Alignment distance_matrix should produce correct scores"""
data = dict([("s1", "ACGTACGTA"), ("s2", "GTGTACGTA")])
@@ -2763,6 +2774,9 @@ class ArrayAlignmentSpecificTests(TestCase):
a = self.a
f = a.counts_per_seq()
self.assertEqual(f.array, array([[3, 1], [1, 3]]))
+ f = a.counts_per_seq(exclude_unobserved=True)
+ self.assertEqual(f.array, array([[3, 1], [1, 3]]))
+
def test_entropy_per_pos(self):
"""entropy_per_pos should get entropy of each pos"""
|
Fixes broken API key generation link in readme
The previous link no longer exists/has been moved. | @@ -8,7 +8,7 @@ The Labelbox Python API offers a simple, user-friendly way to interact with the
* Use Python 3.7 or 3.8.
* Create an account by visiting http://app.labelbox.com/.
-* [Generate an API key](https://labelbox.com/docs/api/api-keys).
+* [Generate an API key](https://labelbox.com/docs/api/getting-started#create_api_key).
## Installation & authentication
|
Update integration.R
adapted function to provided notebook | @@ -45,14 +45,14 @@ runSeuratRPCA = function(data, batch, hvg=2000) {
require(Seurat)
batch_list = SplitObject(data, split.by = batch)
- batch_list <- lapply(X = batch_list, FUN = function(x) {
- ScaleData(object = x)
- RunPCA(x, features = hvg)
+ features <- SelectIntegrationFeatures(batch_list)
+ batch_list <- lapply(X = objects, FUN = function(x) {
+ x %>% ScaleData(features = features) %>% RunPCA(features = features)
})
anchors = FindIntegrationAnchors(
object.list = batch_list,
- anchor.features = hvg,
+ anchor.features = features,
scale = T,
l2.norm = T,
dims = 1:30,
|
Fix `Mask2FormerForUniversalSegmentation`
fix | @@ -2468,7 +2468,7 @@ class Mask2FormerForUniversalSegmentation(Mask2FormerPreTrainedModel):
transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states
output_auxiliary_logits = (
- self.config.use_auxiliary_loss if output_auxiliary_logits is None else output_auxiliary_logits
+ self.config.output_auxiliary_logits if output_auxiliary_logits is None else output_auxiliary_logits
)
if not output_auxiliary_logits:
auxiliary_logits = None
|
Spanner: Adding system test for "partial" key ranges
This is a follow-on to | @@ -1003,6 +1003,60 @@ class TestSessionAPI(unittest.TestCase, _TestData):
expected = all_data_rows[START+1 : END+1]
self._check_row_data(rows, expected)
+ def test_read_partial_range_until_end(self):
+ row_count = 3000
+ start = 1000
+ session, committed = self._set_up_table(row_count)
+ snapshot = session.snapshot(read_timestamp=committed, multi_use=True)
+ all_data_rows = list(self._row_data(row_count))
+
+ expected_map = {
+ ('start_closed', 'end_closed'): all_data_rows[start:],
+ ('start_closed', 'end_open'): [],
+ ('start_open', 'end_closed'): all_data_rows[start+1:],
+ ('start_open', 'end_open'): [],
+ }
+ for start_arg in ('start_closed', 'start_open'):
+ for end_arg in ('end_closed', 'end_open'):
+ range_kwargs = {start_arg: [start], end_arg: []}
+ keyset = KeySet(
+ ranges=(
+ KeyRange(**range_kwargs),
+ ),
+ )
+
+ rows = list(snapshot.read(
+ self.TABLE, self.COLUMNS, keyset))
+ expected = expected_map[(start_arg, end_arg)]
+ self._check_row_data(rows, expected)
+
+ def test_read_partial_range_from_beginning(self):
+ row_count = 3000
+ end = 2000
+ session, committed = self._set_up_table(row_count)
+ snapshot = session.snapshot(read_timestamp=committed, multi_use=True)
+ all_data_rows = list(self._row_data(row_count))
+
+ expected_map = {
+ ('start_closed', 'end_closed'): all_data_rows[:end+1],
+ ('start_closed', 'end_open'): all_data_rows[:end],
+ ('start_open', 'end_closed'): [],
+ ('start_open', 'end_open'): [],
+ }
+ for start_arg in ('start_closed', 'start_open'):
+ for end_arg in ('end_closed', 'end_open'):
+ range_kwargs = {start_arg: [], end_arg: [end]}
+ keyset = KeySet(
+ ranges=(
+ KeyRange(**range_kwargs),
+ ),
+ )
+
+ rows = list(snapshot.read(
+ self.TABLE, self.COLUMNS, keyset))
+ expected = expected_map[(start_arg, end_arg)]
+ self._check_row_data(rows, expected)
+
def test_read_with_range_keys_index_single_key(self):
row_count = 10
columns = self.COLUMNS[1], self.COLUMNS[2]
|
ceph-mgr: run mgr_modules.yml only on the first mgr host
the task will be delegated to mons[0] for all mgr hosts, so we can just run it on the first host and have the same effect. | - name: include mgr_modules.yml
include_tasks: mgr_modules.yml
- when: ceph_mgr_modules|length > 0
\ No newline at end of file
+ when:
+ - ceph_mgr_modules|length > 0
+ - inventory_hostname == groups[mgr_group_name][0]
\ No newline at end of file
|
Introducing the changes in Croatia holidays as of 2020
* Remembrance Day was added,
* Independence Day is no longer a holiday
* Statehood Day date has changed
closes | +from datetime import date
+
from ..core import WesternCalendar, ChristianMixin
from ..registry_tools import iso_register
@@ -9,9 +11,7 @@ class Croatia(WesternCalendar, ChristianMixin):
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "International Workers' Day"),
(6, 22, "Anti-Fascist Struggle Day"),
- (6, 25, "Statehood Day"),
(8, 5, "Victory & Homeland Thanksgiving & Day of Croatian defenders"),
- (10, 8, "Independence Day"),
)
include_epiphany = True
@@ -23,3 +23,22 @@ class Croatia(WesternCalendar, ChristianMixin):
include_christmas = True
include_boxing_day = True
boxing_day_label = "St. Stephen's Day"
+
+ def get_fixed_holidays(self, year):
+ days = super().get_fixed_holidays(year)
+
+ if year >= 2020:
+ days.extend([
+ # Statehood date has changed in 2020
+ (date(year, 5, 30), "Statehood Day"),
+ # Remembrance Day has been added in 2020
+ (date(year, 11, 18), "Remembrance Day"),
+ ])
+ else:
+ days.extend([
+ (date(year, 6, 25), "Statehood Day"),
+ # Independance day was a holiday until 2020
+ (date(year, 10, 8), "Independence Day"),
+ ])
+
+ return days
|
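A hedged usage sketch, assuming the change above ships in the workalendar package and that Croatia is importable from workalendar.europe:

```python
from workalendar.europe import Croatia

cal = Croatia()
holidays_2020 = dict(cal.holidays(2020))  # {date: label, ...}
# From 2020 on the calendar should list Statehood Day on May 30 and
# Remembrance Day on Nov 18, and no longer list Independence Day (Oct 8).
for day, label in sorted(holidays_2020.items()):
    print(day, label)
```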
Remove redundant .json() invocation.
The auth method already unmarshals the response body into a dict from JSON. | @@ -606,7 +606,7 @@ class Client(object):
if role:
params['role'] = role
- return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token).json()
+ return self.auth('/v1/auth/{0}/login'.format(mount_point), json=params, use_token=use_token)
def create_userpass(self, username, password, policies, mount_point='userpass', **kwargs):
"""
|
Added Aqua Enterprise
Added Aqua Enterprise | @@ -2630,3 +2630,8 @@ requests:
name: "Coverity"
dsl:
- "status_code==200 && (\"-994319624\" == mmh3(base64_py(body)))"
+
+ - type: dsl
+ name: "Aqua Enterprise"
+ dsl:
+ - "status_code==200 && (\"-1261322577\" == mmh3(base64_py(body)))"
|
Select annotation-like objects with string or int
If selector is a string, it is transformed to dict(type=selector); if selector
is an int, it indexes the resulting list of objects filtered down by row, col
and secondary y. | @@ -1194,6 +1194,10 @@ class BaseFigure(object):
def _selector_matches(obj, selector):
if selector is None:
return True
+ # If selector is a string then put it at the 'type' key of a dictionary
+ # to select objects where "type":selector
+ if type(selector) == type(str()):
+ selector = dict(type=selector)
# If selector is a dict, compare the fields
if (type(selector) == type(dict())) or isinstance(selector, BasePlotlyType):
# This returns True if selector is an empty dict
@@ -1450,27 +1454,46 @@ class BaseFigure(object):
yref_to_row[yref] = r + 1
yref_to_secondary_y[yref] = is_secondary_y
- for obj in self.layout[prop]:
- # Filter by row
- if col is not None and xref_to_col.get(obj.xref, None) != col:
- continue
-
- # Filter by col
- if row is not None and yref_to_row.get(obj.yref, None) != row:
- continue
+ # filter down (select) which graph objects, by applying the filters
+ # successively
+ def _filter_row(obj):
+ """ Filter objects in rows by column """
+ return (col is None) or (xref_to_col.get(obj.xref, None) == col)
- # Filter by secondary y
- if (
- secondary_y is not None
- and yref_to_secondary_y.get(obj.yref, None) != secondary_y
- ):
- continue
+ def _filter_col(obj):
+ """ Filter objects in columns by row """
+ return (row is None) or (yref_to_row.get(obj.yref, None) == row)
- # Filter by selector
- if not self._selector_matches(obj, selector):
- continue
+ def _filter_sec_y(obj):
+ """ Filter objects on secondary y axes """
+ return (secondary_y is None) or (
+ yref_to_secondary_y.get(obj.yref, None) == secondary_y
+ )
- yield obj
+ def _filter_selector_matches(obj):
+ """ Filter objects for which selector matches """
+ return self._selector_matches(obj, selector)
+
+ funcs = [_filter_row, _filter_col, _filter_sec_y]
+ # If selector is not an int, we use the _filter_selector_matches to
+ # filter out items
+ if type(selector) != type(int()):
+ # append selector as filter function
+ funcs += [_filter_selector_matches]
+
+ def _reducer(last, f):
+ # takes list of objects that has been filtered down up to now (last)
+ # and applies the next filter function (f) to filter it down further.
+ return filter(lambda o: f(o), last)
+
+ # filtered_objs is a sequence of objects filtered by the above functions
+ filtered_objs = reduce(_reducer, funcs, self.layout[prop])
+ # If selector is an integer, use it as an index into the sequence of
+ # filtered objects. Note in this case we do not call _filter_selector_matches.
+ if type(selector) == type(int()):
+ # wrap in iter because this function should always return an iterator
+ return iter([list(filtered_objs)[selector]])
+ return filtered_objs
def _add_annotation_like(
self,
|
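A hedged usage sketch of the string/int selector behaviour described above, assuming a plotly version that includes this change and using select_shapes as the example accessor:

```python
import plotly.graph_objects as go

fig = go.Figure()
fig.add_shape(type="rect", x0=0, x1=1, y0=0, y1=1)
fig.add_shape(type="line", x0=0, x1=1, y0=0, y1=1)

# A string selector is shorthand for dict(type=<selector>).
rects = list(fig.select_shapes(selector="rect"))

# An int selector indexes into the already row/col-filtered sequence.
second_shape = next(fig.select_shapes(selector=1))

print(len(rects), second_shape.type)  # 1 line
```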
Update bdzadq.json
amended wording | "anxiety and sleep problems. <a href='https://www.rcpsych.ac.uk/expertadvice/treatments/benzodiazepines.aspx'> The Royal College of Psychiatrists states</a> ",
"that \"around 4 in every 10 people who take them every day for more than 6 weeks will become addicted\" ",
"and therefore they should not be prescribed for longer than 4 weeks. This measure shows the mean Average Daily Quantity (ADQ) given per prescription, ",
- "for both the older benzodiazepines, such as diazepam and temazepam, and the newer \"Z-drugs\", such as zopiclone."
+ "for both the older benzodiazepines, such as diazepam and temazepam, and the newer \"Z-drugs\", such as zopiclone. It excludes items that do not ",
+ "an ADQ, such as melatonin."
],
"numerator_short": "Anxiolytics and Hypnotics ADQ",
"denominator_short": "Prescription items",
|
Update build.yml
Removing 3.5 from Windows and Mac... it's been legacy since September 2020 | @@ -64,7 +64,7 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
@@ -86,7 +86,7 @@ jobs:
runs-on: macos-latest
strategy:
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8]
+ python-version: [3.6, 3.7, 3.8]
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
|
[query] Remove verbose print
Looks like this got added in some dndarray work | @@ -290,7 +290,6 @@ class RVDPartitioner(
}
def keysIfOneToOne(): Option[IndexedSeq[Row]] = {
- log.info(s"keysIfOneToOne ${kType} ${this}")
if (kType.size == 0) {
return None
}
|
Updating the appveyor script to run for the other conda versions.
The conda that is activated in AppVeyor's VM is "clean", so there is no need to set up a new test environment.
Only testing on tree currently (to hopefully speed up tests) | @@ -4,45 +4,44 @@ build: off
environment:
matrix:
- - PYTHON: 2.7
- MINICONDA: C:\Miniconda
- PYTHON_ARCH: 32
- - PYTHON: 3.5
- MINICONDA: C:\Miniconda3
- PYTHON_ARCH: 32
- - PYTHON: 3.6
- MINICONDA: C:\Miniconda3
- PYTHON_ARCH: 32
- - PYTHON: 2.7
- MINICONDA: C:\Miniconda
- PYTHON_ARCH: 64
- - PYTHON: 3.5
- MINICONDA: C:\Miniconda3
- PYTHON_ARCH: 64
- - PYTHON: 3.6
- MINICONDA: C:\Miniconda3
- PYTHON_ARCH: 64
+ - PYTHON_VERSION: 2.7
+ CONDA: C:\Miniconda
+
+ - PYTHON_VERSION: 3.5
+ CONDA: C:\Miniconda35
+
+ - PYTHON_VERSION: 3.6
+ CONDA: C:\Miniconda36
pypi_password:
secure: S2mZCxm5LzIF4KFDss/f7g==
+image: Visual Studio 2015
+
+platform:
+- x86
+- x64
+
init:
- - "ECHO %PYTHON% %MINICONDA%"
+ - ps: if ($Env:PLATFORM -eq "x64") { $Env:CONDA = "${Env:CONDA}-x64" }
+ - ps: Write-Host $Env:PYTHON_VERSION
+ - ps: Write-Host $Env:CONDA
+ - ps: Write-Host $Env:PLATFORM
install:
- - "set PATH=%MINICONDA%;%MINICONDA%\\Scripts;%PATH%"
+ # Load the conda root environment, configure and install some packages
+ - '"%CONDA%\Scripts\activate.bat"'
- conda config --set always_yes yes --set changeps1 no
- conda update -q conda
- conda info -a
- - "conda create -q -n test-environment python=%PYTHON% numpy scipy matplotlib cython ipython pillow wheel"
- - activate test-environment
- - conda install --quiet --yes -c menpo vtk
+ - conda install numpy scipy matplotlib cython ipython pillow wheel
+ - conda install -c menpo vtk
- pip install -r requirements_dev.txt
- python setup.py install
test_script:
- cd tests
- - nosetests base tree -v -s
+ - nosetests tree -v -s
- cd ..
after_test:
|
Remove display_claimed_answer
Replaced with append_to_found_embed which is more general | @@ -286,13 +286,6 @@ class DuckGamesDirector(commands.Cog):
found_embed.description = f"{old_desc.rstrip()}\n{text}"
await game.found_msg.edit(embed=found_embed)
- async def display_claimed_answer(self, game: DuckGame, author: discord.Member, answer: tuple[int]) -> None:
- """Add a claimed answer to the game embed."""
- async with game.editing_embed:
- found_embed, = game.found_msg.embeds
- found_embed.description = f"{found_embed.description}\n{str(answer):12s} - {author.display_name}"
- await game.found_msg.edit(embed=found_embed)
-
async def end_game(self, channel: discord.TextChannel, game: DuckGame, end_message: str) -> None:
"""Edit the game embed to reflect the end of the game and mark the game as not running."""
game.running = False
|
Pass multiple regions correctly to make_examples
Currently if more than one region is set for the runner, it does not pass them
properly to make_examples and causes a failure. | @@ -340,8 +340,10 @@ def _run_make_examples(pipeline_args):
localized_region_paths = map('"${{INPUT_REGIONS_{0}}}"'.format,
range(num_localized_region_paths))
region_literals = get_region_literals(pipeline_args.regions)
- extra_args.extend(
- ['--regions', ' '.join(region_literals + localized_region_paths)])
+ extra_args.extend([
+ '--regions',
+ '\'%s\'' % ' '.join(region_literals + localized_region_paths)
+ ])
if pipeline_args.sample_name:
extra_args.extend(['--sample_name', pipeline_args.sample_name])
if pipeline_args.hts_block_size:
|
chore: Add mergify[bot] to exception list
According to this, mergify's login id is `mergify[bot]`, so I am guessing this should work. | @@ -7,6 +7,7 @@ pull_request_rules:
- author!=gavindsouza
- author!=deepeshgarg007
- author!=ankush
+ - author!=mergify[bot]
- or:
- base=version-13
- base=version-12
|
mark private inline functions in mesh.c with static keyword
mark _det3x3(), _tri_area(), _aux_hex() | @@ -910,13 +910,13 @@ int32 mesh_get_centroids(Mesh *mesh, float64 *ccoors, int32 dim)
return(RET_OK);
}
-inline float64 _det3x3(float64 j[9])
+static inline float64 _det3x3(float64 j[9])
{
return (j[0]*j[4]*j[8] + j[3]*j[7]*j[2] + j[1]*j[5]*j[6]
- j[2]*j[4]*j[6] - j[5]*j[7]*j[0] - j[1]*j[3]*j[8]);
}
-inline float64 _tri_area(float64 *coors, uint32 *indices, uint32 nc)
+static inline float64 _tri_area(float64 *coors, uint32 *indices, uint32 nc)
{
#define VS(ic, id) (coors[nc*indices[ic] + id])
@@ -945,7 +945,7 @@ inline float64 _tri_area(float64 *coors, uint32 *indices, uint32 nc)
#undef VS
}
-inline float64 _aux_hex(float64 *coors, uint32 *indices, uint32 nc,
+static inline float64 _aux_hex(float64 *coors, uint32 *indices, uint32 nc,
int32 h, int32 i, int32 j, int32 k)
{
#define VS(ic, id) (coors[nc*indices[ic] + id])
|
removed warnings about obsolete ini section 'stream-blanker'
instead we will have a plausibility check for the whole configuration in
the near future | @@ -220,8 +220,6 @@ class VocConfigParser(SafeConfigParser):
or self.getboolean('audio', 'forcevolumecontrol', fallback=False))
def getBlinderEnabled(self):
- if self.has_section('stream-blanker'):
- self.log.error("configuration section 'stream-blanker' is obsolete and will be ignored! Use 'blinder' instead!");
return self.getboolean('blinder', 'enabled', fallback=False)
def isBlinderDefault(self):
@@ -229,8 +227,6 @@ class VocConfigParser(SafeConfigParser):
def getBlinderSources(self):
if self.getBlinderEnabled():
- if self.has_section('stream-blanker'):
- self.log.error("configuration section 'stream-blanker' is obsolete and will be ignored! Use 'blinder' instead!");
if self.isBlinderDefault():
return self.getList('blinder', 'videos')
else:
@@ -239,8 +235,6 @@ class VocConfigParser(SafeConfigParser):
return []
def getBlinderVolume(self):
- if self.has_section('stream-blanker'):
- self.log.error("configuration section 'stream-blanker' is obsolete and will be ignored! Use 'blinder' instead!");
return self.getfloat('blinder', 'volume', fallback=0.0)
def getMirrorsEnabled(self):
|
Update mkvtomp4.py
improved error handling | @@ -204,7 +204,7 @@ class MkvtoMp4:
if self.needProcessing(inputfile):
options, preopts, postopts = self.generateOptions(inputfile, original=original)
if not options:
- self.log.error("Error converting, inputfile had a valid extension but returned no data. Either the file does not exist, was unreadable, or was an incorrect format.")
+ self.log.error("Error converting, inputfile %s had a valid extension but returned no data. Either the file does not exist, was unreadable, or was an incorrect format." % inputfile)
return False
try:
@@ -332,9 +332,14 @@ class MkvtoMp4:
# Generate a dict of data about a source file
def generateSourceDict(self, inputfile):
- output = self.converter.probe(inputfile).toJson()
+ output = {}
input_dir, filename, input_extension = self.parseFile(inputfile)
output['extension'] = input_extension
+ probe = self.converter.probe(inputfile)
+ if probe:
+ output.update(probe.toJson())
+ else:
+ output['error'] = "Invalid input, unable to read"
return output
# Generate a dict of options to be passed to FFMPEG based on selected settings and the source file parameters and streams
@@ -344,7 +349,7 @@ class MkvtoMp4:
info = self.converter.probe(inputfile)
if not info:
- self.log.error("FFProbe returned no value, either the file does not exist or is not a format FFPROBE can read.")
+ self.log.error("FFProbe returned no value for inputfile %s (exists: %s), either the file does not exist or is not a format FFPROBE can read." % (inputfile, os.path.exists(inputfile)))
return None, None, None
self.log.info("Input Data")
|
Alien vault not found fix
fixes | @@ -303,6 +303,9 @@ script:
dbotScoreType = 'hash';
malInfo[argName.toUpperCase()] = args[argName];
}
+ if (raw === 'Not found') {
+ return raw;
+ }
if (dq(raw, 'pulse_info.count') > 0) {
dbotScore = dq(raw, 'pulse_info.count');
malInfo.Malicious.PulseIDs = dq(raw, 'pulse_info.pulses.id');
|
Update QRL Testnet Setup.md
updating instructions | @@ -27,5 +27,28 @@ This document describes the setup procedure to install and configure a QRL node
### Start your node
-`python main.py`
+`qrl/main.py`
+
+
+### Information
+
+Your data and wallet will be stored in ${HOME}/.qrl
+
+Testing PyPI packages (experimental)
+====================================
+
+We have experimental support for pip packages. You will not get a recent version and we recommend to stay with the git repo for a few more days. We will not be updating the pip package with every PR for now. Feedback is appreciated.
+
+### Installing the qrl package:
+
+`pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.python.org/simple/ --upgrade qrl`
+
+The command line is more complex because we are using testing repos. Once we release an stable version, things will get simpler.
+
+
+
+
+
+
+
|
Add the mgmtworker/env/source_plugins dir [ci skip]
This is where source plugins are going to be installed,
so this directory needs to exist | @@ -40,6 +40,7 @@ mkdir -p %{buildroot}/var/log/cloudify/mgmtworker
mkdir -p %{buildroot}/opt/mgmtworker/config
mkdir -p %{buildroot}/opt/mgmtworker/work
mkdir -p %{buildroot}/opt/mgmtworker/env/plugins
+mkdir -p %{buildroot}/opt/mgmtworker/env/source_plugins
cp -R ${RPM_SOURCE_DIR}/packaging/mgmtworker/files/* %{buildroot}
@@ -57,5 +58,6 @@ getent passwd cfyuser >/dev/null || useradd -r -g cfyuser -d /etc/cloudify -s /s
%attr(750,cfyuser,cfyuser) /opt/mgmtworker/config
%attr(750,cfyuser,cfyuser) /opt/mgmtworker/work
%attr(750,cfyuser,cfyuser) /opt/mgmtworker/env/plugins
+%attr(750,cfyuser,cfyuser) /opt/mgmtworker/env/source_plugins
/opt/mgmtworker
%attr(750,cfyuser,adm) /var/log/cloudify/mgmtworker
|
Fixes heat-keystone-setup-domain authentication failures with v3
With keystone v3 configured, attempting to create a heat domain
and heat user fails with authentication errors
Closes-Bug: | @@ -29,7 +29,9 @@ DEBUG = False
USERNAME = os.environ.get('OS_USERNAME')
PASSWORD = os.environ.get('OS_PASSWORD')
AUTH_URL = os.environ.get('OS_AUTH_URL', '').replace('v2.0', 'v3')
-TENANT_NAME = os.environ.get('OS_TENANT_NAME')
+PROJECT_NAME = os.environ.get('OS_PROJECT_NAME')
+USER_DOMAIN_NAME = os.environ.get('OS_USER_DOMAIN_NAME')
+PROJECT_DOMAIN_NAME = os.environ.get('OS_PROJECT_DOMAIN_NAME')
opts = [
cfg.StrOpt('stack-user-domain-name',
@@ -82,9 +84,6 @@ HEAT_DOMAIN_PASSWORD = os.environ.get('HEAT_DOMAIN_PASSWORD',
cfg.CONF.stack_domain_admin_password)
HEAT_DOMAIN_DESCRIPTION = 'Contains users and projects created by heat'
-logger.debug("USERNAME=%s" % USERNAME)
-logger.debug("AUTH_URL=%s" % AUTH_URL)
-
CACERT = os.environ.get('OS_CACERT', cfg.CONF.os_cacert)
CERT = os.environ.get('OS_CERT', cfg.CONF.os_cert)
KEY = os.environ.get('OS_KEY', cfg.CONF.os_key)
@@ -98,9 +97,12 @@ def main():
'password': PASSWORD,
'auth_url': AUTH_URL,
'endpoint': AUTH_URL,
- 'tenant_name': TENANT_NAME
+ 'project_name': PROJECT_NAME,
+ 'user_domain_name': USER_DOMAIN_NAME,
+ 'project_domain_name': PROJECT_DOMAIN_NAME
}
+
if insecure:
client_kwargs['verify'] = False
else:
|
Update jkbledelegate.py
Fix | @@ -28,7 +28,7 @@ class jkBleDelegate(btle.DefaultDelegate):
if not self._protocol.is_record_start(self.notificationData):
log.debug(f"Not valid start of record - wiping data {self.notificationData}")
self.notificationData = bytearray()
- if is_record_complete(self.notificationData):
+ if self._protocol.is_record_complete(self.notificationData):
self._jkbleio.record = self.notificationData
self.notificationData = bytearray()
# jkbledelegate.processRecord(record)
|
Felix: Log only Fatal errors to syslog
Note that felix will continue to log warnings and higher to
/var/log/calico/felix.log. | @@ -19,13 +19,13 @@ EtcdCertFile = <%= node['bcpc']['etcd'][@cert_type]['crt']['filepath'] %>
EtcdKeyFile = <%= node['bcpc']['etcd'][@cert_type]['key']['filepath'] %>
# The log severity above which logs are sent to the stdout.
-LogSeverityScreen = Warning
+LogSeverityScreen = Fatal
# The log severity above which logs are sent to the log file.
LogSeverityFile = Warning
# The log severity above which logs are sent to the syslog.
-LogSeveritySys = Warning
+LogSeveritySys = none
# Reports anonymous Calico version number and cluster size to
# projectcalico.org. Logs warnings returned by the usage server. For
|
Typo confirm -> override in wallet send
Wallet looks in extra_params/args for the "override" key (override = args["override"]), not "confirm".
Because of this, confirmation doesn't work right now.
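A tiny illustration with made-up values (not code from the wallet) of how the mismatched key loses the flag:

    extra_params = {"confirm": True}          # key the CLI used to write
    override = extra_params.get("override")   # key the wallet reads -> None
    print(override)                           # None, so the user's choice is ignored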
"-o", "--override", help="Submits transaction without checking for unusual values", is_flag=True, default=False
)
def send_cmd(
- wallet_rpc_port: int, fingerprint: int, id: int, amount: str, fee: str, address: str, confirm: bool
+ wallet_rpc_port: int, fingerprint: int, id: int, amount: str, fee: str, address: str, override: bool
) -> None:
- extra_params = {"id": id, "amount": amount, "fee": fee, "address": address, "confirm": confirm}
+ extra_params = {"id": id, "amount": amount, "fee": fee, "address": address, "override": override}
import asyncio
from .wallet_funcs import execute_with_wallet, send
|
Update poullight.txt
One more minor update for Reference section. | # See the file 'LICENSE' for copying permission
# Reference: https://twitter.com/MBThreatIntel/status/1240389621638402049
+# Reference: https://twitter.com/James_inthe_box/status/1240400306858573825
# Reference: https://app.any.run/tasks/9bde133d-2b57-4c69-82b2-ce92afc70617/
poullight.ru
|
Update docstring
Following | @@ -89,8 +89,8 @@ class FastGradientMethod(Attack):
:param x: An array with the original inputs
:type x: `np.ndarray`
- :param y:
- :type y:
+ :param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes)
+ :type y: `np.ndarray`
:return: An array holding the adversarial examples
:rtype: `np.ndarray`
"""
|
Fix libvirt-guests handling post virt-guest-shutdown
Tripleo_nova_libvirt_guests.service calls libvirt-guests.sh
assuming there is a monolithic libvirt container running.
That's not the case for modular libvirt. Use a proper container
name for that. | @@ -1486,8 +1486,8 @@ outputs:
[Service]
EnvironmentFile=-/etc/sysconfig/libvirt-guests
- ExecStart=/bin/{{container_cli}} exec nova_libvirt /bin/rm -f /var/lib/libvirt/libvirt-guests
- ExecStop=/bin/{{container_cli}} exec nova_libvirt /bin/sh -x /usr/libexec/libvirt-guests.sh shutdown
+ ExecStart=/bin/podman exec nova_virtproxyd /bin/rm -f /var/lib/libvirt/libvirt-guests
+ ExecStop=/bin/podman exec nova_virtproxyd /bin/sh -x /usr/libexec/libvirt-guests.sh shutdown
Type=oneshot
RemainAfterExit=yes
StandardOutput=journal+console
|
typo
minor typo | @@ -88,6 +88,6 @@ If you have [`custom_updater`](https://github.com/custom-components/custom_updat
This and [`custom_updater`](https://github.com/custom-components/custom_updater) can not operate on the same installation.
-If you used the special endpoint `/customcards/` endpoint for your Lovelace cards, you now need to reinstall that plugin using HACS and use the url provided in the page for that plugin in the HACS UI, if the plugin is not there you need to use `/local/` insted.
+If you used the special endpoint `/customcards/` endpoint for your Lovelace cards, you now need to reinstall that plugin using HACS and use the url provided in the page for that plugin in the HACS UI, if the plugin is not there you need to use `/local/` instead.
As noted under ['Existing elements'](/hacs#existing-elements) You need to click the "INSTALL" button for each element you previously have installed.
|
minor change
brolti changed to brotli | @@ -76,7 +76,7 @@ WordOps made some fundamental changes:
- We've deprecated the mail stack. As an alternative, you can take a look at [Mail-in-a-Box](https://github.com/mail-in-a-box/mailinabox), [iRedMail](https://www.iredmail.org/) or [Caesonia](https://github.com/vedetta-com/caesonia). As Roundcube alternative, there is [Rainloop](https://www.rainloop.net/) or [Afterlogic WebMail](https://github.com/afterlogic/webmail-lite-8)
- Support for w3tc is dropped as a security precaution.
- PHP 5.6 has been replaced by PHP 7.2 and PHP 7.0 has been replaced by PHP 7.3.
-- Nginx-ee package has been replaced by Nginx-wo (based on Nginx stable v1.14.2 with Brolti support)
+- Nginx-ee package has been replaced by Nginx-wo (based on Nginx stable v1.14.2 with Brotli support)
- HHVM stack has been removed
- Let's Encrypt stack isn't based on letsencrypt-auto anymore, we use acme.sh to handle SSL certificates
|
Correction to remove
Maintains the object form even with the mass removal. | @@ -1660,14 +1660,10 @@ class RootNode(list):
self.build_tree(node, obj_value)
def add_operations_group(self, ops, pathname, basename):
- # group = Node(NODE_OPERATION_GROUP, basename, self.node_operations, self)
- # group.filepath = pathname
self.build_tree(self.node_operations, ops)
def add_element_group(self, elements, pathname, basename):
"""Called to add element group of elements, as loaded with the given pathname and basename."""
- # group = Node(NODE_ELEMENT_GROUP, basename, self.node_elements, self)
- # group.filepath = pathname
self.build_tree(self.node_elements, elements)
def notify_added(self, node):
@@ -2281,12 +2277,15 @@ class RootNode(list):
removed_object = node.object
self.kernel.elements.remove(removed_object)
for i in range(len(self.kernel.operations)):
- self.kernel.operations[i] = [e for e in self.kernel.operations[i]
- if e is not removed_object]
+ elems = [e for e in self.kernel.operations[i] if e is not removed_object]
+ self.kernel.operations[i].clear()
+ self.kernel.operations[i].extend(elems)
if len(self.kernel.operations[i]) == 0:
self.kernel.operations[i] = None
- self.kernel.operations = [op for op in self.kernel.operations
- if op is not None]
+ ops = [op for op in self.kernel.operations if op is not None]
+ self.kernel.operations.clear()
+ self.kernel.operations.extend(ops)
+
elif node.type == NODE_OPERATION:
self.kernel.operations.remove(node.object)
elif node.type == NODE_FILE_FILE:
|
Add logging to webhooks
Fixes | @@ -24,6 +24,7 @@ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
+import logging
import asyncio
import json
import time
@@ -46,6 +47,8 @@ __all__ = (
'Webhook',
)
+log = logging.getLogger(__name__)
+
class WebhookAdapter:
"""Base class for all webhook adapters.
@@ -194,11 +197,14 @@ class AsyncWebhookAdapter(WebhookAdapter):
else:
data.add_field(key, value)
+ base_url = url.replace(self._request_url, '/') or '/'
+ _id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
async with self.session.request(verb, url, headers=headers, data=data) as r:
+ log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
# Coerce empty strings to return None for hygiene purposes
response = (await r.text(encoding='utf-8')) or None
if r.headers['Content-Type'] == 'application/json':
@@ -208,6 +214,7 @@ class AsyncWebhookAdapter(WebhookAdapter):
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
delta = utils._parse_ratelimit_header(r)
+ log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
await asyncio.sleep(delta)
if 300 > r.status >= 200:
@@ -216,6 +223,7 @@ class AsyncWebhookAdapter(WebhookAdapter):
# we are being rate limited
if r.status == 429:
retry_after = response['retry_after'] / 1000.0
+ log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
await asyncio.sleep(retry_after)
continue
@@ -278,6 +286,8 @@ class RequestsWebhookAdapter(WebhookAdapter):
if multipart is not None:
data = {'payload_json': multipart.pop('payload_json')}
+ base_url = url.replace(self._request_url, '/') or '/'
+ _id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
@@ -290,6 +300,7 @@ class RequestsWebhookAdapter(WebhookAdapter):
# compatibility with aiohttp
r.status = r.status_code
+ log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
if r.headers['Content-Type'] == 'application/json':
response = json.loads(response)
@@ -297,6 +308,7 @@ class RequestsWebhookAdapter(WebhookAdapter):
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429 and self.sleep:
delta = utils._parse_ratelimit_header(r)
+ log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
time.sleep(delta)
if 300 > r.status >= 200:
@@ -306,6 +318,7 @@ class RequestsWebhookAdapter(WebhookAdapter):
if r.status == 429:
if self.sleep:
retry_after = response['retry_after'] / 1000.0
+ log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
time.sleep(retry_after)
continue
else:
|
Cleanup in main window
Have to figure out what the alternative to Action.connect_proxy would be.
element_factory = inject('element_factory')
ui_manager = inject('ui_manager')
action_manager = inject('action_manager')
- main_window = inject('main_window')
menu_xml = STATIC_MENU_XML % ('window', 'open-namespace')
@@ -865,7 +864,9 @@ class Toolbox(object):
action_name = button.action_name
action = action_group.get_action(action_name)
if action:
- action.connect_proxy(button)
+ pass
+ # FixMe: action.connect_proxy(button)
+ # AttributeError: 'RadioAction' object has no attribute 'connect_proxy'
def set_active_tool(self, action_name=None, shortcut=None):
"""
@@ -889,7 +890,6 @@ class Diagrams(object):
placement = ('left', 'diagrams')
component_registry = inject('component_registry')
- main_window = inject('main_window')
properties = inject('properties')
def __init__(self):
@@ -908,13 +908,6 @@ class Diagrams(object):
self._notebook.destroy()
self._notebook = None
- # Hook into diagram open event
- def _on_open_diagram(self, diagram):
- if self._notebook:
- tab = Gtk.Label(label=_('TAB'))
- tab.show()
- self._notebook.append_page(tab, Gtk.Label(label=_('About')))
-
@component.adapter(DiagramShow)
def _on_show_diagram(self, event):
"""
@@ -934,11 +927,12 @@ class Diagrams(object):
widget.set_name('diagram-tab')
widget.diagram_tab = tab
assert widget.get_name() == 'diagram-tab'
- # tab.set_drawing_style(self.properties('diagram.sloppiness', 0))
+ tab.set_drawing_style(self.properties('diagram.sloppiness', 0))
- # self.add_tab(dock_item)
page_num = self._notebook.append_page(widget, Gtk.Label(label=diagram.name))
self._notebook.set_current_page(page_num)
+ self.component_registry.handle(DiagramTabChange(widget))
+
return tab
|
Adding slashsolu to the docs.
Shortening the `run_multiline` example.
Fixing code block in `run_multiline`. | @@ -42,6 +42,12 @@ these commands (e.g. ``"/SOLU"``):
mapdl.run('/SOLU')
mapdl.solve()
+You can use the alternative:
+
+.. code:: python
+
+ mapdl.slashsolu()
+
Some commands can only be run non-interactively from within in a
script. PyMAPDL gets around this restriction by writing the commands
to a temporary input file and then reading the input file. To run a
@@ -139,6 +145,8 @@ You can run several MAPDL commands as a unified block using
This is useful when using PyMAPDL with older MAPDL scripts. For
example:
+.. code:: python
+
>>> cmd = '''/prep7
! Mat
MP,EX,1,200000
@@ -146,28 +154,13 @@ example:
MP,DENS,1,7.85e-09
! Elements
et,1,186
- et,2,154
! Geometry
BLC4,0,0,1000,100,10
! Mesh
esize,5
vmesh,all
- nsel,s,loc,x,0
- d,all,all
- nsel,s,loc,x,999,1001
- type,2
- esurf
- esel,s,type,,2
- nsle
- sfe,all,3,pres,,-10
- allsel
- /solu
- antype,0
- solve
- /post1
- set,last
- plnsol,u,sum
'''
+
>>> resp = mapdl.run_multiline(cmd)
>>> resp
@@ -187,14 +180,6 @@ example:
CURRENT NODAL DOF SET IS UX UY UZ
THREE-DIMENSIONAL MODEL
- ELEMENT TYPE 2 IS SURF154 3-D STRUCTURAL SURFACE
- KEYOPT( 1- 6)= 0 0 0 0 0 0
- KEYOPT( 7-12)= 0 0 0 0 0 0
- KEYOPT(13-18)= 0 0 0 0 0 0
-
- CURRENT NODAL DOF SET IS UX UY UZ
- THREE-DIMENSIONAL MODEL
-
CREATE A HEXAHEDRAL VOLUME WITH
X-DISTANCES FROM 0.000000000 TO 1000.000000
Y-DISTANCES FROM 0.000000000 TO 100.0000000
|
test_apk: try to load androguard from local source code first
If the Debian package is installed, the tests should try to load androguard
from the local git/source before trying to load the Debian package. | import unittest
-
+import inspect
+import os
import sys
+# ensure that androguard is loaded from this source code
+localmodule = os.path.realpath(
+ os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..'))
+if localmodule not in sys.path:
+ sys.path.insert(0, localmodule)
+
from androguard.core.bytecodes import apk
+print('loaded from', apk.__file__)
class APKTest(unittest.TestCase):
|
add additional dynamic domains
now-dns, other missing domains | @@ -39178,6 +39178,7 @@ dynamic_dns_domains, isDynDNS_default
*.inboxwebmail.net*, True
*.inbrand.net.br*, True
*.inbtec.com.br*, True
+*.inc.gs*, True
*.inca.com.ar*, True
*.incarsreview.info*, True
*.incasi.co.id*, True
@@ -53093,6 +53094,7 @@ dynamic_dns_domains, isDynDNS_default
*.mikroskeem.cf*, True
*.mikrotik-tech.us*, True
*.mikvemenora.org*, True
+*.mil.nf*, True
*.milad-ardabil.ir*, True
*.milady.com.ar*, True
*.milagresouth.org*, True
@@ -91936,5 +91938,39 @@ dynamic_dns_domains, isDynDNS_default
*.8866.org*, True
*.3322.org*, True
*.f3322.net*, True
+*.f3322.org*, True
*.eatuo.com*, True
+*.x3322.net*, True
*.x3322.org*, True
+*.001www.com*, True
+*.16-b.it*, True
+*.2mydns.net*, True
+*.32-b.it*, True
+*.64-b.it*, True
+*.crafting.xyz*, True
+*.ddnslive.com*, True
+*.dnsapi.info*, True
+*.dnsdyn.net*, True
+*.dnsking.ch*, True
+*.dnsup.net*, True
+*.dynip.org*, True
+*.dynserv.org*, True
+*.forumz.info*, True
+*.freeddns.uk*, True
+*.freeddns.us*, True
+*.hicam.net*, True
+*.myiphost.com*, True
+*.mypi.co*, True
+*.n4t.co*, True
+*.now-dns.net*, True
+*.now-dns.org*, True
+*.now-dns.top*, True
+*.nowddns.com*, True
+*.ntdll.top*, True
+*.ownip.net*, True
+*.soundcast.me*, True
+*.tcp4.me*, True
+*.tftpd.net*, True
+*.vpndns.net*, True
+*.wifizone.org*, True
+*.x443.pw*, True
|
MAINT: Punctuate `fromstring` docstring.
[ci skip] | @@ -979,8 +979,8 @@ def luf(lamdaexpr, *args, **kwargs):
elements is also ignored.
.. deprecated:: 1.14
- If this argument is not provided `fromstring` falls back on the
- behaviour of `frombuffer`, after encoding unicode string inputs as
+ If this argument is not provided, `fromstring` falls back on the
+ behaviour of `frombuffer` after encoding unicode string inputs as
either utf-8 (python 3), or the default encoding (python 2).
Returns
|
Ubuntu 17.x + Anaconda (Python 3.5) installation
Add instructions to install it in Ubuntu 17.x where Python 3.5 is not available by using Anaconda. | @@ -68,10 +68,35 @@ OSX
Ubuntu/Debian
"""""""""""""
+Ubuntu 16.x or earlier
::
apt-get install libleveldb-dev python3.5-dev python3-pip libssl-dev
+Ubuntu 17.x or later
+
+Python 3.5 is not available in Ubuntu 17.x repositories but you can proceed to install it by using ``Anaconda`` instead of virtualenv:
+
+1) Download and install anaconda <https://www.anaconda.com/download>
+
+2) Run the following command:
+::
+ conda create -n neo-python-env python=3.5
+ conda activate neo-python-env
+
+3) Install the available packages:
+::
+
+ sudo apt-get install libleveldb-dev libssl-dev
+
+4) Install requirements:
+::
+ cd neo-python
+ pip install -r requirements.txt
+
+5) Check the installation is working:
+::
+ python prompt.py
Centos/Redhat/Fedora
""""""""""""""""""""
|
fix broken config
remove repeated lines. | @@ -55,8 +55,6 @@ ExpressionExclusions_Calcs =
#Recommended INI file for full test coverage:
-[StandardTests]
-
[ConnectionTests]
# An auto-generated section that is used to run tests to verify TDVT can connect to the Staples & cast_calcs tables.
# The Connection Tests, and any other tests with the attribute `SmokeTest = True`, are run before the other tests.
@@ -64,8 +62,6 @@ ExpressionExclusions_Calcs =
CastCalcsTestEnabled = True
StaplesTestEnabled = True
-[LODTests]
-
[UnionTest]
#Advanced:
|
Verification: delete bots' messages
Messages are deleted after a delay of 10 seconds. This helps keep the
channel clean. The periodic ping is an exception; it will remain. | @@ -38,6 +38,7 @@ PERIODIC_PING = (
f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`."
f" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel."
)
+BOT_MESSAGE_DELETE_DELAY = 10
class Verification(Cog):
@@ -56,7 +57,11 @@ class Verification(Cog):
async def on_message(self, message: Message) -> None:
"""Check new message event for messages to the checkpoint channel & process."""
if message.author.bot:
- return # They're a bot, ignore
+ # They're a bot, delete their message after the delay.
+ # But not the periodic ping; we like that one.
+ if message.content != PERIODIC_PING:
+ await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
+ return
if message.channel.id != Channels.verification:
return # Only listen for #checkpoint messages
|
chore: Do not print traceback in console
Instead print class of error and the site name | @@ -43,9 +43,8 @@ def enqueue_events_for_all_sites():
for site in sites:
try:
enqueue_events_for_site(site=site)
- except:
- # it should try to enqueue other sites
- print(frappe.get_traceback())
+ except Exception as e:
+ print(e.__class__, 'Failed to enqueue events for site: {}'.format(site))
def enqueue_events_for_site(site):
def log_and_raise():
|
Retry bucket creation in signing setup.
Closes | @@ -754,7 +754,7 @@ class TestStorageSignURLs(unittest.TestCase):
cls.skipTest("Signing tests requires a service account credential")
bucket_name = "gcp-signing" + unique_resource_id()
- cls.bucket = Config.CLIENT.create_bucket(bucket_name)
+ cls.bucket = retry_429(Config.CLIENT.create_bucket)(bucket_name)
cls.blob = cls.bucket.blob("README.txt")
cls.blob.upload_from_string(cls.BLOB_CONTENT)
|
Add typeguard to MonitoringHub initializer
* Add typeguard to MonitoringHub initializer
This is part of issue
* Change resource monitoring interval to float after feedback from zhuozhao
* change docstring to match new type | @@ -3,6 +3,7 @@ import socket
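As a rough sketch of what the decorator adds (assuming the `typeguard` package; the function below is made up, not MonitoringHub itself):

    import typeguard

    @typeguard.typechecked
    def set_interval(resource_monitoring_interval: float) -> float:
        return resource_monitoring_interval

    set_interval(30.0)     # passes
    # set_interval("30")   # would be rejected by typeguard at call time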
import pickle
import logging
import time
+import typeguard
import datetime
import zmq
@@ -12,7 +13,7 @@ from parsl.utils import RepresentationMixin
from parsl.monitoring.message_type import MessageType
-from typing import Optional
+from typing import Optional, Tuple
try:
from parsl.monitoring.db_manager import dbm_starter
@@ -119,22 +120,23 @@ class UDPRadio(object):
return
[email protected]
class MonitoringHub(RepresentationMixin):
def __init__(self,
- hub_address,
- hub_port=None,
- hub_port_range=(55050, 56000),
-
- client_address="127.0.0.1",
- client_port_range=(55000, 56000),
-
- workflow_name=None,
- workflow_version=None,
- logging_endpoint='sqlite:///monitoring.db',
- logdir=None,
- monitoring_debug=False,
- resource_monitoring_enabled=True,
- resource_monitoring_interval=30): # in seconds
+ hub_address: str,
+ hub_port: Optional[int] = None,
+ hub_port_range: Tuple[int, int] = (55050, 56000),
+
+ client_address: str = "127.0.0.1",
+ client_port_range: Tuple[int, int] = (55000, 56000),
+
+ workflow_name: Optional[str] = None,
+ workflow_version: Optional[str] = None,
+ logging_endpoint: str = 'sqlite:///monitoring.db',
+ logdir: Optional[str] = None,
+ monitoring_debug: bool = False,
+ resource_monitoring_enabled: bool = True,
+ resource_monitoring_interval: float = 30): # in seconds
"""
Parameters
----------
@@ -164,7 +166,7 @@ class MonitoringHub(RepresentationMixin):
Enable monitoring debug logging. Default: False
resource_monitoring_enabled : boolean
Set this field to True to enable logging the info of resource usage of each task. Default: True
- resource_monitoring_interval : int
+ resource_monitoring_interval : float
The time interval, in seconds, at which the monitoring records the resource usage of each task. Default: 30 seconds
"""
self.logger = None
|
fix(backups): Allow individual backups of all tables
Due to previous logic, only tables under DocType table were allowed to
take partial backups. This change allows backup to be taken for
deprecated doctypes too. | @@ -116,16 +116,16 @@ class BackupGenerator:
def setup_backup_tables(self):
"""Sets self.backup_includes, self.backup_excludes based on passed args"""
- existing_doctypes = set([x.name for x in frappe.get_all("DocType")])
+ existing_tables = frappe.db.get_tables()
def get_tables(doctypes):
tables = []
for doctype in doctypes:
- if doctype and doctype in existing_doctypes:
- if doctype.startswith("tab"):
- tables.append(doctype)
- else:
- tables.append("tab" + doctype)
+ if not doctype:
+ continue
+ table = frappe.utils.get_table_name(doctype)
+ if table in existing_tables:
+ tables.append(table)
return tables
passed_tables = {
|
Add batch_kwargs to page_renderer output
for validation and profiling results | @@ -43,6 +43,7 @@ class ValidationResultsPageRenderer(Renderer):
run_id = validation_results.meta['run_id']
batch_id = BatchKwargs(validation_results.meta['batch_kwargs']).to_id()
expectation_suite_name = validation_results.meta['expectation_suite_name']
+ batch_kwargs = validation_results.meta.get("batch_kwargs")
# Group EVRs by column
columns = {}
@@ -125,6 +126,7 @@ class ValidationResultsPageRenderer(Renderer):
return RenderedDocumentContent(**{
"renderer_type": "ValidationResultsPageRenderer",
"page_title": expectation_suite_name + " / " + run_id + " / " + batch_id,
+ "batch_kwargs": batch_kwargs,
"expectation_suite_name": expectation_suite_name,
"sections": sections,
"utm_medium": "validation-results-page",
@@ -613,6 +615,7 @@ class ProfilingResultsPageRenderer(Renderer):
def render(self, validation_results):
run_id = validation_results.meta['run_id']
expectation_suite_name = validation_results.meta['expectation_suite_name']
+ batch_kwargs = validation_results.meta.get("batch_kwargs")
# Group EVRs by column
#TODO: When we implement a ValidationResultSuite class, this method will move there.
@@ -626,6 +629,7 @@ class ProfilingResultsPageRenderer(Renderer):
"page_title": run_id + "-" + expectation_suite_name + "-ProfilingResults",
"expectation_suite_name": expectation_suite_name,
"utm_medium": "profiling-results-page",
+ "batch_kwargs": batch_kwargs,
"sections":
[
self._overview_section_renderer.render(
|
MAINT: be more tolerant of setuptools>=60
NumPy may fail to build with the default vendored distutils in
setuptools>=60. Rather than panic and die when new setuptools is found,
let's check (or set, if possible) the SETUPTOOLS_USE_DISTUTILS
environment variable that restores "proper" setuptools behavior. | import numpy.distutils.command.sdist
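A short sketch of the workaround from the build side (assuming the variable is honoured by the installed setuptools version):

    import os

    # Ask setuptools >= 60 to keep using the standard-library distutils.
    os.environ.setdefault("SETUPTOOLS_USE_DISTUTILS", "stdlib")

    import setuptools  # import only after the variable is in place
    print(setuptools.__version__)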
import setuptools
if int(setuptools.__version__.split('.')[0]) >= 60:
- raise RuntimeError(
- "Setuptools version is '{}', version < '60.0.0' is required. "
- "See pyproject.toml".format(setuptools.__version__))
+ # setuptools >= 60 switches to vendored distutils by default; this
+ # may break the numpy build, so make sure the stdlib version is used
+ try:
+ setuptools_use_distutils = os.environ['SETUPTOOLS_USE_DISTUTILS']
+ except KeyError:
+ os.environ['SETUPTOOLS_USE_DISTUTILS'] = "stdlib"
+ else:
+ if setuptools_use_distutils != "stdlib":
+ raise RuntimeError("setuptools versions >= '60.0.0' require "
+ "SETUPTOOLS_USE_DISTUTILS=stdlib in the environment")
# Initialize cmdclass from versioneer
from numpy.distutils.core import numpy_cmdclass
|
Fix deprecation warning about escaped chars
Python3 interprets string literals as Unicode strings.
This means that \S is treated as an escaped Unicode character.
To fix this, the regex patterns should be declared as raw strings.
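A quick illustration with a shortened, made-up pattern (not the frontend's real route):

    plain = "(^idp1|^idp2)/\S+/acs"   # "\S" in a normal literal can trigger an invalid-escape warning
    raw = r"(^idp1|^idp2)/\S+/acs"    # raw string: no warning
    print(plain == raw)               # True today, but only the raw form is future-proof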
for binding, endp in self.endpoints[endp_category].items():
valid_providers = "|^".join(providers)
parsed_endp = urlparse(endp)
- url_map.append(("(^%s)/\S+/%s" % (valid_providers, parsed_endp.path),
- functools.partial(self.handle_authn_request, binding_in=binding)))
+ url_map.append(
+ (
+ r"(^{})/\S+/{}".format(valid_providers, parsed_endp.path),
+ functools.partial(self.handle_authn_request, binding_in=binding)
+ )
+ )
return url_map
|
Remove wrong color space conversions image in docs
also related to | @@ -21,9 +21,6 @@ Filters in ``histolab`` are designed to be applied singularly or combined in a c
* Image filters:
* **Transforming image color space**: Color images can be represented using alternative color spaces and the most common one is the RGB space, where the image is represented using distinct channels for Red, Green and Blue. RGB images can be converted to grayscale, namely shifting from 3-channel images to single channel images, e.g. for use with thresholding. ``histolab`` leverages the `Pillow's ImageOps <https://pillow.readthedocs.io/en/stable/reference/ImageOps.html>`_ module for the conversion to the grayscale color space. Besides RGB and grayscale, several color models are extensively used, such as the HSV space, a cylindrical color model where the three independent channels represent the Hue, Saturation and Value of the color. The HED space [1]_ has been designed to specifically represent the contribution of Hematoxylin, Eosin and Diaminobenzidine dyes in H&E-stained slides and therefore is widely used in pathology (for example, to analyse microscopic blood images [2]_).
- .. figure:: https://user-images.githubusercontent.com/31658006/111822707-6bf3e880-88e4-11eb-8b38-6e108d7adbb7.jpeg
- :alt: Color space conversion filters
- :align: center
|
Don't allow snap spell types on macOS
Fixes | @@ -23,6 +23,7 @@ from pkg_resources import parse_version
from raven.processors import SanitizePasswordsProcessor
from termcolor import cprint
+from conjureup import consts
from conjureup.app_config import app
from conjureup.models.metadata import SpellMetadata
from conjureup.telemetry import track_event
@@ -509,19 +510,21 @@ def __available_on_darwin(key):
""" Returns True if spell is available on macOS
"""
metadata = get_spell_metadata(key)
- if is_darwin() and metadata.cloud_whitelist \
+ if metadata.cloud_whitelist \
and 'localhost' in metadata.cloud_whitelist:
return False
+ if metadata.spell_type == consts.spell_types.SNAP:
+ return False
return True
def find_spells():
- """ Find spells, excluding localhost only spells if not linux
+ """ Find spells, excluding localhost only and snap spells if not linux
"""
_spells = []
for category, cat_dict in app.spells_index.items():
for sd in cat_dict['spells']:
- if not __available_on_darwin(sd['key']):
+ if is_darwin() and not __available_on_darwin(sd['key']):
continue
_spells.append((category, sd))
return _spells
@@ -537,7 +540,7 @@ def find_spells_matching(key):
if key in app.spells_index:
_spells = []
for sd in app.spells_index[key]['spells']:
- if not __available_on_darwin(sd['key']):
+ if is_darwin() and not __available_on_darwin(sd['key']):
continue
_spells.append((key, sd))
return _spells
@@ -545,7 +548,7 @@ def find_spells_matching(key):
for category, d in app.spells_index.items():
for spell in d['spells']:
if spell['key'] == key:
- if not __available_on_darwin(spell['key']):
+ if is_darwin() and not __available_on_darwin(spell['key']):
continue
return [(category, spell)]
return []
|
fix: dialogue label equals
in DialogueLabel.__eq__, compare the label fields
rather than the hash of the label fields | @@ -130,9 +130,12 @@ class DialogueLabel:
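A small generic sketch (not the actual DialogueLabel class) of why comparing fields is safer than comparing hashes — equal hashes do not guarantee equal objects:

    class Label:
        def __init__(self, ref, opponent, starter):
            self.ref, self.opponent, self.starter = ref, opponent, starter

        def __hash__(self):
            return hash(self.ref)  # this toy hash ignores the address fields

        def __eq__(self, other):
            return (
                isinstance(other, Label)
                and self.ref == other.ref
                and self.opponent == other.opponent
                and self.starter == other.starter
            )

    a = Label(("1", ""), "addr_A", "addr_B")
    b = Label(("1", ""), "addr_C", "addr_B")
    print(hash(a) == hash(b), a == b)  # True False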
def __eq__(self, other: Any) -> bool:
"""Check for equality between two DialogueLabel objects."""
- if isinstance(other, DialogueLabel):
- return hash(self) == hash(other)
- return False
+ return (
+ isinstance(other, DialogueLabel)
+ and self.dialogue_reference == other.dialogue_reference
+ and self.dialogue_opponent_addr == other.dialogue_opponent_addr
+ and self.dialogue_starter_addr == other.dialogue_starter_addr
+ )
def __hash__(self) -> int:
"""Turn object into hash."""
|
Fix MySQL exception handler and typo
There was the same issue about the MySQL exception handler that was
fixed in
There was also a typo left in monitoring_purge (obj_attr_list -> _obj_attr_list).
from lib.remote.process_management import ProcessManagement
from lib.auxiliary.data_ops import str2dic, dic2str, makeTimestamp
from lib.operations.base_operations import BaseObjectOperations
-from lib.stores.common_datastore_adapter import MetricStoreMgdConnException
from scripts.common.cb_common import report_app_metrics
qemu_supported = False
@@ -79,6 +78,17 @@ slope_int2str = {0: 'zero',
3: 'both',
4: 'unspecified'}
+class MetricStoreMgdConnException(Exception) :
+ '''
+ TBD
+ '''
+ def __init__(self, msg, status):
+ Exception.__init__(self)
+ self.msg = msg
+ self.status = status
+ def __str__(self):
+ return self.msg
+
class Gmetric:
"""
Class to send gmetric/gmond 2.X packets
@@ -1872,7 +1882,7 @@ class PassiveObjectOperations(BaseObjectOperations) :
if _expid.count('*') :
_expid = {'$regex':_expid}
- self.get_msci(obj_attr_list["cloud_name"]).flush_metric_store(_username, True, {"expid" : _expid })
+ self.get_msci(_obj_attr_list["cloud_name"]).flush_metric_store(_username, True, {"expid" : _expid })
_status = 0
|
remove print
skip deleted release notes | @@ -198,9 +198,7 @@ def createFileReleaseNotes(fileName):
names = fileName.split("\t")
changeType = names[0]
fullFileName = names[1]
- print("fullFileName: " + fullFileName)
- print("changeType: " + changeType)
- if changeType != "R100":
+ if changeType != "R100" and changeType != "D":
with open(contentLibPath + fullFileName, 'r') as f:
data = f.read()
if "/" in fullFileName:
|
Added selector argument and changed _subplot_has_no_traces
It is now called _subplot_not_empty and selector can be used to choose
what is meant by non-empty.
The prior tests pass, but new tests for the subset selection criteria
have to be introduced. | @@ -1302,7 +1302,7 @@ because subplot does not have a secondary y-axis"""
# if exclude_empty_subplots is True, check to see if subplot is
# empty and return if it is
if exclude_empty_subplots and (
- not self._subplot_contains_trace(xref, yref)
+ not self._subplot_not_empty(xref, yref, selector=exclude_empty_subplots)
):
return self
# in case the user specified they wanted an axis to refer to the
@@ -1993,8 +1993,8 @@ Invalid property path '{key_path_str}' for trace class {trace_class}
if exclude_empty_subplots:
data = list(
filter(
- lambda trace: self._subplot_contains_trace(
- trace["xaxis"], trace["yaxis"]
+ lambda trace: self._subplot_not_empty(
+ trace["xaxis"], trace["yaxis"], exclude_empty_subplots
),
data,
)
@@ -3873,19 +3873,56 @@ Invalid property path '{key_path_str}' for layout
single plot and so this returns False. """
return self._grid_ref is not None
- def _subplot_contains_trace(self, xref, yref):
- return any(
+ def _subplot_not_empty(self, xref, yref, selector="all"):
+ """
+ xref: string representing the axis. Objects in the plot will be checked
+ for this xref (for layout objects) or xaxis (for traces) to
+ determine if they lie in a certain subplot.
+ yref: string representing the axis. Objects in the plot will be checked
+ for this yref (for layout objects) or yaxis (for traces) to
+ determine if they lie in a certain subplot.
+ selector: can be "all" or an iterable containing some combination of
+ "traces", "shapes", "annotations", "images". Only the presence
+ of objects specified in selector will be checked. So if
+ ["traces","shapes"] is passed then a plot we be considered
+ non-empty if it contains traces or shapes. If
+ bool(selector) returns False, no checking is performed and
+ this function returns True. If selector is True, it is
+ converted to "all".
+ """
+ if not selector:
+ # If nothing to select was specified then a subplot is always deemed non-empty
+ return True
+ if selector == True:
+ selector = "all"
+ if selector == "all":
+ selector = ["traces", "shapes", "annotations", "images"]
+ ret = False
+ for s in selector:
+ if s == "traces":
+ obj = self.data
+ xaxiskw = "xaxis"
+ yaxiskw = "yaxis"
+ elif s in ["shapes", "annotations", "images"]:
+ obj = self.layout[s]
+ xaxiskw = "xref"
+ yaxiskw = "yref"
+ else:
+ obj = None
+ if obj:
+ ret |= any(
t == (xref, yref)
for t in [
- # if a trace exists but has no xaxis or yaxis keys, then it
- # is plotted with xaxis 'x' and yaxis 'y'
+ # if a object exists but has no xaxis or yaxis keys, then it
+ # is plotted with xaxis/xref 'x' and yaxis/yref 'y'
(
- "x" if d["xaxis"] is None else d["xaxis"],
- "y" if d["yaxis"] is None else d["yaxis"],
+ "x" if d[xaxiskw] is None else d[xaxiskw],
+ "y" if d[yaxiskw] is None else d[yaxiskw],
)
- for d in self.data
+ for d in obj
]
)
+ return ret
class BasePlotlyType(object):
|
Update building_properties.py
fix typo | @@ -769,9 +769,9 @@ def get_properties_technical_systems(locator, prop_hvac):
return result
-def verify_overlap_season(building_name, has_teating_season, has_cooling_season, heat_start, heat_end, cool_start,
+def verify_overlap_season(building_name, has_heating_season, has_cooling_season, heat_start, heat_end, cool_start,
cool_end):
- if has_cooling_season and has_teating_season:
+ if has_cooling_season and has_heating_season:
Range = namedtuple('Range', ['start', 'end'])
# for heating
|
Update CONTRIBUTING.md
Fixed pycryptodome version (Thanks Florian REY) | @@ -24,7 +24,7 @@ You'll need Python 2.7 and we seriously advise you to use virtualenv (http://pyp
The following packages are required:
* Tornado >= 2.3.0
-* pycryptodome >= 3.4.5
+* pycryptodome >= 3.4.7
* pycurl >= 7.19.0
* Pillow >= 2.3.0
* redis >= 2.4.11
|
Fix yaml configuration-envs-interpolation examples
The interpolation of the environment variables in YAML was wrong; I have changed the example from {$ENV_VAR} to ${ENV_VAR}.
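For reference, a stand-alone sketch of how `${ENV_VAR}` and `${ENV_VAR:default}` placeholders can be resolved (an illustration only, not the library's actual implementation):

    import os
    import re

    def interpolate(value):
        # Replace ${NAME} or ${NAME:default} with the environment value (or the default).
        def repl(match):
            name, _, default = match.group(1).partition(":")
            return os.environ.get(name, default)
        return re.sub(r"\$\{([^}]+)\}", repl, value)

    os.environ["ENV_VAR"] = "/opt/data"
    print(interpolate("${ENV_VAR}/path"))     # /opt/data/path
    print(interpolate("${MISSING:default}"))  # default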
.. code-block:: ini
section:
- option1: {$ENV_VAR}
- option2: {$ENV_VAR}/path
- option3: {$ENV_VAR:default}
+ option1: ${ENV_VAR}
+ option2: ${ENV_VAR}/path
+ option3: ${ENV_VAR:default}
See also: :ref:`configuration-envs-interpolation`.
|
Update pyobjects test to be a list
Refs the discussion in | @@ -126,8 +126,8 @@ Pkg.removed("samba-imported", names=[Other.server, Other.client])
random_password_template = '''#!pyobjects
import random, string
-password = ''.join(random.SystemRandom().choice(
- string.ascii_letters + string.digits) for _ in range(20))
+password = ''.join([random.SystemRandom().choice(
+ string.ascii_letters + string.digits) for _ in range(20)])
'''
random_password_import_template = '''#!pyobjects
|