message | diff |
---|---|
install_rally.sh: moved zypper pkg manager up to handle the case when
yum is installed on SUSE to fulfill dependencies. | @@ -292,6 +292,14 @@ get_pkg_manager () {
else
pkg_manager="dnf install"
fi
+ elif have_command zypper; then
+ # SuSE. Needs to be before yum based RHEL/CentOS/Fedora to handle cases when
+ # yum is installed on SUSE to fulfill dependencies.
+ if [ "$ASKCONFIRMATION" -eq 0 ]; then
+ pkg_manager="zypper -n --no-gpg-checks --non-interactive install --auto-agree-with-licenses"
+ else
+ pkg_manager="zypper install"
+ fi
elif have_command yum; then
# yum based RHEL/CentOS/Fedora
if [ "$ASKCONFIRMATION" -eq 0 ]; then
@@ -299,13 +307,6 @@ get_pkg_manager () {
else
pkg_manager="yum install"
fi
- elif have_command zypper; then
- # SuSE
- if [ "$ASKCONFIRMATION" -eq 0 ]; then
- pkg_manager="zypper -n --no-gpg-checks --non-interactive install --auto-agree-with-licenses"
- else
- pkg_manager="zypper install"
- fi
else
# MacOSX maybe?
echo "Cannot determine what package manager this system has, so I cannot check if requisite software is installed. I'm proceeding anyway, but you may run into errors later."
|
kivy tx dialog: dscancel btn text was too long
see first part of | @@ -197,11 +197,12 @@ class TxDialog(Factory.Popup):
def update_action_button(self):
action_button = self.ids.action_button
+ # note: button texts need to be short; there is only horizontal space for ~13 chars
options = (
ActionButtonOption(text=_('Sign'), func=lambda btn: self.do_sign(), enabled=self.can_sign),
ActionButtonOption(text=_('Broadcast'), func=lambda btn: self.do_broadcast(), enabled=self.can_broadcast),
ActionButtonOption(text=_('Bump fee'), func=lambda btn: self.do_rbf(), enabled=self.can_rbf),
- ActionButtonOption(text=_('Cancel (double-spend)'), func=lambda btn: self.do_dscancel(), enabled=self.can_dscancel),
+ ActionButtonOption(text=_('Cancel') + '\n(double-spend)', func=lambda btn: self.do_dscancel(), enabled=self.can_dscancel),
ActionButtonOption(text=_('Remove'), func=lambda btn: self.remove_local_tx(), enabled=self.can_remove_tx),
)
num_options = sum(map(lambda o: bool(o.enabled), options))
@@ -226,7 +227,13 @@ class TxDialog(Factory.Popup):
self._action_button_fn = dropdown.open
for option in options:
if option.enabled:
- btn = Button(text=option.text, size_hint_y=None, height='48dp')
+ btn = Button(
+ text=option.text,
+ size_hint_y=None,
+ height='48dp',
+ halign='center',
+ valign='center',
+ )
btn.bind(on_release=option.func)
dropdown.add_widget(btn)
|
add dtype parameter to function.dotarg
This patch adds parameter `dtype` to `function.dotarg`, defaulting to `float`,
such that `function.dotarg` can be used for complex-valued arguments. | @@ -3186,7 +3186,7 @@ def rotmat(__arg: IntoArray) -> Array:
arg = Array.cast(__arg)
return stack([trignormal(arg), trigtangent(arg)], 0)
-def dotarg(__argname: str, *arrays: IntoArray, shape: Tuple[int, ...] = ()) -> Array:
+def dotarg(__argname: str, *arrays: IntoArray, shape: Tuple[int, ...] = (), dtype: DType = float) -> Array:
'''Return the inner product of the first axes of the given arrays with an argument with the given name.
An argument with shape ``(arrays[0].shape[0], ..., arrays[-1].shape[0]) +
@@ -3202,6 +3202,8 @@ def dotarg(__argname: str, *arrays: IntoArray, shape: Tuple[int, ...] = ()) -> A
The arrays to take inner products with.
shape : :class:`tuple` of :class:`int`, optional
The shape to be appended to the argument.
+ dtype : :class:`bool`, :class:`int`, :class:`float` or :class:`complex`
+ The dtype of the argument.
Returns
-------
@@ -3209,7 +3211,7 @@ def dotarg(__argname: str, *arrays: IntoArray, shape: Tuple[int, ...] = ()) -> A
The inner product with shape ``shape + arrays[0].shape[1:] + ... + arrays[-1].shape[1:]``.
'''
- result = Argument(__argname, tuple(array.shape[0] for array in arrays) + tuple(shape), dtype=float)
+ result = Argument(__argname, tuple(array.shape[0] for array in arrays) + tuple(shape), dtype=dtype)
for array in arrays:
result = numpy.sum(_append_axes(result.transpose((*range(1, result.ndim), 0)), array.shape[1:]) * array, result.ndim-1)
return result
|
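As a side note on the `dtype` change above: the docstring describes the operation as an inner product over the first axis of each operand, and the new parameter simply lets the argument be created as a complex (or int/bool) array so nothing is truncated to float. Below is a small numpy-only sketch of that contraction; the names and shapes are invented for illustration and are not part of the nutils API.

```python
import numpy as np

# Hypothetical stand-in for the named argument. Without a complex dtype it
# would be created as float and the imaginary part would be lost.
arg = np.array([1 + 2j, 3 - 1j, 0.5j])          # shape (3,), complex

basis = np.random.default_rng(0).random((3, 4))  # shape (3, 4), real

# Contract the argument against the first axis of `basis`, the pattern
# applied to each operand: result[j] = sum_i arg[i] * basis[i, j]
result = np.einsum('i,ij->j', arg, basis)

print(result.dtype)   # complex128 -- preserved because arg is complex
print(result.shape)   # (4,)
```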
requirements.txt: Don't use mwoauth-0.3.1
Follow-up to | @@ -37,7 +37,7 @@ unicodedata2>=7.0.0-2 ; python_version < '3'
# OAuth support
# mwoauth 0.2.4 is needed because it supports getting identity information
# about the user
-mwoauth>=0.2.4
+mwoauth>=0.2.4,!=0.3.1
# core interwiki_graph.py:
git+https://github.com/nlhepler/pydot#egg=pydot-1.0.29
|
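The specifier `>=0.2.4,!=0.3.1` keeps every release from 0.2.4 onwards except 0.3.1. A quick way to sanity-check a specifier like this is the `packaging` library, which implements the same grammar pip uses; the snippet below assumes `packaging` is installed.

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=0.2.4,!=0.3.1")

for version in ["0.2.3", "0.2.4", "0.3.0", "0.3.1", "0.3.2"]:
    # `version in spec` is True only if the version satisfies every clause.
    print(version, version in spec)
# 0.2.3 False, 0.2.4 True, 0.3.0 True, 0.3.1 False, 0.3.2 True
```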
Remove mirror trace for plot 2d
This could become confusing, especially when changing units from 0-pk to
pk-pk. | @@ -486,25 +486,6 @@ class Shape(Results):
),
)
- if orientation == "major":
- # duplicate
- fig.add_trace(
- go.Scatter(
- x=Q_(zn, "m").to(length_units).m,
- y=-1 * values,
- mode="lines",
- line=dict(color=self.color),
- name=f"{orientation}",
- showlegend=False,
- customdata=np.stack(
- (values, Q_(self.major_angle, "rad").to(phase_units).m)
- ).T,
- hovertemplate=(
- f"Displacement: %{{customdata[0]:.2f}}<br>"
- + f"Angle {phase_units}: %{{customdata[1]:.2f}}"
- ),
- ),
- )
# plot center line
fig.add_trace(
go.Scatter(
@@ -3028,6 +3009,7 @@ class ForcedResponseResults(Results):
speed,
frequency_units="rad/s",
amplitude_units="m",
+ phase_units="rad",
rotor_length_units="m",
fig=None,
**kwargs,
@@ -3057,7 +3039,22 @@ class ForcedResponseResults(Results):
if fig is None:
fig = go.Figure()
- fig = shape.plot_2d(length_units=rotor_length_units, fig=fig)
+ fig = shape.plot_2d(
+ phase_units=phase_units, length_units=rotor_length_units, fig=fig
+ )
+
+ # customize hovertemplate
+ fig.update_traces(
+ selector=dict(name="major"),
+ hovertemplate=(
+ f"Amplitude ({amplitude_units}): %{{y:.2e}}<br>"
+ + f"Phase ({phase_units}): %{{customdata:.2f}}<br>"
+ + f"Nodal Position ({rotor_length_units}): %{{x:.2f}}"
+ ),
+ )
+ fig.update_yaxes(
+ title_text=f"Major Axis Amplitude ({amplitude_units})",
+ )
return fig
|
Update Changes Section
Updated link to point to CHANGELOG.rst. | @@ -105,5 +105,6 @@ Changes
CfnCluster to AWS ParallelCluster
=================================
-In Version `2.0.0`, we changed the name of CfnCluster to AWS ParallelCluster. With that name change we released several new features, which you can read about here: https://aws.amazon.com/blogs/opensource/aws-parallelcluster/
+In Version `2.0.0`, we changed the name of CfnCluster to AWS ParallelCluster. With that name change we released several new features, which you can read about in the `Change Log`_.
+.. _`Change Log`: https://github.com/aws/aws-parallelcluster/blob/develop/CHANGELOG.rst#200
|
views: Render categorised help menu.
The help menu (opened by pressing `?`) will have keys segregated under
categories according to their usage. | @@ -5,7 +5,11 @@ import threading
import urwid
import time
-from zulipterminal.config.keys import KEY_BINDINGS, is_command_key
+from zulipterminal.config.keys import (
+ KEY_BINDINGS,
+ is_command_key,
+ HELP_CATEGORIES
+)
from zulipterminal.helper import asynch, match_user
from zulipterminal.ui_tools.buttons import (
TopicButton,
@@ -723,14 +727,29 @@ class HelpView(urwid.ListBox):
max_widths = [max(width) for width in zip(*widths)]
self.width = sum(max_widths)
- self.log = urwid.SimpleFocusListWalker(
- [urwid.AttrWrap(
+ help_menu_content = [] # type: List[urwid.AttrWrap]
+ for category in HELP_CATEGORIES:
+ if len(help_menu_content) > 0: # Separate categories by newline
+ help_menu_content.append(urwid.Text(''))
+
+ help_menu_content.append(urwid.Text(('category',
+ HELP_CATEGORIES[category])))
+
+ keys_in_category = (binding for binding in KEY_BINDINGS.values()
+ if binding['key_category'] == category)
+ for help_item_number, binding in enumerate(keys_in_category):
+ help_menu_content.append(
+ urwid.AttrWrap(
urwid.Columns([
urwid.Text(binding['help_text']),
- (max_widths[1], urwid.Text(", ".join(binding['keys'])))
+ (max_widths[1],
+ urwid.Text(", ".join(binding['keys'])))
], dividechars=2),
- None if index % 2 else 'help')
- for index, binding in enumerate(KEY_BINDINGS.values())])
+ None if help_item_number % 2 else 'help'
+ )
+ )
+
+ self.log = urwid.SimpleFocusListWalker(help_menu_content)
self.height = len(self.log)
|
Add SWA in optimizers.__init__
to fix the following error when `tfa.optimizers.SWA` is used:
AttributeError: module 'tensorflow_addons.optimizers' has no attribute 'SWA' | @@ -32,6 +32,7 @@ from tensorflow_addons.optimizers.lazy_adam import LazyAdam
from tensorflow_addons.optimizers.lookahead import Lookahead
from tensorflow_addons.optimizers.moving_average import MovingAverage
from tensorflow_addons.optimizers.rectified_adam import RectifiedAdam
+from tensorflow_addons.optimizers.stochastic_weight_averaging import SWA
from tensorflow_addons.optimizers.weight_decay_optimizers import AdamW
from tensorflow_addons.optimizers.weight_decay_optimizers import SGDW
from tensorflow_addons.optimizers.weight_decay_optimizers import (
|
Ignore failed docker restart in k8s example
This is trying to restart before the service is fully configured, I think,
and so it is failing. Just skipping for now. | @@ -2289,7 +2289,7 @@ function dind::custom-docker-opts {
local json=$(IFS="+"; echo "${jq[*]}")
docker exec -i ${container_id} /bin/sh -c "mkdir -p /etc/docker && jq -n '${json}' > /etc/docker/daemon.json"
docker exec ${container_id} systemctl daemon-reload
- docker exec ${container_id} systemctl restart docker
+ docker exec ${container_id} systemctl restart docker || true
fi
}
|
Update 14_Modeling_Protein_Ligand_Interactions_With_Atomic_Convolutions.ipynb
Removed a bug from the raw_dataset load. | "download_url(\"https://s3-us-west-1.amazonaws.com/deepchem.io/datasets/pdbbind_core_df.csv.gz\")\n",
"data_dir = os.path.join(dc.utils.get_data_dir())\n",
"dataset_file= os.path.join(dc.utils.get_data_dir(), \"pdbbind_core_df.csv.gz\")\n",
- "raw_dataset = dc.utils.save.load_from_disk(dataset_file)"
+ "raw_dataset = dc.utils.load_from_disk(dataset_file)"
],
"execution_count": 4,
"outputs": []
|
$.Iterators: minor documentation fixes
TN: | @@ -66,13 +66,13 @@ package ${ada_lib_name}.Iterators is
function "and" (Left, Right : ${pred_ref}) return ${pred_ref};
-- Return a predicate that accepts only nodes that are accepted by both
- -- Left and Right.
+ -- ``Left`` and ``Right``.
--
--% belongs-to: ${pred_ref}
function "or" (Left, Right : ${pred_ref}) return ${pred_ref};
- -- Return a predicate that accepts only nodes that are accepted by Left
- -- or Right.
+ -- Return a predicate that accepts only nodes that are accepted by ``Left``
+ -- or ``Right``.
--
--% belongs-to: ${pred_ref}
@@ -126,9 +126,13 @@ package ${ada_lib_name}.Iterators is
function Kind_Is (Kind : ${root_node_kind_name}) return ${pred_ref};
-- Return a predicate that accepts only nodes of the given ``Kind``
+ --
+ --% belongs-to: ${pred_ref}
function Text_Is (Text : Text_Type) return ${pred_ref};
-- Return a predicate that accepts only nodes that match the given ``Text``
+ --
+ --% belongs-to: ${pred_ref}
${exts.include_extension(ctx.ext('iterators', 'pred_public_decls'))}
|
Xfail ACHNBrowserUI/5.1, platform=iOS on 5.3.
Example failure: | "compatibility": "5.1",
"branch": "master",
"configuration": "debug"
+ },
+ "xfail": {
+ "issue": "https://bugs.swift.org/browse/SR-13198",
+ "compatibility": "5.1",
+ "branch": "swift-5.3-branch",
+ "configuration": "debug"
}
}
]
|
Update README.md
Adds badge for docker build | @@ -20,6 +20,7 @@ However, powerful business logic works in the background to ensure that stock tr
# Docker
[](https://hub.docker.com/r/inventree/inventree)
+
InvenTree is [available via Docker](https://hub.docker.com/r/inventree/inventree). Read the [docker guide](https://inventree.readthedocs.io/en/latest/start/docker/) for full details.
|
Removed some "typename" bits outside a template class as this caused errors on
some platforms. | @@ -68,7 +68,7 @@ namespace dials { namespace af {
Shoebox<>
> reflection_table_type_generator;
- typedef typename reflection_table_type_generator::type reflection_table_types;
+ typedef reflection_table_type_generator::type reflection_table_types;
typedef flex_table<reflection_table_types> reflection_table;
enum Flags {
@@ -125,15 +125,15 @@ namespace dials { namespace af {
class Reflection {
public:
- typedef typename reflection_table_type_generator::data_type data_type;
+ typedef reflection_table_type_generator::data_type data_type;
typedef std::map<std::string, data_type> map_type;
- typedef typename map_type::key_type key_type;
- typedef typename map_type::mapped_type mapped_type;
- typedef typename map_type::value_type map_value_type;
- typedef typename map_type::iterator iterator;
- typedef typename map_type::const_iterator const_iterator;
- typedef typename map_type::size_type size_type;
+ typedef map_type::key_type key_type;
+ typedef map_type::mapped_type mapped_type;
+ typedef map_type::value_type map_value_type;
+ typedef map_type::iterator iterator;
+ typedef map_type::const_iterator const_iterator;
+ typedef map_type::size_type size_type;
/**
* Instantiate
|
swarming: use fixed width bucket for slice expiration delay metrics
An exponential bucket does not have good granularity for this metric. | @@ -128,10 +128,13 @@ _tasks_expiration_delay = gae_ts_mon.CumulativeDistributionMetric(
# - project_id: e.g. 'chromium-swarm'
_tasks_slice_expiration_delay = gae_ts_mon.CumulativeDistributionMetric(
'swarming/tasks/slice_expiration_delay',
- 'Delay of task slice expiration, in seconds.', [
+ 'Delay of task slice expiration, in seconds.',
+ [
gae_ts_mon.StringField('project_id'),
gae_ts_mon.IntegerField('slice_index'),
- ])
+ ],
+ bucketer=gae_ts_mon.FixedWidthBucketer(width=30),
+)
_task_bots_runnable = gae_ts_mon.CumulativeDistributionMetric(
|
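To see why the switch matters: exponential (geometric) bucket boundaries grow multiplicatively, so resolution degrades quickly once delays reach a few minutes, while a 30-second fixed width keeps every bucket equally fine. A back-of-the-envelope comparison in plain Python (the growth factor below is an assumption for illustration, not the gae_ts_mon default):

```python
# First ten upper bounds of a geometric bucketer with growth factor 2,
# versus a bucketer with a fixed 30-second width.
growth = 2.0
exponential = [growth ** i for i in range(10)]
fixed_width = [30 * (i + 1) for i in range(10)]

print(exponential)  # [1.0, 2.0, 4.0, ..., 512.0] -> coarse past ~1 minute
print(fixed_width)  # [30, 60, 90, ..., 300]      -> even 30 s resolution
```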
client: log stacktrace
This is to confirm that subprocess42.Popen is directly called from
run_isolated. | @@ -35,6 +35,7 @@ import signal
import sys
import threading
import time
+import traceback
import subprocess
@@ -614,7 +615,8 @@ class Popen(subprocess.Popen):
except OSError:
self.kill()
self.wait()
- logging.info('crbug.com/1271827: before return')
+ logging.info('crbug.com/1271827: before return %s',
+ traceback.format_stack())
def duration(self):
"""Duration of the child process.
|
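For reference, `traceback.format_stack()` captures the current call stack without needing an exception, which is what makes it useful for confirming who called into `Popen` here. A minimal, generic sketch of the logging pattern (not run_isolated's actual call chain):

```python
import logging
import traceback

logging.basicConfig(level=logging.INFO)

def inner():
    # format_stack() returns one string per frame of the *current* stack;
    # joining them gives a readable trace in the log record.
    logging.info('reached inner(); call stack:\n%s',
                 ''.join(traceback.format_stack()))

def outer():
    inner()

outer()
```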
Remove commented code
bg theories cannot have a variable number of params; each polynomial must be explicitly defined rather than having a generic poly whose degree can be defined in a config widget. | @@ -271,10 +271,6 @@ class FitWidget(qt.QWidget):
self.associateConfigDialog("Polynomial",
getPolyDialog(parent=self,
default=self.fitconfig))
- # self.associateConfigDialog("Polynomial",
- # getPolyDialog(parent=self,
- # default=self.fitconfig),
- # theory_is_background=True)
def _populateFunctions(self):
"""Fill combo-boxes with fit theories and background theories
|
fix Black Magic on Fastchip
The latest commit still has issues with Fastchip, despite it fixing the color issues on //c and //c+. I just removed all the acceleration code completely. Seems to work on ][+, //c, //c+, Fastchip, and //gs without issues. | ;license:MIT
-;(c) 2019 by qkumba
+;(c) 2019 by qkumba/Tom Greene/Frank M.
!cpu 6502
!to "build/PRELAUNCH/BLACK.MAGIC",plain
!source "src/prelaunch/common.a"
- +ENABLE_ACCEL
+ ; acceleration has problems with Fastchip
+ ; let's remove it
lda #$60
sta $9C2
jsr $800 ; decompress
lda #$60
sta $1B2D
sta $D6E6
- jsr DisableAccelerator
jsr $1B00
- jsr EnableAccelerator
jsr $D000
lda #<callback
sta $8D5
lda #>callback
sta $8D6
- jsr DisableAccelerator
- sta $C05F
jmp $800
callback
lda #$4C
sta $D6EB
- jsr EnableAccelerator
- sta $C05F
jsr $D003
ldx #5
- lda reset, x
@@ -40,8 +35,6 @@ callback
bpl -
lda #$20
sta $D6EB
- jsr DisableAccelerator
- sta $C05F
jmp $D6EE
reset
|
issue 2.4 compat: replace iter_read with explicit generator
Can't use yield inside try/finally on 2.4. | @@ -505,6 +505,48 @@ def write_all(fd, s, deadline=None):
poller.close()
+class IteratingRead(object):
+ def __init__(self, fds, deadline=None):
+ self.deadline = deadline
+ self.poller = PREFERRED_POLLER()
+ for fd in fds:
+ self.poller.start_receive(fd)
+
+ self.bits = []
+ self.timeout = None
+
+ def close(self):
+ self.poller.close()
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ while self.poller.readers:
+ if self.deadline is not None:
+ self.timeout = max(0, self.deadline - time.time())
+ if self.timeout == 0:
+ break
+
+ for fd in self.poller.poll(self.timeout):
+ s, disconnected = mitogen.core.io_op(os.read, fd, 4096)
+ if disconnected or not s:
+ IOLOG.debug('iter_read(%r) -> disconnected', fd)
+ self.poller.stop_receive(fd)
+ else:
+ IOLOG.debug('iter_read(%r) -> %r', fd, s)
+ self.bits.append(s)
+ return s
+
+ if not self.poller.readers:
+ raise EofError(u'EOF on stream; last 300 bytes received: %r' %
+ (b('').join(self.bits)[-300:].decode('latin1'),))
+
+ raise mitogen.core.TimeoutError('read timed out')
+
+ __next__ = next
+
+
def iter_read(fds, deadline=None):
"""Return a generator that arranges for up to 4096-byte chunks to be read
at a time from the file descriptor `fd` until the generator is destroyed.
@@ -522,36 +564,7 @@ def iter_read(fds, deadline=None):
:raises mitogen.core.StreamError:
Attempt to read past end of file.
"""
- poller = PREFERRED_POLLER()
- for fd in fds:
- poller.start_receive(fd)
-
- bits = []
- timeout = None
- try:
- while poller.readers:
- if deadline is not None:
- timeout = max(0, deadline - time.time())
- if timeout == 0:
- break
-
- for fd in poller.poll(timeout):
- s, disconnected = mitogen.core.io_op(os.read, fd, 4096)
- if disconnected or not s:
- IOLOG.debug('iter_read(%r) -> disconnected', fd)
- poller.stop_receive(fd)
- else:
- IOLOG.debug('iter_read(%r) -> %r', fd, s)
- bits.append(s)
- yield s
- finally:
- poller.close()
-
- if not poller.readers:
- raise EofError(u'EOF on stream; last 300 bytes received: %r' %
- (b('').join(bits)[-300:].decode('latin1'),))
-
- raise mitogen.core.TimeoutError('read timed out')
+ return IteratingRead(fds=fds, deadline=deadline)
def discard_until(fd, s, deadline):
|
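The constraint driving this rewrite is that Python 2.4 did not allow `yield` inside a `try`/`finally` block, so the generator has to become a class that implements the iterator protocol by hand and exposes the cleanup as an explicit `close()`. A stripped-down sketch of that transformation, unrelated to mitogen's actual polling logic:

```python
# Not allowed on Python 2.4: yield inside try/finally.
#
# def read_chunks(chunks):
#     try:
#         for chunk in chunks:
#             yield chunk
#     finally:
#         print('cleanup')

class ReadChunks(object):
    """The same iteration written out as an explicit iterator class."""

    def __init__(self, chunks):
        self._it = iter(chunks)

    def close(self):
        # The cleanup the finally block used to do; the caller (or a
        # wrapper) is now responsible for invoking it.
        print('cleanup')

    def __iter__(self):
        return self

    def next(self):            # Python 2 iterator protocol
        return next(self._it)  # raises StopIteration when exhausted

    __next__ = next            # Python 3 alias

reader = ReadChunks([b'a', b'b'])
for chunk in reader:
    print(chunk)
reader.close()
```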
Register API as last step.
Rename internal attributes. | @@ -143,19 +143,20 @@ class Faucet(app_manager.RyuApp):
valve.update_config_metrics(self.metrics)
# Start BGP
- self.dp_bgp_speakers = {}
+ self._dp_bgp_speakers = {}
self._reset_bgp()
+ # Start all threads
+ self._threads = [
+ hub.spawn(thread) for thread in [
+ self._gateway_resolve_request, self._host_expire_request,
+ self._metric_update_request, self._advertise_request]]
+
# Register to API
api = kwargs['faucet_api']
api._register(self)
self.send_event_to_observers(EventFaucetAPIRegistered())
- # Start all threads
- self.threads = [
- hub.spawn(thread) for thread in [
- self._gateway_resolve_request, self._host_expire_request,
- self._metric_update_request, self._advertise_request]]
def _thread_reschedule(self, ryu_event, period):
"""Trigger Ryu events periodically with a jitter.
@@ -226,7 +227,7 @@ class Faucet(app_manager.RyuApp):
def _bgp_update_metrics(self):
"""Update BGP metrics."""
- for dp_id, bgp_speakers in list(self.dp_bgp_speakers.items()):
+ for dp_id, bgp_speakers in list(self._dp_bgp_speakers.items()):
for vlan, bgp_speaker in list(bgp_speakers.items()):
neighbor_states = list(json.loads(bgp_speaker.neighbor_state_get()).items())
for neighbor, neighbor_state in neighbor_states:
@@ -312,9 +313,9 @@ class Faucet(app_manager.RyuApp):
# TODO: port status changes should cause us to withdraw a route.
# TODO: configurable behavior - withdraw routes if peer goes down.
for dp_id, valve in list(self.valves.items()):
- if dp_id not in self.dp_bgp_speakers:
- self.dp_bgp_speakers[dp_id] = {}
- bgp_speakers = self.dp_bgp_speakers[dp_id]
+ if dp_id not in self._dp_bgp_speakers:
+ self._dp_bgp_speakers[dp_id] = {}
+ bgp_speakers = self._dp_bgp_speakers[dp_id]
for bgp_speaker in list(bgp_speakers.values()):
bgp_speaker.shutdown()
for vlan in list(valve.dp.vlans.values()):
|
Update channel-settings.rst
Fixing channel header sentence. | @@ -41,7 +41,7 @@ has `restricted the permissions <https://docs.mattermost.com/administration/conf
Adding links to the Channel Header
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Add links to the channel header using markdown.
+Use markdown to add links to the channel header.
Example: [This is an example link](www.example.com)
|
refactor: test_boxes: Use stream name instead of id in muted test cases.
Similar to pinned stream names in stream_categories, implement muted stream
names in test_generic_autocomplete_streams. | @@ -829,7 +829,7 @@ class TestWriteBox:
3: "#**Web public stream**",
4: "#**Secret stream**",
},
- {"muted": {99}},
+ {"muted": ["Secret stream"]},
),
# With 'Stream 1' and 'Secret stream' muted.
(
@@ -841,7 +841,7 @@ class TestWriteBox:
3: "#**Stream 1**",
4: "#**Secret stream**",
},
- {"muted": {99, 1}},
+ {"muted": ["Secret stream", "Stream 1"]},
),
# With 'Stream 1' and 'Secret stream' pinned, 'Secret stream' muted.
(
@@ -853,7 +853,7 @@ class TestWriteBox:
3: "#**Some general stream**",
4: "#**Web public stream**",
},
- {"pinned": ["Secret stream", "Stream 1"], "muted": {99}},
+ {"pinned": ["Secret stream", "Stream 1"], "muted": ["Secret stream"]},
),
# With 'Stream 1' and 'Secret stream' pinned, 'Some general stream' and 'Stream 2' muted.
(
@@ -865,7 +865,10 @@ class TestWriteBox:
3: "#**Stream 2**",
4: "#**Some general stream**",
},
- {"pinned": ["Secret stream", "Stream 1"], "muted": {1000, 2}},
+ {
+ "pinned": ["Secret stream", "Stream 1"],
+ "muted": ["Some general stream", "Stream 2"],
+ },
),
# With 'Stream 1' and 'Secret stream' pinned, 'Secret stream' and 'Stream 2' muted.
(
@@ -877,7 +880,10 @@ class TestWriteBox:
3: "#**Web public stream**",
4: "#**Stream 2**",
},
- {"pinned": ["Secret stream", "Stream 1"], "muted": {99, 2}},
+ {
+ "pinned": ["Secret stream", "Stream 1"],
+ "muted": ["Secret stream", "Stream 2"],
+ },
),
# With 'Stream 1' as current stream.
(
@@ -903,7 +909,7 @@ class TestWriteBox:
},
{
"pinned": ["Secret stream", "Stream 1"],
- "muted": {99, 2},
+ "muted": ["Secret stream", "Stream 2"],
"current_stream": 2,
},
),
@@ -927,7 +933,11 @@ class TestWriteBox:
write_box.view.pinned_streams = streams_to_pin
write_box.stream_id = stream_categories.get("current_stream", None)
write_box.model.stream_dict = stream_dict
- write_box.model.muted_streams = stream_categories.get("muted", set())
+ write_box.model.muted_streams = set(
+ stream["stream_id"]
+ for stream in stream_dict.values()
+ if stream["name"] in stream_categories.get("muted", set())
+ )
for state, required_typeahead in state_and_required_typeahead.items():
typeahead_string = write_box.generic_autocomplete(text, state)
assert typeahead_string == required_typeahead
|
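The heart of the fixture change above is resolving human-readable muted stream names back to stream ids at test setup time, which comes down to a set comprehension over the stream dictionary. A standalone illustration with invented data:

```python
stream_dict = {
    1: {'stream_id': 1, 'name': 'Stream 1'},
    2: {'stream_id': 2, 'name': 'Stream 2'},
    99: {'stream_id': 99, 'name': 'Secret stream'},
}
muted_names = ['Secret stream', 'Stream 2']

# Resolve names to ids the same way the updated test does.
muted_ids = {
    stream['stream_id']
    for stream in stream_dict.values()
    if stream['name'] in muted_names
}
print(muted_ids)  # {2, 99}
```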
Fix type in ImageNet/Resnet Example
block_cls type Sequence[int] -> ModuleDef | @@ -82,7 +82,7 @@ class BottleneckResNetBlock(nn.Module):
class ResNet(nn.Module):
"""ResNetV1."""
stage_sizes: Sequence[int]
- block_cls: Sequence[int]
+ block_cls: ModuleDef
num_classes: int
num_filters: int = 64
dtype: Any = jnp.float32
|
followed flake8 linting
Added covid19_telegram_bot
followed flake8 linting
Fix | @@ -4,7 +4,6 @@ Github: @iam-shanmukha
"""
import requests
from covid import Covid
-from os import *
previous_data = []
# #######################Telegram###########################
@@ -14,14 +13,9 @@ def send_msg(text):
token = "YOUR-TOKEN-HERE"
chat_id = "GROUP-CHAT-ID-HERE"
url_req = (
- "https://api.telegram.org/bot" +
- token +
- "/sendMessage" +
- "?chat_id=" +
- chat_id +
- "&text=" +
- text
- )
+ 'https://api.telegram.org/'
+ 'bot' + token + '/sendMessage' +
+ '?chat_id=' + chat_id + "&text=" + text)
results = requests.get(url_req)
print(results.json())
@@ -34,9 +28,10 @@ stat = (
"{} : \t{}".format(k, v)
for k, v in India_cases.items()
if not k.startswith(("population", "total"))
- ) +
- "\n#IndiaFightsCorona"
)
+ + "\n#IndiaFightsCorona"
+)
+
previous_data.append(stat)
while 1:
covid = Covid(source="worldometers")
@@ -46,8 +41,8 @@ while 1:
"{} : \t{}".format(k, v)
for k, v in India_cases.items()
if not k.startswith(("population", "total"))
- ) +
- "\n#IndiaFightsCorona"
+ )
+ + "\n#IndiaFightsCorona"
)
if stat in previous_data:
print("Duplicate")
|
added support for dict action space for dqn agent
Fixes a DQN loss calculation error when using a Dict action space. | @@ -507,6 +507,7 @@ class DqnAgent(tf_agent.TFAgent):
def _compute_q_values(self, time_steps, actions, training=False):
network_observation = time_steps.observation
+ actions = tf.nest.flatten(actions)[0]
if self._observation_and_action_constraint_splitter is not None:
network_observation, _ = self._observation_and_action_constraint_splitter(
@@ -516,7 +517,7 @@ class DqnAgent(tf_agent.TFAgent):
training=training)
# Handle action_spec.shape=(), and shape=(1,) by using the multi_dim_actions
# param. Note: assumes len(tf.nest.flatten(action_spec)) == 1.
- multi_dim_actions = self._action_spec.shape.rank > 0
+ multi_dim_actions = tf.nest.flatten(self._action_spec)[0].shape.rank > 0
return common.index_with_actions(
q_values,
tf.cast(actions, dtype=tf.int32),
@@ -546,6 +547,7 @@ class DqnAgent(tf_agent.TFAgent):
# action constraints are respected and helps centralize the greedy logic.
greedy_actions = self._target_greedy_policy.action(
next_time_steps, dummy_state).action
+ greedy_actions = tf.nest.flatten(greedy_actions)[0]
# Handle action_spec.shape=(), and shape=(1,) by using the multi_dim_actions
# param. Note: assumes len(tf.nest.flatten(action_spec)) == 1.
|
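The fix above leans on `tf.nest.flatten` returning the leaves of a possibly nested structure as a flat list, so a `Dict` action space with a single entry reduces to the same one-element list as a bare tensor. A hedged sketch of that behaviour, assuming TensorFlow 2.x is installed (the dict key and values are invented):

```python
import tensorflow as tf

# A bare action tensor, and the same tensor as a Dict action space would
# deliver it.
bare = tf.constant([2, 0, 1])
wrapped = {'discrete_move': tf.constant([2, 0, 1])}

# flatten() wraps a non-nested value in a list and pulls the single leaf
# out of the dict, so downstream code can always take element 0.
print(tf.nest.flatten(bare))     # [<tf.Tensor: ...>]
print(tf.nest.flatten(wrapped))  # [<tf.Tensor: ...>]

actions = tf.nest.flatten(wrapped)[0]
print(actions.numpy())           # [2 0 1]
```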
update tikv grafana allocator_stats name
changed allocator stats name from "jemalloc_stats" to "allocator_stats". Update grafana config accordingly.
Tested by deploying a cluster and checking grafana:
<img width="1005" alt="Screen Shot 2019-05-03 at 12 34 08 PM" src="https://user-images.githubusercontent.com/2606959/57161191-c68f0480-6d9f-11e9-87a2-3fd476142050.png"> | "steppedLine": false,
"targets": [
{
- "expr": "tikv_jemalloc_stats{instance=~\"$instance\"}",
+ "expr": "tikv_allocator_stats{instance=~\"$instance\"}",
"format": "time_series",
"hide": false,
"intervalFactor": 2,
"thresholds": [],
"timeFrom": null,
"timeShift": null,
- "title": "Jemalloc Stats",
+ "title": "Allocator Stats",
"tooltip": {
"shared": true,
"sort": 0,
|
Change the method of waiting in the playwright tests
We put effort into `Wait until loading is complete` so we might as
well use it! | @@ -12,7 +12,7 @@ Go to user profile
Click button:has-text("View profile")
Click .profile-card-name .profile-link-label
- Wait until network is idle
+ Wait until loading is complete
Take screenshot
Go to contacts home
@@ -20,5 +20,5 @@ Go to contacts home
Type text input[placeholder='Search apps and items...'] Contacts
Click one-app-launcher-menu-item:has-text("Contacts")
- Wait until network is idle
+ Wait until loading is complete
Take screenshot
|
Fix WeChatMessage.send_markdown
1. replace the discarded method with `send`, [ref commit](https://github.com/jxtech/wechatpy/commit/c876a71c5f21f3a5449abed72a5656b5e5c1b13c#diff-8d5d79095778bde1f0cd139596ba9d90)
2. unify params with those used in other methods | @@ -266,10 +266,10 @@ class WeChatMessage(BaseWeChatAPI):
"msgtype": "markdown",
"markdown": {"content": content}
}
- return self._send_message(
- agent_id=agent_id,
- user_ids=user_ids,
- party_ids=party_ids,
- tag_ids=tag_ids,
+ return self.send(
+ agent_id,
+ user_ids,
+ party_ids,
+ tag_ids,
msg=msg
)
|
Fix bugs in sample code
* add missing module
* Fix NameError
* typo
* Update heterogeneous.rst
missing the comma => SyntaxError
* Update heterogeneous.rst
Fix 'x' is not defined in class HGT
* Update heterogeneous.rst
refactor code block
* refactor code
* update
* update | @@ -370,9 +370,12 @@ These operators can be directly used to build heterogeneous GNN models as can be
self.lin = Linear(hidden_channels, out_channels)
def forward(self, x_dict, edge_index_dict):
- x_dict = {key: lin(x).relu() for key, lin in self.lin_dict.items()}
+ for node_type, x in x_dict.items():
+ x_dict[node_type] = self.lin_dict[node_type](x).relu_()
+
for conv in self.convs:
x_dict = conv(x_dict, edge_index_dict)
+
return self.lin(x_dict['author'])
model = HGT(hidden_channels=64, out_channels=dataset.num_classes,
@@ -410,7 +413,7 @@ Performing neighbor sampling using :class:`~torch_geometric.loader.NeighborLoade
train_loader = NeighborLoader(
data,
# Sample 15 neighbors for each node and each edge type for 2 iterations:
- num_neighbors=[15] * 2
+ num_neighbors=[15] * 2,
# Use a batch size of 128 for sampling training nodes of type "paper":
batch_size=128,
input_nodes=('paper', data['paper'].train_mask),
|
GafferDelightTest : Use 3delight's maya lambert as diffuse shader
Previously, it was using a shader named "matte" - I'm not sure where this was supposed to come from?
The 3delight live_edit example uses "matte", but it appears to only ship with this in source form, not compiled. | @@ -77,8 +77,8 @@ class InteractiveDelightRenderTest( GafferSceneTest.InteractiveRenderTest ) :
def _createMatteShader( self ) :
shader = GafferOSL.OSLShader()
- shader.loadShader( "matte" )
- return shader, shader["parameters"]["Cs"]
+ shader.loadShader( "maya/osl/lambert" )
+ return shader, shader["parameters"]["i_color"]
def _createPointLight( self ) :
|
build-langkit_support.py: forward build mode to ManageScript.run
TN: | @@ -98,6 +98,7 @@ def main():
argv.append(args.cmd)
if args.cmd == 'build-langkit-support':
+ argv.append('--build-mode={}'.format(args.build_mode))
if args.gargs:
argv.append('--gargs={}'.format(args.gargs))
if args.cmd == 'install-langkit-support':
|
update: ensure ceph command returns 0
These commands could return something other than 0.
Let's ensure all retries have been done before actually failing. | - name: non container | waiting for the monitor to join the quorum...
command: ceph --cluster "{{ cluster }}" -s --format json
register: ceph_health_raw
- until: >
- hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
- hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]
+ until:
+ - ceph_health_raw.rc == 0
+ - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
delegate_to: "{{ mon_host }}"
{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
register: ceph_health_raw
until: >
- hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
- hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"]
+ - ceph_health_raw.rc == 0
+ - (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
+ hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
delegate_to: "{{ mon_host }}"
|
Fix PulseVolume theme_path bug
Widget tried to draw the icons before loading the images from the
provided theme_path. | @@ -246,8 +246,8 @@ class PulseVolume(Volume):
return -1
def timer_setup(self):
+ if self.theme_path:
+ self.setup_images()
self.poll()
if self.update_interval is not None:
self.timeout_add(self.update_interval, self.timer_setup)
- if self.theme_path:
- self.setup_images()
|
Documentation updates for boto_secgroup
Documentation updates for and | @@ -40,13 +40,20 @@ passed in as a dict, or as a string to pull from pillars or minion config:
boto_secgroup.present:
- name: mysecgroup
- description: My security group
+ - vpc_name: myvpc
- rules:
- ip_protocol: tcp
from_port: 80
to_port: 80
cidr_ip:
- - 10.0.0.0/0
- - 192.168.0.0/0
+ - 10.0.0.0/8
+ - 192.168.0.0/16
+ - ip_protocol: tcp
+ from_port: 8080
+ to_port: 8090
+ cidr_ip:
+ - 10.0.0.0/8
+ - 192.168.0.0/16
- ip_protocol: icmp
from_port: -1
to_port: -1
@@ -56,8 +63,8 @@ passed in as a dict, or as a string to pull from pillars or minion config:
from_port: -1
to_port: -1
cidr_ip:
- - 10.0.0.0/0
- - 192.168.0.0/0
+ - 10.0.0.0/8
+ - 192.168.0.0/16
- tags:
SomeTag: 'My Tag Value'
SomeOtherTag: 'Other Tag Value'
|
manpage - fix grammar problem [skip appveyor]
A stray word wasn't immediately spotted as it appeared on the next line. | @@ -1227,7 +1227,7 @@ Python 3.9 and onwards clients always default to MD5, even in FIPS mode.
The default database is <filename>.sconsign.dblite</filename>.
In the presence of this option,
<replaceable>ALGORITHM</replaceable> is
-be included in the name to indicate the difference,
+included in the name to indicate the difference,
even if the argument is <parameter>md5</parameter>.
For example, <option>--hash-format=sha256</option> uses a SConsign
database named <filename>.sconsign_sha256.dblite</filename>.
|
show label for table fields
All fields, except table ones, display their labels, which is a natural thing to do. | {%- if data -%}
{%- set visible_columns = get_visible_columns(doc.get(df.fieldname),
table_meta, df) -%}
+ <label>{{ _(df.label) }}</label>
<div {{ fieldmeta(df) }}>
<table class="table table-bordered table-condensed">
<thead>
|
docs: youtube channel in "about" section
* YouTube channel added in the about section.
I have added ERPNext YouTube channel reference in the about section.
#Create an official Handle for YouTube Channel (Like or erpnextofficial)
* chore: handle
[skip ci] | @@ -19,6 +19,8 @@ frappe.ui.misc.about = function () {
Facebook: <a href='https://facebook.com/erpnext' target='_blank'>https://facebook.com/erpnext</a></p>
<p><i class='fa fa-twitter fa-fw'></i>
Twitter: <a href='https://twitter.com/erpnext' target='_blank'>https://twitter.com/erpnext</a></p>
+ <p><i class='fa fa-youtube fa-fw'></i>
+ YouTube: <a href='https://www.youtube.com/@erpnextofficial' target='_blank'>https://www.youtube.com/@erpnextofficial</a></p>
<hr>
<h4>${__("Installed Apps")}</h4>
<div id='about-app-versions'>${__("Loading versions...")}</div>
|
Source: Rename cog + move checks status from footer to field
Renamed cog from `Source` to `BotSource` for itself (bot will be
unable to get the cog, because this always returns a command).
Moved the checks status from the footer to a field and changed its content. | @@ -8,9 +8,6 @@ from discord.ext.commands import BadArgument, Cog, Command, Context, Converter,
from bot.bot import Bot
from bot.constants import URLs
-CANT_RUN_MESSAGE = "You can't run this command here."
-CAN_RUN_MESSAGE = "You are able to run this command."
-
class SourceConverter(Converter):
"""Convert argument to help command, command or Cog."""
@@ -39,7 +36,7 @@ class SourceConverter(Converter):
raise BadArgument(f"Unable to convert `{argument}` to valid command or Cog.")
-class Source(Cog):
+class BotSource(Cog):
"""Cog of Python Discord projects source information."""
def __init__(self, bot: Bot):
@@ -92,11 +89,11 @@ class Source(Cog):
embed.add_field(name="Source Code", value=f"[Go to GitHub]({link})")
if isinstance(source_object, Command):
- embed.set_footer(text=CAN_RUN_MESSAGE if await source_object.can_run(ctx) else CANT_RUN_MESSAGE)
+ embed.add_field(name="Can be used by you here?", value=await source_object.can_run(ctx))
return embed
def setup(bot: Bot) -> None:
"""Load `Source` cog."""
- bot.add_cog(Source(bot))
+ bot.add_cog(BotSource(bot))
|
Update the bandit.yaml available tests list
According to the bandit current version document,
the B109 and B111 plugin has been removed.
And Add the following tests:
Complete Test Plugin Listing: B507, B610, B611, B703
Blacklist Plugins Listing: B322, B323, B325, B413, B414
Reference URL:
Closes-Bug: | # B106 : hardcoded_password_funcarg
# B107 : hardcoded_password_default
# B108 : hardcoded_tmp_directory
-# B109 : password_config_option_not_marked_secret
# B110 : try_except_pass
-# B111 : execute_with_run_as_root_equals_true
# B112 : try_except_continue
# B201 : flask_debug_true
# B301 : pickle
# B319 : xml_bad_pulldom
# B320 : xml_bad_etree
# B321 : ftplib
+# B322 : input
+# B323 : unverified_context
+# B325 : tempnam
# B401 : import_telnetlib
# B402 : import_ftplib
# B403 : import_pickle
# B410 : import_lxml
# B411 : import_xmlrpclib
# B412 : import_httpoxy
+# B413 : import_pycrypto
+# B414 : import_pycryptodome
# B501 : request_with_no_cert_validation
# B502 : ssl_with_bad_version
# B503 : ssl_with_bad_defaults
# B504 : ssl_with_no_version
# B505 : weak_cryptographic_key
# B506 : yaml_load
+# B507 : ssh_no_host_key_verification
# B601 : paramiko_calls
# B602 : subprocess_popen_with_shell_equals_true
# B603 : subprocess_without_shell_equals_true
# B607 : start_process_with_partial_path
# B608 : hardcoded_sql_expressions
# B609 : linux_commands_wildcard_injection
+# B610 : django_extra_used
+# B611 : django_rawsql_used
# B701 : jinja2_autoescape_false
# B702 : use_of_mako_templates
+# B703 : django_mark_safe
# (optional) list included test IDs here, eg '[B101, B406]':
tests: [B102, B103, B302, B306, B308, B309, B310, B401, B501, B502, B506, B601, B602, B609]
|
Update docstring for check_user method
Remove claimed col
disabled users are not registered | @@ -41,12 +41,12 @@ def check_user(user):
"""
Verify users' status.
- registered confirmed disabled merged usable password claimed
- ACTIVE: x x o o x x
- NOT_CONFIRMED: o o o o x o
- NOT_CLAIMED: o o o o o o
- DISABLED: o x x o x x
- USER_MERGED: x x o x o x
+ registered confirmed disabled merged usable password
+ ACTIVE: x x o o x
+ NOT_CONFIRMED: o o o o x
+ NOT_CLAIMED: o o o o o
+ DISABLED: x x x o x
+ USER_MERGED: x x o x o
:param user: the user
:raises UnconfirmedAccountError
|
PS - all published instances could be grouped with a task name
Subsets in a group will show up together in Loader. | +import os
import pyblish.api
from avalon import photoshop
@@ -19,6 +20,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
families_mapping = {
"image": []
}
+ # True will add all instances to same group in Loader
+ group_by_task_name = False
def process(self, context):
stub = photoshop.stub()
@@ -49,6 +52,12 @@ class CollectInstances(pyblish.api.ContextPlugin):
layer_data["family"]
]
instance.data["publish"] = layer.visible
+
+ if self.group_by_task_name:
+ task = os.getenv("AVALON_TASK", None)
+ sanitized_task_name = task[0].upper() + task[1:]
+ instance.data["subsetGroup"] = sanitized_task_name
+
instance_names.append(layer_data["subset"])
# Produce diagnostic message for any graphical
|
Add embed message mentioning help channel claimant
Update docstring | @@ -105,10 +105,18 @@ class HelpChannels(commands.Cog):
"""
Claim the channel in which the question `message` was sent.
- Move the channel to the In Use category and pin the `message`. Add a cooldown to the
- claimant to prevent them from asking another question. Lastly, make a new channel available.
+ Send an embed stating the claimant, move the channel to the In Use category, and pin the `message`.
+ Add a cooldown to the claimant to prevent them from asking another question.
+ Lastly, make a new channel available.
"""
log.info(f"Channel #{message.channel} was claimed by `{message.author.id}`.")
+
+ embed = discord.Embed(
+ description=f"Channel claimed by {message.author.mention}.",
+ color=constants.Colours.bright_green,
+ )
+ await message.channel.send(embed=embed)
+
await self.move_to_in_use(message.channel)
# Handle odd edge case of `message.author` not being a `discord.Member` (see bot#1839)
|
Change imported files to read only
as per Arjun's suggestion. Also minor refactoring. | @@ -164,15 +164,19 @@ class FileJobStore(AbstractJobStore):
# Functions that deal with temporary files associated with jobs
##########################################
- def _copyOrLink(self, url, path):
+ def _copyOrLink(self, srcURL, destPath):
# linking is not done be default because of issue #1755
+ srcPath = self._extractPathFromUrl(srcURL)
if self.linkImports:
try:
- os.link(os.path.realpath(self._extractPathFromUrl(url)), path)
+ os.link(os.path.realpath(srcPath), destPath)
except OSError:
- shutil.copyfile(self._extractPathFromUrl(url), path)
+ shutil.copyfile(srcPath, destPath)
else:
- shutil.copyfile(self._extractPathFromUrl(url), path)
+ # make imported files read-only if they're linked for protection
+ os.chmod(destPath, 0o444)
+ else:
+ shutil.copyfile(srcPath, destPath)
def _importFile(self, otherCls, url, sharedFileName=None):
if issubclass(otherCls, FileJobStore):
|
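The pattern in `_copyOrLink` is: try to hard-link the imported file, fall back to a plain copy, and mark successfully linked files read-only so a consumer cannot mutate the original through the shared inode. A generic sketch of the same idea (the function name and flag below are placeholders, not Toil's API):

```python
import os
import shutil

def copy_or_link(src_path, dest_path, link_imports=True):
    """Hard-link src into place when possible, otherwise copy it."""
    if link_imports:
        try:
            os.link(os.path.realpath(src_path), dest_path)
        except OSError:
            # Cross-device link, unsupported filesystem, etc.: fall back.
            shutil.copyfile(src_path, dest_path)
        else:
            # The link shares the original file's inode, so make it
            # read-only to protect the source from accidental writes.
            os.chmod(dest_path, 0o444)
    else:
        shutil.copyfile(src_path, dest_path)
```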
Fix issue with `.loc` returning values out of order.
`cudf.merge` doesn't guarantee item ordering, an important feature of `loc`. This PR fixes that by adding a reordering step to post processing the merge.
Fixes
Authors:
- H. Thomson Comer (https://github.com/thomcom)
Approvers:
- Michael Wang (https://github.com/isVoid)
URL: | @@ -245,12 +245,14 @@ class GeoSeries(cudf.Series):
else 0,
}
)
- index_df = cudf.DataFrame({"map": item})
- new_index = index_df.merge(map_df, how="left")["idx"]
+ index_df = cudf.DataFrame({"map": item}).reset_index()
+ new_index = index_df.merge(
+ map_df, how="left", sort=False
+ ).sort_values("index")
if isinstance(item, Integral):
- return self._sr.iloc[new_index[0]]
+ return self._sr.iloc[new_index["idx"][0]]
else:
- result = self._sr.iloc[new_index]
+ result = self._sr.iloc[new_index["idx"]]
result.index = item
return result
|
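The essence of the fix is that a join does not promise to return rows in the order of the left-hand keys, so the caller's positional index has to be carried through the merge and restored with a sort afterwards. A pandas stand-in for the idea (the frames below are invented; cudf is the motivating case, pandas is used here only so the sketch runs anywhere):

```python
import pandas as pd

# Mapping from user-visible labels ("map") to integer positions ("idx").
map_df = pd.DataFrame({'map': [10, 20, 30, 40], 'idx': [0, 1, 2, 3]})

# Labels requested by .loc, in the order the caller expects them back.
item = pd.DataFrame({'map': [30, 10, 40]}).reset_index()

# A merge may shuffle rows; sorting on the preserved positional 'index'
# column restores the request order before 'idx' is used for iloc.
joined = item.merge(map_df, how='left', sort=False).sort_values('index')
print(joined['idx'].tolist())  # [2, 0, 3] -> positions in request order
```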
Skip E741 for viz flake8 code
Unfortunately this is a keyword arg to a plotly class. | ignore = D203, E124, E126, F403, F405, F811, E402, E129, W605, W504
max-line-length = 160
exclude = parsl/executors/serialize/, test_import_fail.py
+# E741 disallows ambiguous single letter names which look like numbers
+# We disable it in visualization code because plotly uses 'l' as
+# a keyword arg
+per-file-ignores = parsl/monitoring/visualization/*:E741
|
imaplib: Add new functions from Python 3.9
Add imaplib.IMAP4.unselect() | @@ -93,6 +93,8 @@ class IMAP4:
def thread(self, threading_algorithm: str, charset: str, *search_criteria: str) -> _CommandResults: ...
def uid(self, command: str, *args: str) -> _CommandResults: ...
def unsubscribe(self, mailbox: str) -> _CommandResults: ...
+ if sys.version_info >= (3, 9):
+ def unselect(self) -> _CommandResults: ...
def xatom(self, name: str, *args: str) -> _CommandResults: ...
def print_log(self) -> None: ...
|
[Release] Update index.json for extension [ dns-resolver ]
Triggered by Azure CLI Extensions Release Pipeline - ADO_BUILD_URL:
Last commit: | "version": "0.1.0"
},
"sha256Digest": "4537129127c2e4efc69909c5769753d3f8213d5e3eef9c4d42282d4e011905d8"
+ },
+ {
+ "downloadUrl": "https://azcliprod.blob.core.windows.net/cli-extensions/dns_resolver-0.2.0-py3-none-any.whl",
+ "filename": "dns_resolver-0.2.0-py3-none-any.whl",
+ "metadata": {
+ "azext.minCliCoreVersion": "2.39.0",
+ "classifiers": [
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: System Administrators",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "License :: OSI Approved :: MIT License"
+ ],
+ "extensions": {
+ "python.details": {
+ "contacts": [
+ {
+ "email": "[email protected]",
+ "name": "Microsoft Corporation",
+ "role": "author"
+ }
+ ],
+ "document_names": {
+ "description": "DESCRIPTION.rst"
+ },
+ "project_urls": {
+ "Home": "https://github.com/Azure/azure-cli-extensions/tree/main/src/dns-resolver"
+ }
+ }
+ },
+ "generator": "bdist_wheel (0.30.0)",
+ "license": "MIT",
+ "metadata_version": "2.0",
+ "name": "dns-resolver",
+ "summary": "Microsoft Azure Command-Line Tools DnsResolverManagementClient Extension",
+ "version": "0.2.0"
+ },
+ "sha256Digest": "1c4bb8216e509c2f08fa75c45930ec377768326f30cb9ab125842aa9352c6e2e"
}
],
"edgeorder": [
|
change file names
to avoid confusion with the input csv files called DH_AllEdges.csv and DH_AllNodes.csv | @@ -186,13 +186,13 @@ class InputLocator(object):
"""scenario/outputs/data/optimization/network/layout/DH_AllNodes.csv or DC_AllNodes.csv
List of plant and consumer nodes in a district heating or cooling network and their building names
"""
- return os.path.join(self.get_optimization_network_layout_folder(), network_type + "_" + network_name + "_AllNodes.csv")
+ return os.path.join(self.get_optimization_network_layout_folder(), network_type + "_" + network_name + "_Nodes.csv")
def get_optimization_network_edge_list_file(self, network_type, network_name):
"""scenario/outputs/data/optimization/network/layout/DH_AllEdges.csv or DC_AllEdges.csv
List of edges in a district heating or cooling network and their start and end nodes
"""
- return os.path.join(self.get_optimization_network_layout_folder(), network_type + "_" + network_name + "_AllEdges.csv")
+ return os.path.join(self.get_optimization_network_layout_folder(), network_type + "_" + network_name + "_Edges.csv")
def get_optimization_network_layout_massflow_file(self, network_type, network_name):
"""scenario/outputs/data/optimization/network/layout/DH_MassFlow.csv or DC_MassFlow.csv
|
C API: add exception wrapping in unit token primitives
TN: | @@ -289,39 +289,69 @@ package body ${ada_lib_name}.Analysis.C is
procedure ${capi.get_name('unit_first_token')}
(Unit : ${analysis_unit_type};
- Token : ${token_type}_Ptr)
- is
+ Token : ${token_type}_Ptr) is
+ begin
+ Clear_Last_Exception;
+
+ declare
U : constant Analysis_Unit := Unwrap (Unit);
T : constant Token_Type := First_Token (U);
begin
Token.all := Wrap (T);
end;
+ exception
+ when Exc : others =>
+ Set_Last_Exception (Exc);
+ end;
procedure ${capi.get_name('unit_last_token')}
(Unit : ${analysis_unit_type};
- Token : ${token_type}_Ptr)
- is
+ Token : ${token_type}_Ptr) is
+ begin
+ Clear_Last_Exception;
+
+ declare
U : constant Analysis_Unit := Unwrap (Unit);
T : constant Token_Type := Last_Token (U);
begin
Token.all := Wrap (T);
end;
+ exception
+ when Exc : others =>
+ Set_Last_Exception (Exc);
+ end;
function ${capi.get_name('unit_token_count')}
- (Unit : ${analysis_unit_type}) return int
- is
+ (Unit : ${analysis_unit_type}) return int is
+ begin
+ Clear_Last_Exception;
+
+ declare
U : constant Analysis_Unit := Unwrap (Unit);
begin
return int (Token_Count (U));
end;
+ exception
+ when Exc : others =>
+ Set_Last_Exception (Exc);
+ return -1;
+ end;
function ${capi.get_name('unit_trivia_count')}
- (Unit : ${analysis_unit_type}) return int
- is
+ (Unit : ${analysis_unit_type}) return int is
+ begin
+ Clear_Last_Exception;
+
+ declare
U : constant Analysis_Unit := Unwrap (Unit);
begin
return int (Trivia_Count (U));
end;
+ exception
+ when Exc : others =>
+ Set_Last_Exception (Exc);
+ return -1;
+ end;
function ${capi.get_name('unit_filename')}
(Unit : ${analysis_unit_type}) return chars_ptr
|
Update DiyServo.py
Updated the example with how to set the PID values | @@ -24,5 +24,7 @@ motor.attach(arduino)
# Start the DiyServo
servo = Runtime.start("diyservo","DiyServo")
servo.attach(arduino,A0) # Attach the analog pin 0
+# Set the PID values. This example shows what DiyServo has as default.
+servo.pid.setPID("diyservo", 0.020, 0.001, 0.0);
servo.moveTo(90)
# At this stage you can use the gui or a script to control the DiyServo
|
Update android_generic.txt
Moved to dedicated ```android_parcel.txt``` | @@ -631,19 +631,6 @@ korzystna.biz/praca.apk
morefuntfkjaskjfk123.cx
techndevs.us
-# Reference: https://www.virustotal.com/gui/file/c716c56d401815842120a61140098f9e851d1f79cf4088a56ec6f1b6fd4bad62/detection
-# Reference: https://vms.drweb.com/virus/?i=14931549&lng=en
-# Reference: https://www.hybrid-analysis.com/sample/5b4cbd92c1cc6f946704b56845f6b3cec8caab2cb73eb9909f07e7e7d7849595?environmentId=200
-# Reference: https://blog.naver.com/ian3714/220366680356 (Korean)
-
-http://113.10.136.103
-http://220.142.173.138
-/dor000ft.php
-/hp_state.php?telnum=
-/hp_getsmsblockstate.php?telnum=
-/index.php?type=join&telnum=
-/index.php?type=receivesms&telnum=
-
# Reference: https://a.virscan.org/language/en/089b3e6ba3b60181f96708892321e2b7
xoez.xinzj1.com
|
skip import nccl and gloo_gpu in cpu machine
Summary:
Pull Request resolved:
Skip importing nccl and gloo_gpu modules on CPU-only machines. | @@ -18,8 +18,13 @@ from caffe2.proto import caffe2_pb2
import numpy as np
import warnings
-dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nccl:nccl_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
+
+# We only import nccl operators when the machine has GPUs
+# Otherwise the binary can be compiled with CPU-only mode, and
+# will not be able to find those modules
+if workspace.NumGpuDevices() > 0:
+ dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nccl:nccl_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
log = logging.getLogger("data_parallel_model")
|
Set credentials in STORAGE_URL for S3 func tests
This sets the s3rver username and password for the
S3 functional testing jobs, required since we have
bumped the s3rver version. | @@ -41,6 +41,7 @@ for storage in ${GNOCCHI_TEST_STORAGE_DRIVERS}; do
export PATH=$PWD/npm-s3rver/bin:$PATH
fi
eval $(pifpaf -e STORAGE run s3rver)
+ STORAGE_URL=s3://S3RVER:S3RVER@localhost:4568
;;
file)
STORAGE_URL=file://
|
added advanced logging config for audit
* added advanced logging config for audit
* Update source/administration/config-settings.rst
* Update source/administration/config-settings.rst
* Update config-settings.rst
Reorganized logging update. | @@ -1083,15 +1083,13 @@ Advanced Logging
Output logs to multiple targets
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Send log records to multiple targets:
-
-- Multiple local file targets
-- Multiple syslogs
-- Multiple TCP sockets
+Allow any combination of local file, syslog, and TCP socket targets and send log records to multiple targets:
-Allow any combination of local file, syslog, and TCP socket targets. These three targets have been chosen to support the vast majority of log aggregators and other log analysis tools without having to install additional software.
+- Multiple local file targets: Supports rotation and compression triggered by size and/or duration.
+- Multiple syslogs: Supports local and remote syslog servers, with or without TLS transport.
+- Multiple TCP sockets: TCP socket target can be configured with an IP address or domain name, port, and optional TLS certificate.
-File target supports rotation and compression triggered by size and/or duration. Syslog target supports local and remote syslog servers, with or without TLS transport. TCP socket target can be configured with an IP address or domain name, port, and optional TLS certificate.
+These three targets have been chosen to support the vast majority of log aggregators and other log analysis tools without having to install additional software.
Beyond the standard log levels (trace, debug, info, panic), discrete log levels can also be specified.
@@ -4780,6 +4778,26 @@ This setting can be left as default unless you are seeing audit write failures i
| This feature's ``config.json`` setting is ``"FileMaxQueueSize": 1000`` with numerical input. |
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+Advanced Audit Logging Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*Available in Enterprise Edition E20*
+
+Output logs to multiple targets
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Send log records to multiple targets:
+
+- Multiple local file targets
+- Multiple syslogs
+- Multiple TCP sockets
+
+Allow any combination of local file, syslog, and TCP socket targets.
+
+File target supports rotation and compression triggered by size and/or duration. Syslog target supports local and remote syslog servers, with or without TLS transport. TCP socket target can be configured with an IP address or domain name, port, and optional TLS certificate.
+
+This feature's ``config.json`` setting is ``ExperimentalAuditSettings.AdvancedLoggingConfig`` which can contain a filespec to another config file, a database DSN, or JSON. Options are outlined in this txt file: `Log Settings Options <https://github.com/mattermost/docs/files/5066579/Log.Settings.Options.txt>`_. Sample config: `Advanced Logging Options Sample.json.zip <https://github.com/mattermost/docs/files/5066597/Advanced.Logging.Options.Sample.json.zip>`_.
+
Service Settings
~~~~~~~~~~~~~~~~
|
Fix the wheel count check for pex building.
### Problem
fixed most of the locations that were expecting particular whl counts, but missed one.
### Solution
Fix the last whl count check.
### Result
Confirmed that `build-support/bin/release.sh -p` works. | @@ -513,8 +513,8 @@ function fetch_and_check_prebuilt_wheels() {
fi
done
- # N.B. For platform-specific wheels, we expect 6 wheels: {linux,osx} * {cp27m,cp27mu,abi3}.
- if [ "${cross_platform}" != "true" ] && [ ${#packages[@]} -ne 6 ]; then
+ # N.B. For platform-specific wheels, we expect 2 wheels: {linux,osx} * {abi3,}.
+ if [ "${cross_platform}" != "true" ] && [ ${#packages[@]} -ne 2 ]; then
missing+=("${PACKAGE} (expected whls for each platform: had only ${packages[@]})")
continue
fi
|
migrating ipm etl to data packages
Working on Issue | @@ -401,6 +401,54 @@ def _etl_epacems_pkg(etl_params, data_dir, pkg_dir):
# logger.info(time_message)
return(['hourly_emissions_epacems'])
+###############################################################################
+# EPA IPM ETL FUNCTIONS
+###############################################################################
+
+def _validate_input_epaipm(etl_params):
+ epaipm_dict = {}
+ # pull out the etl_params from the dictionary passed into this function
+ try:
+ epaipm_dict['epaipm_tables'] = etl_params['epaipm_tables']
+ except KeyError:
+ epaipm_dict['epaipm_tables'] = [None]
+ return(epaipm_dict)
+
+def _load_static_tables_epaipm(pkg_dir):
+ # compile the dfs in a dictionary, prep for dict_dump
+ static_dfs = {'region_id_ipm': pc.epaipm_region_names}
+
+ # run the dictionary of prepped static tables through dict_dump to make
+ # CSVs
+ pudl.load.dict_dump(static_dfs,
+ "Static IPM Tables",
+ need_fix_inting=pc.need_fix_inting,
+ pkg_dir=pkg_dir)
+
+ return list(static_dfs.keys())
+
+def _etl_epaipm(etl_params,data_dir, pkg_dir):
+ epaipm_dict = _validate_input_epaipm(etl_params)
+ epaipm_tables = epaipm_dict['epaipm_tables']
+ static_tables = _load_static_tables_epaipm(pkg_dir)
+
+ # Extract IPM tables
+ epaipm_raw_dfs = pudl.extract.epaipm.extract(
+ epaipm_tables, data_dir=data_dir)
+
+ epaipm_transformed_dfs = pudl.transform.epaipm.transform(
+ epaipm_raw_dfs, epaipm_tables
+ )
+
+ pudl.load.dict_dump(
+ epaipm_transformed_dfs,
+ "EPA IPM",
+ need_fix_inting=pc.need_fix_inting,
+ pkg_dir=pkg_dir
+ )
+
+ return list(epaipm_transformed_dfs.keys()) + static_tables
+
###############################################################################
# GLUE EXPORT FUNCTIONS
@@ -482,6 +530,7 @@ def validate_input(pkg_bundle_settings):
'ferc1': _validate_input_ferc1,
'epacems': _validate_input_epacems,
'glue': _validate_input_glue,
+ 'epaipm': _validate_input_epaipm
}
# where we are going to compile the new validated settings
validated_settings = []
@@ -552,6 +601,12 @@ def etl_pkg(pkg_settings, pudl_settings):
dataset_dict['glue'],
pkg_dir=pkg_dir
)
+ elif dataset == 'epaipm':
+ tbls = _etl_epaipm(
+ dataset_dict['epaipm'],
+ data_dir=pudl_settings['data_dir'],
+ pkg_dir=pkg_dir
+ )
else:
raise AssertionError(
f'Invalid dataset {dataset} found in input.'
|
update hekatomb to install with pip
hekatomb is now available on pypi to simplify its installation | @@ -245,12 +245,13 @@ vault::cred /in:C:\Users\demo\AppData\Local\Microsoft\Vault\"
> Finally, it will extract domain controller private key through RPC uses it to decrypt all credentials.
```python
-python3 hekatomb.py -hashes :ed0052e5a66b1c8e942cc9481a50d56 DOMAIN.local/[email protected] -debug -dnstcp
+pip3 install hekatomb
+hekatomb -hashes :ed0052e5a66b1c8e942cc9481a50d56 DOMAIN.local/[email protected] -debug -dnstcp
```
<a href="https://github.com/Processus-Thief/HEKATOMB">https://github.com/Processus-Thief/HEKATOMB</a>
-
+
## Mimikatz - Commands list
|
Update README.md
Fixed bad in-document navigation links (different syntax compared to bitbucket). | @@ -18,7 +18,7 @@ On all platforms pstoedit support is broken by ghostscript releases 9.21 and lat
## New in relase 0.7.2 (2018-04-06)
- Fixed: Failure on missing Inkscape version key (issue #10: "Error occurred while converting text from LaTeX to SVG")
-[(go to full release history...)](#markdown-header-release-history)
+[(go to full release history...)](#release-history)
## Prerequisites
@@ -28,7 +28,7 @@ On all platforms pstoedit support is broken by ghostscript releases 9.21 and lat
- **Important:** If you use Inkscape 0.92 please ensure that the python 2.7 interpreter has been selected during the installation of Inkscape (by default this is the case).
-## Installation for Linux [(go to Windows instructions...)](#markdown-header-installation-for-windows).
+## Installation for Linux [(go to Windows instructions...)](#installation-for-windows).
### Tex Text Extension
@@ -50,7 +50,7 @@ To install *Tex Text*, simply download the package and extract it. A directory w
- Finally, to enable the preview feature of *Tex Text*, you need to install `ImageMagick`.
-### Congratulations, you're done! [Show Usage...](#markdown-header-usage)
+### Congratulations, you're done! [Show Usage...](#usage)
## Installation for Windows
|
Fix re2 dictionary
Fixes: | @@ -25,6 +25,6 @@ RUN apt-get update && \
RUN git clone https://github.com/google/re2.git
-RUN wget -qO $OUT/fuzz-target.dict \
+RUN wget -qO $OUT/fuzzer.dict \
https://raw.githubusercontent.com/google/fuzzing/master/dictionaries/regexp.dict
COPY build.sh target.cc $SRC/
|
mgr: add missing admin key for mgr container
Followup on
Add missing admin key for mgr node in containerized deployment. | set_fact:
ceph_config_keys:
- /etc/ceph/{{ cluster }}.mgr.{{ ansible_hostname }}.keyring
+ - /etc/ceph/{{ cluster }}.client.admin.keyring
- name: stat for ceph config and keys
local_action: stat path={{ fetch_directory }}/{{ fsid }}/{{ item }}
|
csv_export: Add support Enc, Dec Cycles and Instr count parsing
This is for AOM-CTC where we need to measure
+ Encoding Cycle
+ Encoding Instructions Count
+ Decoding Cycle
+ Decoding Instructions Count | @@ -43,6 +43,10 @@ met_index = {
"APSNR Cr (libvmaf)": 28,
"CAMBI (libvmaf)": 29,
"Enc MD5": 30,
+ "Enc Instr Cnt": 31,
+ "Enc Cycle Cnt": 32,
+ "Dec Instr Cnt": 33,
+ "Dec Cycle Cnt": 34,
}
# row_id for different sets inside template.
@@ -242,7 +246,7 @@ def write_set_data(run_path, writer, current_video_set, current_config):
normalized_cfg = 'RA'
# Get the Quality values, if user defined, use that, else do defaults
if 'qualities' in list(info_data.keys()):
- qp_list = info_data['qualities'].split()
+ qp_list = [int(x) for x in info_data['qualities'].split()]
else:
qp_list = quality_presets[info_data['codec']]
if current_video_set in ['aomctc-f1-hires', 'aomctc-f2-midres']:
@@ -282,8 +286,20 @@ def write_set_data(run_path, writer, current_video_set, current_config):
row = encoded_qp_list[this_qp]
frames = int(row[1]) / int(width) / int(height)
enc_md5 = ''
+ enc_instr_cnt = 0
+ enc_cycle_cnt = 0
+ dec_instr_cnt = 0
+ dec_cycle_cnt = 0
if len(row) > 33:
enc_md5 = str(row[met_index["Enc MD5"] + 3])
+ enc_instr_cnt = str(
+ row[met_index["Enc Instr Cnt"] + 3])
+ enc_cycle_cnt = str(
+ row[met_index["Enc Cycle Cnt"] + 3])
+ dec_instr_cnt = str(
+ row[met_index["Dec Instr Cnt"] + 3])
+ dec_cycle_cnt = str(
+ row[met_index["Dec Cycle Cnt"] + 3])
if info_data["codec"] in ["av2-as", "av2-as-st"]:
writer.writerow(
[
@@ -318,10 +334,10 @@ def write_set_data(run_path, writer, current_video_set, current_config):
row[met_index["APSNR Cr (libvmaf)"] + 3],
row[met_index["Encoding Time"] + 3],
row[met_index["Decoding Time"] + 3],
- "", # EncInstr
- "", # DecInstr
- "", # EncCycles
- "", # DecCycles
+ enc_instr_cnt, # EncInstr
+ dec_instr_cnt, # DecInstr
+ enc_cycle_cnt, # EncCycles
+ dec_cycle_cnt, # DecCycles
enc_md5, # EncMD5
]
)
@@ -360,10 +376,10 @@ def write_set_data(run_path, writer, current_video_set, current_config):
row[met_index["CAMBI (libvmaf)"] + 3],
row[met_index["Encoding Time"] + 3],
row[met_index["Decoding Time"] + 3],
- "", # ENCInstr
- "", # DecInstr
- "", # EncCycles
- "", # DecCycles
+ enc_instr_cnt, # EncInstr
+ dec_instr_cnt, # DecInstr
+ enc_cycle_cnt, # EncCycles
+ dec_cycle_cnt, # DecCycles
enc_md5, # EncMD5
]
)
|
Fix, the setting of "__module__" in class creation should not overwrite values
* Also do not leak a reference in case that write is being rejected
by the resulting type object. | @@ -611,11 +611,14 @@ PyObject *BUILTIN_TYPE3(PyObject *module_name, PyObject *name, PyObject *bases,
Py_DECREF(pos_args);
- int res = PyObject_SetAttr(result, const_str_plain___module__, module_name);
+ if (HAS_ATTR_BOOL(result, const_str_plain___module__) == false) {
+ int res = SET_ATTRIBUTE(result, const_str_plain___module__, module_name);
if (res < 0) {
+ Py_DECREF(result);
return NULL;
}
+ }
return result;
}
|
Corrected typo in the introduction
Changed `(key`, `value`) to (`key`, `value`) | @@ -54,7 +54,7 @@ You can easily change a value of an item using its _key_.
## Deleting values using keys
-You can delete an item from a dictionary using `dict.pop(<key>)`. This will remove the `(key`, `value`) pair from the dictionary and return the `value` for use. `dict.pop(<key>)` accepts second argument, `default` that is returned if the `key` is not found (`dict.pop(<key>, <default>)`). Otherwise, a `KeyError` will be raised for any `key` that is missing.
+You can delete an item from a dictionary using `dict.pop(<key>)`. This will remove the (`key`, `value`) pair from the dictionary and return the `value` for use. `dict.pop(<key>)` accepts second argument, `default` that is returned if the `key` is not found (`dict.pop(<key>, <default>)`). Otherwise, a `KeyError` will be raised for any `key` that is missing.
```python
>>> bear.pop("name")
|
$.Analysis: fix public converter codegen for arrays of big integers
TN: | % if cls.to_public_converter_required:
function ${cls.to_public_converter}
(Value : ${cls.name}) return ${cls.api_name} is
- Result : ${cls.api_name} (1 .. Value.N);
begin
+ return Result : ${cls.api_name} (1 .. Value.N) do
for I in Result'Range loop
- Result (I - Value.Items'First + Result'First) :=
- ${cls.element_type.to_public_expr('Value.Items (I)')};
+ <% to_public_expr = cls.element_type.to_public_expr(
+ 'Value.Items (I)') %>
+ Result (I - Value.Items'First + Result'First)
+ % if cls.element_type.is_big_int_type:
+ .Set (${to_public_expr})
+ % else:
+ := ${to_public_expr}
+ % endif
+ ;
end loop;
- return Result;
+ end return;
end;
% endif
|
ebuild.ebuild_built: iterate over attributes to build tracking list
Avoids issues where tracked attributes from EAPI objs might not always
be frozensets in future rewrites. | @@ -5,6 +5,7 @@ built ebuild packages (vdb packages and binpkgs are derivatives of this)
__all__ = ("package", "package_factory")
from functools import partial
+import itertools
import re
from snakeoil import demandimport
@@ -88,7 +89,9 @@ class package(ebuild_src.base):
@property
def tracked_attributes(self):
# tracked attributes varies depending on EAPI, thus this has to be runtime computed
- return tuple(super().tracked_attributes | frozenset(['contents', 'use', 'environment']))
+ return tuple(itertools.chain(
+ super().tracked_attributes, ('contents', 'use', 'environment')
+ ))
@DynamicGetattrSetter.register
def cflags(self):
|
Remove TextChannel.default_thread_slowmode_delay
This doesn't have meaning on text channels apparently | @@ -160,10 +160,6 @@ class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
The default auto archive duration in minutes for threads created in this channel.
.. versionadded:: 2.0
- default_thread_slowmode_delay: :class:`int`
- The default slowmode delay in seconds for threads created in this channel.
-
- .. versionadded:: 2.1
"""
__slots__ = (
@@ -180,7 +176,6 @@ class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
'_type',
'last_message_id',
'default_auto_archive_duration',
- 'default_thread_slowmode_delay',
)
def __init__(self, *, state: ConnectionState, guild: Guild, data: Union[TextChannelPayload, NewsChannelPayload]):
@@ -211,7 +206,6 @@ class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
# Does this need coercion into `int`? No idea yet.
self.slowmode_delay: int = data.get('rate_limit_per_user', 0)
self.default_auto_archive_duration: ThreadArchiveDuration = data.get('default_auto_archive_duration', 1440)
- self.default_thread_slowmode_delay: int = data.get('default_thread_rate_limit_per_user', 0)
self._type: Literal[0, 5] = data.get('type', self._type)
self.last_message_id: Optional[int] = utils._get_as_snowflake(data, 'last_message_id')
self._fill_overwrites(data)
@@ -306,7 +300,6 @@ class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
category: Optional[CategoryChannel] = ...,
slowmode_delay: int = ...,
default_auto_archive_duration: ThreadArchiveDuration = ...,
- default_thread_slowmode_delay: int = ...,
type: ChannelType = ...,
overwrites: Mapping[OverwriteKeyT, PermissionOverwrite] = ...,
) -> TextChannel:
@@ -366,10 +359,6 @@ class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
Must be one of ``60``, ``1440``, ``4320``, or ``10080``.
.. versionadded:: 2.0
- default_thread_slowmode_delay: :class:`int`
- The new default slowmode delay in seconds for threads created in this channel.
-
- .. versionadded:: 2.1
Raises
------
|
Update README.md
Metric collection - steps were numbered 1, 2, 2.
I have updated the last step, *[Restart the Agent][8] to begin sending Gunicorn metrics to Datadog.*, to say 3. | @@ -47,7 +47,7 @@ instances:
- proc_name: <YOUR_APP_NAME>
```
-2. [Restart the Agent][8] to begin sending Gunicorn metrics to Datadog.
+3. [Restart the Agent][8] to begin sending Gunicorn metrics to Datadog.
#### Log collection
|
Check to make sure the aov exists first
this was causing the macOS addon to fail
depth and object_id aovs are missing on macOS
uncomment the logging statement to see this | @@ -145,6 +145,10 @@ class RenderTargets:
return fb_image
def get_frame_buffer(self, aov_name='default'):
+ name = aov_name
+ if name not in self.aovs:
+ # logging.info("Looking for unknown aov",name)
+ return None
return self.aovs[aov_name].render_buffer
def get_resolved_image(self, fb):
|
disable session tracking
Session tracking generates a log of requests (one for every page navigation) | @@ -25,7 +25,8 @@ hqDefine('cloudcare/js/sentry', [
console: false,
}),
],
- tunnel: initialPageData.reverse('report_sentry_error')
+ tunnel: initialPageData.reverse('report_sentry_error'),
+ autoSessionTracking: false
});
}
};
|
spellchecks
// edited by skirpichev
Based on sympy/sympy#12244 | @@ -39,7 +39,7 @@ def finite_diff_weights(order, x_list, x0=Integer(0)):
x_list: sequence
Sequence of (unique) values for the independent variable.
It is useful (but not necessary) to order x_list from
- nearest to farest from x0; see examples below.
+ nearest to furthest from x0; see examples below.
x0: Number or Symbol
Root or value of the independent variable for which the finite
difference weights should be generated. Defaults to Integer(0).
|
ValuePlugBinding : Use `isSetToDefault()` in preference to Python
This shaves 13% off the serialisation of a gaffer-examples template. | @@ -118,17 +118,12 @@ std::string valueSerialisationWalk( const Gaffer::ValuePlug *plug, const std::st
// Emit the `setValue()` call for this plug.
- object pythonValue = pythonPlug.attr( "getValue" )();
-
- if( PyObject_HasAttrString( pythonPlug.ptr(), "defaultValue" ) )
- {
- object pythonDefaultValue = pythonPlug.attr( "defaultValue" )();
- if( pythonValue == pythonDefaultValue )
+ if( plug->isSetToDefault() )
{
return "";
}
- }
+ object pythonValue = pythonPlug.attr( "getValue" )();
return identifier + ".setValue( " + ValuePlugSerialiser::valueRepr( pythonValue ) + " )\n";
}
|
helper: Add helper function to match groups based on input text.
This parallels the existing match_stream and match_user functions. | @@ -404,6 +404,14 @@ def match_stream(stream: Any, text: str) -> bool:
return False
+def match_groups(group_name: str, text: str) -> bool:
+ """
+ True if any group name matches with `text` (case insensitive),
+ False otherwise.
+ """
+ return group_name.lower().startswith(text.lower())
+
+
def powerset(iterable: Iterable[Any],
map_func: Callable[[Any], Any]=set) -> List[Any]:
"""
|
docstring typo fixes
cheers | @@ -166,9 +166,9 @@ def streaming_bulk(client, actions, chunk_size=500, max_chunk_bytes=100 * 1024 *
should return a tuple containing the action line and the data line
(`None` if data line should be omitted).
:arg max_retries: maximum number of times a document will be retried when
- ``429`` is received, set to 0 (default) for no retires on ``429``
+ ``429`` is received, set to 0 (default) for no retries on ``429``
:arg initial_backoff: number of seconds we should wait before the first
- retry. Any subsequent retries will be powers of ``inittial_backoff *
+ retry. Any subsequent retries will be powers of ``initial_backoff *
2**retry_number``
:arg max_backoff: maximum number of seconds a retry will wait
:arg yield_ok: if set to False will skip successful documents in the output
|
Skip imports tests on arm64
Skip imports tests on arm64
Authors:
- Jordan Jacobelli (https://github.com/Ethyling)
Approvers:
- Ray Douglass (https://github.com/raydouglass)
URL: | @@ -35,9 +35,9 @@ requirements:
- gdal >=3.3.0,<3.4.0a0
- geopandas >=0.9.0,<0.10.0a0
-test:
- commands:
- - python -c "import cuspatial"
+test: # [linux64]
+ commands: # [linux64]
+ - python -c "import cuspatial" # [linux64]
about:
home: http://rapids.ai/
|
fix common responses button
ref | @@ -8,11 +8,22 @@ div.editor-tools {
line-height: 30px;
padding: 5px 5px 0;
margin-bottom: p.$spacing-sm;
+ display: flex;
}
a.markup-toolbar-link {
padding: 0 6px;
+ display: inline-flex;
+ @include c.text-body-xs;
+ line-height: 1;
+ align-self: center;
+ color: var(--color-heading);
+ text-decoration: none;
+
+ &:hover {
+ text-decoration: underline;
+ }
}
button {
|
Move comment back to the class it describes (it accidentally drifted away).
Summary: Pull Request resolved:
Test Plan: Imported from OSS | @@ -42,14 +42,6 @@ using ::c10::QualifiedName;
using ExtraFilesMap = std::unordered_map<std::string, std::string>;
using ModulePtr = c10::intrusive_ptr<c10::ivalue::Object>;
-// A method in a module, e.g. f in:
-//
-// class M(ScriptModule):
-// @script_method
-// def f(self, x):
-// ...
-// Note: because Method/Module are exposed to python these
-// classes use python method naming conventions
struct Module;
@@ -59,6 +51,14 @@ using slot_list = slot_list_impl<Slot>;
using module_list = slot_list_impl<Module>;
using ModuleLookup = std::function<Module(const std::vector<std::string>&)>;
+// A method in a module, e.g. f in:
+//
+// class M(ScriptModule):
+// @script_method
+// def f(self, x):
+// ...
+// Note: because Method/Module are exposed to python these
+// classes use python method naming conventions
struct TORCH_API Method {
Method(ModulePtr owner, Function* function);
|
Add symbol in Huawei.VRP prompt
HG--
branch : feature/microservices | @@ -22,7 +22,7 @@ class Profile(BaseProfile):
(r"^Delete flash:", "y\n\r"),
(r"^Squeeze flash:", "y\n\r")
]
- pattern_prompt = r"^[<#\[](?P<hostname>[a-zA-Z0-9-_\.\[\(/`\s:]+)(?:-[a-zA-Z0-9/]+)*[>#\]\)]"
+ pattern_prompt = r"^[<#\[](?P<hostname>[a-zA-Z0-9-_\.\[\(/`'\"\s:]+)(?:-[a-zA-Z0-9/]+)*[>#\]\)]"
pattern_syntax_error = r"(ERROR: |% Wrong parameter found at|% Unrecognized command found at|Error:Too many parameters found|% Too many parameters found at|% Ambiguous command found at|Error: Unrecognized command found at)"
command_more = " "
|
Use self.__class__ for new objects, not Class()
For more info on this, see | @@ -1223,7 +1223,7 @@ class DAList(DAObject):
return self
if isinstance(other, DAList):
other._trigger_gather()
- the_list = DAList(elements=self.elements + other.elements, gathered=True, auto_gather=False)
+ the_list = self.__class__(elements=self.elements + other.elements, gathered=True, auto_gather=False)
the_list.set_random_instance_name()
return the_list
return self.elements + other
@@ -1490,7 +1490,7 @@ class DAList(DAObject):
"""Returns a list of the elements that are complete."""
if complete_attribute is None and hasattr(self, 'complete_attribute'):
complete_attribute = self.complete_attribute
- items = DAList(self.instanceName)
+ items = self.__class__(self.instanceName)
for item in self.elements:
if item is None:
continue
@@ -3140,21 +3140,21 @@ class DASet(DAObject):
"""
self._trigger_gather()
- return DASet(elements=self.elements.union(setify(other_set)))
+ return self.__class__(elements=self.elements.union(setify(other_set)))
def intersection(self, other_set):
"""Returns a Python set consisting of the elements of the current set
that also exist in the other_set.
"""
self._trigger_gather()
- return DASet(elements=self.elements.intersection(setify(other_set)))
+ return self.__class__(elements=self.elements.intersection(setify(other_set)))
def difference(self, other_set):
"""Returns a Python set consisting of the elements of the current set
that do not exist in the other_set.
"""
self._trigger_gather()
- return DASet(elements=self.elements.difference(setify(other_set)))
+ return self.__class__(elements=self.elements.difference(setify(other_set)))
def isdisjoint(self, other_set):
"""Returns True if no elements overlap between the current set and the
other_set. Otherwise, returns False."""
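The rationale, stripped of docassemble specifics: constructing results with `self.__class__` keeps the subclass type, while hard-coding the base class would silently downgrade it (classes below are made up).

```python
class Base(list):
    def combined(self, other):
        # self.__class__ instead of Base() so subclasses get back their own type
        return self.__class__(list(self) + list(other))

class Special(Base):
    pass

result = Special([1]).combined(Special([2]))
print(type(result).__name__, list(result))  # Special [1, 2]
```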
|
Update installing.rst
##### SUMMARY
Correcting wording - former vs. latter as AFAIK it is pipx that isolates not pip3
##### ISSUE TYPE
Docs Pull Request
+label: docsite_pr | @@ -35,7 +35,7 @@ related to containers and use the discussions_ forum instead.
Using pip or pipx
-----------------
-You can use either pip3_ or pipx_ to install it, the former one
+You can use either pip3_ or pipx_ to install it; the latter one
automatically isolates the linter from your current python environment.
That approach may avoid having to deal with particularities of installing
python packages, like creating a virtual environment, activating it, installing
|
Fix the downgrade of the migration.
By the way I'm not concerned about fixing service.postage data if we have to rollback. | @@ -56,7 +56,7 @@ def upgrade():
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('services_history', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
- op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=False))
+ op.add_column('services', sa.Column('postage', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
op.execute("INSERT INTO service_permission_types VALUES ('choose_postage')")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage_null"
|
Fix y_pos
The y_pos should be equal to the radius -> nodes_o_d / 2. | @@ -386,11 +386,11 @@ class Rotor(object):
for z_pos in z_positions:
dfb_z_pos = dfb[dfb.nodes_pos_l == z_pos]
dfb_z_pos = dfb_z_pos.sort_values(by="n_l")
- y_pos = nodes_o_d[int(dfb_z_pos.iloc[0]["n_l"])]
+ y_pos = nodes_o_d[int(dfb_z_pos.iloc[0]["n_l"])] / 2
mean_od = np.mean(nodes_o_d)
for t in dfb_z_pos.tag:
df.loc[df.tag == t, "y_pos"] = y_pos
- y_pos += mean_od / 2
+ y_pos += mean_od
# define position for point mass elements
dfb = df[df.type == "BearingElement"]
@@ -1320,7 +1320,7 @@ class Rotor(object):
position = (self.nodes_pos[disk.n], self.nodes_o_d[disk.n] / 2)
disk.patch(position, ax)
- mean_od = np.mean(self.nodes_o_d)
+ mean_od = np.mean(self.nodes_o_d) / 2
# plot bearings
for bearing in self.bearing_seal_elements:
position = (self.nodes_pos[bearing.n], self.nodes_o_d[bearing.n] / 2)
@@ -1423,7 +1423,9 @@ class Rotor(object):
mean_od = np.mean(self.nodes_o_d) / 2
# plot bearings
for bearing in self.bearing_seal_elements:
- position = (self.nodes_pos[bearing.n], self.nodes_o_d[bearing.n] / 2)
+ z_pos = self.df[self.df.tag == bearing.tag]["nodes_pos_l"].values[0]
+ y_pos = self.df[self.df.tag == bearing.tag]["y_pos"].values[0]
+ position = (z_pos, y_pos)
bearing.bokeh_patch(position, mean_od, bk_ax)
show(bk_ax)
|
Created AWS User, key, and id for testing upload
to S3 | @@ -261,11 +261,70 @@ jobs:
path: |
report/
+ create-report:
+ runs-on: ubuntu-latest
+ needs: [validate-content, build-sources, build-package, run-appinspect]
+ steps:
+ - name: Checkout Repo
+ uses: actions/checkout@v2
+
+
+ - name: Install System Packages
+ run: |
+ sudo apt update -qq
+ sudo apt install jq -qq
+
+ - uses: actions/setup-python@v2
+ with:
+ python-version: '3.9.5' #Available versions here - https://github.com/actions/python-versions/releases easy to change/make a matrix/use pypy
+ architecture: 'x64' # optional x64 or x86. Defaults to x64 if not specified
+
+ - name: Install Python Dependencies
+ run: |
+ #Get the virtualenv set up
+ rm -rf venv
+ python3 -m venv --clear venv
+ source venv/bin/activate
+ python3 -m pip install -q -r requirements.txt
+ - name: run reporting
+ run: |
+ source venv/bin/activate
+ python3 bin/reporting.py
+ #Official, Verified Amazon-AWS Github Account Provided Action
+ - uses: aws-actions/configure-aws-credentials@v1
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ # aws-session-token: ${{ secrets.AWS_SESSION_TOKEN }} # if you have/need it
+ aws-region: us-west-1 #assume we will always use this, could make this an environment variable...
+ - name: Upload Reporting
+ run: |
+ echo "*** THIS IS WHERE WE WOULD UPLOAD TO S3 WHEN WE HAVE THE CREDENTIALS ***"
+ aws s3 cp bin/reporting s3://security-content/reporting --recursive --exclude "*" --include "*.svg"
+ echo "Finished uploading!"
+ # update-sources-github:
+ # runs-on: ubuntu-latest
+ # needs: [validate-content, build-sources, build-package, run-appinspect, create-report]
+ # steps:
+ # - name: Checkout Repo
+ # uses: actions/checkout@v2
+ # - uses: actions/setup-python@v2
+ # with:
+ # python-version: '3.9.5' #Available versions here - https://github.com/actions/python-versions/releases easy to change/make a matrix/use pypy
+ # architecture: 'x64' # optional x64 or x86. Defaults to x64 if not specified
+
+ # - name: Install Python Dependencies
+ # run: |
+ # #Get the virtualenv set up
+ # rm -rf venv
+ # python3 -m venv --clear venv
+ # source venv/bin/activate
+ # python3 -m pip install -q -r requirements.txt
\ No newline at end of file
|
Update cloud_io.py
* Update cloud_io.py
Solves `AttributeError: 'PosixPath' object has no attribute 'startswith'`
* Update cloud_io.py | @@ -25,8 +25,8 @@ def load(path_or_url: Union[str, IO, Path], map_location=None):
if not isinstance(path_or_url, (str, Path)):
# any sort of BytesIO or similiar
return torch.load(path_or_url, map_location=map_location)
- if path_or_url.startswith("http"):
- return torch.hub.load_state_dict_from_url(path_or_url, map_location=map_location)
+ if str(path_or_url).startswith("http"):
+ return torch.hub.load_state_dict_from_url(str(path_or_url), map_location=map_location)
fs = get_filesystem(path_or_url)
with fs.open(path_or_url, "rb") as f:
return torch.load(f, map_location=map_location)
|
tests: fix mtls_cert generation in test_restful.py
The test now manually generates an x509 certificate for testing instead of
trying to initialize an agent for that. | @@ -27,7 +27,7 @@ To run without root privileges, be sure to export KEYLIME_TEST=True
For Python Coverage support (pip install coverage), set env COVERAGE_FILE and:
* coverage run --parallel-mode test_restful.py
'''
-
+import datetime
import sys
import signal
import unittest
@@ -43,7 +43,6 @@ from cryptography.hazmat.primitives import serialization
import dbus
-import keylime.keylime_agent
from keylime import config
from keylime import fs_util
from keylime import tornado_requests
@@ -377,12 +376,11 @@ class TestRestful(unittest.TestCase):
fs_util.ch_dir(config.WORK_DIR)
_ = secure_mount.mount()
- # Create an agent server to get the mtls certificate
- agent_server = keylime.keylime_agent.CloudAgentHTTPServer(("127.0.0.1", 9002),
- keylime.keylime_agent.Handler,
- config.get('cloud_agent', 'agent_uuid'))
+ # Create a mTLS cert for testing
global mtls_cert
- mtls_cert = agent_server.mtls_cert.public_bytes(serialization.Encoding.PEM)
+ rsa_key = crypto.rsa_generate(2048)
+ valid_util = datetime.datetime.utcnow() + datetime.timedelta(days=(360 * 5))
+ mtls_cert = crypto.generate_selfsigned_cert("TEST_CERT", rsa_key, valid_util).public_bytes(serialization.Encoding.PEM)
# Initialize the TPM with AIK
(ekcert, ek_tpm, aik_tpm) = tpm_instance.tpm_init(self_activate=False,
|
Adds KeyboardInterrupt handling to customlm.py
This allows the user to prematurely end an optimization, but it
doesn't work within an MPI context yet - Ctrl+C just kills mpiexec
and it's never handled within Python. A work in progress. | @@ -9,11 +9,15 @@ from __future__ import division, print_function, absolute_import, unicode_litera
import time as _time
import numpy as _np
import scipy as _scipy
+import signal as _signal
#from scipy.optimize import OptimizeResult as _optResult
from ..tools import mpitools as _mpit
from ..baseobjs import VerbosityPrinter as _VerbosityPrinter
+#Make sure SIGINT will generate a KeyboardInterrupt (even if we're launched in the background)
+_signal.signal(_signal.SIGINT, _signal.default_int_handler)
+
#constants
MACH_PRECISION = 1e-12
#MU_TOL1 = 1e10 # ??
@@ -118,6 +122,7 @@ def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
# DB: from ..tools import matrixtools as _mt
# DB: print("DB F0 (%s)=" % str(f.shape)); _mt.print_mx(f,prec=0,width=4)
# num_fd_iters = 1000000 # DEBUG: use finite difference iterations instead
+ try:
for k in range(max_iter): #outer loop
# assume x, f, fnorm hold valid values
@@ -307,6 +312,17 @@ def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
else:
#if no break stmt hit, then we've exceeded maxIter
msg = "Maximum iterations (%d) exceeded" % max_iter
+ converged=True # call result "converged" even in this case, but issue warning:
+ printer.warning("Treating result as *converged* after maximum iterations (%d) were exceeded." % max_iter)
+
+ except KeyboardInterrupt:
+ if comm is not None:
+ comm.Bcast(x, root=0) # ensure all procs agree on what x is (in case the interrupt occurred around x being updated)
+ printer.log("Rank %d caught keyboard interrupt! Returning the current solution as being *converged*." % comm.Get_rank())
+ else:
+ printer.log("Caught keyboard interrupt! Returning the current solution as being *converged*.")
+ msg = "Keyboard interrupt!"
+ converged = True
#JTJ[idiag] = undampled_JTJ_diag #restore diagonal
return x, converged, msg
|
Remove the unused .text_equals DSL attribute expression
The same can be done with ``tok1.symbol.equals(tok2.symbol)``.
TN: | @@ -3542,38 +3542,6 @@ class FieldAccessExpr(BasicExpr):
self.type.name.camel)
-class TokenTextEq(BasicExpr):
- """
- Resolved expression to test equality of the text of two tokens.
- """
- def __init__(self, left, right, abstract_expr=None):
- """
- :type left: ResolvedExpression
- :type right: ResolvedExpression
- :param AbstractExpression|None abstract_expr: See ResolvedExpression's
- constructor.
- """
- super(TokenTextEq, self).__init__(
- 'Is_Equal', "Text_Type'(Text ({})) = Text_Type'(Text ({}))",
- bool_type, [left, right],
- abstract_expr=abstract_expr
- )
-
-
-@auto_attr
-def text_equals(self, left, right):
- """
- Return whether the two `left` and `right` tokens have the same text.
-
- :param AbstractExpression left: Expression that must resolve to a token,
- whose text will be used for the test.
- :param AbstractExpression right: Likewise.
- """
- return TokenTextEq(construct(left, token_type),
- construct(right, token_type),
- abstract_expr=self)
-
-
class LocalVars(object):
"""
Represents the state of local variables in a property definition.
|
Add better defaulting to pandas message for `groupby`
* Add better defaulting to pandas message for `groupby`
* Resolves
* Adds a function that contains the correct name instead of using an
anonymous lambda.
* User will now see:
```
UserWarning: `DataFrame.groupby_on_multiple_columns` defaulting to pandas implementation.
```
* Fix bug | @@ -490,6 +490,7 @@ class DataFrameGroupBy(object):
else:
by = self._by
- return self._df._default_to_pandas(
- lambda df: f(df.groupby(by=by, axis=self._axis, **self._kwargs)), **kwargs
- )
+ def groupby_on_multiple_columns(df):
+ return f(df.groupby(by=by, axis=self._axis, **self._kwargs), **kwargs)
+
+ return self._df._default_to_pandas(groupby_on_multiple_columns)
|
fix: Do not set page_length as null
Do not set page_length as null, since that causes all the records to be fetched at once, which slows down the system if there are thousands of records | @@ -82,8 +82,6 @@ frappe.views.ReportView = class ReportView extends frappe.views.ListView {
return Object.assign(args, {
with_comment_count: false,
- start: 0,
- page_length: null,
group_by: this.group_by_control.group_by || null,
order_by: this.group_by_control.order_by || null,
});
|
DOC: Fix signal.window.get_window() docstring
Add 'center' argument into get_window() docstring with 'exponential' window and add an example for it. | @@ -1971,7 +1971,7 @@ def get_window(window, Nx, fftbins=True):
- `~scipy.signal.windows.general_gaussian` (needs power, width)
- `~scipy.signal.windows.dpss` (needs normalized half-bandwidth)
- `~scipy.signal.windows.chebwin` (needs attenuation)
- - `~scipy.signal.windows.exponential` (needs decay scale)
+ - `~scipy.signal.windows.exponential` (needs center, decay scale)
- `~scipy.signal.windows.tukey` (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
@@ -1995,6 +1995,9 @@ def get_window(window, Nx, fftbins=True):
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
+ >>> signal.get_window(('exponential', None, 1.), 9)
+ array([ 0.011109 , 0.03019738, 0.082085 , 0.22313016, 0.60653066,
+ 0.60653066, 0.22313016, 0.082085 , 0.03019738])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
|
safer handling of review path
'published_path' might be missing.
Thumbnail path was fixed previously, this one was missed. | @@ -163,17 +163,21 @@ class IntegrateSlackAPI(pyblish.api.InstancePlugin):
def _get_review_path(self, instance):
"""Returns abs url for review if present in instance repres"""
- published_path = None
+ review_path = None
for repre in instance.data.get("representations", []):
tags = repre.get('tags', [])
if (repre.get("review")
or "review" in tags
or "burnin" in tags):
- if os.path.exists(repre["published_path"]):
- published_path = repre["published_path"]
+ repre_review_path = (
+ repre.get("published_path") or
+ os.path.join(repre["stagingDir"], repre["files"])
+ )
+ if os.path.exists(repre_review_path):
+ review_path = repre_review_path
if "burnin" in tags: # burnin has precedence if exists
break
- return published_path
+ return review_path
def _python2_call(self, token, channel, message, publish_files):
from slackclient import SlackClient
|
Respect colorbar tickminor for discrete levels
This ensures tickminor=True is respected if user passes custom
non-discrete locator (e.g. a MultipleLocator) with a BoundaryNorm
normalizer (in which case a default minor DiscreteLocator is used)
and ensures default rc['(x|y)tick.minor.visible'] setting of False
is respected even if DiscreteLocator is passed. | @@ -1089,25 +1089,18 @@ class Axes(maxes.Axes):
locator = _not_none(locator, locator_default, None)
formatter = _not_none(formatter, formatter_default, 'auto')
formatter = constructor.Formatter(formatter, **formatter_kw)
- categorical = isinstance(formatter, mticker.FixedFormatter)
- discrete = isinstance(mappable.norm, pcolors.DiscreteNorm)
if locator is not None:
locator = constructor.Locator(locator, **locator_kw)
if minorlocator is not None:
minorlocator = constructor.Locator(minorlocator, **minorlocator_kw)
- if isinstance(locator, pticker.DiscreteLocator):
- if categorical: # no minor ticks and convert DiscreteLocator
- locator = mticker.FixedLocator(np.array(locator.locs))
- elif minorlocator is not None:
- pass
- elif tickminor or tickminor is None:
- minorlocator = pticker.DiscreteLocator(np.array(locator.locs), minor=True) # noqa: E501
- if tickminor is not None: # whether to apply minorticks_on()
- pass
- elif discrete or categorical: # never use the default minor locator
- tickminor = False
- else:
- tickminor = rc[name + 'tick.minor.visible']
+ discrete = isinstance(locator, pticker.DiscreteLocator)
+ categorical = isinstance(formatter, mticker.FixedFormatter)
+ tickminor = False if categorical else rc[name + 'tick.minor.visible']
+ if categorical and discrete:
+ locator = mticker.FixedLocator(np.array(locator.locs)) # convert locator
+ if tickminor and isinstance(mappable.norm, mcolors.BoundaryNorm):
+ locs = np.array(locator.locs if discrete else mappable.norm.boundaries)
+ minorlocator = pticker.DiscreteLocator(locs, minor=True)
# Special handling for colorbar keyword arguments
# WARNING: Critical to not pass empty major locators in matplotlib < 3.5
|
Add narrative docs for plot/scatter_coord methods
Added missing narrative documentation for methods: plot_coord and
scatter_coord of the WCSAxes class.
Closes: | Overplotting markers and artists
********************************
+The class :class:`~astropy.visualization.wcsaxes.WCSAxes` provides two handy methods:
+:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord`,
+:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord`
+
+Used to plots and scatter respectively :class:`~astropy.coordinates.SkyCoord` or :class:`~astropy.coordinates.BaseCoordinateFrame` coordinates on the axes. The ``transform`` keyword argument will be created based on the coordinate, specifying it here will throw a :class:`~TypeError`.
+
For the example in the following page we start from the example introduced in
:ref:`initialization`.
|
Verification: simplify kick note reason
This will make it much easier to filter out verification kicks
when querying the infraction database. | @@ -367,7 +367,7 @@ class Verification(Cog):
"actor": self.bot.user.id, # Bot actions this autonomously
"expires_at": None,
"hidden": True,
- "reason": f"Kicked for not having verified after {constants.Verification.kicked_after} days",
+ "reason": "Verification kick",
"type": "note",
"user": member.id,
}
|
Define floating ip's into k8s-config ssl context
to avoid | @@ -478,9 +478,15 @@ class Kubespray:
kubespray_vars["openstack_lbaas_subnet_id"] = metadata["resources"]["subnet_id"]
kubespray_vars["openstack_lbaas_floating_network_id"] = metadata["resources"]["ext_net_id"]
# See https://github.com/kubernetes-incubator/kubespray/issues/2141
- # Set this variable to true to get rid of this issue```
+ # Set this variable to true to get rid of this issue
kubespray_vars["volume_cross_zone_attachment"] = True
+ # See https://github.com/kubernetes-incubator/kubespray/issues/1430
+ # Set all fips in this var to get rid of kubectl ssl-certs issue
+ ssl_fips = [master["fip"] for master in metadata["resources"]["masters"]]
+ if ssl_fips:
+ kubespray_vars["supplementary_addresses_in_ssl_keys"] = ssl_fips
+
image_var_names = [var_name for var_name in dir(config) if var_name.endswith(('_IMAGE_REPO', '_IMAGE_TAG'))]
image_variables = {k.lower(): getattr(config, k) for k in image_var_names}
kubespray_vars.update(image_variables)
|
Using internal logging.
Internal logging could be replaced with Python logging package. | @@ -54,8 +54,7 @@ def is_json(data):
json.dumps(data, cls=BlenderEncoder)
return True
except:
- import logging
- logging.exception("failed to json.dump custom properties.")
+ print_console('DEBUG', 'Failed to json.dumps custom properties')
return False
def create_image_file(context, blender_image, dst_path, file_format):
|
(docs) paper trade update
add supported connectors for paper trading | @@ -41,3 +41,24 @@ Add more paper trade assets by editing `conf_global.yml` using a text editor. **
!!! warning
When adding balances, make sure to exit and restart Hummingbot for the changes to take effect.
+
+
+## Supported Connectors
+
+Here are the list of supported connectors for paper trading.
+
+```
+# Supported Connectors
+Binance
+Coinbase Pro
+Huobi
+DDEX
+IDEX
+Bamboo Relay
+Radar Relay
+
+# Not yet supported
+Bittrex
+Dolomite
+```
+
|
Improve documentation of contrib.sqla.ModelView.get_query.
Mention overriding get_one to prohibit access to elements by simply
changing the URL. | @@ -833,6 +833,17 @@ class ModelView(BaseModelView):
class MyView(ModelView):
def get_query(self):
return super(MyView, self).get_query().filter(User.username == current_user.username)
+
+ Individual elements that are filtered in the list view are still
+ accessible through the edit view by simply changing the URL, even
+ when they are filtered by `get_query`. To prohibit this, also
+ override `get_one`.
+
+ Example::
+
+ def get_one(self, id):
+ query = self.get_query()
+ return query.filter(self.model.id == id).first()
"""
return self.session.query(self.model)
@@ -1073,6 +1084,8 @@ class ModelView(BaseModelView):
"""
Return a single model by its id.
+ Also see `get_query` for how to filter the list view.
+
:param id:
Model id
"""
|
Removes f-strings in favor of %-format-strings in circuitstructure.py
(For python 3.5 compatibility) | @@ -200,7 +200,7 @@ class CircuitPlaquette(object):
def element_label(self, irow, icol):
c = self.elements.get((irow, icol), None)
- return f"{c.layerstr}" if (c is not None) else ""
+ return c.layerstr if (c is not None) else ""
class FiducialPairPlaquette(CircuitPlaquette):
@@ -331,14 +331,15 @@ class FiducialPairPlaquette(CircuitPlaquette):
return FiducialPairPlaquette(self.base, self.fidpairs, self.num_rows, self.num_cols, aliases)
def summary_label(self):
- return "{}" if len(self.base) == 0 else f"{self.base.layerstr}"
+ return "{}" if len(self.base) == 0 else self.base.layerstr
def element_label(self, irow, icol):
prep, meas = self.fidpairs.get((irow, icol), (None, None))
if prep is None or meas is None:
return ""
else:
- return f"{prep.layerstr} + " + self.summary_label() + f" + {meas.layerstr}"
+ #return f"{prep.layerstr} + " + self.summary_label() + f" + {meas.layerstr}"
+ return " + ".join(prep.layerstr, self.summary_label(), meas.layerstr)
class GermFiducialPairPlaquette(FiducialPairPlaquette):
@@ -473,7 +474,8 @@ class GermFiducialPairPlaquette(FiducialPairPlaquette):
if len(self.germ) == 0 or self.power == 0:
return "{}"
else:
- return f"({self.germ.layerstr})<sup>{self.power}</sup>"
+ #return f"({self.germ.layerstr})<sup>{self.power}</sup>"
+ return "(%s)<sup>%d</sup>" % (self.germ.layerstr, self.power)
class PlaquetteGridCircuitStructure(_CircuitList):
@@ -669,7 +671,8 @@ class PlaquetteGridCircuitStructure(_CircuitList):
elif axis == 'y':
return [self.truncate(ys_to_keep=self.ys[0:i + 1]) for i in range(len(self.ys))]
else:
- raise ValueError(f"Invalid `axis` argument: {axis} - must be 'x' or 'y'!")
+ #raise ValueError(f"Invalid `axis` argument: {axis} - must be 'x' or 'y'!")
+ raise ValueError("Invalid `axis` argument: %s - must be 'x' or 'y'!" % str(axis))
def process_circuits(self, processor_fn, updated_aliases=None):
"""
|
Adds somehow-clobbered fixes for displaying error bars on errorgen rates in reports.
This 2-line fix must have gotten clobbered by a recent merge, as this functionality
was working before but mysteriously disappeared (?). | @@ -2552,6 +2552,7 @@ class ProjectionsBoxPlot(WorkspacePlot):
elif nQubits == 2:
if projections.size == 15:
projections = _np.concatenate(([0.0], projections)).reshape((4, 4))
+ eb_matrix = _np.concatenate(([0.0], eb_matrix))
xlabel = "Q2"; ylabel = "Q1"
xd, yd = projections.shape
else: # projections.size == 15*15
@@ -2561,6 +2562,7 @@ class ProjectionsBoxPlot(WorkspacePlot):
else:
if projections.size == 4**nQubits - 1:
projections = _np.concatenate(([0.0], projections)).reshape((4, projections.size // 4))
+ eb_matrix = _np.concatenate(([0.0], eb_matrix))
xlabel = "Q*"; ylabel = "Q1"
xd, yd = projections.shape
else: # projections.size == (4**nQ)**2
|
fw/config: add extra_plugin_paths setting
Add extra_plugin_paths setting which gets populated from WA_PLUGIN_PATHS
environment variable. This allows specifying additional locations to
scan for WA plugins. | @@ -458,6 +458,13 @@ class MetaConfiguration(Configuration):
specified when invoking a run.
""",
),
+ ConfigurationPoint(
+ 'extra_plugin_paths',
+ kind=list_of_strings,
+ description="""
+ A list of additional paths to scan for plugins.
+ """,
+ ),
]
configuration = {cp.name: cp for cp in config_points}
@@ -483,6 +490,10 @@ class MetaConfiguration(Configuration):
if user_directory:
self.set('user_directory', user_directory)
+ extra_plugin_paths = environ.pop('WA_PLUGIN_PATHS', '')
+ if extra_plugin_paths:
+ self.set('extra_plugin_paths', extra_plugin_paths.split(os.pathsep))
+
self.plugin_packages = copy(self.core_plugin_packages)
if os.path.isfile(self.additional_packages_file):
with open(self.additional_packages_file) as fh:
|