Columns: message (string, lengths 13 to 484) and diff (string, lengths 38 to 4.63k).
Fix confusion between paying and payment channel. In seller.py, balance calls payment instead of paying.
@@ -448,7 +448,7 @@ class SimpleSeller(object): if not self._session or not self._session.is_attached(): raise RuntimeError('market-maker session not attached') - paying_balance = await self._session.call('xbr.marketmaker.get_payment_channel_balance', self._channel['channel']) + paying_balance = await self._session.call('xbr.marketmaker.get_paying_channel_balance', self._channel['channel']) return paying_balance
make it easier to set the compiler. Hi, I'm trying to package Mininet for Guix, which doesn't symlink cc to gcc; this would make it easier to package, as well as to configure it for alternative compilers.
@@ -14,6 +14,7 @@ BINDIR ?= $(PREFIX)/bin MANDIR ?= $(PREFIX)/share/man/man1 DOCDIRS = doc/html doc/latex PDF = doc/latex/refman.pdf +CC ?= cc CFLAGS += -Wall -Wextra @@ -46,7 +47,7 @@ slowtest: $(MININET) mininet/examples/test/runner.py -v mnexec: mnexec.c $(MN) mininet/net.py - cc $(CFLAGS) $(LDFLAGS) -DVERSION=\"`PYTHONPATH=. $(PYMN) --version`\" $< -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) -DVERSION=\"`PYTHONPATH=. $(PYMN) --version`\" $< -o $@ install-mnexec: $(MNEXEC) install -D $(MNEXEC) $(BINDIR)/$(MNEXEC)
ci: docs: Check for failure properly. Fixes:
@@ -147,8 +147,19 @@ function run_docs() { last_release=$("${PYTHON}" -m dffml service dev setuppy kwarg version setup.py) + # Log failed tests to file + doctest_failures="$(mktemp)" + TEMP_DIRS+=("${doctest_failures}") + # Doctests - ./scripts/doctest.sh + ./scripts/doctest.sh 2>&1 | tee "${doctest_failures}" + + # Fail if any tests errored + skipped=$(grep 'in test' "${doctest_failures}" | grep -v '0 failures in tests' | wc -l) + if [ "$skipped" -ne 0 ]; then + echo "Tests failed" >&2 + exit 1 + fi # Fail if there are any changes to the Git repo changes=$(git status --porcelain | wc -l)
c_api/header_c.mako: replace uses of the _self variable. TN:
@@ -29,7 +29,7 @@ typedef void* ${node_type}; ${c_doc('langkit.node_kind_type')} typedef enum { -% for astnode in _self.astnode_types: +% for astnode in ctx.astnode_types: % if astnode.abstract: /* ${astnode.name()} (abstract) */ @@ -51,7 +51,7 @@ typedef void *${lexical_env_type}; typedef uint8_t ${bool_type}; -% for struct_type in _self.struct_types: +% for struct_type in ctx.struct_types: ${struct_types.decl(struct_type)} % endfor @@ -118,7 +118,7 @@ typedef struct { const char *information; } ${exception_type}; -% if _self.default_unit_file_provider: +% if ctx.default_unit_file_provider: /* * Types for unit file providers */ @@ -160,11 +160,11 @@ typedef char *(*${unit_file_provider_get_file_from_name_type})( */ -% for enum_type in _self.sorted_types(_self.enum_types): +% for enum_type in ctx.sorted_types(ctx.enum_types): ${enum_types.decl(enum_type)} % endfor -% for array_type in _self.sorted_types(_self.array_types): +% for array_type in ctx.sorted_types(ctx.array_types): % if array_type.element_type()._exposed: ${array_types.decl(array_type)} % endif @@ -179,7 +179,7 @@ ${c_doc('langkit.create_context')} extern ${analysis_context_type} ${capi.get_name("create_analysis_context")}( const char *charset - % if _self.default_unit_file_provider: + % if ctx.default_unit_file_provider: , ${unit_file_provider_type} unit_file_provider % endif ); @@ -216,7 +216,7 @@ ${capi.get_name("get_analysis_unit_from_buffer")}( size_t buffer_size, int with_trivia); -% if _self.default_unit_file_provider: +% if ctx.default_unit_file_provider: ${c_doc('langkit.get_unit_from_provider')} extern ${analysis_unit_type} ${capi.get_name("get_analysis_unit_from_provider")}( @@ -370,7 +370,7 @@ ${capi.get_name('lexical_env_dec_ref')}(${lexical_env_type} env); if the node does not have the proper type, for instance). When an AST node is returned, its ref-count is left as-is. */ -% for astnode in _self.astnode_types: +% for astnode in ctx.astnode_types: % for field in astnode.fields_with_accessors(): ${astnode_types.accessor_decl(field)} % endfor @@ -399,7 +399,7 @@ ${capi.get_name("node_extension")}( ${capi.get_name("node_extension_destructor")} dtor ); -% if _self.default_unit_file_provider: +% if ctx.default_unit_file_provider: /* * Unit file providers */
Update squad_v2.py. Change lines 100 and 102 to prevent overwriting the ```predictions``` variable.
@@ -97,9 +97,9 @@ class SquadV2(datasets.Metric): ) def _compute(self, predictions, references, no_answer_threshold=1.0): - predictions = dict((p["id"], p["prediction_text"]) for p in predictions) - dataset = [{"paragraphs": [{"qas": references}]}] no_answer_probabilities = dict((p["id"], p["no_answer_probability"]) for p in predictions) + dataset = [{"paragraphs": [{"qas": references}]}] + predictions = dict((p["id"], p["prediction_text"]) for p in predictions) qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
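A tiny self-contained reproduction of the ordering concern, with an invented sample prediction; it only shows why the probabilities must be read before the `predictions` name is rebound:

```python
predictions = [{"id": "q1", "prediction_text": "42", "no_answer_probability": 0.1}]

# read everything that needs the original list *before* rebinding the name;
# after the second line, the list of dicts is no longer reachable
no_answer_probabilities = dict((p["id"], p["no_answer_probability"]) for p in predictions)
predictions = dict((p["id"], p["prediction_text"]) for p in predictions)

print(no_answer_probabilities)  # {'q1': 0.1}
print(predictions)              # {'q1': '42'}
```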
Remove redundant shape assertion. Fixes
@@ -432,8 +432,6 @@ class KerasClassifier(ClassifierNeuralNetwork, ClassifierGradients, Classifier): # Apply preprocessing x_preprocessed, _ = self._apply_preprocessing(x=x_expanded, y=None, fit=False) - assert len(x_preprocessed.shape) == 4 - # Determine shape of expected output and prepare array output_shape = output_func([x_preprocessed[0][None, ...]])[0].shape activations = np.zeros((x_preprocessed.shape[0],) + output_shape[1:], dtype=ART_NUMPY_DTYPE)
Clean up scrolling logic. Looks like this makes for a smoother scroll experience.
@@ -17,8 +17,8 @@ class Scrolling: self.vscroll_policy: Optional[Gtk.ScrollablePolicy] = None self._hadjustment_handler_id = 0 self._vadjustment_handler_id = 0 - self._last_hvalue = 0.0 - self._last_vvalue = 0.0 + self._last_hvalue = 0 + self._last_vvalue = 0 def get_property(self, prop): if prop.name == "hadjustment": @@ -55,42 +55,39 @@ class Scrolling: raise AttributeError(f"Unknown property {prop.name}") def update_adjustments(self, width, height, bounds): + """Update scroll bar values (adjustments in GTK), and reset the scroll + value to 0. + + The value will change when a scroll bar is moved. + """ # canvas limits (in view coordinates) c = Rectangle(*bounds) - c.x -= width * 0.7 - c.width += width * 1.4 - c.y -= height * 0.7 - c.height += height * 1.4 - - # view limits - v = Rectangle(0, 0, width, height) + c.expand(min(width, height) / 2) + u = c + Rectangle(0, 0, width, height) - # union of these limits gives scrollbar limits - u = c if v in c else (c + v) if self.hadjustment: - self.hadjustment.set_value(v.x) + self.hadjustment.set_value(0) self.hadjustment.set_lower(u.x) self.hadjustment.set_upper(u.x1) self.hadjustment.set_step_increment(width // 10) self.hadjustment.set_page_increment(width) self.hadjustment.set_page_size(width) + self._last_hvalue = 0 if self.vadjustment: - self.vadjustment.set_value(v.y) + self.vadjustment.set_value(0) self.vadjustment.set_lower(u.y) self.vadjustment.set_upper(u.y1) self.vadjustment.set_step_increment(height // 10) self.vadjustment.set_page_increment(height) self.vadjustment.set_page_size(height) - - self._last_hvalue = v.x - self._last_vvalue = v.y + self._last_vvalue = 0 def on_adjustment_changed(self, adj): """Change the transformation matrix of the view to reflect the value of the x/y adjustment (scrollbar).""" value = adj.get_value() - if value == 0.0: + if value == 0: return m = Matrix()
Do not store weights from NearNeighbors classes by default. These are not usually particularly meaningful.
@@ -36,7 +36,7 @@ __author__ = "Matthew Horton, Evan Spotte-Smith" __version__ = "0.1" __maintainer__ = "Matthew Horton" __email__ = "[email protected]" -__status__ = "Beta" +__status__ = "Production" __date__ = "August 2017" ConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist') @@ -192,7 +192,7 @@ class StructureGraph(MSONable): return sg @staticmethod - def with_local_env_strategy(structure, strategy): + def with_local_env_strategy(structure, strategy, weights=False): """ Constructor for StructureGraph, using a strategy from :Class: `pymatgen.analysis.local_env`. @@ -200,12 +200,12 @@ class StructureGraph(MSONable): :param structure: Structure object :param strategy: an instance of a :Class: `pymatgen.analysis.local_env.NearNeighbors` object + :param weights: if True, use weights from local_env class + (consult relevant class for their meaning) :return: """ - sg = StructureGraph.with_empty_graph(structure, name="bonds", - edge_weight_name="weight", - edge_weight_units="") + sg = StructureGraph.with_empty_graph(structure, name="bonds") for n, neighbors in enumerate(strategy.get_all_nn_info(structure)): for neighbor in neighbors: @@ -218,7 +218,7 @@ class StructureGraph(MSONable): from_jimage=(0, 0, 0), to_index=neighbor['site_index'], to_jimage=neighbor['image'], - weight=neighbor['weight'], + weight=neighbor['weight'] if weights else None, warn_duplicates=False) return sg
readme_template.md: Chrome requires manual DNS cache cleanup. See:
@@ -254,6 +254,9 @@ You can also refer to the "Third-Party Hosts Managers" section for further recom Your operating system will cache DNS lookups. You can either reboot or run the following commands to manually flush your DNS cache once the new hosts file is in place. +| Chromium/Google Chrome required manually cleanup DNS Cache on `chrome://net-internals/#dns` page for apply changes from custom hosts file. See: https://superuser.com/questions/723703 +:----------------------------------------------------------------------------------------- + ### Windows Open a command prompt with administrator privileges and run this command:
index/schema.py: Make cmd_version actually optional. During migration, we may have to import a JSON index file that was produced with an older version. If the index file is missing cmd_version, migration will fail.
@@ -59,6 +59,7 @@ class ArchiveResult: } info['start_ts'] = parse_date(info['start_ts']) info['end_ts'] = parse_date(info['end_ts']) + info['cmd_version'] = info.get('cmd_version') return cls(**info) def to_dict(self, *keys) -> dict:
make risk measure more memory efficient. Summary: Pull Request resolved: Using torch.quantile is way more efficient because it does not create large tensors for values and indices (which are the same size as the input). torch.topk also yields memory improvements.
@@ -24,6 +24,7 @@ from abc import ABC, abstractmethod from math import ceil from typing import Optional +import torch from botorch.acquisition.objective import MCAcquisitionObjective from torch import Tensor @@ -144,8 +145,12 @@ class CVaR(RiskMeasureMCObjective): A `sample_shape x batch_shape x q`-dim tensor of CVaR samples. """ prepared_samples = self._prepare_samples(samples) - sorted_samples = prepared_samples.sort(dim=-1, descending=True).values - return sorted_samples[..., self.alpha_idx :].mean(dim=-1) + return torch.topk( + prepared_samples, + k=prepared_samples.shape[-1] - self.alpha_idx, + largest=False, + dim=-1, + ).values.mean(dim=-1) class VaR(CVaR): @@ -157,6 +162,23 @@ class VaR(CVaR): `1 - alpha` quantile of a given random variable. """ + def __init__( + self, + alpha: float, + n_w: int, + weights: Optional[Tensor] = None, + ) -> None: + r"""Transform the posterior samples to samples of a risk measure. + + Args: + alpha: The risk level, float in `(0.0, 1.0]`. + n_w: The size of the `w_set` to calculate the risk measure over. + weights: An optional `m`-dim tensor of weights for scalarizing + multi-objective samples before calculating the risk measure. + """ + super().__init__(n_w=n_w, alpha=alpha) + self._q = 1 - self.alpha_idx / n_w + def forward(self, samples: Tensor, X: Optional[Tensor] = None) -> Tensor: r"""Calculate the VaR corresponding to the given samples. @@ -170,8 +192,20 @@ class VaR(CVaR): A `sample_shape x batch_shape x q`-dim tensor of VaR samples. """ prepared_samples = self._prepare_samples(samples) - sorted_samples = prepared_samples.sort(dim=-1, descending=True).values - return sorted_samples[..., self.alpha_idx] + # this is equivalent to sorting along dim=-1 in descending order + # and taking the values at index self.alpha_idx. E.g. + # >>> sorted_res = prepared_samples.sort(dim=-1, descending=True) + # >>> sorted_res.values[..., self.alpha_idx] + # Using quantile is far more memory efficient since `torch.sort` + # produces values and indices tensors with shape + # `sample_shape x batch_shape x (q * n_w) x m` + return torch.quantile( + input=prepared_samples, + q=self._q, + dim=-1, + keepdim=False, + interpolation="lower", + ) class WorstCase(RiskMeasureMCObjective):
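The comment in the diff above says the quantile call picks the same order statistic as the old sort-based indexing. Here is a minimal standalone sketch of that equivalence, assuming a recent PyTorch that supports the `interpolation` argument; the tensor sizes and the `alpha_idx` value are invented for illustration:

```python
import torch

# toy shape: sample_shape x batch_shape x n (all sizes illustrative)
x = torch.randn(4, 8, 32)
alpha_idx = 24                      # hypothetical index of the risk-measure order statistic
q = 1 - alpha_idx / x.shape[-1]

# memory-hungry path: sort materializes full values (and indices) tensors
via_sort = x.sort(dim=-1, descending=True).values[..., alpha_idx]

# quantile with "lower" interpolation selects the same element without the full sorted copy
via_quantile = torch.quantile(x, q=q, dim=-1, interpolation="lower")

assert torch.allclose(via_sort, via_quantile)
```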
joystick: revert max axes value. Revert this to 255.
@@ -41,7 +41,7 @@ class Joystick: def __init__(self): # TODO: find a way to get this from API, perhaps "inputs" doesn't support it self.min_axis_value = {'ABS_Y': 0., 'ABS_RZ': 0.} - self.max_axis_value = {'ABS_Y': 1023., 'ABS_RZ': 255.} + self.max_axis_value = {'ABS_Y': 255., 'ABS_RZ': 255.} self.cancel_button = 'BTN_TRIGGER' self.axes_values = {'ABS_Y': 0., 'ABS_RZ': 0.} # gb, steer self.axes_order = ['ABS_Y', 'ABS_RZ']
[oracle] corrections about service checks and events
## Overview Get metrics from Oracle Database servers in real time to: - -* Visualize and monitor Oracle Database service status. -* Be notified about Oracle Database cluster failovers and events. +* Visualize and monitor your Oracle Database's availability and performance metrics. ## Setup ### Installation @@ -57,7 +55,7 @@ The Oracle check is currently compatible with Linux and macOS. See [metadata.csv](https://github.com/DataDog/integrations-core/blob/master/oracle/metadata.csv) for a list of metrics provided by this integration. ### Events -The Oracle Database check does not include any event at this time. +The Oracle Database check does not include any events at this time. ### Service Checks -The Oracle Database check does not include any service check at this time. +The Oracle Database integration includes the service check `oracle.can_connect` which will verify the database is available and accepting connections.
Fix test failure: assert bar_graphql_type.interfaces == [foo_graphql_type] failed only on tox, because .interfaces was a tuple instead of a list. The error didn't occur using just pytest. Fixed by explicitly converting both sides to lists.
@@ -318,4 +318,4 @@ def test_interface_with_interfaces(): assert isinstance(fields["foo"], GraphQLField) assert isinstance(fields["bar"], GraphQLField) - assert bar_graphql_type.interfaces == [foo_graphql_type] + assert list(bar_graphql_type.interfaces) == list([foo_graphql_type])
A small typo in GAM. Fixing a small typo in the plot_partial function's docstring.
@@ -315,7 +315,7 @@ class GLMGamResults(GLMResults): ---------- smooth_index : int index of the smooth term within list of smooth terms - plot_se : book + plot_se : bool If plot_se is true, then the confidence interval for the linear prediction will be added to the plot. cpr : bool
message_list_view: Use translated form of "at" in timestamp tooltip. The English word "at" was manually appended to the string output of datetime-related functions to generate the string shown in the tooltip when hovering over the timestamp of a message. Use the translated form "{date} at {time}" instead, as found elsewhere in the codebase.
@@ -256,10 +256,12 @@ export class MessageListView { if (last_edit_timestamp !== undefined) { const last_edit_time = new Date(last_edit_timestamp * 1000); const today = new Date(); - return ( - timerender.render_date(last_edit_time, undefined, today)[0].textContent + - " at " + - timerender.stringify_time(last_edit_time) + return $t( + {defaultMessage: "{date} at {time}"}, + { + date: timerender.render_date(last_edit_time, undefined, today)[0].textContent, + time: timerender.stringify_time(last_edit_time), + }, ); } return undefined;
Fix & simplify finalize_sym_literals. Symbol names were not properly attributed and checked for duplicates.
@@ -1897,35 +1897,26 @@ class CompileCtx(object): symbols = self._symbol_literals self._symbol_literals = None - i = 1 - for name in sorted(symbols): - # Create a candidate name for this symbol: replace all - # non-alphabetic characters with underscores and remove - # leading/trailing/consecutive underscores. - candidate_lower_name = '' - last_is_alpha = False - for c in name.lower(): - if 'a' <= c <= 'z': - candidate_lower_name += c - last_is_alpha = True - else: - if last_is_alpha: - candidate_lower_name += '_' - last_is_alpha = False + for i, name in enumerate(sorted(symbols)): + # Replace all non-alphabetic characters with underscores + tmp_1 = (c if c.isalpha() else '_' for c in name.lower()) - candidate_name = names.Name.from_lower( - candidate_lower_name.strip('_') + # Remove consecutive underscores + tmp_2 = reduce( + lambda s, c: s if s.endswith('_') and c == '_' else s + c, + tmp_1 ) - # If we have no candidate or if the candidate is already used, fall - # back to an unique number. - if not candidate_name or candidate_name in self.symbol_literals: - enum_name = names.Name(str(i)) - i += 1 - else: - enum_name = candidate_name + # Remove leading/trailing underscores, and add 'Symbol' prefix + candidate_name = names.Name('Symbol') + names.Name.from_lower( + tmp_2.strip('_') + ) + + # If the candidate is already used, add an unique number + if candidate_name in self.symbol_literals.values(): + candidate_name = candidate_name + names.Name(str(i)) - self.symbol_literals[name] = names.Name('Symbol') + enum_name + self.symbol_literals[name] = candidate_name def annotate_fields_types(self): """
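A standalone toy run of the same name-mangling steps as in the diff above; the input symbol is invented, and the snippet only illustrates the underscore replacement and collapsing:

```python
from functools import reduce

name = "not--equal?"

# replace all non-alphabetic characters with underscores
tmp_1 = (c if c.isalpha() else '_' for c in name.lower())

# collapse consecutive underscores: only append '_' when the result does not already end with one
tmp_2 = reduce(lambda s, c: s if s.endswith('_') and c == '_' else s + c, tmp_1)

print(tmp_2.strip('_'))  # not_equal
```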
[modules/system] Add parameters to override commands. For each command in the system module, add a parameter that allows the user to override the default behaviour. fixes
@@ -10,8 +10,15 @@ the system. Per default a confirmation dialog is shown before the actual action is performed. -Paramters: +Parameters: * system.confirm: show confirmation dialog before performing any action (default: true) + * system.reboot: specify a reboot command (defaults to 'reboot') + * system.shutdown: specify a shutdown command (defaults to 'shutdown -h now') + * system.logout: specify a logout command (defaults to 'i3exit logout') + * system.switch_user: specify a command for switching the user (defaults to 'i3exit switch_user') + * system.lock: specify a command for locking the screen (defaults to 'i3exit lock') + * system.suspend: specify a command for suspending (defaults to 'i3exit suspend') + * system.hibernate: specify a command for hibernating (defaults to 'i3exit hibernate') """ import logging @@ -68,14 +75,22 @@ class Module(bumblebee.engine.Module): def popup(self, widget): menu = bumblebee.popup_v2.PopupMenu() - menu.add_menuitem("shutdown", callback=functools.partial(self._on_command, "Shutdown", "Shutdown?", "shutdown -h now")) - menu.add_menuitem("reboot", callback=functools.partial(self._on_command, "Reboot", "Reboot?", "reboot")) + reboot_cmd = self.parameter("reboot", "reboot") + shutdown_cmd = self.parameter("shutdown", "shutdown -h now") + logout_cmd = self.parameter("logout", "i3exit logout") + switch_user_cmd = self.parameter("switch_user", "i3exit switch_user") + lock_cmd = self.parameter("lock", "i3exit lock") + suspend_cmd = self.parameter("suspend", "i3exit suspend") + hibernate_cmd = self.parameter("hibernate", "i3exit hibernate") + + menu.add_menuitem("shutdown", callback=functools.partial(self._on_command, "Shutdown", "Shutdown?", shutdown_cmd)) + menu.add_menuitem("reboot", callback=functools.partial(self._on_command, "Reboot", "Reboot?", reboot_cmd)) menu.add_menuitem("log out", callback=functools.partial(self._on_command, "Log out", "Log out?", "i3exit logout")) # don't ask for these - menu.add_menuitem("switch user", callback=functools.partial(bumblebee.util.execute, "i3exit switch_user")) - menu.add_menuitem("lock", callback=functools.partial(bumblebee.util.execute, "i3exit lock")) - menu.add_menuitem("suspend", callback=functools.partial(bumblebee.util.execute, "i3exit suspend")) - menu.add_menuitem("hibernate", callback=functools.partial(bumblebee.util.execute, "i3exit hibernate")) + menu.add_menuitem("switch user", callback=functools.partial(bumblebee.util.execute, switch_user_cmd)) + menu.add_menuitem("lock", callback=functools.partial(bumblebee.util.execute, lock_cmd)) + menu.add_menuitem("suspend", callback=functools.partial(bumblebee.util.execute, suspend_cmd)) + menu.add_menuitem("hibernate", callback=functools.partial(bumblebee.util.execute, hibernate_cmd)) menu.show(widget)
Update README.md. Updated info on compressed_segmentation.
[![Build Status](https://travis-ci.org/seung-lab/cloud-volume.svg?branch=master)](https://travis-ci.org/seung-lab/cloud-volume) [![PyPI version](https://badge.fury.io/py/cloud-volume.svg)](https://badge.fury.io/py/cloud-volume) -# cloud-volume +# CloudVolume ```python3 from cloudvolume import CloudVolume @@ -17,7 +17,7 @@ Precomputed volumes are typically stored on [AWS S3](https://aws.amazon.com/s3/) The combination of [Neuroglancer](https://github.com/google/neuroglancer/), [Igneous](), and CloudVolume comprises a system for visualizing, processing, and sharing (via browser viewable URLs) petascale datasets within and between laboratories. A typical example usage would be to visualize raw electron microscope scans of mouse, fish, or fly brains up to a cubic millimeter in physical dimension. Neuroglancer and Igneous would enable you to visualize each step of the process of montaging the image, fine tuning alignment, creating segmentation layers, ROI masks, or performing other types of analysis. CloudVolume enables you to read from and write to each of these layers. -CloudVolume can be used in single or multi-process capacity and can be optimized to use no more than a little over a single cutout's worth of memory. It supports reading and writing the `compressed_segmentation` format via a pure python library provided by Yann Leprince. +CloudVolume can be used in single or multi-process capacity and can be optimized to use no more than a little over a single cutout's worth of memory. It supports reading and writing the `compressed_segmentation` format via a C++ extension by Jeremy Maitin-Shepard, Stephen Plaza, and William Silversmith and a fallback to a pure python library provided by Yann Leprince. ## Setup @@ -26,7 +26,7 @@ Cloud-volume is compatible with Python 2.6+ and 3.4+ (we've noticed it's faster #### `pip` Installation ```bash -pip install numpy # additional step only needed for compressed_segmentation +pip install numpy # additional step only needed for accelerated compressed_segmentation pip install cloud-volume ``` @@ -45,7 +45,7 @@ workon cv virtualenv venv source venv/bin/activate -pip install numpy # additional step only needed for compressed_segmentation +pip install numpy # additional step only needed for accelerated compressed_segmentation pip install -e . ``` @@ -113,7 +113,9 @@ Neuroglancer relies on an [`info`](https://github.com/google/neuroglancer/tree/m In the below example, assume you are creating a new segmentation volume from a 3d numpy array "rawdata". Note Precomputed stores data in Fortran (column major) order. You should do a small test to see if the image is written transposed. You can fix this by uploading `rawdata.T`. ```python3 -info = cloudvolume.CloudVolume.create_new_info( +from cloudvolume import CloudVolume + +info = CloudVolume.create_new_info( num_channels = 1, layer_type = 'segmentation', data_type = 'uint64', # Channel images might be 'uint8' @@ -126,7 +128,7 @@ info = cloudvolume.CloudVolume.create_new_info( chunk_size = [ 512, 512, 16 ], # units are voxels volume_size = [ 250000, 250000, 25000 ], # e.g. a cubic millimeter dataset ) -vol = cloudvolume.CloudVolume(cfg.path, info=info) +vol = CloudVolume(cfg.path, info=info) vol.commit_info() vol[cfg.x: cfg.x + cfg.length, cfg.y:cfg.y + cfg.length, cfg.z: cfg.z + cfg.length] = rawdata[:,:,:] ```
array_types_ada.mako: add comment to describe type emission logic. TN:
<% elt_type = cls.element_type.name %> + ## If this array type is exposed in the public API, it is declared there, so + ## no need to re-declare it here. There is one exception to this rule: if + ## the element type itself is exposed, but as a different type (for instance + ## entities), then we need a separate type. % if not cls._exposed or cls.array_type_name != cls.api_name: type ${cls.array_type_name} is array (Positive range <>) of ${cls.element_type.name};
Lower- and upper-case IDL are both OK in Markdown files
+import mock from nbformat.v4.nbbase import new_code_cell, new_raw_cell, new_markdown_cell from testfixtures import compare import jupytext @@ -257,3 +258,53 @@ nor be split into two pieces.""" compare(text, nb.cells[0].source) assert nb.cells[0].cell_type == 'markdown' assert len(nb.cells) == 1 + + +def test_read_markdown_idl(text='''--- +jupyter: + kernelspec: + display_name: IDL [conda env:gdl] * + language: IDL + name: conda-env-gdl-idl +--- + +# A sample IDL Markdown notebook + +```idl +a = 1 +``` +'''): + nb = jupytext.reads(text, 'md') + assert len(nb.cells) == 2 + assert nb.cells[1].cell_type == 'code' + assert nb.cells[1].source == 'a = 1' + + nb.metadata.pop('jupytext') + with mock.patch('jupytext.header.INSERT_AND_CHECK_VERSION_NUMBER', False): + text2 = jupytext.writes(nb, 'md') + compare(text, text2) + + +def test_read_markdown_IDL(text='''--- +jupyter: + kernelspec: + display_name: IDL [conda env:gdl] * + language: IDL + name: conda-env-gdl-idl +--- + +# A sample IDL Markdown notebook + +```IDL +a = 1 +``` +'''): + nb = jupytext.reads(text, 'md') + assert len(nb.cells) == 2 + assert nb.cells[1].cell_type == 'code' + assert nb.cells[1].source == 'a = 1' + + nb.metadata.pop('jupytext') + with mock.patch('jupytext.header.INSERT_AND_CHECK_VERSION_NUMBER', False): + text2 = jupytext.writes(nb, 'md') + compare(text.replace('```IDL', '```idl'), text2)
js: only remove hash if there is one. Firefox seems to add a history entry because the url changes from '{url}' to '{url}#'.
@@ -9,8 +9,10 @@ export function closeOverlay() { $$('.overlay-wrapper').forEach((el) => { el.classList.remove('shown'); }); + if (window.location.hash) { window.location.hash = ''; } +} // Show various overlays depending on the hash. export function handleHash() {
Catch Limiter bug fix. Added default values for daily_catch_limit and exit_on_limit_reached in case they are not found in the CatchPokemon task.
@@ -24,6 +24,9 @@ class CatchLimiter(BaseTask): self.duration = self.config.get("duration", 15) self.no_log_until = datetime.now() self.min_ultraball_to_keep = 0 + self.daily_catch_limit = 500 # default it to 500 if not found in CatchPokemon + self.exit_on_limit_reached = False # default it to false if not found in CatchPokemon + for catch_cfg in self.bot.config.raw_tasks: if "type" in catch_cfg: if catch_cfg["type"] == "CatchPokemon":
Fixed multiproject test. It didn't remove the date and didn't raise the error.
@@ -61,9 +61,13 @@ def test_multiproject_1(): '-wi', 'tests/multipart.xml', 'tests/multipart2.xml'] logging.debug('Running '+str(cmd)) subprocess.check_output(cmd, stderr=subprocess.STDOUT) - cmd = ['xlsx2csv', 'tests/multipart1+2.xlsx', 'tests/result_test/multipart1+2.csv'] + cmd = ['xlsx2csv', 'tests/multipart1+2.xlsx', 'tests/result_test/multipart1+2.csv.tmp'] logging.debug('Running '+str(cmd)) subprocess.check_output(cmd, stderr=subprocess.STDOUT) + cmd = ['egrep', '-i', '-v', r'(\$ date|kicost|Total purchase)', 'tests/result_test/multipart1+2.csv.tmp'] + with open('tests/result_test/multipart1+2.csv', 'w') as f: + logging.debug('Running '+str(cmd)) + subprocess.run(cmd, stdout=f) cmd = ['diff', '-u', 'tests/expected_test/multipart1+2.csv', 'tests/result_test/multipart1+2.csv'] logging.debug('Running '+str(cmd)) subprocess.check_output(cmd, stderr=subprocess.STDOUT) @@ -72,6 +76,7 @@ def test_multiproject_1(): logging.error('Failed test: '+test_name) if e.output: logging.error('Output from command: ' + e.output.decode()) + raise e class TestKicost(unittest.TestCase):
Change direction of arrow. Fixes
@@ -199,7 +199,7 @@ exports.addExchangesConfiguration = function(exchanges) { // GR exchanges['GR->IT'] = { lonlat: [18.759248, 38.902132], - rotation: 90 + rotation: -90 }; exchanges['GR->MK'] = { lonlat: [22.011736, 41.160374],
Add DistMult to the README. This wasn't added in or but could've been.
@@ -183,6 +183,7 @@ The StellarGraph library currently includes the following algorithms for graph m | Watch Your Step [14] | The Watch Your Step algorithm computes node embeddings by using adjacency powers to simulate expected random walks. | | Deep Graph Infomax [15] | Deep Graph Infomax trains unsupervised GNNs to maximize the shared information between node level and graph level features. | | Continuous-Time Dynamic Network Embeddings (CTDNE) [16] | Supports time-respecting random walks which can be used in a similar way as in Node2Vec for unsupervised representation learning. | +| DistMult [17] | The DistMult algorithm computes embeddings for nodes (entities) and edge types (relations) in knowledge graphs, and can use these for link prediction | ## Installation @@ -295,3 +296,5 @@ International Conference on Machine Learning (ICML), 2019. ([link](https://arxiv 15. Deep Graph Infomax. P. Velickovic, W. Fedus, W. L. Hamilton, P. Lio, Y. Bengio, R. D. Hjelm, ICLR, 2019, arxiv:1809.10341 ([link](https://arxiv.org/pdf/1809.10341.pdf)). 16. Continuous-Time Dynamic Network Embeddings. Giang Hoang Nguyen, John Boaz Lee, Ryan A. Rossi, Nesreen K. Ahmed, Eunyee Koh, and Sungchul Kim. Proceedings of the 3rd International Workshop on Learning Representations for Big Networks (WWW BigNet) 2018. ([link](https://dl.acm.org/doi/10.1145/3184558.3191526)) + +17. Embedding Entities and Relations for Learning and Inference in Knowledge Bases. Bishan Yang, Wen-tau Yih, Xiaodong He, Jianfeng Gao, and Li Deng, ICLR, 2015. arXiv:1412.6575 ([link](https://arxiv.org/pdf/1412.6575))
Add a note about service.running. This is a common source of confusion; hopefully this will help prevent that confusion.
@@ -378,6 +378,63 @@ exactly like the ``require`` requisite (the watching state will execute if .. note:: + If the watching state ``changes`` key contains values, then ``mod_watch`` + will not be called. If you're using ``watch`` or ``watch_in`` then it's a + good idea to have a state that only enforces one attribute - such as + splitting out ``service.running`` into its own state and have + ``service.enabled`` in another. + +One common source of confusion is expecting ``mod_watch`` to be called for +every necessary change. You might be tempted to write something like this: + +.. code-block:: yaml + + httpd: + service.running: + - enable: True + - watch: + - file: httpd-config + + httpd-config: + file.managed: + - name: /etc/httpd/conf/httpd.conf + - source: salt://httpd/files/apache.conf + +If your service is already running but not enabled, you might expect that Salt +will be able to tell that since the config file changed your service needs to +be restarted. This is not the case. Because the service needs to be enabled, +that change will be made and ``mod_watch`` will never be triggered. In this +case, changes to your ``apache.conf`` will fail to be loaded. If you want to +ensure that your service always reloads the correct way to handle this is +either ensure that your service is not running before applying your state, or +simply make sure that ``service.running`` is in a state on its own: + +.. code-block:: yaml + + enable-httpd: + service.enabled: + - name: httpd + + start-httpd: + service.running: + - name: httpd + - watch: + - file: httpd-config + + httpd-config: + file.managed: + - name: /etc/httpd/conf/httpd.conf + - source: salt://httpd/files/apache.conf + +Now that ``service.running`` is its own state, changes to ``service.enabled`` +will no longer prevent ``mod_watch`` from getting triggered, so your ``httpd`` +service will get restarted like you want. + +.. _requisites-listen: + +listen +~~~~~~ + Not all state modules contain ``mod_watch``. If ``mod_watch`` is absent from the watching state module, the ``watch`` requisite behaves exactly like a ``require`` requisite.
Fix multiple match text for token regex. It has to account for the addition of groups. It's easiest to compare the entire string, so `finditer` is used to return re.Match objects; the tuples of `findall` would be cumbersome. Also threw in a change to use `assertCountEqual` because the order doesn't really matter.
@@ -174,8 +174,9 @@ class TokenRemoverTests(unittest.IsolatedAsyncioTestCase): tokens = ["x.y.z", "a.b.c"] message = f"garbage {tokens[0]} hello {tokens[1]} world" - results = token_remover.TOKEN_RE.findall(message) - self.assertEqual(tokens, results) + results = token_remover.TOKEN_RE.finditer(message) + results = [match[0] for match in results] + self.assertCountEqual(tokens, results) @autospec(TokenRemover, "is_valid_user_id", "is_valid_timestamp") def test_is_maybe_token_missing_part_returns_false(self, valid_user, valid_time):
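A self-contained illustration of the findall/finditer difference the message describes; the pattern below is an invented stand-in for the real TOKEN_RE, assumed only to contain capture groups:

```python
import re

# hypothetical token pattern with three capture groups
token_re = re.compile(r"(\w+)\.(\w+)\.(\w+)")
text = "garbage x.y.z hello a.b.c world"

# with groups present, findall returns tuples of the groups, not the full matches
print(token_re.findall(text))                        # [('x', 'y', 'z'), ('a', 'b', 'c')]

# finditer yields re.Match objects, and match[0] recovers the whole matched string
print([match[0] for match in token_re.finditer(text)])  # ['x.y.z', 'a.b.c']
```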
Logged sync and render time in tiled final render mode. PURPOSE: Synchronization and render times aren't displayed in the tiled final render mode log; add them. EFFECT OF CHANGE: Synchronization and render times are correctly logged in tiled final render mode.
@@ -284,6 +284,10 @@ class RenderEngine(Engine): athena_data['Stop Time'] = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S.%f") athena_data['Samples'] = round(self.render_samples * progress) + + log.info(f"Scene synchronization time:", perfcounter_to_str(self.sync_time)) + log.info(f"Render time:", perfcounter_to_str(self.current_render_time)) + self.athena_send(athena_data) def render(self):
Dockerfile: Update base image to 20.04. 19.10 is now EOL, so update to use 20.04 LTS as a base instead.
# # We want to make sure to base this on a recent ubuntu release -FROM ubuntu:19.10 +FROM ubuntu:20.04 # Please update the references below to use different versions of # devlib, WA or the Android SDK
Removed docs from configurable parameters. docs is not supported by DynamicProperty, so it is removed from the list of configurable parameters. Improved documentation for set_command and get_command. Fixed a typo.
@@ -51,7 +51,7 @@ class DynamicProperty(property): def __get__(self, obj, objtype=None): if obj is None: - # Property return itself when invocad from a class + # Property return itself when invoked from a class return self if self.fget is None: raise AttributeError("unreadable attribute") @@ -91,12 +91,11 @@ class Instrument(object): # by users special_keys = ('get_command', 'set_command', - 'docs', 'validator', 'values', 'map_values', 'get_process', - 'set_process=', + 'set_process', 'command_process', 'check_set_errors', 'check_get_errors') @@ -225,8 +224,10 @@ class Instrument(object): commands. This property may be set and read from the instrument. See also :meth:`measurement` and :meth:`setting`. - :param get_command: A string command that asks for the value - :param set_command: A string command that writes the value + :param get_command: A string command that asks for the value, set to `None` + if get is not supported (see also :meth:`setting`). + :param set_command: A string command that writes the value, set to `None` + if set is not supported (see also :meth:`measurement`). :param docs: A docstring that will be included in the documentation :param validator: A function that takes both a value and a group of valid values and returns a valid value, while it otherwise raises an exception
Use author as the title of the embed. Allows the icon to be centered.
@@ -842,10 +842,10 @@ class HelpChannels(commands.Cog): log.trace(f"Sending available message in {channel_info}.") embed = discord.Embed( - title=AVAILABLE_TITLE, color=constants.Colours.bright_green, description=AVAILABLE_MSG, ) + embed.set_author(name=AVAILABLE_TITLE, icon_url=constants.Icons.green_checkmark) embed.set_footer(text=AVAILABLE_FOOTER) msg = await self.get_last_message(channel)
Update ilapfuncs.py. Allow KMLs to be generated even if a timestamp is not present.
@@ -220,7 +220,7 @@ def kmlgen(report_folder, kmlactivity, data_list, data_headers): length = (len(data_list)) while a < length: modifiedDict = dict(zip(data_headers, data_list[a])) - times = modifiedDict['Timestamp'] + times = modifiedDict.get('Timestamp','N/A') lon = modifiedDict['Longitude'] lat = modifiedDict['Latitude'] if lat:
Update contributor-guide.rst. Added the verbose pip install -v argument, needed to see pyqode.qt install print statements in setup.py.
@@ -594,7 +594,7 @@ To do so, execute these commands in the top-level of the repository: conda env create -n <env_name> environment.yml conda activate <env_name> - python -m pip install -e . + python -m pip install -ve . For convenience, you can also try to install directly in an existing environment such as the `base` environment, if it is relatively up to date. To install `qiskit_metal` and its depenencies into an existing environment named `<env_name>`, execute these commands in the top-level of the repository: @@ -603,7 +603,7 @@ To install `qiskit_metal` and its depenencies into an existing environment named conda env update -n <env_name> environment.yml conda activate <env_name> - python -m pip install -e . + python -m pip install -ve . *Notes on using conda*
Separate htex worker log directories by block id. This commit adds an additional block level to htex worker log paths, like this: .../runinfo/004/worker-nodes/block-0/627785a8965e/manager.log. This has been useful during a run with multiple large blocks to discern which logs belong to which block.
@@ -474,7 +474,7 @@ def worker(worker_id, pool_id, pool_size, task_queue, result_queue, worker_queue Pop request from queue Put result into result_queue """ - start_file_logger('{}/{}/worker_{}.log'.format(args.logdir, pool_id, worker_id), + start_file_logger('{}/block-{}/{}/worker_{}.log'.format(args.logdir, args.block_id, pool_id, worker_id), worker_id, name="worker_log", level=logging.DEBUG if args.debug else logging.INFO) @@ -580,10 +580,10 @@ if __name__ == "__main__": args = parser.parse_args() - os.makedirs(os.path.join(args.logdir, args.uid), exist_ok=True) + os.makedirs(os.path.join(args.logdir, "block-{}".format(args.block_id), args.uid), exist_ok=True) try: - start_file_logger('{}/{}/manager.log'.format(args.logdir, args.uid), + start_file_logger('{}/block-{}/{}/manager.log'.format(args.logdir, args.block_id, args.uid), 0, level=logging.DEBUG if args.debug is True else logging.INFO)
[Test] increase timeout for `test_traceback.py`. `test_traceback.py` was taking ~55s to finish recently, and since today it has started to time out at 60s more frequently. All test cases do succeed, so increase its test timeout for now. We will look into whether there is any performance regression separately.
@@ -116,6 +116,7 @@ py_test_module_list( "test_multi_tenancy.py", "test_scheduling.py", "test_scheduling_2.py", + "test_traceback.py", ], size = "medium", extra_srcs = SRCS, @@ -138,7 +139,6 @@ py_test_module_list( "test_numba.py", "test_queue.py", "test_ray_shutdown.py", - "test_traceback.py", "test_unhandled_error.py", "test_top_level_api.py", "test_list_actors.py",
Update export_result_mysql.dig. Made the commented changes: removed the api endpoint and moved database under td.
timezone: UTC _export: - mysql: - endpoint: api.treasuredata.com + td: database: sample_datasets + mysql: connection: MY_REMOTE_CONNECTION dbname: MY_DB_NAME table: MY_TABLE_NAME
show status on graphs HG-- branch : feature/microservices
{ "alias": "Input", "transform": "negative-Y" + }, + { + "alias": "Oper status", + "yaxis": 2 + }, + { + "alias": "Admin status", + "yaxis": 2 } ], "span": 12, "value": "/^${{interface.type}}$/" } ] + }, + { + "refId": "C", + "measurement": "Interface | Status | Oper", + "alias": "Oper status", + "hide": false, + "policy": "default", + "dsType": "influxdb", + "resultFormat": "time_series", + "tags": [ + { + "key": "object", + "operator": "=~", + "value": "/^$device$/" + }, + { + "condition": "AND", + "key": "interface", + "operator": "=~", + "value": "/^${{interface.type}}$/" + } + ], + "groupBy": [ + { + "type": "time", + "params": [ + "$interval" + ] + }, + { + "type": "fill", + "params": [ + "null" + ] + } + ], + "select": [ + [ + { + "type": "field", + "params": [ + "value" + ] + }, + { + "type": "last", + "params": [] + } + ] + ] + }, + { + "refId": "D", + "measurement": "Interface | Status | Admin", + "alias": "Admin status", + "hide": false, + "policy": "default", + "dsType": "influxdb", + "resultFormat": "time_series", + "tags": [ + { + "key": "object", + "operator": "=~", + "value": "/^$device$/" + }, + { + "condition": "AND", + "key": "interface", + "operator": "=~", + "value": "/^${{interface.type}}$/" + } + ], + "groupBy": [ + { + "type": "time", + "params": [ + "$interval" + ] + }, + { + "type": "fill", + "params": [ + "null" + ] + } + ], + "select": [ + [ + { + "type": "field", + "params": [ + "value" + ] + }, + { + "type": "last", + "params": [] + } + ] + ] } ], "timeFrom": null, "show": true }, { - "format": "short", - "label": null, "logBase": 1, - "max": null, + "show": false, + "max": "10", + "format": "short", "min": null, - "show": null + "label": null } ] }
[Dataset] GNNBenchmarkDataset * PPIDataset * Revert "PPIDataset" This reverts commit * gnn benchmark dataset * Update gnn_benckmark.py
@@ -10,7 +10,8 @@ from .sbm import SBMMixture from .reddit import RedditDataset from .ppi import PPIDataset, LegacyPPIDataset from .tu import TUDataset, LegacyTUDataset -from .gnn_benckmark import AmazonCoBuy, CoraFull, Coauthor +from .gnn_benckmark import AmazonCoBuy, CoraFull, Coauthor, AmazonCoBuyComputerDataset, \ + AmazonCoBuyPhotoDataset, CoauthorPhysicsDataset, CoauthorCSDataset, CoraFullDataset from .karate import KarateClub, KarateClubDataset from .gindt import GINDataset from .bitcoinotc import BitcoinOTC, BitcoinOTCDataset
addressing issue closes
@@ -5,7 +5,7 @@ from typing import List, Tuple, Callable, Iterator import tensorflow as tf from neuralmonkey.tf_utils import update_initializers -from neuralmonkey.logging import log +from neuralmonkey.logging import log, warn # pylint: enable=invalid-name InitializerSpecs = List[Tuple[str, Callable]] @@ -52,8 +52,8 @@ class Parameterized(metaclass=ABCMeta): # pylint: disable=unidiomatic-typecheck # Here we need an exact match of types if type(self) != type(reuse): - raise TypeError("Can only reuse parameters of ModelPart " - "objects within the same sub-class.") + warn("Warning: sharing parameters between model parts of " + "different types.") # pylint: enable=unidiomatic-typecheck if initializers is not None:
Update phishing.txt. Have updated domains due to a comment.
@@ -306,23 +306,25 @@ netflix-exp.com # Reference: https://twitter.com/PhishingAi/status/1037167256138989569 # Reference: https://paste.ee/p/z6Xng -citycloudbd.com +citycloudbd.com/pot/Share/share ctsluganda.org -figwit.co.uk +figwit.co.uk/SEIREN/Office/Share/share gayatriea.com joaquinpianguita.com kazurimanager.com mutuprop.com -neernikunj.com +neernikunj.com/rss/cgi_bin/Share/share riveratorresyasociados.com -rotarykampalasseseislands.org -sunbridge.co.ug +rotarykampalasseseislands.org/nig/sharpoint/share +sunbridge.co.ug/ADMIN/ch/share/ +sunbridge.co.ug/hot/ch/share/ +sunbridge.co.ug/sars/ch/share sycjamsa.com wisdomhomeloans.com.au www.intechsecurity.com.au -www.intest.com.ve +www.intest.com.ve/flexmation_/Share/share www.sumipancarabobo.com.ve -www.webdesigns.net.au +www.webdesigns.net.au/dustvalue/Share/share/index.php # Reference: https://twitter.com/PhishingAi/status/1037176371435302912
Fix lookback names to be consistent with urdb_parse. The previous names were from the URDB documentation, which is incorrect.
@@ -108,9 +108,9 @@ class RateData: 'demandwindow', 'demandreactivepowercharge', # lookback demand charges - 'lookbackMonths', - 'lookbackPercent', - 'lookbackRange', + 'lookbackmonths', + 'lookbackpercent', + 'lookbackrange', # coincident rates 'coincidentrateunit', 'coincidentratestructure', @@ -447,27 +447,27 @@ class UrdbParse: def prepare_demand_lookback(self, current_rate): """ URDB lookback fields: - lookbackMonths + lookbackmonths Type: array - Array of 12 booleans, true or false, indicating months in which lookbackPercent applies. - If any of these is true, lookbackRange should be zero. + Array of 12 booleans, true or false, indicating months in which lookbackpercent applies. + If any of these is true, lookbackrange should be zero. - lookbackPercent + lookbackpercent Type: decimal - Lookback percentage. Applies to either lookbackMonths with value=1, or a lookbackRange. + Lookback percentage. Applies to either lookbackmonths with value=1, or a lookbackrange. - lookbackRange + lookbackrange Type: integer - Number of months for which lookbackPercent applies. If not 0, lookbackMonths values should all be 0. + Number of months for which lookbackpercent applies. If not 0, lookbackmonths values should all be 0. """ - if current_rate.lookbackPercent in [None, 0, []]: + if current_rate.lookbackpercent in [None, 0, []]: reopt_lookback_months = [] lookback_percentage = 0 lookback_range = 0 else: - lookback_percentage = current_rate.lookbackPercent or 0.0 - lookback_months = current_rate.lookbackMonths # defaults to empty list - lookback_range = current_rate.lookbackRange or 0 + lookback_percentage = current_rate.lookbackpercent or 0.0 + lookback_months = current_rate.lookbackmonths # defaults to empty list + lookback_range = current_rate.lookbackrange or 0 reopt_lookback_months = [] if lookback_range != 0 and len(lookback_months) == 12: for month in range(1, 13):
Remove show_immediately from matplotlib graph. It looks like appveyor is hanging after running the example due to the plot being open.
@@ -151,7 +151,7 @@ class PressureMatrix: >>> my_pressure_matrix.plot_shape() >>> my_pressure_matrix.plot_pressure_theta(z=int(nz/2)) >>> my_pressure_matrix.matplot_pressure_theta_cylindrical(z=int(nz/2), - ... show_immediately=True) + ... show_immediately=False) """ def __init__(
Updates stability analyzer to be compatible with sparse data. Changes a dictionary index to a .get(index, default) call so that if an outcome is not present (e.g. when all the outcomes are of a single type) the stability analysis doesn't fail.
@@ -1771,7 +1771,7 @@ class StabilityAnalyzer(object): # The most likely null hypothesis model, i.e., constant probabilities that are the observed frequencies. counts = self.data[dskey][circuit].counts total = self.data[dskey][circuit].total - means = {o: counts[o] / total for o in outcomes} + means = {o: counts.get(o, 0) / total for o in outcomes} nullptraj = _ptraj.ConstantProbTrajectory(outcomes, means) self._probtrajectories[i, j]['null'] = nullptraj
help_docs: Update `stream-notifications` help doc. Uses new `select-stream-view-personal` for instructions. Also moves one-sentence notes to be under the header instead of inside the tab block, updates the numbers used in the instruction list to all be '1', and clarifies the text about the notifications table in the general personal settings.
@@ -5,34 +5,38 @@ stream basis. ## Set notifications for a single stream +These settings will override any default stream notification settings. + {start_tabs} 1. Hover over the stream in the left sidebar. -2. Click the ellipsis (<i class="zulip-icon zulip-icon-ellipsis-v-solid"></i>) to the +1. Click the ellipsis (<i class="zulip-icon zulip-icon-ellipsis-v-solid"></i>) to the right of the stream. -3. Click **Stream settings**. +1. Click **Stream settings**. -4. Toggle the notifications settings on the right. +{!select-stream-view-personal.md!} -{end_tabs} +1. Under **Notification settings**, toggle your preferred + notifications settings for the stream. -These settings will override any default stream notification settings. +{end_tabs} ## Set default notifications for all streams +These settings only apply to streams where you have not +explicitly set a notification preference. + {start_tabs} {settings_tab|notifications} -1. Configure notifications in the "Notification triggers" table. +1. In the **Notification triggers** table, + toggle the settings for **Streams**. {end_tabs} -These settings only apply to streams where you have not -explicitly set a notification preference. - ## Related articles * [Desktop notifications](/help/desktop-notifications)
scripts/update-plugin-list: Improve requirement detection. PEP 566 does not require a space after the dependency name.
@@ -78,7 +78,7 @@ def iter_plugins(): requires = "N/A" if info["requires_dist"]: for requirement in info["requires_dist"]: - if requirement == "pytest" or "pytest " in requirement: + if re.match(r"pytest(?![-.\w])", requirement): requires = requirement break releases = response.json()["releases"]
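A quick sketch of why the lookahead helps: since PEP 566 metadata may list a dependency as `pytest>=6.0` with no space, matching a literal "pytest " misses it, while a bare substring check would also hit plugin packages. The requirement strings below are only illustrative:

```python
import re

pattern = re.compile(r"pytest(?![-.\w])")

for requirement in ["pytest", "pytest (>=6.0)", "pytest>=6.0", "pytest-cov>=2.0", "pytest_asyncio"]:
    print(requirement, bool(pattern.match(requirement)))
# the first three match; "pytest-cov>=2.0" and "pytest_asyncio" do not
```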
Ensure networking process is killed before db process. Since the networking process currently depends on the database process, it is safer to first kill the networking process and then the db process.
@@ -135,7 +135,7 @@ def trinity_boot(args: Namespace, kill_trinity_gracefully( trinity_config, logger, - (database_server_process, networking_process), + (networking_process, database_server_process), plugin_manager, main_endpoint, reason=reason
swarming: fix crash in metrics. Was introduced in
@@ -335,7 +335,7 @@ def _set_executors_metrics(payload): status = 'quarantined' elif bot_info.is_dead(utils.utcnow()): status = 'dead' - elif bot_info.state.get('maintenance', False): + elif bot_info.state and bot_info.state.get('maintenance', False): status = 'maintenance' target_fields = dict(_TARGET_FIELDS)
Remove `bpr.occupancy` from electricity calculation. Also redefines the call for refrigeration, server room and industrial process demands from whether they have a specific type of occupancy to whether they have that specific internal load.
@@ -54,18 +54,18 @@ def calc_Eint(tsd, bpr, schedules): tsd['Ealf'] = tsd['Elf'] + tsd['Eaf'] # calculate other electrical loads in W - if 'COOLROOM' in bpr.occupancy: - tsd['Eref'] = schedules['Ere'] * bpr.internal_loads['Ere_Wm2'] * bpr.occupancy['COOLROOM'] + if bpr.internal_loads['Ere_Wm2'] > 0: + tsd['Eref'] = schedules['Ere'] * bpr.internal_loads['Ere_Wm2'] else: tsd['Eref'] = np.zeros(8760) - if 'SERVERROOM' in bpr.occupancy: - tsd['Edataf'] = schedules['Ed'] * bpr.internal_loads['Ed_Wm2'] * bpr.occupancy['SERVERROOM'] + if bpr.internal_loads['Ed_Wm2'] > 0: + tsd['Edataf'] = schedules['Ed'] * bpr.internal_loads['Ed_Wm2'] else: tsd['Edataf'] = np.zeros(8760) - if 'INDUSTRIAL' in bpr.occupancy: - tsd['Eprof'] = schedules['Epro'] * bpr.internal_loads['Epro_Wm2'] * bpr.occupancy['INDUSTRIAL'] + if bpr.internal_loads['Epro_Wm2'] > 0: + tsd['Eprof'] = schedules['Epro'] * bpr.internal_loads['Epro_Wm2'] tsd['Ecaf'] = np.zeros(8760) # not used in the current version but in the optimization part else: tsd['Eprof'] = np.zeros(8760)
The function suggest_float was not implemented in the class 'ChainerMNTrial', so I implemented it.
@@ -189,6 +189,17 @@ class ChainerMNTrial(BaseTrial): self.delegate = trial self.comm = comm + def suggest_float(self, name, low, high, *, log=False): + # type: (str, float, float, bool) -> float + + def func(): + # type: () -> float + + assert self.delegate is not None + return self.delegate.suggest_float(name, low, high) + + return self._call_with_mpi(func) + def suggest_uniform(self, name, low, high): # type: (str, float, float) -> float
Bump all test dependencies. There was one new flake8 warning, which this fixes.
-r requirements.txt -flake8==4.0.1 -flake8-bugbear==22.4.25 +flake8==5.0.4 +flake8-bugbear==22.7.1 isort==5.10.1 -moto==3.1.9 +moto==4.0.0 pytest==7.1.2 pytest-env==0.6.2 -pytest-mock==3.7.0 +pytest-mock==3.8.2 pytest-cov==3.0.0 pytest-xdist==2.5.0 -freezegun==1.2.1 +freezegun==1.2.2 requests-mock==1.9.3 # used for creating manifest file locally jinja2-cli[yaml]==0.8.2
Update endpoint tests to work with botocore 1.23.x. Verify endpoints are a subset of the data we require, to account for the new variant keyword added to endpoints.json in botocore 1.23.x. Fixes
@@ -25,7 +25,11 @@ from chalice.awsclient import TypedAWSClient ]) def test_resolve_endpoint(stubbed_session, service, region, endpoint): awsclient = TypedAWSClient(stubbed_session) - assert endpoint == awsclient.resolve_endpoint(service, region) + if endpoint is None: + assert awsclient.resolve_endpoint(service, region) is None + else: + assert endpoint.items() <= awsclient.resolve_endpoint( + service, region).items() @pytest.mark.parametrize('arn,endpoint', [ @@ -48,7 +52,11 @@ def test_resolve_endpoint(stubbed_session, service, region, endpoint): ]) def test_endpoint_from_arn(stubbed_session, arn, endpoint): awsclient = TypedAWSClient(stubbed_session) - assert endpoint == awsclient.endpoint_from_arn(arn) + if endpoint is None: + assert awsclient.endpoint_from_arn(arn) is None + else: + assert endpoint.items() <= awsclient.endpoint_from_arn( + arn).items() @pytest.mark.parametrize('service,region,dns_suffix', [
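The subset assertion relies on dict views supporting set-like comparison. A small self-contained sketch with invented endpoint data (the key names and hostnames are only illustrative, not actual botocore output):

```python
expected = {"hostname": "example.us-west-2.amazonaws.com", "partition": "aws"}

# resolved data may now carry extra keys, e.g. a "variants" entry (name used here for illustration)
resolved = {
    "hostname": "example.us-west-2.amazonaws.com",
    "partition": "aws",
    "variants": [{"hostname": "example-fips.us-west-2.amazonaws.com"}],
}

# every expected key/value pair must appear in the resolved endpoint
assert expected.items() <= resolved.items()
```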
Update sys_info.py. Used flake8 linting; there is no requirements.txt, as all the libraries are pre-installed.
@@ -14,8 +14,9 @@ print("Processor: ", sys_info.processor) b = psutil.boot_time() bootTime = datetime.fromtimestamp(b) -print("Booted On: ", bootTime.day, "/", bootTime.month, "/", bootTime.year, " ", bootTime.hour, ":", bootTime.minute, ":", bootTime.second) - +print("Booted On") +print("Day:", bootTime.day, "/", bootTime.month, "/", bootTime.year) +print("Time:", bootTime.hour, ":", bootTime.minute, ":", bootTime.second) print("+-------------------+") print("| CPU Information |")
Change options for default docker app. Sometimes the test deploying the docker app fails. This PR adjusts settings to limit resources and increase the grace period and health check interval.
@@ -190,16 +190,16 @@ def marathon_test_docker_app(app_name: str, constraints=None): test_uuid = uuid.uuid4().hex app = copy.deepcopy({ 'id': "integration-test-{}-{}".format(app_name, test_uuid), - 'cpus': 1, - 'mem': 1024, + 'cpus': 0.5, + 'mem': 128, 'disk': 0, 'instances': 1, 'healthChecks': [ { - "gracePeriodSeconds": 15, + "gracePeriodSeconds": 30, "ignoreHttp1xx": False, - "intervalSeconds": 3, - "maxConsecutiveFailures": 2, + "intervalSeconds": 10, + "maxConsecutiveFailures": 3, "portIndex": 0, "timeoutSeconds": 2, "delaySeconds": 15,
Add precise location information for lexer matchers. TN:
@@ -23,6 +23,9 @@ class Matcher(object): input will trigger a match. """ + def __init__(self, location=None): + self.location = location or extract_library_location() + @property def match_length(self): """ @@ -90,7 +93,8 @@ class Pattern(Matcher): * ``^`` and ``$``, to match the very beginning of the input and its end. """ - def __init__(self, pattern): + def __init__(self, pattern, location=None): + super(Pattern, self).__init__(location) self.pattern = pattern @property @@ -556,13 +560,11 @@ class Lexer(object): :param rules: The list of rules to add. :type rules: list[(Matcher, Action)|RuleAssoc] """ - loc = extract_library_location() - for matcher_assoc in rules: if type(matcher_assoc) is tuple: assert len(matcher_assoc) == 2 matcher, action = matcher_assoc - rule_assoc = RuleAssoc(matcher, action, loc) + rule_assoc = RuleAssoc(matcher, action, matcher.location) else: assert isinstance(matcher_assoc, RuleAssoc) rule_assoc = matcher_assoc @@ -768,7 +770,8 @@ class Literal(Matcher): Pattern("a+") # Matches one or more a Literal("a+") # Matches "a" followed by "+" """ - def __init__(self, to_match): + def __init__(self, to_match, location=None): + super(Literal, self).__init__(location) self.to_match = to_match @property
fix missing string boundaries in dunder all; see follow-up
@@ -50,7 +50,7 @@ __all__ = ['ENGINES', 'FORMATS', 'RENDERERS', 'FORMATTERS', 'unflatten', 'version', 'view', 'RequiredArgumentError', 'FileExistsError', 'UnknownSuffixWarning', 'FormatSuffixMismatchWarning', - 'ExecutableNotFound, CalledProcessError', + 'ExecutableNotFound', 'CalledProcessError', 'set_default_engine', 'set_default_format', 'set_jupyter_format'] __title__ = 'graphviz'
Warn about missing --bypass-file-store with in-place update. When using `InplaceUpdateRequirement` in the `hints` section, `toil-cwl-runner` now warns that the Toil file store does not support this, and that the option --bypass-file-store should be provided on the command line.
@@ -2974,10 +2974,9 @@ def scan_for_unsupported_requirements( if not bypass_file_store: # If we are using the Toil FileStore we can't do InplaceUpdateRequirement req, is_mandatory = tool.get_requirement("InplaceUpdateRequirement") - if req and is_mandatory: - # The tool actually uses this one, and it isn't just a hint. - # Complain and explain. - raise CWL_UNSUPPORTED_REQUIREMENT_EXCEPTION( + if req: + # The tool actually uses this one. Complain and explain. + msg = ( "Toil cannot support InplaceUpdateRequirement when using the Toil file store. " "If you are running on a single machine, or a cluster with a shared filesystem, " "use the --bypass-file-store option to keep intermediate files on the filesystem. " @@ -2985,7 +2984,12 @@ def scan_for_unsupported_requirements( "options to control where on the filesystem files are placed, if only some parts of " "the filesystem are shared." ) - + if is_mandatory: + # The requirement cannot be fulfilled. Raise an exception. + raise CWL_UNSUPPORTED_REQUIREMENT_EXCEPTION(msg) + else: + # The hint cannot be fulfilled. Issue a warning. + logger.warning(msg) def determine_load_listing(tool: Process) -> str: """
Update setup.py Updated requirements for later sectors and the northern hemisphere to work properly with tess-point
@@ -58,7 +58,7 @@ setup( 'mplcursors', 'photutils>=0.7', 'tqdm', 'lightkurve>=1.1.0', 'astropy>=3.2.3', 'astroquery', 'bokeh', 'fitsio', 'pandas', 'setuptools>=41.0.0', - 'tensorflow<=1.14.0', 'vaneska', 'beautifulsoup4>=4.6.0', 'tess-point'], + 'tensorflow<=1.14.0', 'vaneska', 'beautifulsoup4>=4.6.0', 'tess-point>=0.3.6'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research',
fix: Add invalid conditions for Check fieldtype Update invalid conditions for select and link
@@ -36,10 +36,11 @@ frappe.ui.Filter = class { Date: ['like', 'not like'], Datetime: ['like', 'not like'], Data: ['Between', 'Previous', 'Next'], - Select: ['like', 'not like'], - Link: ["Between", 'Previous', 'Next'], + Select: ['like', 'not like', 'Between', 'Previous', 'Next'], + Link: ["Between", 'Previous', 'Next', '>', '<', '>=', '<='], Currency: ["Between", 'Previous', 'Next'], - Color: ["Between", 'Previous', 'Next'] + Color: ["Between", 'Previous', 'Next'], + Check: this.conditions.map(c => c[0]).filter(c => c !== '=') }; this.make(); this.make_select(); @@ -232,7 +233,7 @@ frappe.ui.Filter = class { make_field(df, old_fieldtype) { let old_text = this.field ? this.field.get_value() : null; - this.hide_invalid_conditions(df.fieldtype, df.original_type); + this.hide_invalid_conditions(df.original_type); this.toggle_nested_set_conditions(df); let field_area = this.filter_edit_area.find('.filter-field').empty().get(0); let f = frappe.ui.form.make_control({ @@ -328,9 +329,8 @@ frappe.ui.Filter = class { : __("use % as wildcard"))+'</div>'); } - hide_invalid_conditions(fieldtype, original_type) { - let invalid_conditions = this.invalid_condition_map[fieldtype] || - this.invalid_condition_map[original_type] || []; + hide_invalid_conditions(fieldtype) { + let invalid_conditions = this.invalid_condition_map[fieldtype] || []; for (let condition of this.conditions) { this.filter_edit_area.find(`.condition option[value="${condition[0]}"]`).toggle(
DOC: use custom str instead of repr Use str instead of repr for the custom attribute in the Instrument string output.
@@ -1075,7 +1075,7 @@ class Instrument(object): output_str += 'Data Padding: ' + self.pad.__repr__() + '\n' output_str += 'Keyword Arguments Passed to load(): ' output_str += self.kwargs.__str__() + '\n' - output_str += self.custom.__repr__() + output_str += self.custom.__str__() # Print out the orbit settings if self.orbits.orbit_index is not None:
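A minimal, generic Python sketch (not pysat code; the class and messages are illustrative) of why `__str__` is the better hook for embedding a sub-object's summary in another object's printed output:

```python
class Custom:
    """Stand-in for an attribute whose summary gets embedded in a larger report."""

    def __repr__(self):
        # Developer-oriented form, ideally unambiguous / reconstructable
        return "Custom(functions=[])"

    def __str__(self):
        # Human-readable summary, suited to a printed instrument overview
        return "Custom Functions: 0 applied"


custom = Custom()
print(repr(custom))  # Custom(functions=[])
print(str(custom))   # Custom Functions: 0 applied
```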
Fixing Revert view with Reversion 2.0 revision_view and recover_view get extra context added by way of overriding render_revision_form(), but this method got renamed in Reversion 2.0, so we need to override _reversion_revisionform_view() too. Unfortunately saving this form hits other issues, so this is WIP.
@@ -243,8 +243,17 @@ class ItemEditor(ExtensionModelAdmin): recover_form_template = "admin/feincms/recover_form.html" + # For Reversion < v2.0.0 def render_revision_form(self, request, obj, version, context, revert=False, recover=False): context.update(self.get_extra_context(request)) return super(ItemEditor, self).render_revision_form( request, obj, version, context, revert, recover) + + # For Reversion >= v2.0.0 + def _reversion_revisionform_view(self, request, version, template_name, + extra_context=None): + context = extra_context or {} + context.update(self.get_extra_context(request)) + return super(ItemEditor, self)._reversion_revisionform_view( + request, version, template_name, context)
Modernize SCONS_CACHE_MSVC_CONFIG manpage entry [skip appveyor] Mention name change; remove the wording about version changes possibly causing problems - the current implementation should be resilient to this.
@@ -8398,29 +8398,29 @@ so the command line can be used to override <listitem> <para>(Windows only). If set, save the shell environment variables generated when setting up the Microsoft Visual C++ compiler -(and/or Build Tools) to a cache file, to give these settings, -which are relatively expensive to generate, persistence -across &scons; invocations. -Use of this option is primarily intended to aid performance -in tightly controlled Continuous Integration setups.</para> +(and/or Build Tools) to a cache file, to give these settings +persistence across &scons; invocations. +Generating this information is relatively expensive, +so using this option may aid performance where &scons; is run often, +such as Continuous Integration setups.</para> <para>If set to a True-like value (<literal>"1"</literal>, <literal>"true"</literal> or <literal>"True"</literal>) will cache to a file named -<filename>.scons_msvc_cache.json</filename> in the user's home directory. +<filename>scons_msvc_cache.json</filename> in the user's home directory. If set to a pathname, will use that pathname for the cache.</para> -<para>Note: use this cache with caution as it -might be somewhat fragile: while each major toolset version -(e.g. Visual Studio 2017 vs 2019) and architecture pair will get separate -cache entries, if toolset updates cause a change -to settings within a given release series, &scons; will not -detect the change and will reuse old settings. -Remove the cache file in case of problems with this. -&scons; will ignore failures reading or writing the file -and will silently revert to non-cached behavior in such cases.</para> - -<para><emphasis>Available since &scons; 3.1 (experimental)</emphasis>.</para> +<para>Note: this implementation may still be somewhat fragile. +In case of problems, remove the cache file - recreating with +fresh info normally resolves any issues. +&SCons; ignores failures reading or writing the cache file +and will silently revert to non-cached behavior in such cases. +</para> + +<para><emphasis>New in 3.1 (experimental). +The default cache file name was changed to +its present value in 4.4, and contents were expanded.</emphasis> +</para> </listitem> </varlistentry>
[example] Fixed incorrect inference result with Tiny Yolo v2 Face model for Face Recognition example. fixes
@@ -283,7 +283,7 @@ class FaceRecognitionExample extends BaseCameraExample { dHeight: this._currentCoModelInfo.inputSize[0], }, }; - await this._coRunner.run(this._currentInputElement, drawOptions); + await this._coRunner.run(element, drawOptions); let frOutput = this._coRunner.getOutput(); inferenceTime += parseFloat(frOutput.inferenceTime); let [...normEmbedding] = Float32Array.from(frOutput.outputTensor);
FIX: removed deprecated 'Plot' keyword handling; fixed the docstring for the plot argument of the singular values plot
@@ -1059,7 +1059,7 @@ def singular_values_plot(syslist, omega=None, omega : array_like List of frequencies in rad/sec to be used for frequency response plot : bool - If True (default), plot magnitude and phase + If True (default), generate the singular values plot omega_limits : array_like of two values Limits of the frequency vector to generate. If Hz=True the limits are in Hz otherwise in rad/s. @@ -1091,14 +1091,6 @@ def singular_values_plot(syslist, omega=None, # Make a copy of the kwargs dictionary since we will modify it kwargs = dict(kwargs) - # Check to see if legacy 'Plot' keyword was used - if 'Plot' in kwargs: - import warnings - warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'", - FutureWarning) - # Map 'Plot' keyword to 'plot' keyword - plot = kwargs.pop('Plot') - # Get values for params (and pop from list to allow keyword use in plot) dB = config._get_param('bode', 'dB', kwargs, _bode_defaults, pop=True) Hz = config._get_param('bode', 'Hz', kwargs, _bode_defaults, pop=True)
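A hedged usage sketch of the function touched above; the example transfer function and frequency grid are arbitrary, and exact return values depend on the python-control version:

```python
import numpy as np
import control as ct

# Arbitrary second-order example system; any LTI system (or list of systems) works.
sys = ct.tf([1], [1, 2, 1])
omega = np.logspace(-2, 2, 200)  # frequencies in rad/s

# plot=True (the default) generates the singular values plot described in the docstring.
ct.singular_values_plot(sys, omega, dB=True)
```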
make window management on mac work a bit better At least for me on a MacBook pro on ventura, the current implementations of `window_move_desktop_{left,right}` produce an error, and even if they didn't they would use keybindings that don't work by default. This fixes that error and uses the default keybindings of ctrl-left/right.
@@ -14,14 +14,16 @@ def _drag_window_mac(win=None): if win is None: win = ui.active_window() fs = win.children.find(AXSubrole="AXFullScreenButton")[0] - rect = fs.AXFrame["$rect2d"] - x = rect["x"] + rect["width"] + 5 - y = rect["y"] + rect["height"] / 2 + rect = fs.AXFrame + x = rect.x + rect.width + 5 + y = rect.y + rect.height / 2 + previous_position = ctrl.mouse_pos() ctrl.mouse_move(x, y) ctrl.mouse_click(button=0, down=True) yield time.sleep(0.1) ctrl.mouse_click(button=0, up=True) + ctrl.mouse_move(*previous_position) @ctx.action_class("user") @@ -41,15 +43,16 @@ class MacActions: def window_move_desktop_left(): with _drag_window_mac(): - actions.key("ctrl-cmd-alt-left") + actions.user.desktop_last() def window_move_desktop_right(): with _drag_window_mac(): - actions.key("ctrl-cmd-alt-right") + actions.user.desktop_next() def window_move_desktop(desktop_number: int): + # TODO: amethyst stuff should be pulled out into a separate file if ui.apps(bundle="com.amethyst.Amethyst"): actions.key(f"ctrl-alt-shift-{desktop_number}") else: with _drag_window_mac(): - actions.key(f"ctrl-{desktop_number}") + actions.user.desktop(desktop_number)
netskope-readme missing #
@@ -434,7 +434,7 @@ Take an action on a quarantined file. There is no context output for this command. -### Command example +#### Command example !netskope-quarantined-file-update file_id=1M_RR4jLPUwclKOhqZ7sPSqkMNS-S6Vyr quarantine_profile_id=1 action=block #### Human Readable Output
Add Google Analytics to built docs This is currently using the same tag as voxel51.com
</footer> {% endblock %} + +{% block footer %} +<!-- Global site tag (gtag.js) - Google Analytics --> +<script async src="https://www.googletagmanager.com/gtag/js?id=UA-141773487-1"></script> +<script> + window.dataLayer = window.dataLayer || []; + function gtag(){dataLayer.push(arguments);} + gtag('js', new Date()); + + gtag('config', 'UA-141773487-1'); +</script> +{% endblock %}
Proof of concept: export process graph to Graphviz for visualization Graphviz graphs have `_repr_svg_()`, so they render nicely in a Jupyter notebook
@@ -881,3 +881,16 @@ class ImageCollectionClient(ImageCollection): newCollection = ImageCollectionClient(id, newbuilder, self.session) newCollection.bands = self.bands return newCollection + + def to_graphviz(self): + """ + Build a graphviz DiGraph from the process graph + :return: + """ + import graphviz + graph = graphviz.Digraph() + for name, process in self.graph.items(): + args = process.get("arguments", {}) + if "data" in args and "from_node" in args["data"]: + graph.edge(args["data"]["from_node"], name) + return graph
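A small sketch of what the returned object offers; the node names are made up, and rendering requires the Graphviz `dot` binary to be installed:

```python
import graphviz

# A Digraph like the one assembled by to_graphviz(): one edge per "from_node" reference.
graph = graphviz.Digraph()
graph.edge("loadcollection1", "ndvi1")
graph.edge("ndvi1", "saveresult1")

# In a Jupyter cell the object renders inline via its SVG repr hook;
# outside a notebook it can be piped to bytes or written to disk explicitly.
svg_bytes = graph.pipe(format="svg")
graph.render("process_graph", format="svg", cleanup=True)
```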
Fix E741 Variable name l is ambiguous, as in, "looks like other chars"
@@ -192,27 +192,27 @@ def load_loggers(m, config, quiet): type = config.get(logger, "type") config_options = get_config_dict(config, logger) if type == "db": - l = Loggers.db.DBFullLogger(config_options) + new_logger = Loggers.db.DBFullLogger(config_options) elif type == "dbstatus": - l = Loggers.db.DBStatusLogger(config_options) + new_logger = Loggers.db.DBStatusLogger(config_options) elif type == "logfile": - l = Loggers.file.FileLogger(config_options) + new_logger = Loggers.file.FileLogger(config_options) elif type == "html": - l = Loggers.file.HTMLLogger(config_options) + new_logger = Loggers.file.HTMLLogger(config_options) elif type == "network": - l = Loggers.network.NetworkLogger(config_options) + new_logger = Loggers.network.NetworkLogger(config_options) elif type == "json": - l = Loggers.file.JsonLogger(config_options) + new_logger = Loggers.file.JsonLogger(config_options) else: sys.stderr.write("Unknown logger type %s\n" % type) continue - if l is None: + if new_logger is None: print("Creating logger %s failed!" % logger) continue if not quiet: print("Adding %s logger %s" % (type, logger)) - m.add_logger(logger, l) - del(l) + m.add_logger(logger, new_logger) + del(new_logger) return m
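A tiny illustration of the lint rule being fixed; the values are hypothetical stand-ins, not SimpleMonitor code:

```python
# Before: flake8 reports "E741 ambiguous variable name 'l'",
# because 'l' is easily misread as '1' or 'I'.
# l = {"type": "logfile", "filename": "monitor.log"}

# After: a descriptive name, mirroring the rename in the patch above.
new_logger = {"type": "logfile", "filename": "monitor.log"}  # stand-in for a Logger object
loggers = {"default": new_logger}
print(loggers)
```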
vdb.ondisk: make ConfiguredTree inherit from wrapper instead of multiplex Similar to how the configured binpkg tree is handled.
@@ -18,16 +18,18 @@ from pkgcore.config import ConfigHint from pkgcore.ebuild import ebuild_built from pkgcore.ebuild.cpv import versioned_CPV from pkgcore.ebuild.errors import InvalidCPV -from pkgcore.repository import errors, multiplex, prototype +from pkgcore.repository import errors, prototype, wrapper demandload( 'pkgcore.log:logger', + "pkgcore.package:base@pkg_base", 'pkgcore.vdb:repo_ops', 'pkgcore.vdb.contents:ContentsFile', ) class tree(prototype.tree): + livefs = True configured = False configurables = ("domain", "settings") @@ -199,17 +201,31 @@ class tree(prototype.tree): return '%s: location %s' % (self.repo_id, self.location) -class ConfiguredTree(multiplex.tree): +class ConfiguredTree(wrapper.tree, tree): - livefs = True + configured = True frozen_settable = False - def __init__(self, raw_vdb, domain, domain_settings): + def __init__(self, vdb, domain, domain_settings): + class package_class(pkg_base.wrapper): + + _operations = self._generate_operations + built = True + __slots__ = () + + def __str__(self): + return "installed pkg: %s::%s, source repo %r" % ( + self.cpvstr, self.repo.repo_id, self.source_repository) + + wrapper.tree.__init__(self, vdb, package_class=package_class) self.domain = domain self.domain_settings = domain_settings - self.raw_vdb = raw_vdb - multiplex.tree.__init__(self, raw_vdb) - frozen = klass.alias_attr("raw_vdb.frozen") + def _generate_operations(self, domain, pkg, **kwargs): + pkg = pkg._raw_pkg + return ebd.built_operations( + domain, pkg, initial_env=self.domain_settings, **kwargs) + + tree.configure = ConfiguredTree
mpir: add m4 to build_requires * mpir: add m4 to build_requires To fix checking for suitable m4... configure: error: No usable m4 in $PATH or /usr/5bin * Don't require m4 for Visual Studio
@@ -44,6 +44,8 @@ class MpirConan(ConanFile): del self.settings.compiler.cppstd def build_requirements(self): + if self.settings.compiler != "Visual Studio": + self.build_requires("m4/1.4.18") self.build_requires("yasm/1.3.0") if tools.os_info.is_windows and self.settings.compiler != "Visual Studio" and \ "CONAN_BASH_PATH" not in os.environ and tools.os_info.detect_windows_subsystem() != "msys2":
compiled_types.get_context: forward all arguments to other get_context TN:
@@ -18,7 +18,7 @@ from langkit.utils import (DictProxy, common_ancestor, issubtype, memoized, not_implemented_error, type_check) -def get_context(): +def get_context(*args, **kwargs): """ Return the current compilation context, see langkit.compile_context.get_context. @@ -29,7 +29,7 @@ def get_context(): :rtype: CompileCtx """ from langkit.compile_context import get_context - return get_context() + return get_context(*args, **kwargs) class GeneratedFunction(object):
Add Confluence markup format parameter It will allow creating Confluence pages that will be interpreted with the wiki markup syntax. More info about Confluence formats here:
@@ -279,7 +279,8 @@ class Confluence(AtlassianRestAPI): params['status'] = status return self.delete(url, params=params) - def create_page(self, space, title, body, parent_id=None, type='page'): + def create_page(self, space, title, body, parent_id=None, type='page', + representation='storage'): """ Create page from scratch :param space: @@ -287,17 +288,20 @@ class Confluence(AtlassianRestAPI): :param body: :param parent_id: :param type: + :param representation: OPTIONAL: either Confluence 'storage' or 'wiki' markup format :return: """ log.info('Creating {type} "{space}" -> "{title}"'.format(space=space, title=title, type=type)) + if representation not in ['wiki', 'storage']: + raise ValueError("Wrong value for representation, it should be either wiki or storage") url = 'rest/api/content/' data = { 'type': type, 'title': title, 'space': {'key': space}, - 'body': {'storage': { + 'body': {representation: { 'value': body, - 'representation': 'storage'}}} + 'representation': representation}}} if parent_id: data['ancestors'] = [{'type': type, 'id': parent_id}] return self.post(url, data=data)
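A hedged usage sketch of the new parameter; the URL, credentials, space key and page body are placeholders:

```python
from atlassian import Confluence

confluence = Confluence(
    url="https://confluence.example.com",
    username="bot",
    password="***",
)

# With representation='wiki' the body is interpreted as Confluence wiki markup
# instead of the default 'storage' (XHTML-based) format.
confluence.create_page(
    space="DEV",
    title="Release notes",
    body="h1. Release 1.2\n* first change\n* second change",
    representation="wiki",
)
```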
Update README.md update manual options
@@ -261,14 +261,15 @@ optional arguments: -nt, --notag Overrides and disables tagging when using the automated option -nd, --nodelete Overrides and disables deleting of original files - -pr, --preserveRelative + -pr, --preserverelative Preserves relative directories when processing multiple files using the copy-to or move-to functionality - -cmp4, --convertmp4 Overrides convert-mp4 setting in autoProcess.ini - enabling the reprocessing of mp4 files - -fc, --forceconvert Overrides force-convert setting in autoProcess.ini - enabling the reprocessing of mp4 files, also sets convert-mp4 to True + -pse, --processsameextensions + Overrides process-same-extenions setting in + autoProcess.ini enabling the reprocessing of files + -fc, --forceconvert Overrides force-convert setting in autoProcess.ini and + also enables process-same-extenions if true forcing the conversion of files -oo, --optionsonly Display generated conversion options only, do not perform conversion ```
Add check for add_reactions permission Fixes the bug report by webhp where GearBot spits out a 403 error when not having add reactions permission in the guild. (https://canary.discordapp.com/channels/365498559174410241/474303535153020969/655626715024064512)
@@ -30,6 +30,7 @@ class Reminders(BaseCog): if ctx.invoked_subcommand is None: await ctx.invoke(self.bot.get_command("help"), query="remind") + @commands.bot_has_permissions(add_reactions=True) @remind.command("me", aliases=["add", "m", "a"]) async def remind_me(self, ctx, duration: Duration, *, reminder: ReminderText): """remind_me_help"""
ci: fail when the playbook fails This commit makes the CI job fail when the Ansible playbook fails.
@@ -366,7 +366,7 @@ def run_e2e_job(distro, driver, masters, workers, str(job_type), str(launch_from)) print(deployment_command) - launch_output = subprocess.run(deployment_command, shell=True) + launch_output = subprocess.run(deployment_command, shell=True, check=True) print("'launch_e2e.py' ==> ./ci/launch_e2e.sh output") print(launch_output) except Exception as e: @@ -394,7 +394,7 @@ def run_e2e_job(distro, driver, masters, workers, print("'launch_e2e.py' ==> Ara command") ara_command = "./ci/launch_e2e_ara.sh %s" % (str(job_name) + "-" + str(file_output)) print(ara_command) - ara_output = subprocess.run(ara_command, shell=True) + ara_output = subprocess.run(ara_command, shell=True, check=True) print("'launch_e2e.py' ==> ./ci/launch_e2e_ara.sh output") print(ara_output) except Exception as e:
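For reference, a minimal sketch of what `check=True` changes; the command string and its arguments are placeholders:

```python
import subprocess

try:
    # With check=True a non-zero exit status raises CalledProcessError,
    # so the surrounding CI job fails instead of silently continuing.
    subprocess.run("./ci/launch_e2e.sh centos8 libvirt 3 1", shell=True, check=True)
except subprocess.CalledProcessError as err:
    print(f"deployment failed with exit code {err.returncode}")
    raise
```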
Tell Android users how to have enough storage Close
@@ -146,6 +146,8 @@ If you do not see any devices, you can create and start an emulator by running: --name robotfriend --abi x86 \ --package 'system-images;android-28;default;x86' --device pixel + $ echo 'disk.dataPartition.size=4096M' >> $HOME/.android/avd/robotfriend.avd/config.ini + $ {emulator_path} -avd robotfriend & """.format(
[bugfix] Fix another error in Fix failing tests after
@@ -1185,7 +1185,7 @@ class TestLagpattern(DefaultSiteTestCase): for info, time in patterns.items(): lag = api.lagpattern.search(info) self.assertIsNotNone(lag) - self.assertEqual(int(lag.group('lag')), time) + self.assertEqual(float(lag.group('lag')), time) if __name__ == '__main__': # pragma: no cover
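A quick illustration of why the assertion had to change (the lag value is made up): `int()` rejects the fractional strings that `float()` accepts.

```python
value = "0.613"  # a fractional maxlag value

print(float(value))  # 0.613

try:
    int(value)
except ValueError as err:
    print(err)  # invalid literal for int() with base 10: '0.613'
```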
Change minWithdrawable from 1 msat to 1000 msats for the lnurl_full_withdraw 1 msat is fractional and thus unacceptable anyway. Also, when withdrawing from lnbits itself, this interferes with the input form to expect fractional 1.001 steps
@@ -129,7 +129,7 @@ async def lnurl_full_withdraw(): _external=True, ), "k1": "0", - "minWithdrawable": 1 if wallet.withdrawable_balance else 0, + "minWithdrawable": 1000 if wallet.withdrawable_balance else 0, "maxWithdrawable": wallet.withdrawable_balance, "defaultDescription": f"{LNBITS_SITE_TITLE} balance withdraw from {wallet.id[0:5]}", "balanceCheck": url_for(
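The arithmetic behind the change, as a quick sketch: 1000 millisatoshis make one satoshi, so 1 msat is a fractional amount.

```python
MSAT_PER_SAT = 1000

old_min_withdrawable = 1      # msat -> 0.001 sat, a fractional step
new_min_withdrawable = 1000   # msat -> exactly 1 sat

print(old_min_withdrawable / MSAT_PER_SAT)  # 0.001
print(new_min_withdrawable / MSAT_PER_SAT)  # 1.0
```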
Update manual.py tvid fix
@@ -375,7 +375,7 @@ def main(): tagdata = [3, tvdbid, season, episode] else: tagdata = getinfo(path, silent=silent, tvdbid=tvdbid) - elif ((args['imdbid'] or args['tmdbid']) and not args['tvdbid']): + elif ((args['imdbid'] or args['tmdbid']) and not args['tvid']): if (args['imdbid']): imdbid = args['imdbid'] tagdata = [1, imdbid]
docs: remove fixed Ansible limitation The solution was that Mitogen's loader should emulate the behaviour of ansible.executor.module_common, which restricts dependency scanning to the ansible.module_utils namespace.
@@ -77,10 +77,6 @@ Limitations * Only the ``sudo`` become method is available, however adding new methods is straightforward, and eventually at least ``su`` will be included. -* In some cases the module loader may aggressively upload optional dependencies - available on the Ansible host machine but not on the target machine. It's not - yet clear what the correct behaviour should be. - * Due to the integration approach, the only supported strategy is ``linear``, however this should change in the future.
CompileCtx.main_rule_name: turn into a property TN:
@@ -363,8 +363,6 @@ class CompileCtx(object): self.grammar = grammar ":type: langkit.parsers.Grammar" - self.main_rule_name = grammar.main_rule_name - self.python_api_settings = ( PythonAPISettings(lib_name.lower, self.c_api_settings) if enable_python_api else None @@ -557,6 +555,15 @@ class CompileCtx(object): :type: dict[str, mako.template.Template] """ + @property + def main_rule_name(self): + """ + Shortcut for "self.grammar.main_rule_name". + + :rtype: str + """ + return self.grammar.main_rule_name + def sorted_types(self, type_set): """ Turn "type_set" into a list of types sorted by name.
wildfire download report save as entry fixes
@@ -171,7 +171,7 @@ script: var currentTime = new Date(); var fileName = command + '_at_' + currentTime.getTime(); - return {Type: entryTypes.note, + return {Type: 9, FileID: saveFile(resPDF), File: fileName, Contents: fileName,
MAINT: remove commented code Remove old code used to support COSMIC data.
@@ -565,10 +565,6 @@ def load_netcdf_pandas(fnames, strict_meta=False, file_format='NETCDF4', # Prepare dataframe index for this netcdf file time_var = loaded_vars.pop(epoch_name) - - # Convert from GPS seconds to seconds used in pandas (unix time, - # no leap seconds) - # time_var = convert_gps_to_unix_seconds(time_var) loaded_vars[epoch_name] = pds.to_datetime( (1.0E6 * time_var).astype(np.int64)) running_store.append(loaded_vars)
Change the default intermediate directory to be under /tmp/. If we keep it under /opt, this could cause an issue when we convert to singularity image, which will attempt to create a directory without root permission.
@@ -64,7 +64,7 @@ flags.DEFINE_string('output_vcf', None, flags.DEFINE_string('output_gvcf', None, 'Optional. Path where we should write gVCF file.') flags.DEFINE_string( - 'intermediate_results_dir', '/opt/tmp_output', + 'intermediate_results_dir', '/tmp/deepvariant_tmp_output', 'Optional. If specified, this should be an existing ' 'directory that is visible insider docker, and will be ' 'used to to store intermediate outputs.')
Change datatype of revalidator, handler threads Currently these parameters are of number type. They fail string validation at vs_config. Default set to none to retain current behavior. Closes-Bug:
@@ -43,15 +43,15 @@ parameters: OvsHandlerCores: description: > Number of cores to be used for ovs handler threads. - type: number - default: 1 + type: string + default: "" tags: - role_specific OvsRevalidatorCores: description: > Number of cores to be used for ovs revalidator threads. - type: number - default: 1 + type: string + default: "" tags: - role_specific OvsDpdkMemoryChannels:
Exclude the stdlib from flake8 CI check We should hopefully be getting rid of this entire subdirectory soon anyway. This will make PRs to flake8-pyi a lot easier.
@@ -35,10 +35,8 @@ per-file-ignores = # https://github.com/PyCQA/flake8/issues/1079 # F811 redefinition of unused '...' stubs/*.pyi: E301, E302, E305, E501, E701, E741, F401, F403, F405, F822, Y026, Y027 - stdlib/@python2/*.pyi: E301, E302, E305, E501, E701, E741, F401, F403, F405, F822, Y026, Y027 - stdlib/@python2/typing.pyi: E301, E302, E305, E501, E701, E741, F401, F403, F405, F811, F822, Y026, Y027 stdlib/typing.pyi: E301, E302, E305, E501, E701, E741, F401, F403, F405, F811, F822, Y026, Y034 # We are checking with Python 3 but many of the stubs are Python 2 stubs. builtins = buffer,file,long,raw_input,unicode,xrange -exclude = .venv*,.git,*_pb2.pyi +exclude = .venv*,.git,*_pb2.pyi,stdlib/@python2/*
explicit keyword args for seaborn plot fn seaborn 0.12 got released. Keyword arguments are now required for plotting functions:
@@ -926,8 +926,8 @@ def calibration_plot( order = min(3, len(mean_predicted_values[i]) - 1) sns.regplot( - mean_predicted_values[i], - fraction_positives[i], + x=mean_predicted_values[i], + y=fraction_positives[i], order=order, x_estimator=np.mean, color=colors[i],
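A hedged sketch of the calling-convention difference; the data below is synthetic:

```python
import numpy as np
import seaborn as sns

x = np.linspace(0, 1, 20)
y = x + np.random.normal(scale=0.05, size=x.size)

# seaborn 0.12 made most plotting arguments keyword-only, so the old
# positional call raises a TypeError:
# sns.regplot(x, y, order=2)

# Explicit keywords work on both old and new seaborn versions.
sns.regplot(x=x, y=y, order=2, x_estimator=np.mean)
```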
Update android_pua.txt Regex covers all subdomains from ```a.appjiagu.com``` up to ```z.appjiagu.com``` only.
@@ -36,3 +36,13 @@ admob.linkpc.net easyphonetrack.com /spy_phone/test_connection.php + +# Reference: https://www.virustotal.com/gui/file/e1288cb54727e673ffbd90ef4fcda2079d9f8a3d7b22b54b4e4726864462987c/detection +# Reference: https://www.virustotal.com/gui/file/47ea88989bc1b1e90ea66d535c8c412994dd6eddaee82a4b69d3cd0922d7b219/detection +# Reference: https://www.virustotal.com/gui/file/4bd5d41f9008f2e83a4b20f1104b726d43396eda52466ac3a066f90e432fa509/detection + +[a-z]{1}\.appjiagu\.com +/ad-service/ad/mark +/jiagu/mark/msg +/jiagu/msgs +/jiagu/t/infos
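A quick check of the claim, assuming the pattern is matched against the full hostname (as host-based rule lists typically do):

```python
import re

pattern = re.compile(r"[a-z]{1}\.appjiagu\.com")

print(bool(pattern.fullmatch("a.appjiagu.com")))    # True
print(bool(pattern.fullmatch("z.appjiagu.com")))    # True
print(bool(pattern.fullmatch("ads.appjiagu.com")))  # False - more than one letter
```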
Handle resources with no name This commit prevents silent failures when encountering a resource with no name, which has been observed with glance images that are in a "queued" state. Closes-Bug:
@@ -167,7 +167,7 @@ def obj_from_name(resource_config, resources, typename): pattern = re.compile(patternstr) matching = [resource for resource in resources - if re.search(pattern, resource.name)] + if re.search(pattern, resource.name or "")] if not matching: raise exceptions.InvalidScenarioArgument( "{typename} with pattern '{pattern}' not found".format(
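A minimal reproduction of the failure mode being guarded against; the pattern and resource are illustrative:

```python
import re

pattern = re.compile(r"^image-.*$")
name = None  # e.g. a glance image still in the "queued" state has no name yet

# re.search(pattern, name) would raise:
#   TypeError: expected string or bytes-like object
match = re.search(pattern, name or "")  # empty-string fallback: simply no match
print(match)  # None
```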
0 must be a composite number
# Requirement https://sample-programs.therenegadecoder.com/projects/prime-number/ # Issue #1834 # Accept a number on command line and print if it is Prime or Composite -# Prime Numbers will always be Odd and have only 1 Divisor, itself.. Use that to determine Composite. +# Prime Numbers will have only 1 Divisor, itself.. Use that to determine Composite. use warnings; my ($prime) = @ARGV; -if ($#ARGV < 2){ - print("Usage: please input a non-negative integer"); +$num_args = $#ARGV + 1; + +if ($num_args <1 ) { + print "PHAT Usage: please input a non-negative intege"; exit(0); } -if ( $prime <= 0 ) { + +if ( $prime < 0 ) { print("Usage: please input a non-negative integer"); exit(0); } else { if ( $prime % 2 == 0 ) { print("Composite"); + exit(0); } else { + $i = 0; $num_of_divisors = 0; for ( $i = $prime ; $i > 1 ; $i = $i - 2 ) {
Update amazon-eks.template.yaml Formatting changes to Description text for ManagedNodeGroupAMIType. Reordered AllowedValues for KubernetesVersion to match deployment guide.
@@ -134,7 +134,7 @@ Parameters: Description: Choose if you want to use a managed node group. If you select "yes", you must select Kubernetes Version 1.14 or higher. Type: String ManagedNodeGroupAMIType: - Description: Select one of the two AMI Types for your Managed Node Group (Only applies if you selected Managed Node Group "yes". ). GPU instance types should use the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support. Non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon EKS-optimized Linux AMI. + Description: Select one of the two AMI types for your managed node group (only applies if you chose "yes" for ManagedNodeGroup). GPU instance types should use the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support. Non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon EKS-optimized Linux AMI. AllowedValues: [ "AL2_x86_64", " AL2_x86_64_GPU"] Default: "AL2_x86_64" Type: String @@ -162,7 +162,7 @@ Parameters: Default: "" KubernetesVersion: Type: String - AllowedValues: [ "1.15", "1.14", "1.13" ] + AllowedValues: [ "1.13", "1.14", "1.15" ] Default: "1.15" ProvisionClusterAutoScaler: Type: String
Metadata update Fixed bug in assigning units to metadata
@@ -610,7 +610,8 @@ def extract_modelled_observations(inst=None, model=None, inst_name=[], # Update the instrument object and attach units to the metadata for mdat in interp_data.keys(): attr_name = mdat.split("{:s}_".format(model_label))[-1] - inst.meta.data.units[mdat] = model.data_vars[attr_name].units + inst.meta.__setitem__(mdat, {inst.meta.units_label: + model.data_vars[attr_name].units}) if inst.pandas_format: inst[mdat] = pds.Series(interp_data[mdat], index=inst.index)
Try another version of apt/dpkg killing. Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -37,9 +37,14 @@ sudo apt-get purge -y unattended-upgrades cat /etc/apt/sources.list -# For the bestest luck, kill -9 now -sudo pkill -9 apt-get || true - -# Bail out early if we detect apt/dpkg is stuck -ps auxfww | (! grep '[a]pt') -ps auxfww | (! grep '[d]pkg') +# For the bestest luck, kill again now +sudo pkill apt || true +sudo pkill dpkg || true + +# Try to detect if apt/dpkg is stuck +if ps auxfww | grep '[a]pt'; then + echo "WARNING: There are leftover apt processes; subsequent apt update will likely fail" +fi +if ps auxfww | grep '[d]pkg'; then + echo "WARNING: There are leftover dpkg processes; subsequent apt update will likely fail" +fi
Update cpu_asimdfhm.c Updated `vfmlal_low_u32` and `vfmlslq_high_u32` to their new `f16` names. Described here: Many of the intrinsics had their names updated. Supposedly the previous specifications were not published, so the old names are not required.
@@ -10,8 +10,8 @@ int main(void) float32x4_t vf = vdupq_n_f32(1.0f); float32x2_t vlf = vdup_n_f32(1.0f); - int ret = (int)vget_lane_f32(vfmlal_low_u32(vlf, vlhp, vlhp), 0); - ret += (int)vgetq_lane_f32(vfmlslq_high_u32(vf, vhp, vhp), 0); + int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0); + ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0); return ret; }
Replace fn::join with fn::sub in cfn template Improves readability
"Effect": "Allow", "Resource": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":cloudformation:", - { - "Ref": "AWS::Region" - }, - ":", - { - "Ref": "AWS::AccountId" - }, - ":stack/", - { - "Ref": "AWS::StackName" - }, - "/*" - ] - ] + "Fn::Sub": "arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:stack/${AWS::StackName}/*" } ] }, "Effect": "Allow", "Resource": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":dynamodb:", - { - "Ref": "AWS::Region" - }, - ":", - { - "Ref": "AWS::AccountId" - }, - ":table/", - { - "Ref": "DynamoDBTable" - } - ] - ] + "Fn::Sub": "arn:${AWS::Partition}:dynamodb:${AWS::Region}:${AWS::AccountId}:table/${DynamoDBTable}" } ] }, "Effect": "Allow", "Resource": [ { - "Fn::Join": [ - "", - [ - "arn:", - { - "Ref": "AWS::Partition" - }, - ":s3:::", - { - "Ref": "AWS::Region" - }, - "-aws-parallelcluster/*" - ] - ] + "Fn::Sub": "arn:${AWS::Partition}:s3:::${AWS::Region}-aws-parallelcluster/*" } ] },
Enable tests working on ROCm 2.1 dual gfx906 Summary: Pull Request resolved:
@@ -1077,6 +1077,7 @@ class TestCuda(TestCase): self.assertEqual(t._version, old_version + 1) @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected") + # Note: fails sometimes on the CI, passes on dual gfx906 @skipIfRocm def test_broadcast_coalesced(self): numel = 5 @@ -1147,7 +1148,6 @@ class TestCuda(TestCase): self.assertEqual(t._version, old_version + 1) @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected") - @skipIfRocm def test_reduce_add_coalesced(self): numel = 5 num_bytes = numel * 8 @@ -1211,6 +1211,7 @@ class TestCuda(TestCase): def test_scatter_gpu(self): self._test_scatter(torch.randn(4, 4).cuda(), dim=0) + # Note: This test fails on ROCm CI gfx900 but passes on gfx906 @skipIfRocm def test_scatter_gpu_dim(self): self._test_scatter(torch.randn(4, 4).cuda(), dim=1) @@ -1553,6 +1554,7 @@ class TestCuda(TestCase): self.assertTrue("torch.cuda.Event" in e.__repr__()) @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU") + # Note: fails sometimes on the CI, passes on dual gfx906 @skipIfRocm def test_stream_context(self): s0 = torch.cuda.current_stream() @@ -1636,7 +1638,6 @@ class TestCuda(TestCase): self.assertTrue(s1.query()) @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU") - @skipIfRocm def test_streams_multi_gpu_eq(self): d0 = torch.device('cuda:0') d1 = torch.device('cuda:1') @@ -2577,7 +2578,6 @@ class TestCuda(TestCase): self.assertEqual(t.cpu().bincount(), t.bincount()) self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w)) - @skipIfRocm def test_histc_cuda(self): _TestTorchMixin._test_histc(self, device='cuda') @@ -2591,6 +2591,7 @@ class TestCuda(TestCase): a = torch.ones(65536).cuda().half() self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536) + # Note: This test fails on ROCm CI gfx900 but passes on gfx906 @skipIfRocm # Test that wrap_with_cuda_memory_check successfully detects leak def test_cuda_memory_leak_detection(self):
BUG: fix to check before applying `shlex.split` `shlex.split` will try to read stdin if `None` is passed, so change to check before applying it. see
@@ -466,10 +466,8 @@ def customize(self, dist = None): noarch = self.distutils_vars.get('noarch', noopt) debug = self.distutils_vars.get('debug', False) - f77 = shlex.split(self.command_vars.compiler_f77, - posix=(os.name == 'posix')) - f90 = shlex.split(self.command_vars.compiler_f90, - posix=(os.name == 'posix')) + f77 = self.command_vars.compiler_f77 + f90 = self.command_vars.compiler_f90 f77flags = [] f90flags = [] @@ -477,8 +475,10 @@ def customize(self, dist = None): fixflags = [] if f77: + f77 = shlex.split(f77, posix=(os.name == 'posix')) f77flags = self.flag_vars.f77 if f90: + f90 = shlex.split(f90, posix=(os.name == 'posix')) f90flags = self.flag_vars.f90 freeflags = self.flag_vars.free # XXX Assuming that free format is default for f90 compiler. @@ -490,8 +490,8 @@ def customize(self, dist = None): # environment variable has been customized by CI or a user # should perhaps eventually be more throughly tested and more # robustly handled - fix = shlex.split(fix, posix=(os.name == 'posix')) if fix: + fix = shlex.split(fix, posix=(os.name == 'posix')) fixflags = self.flag_vars.fix + f90flags oflags, aflags, dflags = [], [], []
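A standalone sketch of the guard pattern applied in the patch; the compiler strings are placeholders:

```python
import os
import shlex

compiler_f77 = None           # e.g. no Fortran 77 compiler configured
compiler_f90 = "gfortran -O2"

# shlex.split(None) historically fell back to reading sys.stdin,
# so each value is checked before it is split.
if compiler_f77:
    compiler_f77 = shlex.split(compiler_f77, posix=(os.name == "posix"))
if compiler_f90:
    compiler_f90 = shlex.split(compiler_f90, posix=(os.name == "posix"))

print(compiler_f77, compiler_f90)  # None ['gfortran', '-O2']
```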
Update with notes from devs Updating to include notes from Joram + Christopher.
-============================= -WIP: Developer Tool Kit -============================= +WIP: Developer Toolkit +====================== -This document is a work-in-progress for summarizing developer toolkit capabilities currently under development. +High Level Overview +------------------- -High-level: +This document is a work-in-progress document summarizing developer toolkit capabilities currently under development. -- A plug-in architecture is being developed for server, web client and mobile client extensions -- Video and audio calling currently in Mattermost will be re-designed to use the plug-in architecture, as one of many video and audio calling solutions. +The Mattermost Developer Toolkit is meant to give community and in-house customer developers the power to deeply integrate with and extend Mattermost server and clients to better meet their requirements. We are still in the early stages of architecting the toolkit but below is a non-definitive list of what we'd like to offer. -Early feedback: +Features of the Developer Toolkit +--------------------------------- -- We want to support people embedding apps into Mattermost and embedding Nattermost into their apps. -- Backend plugins -- Frontend plugins -- dDfault integrations (jira, github, etc) -- Override profile popup plugin -- Override video plugin -- Slash cmds -- Webhooks -- v4 APIs +1. The ability to build Go 1.8 server plugins to hook directly into server events (think new post events, user update events, etc.), have some sort of database access (possibly access to certain tables, ability to create new tables) and to add custom endpoints to extend the Mattermost REST API +2. The ability to build webapp client plugins to override existing UI components (replace posts with your custom component, use your own video service etc.), modify/extend some client driver to interact with custom server API endpoints, and to add whole new UI views in pre-determined places +3. The ability to build plugins similar to the webapp but for React Native apps for iOS and Android +4. A system or architecture to combine the above plugins into one easy-to-share and easy-to-install package +5. A market or directory to find official and/or certified by Mattermost plugins and a process to get your plugin certified +6. A guide or system to allow the embedding of Mattermost into other apps as a chat service +7. Mattermost HTTP REST APIv4 allowing for much more powerful server interaction +8. Webhooks and slash commands to allow easy, low-effort extension and integration +9. All the documentation required to support the use and building of all of the above + +Example Uses +------------ + +Some examples of the things we will use the plugin architecture for are: +1. Building common integrations such as Jira, GitHub, etc and including them by default in Mattermost +2. Redesigning the current video and audio calling to use the plugin architecture, and offering it as one of many video and audio calling solutions
fix changelog typo Summary: Is the new bullet for 9.4 or 9.3? Test Plan: n/a Reviewers: cdecarolis
@@ -13,6 +13,8 @@ opt_in: * Intermediate Storage and System Storage now default to the first provided storage definition when no configuration is provided. Previously, it would be necessary to provide a run config for storage whenever providing custom storage definitions, even if that storage required no run configuration. Now, if the first provided storage definition requires no run configuration, the system will default to using it. +## 0.9.3 + **Breaking Changes** * Removed deprecated `--env` flag from CLI