message: string (13–484 characters)
diff: string (38–4.63k characters)
{TestSDK} Delete local context file when local context scenario test is finished

* {TestSDK} add delete local context file when test is finished
* Revert "{TestSDK} add delete local context file when test is finished" This reverts commit
* {TestSDK} add delete local context file when test is finished
@@ -198,6 +198,7 @@ class LocalContextScenarioTest(ScenarioTest):
    def tearDown(self):
        super(LocalContextScenarioTest, self).tearDown()
        self.cmd('local-context off')
+        self.cmd('local-context delete --all --purge -y')
        os.chdir(self.original_working_dir)
        if os.path.exists(self.working_dir):
            import shutil
First cut of dialogs using WinForms backend. info, question, confirm and error are implemented
+from .libs import WinForms
+
+
+def info(window, title, message):
+    WinForms.MessageBox.Show(message, title)
+
+
+def question(window, title, message):
+    result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)
+    return result == WinForms.DialogResult.Yes
+
+
+def confirm(window, title, message):
+    result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)
+    return result == WinForms.DialogResult.OK
+
+
+def error(window, title, message):
+    WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK, WinForms.MessageBoxIcon.Error)
+
+
+def stack_trace(window, title, message, content, retry=False):
+    raise NotImplementedError()
+
+
+def save_file(window, title, suggested_filename, file_types):
+    raise NotImplementedError()
fix(pdbparsing): parse "+X" and "X+" charges Fixes
@@ -367,7 +367,11 @@ class PDBFile(TextFile):
            element[i] = line[_element].strip()
            altloc_id[i] = line[_alt_loc]
            atom_id_raw[i] = line[_atom_id]
-            charge_raw[i] = line[_charge][::-1] # turn "1-" into "-1"
+            # turn "1-" into "-1", if necessary
+            if line[_charge][0] in "+-":
+                charge_raw[i] = line[_charge]
+            else:
+                charge_raw[i] = line[_charge][::-1]
            occupancy[i] = float(line[_occupancy].strip())
            b_factor[i] = float(line[_temp_f].strip())
Update circuits.rst Addresses
@@ -155,12 +155,12 @@ for accessing the results of your program execution:

 .. code-block:: python

     >>> print(result.state)
-    <FockState: num_modes=3, cutoff=15, pure=False, hbar=2.0>
+    <FockState: num_modes=3, cutoff=5, pure=True, hbar=2.0>
     >>> state = result.state
     >>> state.trace()    # trace of the quantum state
-    0.999998
+    0.9999999999999999
     >>> state.dm().shape  # density matrix
-    [5, 5, 5]
+    (5, 5, 5, 5, 5, 5)

 * :attr:`.Result.samples`: Measurement samples from any measurements performed.
Update twisted version This fixes
-Twisted==16.0.0
-Yapsy==1.11.223
+Twisted==16.6.0
 appdirs==1.4.0
 argparse==1.2.1
 colorama==0.3.7
@@ -24,7 +23,7 @@ six>=1.9.0
 slowaes==0.1a1
 txJSON-RPC==0.5
 wsgiref==0.1.2
-zope.interface==4.1.3
+zope.interface==4.3.3
 base58==0.2.2
 googlefinance==0.7
 pyyaml==3.12
Fix AntiSpam incorrectly invoking tempmute. The AntiSpam punish method incorrectly invoked the tempmute command, as it provided an unconverted duration argument. Since direct invocation of commands bypasses converters, the conversion of the duration string to a datetime object is now done manually. Closes
@@ -14,6 +14,7 @@ from bot.constants import (
    Guild as GuildConfig, Icons,
    STAFF_ROLES,
)
+from bot.converters import ExpirationDate

log = logging.getLogger(__name__)

@@ -37,6 +38,7 @@ class AntiSpam:
        self.bot = bot
        role_id = AntiSpamConfig.punishment['role_id']
        self.muted_role = Object(role_id)
+        self.expiration_date_converter = ExpirationDate()

    @property
    def mod_log(self) -> ModLog:
@@ -130,8 +132,9 @@ class AntiSpam:
            ping_everyone=AntiSpamConfig.ping_everyone
        )

-        # Run a tempmute
-        await mod_log_ctx.invoke(Moderation.tempmute, member, f"{remove_role_after}S", reason=reason)
+        # Since we're going to invoke the tempmute command directly, we need to manually call the converter.
+        dt_remove_role_after = await self.expiration_date_converter.convert(mod_log_ctx, f"{remove_role_after}S")
+        await mod_log_ctx.invoke(Moderation.tempmute, member, dt_remove_role_after, reason=reason)

    async def maybe_delete_messages(self, channel: TextChannel, messages: List[Message]):
        # Is deletion of offending messages actually enabled?
Implement GCSBucket.objects() Test stats after this CL ======================== Ran 58 tests in 4491.154s FAILED (SKIP=1, errors=12, failures=13)
@@ -2038,6 +2038,10 @@ class GCSBucket(BaseBucket):
        """
        return self._bucket['name']

+    @property
+    def objects(self):
+        return self._object_container
+
    def delete(self, delete_contents=False):
        """
        Delete this bucket.
Update README.md removed reference to Lamport-Diffie OTS as that has been dropped in favour of pure XMSS/WOTS+
 > Python-based blockchain ledger utilising hash-based one-time merkle tree signature scheme (XMSS) instead of ECDSA. Proof-of-stake block selection via a signed iterative hash chain reveal scheme which is both probabilistic and random (https://github.com/theQRL/pos).
 >
-> Hash-based signatures means larger transactions (6kb per tx, binary), longer keypair generation times and the need to record 'state' of transactions as each keypair can only be used once safely. Merkle tree usage enables a single address to be used for signing numerous transactions (up to 2^13 computationally easily enough). Transactions have an incremented nonce to allow wallets to know which MSS keypair to use - currently Lamport-Diffie and Winternitz one-time signatures as part of merkle signature schemes and XMSS/W-OTS+ are natively supported.
+> Hash-based signatures means larger transactions (6kb per tx, binary), longer keypair generation times and the need to record 'state' of transactions as each keypair can only be used once safely. Merkle tree usage enables a single address to be used for signing numerous transactions (up to 2^13 computationally easily enough). Transactions have an incremented nonce to allow wallets to know which MSS keypair to use - currently XMSS/W-OTS+ are natively supported.

 More information:

 - [theqrl.org](https://theqrl.org)
llvm, CompExecution: Don't regenerate data structure types Reuse the ones from input_CIM wrapper
@@ -177,18 +177,22 @@ class CompExecution: self.__frozen_vals = None self.__conds = None - #TODO: This should use compiled function + # At least the input_CIM wrapper should be generated with LLVMBuilderContext() as ctx: - # Data - c_data = _convert_llvm_ir_to_ctype(self._composition._get_data_struct_type(ctx)) - self.__data_struct = c_data(*self._composition._get_data_initializer()) + input_cim_fn_name = composition._get_node_wrapper(composition.input_CIM) + input_cim_fn = ctx.get_llvm_function(input_cim_fn_name) + + # Context + c_context = _convert_llvm_ir_to_ctype(input_cim_fn.args[0].type.pointee) + self.__context_struct = c_context(*composition.get_context_initializer()) # Params - c_param = _convert_llvm_ir_to_ctype(ctx.get_param_struct_type(self._composition)) + c_param = _convert_llvm_ir_to_ctype(input_cim_fn.args[1].type.pointee) self.__param_struct = c_param(*self._composition.get_param_initializer()) - # Context - c_context = _convert_llvm_ir_to_ctype(ctx.get_context_struct_type(self._composition)) - self.__context_struct = c_context(*self._composition.get_context_initializer()) + # Data + c_data = _convert_llvm_ir_to_ctype(input_cim_fn.args[3].type.pointee) + self.__data_struct = c_data(*self._composition._get_data_initializer()) + @property def __conditions(self):
Remove tulip snapshots from the suggested list

Problem: seems to be dead :(
Solution: Remove its mention from the baking doc.
@@ -86,7 +86,6 @@ In order to run a baker locally, you'll need a fully-synced local `tezos-node`.
 The fastest way to bootstrap the node is to import a snapshot.
 Snapshots can be downloaded from the following websites:

-* [Tulip Snapshots](https://snapshots.tulip.tools/#/)
 * [Tezos Giganode Snapshots](https://snapshots-tezos.giganode.io/)
 * [XTZ-Shots](https://xtz-shots.io/)
Fix p2p disconnect warning encountered during local p2p testing
@@ -59,7 +59,7 @@ class Disconnect(Command):
        try:
            raw_decoded = cast(Dict[str, int], super().decode(data))
        except rlp.exceptions.ListDeserializationError:
-            self.logger.warning("Malformed Disconnect message: %s", data)
+            self.logger.warning("Malformed Disconnect message: %s" % data)
            raise MalformedMessage("Malformed Disconnect message: {}".format(data))
        return assoc(
            raw_decoded, "reason_name", self.get_reason_name(raw_decoded["reason"])
Allow empty list as AccountCreateExtension(s). All other graphene objects accept `[ ]` as extensions placeholder, so AccountCreateExtension should comply.
@@ -319,7 +319,7 @@ class AccountCreateExtensions(Extension):
        if isArgsThisClass(self, args):
            self.data = args[0].data
        else:
-            if len(args) == 1 and len(kwargs) == 0:
+            if len(args) == 1 and len(kwargs) == 0 and not(isinstance(args[0], list)):
                kwargs = args[0]
            # assert "1.3.0" in kwargs["markets"], "CORE asset must be in 'markets' to pay fees"
            super().__init__(OrderedDict([
Fixed Typos While going through the tutorial, I found some minor typos, so I fixed them. Thanks
@@ -97,9 +97,9 @@ label_pipeline = lambda x: int(x) - 1 # # `torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader>`__ # is recommended for PyTorch users (a tutorial is `here <https://pytorch.org/tutorials/beginner/data_loading_tutorial.html>`__). -# It works with a map-style dataset that implements the ``getitem()`` and ``len()`` protocols, and represents a map from indices/keys to data samples. It also works with an iterable datasets with the shuffle argumnent of ``False``. +# It works with a map-style dataset that implements the ``getitem()`` and ``len()`` protocols, and represents a map from indices/keys to data samples. It also works with an iterable dataset with the shuffle argument of ``False``. # -# Before sending to the model, ``collate_fn`` function works on a batch of samples generated from ``DataLoader``. The input to ``collate_fn`` is a batch of data with the batch size in ``DataLoader``, and ``collate_fn`` processes them according to the data processing pipelines declared previouly. Pay attention here and make sure that ``collate_fn`` is declared as a top level def. This ensures that the function is available in each worker. +# Before sending to the model, ``collate_fn`` function works on a batch of samples generated from ``DataLoader``. The input to ``collate_fn`` is a batch of data with the batch size in ``DataLoader``, and ``collate_fn`` processes them according to the data processing pipelines declared previously. Pay attention here and make sure that ``collate_fn`` is declared as a top level def. This ensures that the function is available in each worker. # # In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of ``nn.EmbeddingBag``. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of indidividual text entries.
settings: Use variable for notification sound element. This commit changes the click handler for playing notification sound to use a variable instead of directly using the element id such that we can use the same code for realm-level settings also by just setting the variable accordingly.
@@ -75,6 +75,7 @@ export function set_enable_marketing_emails_visibility() { export function set_up(container, settings_object) { const patch_url = "/json/settings"; + const notification_sound_elem = $("#user-notification-sound-audio"); container.find(".notification-settings-form").on("change", "input, select", function (e) { e.preventDefault(); e.stopPropagation(); @@ -102,7 +103,7 @@ export function set_up(container, settings_object) { container.find(".play_notification_sound").on("click", () => { if (settings_object.notification_sound !== "none") { - $("#user-notification-sound-audio")[0].play(); + notification_sound_elem[0].play(); } });
streams: Make stream settings inputs responsive to narrow screens. This makes the inputs and buttons responsive to narrow screens by gracefully resizing and falling in place. Fixes:
@@ -296,6 +296,10 @@ form#add_new_subscription { -webkit-overflow-scrolling: touch; } +.subscriber_list_container .form_inline input { + vertical-align: top; +} + .subscriber-list { width: 100%; margin: auto; @@ -798,6 +802,10 @@ form#add_new_subscription { margin-top: -5px; } +#subscription_overlay .stream-header .button-group { + float: right; +} + #subscription_overlay .stream-header .subscribe-button.unsubscribed { color: hsl(156, 39%, 54%); border-color: hsl(156, 39%, 77%); @@ -975,6 +983,28 @@ form#add_new_subscription { .search-container { text-align: center; } + + .subscriber_list_settings input { + float: left; + margin: 0px 5px 0px 0px; + width: calc(50% - 45px); + } + + #subscription_overlay .stream-header .button-group { + float: none; + display: block; + clear: both; + margin-top: 10px; + } + + #subscription_overlay .stream-header .button-group > * { + float: none !important; + vertical-align: top; + } + + #subscription_overlay .stream-header .subscribe-button { + margin-top: -7px; + } } @media (max-width: 700px) { @@ -1041,6 +1071,15 @@ form#add_new_subscription { } } +@media (max-width: 367px) { + #subscription_overlay .subscription_settings .button-group { + margin-top: 10px; + float: none; + display: inline-block; + vertical-align: top; + } +} + #stream-creation #invites-warning-modal .modal-footer { position: static; }
Don't try to upgrade DB on CI This was added by mistake - the Concourse pipeline never did this previously, and errors if we try (the necessary environment vars aren't present, even if we wanted to).
@@ -25,7 +25,7 @@ NOTIFY_CREDENTIALS ?= ~/.notify-credentials
 bootstrap: generate-version-file ## Set up everything to run the app
        pip3 install -r requirements_for_test.txt
        createdb notification_api || true
-       (. environment.sh && flask db upgrade) || flask db upgrade
+       (. environment.sh && flask db upgrade) || true

 .PHONY: run-flask
 run-flask: ## Run flask
DOC: Replace reference to np.swapaxis with np.swapaxes. The former function does not exist.
@@ -1639,7 +1639,7 @@ def moveaxis(a, source, destination):
    >>> np.transpose(x).shape
    (5, 4, 3)

-    >>> np.swapaxis(x, 0, -1).shape
+    >>> np.swapaxes(x, 0, -1).shape
    (5, 4, 3)
    >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
    (5, 4, 3)
Update example.py Deleted unnecessary blank line 50.
@@ -47,7 +47,6 @@ def parse(input_string):
    root = None
    current = None
    stack = list(input_string)
-
    if input_string == '()':
        raise ValueError('tree with no nodes')
fix: move to OVNKubernetes This commit moves the default CNI cluster network provider to OVNKubernetes as it will be the eventual default.
@@ -14,7 +14,7 @@ networking:
   clusterNetwork:
   - cidr: {{ kubeinit_okd_pod_cidr }}
     hostPrefix: 23
-  networkType: OpenShiftSDN
+  networkType: OVNKubernetes
   serviceNetwork:
   - {{ kubeinit_okd_service_cidr }}
 platform:
Remove MFaaS, it's broken. MFaaS returns HTTP code 404 when accessed.
@@ -1765,7 +1765,6 @@ API | Description | Auth | HTTPS | CORS |
 | [JSON2Video](https://json2video.com) | Create and edit videos programmatically: watermarks,resizing,slideshows,voice-over,text animations | `apiKey` | Yes | No |
 | [Lucifer Quotes](https://github.com/shadowoff09/lucifer-quotes) | Returns Lucifer quotes | No | Yes | Unknown |
 | [MCU Countdown](https://github.com/DiljotSG/MCU-Countdown) | A Countdown to the next MCU Film | No | Yes | Yes |
-| [MFaaS](https://stitti.github.io/MFaaS) | Modern Family as a Service: API for receiving quotes and actors | No | Yes | No |
 | [Motivational Quotes](https://nodejs-quoteapp.herokuapp.com/) | Random Motivational Quotes | No | Yes | Unknown |
 | [Movie Quote](https://github.com/F4R4N/movie-quote/) | Random Movie and Series Quotes | No | Yes | Yes |
 | [Open Movie Database](http://www.omdbapi.com/) | Movie information | `apiKey` | Yes | Unknown |
filter: Add `maybe_add_search_terms` function to add search_term. This function adds search_term to the operators list. This logic is extracted as a prep commit for the changes related to
@@ -299,6 +299,15 @@ export class Filter { let operand; let term; + function maybe_add_search_terms() { + if (search_term.length > 0) { + operator = "search"; + const _operand = search_term.join(" "); + term = {operator, operand: _operand, negated: false}; + operators.push(term); + } + } + // Match all operands that either have no spaces, or are surrounded by // quotes, preceded by an optional operator that may have a space after it. const matches = str.match(/([^\s:]+: ?)?("[^"]+"?|\S+)/g); @@ -337,12 +346,7 @@ export class Filter { } // NB: Callers of 'parse' can assume that the 'search' operator is last. - if (search_term.length > 0) { - operator = "search"; - operand = search_term.join(" "); - term = {operator, operand, negated: false}; - operators.push(term); - } + maybe_add_search_terms(); return operators; }
Fix false positive CVE-2017-10271. This commit is to increase accuracy by changing the CVE-2017-10271 scan string from "<faultstring>.*" to "<faultstring>java.lang.ProcessBuilder || <faultstring>0".
@@ -2,7 +2,7 @@ id: CVE-2017-10271

 info:
   name: Oracle WebLogic Server - Remote Command Execution
-  author: dr_set,ImNightmaree
+  author: dr_set,ImNightmaree,true13
   severity: high
   description: |
     The Oracle WebLogic Server component of Oracle Fusion Middleware (subcomponent - WLS Security) is susceptible to remote command execution. Supported versions that are affected are 10.3.6.0.0, 12.1.3.0.0, 12.2.1.1.0 and 12.2.1.2.0. This easily exploitable vulnerability allows unauthenticated attackers with network access via T3 to compromise Oracle WebLogic Server.
@@ -85,7 +85,7 @@ requests:

     matchers:
       - type: dsl
         dsl:
-          - regex("<faultstring>.*</faultstring>", body)
+          - regex("<faultstring>java.lang.ProcessBuilder || <faultstring>0", body)
           - status_code == 500
         condition: and
Solving conflicts Also removed the function `get_data_mobility`, which is obsolete.
@@ -231,16 +231,8 @@ class InputLocator(object): return os.path.join(self.db_path, 'Systems', 'thermal_networks.xls') def get_data_benchmark(self): - """db/Benchmarks/benchmark_targets.xls""" - return os.path.join(self.db_path, 'Benchmarks', 'benchmark_targets.xls') - - def get_data_benchmark_today(self): - """db/Benchmarks/benchmark_today.xls""" - return os.path.join(self.db_path, 'Benchmarks', 'benchmark_today.xls') - - def get_data_mobility(self): - """db/Benchmarks/mobility.xls""" - return os.path.join(self.db_path, 'Benchmarks', 'mobility.xls') + """databases/CH/Benchmarks/benchmark_targets.xls""" + return os.path.join(self.db_path, 'Benchmarks', 'benchmark_2000W.xls') def get_uncertainty_db(self): """db/Uncertainty/uncertainty_distributions.xls"""
CI: make sure we build libadalang_for_customers The libadalang testsuite uses this component, so make sure it runs on an up-to-date version.
@@ -125,6 +125,7 @@ libadalang_build_and_test:
     - touch fingerprints/x86_64-linux.langkit_support.build.json.assume-unchanged
     - touch fingerprints/x86_64-linux.langkit.build.json.assume-unchanged
     - anod build --minimal --disable-cathod libadalang
+    - anod build --minimal --disable-cathod libadalang_for_customers
     - anod test --minimal --disable-cathod libadalang

     # This call to e3-testsuite-report raise an error if there is one failure
Bugfix of RhinoNurbsSurface closest_point method: change rhino_curve to rhino_surface, and return parameters as a tuple instead (for consistency with compas_occ).
@@ -456,21 +456,21 @@ class RhinoNurbsSurface(NurbsSurface): point : :class:`compas.geometry.Point` The test point. return_parameters : bool, optional - Return the UV parameters of the closest point in addition to the point location. + Return the UV parameters of the closest point as tuple in addition to the point location. Returns ------- :class:`compas.geometry.Point` If ``return_parameters`` is False. - :class:`compas.geometry.Point`, float, float + :class:`compas.geometry.Point`, (float, float) If ``return_parameters`` is True. """ - result, u, v = self.rhino_curve.ClosestPoint(point_to_rhino(point)) + result, u, v = self.rhino_surface.ClosestPoint(point_to_rhino(point)) if not result: return point = self.point_at(u, v) if return_parameters: - return point, u, v + return point, (u, v) return point def aabb(self, precision=0.0, optimal=False):
BUG FIX reopt.jl dvFuelUsage with sub hourly time step

Since the units are:
FuelBurnSlope <=> gallons/kWh
FuelBurnYInt <=> gallons/hour
dvRatedProduction <=> kW
ProductionFactor <=> kW/kW
then the TimeStepScaling <=> hour is needed in the definition of dvFuelUsage <=> gallons
@@ -246,10 +246,12 @@ function add_fuel_constraints(m, p) # Constraint (1b): Fuel burn for non-CHP Constraints @constraint(m, FuelBurnCon[t in p.FuelBurningTechs, ts in p.TimeStep], - m[:dvFuelUsage][t,ts] == (p.FuelBurnSlope[t] * p.ProductionFactor[t,ts] * m[:dvRatedProduction][t,ts]) + - (p.FuelBurnYInt[t] * m[:binTechIsOnInTS][t,ts]) + m[:dvFuelUsage][t,ts] == p.TimeStepScaling * ( + p.FuelBurnSlope[t] * p.ProductionFactor[t,ts] * m[:dvRatedProduction][t,ts] + + p.FuelBurnYInt[t] * m[:binTechIsOnInTS][t,ts] ) - m[:TotalGenFuelCharges] = @expression(m, p.pwf_e * p.TimeStepScaling * sum( p.FuelCost[f] * + ) + m[:TotalGenFuelCharges] = @expression(m, p.pwf_e * sum( p.FuelCost[f] * sum(m[:dvFuelUsage][t,ts] for t in p.TechsByFuelType[f], ts in p.TimeStep) for f in p.FuelType) )
CollectionSingleton: switch to the standard result var mechanism TN:
@@ -563,23 +563,20 @@ class CollectionSingleton(AbstractExpression): self.expr.type.array_type().add_to_context() self.static_type = self.expr.type.array_type() - self.array_var = PropertyDef.get().vars.create('Singleton', - self.type) - super(CollectionSingleton.Expr, self).__init__( + result_var_name='Singleton', abstract_expr=abstract_expr ) def _render_pre(self): return self.expr.render_pre() + """ - {array_var} := Create (Items_Count => 1); - {array_var}.Items (1) := {item}; - """.format(array_var=self.array_var.name, - array_type=self.static_type.pointed(), + {result_var} := Create (Items_Count => 1); + {result_var}.Items (1) := {item}; + """.format(result_var=self.result_var.name, item=self.expr.render_expr()) def _render_expr(self): - return self.array_var.name + return self.result_var.name @property def subexprs(self):
Bugfix: Recursive BOM display

Actually request recursively! (duh)
Fix the idField and parentIdField for the BOM display (was incredibly wrong)
Sub-rows are initially displayed in the "collapsed" state
@@ -280,7 +280,9 @@ function loadBomTable(table, options) { params.sub_part_detail = true; } - function requestSubItems(part_pk) { + // Function to request BOM data for sub-items + // This function may be called recursively for multi-level BOMs + function requestSubItems(bom_pk, part_pk) { inventreeGet( options.bom_url, @@ -291,10 +293,17 @@ function loadBomTable(table, options) { { success: function(response) { for (var idx = 0; idx < response.length; idx++) { - response[idx].parentId = part_pk; + + response[idx].parentId = bom_pk; + + if (response[idx].sub_part_detail.assembly) { + requestSubItems(response[idx].pk, response[idx].sub_part) + } } table.bootstrapTable('append', response); + + table.treegrid('collapseAll'); }, error: function() { console.log('Error requesting BOM for part=' + part_pk); @@ -306,9 +315,9 @@ function loadBomTable(table, options) { table.bootstrapTable({ treeEnable: true, rootParentId: options.parent_id, - idField: 'sub_part', + idField: 'pk', uniqueId: 'pk', - parentIdField: 'part', + parentIdField: 'parentId', treeShowField: 'sub_part', sortable: true, search: true, @@ -351,7 +360,7 @@ function loadBomTable(table, options) { table.bootstrapTable('updateRow', idx, row, true); if (row.sub_part_detail.assembly) { - requestSubItems(row.sub_part); + requestSubItems(row.pk, row.sub_part); } } },
[Core] Raise an exception on Ray version mismatch Add unit tests that verify Ray version mismatches
@@ -456,19 +456,18 @@ def test_put_error2(ray_start_object_store_memory): # get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1) [email protected]("Publish happeds before we subscribe it") -def test_version_mismatch(error_pubsub, shutdown_only): +def test_version_mismatch(ray_start_cluster): ray_version = ray.__version__ - ray.__version__ = "fake ray version" - - ray.init(num_cpus=1) - p = error_pubsub + try: + cluster = ray_start_cluster + cluster.add_node(num_cpus=1) - errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR) - assert False, errors - assert len(errors) == 1 - assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR + # Test the driver. + ray.__version__ = "fake ray version" + with pytest.raises(RuntimeError): + ray.init(address="auto") + finally: # Reset the version. ray.__version__ = ray_version
Change test to check individual check statuses before checking the overall in order to print a more helpful error message
@@ -96,7 +96,10 @@ def test_checks_api(dcos_api_session):
    results = r.json()
    assert isinstance(results, dict)

-    logging.info("returned checks: {}".format(checks))
-    logging.info("run checks : {}".format(results['checks'].keys()))
-    # Make sure we ran all the listed checks.
-    assert set(checks.keys()) == set(results['checks'].keys())
+    # check that the returned statuses of each check is 0
+    expected_status = {c: 0 for c in checks.keys()}
+    response_status = {c: v['status'] for c, v in results['checks'].items()}
+    assert expected_status == response_status
+
+    # check that overall status is also 0
+    assert results['status'] == 0
Fix GCE provider: #create returns bootstrap result fixes
@@ -2580,7 +2580,10 @@ def create(vm_=None, call=None):
    ssh_user, ssh_key = __get_ssh_credentials(vm_)
    vm_['ssh_host'] = __get_host(node_data, vm_)
    vm_['key_filename'] = ssh_key
-    __utils__['cloud.bootstrap'](vm_, __opts__)
+
+    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
+
+    ret.update(node_dict)

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.trace(
@@ -2598,7 +2601,7 @@ def create(vm_=None, call=None):
        transport=__opts__['transport']
    )

-    return node_dict
+    return ret


def update_pricing(kwargs=None, call=None):
lightbox: Fix incorrectly displayed avatar image. The lightbox "v" shortcut should not show a user's avatar, so this limits the scope of images it can choose to ones inside of the `.message_content` div.
@@ -140,14 +140,14 @@ exports.open = function (image, options) {

 exports.show_from_selected_message = function () {
    var $message = $(".selected_message");
-    var $image = $message.find("img");
+    var $image = $message.find(".message_content img");

    while ($image.length === 0) {
        $message = $message.prev();
        if ($message.length === 0) {
            break;
        }
-        $image = $message.find("img");
+        $image = $message.find(".message_content img");
    }

    if ($image.length !== 0) {
hotkey.js: Navigate using page up / page down Using page up / page down, go to the top / bottom of compose textarea Fixes
@@ -536,11 +536,12 @@ exports.process_hotkey = function (e, hotkey) {
        compose_actions.cancel();
        // don't return, as we still want it to be picked up by the code below
    } else if (event_name === "page_up") {
-        $("#compose-textarea").caret(0);
+        $("#compose-textarea").caret(0).animate({ scrollTop: 0 }, "fast");
        return true;
    } else if (event_name === "page_down") {
        // so that it always goes to the end of the compose box.
-        $("#compose-textarea").caret(Infinity);
+        var height = $("#compose-textarea")[0].scrollHeight;
+        $("#compose-textarea").caret(Infinity).animate({ scrollTop: height }, "fast");
        return true;
    } else {
        // Let the browser handle the key normally.
Check for jpg bytes before make_response. If jpg_bytes wasn't retrieved from either disk or a tracked object, respond with 404. Prevents an uncaught error for unknown event ids sent to the event_snapshot endpoint.
@@ -214,6 +214,9 @@ def event_snapshot(id):
        except:
            return "Event not found", 404

+    if jpg_bytes is None:
+        return "Event not found", 404
+
    response = make_response(jpg_bytes)
    response.headers["Content-Type"] = "image/jpg"
    return response
Fixes a bug syncing roles for members who leave. The event that was supposed to handle this was called on_member_leave instead of on_member_remove, so the event was never called when it should have been. This commit renames the method.
@@ -118,7 +118,7 @@ class Sync:
            # If we got `404`, the user is new. Create them.
            await self.bot.api_client.post('bot/users', json=packed)

-    async def on_member_leave(self, member: Member) -> None:
+    async def on_member_remove(self, member: Member) -> None:
        """Updates the user information when a member leaves the guild."""
        await self.bot.api_client.put(
            f'bot/users/{member.id}',
Update OS::Glance::CommonImageProperties in metadefs Add missing properties, as of Train, defined in etc/schema-image.json for OS::Glance::CommonImageProperties defined in etc/metadefs/glance-common-image-props.json Closes-bug:
"title": "OS Version", "description": "Operating system version as specified by the distributor. (for example, '11.10')", "type": "string" + }, + "description": { + "title": "Image description", + "description": "A human-readable string describing this image.", + "type": "string" + }, + "cinder_encryption_key_id": { + "title": "Cinder Encryption Key ID", + "description": "Identifier in the OpenStack Key Management Service for the encryption key for the Block Storage Service to use when mounting a volume created from this image", + "type": "string" + }, + "cinder_encryption_key_deletion_policy": { + "title": "Cinder Encryption Key Deletion Policy", + "description": "States the condition under which the Image Service will delete the object associated with the 'cinder_encryption_key_id' image property. If this property is missing, the Image Service will take no action", + "type": "string", + "enum": [ + "on_image_deletion", + "do_not_delete" + ] } } }
undo_votes: Correcting possible crash when submitting an invalid form. Not replying with json on an endpoint that is supposed to do redirects.
@@ -2218,14 +2218,15 @@ def admin_undo_votes(uid):
    if not current_user.admin:
        abort(403)

-    form = DummyForm()
-    if not form.validate():
-        return redirect(url_for('view_user', user=username))
-
    try:
        user = User.get(User.uid == uid)
    except User.DoesNotExist:
-        return jsonify(status='error', error='User does not exist')
+        return abort(404)
+
+    form = DummyForm()
+    if not form.validate():
+        return redirect(url_for('view_user', user=user.name))
+
    post_v = SubPostVote.select().where(SubPostVote.uid == user.uid)
    comm_v = SubPostCommentVote.select().where(SubPostCommentVote.uid == user.uid)
Scons: Use "ccache.exe" via environment variable too. * That gives users more control and is more like it's done for "clcache.exe" where we cannot guess where it's installed.
@@ -1736,13 +1736,8 @@ if show_scons_mode: # Inject ccache if it happens to be installed. if win_target and gcc_mode: - for location in [ - r"c:\msys64", - r"c:\msys32", - r"\msys64", - r"\msys32", - ]: - candidate = os.path.join(location, r"usr\bin\ccache.exe") + if "NUITKA_CCACHE_BINARY" in os.environ: + candidate = os.environ["NUITKA_CCACHE_BINARY"] if os.path.exists(candidate): env["CC"] = "%s %s" % ( @@ -1752,13 +1747,13 @@ if win_target and gcc_mode: # Do not consider scons cache anymore. if show_scons_mode: - print("scons: Found MSYS2 %s to cache object files." % candidate) + print("scons: Found ccache '%s' to cache object files." % candidate) + cache_mode = False - break else: if show_scons_mode: - print("scons: Unable to find MSYS2 ccache.exe, consider user manual.") + print("scons: Unable to find ccache.exe, consider user manual.") # Inject clcache if it happens to be provided via environment. if win_target and msvc_mode: @@ -1776,6 +1771,8 @@ if win_target and msvc_mode: env._dict["ENV"]["CLCACHE_CL"] ) ) + + cache_mode = False else: if show_scons_mode: print("scons: Unable to find clcache.exe, consider user manual.")
Metadata Editor UI enhancements

Adding small UI enhancements to metadata editor:
- Better search icon
- Add tooltip to 'search by tags' button
- Fix dark background in language search input field
@@ -190,12 +190,16 @@ export class FilterTools extends React.Component< type="text" placeholder="Search..." onChange={this.handleSearch} - rightIcon="search" + rightIcon="ui-components:search" value={this.state.searchValue} /> </div> <div className={FILTER_CLASS} id={this.props.schemaId}> - <button className={FILTER_BUTTON} onClick={this.createFilterBox}> + <button + title="Filter by tag" + className={FILTER_BUTTON} + onClick={this.createFilterBox} + > <tagIcon.react /> </button> {this.renderFilterOption()}
Fix ManagedObject.get_profile() HG-- branch : feature/moversion
@@ -937,7 +937,7 @@ class ManagedObject(Model):
        """
        profile = getattr(self, "_profile", None)
        if not profile:
-            self._profile = self.profile.get_profile()()
+            self._profile = self.profile.get_profile()
        return self._profile

    def get_parser(self):
Fix the import for filter_utils. Changed from importing utils and calling it via utils.filter_utils to a proper import. utils.__init__.py didn't export it, so the way it was called probably worked for Python 2 only. This way is a more ubiquitous way of calling it.
@@ -28,6 +28,7 @@ from yaql.language import utils as yaql_utils from mistral.config import cfg from mistral.db.v2 import api as db_api from mistral import utils +from mistral.utils import filter_utils # TODO(rakhmerov): it's work around the bug in YAQL. # YAQL shouldn't expose internal types to custom functions. @@ -129,7 +130,7 @@ def executions_(context, filter = {} if id is not None: - filter = utils.filter_utils.create_or_update_filter( + filter = filter_utils.create_or_update_filter( 'id', id, "eq", @@ -137,7 +138,7 @@ def executions_(context, ) if root_execution_id is not None: - filter = utils.filter_utils.create_or_update_filter( + filter = filter_utils.create_or_update_filter( 'root_execution_id', root_execution_id, "eq", @@ -145,7 +146,7 @@ def executions_(context, ) if state is not None: - filter = utils.filter_utils.create_or_update_filter( + filter = filter_utils.create_or_update_filter( 'state', state, "eq", @@ -153,7 +154,7 @@ def executions_(context, ) if from_time is not None: - filter = utils.filter_utils.create_or_update_filter( + filter = filter_utils.create_or_update_filter( 'created_at', from_time, "gte", @@ -161,7 +162,7 @@ def executions_(context, ) if to_time is not None: - filter = utils.filter_utils.create_or_update_filter( + filter = filter_utils.create_or_update_filter( 'created_at', to_time, "lt",
Update elf_mirai.txt [0] One more ```Mirai-based``` botnet. C2 addresses + semaphore filenames.
 l.ocalhost.host
 /sonicwall
+
+# Reference: https://blog.netlab.360.com/threat-alert-a-new-worm-fbot-cleaning-adbminer-is-using-a-blockchain-based-dns-en/
+
+musl.lib
+rippr.cc
+ukrainianhorseriding.com
+/adbs
+/adbs2
+/fbot.aarch64
+/fbot.arm7
+/fbot.mips
+/fbot.mipsel
+/fbot.x86
+/fbot.x86_64
+/mipsel.bot.le
+/mips.bot.be
+/i686.bot.le
+/arm7.bot.le
+/arm64.bot.le
+/x86_64.bot.le
Same changes as in Fixes: pull-request follow-up
" html1.value = '''\n", " <h4>Country code: <b>{}</b></h4>\n", " Country name: {}\n", - " '''.format(feature['id'], feature['properties']['name'])\n", + " '''.format(feature['properties']['ISO_A2'], feature['properties']['NAME'])\n", "\n", "json_layer.on_hover(update_html)" ]
Reraise if errors occur when setting description and suggest checking for RFC1035 compliance.
@@ -1331,7 +1331,10 @@ class GCEVolume(BaseVolume):
                    resource=self.name,
                    body=request_body).execute())
        except Exception as e:
-            cb.log.warning('Exception while setting volume description: %s', e)
+            cb.log.warning('Exception while setting volume description: %s.'
+                           'Check for invalid characters in description. Should'
+                           'confirm to RFC1035.', e)
+            raise e
        self.refresh()

    @property
Delete semi and highlights on rebuildtree. The semi and highlighted items are dirty after a tree rebuild and need to be thrown away.
@@ -1546,6 +1546,7 @@ class RootNode(list): self.object = "Project" self.name = "Project" self.semi_selected = [] + self.highlighted = [] self.type = NODE_ROOT self.kernel = kernel @@ -1567,6 +1568,23 @@ class RootNode(list): self.node_files = None self.rebuild_tree() + def highlight_select(self, item): + if item not in self.highlighted: + self.highlighted.append(item) + self.tree.SetItemBackgroundColour(item, wx.YELLOW) + + def highlight_unselect(self): + self.set_selected_elements(None) + self.set_selected_operations(None) + for item in self.highlighted: + self.tree.SetItemBackgroundColour(item, wx.WHITE) + self.highlighted.clear() + + def highlight_select_all(self, objects): + for e in objects: + self.highlight_select(e) + + def semi_select(self, item): if item not in self.semi_selected: self.semi_selected.append(item) @@ -1589,6 +1607,8 @@ class RootNode(list): self.semi_select(e) def rebuild_tree(self): + self.semi_selected.clear() + self.highlighted.clear() self.tree.DeleteAllItems() self.tree_images = wx.ImageList() self.tree_images.Create(width=20, height=20) @@ -1864,6 +1884,7 @@ class RootNode(list): if node is None: return self.semi_unselect() + self.highlight_unselect() self.semi_select_all(self.tree.GetSelections()) if node.type == NODE_ELEMENTS_BRANCH: for n in self.node_elements: @@ -1871,6 +1892,12 @@ class RootNode(list): self.gui.request_refresh() self.selection_updated() return + elif node.type == NODE_OPERATION: + for n in node: + self.highlight_select(n.item) + self.gui.request_refresh() + self.selection_updated() + return elif node.type == NODE_FILE_FILE: for n in node: obj = n.object
Update emotet.txt. Tails can vary.
@@ -2799,22 +2799,24 @@ proyectoin.com

 # Reference: https://app.any.run/tasks/9056d965-915a-498a-83bc-a750fc0389f2/

-http://98.199.196.197/GPDnrZV7sOIw7mX
-http://188.85.143.170/iOEiihW73
-http://195.223.215.190/NG4N0hcJOy
+http://98.199.196.197
+http://188.85.143.170
+http://195.223.215.190
 http://testtaglabel.com/wp-includes/LqYA88863/
 http://www.xishicanting.com/wp-admin/jIx/

 # Reference: https://app.any.run/tasks/881f5580-7cee-4156-bc70-d9592d526345/

 http://salman.vetkare.com/dashboard/ccABOH4/
-http://113.61.76.239/a40NvbQAxrRJ
-http://68.62.245.148/Syvdv0r
-http://91.242.136.103/HJNm
-http://113.61.76.239/MikLuH
-http://68.62.245.148/okNzJJVxmVITzB5hR
-http://91.242.136.103/ILhlG
-http://91.242.136.103/fmrNxd4xgr7w
+http://113.61.76.239
+http://91.242.136.103
+http://68.62.245.148
+
+# Reference: https://twitter.com/Jouliok/status/1219952503032250368
+# Reference: https://app.any.run/tasks/4092920b-325b-494e-b00e-edc0b494c2d8/
+
+http://68.114.229.171
+74.101.225.121:443

 # Generic trails
Update add_request_headers.md Added more docs on add_request_headers feature
@@ -4,7 +4,9 @@ Ambassador can add a dictionary of HTTP headers that can be added to each reques

 ## The `add_request_headers` annotation

-The `add_request_headers` attribute is a dictionary of `header`: `value` pairs. Envoy dynamic values `%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%` and `%PROTOCOL%` are supported, in addition to static values.
+The `add_request_headers` attribute is a dictionary of `header`: `value` pairs. The `value` can be a `string`, `bool` or `object`. When its an `object`, the object should have a `value` property, which is the actual header value, and the remaining attributes are additional envoy properties. Look at the example to see the usage.
+
+Envoy dynamic values `%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%` and `%PROTOCOL%` are supported, in addition to static values.

 ## A basic example

@@ -18,6 +20,11 @@ add_request_headers:
   x-test-proto: "%PROTOCOL%"
   x-test-ip: "%DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT%"
   x-test-static: This is a test header
+  x-test-static-2:
+    value: This the test header #same as above x-test-static header
+  x-test-object:
+    value: This the value
+    append: False #True by default
 service: qotm
 ```
fix: add mkdir instructions The provided `download_data_kaggle.sh` is missing instructions to create the `data` folder and the `f8k` folder.
@@ -100,6 +100,8 @@ To make this work, we need to get the image files from the `kaggle` [dataset](ht
 kaggle datasets download adityajn105/flickr8k
 unzip flickr8k.zip
 rm flickr8k.zip
+mkdir data
+mkdir data/f8k
 mv Images data/f8k/images
 mv captions.txt data/f8k/captions.txt
 ```
Added the ca-trust volume to the ceph-radosgw service template, avoiding exposing useless information. This bug is referred to in the following bugzilla:
@@ -17,6 +17,12 @@ ExecStart=/usr/bin/{{ container_binary }} run --rm --net=host \
   -v /etc/ceph:/etc/ceph:z \
   -v /var/run/ceph:/var/run/ceph:z \
   -v /etc/localtime:/etc/localtime:ro \
+  {% if ansible_distribution == 'RedHat' -%}
+  -v /etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro \
+  -v /etc/pki/ca-trust/source/anchors:/etc/pki/ca-trust/source/anchors:ro \
+  {% elif ansible_distribution == 'Ubuntu' -%}
+  -v /etc/ssl/certs:/etc/ssl/certs:ro \
+  {% endif -%}
   -e CEPH_DAEMON=RGW \
   -e CLUSTER={{ cluster }} \
   -e RGW_NAME={{ ansible_hostname }}.${INST_NAME} \
Cleanup solve_poly_system(), comment on code Also drop solve_generic().
@@ -27,6 +27,10 @@ def test_solve_poly_system():

    assert solve_poly_system([y - x, y - x - 1], x, y) == []

+    assert solve_poly_system([x - y + 5, x + y - 3], x, y) == [{x: -1, y: 4}]
+    assert solve_poly_system([x - 2*y + 5, 2*x - y - 3], x, y) == [{x: Rational(11, 3), y: Rational(13, 3)}]
+    assert solve_poly_system([x**2 + y, x + y*4], x, y) == [{x: 0, y: 0}, {x: Rational(1, 4), y: Rational(-1, 16)}]
+
    assert solve_poly_system([y - x**2, y + x**2], x, y) == [{x: 0, y: 0}]

    assert (solve_poly_system([2*x - 3, 3*y/2 - 2*x, z - 5*y], x, y, z) ==
Update documentation regarding 0-pk pk-pk Users now can change amplitude units using the prefix pkpk_ (e.g. pkpk_m, pkpk_micron). The docs now mention that the default is 0 to peak and how it can be changed to peak to peak.
@@ -928,7 +928,8 @@ class FrequencyResponseResults: Default is "rad/s" amplitude_units : str, optional Units for the y axis. - Default is "m/N" + Default is "m/N" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) fig : Plotly graph_objects.Figure() The figure object with the plot. mag_kwargs : optional @@ -1072,7 +1073,8 @@ class FrequencyResponseResults: Default is "rad/s" amplitude_units : str, optional Units for the y axis. - Default is "m/N" + Default is "m/N" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) phase_units : str, optional Units for the x axis. Default is "rad" @@ -1168,7 +1170,8 @@ class FrequencyResponseResults: Default is "rad/s" amplitude_units : str, optional Units for the y axis. - Default is "m/N" + Default is "m/N" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) phase_units : str, optional Units for the x axis. Default is "rad" @@ -1298,7 +1301,8 @@ class ForcedResponseResults: Default is "rad/s" amplitude_units : str, optional Units for the y axis. - Default is "m" + Default is "m" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) fig : Plotly graph_objects.Figure() The figure object with the plot. kwargs : optional @@ -1469,7 +1473,8 @@ class ForcedResponseResults: Default is "rad/s" amplitude_units : str, optional Units for the y axis. - Default is "m" + Default is "m" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) phase_units : str, optional Units for the x axis. Default is "rad" @@ -1586,7 +1591,8 @@ class ForcedResponseResults: Default is "rad/s" amplitude_units : str, optional Amplitude units. - Default is "m/N" + Default is "m/N" 0 to peak. + To use peak to peak use the prefix 'pkpk_' (e.g. pkpk_m) phase_units : str, optional Phase units. Default is "rad"
Added handling for OSError caused by trying to load a GNU readline shared library in Python environments like PyPy which have a pure Python implementation of readline.
@@ -129,10 +129,10 @@ elif 'gnureadline' in sys.modules or 'readline' in sys.modules:
        import ctypes
        readline_lib = ctypes.CDLL(readline.__file__)
-    except AttributeError:  # pragma: no cover
+    except (AttributeError, OSError):  # pragma: no cover
        _rl_warn_reason = (
            "this application is running in a non-standard Python environment in\n"
-            "which readline is not loaded dynamically from a shared library file."
+            "which GNU readline is not loaded dynamically from a shared library file."
        )
    else:
        rl_type = RlType.GNU
tests: Remove ignored parameters from bots tests. `service_interface` is not a parameter of `add_bot_backend`, but `interface_type` is, and that has the same default value as what was being provided by the test, so updated for the parameter name change, which was possibly missed in a previous code refactor.
@@ -1448,7 +1448,7 @@ class BotTest(ZulipTestCase, UploadSerializeMixin):
            "short_name": "hambot",
            "bot_type": UserProfile.OUTGOING_WEBHOOK_BOT,
            "payload_url": orjson.dumps("http://foo.bar.com").decode(),
-            "service_interface": Service.GENERIC,
+            "interface_type": Service.GENERIC,
        }
        result = self.client_post("/json/bots", bot_info)
        self.assert_json_success(result)
Remove unnecessary bullet points, unify formatting. Use bulleted documents to display problems, uniform formatting.
@@ -64,11 +64,11 @@ The following manufacturers are known to work: - QLogic - Broadcom -For information on **Mellanox SR-IOV Ethernet ConnectX cards**, see: -- `Mellanox: How To Configure SR-IOV VFs on ConnectX-4 or newer <https://support.mellanox.com/s/article/HowTo-Configure-SR-IOV-for-ConnectX-4-ConnectX-5-ConnectX-6-with-KVM-Ethernet>`_. +For information on **Mellanox SR-IOV Ethernet ConnectX cards**, see the +`Mellanox: How To Configure SR-IOV VFs on ConnectX-4 or newer <https://support.mellanox.com/s/article/HowTo-Configure-SR-IOV-for-ConnectX-4-ConnectX-5-ConnectX-6-with-KVM-Ethernet>`_. -For information on **QLogic SR-IOV Ethernet cards**, see: -- `User's Guide OpenStack Deployment with SR-IOV Configuration <http://www.qlogic.com/solutions/Documents/UsersGuide_OpenStack_SR-IOV.pdf>`_. +For information on **QLogic SR-IOV Ethernet cards**, see the +`User's Guide OpenStack Deployment with SR-IOV Configuration <http://www.qlogic.com/solutions/Documents/UsersGuide_OpenStack_SR-IOV.pdf>`_. For information on **Broadcom NetXtreme Series Ethernet cards**, see the `Broadcom NetXtreme Product Page <https://www.broadcom.com/products/ethernet-connectivity/network-adapters>`_.
Version 0.7.3 MMS updates to new pip file.
@@ -19,7 +19,7 @@ with open(path.join(here, 'README.md'), encoding='utf-8') as f:

 setup(
    name='pyspedas',
-    version='0.7.2',
+    version='0.7.3',
    description='Python Space Physics Environment Data Analysis \
        Software (SPEDAS)',
    long_description=long_description,
Method stating which commit is being replayed during a halted rebase. This will be useful to me at least. This way, I know that I can tell my script to omit some specific commits. If you accept to merge it, I may also do a similar method for merges and cherry-picks.
@@ -1062,3 +1062,14 @@ class Repo(object):

    def __repr__(self):
        return '<git.Repo "%s">' % self.git_dir
+
+    def currentlyRebasingOn(self):
+        """
+        :return: The hash of the commit which is currently being replayed while rebasing.
+
+        None if we are not currently rebasing.
+        """
+        rebase_head_file = osp.join(self.git_dir, "REBASE_HEAD")
+        if not osp.isfile(rebase_head_file):
+            return None
+        return open(rebase_head_file, "rt").readline().strip()
push notif: Test GCM options parsing more comprehensively. The payoff from making these into real unit tests.
@@ -1352,6 +1352,16 @@ class GCMParseOptionsTest(GCMTest):
        with self.assertRaises(JsonableError):
            apn.parse_gcm_options({"priority": "invalid"}, self.get_gcm_data())

+    def test_default_priority(self) -> None:
+        self.assertEqual(
+            "normal", apn.parse_gcm_options({}, self.get_gcm_data()))
+
+    def test_explicit_priority(self) -> None:
+        self.assertEqual(
+            "normal", apn.parse_gcm_options({"priority": "normal"}, self.get_gcm_data()))
+        self.assertEqual(
+            "high", apn.parse_gcm_options({"priority": "high"}, self.get_gcm_data()))
+
class GCMSendTest(GCMTest):
    @mock.patch('zerver.lib.push_notifications.logger.debug')
    def test_gcm_is_none(self, mock_debug: mock.MagicMock) -> None:
MAINT: core: Fix a compiler warning.

This change fixes:

gcc: numpy/core/src/umath/ufunc_object.c
numpy/core/src/umath/ufunc_object.c:657:19: warning: comparison of integers of different signs: 'int' and 'size_t' (aka 'unsigned long') [-Wsign-compare]
    for (i = 0; i < len; i++) {
                  ~ ^ ~~~
@@ -654,8 +654,8 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
        PyErr_NoMemory();
        goto fail;
    }
-    for (i = 0; i < len; i++) {
-        ufunc->core_dim_flags[i] = 0;
+    for (size_t j = 0; j < len; j++) {
+        ufunc->core_dim_flags[j] = 0;
    }

    i = _next_non_white_space(signature, 0);
Fix: check topic exists in `pubsub.peer_topics`
@@ -267,6 +267,7 @@ class GossipSub(IPubsubRouter):
            num_mesh_peers_in_topic = len(self.mesh[topic])

            if num_mesh_peers_in_topic < self.degree_low:
+                if topic in self.pubsub.peer_topics:

                    gossipsub_peers_in_topic = [peer for peer in self.pubsub.peer_topics[topic]
                                                if peer in self.peers_gossipsub]
Add a `fast_test` option to Makefile `fast_test` runs tests in parallel with keeping the db (speeds up things a lot).
@@ -25,6 +25,10 @@ commands : Makefile
 test :
        ${MANAGE} test

+## fast test : run all tests really fast.
+fast_test:
+       ${MANAGE} test --keepdb --parallel
+
 ## dev_database : re-make database using saved data
 dev_database :
        rm -f ${APP_DB}
Fix bug in sensor sorting for smurf export Closes
@@ -208,7 +208,7 @@ def sort_for_yaml_dump(dictionary, category): if category in ['materials', 'motors']: return {category: sort_dict_list(dictionary[category], 'name')} elif category == 'sensors': - return sort_dict_list(dictionary[category], 'name') + return {category: sort_dict_list(dictionary[category], 'name')} elif category == 'simulation': return_dict = {} for viscol in ['collision', 'visual']: @@ -359,12 +359,12 @@ def exportSmurf(model, path): # write materials, sensors, motors & controllers for data in ['materials', 'sensors', 'motors', 'controllers', 'lights']: if exportdata[data]: + log("Writing {} to smurf file.".format(data), 'DEBUG') with open(os.path.join(path, filenames[data]), 'w') as op: op.write('#' + data + infostring) + op.write(yaml.dump(sort_for_yaml_dump({data: list(model[data].values())}, data), default_flow_style=False)) - # TODO delete me? - #op.write(yaml.dump({data: list(model[data].values())}, default_flow_style=False)) # write additional collision information if exportdata['collision']:
Use cv2 to replace scipy.misc (fix

* Converted scipy.misc to cv2. Scipy.misc is being deprecated so we should probably switch to OpenCV to reduce the number of needed dependencies.
* Removed unused import.
* Made Travis happy
* Travis fix
* Added cv2 dummy func and remove grayscale hack
import six import tensorflow as tf import re -import io from six.moves import range from contextlib import contextmanager @@ -71,20 +70,25 @@ def create_image_summary(name, val): s = tf.Summary() for k in range(n): arr = val[k] - if arr.shape[2] == 1: # scipy doesn't accept (h,w,1) - arr = arr[:, :, 0] + #CV2 will only write correctly in BGR chanel order + if c == 3: + arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) + elif c == 4: + arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA) tag = name if n == 1 else '{}/{}'.format(name, k) - buf = io.BytesIO() - # scipy assumes RGB - scipy.misc.toimage(arr).save(buf, format='png') + retval, img_str = cv2.imencode('.png', arr) + if not retval: + # Encoding has failed. + continue + img_str = img_str.tostring() img = tf.Summary.Image() img.height = h img.width = w # 1 - grayscale 3 - RGB 4 - RGBA img.colorspace = c - img.encoded_image_string = buf.getvalue() + img.encoded_image_string = img_str s.value.add(tag=tag, image=img) return s @@ -261,7 +265,7 @@ def add_moving_summary(*args, **kwargs): try: - import scipy.misc + import cv2 except ImportError: from ..utils.develop import create_dummy_func - create_image_summary = create_dummy_func('create_image_summary', 'scipy.misc') # noqa + create_image_summary = create_dummy_func('create_image_summary', 'cv2') # noqa
[bugfix] Add Server414Error to pywikibot.__init__() This is a follow up of: [IMPR] add Server414Error in reflink.py and close file
@@ -44,7 +44,7 @@ from pywikibot.exceptions import (
    PageSaveRelatedError, PageNotSaved, OtherPageSaveError,
    LockedPage, CascadeLockedPage, LockedNoPage, NoCreateError,
    EditConflict, PageDeletedConflict, PageCreatedConflict,
-    ServerError, FatalServerError, Server504Error,
+    ServerError, FatalServerError, Server414Error, Server504Error,
    CaptchaError, SpamblacklistError, TitleblacklistError,
    CircularRedirect, InterwikiRedirectPage, WikiBaseError, NoWikibaseEntity,
    CoordinateGlobeUnknownException,
@@ -89,7 +89,8 @@ __all__ = (
    'NoSuchSite', 'NoUsername', 'NoWikibaseEntity', 'OtherPageSaveError',
    'output', 'Page', 'PageCreatedConflict', 'PageDeletedConflict',
    'PageNotSaved', 'PageRelatedError', 'PageSaveRelatedError', 'PropertyPage',
-    'QuitKeyboardInterrupt', 'SectionError', 'Server504Error', 'ServerError',
+    'QuitKeyboardInterrupt', 'SectionError',
+    'ServerError', 'FatalServerError', 'Server414Error', 'Server504Error',
    'showDiff', 'show_help', 'showHelp', 'Site', 'SiteDefinitionError',
    'SiteLink', 'SpamblacklistError', 'stdout', 'Timestamp',
    'TitleblacklistError', 'translate', 'ui', 'unicode2html',
Fix get_sender when using it on a ChannelForbidden Closes
@@ -44,7 +44,7 @@ class SenderGetter(abc.ABC):
        # in which case we want to force fetch the entire thing because
        # the user explicitly called a method. If the user is okay with
        # cached information, they may use the property instead.
-        if (self._sender is None or self._sender.min) \
+        if (self._sender is None or getattr(self._sender, 'min', None)) \
                and await self.get_input_sender():
            try:
                self._sender =\
viafree: don't crash when episode is text. Fixes:
@@ -241,6 +241,7 @@ class Viaplay(Service, OpenGraphThumbMixin):
        else:
            output = title
        return output
+
    def _autoname(self, dataj):
        program = dataj["format_slug"]
        season = None
@@ -253,6 +254,11 @@ class Viaplay(Service, OpenGraphThumbMixin):
        if season:
            if len(dataj["format_position"]["episode"]) > 0:
                episode = dataj["format_position"]["episode"]
+                try:
+                    episode = int(episode)
+                except ValueError:
+                    title = filenamify(episode)
+                    episode = None

        if dataj["type"] == "clip":
            #Removes the show name from the end of the filename
Skips flax tests for unrelated configurations like conditional or importer node. Flax tests will still be covered in testPenguinPipelineLocal and BulkInferrer test configurations.
@@ -232,9 +232,8 @@ class PenguinPipelineLocalEndToEndTest(tf.test.TestCase, self._assertPipelineExecution(has_bulk_inferrer=True) - @parameterized.parameters(('keras',), ('flax_experimental',)) - def testPenguinPipelineLocalWithImporter(self, model_framework): - module_file = self._module_file_name(model_framework) + def testPenguinPipelineLocalWithImporter(self): + module_file = self._module_file_name('keras') LocalDagRunner().run( penguin_pipeline_local._create_pipeline( pipeline_name=self._pipeline_name, @@ -281,10 +280,7 @@ class PenguinPipelineLocalEndToEndTest(tf.test.TestCase, break return store.get_artifacts_by_id(artifact_ids) - @parameterized.parameters( - ('keras',), - ('flax_experimental',)) - def testPenguinPipelineLocalWithRollingWindow(self, model_framework): + def testPenguinPipelineLocalWithRollingWindow(self): module_file = self._module_file_name('keras') examplegen_input_config = proto.Input(splits=[ proto.Input.Split(name='test', pattern='day{SPAN}/*'), @@ -396,11 +392,8 @@ class PenguinPipelineLocalEndToEndTest(tf.test.TestCase, self._get_input_examples_artifacts(store, trainer_execution_type), 2) - @parameterized.parameters( - ('keras',), - ('flax_experimental',)) - def testPenguinPipelineLocalConditionalWithoutPusher(self, model_framework): - module_file = self._module_file_name(model_framework) + def testPenguinPipelineLocalConditionalWithoutPusher(self): + module_file = self._module_file_name('keras') pipeline = penguin_pipeline_local._create_pipeline( pipeline_name=self._pipeline_name, data_root=self._data_root,
Fix `make client` in Python 3.6 Python 3.6 is more strict than previous versions about passing a `typing.TypeVar` instance to `issubclass`. Fixes
@@ -171,13 +171,13 @@ def name_to_py(name): def strcast(kind, keep_builtins=False): - if issubclass(kind, typing.GenericMeta): - return str(kind)[1:] - if str(kind).startswith('~'): - return str(kind)[1:] if (kind in basic_types or type(kind) in basic_types) and keep_builtins is False: return kind.__name__ + if str(kind).startswith('~'): + return str(kind)[1:] + if issubclass(kind, typing.GenericMeta): + return str(kind)[1:] return kind @@ -291,6 +291,13 @@ class {}(Type): source.append("{}self.{} = {}".format(INDENT * 2, arg_name, arg_name)) + elif type(arg_type) is typing.TypeVar: + source.append("{}self.{} = {}.from_json({}) " + "if {} else None".format(INDENT * 2, + arg_name, + arg_type_name, + arg_name, + arg_name)) elif issubclass(arg_type, typing.Sequence): value_type = ( arg_type_name.__parameters__[0] @@ -326,13 +333,6 @@ class {}(Type): source.append("{}self.{} = {}".format(INDENT * 2, arg_name, arg_name)) - elif type(arg_type) is typing.TypeVar: - source.append("{}self.{} = {}.from_json({}) " - "if {} else None".format(INDENT * 2, - arg_name, - arg_type_name, - arg_name, - arg_name)) else: source.append("{}self.{} = {}".format(INDENT * 2, arg_name,
Syntax: Improve CommonMark compatibility of emphasis This commit improves the handling of left-flanking delimiter runs to fix a couple of edge cases. Most fixes require branching though and therefore won't be available for ST3. To prepare future changes, test cases are updated with more detailed boundary checks. Also sync example ids with CM 0.30.0
@@ -248,6 +248,30 @@ variables: ) ) + # https://spec.commonmark.org/0.30/#left-flanking-delimiter-run + bold_italic_asterisk_begin: |- + (?x: + (\*\*)(\*) {{no_space_nor_punct}} + | \B (\*\*)(\*) {{no_space_but_punct}} + ) + + bold_asterisk_begin: |- + (?x: + \*{2} {{no_space_nor_punct}} + | \B \*{2} {{no_space_but_punct}} + ) + + italic_asterisk_begin: |- + (?x: + \* {{no_space_nor_punct}} + | \B \* {{no_space_but_punct}} + ) + + # not followed by Unicode whitespace and not followed by a Unicode punctuation character + no_space_nor_punct: (?![\s*\p{P}]) + # not followed by Unicode whitespace and followed by a Unicode punctuation character + no_space_but_punct: (?=[[^\s*]&&\p{P}]) + ############################################################################## contexts: @@ -2521,12 +2545,14 @@ contexts: bold: # https://spec.commonmark.org/0.30/#emphasis-and-strong-emphasis - - match: (\*\*)(\*)(?=\S)(?!\*) + - match: '{{bold_italic_asterisk_begin}}' captures: 1: punctuation.definition.bold.begin.markdown 2: markup.italic.markdown punctuation.definition.italic.begin.markdown + 3: punctuation.definition.bold.begin.markdown + 4: markup.italic.markdown punctuation.definition.italic.begin.markdown push: bold-italic-asterisk - - match: \*\*(?=\S)(?!\*\*|\*\s) + - match: '{{bold_asterisk_begin}}' scope: punctuation.definition.bold.begin.markdown push: bold-asterisk - match: \b(__)(_)(?=\S)(?!_) @@ -2668,7 +2694,7 @@ contexts: - include: strikethrough italic: - - match: \*(?=\S)(?!\*) + - match: '{{italic_asterisk_begin}}' scope: punctuation.definition.italic.begin.markdown push: italic-asterisk - match: \b_(?=\S)(?!_)
Update sso-ldap.md Added a few issues to the Troubleshooting section.
@@ -73,3 +73,17 @@ Note: Currently the value is case sensitive. If the ID attribute is set to the u This indicates your AD/LDAP server configuration has a maximum page size set and the query coming from Mattermost is returning a result set in excess of that limit. To address this issue you can set the [max page size](https://docs.mattermost.com/administration/config-settings.html#maximum-page-size) for Mattermost's AD/LDAP configuration. This will return a sequence of result sets below the max page size set in AD/LDAP, rather than returning all results in a single query. + +##### How do I include all users in an LDAP sync if they're not logged in? + +You can sync users, even if they're not logged in, by using the [command line tools](https://docs.mattermost.com/administration/command-line-tools.html), in particular, the [platform user tools](https://docs.mattermost.com/administration/command-line-tools.html#platform-user). + +##### Where can I find help on LDAP configuration settings in config.json? + +You can find an explanation of each of the configuration settings [here](https://docs.mattermost.com/administration/config-settings.html#ad-ldap). + +##### Can the LDAP User Filter read Security groups? + +In order for the LDAP user filter to read security groups, check that: + - permissions are correctly configured on the service account you are using + - each user object is a direct member of the security group
Update _percentile.py Returned `math.nan` instead of `math.inf` for simplicity and readability.
@@ -39,7 +39,7 @@ def _get_percentile_intermediate_result_over_trials( is_maximize = direction == StudyDirection.MAXIMIZE if not intermediate_values: - return -math.inf if is_maximize else math.inf + return math.nan if is_maximize: percentile = 100 - percentile
fix _create_stream and tornado 5.0 This should be the last fix for tornado 5.0
@@ -603,10 +603,10 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra self.payload_handler = payload_handler self.io_loop = io_loop self.serial = salt.payload.Serial(self.opts) + with salt.utils.async.current_ioloop(self.io_loop): if USE_LOAD_BALANCER: self.req_server = LoadBalancerWorker(self.socket_queue, self.handle_message, - io_loop=self.io_loop, ssl_options=self.opts.get('ssl')) else: if salt.utils.platform.is_windows(): @@ -616,7 +616,6 @@ class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.tra self._socket.setblocking(0) self._socket.bind((self.opts['interface'], int(self.opts['ret_port']))) self.req_server = SaltMessageServer(self.handle_message, - io_loop=self.io_loop, ssl_options=self.opts.get('ssl')) self.req_server.add_socket(self._socket) self._socket.listen(self.backlog) @@ -704,6 +703,7 @@ class SaltMessageServer(tornado.tcpserver.TCPServer, object): ''' def __init__(self, message_handler, *args, **kwargs): super(SaltMessageServer, self).__init__(*args, **kwargs) + self.io_loop = tornado.ioloop.IOLoop.current() self.clients = [] self.message_handler = message_handler @@ -807,7 +807,9 @@ class TCPClientKeepAlive(tornado.tcpclient.TCPClient): stream = tornado.iostream.IOStream( sock, max_buffer_size=max_buffer_size) + if tornado.version_info < (5,): return stream.connect(addr) + return stream, stream.connect(addr) class SaltMessageClientPool(salt.transport.MessageClientPool): @@ -970,7 +972,8 @@ class SaltMessageClient(object): with salt.utils.async.current_ioloop(self.io_loop): self._stream = yield self._tcp_client.connect(self.host, self.port, - ssl_options=self.opts.get('ssl')) + ssl_options=self.opts.get('ssl'), + **kwargs) self._connecting_future.set_result(True) break except Exception as e:
Update apt_kimsuky.txt Adding ```Aliases``` field.
# Copyright (c) 2014-2020 Maltrail developers (https://github.com/stamparm/maltrail/) # See the file 'LICENSE' for copying permission +# Aliases: Black Banshee, Velvet Chollima + # Reference: https://otx.alienvault.com/pulse/5c93c4e48312d159728a9d78 # Reference: https://blog.alyac.co.kr/2209 (Korean)
Update ref_after_reparse for the new RefEnvs interface TN:
@@ -10,7 +10,7 @@ import os.path from langkit.compiled_types import ASTNode, Field, T, root_grammar_class from langkit.diagnostics import Diagnostics -from langkit.envs import EnvSpec, add_to_env +from langkit.envs import EnvSpec, RefEnvs, add_to_env from langkit.expressions import Env, New, Self, langkit_property from langkit.parsers import Grammar, List, Row, Tok @@ -37,6 +37,10 @@ class Name(FooNode): def ambiant_entity(): return Env.get(Self.sym).at(0) + @langkit_property(has_implicit_env=True) + def designated_env(): + return Self.unit.root.node_env.get(Self.sym).at(0).children_env + @langkit_property(public=True) def entity(): return Self.node_env.eval_in_env(Self.ambiant_entity) @@ -65,7 +69,8 @@ class Decl(FooNode): class Using(FooNode): name = Field() env_spec = EnvSpec( - ref_envs=Self.name.ambiant_entity.children_env.singleton + ref_envs=[RefEnvs(Name.fields.designated_env, + Self.name.cast(FooNode).to_array)] )
Adds reference to DarshanDeshpande/jax-models h/t for for creating this, and to for reporting it.
@@ -94,8 +94,9 @@ submit your own example, we suggest that you start by forking one of the official Flax example, and start from there. | Link | Author | Task type | Reference | -| ---------------------------- | ------------------ | --------------------------------- | --------------------------------------------------------------------- | +| ----------------------------- | ------------------ | --------------------------------- | --------------------------------------------------------------------- | | [matthias-wright/flaxmodels] | [@matthias-wright] | Various | GPT-2, ResNet, StyleGAN-2, VGG, ... | +| [DarshanDeshpande/jax-models] | [@DarshanDeshpande] | Various | Segformer, Swin Transformer, ... also some stand-alone layers | | [google/vision_transformer] | [@andsteing] | Image classification, fine-tuning | https://arxiv.org/abs/2010.11929 and https://arxiv.org/abs/2105.01601 | | [JAX-RL] | [@henry-prior] | Reinforcement learning | N/A | | [DCGAN] Colab | [@bkkaggle] | Image Synthesis | https://arxiv.org/abs/1511.06434 | @@ -103,12 +104,14 @@ official Flax example, and start from there. | [jax-resnet] | [@n2cholas] | Various resnet implementations | `torch.hub` | [matthias-wright/flaxmodels]: https://github.com/matthias-wright/flaxmodels +[DarshanDeshpande/jax-models]: https://github.com/DarshanDeshpande/jax-models [google/vision_transformer]: https://github.com/google-research/vision_transformer [JAX-RL]: https://github.com/henry-prior/jax-rl [DCGAN]: https://github.com/bkkaggle/jax-dcgan [BigBird Fine-tuning]: https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects/big_bird [jax-resnet]: https://github.com/n2cholas/jax-resnet [@matthias-wright]: https://github.com/matthias-wright +[@DarshanDeshpande]: https://github.com/DarshanDeshpande [@andsteing]: https://github.com/andsteing [@henry-prior]: https://github.com/henry-prior [@bkkaggle]: https://github.com/bkkaggle
STY: Small style fixes in numerictypes. [ci skip]
@@ -347,8 +347,7 @@ def _add_integer_aliases(): for info, charname, intname, Intname in [ (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits), - (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits) - ]: + (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]: if intname not in allTypes.keys(): allTypes[intname] = info.type sctypeDict[intname] = info.type
Keep all-users This is to handle existing PRs that may already be tagged all-users
@@ -10,7 +10,8 @@ from gevent.pool import Pool LABELS_TO_EXPAND = [ "product/all-users-all-environments", "product/prod-india-all-users", - "product/feature-flag" + "product/feature-flag", + "product/all-users", ]
Fixed bug that caused the libdoc task to crash when fed this file As a side benefit, I removed some duplicate code when computing the keyword names. I also fixed the library documentation by using robot's markup for preformatted code (leading pipe).
from robot.api import logger -from robot.libraries.BuiltIn import BuiltIn -from .baseobjects import BasePage +from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError +from cumulusci.robotframework.pageobjects.baseobjects import BasePage import inspect import robot.utils import os import sys +def get_keyword_names(obj): + """Returns a list of method names for the given object + + This excludes methods that begin with an underscore, and + also excludes the special method `get_keyword_names`. + """ + names = [ + member[0] + for member in inspect.getmembers(obj, inspect.isroutine) + if (not member[0].startswith("_")) and member[0] != "get_keyword_names" + ] + return names + + class PageObjects(object): - """Dynamic robot library for importing and using page objects + """Keyword library for importing and using page objects When importing, you can include one or more paths to python files that define page objects. For example, if you have a set of classes in robot/HEDA/resources/PageObjects.py, you can import this library into a test case like this: - Library cumulusci.robotframework.PageObjects - ... robot/HEDA/resources/PageObjects.py + | Library cumulusci.robotframework.PageObjects + | ... robot/HEDA/resources/PageObjects.py """ @@ -38,7 +52,13 @@ class PageObjects(object): # Start with this library at the front of the library search order; # that may change as page objects are loaded. + try: BuiltIn().set_library_search_order("PageObjects") + except RobotNotRunningError: + # this should only happen when trying to load this library + # via the robot_libdoc task, in which case we don't care + # whether this throws an error or not. + pass @classmethod def _reset(cls): @@ -69,29 +89,16 @@ class PageObjects(object): """ This method is required by robot's dynamic library api """ - names = [name for name in dir(self) if self._is_keyword(name, self)] + names = get_keyword_names(self) if self.current_page_object is not None: - names = names + [ - name - for name in dir(self.current_page_object) - if self._is_keyword(name, self.current_page_object) - ] + names = names + get_keyword_names(self.current_page_object) return names - def _is_keyword(self, name, source): - return ( - not name.startswith("_") - and name != "get_keyword_names" - and inspect.isroutine(getattr(source, name)) - ) - def log_page_object_keywords(self): """Logs page objects and their keywords for all page objects which have been imported""" for key in sorted(self.registry.keys()): pobj = self.registry[key] - keywords = sorted( - [method for method in dir(pobj) if self._is_keyword(method, pobj)] - ) + keywords = get_keyword_names(pobj) logger.info("{}: {}".format(key, ", ".join(keywords))) def _get_page_object(self, page_type, object_name):
s/ditionary/dictionary Summary: Spelling is hard Test Plan: Read Reviewers: sandyryza
@@ -27,7 +27,7 @@ class ModeDefinition( resource_defs (Optional[Dict[str, ResourceDefinition]]): A dictionary of string resource keys to their implementations. Individual solids may require resources to be present by these keys. - logger_defs (Optional[Dict[str, LoggerDefinition]]): A ditionary of string logger + logger_defs (Optional[Dict[str, LoggerDefinition]]): A dictionary of string logger identifiers to their implementations. system_storage_defs (Optional[List[SystemStorageDefinition]]): The set of system storage options available when executing in this mode. By default, this will be the 'in_memory'
[Chore] Fix 'Attach bottles to the release' dependencies Problem: Currently this step depends on non-architecture specific step 'uninstall-tsp' which is no longer present. Solution: Make it depend on arch-specific uninstall-tsp steps.
@@ -87,10 +87,11 @@ done ymlappend " - label: Add Big Sur bottle hashes to formulae - depends_on: - - \"uninstall-tsp-arm64\" - - \"uninstall-tsp-x86_64\" - if: build.tag =~ /^v.*/ + depends_on:" +for arch in "${architecture[@]}"; do + ymlappend " - \"uninstall-tsp-$arch\"" +done + ymlappend " if: build.tag =~ /^v.*/ soft_fail: true # No artifacts to download if all the bottles are already built commands: - mkdir -p \"Big Sur\" @@ -99,9 +100,11 @@ ymlappend " - nix-shell ./scripts/shell.nix --run './scripts/sync-bottle-hashes.sh \"\$\$FORMULA_TAG\" \"Big Sur\"' - label: Attach bottles to the release - depends_on: - - \"uninstall-tsp\" - if: build.tag =~ /^v.*/ + depends_on:" +for arch in "${architecture[@]}"; do + ymlappend " - \"uninstall-tsp-$arch\"" +done + ymlappend " if: build.tag =~ /^v.*/ soft_fail: true # No artifacts to download if all the bottles are already built commands: - buildkite-agent artifact download \"*bottle.tar.gz\" .
No longer suggest removal of migration code We might as well keep it in, as we'll only ever have to run any of this command again if there's another major BNF release.
@@ -53,9 +53,9 @@ For each old code -> new code mapping, in reverse order of date data, our measures, and so on, henceforward. * Replace all the codes that have new normalised versions in all local - version of the prescribing data. (This method will be removed once - run, as it's only ever needed for an initial migration; look in git - history if interested) + version of the prescribing data. (If this command ever needs + running again, some time could be saved by applying this only to + prescribing data downloaded since the last this command was run) * Iterate over all known BNF codes, sections, paragraphs etc, looking for codes which have never been prescribed, and mark these as not @@ -285,7 +285,6 @@ def update_existing_prescribing(): p.current_version.bnf_code, p.bnf_code) ) - print "Now delete `update_existing_prescribing` migration" def create_bigquery_views():
travis: Reduce test verbosity The errors are still reported in detail
@@ -59,7 +59,7 @@ install: script: - if [ "x$RUN_COV" != "x" ] ; then echo "Running with coverage"; export COV_ARGS="--cov=psyneulink"; else echo "Running without coverage"; export COV_ARGS=""; fi - - pytest -n auto -p no:logging $COV_ARGS + - pytest -n auto -p no:logging --verbosity=0 $COV_ARGS after_script: - if [ "x$RUN_COV" != "x" ] ; then coveralls; fi
langkit.parsers.Transform: refactor compute_fields_types TN:
@@ -1541,7 +1541,14 @@ class _Transform(Parser): def get_type(self): return resolve_type(self.typ) - def compute_fields_types(self): + @property + def fields_parsers(self): + """ + Return the list of parsers that return values for the fields in the + node this parser creates. + + :rtype: list[Parser] + """ typ = self.get_type() # Sub-parsers for Token nodes must parse exactly one token @@ -1557,26 +1564,23 @@ class _Transform(Parser): typ.dsl_name, self.parser ) ) - fields_types = [] + return [] # Gather field types that come from all child parsers elif isinstance(self.parser, _Row): # There are multiple fields for _Row parsers - fields_types = [parser.get_type() - for parser in self.parser.parsers - if not parser.discard()] + return [p for p in self.parser.parsers if not p.discard()] elif isinstance(self.parser, _Token): - fields_types = [] + return [] else: - fields_types = [self.parser.get_type()] + return [self.parser] - assert all(t for t in fields_types), ( - "Internal error when computing field types for {}:" - " some are None: {}".format(self.typ, fields_types) - ) + def compute_fields_types(self): + fields_types = [p.get_type() for p in self.fields_parsers] # Check that the number of values produced by self and the number of # fields in the destination node are the same. + typ = self.get_type() nb_transform_values = len(fields_types) nb_fields = len(typ.get_parse_fields( predicate=lambda f: not f.abstract and not f.null))
multiprocessing.pool: Fix return of map_async() Closes
@@ -46,7 +46,7 @@ class Pool(ContextManager[Pool]): iterable: Iterable[_S] = ..., chunksize: Optional[int] = ..., callback: Optional[Callable[[_T], None]] = ..., - error_callback: Optional[Callable[[BaseException], None]] = ...) -> MapResult[List[_T]]: ... + error_callback: Optional[Callable[[BaseException], None]] = ...) -> MapResult[_T]: ... def imap(self, func: Callable[[_S], _T], iterable: Iterable[_S] = ...,
Add an example with `constrain='domain'` Add the following example demonstrating Fixed Ratio Axes with Compressed domain to the [axes tutorial](https://plot.ly/python/axes/).
@@ -481,6 +481,37 @@ fig.update_layout( fig.show() ``` +### Fixed Ratio Axes with Compressed domain + +If an axis needs to be compressed (either due to its own `scaleanchor` and `scaleratio` or those of the other axis), `constrain` determines how that happens: by increasing the "range" (default), or by decreasing the "domain". + +```python +import plotly.graph_objects as go + +fig = go.Figure() + +fig.add_trace(go.Scatter( + x = [0,1,1,0,0,1,1,2,2,3,3,2,2,3], + y = [0,0,1,1,3,3,2,2,3,3,1,1,0,0] +)) + +fig.update_layout( + width = 800, + height = 500, + title = "fixed-ratio axes with compressed axes", + xaxis = dict( + range=[-1,4], # sets the range of xaxis + constrain="domain", # meanwhile compresses the xaxis by decreasing its "domain" + ), + yaxis = dict( + scaleanchor = "x", + scaleratio = 1, + ), +) + +fig.show() +``` + #### Reversed Axes You can tell plotly's automatic axis range calculation logic to reverse the direction of an axis by setting the `autorange` axis property to `"reversed"`.
bugfix for BrozzlerWorker._needs_browsing I'm sorry but with my previous commit I introduced a bug in the ``BrozzlerWorker._needs_browsing`` method. More specifically, if the ``brozzler_spy`` param is False (this happens when ``youtube_dl`` is disabled), the ``_needs_browsing`` method always returns ``False`` and this messes with the ``brozzle_page`` workflow. The browser never visits the page. I'm fixing this here.
@@ -448,6 +448,8 @@ class BrozzlerWorker: 'text/html', 'application/xhtml+xml']: return True return False + else: + return True def _already_fetched(self, page, brozzler_spy): if brozzler_spy:
llvm: Use whitelist of allowed characters for function names Fixes autodiff compiled test with cuda testing enabled.
@@ -90,7 +90,7 @@ class LLVMBuilderContext: @classmethod def get_unique_name(cls, name: str): cls.__uniq_counter += 1 - name = re.sub(r"[- ()\[\]]", "_", name) + name = re.sub(r"[^a-zA-Z0-9_]", "_", name) return name + '_' + str(cls.__uniq_counter) def get_builtin(self, name: str, args=[], function_type=None):
Make default parameters consistent This will make things easier to reason about.
[Global_Params] model_name = 'darts_uno' -unrolled = True +unrolled = False data_url = 'ftp.mcs.anl.gov/pub/candle/public/benchmarks/Pilot1/uno/' savepath = './results' log_interval = 10
not use bottleneck change ffil_... method to not use bottleneck (in native xarray ffill)
@@ -1079,7 +1079,15 @@ class DataRecord(Dataset): """ # Forward fill element_id: - self['element_id'] = self['element_id'].ffill('time') + fill_value=[] + ei = self['element_id'].values + for i in range(ei.shape[0]): + for j in range(ei.shape[1]): + if np.isnan(ei[i,j]): + ei[i,j]=fill_value + else: + fill_value=ei[i,j] + self['element_id'] = (['item_id', 'time'], ei) # Can't do ffill to grid_element because str/nan, so: fill_value='' ge = self['grid_element'].values
Fix SESSION_COOKIE_SECURE & CSRF_COOKIE_SECURE values These were previously always being set to False.
@@ -962,9 +962,6 @@ REQUIRE_TWO_FACTOR_FOR_SUPERUSERS = False # that adds messages to the partition with the fewest unprocessed messages USE_KAFKA_SHORTEST_BACKLOG_PARTITIONER = False -SESSION_COOKIE_SECURE = CSRF_COOKIE_SECURE = not DEBUG -SESSION_COOKIE_HTTPONLY = CSRF_COOKIE_HTTPONLY = True - try: # try to see if there's an environmental variable set for local_settings custom_settings = os.environ.get('CUSTOMSETTINGS', None) @@ -994,6 +991,13 @@ if callable(COMPRESS_ENABLED): COMPRESS_ENABLED = COMPRESS_ENABLED() if callable(COMPRESS_OFFLINE): COMPRESS_OFFLINE = COMPRESS_OFFLINE() + +# These default values can't be overridden. +# Should you someday need to do so, use the lambda/if callable pattern above +SESSION_COOKIE_SECURE = CSRF_COOKIE_SECURE = lambda: not DEBUG +SESSION_COOKIE_HTTPONLY = CSRF_COOKIE_HTTPONLY = True + + if UNIT_TESTING: # COMPRESS_COMPILERS overrides COMPRESS_ENABLED = False, so must be # cleared to disable compression completely. CSS/less compression is
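A small self-contained sketch (illustrative only; the names mirror the settings involved but this is not the project's code) of the lambda/if-callable pattern the new comment refers to: the value is deferred as a callable so local settings can still change `DEBUG` before it is resolved.

```python
DEBUG = False

# Deferred: evaluated only after custom/local settings had a chance to run.
SESSION_COOKIE_SECURE = lambda: not DEBUG

# ... local settings imported here could still flip DEBUG ...

if callable(SESSION_COOKIE_SECURE):
    SESSION_COOKIE_SECURE = SESSION_COOKIE_SECURE()

print(SESSION_COOKIE_SECURE)  # True when DEBUG is False
```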
Update translations properly This adds multiple language support for validation messages, refactors it for labels, and fixes it for hints. Previously, changing a hint in one language would delete that hint for all other languages.
@@ -1009,22 +1009,19 @@ def _update_search_properties(module, search_properties, lang='en'): True """ - current = {p.name: p.label for p in module.search_config.properties} + props_by_name = {p.name: p for p in module.search_config.properties} for prop in search_properties: - if prop['name'] in current: - label = current[prop['name']].copy() - label.update({lang: prop['label']}) - else: - label = {lang: prop['label']} - hint = {lang: prop['hint']} + current = props_by_name.get(prop['name']) + + _current_label = current.label if current else {} + _current_hint = current.hint if current else {} ret = { 'name': prop['name'], - 'label': label, + 'label': {**_current_label, lang: prop['label']}, + 'hint': {**_current_hint, lang: prop['hint']}, } if prop['default_value']: ret['default_value'] = prop['default_value'] - if prop['hint']: - ret['hint'] = hint if prop['hidden']: ret['hidden'] = prop['hidden'] if prop['allow_blank_value']: @@ -1034,9 +1031,10 @@ def _update_search_properties(module, search_properties, lang='en'): if prop['required']: ret['required'] = prop['required'] if prop['validation_xpath']: + _current_msg = current.validation[0].message if current and current.validation else {} ret['validation'] = [{ 'xpath': prop['validation_xpath'], - 'message': {'en': prop['validation_message']}, # TODO + 'message': {**_current_msg, lang: prop['validation_message']}, }] if prop.get('appearance', '') == 'fixture': if prop.get('is_multiselect', False):
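A small standalone sketch (not part of the change) of the dict-merge pattern the fix relies on: spreading the existing per-language dict first and then overwriting only the current language keeps the other translations intact.

```python
# Existing hint translations stored on the property.
current_hint = {"en": "Enter a name", "fra": "Entrez un nom"}

# Updating only the French hint must not drop the English one.
lang = "fra"
new_hint = "Entrez le nom complet"

merged = {**current_hint, lang: new_hint}
print(merged)  # {'en': 'Enter a name', 'fra': 'Entrez le nom complet'}
```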
Bug in reporting missing objective Was: Eating exceptions that should cause batch to terminate with error Printing traceback for handled error
@@ -289,9 +289,6 @@ class Batch(object): except StopBatch as e: if e.error: raise - except Exception as e: - log.exception( - "error running trial %s: %s", trial.run_id, e) def _run_trial(self, trial, trial_runs, init_only): self._apply_existing_run_id(trial, trial_runs)
Proposition of enhancement We should consider the absolute value of the object height and the magnification, because if the display range is 10 but the object height or image height is -20, the display range is not big enough.
@@ -693,14 +693,16 @@ class ImagingPath(MatrixGroup): """ displayRange = self.largestDiameter - if displayRange == float('+Inf') or displayRange <= 2 * self.objectHeight: - displayRange = 2 * self.objectHeight + objHeight = abs(self.objectHeight) + if displayRange == float('+Inf') or displayRange <= 2 * objHeight: + displayRange = 2 * objHeight conjugates = self.intermediateConjugates() if len(conjugates) != 0: for (planePosition, magnification) in conjugates: - if displayRange < self.objectHeight * magnification: - displayRange = self.objectHeight * magnification + magnification = abs(magnification) + if displayRange < objHeight * magnification: + displayRange = objHeight * magnification return displayRange def createRayTracePlot(
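A tiny numeric sketch (illustrative values only, simplified from the method above) of why the magnitudes matter: with a nominal display range of 10 and an object height of -20, comparing signed values would leave the range at 10, while comparing absolute values grows it to 40.

```python
def display_range(largest_diameter, object_height, magnifications):
    # Simplified version of displayRange(), working with magnitudes throughout.
    obj_height = abs(object_height)
    rng = largest_diameter
    if rng <= 2 * obj_height:
        rng = 2 * obj_height
    for magnification in magnifications:
        magnification = abs(magnification)
        if rng < obj_height * magnification:
            rng = obj_height * magnification
    return rng

print(display_range(10, -20, [-1.5]))  # 40: driven by abs(object_height)
```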
BackdropNodeGadget : Fix GIL management for `frame()` method `extend_container()` uses the Python API, so we can't release the GIL until after we call it.
@@ -134,9 +134,10 @@ GadgetPtr getEdgeGadget( StandardNodeGadget &g, StandardNodeGadget::Edge edge ) void frame( BackdropNodeGadget &b, object nodes ) { - IECorePython::ScopedGILRelease gilRelease; std::vector<Node *> n; boost::python::container_utils::extend_container( n, nodes ); + + IECorePython::ScopedGILRelease gilRelease; b.frame( n ); }
Note non-standard JSON grammar in tutorial Closes
@@ -79,7 +79,10 @@ By the way, if you're curious what these terminals signify, they are roughly equ Lark will accept this, if you really want to complicate your life :) -(You can find the original definitions in [common.lark](/lark/grammars/common.lark).) +You can find the original definitions in [common.lark](/lark/grammars/common.lark). +They're a bit looser than [json.org](https://json.org/) - accepting `\f` as whitespace, +`.1` as a number, unicode digits, and unescaped control characters in strings - but +strict standards-compliance would make for a somewhat longer tutorial. Notice that terminals are written in UPPER-CASE, while rules are written in lower-case. I'll touch more on the differences between rules and terminals later.
Make HasField and ClearField use Text instead of str This allows one to write `x.HasField("ok")` even if the file has `from __future__ import unicode_literals`
-from typing import Any, Sequence, Optional, Tuple +from typing import Any, Sequence, Optional, Text, Tuple from .descriptor import FieldDescriptor @@ -21,8 +21,8 @@ class Message: def SerializeToString(self) -> str: ... def SerializePartialToString(self) -> str: ... def ListFields(self) -> Sequence[Tuple[FieldDescriptor, Any]]: ... - def HasField(self, field_name: str) -> bool: ... - def ClearField(self, field_name: str) -> None: ... + def HasField(self, field_name: Text) -> bool: ... + def ClearField(self, field_name: Text) -> None: ... def WhichOneof(self, oneof_group) -> Optional[str]: ... def HasExtension(self, extension_handle): ... def ClearExtension(self, extension_handle): ...
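A hedged, self-contained sketch (the `has_field` function below is a stand-in, not the real protobuf API) of the situation the stub change accommodates: under `unicode_literals` every literal is `unicode` on Python 2, which a plain `str` annotation would flag while `Text` accepts it on both Python 2 and 3.

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from typing import Text


def has_field(field_name):
    # type: (Text) -> bool
    """Stand-in for Message.HasField; Text covers str and unicode literals."""
    return field_name == "ok"


# With unicode_literals this literal is unicode on Python 2 and str on
# Python 3; an annotation of plain `str` would reject the Python 2 case.
print(has_field("ok"))  # True
```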
DOC: update logarithm docs as per theory. Made some changes addressing the discussions in Issue . What this PR updates: the range of log functions in the docs; adds additional notes on the theoretical range outcomes (e.g. inputs whose result evaluates to exactly -pi).
@@ -2011,7 +2011,7 @@ def add_newdoc(place, name, doc): ----- Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `exp(z) = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi, pi]`. + `z` whose imaginary part lies in `(-pi, pi]`. For real-valued input data types, `log` always returns real output. For each value that cannot be expressed as a real number or infinity, it @@ -2022,6 +2022,10 @@ def add_newdoc(place, name, doc): handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. + In the cases where the input has a negative real part and a very small + negative complex part (approaching 0), the result is so close to `-pi` + that it evaluates to exactly `-pi`. + References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", @@ -2061,7 +2065,7 @@ def add_newdoc(place, name, doc): ----- Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `10**z = x`. The convention is to return the - `z` whose imaginary part lies in `[-pi, pi]`. + `z` whose imaginary part lies in `(-pi, pi]`. For real-valued input data types, `log10` always returns real output. For each value that cannot be expressed as a real number or infinity, @@ -2072,6 +2076,10 @@ def add_newdoc(place, name, doc): `log10` handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. + In the cases where the input has a negative real part and a very small + negative complex part (approaching 0), the result is so close to `-pi` + that it evaluates to exactly `-pi`. + References ---------- .. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions", @@ -2112,7 +2120,7 @@ def add_newdoc(place, name, doc): Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `2**z = x`. The convention is to return the `z` - whose imaginary part lies in `[-pi, pi]`. + whose imaginary part lies in `(-pi, pi]`. For real-valued input data types, `log2` always returns real output. For each value that cannot be expressed as a real number or infinity, @@ -2123,6 +2131,10 @@ def add_newdoc(place, name, doc): handles the floating-point negative zero as an infinitesimal negative number, conforming to the C99 standard. + In the cases where the input has a negative real part and a very small + negative complex part (approaching 0), the result is so close to `-pi` + that it evaluates to exactly `-pi`. + Examples -------- >>> x = np.array([0, 1, 2, 2**4])
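A short sketch (editorial, not from the PR) of the branch-cut behaviour the added note describes, assuming NumPy's C99-conformant complex log: on the negative real axis a positive-zero imaginary part maps to +pi and a negative-zero imaginary part maps to -pi.

```python
import numpy as np

upper = complex(-1.0, 0.0)   # approaches the negative real axis from above
lower = complex(-1.0, -0.0)  # approaches it from below (negative zero)

print(np.log(upper))  # expected: roughly 0+3.14159265j
print(np.log(lower))  # expected: roughly 0-3.14159265j
```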
Change scale TC fixture scope to "Class" This is a partial fix for 1. Change the TC fixture scope to class; the session scope was creating problems for other test executions. 2. Planning to re-work the entire TC to implement it using kube jobs; the complete fix will be done there.
@@ -13,7 +13,7 @@ from ocs_ci.framework.pytest_customization.marks import ( log = logging.getLogger(__name__) [email protected](scope='session') [email protected](scope='class') def fioscale(request): """ FIO Scale fixture to create expected number of POD+PVC
Fixed bug where array annotations were copied when returning them When getting only a part of the annotations and altering it, the user might have the intention of altering the original object.
@@ -124,7 +124,7 @@ class DataObject(BaseNeo, pq.Quantity): # if not possible, numpy raises an Error for ann in self.array_annotations.keys(): # NO deepcopy, because someone might want to alter the actual object using this - index_annotations[ann] = self.array_annotations[ann][index].copy() + index_annotations[ann] = self.array_annotations[ann][index] return index_annotations
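A generic NumPy sketch (not the Neo code itself) of the view-versus-copy distinction the fix relies on: slicing without `.copy()` returns a view, so in-place edits propagate back to the original array.

```python
import numpy as np

annotations = np.array([1.0, 2.0, 3.0, 4.0])

view = annotations[1:3]      # no .copy(): shares memory with the original
view[0] = 99.0
print(annotations)           # [ 1. 99.  3.  4.] -> the original changed

copied = annotations[1:3].copy()  # independent buffer
copied[0] = -1.0
print(annotations)           # still [ 1. 99.  3.  4.]; the copy is separate
```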
[trivial] Fix mistaken variable rename In an earlier change the leading 'u' was removed from the variable name, resulting in the variable being renamed by mistake. This patch corrects it. Related-Bug: rhbz#1920293
@@ -136,7 +136,7 @@ outputs: upgrade_leapp_debug: {get_param: UpgradeLeappDebug} upgrade_leapp_devel_skip: {get_param: UpgradeLeappDevelSkip} upgrade_leapp_command_options: {get_param: UpgradeLeappCommandOptions} - pgrade_leapp_reboot_timeout: {get_param: UpgradeLeappRebootTimeout} + upgrade_leapp_reboot_timeout: {get_param: UpgradeLeappRebootTimeout} upgrade_leapp_post_reboot_delay: {get_param: UpgradeLeappPostRebootDelay} vars: _upgradeLeappEnabled: {get_param: UpgradeLeappEnabled}
MAINT: Remove unnecessary list creation `filter_types` isn't used anywhere else. List creation may already be optimized out, but it's a little more readable, at least.
@@ -5238,13 +5238,12 @@ def iircomb(w0, Q, ftype='notch', fs=2.0): # Check for invalid cutoff frequency or filter type ftype = ftype.lower() - filter_types = ['notch', 'peak'] if not 0 < w0 < fs / 2: raise ValueError("w0 must be between 0 and {}" " (nyquist), but given {}.".format(fs / 2, w0)) if np.round(fs % w0) != 0: raise ValueError('fs must be divisible by w0.') - if ftype not in filter_types: + if ftype not in ('notch', 'peak'): raise ValueError('ftype must be either notch or peak.') # Compute the order of the filter @@ -5259,9 +5258,9 @@ def iircomb(w0, Q, ftype='notch', fs=2.0): # Compute -3dB attenuation # Eqs. 11.4.1 and 11.4.2 (p. 582) from reference [1] if ftype == 'notch': - G0, G = [1, 0] + G0, G = 1, 0 elif ftype == 'peak': - G0, G = [0, 1] + G0, G = 0, 1 GB = 1 / np.sqrt(2) # Compute beta