travis-ci: cibuildwheel: look for custom binaries in /opt/bin first With $PATH escaped so it appends the cibuildwheel PATH and not the one exported from travis-ci.
@@ -67,6 +67,7 @@ jobs: - CIBW_PLATFORM=linux - CIBW_SKIP="cp26-* cp33-*" - CIBW_BEFORE_BUILD="rpm -ivh /host/$HOME/downloads/$(uname -m)/*.rpm && {pip} install -r requirements/wheel.txt -r requirements/test.txt" + - CIBW_ENVIRONMENT="PATH=/opt/bin:\$PATH" - CIBW_TEST_COMMAND="py.test --pyargs pkgcore" before_script: - git clone https://github.com/pkgcore/pkgdist.git
Transfers: fix log message join() doesn't automatically call str() on elements, so an exception is raised
@@ -280,7 +280,7 @@ class DirectTransferDefinition: self.legacy_def = legacy_definition or {} def __str__(self): - return 'transfer {} from {} to {}'.format(self.rws, ' and '.join(self.sources), self.dst.rse) + return 'transfer {} from {} to {}'.format(self.rws, ' and '.join([str(s) for s in self.sources]), self.dst.rse) @property def src(self):
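For context, str.join() requires every element to already be a string; here is a minimal standalone sketch of the behaviour this fix addresses (the `sources` list is a made-up example, not the patched Rucio code):

sources = [42, "site-A"]                      # hypothetical mix of non-string and string elements
try:
    " and ".join(sources)                     # join() does not call str() for us
except TypeError as exc:
    print(exc)                                # sequence item 0: expected str instance, int found
print(" and ".join(str(s) for s in sources))  # 42 and site-A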
[core/config] Add stub for autohide Add stub for autohide so that my status bar doesn't terminate with an error (easier testing)
@@ -18,6 +18,8 @@ class Config(util.store.Store): parser.add_argument('-t', '--theme', default='default', help=THEME_HELP) parser.add_argument('-i', '--iconset', default='auto', help='Specify the name of an iconset to use (overrides theme default)') + parser.add_argument("-a", "--autohide", nargs="+", default=[], + help="Specify a list of modules to hide when not in warning/error state") self._args = parser.parse_args(args) parameters = [ item for sub in self._args.parameters for item in sub ]
Fixing seed in string_game unit test Summary: This test is flaky on jenkins
import json import logging +import random import unittest from typing import List +import numpy as np import torch from ml.rl.test.base.horizon_test_base import HorizonTestBase from ml.rl.test.gym.world_model.state_embed_gym import ( @@ -23,6 +25,9 @@ DQN_STRING_GAME_JSON = "ml/rl/test/configs/discrete_dqn_string_game_v0.json" class TestStateEmbedGym(HorizonTestBase): def setUp(self): logging.getLogger().setLevel(logging.INFO) + torch.manual_seed(0) + np.random.seed(0) + random.seed(0) super().setUp() @staticmethod
Update NOTICE License name change
@@ -9,7 +9,7 @@ Version 2.0, an Apache-compatible license. * For a copy of the Apache License Version 2.0, please see LICENSE as included in this repository's top-level directory. -* For a copy of the Neural Magic Engine License, please see LICENSE-NEURALMAGIC +* For a copy of the Neural Magic DeepSparse Community License, please see LICENSE-NEURALMAGIC as included in the Neural Magic's "deepsparse" repository. * For a copy of all other Apache-compatible licenses and notices,
Update the error message check in a test wrt cloudify-cosmo/cloudify-common#262
@@ -459,7 +459,7 @@ class DeploymentsTestCase(base_test.BaseServerTestCase): def test_input_violates_constraint_data_type(self): self.assertRaisesRegexp( CloudifyClientError, - "Value's length could not be computed. Value type is '.+int.+'\\.", + "Value's length could not be computed. Value type is 'int'\\.", self.put_deployment, blueprint_id='b9703', blueprint_file_name='blueprint_with_inputs_and_constraints.yaml',
Update alienvault_ip_reputation.py fixes
@@ -51,6 +51,8 @@ class AlienVaultIPReputation(Feed): context["country"] = country context["threat"] = category + context['reliability'] = item['number_1'] + context['risk'] = item['number_2'] ip.tag(category) ip.add_context(context)
Readded motion blur preprocessor from SceneView. Now that RenderController supports motion blur, we actually need this again.
@@ -1621,11 +1621,23 @@ SceneView::SceneView( const std::string &name ) preprocessor->addChild( m_drawingMode->preprocessor() ); m_drawingMode->preprocessor()->inPlug()->setInput( m_shadingMode->preprocessor()->outPlug() ); + + // remove motion blur, because the opengl renderer doesn't support it. + + StandardOptionsPtr standardOptions = new StandardOptions( "disableBlur" ); + standardOptions->optionsPlug()->getChild<NameValuePlug>( "transformBlur" )->enabledPlug()->setValue( true ); + standardOptions->optionsPlug()->getChild<NameValuePlug>( "transformBlur" )->valuePlug<BoolPlug>()->setValue( false ); + standardOptions->optionsPlug()->getChild<NameValuePlug>( "deformationBlur" )->enabledPlug()->setValue( true ); + standardOptions->optionsPlug()->getChild<NameValuePlug>( "deformationBlur" )->valuePlug<BoolPlug>()->setValue( false ); + + preprocessor->addChild( standardOptions ); + standardOptions->inPlug()->setInput( m_drawingMode->preprocessor()->outPlug() ); + // make the output for the preprocessor ScenePlugPtr preprocessorOutput = new ScenePlug( "out", Plug::Out ); preprocessor->addChild( preprocessorOutput ); - preprocessorOutput->setInput( m_drawingMode->preprocessor()->outPlug() ); + preprocessorOutput->setInput( standardOptions->outPlug() ); setPreprocessor( preprocessor );
Add instructions on silencing warnings from checks Fixes
@@ -64,6 +64,34 @@ Axes uses checks to verify your Django settings configuration for security and f Many people have different configurations for their development and production environments, and running the application with misconfigured settings can prevent security features from working. + +Disabling Axes system checks +---------------------------- + +If you are implementing custom authentication, request middleware, or signal handlers +the Axes checks system might false positives in the Django checks framework. + +You can silence the unnecessary warnings by using the following Django settings:: + + SILENCED_SYSTEM_CHECKS = ['axes.W003'] + + +Axes has the following warnings codes built in: + +- ``axes.W001`` for invalid ``CACHES`` configuration. +- ``axes.W002`` for invalid ``MIDDLEWARE`` configuration. +- ``axes.W003`` for invalid ``AUTHENTICATION_BACKENDS`` configuration. + + +.. note: +Only disable the Axes system checks and warnings if you know what you are doing. +The default checks are implemented to verify and improve your project's security +and should only produce necessary warnings due to misconfigured settings. + + +Disabling Axes components in tests +---------------------------------- + If you get errors when running tests or other configurations, try setting the ``AXES_ENABLED`` flag to ``False`` in your project or test settings configuration file::
Update mkvtomp4.py ignore truehd skip if there are no other audio tracks
@@ -407,7 +407,7 @@ class MkvtoMp4: self.log.info("Audio detected for stream #%s: %s [%s]." % (a.index, a.codec, a.metadata['language'])) - if self.output_extension in valid_tagging_extensions and a.codec.lower() == 'truehd' and self.ignore_truehd: # Need to skip it early so that it flags the next track as default. + if self.output_extension in valid_tagging_extensions and a.codec.lower() == 'truehd' and self.ignore_truehd and len(info.audio) > 1: # Need to skip it early so that it flags the next track as default. self.log.info("MP4 containers do not support truehd audio, and converting it is inconsistent due to video/audio sync issues. Skipping stream %s as typically the 2nd audio track is the AC3 core of the truehd stream." % a.index) continue
Don't log about no notification on GET requests The pecan notifier hook was logging on every GET request that it wasn't doing anything, which led to excessive logging. This just eliminates it. TrivialFix
@@ -66,7 +66,6 @@ class NotifierHook(hooks.PecanHook): return action = pecan_constants.ACTION_MAP.get(state.request.method) if not action or action not in ('create', 'update', 'delete'): - LOG.debug("No notification will be sent for action: %s", action) return if utils.is_member_action(utils.get_controller(state)): return
fw/version: Development version bump Bump dev version to synchronise interface for SSHConnection with devlib.
@@ -21,9 +21,9 @@ from subprocess import Popen, PIPE VersionTuple = namedtuple('Version', ['major', 'minor', 'revision', 'dev']) -version = VersionTuple(3, 2, 1, 'dev2') +version = VersionTuple(3, 2, 1, 'dev3') -required_devlib_version = VersionTuple(1, 2, 1, 'dev2') +required_devlib_version = VersionTuple(1, 2, 1, 'dev3') def format_version(v):
Adalog: fix testsuite material support after the recent Dec_Ref fix TN:
@@ -5,18 +5,18 @@ package body Langkit_Support.Adalog.Main_Support is ------------------------ procedure Free_Relation_Tree (R : in out Relation) is + Var : Relation; begin for C of R.Children loop - declare - C_Var : Relation := C; - begin - Free_Relation_Tree (C_Var); - while C_Var.Ref_Count > 1 loop - Dec_Ref (C_Var); + Var := C; + Free_Relation_Tree (Var); + while Var /= null and then Var.Ref_Count > 1 loop + Dec_Ref (Var); end loop; - end; end loop; - Dec_Ref (R); + Var := R; + Dec_Ref (Var); + R := null; end Free_Relation_Tree; end Langkit_Support.Adalog.Main_Support;
Generic API: minor comment fixes TN:
@@ -1498,7 +1498,7 @@ package body Langkit_Support.Generic_API.Introspection is return Result : Struct_Member_Ref_Array (1 .. Id.Struct_Types.all (Struct.Index).Inherited_Members) do - -- Go through the derivation chain and collect field in ``Result``. + -- Go through the derivation chain and collect members in ``Result``. -- Add them in reverse order so that in the end, inherited members -- are first, and are in declaration order. @@ -1712,7 +1712,7 @@ package body Langkit_Support.Generic_API.Introspection is end; end loop; - -- Finally evaluate the membe + -- Finally evaluate the member if Value.Value.all in Base_Internal_Struct_Value'Class then pragma Assert (Arguments'Length = 0);
Catching errors while reading ROM table. An error message is printed but initialization is allowed to continue.
@@ -163,9 +163,12 @@ class AccessPort(object): self.rom_addr &= 0xfffffffc # clear format and present bits def init_rom_table(self): + try: if self.has_rom_table: self.rom_table = ROMTable(self) self.rom_table.init() + except exceptions.TransferError as error: + logging.error("Transfer error while reading AP#%d ROM table: %s", self.ap_num, error) def read_reg(self, addr, now=True): return self.dp.read_ap((self.ap_num << APSEL_SHIFT) | addr, now)
Add better assertion error; for some reason this test is failing on CI but isn't failing for me locally.
@@ -113,7 +113,8 @@ def test_playwright_dry_run(sarge): "would run /Users/boakley/.venv/cci/bin/python -m Browser.entry init\n", ] ) - assert result.output == expected_output + msg = f"\n-- expected --\n{expected_output}\n\n-- actual --\n{result.output}" + assert result.output == expected_output, msg @mock.patch("cumulusci.cli.robot.sarge")
fix sample code error of fluid.dygraph.NaturalExpDecay
@@ -53,12 +53,14 @@ NaturalExpDecay import paddle.fluid as fluid base_lr = 0.1 with fluid.dygraph.guard(): + emb = fluid.dygraph.Embedding([10, 10]) sgd_optimizer = fluid.optimizer.SGD( learning_rate=fluid.dygraph.NaturalExpDecay( learning_rate=base_lr, decay_steps=10000, decay_rate=0.5, - staircase=True)) + staircase=True), + parameter_list=emb.parameters())
Updated check function to be optional This is clearer than having a 'checker' that just returns all builds
@@ -44,10 +44,6 @@ def form_filter_error(build): prev_form_filter = form_filter -def all_builds(build): - return "error" - - def broken_suite_files(build): db = Application.get_db() code = Application._blobdb_type_code @@ -75,7 +71,6 @@ CHECK_FUNCTIONS = { 'broken_suite_files': broken_suite_files, 'form_filter_error': form_filter_error, 'premature_auto_gps': premature_auto_gps, - 'all': all_builds, } @@ -84,7 +79,7 @@ class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( - 'check_function', + '--check_function', choices=list(CHECK_FUNCTIONS), ) parser.add_argument( @@ -109,9 +104,8 @@ class Command(BaseCommand): help='End date', ) - def handle(self, check_function, **options): - check_fn = CHECK_FUNCTIONS[check_function] - + def handle(self, **options): + check_fn = CHECK_FUNCTIONS.get(options['check_function']) start = options['startdate'] end = options['enddate'] ids = options['build_ids'] @@ -125,7 +119,7 @@ class Command(BaseCommand): print('Checking {} builds\n'.format(len(ids))) reason = input("Reason to use as build_broken_reason (leave blank to skip flagging)? ") - for message in find_broken_builds(check_fn, ids, reason): + for message in find_broken_builds(ids, check_fn, reason): self.stderr.write(message) @@ -140,9 +134,9 @@ def get_build_ids(start, end): return builds_ids -def find_broken_builds(checker, builds_ids, reason=None): +def find_broken_builds(builds_ids, checker=None, reason=None): for build in iter_docs(Application.get_db(), builds_ids): - error = checker(build) + error = checker(build) if checker else "error" if error: yield '%s\t%s\t%s\t%s\t%s\n' % ( build.get('built_on'),
Update cmas-data-warehouse.yaml Minor changes to tags - added aws-pds and removed emission
@@ -5,9 +5,9 @@ Contact: [email protected] ManagedBy: "[CMAS CENTER](https://cmascenter.org/)" UpdateFrequency: New data is added as soon as it is available. Tags: + - aws-pds - air quality - meteorological - - emission - geospatial - environmental - sustainability
integrals: relax assumptions on Dummy's Limits may be extended_real's, but the variable could be considered finite. See also mrv_leadterm().
@@ -483,7 +483,7 @@ def try_meijerg(function, xab): if len(xab) >= 2: if (any(b.is_extended_real for b in xab[1:]) and not any(b.is_extended_real is False for b in xab[1:])): - r = Dummy('r', extended_real=True) + r = Dummy('r', real=True) function = function.subs({xab[0]: r}) function = function.rewrite(Piecewise) function = function.subs({r: xab[0]})
Refactor: remove _items() in nova/api/openstack/compute/attach_interfaces.py removing _items() as this method is called only once and also to ensure policy checks happen in the API methods. blueprint policy-docs
@@ -55,8 +55,24 @@ class InterfaceAttachmentController(wsgi.Controller): @extensions.expected_errors((404, 501)) def index(self, req, server_id): """Returns the list of interface attachments for a given instance.""" - return self._items(req, server_id, - entity_maker=_translate_interface_attachment_view) + context = req.environ['nova.context'] + context.can(ai_policies.BASE_POLICY_NAME) + + instance = common.get_instance(self.compute_api, context, server_id) + search_opts = {'device_id': instance.uuid} + + try: + data = self.network_api.list_ports(context, **search_opts) + except exception.NotFound as e: + raise exc.HTTPNotFound(explanation=e.format_message()) + except NotImplementedError: + common.raise_feature_not_supported() + + ports = data.get('ports', []) + entity_maker = _translate_interface_attachment_view + results = [entity_maker(port) for port in ports] + + return {'interfaceAttachments': results} @extensions.expected_errors((403, 404)) def show(self, req, server_id, id): @@ -162,26 +178,6 @@ class InterfaceAttachmentController(wsgi.Controller): common.raise_http_conflict_for_instance_invalid_state(state_error, 'detach_interface', server_id) - def _items(self, req, server_id, entity_maker): - """Returns a list of attachments, transformed through entity_maker.""" - context = req.environ['nova.context'] - context.can(ai_policies.BASE_POLICY_NAME) - - instance = common.get_instance(self.compute_api, context, server_id) - search_opts = {'device_id': instance.uuid} - - try: - data = self.network_api.list_ports(context, **search_opts) - except exception.NotFound as e: - raise exc.HTTPNotFound(explanation=e.format_message()) - except NotImplementedError: - common.raise_feature_not_supported() - - ports = data.get('ports', []) - results = [entity_maker(port) for port in ports] - - return {'interfaceAttachments': results} - class AttachInterfaces(extensions.V21APIExtensionBase): """Attach interface support."""
Update how array data is printed Modify the display of array data to consider the size. Previously, anything other than an integer (4 bytes) would error out.
@@ -601,12 +601,20 @@ class Writer(object): data = value.get_data() tab = [] elem_size = value.element_width + elem_id = None if elem_size == 4: - for i in range(0, value.size * 4, 4): - tab.append('%s' % unpack('i', data[i:i + 4])[0]) - else: # FIXME: other cases - for i in range(value.size): - tab.append('%s' % data[i]) + elem_id = 'i' +# for i in range(0, value.size * 4, 4): +# tab.append('%s' % unpack('i', data[i:i + 4])[0]) + elif elem_size == 2: + elem_id = 'h' + else: + elem_id = 'b' + for i in range(0, value.size*elem_size, elem_size): + tab.append('%s' % unpack(elem_id, data[i:i+elem_size])[0]) + # else: # FIXME: other cases + # for i in range(value.size): + # tab.append('%s' % unpack('b', data[i])[0]) self.write(', '.join(tab), data="COMMA") self.write('}', data="ARRAY_FILLED_END") self.end_ins()
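The patch above keys a struct format code off the element width; a small standalone sketch of that mapping (the byte string is made-up sample data, not Androguard's):

from struct import unpack

data = b"\x01\x00\x02\x00\x03\x00"          # hypothetical array payload: three 2-byte elements
elem_size = 2
fmt = {4: "i", 2: "h", 1: "b"}[elem_size]   # same width-to-format mapping as the patch

values = [unpack(fmt, data[i:i + elem_size])[0]
          for i in range(0, len(data), elem_size)]
print(values)                               # [1, 2, 3] on a little-endian machine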
Add description to policies in console_output.py blueprint: policy-docs
@@ -26,9 +26,16 @@ console_output_policies = [ policy.RuleDefault( name=POLICY_ROOT % 'discoverable', check_str=base.RULE_ANY), - policy.RuleDefault( - name=BASE_POLICY_NAME, - check_str=base.RULE_ADMIN_OR_OWNER), + base.create_rule_default( + BASE_POLICY_NAME, + base.RULE_ADMIN_OR_OWNER, + 'Show console output for a server', + [ + { + 'method': 'POST', + 'path': '/servers/{server_id}/action (os-getConsoleOutput)' + } + ]) ]
Fix spelling error immediatly -> immediately
{% block content %} <div class='alert alert-block alert-danger'> - {% trans "Changing the settings below require you to immediatly restart the server. Do not change this while under active usage." %} + {% trans "Changing the settings below require you to immediately restart the server. Do not change this while under active usage." %} </div> <div class='table-responsive'>
Added a validatetag invoke task to check that a Git tag exists for the current HEAD Also: The pypi and pypi-test invoke tasks now have the validatetag task as their first prerequisite, so attempting to publish to pypi without a Git tag will fail
@@ -182,6 +182,13 @@ def tag(context, name='', message=''): context.run('git push origin {}'.format(name)) namespace.add_task(tag) [email protected]() +def validatetag(context): + "Check to make sure that a tag exists for the current HEAD" + # Validate that a Git tag exists for the current commit HEAD + context.run("git describe --exact-match --tags $(git log -n1 --pretty='%h')") +namespace.add_task(validatetag) + @invoke.task(pre=[clean_all]) def sdist(context): "Create a source distribution" @@ -194,13 +201,13 @@ def wheel(context): context.run('python setup.py bdist_wheel') namespace.add_task(wheel) [email protected](pre=[sdist, wheel]) [email protected](pre=[validatetag, sdist, wheel]) def pypi(context): "Build and upload a distribution to pypi" context.run('twine upload dist/*') namespace.add_task(pypi) [email protected](pre=[sdist, wheel]) [email protected](pre=[validatetag, sdist, wheel]) def pypi_test(context): "Build and upload a distribution to https://test.pypi.org" context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*')
Update README.rst Typo in sentence
@@ -49,7 +49,7 @@ by cloning this repository and doing a manual installation. python setup.py install Make sure that your default Python version is currently supported, or change the python and pip -commands specifying a version, such as ``python3.6``: +commands by specifying a version, such as ``python3.6``: .. code::
Replace some hardcoded values with named variables to make decoder_test more readable and easier to comprehend.
@@ -366,6 +366,8 @@ class TransformerDecoderTest(tf.test.TestCase): src_paddings = tf.zeros([src_time, src_batch], dtype=dtype) tgt_time = 5 tgt_batch = 8 + self.tgt_batch = tgt_batch + tgt_ids = tf.constant( np.random.randint(20, size=[tgt_batch, tgt_time]), dtype=tf.int32) tgt_labels = tf.constant( @@ -381,13 +383,14 @@ class TransformerDecoderTest(tf.test.TestCase): return (src_enc, src_paddings, tgts) def _testPackedInputs(self, dtype=tf.float32): + p = self._DecoderParams() np.random.seed(_NUMPY_RANDOM_SEED) src_time = 5 batch = 2 emb_dims = 4 tgt_time = 5 src_enc = tf.constant( - np.random.normal(size=[src_time, batch, emb_dims]), dtype=dtype) + np.random.normal(size=[src_time, batch, p.source_dim]), dtype=dtype) paddings = tf.zeros([src_time, batch], dtype=dtype) tgt_ids = tf.constant( np.random.randint(20, size=[batch, tgt_time]), dtype=tf.int32) @@ -476,13 +479,15 @@ class TransformerDecoderTest(tf.test.TestCase): self.assertAlmostEqual(15.864315, actual_loss, delta=0.0001) def _testExtendStep(self, sess, dec, src_enc, src_padding, tgts): + p = self._DecoderParams() l_out1 = dec._FProp(dec.theta, src_enc, src_padding, tgts, None) prefix_states = py_utils.NestedMap() for i in range(6): layer_i_states = py_utils.NestedMap() - layer_i_states.key = tf.zeros([8, 0, 4]) - layer_i_states.value = tf.zeros([8, 0, 4]) + # the middle dim is for num of transformer layers. Here's 0 as placeholder + layer_i_states.key = tf.zeros([self.tgt_batch, 0, p.model_dim]) + layer_i_states.value = tf.zeros([self.tgt_batch, 0, p.model_dim]) prefix_states['layer_%i' % i] = layer_i_states l_out2 = []
Catch KeyError in consensus proxy None will be returned if the key can't be found.
@@ -133,22 +133,15 @@ class ConsensusProxy: self._get_blocks([block_id])[0].get_settings_view( self._settings_view_factory) - return [ - (setting, settings_view.get_setting(setting)) - for setting in settings - ] + return _map_with_none(settings_view.get_setting, settings) def state_get(self, block_id, addresses): '''Returns a list of address/data pairs (str, bytes)''' - state_view = \ self._get_blocks([block_id])[0].get_state_view( self._state_view_factory) - return [ - (address, state_view.get(address)) - for address in addresses - ] + return _map_with_none(state_view.get, addresses) def _get_blocks(self, block_ids): try: @@ -158,3 +151,17 @@ class ConsensusProxy: ] except KeyError: raise UnknownBlock() + + +def _map_with_none(function, keys): + result = [] + + for key in keys: + try: + value = function(key) + except KeyError: + value = None + + result.append((key, value)) + + return result
CI: Use informational mode for codecov [skip CI]
@@ -6,8 +6,9 @@ coverage: status: project: default: - # Require 1% coverage, i.e., always succeed - target: 1 - patch: true + informational: true + patch: + default: + informational: true changes: false comment: off
client: do not use isolated in win7 64bit bots have the same issue.
@@ -1339,9 +1339,8 @@ def main(args): # TODO(crbug.com/932396): Remove this. use_go_isolated = ( options.cipd_enabled and - # TODO(crbug.com/1045281): 32bit win7 has flaky connection issue. - not (sys.platform == 'win32' and platform.release() == '7' and - platform.architecture()[0] == '32bit')) + # TODO(crbug.com/1045281): win7 has flaky connection issue. + not (sys.platform == 'win32' and platform.release() == '7')) # TODO(maruel): CIPD caches should be defined at an higher level here too, so # they can be cleaned the same way.
Update quickstart.rst for Windows 10 Added note to check if Windows ADB and Buildozer installed ADB are the same version.
@@ -66,13 +66,12 @@ To see your running application's print() messages and python's error messages, buildozer -v android deploy run logcat | grep python +Run my application from Windows 10 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Run my application on Windows 10 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Plug your Android device on a USB port. -- Plug your Android device on a USB port - -- Open Windows PowerShell, go into the folder where you installed the Windows version of ADB, and activate the adb daemon: +- Open Windows PowerShell, go into the folder where you installed the Windows version of ADB, and activate the ADB daemon. When the daemon is started you must see a number besides the word "device" meaning your device was correctly detected. In case of trouble, try another USB port or USB cable. :: @@ -85,6 +84,15 @@ Run my application on Windows 10 buildozer -v android deploy run +It is important to notice that Windows ADB and Buildozer installed ADB must be the same version. To check the versions, open PowerShell and type: + +:: + + cd C:\platform-tools\ + .\adb.exe version + wsl + cd ~/.buildozer/android/platform/android-sdk/platform-tools/ + ./adb version Install on non-connected devices --------------------------------
Use logger.error in loop exception handler Using logger.exception results in `NoneType: None` messages being written to the log. This is because the method is not called from an exception handler. Using logger.error removes this issue.
@@ -60,7 +60,7 @@ class LoopContext(contextlib.AbstractAsyncContextManager): # CancelledErrors happen when we simply cancel the main task during # a normal restart procedure if not isinstance(exc, asyncio.CancelledError): - logger.exception(exc) + logger.error(exc) else: logger.error("unhandled error in event loop: %s", context["msg"])
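The `NoneType: None` lines appear because `logger.exception()` always appends exception info, and outside an `except` block `sys.exc_info()` is `(None, None, None)`. A minimal standalone demonstration (not the project's actual handler):

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("demo")

def loop_exception_handler(exc):
    # Called from an event-loop exception handler, i.e. not inside an "except" block.
    logger.exception(exc)   # logs the message plus "NoneType: None" as the traceback
    logger.error(exc)       # logs the message only

loop_exception_handler(ValueError("boom"))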
Properly handle flush error in cache stop In case of an initial flush error, stop is aborted. In case of a failure during the second flush, an appropriate error message is presented to the user.
@@ -466,6 +466,7 @@ static int _cache_mngt_core_flush_uninterruptible(ocf_core_t core) struct _cache_mngt_stop_context { struct _cache_mngt_async_context async; int error; + int flush_status; ocf_cache_t cache; struct task_struct *finish_thread; }; @@ -484,21 +485,21 @@ static int exit_instance_finish(void *data) struct cache_priv *cache_priv; struct _cache_mngt_stop_context *ctx = data; ocf_queue_t mngt_queue; - bool flush_status; int result = 0; if (kthread_should_stop()) return 0; - flush_status = ocf_mngt_cache_is_dirty(ctx->cache); cache_priv = ocf_cache_get_priv(ctx->cache); mngt_queue = cache_priv->mngt_queue; if (ctx->error && ctx->error != -OCF_ERR_WRITE_CACHE) BUG_ON(ctx->error); - if (!ctx->error && flush_status) + if (!ctx->error && ctx->flush_status) result = -KCAS_ERR_STOPPED_DIRTY; + else + result = ctx->error; cas_cls_deinit(ctx->cache); @@ -2411,15 +2412,8 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) */ if (flush) status = _cache_flush_with_lock(cache); - switch (status) { - case -OCF_ERR_CACHE_IN_INCOMPLETE_STATE: - case -OCF_ERR_FLUSHING_INTERRUPTED: - case -KCAS_ERR_WAITING_INTERRUPTED: + if (status) goto put; - default: - flush_status = status; - break; - } status = _cache_mngt_lock_sync(cache); if (status) @@ -2441,10 +2435,6 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) goto stop_thread; } } else { - if (flush_status) { - status = flush_status; - goto stop_thread; - } /* * We are being switched to upgrade in flight mode - * wait for finishing pending core requests @@ -2455,6 +2445,8 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) /* Flush cache again. This time we don't allow interruption. */ if (flush) flush_status = _cache_mngt_cache_flush_uninterruptible(cache); + context->flush_status = flush_status; + if (flush && !flush_status) BUG_ON(ocf_mngt_cache_is_dirty(cache)); @@ -2466,6 +2458,12 @@ int cache_mngt_exit_instance(const char *cache_name, size_t name_len, int flush) "Waiting for cache stop interrupted. " "Stop will finish asynchronously.\n"); + if ((status == 0 || status == -KCAS_ERR_WAITING_INTERRUPTED) && + flush_status) { + /* "removed dirty" error has a precedence over "interrupted" */ + return KCAS_ERR_STOPPED_DIRTY; + } + return status; stop_thread:
Update tutorial.md Adding SRE Recipes to tutorial
@@ -344,6 +344,45 @@ Click **View Logs** for one of the samples to see the log messages that match th You can expand any of the messages that matches the filter to see the full stack trace. +## SRE Recipes + +SRE Recipes is our [Chaos Engineering](https://en.wikipedia.org/wiki/Chaos_engineering) tool to test your sandbox environment. It helps users to familiarize with finding the root cause of a breakage using Cloud Ops monitoring. +Each 'recipe' simulate a different scenario, there are several recipes that you can run and you can also [contribute your own.](https://github.com/GoogleCloudPlatform/cloud-ops-sandbox/tree/master/sre-recipes#contributing) + +``` +$ sandboxctl sre-recipes +``` + +### Running an example SRE Recipe + +> **Note:** Recipe's names are not explicit by design as we don't want allude to the problem. + +1. Run the recipe to manufacture errors in the demo cluster + +``` +$ sandboxctl sre-recipes break recipe0 +``` + +2. Use Cloud Operations suite to diagnose the problem. + +> **Note:** If you are stuck, you can use a hint to direct you to the right direction. +``` +$ sandboxctl sre-recipes hint recipe0 +``` + +3. Verify you hypothesis using command line tool + +``` +$ sandboxctl sre-recipes verify recipe0 +``` + +4. After you discovered the problem, you can restore the cluster to it's original state. + +``` +$ sandboxctl sre-recipes restore recipe0 +``` + + ## Destroying your cluster Once you have finished exploring the Sandbox project, don't forget to destroy it to avoid incurring additional billing.
Special case where anonymous user visits /user/ page The isAuthenticated wrapper only works if LOGIN_REQUIRED is set to true, which means that when it's false and an anonymous_user visits /user/, they would be presented with a user profile page for an anonymous user, but flask would 500 when the form is submitted.
-from flask import render_template, redirect, url_for, current_app, flash, Response, abort +from flask import render_template, redirect, url_for, current_app, flash, Response, abort, request from flask_login import current_user from app import db from app.user import bp @@ -10,6 +10,11 @@ import ipaddress @bp.route('/', methods=['GET', 'POST']) @isAuthenticated def profile(): + # Handle this case because isAuthenticated only applies when LOGIN_REQUIRED is true + if current_user.is_anonymous: + flash("You must be a user to access %s" % request.path, "warning") + return redirect(url_for('main.index')) + changePasswordForm = ChangePasswordForm(prefix="change-password") displaySettingsForm = DisplaySettingsForm(prefix="display-settings", results_per_page=current_user.results_per_page, \ preview_length=current_user.preview_length)
CI: rename UsePythonVersion task "python" is a shorter and clearer name.
@@ -26,7 +26,7 @@ jobs: steps: - task: UsePythonVersion@0 displayName: 'Set Python version' - name: PythonVersion + name: python inputs: versionSpec: '3.8.x' addToPath: true @@ -34,11 +34,11 @@ jobs: - task: Cache@2 displayName: 'Restore Python environment' inputs: - key: python | $(Agent.OS) | "$(PythonVersion.pythonLocation)" | 0 | ./Pipfile | ./Pipfile.lock + key: python | $(Agent.OS) | "$(python.pythonLocation)" | 0 | ./Pipfile | ./Pipfile.lock restoreKeys: | - python | "$(PythonVersion.pythonLocation)" | 0 | ./Pipfile.lock - python | "$(PythonVersion.pythonLocation)" | 0 | ./Pipfile - python | "$(PythonVersion.pythonLocation)" | 0 + python | "$(python.pythonLocation)" | 0 | ./Pipfile.lock + python | "$(python.pythonLocation)" | 0 | ./Pipfile + python | "$(python.pythonLocation)" | 0 cacheHitVar: PY_ENV_RESTORED path: $(PYTHONUSERBASE) @@ -59,16 +59,16 @@ jobs: # pipenv entirely, which is too dumb to know it should use the system interpreter rather than # creating a new venv. - script: | - printf '%s\n%s' '#!/bin/bash' '"${@:2}"' > $(PythonVersion.pythonLocation)/bin/pipenv \ - && chmod +x $(PythonVersion.pythonLocation)/bin/pipenv + printf '%s\n%s' '#!/bin/bash' '"${@:2}"' > $(python.pythonLocation)/bin/pipenv \ + && chmod +x $(python.pythonLocation)/bin/pipenv displayName: 'Mock pipenv binary' - task: Cache@2 displayName: 'Restore pre-commit environment' inputs: - key: pre-commit | "$(PythonVersion.pythonLocation)" | 0 | .pre-commit-config.yaml + key: pre-commit | "$(python.pythonLocation)" | 0 | .pre-commit-config.yaml restoreKeys: | - pre-commit | "$(PythonVersion.pythonLocation)" | 0 + pre-commit | "$(python.pythonLocation)" | 0 path: $(PRE_COMMIT_HOME) # pre-commit's venv doesn't allow user installs - not that they're really needed anyway.
core: cache stream reference in DelimitedProtocol Stream.set_protocol() was updated to break the reference on the previous protocol, to encourage a crash should an old protocol continue operating after it's not supposed to be active any more. That broke DelimitedProtocol's protocol switching functionality.
@@ -1696,6 +1696,7 @@ class DelimitedProtocol(Protocol): def on_receive(self, broker, buf): _vv and IOLOG.debug('%r.on_receive()', self) + stream = self.stream self._trailer, cont = mitogen.core.iter_split( buf=self._trailer + buf, delim=self.delimiter, @@ -1706,8 +1707,8 @@ class DelimitedProtocol(Protocol): if cont: self.on_partial_line_received(self._trailer) else: - assert self.stream.protocol is not self - self.stream.protocol.on_receive(broker, self._trailer) + assert stream.protocol is not self + stream.protocol.on_receive(broker, self._trailer) def on_line_received(self, line): """
Restored additional validation code This validation is now available to both comboboxes and dropdowns.
@@ -545,6 +545,13 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { })); }); + self.isValid = function (value) { + if (!value) { + return true; + } + return _.contains(_.pluck(self.options(), 'text'), value); + }; + self.options.subscribe(function () { self.renderSelect2(); if (!self.isValid(self.rawAnswer())) { @@ -552,6 +559,14 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { } }); + // If there is a prexisting answer, set the rawAnswer to the corresponding text. + if (question.answer()) { + var initialOption = _.findWhere(self.options(), {id: self.answer()}); + self.rawAnswer( + initialOption ? initialOption.text : Const.NO_ANSWER + ); + } + self.additionalSelect2Options = function () { return {}; }; @@ -589,8 +604,7 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { * Docs: https://confluence.dimagi.com/display/commcarepublic/Advanced+CommCare+Android+Formatting#AdvancedCommCareAndroidFormatting-SingleSelect"ComboBox" */ function ComboboxEntry(question, options) { - var self = this, - initialOption; + var self = this; DropdownEntry.call(this, question, options); // Specifies the type of matching we will do when a user types a query @@ -613,13 +627,6 @@ hqDefine("cloudcare/js/form_entry/entrycontrols_full", function () { }; }; - self.isValid = function (value) { - if (!value) { - return true; - } - return _.contains(_.pluck(self.options(), 'text'), value); - }; - self.enableReceiver(question, options); }
Update README.md Section 1 arrangement
@@ -14,17 +14,15 @@ machines in restaurants, bars, arcades, and elsewhere. Technology and Compatibility ---------------------------- -MPF is written in Python 3. It is compatible with Windows, Mac, and Linux using the same code and configurations. - -MPF interfaces with real, physical pinball machines via modern pinball controller hardware, including: +You can use MPF to power your own custom-built machine, or to update software in existing Williams, Bally, +Stern, or Data East pinball machines. MPF interfaces with machines via modern pinball controller hardware, including (but not limited to): * Multimorphic P-ROC or P3-ROC systems * FAST Pinball controllers * Open Pinball Project (OPP) open source hardware * Stern SPIKE pinball systems -You can use MPF to power your own custom-built machine, or to update software in existing Williams, Bally, -Stern, or Data East machines. +MPF is written in Python 3. It is compatible with Windows, Mac, and Linux using the same code and configurations. There's also an [MPF Media Controller](https://github.com/missionpinball/mpf-mc/) (based on [Kivy](http://kivy.org)) which is used to control graphics and sounds, including high-res LCD displays, classic DMDs, and modern RGB LED DMDs.
Update qBittorrentPostProcess.py include content_path debug print
@@ -37,6 +37,7 @@ try: path_mapping = settings.qBittorrent['path-mapping'] log.debug("Root Path: %s." % root_path) + log.debug("Content Path: %s." % content_path) log.debug("Label: %s." % label) log.debug("Categories: %s." % categories) log.debug("Torrent hash: %s." % torrent_hash)
Add support for p100 in transformer kernels add compute cap of 6.0, support p100
@@ -217,6 +217,8 @@ if BUILD_MASK & DS_BUILD_TRANSFORMER: '-gencode', 'arch=compute_61,code=compute_61', '-gencode', + 'arch=compute_60,code=compute_60', + '-gencode', 'arch=compute_70,code=compute_70', '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__', @@ -248,6 +250,8 @@ if BUILD_MASK & DS_BUILD_TRANSFORMER: '-gencode', 'arch=compute_61,code=compute_61', '-gencode', + 'arch=compute_60,code=compute_60', + '-gencode', 'arch=compute_70,code=compute_70', '-std=c++14', '-U__CUDA_NO_HALF_OPERATORS__',
Better implementation of testH5pyMissing This test was skipped on all CI environments, because h5py is always available. Now it is executed by using a context manager to ensure ImportError is raised on "import h5py", even if it is installed.
@@ -103,10 +103,9 @@ class TestConvertCommand(unittest.TestCase): result = e.args[0] self.assertEqual(result, 0) - @unittest.skipUnless(h5py is None, - "h5py is installed, this test is specific to h5py missing") @utils.test_logging(convert._logger.name, error=1) def testH5pyNotInstalled(self): + with utils.EnsureImportError("h5py"): result = convert.main(["convert", "foo.spec", "bar.edf"]) # we explicitly return -1 if h5py is not imported self.assertNotEqual(result, 0)
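The test relies on a `utils.EnsureImportError` helper to make `import h5py` fail even when h5py is installed. The real silx helper is not shown here; a rough sketch of how such a context manager could work (names and details are assumptions):

import sys
from contextlib import contextmanager

@contextmanager
def ensure_import_error(name):
    """Force ``import <name>`` to raise ImportError inside the block."""
    saved = sys.modules.pop(name, None)
    sys.modules[name] = None          # a None entry makes the import machinery raise ImportError
    try:
        yield
    finally:
        if saved is not None:
            sys.modules[name] = saved
        else:
            del sys.modules[name]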
doc/build_instrument_method_map: update table header Update the decorator table header to state "decorator" rather than "prefix" (this was left over from WA2).
@@ -37,7 +37,7 @@ def generate_instrument_method_map(outfile): signal_table = format_simple_table([(k, v) for k, v in SIGNAL_MAP.iteritems()], headers=['method name', 'signal'], align='<<') priority_table = format_simple_table(zip(Priority.names, Priority.values), - headers=['prefix', 'priority'], align='<>') + headers=['decorator', 'priority'], align='<>') with open(OUTPUT_TEMPLATE_FILE) as fh: template = string.Template(fh.read()) with open(outfile, 'w') as wfh:
Fix session cookie. Error with sessionid in Linux
@@ -102,7 +102,7 @@ def parse_session_cookie(cookie_to_cook): session_value = False tokens = cookie_to_cook.split(";") for tok in tokens: - if 'session' in tok: + if 'session=' in tok: print("found session id: " + tok) session_value = int(tok.replace('session=', '')) return session_value
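A minimal illustration of the bug (the cookie string is made up): on Linux the cookie can also carry a `sessionid` token, which the old substring test matched first.

cookie = "sessionid=abc123; session=42"            # hypothetical cookie header

for tok in cookie.split(";"):
    if "session" in tok:                           # old check: also matches "sessionid=abc123"
        pass                                       # int(tok.replace("session=", "")) -> ValueError
    if "session=" in tok:                          # new check: only the real session token
        print(int(tok.replace("session=", "")))    # 42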
Sync tests: test sync roles command sync() should be called on the RoleSyncer.
@@ -285,3 +285,12 @@ class SyncCogListenerTests(SyncCogTestCase): self.assertEqual(updated_information[api_field], api_value) else: self.cog.patch_user.assert_not_called() + + +class SyncCogCommandTests(SyncCogTestCase): + def test_sync_roles_command(self): + """sync() should be called on the RoleSyncer.""" + ctx = helpers.MockContext() + asyncio.run(self.cog.sync_roles_command.callback(self.cog, ctx)) + + self.cog.role_syncer.sync.assert_called_once_with(ctx.guild, ctx)
Ada API: introduce the concept of boxed types TN:
@@ -813,6 +813,17 @@ class CompiledType(object): def is_array(self): return isinstance(self, ArrayType) + @property + def public_requires_boxing(self): + """ + Whether the public type in the Ada API for this requires some boxing to + be embedded in a record. This is true for all unbounded types (i.e. + arrays). + + :rtype: bool + """ + return False + def new(self, *args, **kwargs): """ Shortcut to the New expression, allowing type.new(..) syntax. @@ -1734,6 +1745,15 @@ class StructType(BaseStructType): """ return not self.is_entity_type or self == T.entity + @property + def contains_boxed_field(self): + """ + Return if at least one field requires boxing in the public API. + + :rtype: bool + """ + return any(f.type.public_requires_boxing for f in self.get_fields()) + class EntityType(StructType): """ @@ -2753,6 +2773,10 @@ class ArrayType(CompiledType): return (not self.element_type.is_struct_type or self.element_type.emit_c_type) + @property + def public_requires_boxing(self): + return True + def create_enum_node_types(cls): """
Don't assume `self` is in `known_workers` in `BaseWorker.serializer` That assumption makes it possible to have a case where `known_workers` is empty, which leads `len(frameworks) == 1` to be `False`, which defaults to the wrong serialization strategy when `self.framework` is `torch`.
@@ -1178,14 +1178,12 @@ class BaseWorker(AbstractWorker, ObjectStorage): 'torch': serialization will only work between workers that support PyTorch (more to come: 'tensorflow', 'numpy', etc) """ - if workers is not None: + if workers is None: + workers = [w for w in self._known_workers.values() if isinstance(w, AbstractWorker)] + if not isinstance(workers, list): workers = [workers] - else: - workers = [w for w in self._known_workers.values() if isinstance(w, AbstractWorker)] - # self is not referenced in self._known_workers when auto_add=False - if self not in workers: workers.append(self) frameworks = set()
[Hexagon] Handle v69 in RPC launcher on simulator There are a few tweaks that are needed related to directory structure in the SDK.
@@ -463,7 +463,10 @@ std::string SimulatorRPCChannel::Cpu_::str() const { SimulatorRPCChannel::SDKInfo_::SDKInfo_(const std::string& sdk_root, const std::string& cpu) : root(sdk_root) { - qurt_root = root + "/rtos/qurt/compute" + cpu; + // For v69 chips, still look for v68 in the directory names. + std::string check_cpu = cpu == "v69" ? "v68" : cpu; + + qurt_root = root + "/rtos/qurt/compute" + check_cpu; runelf = qurt_root + "/sdksim_bin/runelf.pbn"; // The "run_main_on_hexagon_sim" binary lives in a subdirectory that looks @@ -480,7 +483,7 @@ SimulatorRPCChannel::SDKInfo_::SDKInfo_(const std::string& sdk_root, const std:: std::string name = d->d_name; // Note: The first substr is always safe, and the second only executes // when "name" is at least 13 characters long. - if (name.substr(0, 13) == "hexagon_toolv" && name.substr(name.size() - 3, 3) == cpu) { + if (name.substr(0, 13) == "hexagon_toolv" && name.substr(name.size() - 3, 3) == check_cpu) { dir_names.push_back(name); } }
Fix RCC SWS doc typo RM0008 says this is correct, see "7.3.2 Clock configuration register (RCC_CFGR)".
@@ -36,8 +36,8 @@ RCC: Div512: [15, "SYSCLK divided by 512"] SWS: _read: - HSI: [0, "HSE oscillator used as system clock"] - HSE: [1, "HSI oscillator used as system clock"] + HSI: [0, "HSI oscillator used as system clock"] + HSE: [1, "HSE oscillator used as system clock"] PLL: [2, "PLL used as system clock"] SW: HSI: [0, "HSI selected as system clock"]
Added mention of map file alternative for minion configuration options. Fixes:
@@ -27,10 +27,12 @@ cloud is operating on. Minion Configuration ==================== -The default minion configuration is set up in this file. Minions created by -salt-cloud derive their configuration from this file. Almost all parameters -found in :ref:`Configuring the Salt Minion <configuration-salt-minion>` can -be used here. +The default minion configuration is set up in this file or alternatively used in +any ``.conf`` file listed in the ``/etc/salt/cloud.maps.d/`` +:ref:`map file <salt-cloud-map>` directory. Minions created by salt-cloud +traditionally derive their configuration from this file. Almost all parameters +found in :ref:`Configuring the Salt Minion <configuration-salt-minion>` can be +used here. .. code-block:: yaml
Add closefd keyword argument to Python 2's FileIO.__init__ Fixes
@@ -92,7 +92,7 @@ class _RawIOBase(_IOBase): class FileIO(_RawIOBase, BytesIO): # type: ignore # for __enter__ mode = ... # type: str closefd = ... # type: bool - def __init__(self, file: str, mode: str = ...) -> None: ... + def __init__(self, file: str, mode: str = ..., closefd: bool = ...) -> None: ... def readinto(self, buffer: bytearray)-> int: ... def write(self, pbuf: str) -> int: ...
tests/scheduler/conditions: Drop 'DECISION_TIME' output port It's not used in the test and PsyNeuLink warns about it.
@@ -1592,7 +1592,8 @@ class TestFeedback: time_step_size=1.0), reset_stateful_function_when=pnl.AtTrialStart(), execute_until_finished=False, - output_ports=[pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME], + # Use only the decision variable in this test + output_ports=[pnl.DECISION_VARIABLE], name='DDM') response = pnl.ProcessingMechanism(size=2, name="GATE")
[BUG] Fix `PluginParamsForecaster` docstring and add dict use example This PR fixes the `PluginParamsForecaster` docstring where keys and values were swapped in the `dict` case, and adds a `dict` usage example. The new example requires for the tests to pass.
@@ -31,8 +31,8 @@ class PluginParamsForecaster(_DelegatedForecaster): list of str: parameters in the list are plugged into parameters of the same name only parameters present in both `forecaster` and `param_est` are plugged in str: considered as a one-element list of str with the string as single element - dict: parameters of values are plugged into parameters of respective keys - only keys present in `forecaster` and values in `param_est` are plugged in + dict: parameter with name of key is plugged into parameter with name of value + only keys present in `param_est` and values in `forecaster` are plugged in update_params : bool, optional, default=False whether fitted parameters by param_est_ are to be updated in self.update @@ -60,6 +60,12 @@ class PluginParamsForecaster(_DelegatedForecaster): >>> y_pred = sp_auto.predict() >>> sp_auto.forecaster_.get_params()["sp"] 12 + + using dictionary to plug "foo" parameter into "sp" + >>> from sktime.param_est.fixed import FixedParams + >>> sp_plugin = PluginParamsForecaster( + ... FixedParams({"foo": 12}), NaiveForecaster(), params={"foo": "sp"} + ... ) """ _tags = {
avatar: Fix user avatar delete button bug. Since we migrated to `image_upload_widget.hbs` for the upload widget, we have to access that widget's elements according to the new CSS class names in `image_upload_widget.hbs`. We need to access the delete button element with `#user-avatar-upload-widget .settings-page-delete-button`, not with the old CSS id `#user_avatar_delete_button`.
@@ -58,7 +58,7 @@ exports.build_user_avatar_widget = function (upload_function) { channel.del({ url: '/json/users/me/avatar', success: function () { - $("#user_avatar_delete_button").hide(); + $("#user-avatar-upload-widget .settings-page-delete-button").hide(); $("#user-avatar-source").show(); // Need to clear input because of a small edge case // where you try to upload the same image you just deleted.
chore(python): use 'setup.py' to detect repo root Closes
@@ -201,6 +201,11 @@ def _get_repo_root(): break if Path(p / ".git").exists(): return str(p) + # .git is not available in repos cloned via Cloud Build + # setup.py is always in the library's root, so use that instead + # https://github.com/googleapis/synthtool/issues/792 + if Path(p / "setup.py").exists(): + return str(p) p = p.parent raise Exception("Unable to detect repository root.")
Open the 8472 port of master for vxlan When the backend of the flanneld service is vxlan (it listens on UDP port 8472), magnum needs to open the port on the master. Closes-Bug:
@@ -533,6 +533,9 @@ resources: - protocol: tcp port_range_min: 30000 port_range_max: 32767 + - protocol: udp + port_range_min: 8472 + port_range_max: 8472 secgroup_kube_minion: type: OS::Neutron::SecurityGroup
Feature: fine-tuned VGroup: allowing (nested) tuples or lists in constructor VGroup([a,b,c]) = VGroup(a,b,c) VGroup([a,b], c) = VGroup(VGroup(a,b), c) ...
@@ -426,8 +426,18 @@ class VMobject(Mobject): return self class VGroup(VMobject): - #Alternate name to improve readability during use - pass + def __init__(self, *args, **kwargs): + if len(args) == 1 and isinstance(args[0], (tuple, list)): + args = args[0] + + packed_args = [] + for arg in args: + if isinstance(arg, (tuple, list)): + packed_args.append(VGroup(arg)) + else: packed_args.append(arg) + + VMobject.__init__(self, *packed_args, **kwargs) + class VectorizedPoint(VMobject): CONFIG = {
tests: model: Extend test for append_message event for streams. This commit is a followup to the previous commit, adding a test for the case where a message with the same topic name appears in (different) streams which are not a recipient of the message.
@@ -551,6 +551,10 @@ class TestModel: 'display_recipient': 'a'}, [['stream', 'a'], ['topic', 'b']], frozenset(), ['msg_w']), + ({'type': 'stream', 'id': 1, 'subject': 'b', + 'display_recipient': 'a'}, + [['stream', 'c'], ['topic', 'b']], + frozenset(), []), ({'type': 'private', 'id': 1}, [['pm_with', '[email protected]']], frozenset({5827, 5}), ['msg_w']), @@ -562,7 +566,8 @@ class TestModel: frozenset({5827, 3212}), []), ], ids=['stream_to_all_messages', 'private_to_all_private', 'stream_to_stream', 'stream_to_topic', - 'pm_existing_conv', 'search', 'pm_no_existing_conv']) + 'stream_to_different_stream_same_topic', 'pm_existing_conv', + 'search', 'pm_no_existing_conv']) def test_append_message(self, mocker, user_dict, user_profile, response, narrow, recipients, model, log): model.update = True
Ensure that mocked '_Spanner' instances are orderable. Add a test for the two-entries-have-the-same-timestamp race which underlies the Appveyor failure in Closes
# limitations under the License. +from functools import total_ordering import unittest @@ -597,6 +598,32 @@ class TestTransactionPingingPool(unittest.TestCase): self.assertTrue(pool._pending_sessions.empty()) + def test_bind_w_timestamp_race(self): + import datetime + from google.cloud._testing import _Monkey + from google.cloud.spanner import pool as MUT + NOW = datetime.datetime.utcnow() + pool = self._makeOne() + database = _Database('name') + SESSIONS = [_Session(database) for _ in range(10)] + database._sessions.extend(SESSIONS) + + with _Monkey(MUT, _NOW=lambda: NOW): + pool.bind(database) + + self.assertIs(pool._database, database) + self.assertEqual(pool.size, 10) + self.assertEqual(pool.default_timeout, 10) + self.assertEqual(pool._delta.seconds, 3000) + self.assertTrue(pool._sessions.full()) + + for session in SESSIONS: + self.assertTrue(session._created) + txn = session._transaction + self.assertTrue(txn._begun) + + self.assertTrue(pool._pending_sessions.empty()) + def test_put_full(self): from six.moves.queue import Full @@ -755,6 +782,7 @@ class _Transaction(object): return self._committed +@total_ordering class _Session(object): _transaction = None @@ -767,6 +795,9 @@ class _Session(object): self._deleted = False self._transaction = transaction + def __lt__(self, other): + return id(self) < id(other) + def create(self): self._created = True
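The ordering matters because the pinging pool appears to queue (timestamp, session) pairs; when two timestamps are identical, Python falls back to comparing the session objects themselves. A standalone sketch of that fallback (a plain stand-in class, not the real `_Session` mock):

import queue
from functools import total_ordering

@total_ordering
class FakeSession:
    """Identity-based ordering is enough to break priority ties."""
    def __lt__(self, other):
        return id(self) < id(other)

q = queue.PriorityQueue()
now = 12345.0                       # two entries with the *same* timestamp
q.put((now, FakeSession()))
q.put((now, FakeSession()))         # would raise TypeError without __lt__
print(q.get()[0], q.get()[0])       # 12345.0 12345.0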
PlugAdder : Simplify drag snapping Now that we have the ConnectionCreator base class, there is no need for separate casts for Nodule and ConnectionGadget. This also fixes a crash when dragging a non-nodule plug onto a PlugAdder.
@@ -278,17 +278,12 @@ bool PlugAdder::dragEnter( const DragDropEvent &event ) setHighlighted( true ); + if( auto connectionCreator = runTimeCast<ConnectionCreator>( event.sourceGadget.get() ) ) + { V3f center = V3f( 0.0f ) * fullTransform(); - center = center * event.sourceGadget->fullTransform().inverse(); + center = center * connectionCreator->fullTransform().inverse(); const V3f tangent = edgeTangent( m_edge ); - - if( Nodule *sourceNodule = runTimeCast<Nodule>( event.sourceGadget.get() ) ) - { - sourceNodule->updateDragEndPoint( center, tangent ); - } - else if( ConnectionGadget *connectionGadget = runTimeCast<ConnectionGadget>( event.sourceGadget.get() ) ) - { - connectionGadget->updateDragEndPoint( center, tangent ); + connectionCreator->updateDragEndPoint( center, tangent ); } return true;
Fix ESP32 memory leak A memory leak was found in ESP32:
@@ -638,7 +638,7 @@ void TaskMining(void *pvParameters) { // Global Definitions unsigned int job_size_task_one = 100; - unsigned char *expectedHashBytes = (unsigned char *)malloc(job_size_task_one * sizeof(unsigned char)); + unsigned char expectedHashBytes[100]; // Clear expectedHashBytes memset(expectedHashBytes, 0, job_size_task_one);
Add some clarifying headings Test Plan: Manual review Reviewers: sashank, nate
@@ -9,6 +9,8 @@ are attempting to isolate breaking changes to the public APIs to minor versions (on a roughly 8-week cadence) and will announce deprecations in Slack and in the release notes to patch versions (on a roughly weekly cadence). +.. rubric:: Core + APIs from the core ``dagster`` package are divided roughly by topic: `Solids <apidocs/solids.html>`_ @@ -56,6 +58,8 @@ APIs from the core ``dagster`` package are divided roughly by topic: how Dagster works with an eye towards extending it: logging, executors, system storage, the Dagster instance & plugin machinery, storage, schedulers. +.. rubric:: Libraries + .. include:: libraries.rst
InstallRequirement.extras is expected to be a set anyway Pointed out in
@@ -78,8 +78,7 @@ def combine_install_requirements( if combined_ireq.req is not None and ireq.req is not None: combined_ireq.req.specifier &= ireq.req.specifier combined_ireq.constraint &= ireq.constraint - # Return a sorted, de-duped tuple of extras - combined_ireq.extras = tuple(sorted({*combined_ireq.extras, *ireq.extras})) + combined_ireq.extras = {*combined_ireq.extras, *ireq.extras} # InstallRequirements objects are assumed to come from only one source, and # so they support only a single comes_from entry. This function breaks this
Added Fun Facts API Added Fun Facts API on Line 564
@@ -561,6 +561,7 @@ API | Description | Auth | HTTPS | CORS | | [Fortnite](https://fortnitetracker.com/site-api) | Fortnite Stats | `apiKey` | Yes | Unknown | | [Forza](https://docs.forza-api.tk) | Show random image of car from Forza | No | Yes | Unknown | | [FreeToGame](https://www.freetogame.com/api-doc) | Free-To-Play Games Database | No | Yes | Yes | +| [Fun Facts](https://asli-fun-fact-api.herokuapp.com/) | Random Fun Facts | No | Yes | Yes | | [GamerPower](https://www.gamerpower.com/api-read) | Game Giveaways Tracker | No | Yes | Yes | | [Giant Bomb](https://www.giantbomb.com/api/documentation) | Video Games | `apiKey` | Yes | Unknown | | [Guild Wars 2](https://wiki.guildwars2.com/wiki/API:Main) | Guild Wars 2 Game Information | `apiKey` | Yes | Unknown |
graph decorator mypy Test Plan: Unit tests / mypy tests Reviewers: alangenfeld
from functools import update_wrapper +from typing import Any, Callable, List, Optional, Union from dagster import check class _Graph: def __init__( self, - name=None, - description=None, - input_defs=None, - output_defs=None, + name: Optional[str] = None, + description: Optional[str] = None, + input_defs: Optional[List[InputDefinition]] = None, + output_defs: Optional[List[OutputDefinition]] = None, ): self.name = check.opt_str_param(name, "name") self.description = check.opt_str_param(description, "description") @@ -25,7 +26,7 @@ def __init__( output_defs, "output_defs", of_type=OutputDefinition ) - def __call__(self, fn): + def __call__(self, fn: Callable[..., Any]) -> GraphDefinition: check.callable_param(fn, "fn") if not self.name: @@ -66,11 +67,11 @@ def __call__(self, fn): def graph( - name=None, - description=None, - input_defs=None, - output_defs=None, -): + name: Optional[str] = None, + description: Optional[str] = None, + input_defs: Optional[List[InputDefinition]] = None, + output_defs: Optional[List[OutputDefinition]] = None, +) -> Union[_Graph, GraphDefinition]: """Create a graph with the specified parameters from the decorated composition function. Using this decorator allows you to build up a dependency graph by writing a
Add further experiment identifier test line This line passes because we shouldn't need all experiments to be in a certain order, rather just that they are all unique and have a corresponding mapping in the reflection table.
@@ -1110,6 +1110,9 @@ def test_experiment_identifiers(): table.assert_experiment_identifiers_are_consistent(experiments) assert table.are_experiment_identifiers_consistent(experiments) == False + experiments[2].identifier = "mnop" + assert table.are_experiment_identifiers_consistent(experiments) == True + identifiers = table.experiment_identifiers() identifiers[0] = 'abcd' identifiers[1] = 'efgh'
Prepare 1.29.0rc3. [ci skip-rust-tests] [ci skip-jvm-tests]
@@ -5,6 +5,15 @@ This document describes releases leading up to the ``1.29.x`` ``stable`` series. See https://pants.readme.io/v1.29/docs/release-notes-1-29 for an overview of the changes in this release. +1.29.0rc3 (6/09/2020) +--------------------- + +New Features +~~~~~~~~~~~~ + +* Add `--` style passthrough args to V2 `run` and `setup-py` goals (#9911) + `PR #9911 <https://github.com/pantsbuild/pants/pull/9911>`_ + 1.29.0rc2 (6/04/2020) ---------------------
Add max to the ValueError for EmbeddingBag mode check Summary: Related to Pull Request resolved:
@@ -1316,7 +1316,7 @@ def embedding_bag(input, weight, offsets=None, max_norm=None, norm_type=2, raise ValueError("max mode does not support sparse weights") else: - raise ValueError("mode has to be one of sum or mean") + raise ValueError("mode has to be one of sum, mean or max") if max_norm is not None: with torch.no_grad():
hotkeys: Refactor text selectors to be an array.

This makes it easier in the future to see diffs and to add/remove text selectors for hotkey prevention.
@@ -133,10 +133,19 @@ exports.get_keypress_hotkey = function (e) { return keypress_mappings[e.which]; }; -exports.processing_text = function () { - var selector = 'input:focus,select:focus,textarea:focus,#compose-send-button:focus,.editable-section:focus'; +exports.processing_text = (function () { + var selector = [ + 'input:focus', + 'select:focus', + 'textarea:focus', + '#compose-send-button:focus', + '.editable-section:focus', + ].join(","); + + return function () { return $(selector).length > 0; }; +}()); exports.is_editing_stream_name = function (e) { return $(e.target).is(".editable-section");
Bot: add wait_until_guild_available This coroutine waits until the configured guild is available and ensures the cache is present. The on_ready event is inadequate because it only waits 2 seconds for a GUILD_CREATE gateway event before giving up and thus not populating the cache for unavailable guilds.
+import asyncio import logging import socket from typing import Optional import aiohttp +import discord from discord.ext import commands from bot import api +from bot import constants log = logging.getLogger('bot') @@ -24,6 +27,8 @@ class Bot(commands.Bot): super().__init__(*args, connector=self.connector, **kwargs) + self._guild_available = asyncio.Event() + self.http_session: Optional[aiohttp.ClientSession] = None self.api_client = api.APIClient(loop=self.loop, connector=self.connector) @@ -51,3 +56,37 @@ class Bot(commands.Bot): self.http_session = aiohttp.ClientSession(connector=self.connector) await super().start(*args, **kwargs) + + async def on_guild_available(self, guild: discord.Guild) -> None: + """ + Set the internal guild available event when constants.Guild.id becomes available. + + If the cache appears to still be empty (no members, no channels, or no roles), the event + will not be set. + """ + if guild.id != constants.Guild.id: + return + + if not guild.roles or not guild.members or not guild.channels: + log.warning( + "Guild available event was dispatched but the cache appears to still be empty!" + ) + return + + self._guild_available.set() + + async def on_guild_unavailable(self, guild: discord.Guild) -> None: + """Clear the internal guild available event when constants.Guild.id becomes unavailable.""" + if guild.id != constants.Guild.id: + return + + self._guild_available.clear() + + async def wait_until_guild_available(self) -> None: + """ + Wait until the constants.Guild.id guild is available (and the cache is ready). + + The on_ready event is inadequate because it only waits 2 seconds for a GUILD_CREATE + gateway event before giving up and thus not populating the cache for unavailable guilds. + """ + await self._guild_available.wait()
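A minimal usage sketch for the coroutine added above (the cog, listener, and print are hypothetical; only `wait_until_guild_available` and `constants.Guild.id` come from the change itself):

```python
from discord.ext import commands

from bot import constants


class ExampleCog(commands.Cog):
    """Hypothetical cog showing the intended call pattern."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self) -> None:
        # Without this wait, the configured guild's cache may still be empty
        # (no roles/members/channels) when on_ready fires.
        await self.bot.wait_until_guild_available()
        guild = self.bot.get_guild(constants.Guild.id)
        print(f"Guild cache ready: {len(guild.roles)} roles, {len(guild.channels)} channels")
```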
Update wordpress-directory-listing.yaml Add other paths susceptible to directory listing.
@@ -9,6 +9,10 @@ requests: - method: GET path: - "{{BaseURL}}/wp-content/uploads/" + - "{{BaseURL}}/wp-content/themes/" + - "{{BaseURL}}/wp-content/plugins/" + - "{{BaseURL}}/wp-content/plugins/hustle/views/admin/dashboard/" + - "{{BaseURL}}/wp-includes/" matchers-condition: and matchers: - type: status @@ -16,4 +20,4 @@ requests: - 200 - type: word words: - - Index of /wp-content/uploads + - Index of /
Lines that aren't in the legend should have their colors skipped I'm not positive how stable the "_linexxx" format is for lines that aren't shown in mpl, but this fixes my current issue.
@@ -90,6 +90,9 @@ def draw_line2d(data, obj): if marker and not show_line: addplot_options.append('only marks') + if obj.get_label().startswith("_line"): + addplot_options.append("forget plot") + # process options content.append('\\addplot ') if addplot_options:
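For context, a small matplotlib sketch of the behavior the `startswith("_line")` check relies on (assumes an older matplotlib; releases from 3.5 onward auto-name unlabeled artists `_childN` instead):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
labeled, = ax.plot([0, 1], [0, 1], label="shown in legend")
unlabeled, = ax.plot([0, 1], [1, 0])  # no label given

print(labeled.get_label())    # "shown in legend"
print(unlabeled.get_label())  # auto-generated, starts with "_" (e.g. "_line1")
ax.legend()                   # labels starting with "_" are excluded from the legend
```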
Reset_Caches: reset env caches even when there's no memoization

Property memoization is unrelated to caches in lexical environment lookups: we have to reset lookup caches even when no property is memoized.

TN:
@@ -2040,9 +2040,9 @@ package body ${ada_lib_name}.Analysis.Implementation is (Main_Trace, "In reset caches for unit " & To_String (Unit.File_Name)); Unit.Cache_Version := Unit.Context.Cache_Version; + Reset_Envs (Unit); % if ctx.has_memoization: Destroy (Unit.Memoization_Map); - Reset_Envs (Unit); % endif end if; end Reset_Caches;
Ignore reportlab vuln 39642 when running safety.

This was resolved in https://github.com/mitre/debrief/pull/39 by allowing users to set the reportlab trustedHosts configuration.
@@ -67,10 +67,12 @@ commands = coverage report [testenv:safety] +# Safety ignore list: +# 39642: reportlab vuln resolved in https://github.com/mitre/debrief/pull/39 deps = safety skip_install = true whitelist_externals=find commands = - safety check -r requirements.txt + safety check -r requirements.txt --ignore 39642 safety check -r requirements-dev.txt
Update docs for csvgrep

Update docs to include a way to use csvgrep to search for all rows that do not have an empty cell.
@@ -54,3 +54,7 @@ Search for rows relating to states with names beginning with the letter "I":: csvgrep -c 1 -r "^I" examples/realdata/FY09_EDU_Recipients_by_State.csv +Search for rows that do not contain an empty state cell:: + + csvgrep -c 1 -r "^$" -i examples/realdata/FY09_EDU_Recipients_by_State.csv +
Py3: Disabled augeas test that segfaults the test suite.

Because of a bug in python-augeas: https://github.com/hercules-team/python-augeas/issues/30
@@ -15,6 +15,7 @@ from tests.support.mock import ( # Import Salt Libs import salt.modules.augeas_cfg as augeas_cfg from salt.exceptions import SaltInvocationError +import salt.ext.six as six # Make sure augeas python interface is installed if augeas_cfg.HAS_AUGEAS: from augeas import Augeas as _Augeas @@ -26,7 +27,7 @@ class AugeasCfgTestCase(TestCase): Test cases for salt.modules.augeas_cfg ''' # 'execute' function tests: 3 - + @skipIf(six.PY3, 'Disabled pending https://github.com/hercules-team/python-augeas/issues/30') def test_execute(self): ''' Test if it execute Augeas commands
Update amadey.txt

Amadey's tails are always the pair of ```index.php```/```login.php```.
@@ -100,7 +100,7 @@ go-refund.com # Reference: https://twitter.com/adrian__luca/status/1148186673739685888 # Reference: https://any.run/report/2f41879d3656e45471a0a784d61eb339f343f7614a19d2916be28685d1501c0b/b69b53a3-1003-47c4-b836-20fe21cb5640 -http://46.166.129.157/index.php +http://46.166.129.157 # Reference: https://app.any.run/tasks/5c1df594-6f00-44e7-998d-d98c220babfc/ @@ -154,27 +154,50 @@ drgh3.in # Reference: https://app.any.run/tasks/96ce5eb3-0058-452f-8924-4946c769cae2/ -http://217.8.117.51/aW8bVds1/ +http://217.8.117.51 # Reference: https://app.any.run/tasks/de8dc698-6f59-43ca-a465-3baee439b34d/ -http://193.111.152.61/f25bn5Gf/ +http://193.111.152.61 + +# Reference: https://www.virustotal.com/gui/ip-address/51.38.140.6/relations + +http://51.38.140.6 # Generic trails /5vFgnRd4hdDbgS3H/index.php +/5vFgnRd4hdDbgS3H/login.php /8f74ede3-010d-4d83-834c-7f06e8d51100/index.php +/8f74ede3-010d-4d83-834c-7f06e8d51100/login.php +/Amadey/index.php /Amadey/login.php +/aW8bVds1/index.php +/aW8bVds1/login.php /boomsun/index.php +/f25bn5Gf/index.php +/f25bn5Gf/login.php /f5lkB/index.php +/f5lkB/login.php /Hfunr3U/index.php +/Hfunr3U/login.php /j88hNjkMn/index.php +/j88hNjkMn/login.php /g3VbWkG4/index.php +/g3VbWkG4/login.php /g5tUY/index.php +/g5tUY/login.php +/g81hYYq/index.php /g81hYYq/login.php +/gkkjs/index.php /gkkjs/login.php /madapam/index.php +/madapam/login.php /mBSqq12/index.php +/mBSqq12/login.php /S0soiAI/index.php +/S0soiAI/login.php /t1QccbN2/index.php +/t1QccbN2/login.php +/t7BnLkqwitOp52/index.php /t7BnLkqwitOp52/login.php
PR to update the new main_script path in "test_pvc_multi_snapshot_performance.py"

This PR addresses a reported failure: after the directory changes (PR5020), the script was failing because the main_script path had changed in the multi snapshot performance test.
@@ -112,7 +112,7 @@ class TestPvcMultiSnapshotPerformance(E2ETest): os.environ["PVCNAME"] = self.pvc_obj.name os.environ["INTERFACE"] = self.interface - main_script = "tests/e2e/performance/test_multi_snapshots.py" + main_script = "tests/e2e/performance/csi_tests/test_multi_snapshots.py" result = subprocess.run([main_script], stdout=subprocess.PIPE) log.info(f"Results from main script : {result.stdout.decode('utf-8')}")
docs: omit mention of Python 2.7 in 'CONTRIBUTING.rst' Closes
@@ -69,7 +69,6 @@ We use `nox <https://nox.readthedocs.io/en/latest/>`__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: - $ nox -s unit-2.7 $ nox -s unit-3.8 $ ... @@ -144,7 +143,6 @@ Running System Tests # Run all system tests $ nox -s system-3.8 - $ nox -s system-2.7 # Run a single system test $ nox -s system-3.8 -- -k <name of test> @@ -152,9 +150,8 @@ Running System Tests .. note:: - System tests are only configured to run under Python 2.7 and - Python 3.8. For expediency, we do not run them in older versions - of Python 3. + System tests are only configured to run under Python 3.8. + For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local auth settings and change some configuration in your project to
Update A_star.cpp

Fix bug: node indices were declared unsigned int, so the `< 0` bounds checks could never be true; declare them as signed int instead.
@@ -1119,7 +1119,7 @@ int A_star::trace_back(int current_node, Grid& grid){ while(trace_back_flag){ - unsigned int last_node = grid.vertices_total[dummy_node].parent; + int last_node = grid.vertices_total[dummy_node].parent; if(last_node<0 or last_node>=grid.vertices_total.size()){ trace_back_flag = false; @@ -1204,7 +1204,7 @@ bool A_star::CheckExendable_With_Certain_Length(int first_node_same_layer,int cu if(culmulated_length>=half_minL){ search_flag = false; }else{ - unsigned int next_node = dummy_node + first_direction; + int next_node = dummy_node + first_direction; if(next_node<0 or next_node>=grid.vertices_total.size() ) { search_flag = false; feasible = false; @@ -1227,7 +1227,7 @@ bool A_star::CheckExendable_With_Certain_Length(int first_node_same_layer,int cu if(culmulated_length>=half_minL){ search_flag = false; }else{ - unsigned int next_node = dummy_node + current_direction; + int next_node = dummy_node + current_direction; if(next_node<0 or next_node>=grid.vertices_total.size() ) { search_flag = false; feasible = false;
Fix RERUN_CMAKE Summary: Pull Request resolved:
@@ -168,6 +168,7 @@ import importlib from tools.setup_helpers.configure import * from tools.setup_helpers.generate_code import generate_code from tools.setup_helpers.ninja_builder import NinjaBuilder, ninja_build_ext +import tools.setup_helpers.configure ################################################################################ # Parameters parsed from environment @@ -375,8 +376,7 @@ for lib in dep_libs: description = 'Rebuild {} external library'.format(lib) def run(self): - global RERUN_CMAKE - RERUN_CMAKE = False + tools.setup_helpers.configure.RERUN_CMAKE = False build_libs([self.lib]) rebuild_dep.lib = lib rebuild_dep_cmds['rebuild_' + lib.lower()] = rebuild_dep @@ -557,8 +557,7 @@ class rebuild(distutils.command.build.build): ] + distutils.command.build.build.sub_commands def run(self): - global RERUN_CMAKE - RERUN_CMAKE = False + tools.setup_helpers.configure.RERUN_CMAKE = False distutils.command.build.build.run(self)
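The fix works around a standard Python scoping gotcha; a minimal illustration with a stand-in module (hypothetical names, `config` standing in for `tools.setup_helpers.configure`):

```python
import types

# Stand-in for tools.setup_helpers.configure
config = types.ModuleType("config")
config.RERUN_CMAKE = True

# `from config import *` effectively copies the binding into setup.py:
RERUN_CMAKE = config.RERUN_CMAKE

RERUN_CMAKE = False          # old pattern: rebinds only the local copy
print(config.RERUN_CMAKE)    # True -- the module that reads the flag never saw the change

config.RERUN_CMAKE = False   # the fix: assign through the module object
print(config.RERUN_CMAKE)    # False
```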
allow setting custom environment variables for mc admin subprocess `MinioAdmin` accepts another argument `env` which will be passed to the underlying `subprocess.run` call. It can be used to set custom environments for the `mc admin` subprocess, e.g. you could set `MC_HOST_<alias>` per `MinioAdmin` instance.
@@ -30,10 +30,11 @@ class MinioAdmin: def __init__( self, target, binary_path=None, config_dir=None, ignore_cert_check=False, - timeout=None, + timeout=None, env=None, ): self._target = target self._timeout = timeout + self._env = env self._base_args = [binary_path or "mc", "--json"] if config_dir: self._base_args += ["--config-dir", config_dir] @@ -47,6 +48,7 @@ class MinioAdmin: self._base_args + args, capture_output=True, timeout=self._timeout, + env=self._env, check=True, text=True, )
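A hypothetical usage sketch for the new `env` argument (alias, credentials, and host are placeholders; the import path is an assumption and has moved between minio-py releases):

```python
import os

from minio.minioadmin import MinioAdmin  # import path assumed

# Define the target host purely through mc's environment convention, so each
# MinioAdmin instance can point its `mc admin` subprocess at a different host.
env = dict(os.environ)
env["MC_HOST_myminio"] = "https://access-key:secret-key@minio.example.com:9000"

admin = MinioAdmin(target="myminio", env=env)
# Every admin call made through this instance now runs `mc --json admin ...`
# with MC_HOST_myminio present in the subprocess environment.
```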
Ssdeep fix

* fix ssdeep reputation script
* fix ssdeep reputation script
* update only bad score
* Merge branches 'master' and 'ssdeep_fix' of github.com:demisto/content into ssdeep_fix

# Conflicts:
#	Scripts/script-SSdeepReputation.yml
@@ -2,7 +2,7 @@ commonfields: id: SSDeepReputation version: -1 name: SSDeepReputation -script: |2 +script: |- REPUTATIONS = { 0: 'None', @@ -70,7 +70,7 @@ script: |2 max_score = max(map(lambda x: x.get('score'), related_indicators)) max_score_indicator = next(x for x in related_indicators if x.get('score', 0) == max_score) - if max_score > ssdeep_indicator.get('score', 0): + if max_score > ssdeep_indicator.get('score', 0) and max_score > 1: entry = { 'Type': entryTypes['note'], 'HumanReadable': 'Similarity to %s %s:%s' % (REPUTATIONS[max_score_indicator['score']], max_score_indicator['indicator_type'], max_score_indicator['value']),
Fix: avoid retrying improperly formatted FORGET messages

Problem: FORGET messages are coerced into a Pydantic model inside the forget message handler. This coercion can (and does) fail, for example if users add additional fields in the content.
Solution: Catch the Pydantic validation exception, log an error and discard the message.
@@ -8,6 +8,7 @@ from aioipfs.api import RepoAPI from aioipfs.exceptions import NotPinnedError from aleph_message.models import ForgetMessage, MessageType from aleph_message.models import ItemType +from pydantic import ValidationError from aleph.model.filepin import PermanentPin from aleph.model.hashes import delete_value @@ -15,7 +16,6 @@ from aleph.model.messages import Message from aleph.services.ipfs.common import get_ipfs_api from aleph.utils import item_type_from_hash - logger = logging.getLogger(__name__) @@ -210,12 +210,17 @@ async def get_target_message_info(target_hash: str) -> Optional[TargetMessageInf return TargetMessageInfo.from_db_object(message_dict) -async def handle_forget_message(message: Dict, content: Dict): +async def handle_forget_message(message: Dict, content: Dict) -> bool: # Parsing and validation # TODO: this is a temporary fix to release faster, finish od-message-models-in-pipeline message["content"] = content + try: forget_message = ForgetMessage(**message) + except ValidationError as e: + logger.error("Invalid forget message: %s", e) + return False + logger.debug(f"Handling forget message {forget_message.item_hash}") hashes_to_forget = forget_message.content.hashes
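A small illustration of the failure mode being caught (hypothetical model; the real `ForgetMessage` comes from aleph_message and is stricter than shown):

```python
from typing import List

from pydantic import BaseModel, ValidationError


class StrictContent(BaseModel):
    """Hypothetical stand-in: rejects fields it does not know about."""

    hashes: List[str]

    class Config:
        extra = "forbid"


try:
    StrictContent(hashes=["abc123"], surprise_field=1)  # user-added extra field
except ValidationError as exc:
    # Mirrors the handler fix: log and discard instead of retrying forever.
    print(f"Invalid forget message: {exc}")
```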
Minor Text Updates Minor Text Updates to "System Info Gathering Using Dxdiag Application"
@@ -6,12 +6,16 @@ author: Teoderick Contreras, Splunk type: Hunting datamodel: - Endpoint -description: This analytic is to a suspicious dxdiag.exe process command-line execution. - Dxdiag is used to collect the system info of the target host. This technique was seen used by Remcos RATS, - various actors, and other malware to collect information as part of the recon or collection phase of an attack. - This behavior should be rarely seen in a corporate network, but this command line can be used by a network - administrator to audit host machine specifications. Thus in some rare cases, this detection will contain - false positives in its results. To triage further, analyze what commands were passed after it pipes out the result to a file for further processing. +description: This analytic is to detect a suspicious dxdiag.exe process + command-line execution. Dxdiag is used to collect the system info of + the target host. This technique has been used by Remcos RATS, various + actors, and other malware to collect information as part of the recon + or collection phase of an attack. This behavior should rarely be seen + in a corporate network, but this command line can be used by a network + administrator to audit host machine specifications. Thus in some rare + cases, this detection will contain false positives in its results. To + triage further, analyze what commands were passed after it pipes out + the result to a file for further processing. search: '| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time) as lastTime from datamodel=Endpoint.Processes where `process_dxdiag` AND Processes.process = "* /t *" by Processes.dest Processes.user Processes.parent_process_name Processes.parent_process @@ -23,8 +27,8 @@ how_to_implement: To successfully implement this search you need to be ingesting your endpoints into the `Endpoint` datamodel in the `Processes` and `Filesystem` node. In addition, confirm the latest CIM App 4.20 or higher is installed and the latest TA for the endpoint product. -known_false_positives: this commandline can be used by network administrator to audit - host machine specification.filter is needed. +known_false_positives: This commandline can be used by a network administrator to audit + host machine specifications. Thus, a filter is needed. references: - https://app.any.run/tasks/df0baf9f-8baf-4c32-a452-16562ecb19be/ tags:
docs: Add nginx reload documentation.

This adds a reference for reloading nginx when the certificates are replaced so that the server works with the new certificates instead of the old ones.

Fixes:
@@ -125,3 +125,9 @@ sudo -s # If not already root ``` where HOSTNAME is the domain name (or IP address) to use on the generated certificate. + +After replacing the certificates, you need to reload `nginx` by +running the following as `root`: +``` +service nginx reload +```
Add some SLA probes HG-- branch : feature/microservices
# --------------------------------------------------------------------- # IGetSLAProbe # --------------------------------------------------------------------- -# Copyright (C) 2007-2016 The NOC Project +# Copyright (C) 2007-2017 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- @@ -34,7 +34,9 @@ class IGetSLAProbes(BaseInterface): "http-get", "dns", "ftp", - "dhcp" + "dhcp", + "owamp", # One-Way Active Measurement Protocol (RFC4656) + "twamp" # Two-Way Active Measurement Protocol (RFC5357) ]), "target": StringParameter(), "hw_timestamp": BooleanParameter(default=False)
Safely load YAML configuration Fixes
@@ -48,7 +48,7 @@ if __name__ == "__main__": logging.basicConfig(level=logging.INFO) with open(template_config, 'r') as tc: - config = yaml.load(tc) + config = yaml.safe_load(tc) # Work out which templates we are calculating if templates == "all":
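For context, a short sketch of what the switch buys (the malicious document is illustrative): `yaml.safe_load` only builds plain Python data, while a plain `yaml.load` with PyYAML's older default loader could construct arbitrary Python objects from tags embedded in the file:

```python
import yaml

config_text = "templates: all\nversion: 3"
print(yaml.safe_load(config_text))  # {'templates': 'all', 'version': 3}

# A document smuggling a python object tag is rejected by the safe loader
# instead of being constructed:
malicious = "!!python/object/apply:os.system ['echo pwned']"
try:
    yaml.safe_load(malicious)
except yaml.YAMLError as exc:
    print("rejected:", type(exc).__name__)  # ConstructorError
```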
sidebar: Move chevron in the stream list further from the scrollbar. Previously, the chevrons were too close to the scrollbar. This commit fixes that by moving it farther from the scrollbar.
@@ -125,7 +125,7 @@ li.hidden-filter { #global_filters .count, #stream_filters .count { position: absolute; - right: 20px; + right: 25px; top: 2px; padding: 2px 3px 1px 3px; background: #80837f; @@ -172,7 +172,7 @@ li.hidden-filter { } .topic-unread-count { - right: 20px; + right: 25px; } .private_message_count { @@ -190,6 +190,7 @@ ul.filters .arrow { position: absolute; right: 0px; top: 2px; + right: 4px; font-size: 0.8em; display: none; } @@ -245,7 +246,7 @@ ul.filters li.out_of_home_view li.muted_topic { line-height: 12px; padding-top: 4px; margin-right: 15px; - padding-left: 33px; + padding-left: 28px; } #stream_filters .subscription_block .stream-name { @@ -297,7 +298,8 @@ ul.expanded_private_messages { li.show-more-topics, li.topic-list-item { position: relative; - padding-left: 33px; + padding-left: 28px; + padding-right: 5px; } li.show-more-private-messages,
Remove Tokyo Metro Open Data

The link has been dead since early August; removing the dead service.
@@ -614,7 +614,6 @@ API | Description | Auth | HTTPS | Link | | Transport for Switzerland | Official Swiss Public Transport Open Data | `apiKey` | Yes | [Go!](https://opentransportdata.swiss/en/) | | Transport for The Netherlands | NS, only trains | `apiKey` | No | [Go!](http://www.ns.nl/reisinformatie/ns-api) | | Transport for The Netherlands | OVAPI, country-wide public transport | No | Yes | [Go!](https://github.com/skywave/KV78Turbo-OVAPI/wiki) | -| Transport for Tokyo, Japan | Tokyo Metro | `apiKey` | Yes | [Go!](https://developer.tokyometroapp.jp/info) | | Transport for Toronto, Canada | TTC | No | Yes | [Go!](https://myttc.ca/developers) | | Transport for United States | NextBus API | No | No | [Go!](http://www.nextbus.com/xmlFeedDocs/NextBusXMLFeed.pdf) | | Transport for Vancouver, Canada | TransLink | `OAuth` | Yes | [Go!](https://developer.translink.ca/) |
request_client: add workaround for handling certificates SSLContext in Python only takes a file path, so we dump the certificate into a temporary file before using it.
SPDX-License-Identifier: Apache-2.0 Copyright 2017 Massachusetts Institute of Technology. ''' +import os.path +import tempfile import requests @@ -10,7 +12,7 @@ from requests.packages.urllib3.poolmanager import PoolManager # pylint: disable class RequestsClient: - def __init__(self, base_url, tls_enabled, ignore_hostname=False, **kwargs): + def __init__(self, base_url, tls_enabled, ignore_hostname=False, verify_custom: str = False, **kwargs): if tls_enabled: self.base_url = f'https://{base_url}' else: @@ -19,12 +21,27 @@ class RequestsClient: if ignore_hostname: self.session.mount("http://", HostNameIgnoreAdapter()) self.session.mount("https://", HostNameIgnoreAdapter()) + self.verify_custom = verify_custom + self.temp_dir = None for arg, value in kwargs.items(): if isinstance(value, dict): value = self.__deep_merge( getattr(self.session, arg), value) setattr(self.session, arg, value) + def __enter__(self): + # This is a workaround for SSLContext not being able to load certificates directly + if self.verify_custom: + self.temp_dir = tempfile.TemporaryDirectory(prefix="keylime_") + self.session.verify = os.path.join(self.temp_dir.name, "agent.crt") + with open(self.session.verify, "w", encoding="utf-8") as f: + f.write(self.verify_custom) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.temp_dir: + self.temp_dir.cleanup() + def request(self, method, url, **kwargs): return self.session.request(method, self.base_url + url, **kwargs)
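A hypothetical caller-side sketch of the new context-manager behavior (URL, endpoint, and certificate contents are placeholders; the import path is assumed):

```python
from keylime.requests_client import RequestsClient  # import path assumed

# PEM text received over the wire rather than read from disk -- the reason
# the temp-file workaround exists at all.
cert_pem = "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"

with RequestsClient("agent.example.com:9002", tls_enabled=True,
                    verify_custom=cert_pem) as client:
    # requests' `verify` now points at the agent.crt written into the
    # TemporaryDirectory created in __enter__.
    response = client.request("GET", "/version")
    print(response.status_code)

# Leaving the `with` block removes the temporary directory and certificate.
```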
github actions: update image version

Pin the image version so the workflow doesn't break with future changes to the `latest` image.
@@ -14,7 +14,7 @@ on: jobs: lint: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: matrix: # only use one version for the lint step @@ -54,7 +54,7 @@ jobs: fi test: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 needs: lint strategy: max-parallel: 5
Add gzip support to Data Layer download client add gzip
@@ -145,7 +145,10 @@ async def insert_from_delta_file( try: async with aiohttp.ClientSession() as session: - async with session.get(server_info.url + "/" + filename, timeout=timeout, proxy=proxy_url) as resp: + headers = {"accept-encoding": "gzip"} + async with session.get( + server_info.url + "/" + filename, headers=headers, timeout=timeout, proxy=proxy_url + ) as resp: resp.raise_for_status() size = int(resp.headers.get("content-length", 0)) log.debug(f"Downloading delta file {filename}. Size {size} bytes.")
Update All Brazil Capacities No source update required
] ], "capacity": { - "hydro": 62095, + "hydro": 63797, "nuclear": 1990, - "solar": 3, - "thermal": 18962, - "wind": 28 + "solar": 910, + "wind": 28, + "unknown": 19365 }, "contributors": [ "https://github.com/alexanmtz", "https://github.com/corradio", - "https://github.com/systemcatch" + "https://github.com/systemcatch", + "https://github.com/nessie2013" ], "flag_file_name": "br.png", "parsers": { ] ], "capacity": { - "hydro": 14281, - "thermal": 3426 + "hydro": 22251, + "solar": 5, + "wind": 426, + "unknown": 3654 }, "contributors": [ "https://github.com/alexanmtz", "https://github.com/corradio", - "https://github.com/systemcatch" + "https://github.com/systemcatch", + "https://github.com/nessie2013" ], "flag_file_name": "br.png", "parsers": { ] ], "capacity": { - "hydro": 11008, - "solar": 132, - "thermal": 7214, - "wind": 8763 + "hydro": 11023, + "solar": 1525, + "wind": 12801, + "unknown": 7227 }, "contributors": [ "https://github.com/alexanmtz", "https://github.com/corradio", - "https://github.com/systemcatch" + "https://github.com/systemcatch", + "https://github.com/nessie2013" ], "flag_file_name": "br.png", "parsers": { ] ], "capacity": { - "hydro": 16491, + "hydro": 17045, "solar": 4, - "thermal": 4348, - "wind": 1938 + "wind": 2018, + "unknown": 4220 }, "contributors": [ "https://github.com/alexanmtz", "https://github.com/corradio", - "https://github.com/systemcatch" + "https://github.com/systemcatch", + "https://github.com/nessie2013" ], "flag_file_name": "br.png", "parsers": {
Update train_lightgcn.py Fix the "OMP: Warning Forking a process while a parallel region is active is potentially unsafe." issue
@@ -47,6 +47,9 @@ def parse_args(): parser.add_argument( "--batch_size", nargs="?", type=int, help="Batch size for training." ) + parser.add_argument( + "--dataset", nargs="?", type=str, help="Dataset Options" + ) return parser.parse_args() @@ -92,36 +95,14 @@ class LightGCN_train(TrainEngine): self.model_save_dir = os.path.join( self.config["system"]["model_save_dir"], self.config["model"]["save_name"] ) - self.max_n_update = self.config["model"]["max_n_update"] - for epoch in range(self.config["model"]["max_epoch"]): - print(f"Epoch {epoch} starts !") - print("-" * 80) - if epoch > 0 and self.eval_engine.n_no_update == 0: - # previous epoch have already obtained better result - self.engine.save_checkpoint(model_dir=self.model_save_dir) - - if self.eval_engine.n_no_update >= self.max_n_update: - print( - "Early stop criterion triggered, no performance update for {:} times".format( - self.max_n_update - ) - ) - break - + self.engine = LightGCNEngine(self.config) train_loader = self.data.instance_bpr_loader( batch_size=self.config["model"]["batch_size"], device=self.config["model"]["device_str"], ) - self.engine.train_an_epoch(epoch_id=epoch, train_loader=train_loader) - self.eval_engine.train_eval( - self.data.valid[0], self.data.test[0], self.engine.model, epoch - ) + self._train(self.engine, train_loader, self.model_save_dir) self.config["run_time"] = self.monitor.stop() - - def test(self): - """Test the model.""" - self.engine.resume_checkpoint(model_dir=self.model_save_dir) - super(LightGCN_train, self).test() + return self.eval_engine.best_valid_performance def tune_train(config):
Better error message for quantized dispatch Summary: Pull Request resolved: Fixes Test Plan: Imported from OSS
@@ -291,7 +291,22 @@ private: "Available functions are ", listAllDispatchKeys()) } - const std::string dispatch_key_str = dispatch_key.has_value() ? toString(*dispatch_key) : "None"; + // If the input is quantized, but the quantization is not supported. + if (dispatch_key.value() == TensorTypeId::QuantizedCPUTensorId) { + TORCH_CHECK(false, "Tried running '", operator_name_, "' with a", + " quantized tensor but '", operator_name_, "' expects a", + " non-quantized input."); + } + + // If the input is not quantized, but the kernel is. + if (kernels_.lookup(TensorTypeId::QuantizedCPUTensorId)) { + TORCH_CHECK(false, "Tried running '", operator_name_, "' but the input", + " is not quantized. Please ensure you have QuantStub", + " during model conversion, or you manually quantize the", + " input tensor."); + } + + const std::string dispatch_key_str = toString(*dispatch_key); TORCH_CHECK(false, "Didn't find kernel to dispatch to for operator '", operator_name_, "'. Tried to look up kernel for dispatch key '", dispatch_key_str, "'. Registered dispatch keys are: ", listAllDispatchKeys());
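For reference, the pattern the new error message points users toward (standard eager-mode quantization stubs; the module itself is illustrative):

```python
import torch
import torch.nn as nn


class Example(nn.Module):
    """Illustrative float model prepared so its inputs get quantized."""

    def __init__(self):
        super().__init__()
        self.quant = torch.quantization.QuantStub()      # float -> quantized
        self.conv = nn.Conv2d(1, 1, 1)
        self.dequant = torch.quantization.DeQuantStub()  # quantized -> float

    def forward(self, x):
        x = self.quant(x)    # without this, quantized kernels would reject x
        x = self.conv(x)
        return self.dequant(x)
```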
Update staging.yaml

Remove branches that have been merged.
@@ -68,9 +68,6 @@ branches: - sr/memoized-toggle # Sravan July 26 - bmb/sso-login-v2 # Biyeun July 26 - sk/data-registry-view # Simon July 29 - - ml/remove-show-field-sms-billables-report+ml/sms-billable-report-visibility-checkbox # Charl Aug 11 - - ml/sms-billable-report-visibility-checkbox # Minha August 5 - - ml/remove-show-field-sms-billables-report # Minha August 5 - cs/SC-1640-send-unique-identifier-telerivit # Charl Aug 6 - jls/smart-linking-mvp # Jenny Aug 9 - jls/smart-linking-mvp-content-type # Jenny Aug 23
Update randomforest.pyx

Updated the documentation based on comments on the PR.
@@ -332,9 +332,8 @@ class RandomForestClassifier(Base): """ Implements a Random Forest classifier model which fits multiple decision tree classifiers. - The user is responsible for setting the various - state variables to appropriate values. - The model at the moment uses only numpy arrays as inputs. + The model accepts cudf dataframe as an input only + for the fit function. Examples @@ -369,10 +368,10 @@ class RandomForestClassifier(Base): n_estimators : int (default = 10) number of trees in the forest. handle : cuml.Handle - If it is None, a new one is created just for this class. + If it is None, a new handle is created for this instance. split_algo : 0 for HIST, 1 for GLOBAL_QUANTILE and 3 for SPLIT_ALGO_END (default = 0) - The type of algorithm to be used to create the trees. + the algorithm to determine how nodes are split in the tree. bootstrap : boolean (default = True) Control bootstrapping. If set, each tree in the forest is built @@ -513,7 +512,8 @@ class RandomForestClassifier(Base): def get_params(self, deep=True): """ - Sklearn style return parameter state + Returns the value of all parameters + required to configure this estimator as a dictionary. Parameters ----------- deep : boolean (default = True) @@ -532,7 +532,9 @@ class RandomForestClassifier(Base): def set_params(self, **params): """ - Sklearn style set parameter state to dictionary of params. + Sets the value of parameters required to + configure this estimator, it functions similar to + the sklearn set_params. Parameters ----------- params : dict of new params
List nodes with 0 pods in list_pods_by_node Fixes
@@ -84,16 +84,16 @@ class KubernetesAPI: def list_pods_by_node(self): out = {} try: + nodes = self.list_nodes() pods = self.list_pods() except ApiException: raise + for node in nodes: + out[node['metadata']['name']] = [] + for pod in pods: node = pod['spec'].get('node_name', 'Unknown') - - if node not in out: - out[node] = [] - out[node].append(pod) return out
add workaround for non-existent directories not being reported in GL * GitLab returns a 200 OK when queried for a directory that does not exist. We can work around this by checking to see if the directory contains anything; empty directories shouldn't exist in git.
@@ -315,6 +315,12 @@ class GitLabProvider(provider.BaseProvider): elif resp.status == 404: # True Not Found raise exceptions.NotFoundError(path.full_path) + # GitLab currently returns 200 OK for nonexistent directories + # See: https://gitlab.com/gitlab-org/gitlab-ce/issues/34016 + # Fallback: empty directories shouldn't exist in git, + if page_nbr == 1 and len(data_page) == 0: + raise exceptions.NotFoundError(path.full_path) + data.extend(data_page) page_nbr = resp.headers.get('X-Next-Page', None)