Columns: message (string, length 13 to 484) · diff (string, length 38 to 4.63k)
Help channels: add a function to get in-use time. Future code will also need to get this time, so moving it out to a separate function reduces redundancy.
@@ -5,7 +5,7 @@ import logging import random import typing as t from collections import deque -from datetime import datetime, timezone +from datetime import datetime, timedelta, timezone from pathlib import Path import discord @@ -286,6 +286,15 @@ class HelpChannels(Scheduler, commands.Cog): if channel.category_id == category.id and not self.is_excluded_channel(channel): yield channel + async def get_in_use_time(self, channel_id: int) -> t.Optional[timedelta]: + """Return the duration `channel_id` has been in use. Return None if it's not in use.""" + log.trace(f"Calculating in use time for channel {channel_id}.") + + claimed_timestamp = await self.claim_times.get(channel_id) + if claimed_timestamp: + claimed = datetime.utcfromtimestamp(claimed_timestamp) + return datetime.utcnow() - claimed + @staticmethod def get_names() -> t.List[str]: """ @@ -548,10 +557,8 @@ class HelpChannels(Scheduler, commands.Cog): self.bot.stats.incr(f"help.dormant_calls.{caller}") - claimed_timestamp = await self.claim_times.get(channel.id) - if claimed_timestamp: - claimed = datetime.utcfromtimestamp(claimed_timestamp) - in_use_time = datetime.utcnow() - claimed + in_use_time = await self.get_in_use_time(channel.id) + if in_use_time: self.bot.stats.timing("help.in_use_time", in_use_time) unanswered = await self.unanswered.get(channel.id)
Implemented .env and .quartenv loading in the cli module. cli.ScriptInfo loads the env files if python-dotenv is installed, the files exist, and the QUART_SKIP_DOTENV env var is not set to '1'.
@@ -7,6 +7,12 @@ from typing import Any, Callable, Iterable, List, Optional, TYPE_CHECKING import click +try: + from dotenv import load_dotenv +except ImportError: + pass + + from .__about__ import __version__ from .helpers import get_debug_flag @@ -32,6 +38,7 @@ class ScriptInfo: app_import_path: Optional[str]=None, create_app: Optional[Callable]=None, ) -> None: + self.load_dotenv_if_exists() self.app_import_path = app_import_path or os.environ.get('QUART_APP') self.create_app = create_app self.data: dict = {} @@ -75,6 +82,20 @@ class ScriptInfo: return self._app + def load_dotenv_if_exists(self): + if os.environ.get('QUART_SKIP_DOTENV') == '1': + return + + if not Path(".env").is_file() and not Path(".quartenv").is_file(): + return + + try: + if Path(".env").is_file(): + load_dotenv() + if Path(".quartenv").is_file(): + load_dotenv(dotenv_path=Path(".") / ".quartenv") + except NameError: + print("* Tip: There are .env files present. Do \"pip install python-dotenv\" to use them.") pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
Make nunique in DataFrame return a Koalas DataFrame instead of pandas'. The approach is basically similar to `DataFrame._reduce_for_stat_function`.
@@ -3504,7 +3504,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})] dropna: bool = True, approx: bool = False, rsd: float = 0.05, - ) -> pd.Series: + ) -> "ks.Series": """ Return number of unique elements in the object. @@ -3527,7 +3527,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})] Returns ------- - The number of unique values per column as a pandas Series. + The number of unique values per column as a Koalas Series. Examples -------- @@ -3550,22 +3550,31 @@ defaultdict(<class 'list'>, {'col..., 'col...})] B 1 Name: 0, dtype: int64 """ + from databricks.koalas.series import _col + axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') - res = self._sdf.select( + sdf = self._sdf.select( [ self._kser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] - ).toPandas() - if self._internal.column_labels_level == 1: - res.columns = [label[0] for label in self._internal.column_labels] - else: - res.columns = pd.MultiIndex.from_tuples(self._internal.column_labels) - if self._internal.column_label_names is not None: - res.columns.names = self._internal.column_label_names - return res.T.iloc[:, 0] + ) + + # The data is expected to be small so it's fine to transpose/use default index. + with ks.option_context( + "compute.default_index_type", "distributed", "compute.max_rows", None + ): + kdf = DataFrame(sdf) # type: ks.DataFrame + internal = _InternalFrame( + spark_frame=kdf._internal.spark_frame, + index_map=kdf._internal.index_map, + column_labels=self._internal.column_labels, + column_label_names=self._internal.column_label_names, + ) + + return _col(DataFrame(internal).transpose()) def round(self, decimals=0): """
Cleaned up outdated information in VEP docs. Fixes
@@ -4916,9 +4916,6 @@ class VariantDataset(object): the `LOFTEE plugin <https://github.com/konradjk/loftee>`__ on the current variant dataset and adds the result as a variant annotation. - If the variant annotation path defined by ``root`` already exists and its schema matches the VEP schema, then - Hail only runs VEP for variants for which the annotation is missing. - **Examples** Add VEP annotations to the dataset: @@ -4976,7 +4973,7 @@ class VariantDataset(object): **Annotations** Annotations with the following schema are placed in the location specified by ``root``. - The schema can be confirmed with :py:attr:`~hail.VariantDataset.variant_schema`, :py:attr:`~hail.VariantDataset.sample_schema`, and :py:attr:`~hail.VariantDataset.global_schema`. + The full resulting dataset schema can be queried with :py:attr:`~hail.VariantDataset.variant_schema`. .. code-block:: text @@ -5121,8 +5118,8 @@ class VariantDataset(object): :param str root: Variant annotation path to store VEP output. - :param bool csq: If true, annotates VCF CSQ field as a String. - If False, annotates with the full nested struct schema + :param bool csq: If ``True``, annotates VCF CSQ field as a String. + If ``False``, annotates with the full nested struct schema :return: An annotated with variant annotations from VEP. :rtype: :py:class:`.VariantDataset`
fix(plugins): changing the way ftrack is querying entity_type. This will remove the server entity duplicity error on MySQL.
@@ -44,7 +44,15 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): input_data = context.data["hierarchyContext"] + # self.import_to_ftrack(input_data) + + try: self.import_to_ftrack(input_data) + except Exception as exc: + import sys + import traceback + self.log.info(traceback.format_exc(sys.exc_info())) + raise Exception("failed") def import_to_ftrack(self, input_data, parent=None): for entity_name in input_data: @@ -66,8 +74,8 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin): # try to find if entity already exists else: - query = '{} where name is "{}" and parent_id is "{}"'.format( - entity_type, entity_name, parent['id'] + query = 'TypedContext where name is "{0}" and project.full_name is "{1}"'.format( + entity_name, self.ft_project["full_name"] ) try: entity = self.session.query(query).one()
Update model_utils.py. The previous documentation was not 100% right.
@@ -293,32 +293,7 @@ def depth_weighting(mesh, indActive=None, v=2, z0=None): Notes: - Currently, z0 equal to the average clearance (flight hight), - so z0 must be either a scalar or 2d array. - Specifically, without the topo, the clearance (z0) is constant parameter, - with the topography, the clearance (z0) should be a 2d array, because of - elevation varing. - - Usage: - - Without topo: - - depth_weighting( - mesh, v = 2, z0 = a scalar parameter - ) - - With topo: - - depth_weighting( - mesh, v = 2, z0 = topo_xyz - ) - - if you want to do another minor adjustment to make the decay curve perform better, - - depth_weighting( - mesh, v = 2, z0 = topo_xyz + factor - ) - + The depth weighting depends on the depth from receiver locations. """ @@ -335,21 +310,19 @@ def depth_weighting(mesh, indActive=None, v=2, z0=None): # With topography: z0 is a 2d array elif len(z0.shape) == 2: - assert indActive is not None, "indActive cannot be None with topography!" - tree = cKDTree(z0[:, :-1]) _, ind = tree.query(mesh.cell_centers[:, :-1]) delta_z = np.abs(mesh.cell_centers[:, -1] - z0[ind, -1]) - delta_z = delta_z[indActive] else: raise Exception( "z0 must be either a scalar or 2d array!" ) + if indActive is not None: + delta_z = delta_z[indActive] wz = (delta_z) ** (-0.5*v) - return wz / np.nanmax(wz)
Update tryit.ipynb with new features!
"cell_type": "markdown", "metadata": {}, "source": [ - "<img src=\"https://raw.githubusercontent.com/euroargodev/argopy/master/docs/_static/argopy_logo_long.png\" alt=\"argopy logo\" width=\"200\"/> " + "<img src=\"https://raw.githubusercontent.com/euroargodev/argopy/master/docs/_static/argopy_logo_long.png\" alt=\"argopy logo\" width=\"200\"/> \n", + "\n", + "**argopy** will manage for you all the complicated stuff of localising, downloading and formatting Argo data. Focus on your science !" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Import the ``argopy`` data fetcher:" + "So, simply import the ``argopy`` data fetcher:" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Then, to get access to Argo data, all you need is 1 line of code:" + "Then, to get access to Argo data with 1 line of code:" ] }, { "metadata": {}, "outputs": [], "source": [ - "ds = ArgoDataFetcher().region([-75, -45, 20, 30, 0, 100, '2011', '2012']).to_xarray()" + "ds = ArgoDataFetcher().region([-75, -45, 20, 30, 0, 100, '2011-01', '2011-06']).to_xarray()" ] }, { "metadata": {}, "outputs": [], "source": [ - "print(ds)" + "ds" ] }, { "outputs": [], "source": [ "ds = ds.argo.point2profile()\n", - "print(ds)" + "ds" ] }, { "metadata": {}, "outputs": [], "source": [ - "ds = ArgoDataFetcher().float(6902746).to_xarray()" + "ArgoDataFetcher().float(6902746).to_xarray()" ] }, { "metadata": {}, "outputs": [], "source": [ - "ds = ArgoDataFetcher().profile(6902755, 12).to_xarray()" + "ArgoDataFetcher().profile(6902755, 12).to_xarray()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "and finally, if you want to work with data interpolated on the same vertical levels (in pressure), than you can simply do it as well like:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "ds = ArgoDataFetcher().float(6902746).to_xarray()\n", + "ds = ds.argo.point2profile()\n", + "ds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds.argo.interp_std_levels(np.arange(0,1000,10))" ] } ],
refactor: helper: Separate setting counts for model & UI. Helper.set_counts is separated into setting counts for the model first and then using the model data to set the counts in the UI.
@@ -84,14 +84,14 @@ def asynch(func: Any) -> Any: return wrapper -def set_count(id_list: List[int], controller: Any, new_count: int) -> None: - # This method applies new_count for 'new message' (1) or 'read' (-1) - # (we could ensure this in a different way by a different type) - assert new_count == 1 or new_count == -1 - - messages = controller.model.index['messages'] - unread_counts = controller.model.unread_counts # type: UnreadCounts - +def _set_count_in_model(id_list: List[int], new_count: int, + messages: Dict[int, Message], + unread_counts: UnreadCounts) -> None: + """ + This function doesn't explicitly set counts in model, + but updates `unread_counts` (which can update the model + if it's passed in, but is not tied to it). + """ for id in id_list: msg = messages[id] @@ -118,10 +118,16 @@ def set_count(id_list: List[int], controller: Any, new_count: int) -> None: elif new_count == 1: unreads[key] = new_count - # if view is not yet loaded. Usually the case when first message is read. - while not hasattr(controller, 'view'): - time.sleep(0.1) +def _set_count_in_view(id_list: List[int], controller: Any, new_count: int, + messages: Dict[int, Message], + unread_counts: UnreadCounts) -> None: + """ + This function for the most part contains the logic for setting the + count in the UI buttons. The later buttons (all_msg, all_pms) + additionally set the current count in the model and make use of the + same in the UI. + """ stream_buttons_log = controller.view.stream_w.log is_open_topic_view = controller.view.left_panel.is_in_topic_view if is_open_topic_view: @@ -174,6 +180,22 @@ def set_count(id_list: List[int], controller: Any, new_count: int) -> None: unread_counts['all_msg'] += new_count all_msg.update_count(unread_counts['all_msg']) + +def set_count(id_list: List[int], controller: Any, new_count: int) -> None: + # This method applies new_count for 'new message' (1) or 'read' (-1) + # (we could ensure this in a different way by a different type) + assert new_count == 1 or new_count == -1 + messages = controller.model.index['messages'] + unread_counts = controller.model.unread_counts # type: UnreadCounts + + _set_count_in_model(id_list, new_count, messages, unread_counts) + + # if view is not yet loaded. Usually the case when first message is read. + while not hasattr(controller, 'view'): + time.sleep(0.1) + + _set_count_in_view(id_list, controller, new_count, messages, unread_counts) + while not hasattr(controller, 'loop'): time.sleep(0.1) controller.update_screen()
Displays the preferred IP if you have a DS-Lite connection, for example.
@@ -104,7 +104,7 @@ fi # get IP address & port local_ip=$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/') -public_ip=$(curl -s ipinfo.io/ip) +public_ip=$(curl -s http://v4v6.ipv6-test.com/api/myip.php) public_port=$(cat ${bitcoin_dir}/bitcoin.conf 2>/dev/null | grep port= | awk -F"=" '{print $2}') if [ "${public_port}" = "" ]; then if [ $chain = "test" ]; then
[issue] Add support for AWS Organizations: fix Travis build error.
@@ -181,7 +181,7 @@ class OrganizationsBackend(BaseBackend): def validate_parent_id(self, parent_id): try: self.get_organizational_unit_by_id(parent_id) - except RESTError as e: + except RESTError: raise RESTError( 'ParentNotFoundException', "You specified parent that doesn't exist."
Fix permission. uses: john-shaffer/cache@sudo-tar
@@ -48,7 +48,7 @@ jobs: ${{ runner.os }}-pip-new- - name: Cache talib if: ${{ runner.os == 'Linux' }} - uses: actions/cache@v2 + uses: john-shaffer/cache@sudo-tar id: talib-cache with: path: |
Stop submit button for MultipleChoiceInput from firing ng-click twice.
@@ -39,6 +39,9 @@ oppia.directive('oppiaInteractiveMultipleChoiceInput', [ $scope.answer = null; $scope.submitAnswer = function(answer) { + if (answer === null) { + return; + } answer = parseInt(answer, 10); $scope.onSubmit({ answer: answer,
client: isolated unicode fix for Windows. While the tarfile documentation says that encoding defaults to 'utf-8' on Windows, this is a lie. So force it, and it seems to have magically fixed the problem.
@@ -1699,8 +1699,8 @@ def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks): elif filetype == 'tar': basedir = os.path.dirname(fullpath) - with tarfile.TarFile(fileobj=srcfileobj) as extractor: - for ti in extractor: + with tarfile.TarFile(fileobj=srcfileobj, encoding='utf-8') as t: + for ti in t: if not ti.isfile(): logging.warning( 'Path(%r) is nonfile (%s), skipped', @@ -1711,7 +1711,7 @@ def fetch_isolated(isolated_hash, storage, cache, outdir, use_symlinks): logging.error( 'Path(%r) is outside root directory', fp) - ifd = extractor.extractfile(ti) + ifd = t.extractfile(ti) file_path.ensure_tree(os.path.dirname(fp)) putfile(ifd, fp, 0700, ti.size)
Update es-words.yml. A user pointed out that "This field is required" wasn't being translated, and provided a translation.
@@ -23,4 +23,5 @@ es: "Back": "Atras" "Loading. Please wait . . . ": "Cargando. Espere, por favor." "You must sign your name to continue.": "Tiene que firmar antes de continuar." - "None of the above": "Ninguno" + "None of the above": "Ninguno", + "This field is required.": "Esta pregunta es requerida."
Update scanner-util.js. Add modality to the output CSV.
@@ -16,7 +16,7 @@ var yesterday = getYesterday(); var mongoarray = db.feature_set.aggregate([{ $match : {"Modality":"MR", "AcquisitionTime": { "$exists" : true }, "StudyDate":{$in:[param1]}, "ImageType":/ORIGINAL/i, "AcquisitionTime" : { "$exists" : true },"AcquisitionDate" : { "$exists" : true } }}, { "$group":{ - "_id": { StudyInstanceUID: "$StudyInstanceUID", PatientID: "$PatientID", DeviceSerialNumber: "$DeviceSerialNumber", StudyDescription: "$StudyDescription"}, + "_id": { StudyInstanceUID: "$StudyInstanceUID", PatientID: "$PatientID", DeviceSerialNumber: "$DeviceSerialNumber", StudyDescription: "$StudyDescription", Modality: "$Modality"}, "earliestTime": {"$min": { $add: [ {$toInt:{$substr:["$AcquisitionTime", 0, 6]}}, { $cond: { if: { $eq: [ "$AcquisitionDate", "$StudyDate" ] }, then: 240000, else: 480000 } }] }}, "latestTime": {"$max": { $add: [ {$toInt:{$substr:["$AcquisitionTime", 0, 6]}}, { $cond: { if: { $eq: [ "$AcquisitionDate", "$StudyDate" ] }, then: 240000, else: 480000 } }] }}, "series": {$sum: 1} @@ -24,7 +24,7 @@ var mongoarray = db.feature_set.aggregate([{ $match : {"Modality":"MR", "Acquis {$project: { "startTime":{$subtract:["$earliestTime",240000]}, "endTime":{$subtract:["$latestTime", 240000]},"AcquisitionDate" : "$AcquisitionDate", "numberOfSeries" : "$series", "durationInMinutes": {$add:[ {$subtract:[{ $toInt:{$substr: [ "$latestTime", 2, 2 ]}}, { $toInt:{$substr: [ "$earliestTime", 2, 2 ]}}]}, {$multiply:[ {$subtract:[{ $toInt:{$substr: [ "$latestTime", 0, 2 ]}}, { $toInt:{$substr: [ "$earliestTime", 0, 2 ]} }]}, 60]}, {$divide:[ {$subtract:[{ $toInt:{$substr: [ "$latestTime", 4, 2 ]}}, { $toInt:{$substr: [ "$earliestTime", 4, 2 ]} }]}, 60]} ]} }}], { allowDiskUse: true }).forEach(function(study) { - print(study._id.DeviceSerialNumber + ", " + study._id.StudyInstanceUID + "," + study._id.PatientID + ", " + study.durationInMinutes + ", " + study.numberOfSeries + ", " + study.startTime + ", " + study.endTime + ", " + study._id.StudyDescription); + print(study._id.DeviceSerialNumber + ", " + study._id.StudyInstanceUID + "," + study._id.PatientID + ", " + study.durationInMinutes + ", " + study.numberOfSeries + ", " + study.startTime + ", " + study.endTime + ", " + study._id.StudyDescription + ", " + study._id.Modality); }); conn.close();
min_epochs is broken in ptl right now, so set it to 0 for now.
@@ -56,8 +56,8 @@ RECOGNITION_HYPER_PARAMS = {'pad': 16, 'batch_size': 1, 'quit': 'early', 'epochs': -1, - 'min_epochs': 5, - 'lag': 5, + 'min_epochs': 0, + 'lag': 10, 'min_delta': None, 'optimizer': 'Adam', 'lrate': 1e-3, @@ -86,7 +86,7 @@ SEGMENTATION_HYPER_PARAMS = {'line_width': 8, 'freq': 1.0, 'quit': 'dumb', 'epochs': 50, - 'min_epochs': 5, + 'min_epochs': 0, 'lag': 10, 'min_delta': None, 'optimizer': 'Adam',
BUG: fixed kwarg spelling. Fixed spelling of the `decode_timedelta` kwarg.
@@ -591,7 +591,7 @@ def load_netcdf_pandas(fnames, strict_meta=False, file_format='NETCDF4', def load_netcdf_xarray(fnames, strict_meta=False, file_format='NETCDF4', - epoch_name='Epoch', decode_deltatime=False, + epoch_name='Epoch', decode_timedelta=False, labels={'units': ('units', str), 'name': ('long_name', str), 'notes': ('notes', str), 'desc': ('desc', str), @@ -648,9 +648,9 @@ def load_netcdf_xarray(fnames, strict_meta=False, file_format='NETCDF4', # Load the data differently for single or multiple files if len(fnames) == 1: - data = xr.open_dataset(fnames[0], decode_deltatime=decode_deltatime) + data = xr.open_dataset(fnames[0], decode_timedelta=decode_timedelta) else: - data = xr.open_mfdataset(fnames, decode_deltatime=decode_deltatime, + data = xr.open_mfdataset(fnames, decode_timedelta=decode_timedelta, combine='by_coords') # Copy the variable attributes from the data object to the metadata
settings: Replace user groups delete button with fa-trash-o. This makes this settings page a bit more consistent with the rest of the site.
{{t 'Saved' }} </button> <button class="button save-status btn-danger small"> - {{t 'Discard changes' }} + <i class="fa fa-undo" aria-label="{{t 'Delete' }}" title="{{t 'Delete' }}"></i> </button> <button class="button rounded small delete btn-danger"> - {{t 'Delete' }} + <i class="fa fa-trash-o" aria-label="{{t 'Delete' }}" title="{{t 'Delete' }}"></i> </button> </h4> <p class="subscribers">{{t 'Subscribers' }}</p>
Fix AttributeError in mitogen.core.Context.send_await(). As of Receiver objects do not have a get_data() method and Receiver.get() does not unpickle the message.
@@ -769,9 +769,10 @@ class Context(object): def send_await(self, msg, deadline=None): """Send `msg` and wait for a response with an optional timeout.""" receiver = self.send_async(msg) - response = receiver.get_data(deadline) - IOLOG.debug('%r._send_await() -> %r', self, response) - return response + response = receiver.get(deadline) + data = response.unpickle() + IOLOG.debug('%r._send_await() -> %r', self, data) + return data def __repr__(self): return 'Context(%s, %r)' % (self.context_id, self.name)
Polish to test_read_<spec/map>_files: changed assert construction, fixed variable name bug.
@@ -31,8 +31,7 @@ def test_read_map_files(): # how many map files we expect to see n_map_files = 28 - if len(hdr_map_file_list) != n_map_files: - assert False, ( + assert len(hdr_map_file_list) == n_map_files, ( "test_read_map_files has wrong number data files: found {}, expected " " {}".format(len(hdr_map_file_list), n_map_files)) @@ -52,10 +51,9 @@ def test_read_spec_files(): # how many spec files expected n_spec_files = 6 - if len(hdr_spec_file_list) != n_spec_files: - assert False, ( + assert len(hdr_spec_file_list) == n_spec_files, ( "test_spectra has wrong number data files: found {}, expected " - " {}".format(len(hdr_file_list), n_data_files)) + " {}".format(len(hdr_spec_file_list), n_spec_files)) # b.t.w. If this assert happens, py.test reports one more test # than it would have otherwise.
Make loss function configurable in trax. Was thinking of using trax in PPO so did it, but will probably integrate it later on.
@@ -288,6 +288,7 @@ def reshape_by_device(train_data, num_devices): @gin.configurable(blacklist=["output_dir"]) def train(output_dir, model=gin.REQUIRED, + loss_fun=loss, inputs=trax_inputs.inputs, optimizer=trax_opt.adam, lr_schedule=lr.MultifactorSchedule, @@ -303,6 +304,8 @@ def train(output_dir, output_dir: Directory where to put the logs and checkpoints. model: The model to train as a callable returning 2 callables, an init_fun and apply_fun. + loss_fun: callable with signature: params, trax.inputs.Inputs, model, rng + -> loss. inputs: callable returning trax.inputs.Inputs. optimizer: The optimizer as a callable taking a learning_rate callable and returning 2 callables, opt_init and opt_update. @@ -348,7 +351,7 @@ def train(output_dir, # jit model_predict and update so they're fast jit_model_predict = backend.jit(model_predict) # for evaluation - jit_update_fun = _jit_update_fun(model_predict, loss, optimizer, lr_fun, + jit_update_fun = _jit_update_fun(model_predict, loss_fun, optimizer, lr_fun, num_devices) print() @@ -362,7 +365,7 @@ def train(output_dir, # Non-compiled debug step helps find problems in models easier. if run_debug_step: - debug_loss = loss(params, next(train_stream), model_predict, rng) + debug_loss = loss_fun(params, next(train_stream), model_predict, rng) step_log(step, "Debug step loss %.8f" % debug_loss) for epoch, epoch_steps in epochs(train_steps, epoch_steps): @@ -421,8 +424,8 @@ def train(output_dir, old_lr_fun = lr_fun lr_fun = lr_schedule(history) if lr_fun != old_lr_fun: # For performance, only jit if there is a change. - jit_update_fun = _jit_update_fun(model_predict, loss, optimizer, lr_fun, - num_devices) + jit_update_fun = _jit_update_fun(model_predict, loss_fun, optimizer, + lr_fun, num_devices) # Flush summary writers train_sw.writer.flush()
Add range to the sensor table for clarification when using the expose attribute.
@@ -8,7 +8,7 @@ except ModuleNotFoundError: # Defines the column order of the printed table. # ["dpt_number", "value_type", "dpt_size", "unit", "dpt_range"] -COLUMN_ORDER = ["dpt_number", "value_type", "dpt_size", "unit"] +COLUMN_ORDER = ["dpt_number", "value_type", "dpt_size", "dpt_range", "unit"] # Defines the column adjustment of the printed table. # "left", "right" or "center"
Factor out deployment getting logic. Fixed a bug in execute where `run` was not awaited.
@@ -73,12 +73,27 @@ def exception_traceback(exc: Exception) -> str: return "".join(list(tb.format())) -def check_if_deprecated_deployment(deployment): +async def get_deployment(client, name, deployment_id): + if name is None and deployment_id is not None: + try: + deployment = await client.read_deployment(deployment_id) + except PrefectHTTPStatusError: + exit_with_error(f"Deployment {deployment_id!r} not found!") + elif name is not None: + try: + deployment = await client.read_deployment_by_name(name) + except ObjectNotFound: + exit_with_error(f"Deployment {name!r} not found!") + else: + exit_with_error("Must provide a deployment name or id") + if not deployment.manifest_path: exit_with_error( f"This deployment has been deprecated. Please see https://orion-docs.prefect.io/concepts/deployments/ to learn how to create a deployment." ) + return deployment + class RichTextIO: def __init__(self, console, prefix: str = None) -> None: @@ -183,21 +198,7 @@ async def run( The flow run will not execute until an agent starts. """ async with get_client() as client: - if name is None and deployment_id is not None: - try: - deployment = await client.read_deployment(deployment_id) - except PrefectHTTPStatusError: - exit_with_error(f"Deployment {deployment_id!r} not found!") - elif name is not None: - try: - deployment = await client.read_deployment_by_name(name) - except ObjectNotFound: - exit_with_error(f"Deployment {name!r} not found!") - else: - exit_with_error("Must provide a deployment name or id") - - check_if_deprecated_deployment(deployment) - + deployment = await get_deployment(client, name, deployment_id) flow_run = await client.create_flow_run_from_deployment(deployment.id) app.console.print(f"Created flow run {flow_run.name!r} ({flow_run.id})") @@ -219,30 +220,15 @@ async def execute( This command will block until the flow run completes. """ - assert_deployment_name_format(name) - async with get_client() as client: - if name is None and deployment_id is not None: - try: - deployment = await client.read_deployment(deployment_id) - except PrefectHTTPStatusError: - exit_with_error(f"Deployment {deployment_id!r} not found!") - elif name is not None: - try: - deployment = await client.read_deployment_by_name(name) - except ObjectNotFound: - exit_with_error(f"Deployment {name!r} not found!") - else: - exit_with_error("Must provide a deployment name or id") - - check_if_deprecated_deployment(deployment) + deployment = await get_deployment(client, name, deployment_id) app.console.print("Loading flow from deployed location...") flow = await load_flow_from_deployment(deployment, client=client) parameters = deployment.parameters or {} app.console.print("Running flow...") - state = flow._run(**parameters) + state = await flow._run(**parameters) if state.is_failed(): exit_with_error("Flow run failed!")
Reformat src/conf.py to conform to the latest python black rules. Python black removed Python 2 support, so it now flags the `u`-prefixed strings. We picked up python black 22.1 (`Collecting black`, `Downloading black-22.1.0-py3-none-any.whl (160 kB)`). It removed Python 2 support: * *
@@ -36,12 +36,12 @@ nitpicky = True version = "3.2" release = "3.2.0" -project = u"Apache CouchDB\u00ae" +project = "Apache CouchDB\u00ae" -copyright = u"%d, %s" % ( +copyright = "%d, %s" % ( datetime.datetime.now().year, - u"Apache Software Foundation. CouchDB\u00ae is a registered trademark of the " - + u"Apache Software Foundation", + "Apache Software Foundation. CouchDB\u00ae is a registered trademark of the " + + "Apache Software Foundation", ) primary_domain = "http"
Increase repeater post timeout to 75 seconds. 99DOTS responses sometimes take ~60s to process, which leads us to mark those records as failed.
@@ -6,7 +6,7 @@ MIN_RETRY_WAIT = timedelta(minutes=60) CHECK_REPEATERS_INTERVAL = timedelta(minutes=5) CHECK_REPEATERS_KEY = 'check-repeaters-key' -POST_TIMEOUT = 45 # seconds +POST_TIMEOUT = 75 # seconds RECORD_PENDING_STATE = 'PENDING' RECORD_SUCCESS_STATE = 'SUCCESS'
Update to fix the error returned when 1 is chosen as the start URL.
@@ -88,7 +88,7 @@ class WebNovelCrawler: if chapter.isdigit(): chapter = int(chapter) if 1 <= chapter <= len(self.chapters): - return chapter - 1 + return chapter # end if # end if for i, ch_id in enumerate(self.chapters): @@ -106,7 +106,7 @@ class WebNovelCrawler: end = self.get_chapter_index(self.end_chapter) or len(self.chapters) if not start: return future_to_url = {self.executor.submit(self.parse_chapter, index):\ - index for index in range(start, end)} + index for index in range(start-1, end)} # Wait till finish [x.result() for x in concurrent.futures.as_completed(future_to_url)] # end def
framework/pluginloader: Make sure to also clear aliases. Previously, when 'clearing' the pluginloader, any aliases already discovered were not removed; now ensure all discovered items are removed.
@@ -455,6 +455,8 @@ class PluginLoader(object): """ Clear all discovered items. """ self.plugins = {} self.kind_map.clear() + self.aliases.clear() + self.global_param_aliases.clear() def reload(self): """ Clear all discovered items and re-run the discovery. """
Update installation.md. Mounting the configuration file with the ":ro" flag will prevent users from editing the config in the new v12.0 UI.
@@ -38,7 +38,7 @@ services: frigate: ... volumes: - - /path/to/your/config.yml:/config/config.yml:ro + - /path/to/your/config.yml:/config/config.yml - /path/to/your/storage:/media/frigate - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear target: /tmp/cache @@ -55,7 +55,7 @@ services: frigate: ... volumes: - - /path/to/your/config.yml:/config/config.yml:ro + - /path/to/your/config.yml:/config/config.yml - /path/to/network/storage:/media/frigate - /path/to/local/disk:/db - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear @@ -111,7 +111,7 @@ services: - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware volumes: - /etc/localtime:/etc/localtime:ro - - /path/to/your/config.yml:/config/config.yml:ro + - /path/to/your/config.yml:/config/config.yml - /path/to/your/storage:/media/frigate - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear target: /tmp/cache @@ -135,7 +135,7 @@ docker run -d \ --device /dev/dri/renderD128 \ --shm-size=64m \ -v /path/to/your/storage:/media/frigate \ - -v /path/to/your/config.yml:/config/config.yml:ro \ + -v /path/to/your/config.yml:/config/config.yml \ -v /etc/localtime:/etc/localtime:ro \ -e FRIGATE_RTSP_PASSWORD='password' \ -p 5000:5000 \
Change data-stream-name references to data-stream-id. This changes all references to data-stream-name into more predictable data-stream-id references in the subscriptions overlay, which prevents unescaped characters and stream renames from breaking selectors.
@@ -69,15 +69,20 @@ exports.set_all_stream_audible_notifications_to = function (new_setting) { set_notification_setting_for_all_streams("audible_notifications", new_setting); }; +function get_stream_id(target) { + if (target.constructor !== jQuery) { + target = $(target); + } + return target.closest(".stream-row, .subscription_settings").attr("data-stream-id"); +} // Finds the stream name of a jquery object that's inside a // .stream-row or .subscription_settings element. function get_stream_name(target) { - if (target.constructor !== jQuery) { - target = $(target); + var stream = stream_data.get_sub_by_id(get_stream_id(target)); + if (stream) { + return stream.name; } - var stream_id = target.closest(".stream-row, .subscription_settings").attr("data-stream-id"); - return stream_data.get_sub_by_id(stream_id).name; } function stream_home_view_clicked(e) { @@ -193,7 +198,6 @@ function update_stream_name(stream_id, new_name) { // Update the subscriptions page var sub_row = $(".stream-row[data-stream-id='" + sub.stream_id + "']"); sub_row.find(".stream-name").text(new_name); - sub_row.attr("data-stream-name", new_name); // Update the message feed. message_live_update.update_stream_name(stream_id, new_name); @@ -276,7 +280,7 @@ function add_sub_to_table(sub) { add_email_hint(sub, email_address_hint_content); if (meta.stream_created) { - $(".stream-row[data-stream-name='" + meta.stream_created + "']").click(); + $(".stream-row[data-stream-id='" + stream_data.get_sub(meta.stream_created).stream_id + "']").click(); meta.stream_created = false; } } @@ -366,9 +370,11 @@ function show_subscription_settings(sub_row) { stream_color.set_colorpicker_color(colorpicker, color); } -exports.show_settings_for = function (stream_name) { - var sub_settings = settings_for_sub(stream_data.get_sub(stream_name)); - var stream = $(".subscription_settings[data-stream-name='" + stream_name + "']"); +exports.show_settings_for = function (stream_id) { + var sub = stream_data.get_sub_by_id(stream_id); + var sub_settings = settings_for_sub(sub); + + var stream = $(".subscription_settings[data-stream-id='" + stream_id + "']"); $(".subscription_settings[data-stream].show").removeClass("show"); $("#subscription_overlay .subscription_settings.show").removeClass("show"); @@ -1149,7 +1155,7 @@ $(function () { show_subs_pane.settings(); $(node).addClass("active"); - exports.show_settings_for(get_stream_name(node)); + exports.show_settings_for(get_stream_id(node)); } else { show_subs_pane.nothing_selected(); }
Change CHP incentives defaults to non-zero for MACRS and ITC: 5-year MACRS with 100% bonus and 50% ITC reduction, 10% ITC.
@@ -1352,28 +1352,28 @@ nested_input_definitions = { "macrs_option_years": { "type": "int", "restrict_to": macrs_schedules, - "default": 0, + "default": 5, "description": "MACRS schedule for financial analysis. Set to zero to disable" }, "macrs_bonus_pct": { "type": "float", "min": 0.0, "max": 1.0, - "default": 0.0, + "default": 1.0, "description": "Percent of upfront project costs to depreciate under MACRS" }, "macrs_itc_reduction": { "type": "float", "min": 0.0, "max": 1.0, - "default": 0.0, + "default": 0.5, "description": "Percent of the full ITC that depreciable basis is reduced by" }, "federal_itc_pct": { "type": "float", "min": 0.0, "max": 1.0, - "default": 0.0, + "default": 0.1, "description": "Percent federal capital cost incentive" }, "state_ibi_pct": {
docs: Fix H2O_WAVE_APP_ADDRESS in configuration docs. Thanks
@@ -54,7 +54,7 @@ For production deployments, you'll want to configure which port your app listens You can use the following environment variables to configure your app's server's behavior: -### H2O_APP_ADDRESS +### H2O_WAVE_APP_ADDRESS The public host/port of the app server. Defaults to `http://127.0.0.1:8000`. Set this variable if you are running your Wave server and your app on different machines or containers. ### H2O_WAVE_ADDRESS @@ -81,7 +81,7 @@ $ H2O_WAVE_INTERNAL_ADDRESS=ws://127.0.0.1:66666 ./venv/bin/python my_app.py ### H2O_WAVE_EXTERNAL_ADDRESS :::caution Deprecated -Specify `H2O_APP_ADDRESS` instead. +Specify `H2O_WAVE_APP_ADDRESS` instead. ::: The public host/port of the app server. Defaults to `http://127.0.0.1:8000`. Set this variable if you are running your Wave server and your app on different machines or containers.
Make IPython work with OpenSSL in FIPS mode. `md5` is not supported with OpenSSL in FIPS mode, hence moving from `md5` to `sha1`.
@@ -55,7 +55,7 @@ def code_name(code, number=0): This now expects code to be unicode. """ - hash_digest = hashlib.md5(code.encode("utf-8")).hexdigest() + hash_digest = hashlib.sha1(code.encode("utf-8")).hexdigest() # Include the number and 12 characters of the hash in the name. It's # pretty much impossible that in a single session we'll have collisions # even with truncated hashes, and the full one makes tracebacks too long
Correct inertia creation. The inertia creation did not take the orientation of its parent joint into account; corrected this via a similarity transform.
@@ -123,6 +123,10 @@ def calculateInertia(obj, mass, geometry_dict=None, errors=None, adjust=False, l if not geometry_dict: geometry = deriveGeometry(obj) + # Get the rotation of the object + object_rotation = obj.rotation_euler.to_matrix() + + if geometry['type'] == 'box': inertia = calculateBoxInertia(mass, geometry['size']) elif geometry['type'] == 'cylinder': @@ -132,7 +136,11 @@ def calculateInertia(obj, mass, geometry_dict=None, errors=None, adjust=False, l elif geometry['type'] == 'mesh': sUtils.selectObjects((obj,), clear=True, active=0) inertia = calculateMeshInertia(mass, obj.data) - return inertia + + # Correct the inertia orientation to account for Cylinder / mesh orientation issues + inertia = object_rotation *inertiaListToMatrix(inertia) *object_rotation.transposed() + + return inertiaMatrixToList(inertia) def calculateBoxInertia(mass, size):
Remove MailTest. Fixes issue.
@@ -194,7 +194,6 @@ API | Description | Auth | HTTPS | CORS | Link | | languagelayer | Language detection | No | Yes | Unknown | [Go!](https://languagelayer.com) | | Lob.com | US Address Verification | `apiKey` | Yes | Unknown | [Go!](https://lob.com/) | | mailboxlayer | Email address validation | No | Yes | Unknown | [Go!](https://mailboxlayer.com) | -| MailTest | Email address validation | No | Yes | Unknown | [Go!](http://mailtest.in/documentation/) | | NumValidate | Open Source phone number validation | No | Yes | Unknown | [Go!](https://numvalidate.com) | | numverify | Phone number validation | No | Yes | Unknown | [Go!](https://numverify.com) | | PurgoMalum | Content validator against profanity & obscenity | No | No | Unknown | [Go!](http://www.purgomalum.com) |
Update build.sh. Disable build of the tutorial, as the MNIST download fails right now.
@@ -48,6 +48,9 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then # Step 1: Remove runnable code from tutorials that are not supposed to be run python $DIR/remove_runnable_code.py beginner_source/aws_distributed_training_tutorial.py beginner_source/aws_distributed_training_tutorial.py || true python $DIR/remove_runnable_code.py advanced_source/ddp_pipeline_tutorial.py advanced_source/ddp_pipeline_tutorial.py || true + # Temp remove for mnist download issue. + python $DIR/remove_runnable_code.py beginner_source/fgsm_tutorial.py beginner_source/fgsm_tutorial.py || true + # TODO: Fix bugs in these tutorials to make them runnable again # python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py || true
Fix OneAccess.TDRE.get_metrics to the new interface. HG-- branch: feature/microservices
# Python modules import re -from collections import defaultdict # NOC modules from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript -from noc.core.script.metrics import percent_usage class Script(GetMetricsScript): @@ -47,57 +45,32 @@ class Script(GetMetricsScript): def collect_profile_metrics(self, metrics): if self.has_capability("OneAccess | IP | SLA | Probes"): - self.collect_ip_sla_metrics(metrics) + self.logger.debug("Merics %s" % metrics) + if self.ALL_SLA_METRICS.intersection(set(m.metric for m in metrics)): self.collect_ip_sla_metrics(metrics) def collect_ip_sla_metrics(self, metrics): - if not (self.ALL_SLA_METRICS & set(metrics)): - return # NO SLA metrics requested + # if not (self.ALL_SLA_METRICS & set(metrics)): + # return # NO SLA metrics requested ts = self.get_ts() - probes = self.scripts.get_sla_probes() - self.cli("SELGRP Performance") - c = self.cli("GET ip/router/qualityMonitor[]/") - for match in self.rx_metric.finditer(c): - name = match.group("name") - delay = match.group("delay") - jitter = match.group("jitter") - if self.SLA_ICMP_RTT in metrics: - for p in probes: - if ( - p["name"] == name and - p["tests"][0]["type"] == "icmp-echo" and - name in metrics[self.SLA_ICMP_RTT]["probes"] - ): - self.set_metric( - name=self.SLA_ICMP_RTT, - value=float(delay) * 1000000, - ts=ts, - tags={"probe": name} - ) - if self.SLA_UDP_RTT in metrics: - for p in probes: - if ( - p["name"] == name and - p["tests"][0]["type"] == "udp-echo" and - name in metrics[self.SLA_UDP_RTT]["probes"] - ): + m = self.get_ip_sla_metrics() + for bv in metrics: + if bv.metric in self.ALL_SLA_METRICS: + id = tuple(bv.path + [bv.metric]) + if id in m: self.set_metric( - name=self.SLA_UDP_RTT, - value=float(delay) * 1000000, + id=bv.id, + metric=bv.metric, + value=m[id], ts=ts, - tags={"probe": name} + path=bv.path, ) - if self.SLA_JITTER in metrics: - for p in probes: - if ( - p["name"] == name and - name in metrics[self.SLA_JITTER]["probes"] - ): - self.set_metric( - name=self.SLA_JITTER, - value=float(jitter) * 1000000, - ts=ts, - tags={"probe": name} - ) - break + def get_ip_sla_metrics(self): + r = {} + self.cli("SELGRP Performance") + c = self.cli("GET ip/router/qualityMonitor[]/") + for match in self.rx_metric.findall(c): + r[("", match[0], "SLA | UDP RTT")] = float(match[1]) * 1000000 + r[("", match[0], "SLA | JITTER")] = float(match[2]) * 1000000 + return r
Allow config overrides from environment variables. Keys are of the format `"INSIGHTS_%s" % key.upper()`; in English, that's the uppercase version of the config key with "INSIGHTS_" prepended to the key.
@@ -352,7 +352,25 @@ def apply_legacy_config(): CONFIG['gpg'] = False +def boolify(v): + if v.lower() == "true": + return True + elif v.lower() == "false": + return False + else: + return v + + def compile_config(): + # Options can be set as environment variables + # The formula for the key is `"INSIGHTS_%s" % key.upper()` + # In English, that's the uppercase version of the config key with + # "INSIGHTS_" prepended to it. + insights_env_opts = dict((k.upper().split("_", 1)[1], boolify(v)) + for k, v in os.environ.iteritems() + if k.upper().startswith("INSIGHTS_")) + CONFIG.update(insights_env_opts) + # TODO: If the defaults.yaml file ever fills in all the client config, then # they will clobber the legacy file, even if the user has no insights.yaml # defined!!
Fix test on py3. compress_whitespace on py3 replaces nbsp with a normal space.
@@ -105,12 +105,12 @@ class MiscTests(TestCase): factory.set_response(response) return factory.title - html = ("""\ + html = (b"""\ <html><head> - <title>T&nbsp;itle</title> + <title>T&gt;itle</title> </head><body><p>Blah.<p></body></html> """) - self.assertEqual(get_title(html), u'T\xa0itle') + self.assertEqual(get_title(html), u'T>itle') html = ("""\ <html><head>
Update azorult.txt. ```lakeshoreintegrated.com``` looks like a compromised site, so list the full path only.
@@ -656,3 +656,23 @@ testaztest.xyz # Reference: https://twitter.com/James_inthe_box/status/1164898833500798976 losjardinesdejavier.com/admin/32/index.php + +# Reference: https://twitter.com/DynamicAnalysis/status/1165720711219929088 +# Reference: https://pastebin.com/wHV90Sc2 + +http://151.80.8.23/panel/index.php +http://185.222.56.163/index.php +http://23.227.201.16/gidi/index.php +http://92.63.192.119/index.php +a0327852.xsph.ru +a0329841.xsph.ru +cdl24885oq.temp.swtest.ru +kilangsprcoket.tk +latiso.ru +modcloudserver.eu +roberto.ac.ug +testaztest.xyz +testieng.kl.com.ua +u4504124br.ha003.t.justns.ru +lakeshoreintegrated.com/ch/index.php +xcvcdgfg.ru
Skip gather and reduce-scatter grad tests on GPU. Recent changes in XLA:GPU seem to be causing deadlocks.
@@ -974,6 +974,8 @@ class PythonPmapTest(jtu.JaxTestCase): )) @ignore_slow_all_to_all_warning() def testGradOf(self, prim, tiled, use_axis_index_groups): + if jtu.device_under_test() == "gpu": + raise SkipTest("XLA:GPU with ReduceScatter deadlocks") # b/264516146 axis_index_groups = None devices = jax.devices()
Increase limit to 50 items per page in IndividualLearnerSelector. Fixes
import commonCoachStrings from '../../common'; const DEFAULT_ITEMS_PER_PAGE = 50; - const SHORT_ITEMS_PER_PAGE = 5; export default { name: 'IndividualLearnerSelector', }; }, itemsPerPage() { - return this.targetClassId ? SHORT_ITEMS_PER_PAGE : DEFAULT_ITEMS_PER_PAGE; + return DEFAULT_ITEMS_PER_PAGE; }, }, methods: {
Added information about QWeather to README.md. QWeather, also named HeWeather, provides a free weather API for developers.
@@ -927,6 +927,7 @@ API | Description | Auth | HTTPS | CORS | | [ODWeather](http://api.oceandrivers.com/static/docs.html) | Weather and weather webcams | No | No | Unknown | | [OpenUV](https://www.openuv.io) | Real-time UV Index Forecast | `apiKey` | Yes | Unknown | | [OpenWeatherMap](https://openweathermap.org/api) | Weather | `apiKey` | Yes | Unknown | +| [QWeather](https://dev.qweather.com/) | Location-based weather data | `apiKey` | Yes | Yes | | [Storm Glass](https://stormglass.io/) | Global marine weather from multiple sources | `apiKey` | Yes | Yes | | [Weatherbit](https://www.weatherbit.io/api) | Weather | `apiKey` | Yes | Unknown | | [Yahoo! Weather](https://developer.yahoo.com/weather/) | Weather | `apiKey` | Yes | Unknown |
Update bs4_filters.js. Adding an event listener in bs4_filters.js to catch adminForm creation.
@@ -185,6 +185,7 @@ var AdminFilters = function(element, filtersElement, filterGroups, activeFilters html: true, placement: 'bottom' }); + $(document).on('adminFormReady', function(evt){ if ($('#filter-groups-data').length == 1) { var filter = new AdminFilters( '#filter_form', '.field-filters', @@ -192,4 +193,5 @@ var AdminFilters = function(element, filtersElement, filterGroups, activeFilters JSON.parse($('#active-filters-data').text()) ); } + }) })(jQuery);
refactor: helper: Merge parameters passed into _set_count_[model/view]. Since the functions only need to iterate through each message, we pass in a list of messages by merging id_list and the message dict.
@@ -84,17 +84,14 @@ def asynch(func: Any) -> Any: return wrapper -def _set_count_in_model(id_list: List[int], new_count: int, - messages: Dict[int, Message], +def _set_count_in_model(new_count: int, changed_messages: List[Message], unread_counts: UnreadCounts) -> None: """ This function doesn't explicitly set counts in model, but updates `unread_counts` (which can update the model if it's passed in, but is not tied to it). """ - for id in id_list: - message = messages[id] - + for message in changed_messages: if message['type'] == 'stream': key = (message['stream_id'], message['subject']) unreads = unread_counts['unread_topics'] @@ -119,8 +116,8 @@ def _set_count_in_model(id_list: List[int], new_count: int, unreads[key] = new_count -def _set_count_in_view(id_list: List[int], controller: Any, new_count: int, - messages: Dict[int, Message], +def _set_count_in_view(controller: Any, new_count: int, + changed_messages: List[Message], unread_counts: UnreadCounts) -> None: """ This function for the most part contains the logic for setting the @@ -136,8 +133,7 @@ def _set_count_in_view(id_list: List[int], controller: Any, new_count: int, user_buttons_log = controller.view.user_w.log all_msg = controller.view.home_button all_pm = controller.view.pm_button - for id in id_list: - message = messages[id] + for message in changed_messages: user_id = message['sender_id'] # If we sent this message, don't increase the count @@ -188,14 +184,14 @@ def set_count(id_list: List[int], controller: Any, new_count: int) -> None: assert new_count == 1 or new_count == -1 messages = controller.model.index['messages'] unread_counts = controller.model.unread_counts # type: UnreadCounts - - _set_count_in_model(id_list, new_count, messages, unread_counts) + changed_messages = [messages[id] for id in id_list] + _set_count_in_model(new_count, changed_messages, unread_counts) # if view is not yet loaded. Usually the case when first message is read. while not hasattr(controller, 'view'): time.sleep(0.1) - _set_count_in_view(id_list, controller, new_count, messages, unread_counts) + _set_count_in_view(controller, new_count, changed_messages, unread_counts) while not hasattr(controller, 'loop'): time.sleep(0.1)
Updated LICENSE [ci skip] Bad merge in overwrote
BSD 3-Clause License -Copyright (c) 2009-2019, Dimagi Inc., and individual contributors. +Copyright (c) 2009-2020, Dimagi Inc., and individual contributors. All rights reserved. Redistribution and use in source and binary forms, with or without
Fix bug in Langkit_Support.Token_Data_Handlers.Previous_Token. This piece of code was comparing vector indices with the content of these vectors (which are also indices), whereas it should compare indices with themselves instead. As a result, lookup operations built on top of that were not behaving properly in some cases. TN:
@@ -385,6 +385,7 @@ package body Langkit_Support.Token_Data_Handlers is Element_Index : Positive; Element : Integer) return Relative_Position is + Triv_Index : constant Natural := Natural (Key_Trivia); begin -- Index can be zero if the corresponding token is not followed by -- any trivia. In this case, rely on the sloc to compare them. @@ -400,18 +401,13 @@ package body Langkit_Support.Token_Data_Handlers is end; end if; - declare - Designated_Trivia : constant Natural := - TDH.Tokens_To_Trivias.Get (Element); - begin - if Designated_Trivia < Natural (Key_Trivia) then + if Element < Triv_Index then return After; - elsif Designated_Trivia > Natural (Key_Trivia) then + elsif Element > Triv_Index then return Before; else return Inside; end if; - end; end Compare; function Token_Floor is new Floor
In QCS getting started, only install cirq-google. This significantly speeds up the startup experience because we don't have to install all the other packages in the cirq metapackage. In testing just now, installing `cirq-google` took about 7 seconds in Colab, whereas installing `cirq` took about 30 seconds.
}, "source": [ "## Setup\n", - "Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via `pip install cirq --pre`." + "Note: this notebook relies on unreleased Cirq features. If you want to try these features, make sure you install cirq via `pip install cirq-google --pre`." ] }, { "try:\n", " import cirq\n", "except ImportError:\n", - " print(\"installing cirq...\")\n", - " !pip install --quiet cirq --pre\n", - " print(\"installed cirq.\")" + " print(\"installing cirq-google...\")\n", + " !pip install --quiet cirq-google --pre\n", + " print(\"installed cirq-google.\")" ] }, {
ImageReaderTest: Convert `IECore.FileSequence`. Ideally `IECore.FileSequence` would return a `pathlib.Path`; this works around that for now to prevent the `StringPlug` from stripping out the backslashes.
@@ -164,10 +164,11 @@ class ImageReaderTest( GafferImageTest.ImageTestCase ) : shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 3 ) ) reader = GafferImage.ImageReader() - reader["fileName"].setValue( testSequence.fileName ) + # todo : Change IECore.FileSequence to return a `pathlib.Path` object and use that directly. + reader["fileName"].setValue( pathlib.Path( testSequence.fileName ) ) oiio = GafferImage.OpenImageIOReader() - oiio["fileName"].setValue( testSequence.fileName ) + oiio["fileName"].setValue( pathlib.Path( testSequence.fileName ) ) def assertMatch() : @@ -269,11 +270,11 @@ class ImageReaderTest( GafferImageTest.ImageTestCase ) : shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 7 ) ) reader = GafferImage.ImageReader() - reader["fileName"].setValue( testSequence.fileName ) + reader["fileName"].setValue( pathlib.Path( testSequence.fileName ) ) reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold ) oiio = GafferImage.OpenImageIOReader() - oiio["fileName"].setValue( testSequence.fileName ) + oiio["fileName"].setValue( pathlib.Path( testSequence.fileName ) ) oiio["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold ) context = Gaffer.Context( Gaffer.Context.current() )
Ignore if TableauUser already exists, because add_domain_membership is called multiple times upon user creation.
@@ -453,6 +453,11 @@ def add_tableau_user(domain, username): these details to the Tableau instance. ''' session = TableauAPISession.create_session_for_domain(domain) + if TableauUser.objects.filter( + server=session.tableau_connected_app.server, + username=username + ).exists(): + return True user = TableauUser.objects.create( server=session.tableau_connected_app.server, username=username,
Change AT_CHECK to TORCH_CHECK in python_arg_parser.h. Summary: Pull Request resolved:
@@ -439,7 +439,7 @@ inline at::MemoryFormat PythonArgs::toMemoryFormat(int i) { inline at::QScheme PythonArgs::toQScheme(int i) { if (!args[i]) return at::kPerTensorAffine; - AT_CHECK(THPQScheme_Check(args[i]), "qscheme arg must be an instance of the torch.qscheme"); + TORCH_CHECK(THPQScheme_Check(args[i]), "qscheme arg must be an instance of the torch.qscheme"); const auto qscheme = reinterpret_cast<THPQScheme*>(args[i]); return qscheme->qscheme; }
Add channel ID to message deletion logs
<div class="discord-message"> <div class="discord-message-header"> <span class="discord-username" - style="color: {{ message.author.top_role.colour | hex_colour }}">{{ message.author }}</span><span - class="discord-message-metadata has-text-grey">{{ message.timestamp }} | User ID: {{ message.author.id }}</span> + style="color: {{ message.author.top_role.colour | hex_colour }}">{{ message.author }} + </span> + <span class="discord-message-metadata has-text-grey"> + User ID: {{ message.author.id }}<br> + {{ message.timestamp }} (Channel ID: {{ message.channel_id }}) + </span> </div> <div class="discord-message-content"> {{ message.content | escape | visible_newlines | safe }}
Adservice: Add libc6-compat runtime dependency. Fixes
@@ -11,6 +11,8 @@ RUN ./gradlew installDist FROM openjdk:8-alpine +RUN apk add --no-cache libc6-compat + RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ chmod +x /bin/grpc_health_probe
Fix a slight misconfig of syndic in salt.config that was causing a failing unit test.
@@ -942,8 +942,9 @@ class TestDaemon(object): syndic_opts.update( salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic")) ) - syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache") syndic_opts["config_dir"] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR + syndic_opts["cachedir"] = os.path.join(TMP, "rootdir", "cache") + syndic_opts["root_dir"] = os.path.join(TMP, "rootdir") # This proxy connects to master proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, "proxy"))
fix: rationalize platform constraints for 'pyarrow' extra. Release-As: 1.27.2
@@ -47,13 +47,10 @@ extras = { ], "pandas": ["pandas>=0.17.1"], # Exclude PyArrow dependency from Windows Python 2.7. - 'pyarrow: platform_system == "Windows"': [ - "pyarrow>=1.0.0, <2.0dev; python_version>='3.5'", - ], - 'pyarrow: platform_system != "Windows"': [ + "pyarrow": [ "pyarrow >= 1.0.0, < 2.0dev; python_version >= '3.5'", # Pyarrow >= 0.17.0 is not compatible with Python 2 anymore. - "pyarrow < 0.17.0; python_version < '3.0'", + "pyarrow < 0.17.0; python_version < '3.0' and platform_system != 'Windows'", ], "tqdm": ["tqdm >= 4.0.0, <5.0.0dev"], "fastparquet": [
Support the config_save command. This adds save_config, which is based on the Cisco save command.
@@ -13,9 +13,19 @@ class RadETXBase(BaseConnection): time.sleep(.3 * self.global_delay_factor) self.clear_buffer() - def save_config(self, cmd='admin save', confirm=False): + def save_config(self, cmd='admin save', confirm=False, confirm_response=''): """Saves Config Using admin save""" - return super(RadETXBase, self).save_config(cmd=cmd, confirm=confirm) + if confirm: + output = self.send_command_timing(command_string=cmd) + if confirm_response: + output += self.send_command_timing(confirm_response) + else: + # Send enter by default + output += self.send_command_timing(self.RETURN) + else: + # Some devices are slow so match on trailing-prompt if you can + output = self.send_command(command_string=cmd) + return output def enable(self, *args, **kwargs): pass @@ -26,7 +36,7 @@ class RadETXSSH(RadETXBase): class RadETXTelnet(RadETXBase): """RAD ETX Telnet Support""" - def special_login_handler(self, delay_factor=1): + def telnet_login(self, delay_factor=1): """ RAD presents with the following on login
Have easily accessible default configs. This will help add next_tables for ACL tables.
@@ -133,3 +133,15 @@ FAUCET_PIPELINE = ( ETH_DST_DEFAULT_CONFIG, FLOOD_DEFAULT_CONFIG, ) + +DEFAULT_CONFIGS = { + 'port_acl': PORT_ACL_DEFAULT_CONFIG, + 'vlan': VLAN_DEFAULT_CONFIG, + 'vlan_acl': VLAN_ACL_DEFAULT_CONFIG, + 'eth_src': ETH_SRC_DEFAULT_CONFIG, + 'ipv4_fib': IPV4_FIB_DEFAULT_CONFIG, + 'ipv6_fib': IPV6_FIB_DEFAULT_CONFIG, + 'vip': VIP_DEFAULT_CONFIG, + 'eth_dst': ETH_DST_DEFAULT_CONFIG, + 'flood': FLOOD_DEFAULT_CONFIG, +}
Update script-ExtractDomain.yml
Add the investigation ID to created indicators
@@ -113,9 +113,9 @@ script: |- domainScore = scores[i]; } } - executeCommand("createNewIndicator", {type:'Domain',value:domain, source:'DBot', reputation: scoreToReputation(domainScore)}); + executeCommand("createNewIndicator", {type:'Domain',value:domain, source:'DBot', reputation: scoreToReputation(domainScore), relatedIncidents:investigation.id}); } else { - executeCommand("createNewIndicator", {type:'Domain',value:domain, source:'DBot'}); + executeCommand("createNewIndicator", {type:'Domain',value:domain, source:'DBot', relatedIncidents:investigation.id}); } } }
ShaderNodeDisplacement error. Fixed.
An error could happen if Normal is not connected.
@@ -106,33 +106,21 @@ class ShaderNodeAmbientOcclusion(NodeParser): return ao_map * color -class ShaderNodeDisplacement(RuleNodeParser): +class ShaderNodeDisplacement(NodeParser): # inputs: Height, Midlevel, Scale, Normal - nodes = { + def export(self): + height = self.get_input_value('Height') + midlevel = self.get_input_value('Midlevel') + scale = self.get_input_value('Scale') + normal = self.get_input_normal('Normal') + # displacement vec = Scale * (Height - Midlevel) * Normal - "Displacement": { - "type": "*", - "params": { - "color0": "nodes.mul", - "color1": "normal:inputs.Normal" - } - }, - "mul": { - "type": "*", - "params": { - "color0": "inputs.Scale", - "color1": "nodes.sub", - } - }, - "sub": { - "type": "-", - "params": { - "color0": "inputs.Height", - "color1": "inputs.Midlevel", - } - }, - } + displacement = scale * (height - midlevel) + if normal: + displacement *= normal + + return displacement class NodeReroute(NodeParser):
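For readers skimming the diff, the math the new parser evaluates is just the node's documented formula. The sketch below restates it as a plain Python function with illustrative scalar values; it is not RPR or Blender API code.

```
def displacement(height, midlevel, scale, normal=None):
    # displacement = Scale * (Height - Midlevel), multiplied by Normal only when connected
    value = scale * (height - midlevel)
    if normal is not None:
        value *= normal
    return value

displacement(1.0, 0.5, 2.0)         # -> 1.0 (Normal not connected)
displacement(1.0, 0.5, 2.0, 0.5)    # -> 0.5
```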
Update .travis.yml
Made the pysat directory more specific. Looks like pyglow is working in Python 2.7 now. The build is failing because of server-side CDAWeb issues.
@@ -50,6 +50,7 @@ install: - ls - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then + cd ..; echo 'cloning pyglow'; travis_wait 50 git clone https://github.com/timduly4/pyglow.git; echo 'installing pyglow'; @@ -59,7 +60,7 @@ install: cd ..; fi # install pysat - - cd pysat + - cd /home/travis/build/rstoneback/pysat - "python setup.py install" # command to run tests script:
Clarify that extension types which implement "__cinit__" do not require their base types to implement it. Closes
@@ -43,9 +43,11 @@ There are two methods concerned with initialising the object. The :meth:`__cinit__` method is where you should perform basic C-level initialisation of the object, including allocation of any C data structures that your object will own. You need to be careful what you do in the -:meth:`__cinit__` method, because the object may not yet be fully valid Python +:meth:`__cinit__` method, because the object may not yet be a fully valid Python object when it is called. Therefore, you should be careful invoking any Python -operations which might touch the object; in particular, its methods. +operations which might touch the object; in particular, its methods and anything +that could be overridden by subtypes (and thus depend on their subtype state being +initialised already). By the time your :meth:`__cinit__` method is called, memory has been allocated for the object and any C attributes it has have been initialised to 0 or null. (Any @@ -53,12 +55,13 @@ Python attributes have also been initialised to None, but you probably shouldn't rely on that.) Your :meth:`__cinit__` method is guaranteed to be called exactly once. -If your extension type has a base type, the :meth:`__cinit__` method of the base type -is automatically called before your :meth:`__cinit__` method is called; you cannot -explicitly call the inherited :meth:`__cinit__` method. If you need to pass a modified -argument list to the base type, you will have to do the relevant part of the -initialisation in the :meth:`__init__` method instead (where the normal rules for -calling inherited methods apply). +If your extension type has a base type, any existing :meth:`__cinit__` methods in +the base type hierarchy are automatically called before your :meth:`__cinit__` +method. You cannot explicitly call the inherited :meth:`__cinit__` methods, and the +base types are free to choose whether they implement :meth:`__cinit__` at all. +If you need to pass a modified argument list to the base type, you will have to do +the relevant part of the initialisation in the :meth:`__init__` method instead, where +the normal rules for calling inherited methods apply. Any initialisation which cannot safely be done in the :meth:`__cinit__` method should be done in the :meth:`__init__` method. By the time :meth:`__init__` is called, the object is
Remove non-existent parameter from doc
Remove the non-existent TradeExchange parameter from the generate_target_weight_position doc
@@ -148,7 +148,6 @@ class WeightStrategyBase(BaseStrategy, AdjustTimer): pred score for this trade date, index is stock_id, contain 'score' column. current : Position() current position. - trade_exchange : Exchange() trade_date : pd.Timestamp trade date. """
Docker overhaul for upload_artifact script. Removing all venv-specific code.
set -euo pipefail -SCRIPTPATH=$(pwd) -PIP_PATH="$SCRIPTPATH/env/bin/pip" -PYTHON_PATH="$SCRIPTPATH/env/bin/python" - -echo "Now creating virtualenv..." -virtualenv -p python3.5 env -if [ $? -ne 0 ]; then - echo ".. Abort! Can't create virtualenv." - exit 1 -fi - -pip install --upgrade "pip < 21.0" -PIP_CMD="$PIP_PATH install -r requirements/release_upload.txt" -echo "Running $PIP_CMD..." -$PIP_CMD -if [ $? -ne 0 ]; then - echo ".. Abort! Can't install '$PIP_CMD'." - exit 1 -fi - -PYTHON_CMD="$PYTHON_PATH .buildkite/upload_artifacts.py" -echo "Now excuting upload artifacts script..." +echo "--- Downloading all artifacts here for upload to GH" mkdir -p dist buildkite-agent artifact download 'dist/*.pex' dist/ buildkite-agent artifact download 'dist/*.whl' dist/ @@ -31,8 +10,25 @@ buildkite-agent artifact download 'dist/*.tar.gz' dist/ buildkite-agent artifact download 'dist/*.deb' dist/ buildkite-agent artifact download 'dist/*.exe' dist/ -$PYTHON_CMD -if [ $? -ne 0 ]; then - echo ".. Abort! Can't execute '$PYTHON_CMD'." - exit 1 -fi +echo "--- Building docker environment in Docker" +# Depends on relevant requirements file and script locations +docker build \ + --iidfile upload_artifacts.iid \ + -f docker/upload_artifacts.dockerfile \ + . + +IMAGE=$(cat upload_artifacts.iid) + +echo "--- Running script in Docker, image ID: $IMAGE" +# Mounting dist so that we're not redundantly copying +# Adding envars for GH access and Tag information +docker run \ + --mount type=bind,src=$PWD/dist,target=/dist \ + -e GITHUB_ACCESS_TOKEN \ + -e BUILDKITE_TAG \ + --cidfile upload_artifacts.cid \ + $IMAGE \ + +CONTAINER=$(cat upload_artifacts.cid) + +trap "docker rm $CONTAINER" exit
Lkt lowering: add support for call expressions TN:
@@ -1273,6 +1273,24 @@ class LktTypesLoader: }[type(expr.f_op)] return E.Arithmetic(left, right, operator) + elif isinstance(expr, L.CallExpr): + # For now, the only legal call expression is the method + # invocation. + callee = helper(expr.f_name) + assert isinstance(callee, E.FieldAccess) + + # Collect positional and keyword arguments + args = [] + kwargs = {} + for arg in expr.f_args: + value = helper(arg.f_value) + if arg.f_name: + kwargs[arg.f_name.text] = value + else: + args.append(value) + + return callee(*args, **kwargs) + elif isinstance(expr, L.CharLit): return E.CharacterLiteral(denoted_char_lit(expr))
Erichlf torrentday [fix]
Fix torrentday plugin.
* Fix torrentday search plugin to reflect new TD
* Add some error checks to torrentday
* Address PR#2263 comments
* Streamline code some
@@ -123,30 +123,40 @@ class UrlRewriteTorrentday(object): categories = [categories] # If there are any text categories, turn them into their id number categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories] - params = { 'cata': 'yes', 'c%s' % ','.join(str(c) for c in categories): 1, 'clear-new': 1} + params = { 'cata': 'yes', 'c{}'.format(','.join(str(c) for c in categories)): 1, 'clear-new': 1} entries = set() for search_string in entry.get('search_strings', [entry['title']]): - url = 'https://www.torrentday.com/browse.php' - params['search'] = normalize_unicode(search_string).replace(':', '') + url = 'https://www.torrentday.com/t' + params['q'] = normalize_unicode(search_string).replace(':', '') cookies = { 'uid': config['uid'], 'pass': config['passkey'], '__cfduid': config['cfduid'] } try: page = requests.get(url, params=params, cookies=cookies).content except RequestException as e: - raise PluginError('Could not connect to torrentday: %s' % e) + raise PluginError('Could not connect to torrentday: {}'.format(e)) soup = get_soup(page) - - for tr in soup.find_all('tr', { 'class': 'browse' }): + # the first row is the header so skip it + for tr in soup.find_all('tr')[1:]: entry = Entry() # find the torrent names - title = tr.find('a', { 'class': 'torrentName' }) + td = tr.find('td', { 'class': 'torrentNameInfo' }) + if not td: + log.warning('Could not find entry torrentNameInfo for %s.', search_string) + continue + title = td.find('a') + if not title: + log.warning('Could not determine title for %s.', search_string) + continue entry['title'] = title.contents[0] log.debug('title: %s', title.contents[0]) # find download link - torrent_url = tr.find('td', { 'class': 'dlLinksInfo' }) + torrent_url = tr.find('td', { 'class': 'ac' }) + if not torrent_url: + log.warning('Could not determine download link for %s.', search_string) + continue torrent_url = torrent_url.find('a').get('href') # construct download URL @@ -155,7 +165,8 @@ class UrlRewriteTorrentday(object): entry['url'] = torrent_url # us tr object for seeders/leechers - seeders, leechers = tr.find_all('td', { 'class': ['seedersInfo', 'leechersInfo']}) + seeders = tr.find_all('td', { 'class': 'ac seedersInfo'}) + leechers = tr.find_all('td', { 'class': 'ac leechersInfo'}) entry['torrent_seeds'] = int(seeders.contents[0].replace(',', '')) entry['torrent_leeches'] = int(leechers.contents[0].replace(',', '')) entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
docs: Add Apache2 reverse proxy instructions and example. Tweaked by tabbott to disable older SSL and remove websockets logic, which isn't relevant in master.
@@ -257,6 +257,65 @@ your installation. [zulipchat-puppet]: https://github.com/zulip/zulip/tree/master/puppet/zulip_ops/manifests [nginx-loadbalancer]: https://github.com/zulip/zulip/blob/master/puppet/zulip_ops/files/nginx/sites-available/loadbalancer +### Apache2 configuration + +Below is a working example of a full Apache2 configuration. It assumes +that your Zulip sits at `http://localhost:5080`. You first need to +make the following changes in two configuration files. + +1. Follow the instructions for [Configure Zulip to allow HTTP](#configuring-zulip-to-allow-http). + +2. Add the following to `/etc/zulip/settings.py`: + ``` + EXTERNAL_HOST = 'zulip.example.com' + ALLOWED_HOSTS = ['zulip.example.com', '127.0.0.1'] + USE_X_FORWARDED_HOST = True + ``` + + +3. Restart your Zulip server with `/home/zulip/deployments/current/restart-server`. + +4. Create an Apache2 virtual host configuration file, similar to the + following. Place it the appropriate path for your Apache2 + installation and enable it (E.g. if you use Debian or Ubuntu, then + place it in `/etc/apache2/sites-available/zulip.example.com.conf` + and then run `a2ensite zulip.example.com && systemctl reload + apache2`): + + ``` + <VirtualHost *:80> + ServerName zulip.example.com + RewriteEngine On + RewriteRule ^ https://%{HTTP_HOST}%{REQUEST_URI} [R=301,L] + </VirtualHost> + + <VirtualHost *:443> + ServerName zulip.example.com + + RequestHeader set "X-Forwarded-Proto" expr=%{REQUEST_SCHEME} + RequestHeader set "X-Forwarded-SSL" expr=%{HTTPS} + + RewriteEngine On + RewriteRule /(.*) http://localhost:5080/$1 [P,L] + + <Location /> + Require all granted + ProxyPass http://localhost:5080/ timeout=300 + ProxyPassReverse http://localhost:5080/ + ProxyPassReverseCookieDomain 127.0.0.1 zulip.example.com + </Location> + + SSLProtocol TLSv1.2 + SSLEngine on + SSLProxyEngine on + SSLCertificateFile /etc/letsencrypt/live/zulip.example.com/fullchain.pem + SSLCertificateKeyFile /etc/letsencrypt/live/zulip.example.com/privkey.pem + SSLCipherSuite "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA256:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EDH+aRSA+AESGCM:EDH+aRSA+SHA256:EDH+aRSA:EECDH:!aNULL:!eNULL:!MEDIUM:!LOW:!3DES:!MD5:! + SSLHonorCipherOrder on + Header set Strict-Transport-Security "max-age=31536000" + </VirtualHost> + ``` + ### HAProxy configuration If you want to use HAProxy with Zulip, this `backend` config is a good
Rapid7 FDNS - correct typo in URL. This PR corrects a typo that I made in
Deprecated: True Name: Rapid7 FDNS ANY Dataset Description: | - This dataset has been deprecated. Please see this [Rapid7 blog post](https://www.rapid7.com/blog/post/2022/02/10/evolving-how-we-share-rapid7-research-data-2/m) for details. + This dataset has been deprecated. Please see this [Rapid7 blog post](https://www.rapid7.com/blog/post/2022/02/10/evolving-how-we-share-rapid7-research-data-2/) for details. Documentation: https://opendata.rapid7.com/about/ Contact: | [email protected]
Remove FitWindow's parent
This addresses the "FitWindow always on top of PlotWindow" issue
@@ -1038,7 +1038,7 @@ class FitAction(PlotAction): # open a window with a FitWidget if self.fit_window is None: - self.fit_window = qt.QMainWindow(self.plot) + self.fit_window = qt.QMainWindow() # import done here rather than at module level to avoid circular import # FitWidget -> BackgroundWidget -> PlotWindow -> PlotActions -> FitWidget from ..fit.FitWidget import FitWidget
Refactor irregular companies classifier Fix
-import datetime +from collections import namedtuple +from datetime import date +from itertools import chain from unittest import TestCase import pandas as pd -from rosie.chamber_of_deputies.classifiers.irregular_companies_classifier import IrregularCompaniesClassifier +from rosie.chamber_of_deputies.classifiers import IrregularCompaniesClassifier + + +Status = namedtuple('Status', ('situation_date', 'issue_date', 'expected')) class TestIrregularCompaniesClassifier(TestCase): - SITUATIONS = [('ABERTA', False), ('BAIXADA', True), ('NULA', True), ('INAPTA', True), ('SUSPENSA', True)] + SUSPICIOUS_SITUATIONS = ( + 'BAIXADA', + 'NULA', + 'INAPTA' + 'SUSPENSA', + ) + + SITUATIONS = chain(SUSPICIOUS_SITUATIONS, ('ABERTA',)) + + STATUS = ( + Status(date(2013, 1, 30), date(2013, 1, 1), False), + Status(date(2013, 1, 1), date(2013, 1, 30), True) + ) def setUp(self): self.subject = IrregularCompaniesClassifier() - def _get_company_dataset(self, fields): + def _get_company_dataset(self, **kwargs): base_company = { 'recipient_id': '02989654001197', - 'situation_date': datetime.date(2013, 1, 3), - 'issue_date': datetime.date(2013, 1, 30), + 'situation_date': date(2013, 1, 3), + 'issue_date': date(2013, 1, 30), 'situation': '', } - for key, value in fields.items(): - base_company[key] = value - dataset = pd.DataFrame([base_company, ]) + base_company.update(kwargs) + dataset = pd.DataFrame([base_company]) return dataset def test_is_regular_company(self): for situation in self.SITUATIONS: - self.assertEqual( - self.subject.predict(self._get_company_dataset({ - 'situation': situation[0] - }))[0], situation[1]) - - STATUS = [ - (datetime.date(2013, 1, 30), datetime.date(2013, 1, 1), False), - (datetime.date(2013, 1, 1), datetime.date(2013, 1, 30), True) - ] + company = self._get_company_dataset(situation=situation) + expected = situation in self.SUSPICIOUS_SITUATIONS + result, *_ = self.subject.predict(company) + with self.subTest(): + self.assertEqual(result, expected) def test_if_company_is_suspended(self): for status in self.STATUS: - self.assertEqual( - self.subject.predict(self._get_company_dataset({ - 'situation': 'SUSPENSA', - 'situation_date': status[0], - 'issue_date': status[1], - }))[0], status[2]) - + company = self._get_company_dataset( + situation='SUSPENSA', + situation_date=status.situation_date, + issue_date=status.issue_date, + ) + result, *_ = self.subject.predict(company) + with self.subTest(): + self.assertEqual(result, status.expected)
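The refactor above leans on `unittest.TestCase.subTest` so every situation is reported individually instead of the loop stopping at the first failure. Below is a minimal, self-contained illustration of that pattern; the test class and data are made up and are not part of rosie.

```
import unittest

class TestSubTestPattern(unittest.TestCase):
    SITUATIONS = ('ABERTA', 'BAIXADA', 'NULA', 'INAPTA', 'SUSPENSA')

    def test_situations_are_uppercase(self):
        for situation in self.SITUATIONS:
            # a failing iteration is reported on its own instead of aborting the loop
            with self.subTest(situation=situation):
                self.assertEqual(situation, situation.upper())

if __name__ == '__main__':
    unittest.main()
```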
Add daily summary statistics calculation
Frame the stats command
Remove "stats"-adjacent aliases for existing commands to avoid confusion
@@ -207,7 +207,7 @@ class AdventOfCode: @adventofcode_group.command( name="leaderboard", - aliases=("board", "stats", "lb"), + aliases=("board", "lb"), brief="Get a snapshot of the PyDis private AoC leaderboard", ) async def aoc_leaderboard(self, ctx: commands.Context, number_of_people_to_display: int = 10): @@ -247,9 +247,22 @@ class AdventOfCode: embed=aoc_embed, ) + @adventofcode_group.command( + name="stats", + aliases=('lbs', "privatestats"), + brief=("Get summary statistics for the PyDis private leaderboard") + ) + async def private_leaderboard_stats(self, ctx: commands.Context): + """ + Return an embed with the summary statistics for the PyDis private leaderboard + + Embed will display the total members, and the number of users who have completed each day's puzzle + """ + raise NotImplementedError + @adventofcode_group.command( name="global", - aliases=("globalstats", "globalboard", "gb"), + aliases=("globalboard", "gb"), brief="Get a snapshot of the global AoC leaderboard", ) async def global_leaderboard(self, ctx: commands.Context, number_of_people_to_display: int = 10): @@ -461,6 +474,8 @@ class AocPrivateLeaderboard: self._event_year = event_year self.last_updated = datetime.utcnow() + self.daily_completion_summary = self.calculate_daily_completion() + def top_n(self, n: int = 10) -> dict: """ Return the top n participants on the leaderboard. @@ -470,6 +485,29 @@ class AocPrivateLeaderboard: return self.members[:n] + def calculate_daily_completion(self) -> List[tuple]: + """ + Calculate member completion rates by day + + Return a list of tuples for each day containing the number of users who completed each part + of the challenge + """ + + daily_member_completions = [] + for day in range(25): + one_star_count = 0 + two_star_count = 0 + for member in self.members: + if member.starboard[day][1]: + one_star_count += 1 + two_star_count += 1 + elif member.starboard[day][0]: + one_star_count += 1 + else: + daily_member_completions.append((one_star_count, two_star_count)) + + return(daily_member_completions) + @staticmethod async def json_from_url( leaderboard_id: int = AocConfig.leaderboard_id, year: int = AocConfig.year
fix: missing line in Disaggregator.clear_model_checkpoints. Fixes: NameError: name 'entry' is not defined
@@ -78,6 +78,7 @@ class Disaggregator(object): return with os.scandir() as path_list: - if not entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(".h5"): + for entry in path_list: + if entry.is_file() and entry.name.startswith(self.file_prefix) and entry.name.endswith(".h5"): print("{}: Removing {}".format(self.MODEL_NAME, entry.path)) os.remove(entry.path)
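The corrected loop is a reusable idiom for pruning checkpoint files. Here is a hedged standalone sketch of the same idea; the function name, directory, and prefix below are placeholders rather than NILMTK's actual values.

```
import os

def clear_checkpoints(file_prefix, directory=".", suffix=".h5"):
    """Remove '<file_prefix>*<suffix>' files from the given directory."""
    with os.scandir(directory) as entries:
        for entry in entries:
            if entry.is_file() and entry.name.startswith(file_prefix) and entry.name.endswith(suffix):
                print("Removing {}".format(entry.path))
                os.remove(entry.path)

# clear_checkpoints("MODEL-NAME-temp-weights")
```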
Update README.rst
Add new badges and conda instructions
-|AzurePipelines| |PyPIdownloads| |PyPI| |Conda| |CondaForge| |LatestRelease| |Binder| +|AzureDevops| |PyPIdownloads| |PyPI| |CondaSURG| |CondaPlatforms| |GithubRelease| |Binder| +.. |CondaSURG| image:: https://img.shields.io/conda/vn/SURG_JHU/uqpy?style=plastic :alt: Conda (channel only) +.. |CondaPlatforms| image:: https://img.shields.io/conda/pn/SURG_JHU/uqpy?style=plastic :alt: Conda +.. |GithubRelease| image:: https://img.shields.io/github/v/release/SURGroup/UQpy?style=plastic :alt: GitHub release (latest by date) +.. |AzureDevops| image:: https://img.shields.io/azure-devops/build/UQpy/5ce1851f-e51f-4e18-9eca-91c3ad9f9900/1?style=plastic :alt: Azure DevOps builds .. |PyPIdownloads| image:: https://img.shields.io/pypi/dm/UQpy?style=plastic :alt: PyPI - Downloads .. |PyPI| image:: https://img.shields.io/pypi/v/UQpy?style=plastic :alt: PyPI -.. |Conda| image:: https://img.shields.io/conda/dn/conda-forge/UQpy?style=plastic :alt: Conda -.. |CondaForge| image:: https://img.shields.io/conda/v/conda-forge/UQpy?style=plastic :alt: Conda -.. |LatestRelease| image:: https://img.shields.io/github/downloads/SURGroup/UQpy/latest/total?style=plastic :alt: GitHub Releases (by Release) .. |Binder| image:: https://mybinder.org/badge_logo.svg :target: https://mybinder.org/v2/gh/SURGroup/UQpy/master -.. |AzurePipelines| image:: https://img.shields.io/azure-devops/build/UQpy/5ce1851f-e51f-4e18-9eca-91c3ad9f9900/1/master :alt: Azure DevOps builds (branch) - ******************************************* Uncertainty Quantification with python (UQpy) @@ -69,6 +68,7 @@ Using Conda * :: conda install -c conda-forge uqpy + conda install -c surg_jhu uqpy (latest version) Clone your fork of the UQpy repo from your GitHub account to your local disk (to get the latest version):
Added link to mbed CLI for Windows Installer
Added a reference to the mbed CLI for Windows Installer in the Installation section.
@@ -80,6 +80,8 @@ $ sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 ### Installing Mbed CLI +Windows users can use the [mbed CLI for Windows installer](https://docs.mbed.com/docs/mbed-os-handbook/en/latest/dev_tools/cli_install/) which will install mbed CLI and all necessary requirements with one installer. + You can get the latest stable version of Mbed CLI through pip by running: ```
MtProtoSender: Fix crash on receiving unknown RPC results
Such RPC results may arrive after reconnection, for example.
@@ -294,6 +294,8 @@ class MtProtoSender: inner_length = reader.read_int() begin_position = reader.tell_position() + # note: this code is IMPORTANT for skipping RPC results of lost + # requests (for example, ones from the previous connection session) if not self.process_msg(inner_msg_id, sequence, reader, request): reader.set_position(begin_position + inner_length) @@ -372,7 +374,12 @@ class MtProtoSender: request.on_response(compressed_reader) else: reader.seek(-4) + if request_id == request.msg_id: request.on_response(reader) + else: + # note: if it's really a result for RPC from previous connection + # session, it will be skipped by the handle_container() + Log.w('RPC result found for unknown request (maybe from previous connection session)') def handle_gzip_packed(self, msg_id, sequence, reader, request): Log.d('Handling gzip packed data')
allocations.consumer_id is not used in query.
PostgreSQL required consumer_id in the GROUP BY clause, but consumer_id is not being used in the query and is superfluous. Closes-Bug:
@@ -975,7 +975,6 @@ def _check_capacity_exceeded(conn, allocs): provider_uuids = set([a.resource_provider.uuid for a in allocs]) usage = sa.select([_ALLOC_TBL.c.resource_provider_id, - _ALLOC_TBL.c.consumer_id, _ALLOC_TBL.c.resource_class_id, sql.func.sum(_ALLOC_TBL.c.used).label('used')]) usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(rc_ids))
ENH: added new test function
Added a test function to evaluate input arg or kwarg failure in a desired function or method.
import numpy as np - def assert_list_contains(small_list, big_list, test_nan=False, test_case=True): """Assert all elements of one list exist within the other list. @@ -149,3 +148,47 @@ def eval_warnings(warns, check_msgs, warn_type=DeprecationWarning): len(found_msgs) - np.sum(found_msgs), repr(warn_type)) return + + +def eval_bad_input(func, error, err_msg, input_args=None, input_kwargs=None): + """Evaluate bad function or method input. + + Parameters + ---------- + func : function or method + Function or method to be evaluated + error : class + Expected error or exception + err_msg : str + Expected error message + input_args : list or NoneType + Input arguments or None for no input arguments (default=None) + input_kwargs : dict or NoneType + Input keyword arguments or None for no input kwargs (default=None) + + Raises + ------ + AssertionError + If unexpected error message is returned + Exception + If error or exception of unexpected type is returned, it is raised + + """ + + # Ensure there are appropriate input lists and dicts + if input_args is None: + input_args = [] + + if input_kwargs is None: + input_kwargs = {} + + # Call the function, catching only the expected error type + try: + func(*input_args, **input_kwargs) + except error as err: + # Evaluate the error message + assert str(err).find(err_msg) >= 0, \ + "unexpected error message ({:} not in {:})".format(err_msg, + str(err)) + + return
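A short usage sketch for the new helper follows; `set_speed` is a made-up toy function, not part of pysat, and `eval_bad_input` is the helper defined in the diff above.

```
def set_speed(value):
    """Toy function that rejects negative input."""
    if value < 0:
        raise ValueError("speed must be non-negative")

# Passes silently: the raised ValueError and its message match the expectation.
eval_bad_input(set_speed, ValueError, "must be non-negative", input_args=[-5])

# Keyword arguments go through `input_kwargs` instead.
eval_bad_input(set_speed, ValueError, "must be non-negative", input_kwargs={"value": -5})
```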
Adding a note for test case test_volume_boot_pattern().
Added a comment as a NOTE for test case test_volume_boot_pattern() to describe its dependency on the public network. Closes-Bug:
@@ -68,6 +68,9 @@ class TestVolumeBootPattern(manager.EncryptionScenarioTest): waiters.wait_for_server_termination(self.servers_client, server['id']) @decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b') + # Note: This test is being skipped based on 'public_network_id'. + # It is being used in create_floating_ip() method which gets called + # from get_server_ip() method @testtools.skipUnless(CONF.network.public_network_id, 'The public_network_id option must be specified.') @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
Update __init__.py
chore: bugfix, "darwin" also contains a "win" :), so check for "windows" instead
@@ -94,7 +94,7 @@ def _mount_nfs_uri(provider_uri, mount_path, auto_mount: bool = False): else: # Judging system type sys_type = platform.system() - if "win" in sys_type.lower(): + if "windows" in sys_type.lower(): # system: window exec_result = os.popen(f"mount -o anon {provider_uri} {mount_path}") result = exec_result.read()
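The root cause is easy to demonstrate: `platform.system()` returns "Darwin" on macOS, and the substring "win" matches it. The snippet below is only an illustration of the check, not qlib code.

```
import platform

sys_type = platform.system()             # "Windows", "Linux" or "Darwin"
print("win" in "darwin".lower())         # True  -> macOS was misdetected as Windows
print("windows" in "darwin".lower())     # False -> the corrected check
print("windows" in "Windows".lower())    # True
```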
Makefile cleanup
Remove ansible-container related parameter.
@@ -9,13 +9,6 @@ DATE := $(shell date -u +%Y%m%d%H%M) VERSION=$(shell $(PYTHON) -c "from galaxy import __version__; print(__version__.split('-')[0])") RELEASE=$(shell $(PYTHON) -c "from galaxy import __version__; print(__version__.split('-')[1])") -#ansible-container options -ifeq ($(DETACHED),yes) - detach_option="-d" -else - detach_option="" -endif - ifneq ($(OFFICIAL),yes) BUILD=dev$(DATE) SDIST_TAR_FILE=galaxy-$(VERSION)-$(BUILD).tar.gz @@ -31,7 +24,6 @@ endif DOCKER_COMPOSE=docker-compose -f ./scripts/compose-dev.yml -p galaxy - .PHONY: help help: @echo "Prints help"
stm32h7: add or fix interrupts for b3
Ended up changing some peripherals too:
* DAC -> DAC1
* Add DAC2
* USART9 -> UART9
@@ -10,6 +10,8 @@ _modify: name: FDCAN1 FDCAN: name: FDCAN2 + DAC: + name: DAC1 # The SVD is just quite different to the RM for all these registers. # We'll go with the RM convention even though it is inconsistent too. @@ -265,6 +267,13 @@ _modify: AWDCH1CH: name: AWD1CH +# The ADC3 interrupt doesn't exist (no ADC3 peripheral), but the slot was +# re-used for DAC2 +ADC1: + _delete: + _interrupts: + - ADC3 + AXI: _strip: - AXI_ @@ -669,10 +678,19 @@ CRS: _strip: - CRS_ -DAC: +DAC1: _strip: - DAC_ +_add: + DAC2: + derivedFrom: DAC1 + baseAddress: 0x58003400 + interrupts: + DAC2: + description: DAC2 underrun interrupt + value: 127 + DMA2D: _strip: - DMA2D_ @@ -712,6 +730,8 @@ MDIOS: PWR: _strip: - PWR_ + _delete: + _interrupts: WWDG1_RST # Doesn't exist at all on these parts CR3: # Annoyingly RM0455 names these fields differently to RM0399 whilst # they have the same function @@ -729,6 +749,13 @@ PWR: bitOffset: 3 bitWidth: 1 +RAMECC: + _add: + _interrupts: + RAMECC: + description: ECC diagnostic global interrupt + value: 145 + RTC: _strip: - RTC_ @@ -744,11 +771,43 @@ I2C3: SAI1: _strip: - SAI_ + _add: + _interrupts: + SAI1: + description: SAI1 global interrupt + value: 87 SDMMC?: _strip: - SDMMC_ +SPDIFRX: + _add: + _interrupts: + SPDIFRX: + description: SPDIFRX global interrupt + value: 97 + +_delete: + - USART9 + - USART10 + +_add: + UART9: + derivedFrom: USART1 + baseAddress: 0x40018000 + interrupts: + UART9: + description: UART9 global interrupt + value: 140 + USART10: + derivedFrom: USART1 + baseAddress: 0x4001C000 + interrupts: + USART10: + description: USART10 global interrupt + value: 141 + VREFBUF: _strip: - VREFBUF_
chore: fix internal strings
This commit fixes some internal CI links and content.
@@ -20,6 +20,12 @@ chmod -R 755 ./$PIPELINE_ID # mv logo_white.svg ./$PIPELINE_ID/static/images/logo.svg # mv favicon.ico ./$PIPELINE_ID/static/images/favicon.ico +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e '#ara is a free and open source project under#d' {} \; +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e 's#https://github.com/ansible-community/ara#https://github.com/kubeinit/kubeinit#g' {} \; +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e 's#https://ara.recordsansible.org#https://kubeinit.org#g' {} \; +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e 's#ARA Records Ansible and makes it easier to understand and troubleshoot. It is another recursive acronym.#KubeInit helps with the deployment of multiple Kubernetes distributions.#g' {} \; +find ./$PIPELINE_ID -type f -name '*.html' -exec sed -i -e 's#ara#KubeInit#g' {} \; + find ./$PIPELINE_ID -type f -exec sed -i -e 's#../static/images/logo.svg#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/logo_white.svg#g' {} \; find ./$PIPELINE_ID -type f -exec sed -i -e 's#../static/images/favicon.ico#https://raw.githubusercontent.com/Kubeinit/kubeinit/master/images/favicon.ico#g' {} \;
fix flake8 lint
Summary: Pull Request resolved:
ghimport-source-id:
Stack from [ghstack](https://github.com/ezyang/ghstack):
* **#18835 fix flake8 lint**
* [jit] run cpp tests for non-cuda builds in test_jit.py ...again
@@ -343,6 +343,7 @@ def has_sparse_dispatches(dispatches): return True return False + def parse_native_yaml(path): with open(path, 'r') as f: return yaml.load(f, Loader=Loader)
On errors inside the thread, set a `raise_next_tick` flag
Threads that raise exceptions bypass the callback, which makes dbt hang. Now threads don't raise during the callback; instead they set a flag. The RunManager will check the flag during queue processing and raise if set. Fix compilation failures so they raise properly.
@@ -54,6 +54,7 @@ class RunManager(object): ]) self.node_results = [] self._skipped_children = {} + self._raise_next_tick = None def get_runner(self, node): adapter = get_adapter(self.config) @@ -82,7 +83,11 @@ class RunManager(object): runner.after_execute(result) if result.errored and runner.raise_on_first_error(): - raise dbt.exceptions.RuntimeException(result.error) + # if we raise inside a thread, it'll just get silently swallowed. + # stash the error message we want on the RunManager, and it will + # check the next 'tick' - should be soon since our thread is about + # to finish! + self._raise_next_tick = result.error return result @@ -100,16 +105,23 @@ class RunManager(object): else: pool.apply_async(self.call_runner, args=args, callback=callback) + def _raise_set_error(self): + if self._raise_next_tick is not None: + raise dbt.exceptions.RuntimeException(self._raise_next_tick) + def run_queue(self, pool): """Given a pool, submit jobs from the queue to the pool. """ def callback(result): - """A callback to handle results.""" + """Note: mark_done, at a minimum, must happen here or dbt will + deadlock during ephemeral result error handling! + """ self._handle_result(result) self.job_queue.mark_done(result.node.unique_id) while not self.job_queue.empty(): node = self.job_queue.get() + self._raise_set_error() runner = self.get_runner(node) # we finally know what we're running! Make sure we haven't decided # to skip it due to upstream failures @@ -118,14 +130,18 @@ class RunManager(object): runner.do_skip(cause=cause) args = (runner,) self._submit(pool, args, callback) + # block on completion self.job_queue.join() + # if an error got set during join(), raise it. + self._raise_set_error() return def _handle_result(self, result): - """Note: this happens inside an apply_async() callback, so it must be - "fast". (The pool worker thread will block!) + """Mark the result as completed, insert the `CompiledResultNode` into + the manifest, and mark any descendants (potentially with a 'cause' if + the result was an ephemeral model) as skipped. """ is_ephemeral = self.Runner.is_ephemeral_model(result.node) if not is_ephemeral:
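The underlying pattern is general: worker code swallows the exception, records a message on the coordinator, and the coordinating loop re-raises on its own thread. The sketch below shows that shape with invented names (`Coordinator`, `work`); it is not dbt's API.

```
from multiprocessing.dummy import Pool  # thread pool


class Coordinator:
    def __init__(self):
        self._raise_next_tick = None

    def _call_worker(self, item):
        try:
            return work(item)
        except Exception as exc:
            # raising here would be swallowed by the pool thread
            self._raise_next_tick = str(exc)

    def _raise_set_error(self):
        if self._raise_next_tick is not None:
            raise RuntimeError(self._raise_next_tick)

    def run(self, items):
        with Pool(4) as pool:
            for item in items:
                self._raise_set_error()          # check before submitting more work
                pool.apply_async(self._call_worker, (item,))
            pool.close()
            pool.join()
        self._raise_set_error()                  # and once everything has finished


def work(item):
    if item < 0:
        raise ValueError("bad item: {}".format(item))
    return item * 2


Coordinator().run([1, 2, -3, 4])   # raises RuntimeError("bad item: -3") on the main thread
```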
fix missing addtoProcessedArchive call
add debug logging as well
@@ -244,10 +244,12 @@ def addtoProcessedArchive(inputfile, processedList, processedArchive): processedList.add(inputfile) with open(processedArchive, 'w') as pa: json.dump(processedList, pa) + log.debug("Adding %s to processed archive %s" % (inputfile, processedArchive)) def processFile(inputfile, mp, info=None, relativePath=None, silent=False, tag=True, tmdbid=None, tvdbid=None, imdbid=None, season=None, episode=None, original=None, processedList=None, processedArchive=None): if checkAlreadyProcessed(inputfile, processedList, processedArchive): + log.debug("%s is already processed and will be skipped based on archive %s." % (inputfile, processedArchive)) return # Process @@ -295,6 +297,7 @@ def processFile(inputfile, mp, info=None, relativePath=None, silent=False, tag=T elif tagdata.mediatype == MediaType.TV: postprocessor.setTV(tagdata.tmdbid, tagdata.season, tagdata.episode) postprocessor.run_scripts() + addtoProcessedArchive(inputfile, processedList, processedArchive) else: log.error("There was an error processing file %s, no output data received" % inputfile)
Update notes for /_dbs_info
Update the Notes section for /_dbs_info to include advice on keeping the db limit to 100.
.. note:: The supported number of the specified databases in the list can be limited by modifying the `max_db_number_for_dbs_info_req` entry in configuration - file. The default limit is 100. + file. The default limit is 100. Increasing the limit, while possible, creates + load on the server so it is advisable to have more requests with 100 dbs, + rather than a few requests with 1000s of dbs at a time. .. _api/server/cluster_setup:
check that num is valid in scan()
always convert num to int
@@ -805,6 +805,12 @@ def scan(detectors, *args, num=None, per_step=None, md=None): "argument 'num'.") num = args[-1] args = args[:-1] + + if not (float(num).is_integer() and num > 0.0): + raise ValueError(f"The parameter `num` is expected to be a number of steps (not step size!) " + f"It must therefore be a whole number. The given value was {num}.") + num = int(num) + md_args = list(chain(*((repr(motor), start, stop) for motor, start, stop in partition(3, args)))) motor_names = tuple(motor.name for motor, start, stop
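The added validation also works as a standalone check. The sketch below restates it with an invented helper name and a few example calls.

```
def validate_num_of_steps(num):
    """Accept a positive whole number (int or integral float) and return it as int."""
    if not (float(num).is_integer() and num > 0.0):
        raise ValueError(
            f"The parameter `num` is expected to be a number of steps (not step size!) "
            f"It must therefore be a whole number. The given value was {num}."
        )
    return int(num)

validate_num_of_steps(10)      # -> 10
validate_num_of_steps(10.0)    # -> 10
# validate_num_of_steps(0.5)   # raises ValueError
# validate_num_of_steps(-3)    # raises ValueError
```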
scripts: Fix pylint issue W0613 in scripts/templates/2.0/adjust.py
scripts/templates/2.0/adjust.py:4:19: W0613: Unused argument 'mapping' (unused-argument)
import ast -def adjust(config, mapping): +def adjust(config, mapping): # pylint: disable=unused-argument """ Process the configuration intermediary representation adjusting some of the values following changes to the configuration files semantics.
Add new CLI commands `cci project connect_saucelabs` and `cci project show_saucelabs`
@@ -285,6 +285,32 @@ def project_show_github(config): except ServiceNotConfigured: click.echo('Github is not configured for this project. Use project connect_github to configure.') [email protected](name='connect_saucelabs', help="Configure this project for Saucelabs tasks") [email protected]('--username', help="The Saucelabs username to use for tasks", prompt=True) [email protected]('--api-key', help="The Saucelabs username to use for tasks", prompt=True, hide_input=True) [email protected]('--project', help='Set if storing encrypted keychain file in project directory', is_flag=True) +@pass_config +def project_connect_saucelabs(config, username, api_key, project): + check_keychain(config) + config.keychain.set_service('saucelabs', ServiceConfig({ + 'username': username, + 'api_key': api_key, + }), project) + if project: + click.echo('Saucelabs is now configured for this project') + else: + click.echo('Saucelabs is now configured for global use') + [email protected](name='show_saucelabs', help="Prints the current Saucelabs configuration for this project") +@pass_config +def project_show_saucelabs(config): + check_keychain(config) + try: + saucelabs = config.keychain.get_service('saucelabs') + click.echo(pretty_dict(saucelabs.config)) + except ServiceNotConfigured: + click.echo('Saucelabs is not configured for this project. Use project connect_saucelabs to configure.') + @click.command(name='connect_mrbelvedere', help="Configure this project for mrbelvedere tasks") @click.option('--base_url', help="The base url for your mrbelvedere instance", prompt=True) @@ -354,11 +380,13 @@ def project_cd(config): project.add_command(project_connect_apextestsdb) project.add_command(project_connect_github) project.add_command(project_connect_mrbelvedere) +project.add_command(project_connect_saucelabs) project.add_command(project_init) project.add_command(project_info) project.add_command(project_show_apextestsdb) project.add_command(project_show_github) project.add_command(project_show_mrbelvedere) +project.add_command(project_show_saucelabs) #project.add_command(project_list) #project.add_command(project_cd)
BUG: Add missing DECREF in new path
Pretty harmless reference count leak (the method is a singleton)
@@ -476,6 +476,7 @@ PyArray_CheckCastSafety(NPY_CASTING casting, if (PyArray_MinCastSafety(castingimpl->casting, casting) == casting) { /* No need to check using `castingimpl.resolve_descriptors()` */ + Py_DECREF(meth); return 1; }
Clarify the pair-filter exception rule See
@@ -1426,7 +1426,7 @@ The ``--pair-filter`` option determines how to combine the filters for R1 and R2 into a single decision about the read pair. The default is ``--pair-filter=any``, which means that a read pair is discarded -(or redirected) if *one of* the reads (R1 or R2) fulfills the filtering criterion. +(or redirected) if at least *one of* the reads (R1 or R2) fulfills the filtering criterion. As an example, if option ``--minimum-length=20`` is used and paired-end data is processed, a read pair is discarded if at least one of the reads is shorter than 20 nt. @@ -1455,6 +1455,8 @@ The following table describes the effect for some filtering options. | ``--max-n`` | one of the reads contains too many ``N`` bases | both reads contain too many ``N`` bases | +----------------------------+------------------------------------------------+-----------------------------------------+ +There is currently no way to change the pair-filter mode for each filter individually. + .. note:: As an exception, when you specify adapters *only* for R1 (``-a``/``-g``/``-b``) or *only* for @@ -1465,6 +1467,10 @@ The following table describes the effect for some filtering options. untrimmed because it would always be the case that one of the reads in the pair does not contain an adapter. + The pair-filter mode for the other filtering options, such as ``--minimum-length``, is + not overridden in the same way and remains ``any`` unless changed explicitly with the + ``--pair-filter`` option. + These are the paired-end specific filtering and output options: ``--minimum-length LENGTH1:LENGTH2`` or ``-m LENGTH1:LENGTH2``
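The decision logic described by the table can be summarised in a few lines. The snippet below is only an illustration of the documented semantics, not cutadapt's implementation.

```
def pair_is_filtered(r1_fails, r2_fails, pair_filter="any"):
    """Return True if the read pair should be discarded or redirected."""
    if pair_filter == "any":
        return r1_fails or r2_fails       # default: one failing read is enough
    elif pair_filter == "both":
        return r1_fails and r2_fails      # both reads must fail
    raise ValueError("unknown pair-filter mode: {}".format(pair_filter))

pair_is_filtered(True, False)                       # True  (any)
pair_is_filtered(True, False, pair_filter="both")   # False
```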
MAINT: Tidy exception handling in _datasource.py
Remove unnecessary try/except from DataSource.
import os import shutil import io -from contextlib import closing from numpy.core.overrides import set_module @@ -333,12 +332,9 @@ def _cache(self, path): # TODO: Doesn't handle compressed files! if self._isurl(path): - try: - with closing(urlopen(path)) as openedurl: + with urlopen(path) as openedurl: with _open(upath, 'wb') as f: shutil.copyfileobj(openedurl, f) - except URLError: - raise URLError("URL not found: %s" % path) else: shutil.copyfile(path, upath) return upath
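The simplification leans on the fact that `urllib.request.urlopen` returns a context manager in Python 3, which pairs naturally with `shutil.copyfileobj`. The standalone sketch below uses a placeholder URL and file name.

```
import shutil
from urllib.request import urlopen

url = "https://example.com/data.csv"      # placeholder URL
with urlopen(url) as response, open("cached_data.csv", "wb") as f:
    shutil.copyfileobj(response, f)
```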
Don't generate wrappers for private properties that return arrays
Such wrappers are useful only as part of the public API. TN:
@@ -15,7 +15,9 @@ ${ada_doc(property, 0)} ## Wrapper to return convenient Ada arrays -% if not property.overriding and is_array_type(property.type): +% if property.is_public \ + and not property.overriding \ + and is_array_type(property.type): function ${property.name} ${helpers.argument_list(property, False)} return ${property.type.api_name()};
Made resumable upload test use bigger file
Sometimes the resumable upload test would fail because it couldn't artificially halt the test before finishing uploading the entire test file. Made the test file much larger with more entropy to help avoid this race condition.
@@ -3807,7 +3807,10 @@ class TestCp(testcase.GsUtilIntegrationTestCase): def start_over_error_test_helper(self, http_error_num): bucket_uri = self.CreateBucket() - fpath = self.CreateTempFile(contents=b'a' * 2 * ONE_KIB) + # The object contents need to be fairly large to avoid the race condition + # where the contents finish uploading before we artifically halt the copy. + rand_chars = get_random_ascii_chars(size=(ONE_MIB * 4)) + fpath = self.CreateTempFile(contents=rand_chars) boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB)) if self.test_api == ApiSelector.JSON: test_callback_file = self.CreateTempFile(
actions.py: Removed unnecessary logging in notify_subscription_added/removed. We already record RealmAuditLog in bulk_add/remove_subscription so there is no need to log while notifying.
@@ -2716,14 +2716,7 @@ def get_subscriber_emails(stream: Stream, def notify_subscriptions_added(user_profile: UserProfile, sub_pairs: Iterable[Tuple[Subscription, Stream]], stream_user_ids: Callable[[Stream], List[int]], - recent_traffic: Dict[int, int], - no_log: bool=False) -> None: - if not no_log: - log_event({'type': 'subscription_added', - 'user': user_profile.email, - 'names': [stream.name for sub, stream in sub_pairs], - 'realm': user_profile.realm.string_id}) - + recent_traffic: Dict[int, int]) -> None: sub_dicts = [] for (subscription, stream) in sub_pairs: sub_dict = stream.to_dict() @@ -2991,13 +2984,7 @@ def get_available_notification_sounds() -> List[str]: return available_notification_sounds -def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream], - no_log: bool=False) -> None: - if not no_log: - log_event({'type': 'subscription_removed', - 'user': user_profile.email, - 'names': [stream.name for stream in streams], - 'realm': user_profile.realm.string_id}) +def notify_subscriptions_removed(user_profile: UserProfile, streams: Iterable[Stream]) -> None: payload = [dict(name=stream.name, stream_id=stream.id) for stream in streams] event = dict(type="subscription", op="remove",
fix: forgot a list check in raise_for_missing_imports
@@ -3,4 +3,5 @@ import sys def raise_for_missing_imports(*args): missing = [pkg for pkg in args if pkg not in sys.modules] + if missing: raise ImportError(f"Missing packages: {missing}") \ No newline at end of file
[Test] Fix the lint from
Lint failed after it was merged due to a conflict. This should fix the issue.
@@ -32,7 +32,6 @@ from ray.dashboard.modules.event.event_utils import ( monitor_events, ) from ray.job_submission import JobSubmissionClient -from pprint import pprint logger = logging.getLogger(__name__)
Fix callframe for exporting notebook. Also expose check_ipython() method for use in notebook.
@@ -44,20 +44,24 @@ from tfx.orchestration.interactive import notebook_formatters from tfx.orchestration.launcher import in_process_component_launcher +def check_ipython(): + # __IPYTHON__ variable is set by IPython, see + # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython. + return getattr(builtins, '__IPYTHON__', None) + + def requires_ipython(fn): """Decorator for methods that can only be run in IPython.""" @functools.wraps(fn) - def check_ipython(*args, **kwargs): + def run_if_ipython(*args, **kwargs): """Invokes `fn` if called from IPython, otherwise just emits a warning.""" - # __IPYTHON__ variable is set by IPython, see - # https://ipython.org/ipython-doc/rel-0.10.2/html/interactive/reference.html#embedding-ipython. - if getattr(builtins, '__IPYTHON__', None): + if check_ipython(): return fn(*args, **kwargs) else: logging.warning('Method "%s" is a no-op when invoked outside of IPython.', fn.__name__) - return check_ipython + return run_if_ipython class InteractiveContext(object): @@ -172,7 +176,7 @@ class InteractiveContext(object): if current_frame is None: raise ValueError('Unable to get current frame.') - caller_filepath = inspect.getfile(current_frame.f_back) + caller_filepath = inspect.getfile(current_frame.f_back.f_back) notebook_dir = os.path.dirname(os.path.abspath(caller_filepath)) # The notebook filename is user-provided, as IPython kernels are agnostic to
docs: Document ADD_TOKENS_TO_NOREPLY_ADDRESS in email.md. Rewritten and moved by tabbott.
@@ -115,7 +115,22 @@ If it doesn't work, check these common failure causes: your hosting provider's firewall. * Your SMTP server's permissions might not allow the email account - you're using to send email from the `noreply` email address. + you're using to send email from the `noreply` email addresses used + by Zulip when sending confirmation emails. + + For security reasons, Zulip sends confirmation emails (used for + account creation, etc.) with randomly generated from addresses + starting with `noreply-`. + + If necessary, you can set `ADD_TOKENS_TO_NOREPLY_ADDRESS` to `False` + in `/etc/zulip/settings.py` (which will cause these confirmation + emails to be sent from a consistent `noreply@` address). Disabling + `ADD_TOKENS_TO_NOREPLY_ADDRESS` is generally safe if you are not + using Zulip's feature that allows anyone to create an account in + your Zulip organization if they have access to an email address in a + certain domain. See [this article][helpdesk-attack] for details on + the security issue with helpdesk software that + `ADD_TOKENS_TO_NOREPLY_ADDRESS` helps protect against. * Make sure you set the password in `/etc/zulip/zulip-secrets.conf`. @@ -158,3 +173,5 @@ aren't receiving emails from Zulip: if Django documentation references setting `EMAIL_HOST_PASSWORD`, you should instead set `email_password` in `/etc/zulip/zulip-secrets.conf`. + +[helpdesk-attack]: https://medium.com/intigriti/how-i-hacked-hundreds-of-companies-through-their-helpdesk-b7680ddc2d4c
fix 'SkillConfig.package_dependencies'
both contracts and skills were missing.
@@ -1116,10 +1116,21 @@ class SkillConfig(ComponentConfiguration): @property def package_dependencies(self) -> Set[ComponentId]: """Get the connection dependencies.""" - return { + return ( + { ComponentId(ComponentType.PROTOCOL, protocol_id) for protocol_id in self.protocols } + .union( + { + ComponentId(ComponentType.CONTRACT, contract_id) + for contract_id in self.contracts + } + ) + .union( + {ComponentId(ComponentType.SKILL, skill_id) for skill_id in self.skills} + ) + ) @property def json(self) -> Dict:
[ReTrigger] Mitigate bad regular expressions
Thanks Sinbad on Discord for pointing out the flaws in the implementation and providing suggestions to mitigate the issue.
@@ -15,6 +15,7 @@ import functools import asyncio import random import string +from multiprocessing import Pool, TimeoutError from .converters import * @@ -383,7 +384,20 @@ class TriggerHandler: continue if allowed_trigger and (is_auto_mod and is_mod): continue - search = re.findall(trigger.regex, message.content) + + try: + pool = Pool(processes=1) + process = pool.apply_async(re.findall, (trigger.regex, message.content)) + task = functools.partial(process.get, timeout=10) + task = self.bot.loop.run_in_executor(None, task) + search = await asyncio.wait_for(task, timeout=10) + except (TimeoutError, asyncio.TimeoutError) as e: + error_msg = ("ReTrigger took too long to find matches " + f"{guild.name} ({guild.id}) " + f"Offending regex {trigger.regex} Name: {trigger.name}") + log.error(error_msg, exc_info=True) + return # we certainly don't want to be performing multiple triggers if this happens + if search != []: if await self.check_trigger_cooldown(message, trigger): return
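The mitigation boils down to evaluating a user-supplied regex in a separate worker process and abandoning it after a timeout, so a catastrophically backtracking pattern cannot stall the event loop. Below is a minimal synchronous sketch of that idea; it is not the cog's actual code, and the pattern in the comment is only an illustrative example of catastrophic backtracking.

```
import re
from multiprocessing import Pool, TimeoutError

def safe_findall(pattern, text, timeout=10):
    """Run re.findall in a worker process; return None if it takes too long."""
    with Pool(processes=1) as pool:
        result = pool.apply_async(re.findall, (pattern, text))
        try:
            return result.get(timeout=timeout)
        except TimeoutError:
            return None   # caller should treat this as "no match / give up"

# safe_findall(r"(a+)+$", "a" * 30 + "b")   # returns None instead of hanging
```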
Adding a simple restart endpoint command
A restart endpoint command was requested (usually for cases when a reconfiguration needs to be recognized). It simply calls the stop and then the start commands on the designated endpoint.
@@ -373,6 +373,13 @@ def stop_endpoint(name: str = typer.Argument("default", autocompletion=complete_ logger.info("Endpoint <{}> is not active.".format(name)) [email protected](name="restart") +def restart_endpoint(name: str = typer.Argument("default", autocompletion=complete_endpoint_name)): + """Restarts an endpoint""" + stop_endpoint(name) + start_endpoint(name) + + @app.command(name="list") def list_endpoints(): """ List all available endpoints