message: string, 13–484 characters
diff: string, 38–4.63k characters
Correctly handle serve port option, fix Thanks for catching it!
@@ -171,8 +171,8 @@ def register_command(subparsers):
     parser.set_defaults(func=run)
     parser.add_argument('--host', default='127.0.0.1',
                         help='Host to expose the demo serve on')
-    parser.add_argument('--port', action='store_const', default=7878,
-                        const=int, help='Port to expose the demo server on')
+    parser.add_argument('--port', default='7878',
+                        help='Port to expose the demo server on')


 def run(args):
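For what it's worth, if the port should end up as an integer rather than the string '7878', argparse's `type=` parameter is the usual tool; a small sketch outside the original project (names and defaults are illustrative):

    import argparse

    parser = argparse.ArgumentParser(description="demo server")
    # type=int converts and validates the value; action='store_const' never does that
    parser.add_argument('--port', type=int, default=7878,
                        help='Port to expose the demo server on')

    args = parser.parse_args(['--port', '8080'])
    print(args.port, type(args.port))   # 8080 <class 'int'>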
Fix EOL Remove a redundant / wrong closing parenthesis.
@@ -300,7 +300,7 @@ def filter_genes(
     logg.info(
         f"in less than {_min_cells} cells ({layer})."
         if _min_counts is None
-        else f"{_min_counts)} counts ({layer}).",
+        else f"{_min_counts} counts ({layer}).",
         no_indent=True,
     )
     if max_cells is not None or max_counts is not None:
roadmap: update TiKV roadmap Via:
@@ -39,28 +39,45 @@ This document defines the roadmap for TiDB development. ## TiKV: -- [ ] Raft - - [x] Region merge - - [ ] Local read thread - - [ ] Multi-thread raftstore - - [x] None voter - - [x] Pre-vote - - [ ] Multi-thread apply pool - - [ ] Split region in batch - - [ ] Raft Engine -- [x] RocksDB - - [x] DeleteRange - - [ ] BlobDB -- [x] Transaction +### Raft + +- [x] Region Merge - Merge small Regions together to reduce overhead +- [x] Local Read Thread - Process read requests in a local read thread +- [x] Split Region in Batch - Speed up Region split for large Regions +- [x] Raft Learner - Support Raft learner to smooth the configuration change process +- [x] Raft Pre-vote - Support Raft pre-vote to avoid unnecessary leader election on network isolation +- [ ] Joint Consensus - Change multi members safely. +- [ ] Multi-thread Raftstore - Process Region Raft logic in multiple threads +- [ ] Multi-thread apply pool - Apply Region Raft committed entries in multiple threads + +### Engine + +- [ ] Titan - Separate large key-values from LSM-Tree +- [ ] Pluggable Engine Interface - Clean up the engine wrapper code and provide more extendibility + +### Storage + +- [ ] Flow Control - Do flow control in scheduler to avoid write stall in advance + +### Transaction + - [x] Optimize transaction conflicts - - [ ] Distributed GC -- [x] Coprocessor - - [x] Streaming -- [ ] Tool - - [x] Import distributed data - - [ ] Export distributed data - - [ ] Disaster Recovery -- [ ] Flow control and degradation +- [ ] Distributed GC - Distribute MVCC garbage collection control to TiKV + +### Coprocessor + +- [x] Streaming - Cut large data set into small chunks to optimize memory consumption +- [ ] Chunk Execution - Process data in chunk to improve performance +- [ ] Request Tracing - Provide per-request execution details + +### Tools + +- [x] TiKV Importer - Speed up data importing by SST file ingestion + +### Client + +- [ ] TiKV client (Rust crate) +- [ ] Batch gRPC Message - Reduce message overhead ## PD: @@ -69,7 +86,7 @@ This document defines the roadmap for TiDB development. - [x] Decentralize scheduling table Regions - [x] Scheduler supports prioritization to be more controllable - [ ] Use machine learning to optimize scheduling -- [ ] Cluster Simulator +- [ ] Optimize Region metadata - Save Region metadata in detached storage engine ## TiSpark:
stop `nan` from breaking everything unit and integration tests pass i owe *yet another* test
@@ -198,6 +198,8 @@ class CsvImporter:

         # required float
         try:
+            if value == 'nan':
+                raise ValueError('nan not valid for `value`')
             value = float(row.val)
         except (TypeError, ValueError) as e:
             # val was either `None` or not a float
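The reason the explicit guard is needed: `float('nan')` parses successfully instead of raising, so NaN would otherwise sail straight past the `except (TypeError, ValueError)` handler. A quick demonstration:

    import math

    value = float('nan')        # no exception -- float() accepts the string 'nan'
    print(math.isnan(value))    # True
    print(value == value)       # False: NaN compares unequal even to itself,
                                # which is what tends to break downstream logic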
Fix a bug in patch Related to apply_to_all_doctypes
@@ -23,12 +23,14 @@ def execute(): user_permissions_to_delete = [] for user_permission in frappe.get_all('User Permission', fields=['*']): + skip_for_doctype = [] + + # while migrating from v11 -> v11 if has_skip_for_doctype: if not user_permission.skip_for_doctype: - frappe.db.set_value('User Permission', user_permission.name, 'apply_to_all_doctypes', 1) continue skip_for_doctype = user_permission.skip_for_doctype.split('\n') - else: + else: # while migrating from v10 -> v11 if skip_for_doctype_map[(user_permission.allow, user_permission.user)] == None: skip_for_doctype = get_doctypes_to_skip(user_permission.allow, user_permission.user) # cache skip for doctype for same user and doctype @@ -47,19 +49,26 @@ def execute(): user_permission.skip_for_doctype = None for doctype in applicable_for_doctypes: if doctype: - # Maintain sequence (name, user, allow, for_Value, applicable_for) + # Maintain sequence (name, user, allow, for_value, applicable_for, apply_to_all_doctypes) new_user_permissions_list.append(( frappe.generate_hash("", 10), user_permission.user, user_permission.allow, user_permission.for_value, - doctype + doctype, + 0 )) - + else: + # No skip_for_doctype found! Just update apply_to_all_doctypes. + frappe.db.set_value('User Permission', user_permission.name, 'apply_to_all_doctypes', 1) if new_user_permissions_list: print(len(new_user_permissions_list)) - frappe.db.sql('INSERT INTO `tabUser Permission` (`name`, `user`, `allow`, `for_value`, `applicable_for`) VALUES {}'.format( + frappe.db.sql(''' + INSERT INTO `tabUser Permission` + (`name`, `user`, `allow`, `for_value`, `applicable_for`, `apply_to_all_doctypes`) + VALUES {} + '''.format( ', '.join(['%s'] * len(new_user_permissions_list)) ), tuple(new_user_permissions_list))
Issue - remove bots from contributors list * fix-contributors-list * remove-bots * delete-extraneous-file * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see
@@ -20,7 +20,7 @@ def announce(version):
     stdout = check_output(["git", "log", f"{last_version}..HEAD", "--format=%aN"])
     stdout = stdout.decode("utf-8")

-    contributors = set(stdout.splitlines())
+    contributors = {name for name in stdout.splitlines() if not name.endswith("[bot]")}

     template_name = (
         "release.minor.rst" if version.endswith(".0") else "release.patch.rst"
Fix: push selected non-tracking branch close
@@ -74,6 +74,7 @@ class GsPushCommand(WindowCommand, PushBase): elif self.savvy_settings.get("prompt_for_tracking_branch"): if sublime.ok_cancel_dialog(SET_UPSTREAM_PROMPT): self.window.run_command("gs_push_to_branch_name", { + "local_branch_name": self.local_branch_name, "set_upstream": True, "force": self.force, "force_with_lease": self.force_with_lease @@ -82,6 +83,7 @@ class GsPushCommand(WindowCommand, PushBase): # if `prompt_for_tracking_branch` is false, ask for a remote and perform # push current branch to a remote branch with the same name self.window.run_command("gs_push_to_branch_name", { + "local_branch_name": self.local_branch_name, "branch_name": self.local_branch_name, "set_upstream": False, "force": self.force, @@ -117,7 +119,18 @@ class GsPushToBranchNameCommand(WindowCommand, PushBase): Prompt for remote and remote branch name, then push. """ - def run(self, branch_name=None, set_upstream=False, force=False, force_with_lease=False): + def run( + self, + local_branch_name=None, + branch_name=None, + set_upstream=False, + force=False, + force_with_lease=False): + if local_branch_name: + self.local_branch_name = local_branch_name + else: + self.local_branch_name = self.get_current_branch_name() + self.branch_name = branch_name self.set_upstream = set_upstream self.force = force @@ -140,10 +153,9 @@ class GsPushToBranchNameCommand(WindowCommand, PushBase): if self.branch_name: self.on_entered_branch_name(self.branch_name) else: - current_local_branch = self.get_current_branch_name() show_single_line_input_panel( PUSH_TO_BRANCH_NAME_PROMPT, - current_local_branch, + self.local_branch_name, self.on_entered_branch_name ) @@ -154,7 +166,7 @@ class GsPushToBranchNameCommand(WindowCommand, PushBase): """ sublime.set_timeout_async(lambda: self.do_push( self.selected_remote, - self.get_current_branch_name(), + self.local_branch_name, force=self.force, force_with_lease=self.force_with_lease, remote_branch=branch))
Added HAVING class Added a class named Having that inherits from TokenList. This will make further manipulation easier, since a HAVING clause contains multiple conditions, just like a WHERE clause.
@@ -533,6 +533,12 @@ class Where(TokenList):
               'HAVING', 'RETURNING', 'INTO')


+class Having(TokenList):
+    """A HAVING clause."""
+    M_OPEN = T.Keyword, 'HAVING'
+    M_CLOSE = T.Keyword, ('ORDER', 'LIMIT')
+
+
 class Case(TokenList):
     """A CASE statement with one or more WHEN and possibly an ELSE part."""
     M_OPEN = T.Keyword, 'CASE'
message_controls: Make click area for icons more uniform. Previously the user had to click pixel-perfect on the message controls icons to trigger the click action. This commit uniformly increases the click target area for the icons. Tweaked by tabbott to avoid some weird glitches.
@@ -594,7 +594,7 @@ td.pointer { } .message_controls { - top: -3px; + top: -5px; } .message_top_line { @@ -674,8 +674,10 @@ td.pointer { .message_controls { display: inline-block; position: absolute; - top: 1px; + top: -1px; right: -56px; + width: 54px; + height: 22px; z-index: 1; // This is a bit tricky; we need to reserve space for the message @@ -687,7 +689,12 @@ td.pointer { opacity: 0; visibility: hidden; transition: all 0.2s ease; - padding: 0px 1px; + width: 16px; + text-align: center; + + > i { + vertical-align: middle; + } } > .reaction_button { @@ -2761,17 +2768,15 @@ select.inline_select_topic_edit { left: -3px; } + /* In mobile views we place it just to the right of the time element */ .message_controls { - width: 56px; right: 40px; - top: 0px; } .include-sender .message_controls { background: none !important; padding: none !important; border: none !important; - top: -3px; } .message_time {
v2.7.6 Changed ABCMessageHandler The return value of a __repr__ must be a string object
@@ -168,14 +168,15 @@ class ABCMessageHandler(ABC): def __repr__(self): rules = "" - for rules in self.rules: + for rule in self.rules: rules += ( - rules[0].call.__name__ + "(" + + rule[0].call.__name__ + ": " - + ", ".join([rule.__class__.__name__ for rule in rules]) - + "\n" + + ", ".join([rule_.__class__.__name__ for rule_ in rule]) + + "), " ) - return rules + return "<ABCMessageHandler {}>".format(rules.rstrip(", ")) def __bool__(self): return len(self.rules) > 0
Fixes case when identifier is None. Attempts to find the identifier with the given arguments.
-'''Module provider for EUserv''' +''' +Module provider for EUserv + +Author: Matthias Schoettle (mschoettle), 2019 + +EUserv API Docs: https://support.euserv.com/api-doc/ +''' from __future__ import absolute_import import json @@ -151,7 +157,8 @@ class Provider(BaseProvider): def _update_record(self, identifier, rtype=None, name=None, content=None): if identifier is None: - raise Exception('No identifier for record provided') + LOGGER.info('No identifier for record provided, trying to find it...') + identifier = self._find_record_identifier(rtype, name, None) data = { 'dns_record_id': identifier } @@ -232,6 +239,17 @@ class Provider(BaseProvider): if self._get_lexicon_option('priority'): data['prio'] = int(self._get_lexicon_option('priority')) + # Find identifier of a record with the given properties. + def _find_record_identifier(self, rtype, name, content): + records = self._list_records(rtype, name, content) + + LOGGER.debug('records: %s', records) + + if len(records) == 1: + return records[0]['id'] + + raise Exception('No record identifier found') + # Get the subdomain name only for the given name. # This provider automatically suffixes the name with the domain name. def _subdomain_name(self, name):
Fix correct numerical issue with log_softmax Summary: This fixes the numerical problem in log_softmax cpu code when inputs are big but their differences are small. Pull Request resolved:
@@ -51,13 +51,14 @@ void host_softmax(Tensor output, const Tensor& input, const int64_t dim) {
         }

         if (LogSoftMax)
-          tmpsum = max_input + std::log(tmpsum);
+          tmpsum = std::log(tmpsum);
         else
           tmpsum = 1 / tmpsum;

         for (int64_t d = 0; d < dim_size; d++)
           if (LogSoftMax)
-            output_data[d * dim_stride] = input_data[d * dim_stride] - tmpsum;
+            output_data[d * dim_stride] =
+                input_data[d * dim_stride] - max_input - tmpsum;
           else
             output_data[d * dim_stride] *= tmpsum;
       }
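The point of the reordering above: algebraically `x - (m + log(sum(exp(x - m))))` equals `(x - m) - log(sum(exp(x - m)))`, but in floating point the first form rounds the large value `m + logsum` before the subtraction, losing low-order digits of each result. A rough NumPy illustration of the two orderings (not the PyTorch kernel itself):

    import numpy as np

    x = np.float32([12345.0, 12345.1])      # large inputs, small differences
    m = x.max()
    logsum = np.log(np.exp(x - m).sum())

    old_way = x - (m + logsum)              # m + logsum is rounded to float32 first
    new_way = (x - m) - logsum              # cancel the large term before using logsum

    print(old_way, new_way)                 # the second form keeps more correct digits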
Add `@final` to `NotImplementedType` and `ellipsis` These aren't subclassable at runtime. (Missed these two in my previous PR due to the fact that they're exposed in the `types` module, rather than builtins, at runtime. Also they're both a little weird to say the least, so let's see what the CI thinks...)
@@ -979,6 +979,7 @@ class property(object): def __set__(self, __obj: Any, __value: Any) -> None: ... def __delete__(self, __obj: Any) -> None: ... +@final class _NotImplementedType(Any): # type: ignore # A little weird, but typing the __call__ as NotImplemented makes the error message # for NotImplemented() much better @@ -1440,6 +1441,7 @@ def __import__( # Actually the type of Ellipsis is <type 'ellipsis'>, but since it's # not exposed anywhere under that name, we make it private here. +@final class ellipsis: ... Ellipsis: ellipsis
Gate usage on minimum supported Python version. 3.7 is recommended, but 3.5 should currently be supported.
@@ -14237,6 +14237,6 @@ def ProcessGAMCommand(args):
 if __name__ == "__main__":
   if sys.platform.startswith('win'):
     freeze_support()
-  if sys.version_info[0] < 3 or sys.version_info[1] < 7:
-    systemErrorExit(5, 'GAM requires Python 3.7 or newer. You are running %s.%s.%s. Please upgrade your Python version or use one of the binary GAM downloads.' % sys.version_info[:3])
+  if sys.version_info[0] < 3 or sys.version_info[1] < 5:
+    systemErrorExit(5, 'GAM requires Python 3.5 or newer. You are running %s.%s.%s. Please upgrade your Python version or use one of the binary GAM downloads.' % sys.version_info[:3])
   sys.exit(ProcessGAMCommand(sys.argv))
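As an aside, checking the major and minor fields separately like this rejects hypothetical future majors (a 4.0 interpreter would fail the `[1] < 5` test); comparing `sys.version_info` against a tuple sidesteps that. A sketch, not GAM code:

    import sys

    MIN_PYTHON = (3, 5)
    if sys.version_info < MIN_PYTHON:   # tuple comparison: (4, 0) passes, (3, 4) fails
        sys.exit('Python %d.%d or newer is required; you are running %d.%d.%d.'
                 % (MIN_PYTHON + sys.version_info[:3]))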
Fix named tensor build Summary: Pull Request resolved: We're missing an include for named tensors in templates/TypeDefault.h. Test Plan: - run ci [namedtensor ci]
 #include <c10/util/ArrayRef.h>
 #include <c10/util/intrusive_ptr.h>
 #include <torch/csrc/WindowsTorchApiMacro.h>
+#ifdef BUILD_NAMEDTENSOR
+#include <ATen/NamedTensorUtils.h>
+#endif

 namespace c10 {
 struct Storage;
Fixes old OpString references in upgrade2v0.9.7.py script. Forgot to fix these before! Removes OpString and WeightedOpString references.
@@ -129,7 +129,7 @@ replacements = {
 "directMLEGSTgatesets" : "directMLEGSTmodels",
 "focusedLSGSTgatesets" : "focusedLSGSTmodels",
 "gatesetFilenameOrObj" : "modelFilenameOrObj",
-"CompressedGateString" : "CompressedOpString",
+"CompressedGateString" : "CompressedCircuit",
 "gateStringToEstimate" : "circuitToEstimate",
 "translate_gatestring" : "translate_circuit",
 "process_gate_strings" : "process_op_strings",
@@ -185,8 +185,6 @@ replacements = {
 "DMGateRep_Composed" : "DMOpRep_Composed",
 "SBGateRep_Clifford" : "SBOpRep_Clifford",
 "objects.gatestring" : "objects.circuit",
-"WeightedGateString" : "WeightedOpString",
-"WeighedGateStrings" : "WeighedOpStrings",
 "compile_gatestring" : "compile_circuit",
 "dsGateStringsToUse" : "dsCircuitsToUse",
 "filter_gatestrings" : "filter_circuits",
@@ -388,7 +386,7 @@ replacements = {
 "DMGateCRep" : "DMOpCRep",
 "SVGateCRep" : "SVOpCRep",
 "SBGateCRep" : "SBOpCRep",
-"GateString" : "OpString",
+"GateString" : "Circuit",
 "gatestring" : "circuit",
 "gateString" : "circuit",
 "gate_tuple" : "oplabel_tuple",
[sync] added ItemType.Unknown see issue
@@ -196,10 +196,13 @@ class ItemType(enum.Enum): :cvar File: File type. :cvar Folder: Folder type. + :cvar Unknown: Item type could not be determined. This can occur for remote deleted + events, see issue #198. """ File = "file" Folder = "folder" + Unknown = "unknown" class ChangeType(enum.Enum): @@ -457,7 +460,8 @@ class SyncEvent(Base): # type: ignore FileSystemEvent, respectively. :param direction: Direction of the sync: upload or download. - :param item_type: The item type: file or folder. + :param item_type: The item type: file or folder or undetermined (in rare cases where + we cannot determine the type of a deleted file). :param sync_time: The time the SyncEvent was registered. :param dbx_id: A unique dropbox ID for the file or folder. Will only be set for download events which are not deletions. @@ -607,7 +611,7 @@ class SyncEvent(Base): # type: ignore item_type = ItemType.Folder change_dbid = None except NotFoundError: - item_type = None + item_type = ItemType.Unknown change_dbid = None elif isinstance(md, FolderMetadata):
Fix: typo in environment variable - MISP_FEED_MPORT_FROM_DATE to MISP_FEED_IMPORT_FROM_DATE. I found a typo in an environment variable name in the config variable parsing section. - actual: MISP_FEED_MPORT_FROM_DATE - expected: MISP_FEED_IMPORT_FROM_DATE
@@ -145,7 +145,7 @@ class MispFeed:
             "MISP_FEED_SSL_VERIFY", ["misp_feed", "ssl_verify"], config, False, True
         )
         self.misp_feed_import_from_date = get_config_variable(
-            "MISP_FEED_MPORT_FROM_DATE", ["misp_feed", "import_from_date"], config
+            "MISP_FEED_IMPORT_FROM_DATE", ["misp_feed", "import_from_date"], config
         )
         self.misp_feed_create_reports = get_config_variable(
             "MISP_FEED_CREATE_REPORTS",
Update Metal_Transmon_Cross.py orientation bug fix
@@ -239,7 +239,7 @@ Description:

        clawRotate = 0
        if connector_location == 'N':
-           clawRotate = 90
+           clawRotate = -90
        elif connector_location == 'E':
            clawRotate = 180

Update _get_in_touch.html fixed typo (errant bracket)
-<div class="alert alert-info"><span class="glyphicon glyphicon-exclamation-sign"></span> Do you need help with your analysis? Don't forget to check the (<a href='/faq'>FAQ page</a>, and <a href="mailto:{{ SUPPORT_TO_EMAIL }}" class="feedback-show">get in touch</a> if you have questions.</div> +<div class="alert alert-info"><span class="glyphicon glyphicon-exclamation-sign"></span> Do you need help with your analysis? Don't forget to check the <a href='/faq'>FAQ page</a>, and <a href="mailto:{{ SUPPORT_TO_EMAIL }}" class="feedback-show">get in touch</a> if you have questions.</div>
Support "tags" yaml tag add tags yaml tags
@@ -242,6 +242,7 @@ class DatasetMetadata:
     extra_gated_fields: Optional[Dict] = None
     extra_gated_prompt: Optional[str] = None
     license_details: Optional[str] = None
+    tags: Optional[List[str]] = None

     licenses: Optional[Union[EmptyList, List[str]]] = None  # deprecated
     languages: Optional[Union[EmptyList, List[str]]] = None  # deprecated
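For context, this field mirrors the `tags:` key in a dataset card's YAML front matter; a hypothetical card header (the other keys and all values here are purely illustrative, not from this commit) might look like:

    ---
    language:
    - en
    license: mit
    tags:
    - commit-messages
    - code
    ---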
Use creates_node in NodeToParsersPass To determine which parsers should be added to the map.
@@ -1428,9 +1428,11 @@ class NodeToParsersPass(): node type to one specific parser. """ - if isinstance(parser, Transform): - self.nodes_to_rules[parser.get_type()].append(parser) - elif isinstance(parser, List): + if creates_node(parser, follow_refs=False): + if isinstance(parser, Opt): + for alt in parser.get_type()._alternatives: + self.nodes_to_rules[alt].append(parser) + self.nodes_to_rules[parser.get_type()].append(parser) for c in parser.children():
Added 'unfocused_border' widget argument, to be used for coloring border of unfocused windows.
@@ -62,6 +62,13 @@ class TaskList(base._Widget, base.PaddingMixin, base.MarginMixin): "Method for alerting you of WM urgent " "hints (one of 'border' or 'text')" ), + ( + "unfocused_border", + None, + "Border color for unfocused windows. " + "Affects only hightlight_method 'border' and 'block'. " + "Defaults to None, which means no special color." + ), ( "max_title_width", None, @@ -322,7 +329,8 @@ class TaskList(base._Widget, base.PaddingMixin, base.MarginMixin): border = self.border text_color = border else: - border = self.background or self.bar.background + border = self.unfocused_border or (self.background or + self.bar.background) text_color = self.foreground if self.highlight_method == 'text':
Moved js logic from document ready handler to ajaxComplete handler Similar to
hqDefine("m4change/javascripts/main", function () { var initialPageData = hqImport("hqwebapp/js/initial_page_data"); - $(function () { + $(document).on("ajaxComplete", function (e, xhr, options) { + var fragment = "async/", + pageUrl = window.location.href.split('?')[0], + ajaxUrl = options.url.split('?')[0]; + if (ajaxUrl.indexOf(fragment) === -1 || !pageUrl.endsWith(ajaxUrl.replace(fragment, ''))) { + return; + } var OPTIONS = { users: initialPageData.get("users"), groups: initialPageData.get("groups"),
Create a holder for "attribute expression" constructors TN:
@@ -267,6 +267,37 @@ class Frozable(object): return wrapper +class AttributeExpression(object): + """ + Holder for information related to a ".attribute" construct in the property + DSL. + """ + + def __init__(self, constructor, args, kwargs, parameterless): + """ + :param constructor: Callable that builds the expression. + :param args: Partial list of positional arguments to pass to + `constructor`. + :param kwargs: Partial keyword arguments to pass to `constructor`. + :param bool parameterless: False if this ".attribute" requires + arguments, true otherwise. + """ + self.constructor = constructor + self.args = args + self.kwargs = kwargs + self.parameterless = parameterless + + def build(self, prefix): + return (self.constructor(prefix, *self.args, **self.kwargs) + if self.parameterless else + partial(self.constructor, prefix, *self.args, **self.kwargs)) + + def __repr__(self): + return '<AttributeExpression for {}, args={}, kwargs={}>'.format( + self.constructor, self.args, self.kwargs + ) + + class AbstractExpression(Frozable): """ An abstract expression is an expression that is not yet resolved (think: @@ -446,16 +477,7 @@ class AbstractExpression(Frozable): from langkit.expressions.structs import FieldAccess try: - klass, args, kwargs, paramless = ( - AbstractExpression.attrs_dict[attr] - ) - if paramless: - # Automatically instantiate paramless attributes - return klass(self, *args, **kwargs) - else: - # For attributes with parameters, return a partial - # instantiation. - return partial(klass, self, *args, **kwargs) + return AbstractExpression.attrs_dict[attr].build(self) except KeyError: return self.composed_attrs().get(attr, FieldAccess(self, attr)) @@ -617,8 +639,9 @@ def attr_expr_impl(name, args, kwargs, parameterless=False): """ def internal(decorated_class): - AbstractExpression.attrs_dict[name] = (decorated_class, args, kwargs, - parameterless) + AbstractExpression.attrs_dict[name] = AttributeExpression( + decorated_class, args, kwargs, parameterless + ) return decorated_class return internal
[fix|refactor]: updated the review fixes and used chunked to avoid blocking the query
import csv from django.core.management.base import BaseCommand -from corehq.apps.reports.util import get_all_users_by_domain +from corehq.apps.users.models import CommCareUser from custom.icds_reports.const import INDIA_TIMEZONE from custom.icds_reports.models import ICDSAuditEntryRecord from django.db.models import Max +from dimagi.utils.chunked import chunked class Command(BaseCommand): @@ -14,38 +15,31 @@ class Command(BaseCommand): if date is None: return 'N/A' date = date.astimezone(INDIA_TIMEZONE) - date_formatted = date.strftime(date, "%d/%m/%Y, %I:%M %p") + date_formatted = date.strftime("%d/%m/%Y, %I:%M %p") return date_formatted def handle(self, *args, **options): - users = get_all_users_by_domain('icds-cas') + users = CommCareUser.by_domain('icds-cas') usernames = [] for user in users: if user.has_permission('icds-cas', 'access_all_locations'): usernames.append(user.username) - usage_data = ICDSAuditEntryRecord.objects.filter(username__in=usernames).values('username').annotate(time=Max('time_of_use')) - headers = ["S.No", "username", "time"] - count = 1 + chunk_size = 100 + headers = ["username", "time"] rows = [headers] usernames_usage = [] + for user_chunk in chunked(usernames, chunk_size): + usage_data = ICDSAuditEntryRecord.objects.filter(username__in=list(user_chunk)).values('username').annotate(time=Max('time_of_use')) for usage in usage_data: + print(usage) row_data = [ - count, - usage.username, - self.convert_to_ist(usage.time) + usage['username'], + self.convert_to_ist(usage['time']) ] - usernames_usage.append(usage.username) + usernames_usage.append(usage['username']) rows.append(row_data) - count = count + 1 - for user in usernames: - if user not in usernames_usage: - row_data = [ - count, - user, - "N/A" - ] - rows.append(row_data) - count = count + 1 + users_not_logged_in = set(usernames) - set(usernames_usage) + rows.extend([[user, 'N/A'] for user in users_not_logged_in]) fout = open('/home/cchq/National_users_data.csv', 'w') writer = csv.writer(fout) writer.writerows(rows)
Add Neblio support to electrumx * add neblio * update daemon & deserializer and add header_hash * remove LegacyRPDaemon usage for Neblio getblock() RPC was updated in nebliod to eliminate the need for LegacyRPCDaemon support in electrumx
@@ -955,3 +955,35 @@ class Fujicoin(Coin): TX_PER_BLOCK = 1 RPC_PORT = 3776 REORG_LIMIT = 1000 + +class Neblio(Coin): + NAME = "Neblio" + SHORTNAME = "NEBL" + NET = "mainnet" + XPUB_VERBYTES = bytes.fromhex("0488b21e") + XPRV_VERBYTES = bytes.fromhex("0488ade4") + P2PKH_VERBYTE = bytes.fromhex("35") + P2SH_VERBYTES = [bytes.fromhex("70")] + WIF_BYTE = bytes.fromhex("80") + GENESIS_HASH = ('7286972be4dbc1463d256049b7471c25' + '2e6557e222cab9be73181d359cd28bcc') + DESERIALIZER = DeserializerTxTime + TX_COUNT = 23675 + TX_COUNT_HEIGHT = 22785 + TX_PER_BLOCK = 1 + RPC_PORT = 6326 + REORG_LIMIT = 1000 + HEADER_HASH = None + + @classmethod + def header_hash(cls, header): + '''Given a header return the hash.''' + if cls.HEADER_HASH is None: + import scrypt + cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32) + + version, = struct.unpack('<I', header[:4]) + if version > 6: + return super().header_hash(header) + else: + return cls.HEADER_HASH(header)
Relax the swift version check Only check the swiftlang/clang versions, don't fail the check if the Target info doesn't match.
@@ -145,7 +145,7 @@ def is_correct_swift_version(swiftc, compatibility_version):
     error_msg = "error: please select {description}".format(
         description=supported_configs[platform.system()][compatibility_version]['description']
     )
-    if swift_version != expected_swift_version:
+    if swift_version.split("\n")[0] != expected_swift_version.split("\n")[0]:
         common.debug_print("--- Version check failed ---")
         common.debug_print("Expected version:\n" + expected_swift_version)
         common.debug_print("Current version:\n" + swift_version)
installer.sh: Do not delete the local copy of the certificates. Copy, rather than move, the certificates to /var/lib/keylime/tpm_cert_store/. This way, git status does not show that all the certificates have been deleted.
@@ -605,7 +605,7 @@ echo "=========================================================================="
 if [ ! -d "/var/lib/keylime/tpm_cert_store" ]; then
     echo "Creating new tpm_cert_store"
     mkdir -p /var/lib/keylime
-    mv $KEYLIME_DIR/tpm_cert_store /var/lib/keylime/tpm_cert_store
+    cp $KEYLIME_DIR/tpm_cert_store /var/lib/keylime/tpm_cert_store
 else
     echo "Updating existing cert store"
     cp -n $KEYLIME_DIR/tpm_cert_store/* /var/lib/keylime/tpm_cert_store/
Update note on temporary status. Anything called exactly "TODO" is easily searchable.
@@ -49,9 +49,9 @@ def containerise(name, ("representation", representation["format"]), ("version", version["version"]), - # TEMPORARY, REMOVE PLEASE - # Temporarily assume "assets", as existing published assets - # won't have this new variable. + # TODO(marcus): Temporarily assume "assets". + # remove the assumption once a project new project + # is started. ("silo", version.get("silo", "assets")), ("path", version["path"]),
Move nn.codec to False to prevent wrong codec strict mode change Currently, as a followup to there is a situation where the wrong codec is changed to strict False
@@ -497,7 +497,7 @@ class RecognitionModel(pl.LightningModule):
                 else:
                     raise ValueError(f'invalid resize parameter value {self.resize}')

-                codec.strict = False
+                self.nn.codec.strict = False
             else:
                 self.train_set.dataset.encode(self.codec)
fix: Delete if DuplicateEntry and insert again Sometimes I add a fix for something. After some time, I forget the reason for it. And when something breaks, I remember why I added it in the first place. This is one of those times.
@@ -164,14 +164,19 @@ def insert_event_jobs(events, event_type): def insert_single_event(frequency, event, cron_format=None): cron_expr = {"cron_format": cron_format} if cron_format else {} - - if not frappe.db.exists("Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr }): - frappe.get_doc({ + doc = frappe.get_doc({ "doctype": "Scheduled Job Type", "method": event, "cron_format": cron_format, "frequency": frequency - }).insert() + }) + + if not frappe.db.exists("Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr }): + try: + doc.insert() + except frappe.DuplicateEntryError: + doc.delete() + doc.insert() def clear_events(all_events):
Update version 0.7.0 -> 0.7.1 Fixes * Remove dwave-networkx version upper bound to resolve version conflicts
 # ================================================================================================

 __all__ = ['__version__', '__author__', '__authoremail__', '__description__']

-__version__ = '0.7.0'
+__version__ = '0.7.1'
 __author__ = 'D-Wave Systems Inc.'
 __authoremail__ = '[email protected]'
 __description__ = 'All things D-Wave System.'
Introduce process_count_max minion configuration parameter This allows users to limit the number of processes or threads a minion will start in response to published messages, which prevents resource exhaustion in case a high number of concurrent jobs is scheduled in a short time.
@@ -1333,6 +1333,7 @@ class Minion(MinionBase): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True + @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data @@ -1365,6 +1366,15 @@ class Minion(MinionBase): self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners + + process_count_max = self.opts.get('process_count_max') + if process_count_max > 0: + process_count = len(salt.utils.minion.running(self.opts)) + while process_count >= process_count_max: + log.warn("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid'])) + yield tornado.gen.sleep(10) + process_count = len(salt.utils.minion.running(self.opts)) + # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other
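The new option is read from the minion configuration, and the `> 0` check above means a non-positive value leaves the limit disabled. A hedged example of what an operator might set in /etc/salt/minion (the value 10 is arbitrary):

    # /etc/salt/minion
    # Cap concurrent job processes/threads; values <= 0 leave the cap disabled.
    process_count_max: 10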
register item after dependencies this is a workaround to avoid circular dependencies for skills
@@ -131,8 +131,8 @@ def add_item(ctx: Context, item_type: str, item_public_id: PublicId) -> None: if not is_fingerprint_correct(package_path, item_config): # pragma: no cover raise click.ClickException("Failed to add an item with incorrect fingerprint.") - register_item(ctx, item_type, item_public_id) _add_item_deps(ctx, item_type, item_config) + register_item(ctx, item_type, item_public_id) def _add_item_deps(ctx: Context, item_type: str, item_config) -> None:
README: List the three services that EDCD *might* be providing Only Live is guaranteed to be available.
@@ -94,6 +94,34 @@ Anyone planning to send data too EDDN **MUST** comply with all the advice in that document, and the individual schema README files as applicable. It's also the best resource for those listening to the EDDN data stream. +#### EDDN endpoints + +There are up to three separate services which might be running. + +| Service | Upload | Listeners | Notes | +| ------: | :-----: |:-----------------------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Live | `https://eddn.edcd.io:4430/upload/` | `tcp://eddn.edcd.io:9500/` | The actual live service, which should always be running. It is automatically restarted every Thursday at 07:06:00 UTC | +| Beta | `https://beta.eddn.edcd.io:4431/upload/` | `tcp://beta.eddn.edcd.io:9510/` | The beta service, which should be running the current state of the `beta` branch. Usually only active when either new schemas or forthcoming code changes are being actively tested. | +| Dev | `https://dev.eddn.edcd.io:4432/upload/` | `tcp://dev.eddn.edcd.io:9520/` | The dev service, which could be running any public branch of the code *or* a private branch. | + +In general the Beta and Dev services will only be running so as to aid the core +development team in testing changes. Don't expect them to be generally +available. + +You **MUST** use the correct hostname in the Upload URLs as these are +TLS/HTTPS connections terminated on a Reverse Proxy. + +You can get away with using alternate hostnames on the Listener URLs, as +they're purely a ZeroMQ endpoint, no TLS. But don't be surprised if either +the Beta or Dev service is actually running on a different IP, so don't +chance it. + +If you need to test some of your own changes then please read +[Running this software](docs/Running-this-software.md) for how to instantiate +your own test service. It is hoped that in the future the code will allow for +easily running in a "local only" mode, not requiring any reverse proxy or +internet-valid TLS certificates. + --- ---
Reviewer fix (remove Slack integration) * Revert "Removed Slack reporting of unit tests as requested by reviewer." This reverts commit * Removed Slack reporting of unit test results at request of reviewer
@@ -185,4 +185,5 @@ class VEPTests(unittest.TestCase):

 if __name__ == '__main__':
     suite = unittest.TestLoader().discover('.')
+    results = SlackResult()
     unittest.TextTestRunner(verbosity=2).run(suite)
\ No newline at end of file
changing the name of class TestFS to _TestFS in test/test_indexing.py Avoid pytest collecting TestFS even though it contains no tests by changing the name to _TestFS.
@@ -71,7 +71,7 @@ class TestFormat(object): assert 0 -class TestFS(object): +class _TestFS(object): name = 'inmemorytestgrid' files = dict() @@ -87,9 +87,9 @@ class TestFS(object): def close(self): self.cache[self.file_id] = self.getvalue() - super(TestFS._Writer, self).close() + super(_TestFS._Writer, self).close() - def __init__(self, _id='testfs'): + def __init__(self, _id='_testFS'): self._id = _id def config(self): @@ -97,7 +97,7 @@ class TestFS(object): @classmethod def from_config(cls, config): - return TestFS(_id=config['id']) + return _TestFS(_id=config['id']) def new_file(self, mode='xb', **kwargs): _id = kwargs['_id'] @@ -380,7 +380,7 @@ class TestIndexingBase(): crawler = indexing.MasterCrawler(root=self._tmp_dir.name) crawler.tags = {'test1'} index = self.get_index_collection() - mirror = TestFS() + mirror = _TestFS() for doc in crawler.crawl(): assert 'file_id' in doc doc.pop('file_id')
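Renaming the class away from pytest's default `Test*` collection pattern is one way to do this; another common option (shown here only as an alternative sketch, not what the commit does) is to flag the helper explicitly:

    class TestFS(object):       # name still matches pytest's Test* pattern...
        __test__ = False        # ...but this attribute tells pytest not to collect it

        name = 'inmemorytestgrid'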
Don't restrict the C/C++ standard GCC has it set by default or uses an even newer version.
@@ -616,7 +616,7 @@ def c_checker(code: str, working_directory: str) -> CheckerRunFunction:
     return gcc_checker(
         code,
         ".c",
-        [os.getenv("CC", "gcc"), "-std=c99"] + INCLUDE_FLAGS,
+        [os.getenv("CC", "gcc")] + INCLUDE_FLAGS,
         working_directory=working_directory,
     )

@@ -626,7 +626,7 @@ def cpp_checker(code: str, working_directory: str) -> CheckerRunFunction:
     return gcc_checker(
         code,
         ".cpp",
-        [os.getenv("CXX", "g++"), "-std=c++0x"] + INCLUDE_FLAGS,
+        [os.getenv("CXX", "g++")] + INCLUDE_FLAGS,
         working_directory=working_directory,
     )
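To confirm which standard a given compiler defaults to (the rationale for dropping the explicit `-std` flags), the predefined macros can be queried; a hedged shell example whose exact output depends on the GCC version installed:

    echo | gcc -dM -E -x c - | grep __STDC_VERSION__     # e.g. #define __STDC_VERSION__ 201710L
    echo | g++ -dM -E -x c++ - | grep __cplusplus        # e.g. #define __cplusplus 201703L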
fix: do not update URL on saved reports "Saved reports" imply that they have already stored filters, columns etc. Applying URL from filters doesn't make sense here.
@@ -1456,8 +1456,9 @@ frappe.views.ListView = class ListView extends frappe.views.BaseList {
     on_update() {}

     update_url_with_filters() {
-        if (frappe.get_route_str() == this.page_name) {
+        if (frappe.get_route_str() == this.page_name && !this.report_name) {
             // only update URL if the route still matches current page.
+            // do not update if current list is a "saved report".
             window.history.replaceState(null, null, this.get_url_with_filters());
         }
     }
On centos7 with the old configuration Diamond was started in DEBUG mode; this starts the process the same way as on centos6 and centos5.
 Description=diamond - A system statistics collector for graphite

 [Service]
-ExecStart=/usr/bin/python /usr/bin/diamond --log-stdout --foreground
-Restart=on-abort
+ExecStart=/usr/bin/python /usr/bin/diamond -p /var/run/diamond.pid
+Type=forking
+PIDFile=/var/run/diamond.pid

 [Install]
 WantedBy=multi-user.target
Refresh ceph dashboard user role This change allows the operator to refresh the ceph dashboard admin role on multiple ceph-ansible executions. In the current state the role is set only when the user is created, and there's no way to change it if the user exists. Closes:
if {{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard ac-user-show {{ dashboard_admin_user | quote }}; then {{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard ac-user-set-password {{ dashboard_admin_user | quote }} {{ dashboard_admin_password | quote }} else - {{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard ac-user-create {{ dashboard_admin_user | quote }} {{ dashboard_admin_password | quote }} {{ 'read-only' if dashboard_admin_user_ro | bool else 'administrator' }} + {{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard ac-user-create {{ dashboard_admin_user | quote }} {{ dashboard_admin_password | quote }} fi + {{ container_exec_cmd }} ceph --cluster {{ cluster }} dashboard ac-user-set-roles {{ dashboard_admin_user | quote }} {{ 'read-only' if dashboard_admin_user_ro | bool else 'administrator' }} retries: 6 delay: 5 register: ac_result
test_management_commands: Use subTest with help tests. We remove the print statement, and instead use subTest to improve debuggability.
@@ -172,11 +172,9 @@ class TestCommandsCanStart(TestCase): @slow("Aggregate of runs dozens of individual --help tests") def test_management_commands_show_help(self) -> None: - with stdout_suppressed() as stdout: + with stdout_suppressed(): for command in self.commands: - print('Testing management command: {}'.format(command), - file=stdout) - + with self.subTest(management_command=command): with self.assertRaises(SystemExit): call_command(command, '--help') # zerver/management/commands/runtornado.py sets this to True;
Also inform groups and qtile of mouse-enter focus changes Cursor motion when follow_mouse_focus is true only focuses in the core but doesn't focus groups, so border colors do not change and occasionally other side effects appear (e.g. qtile.current_window is not changed). This fixes that.
@@ -45,6 +45,7 @@ from wlroots.wlr_types import ( from wlroots.wlr_types.cursor import WarpMode from xkbcommon import xkb +from libqtile import hook from libqtile.backend import base from libqtile.backend.wayland import keyboard, output, window, wlrq from libqtile.log_utils import logger @@ -245,7 +246,12 @@ class Core(base.Core): focus_changed = self.seat.pointer_state.focused_surface != surface self.seat.pointer_notify_enter(surface, sx, sy) if focus_changed: + hook.fire("client_mouse_enter", win) if self.qtile.config.follow_mouse_focus: + if win.group.current_window != self: + win.group.focus(win, False) + if win.group.screen and self.qtile.current_screen != win.group.screen: + self.qtile.focus_screen(win.group.screen.index, False) self.focus_window(win, surface) else: # The enter event contains coordinates, so we only need to
Create atomic Map<string, list> type This will be used to track blocks waiting to be validated because some dependent block has not yet been validated.
@@ -65,3 +65,62 @@ class ConcurrentSet: def __contains__(self, element): with self._lock: return element in self._set + + +class ConcurrentMultiMap: + """A dictionary that maps keys to lists of items. All methods are + modifications to the referenced list.""" + def __init__(self): + self._dict = dict() + self._lock = RLock() + + def __contains__(self, key): + with self._lock: + return key in self._dict + + def append(self, key, item): + """Append item to the list at key. Creates the list at key if it + doesn't exist. + """ + with self._lock: + if key in self._dict: + self._dict[key].append(item) + else: + self._dict[key] = [item] + + def set(self, key, items): + """Set key to a copy of items""" + if not isinstance(items, list): + raise ValueError("items must be a list") + with self._lock: + self._dict[key] = items.copy() + + def swap(self, key, items): + """Set key to a copy of items and return the list that was previously + stored if the key was set. If not key was set, returns an empty list. + """ + if not isinstance(items, list): + raise ValueError("items must be a list") + return_value = [] + with self._lock: + if key in self._dict: + return_value = self._dict[key] + # Make a copy since we don't want users keeping a reference that is + # outside the lock + self._dict[key] = items.copy() + return return_value + + def pop(self, key, default): + """If the key is set, remove and return the list stored at key. + Otherwise return default.""" + with self._lock: + return self._dict.pop(key, default) + + def get(self, key, default): + """If the key is set, return a copy of the list stored at key. + Otherwise return default.""" + with self._lock: + try: + return self._dict[key].copy() + except KeyError: + return default
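A quick usage sketch of the new type, using only the methods defined above (the key and item strings are invented for illustration):

    pending = ConcurrentMultiMap()

    pending.append("block-abc", "child-1")   # first append creates the list
    pending.append("block-abc", "child-2")

    print(pending.get("block-abc", []))      # ['child-1', 'child-2'] (a copy)
    waiting = pending.pop("block-abc", [])   # remove and return the stored list
    print("block-abc" in pending)            # False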
Correct typo in doc string Small typo I noticed while reading through documentation. I'm assuming the docs below are pulled from this string
@@ -610,7 +610,7 @@ class EarthLocation(u.Quantity):

     @property
     def lat(self):
-        """Longitude of the location, for the default ellipsoid."""
+        """Latitude of the location, for the default ellipsoid."""
         return self.geodetic[1]

     @property
Travis-CI Changes Now building on travis-ci.com instead of travis-ci.org.
branch | status --- | --- -master | [![Build Status](https://travis-ci.org/chaoss/augur.svg?branch=master)](https://travis-ci.org/chaoss/augur) - dev | [![Build Status](https://travis-ci.org/chaoss/augur.svg?branch=dev)](https://travis-ci.org/chaoss/augur) - gsoc-dev | [![Build Status](https://travis-ci.org/chaoss/augur.svg?branch=gsoc-dev)](https://travis-ci.org/chaoss/augur) +master | [![Build Status](https://travis-ci.com/chaoss/augur.svg?branch=master)](https://travis-ci.org/chaoss/augur) + dev | [![Build Status](https://travis-ci.com/chaoss/augur.svg?branch=dev)](https://travis-ci.org/chaoss/augur) + gsoc-dev | [![Build Status](https://travis-ci.com/chaoss/augur.svg?branch=gsoc-dev)](https://travis-ci.org/chaoss/augur) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2788/badge)](https://bestpractices.coreinfrastructure.org/projects/2788)
Potential fix for decay Attempted to fix errors in using decay in RecurrentTransferMechanisms (and LCA)
@@ -453,6 +453,9 @@ class RecurrentTransferMechanism(TransferMechanism):
         """Implement decay
         """

+        if INITIALIZING in context:
+            self.previous_input = self.variable
+
         if self.decay is not None and self.decay != 1.0:
             self.previous_input *= self.decay
Fix all ScreenManagers sharing the same transition All ScreenManagers share the same SlideTransition instance by default, which can cause some timing issues and other weirdness (and is more generally a bad idea). This PR fixes the issue.
@@ -906,7 +906,7 @@ class ScreenManager(FloatLayout): to None. ''' - transition = ObjectProperty(SlideTransition(), baseclass=TransitionBase) + transition = ObjectProperty(baseclass=TransitionBase) '''Transition object to use for animating the transition from the current screen to the next one being shown. @@ -966,6 +966,7 @@ class ScreenManager(FloatLayout): def __init__(self, **kwargs): super(ScreenManager, self).__init__(**kwargs) self.fbind('pos', self._update_pos) + self.transition = SlideTransition() def _screen_name_changed(self, screen, name): self.property('screen_names').dispatch(self)
Fail on error when UrlValidator fails on SchedulerPluginCloudFormationInfrastructure Make SchedulerPluginCloudFormationInfrastructure validation fail on S3 errors and HTTPS errors
@@ -1702,7 +1702,11 @@ class SchedulerPluginCloudFormationInfrastructure(Resource): def _register_validators(self): self._register_validator( - UrlValidator, url=self.template, fail_on_https_error=True, expected_bucket_owner=self.s3_bucket_owner + UrlValidator, + url=self.template, + fail_on_https_error=True, + fail_on_s3_error=True, + expected_bucket_owner=self.s3_bucket_owner, )
minor: Remove unused url argument. This is not required anymore after we moved to run_test model to save screenshots.
@@ -72,14 +72,9 @@ class CommonUtils { } } - async get_page(url = null) { + async get_page() { await this.ensure_browser(); - const page = await this.browser.newPage(); - if (url !== null) { - await page.goto(url); - } - return page; }
Allow an empty commodities array in commodities/3 closes
     },
     "commodities": {
         "type"        : "array",
-        "minItems"    : 1,
         "description" : "Commodities returned by the Companion API, with illegal commodities omitted",
         "items"       : {
             "type"    : "object",
SceneHierarchy : Take advantage of new PathListingWidget APIs This gives us greatly improved performance when working with large numbers of locations, and allows us to finally mirror multiple selections from the viewer into the PathListingWidget. Fixes
@@ -162,19 +162,15 @@ class SceneHierarchy( GafferUI.NodeSetEditor ) : assert( pathListing is self.__pathListing ) - paths = pathListing.getExpandedPaths() - paths = IECore.PathMatcher( [ "/" ] + [ str( path ) for path in paths ] ) with Gaffer.BlockedConnection( self._contextChangedConnection() ) : - ContextAlgo.setExpandedPaths( self.getContext(), paths ) + ContextAlgo.setExpandedPaths( self.getContext(), pathListing.getExpansion() ) def __selectionChanged( self, pathListing ) : assert( pathListing is self.__pathListing ) - paths = pathListing.getSelectedPaths() - paths = IECore.PathMatcher( [ str(p) for p in paths ] ) with Gaffer.BlockedConnection( self._contextChangedConnection() ) : - ContextAlgo.setSelectedPaths( self.getContext(), paths ) + ContextAlgo.setSelectedPaths( self.getContext(), pathListing.getSelection() ) @GafferUI.LazyMethod( deferUntilPlaybackStops = True ) def __transferExpansionFromContext( self ) : @@ -183,27 +179,15 @@ class SceneHierarchy( GafferUI.NodeSetEditor ) : if expandedPaths is None : return - p = self.__pathListing.getPath() - expandedPaths = [ p.copy().setFromString( s ) for s in expandedPaths.paths() ] with Gaffer.BlockedConnection( self.__expansionChangedConnection ) : - self.__pathListing.setExpandedPaths( expandedPaths ) + self.__pathListing.setExpansion( expandedPaths ) @GafferUI.LazyMethod( deferUntilPlaybackStops = True ) def __transferSelectionFromContext( self ) : - selection = ContextAlgo.getSelectedPaths( self.getContext() ).paths() + selection = ContextAlgo.getSelectedPaths( self.getContext() ) with Gaffer.BlockedConnection( self.__selectionChangedConnection ) : - ## \todo Qt is dog slow with large non-contiguous selections, - # so we're only mirroring single selections currently. Rewrite - # PathListingWidget so it manages selection itself using a PathMatcher - # and we can refer to the same data structure everywhere, and reenable - # mirroring of multi-selection. - if len( selection ) == 1 : - p = self.__pathListing.getPath() - selection = [ p.copy().setFromString( s ) for s in selection ] - self.__pathListing.setSelectedPaths( selection, scrollToFirst=True, expandNonLeaf=False ) - else : - self.__pathListing.setSelectedPaths( [] ) + self.__pathListing.setSelection( selection, scrollToFirst=True, expandNonLeaf=False ) GafferUI.EditorWidget.registerType( "SceneHierarchy", SceneHierarchy )
Add 'scale-up' upgrade steps for cinder-backup This patch adds the missing steps for 'scale-up' upgrade support for the cinder-backup service similar to the implementation in Related-Bug:
@@ -338,6 +338,67 @@ outputs: register: output retries: 5 until: output.rc == 0 + - name: Create hiera data to upgrade cinder_backup in a stepwise manner. + when: + - step|int == 1 + block: + - name: set cinder_backup upgrade node facts in a single-node environment + set_fact: + cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names }}" + cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names }}" + cacheable: no + when: groups['cinder_backup'] | length <= 1 + - name: set cinder_backup upgrade node facts from the limit option + set_fact: + cinder_backup_short_node_names_upgraded: "{{ cinder_backup_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}" + cinder_backup_node_names_upgraded: "{{ cinder_backup_node_names_upgraded|default([]) + [item] }}" + cacheable: no + when: + - groups['cinder_backup'] | length > 1 + - item.split('.')[0] in ansible_limit.split(',') + loop: "{{ cinder_backup_node_names }}" + - debug: + msg: "Prepare cinder_backup upgrade for {{ cinder_backup_short_node_names_upgraded }}" + - fail: + msg: > + You can't upgrade cinder_backup without + staged upgrade. You need to use the limit option in order + to do so. + when: >- + cinder_backup_short_node_names_upgraded is not defined or + cinder_backup_short_node_names_upgraded | length == 0 or + cinder_backup_node_names_upgraded is not defined or + cinder_backup_node_names_upgraded | length == 0 + - name: remove cinder_backup init container on upgrade-scaleup to force re-init + shell: | + if podman inspect cinder_backup_init_bundle &> /dev/null; then + podman rm cinder_backup_init_bundle + fi + when: cinder_backup_short_node_names_upgraded | length > 1 + - name: add the cinder_backup short name to hiera data for the upgrade. + include_role: + name: tripleo-upgrade-hiera + tasks_from: set.yml + vars: + tripleo_upgrade_key: cinder_backup_short_node_names_override + tripleo_upgrade_value: "{{ cinder_backup_short_node_names_upgraded }}" + - name: add the cinder_backup long name to hiera data for the upgrade + include_role: + name: tripleo-upgrade-hiera + tasks_from: set.yml + vars: + tripleo_upgrade_key: cinder_backup_node_names_override + tripleo_upgrade_value: "{{ cinder_backup_node_names_upgraded }}" + - name: remove the extra hiera data needed for the upgrade. + include_role: + name: tripleo-upgrade-hiera + tasks_from: remove.yml + vars: + tripleo_upgrade_key: "{{ item }}" + loop: + - cinder_backup_short_node_names_override + - cinder_backup_node_names_override + when: cinder_backup_short_node_names_upgraded | length == cinder_backup_node_names | length - name: Retag the pacemaker image if containerized when: - step|int == 3
Using Py_UNICODE to store lone surrogates makes Py3 join surrogate pairs on 16-bit Unicode platforms (Windows) when reading them back in, although we correctly processed them before. Instead, we now use the "unicode_escape" codec to store byte strings, because it can return surrogate characters (which the other codecs cannot).
@@ -1632,13 +1632,14 @@ class UnicodeNode(ConstNode):
             # lone (unpaired) surrogates are not really portable and cannot be
             # decoded by the UTF-8 codec in Py3.3
             self.result_code = code.get_py_const(py_object_type, 'ustring')
-            data_cname = code.get_pyunicode_ptr_const(self.value)
+            data_cname = code.get_string_const(
+                StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
             const_code = code.get_cached_constants_writer(self.result_code)
             if const_code is None:
                 return  # already initialised
             const_code.mark_pos(self.pos)
             const_code.putln(
-                "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
+                "%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
                     self.result_code,
                     data_cname,
                     data_cname,
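The property being relied on is that the unicode_escape codec round-trips lone surrogates, which codecs such as UTF-8 refuse to encode; a small CPython 3 sketch:

    s = '\ud800'                                  # a lone (unpaired) surrogate
    data = s.encode('unicode_escape')             # plain ASCII bytes: b'\\ud800'
    print(data.decode('unicode_escape') == s)     # True -- the surrogate survives the round trip
    # s.encode('utf-8') would raise UnicodeEncodeError ('surrogates not allowed')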
update top-level messaging in README Test Plan: did not test Reviewers: schrockn, max
 # Dagster

-Dagster is a system for building modern data applications.
+Dagster is a data orchestrator for machine learning, analytics, and ETL
+

 **Elegant programming model:** Dagster provides a set of abstractions for building self-describing, testable, and reliable data applications. It embraces the principles of functional data programming;
python2 compat solution instead of list.copy()
@@ -22,7 +22,7 @@ class SMAConfigParser(ConfigParser, object):
             return value

         if value == '':
-            return default.copy()
+            return list(default)

         value = value.split(separator)

@@ -36,7 +36,7 @@ class SMAConfigParser(ConfigParser, object):

     def getdict(self, section, option, vars=None, listseparator=",", dictseparator=":", default={}, lower=True, replace=[' ']):
         l = self.getlist(section, option, vars, listseparator, [], lower, replace)
-        output = default.copy()
+        output = list(default)
         for listitem in l:
             split = listitem.split(dictseparator)
             if len(split) > 1:
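The swap works because `list.copy()` only exists on Python 3 (it was added in 3.3), while the `list()` constructor makes a shallow copy on both Python 2 and 3. A tiny illustration:

    default = ['mp4', 'mkv']

    copied = list(default)     # valid on Python 2 and 3, and a real shallow copy
    copied.append('avi')
    print(default)             # ['mp4', 'mkv'] -- the original is untouched

    # default.copy() would raise AttributeError on Python 2 (list has no .copy there)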
[batch] fix job private jobs AFAIK, we do not need a distinct name anyway
@@ -134,7 +134,7 @@ class AzureResourceManager(CloudResourceManager):
         )

         try:
-            await self.arm_client.put(f'/deployments/{machine_name}-dplmnt', json=vm_config)
+            await self.arm_client.put(f'/deployments/{machine_name}', json=vm_config)
             log.info(f'created machine {machine_name}')
         except Exception:
             log.exception(f'error while creating machine {machine_name}')
migrations: Subscription.is_user_active denormalization - final step. With the previous two commits deployed, we're ready to use the denormalization to optimize the query. With dev environment db prepared using ./manage.py populate_db --extra-users=2000 --extra-streams=400 this takes the execution time of the query in bulk_get_subscriber_user_ids from 1.5-1.6s to 0.4-0.5s on my machine.
@@ -3161,12 +3161,10 @@ def bulk_get_subscriber_user_ids( zerver_subscription.user_profile_id FROM zerver_subscription - INNER JOIN zerver_userprofile ON - zerver_userprofile.id = zerver_subscription.user_profile_id WHERE zerver_subscription.recipient_id in %(recipient_ids)s AND zerver_subscription.active AND - zerver_userprofile.is_active + zerver_subscription.is_user_active ORDER BY zerver_subscription.recipient_id, zerver_subscription.user_profile_id
Clarify use of "program qubits" This commit adjusts the first pagraph in the "Qubits" section in two ways: * Explicitly says that the qubits described in the paragraph are "program qubits". * Moves the sentence talking about quantum registers to later in the paragraph, once the idea is introduced.
@@ -36,10 +36,10 @@ Qubits ~~~~~~ There is a quantum bit (``qubit``) type that is interpreted as a reference to a -two-level subsystem of a quantum state. Quantum registers are static -arrays of qubits that cannot be dynamically resized. The statement ``qubit name;`` -declares a reference to a quantum bit. The statement ``qreg name[size];`` or ``qubit name[size];`` declares a -quantum register with the given name identifier. The keyword ``qreg`` is included +two-level subsystem of a quantum state. The statement ``qubit name;`` +declares a reference to a quantum bit. These qubits are referred to as "program qubits" (in distinction to "physical qubits" on actual hardware; see below.) The statement ``qreg name[size];`` or ``qubit name[size];`` declares a +quantum register with the given name identifier. Quantum registers are static +arrays of qubits that cannot be dynamically resized. The keyword ``qreg`` is included for backwards compatibility and will be removed in the future. Sizes must always be constant positive integers. The label ``name[j]`` refers to a qubit of this register, where
[Relay] Add tensor rank check for `nn.instance_norm` Add tensor rank check for `nn.instance_norm`.
@@ -923,6 +923,7 @@ bool InstanceNormRel(const Array<Type>& types, int num_inputs, const Attrs& attr
   ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
+  ICHECK_GT(data->shape.size(), 2);
   const InstanceNormAttrs* param = attrs.as<InstanceNormAttrs>();
   int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size();
   ICHECK(axis >= 0 && axis < (int)data->shape.size());
Refactor ConstantValue Makes it simpler to extend
@@ -278,13 +278,7 @@ class ConstantValue(ConstantString): ) def deserialize(self, external_value): - """ - Ignores ``external_value`` and returns ``self.value`` cast from - ``self.value_data_type`` to ``self.commcare_data_type``. - """ - serializer = (serializers.get((self.value_data_type, self.commcare_data_type)) - or serializers.get((None, self.commcare_data_type))) - return serializer(self.value) if serializer else self.value + return self._get_commcare_value(self.value) def _get_commcare_value(self, case_trigger_info): """ @@ -292,7 +286,9 @@ class ConstantValue(ConstantString): Used by ``self.get_value()``. """ - return self.deserialize(self.value) + serializer = (serializers.get((self.value_data_type, self.commcare_data_type)) + or serializers.get((None, self.commcare_data_type))) + return serializer(self.value) if serializer else self.value class CasePropertyMap(CaseProperty):
[DOC] replace wrong option doc The -always option doc was accidentally wrong. Replace it with a better explanation.
@@ -14,8 +14,7 @@ The following parameters are supported: &params; --always If used, the bot won't ask if it should file the message - onto user talk page. +-always The bot won't ask for confirmation when putting a page -text: Use this text to be added; otherwise 'Test' is used
[docs] Add CI contribution instructions This PR documents the steps for introducing a new CI docker image, which we've been doing a lot lately.
@@ -174,6 +174,29 @@ The images for these containers are hosted in the `tlcpack Docker Hub <https://h and referenced in the `Jenkinsfile.j2 <https://github.com/apache/tvm/tree/main/Jenkinsfile.j2>`_. These can be inspected and run locally via standard Docker commands. +Adding a new Docker image +""""""""""""""""""""""""" + +New docker images can be added to test TVM on a variety of platforms. Here are the steps for adding +a new CI image: + +1. Define the ``docker/Dockerfile.ci_foo`` and associated scripts in ``docker/install``. Create a PR containing only these changes (no ``Jenkinsfile`` changes). + + Example: https://github.com/apache/tvm/pull/12230/files + +2. A committer verifies the image builds locally and then reviews/approves this PR. +3. A committer creates the ci-foo repos in https://hub.docker.com/u/tlcpack and https://hub.docker.com/u/tlcpackstaging. +4. Create a PR to create an ECR repo for the image in tlcpack/ci: https://github.com/tlc-pack/ci/pull/46/files +5. A committer creates and gets merged a PR to add the image to the ``Jenkinsfile`` + + Example: https://github.com/apache/tvm/pull/12369/files. + + **NOTE**: The PR must be opened from a branch in apache/tvm, not from a branch in a forked repo. + +6. A committer adds this image to the daily docker rebuild/validation run in tlcpack. + + Example: https://github.com/tlc-pack/tlcpack/pull/131 + ``ci-docker-staging`` ^^^^^^^^^^^^^^^^^^^^^
Record per-target compile workflow stats when using RscCompile ### Problem We would like to have metrics about how much of a build uses Rsc. ### Solution Record the compile workflow a target uses under `RscCompile`
@@ -455,9 +455,21 @@ class RscCompile(ZincCompile, MirroredTargetOptionMixin): on_success=ivts.update, on_failure=ivts.force_invalidate) + workflow = rsc_compile_context.workflow + + # Replica of JvmCompile's _record_target_stats logic + def record(k, v): + self.context.run_tracker.report_target_info( + self.options_scope, + compile_target, + ['compile', k], + v + ) + record('workflow', workflow.value) + record('execution_strategy', self.execution_strategy) + # Create the rsc job. # Currently, rsc only supports outlining scala. - workflow = rsc_compile_context.workflow workflow.resolve_for_enum_variant({ 'zinc-only': lambda: None, 'zinc-java': lambda: None,
[ckcore] Show help for alias command * [ckcore] Show help for alias command #fixes 221
@@ -481,6 +481,9 @@ class HelpCommand(CLISource): return "Shows available commands, as well as help for any specific command." async def parse(self, arg: Optional[str] = None, **env: str) -> Source: + def show_cmd(cmd: CLIPart) -> str: + return f"{cmd.name} - {cmd.info()}\n\n{cmd.help()}" + if not arg: all_parts = sorted(self.parts.values(), key=lambda p: p.name) parts = (p for p in all_parts if isinstance(p, (CLISource, CLICommand))) @@ -496,8 +499,11 @@ class HelpCommand(CLISource): f"and chain multiple commands using the semicolon (;)." ) elif arg and arg in self.parts: - cmd = self.parts[arg] - result = f"{cmd.name} - {cmd.info()}\n\n{cmd.help()}" + result = show_cmd(self.parts[arg]) + elif arg and arg in self.aliases: + alias = self.aliases[arg] + explain = f"{arg} is an alias for {alias}\n\n" + result = explain + show_cmd(self.parts[alias]) else: result = f"No command found with this name: {arg}"
Getting help at conda-forge page edited Made a few corrections to this page.
-How to get help in conda-forge +How to get help at conda-forge ============================== You could connect with us via **Gitter, GitHub Issues or Mailing List**. @@ -7,7 +7,7 @@ We would be happy to hear from you! Gitter ------------------- -If you are just starting out with conda-forge and/or are completely lost, we recommend to get in touch through `Gitter <https://gitter.im/conda-forge/conda-forge.github.io>`__ and our community members will direct +If you are just starting out with conda-forge and/or feel completely lost, we recommend getting in touch through `Gitter <https://gitter.im/conda-forge/conda-forge.github.io>`__. Our community members will direct you to the proper documentation and/or help you via the chat. GitHub issues
Remove callback parameter from custom.Button Its behaviour was strange. Removing and adding an event handler every time a message is sent is not a good idea and it would just do more harm than good.
@@ -37,23 +37,12 @@ class ButtonMethods(UpdateMethods): for row in buttons: current = [] for button in row: + if isinstance(button, custom.MessageButton): + button = button.button + inline = custom.Button._is_inline(button) is_inline |= inline is_normal |= not inline - if isinstance(button, custom.Button): - if button.callback: - self.remove_event_handler( - button.callback, events.CallbackQuery) - - self.add_event_handler( - button.callback, - events.CallbackQuery(data=getattr( - button.button, 'data', None)) - ) - - button = button.button - elif isinstance(button, custom.MessageButton): - button = button.button if button.SUBCLASS_OF_ID == 0xbad74a3: # 0xbad74a3 == crc32(b'KeyboardButton')
Fix typo Summary: Pull Request resolved:
@@ -158,7 +158,7 @@ public: * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } * > * > static auto registry = c10::RegisterOperators() - * > .op("my_op", c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() * > .kernel<decltype(my_kernel_cpu), &my_kernel_cpu>(CPUTensorId())); */ template<class FuncType, FuncType* kernel_func> @@ -179,7 +179,7 @@ public: * > namespace { Tensor my_kernel_cpu(Tensor a, Tensor b) {...} } * > * > static auto registry = c10::RegisterOperators() - * > .op("my_op", c10::RegisterOperators() + * > .op("my_op", c10::RegisterOperators::options() * > .catchAllKernel<decltype(my_kernel_cpu), &my_kernel_cpu>()); */ template<class FuncType, FuncType* kernel_func> @@ -385,7 +385,7 @@ public: * * > static auto registry = c10::RegisterOperators() * > .op("my_op", c10::RegisterOperators::options() - * > .kernel([] (Tensor a, Tensor b) {...})); + * > .catchAllKernel([] (Tensor a, Tensor b) {...})); * */ template<class FuncType>
complexcircle.py: fix normalization issues The numpy.sign behavior for complex variables is different from Matlab's, which projects a complex vector element-wise onto the complex unit circle.
@@ -65,7 +65,7 @@ class ComplexCircle(Manifold): return y def retr(self, z, v): - return np.sign(z + v) + return self._normalize(z + v) def log(self, x1, x2): v = self.proj(x1, x2 - x1) @@ -77,7 +77,7 @@ class ComplexCircle(Manifold): def rand(self): n = self._n - return np.sign(rnd.randn(n) + 1j * rnd.randn(n)) + return self._normalize(rnd.randn(n) + 1j * rnd.randn(n)) def randvec(self, z): v = rnd.randn(self._n) * (1j * z) @@ -87,6 +87,11 @@ class ComplexCircle(Manifold): return self.proj(x2, d) def pairmean(self, z1, z2): - return np.sign(z1 + z2) + return self._normalize(z1 + z2) - # XXX: Do we need to supply vec(x, u_mat) and mat(x, u_vec)? + @staticmethod + def _normalize(x): + """ + Normalize the entries of x element-wise by their absolute values. + """ + return x / np.abs(x)
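A standalone sketch of the element-wise normalization the diff above switches to (illustrative only, not taken from the pymanopt source): dividing by the modulus projects every complex entry onto the unit circle, independent of how numpy's sign treats complex input.

import numpy as np

def normalize(x):
    # Project each complex entry onto the unit circle: x_k / |x_k|.
    # Assumes no entry is exactly zero.
    return x / np.abs(x)

z = np.random.randn(4) + 1j * np.random.randn(4)
w = normalize(z)
print(np.allclose(np.abs(w), 1.0))  # True: every entry has unit modulus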
Fix comments for metrics_to_scalars metrics_to_scalars can return non-float values, such as int or complex, depending on the dtype of the tensor.
@@ -27,13 +27,13 @@ def metrics_to_scalars(metrics: Any) -> Any: Raises: MisconfigurationException: - If ``value`` contains multiple elements, hence preventing conversion to ``float`` + If tensors inside ``metrics`` contains multiple elements, hence preventing conversion to a scalar. """ def to_item(value: torch.Tensor) -> numbers.Number: if value.numel() != 1: raise MisconfigurationException( - f"The metric `{value}` does not contain a single element" f" thus it cannot be converted to float." + f"The metric `{value}` does not contain a single element, thus it cannot be converted to a scalar." ) return value.item()
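A small illustration of the point above (assumes PyTorch is installed; not taken from the Lightning repo): Tensor.item() returns a Python scalar whose type follows the tensor dtype, so the result is not necessarily a float.

import torch

print(type(torch.tensor(3).item()))       # <class 'int'>
print(type(torch.tensor(3.0).item()))     # <class 'float'>
print(type(torch.tensor(3 + 4j).item()))  # <class 'complex'>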
Trigger warning/error for deprecated Target method overrides. ### Problem A previous change deprecated two methods that are intended to be overridden by `Target` subclasses. But because we don't detect the override, the warning will never trigger. ### Solution Detect the override by looking for a non-equal `__func__` object.
@@ -617,6 +617,9 @@ class Target(AbstractTarget): :param Payload payload: The post-Target.__init__() Payload object. :yields: AddressSpec strings representing dependencies of this target. """ + + if cls.compute_injectable_specs.__func__ is not Target.compute_injectable_specs.__func__: + cls._compute_injectable_specs_warn_or_error() cls._validate_target_representation_args(kwargs, payload) # N.B. This pattern turns this method into a non-yielding generator, which is helpful for # subclassing. @@ -641,6 +644,8 @@ class Target(AbstractTarget): :param Payload payload: The post-Target.__init__() Payload object. :yields: AddressSpec strings representing dependencies of this target. """ + if cls.compute_dependency_specs.__func__ is not Target.compute_dependency_specs.__func__: + cls._compute_dependency_specs_warn_or_error() cls._validate_target_representation_args(kwargs, payload) # N.B. This pattern turns this method into a non-yielding generator, which is helpful for # subclassing. @@ -658,6 +663,11 @@ class Target(AbstractTarget): :param Payload payload: The post-Target.__init__() Payload object. :yields: AddressSpec strings representing dependencies of this target. """ + cls._compute_injectable_specs_warn_or_error() + cls.compute_injectable_address_specs(kwargs=kwargs, payload=payload) + + @classmethod + def _compute_injectable_specs_warn_or_error(cls): warn_or_error( removal_version="1.27.0.dev0", deprecated_entity_description="Target.compute_injectable_specs()", @@ -665,7 +675,6 @@ class Target(AbstractTarget): "before. This change is to prepare for Pants eventually supporting file system specs, " "e.g. `./pants cloc foo.py`. In preparation, we renamed `Spec` to `AddressSpec`." ) - cls.compute_injectable_address_specs(kwargs=kwargs, payload=payload) @classmethod def compute_dependency_specs(cls, kwargs=None, payload=None): @@ -684,6 +693,11 @@ class Target(AbstractTarget): :param Payload payload: The post-Target.__init__() Payload object. :yields: AddressSpec strings representing dependencies of this target. """ + cls._compute_dependency_specs_warn_or_error() + cls.compute_dependency_address_specs(kwargs=kwargs, payload=payload) + + @classmethod + def _compute_dependency_specs_warn_or_error(cls): warn_or_error( removal_version="1.27.0.dev0", deprecated_entity_description="Target.compute_dependency_specs()", @@ -691,7 +705,6 @@ class Target(AbstractTarget): "before. This change is to prepare for Pants eventually supporting file system specs, " "e.g. `./pants cloc foo.py`. In preparation, we renamed `Spec` to `AddressSpec`." ) - cls.compute_dependency_address_specs(kwargs=kwargs, payload=payload) @property def dependencies(self):
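A minimal sketch of the override-detection idiom used in the diff above (class names are hypothetical): for a classmethod, cls.method.__func__ is the underlying function object, so an identity check against the base class's function reveals whether a subclass supplied its own implementation.

class Base:
    @classmethod
    def compute(cls):
        pass

class Overrides(Base):
    @classmethod
    def compute(cls):
        pass

class Inherits(Base):
    pass

# The identity differs only when the subclass defines its own function.
print(Overrides.compute.__func__ is Base.compute.__func__)  # False: overridden
print(Inherits.compute.__func__ is Base.compute.__func__)   # True: inherited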
Fix plot rotor - disk_elements Now the visual representation of disk elements is based on the shaft elements' length. The objective here is to avoid large distortions between short and long shafts.
@@ -159,7 +159,7 @@ class DiskElement(Element): # fmt: on return G - def patch(self, position, ax, bk_ax): + def patch(self, position, length, ax, bk_ax): """Disk element patch. Patch that will be used to draw the disk element. Parameters @@ -170,6 +170,8 @@ class DiskElement(Element): Axes in which the plot will be drawn. position : float Position in which the patch will be drawn. + length : float + minimum length of shaft elements Returns ------- ax : matplotlib axes @@ -178,6 +180,7 @@ class DiskElement(Element): Returns the axes object with the plot. """ zpos, ypos = position + le = length / 8 D = ypos * 2 hw = 0.02 @@ -207,13 +210,13 @@ class DiskElement(Element): # bokeh plot - plot disks elements bk_disk_points_u = [ - [zpos, zpos + hw, zpos - hw], - [ypos, ypos + D, ypos + D] + [zpos, zpos + le, zpos - le], + [ypos, ypos*4, ypos*4] ] bk_disk_points_l = [ - [zpos, zpos + hw, zpos - hw], - [-ypos, -(ypos + D), -(ypos + D)] + [zpos, zpos + le, zpos - le], + [-ypos, -ypos*4, -ypos*4] ] bk_ax.patch( @@ -232,16 +235,16 @@ class DiskElement(Element): color=bokeh_colors[9] ) bk_ax.circle( - x=zpos, - y=ypos + D, - radius=0.02, + x=bk_disk_points_u[0][0], + y=bk_disk_points_u[1][1], + radius=le, fill_alpha=1, color=bokeh_colors[9] ) bk_ax.circle( - x=zpos, - y=-(ypos + D), - radius=0.02, + x=bk_disk_points_l[0][0], + y=bk_disk_points_l[1][1], + radius=le, fill_alpha=1, color=bokeh_colors[9] )
Append correct type to transition_times Previously, when the platform did not need to move, a datetime object was appended, leading to comparison errors in later platform.move calls.
@@ -68,7 +68,8 @@ def create_smooth_transition_models(initial_state, x_coords, y_coords, times, tu a = np.arctan2(vy, vx) # initial bearing if dx == 0 and dy == 0 and vx == 0 and vy == 0: # if at destination with 0 speed, stay - transition_times.append(time) + transition_times.append( + timedelta(seconds=(time - times[times.index(time) - 1]).total_seconds())) transition_models.append(CombinedLinearGaussianTransitionModel((ConstantVelocity(0), ConstantVelocity(0)))) continue
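For context (a generic illustration, not taken from the Stone Soup API): subtracting two datetimes yields a timedelta, and timedeltas only compare against other timedeltas, which is why appending a raw datetime where a timedelta was expected breaks later comparisons.

from datetime import datetime, timedelta

t0 = datetime(2021, 1, 1, 12, 0, 0)
t1 = datetime(2021, 1, 1, 12, 0, 30)
print(t1 - t0)                                       # 0:00:30 (a timedelta)
print(timedelta(seconds=30) < timedelta(minutes=1))  # True: like types compare fine
# timedelta(seconds=30) < t1  # TypeError: can't compare timedelta with datetime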
invite_users: Discard "#" in private streams. Added an if statement which renders "#" only when it's not "invite only". Fixes part of
{{#if (eq name ../notifications_stream)}} #{{name}} <i>({{t 'Receives new stream announcements' }})</i> {{else}} - #{{name}} + {{#if (not invite_only)}}#{{/if}}{{name}} {{/if}} </label> {{/each}}
fix init.py HG-- branch : feature/microservices
@@ -22,7 +22,8 @@ class Profile(BaseProfile): pattern_more = [ (r"---?More---?", " "), (r"--- \[Space\] Next page, \[Enter\] Next line, \[A\] All, Others to exit ---", " "), - (r"Startup configuration file name", "\n") + (r"Are you sure to delete non-active file", "Y\n\n"), + (r"Startup configuration file name", "\n\n\n") ] config_volatile = ["\x08+"] rogue_chars = ["\r"]
Update io_examples.ipynb Explicit about which dataset
"cell_type": "markdown", "metadata": {}, "source": [ - "We are using the Northeastern University Oracle SigMF recordings found [here](http://www.genesys-lab.org/oracle)" + "We are using the Northeastern University Oracle SigMF recordings found [here](http://www.genesys-lab.org/oracle) (Dataset #2)" ] }, {
Quality: Autoformat generated code twice on Windows. * Otherwise differences remained; it seems something is not fully stable in that way, but this helps.
@@ -862,6 +862,11 @@ def makeHelpersBinaryOperation(operand, op_code): autoformat(filename_c, None, True) autoformat(filename_h, None, True) + # No idea why, but this helps. + if os.name == "nt": + autoformat(filename_c, None, True) + autoformat(filename_h, None, True) + def writeline(output, *args): if not args:
Update usage.rst fix bugs after master merge
@@ -18,6 +18,8 @@ Data are returned as a collection of measurements in a :class:`xarray.Dataset`: ds +.. currentmodule:: xarray + Fetched data are returned as a 1D array collection of measurements. If you prefer to work with a 2D array collection of vertical profiles, simply transform the dataset with the :class:`xarray.Dataset` accessor method :meth:`Dataset.argo.point2profile`: .. ipython:: python
Append to the prescribing table during conversion This replaces the operation in openprescribing-data/runner.py at
@@ -134,12 +134,49 @@ class Command(BaseCommand): # Create table at raw_nhs_digital_data table_ref = create_temporary_data_source(uri) - temp_table = self.write_aggregated_data_to_temp_table(table_ref, date) + self.append_aggregated_data_to_prescribing_table( + table_ref, date) + temp_table = self.write_aggregated_data_to_temp_table( + table_ref, date) converted_uri = uri[:-4] + '_formatted-*.csv.gz' copy_table_to_gcs(temp_table, converted_uri) return download_from_gcs(converted_uri, local_path) - def write_aggregated_data_to_temp_table(self, source_table_ref, date): + def append_aggregated_data_to_prescrbing_table( + self, source_table_ref, date): + query = """ + SELECT + Area_Team_Code AS sha, + LEFT(PCO_Code, 3) AS pct, + Practice_Code AS practice, + BNF_Code AS bnf_code, + BNF_Description AS bnf_name, + SUM(Items) AS items, + SUM(NIC) AS net_cost, + SUM(Actual_Cost) AS actual_cost, + SUM(Quantity * Items) AS quantity, + TIMESTAMP('%s') AS month, + FROM %s + WHERE Practice_Code NOT LIKE '%%998' -- see issue #349 + GROUP BY + bnf_code, bnf_name, pct, + practice, sha + """ % (date.replace('_', '-'), source_table_ref) + client = bigquery.client.Client(project='ebmdatalab') + dataset = client.dataset('hscic') + table = dataset.table( + name='prescribing') + job = client.run_async_query("create_%s_%s" % ( + table.name, int(time.time())), query) + job.destination = table + job.use_query_cache = False + job.write_disposition = 'WRITE_APPEND' + job.allow_large_results = True + wait_for_job(job) + return table + + def write_aggregated_data_to_temp_table( + self, source_table_ref, date): query = """ SELECT LEFT(PCO_Code, 3) AS pct_id,
[DOCKER] Fix git clone failure. git clone --branch=xxx won't take a hash, switch from the hash to the tag that represents that hash.
@@ -19,8 +19,7 @@ make -j4 sdk && make -j4 sdk_install_pkg ./linux/installer/bin/sgx_linux_x64_sdk*.bin --prefix /opt cd - -tag=6098af # v1.0.5 -git clone --branch=$tag --depth=1 https://github.com/baidu/rust-sgx-sdk.git /opt/rust-sgx-sdk +git clone --branch=v1.0.5 --depth=1 https://github.com/baidu/rust-sgx-sdk.git /opt/rust-sgx-sdk cd /opt/rust-sgx-sdk curl -s -S -L 'https://gist.githubusercontent.com/nhynes/37164039c5d3f33aa4f123e4ba720036/raw/b0de575fe937231799930764e76c664b92975163/rust-sgx-sdk.diff' | git apply cd -
Fix Imap widget The `poll()` method was modifying `self.text` in place before returning the value. However, `self.text` is a property whose setter sets the layout text. The widget should only update the text value by returning the value from `poll()`.
@@ -83,11 +83,11 @@ class ImapWidget(base.ThreadPoolText): def poll(self): im = imaplib.IMAP4_SSL(self.server, 993) if self.password == 'Gnome Keyring Error': - self.text = 'Gnome Keyring Error' + text = 'Gnome Keyring Error' else: im.login(self.user, self.password) status, response = im.status(self.mbox, '(UNSEEN)') - self.text = response[0].decode() - self.text = self.label + ': ' + re.sub(r'\).*$', '', re.sub(r'^.*N\s', '', self.text)) + text = response[0].decode() + text = self.label + ': ' + re.sub(r'\).*$', '', re.sub(r'^.*N\s', '', text)) im.logout() - return self.text + return text
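A toy sketch of the pattern being fixed (hypothetical class, not qtile code): when text is a property whose setter triggers a redraw, poll() should build a local value and return it rather than assigning to self.text partway through.

class Widget:
    def __init__(self):
        self._text = ""

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        self._text = value
        self.redraw()  # side effect: setting the property redraws the layout

    def redraw(self):
        print("redraw:", self._text)

    def poll(self):
        value = "INBOX: 3"  # build the new value locally, no side effects
        return value

w = Widget()
w.text = w.poll()  # exactly one redraw, performed by the caller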
Fixes a bug in Cython state-vector term calcs (fixes segfaults) Piping down dim=d^2 to the Cython sv_prs_as_polys(...) [from SV_prs_as_polys(...)] instead of d would cause occasional segfaults. Note that the GateTermCalculator's "dim" is d^2 whereas the SV functions consider just d to be the relevant dimension. This issue is now fixed.
@@ -1220,11 +1220,13 @@ def SV_prs_as_polys(calc, rholabel, elabels, gatestring, comm=None, memLimit=Non for jj,indx in enumerate(inds): E_cindices[ii][jj] = <INT>indx + #Note: term calculator "dim" is the full density matrix dim + stateDim = int(round(np.sqrt(calc.dim))) #Call C-only function (which operates with C-representations only) cdef vector[PolyCRep*] polys = sv_prs_as_polys( cgatestring, rho_term_creps, gate_term_creps, E_term_creps, - E_cindices, numEs, calc.max_order, mpo, mpv, calc.dim, <bool>fastmode) + E_cindices, numEs, calc.max_order, mpo, mpv, stateDim, <bool>fastmode) return [ PolyRep_from_allocd_PolyCRep(polys[i]) for i in range(<INT>polys.size()) ]
Add postgres-only note to nulls= param of Ordering node. Refs [skip ci]
@@ -1322,7 +1322,7 @@ Query-builder :param node: A column-like object. :param str direction: ASC or DESC :param str collation: Collation name to use for sorting. - :param str nulls: Sort nulls (FIRST or LAST). + :param str nulls: Sort nulls (FIRST or LAST, Postgres-only). Represent ordering by a column-like object.
remove leftover task cancel The task is no longer created in the cog
@@ -464,5 +464,4 @@ class DocCog(commands.Cog): async def cog_unload(self) -> None: """Clear scheduled inventories, queued symbols and cleanup task on cog unload.""" self.inventory_scheduler.cancel_all() - self.init_refresh_task.cancel() await self.item_fetcher.clear()
fix: handle case where alexa guard is disabled This also changes the entity parsing code to be extremely defensive so that this function won't break startup no matter what Amazon returns. Fixes
@@ -206,14 +206,18 @@ async def get_entity_data( login_obj: AlexaLogin, entity_ids: List[Text] ) -> AlexaEntityData: """Get and process the entity data into a more usable format.""" - raw = await AlexaAPI.get_entity_state(login_obj, entity_ids=entity_ids) + entities = {} - device_states = raw.get("deviceStates") + if entity_ids: + raw = await AlexaAPI.get_entity_state(login_obj, entity_ids=entity_ids) + device_states = raw.get("deviceStates", []) if isinstance(raw, dict) else None if device_states: for device_state in device_states: - entity_id = device_state["entity"]["entityId"] + entity_id = device_state.get("entity", {}).get("entityId") + if entity_id: entities[entity_id] = [] - for cap_state in device_state["capabilityStates"]: + cap_states = device_state.get("capabilityStates", []) + for cap_state in cap_states: entities[entity_id].append(json.loads(cap_state)) return entities
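A tiny illustration of the defensive access style adopted above (the payload is made up): chaining dict.get with empty defaults tolerates missing keys instead of raising KeyError.

state = {"entity": {}}                          # entityId missing in this payload
print(state.get("entity", {}).get("entityId"))  # None -- no exception
# state["entity"]["entityId"]                   # the direct form would raise KeyError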
MNT: move more of request_suspend into coro Should eliminate race conditions.
@@ -1020,8 +1020,13 @@ class RunEngine: explanation of why the suspension has been requested """ + + print("Suspending....To get prompt hit Ctrl-C twice to pause.") + ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + print("Suspension occurred at {}.".format(ts)) + + async def _request_suspend(pre_plan, post_plan, justification): if not self.resumable: - # TODO fix race condition here! print("No checkpoint; cannot suspend.") print("Aborting: running cleanup and marking " "exit_status as 'abort'...") @@ -1031,17 +1036,10 @@ class RunEngine: was_paused = self._state == 'paused' self._state = 'aborting' if was_paused: - with self._state_lock: - self._exception = RequestAbort() self._resume_task() else: - self.loop.call_soon_threadsafe(self._task.cancel) - - print("Suspending....To get prompt hit Ctrl-C twice to pause.") - ts = datetime.now().strftime('%Y-%m-%d %H:%M:%S') - print("Suspension occurred at {}.".format(ts)) + self._task.cancel() - async def _request_suspend(pre_plan, post_plan, justification): if justification is not None: print("Justification for this suspension:\n%s" % justification) self._record_interruption('suspend') @@ -1306,6 +1304,7 @@ class RunEngine: # side of the yield in the plan will be moved past resp = self._response_stack.pop() # if any status tasks have failed, grab the exceptions. + # give priority to things pushed in from outside with self._state_lock: if self._exception is not None: stashed_exception = self._exception
Add 'scale-up' upgrade steps for manila This patch adds the missing steps for 'scale-up' upgrade support for the manila service similar to the implementation in Related-Bug: Depends-On:
@@ -318,6 +318,67 @@ outputs: register: output retries: 5 until: output.rc == 0 + - name: Create hiera data to upgrade manila_share in a stepwise manner. + when: + - step|int == 1 + block: + - name: set manila_share upgrade node facts in a single-node environment + set_fact: + manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names }}" + manila_share_node_names_upgraded: "{{ manila_share_node_names }}" + cacheable: no + when: groups['manila_share'] | length <= 1 + - name: set manila_share upgrade node facts from the limit option + set_fact: + manila_share_short_node_names_upgraded: "{{ manila_share_short_node_names_upgraded|default([]) + [item.split('.')[0]] }}" + manila_share_node_names_upgraded: "{{ manila_share_node_names_upgraded|default([]) + [item] }}" + cacheable: no + when: + - groups['manila_share'] | length > 1 + - item.split('.')[0] in ansible_limit.split(',') + loop: "{{ manila_share_node_names }}" + - debug: + msg: "Prepare manila_share upgrade for {{ manila_share_short_node_names_upgraded }}" + - fail: + msg: > + You can't upgrade manila_share without + staged upgrade. You need to use the limit option in order + to do so. + when: >- + manila_share_short_node_names_upgraded is not defined or + manila_share_short_node_names_upgraded | length == 0 or + manila_share_node_names_upgraded is not defined or + manila_share_node_names_upgraded | length == 0 + - name: remove manila_share init container on upgrade-scaleup to force re-init + shell: | + if podman inspect manila_share_init_bundle &> /dev/null; then + podman rm manila_share_init_bundle + fi + when: manila_share_short_node_names_upgraded | length > 1 + - name: add the manila_share short name to hiera data for the upgrade. + include_role: + name: tripleo-upgrade-hiera + tasks_from: set.yml + vars: + tripleo_upgrade_key: manila_share_short_node_names_override + tripleo_upgrade_value: "{{ manila_share_short_node_names_upgraded }}" + - name: add the manila_share long name to hiera data for the upgrade + include_role: + name: tripleo-upgrade-hiera + tasks_from: set.yml + vars: + tripleo_upgrade_key: manila_share_node_names_override + tripleo_upgrade_value: "{{ manila_share_node_names_upgraded }}" + - name: remove the extra hiera data needed for the upgrade. + include_role: + name: tripleo-upgrade-hiera + tasks_from: remove.yml + vars: + tripleo_upgrade_key: "{{ item }}" + loop: + - manila_share_short_node_names_override + - manila_share_node_names_override + when: manila_share_short_node_names_upgraded | length == manila_share_node_names | length - name: Retag the pacemaker image if containerized when: - step|int == 3
Updates Dependency Manager Files In CODEOWNERS Updates Pipfile and pipenv lock file to poetry equivalents in CODEOWNERS.
@@ -35,7 +35,8 @@ Dockerfile @MarkKoz @Akarys42 @Den4200 @jb3 docker-compose.yml @MarkKoz @Akarys42 @Den4200 @jb3 # Tools -Pipfile* @Akarys42 +poetry.lock @Akarys42 +pyproject.toml @Akarys42 # Statistics bot/async_stats.py @jb3
remove minimum 32-bit restriction Change minimum 32-bit restriction for floating point types to 8-bit. This change is to enable reduced precision types that may use vector operations under the hood (cases where #lanes > 1, such as half4).
@@ -127,7 +127,7 @@ inline void TVMArrayFree_(TVMArray* arr) { inline void VerifyType(int dtype_code, int dtype_bits, int dtype_lanes) { CHECK_GE(dtype_lanes, 1); if (dtype_code == kDLFloat) { - CHECK_EQ(dtype_bits % 32, 0); + CHECK_EQ(dtype_bits % 8, 0); } else { CHECK_EQ(dtype_bits % 8, 0); }
Cleanup BUILD_DOCS cmake section Summary: Breaking out of cc mingzhe09088 Yangqing Pull Request resolved:
@@ -306,7 +306,7 @@ if(BUILD_DOCS) if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/docs) file(REMOVE_RECURSE ${CMAKE_CURRENT_BINARY_DIR}/docs) - endif (EXISTS ${CMAKE_CURRENT_BINARY_DIR}/docs) + endif() file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/docs) configure_file(${DOXYGEN_C_IN} ${DOXYGEN_C_OUT} @ONLY) @@ -323,10 +323,10 @@ if(BUILD_DOCS) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} COMMENT "Generating Python API documentation with Doxygen" VERBATIM) - else (DOXYGEN_FOUND) + else() message(FATAL_ERROR "Doxygen needs to be installed to generate the documentation") - endif (DOXYGEN_FOUND) -endif (BUILD_DOCS) + endif() +endif() # ---[ CMake related files # Uninistall option.
Change link to slack in docs to slackin Summary: Title Test Plan: yarn develop Reviewers: schrockn, max
@@ -53,7 +53,10 @@ export const Header = forwardRef(({ onMenuClick, sidebarOpened }, ref) => { </div> </div> <Menu sx={styles.socialIcons(showingSearch)}> - <ExternalLink href="https://dagster.slack.com" sx={styles.externalLink}> + <ExternalLink + href="https://dagster-slackin.herokuapp.com/" + sx={styles.externalLink} + > <img src={slackIcon} height={25} /> </ExternalLink> <ExternalLink
[BUG] `sklearn 1.2.0` compatibility - decouple `sklearn.base._pprint` The benchmarking module was still calling the private method `sklearn.base._pprint`, which has moved in `1.2.0`. This is solved by replacing `_pprint` with a `repr` call, which may make things a bit less pretty but still leaves them functional (and compatible across `sktime` versions)
@@ -6,7 +6,7 @@ __author__ = ["mloning", "sajaysurya"] import pandas as pd from joblib import dump, load -from sklearn.base import ClassifierMixin, RegressorMixin, _pprint +from sklearn.base import ClassifierMixin, RegressorMixin from sklearn.model_selection import GridSearchCV, RandomizedSearchCV from sklearn.pipeline import Pipeline @@ -176,10 +176,7 @@ class BaseStrategy(BaseEstimator): return "%s(%s(%s))" % ( strategy_name, estimator_name, - _pprint( - self.get_params(deep=False), - offset=len(strategy_name), - ), + repr(self.get_params(deep=False)), )
Update cloud-billing.rst "contact sales" was not properly linked
@@ -13,7 +13,7 @@ From February 2, 2023, Mattermost Cloud Professional and Enterprise plans are of 2. Fill in your payment information. 3. Select **Switch to annual billing** -To upgrade to a Professional plan within Mattermost, select **View plans** in the header or in the System Console. For Enterprise plans, you need to [contact sales](https://mattermost.com/contact-sales/). +To upgrade to a Professional plan within Mattermost, select **View plans** in the header or in the System Console. For Enterprise plans, you need to `contact sales </https://mattermost.com/contact-sales/>`__. When you upgrade, you'll need to indicate the number of user seats you have - these are active users who'll be using Mattermost. This can't be less than the current total number of active users in your workspace but you can buy more seats to accommodate active user growth. You'll be billed immediately for the cost of the annual subscription.
Update __init__.py In the case of a DELETE action we don't pass the value if we want to remove the attribute. There are more cases, but they are left for you to handle.
@@ -120,6 +120,8 @@ class Item(BaseModel): def validate_no_empty_key_values(self, attribute_updates, key_attributes): for attribute_name, update_action in attribute_updates.items(): action = update_action.get("Action") or "PUT" # PUT is default + if action == "DELETE": + continue new_value = next(iter(update_action["Value"].values())) if action == "PUT" and new_value == "" and attribute_name in key_attributes: raise EmptyKeyAttributeException
updates readme request kittens
@@ -69,8 +69,8 @@ API | Description | Auth | HTTPS | CORS | | [PlaceGOAT](https://placegoat.com/) | Placeholder goat images | No | Yes | Unknown | | [RandomCat](https://aws.random.cat/meow) | Random pictures of cats | No | Yes | Yes | | [RandomDog](https://random.dog/woof.json) | Random pictures of dogs | No | Yes | Yes | -| [Request Kittens](https://github.com/joshwcomeau/RequestKittens) | Provides a way to return various cat images | No | Yes | Unknown | | [RandomFox](https://randomfox.ca/floof/) | Random pictures of foxes | No | Yes | No | +| [Request Kittens](https://github.com/joshwcomeau/RequestKittens) | Provides a way to return various cat images | No | Yes | Unknown | | [RescueGroups](https://userguide.rescuegroups.org/display/APIDG/API+Developers+Guide+Home) | Adoption | No | Yes | Unknown | | [Shibe.Online](http://shibe.online/) | Random pictures of Shibu Inu, cats or birds | No | Yes | Yes |
Factor out shared query logic Summary: Prep for run groupings queries. Test Plan: Unit Reviewers: sashank, prha
@@ -137,7 +137,7 @@ def _add_filters_to_query(self, query, filters): return query - def get_runs(self, filters=None, cursor=None, limit=None): + def _runs_query(self, filters=None, cursor=None, limit=None): filters = check.opt_inst_param( filters, 'filters', PipelineRunsFilter, default=PipelineRunsFilter() ) @@ -154,23 +154,17 @@ def get_runs(self, filters=None, cursor=None, limit=None): query = self._add_filters_to_query(base_query, filters) query = self._add_cursor_limit_to_query(query, cursor, limit) + + return query + + def get_runs(self, filters=None, cursor=None, limit=None): + query = self._runs_query(filters, cursor, limit) + rows = self.execute(query) return self._rows_to_runs(rows) def get_runs_count(self, filters=None): - filters = check.opt_inst_param( - filters, 'filters', PipelineRunsFilter, default=PipelineRunsFilter() - ) - - # If we have a tags filter, then we need to select from a joined table - if filters.tags: - subquery = db.select([1]).select_from( - RunsTable.outerjoin(RunTagsTable, RunsTable.c.run_id == RunTagsTable.c.run_id) - ) - else: - subquery = db.select([1]).select_from(RunsTable) - - subquery = self._add_filters_to_query(subquery, filters) + subquery = self._runs_query(filters=filters).alias('subquery') # We use an alias here because Postgres requires subqueries to be # aliased.
Update operations.py wcr throws an error when the lambda function uses fields of custom structs. Catching the AttributeError fixes this issue.
@@ -79,7 +79,7 @@ def detect_reduction_type(wcr_str): b = sympy.Symbol('b') try: result = wcr(a, b) - except TypeError: # e.g., "Cannot determine truth value of relational" + except (TypeError, AttributeError): # e.g., "Cannot determine truth value of relational" result = None # Check resulting value
lint: Catch exit(1) in management commands. Using sys.exit(1) in a management command makes it impossible to unit test the code in question. The correct approach to do the same thing in Django management commands is to raise CommandError.
@@ -673,6 +673,10 @@ def build_custom_checkers(by_lang): ]), 'description': "Now that we're a Python 3 only codebase, we don't need to use typing.Text. Please use str instead.", }, + {'pattern': 'exit[(]1[)]', + 'include_only': set(["/management/commands/"]), + 'description': 'Raise CommandError to exit with failure in management commands', + }, ]) + whitespace_rules + comma_whitespace_rule bash_rules = cast(RuleList, [ {'pattern': '#!.*sh [-xe]',
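A short sketch of the pattern the new lint rule points toward (a hypothetical command living under management/commands/): raising CommandError still exits non-zero when run via manage.py, but unlike exit(1) it can be asserted in unit tests through call_command.

from django.core.management.base import BaseCommand, CommandError

class Command(BaseCommand):
    help = "Example command that fails cleanly instead of calling exit(1)"

    def add_arguments(self, parser):
        parser.add_argument("--force", action="store_true")

    def handle(self, *args, **options):
        if not options["force"]:
            # Signals failure like exit(1), but raises a testable exception.
            raise CommandError("refusing to run without --force")
        self.stdout.write("done")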
Minor documentation and log changes Observed verbally in a previous PR; addressing them before they leave my mind.
@@ -66,10 +66,10 @@ class EndpointInterchange: Funcx config object that describes how compute should be provisioned reg_info : dict[str, dict] - Tuple of connection info for task_queue and result_queue - Connection parameters to connect to the service side RabbitMQ pipes - Optional: If not supplied, the endpoint will use a retry loop to - attempt registration periodically. + Dictionary containing connection information for both the task and + result queues. The required data structure is returned from the + Endpoint registration API call, encapsulated in the SDK by + `FuncXClient.register_endpoint()`. logdir : str Parsl log directory paths. Logs and temp files go here. Default: '.' @@ -265,7 +265,7 @@ class EndpointInterchange: try: self._start_threads_and_main() except pika.exceptions.ProbableAuthenticationError as e: - log.warning(f"Unable to start endpoint: {e}") + log.error(f"Unable to connect to AMQP service: {e}") self._kill_event.set() except Exception: log.exception("Unhandled exception in main kernel.")