Support HTTP status code 201 in rest_client. 201 is a valid HTTP status code for the POST method; it means the resource has been created, so we shouldn't log an error for status 201.
@@ -79,6 +79,8 @@ class AtlassianRestAPI(object): response_content = response.content if response.status_code == 200: log.debug('Received: {0}\n {1}'.format(response.status_code, response_content)) + elif response.status_code == 201: + log.debug('Received: {0}\n "Created" response'.format(response.status_code)) elif response.status_code == 204: log.debug('Received: {0}\n "No Content" response'.format(response.status_code)) elif response.status_code == 404:
Add `http.HTTPMethod` `description` isn't actually read-only at runtime, but I don't think there's any other way of telling type checkers "this is an attribute that the members have, not a member itself". And pretending it's a property is already what we do for `HTTPStatus`, which has the same issue.
@@ -2,6 +2,12 @@ import sys from enum import IntEnum from typing_extensions import Literal +if sys.version_info >= (3, 11): + from enum import StrEnum + +if sys.version_info >= (3, 11): + __all__ = ["HTTPStatus", "HTTPMethod"] +else: __all__ = ["HTTPStatus"] class HTTPStatus(IntEnum): @@ -74,3 +80,17 @@ class HTTPStatus(IntEnum): EARLY_HINTS: Literal[103] IM_A_TEAPOT: Literal[418] TOO_EARLY: Literal[425] + +if sys.version_info >= (3, 11): + class HTTPMethod(StrEnum): + @property + def description(self) -> str: ... + CONNECT: str + DELETE: str + GET: str + HEAD: str + OPTIONS: str + PATCH: str + POST: str + PUT: str + TRACE: str
converts non-string column names to string to avoid failing in display later
@@ -51,7 +51,7 @@ def validate_column_names(column_names, invalid_column_names): try: validate_property(column_name, allow_parents=False) except (ValueError, TypeError): - invalid_column_names.add(column_name) + invalid_column_names.add(str(column_name)) # Cobble together the context needed to render breadcrumbs that class-based views get from BasePageView
[auxdata.dem_create] added min. version to egm96_15.gtx error message. Converting from EGM96 geoid heights to WGS84 ellipsoid heights requires PROJ.4 version 5.0.0 or higher; see:
@@ -204,7 +204,7 @@ def dem_create(src, dst, t_srs=None, tr=None, geoid_convert=False, geoid='EGM96' errstr = str(e) if 'Cannot open egm96_15.gtx' in errstr: addition = '\nplease refer to the following site for instructions ' \ - 'on how to use the file egm96_15.gtx:\n' \ + 'on how to use the file egm96_15.gtx (requires proj.4 >= 5.0.0):\n' \ 'https://gis.stackexchange.com/questions/258532/' \ 'noaa-vdatum-gdal-variable-paths-for-linux-ubuntu' raise RuntimeError(errstr + addition)
(trivial) qt locktimeedit: fix a DeprecationWarning /home/user/wspace/electrum/electrum/gui/qt/locktimeedit.py:145: DeprecationWarning: an integer is required (got type Alignment). Implicit conversion to integers using __int__ is deprecated, and may be removed in a future version of Python. painter.drawText(textRect, Qt.AlignRight | Qt.AlignVCenter, "height")
@@ -142,7 +142,7 @@ class LockTimeHeightEdit(LockTimeRawEdit): textRect.adjust(2, 0, -10, 0) painter = QPainter(self) painter.setPen(ColorScheme.GRAY.as_color()) - painter.drawText(textRect, Qt.AlignRight | Qt.AlignVCenter, "height") + painter.drawText(textRect, int(Qt.AlignRight | Qt.AlignVCenter), "height") def get_max_allowed_timestamp() -> int:
Added annotation file for ignored chromosomes. Based on the ignoreForNorm list (genome file), where by default the X chromosome is not ignored.
@@ -53,3 +53,12 @@ for sample in samples: print('ERROR: Required file "{}" for sample "{}" specified in ' 'configuration file is NOT available.'.format(file, sample)) exit(1) + +ignoreForPeaks = ignore_forNorm +if "X" in ignoreForPeaks: + ignoreForPeaks.remove("X") +ignoreForPeaksFile = os.path.join(outdir_MACS2,'Annotation_'+genome+"_chroms.grep_ignore") + +with open(ignoreForPeaksFile) as f: + for chrom in ignoreForPeaks: + f.write('^'+chrom +'\n')
Improve `mmap.mmap.__enter__` It returns `Self` at runtime, not `mmap.mmap`, so inheriting from `AbstractContextManager` doesn't really do anything for us.
import sys -from _typeshed import ReadableBuffer -from contextlib import AbstractContextManager +from _typeshed import ReadableBuffer, Self from typing import Iterable, Iterator, NoReturn, Sized, overload ACCESS_DEFAULT: int @@ -27,7 +26,7 @@ if sys.platform != "win32": PAGESIZE: int -class mmap(AbstractContextManager[mmap], Iterable[int], Sized): +class mmap(Iterable[int], Sized): if sys.platform == "win32": def __init__(self, fileno: int, length: int, tagname: str | None = ..., access: int = ..., offset: int = ...) -> None: ... else: @@ -70,6 +69,7 @@ class mmap(AbstractContextManager[mmap], Iterable[int], Sized): # Doesn't actually exist, but the object is actually iterable because it has __getitem__ and # __len__, so we claim that there is also an __iter__ to help type checkers. def __iter__(self) -> Iterator[int]: ... + def __enter__(self: Self) -> Self: ... def __exit__(self, *args: object) -> None: ... if sys.version_info >= (3, 8) and sys.platform != "win32":
TST: Make negative bar count check exact message. To make sure the ValueError is not being occluded by another error.
@@ -646,7 +646,10 @@ class MinuteEquityHistoryTestCase(WithHistory, ZiplineTestCase): """ Negative bar counts leak future information. """ - with self.assertRaises(ValueError): + with self.assertRaisesRegexp( + ValueError, + "bar_count must be >= 1, but got -1" + ): self.data_portal.get_history_window( [self.ASSET1], pd.Timestamp('2015-01-07 14:35', tz='UTC'),
changelog: fix number of new rules. This was caused by a bug in the GH action which updates this number automatically:
@@ -15,7 +15,7 @@ It includes many new rules, including all new techniques introduced in MITRE ATT - main: more detailed progress bar output when matching functions #562 @mr-tz - main: detect file limitations without doing code analysis for better performance #583 @williballenthin -### New Rules (82) +### New Rules (80) - anti-analysis/packer/amber/packed-with-amber @gormaniac - collection/file-managers/gather-3d-ftp-information @re-fox
portico-signin: Fix organization name text overflow. This fixes the issue on subdomain.zulipchat.com/login/ where the organization name will have characters such as the lowercase "g" cut off near the bottom due to the line-height being too small and the overflow being hidden. This re-arranges the properties to fix that issue.
@@ -493,7 +493,7 @@ html { .split-view .org-header .info-box { display: inline-block; position: relative; - margin: 15px 0px 0px 20px; + margin: 20px 0px 0px 20px; width: calc(100% - 125px); /* full width minus the avatar + padding + borders */ @@ -511,13 +511,13 @@ html { .split-view .info-box .organization-name { font-size: 1.5em; font-weight: 300; - line-height: 1; + line-height: 1.2; } .split-view .info-box .organization-path { font-weight: 400; color: hsl(0, 0%, 46%); - margin-top: 10px; + margin-top: 5px; } .new-style .lead {
Matplotlib max-h fix so that plots are not cut off. `object-fit:contain` only works with `max-h` if they are set on the same element. I moved `max-h` to `img` and also increased the height a bit such that the plot has enough space and can adapt to the container height. Fixes
<div id="bokehDiv"/> {:else if value && value["type"] == "matplotlib"} <div - class="output-image w-full max-h-80 flex justify-center items-center relative" + class="output-image w-full flex justify-center items-center relative" {theme} > <!-- svelte-ignore a11y-missing-attribute --> - <img class="w-full h-full object-contain" src={value["plot"]} /> + <img class="w-full max-h-[30rem] object-contain" src={value["plot"]} /> </div> {:else} <div class="h-full min-h-[15rem] flex justify-center items-center">
node: move working path to lazy creation. The working path is only needed for custom scripts so far, so don't create it if it's not necessary.
@@ -52,10 +52,11 @@ class Node(ContextMixin, InitializableMixin): self.features: Features self.tools = Tools(self) # the path uses remotely - self.remote_working_path: Optional[PurePath] = None node_id = str(self.index) if self.index >= 0 else "" self.log = get_logger(logger_name, node_id) + # The working path will be created in remote node, when it's used. + self._remote_working_path: Optional[PurePath] = None self._base_local_log_path = base_log_path # Not to set the log path until its first used. Because the path # contains node name, which is not set in __init__. @@ -222,29 +223,15 @@ class Node(ContextMixin, InitializableMixin): return self._local_log_path - def close(self) -> None: - self.log.debug("closing node connection...") - if self._shell: - self._shell.close() - - def _initialize(self, *args: Any, **kwargs: Any) -> None: - if self.is_remote: - assert ( - self._connection_info - ), "call setConnectionInfo before use remote node" - address = f"{self._connection_info.address}:{self._connection_info.port}" - else: - address = "localhost" - self.log.info(f"initializing node '{self.name}' {address}") - self.shell.initialize() - self.os: OperatingSystem = OperatingSystem.create(self) - - # set working path + @property + def remote_working_path(self) -> PurePath: + if not self._remote_working_path: if self.is_remote: if self.is_posix: remote_root_path = Path("$HOME") else: remote_root_path = Path("%TEMP%") + working_path = remote_root_path.joinpath( constants.PATH_REMOTE_ROOT, constants.RUN_LOGIC_PATH ).as_posix() @@ -255,14 +242,33 @@ class Node(ContextMixin, InitializableMixin): # PurePath is more reasonable here, but spurplus doesn't support it. if self.is_posix: - self.remote_working_path = PurePosixPath(result.stdout) + self._remote_working_path = PurePosixPath(result.stdout) else: - self.remote_working_path = PureWindowsPath(result.stdout) + self._remote_working_path = PureWindowsPath(result.stdout) else: - self.remote_working_path = constants.RUN_LOCAL_PATH + self._remote_working_path = constants.RUN_LOCAL_PATH - self.shell.mkdir(self.remote_working_path, parents=True, exist_ok=True) - self.log.debug(f"working path is: '{self.remote_working_path}'") + self.shell.mkdir(self._remote_working_path, parents=True, exist_ok=True) + self.log.debug(f"working path is: '{self._remote_working_path}'") + + return self._remote_working_path + + def close(self) -> None: + self.log.debug("closing node connection...") + if self._shell: + self._shell.close() + + def _initialize(self, *args: Any, **kwargs: Any) -> None: + if self.is_remote: + assert ( + self._connection_info + ), "call setConnectionInfo before use remote node" + address = f"{self._connection_info.address}:{self._connection_info.port}" + else: + address = "localhost" + self.log.info(f"initializing node '{self.name}' {address}") + self.shell.initialize() + self.os: OperatingSystem = OperatingSystem.create(self) def _execute( self,
Noviflow docs: icmpv6 match moved to table 6 instead of 3. Add match fields for FaucetIPv6TupleTest.
@@ -23,28 +23,38 @@ In this example, the server running FAUCET is 10.0.1.8; configuration for CPN in .. code-block:: none - set config controller controllergroup 1 controllerid 1 priority 1 ipaddr 10.0.1.8 port 6653 security none + set config controller controllergroup faucet controllerid 1 priority 1 ipaddr 10.0.1.8 port 6653 security none + set config controller controllergroup gauge controllerid 1 priority 1 ipaddr 10.0.1.8 port 6654 security none set config switch dpid 0x1 Configure the tables ^^^^^^^^^^^^^^^^^^^^ - These matches are known to pass the unit tests as of FAUCET 1.6.4, but take care to adjust - ACL table matches and any changes for future versions. +These matches are known to pass the unit tests as of FAUCET 1.6.18, but take care to adjust +ACL tables matches based on the type of ACL rules defined in the configuration file. +Different FAUCET releases may also use different match fields in the other tables. .. code-block:: none - set config pipeline tablesizes 1024 1024 1024 1024 1024 1024 1024 1024 1024 tablewidths 80 40 40 40 40 40 40 40 40 - set config table tableid 0 matchfields 0 3 4 5 6 10 14 23 29 31 + set config pipeline tablesizes 1524 1024 1024 1024 1024 1024 1024 1024 1024 tablewidths 80 40 40 40 40 40 40 40 40 + set config table tableid 0 matchfields 0 3 4 5 6 10 11 12 13 14 23 29 31 set config table tableid 1 matchfields 0 3 4 5 6 set config table tableid 2 matchfields 0 5 6 10 11 12 14 - set config table tableid 3 matchfields 0 3 4 5 6 10 29 + set config table tableid 3 matchfields 0 3 4 5 6 10 set config table tableid 4 matchfields 5 6 12 set config table tableid 5 matchfields 5 6 27 - set config table tableid 6 matchfields 3 5 10 23 + set config table tableid 6 matchfields 3 5 10 23 29 set config table tableid 7 matchfields 0 3 6 set config table tableid 8 matchfields 0 3 6 +Note that this table configuration will allow most of the automated test cases to pass, except FaucetIPv6TupleTest +(which requires IPv6 Src and Dst matching in the ACL table). In order to run this test, table 0 must be +configured as follows: + +.. code-block:: none + + set config table tableid 0 matchfields 0 5 6 10 26 27 13 14 + Create faucet.yaml ^^^^^^^^^^^^^^^^^^
Get output from process in case of error.
@@ -16,22 +16,29 @@ script: # This is the call made when pressing the integration test button. demisto.results('ok') sys.exit(0) - if demisto.command() == 'rasterize': - from subprocess import call + import os + import subprocess + return_code = 0 + error_message = '' friendlyName = 'url.png' if demisto.get(demisto.args(), 'type') == 'pdf': friendlyName = 'url.pdf' command = ['phantomjs', '/usr/local/bin/rasterize.js', demisto.args()['url'], friendlyName] if demisto.get(demisto.args(), 'width') and demisto.get(demisto.args(), 'height'): command.append(demisto.gets(demisto.args(), 'width') + '*' + demisto.gets(demisto.args(), 'height')) - stat = call(command) - if stat == 0: + try: + error_message = subprocess.check_output(command); + except Exception as e: + return_code = -1 + error_message = e.output + + if return_code == 0: filename = demisto.uniqueFile() - call(['mv', friendlyName, demisto.investigation()['id'] + '_' + filename]) + subprocess.call(['mv', friendlyName, demisto.investigation()['id'] + '_' + filename]) demisto.results({'ContentsFormat': 'text', 'Type': entryTypes['file'], 'File': friendlyName, 'FileID': filename, 'Contents': ''}) else: - demisto.results({'ContentsFormat': 'text', 'Type': entryTypes['error'], 'Contents': 'PhantomJS returned - ' + str(stat)}) + demisto.results({'ContentsFormat': 'text', 'Type': entryTypes['error'], 'Contents': 'PhantomJS returned - ' + error_message}) sys.exit(0) demisto.results({'ContentsFormat': 'text', 'Type': entryTypes['error'], 'Contents': 'Unrecognized command'}) type: python
Initialize GceVirtualMachine.preempt_marker in the constructor. This way if PKB fails before GceVirtualMachine.Create we don't get a missing field error checking for the preempt marker. This only happened when there was an unrelated failure in _CreateDependencies.
@@ -424,6 +424,8 @@ class GceVirtualMachine(virtual_machine.BaseVirtualMachine): if (not FLAGS.gce_migrate_on_maintenance or self.gpu_count or self.network.placement_group): self.on_host_maintenance = 'TERMINATE' + if self.preemptible: + self.preempt_marker = f'gs://{FLAGS.gcp_preemptible_status_bucket}/{FLAGS.run_uri}/{self.name}' def _GetNetwork(self): """Returns the GceNetwork to use.""" @@ -545,7 +547,6 @@ class GceVirtualMachine(virtual_machine.BaseVirtualMachine): if self.preemptible: cmd.flags['preemptible'] = True - self.preempt_marker = f'gs://{FLAGS.gcp_preemptible_status_bucket}/{FLAGS.run_uri}/{self.name}' metadata.update([self._PreemptibleMetadataKeyValue()]) cmd.flags['metadata'] = util.FormatTags(metadata)
Make curl in DevStack Bypass Proxy. This commit modifies the curl option in wait_for_nova_resources to bypass the proxy with the --noproxy option. Without this option, if you run DevStack behind a proxy, the curl command fails with a timeout and wait_for_nova_resources also fails. Because curl only accesses the Placement service API, this modification should be reasonable.
@@ -2202,7 +2202,7 @@ function wait_for_nova_resources { for i in $(seq 1 12); do # Fetch provider UUIDs from Placement local providers - providers=$(curl -sH "X-Auth-Token: $token" $endpoint/resource_providers \ + providers=$(curl --noproxy '*' -sH "X-Auth-Token: $token" $endpoint/resource_providers \ | jq -r '.resource_providers[].uuid') local p @@ -2213,13 +2213,13 @@ function wait_for_nova_resources { # A resource class inventory record looks something like # {"max_unit": 1, "min_unit": 1, "step_size": 1, "reserved": 0, "total": 1, "allocation_ratio": 1} # Subtrack reserved from total (defaulting both to 0) - amount=$(curl -sH "X-Auth-Token: $token" $endpoint/resource_providers/$p/inventories \ + amount=$(curl --noproxy '*' -sH "X-Auth-Token: $token" $endpoint/resource_providers/$p/inventories \ | jq ".inventories.CUSTOM_$resource_class as \$cls | (\$cls.total // 0) - (\$cls.reserved // 0)") # Check whether the resource provider has all expected traits # registered against it. - rp_traits=$(curl -sH "X-Auth-Token: $token" \ + rp_traits=$(curl --noproxy '*' -sH "X-Auth-Token: $token" \ -H "OpenStack-API-Version: placement 1.6" \ $endpoint/resource_providers/$p/traits) for trait in $IRONIC_DEFAULT_TRAITS; do
Add table that describes paired-end counterparts of trimming options ... and some other improvements. Closes
-============= -Recipes (FAQ) -============= +=============== +Recipes and FAQ +=============== This section gives answers to frequently asked questions. It shows you how to get cutadapt to do what you want it to do! @@ -174,6 +174,38 @@ principle:: Note the ``-`` character in the second invocation to cutadapt. +Support for concatenated compressed files +----------------------------------------- + +Cutadapt supports concatenated gzip and bzip2 input files. + + +Paired-end read name check +-------------------------- + +When reading paired-end files, Cutadapt checks whether the read names match. +Only the part of the read name before the first space is considered. If the +read name ends with ``/1`` or ``/2``, then that is also ignored. For example, +two FASTQ headers that would be considered to denote properly paired reads are:: + + @my_read/1 a comment + +and:: + + @my_read/2 another comment + +This is an example for *improperly paired* read names:: + + @my_read/1;1 + +and:: + + @my_read/2;1 + +Since the ``/1`` and ``/2`` are ignored only if the occur at the end of the read +name, and since the ``;1`` is considered to be part of the read name, these +reads will not be considered to be propely paired. + Other things (unfinished) -------------------------
azure: added HCS family support. The HCS VM family was treated as the H family until now, and that was causing various issues in lisav2 testing.
@@ -216,6 +216,16 @@ Function Validate-SubscriptionUsage($RGXMLData, $Location, $OverrideVMSize, $Sto $currentStatus = Set-Usage -currentStatus $currentStatus -text $identifierText -usage $testVMUsage -AllowedUsagePercentage $AllowedUsagePercentage $overFlowErrors += Test-Usage -currentStatus $currentStatus -text $identifierText -AllowedUsagePercentage $AllowedUsagePercentage } + elseif ( $testVMSize.StartsWith("HB")) { + $identifierText = "standardHBSFamily" + $currentStatus = Set-Usage -currentStatus $currentStatus -text $identifierText -usage $testVMUsage -AllowedUsagePercentage $AllowedUsagePercentage + $overFlowErrors += Test-Usage -currentStatus $currentStatus -text $identifierText -AllowedUsagePercentage $AllowedUsagePercentage + } + elseif ( $testVMSize.StartsWith("HC")) { + $identifierText = "standardHCSFamily" + $currentStatus = Set-Usage -currentStatus $currentStatus -text $identifierText -usage $testVMUsage -AllowedUsagePercentage $AllowedUsagePercentage + $overFlowErrors += Test-Usage -currentStatus $currentStatus -text $identifierText -AllowedUsagePercentage $AllowedUsagePercentage + } elseif ( $testVMSize.StartsWith("H")) { $identifierText = "standardHFamily" $currentStatus = Set-Usage -currentStatus $currentStatus -text $identifierText -usage $testVMUsage -AllowedUsagePercentage $AllowedUsagePercentage
`test_cases` README: Address post-merge review of Thanks for the review!
@@ -13,7 +13,7 @@ multiple other mechanisms for spotting errors in the stubs. Different test cases in this directory serve different purposes. For some stubs in typeshed, the type annotations are complex enough that it's useful to have -basic sanity checks that test whether a type checker understands the intent of +sanity checks that test whether a type checker understands the intent of the annotations correctly. Examples of tests like these are `stdlib/builtins/check_pow.py` and `stdlib/asyncio/check_gather.py`. @@ -27,8 +27,8 @@ false-positive errors for idiomatic usage of these classes. ### How the tests work The code in this directory is not intended to be directly executed. Instead, -type checkers are run on the code, in order to check whether typing errors are -emitted at the places where we want them to be. +type checkers are run on the code, to check that typing errors are +emitted at the correct places. Some files in this directory simply contain samples of idiomatic Python, which should not (if the stubs are correct) cause a type checker to emit any errors.
[docs] Improve docs-serve and fix link 1. Make sure dependencies are installed when using docs-serve 2. Fix link to customizing reports from report reference page.
@@ -42,7 +42,10 @@ lint: install-dependencies typecheck: install-dependencies source ${VENV_ACTIVATE} && python3 presubmit.py typecheck -docs-serve: +install-docs-dependencies: docs/Gemfile.lock + cd docs && bundle install + +docs-serve: install-docs-dependencies cd docs && bundle exec jekyll serve --livereload clear-cache:
.github: squish issue template onto single lines The GitHub UI seems to keep line breaks from the template for new issues, so squish onto single lines.
-Please do not report issues that can be reproduced with the -[upstream Java project](https://github.com/googlei18n/libphonenumber): if your -issue can be reproduced with the -[online Java demo](http://libphonenumber.appspot.com/), -report your [problem upstream](https://github.com/googlei18n/libphonenumber/blob/master/CONTRIBUTING.md#checklist-before-filing-an-issue) -instead. +Please do not report issues that can be reproduced with the [upstream Java project](https://github.com/googlei18n/libphonenumber): if your issue can be reproduced with the [online Java demo](http://libphonenumber.appspot.com/), report your [problem upstream](https://github.com/googlei18n/libphonenumber/blob/master/CONTRIBUTING.md#checklist-before-filing-an-issue) instead. -In particular, any changes to the phonenumber metadata or to the API should be -reported upstream rather than here. Only issues or contributions that are -**specific to the Python version** are relevant here. +In particular, any changes to the phonenumber metadata or to the API should be reported upstream rather than here. Only issues or contributions that are **specific to the Python version** are relevant here.
fix(tests): Fix incorrect `pytest.raises(...) as` usage. The exception variable used in the `as` should be used outside of the `with` scope.
@@ -184,30 +184,26 @@ async def test_merge_commit_message_undefined( ) with pytest.raises(context.RenderTemplateFailure) as x: await ctxt.pull_request.get_commit_message() - assert str(x) == "foobar" + assert "foobar" in str(x.value) @pytest.mark.parametrize( - "body,error", + "body", [ - ( """Hello world # Commit Message {{title}} here is my message {{ and broken template -""", - "lol", - ), +""" ], ) async def test_merge_commit_message_syntax_error( - body: str, error: str, context_getter: conftest.ContextGetterFixture + body: str, context_getter: conftest.ContextGetterFixture ) -> None: ctxt = await context_getter( github_types.GitHubPullRequestNumber(43), body=body, title="My PR title" ) - with pytest.raises(context.RenderTemplateFailure) as rmf: + with pytest.raises(context.RenderTemplateFailure): await ctxt.pull_request.get_commit_message() - assert str(rmf) == error
Skipped tests. Increased timeout for: Endpoint Malware Investigation - Generic - Test.
"Demisto REST API" ], "fromversion": "5.0.0", - "timeout": 900 + "timeout": 1200 }, { "playbookID": "ParseExcel-test" "_comment": "~~~ INSTANCE ISSUES ~~~", + "AlphaSOC Wisdom": "Issue 19235", + "DomainTools Iris": "Issue 20433", + "Cybereason": "Issue 20430", + "SentinelOne V2": "Issue 19361", + "ThreatQ v2": "Issue 19468", "Check Point": "Issue 18643", "McAfee Active Response": "Issue 20320", "RSA Archer": "Issue 20335",
fix the log of parse_summary feature. Detail: fix the issue where the 100% process info is printed twice, by determining whether file_handler.offset changed.
@@ -88,6 +88,7 @@ class EventParser: start_offset = file_handler.offset try: event_str = _SummaryParser.event_load(file_handler) + if start_offset != file_handler.offset: self._print_process(file_handler) crc_check_time = 0 if event_str is None:
Change batch size to 1. This is the default when using the Lambda API, and it also prevents the issues with partial failures.
@@ -557,7 +557,7 @@ class Chalice(object): return EventSourceHandler(event_func, SNSEvent) return _register_sns_message - def on_sqs_message(self, queue, batch_size=10, name=None): + def on_sqs_message(self, queue, batch_size=1, name=None): def _register_sqs_message(event_func): handler_name = name if handler_name is None:
Update tags.yaml. Added missing tags.
- commerce - complaints - computed tomography +- computer forensics - computer security - computer vision - conservation - denoising - dialog - dicom +- digital forensics - digital preservation - disaster response - distributional semantics
Fix for I think this was a typo? I can deploy VPSes again, at least.
@@ -1179,7 +1179,7 @@ class Cloud(object): 'minion', vm_, self.opts, default={} ) - alias, driver = vm_['provider'].split(':') + alias, driver = vm_['driver'].split(':') fun = '{0}.create'.format(driver) if fun not in self.clouds: log.error(
Fix docs for test_request_context Fixes
@@ -2140,8 +2140,8 @@ class Flask(_PackageBoundObject): return RequestContext(self, environ) def test_request_context(self, *args, **kwargs): - """Creates a WSGI environment from the given values (see - :class:`werkzeug.test.EnvironBuilder` for more information, this + """Creates a :class:`~flask.ctx.RequestContext` from the given values + (see :class:`werkzeug.test.EnvironBuilder` for more information, this function accepts the same arguments plus two additional). Additional arguments (only if ``base_url`` is not specified):
Changed mount.mount module to accept only the mount point. Previously mount.mount needed at least the device and the mount point; using that approach it doesn't use the /etc/fstab entries. Changed it to need only the mount point. You can still use the device, but now it is an optional parameter.
@@ -1245,7 +1245,7 @@ def automaster(config="/etc/auto_salt"): def mount( - name, device, mkmnt=False, fstype="", opts="defaults", user=None, util="mount" + name, device=False, mkmnt=False, fstype="", opts="defaults", user=None, util="mount" ): """ Mount a device @@ -1295,7 +1295,11 @@ def mount( else: args += " -t {0}".format(fstype) - cmd = "mount {0} {1} {2} ".format(args, device, name) + cmd = "mount " + if device: + cmd += "{0} {1} {2} ".format(args, device, name) + else: + cmd += "{0} ".format(name) out = __salt__["cmd.run_all"](cmd, runas=user, python_shell=False) if out["retcode"]: return out["stderr"]
Refactor flake8 integration test to use create_options_bootstrapper. Follow-up on
@@ -10,10 +10,9 @@ from pants.base.specs import FilesystemLiteralSpec, OriginSpec, SingleAddress from pants.core.goals.lint import LintOptions, LintResults from pants.engine.addresses import Address from pants.engine.fs import DigestContents, FileContent -from pants.engine.rules import RootRule +from pants.engine.rules import RootRule, SubsystemRule from pants.engine.selectors import Params from pants.engine.target import TargetWithOrigin -from pants.testutil.engine.util import create_subsystem from pants.testutil.external_tool_test_base import ExternalToolTestBase from pants.testutil.interpreter_selection_utils import skip_unless_python27_and_python3_present from pants.testutil.option.util import create_options_bootstrapper @@ -27,7 +26,12 @@ class Flake8IntegrationTest(ExternalToolTestBase): @classmethod def rules(cls): - return (*super().rules(), *flake8_rules(), RootRule(Flake8Request), RootRule(LintOptions)) + return ( + *super().rules(), + *flake8_rules(), + RootRule(Flake8Request), + SubsystemRule(LintOptions), + ) def make_target_with_origin( self, @@ -54,7 +58,6 @@ class Flake8IntegrationTest(ExternalToolTestBase): passthrough_args: Optional[str] = None, skip: bool = False, additional_args: Optional[List[str]] = None, - reports_dir: Optional[str] = None, ) -> LintResults: args = ["--backend-packages=pants.backend.python.lint.flake8"] if config: @@ -70,7 +73,6 @@ class Flake8IntegrationTest(ExternalToolTestBase): LintResults, Params( Flake8Request(Flake8FieldSet.create(tgt) for tgt in targets), - create_subsystem(LintOptions, reports_dir=reports_dir), # type: ignore[type-var] create_options_bootstrapper(args=args), ), ) @@ -181,7 +183,7 @@ class Flake8IntegrationTest(ExternalToolTestBase): def test_output_file(self) -> None: target = self.make_target_with_origin([self.bad_source]) - result = self.run_flake8([target], reports_dir=".") + result = self.run_flake8([target], additional_args=["--lint-reports-dir='.'"]) assert len(result) == 1 assert result[0].exit_code == 1 assert result[0].stdout.strip() == ""
Update core.py with f-strings (issue Corrected past shortcomings
@@ -3269,7 +3269,7 @@ def _scalar_heuristic(arr, elem): dout._fill_value.flat[0]).all(): warnings.warn( "Upon accessing multidimensional field " - f"{indx:s}, need to keep dimensionality " + f"{indx!s}, need to keep dimensionality " "of fill_value at 0. Discarding " "heterogeneous fill_value and setting " f"all to {dout._fill_value[0]!s}.", @@ -3914,7 +3914,10 @@ def __repr__(self): dtype=str(self.dtype) ) is_structured = bool(self.dtype.names) - key = f"{'long' if is_long else 'short'}_{'flx' if is_structured else 'std'}" + key = '{}_{}'.format( + 'long' if is_long else 'short', + 'flx' if is_structured else 'std' + ) return _legacy_print_templates[key] % parameters prefix = f"masked_{name}("
ParameterisedHolderUI: Updated deprecated presets() method Cortex 10 deprecated `presets()` in favour of `getPresets()` for Parameters.
@@ -226,7 +226,7 @@ def __plugPresetValues( plug ) : # make sure to get the values in the same # order that the names were given. - values = [ parameter.presets()[x] for x in parameter.presetNames() ] + values = [ parameter.getPresets()[x] for x in parameter.presetNames() ] if isinstance( plug, Gaffer.StringPlug ) : return IECore.StringVectorData( [ v.value for v in values ] ) elif isinstance( plug, Gaffer.BoolPlug ) :
[swarming] Fix unicode for fix_python_cmd. Also use fs.stat instead of os.stat.
@@ -260,20 +260,23 @@ def fix_python_cmd(cmd, env=None): # At this point we need to prepend some resolved python to cmd. if sys.platform == 'win32': - python_exe = 'python.exe' + python_exe = u'python.exe' check = fs.isfile else: - python_exe = 'python' + python_exe = u'python' def check(candidate): try: - return bool(os.stat(candidate).st_mode | os.path.stat.S_IEXEC) + return bool(fs.stat(candidate).st_mode | os.path.stat.S_IEXEC) except OSError: return False - found_python = sys.executable + found_python = unicode(sys.executable) paths = (os.environ if env is None else env).get('PATH', '').split(os.pathsep) for path in paths: + if path == '': + continue + candidate = os.path.join(path, python_exe) if check(candidate): found_python = candidate
voctocore: fixup streamblanker. Removed debug message.
@@ -98,7 +98,6 @@ class StreamblankToolbarController(object): if current_time - self.last_draw_time >= 1.0: self.last_draw_time = current_time for button in list(self.blank_btns.values()) + [self.livebtn]: - print(button.get_name()) if self.blink: button.get_style_context().add_class("blink") else:
[tests] Use PY2 instead of PYTHON_VERSION for WarningSourceSkipContextManager. Pywikibot supports Python (3.4)/3.5+. It is not necessary to have a different behaviour for 3.0 - 3.1 anymore.
@@ -253,7 +253,7 @@ class WarningSourceSkipContextManager(warnings.catch_warnings): skip_frames -= 1 # Ignore socket IO warnings (T183696, T184996) - if (PYTHON_VERSION >= (3, 2) + if (not PY2 and issubclass(warn_msg.category, ResourceWarning) and str(warn_msg.message).startswith( ('unclosed <ssl.SSLSocket',
boxes: Improve error message for invalid stream name. Now that autocomplete for stream box has been added, we can show a hint to use autocomplete when the user inputs an invalid stream name.
@@ -319,7 +319,14 @@ class WriteBox(urwid.Pile): if header.focus_col == 0: stream_name = header[0].edit_text if not self.model.is_valid_stream(stream_name): - self.view.set_footer_text("Invalid stream name", 3) + invalid_stream_error = ( + 'Invalid stream name.' + ' Use {} or {} to autocomplete.' + .format(keys_for_command('AUTOCOMPLETE').pop(), + keys_for_command('AUTOCOMPLETE' + '_REVERSE').pop()) + ) + self.view.set_footer_text(invalid_stream_error, 3) return key user_ids = self.model.get_other_subscribers_in_stream( stream_name=stream_name)
Make code pass PEP8 checks Per suggestion at
@@ -807,22 +807,26 @@ There's a couple of things to keep in mind when enabling cors for a view: .. code-block:: python from chalice import Chalice, Response + app = Chalice(app_name='multipleorigincors') + _ALLOWED_ORIGINS = set([ + 'http://allowed1.example.com', + 'http://allowed2.example.com', + ]) + + @app.route('/cors_multiple_origins', methods=['GET']) def supports_cors_multiple_origins(): - origin = app.current_request.to_dict()['headers']['origin'] - if origin == 'http://allowed1.example.com': - return Response(body='You sent a whitelisted origin!', - status_code=200, - headers={'Access-Control-Allow-Origin': 'http://allowed1.example.com'}) - elif origin == 'http://allowed2.example.com': - return Response(body='You sent a whitelisted origin!', - status_code=200, - headers={'Access-Control-Allow-Origin': 'http://allowed2.example.com'}) + origin = app.current_request.to_dict()['headers'].get('origin', '') + if origin in _ALLOWED_ORIGINS: + return Response( + body='You sent a whitelisted origin!\n', + headers={ + 'Access-Control-Allow-Origin': origin + }) else: - return Response(body="The origin you sent has not been whitelisted: " + origin, - status_code=200) + return "The origin you sent has not been whitelisted: %s\n" % origin * Every view function must explicitly enable CORS support.
Add entry for distributed tests to CODEOWNERS. Summary: Pull Request resolved: ghstack-source-id: Test Plan: waitforbuildbot
/torch/distributed/rpc @mrshenli @pritamdamania87 @zhaojuanmao /torch/distributed/autograd @mrshenli @pritamdamania87 @zhaojuanmao /torch/distributed/optim @mrshenli @pritamdamania87 @zhaojuanmao @aazzolini + +# Distributed tests +/test/distributed @mrshenli @pritamdamania87 @zhaojuanmao +/torch/testing/_internal/distributed @mrshenli @pritamdamania87 @zhaojuanmao
s/paramter/parameter/ Minor typo
@@ -313,7 +313,7 @@ In addition to credentials, you can also configure non-credential values. In general, boto3 follows the same approach used in credential lookup: try various locations until a value is found. Boto3 uses these sources for configuration: -* Explicitly passed as the ``config`` paramter when creating a client. +* Explicitly passed as the ``config`` parameter when creating a client. * Environment variables * The ``~/.aws/config`` file.
Fix failing travis build for python 3.6. This fixes a build error in python 3.6 in travis.
@@ -26,6 +26,9 @@ python: before_install: - sudo apt-get update -qq - sudo apt-get install -qq -y python-virtualenv + # otherwise imagecodecs fails to build on py3.6, + # see https://github.com/scikit-image/scikit-image/issues/4673 + - pip install --upgrade pip install: # TODO why was this deactivated?
Doc improvement for bulk update [skip ci]
@@ -234,7 +234,12 @@ efficiently update one or more columns on a list of models. For example: .. note:: For large lists of objects, you should specify a reasonable batch_size and wrap the call to :py:meth:`~Model.bulk_update` with - :py:meth:`Database.atomic`. + :py:meth:`Database.atomic`: + + .. code-block:: python + + with database.atomic(): + User.bulk_update(list_of_users, fields=['username'], batch_size=50) Alternatively, you can use the :py:meth:`Database.batch_commit` helper to process chunks of rows inside *batch*-sized transactions. This method also
Allow deleting a pres exchange item without reading it first Related to this allows deleting a presentation exchange record without reading it from storage.
@@ -34,6 +34,8 @@ from ....messaging.valid import ( from ....storage.error import StorageError, StorageNotFoundError from ....storage.vc_holder.base import VCHolder from ....storage.vc_holder.vc_record import VCRecord +from ....storage.record import StorageRecord +from ....storage.base import BaseStorage from ....utils.tracing import trace_event, get_timer, AdminAPIMessageTracingSchema from ....vc.ld_proofs import BbsBlsSignature2020, Ed25519Signature2018 from ....wallet.error import WalletNotFoundError @@ -1234,8 +1236,10 @@ async def present_proof_remove(request: web.BaseRequest): pres_ex_record = None try: async with context.profile.session() as session: - pres_ex_record = await V20PresExRecord.retrieve_by_id(session, pres_ex_id) - await pres_ex_record.delete_record(session) + storage = session.inject(BaseStorage) + storage_record = StorageRecord(type=V20PresExRecord.RECORD_TYPE, value=None, id=pres_ex_id) + await storage.delete_record(storage_record) + except StorageNotFoundError as err: raise web.HTTPNotFound(reason=err.roll_up) from err except StorageError as err:
Update exercism-xswift project configuration Configuration has been updated with a new source branch. This branch also only appears to build with SwiftPM, so the build configuration has also been updated. The configurations for Swift 4 no longer apply, so the associated xfails have also been removed.
"repository": "Git", "url": "https://github.com/masters3d/xswift.git", "path": "exercism-xswift", - "branch": "do-not-delete-pre-4.2-compatibility", + "branch": "master", "maintainer": "[email protected]", "compatibility": [ { - "version": "4.0", - "commit": "7614aae4a87d92e2343473c619a805ace98a474e" - }, - { - "version": "4.1", - "commit": "7614aae4a87d92e2343473c619a805ace98a474e" + "version": "5.1", + "commit": "b640558479638d27c0a285aae617fc133296df20" } ], "platforms": [ ], "actions": [ { - "action": "BuildXcodeProjectTarget", - "project": "xswift.xcodeproj", - "target": "xswift", - "destination": "generic/platform=macOS", - "configuration": "Release", - "xfail": { - "compatibility": { - "4.0": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-8307", - "swift-5.0-branch": "https://bugs.swift.org/browse/SR-8307", - "swift-5.1-branch": "https://bugs.swift.org/browse/SR-8307" - } - }, - "4.1": { - "branch": { - "master": "https://bugs.swift.org/browse/SR-8307", - "swift-5.0-branch": "https://bugs.swift.org/browse/SR-8307", - "swift-5.1-branch": "https://bugs.swift.org/browse/SR-8307" - } - } - } - } + "action": "BuildSwiftPackage", + "configuration": "release", + "tags": "sourcekit" } ] },
Fix circuit_dag_test. Networkx changed its copy and eq functions for DiGraph between version 2.1 and 2.2, which breaks the attached test in version 2.1 (but it works in 2.2). In any case, it's probably safer to convert both instances to cirq Dags.
@@ -249,4 +249,4 @@ def test_larger_circuit(): def test_is_maximalist(circuit): dag = cirq.CircuitDag.from_circuit(circuit) transitive_closure = networkx.dag.transitive_closure(dag) - assert transitive_closure == dag + assert cirq.CircuitDag(incoming_graph_data=transitive_closure) == dag
Use ContentNodeSlim for displaying node summary data. Include num_coach_contents to ensure display.
import samePageCheckGenerator from 'kolibri.utils.samePageCheckGenerator'; -import { LessonResource, ContentNodeResource } from 'kolibri.resources'; +import { LessonResource, ContentNodeSlimResource } from 'kolibri.resources'; import { createTranslator } from 'kolibri.utils.i18n'; import { error as logError } from 'kolibri.lib.logging'; import router from 'kolibri.coreVue.router'; @@ -58,9 +58,10 @@ export function getResourceCache(store, resourceIds) { } if (nonCachedResourceIds.length) { - return ContentNodeResource.fetchCollection({ + return ContentNodeSlimResource.fetchCollection({ getParams: { ids: nonCachedResourceIds, + include_fields: ['num_coach_contents'], }, }).then(contentNodes => { contentNodes.forEach(contentNode =>
Remove "# Debug variables" comment; total sr required and provided are useful outputs.
@@ -492,7 +492,6 @@ def process_results(self, dfm_list, data, meta, saveToDB=True): self.nested_outputs["Scenario"]["Site"][name]["load_met_series_kw"] = self.results_dict.get("load_met") self.nested_outputs["Scenario"]["Site"][name]["load_met_pct"] = self.results_dict.get("load_met_pct") self.nested_outputs["Scenario"]["Site"][name]["sr_required_series_kw"] = self.results_dict.get("sr_required_load") - #Debug variables self.nested_outputs["Scenario"]["Site"][name]["total_sr_required"] = self.results_dict.get("tot_sr_required") self.nested_outputs["Scenario"]["Site"][name]["total_sr_provided"] = self.results_dict.get("tot_sr_provided") elif name == "LoadProfileBoilerFuel":
Update ffmpeg.py: include language in toJson.
@@ -135,12 +135,14 @@ class MediaStreamInfo(object): 'type': self.type} if self.type == 'audio': out['channels'] = self.audio_channels + out['language'] = self.metadata['language'].lower().strip() elif self.type == 'video': out['pix_fmt'] = self.pix_fmt out['profile'] = self.profile elif self.type == 'subtitle': out['forced'] = self.sub_forced out['default'] = self.sub_default + out['language'] = self.metadata['language'].lower().strip() return out @staticmethod
travis + circleci: Start postgresql during provision. Otherwise, in CircleCI it isn't running when we need it later. Travis must special-case starting it automatically.
@@ -301,6 +301,7 @@ def main(options): run(["sudo", "service", "rabbitmq-server", "restart"]) run(["sudo", "service", "redis-server", "restart"]) run(["sudo", "service", "memcached", "restart"]) + run(["sudo", "service", "postgresql", "restart"]) elif options.is_docker: run(["sudo", "service", "rabbitmq-server", "restart"]) run(["sudo", "pg_dropcluster", "--stop", POSTGRES_VERSION, "main"])
Expands \e (for 10^ exponent) macro in latex rendering. This is needed because Katex doesn't support custom commands, and in any case isn't set up to do so.
@@ -246,7 +246,7 @@ def latex_value(el, precision=6, polarprecision=3, p = s.split('e') if len(p) == 2: ex = str(int(p[1])) #exponent without extras (e.g. +04 => 4) - s = p[0] + "\\e{" + ex + "}" + s = p[0] + "\\times 10^{" + ex + "}" #Strip superfluous endings if "." in s:
Change board embed color. Can't update color when the game is over anymore.
@@ -11,7 +11,7 @@ from PIL import Image, ImageDraw, ImageFont from discord.ext import commands from bot.bot import Bot -from bot.constants import Colours, MODERATION_ROLES +from bot.constants import MODERATION_ROLES from bot.utils.decorators import with_role DECK = list(product(*[(0, 1, 2)]*4)) @@ -262,7 +262,7 @@ class DuckGamesDirector(commands.Cog): file = discord.File(fp=image_stream, filename="board.png") embed = discord.Embed( title="Duck Duck Duck Goose!", - color=Colours.bright_green, + color=discord.Color.dark_purple(), ) embed.set_image(url="attachment://board.png") return await ctx.send(embed=embed, file=file)
Fix the snapshots events integration test. When restoring a snapshot we also restart the rest service and nginx, which might lead to intermittent connection errors. So, I added the error type "ConnectionError" to the except clause.
@@ -17,6 +17,7 @@ import uuid import time import json from datetime import datetime +from requests.exceptions import ConnectionError from integration_tests import AgentlessTestCase from integration_tests.framework.postgresql import run_query from integration_tests.tests.utils import get_resource as resource @@ -182,7 +183,7 @@ class EventsTest(AgentlessTestCase): self, execution, message, timeout_seconds=60): """ It might take longer for events to show up in the DB than it takes for Execution status to return, this method waits until a specific - event is listed in the DB, and wil fail in case of a time out. + event is listed in the DB, and will fail in case of a time out. """ deadline = time.time() + timeout_seconds @@ -196,10 +197,12 @@ class EventsTest(AgentlessTestCase): ) ) # This might fail due to the fact that we're changing the DB in - # real time - it's OK. Just try again + # real time - it's OK. When restoring a snapshot we also restart + # the rest service and nginx, which might lead to intermittent + # connection errors. Just try again try: all_events = self.client.events.list(include_logs=True) - except CloudifyClientError: + except (CloudifyClientError, ConnectionError): pass
set domain_name to 'Unknown' instead of 'not set' for consistency
@@ -861,7 +861,7 @@ class IOSDriver(NetworkDriver): # default values. vendor = u'Cisco' uptime = -1 - serial_number, fqdn, os_version, hostname = (u'Unknown', u'Unknown', u'Unknown', u'Unknown') + serial_number, fqdn, os_version, hostname, domain_name = (u'Unknown',)*5 # obtain output from device show_ver = self._send_command('show version') @@ -890,7 +890,6 @@ class IOSDriver(NetworkDriver): _, os_version = line.split("IOS (tm) ") os_version = os_version.strip() - domain_name = 'not set' # Determine domain_name and fqdn for line in show_hosts.splitlines(): if 'Default domain' in line:
Remove warning from unused temp. Follow-up to (01d323abea91f8b75caffa452f57320b06a57e9e). That PR creates a temp that's only used in the limited API (and therefore causes warnings on all other paths).
@@ -3492,6 +3492,7 @@ class ModuleNode(Nodes.Node, Nodes.BlockNode): code.putln(code.error_goto_if_null(env.module_cname, self.pos)) code.putln("#endif") code.putln("#endif") # CYTHON_PEP489_MULTI_PHASE_INIT + code.putln("CYTHON_UNUSED_VAR(%s);" % module_temp) # only used in limited API code.putln( "%s = PyModule_GetDict(%s); %s" % (
extensions guide: method to implement is __call__ (Not forward().)
@@ -147,7 +147,7 @@ The learning rate will be dropped according to the curve below with :math:`{\rm if self._init is None: self._init = getattr(optimizer, self._attr) - def forward(self, trainer): + def __call__(self, trainer): self._t += 1 optimizer = trainer.updater.get_optimizer('main') @@ -174,4 +174,4 @@ This extension ``PolynomialShift`` takes five arguments. - ``batchsize``: The training mini-batchsize. - ``len_dataset``: The length of the dataset, i.e., the number of data in the training dataset. -This extension calculates the number of iterations which will be performed during training by using ``stop_trigger``, ``batchsize``, and ``len_dataset``, then stores it as a property ``_maxiter``. This property will be used in the :meth:`forward` method to update the learning rate. The :meth:`initialize` method obtains the initial learning rate from the optimizer given to the :class:`~chainer.training.Trainer` object. The :meth:`~chainer.traininig.serialize` method stores or recovers the properties, ``_t`` (number of iterations) and ``_last_value`` (the latest learning rate), belonging to this extension. +This extension calculates the number of iterations which will be performed during training by using ``stop_trigger``, ``batchsize``, and ``len_dataset``, then stores it as a property ``_maxiter``. This property will be used in the :meth:`__call__` method to update the learning rate. The :meth:`initialize` method obtains the initial learning rate from the optimizer given to the :class:`~chainer.training.Trainer` object. The :meth:`~chainer.traininig.serialize` method stores or recovers the properties, ``_t`` (number of iterations) and ``_last_value`` (the latest learning rate), belonging to this extension.
Fix connection being unable to connect to an old alias. Fixes: milvus-io/milvus#16740
@@ -62,7 +62,7 @@ class Connections(metaclass=SingleInstanceMetaClass): """ self._kwargs = {"default": {"host": DefaultConfig.DEFAULT_HOST, "port": DefaultConfig.DEFAULT_PORT, - "user": "",}} + "user": ""}} self._conns = {} def add_connection(self, **kwargs): @@ -159,40 +159,34 @@ class Connections(metaclass=SingleInstanceMetaClass): tmp_port = tmp_kwargs.pop("port", None) gh = GrpcHandler(host=str(tmp_host), port=str(tmp_port), **tmp_kwargs) gh._wait_for_channel_ready() - return gh - if alias in self._conns: + kwargs.pop('password') + self._kwargs[alias] = copy.deepcopy(kwargs) + self._conns[alias] = gh + return + + def dup_alias_consist(cached_kw, current_kw) -> bool: + if "host" in current_kw and cached_kw[alias].get("host") != current_kw.get("host") or \ + ("port" in current_kw and cached_kw[alias].get("port") != current_kw.get("port")) or \ + ("user" in current_kw and cached_kw[alias].get("user") != current_kw.get("user")): + return False + return True + kwargs["user"] = user - if len(kwargs) > 0 and self._kwargs[alias] != kwargs: + if self.has_connection(alias): + if not dup_alias_consist(self._kwargs, kwargs): raise ConnectionConfigException(0, ExceptionsMessage.ConnDiffConf % alias) - # return self._conns[alias] return if alias in self._kwargs: - if len(kwargs) > 0: - if "host" not in kwargs or "port" not in kwargs: + if not dup_alias_consist(self._kwargs, kwargs): raise ConnectionConfigException(0, ExceptionsMessage.NoHostPort) - kwargs["user"] = user - conn = connect_milvus(**kwargs, password=password) - self._kwargs[alias] = copy.deepcopy(kwargs) - self._conns[alias] = conn - return - # return conn - conn = connect_milvus(**self._kwargs[alias], password=password) - self._conns[alias] = conn + connect_milvus(**kwargs, password=password) return - # return conn - if len(kwargs) > 0: if "host" not in kwargs or "port" not in kwargs: raise ConnectionConfigException(0, ExceptionsMessage.NoHostPort) - kwargs["user"] = user - conn = connect_milvus(**kwargs, password=password) - self._kwargs[alias] = copy.deepcopy(kwargs) - self._conns[alias] = conn - return - # return conn - raise ConnectionConfigException(0, ExceptionsMessage.ConnLackConf % alias) + connect_milvus(**kwargs, password=password) def list_connections(self) -> list: """ List names of all connections. @@ -233,15 +227,13 @@ class Connections(metaclass=SingleInstanceMetaClass): return self._kwargs.get(alias, {}) def has_connection(self, alias: str) -> bool: - """ - Retrieves connection configure by alias. + """ Check if connection named alias exists. :param alias: The name of milvus connection :type alias: str - :return dict: - The connection configure which of the name is alias. - If alias does not exist, return empty dict. + :return bool: + if the connection of name alias exists. :example: >>> from pymilvus import connections @@ -253,7 +245,6 @@ class Connections(metaclass=SingleInstanceMetaClass): """ if not isinstance(alias, str): raise ConnectionConfigException(0, ExceptionsMessage.AliasType % type(alias)) - return alias in self._conns def _fetch_handler(self, alias=DefaultConfig.DEFAULT_USING) -> GrpcHandler:
Always use current CCG membership Relates to
@@ -353,7 +353,7 @@ def create_bigquery_views(): sql = """ SELECT prescribing.sha AS sha, - prescribing.pct AS pct, + practices.ccg_id AS pct, prescribing.practice AS practice, COALESCE(bnf_map.current_bnf_code, prescribing.bnf_code) AS bnf_code, @@ -362,14 +362,16 @@ def create_bigquery_views(): prescribing.net_cost AS net_cost, prescribing.actual_cost AS actual_cost, prescribing.quantity AS quantity, - prescribing.month AS month, + prescribing.month AS month FROM ebmdatalab.hscic.prescribing AS prescribing LEFT JOIN ebmdatalab.hscic.bnf_map AS bnf_map ON bnf_map.former_bnf_code = prescribing.bnf_code - + INNER JOIN + ebmdatalab.hscic.practices AS practices + ON practices.code = prescribing.practice """ client = bigquery.client.Client(project='ebmdatalab') dataset = Dataset("hscic", client) @@ -388,6 +390,10 @@ def create_bigquery_views(): 'ebmdatalab.hscic.bnf_map', '[ebmdatalab:hscic.bnf_map]', ) + sql = sql.replace( + 'ebmdatalab.hscic.practices', + '[ebmdatalab:hscic.practices]', + ) table.view_query = sql table.view_use_legacy_sql = True try:
Use numeric limits to define TensorTypeSet(FULL) representation Summary: Pull Request resolved: This also removes an annoying warning about change of sign conversion Test Plan: Run unit tests
@@ -41,7 +41,7 @@ public: TensorTypeSet() : repr_(0) {} TensorTypeSet(Full) - : repr_(-1) {} + : repr_(std::numeric_limits<decltype(repr_)>::max()) {} // Public version of TensorTypeSet(uint64_t) API; external users // must be explicit when they do this! TensorTypeSet(Raw, uint64_t x)
zulip_ops: Add aptitude dependency. This is useful for `aptitude why`.
@@ -5,6 +5,8 @@ class zulip_ops::base { $org_base_packages = [# Management for our systems "openssh-server", "mosh", + # package management + "aptitude", # SSL Certificates "letsencrypt", # Monitoring
Update sg_document-structure.rst. Link to handbook.
Document Structure ================== -Structure and organization are an important part of a document's ease of use and its understandability. Information should be organized and presented in a logical order, with similar subjects grouped together in the same section. - -In most cases, a document has a title, an introductory paragraph, and one or more sections. - -Try to keep only one topic in a page. Shorter topics are easier to reuse in other documents, are easier to write and edit, and are easier to translate. - -Document Title --------------- - -Use a title that accurately reflects the content of the document. People scan the table of contents looking for answers; it's often faster than using the built-in search engine. - -Use title case for document titles. For more information and an example of capitalization, see :ref:`capital`. - -Introductory Paragraph ----------------------- - -Each page should have an introduction that acts as a short description of the document. The short description should be a single paragraph of no more than three sentences. Keep in mind that the description is displayed in the search results along with the page title. People read the description to help them decide if the document is the one that they want. - -Table of Contents ------------------- - -If the document contains more than four sections or subsections, add a table of contents to help the user navigate the document, see :ref:`table-of-contents`. - -Document Sections ------------------ - -To make pages easier for people to quickly scan for the content that they're looking for, break your document up into logical sections. Each section should have a title, and the title should relate to the content of the section. Use title case for the section title. - -A section title is not required if you have only one section. +This page has moved to the `Mattermost handbook <https://handbook.mattermost.com/operations/operations/company-processes/publishing/publishing-guidelines/voice-tone-and-writing-style-guidelines/documentation-style-guide#document-structure>`_.
Update noaa-cdr-fundamental.yaml. Updating contact information.
@@ -6,8 +6,8 @@ Description: | Documentation: https://www.ncdc.noaa.gov/cdr Contact: | For questions regarding the specific CDR data holdings, please contact [email protected].<br /> - For any questions regarding data delivery not associated with this platform or any general questions regarding the NOAA Big Data Program, email [email protected]. - <br /> We also seek to identify case studies on how NOAA data is being used and will be featuring those stories in joint publications and in upcoming events. If you are interested in seeing your story highlighted, please share it with the NOAA BDP team here: [email protected] + For any questions regarding data delivery not associated with this platform or any general questions regarding the NOAA Open Data Dissemination Team (NODD) email us at [email protected]. + <br /> We also seek to identify case studies on how NOAA data is being used and will be featuring those stories in joint publications and in upcoming events. If you are interested in seeing your story highlighted, please share it with the NODD team here: [email protected] ManagedBy: "[NOAA](http://www.noaa.gov/)" UpdateFrequency: Climate Data Records are updated independently. For update frequency for a specific CDR, please refer to the Climate Data Record website. Collabs:
Update src/azure-cli/azure/cli/command_modules/sql/_help.py Fixed typo
@@ -518,7 +518,7 @@ examples: helps['sql midb list'] = """ type: command -short-summary: List maanged databases on a managed instance. +short-summary: List managed databases on a managed instance. examples: - name: List managed databases on a managed instance text: az sql midb list -g mygroup --mi myinstance
Modify the error status reported for a failed capsule Closes-Bug:
@@ -170,7 +170,7 @@ class API(object): host_state = self._schedule_container(context, new_capsule, extra_spec) except Exception as exc: - new_capsule.status = consts.ERROR + new_capsule.status = consts.FAILED new_capsule.status_reason = str(exc) new_capsule.save(context) return
DOCS: Add an example of admonition with warning style I didn't find this useful tip (change the class of an admonition) in the documentation. See and for related but different things.
@@ -560,6 +560,22 @@ must also include a table title. See example above. - ```{important} This is an example of an important directive. ``` +* - ````md + ```{admonition} Title + :class: warning + text + ``` + ```` + - ````md + ```{admonition} This is a title + :class: warning + An example of an admonition with a title and a warning style. + ``` + ```` + - ```{admonition} This is a title + :class: warning + An example of an admonition with a title and a warning style. + ``` `````` ## Figures and images
Update relatedUserSelectorViewTests.es6.js to use the new avatar rendering mechanism. This test was not updated to use the new avatar rendering support recently added to Djblets, which was causing this test to fail. This patch updates the test. Testing Done: This test was run locally, and now passes. Reviewed at
@@ -20,15 +20,30 @@ suite('rb/admin/views/relatedUserSelectorView', function() { it('with inital options', function() { let view = new RB.RelatedUserSelectorView({ $input: $('<input id="id_people" type="hidden">'), - initialOptions: [{"username": "admin", "fullname": - "Admin User", "id": 1, - "avatarURL": "https://secure.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\\u0026d=mm"}, - {"username": "doc", "fullname": "Doc Dwarf", "id": 2, - "avatarURL": "https://secure.gravatar.com/avatar/b0f1ae4342591db2695fb11313114b3e?s=40\\u0026d=mm"}, - {"username": "dopey", "fullname": "Dopey Dwarf", "id": 3, - "avatarURL": "https://secure.gravatar.com/avatar/1a0098e6600792ea4f714aa205bf3f2b?s=40\\u0026d=mm"}], + initialOptions: [{ + username: 'admin', + fullname: 'Admin User', + id: 1, + avatarHTML: { + '20': '<div class="avatar" alt="Admin User"></div>', + }, + }, { + username: 'doc', + fullname: "Doc Dwarf", + id: 2, + avatarHTML: { + '20': '<div class="avatar" alt="Doc Dwarf"></div>', + }, + }, { + username: 'dopey', + fullname: 'Dopey Dwarf', + id: 3, + avatarHTML: { + '20': '<div class="avatar" alt="Dopey Dwarf"></div>', + }, + }], useAvatars: true, - multivalued: true + multivalued: true, }); view.render(); expect(view.options.useAvatars).toBe(true); @@ -61,15 +76,19 @@ suite('rb/admin/views/relatedUserSelectorView', function() { dropdown */ spyOn(view, 'loadOptions').and.callFake(function(query, callback) { callback([{ - avatarURL: "https://secure.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=40\\u0026d=mm", - fullname: "Admin User", + avatarHTML: { + '20': '<div class="avatar" alt="Admin User"></div>', + }, + fullname: 'Admin User', id: 1, - username: "admin", + username: 'admin', }, { - avatarURL: "https://secure.gravatar.com/avatar/b0f1ae4342591db2695fb11313114b3e?s=40\\u0026d=mm", - fullname: "Doc Dwarf", + avatarHTML: { + '20': '<div class="avatar" alt="Doc Dwarf"></div>', + }, + fullname: 'Doc Dwarf', id: 2, - username: "doc", + username: 'doc', }]); });
Handles out of range pagination token datetime Check that "after_datetime" token used for pagination is smaller than the given starting datetime when doing a lookup
@@ -335,6 +335,8 @@ class DocumentLogsModel(SharedModel, ActionLogsDataInterface, ElasticsearchLogsM after_random_id = page_token["random_id"] if after_datetime is not None: + if after_datetime < start_datetime: + return LogEntriesPage([], None) end_datetime = min(end_datetime, after_datetime) all_logs = []
add expires docs to set_cookie. Update the set_cookie docstring to explain what expires accepts and how it behaves, without having to track down httputil.format_timestamp, which is not exposed in the docs (that I could find).
@@ -543,6 +543,10 @@ class RequestHandler(object): Newly-set cookies are not immediately visible via `get_cookie`; they are not present until the next request. + expires may be a numeric timestamp as returned by `time.time`, + a time tuple as returned by `time.gmtime`, or a + `datetime.datetime` object. + Additional keyword arguments are set on the cookies.Morsel directly. See https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel
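A minimal usage sketch (not part of the commit) illustrating the three accepted `expires` forms described in the added docstring; the handler class and cookie names are hypothetical:

import datetime
import time

import tornado.web


class ExampleHandler(tornado.web.RequestHandler):  # hypothetical handler
    def get(self):
        one_hour = 3600
        # numeric timestamp, as returned by time.time()
        self.set_cookie("a", "1", expires=time.time() + one_hour)
        # time tuple, as returned by time.gmtime()
        self.set_cookie("b", "2", expires=time.gmtime(time.time() + one_hour))
        # datetime.datetime object
        self.set_cookie("c", "3",
                        expires=datetime.datetime.utcnow() + datetime.timedelta(hours=1))
        self.write("cookies set")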
Update Docs for building PyTorch for Android. Summary: Pull Request resolved:
@@ -49,6 +49,7 @@ For this you can use `./scripts/build_pytorch_android.sh` script. ``` git clone https://github.com/pytorch/pytorch.git cd pytorch +git submodule update --init --recursive sh ./scripts/build_pytorch_android.sh ```
Update sets.py Update helper text to maintain consistency of constant names between docs and sets_categories_data.py
@@ -99,12 +99,12 @@ def singleton_ingredients(dishes, intersection): """Determine which `dishes` have a singleton ingredient (an ingredient that only appears once across dishes). :param dishes: list - of ingredient sets. - :param intersection: constant - can be one of `<CATEGORY>_INTERSECTION` constants imported from `sets_categories_data.py`. + :param intersection: constant - can be one of `<CATEGORY>_INTERSECTIONS` constants imported from `sets_categories_data.py`. :return: set - containing singleton ingredients. Each dish is represented by a `set` of its ingredients. - Each `<CATEGORY>_INTERSECTION` is an `intersection` of all dishes in the category. `<CATEGORY>` can be any one of: + Each `<CATEGORY>_INTERSECTIONS` is an `intersection` of all dishes in the category. `<CATEGORY>` can be any one of: (VEGAN, VEGETARIAN, PALEO, KETO, or OMNIVORE). The function should return a `set` of ingredients that only appear in a single dish.
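A self-contained sketch of the behaviour the docstring describes: collecting ingredients that appear in exactly one dish. The dishes and the intersection passed below are made up for illustration, and this is not the exercise's reference solution:

from collections import Counter


def singleton_ingredients(dishes, intersection):
    """Return ingredients that appear in exactly one of the given dishes."""
    counts = Counter(ingredient for dish in dishes for ingredient in dish)
    return {ingredient for ingredient, seen in counts.items()
            if seen == 1 and ingredient not in intersection}


dishes = [{"rice", "tofu", "scallion"}, {"rice", "beans"}, {"rice", "corn"}]
print(singleton_ingredients(dishes, {"rice"}))
# {'tofu', 'scallion', 'beans', 'corn'} (order may vary)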
Eliminated "print >>" for python3 Should be backward-compatible.
@@ -26,6 +26,8 @@ import gffutils
 from gffutils.iterators import DataIterator
 import tempfile
 
+from __future__ import print_function
+
 SEQ_DIR = "seq"
 RNASEQ_DIR = "rnaseq"
 SRNASEQ_DIR = "srnaseq"
@@ -76,7 +78,7 @@ def _output_gff3(gff3_file, out_file, dialect):
             attr = {"transcript_id": transcript_id, "gene_id": gene_id}
             attributes = gffutils.attributes.Attributes(attr)
             feature.attributes = attributes
-            print >> out_handle, feature
+            print(feature, file=out_handle)
 
 def _output_ncbi_gff3(gff3_file, out_file, dialect):
     gene_key = "gene"
@@ -103,7 +105,7 @@ def _output_ncbi_gff3(gff3_file, out_file, dialect):
                     "gene_biotype": biotype}
             attributes = gffutils.attributes.Attributes(attr)
             feature.attributes = attributes
-            print >> out_handle, feature
+            print(feature, file=out_handle)
 
 def _is_from_ncbi(gff3_file):
     with open(gff3_file) as in_handle:
configuration/core: Add parameter default to MetaConfig Uses os.environ as the default environment for creation of a MetaConfiguration object; this allows an instance to be created without providing any parameters and fixes re-creating an instance from pod.
@@ -626,7 +626,7 @@ class MetaConfiguration(Configuration): def user_config_file(self): return os.path.join(self.user_directory, 'config.yaml') - def __init__(self, environ): + def __init__(self, environ=os.environ): super(MetaConfiguration, self).__init__() user_directory = environ.pop('WA_USER_DIRECTORY', '') if user_directory:
use WatchedFileHandler instead of RotatingFileHandler This guarantees that the log file will be recreated after it has been deleted.
@@ -40,7 +40,7 @@ config_name = os.getenv("MAESTRAL_CONFIG", "maestral") log_file = get_log_path("maestral", config_name + ".log") log_fmt = logging.Formatter(fmt="%(asctime)s %(name)s %(levelname)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") -rfh = logging.handlers.RotatingFileHandler(log_file, maxBytes=10**5, backupCount=3) +rfh = logging.handlers.WatchedFileHandler(log_file) rfh.setFormatter(log_fmt) rfh.setLevel(CONF.get("app", "log_level_file"))
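A minimal, self-contained sketch of the same pattern outside Maestral: `WatchedFileHandler` checks the file's device and inode on every emit and reopens it if the file was deleted or rotated, so the log file is recreated automatically. The path and logger name are illustrative:

import logging
import logging.handlers

log_file = "/tmp/example-app.log"  # illustrative path

handler = logging.handlers.WatchedFileHandler(log_file)
handler.setFormatter(logging.Formatter(
    fmt="%(asctime)s %(name)s %(levelname)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"))

logger = logging.getLogger("example-app")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.info("written to the original file")
# If log_file is removed here (e.g. by an external cleanup job), the handler
# notices the changed inode on the next emit and reopens the file.
logger.info("written to a freshly recreated file")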
[Jira] document account_id parameter usage of assign_issue for Jira Server
@@ -1099,7 +1099,8 @@ class Jira(AtlassianRestAPI): """Assign an issue to a user. None will set it to unassigned. -1 will set it to Automatic. :param issue: the issue ID or key to assign :type issue: int or str - :param account_id: the account ID of the user to assign the issue to + :param account_id: the account ID of the user to assign the issue to; + for jira server the value for account_id should be a valid jira username :type account_id: str :rtype: bool """
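A hedged usage sketch with the atlassian-python-api client; the server URL, credentials and issue key below are placeholders, not values from the commit:

from atlassian import Jira

jira = Jira(url="https://jira.example.com",      # placeholder server
            username="bot", password="secret")   # placeholder credentials

# Jira Cloud: account_id is the assignee's account ID.
# Jira Server (per the docstring above): pass a valid Jira username instead.
jira.assign_issue("PROJ-123", account_id="jdoe")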
Issue 47: Automatic deployment on PyPI This configures an automated deployment on PyPI. The password is encrypted, such that only Travis CI can decrypt it. Automatic deployment will only be done for tags.
@@ -14,3 +14,10 @@ script: - mpirun pytest --cov=heat after_success: - codecov +deploy: + provider: pypi + user: bhagemeier + password: + secure: jYXgTpuqLjDB4ScljLTeY1LOsc1XeAdzPcG7+WPkDI14ucTvG4szsaxP3RbQXtStGNJYLhENKD9XUlEJOXnWvAsSR/Pk5GRGNGJaD+0G5NB8HwQZvcpAieQz0lfyaanpohFUH3ttowXDMGRz/BaqAYC2KzxIVAib/34WmvG3lR8UpINsRxighYeHhrkapez+44K8LrsBcTI/mEFDLBE44hgg9oDbkg/hs63LGnhM/6Jh4PP4oPatqTVzbDs1rhC/VegAiJC6rRfpXeoEa2kTCR1FckXS+WUrT4CmRgnXyIqnBoaYzT4ep9swd5P07+va8DKEdIXQtaPJovfVJ8cCU/xxLyTtAVAurT4VZWQQRtHPysfuYgEINoaocvbApJl581yR+Q7N9RXWyFwmF0k0kj+eIDtwv1mu5/uPU5VpJVTxxpsMnxKcAOSBHQbDhEEU6S/exK7FbOh3KoEo5Is61+rCwPA2PZK1OYa3kPWHG3kPE83rquv1sP1EmMMrsv03M2fV8KxkNYEZu1dcyTW0+xMqYezUWByClYTAGMqF1KKpkbMbnSAHu7SVJVVdU5w1zMd2wLAJIF/bjtF33ZzRzKfd4ibNtdi8dv6ADl55SoDuEKhirWZdNI9yYQHd3g+2FlgHSxcZfkAoQzuT1S+Rg5E06Fv8v4Fr7Et4EXorP7g= + on: + tags: true
Scroll to top of registration to show validation errors # Conflicts: # contentcuration/contentcuration/frontend/accounts/pages/Create.vue
:lazy-src="require('shared/images/kolibri-logo.svg')" :src="require('shared/images/kolibri-logo.svg')" /> - <h2 class="text-xs-center mb-4 primary--text"> + <h2 ref="top" class="text-xs-center mb-4 primary--text"> {{ $tr('createAnAccountTitle') }} </h2> <VLayout justify-center class="px-4">
Do not show the same photo multiple times This happened on person albums
@@ -367,7 +367,8 @@ class AlbumDateViewSet(viewsets.ModelViewSet): "hidden", "exif_timestamp", "owner", - ), + ) + .distinct(), ), Prefetch( "photos__owner", @@ -433,7 +434,7 @@ class AlbumDateListViewSet(viewsets.ModelViewSet): & Q(photos__faces__person__id=self.request.query_params.get("person")) ) qs = ( - qs.annotate(photo_count=Count("photos")) + qs.annotate(photo_count=Count("photos", distinct=True)) .filter(Q(photo_count__gt=0)) .order_by(F("date").desc(nulls_last=True)) )
SDK - Make it easier to compile and submit a pipeline run * SDK - Make it easier to compile and submit a pipeline run Adds the `Client.run_pipeline_func_on_kfp(func)` function that compiles the pipeline and submits it for execution on KFP-enabled cluster. * Renamed run_pipeline_func_on_kfp to create_run_from_pipeline_func
@@ -18,9 +18,11 @@ import logging import json import os import tarfile +import tempfile import zipfile import yaml from datetime import datetime +from typing import Mapping, Callable import kfp_server_api @@ -255,6 +257,43 @@ class Client(object): IPython.display.display(IPython.display.HTML(html)) return response.run + def create_run_from_pipeline_func(self, pipeline_func: Callable, arguments: Mapping[str, str], run_name=None, experiment_name=None): + '''Runs pipeline on KFP-enabled Kubernetes cluster. + This command compiles the pipeline function, creates or gets an experiment and submits the pipeline for execution. + + Args: + pipeline_func: A function that describes a pipeline by calling components and composing them into execution graph. + arguments: Arguments to the pipeline function provided as a dict. + run_name: Optional. Name of the run to be shown in the UI. + experiment_name: Optional. Name of the experiment to add the run to. + ''' + + class RunPipelineResult: + def __init__(self, client, run_info): + self._client = client + self.run_info = run_info + self.run_id = run_info.id + + def wait_for_run_completion(self, timeout=None): + timeout = timeout or datetime.datetime.max - datetime.datetime.min + return self._client.wait_for_run_completion(timeout) + + def __str__(self): + return '<RunPipelineResult(run_id={})>'.format(self.run_id) + + #TODO: Check arguments against the pipeline function + pipeline_name = pipeline_func.__name__ + experiment_name = experiment_name or 'Default' + run_name = run_name or pipeline_name + ' ' + datetime.now().strftime('%Y-%m-%d %H-%M-%S') + experiment = self.create_experiment(name=experiment_name) + try: + (_, pipeline_package_path) = tempfile.mkstemp(suffix='.zip') + kfp.compiler.Compiler().compile(pipeline_func, pipeline_package_path) + run_info = self.run_pipeline(experiment.id, run_name, pipeline_package_path, arguments) + return RunPipelineResult(self, run_info) + finally: + os.remove(pipeline_package_path) + def list_runs(self, page_token='', page_size=10, sort_by='', experiment_id=None): """List runs. Args:
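A rough usage sketch of the new helper; the pipeline definition, argument names and experiment name are hypothetical and assume a reachable KFP endpoint:

import kfp
import kfp.dsl as dsl


@dsl.pipeline(name="example", description="hypothetical single-step pipeline")
def example_pipeline(message="hello"):
    dsl.ContainerOp(name="echo", image="alpine", command=["echo", message])


client = kfp.Client()  # assumes in-cluster or otherwise configured access
result = client.create_run_from_pipeline_func(
    example_pipeline,
    arguments={"message": "hello world"},
    run_name="example run",
    experiment_name="Default")
print(result.run_id)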
Use a lock to serialise checkpointing. Prior to this, parsl/tests/test_checkpointing/test_regression_233.py was failing on benc's laptop fairly regularly - within 10 iterations of `while pytest parsl/tests/test_checkpointing/test_regression_233.py; do echo TICK; done`
@@ -81,6 +81,8 @@ class DataFlowKernel(object): logger.info("Parsl version: {}".format(get_version())) logger.info("Libsubmit version: {}".format(libsubmit.__version__)) + self.checkpoint_lock = threading.Lock() + # Update config with defaults self._config = update_config(config, self.rundir) @@ -613,7 +615,7 @@ class DataFlowKernel(object): By default the checkpoints are written to the RUNDIR of the current run under RUNDIR/checkpoints/{tasks.pkl, dfk.pkl} """ - + with self.checkpoint_lock: checkpoint_queue = None if tasks: checkpoint_queue = tasks @@ -643,7 +645,6 @@ class DataFlowKernel(object): for task_id in checkpoint_queue: if not self.tasks[task_id]['checkpoint'] and \ self.tasks[task_id]['status'] == States.done: - hashsum = self.tasks[task_id]['hashsum'] if not hashsum: continue
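The locking pattern in isolation: a `threading.Lock` serialising a checkpoint method so two concurrent callers cannot interleave their writes. The class and file name are illustrative, not Parsl code:

import pickle
import threading


class Checkpointer:  # illustrative stand-in for the DataFlowKernel
    def __init__(self, path="checkpoint.pkl"):
        self.path = path
        self.checkpoint_lock = threading.Lock()
        self.completed = {}

    def checkpoint(self, tasks):
        # Only one thread at a time may update state and write the file;
        # a second caller blocks here instead of racing the first.
        with self.checkpoint_lock:
            self.completed.update(tasks)
            with open(self.path, "wb") as f:
                pickle.dump(self.completed, f)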
Update generic.txt > ta569.txt
@@ -17214,11 +17214,6 @@ http://52.221.14.194 http://54.254.144.12 php.ooo -# Reference: https://twitter.com/malware_traffic/status/1574848307519754242 -# Reference: https://github.com/brad-duncan/IOCs/blob/main/2022-09-27-TA569-Soc-Gholish-IOCs.txt - -dotimewat.com - # Reference: https://www.virustotal.com/gui/file/a02a0d8e8fd14382339abf67a9e015849d8479ad58e82f9621b3d08ab287fb2e/detection mrlee.eu.org
Rewrite chunks of Set Dropdown Value so it will work on firefox. Apparently, SeleniumLibrary's scroll_element_into_view doesn't seem to work for elements in a scrollable div on Firefox. Also, the use of ActionChains seems unnecessary, so I remove them.
@@ -10,8 +10,8 @@ file. """ import re -from selenium.webdriver.common.action_chains import ActionChains from SeleniumLibrary.errors import ElementNotFound +from selenium.common.exceptions import NoSuchElementException from cumulusci.robotframework.pageobjects import pageobject from cumulusci.robotframework.pageobjects import BasePage from cumulusci.robotframework.utils import capture_screenshot_on_error @@ -151,22 +151,27 @@ class ModalMixin: if input_element is None: raise Exception(f"No form element found with the label '{label}'") - self.selenium.scroll_element_into_view(input_element) + # SeleniumLibrary's scroll_element_into_view doesn't seem to work + # for elements in a scrollable div on Firefox. Javascript seems to + # work though. + self.selenium.driver.execute_script( + "arguments[0].scrollIntoView()", input_element + ) trigger = input_element.find_element_by_class_name("uiPopupTrigger") - ac = ActionChains(self.selenium.driver) - ac.move_to_element(trigger).click().perform() + trigger.click() + + try: popup_locator = ( "//div[contains(@class, 'uiPopupTarget')][contains(@class, 'visible')]" ) - try: popup = self.selenium.get_webelement(popup_locator) except ElementNotFound: raise ElementNotFound("Timed out waiting for the popup menu") + try: value_element = popup.find_element_by_link_text(value) - ac = ActionChains(self.selenium.driver) - ac.move_to_element(value_element).click().perform() - except Exception: + value_element.click() + except NoSuchElementException: raise Exception(f"Dropdown value '{value}' not found") @capture_screenshot_on_error
streamalert - rules.rst - update logs req
@@ -90,7 +90,7 @@ Parameter Details logs ~~~~~~~~~~~ -``logs`` define the log sources the rule supports; The ``def`` function block is not run unless this condition is satisfied. +``logs`` define the log sources the rule supports; The ``def`` function block is not run unless this condition is satisfied. Your rule(s) must define at least one log source. A rule can be run against multiple log sources if desired.
added default version This fixes the case when there is no existing version published yet.
@@ -1024,6 +1024,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): version = pype.api.get_latest_version(asset, subset) if version: version = int(version["name"]) + 1 + else: + version = 1 template_data["subset"] = subset template_data["family"] = "render"
Remove /sr.* pattern from test The /sr.* pattern should raise an exception.
@@ -1184,7 +1184,7 @@ class FilemodLineTests(TestCase, LoaderModuleMockMixin): cfg_content = '- /srv/custom' isfile_mock = MagicMock(side_effect=lambda x: True if x == name else DEFAULT) - for before_line in ['/srv/salt', '/srv/sa.*t', '/sr.*']: + for before_line in ['/srv/salt', '/srv/sa.*t']: with patch('os.path.isfile', isfile_mock), \ patch('os.stat', MagicMock(return_value=DummyStat())), \ patch('salt.utils.files.fopen',
timestamp error catch Added error catch for constructing timestamps when reading Madrigal HDF5 files
@@ -98,6 +98,12 @@ def load(fnames, tag=None, sat_id=None, xarray_coords=[]): # lowercase variable names data.columns = [item.lower() for item in data.columns] # datetime index from times + time_keys = np.array(['year', 'month', 'day', 'hour', 'min', 'sec']) + if not np.all([key in data.columns for key in time_keys]): + time_keys = [key for key in time_keys if key not in data.columns] + raise ValueError("unable to construct time index, missing " + + "{:}".format(time_keys)) + time = pysat.utils.create_datetime_index(year=data.loc[:,'year'], month=data.loc[:,'month'], day=data.loc[:,'day'],
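A standalone sketch of the guard added above plus the index construction it protects; the DataFrame is made up, and `pandas.to_datetime` stands in for `pysat.utils.create_datetime_index`:

import numpy as np
import pandas as pd

data = pd.DataFrame({"year": [2019, 2019], "month": [1, 1], "day": [1, 1],
                     "hour": [0, 1], "min": [0, 0], "sec": [0, 0]})

time_keys = np.array(["year", "month", "day", "hour", "min", "sec"])
if not np.all([key in data.columns for key in time_keys]):
    missing = [key for key in time_keys if key not in data.columns]
    raise ValueError("unable to construct time index, missing "
                     + "{:}".format(missing))

# pandas expects 'minute'/'second' column names when assembling datetimes
times = pd.to_datetime(data.rename(columns={"min": "minute", "sec": "second"}))
print(times)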
Add instruction to anaconda prompt Add instruction to use Anaconda prompt on Windows.
@@ -20,7 +20,7 @@ Ross's code was tested in Python 3.6 and higher. Install ross ------------ -You can install the latest release version with:: +Using the terminal (or the Anaconda prompt if on Windows) you can install the latest release version with:: pip install ross-rotordynamics
CI: use docker image cimg/python, not circleci/python The circleci/python images appear to be outdated and do not support python3.11.
@@ -11,9 +11,10 @@ commands: - run: name: "Preparing environment" command: | + python --version # check that python is installed sudo apt-get update sudo apt-get install -y openjdk-11-jre - sudo pip install nox + pip install nox - run: name: "Testing OmegaConf" command: | @@ -26,7 +27,7 @@ jobs: py_version: type: string docker: - - image: circleci/python:<< parameters.py_version >> + - image: cimg/python:<< parameters.py_version >> steps: - linux: py_version: << parameters.py_version >>
Windows: When using MinGW require it to be no older than CondaCC * That one is fairly old, and older will only cause issues we do not want to deal with.
@@ -727,6 +727,15 @@ The gcc compiler %s (version %s) doesn't have the sufficient \ version (>= 4.4).""" % (the_compiler, gcc_version) ) + # CondaCC or newer. + if mingw_mode and gcc_version < "5.3": + sys.exit( + """\ +The MinGW64 compiler %s (version %s) doesn't have the sufficient \ +version (>= 5.3).""" % (the_compiler, gcc_version) + ) + + # Older g++ complains about aliasing with Py_True and Py_False, but we don't # care. if gcc_version < "4.5":
Don't inadvertently copy over a bunch of foreign keys Manually specify which fields to copy, handle the known foreign key objects explicitly.
@@ -168,12 +168,18 @@ class Command(BaseCommand): location_types_map[old_id] = new_id # MPTT sorts this queryset so we can just save in the same order - new_loc_ids_by_code = {} + new_loc_pks_by_code = {} for loc in SQLLocation.active_objects.filter(domain=self.existing_domain): - loc.location_id = '' - loc.parent_id = new_loc_ids_by_code[loc.parent.site_code] if loc.parent_id else None - _, new_id = self.save_sql_copy(loc, self.new_domain) - new_loc_ids_by_code[loc.site_code] = new_id + # start with a new location so we don't inadvertently copy over a bunch of foreign keys + new_loc = SQLLocation() + for field in ["name", "site_code", "external_id", "metadata", + "is_archived", "latitude", "longitude"]: + setattr(new_loc, field, getattr(loc, field, None)) + new_loc.domain = self.new_domain + new_loc.parent_id = new_loc_pks_by_code[loc.parent.site_code] if loc.parent_id else None + new_loc.location_type_id = location_types_map[loc.location_type_id] + _, new_pk = self.save_sql_copy(new_loc, self.new_domain) + new_loc_pks_by_code[new_loc.site_code] = new_pk existing_fixture_config = LocationFixtureConfiguration.for_domain(self.existing_domain) self.save_sql_copy(existing_fixture_config, self.new_domain)
BUGFIX - Fixes assertion error caused by None type If the call to omdb failed and returned 'None', this would cause an error when ARM later tried to add the 'background_url'
@@ -187,6 +187,7 @@ def call_omdb_api(title=None, year=None, imdb_id=None, plot="short"): try: title_info_json = urllib.request.urlopen(strurl).read() title_info = json.loads(title_info_json.decode()) + title_info['background_url'] = None app.logger.debug(f"omdb - {title_info}") except urllib.error.HTTPError as e: app.logger.debug(f"omdb call failed with error - {e}") @@ -740,7 +741,6 @@ def metadata_selector(func, query="", year="", imdb_id=""): return call_omdb_api(str(query), str(year)) elif func == "get_details": s = call_omdb_api(title=str(query), year=str(year), imdb_id=str(imdb_id), plot="full") - s['background_url'] = None return s app.logger.debug(cfg['METADATA_PROVIDER']) app.logger.debug("unknown provider - doing nothing, saying nothing. Getting Kryten")
Move date formatting into a function We no longer need to replace dashes with slashes in the date string as we don't support IE8 any longer and IE9 upwards can parse the dates as they are.
@@ -474,9 +474,7 @@ var analyseChart = { stepped: true, format: { to: function(val) { - var d = _this.globalOptions.allMonths[val]; - return Highcharts.dateFormat('%b \'%y', - new Date(d.replace(/-/g, '/'))); + return formatDate(_this.globalOptions.allMonths[val]); }, from: function(val) { return _this.globalOptions.allMonths.indexOf[val] + 1; @@ -503,9 +501,7 @@ var analyseChart = { stepped: true, format: { to: function(val) { - var d = _this.globalOptions.allMonths[val]; - return Highcharts.dateFormat('%b \'%y', - new Date(d.replace(/-/g, '/'))); + return formatDate(_this.globalOptions.allMonths[val]); }, from: function(val) { return _this.globalOptions.allMonths.indexOf[val] + 1; @@ -531,4 +527,9 @@ var analyseChart = { }, }; + +function formatDate(dateStr) { + return Highcharts.dateFormat("%b '%y", new Date(dateStr)); +} + module.exports = analyseChart;
bd_rate: Parse multiconfig presets correctly With the new multiconfig options, we are parsing and storing them in a new folder structure, so we need to treat them differently.
@@ -377,6 +377,7 @@ try: print("Runs do not match.") sys.exit(1) task = info_data[0]["task"] + codec = info_data[0]["codec"] except FileNotFoundError: # no info.json, using bare directories print("Couldn't open", args.run[0]) @@ -403,17 +404,25 @@ if info_data and not args.overlap: if info_data: for video in videos: + run_a = args.run[0] + "/" + task + "/" + video + args.suffix + run_b = args.run[1] + "/" + task + "/" + video + args.suffix + if 'ctcPresets' in info_data[0].keys(): + if len(info_data[0]["ctcPresets"]) > 1: + run_a = args.run[0] + "/" + codec + "/" + task + "/" + video + args.suffix + if 'ctcPresets' in info_data[1].keys(): + if len(info_data[1]["ctcPresets"]) > 1: + run_b = args.run[1] + "/" + codec + "/" + task + "/" + video + args.suffix if args.overlap: metric_data[video] = bdrate( - args.run[0] + "/" + task + "/" + video + args.suffix, - args.run[1] + "/" + task + "/" + video + args.suffix, + run_a, + run_b, None, args.fullrange, ) else: metric_data[video] = bdrate( - args.run[0] + "/" + task + "/" + video + args.suffix, - args.run[1] + "/" + task + "/" + video + args.suffix, + run_a, + run_b, args.anchordir[0] + "/" + sets[task]["anchor"]
UserGuide.yml trivial typo contianer
containers are also purposefully isolated from the host system, so in order to run a tool inside a Docker container there is additional work to ensure that input files are available inside the container and output - files can be recovered from the contianer. CWL can perform this work + files can be recovered from the container. CWL can perform this work automatically, allowing you to use Docker to simplify your software management while avoiding the complexity of invoking and managing Docker containers.
feat: Run build command for all apps After `bench build` is run, it will check the `package.json` of every installed app and run `yarn build` if a build script exists.
@@ -2,6 +2,7 @@ const fs = require('fs'); const path = require('path'); const chalk = require('chalk'); const rollup = require('rollup'); +const { execSync } = require('child_process'); const log = console.log; // eslint-disable-line const { @@ -26,18 +27,25 @@ create_build_file(); if (build_for_app) { build_assets_for_app(build_for_app) + .then(() => { + run_build_command_for_app(build_for_app); + }) } else { - build_assets_for_all_apps(); + build_assets_for_all_apps() + .then(() => { + run_build_command_for_apps() + }); } + function build_assets_for_all_apps() { - run_serially( + return run_serially( apps_list.map(app => () => build_assets(app)) ); } function build_assets_for_app(app) { - build_assets(app) + return build_assets(app) } function build_assets(app) { @@ -102,6 +110,26 @@ function create_build_file() { touch(path.join(sites_path, '.build'), { force: true }); } +function run_build_command_for_apps() { + let cwd = process.cwd(); + apps_list.map(app => run_build_command_for_app(app)) + process.chdir(cwd); +} + +function run_build_command_for_app(app) { + if (app === 'frappe') return; + let root_app_path = path.resolve(get_app_path(app), '..'); + let package_json = path.resolve(root_app_path, 'package.json'); + if (fs.existsSync(package_json)) { + let package = require(package_json); + if (package.scripts && package.scripts.build) { + console.log('\nRunning build command for', chalk.bold(app)); + process.chdir(root_app_path); + execSync('yarn build', { encoding: 'utf8', stdio: 'inherit' }); + } + } +} + function ensure_js_css_dirs() { const paths = [ path.resolve(assets_path, 'js'),
help-docs: Update accessing private messages information. Removes unnecessary mention of the Mac keyboard shortcut because the documentation updates when a Mac keyboard is detected. Also, removes the reference to accessing group private messages from the right sidebar.
@@ -34,11 +34,11 @@ There are several ways to access an existing PM or group PM. * For recent conversations, click on **Private messages** near the top of the left sidebar. -* Click on any user or group in the right sidebar. -* Start typing a user's name in [search](/help/search-for-messages). You'll be - able to select PMs or group PMs with that user. -* Open the compose box, and enter in a (list of) users. Type `Ctrl + .` (or - `Cmd + .` on a mac) to open that conversation. +* Click on any user in the right sidebar. +* Start typing a user's name in [search](/help/search-for-messages). + You'll be able to select PMs or group PMs with that user. +* Open the compose box, and enter in a list of users on the **To:** + line. Type `Ctrl` + `.` to open that conversation. ## Related articles
`select_related` user profile when listing users This is used for the user avatar
@@ -98,7 +98,7 @@ def index(request, *args): else: ordering = 'name' - paginator = Paginator(users, per_page=20) + paginator = Paginator(users.select_related('wagtail_userprofile'), per_page=20) users = paginator.get_page(request.GET.get('p')) if request.is_ajax():
Fix, need to track fallback variable usage in classes too. * Functions with local classes that were using outer functions' closure variables were not properly generating code to provide those values.
@@ -223,6 +223,8 @@ class VariableClosureLookupVisitorPhase1(VisitorNoopMixin): ), source_ref = node.source_ref ) + + variable.addVariableUser(provider) else: new_node = ExpressionLocalsVariableRef( locals_scope = provider.getFunctionLocalsScope(),
config: change tpm_hash_alg to SHA1 by default This ensures that Keylime works out of the box on older kernel versions.
@@ -115,12 +115,14 @@ max_retries = 10 # TPM2-specific options, allows customizing default algorithms to use. # Specify the default crypto algorithms to use with a TPM2 for this agent. +# On kernel versions less than 5.10 choose sha1 as hash algorithm, because +# otherwise IMA attestation will fail. # # Currently accepted values include: # - hashing: sha512, sha384, sha256 or sha1 # - encryption: ecc or rsa # - signing: rsassa, rsapss, ecdsa, ecdaa or ecschnorr -tpm_hash_alg = sha256 +tpm_hash_alg = sha1 tpm_encryption_alg = rsa tpm_signing_alg = rsassa
Add threshold for infinite scroll loading In Chrome at 75% zoom the scroll position can be a non-integer value; sometimes it can even go negative. Using a threshold instead of an equality check avoids this.
@@ -11,12 +11,15 @@ library.add( faCaretSquareUp ); +const BOTTOM_DISTANCE_TRESHOLD = 5; + /** * A scrollable container that after rendering an initial amount of items will * only render the rest of them (in chunks) if they are required, or in this * case, when the user scrolls to the bottom of the container. */ class InfiniteScroll extends Component { + constructor(props) { super(props); @@ -41,7 +44,9 @@ class InfiniteScroll extends Component { * @public */ static isContainerScrolledToTheBottom(el) { - return el.getScrollHeight() - el.getScrollTop() === el.getClientHeight(); + const distanceFromBottom = el.getScrollHeight() - + (el.getScrollTop() + el.getClientHeight()); + return distanceFromBottom < BOTTOM_DISTANCE_TRESHOLD; } /**
Improve compare_notebook and detect a difference when the final line return is missing
@@ -16,20 +16,25 @@ _BLANK_LINE = re.compile(r'^\s*$') def _multilines(obj): try: - return obj.splitlines() + lines = obj.splitlines() + return lines + [''] if obj.endswith('\n') else lines except AttributeError: # Remove the final blank space on Python 2.7 # return json.dumps(obj, indent=True, sort_keys=True).splitlines() return [line.rstrip() for line in json.dumps(obj, indent=True, sort_keys=True).splitlines()] -def compare(actual, expected): +def compare(actual, expected, return_diff=False): """Compare two strings, lists or dict-like objects""" if actual != expected: - raise AssertionError('\n' + '\n'.join(difflib.unified_diff( + diff = '\n'.join(difflib.unified_diff( _multilines(actual), _multilines(expected), - 'first', 'second', lineterm=''))) + 'first', 'second', lineterm='')) + if return_diff: + return diff + raise AssertionError('\n' + diff) + return '' def filtered_cell(cell, preserve_outputs, cell_metadata_filter): @@ -166,12 +171,11 @@ def compare_notebooks(notebook_actual, notebook_expected, fmt=None, allow_expect # 3. bis test entire cell content if not same_content(ref_cell.source, test_cell.source, allow_removed_final_blank_line): - try: - compare(ref_cell.source, test_cell.source) - except AssertionError as error: + if ref_cell.source != test_cell.source: if raise_on_first_difference: + diff = compare(ref_cell.source, test_cell.source, return_diff=True) raise NotebookDifference("Cell content differ on {} cell #{}: {}" - .format(test_cell.cell_type, i, str(error))) + .format(test_cell.cell_type, i, diff)) modified_cells.add(i) if not compare_outputs:
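A strings-only copy of the reworked helper, trimmed for illustration, showing that a missing final line return is now reported as a difference:

import difflib


def _multilines(obj):
    lines = obj.splitlines()
    return lines + [''] if obj.endswith('\n') else lines


def compare(actual, expected, return_diff=False):
    """Compare two strings; return or raise a unified diff when they differ."""
    if actual != expected:
        diff = '\n'.join(difflib.unified_diff(
            _multilines(actual), _multilines(expected),
            'first', 'second', lineterm=''))
        if return_diff:
            return diff
        raise AssertionError('\n' + diff)
    return ''


print(compare("a = 1\n", "a = 1", return_diff=True))         # prints a small unified diff
print(repr(compare("a = 1\n", "a = 1\n", return_diff=True)))  # '' (no difference)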
Added RTD to project_urls Added a link to RTD to project_urls so it's easier to find the documentation from the pypi site.
[metadata] name = jsonschema url = https://github.com/Julian/jsonschema +project_urls = + Docs = https://python-jsonschema.readthedocs.io/en/stable/ description = An implementation of JSON Schema validation for Python long_description = file: README.rst author = Julian Berman
Update plot_j1j2.py Correct plotting script
@@ -15,7 +15,7 @@ sigma = [] evar = [] evarsig = [] -data = json.load(open('test2.log')) +data = json.load(open('test.log')) for iteration in data["Output"]: iters.append(iteration["Iteration"]) energy.append(iteration["Energy"]["Mean"])