Columns: message (string, length 13 to 484), diff (string, length 38 to 4.63k)
Improve detection of whether a backup was created when parsing the log. Task: Story:
 import os
+import sys
+
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import importutils
-import sys

 topdir = os.path.normpath(
     os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir))
Refactored stackcreate function to return full testdata. Takes in: taskcat_cfg (taskcat cfg as yaml object), test_list as list, sprefix (special prefix) as string. Returns: list_of_test - Each element in the list is a dict of test_names -- Each item in the dict contains data returned from create_stack: 'StackID' and 'ResponseMetadata'
@@ -41,10 +41,11 @@ def main():
     tcat_instance.stage_in_s3(taskcat_cfg)
     tcat_instance.validate_template(taskcat_cfg, test_list)
     tcat_instance.validate_parameters(taskcat_cfg, test_list)
-    stackinfo = tcat_instance.stackcreate(taskcat_cfg, test_list, 'tag')
-    tcat_instance.get_stackstatus(stackinfo, 5)
-    tcat_instance.createreport(test_list, stackinfo, 'taskcat-results.html')
-    tcat_instance.cleanup(stackinfo, 5)
+    testdata = tcat_instance.stackcreate(taskcat_cfg, test_list, 'tag')
+    tcat_instance.get_stackstatus(testdata, 5)
+    quit()
+    tcat_instance.createreport(test_list, testdata, 'taskcat-results.html')
+    tcat_instance.cleanup(testdata, 5)
 # --End
Update dynamic_domain.txt. A little bit later I'll try to process these two lists: and (especially)
@@ -2555,6 +2555,15 @@
 my-router.de
 my-gateway.de
 
+# Reference: https://gist.github.com/neu5ron/8dd695d4cb26b6dcd997#gistcomment-2141306 (# spdyn.de domains)
+
+firewall-gateway.com
+firewall-gateway.de
+firewall-gateway.net
+my-firewall.org
+myfirewall.org
+spdns.org
+
 # Reference: https://www.virustotal.com/gui/domain/aidyn.net/relations
 
 aidyn.net
Fix build when included by another project; take 2 Summary: Only adding `include_directories` doesn't propagate to the including targets. Also use `target_include_directories` to do so. Closes
@@ -62,7 +62,8 @@
 configure_file(config.h.in config.h)
 # Prepend include path so that generated config.h is picked up.
 # Note that it is included as "gloo/config.h" to add parent directory.
-include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/..)
+get_filename_component(PARENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR} DIRECTORY)
+include_directories(BEFORE ${PARENT_BINARY_DIR})
 
 add_library(gloo ${GLOO_STATIC_OR_SHARED} ${GLOO_SRCS})
 target_link_libraries(gloo ${gloo_DEPENDENCY_LIBS})
@@ -78,8 +79,11 @@ if(USE_CUDA)
   endif()
 endif()
 
-get_filename_component(PARENT_DIR ${CMAKE_CURRENT_SOURCE_DIR} DIRECTORY)
-include_directories(${PARENT_DIR})
+# Add path to gloo/config.h so it is picked up by parent projects.
+target_include_directories(gloo BEFORE PUBLIC ${PARENT_BINARY_DIR})
+if(USE_CUDA)
+  target_include_directories(gloo_cuda BEFORE PUBLIC ${PARENT_BINARY_DIR})
+endif()
 
 # Install if necessary.
 # If the Gloo build is included from another project's build, it may
fix(match_main_contract): fix match_main_contract interface fix match_main_contract interface
@@ -426,5 +426,5 @@ if __name__ == '__main__':
     stock_a_pe_df = stock_a_pe(symbol="kc")
     print(stock_a_pe_df)
-    stock_a_pe_df = stock_a_pe(symbol="000300.XSHG")
+    stock_a_pe_df = stock_a_pe(symbol="000016.XSHG")
     print(stock_a_pe_df)
implemented synching referenced files in workfile When workfile is synched, it checks for referenced files (added by Loader) and tries to sync them too.
from openpype.modules import ModulesManager from openpype.pipeline import load +:from openpype.lib.avalon_context import get_linked_ids_for_representations +from openpype.modules.sync_server.utils import SiteAlreadyPresentError class AddSyncSite(load.LoaderPlugin): - """Add sync site to representation""" + """Add sync site to representation + + If family of synced representation is 'workfile', it looks for all + representations which are referenced (loaded) in workfile with content of + 'inputLinks'. + It doesn't do any checks for site, most common use case is when artist is + downloading workfile to his local site, but it might be helpful when + artist is re-uploading broken representation on remote site also. + """ representations = ["*"] families = ["*"] @@ -12,21 +22,61 @@ class AddSyncSite(load.LoaderPlugin): icon = "download" color = "#999999" + _sync_server = None + + @property + def sync_server(self): + if not self._sync_server: + manager = ModulesManager() + self._sync_server = manager.modules_by_name["sync_server"] + + return self._sync_server + def load(self, context, name=None, namespace=None, data=None): self.log.info("Adding {} to representation: {}".format( data["site_name"], data["_id"])) - self.add_site_to_representation(data["project_name"], - data["_id"], - data["site_name"]) + family = context["representation"]["context"]["family"] + project_name = data["project_name"] + repre_id = data["_id"] + + add_ids = [repre_id] + if family == "workfile": + links = get_linked_ids_for_representations(project_name, + add_ids, + link_type="reference") + add_ids.extend(links) + + add_ids = set(add_ids) + self.log.info("Add to repre_ids {}".format(add_ids)) + is_main = True + for add_repre_id in add_ids: + self.add_site_to_representation(project_name, + add_repre_id, + data["site_name"], + is_main) + is_main = False + self.log.debug("Site added.") - @staticmethod - def add_site_to_representation(project_name, representation_id, site_name): - """Adds new site to representation_id, resets if exists""" - manager = ModulesManager() - sync_server = manager.modules_by_name["sync_server"] - sync_server.add_site(project_name, representation_id, site_name, - force=True) + def add_site_to_representation(self, project_name, representation_id, + site_name, is_main): + """Adds new site to representation_id, resets if exists + + Args: + project_name (str) + representation_id (ObjectId): + site_name (str) + is_main (bool): true for really downloaded, false for references, + force redownload main file always, for references only if + broken + """ + try: + self.sync_server.add_site(project_name, representation_id, + site_name, + force=is_main, + force_only_broken=not is_main) + except SiteAlreadyPresentError: + self.log.debug("Site present", exc_info=True) def filepath_from_context(self, context): """No real file loading"""
Downgrade master CI checks to nightly cadence. CI seems to be getting bogged down, re-running the full CI suite every time we push to master. This downgrades the health checks to a nightly cadence instead of after merging every PR.
 name: Continuous Integration
 
 on:
-  # Trigger the workflow on push or pull request,
-  # but only for the master branch
-  push:
-    branches:
-      - master
+  schedule:
+    # Checks out master by default.
+    - cron: '0 0 * * *'
   pull_request:
     branches:
       - master
Fixes sentry-native crashpad compilation on Linux Without this patch sentry does not compile, ref.
@@ -24,6 +24,12 @@ sources:
     url: "https://github.com/getsentry/sentry-native/releases/download/0.2.6/sentry-native-0.2.6.zip"
     sha256: "0d93bd77f70a64f3681d4928dfca6b327374218a84d33ee31489114d8e4716c0"
 patches:
+  "0.4.12":
+    - patch_file: "patches/0.4.xx-CXX-14.patch"
+      base_path: "source_subfolder"
+  "0.4.11":
+    - patch_file: "patches/0.4.xx-CXX-14.patch"
+      base_path: "source_subfolder"
   "0.4.10":
     - patch_file: "patches/0.4.xx-CXX-14.patch"
       base_path: "source_subfolder"
ebuild.domain: force external repos to use their location as a repo_id Otherwise multiple repos can easily have the same repo_id, e.g. a copy of the gentoo tree from rsync and another copy of it as a git checkout.
@@ -531,7 +531,7 @@ class domain(config_domain):
             path = os.path.abspath(path)
             if not os.path.isdir(os.path.join(path, 'profiles')):
                 raise TypeError('invalid repo: %r' % path)
-            repo_config = RepoConfig(path)
+            repo_config = RepoConfig(path, config_name=path)
            repo_obj = ebuild_repo.tree(config, repo_config)
            location = repo_obj.location
            self.repos_raw[location] = repo_obj
Update the link for reporting data to a database Link was killed in: And the new link was moved again in: As of today, the other two links on this line still appear to be correct.
@@ -54,7 +54,7 @@ Even though Locust primarily works with web sites/services, it can be used to te
 
 ## Hackable
 
-Locust's code base is intentionally kept small and doesn't solve everything out of the box. Instead, we try to make it easy to adapt to any situation you may come across, using regular Python code. If you want to [send reporting data to that database & graphing system you like](https://github.com/SvenskaSpel/locust-plugins/blob/master/examples/timescale_listener_ex.py), [wrap calls to a REST API](https://github.com/SvenskaSpel/locust-plugins/blob/master/examples/rest_ex.py) to handle the particulars of your system or run a [totally custom load pattern](https://docs.locust.io/en/latest/custom-load-shape.html#custom-load-shape), there is nothing stopping you!
+Locust's code base is intentionally kept small and doesn't solve everything out of the box. Instead, we try to make it easy to adapt to any situation you may come across, using regular Python code. If you want to [send reporting data to that database & graphing system you like](https://github.com/SvenskaSpel/locust-plugins/blob/master/locust_plugins/dashboards/README.md), [wrap calls to a REST API](https://github.com/SvenskaSpel/locust-plugins/blob/master/examples/rest_ex.py) to handle the particulars of your system or run a [totally custom load pattern](https://docs.locust.io/en/latest/custom-load-shape.html#custom-load-shape), there is nothing stopping you!
 
 ## Links
Minor code style fixes Make spacing after function definition consistent with rest of the codebase
@@ -191,6 +191,7 @@ class MLflowCallback(object): Args: study: Study to be tracked in MLflow. """ + # This sets the `tracking_uri` for MLflow. if self._tracking_uri is not None: mlflow.set_tracking_uri(self._tracking_uri) @@ -205,6 +206,7 @@ class MLflowCallback(object): trial: Trial to be tracked. study: Study to be tracked. """ + tags: Dict[str, str] = {} tags["number"] = str(trial.number) tags["datetime_start"] = str(trial.datetime_start) @@ -255,4 +257,5 @@ class MLflowCallback(object): Args: params: Trial params. """ + mlflow.log_params(params)
models: Change return type of get_human_admin_users to QuerySet. I would be accessing some methods of QuerySet in the subsequent commits. So marking this as Sequence results in mypy errors.
@@ -636,7 +636,7 @@ class Realm(models.Model):
             role__in=roles,
         )
 
-    def get_human_billing_admin_users(self) -> Sequence["UserProfile"]:
+    def get_human_billing_admin_users(self) -> QuerySet:
         return UserProfile.objects.filter(
             Q(role=UserProfile.ROLE_REALM_OWNER) | Q(is_billing_admin=True),
             realm=self,
Add Wikibase Client extension requirement to APISite.unconnectedpages() This special page is only available on wikis that have this extension installed.
@@ -6543,6 +6543,7 @@ class APISite(BaseSite):
         return lrgen
 
     @deprecated_args(step=None)
+    @need_extension('Wikibase Client')
     def unconnected_pages(self, total=None):
         """Yield Page objects from Special:UnconnectedPages.
Update .readthedocs.yml set build python version 3.6.10 instead of default 3.6 (.12) for which pip currently recommends wrong numpy version
@@ -18,7 +18,7 @@ formats: []
 
 # Optionally set the version of Python and requirements required to build your docs
 python:
-  version: 3.6
+  version: 3.6.10
   install:
     - requirements: docs/requirements.txt
     - method: setuptools
Clarify the installation process Changes: Explain the difference between pip and manual installation Clarify the installation steps Correct the command for manual installation
@@ -13,7 +13,7 @@ Requirements :lines: 25-27 :dedent: 4 -Depending on the processed files, it might require the **manual installation** of extra modules. +If you are planning to use file formats other than plain ``txt``, you will need to install additional **extra modules** to have the right interface. At the moment, those modules are: - |bioread|_, for AcqKnowledge (``.acq``) files. @@ -24,6 +24,9 @@ At the moment, those modules are: Linux and mac installation -------------------------- +The most convenient option is to use ``pip``, as it allows you to automatically download and install the package from PyPI repository and facilitates upgrading or uninstalling it. Since we use ``auto`` to publish the latest features as soon as they are ready, PyPI will always have the latest stable release of ``phys2bids`` as a package. + +Alternatively, you can get the package directly from GitHub but all download, installation and package handling steps will need to be done manually. Install with ``pip`` ^^^^^^^^^^^^^^^^^^^^ @@ -33,18 +36,21 @@ Install with ``pip`` If it is, you might need to use ``pip`` instead of ``pip3``, although some OSs do adopt ``pip3`` anyway. If you want to check, type ``python --version`` in a terminal. +If you don't need any of the extra modules listed at the beginning of this page go to **Install phys2bids alone**. Otherwise, follow the instructions under **Install phys2bids with extra modules**. + Install ``phys2bids`` alone ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Pypi has the latest stable release of ``phys2bids`` as a package. Since we use ``auto`` to publish the latest features as soon as they are ready, it will *really* be the latest release. Just run:: +To install ``phys2bids`` along with the basic required modules just run:: pip3 install phys2bids -Install extra modules -~~~~~~~~~~~~~~~~~~~~~ +You can now proceed to check your installation and start using ``phys2bids``! -If you are planning to use file formats other than plain ``txt``, you need to install extra modules to have the right interface. -Extra modules installation can be done with the syntax:: +Install ``phys2bids`` with extra modules +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The installation of ``phys2bids`` along with extra modules can be done with the syntax:: pip3 install phys2bids[<extra>] @@ -57,6 +63,8 @@ For instance, if you plan to install ``phys2bids`` and use all of the interfaces pip3 install phys2bids[interfaces] +You can now proceed to check your installation and start using ``phys2bids``! + .. note:: If you "missed" or skipped this trick when you installed ``phys2bids`` the first time, don't worry! You can do it any time - this will update ``phys2bids`` and install all extra modules you want. @@ -72,9 +80,9 @@ Alternatively, if you have ``git`` installed, use the command:: Open a terminal in the ``phys2bids`` folder and execute the command:: - python3 setup.py + python3 setup.py install -You might still need to install extra dependencies listed at the beginning of the page. +This should have installed ``phys2bids`` along with the basic required modules. If you need any of the extra modules listed at the beginning of the page you might need to install them manually. Otherwise, you can proceed to check your installation and start using ``phys2bids``. .. note:: If python 3 is already your default, you might use ``python`` rather than ``python3``
Modifies examples in staticmethods. The CI stores each saved rotor for each doctest, so I'm removing the saved rotor after every test: * save() * load() * available_rotors() * remove()
@@ -2083,7 +2083,8 @@ class Rotor(object): Examples -------- >>> rotor = rotor_example() - >>> rotor.save('new_rotor.toml') + >>> rotor.save('new_rotor') + >>> Rotor.remove('new_rotor') """ main_path = os.path.dirname(ross.__file__) path = Path(main_path) @@ -2133,10 +2134,11 @@ class Rotor(object): Example ------- >>> rotor1 = rotor_example() - >>> rotor1.save('new_rotor.toml') - >>> rotor2 = Rotor.load('new_rotor.toml') + >>> rotor1.save('new_rotor1') + >>> rotor2 = Rotor.load('new_rotor1') >>> rotor1 == rotor2 True + >>> Rotor.remove('new_rotor1') """ main_path = os.path.dirname(ross.__file__) rotor_path = Path(main_path) / "rotors" / file_name @@ -2172,9 +2174,10 @@ class Rotor(object): Example ------- >>> rotor = rotor_example() - >>> rotor.save('new_rotor.toml') + >>> rotor.save('new_rotor2') >>> Rotor.available_rotors() - ['new_rotor.toml'] + ['Rotor_format', 'new_rotor2', 'Benchmarks'] + >>> Rotor.remove('new_rotor2') """ return [x for x in os.listdir(Path(os.path.dirname(ross.__file__)) / "rotors")] @@ -2189,15 +2192,13 @@ class Rotor(object): Example ------- - >>> rotor1 = rotor_example() - >>> rotor2 = rotor_example() - >>> rotor1.save('new_rotor1.toml') - >>> rotor2.save('new_rotor2.toml') + >>> rotor = rotor_example() + >>> rotor.save('new_rotor2') >>> Rotor.available_rotors() - ['new_rotor1.toml', 'new_rotor2.toml'] - >>> Rotor.remove('new_rotor2.toml') + ['Rotor_format', 'new_rotor2', 'Benchmarks'] + >>> Rotor.remove('new_rotor2') >>> Rotor.available_rotors() - ['new_rotor1.toml'] + ['Rotor_format', 'Benchmarks'] """ shutil.rmtree(Path(os.path.dirname(ross.__file__)) / "rotors" / rotor_name)
Fix for plugin setting not working: Always allow plugin API URLs
-"""JSON API for the plugin app.""" +"""API for the plugin app.""" -from django.conf import settings from django.urls import include, re_path from django_filters.rest_framework import DjangoFilterBackend @@ -238,7 +237,6 @@ general_plugin_api_urls = [ re_path(r'^.*$', PluginList.as_view(), name='api-plugin-list'), ] -if settings.PLUGINS_ENABLED: plugin_api_urls.append( re_path(r'^plugin/', include(general_plugin_api_urls)) )
Remove pip install paunch We now have python-paunch-1.1.1 [1] in the overcloud images so we do not need to pip install it any longer. [1]
@@ -139,10 +139,6 @@ resources:
           - name: Write kolla config json files
             copy: content="{{item.value|to_json}}" dest="{{item.key}}" force=yes
             with_dict: "{{kolla_config}}"
-          - name: Install paunch FIXME remove when packaged
-            shell: |
-              yum -y install python-pip
-              pip install paunch
       ########################################################
       # Bootstrap tasks, only performed on bootstrap_server_id
       ########################################################
Changes an assert(is-real) statement to be more robust. Using numpy.isreal has a very low tolerance on imaginary values (maybe none?) and so the use of all(isreal(x)) has been removed in favor of isclose(norm(imag(x)),0). The former was giving false assertion errors when being used to depolarize a 3Q-GST Lindbladian gate.
@@ -2126,7 +2126,8 @@ class LindbladParameterizedGate(Gate):
             assert(self.hamGens.shape == (bsH-1,d2,d2))
             if nonham_diagonal_only:
                 assert(self.otherGens.shape == (bsO-1,d2,d2))
-                assert(_np.all(_np.isreal(otherC)))
+                assert(_np.isclose(_np.linalg.norm(_np.imag(otherC)),0))
+                #assert(_np.all(_np.isreal(otherC))) #sometimes fails even when imag to machine prec
 
                 if cptp: #otherParams is a 1D vector of the sqrts of diagonal els
                     otherC = otherC.clip(1e-16,1e100) #must be positive
util: use new location for needs_ssh Resolves: rm#39322
@@ -12,7 +12,7 @@ def can_connect_passwordless(hostname):
     denied`` message or a``Host key verification failed`` message.
     """
     # Ensure we are not doing this for local hosts
-    if not remoto.connection.needs_ssh(hostname):
+    if not remoto.backends.needs_ssh(hostname):
         return True
 
     logger = logging.getLogger(hostname)
simplify isflipped logic This patch removes the 'fromdims > 0' check from the isflipped property method, which is superfluous as numpy correctly reports the determinant of a 0x0 matrix to be 1.
@@ -176,7 +176,7 @@ class Square(Matrix):
 
   @property
   def isflipped(self):
-    return self.fromdims > 0 and self.det < 0
+    return bool(self.det < 0)
 
   @types.lru_cache
   def transform_poly(self, coeffs):
Update link to vcalendar-filter Original link is dead.
 #!/usr/bin/awk -f
-# mutt2khal is designed to be used in conjunction with vcalendar-filter (https://github.com/datamuc/mutt-filters/blob/master/vcalendar-filter)
+# mutt2khal is designed to be used in conjunction with vcalendar-filter (https://github.com/terabyte/mutt-filters/blob/master/vcalendar-filter)
 # and was inspired by the work of Jason Ryan (https://bitbucket.org/jasonwryan/shiv/src/tip/Scripts/mutt2khal)
 # example muttrc: macro attach A "<pipe-message>vcalendar-filter | mutt2khal<enter>"
discover-features not protocol-discovery I changed protocol-discovery to discover-features. It is very confusing to refer to "protocol-discovery" as the identifier for the message family and then list "discover-features" in the example.
@@ -27,13 +27,13 @@ supported by one another's agents. They need a way to find out. This RFC introduces a protocol for discussing the protocols an agent can handle. The identifier for the message family used by this protocol is -`protocol-discovery`, and the fully qualified URI for its definition is: +`discover-features`, and the fully qualified URI for its definition is: did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/discover-features/1.0 ### Roles -There are two roles in the `protocol-discovery` protocol: `requester` and +There are two roles in the `discover-features` protocol: `requester` and `responder`. The requester asks the responder about the protocols it supports, and the responder answers. Each role uses a single message type. @@ -109,18 +109,18 @@ protocols that match your query." An agent might not tell another that it supports a protocol for various reasons, including: the trust that it imputes to the other party based on cumulative interactions so far, whether it's in the middle of upgrading a plugin, whether it's currently -under high load, and so forth. And responses to a `protocol-discovery` request are +under high load, and so forth. And responses to a `discover-features` request are not guaranteed to be true forever; agents can be upgraded or downgraded, although they probably won't churn in their protocol support from moment to moment. ### Privacy Considerations -Because the regex in a `request` message can be very inclusive, the `protocol-discovery` +Because the regex in a `request` message can be very inclusive, the `discover-features` protocol could be used to mine information suitable for agent fingerprinting, in much the same way that browser fingerprinting works. This is antithetical to the ethos of our ecosystem, and represents bad behavior. Agents should -use `protocol-discovery` to answer legitimate questions, and not to build detailed +use `discover-features` to answer legitimate questions, and not to build detailed profiles of one another. However, fingerprinting may be attempted anyway. @@ -182,10 +182,10 @@ As shown in the above `~l10n` decorator, all agents using this protocol have [a simple message catalog](catalog.json) in scope. This allows agents to send [`problem-report`s]( https://github.com/hyperledger/indy-hipe/blob/6a5e4fe2/text/error-handling/README.md#the-problem-report-message-type -) to complain about something related to `protocol-discovery` issues. +) to complain about something related to `discover-features` issues. The catalog looks like this: -[![error catalog for protocol-discovery protocol](catalog.png)](catalog.json) +[![error catalog for discover-features protocol](catalog.png)](catalog.json) For more information, see the [localization RFC](https://github.com/hyperledger/indy-hipe/blob/569357c6/text/localized-messages/README.md).
Implement boundary attack paper:
@@ -196,6 +196,63 @@ class LabelOnlyDecisionBoundary(MembershipInferenceAttack): self.distance_threshold_tau = distance_threshold_tau + def calibrate_distance_threshold_unsupervised( + self, top_t: int, num_samples: int, max_queries: int, **kwargs + ): + """ + Calibrate distance threshold on randomly generated samples, choosing the top-t percentile of the noise needed + to change the classifier's initial prediction. + + | Paper link: https://arxiv.org/abs/2007.15528 + + :param top_t: Top-t percentile. + :param num_samples: Number of random samples to generate. + :param max_queries: Maximum number of queries. + :Keyword Arguments for HopSkipJump: + * *norm*: Order of the norm. Possible values: "inf", np.inf or 2. + * *max_iter*: Maximum number of iterations. + * *max_eval*: Maximum number of evaluations for estimating gradient. + * *init_eval*: Initial number of evaluations for estimating gradient. + * *init_size*: Maximum number of trials for initial generation of adversarial examples. + * *verbose*: Show progress bars. + """ + from art.attacks.evasion.hop_skip_jump import HopSkipJump + + x_min, x_max = self.estimator.clip_values + + x_rand = np.random.rand(*(num_samples, ) + self.estimator.input_shape).astype(np.float32) + x_rand *= (x_max - x_min) # scale + x_rand += x_min # shift + + y_rand = np.random.randint(0, self.estimator.nb_classes, num_samples) + y_rand = check_and_transform_label_format(y_rand, self.estimator.nb_classes) + + hsj = HopSkipJump(classifier=self.estimator, targeted=False, **kwargs) + + distances = [] + + i = 0 + while len(x_rand) != 0 and i < max_queries: + x_adv = hsj.generate(x=x_rand, y=y_rand) + + distance = np.linalg.norm((x_adv - x_rand).reshape((x_rand.shape[0], -1)), ord=2, axis=1) + + y_pred = self.estimator.predict(x=x_adv) + + changed_predictions = np.argmax(y_pred, axis=1) != np.argmax(y_rand, axis=1) + + distances.extend(distance[changed_predictions]) + + x_rand, y_rand = x_adv[~changed_predictions], y_rand[~changed_predictions] + + i += 1 + + if len(distances) == 0: + raise RuntimeWarning("No successful adversarial examples were generated - no distances were obtained." + "Distance threshold will not be set.") + else: + self.distance_threshold_tau = np.percentile(distances, top_t) + def _check_params(self) -> None: if self.distance_threshold_tau is not None and ( not isinstance(self.distance_threshold_tau, (int, float)) or self.distance_threshold_tau <= 0.0
update __init__ file: just add some commas. This will avoid warnings like "'events' is not declared in __all__"
@@ -17,9 +17,9 @@ __all__ = (
     "SequentialTaskSet",
     "wait_time",
     "task", "tag", "TaskSet",
-    "HttpUser", "User"
-    "between", "constant", "constant_pacing"
-    "events",
+    "HttpUser", "User",
+    "between", "constant", "constant_pacing",
+    "events"
 )
 
 # Used for raising a DeprecationWarning if old Locust/HttpLocust is used
Add support for kubeflow 0.6 dashboard Requires an HTTPS connection, which means we have to use an ingress instead of just connecting to the pod itself.
@@ -58,15 +58,15 @@ def main(): password_overlay = { "applications": { - "ambassador-auth": {"options": {"password": password}}, "katib-db": {"options": {"root_password": get_random_pass()}}, + "kubeflow-gatekeeper": {"options": {"password": password}}, "modeldb-db": {"options": {"root_password": get_random_pass()}}, - "pipelines-db": {"options": {"root_password": get_random_pass()}}, "pipelines-api": {"options": {"minio-secret-key": "minio123"}}, + "pipelines-db": {"options": {"root_password": get_random_pass()}}, } } - for service in ["dns", "storage", "dashboard", "rbac", "juju"]: + for service in ["dns", "storage", "dashboard", "ingress", "rbac", "juju"]: print("Enabling %s..." % service) run("microk8s-enable.wrapper", service) @@ -90,7 +90,7 @@ def main(): print("Kubeflow deployed.") print("Waiting for operator pods to become ready.") - for _ in range(40): + for _ in range(80): status = json.loads(juju("status", "-m", "uk8s:kubeflow", "--format=json")) unready_apps = [ name @@ -119,20 +119,46 @@ def main(): ) juju("config", "ambassador", "juju-external-hostname=localhost") + juju("expose", "ambassador") + + # Workaround for https://bugs.launchpad.net/juju/+bug/1849725. + # Wait for up to a minute for Juju to finish setting up the Ingress + # so that we can patch it, and fail if it takes too long. + patch = json.dumps({ + 'kind': 'Ingress', + 'apiVersion': 'extensions/v1beta1', + 'metadata': {'name': 'ambassador', 'namespace': 'kubeflow'}, + 'spec': {'tls': [{'hosts': ['localhost'], 'secretName': 'ambassador-tls'}]}, + }).encode('utf-8') - status = json.loads(juju("status", "-m", "uk8s:kubeflow", "--format=json")) - ambassador_ip = status["applications"]["ambassador"]["address"] + env = os.environ.copy() + env["PATH"] += ":%s" % os.environ["SNAP"] + + for _ in range(12): + try: + subprocess.run( + ['microk8s-kubectl.wrapper', 'apply', '-f', '-'], + input=patch, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE, + env=env, + ).check_returncode() + break + except subprocess.CalledProcessError: + time.sleep(5) + else: + print("Couldn't set Ambassador up properly") + sys.exit(1) print( textwrap.dedent( """ Congratulations, Kubeflow is now available. - The dashboard is available at http://%s/ + The dashboard is available at https://localhost/ To tear down Kubeflow and associated infrastructure, run: microk8s.disable kubeflow """ - % ambassador_ip ) )
return and print infos: return the generated information from the function instead of just printing it out
@@ -170,13 +170,15 @@ class MeshInterface: def showInfo(self, file=sys.stdout): """Show human readable summary about this object""" - - print( - f"Owner: {self.getLongName()} ({self.getShortName()})", file=file) - print(f"\nMy info: {stripnl(MessageToJson(self.myInfo))}", file=file) - print("\nNodes in mesh:", file=file) + owner = f"Owner: {self.getLongName()} ({self.getShortName()})" + myinfo = f"\nMy info: {stripnl(MessageToJson(self.myInfo))}" + mesh = "\nNodes in mesh:" + nodes = "" for n in self.nodes.values(): - print(f" {stripnl(n)}", file=file) + nodes = nodes + f" {stripnl(n)}" + infos = owner + myinfo + mesh + nodes + print(infos) + return infos def showNodes(self, includeSelf=True, file=sys.stdout): """Show table summary of nodes in mesh""" @@ -227,8 +229,11 @@ class MeshInterface: for i, row in enumerate(rows): row['N'] = i+1 - print(tabulate(rows, headers='keys', missingval='N/A', - tablefmt='fancy_grid'), file=file) + table = tabulate(rows, headers='keys', missingval='N/A', + tablefmt='fancy_grid') + print(table) + return table + def getNode(self, nodeId): """Return a node object which contains device settings and channel info"""
Bump supported protoc version to 3.1.0. Review-Url:
@@ -22,7 +22,7 @@ THIS_DIR = os.path.dirname(os.path.abspath(__file__))
 
 # Minimally required protoc version.
 MIN_SUPPORTED_PROTOC_VERSION = (3, 0, 0)
 # Maximally supported protoc version.
-MAX_SUPPORTED_PROTOC_VERSION = (3, 0, 0)
+MAX_SUPPORTED_PROTOC_VERSION = (3, 1, 0)
 
 # Printed if protoc is missing or too old.
Added ref parameter to the record_exchange def. Tests should fail.
@@ -98,7 +98,8 @@ def create_card_hold(db, participant, amount): log(msg + "succeeded.") else: log(msg + "failed: %s" % error) - record_exchange(db, route, amount, fee, participant, 'failed', error) + ref = result.transaction.id + record_exchange(db, route, amount, fee, participant, 'failed', error, ref) return hold, error @@ -118,14 +119,15 @@ def capture_card_hold(db, participant, amount, hold): cents, amount_str, charge_amount, fee = _prep_hit(amount) amount = charge_amount - fee # account for possible rounding - e_id = record_exchange(db, route, amount, fee, participant, 'pre') + ref = hold.id + e_id = record_exchange(db, route, amount, fee, participant, 'pre', ref) # TODO: Find a way to link transactions and corresponding exchanges # meta = dict(participant_id=participant.id, exchange_id=e_id) error = '' try: - result = braintree.Transaction.submit_for_settlement(hold.id, str(cents/100.00)) + result = braintree.Transaction.submit_for_settlement(ref, str(cents/100.00)) assert result.is_success if result.transaction.status != 'submitted_for_settlement': error = result.transaction.status @@ -228,7 +230,7 @@ def get_ready_payout_routes_by_network(db, network): return out -def record_exchange(db, route, amount, fee, participant, status, error=None): +def record_exchange(db, route, amount, fee, participant, status, error=None, ref): """Given a Bunch of Stuff, return an int (exchange_id). Records in the exchanges table have these characteristics: @@ -240,6 +242,8 @@ def record_exchange(db, route, amount, fee, participant, status, error=None): fee The payment processor's fee. It's always positive. + ref transaction id in the external system. + """ assert route.participant.id == participant.id @@ -248,10 +252,10 @@ def record_exchange(db, route, amount, fee, participant, status, error=None): exchange_id = cursor.one(""" INSERT INTO exchanges - (amount, fee, participant, status, route, note) - VALUES (%s, %s, %s, %s, %s, %s) + (amount, fee, participant, status, route, note, ref) + VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id - """, (amount, fee, participant.username, status, route.id, error)) + """, (amount, fee, participant.username, status, route.id, error, ref)) if status == 'failed': propagate_exchange(cursor, participant, route, error, 0)
support reduced Basis mask test The basis test suite automatically tests validity of all combinations of masks. If the number of dofs is large, this takes too much time. This patch adds support for defining the masks to test via the `checkmasks` attribute, defaulting to all.
@@ -1238,7 +1238,8 @@ class CommonBasis:
     self.basis.get_support(numpy.array([[True]*self.checkndofs], dtype=bool))
 
   def test_getitem_array(self):
-    for mask in itertools.product(*[[False, True]]*self.checkndofs):
+    checkmasks = getattr(self, 'checkmasks', itertools.product(*[[False, True]]*self.checkndofs))
+    for mask in checkmasks:
       mask = numpy.array(mask, dtype=bool)
       indices, = numpy.where(mask)
       for value in mask, indices:
Update ncepgrib2.py Moved import of pygrib.gaulats to inside elif gdtnum == 40 since no other condition relies on gaulats
@@ -756,7 +756,6 @@ lat/lon values returned by grid method may be incorrect."""
        @return: C{B{lats},B{lons}}, float32 numpy arrays containing
        latitudes and longitudes of grid (in degrees).
        """
-       from pygrib import gaulats
        gdsinfo = self.grid_definition_info
        gdtnum = self.grid_definition_template_number
        gdtmpl = self.grid_definition_template
@@ -779,6 +778,10 @@
            projparams['proj'] = 'cyl'
            lons,lats = np.meshgrid(lons,lats) # make 2-d arrays.
        elif gdtnum == 40: # gaussian grid (only works for global!)
+           try:
+               from pygrib import gaulats
+           except:
+               raise ImportError("pygrib required to compute Gaussian lats/lons")
            lon1, lat1 = self.longitude_first_gridpoint, self.latitude_first_gridpoint
            lon2, lat2 = self.longitude_last_gridpoint, self.latitude_last_gridpoint
            nlats = self.points_in_y_direction
Status: don't throw on missing services If postgres or rabbitmq isn't there, return empty instead of throwing an indexerror
@@ -69,12 +69,13 @@ class Status(SecuredResource):
             if not config.instance.postgresql_host.startswith(('localhost',
                                                                '127.0.0.1')):
                 for job in jobs:
-                    if job['display_name'] == 'PostgreSQL':
+                    if job['display_name'] == 'PostgreSQL' \
+                            and job['instances']:
                         job['instances'][0]['state'] = 'remote'
 
             broker_state = 'running' if broker_is_healthy() else 'failed'
             for job in jobs:
-                if job['display_name'] == 'RabbitMQ':
+                if job['display_name'] == 'RabbitMQ' and job['instances']:
                     job['instances'][0]['state'] = broker_state
         else:
             jobs = ['undefined']
[Nightly-test] promote single_node/decision_tree_autoscaling_20_runs to staging In this way, we can use these two tests to test anyscale staging release.
 - name: decision_tree_autoscaling_20_runs
   group: core-multi-test
   working_dir: nightly_tests
+  env: staging
 
   legacy:
     test_name: decision_tree_autoscaling_20_runs
     test_suite: nightly_tests
 
 - name: single_node
   group: core-scalability-test
   working_dir: benchmarks
+  env: staging
 
   legacy:
     test_name: single_node
     test_suite: benchmark_tests
Update README.md added formatting fixes
@@ -76,10 +76,11 @@ sudo regionset /dev/sr0 ## Install **Setup 'arm' user and ubuntu basics:** -# Sets up graphics drivers, does Ubuntu update & Upgrade, gets Ubuntu to auto set up driver, and finally installs and setups up avahi-daemon + +Sets up graphics drivers, does Ubuntu update & Upgrade, gets Ubuntu to auto set up driver, and finally installs and setups up avahi-daemon ```bash sudo apt upgrade -y && sudo apt update -y -***optional: sudo add-apt-repository ppa:graphics-drivers/ppa +***optional (was not required for me): sudo add-apt-repository ppa:graphics-drivers/ppa sudo apt install avahi-daemon -y && sudo systemctl restart avahi-daemon sudo apt install ubuntu-drivers-common -y && sudo ubuntu-drivers install sudo reboot @@ -170,15 +171,21 @@ Optionally if you want something more stable than master you can download the la **Email notifcations** -- A lot of random problems are found in the sysmail, email alerting is a most effective method for debugging and monitoring. -- I recommend you install postfix from the instructions below: - http://mhawthorne.net/posts/2011-postfix-configuring-gmail-as-relay/ -- Then configure /etc/aliases +A lot of random problems are found in the sysmail, email alerting is a most effective method for debugging and monitoring. + +I recommend you install postfix from here:http://mhawthorne.net/posts/2011-postfix-configuring-gmail-as-relay/ + +Then configure /etc/aliases e.g.: + + ``` root: [email protected] arm: [email protected] userAccount: [email protected] -- Run below to pick up the aliases + ``` + +Run below to pick up the aliases + ``` sudo newaliases ```
cppunparse: Fix writing `_Constant`/`_Bytes` with the wrong string quotes fixes issue
@@ -544,19 +544,20 @@ class CPPUnparser: self.dispatch(t.body) self.leave() - def _write_constant(self, value): + def _write_constant(self, value, infer_type=False): + result = repr(value) if isinstance(value, (float, complex)): # Substitute overflowing decimal literal for AST infinities. - self.write(repr(value).replace("inf", INFSTR)) + self.write(result.replace("inf", INFSTR), infer_type) else: - self.write(repr(value)) + self.write(result.replace('\'', '\"'), infer_type) def _Constant(self, t, infer_type=False): value = t.value if isinstance(value, tuple): self.write("(") if len(value) == 1: - self._write_constant(value[0]) + self._write_constant(value[0], infer_type) self.write(",") else: interleave(lambda: self.write(", "), self._write_constant, @@ -727,26 +728,12 @@ class CPPUnparser: self._generic_With(t, is_async=True, infer_type=infer_type) # expr - def _Bytes(self, t): - self.write(repr(t.s)) + def _Bytes(self, t, infer_type=False): + self._write_constant(t.s, infer_type) def _Str(self, tree, infer_type=False): - result = '' - if six.PY3: - result = repr(tree.s) - else: - # if from __future__ import unicode_literals is in effect, - # then we want to output string literals using a 'b' prefix - # and unicode literals with no prefix. - if "unicode_literals" not in self.future_imports: - result = repr(tree.s) - elif isinstance(tree.s, str): - result = "b" + repr(tree.s) - elif isinstance(tree.s, unicode): - result = repr(tree.s).lstrip("u") - else: - assert False, "shouldn't get here" - self.write(result.replace('\'', '\"'), infer_type) + result = tree.s + self._write_constant(result, infer_type) return dace.pointer(dace.int8) if infer_type else None format_conversions = {97: 'a', 114: 'r', 115: 's'}
Error message when no genomes found after dereplication. Fixes
@@ -156,7 +156,15 @@ checkpoint rename_genomes: def get_genomes_fasta(wildcards): genome_dir = checkpoints.rename_genomes.get(**wildcards).output.dir path= os.path.join(genome_dir, "{genome}.fasta") - return expand(path, genome=glob_wildcards(path).genome) + genomes=expand(path, genome=glob_wildcards(path).genome) + + if len(genomes)==0: + logger.critical("No genomes found after dereplication. " + "You don't have any Metagenome assembled genomes with sufficient quality. " + "You may want to change the assembly, binning or filtering parameters. " + "Or focus on the genecatalog workflow only.") + exit(1) + return genomes
Use math.isqrt() in _gf_ddf_shoup() There are no other cases, mentioned in This finally closes diofant/diofant#839
@@ -1396,7 +1396,7 @@ def _gf_ddf_shoup(self, f):
         domain = self.domain
         n, q = f.degree(), domain.order
 
-        k = math.ceil(math.sqrt(n//2))
+        k = math.isqrt(n//2 - 1) + 1 if n > 1 else 0
         x = self.gens[0]
 
         h = pow(x, q, f)
site.py: delete page using pageid instead of title Use pageid to delete page.
@@ -3825,24 +3825,33 @@ class APISite(BaseSite): """Delete page from the wiki. Requires appropriate privilege level. @see: U{https://www.mediawiki.org/wiki/API:Delete} + Page to be deleted can be given either as Page object or as pageid. - @param page: Page to be deleted. - @type page: pywikibot.Page + @param page: Page to be deleted or its pageid. + @type page: Page or, in case of pageid, int or str @param reason: Deletion reason. + @raises TypeError, ValueError: page has wrong type/value. """ token = self.tokens['delete'] + params = {'action': 'delete', 'token': token, 'reason': reason} + + if isinstance(page, pywikibot.Page): + params['title'] = page + msg = page.title(withSection=False) + else: + pageid = int(page) + params['pageid'] = pageid + msg = pageid + + req = self._simple_request(**params) self.lock_page(page) - req = self._simple_request(action='delete', - token=token, - title=page, - reason=reason) try: req.submit() except api.APIError as err: errdata = { 'site': self, - 'title': page.title(with_section=False), + 'title': msg, 'user': self.user(), } if err.code in self._dl_errors:
Add options to use updated bleurt checkpoints * Update bleurt.py: add options to use the newer recommended checkpoint bleurt-20 and its distilled versions * Update bleurt.py: remove trailing spaces
@@ -69,6 +69,10 @@ CHECKPOINT_URLS = {
     "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
     "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
     "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
+    "bleurt-20-d3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
+    "bleurt-20-d6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
+    "bleurt-20-d12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
+    "bleurt-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
 }
Fixed nondeterministic RG for ORT RNN tests Summary: Relaxing tolerance for ORT RNN tests Pull Request resolved:
@@ -72,6 +72,12 @@ class TestONNXRuntime(unittest.TestCase): opset_version = _export_onnx_opset_version keep_initializers_as_inputs = True # For IR version 3 type export. + def setUp(self): + torch.manual_seed(0) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(0) + np.random.seed(seed=0) + def run_test(self, model, input, rtol=1e-3, atol=1e-7, do_constant_folding=True, batch_size=2, use_gpu=True): run_model_test(self, model, batch_size=batch_size, input=input, use_gpu=use_gpu, rtol=rtol, atol=atol, @@ -832,7 +838,7 @@ class TestONNXRuntime(unittest.TestCase): return input input = make_input(RNN_BATCH_SIZE) - self.run_test(model, input, batch_size=RNN_BATCH_SIZE, atol=1e-7) + self.run_test(model, input, batch_size=RNN_BATCH_SIZE) # test that the model still runs with a different batch size other_input = make_input(RNN_BATCH_SIZE + 1) @@ -908,11 +914,11 @@ class TestONNXRuntime(unittest.TestCase): return input input = make_input(RNN_BATCH_SIZE) - self.run_test(model, input, batch_size=RNN_BATCH_SIZE, atol=1e-5) + self.run_test(model, input, batch_size=RNN_BATCH_SIZE) # test that the model still runs with a different batch size other_input = make_input(RNN_BATCH_SIZE + 1) - self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1, atol=1e-5) + self.run_test(model, other_input, batch_size=RNN_BATCH_SIZE + 1) def make_test(name, base, layer, bidirectional, initial_state,
Update ssa___applying_stolen_credentials_via_powersploit_modules.yml Fixed a typo "accuonts" -> "accounts"
@@ -2,7 +2,7 @@ name: Applying Stolen Credentials via PowerSploit modules
 id: 270b482d-2af2-448f-9923-9cf005f61be4
 version: 1
 date: '2020-11-03'
-description: Stolen credentials are applied by methods such as user impersonation, credential injection, spoofing of authentication processes or getting hold of critical accuonts.
+description: Stolen credentials are applied by methods such as user impersonation, credential injection, spoofing of authentication processes or getting hold of critical accounts.
   This detection indicates such activities carried out by PowerSploit exploit kit APIs.
 how_to_implement: You must be ingesting Windows Security logs from devices of interest, including the event ID 4688 with enabled command line logging.
 references:
add ability to move resources Used this to transfer resources from one project to another. Not exposed via HQ but can come in handy for dev purposes.
@@ -196,3 +196,39 @@ class TransifexApiClient(object): :return: source lang code on transifex """ return self.project_details().json().get('source_language_code') + + def move_resources(self, hq_lang_code, target_project, version=None, use_version_postfix=True): + """ + ability to move resources from one project to another + :param hq_lang_code: lang code on hq + :param target_project: target project slug on transifex + :param version: version if needed on parent resource slugs + :param use_version_postfix: to use version postfix in new project + :return: responses per resource slug + """ + responses = {} + for resource_slug in self.get_resource_slugs(version): + lang = self.transifex_lang_code(hq_lang_code) + url = "https://www.transifex.com/api/2/project/{}/resource/{}/translation/{}/?file".format( + self.project, resource_slug, lang + ) + response = requests.get(url, auth=self._auth, stream=True) + if response.status_code != 200: + raise ResourceMissing + if use_version_postfix: + upload_resource_slug = resource_slug + else: + upload_resource_slug = resource_slug.split("_v")[0] + upload_url = "https://www.transifex.com/api/2/project/{}/resource/{}/translation/{}".format( + target_project, upload_resource_slug, lang) + content = response.content + headers = {'content-type': 'application/json'} + data = { + 'name': upload_resource_slug, 'slug': upload_resource_slug, 'content': content, + 'i18n_type': 'PO' + } + upload_response = requests.put( + upload_url, data=json.dumps(data), auth=self._auth, headers=headers, + ) + responses[resource_slug] = upload_response + return responses
Update circl_passive_ssl.py Cleaned up
@@ -38,9 +38,6 @@ class CirclPassiveSSLApi(object): if r.ok: return r.json() - return None - - @staticmethod def fetch_cert(cert_sha1, settings): auth = ( @@ -55,8 +52,6 @@ class CirclPassiveSSLApi(object): if r.ok: return r.json() - return None - class CirclPassiveSSLSearchIP(OneShotAnalytics, CirclPassiveSSLApi): default_values = { 'name': 'Circl.lu IP to ssl certificate lookup.',
MAINT: Removing parallelisation from distance calculation [CHANGED] was using context stack from ContextStack in the process of rewriting to a different distribution
@@ -112,8 +112,7 @@ class EstimateDistances(object): self._est_params = list(est_params or []) self._run = False # a flag indicating whether estimation completed - # whether we're on the master CPU or not - self._on_master_cpu = parallel.get_communicator().Get_rank() == 0 + def __str__(self): return str(self.get_table())
Add default channel for Condor provider Fixes
@@ -3,6 +3,7 @@ import os
 import re
 import time
 
+from parsl.channels import LocalChannel
 from parsl.utils import RepresentationMixin
 from parsl.launchers import SingleNodeLauncher
 from parsl.providers.condor.template import template_string
@@ -59,7 +60,7 @@ class CondorProvider(RepresentationMixin, ClusterProvider):
         :class:`~parsl.launchers.SingleNodeLauncher` (the default),
     """
     def __init__(self,
-                 channel=None,
+                 channel=LocalChannel(),
                  nodes_per_block=1,
                  init_blocks=1,
                  min_blocks=0,
Force function to_bytes to always return bytes The recently introduced function to_bytes should return bytes, otherwise it's confusing and we can get unexpected results. References:
@@ -106,21 +106,16 @@ def to_bytes(x): .. versionadded:: 0.8.2 - .. note:: If the argument passed is not of type str or bytes, - it will be ignored, cause some twisted.web operations has the - capability to extract the needed bytes string from the object - itself via the render method. - - .. warning:: This is similar to :meth:`~coherence.upnp.core.utils. - to_string` but with the difference that the returned result - it could be an object. + .. note:: If the argument passed is not of type str or bytes, it will be + converted to his string representation and then it will be + converted into bytes. """ if isinstance(x, bytes): return x elif isinstance(x, str): return x.encode('ascii') else: - return x + return str(x).encode('ascii') def means_true(value):
Re-order prerequisite installations Some subtasks, such as testing proxy connectivity, will load and reinitialize config variables. Those may conflict with those values intended in script.
#!/bin/bash # Exit immediately if anything goes wrong, instead of making things worse. set -e +#################################################################### + +# NB(kamidzi): following calls load_configs(); potentially is destructive to settings +if [[ ! -z "$BOOTSTRAP_HTTP_PROXY_URL" ]] || [[ ! -z "$BOOTSTRAP_HTTPS_PROXY_URL" ]] ; then + echo "Testing configured proxies..." + source "$REPO_ROOT/bootstrap/shared/shared_proxy_setup.sh" +fi + +REQUIRED_VARS=( BOOTSTRAP_CACHE_DIR REPO_ROOT ) +check_for_envvars "${REQUIRED_VARS[@]}" + +# Create directory for download cache. +mkdir -p "$BOOTSTRAP_CACHE_DIR" ubuntu_url="http://us.archive.ubuntu.com/ubuntu/dists/trusty-updates" @@ -26,24 +39,10 @@ vbox_version="5.0.36" vbox_additions="VBoxGuestAdditions_$vbox_version.iso" vbox_url="http://download.virtualbox.org/virtualbox" -curl_cmd() { curl -f --progress -L -H 'Accept-encoding: gzip,deflate' "$@"; } -# wget_cmd() { wget --show-progress --no-check-certificate -nc -c -nd --header='Accept-Encoding: gzip,deflate' "$@"; } -#################################################################### - -if [[ ! -z "$BOOTSTRAP_HTTP_PROXY_URL" ]] || [[ ! -z "$BOOTSTRAP_HTTPS_PROXY_URL" ]] ; then - echo "Testing configured proxies..." - source "$REPO_ROOT/bootstrap/shared/shared_proxy_setup.sh" -fi - -REQUIRED_VARS=( BOOTSTRAP_CACHE_DIR REPO_ROOT ) -check_for_envvars "${REQUIRED_VARS[@]}" - # List of binary versions to download source "$REPO_ROOT/bootstrap/config/build_bins_versions.sh" -# Create directory for download cache. -mkdir -p "$BOOTSTRAP_CACHE_DIR" - +curl_cmd() { curl -f --progress -L -H 'Accept-encoding: gzip,deflate' "$@"; } #################################################################### # download_file wraps the usual behavior of curling a remote URL to a local file
Avoid checking `field` twice on all iterations Yields a small performance improvement
@@ -937,7 +937,7 @@ class ComponentCreateView(GetReturnURLMixin, View):
                     # Assign errors on the child form's name/label field to name_pattern/label_pattern on the parent form
                     if field == 'name':
                         field = 'name_pattern'
-                    if field == 'label':
+                    elif field == 'label':
                         field = 'label_pattern'
                     for e in errors:
                         form.add_error(field, '{}: {}'.format(name, ', '.join(e)))
Typo in powerfeed.md pot -> port
 # Power Feed
 
-A power feed represents the distribution of power from a power panel to a particular device, typically a power distribution unit (PDU). The power pot (inlet) on a device can be connected via a cable to a power feed. A power feed may optionally be assigned to a rack to allow more easily tracking the distribution of power among racks.
+A power feed represents the distribution of power from a power panel to a particular device, typically a power distribution unit (PDU). The power port (inlet) on a device can be connected via a cable to a power feed. A power feed may optionally be assigned to a rack to allow more easily tracking the distribution of power among racks.
 
 Each power feed is assigned an operational type (primary or redundant) and one of the following statuses:
Warn instead of fail when NoConvergence errors occur in world_to_pixel_values Closes
@@ -323,7 +323,17 @@ class FITSWCSAPIMixin(BaseLowLevelWCS, HighLevelWCSMixin): return world[0] if self.world_n_dim == 1 else tuple(world) def world_to_pixel_values(self, *world_arrays): + # avoid circular import + from astropy.wcs.wcs import NoConvergence + try: pixel = self.all_world2pix(*world_arrays, 0) + except NoConvergence as e: + warnings.warn(str(e)) + # would be better to use e.best_solution here but that would + # have to be formatted by wcs._array_converter which feels like + # unnecessary code duplication + pixel = self.all_world2pix(*world_arrays, 0, quiet=True) + return pixel[0] if self.pixel_n_dim == 1 else tuple(pixel) @property
$.Rewriting: add a disclaimer for this experimental feature TN:
 ## vim: filetype=makoada
 
+-- This package provides support for tree-based source code rewriting.
+--
+-- .. ATTENTION:: This is an experimental feature, so even if it is exposed to
+-- allow experiments, it is totally unsupported and the API is very likely to
+-- change in the future.
+
 private with Ada.Containers.Hashed_Maps;
 private with Ada.Containers.Vectors;
 
 private with Ada.Strings.Unbounded;
add an optional argument while running the test suite This allows the test suite to be run nicely inside an environment without killing the main thread
@@ -18,7 +18,7 @@ from fontParts.test import test_image from fontParts.test import test_guideline -def testEnvironment(objectGenerator): +def testEnvironment(objectGenerator, inApp=False): modules = [ test_font, test_info, @@ -43,8 +43,11 @@ def testEnvironment(objectGenerator): _setObjectGenerator(suite, objectGenerator) globalSuite.addTest(suite) runner = unittest.TextTestRunner() - ret = not runner.run(globalSuite).wasSuccessful() - sys.exit(ret) + succes = runner.run(globalSuite).wasSuccessful() + if not inApp: + sys.exit(not succes) + else: + return succes def _setObjectGenerator(suite, objectGenerator): for i in suite:
More strict conflict with pydocstyle See
                   'sphinx_rtd_theme>=0.2.4'],
 }
 extra_reqs['develop'] = ['pytest>=3.0', 'flake8>=2.5.5,!=3.1.0',
-                         'flake8-docstrings', 'pydocstyle!=2.1.0', 'pep8-naming',
+                         'flake8-docstrings', 'pydocstyle<2.1.0', 'pep8-naming',
                          'flake8-comprehensions', 'flake8-isort', 'pytest-cov',
                          'coverage'] + setup_reqs
Check out the branch's head, no merging. Now commit hashes are traceable in git.
@@ -14,6 +14,8 @@ jobs: if: "!contains(github.event.head_commit.message, 'skip ci')" steps: - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Set up Python uses: actions/[email protected] - name: Lint with Pre-commit @@ -25,6 +27,8 @@ jobs: if: "!contains(github.event.head_commit.message, 'skip ci')" steps: - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Set up Python uses: actions/[email protected] - name: Use Python Dependency Cache @@ -85,6 +89,8 @@ jobs: PY_IGNORE_IMPORTMISMATCH: yes steps: - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Install MSYS2 run: | choco install --no-progress msys2 --params="/NoUpdate /NoPath" @@ -161,6 +167,8 @@ jobs: PKG_CONFIG_PATH: /usr/local/opt/libffi/lib/pkgconfig:/usr/local/opt/[email protected]/lib/pkgconfig:${PKG_CONFIG_PATH:-} steps: - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} - name: Set up Python uses: actions/[email protected] - name: Install macOS Dependencies
Fix docs/qubits.ipynb as part of docs cleanup for Cirq 1.0 Move import to top
{ "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "WZ1G8QHhdHZR" - }, - "source": [ - "##### Copyright 2020 The Cirq Developers" - ] - }, { "cell_type": "code", "execution_count": null, }, "outputs": [], "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "#@title Copyright 2020 The Cirq Developers\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", "# you may not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "except ImportError:\n", " print(\"installing cirq...\")\n", " !pip install --quiet cirq\n", - " print(\"installed cirq.\")" + " print(\"installed cirq.\")\n", + " import cirq" ] }, { "A qubit is the basic unit of quantum information, a quantum bit: a two level system that can exist in superposition of those two possible states. Cirq also supports higher dimensional systems, so called [qudits](qudits.ipynb) that we won't cover here.\n", "\n", "In Cirq, a `Qubit` is nothing else than an abstract object that has an identifier, a `cirq.Qid` and some other potential metadata to represent device specific properties that can be used to validate a circuit.\n", - "In contrast to real qubits, the Cirq qubit does not have any state. The reason for this is that the actual state of the qubit or qubits is maintained in the quantum processor, or, in case of simulation, in the simulated state vector." + "In contrast to real qubits, the Cirq qubit does not have any state. The reason for this is that the actual state of the qubits is maintained in the quantum processor, or, in case of simulation, in the simulated state vector." ] }, { } ], "source": [ - "import cirq\n", - "\n", "qubit = cirq.NamedQubit(\"myqubit\")\n", "\n", "# creates an equal superposition of |0> and |1> when simulated\n",
Update apt_unclassified.txt > apt_mustangpanda
@@ -1589,12 +1589,6 @@ ckstar.zapto.org 64.34.205.178:443 -# Reference: https://twitter.com/katechondic/status/1556940169483264000 -# Reference: https://twitter.com/katechondic/status/1557031529141964801 -# Reference: https://www.virustotal.com/gui/file/c52828dbf62fc52ae750ada43c505c934f1faeb9c58d71c76bdb398a3fbbe1e2/detection - -http://89.38.225.151 - # Reference: https://twitter.com/malwrhunterteam/status/1567483040317816833 # Reference: https://twitter.com/h2jazi/status/1567512391289544704 # Reference: https://www.virustotal.com/gui/file/40831538e59700fd86081130af597623d0779a93cde6f76b86d52174522d8ad4/detection
stop tab buttons from scrolling around. The always_overscroll (default true) option that was added to ScrollView causes the buttons of TabbedPanel to always scroll.
@@ -683,7 +683,7 @@ class TabbedPanel(GridLayout): tab_pos = self.tab_pos tab_layout = self._tab_layout tab_layout.clear_widgets() - scrl_v = ScrollView(size_hint=(None, 1)) + scrl_v = ScrollView(size_hint=(None, 1), always_overscroll=False) tabs = self._tab_strip parent = tabs.parent if parent:
$.Analysis: simplify Reparse implementations TN:
@@ -71,10 +71,6 @@ package body ${ada_lib_name}.Analysis is procedure Free is new Ada.Unchecked_Deallocation (Analysis_Unit_Type, Analysis_Unit); - procedure Update_Charset (Unit : Analysis_Unit; Charset : String); - -- If Charset is an empty string, do nothing. Otherwise, update - -- Unit.Charset field to Charset. - function Normalize_Unit_Filename (Filename : String) return Unbounded_String; -- Try to return a canonical filename. This is used to have an @@ -105,17 +101,6 @@ package body ${ada_lib_name}.Analysis is (Symbols : Symbol_Table) return Symbol_Literal_Array; -- Create pre-computed symbol literals in Symbols and return them - -------------------- - -- Update_Charset -- - -------------------- - - procedure Update_Charset (Unit : Analysis_Unit; Charset : String) is - begin - if Charset'Length /= 0 then - Unit.Charset := To_Unbounded_String (Charset); - end if; - end Update_Charset; - ------------ -- Create -- ------------ @@ -577,21 +562,10 @@ package body ${ada_lib_name}.Analysis is (Unit : Analysis_Unit; Charset : String := "") is - procedure Init_Parser - (Unit : Analysis_Unit; - Read_BOM : Boolean; - Parser : in out Parser_Type) is + Dummy : constant Analysis_Unit := Get_From_File + (Unit.Context, To_String (Unit.File_Name), Charset, Reparse => True); begin - Init_Parser_From_File - (To_String (Unit.File_Name), To_String (Unit.Charset), Read_BOM, - Unit, Parser); - end Init_Parser; - - Reparsed : Reparsed_Unit; - begin - Update_Charset (Unit, Charset); - Do_Parsing (Unit, Charset'Length = 0, Init_Parser'Access, Reparsed); - Update_After_Reparse (Unit, Reparsed); + null; end Reparse; ------------- @@ -603,22 +577,10 @@ package body ${ada_lib_name}.Analysis is Charset : String := ""; Buffer : String) is - procedure Init_Parser - (Unit : Analysis_Unit; - Read_BOM : Boolean; - Parser : in out Parser_Type) - is + Dummy : constant Analysis_Unit := Get_From_Buffer + (Unit.Context, To_String (Unit.File_Name), Charset, Buffer); begin - Init_Parser_From_Buffer - (Buffer, To_String (Unit.Charset), Read_BOM, Unit, Parser); - end Init_Parser; - - Reparsed : Reparsed_Unit; - begin - Update_Charset (Unit, Charset); - Do_Parsing (Unit, Charset'Length = 0, Init_Parser'Access, Reparsed); - Unit.Charset := To_Unbounded_String (Charset); - Update_After_Reparse (Unit, Reparsed); + null; end Reparse; -------------
coresight: fix GenericMemAPTarget issues. * coresight: revert superclass of GenericMemAPTarget to CoreSightCoreComponent. This fixes an issue where DebugContext tries to access .core if the parent is not a CoreSightCoreComponent. * coresight: GenericMemAPTarget: raise CoreRegisterAccessError for core register methods. * coresight: GenericMemAPTarget: pass .read_memory() 'now' arg to AP.
# pyOCD debugger # Copyright (c) 2020 Cypress Semiconductor Corporation -# Copyright (c) 2021 Chris Reed +# Copyright (c) 2021-2022 Chris Reed # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); import logging -from .component import CoreSightComponent +from .component import CoreSightCoreComponent +from ..core import exceptions from ..core.target import Target from ..core.core_registers import CoreRegistersIndex @@ -26,7 +27,7 @@ LOG = logging.getLogger(__name__) DEAD_VALUE = 0 -class GenericMemAPTarget(Target, CoreSightComponent): +class GenericMemAPTarget(Target, CoreSightCoreComponent): """@brief This target represents ARM debug Access Port without a CPU It may be used to access the address space of the target via Access Ports @@ -43,7 +44,7 @@ class GenericMemAPTarget(Target, CoreSightComponent): def __init__(self, session, ap, memory_map=None, core_num=0, cmpid=None, address=None): Target.__init__(self, session, memory_map) - CoreSightComponent.__init__(self, ap, cmpid, address) + CoreSightCoreComponent.__init__(self, ap, cmpid, address) self.core_number = core_num self.core_type = DEAD_VALUE self._core_registers = CoreRegistersIndex() @@ -70,7 +71,7 @@ class GenericMemAPTarget(Target, CoreSightComponent): self.ap.write_memory(addr, value, transfer_size) def read_memory(self, addr, transfer_size=32, now=True): - return self.ap.read_memory(addr, transfer_size, True) + return self.ap.read_memory(addr, transfer_size, now) def read_memory_block8(self, addr, size): return self.ap.read_memory_block8(addr, size) @@ -115,22 +116,22 @@ class GenericMemAPTarget(Target, CoreSightComponent): return None def read_core_register(self, reg): - return DEAD_VALUE + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def read_core_register_raw(self, reg): - return DEAD_VALUE + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def read_core_registers_raw(self, reg_list): - return [DEAD_VALUE] * len(reg_list) + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def write_core_register(self, reg, data): - pass + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def write_core_register_raw(self, reg, data): - pass + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def write_core_registers_raw(self, reg_list, data_list): - pass + raise exceptions.CoreRegisterAccessError("GenericMemAPTarget does not support core register access") def set_breakpoint(self, addr, type=Target.BreakpointType.AUTO): return False
Expanded ability of import. Renamed GIT_PYTHON_NOWARN to GIT_PYTHON_INITERR and added values for quiet import, warning import, and raise import. These respectively mean that no message or error is printed if git is non-existent, a plain warning is printed but the import succeeds, and an ImportError exception is raised.
@@ -232,11 +232,20 @@ class Git(LazyMixin): # executable cls.GIT_PYTHON_GIT_EXECUTABLE = cls.git_exec_name - # test if the user didn't want a warning - nowarn = os.environ.get("GIT_PYTHON_NOWARN", "false") - nowarn = nowarn.lower() in ["t", "true", "y", "yes"] - - if not nowarn: + # determine what the user wanted to happen + # we expect GIT_PYTHON_INITERR to either be unset or be one of + # the following values: + # q|quiet|s|silence + # w|warn|warning + # r|raise|e|error + initerr_quiet = ["q", "quiet", "s", "silence"] + initerr_warn = ["w", "warn", "warning"] + initerr_raise = ["r", "raise", "e", "error"] + + initerr = os.environ.get("GIT_PYTHON_INITERR", "warn").lower() + if initerr in initerr_quiet: + pass + elif initerr in initerr_warn: print(dedent("""\ WARNING: %s All git commands will error until this is rectified. @@ -244,6 +253,19 @@ class Git(LazyMixin): This initial warning can be silenced in the future by setting the environment variable: export GIT_PYTHON_NOWARN=true """) % err) + elif initerr in initerr_raise: + raise ImportError(err) + else: + err = dedent("""\ + GIT_PYTHON_INITERR environment variable has been set but it has been set with an invalid value. + + Use only the following values: + (1) q|quiet|s|silence: for no warning or exception + (2) w|warn|warning: for a printed warning + (3) r|raise|e|error: for a raised exception + """) + raise ImportError(err) + else: # after the first setup (when GIT_PYTHON_GIT_EXECUTABLE # is no longer None) we raise an exception and reset the
Updated description to something a bit simpler
@@ -194,7 +194,7 @@ API | Description | Auth | HTTPS | CORS | Link | API | Description | Auth | HTTPS | CORS | Link | |---|---|---|---|---|---| | ApiLeap | Make screenshots from web pages and HTML | `apiKey` | Yes | Unknown | [Go!](https://apileap.com/) | -| Apility.io | IP, Domains and Emails anti-abuse API blocklist lookup tool | No | Yes | Yes | [Go!](https://apility.io/apidocs/) | +| Apility.io | IP, Domains and Emails anti-abuse API blocklist | No | Yes | Yes | [Go!](https://apility.io/apidocs/) | | APIs.guru | Wikipedia for Web APIs, OpenAPI/Swagger specs for public APIs | No | Yes | Unknown | [Go!](https://apis.guru/api-doc/) | | BetterMeta | Return a site's meta tags in JSON format | `X-Mashape-Key` | Yes | Unknown | [Go!](http://bettermeta.io) | | Bitbucket | Pull public information for a Bitbucket account | No | Yes | Unknown | [Go!](https://api.bitbucket.org/2.0/users/karllhughes) |
Update TCN Fixes
@@ -6,8 +6,13 @@ from redbot.message import headers class tcn(headers.HttpHeader): canonical_name = "TCN" + description = """\ +The `TCN` header field is part of an experimental transparent content negotiation scheme. It +is not widely supported in clients. +""" reference = "https://tools.ietf.org/html/rfc2295" - list_header = False + list_header = True deprecated = False valid_in_requests = False valid_in_responses = True + no_coverage = True \ No newline at end of file
InfraValidator should skip k8s resource cleanup if pod_name is None. Please refer
@@ -188,6 +188,10 @@ class KubernetesRunner(base_runner.BaseModelServerRunner): logger=logging.warning, retry_filter=_api_exception_retry_filter) def _DeleteModelServerPod(self): + if self._pod_name is None: + # No model server Pod has been created yet. + logging.info('Server pod has not been created.') + return try: logging.info('Deleting Pod (name=%s)', self._pod_name) self._k8s_core_api.delete_namespaced_pod(
Remove integration shoeboxes at start of scaling. This can be disabled with the option delete_integration_shoeboxes
.type = int .help = "Number of bins to use for calculating and plotting merging stats." .expert_level = 1 + delete_integration_shoeboxes = True + .type = bool + .help = "Discard integration shoebox data from scaling output, to help" + "with memory management." + .expert_level = 2 } include scope dials.algorithms.scaling.scaling_options.phil_scope include scope dials.algorithms.scaling.cross_validation.cross_validate.phil_scope @@ -550,6 +555,10 @@ def format_d_min(value): reflections = flatten_reflections(params.input.reflections) experiments = flatten_experiments(params.input.experiments) + if params.output.delete_integration_shoeboxes: + for r in reflections: + del r['shoebox'] + if params.cross_validation.cross_validation_mode: from dials.algorithms.scaling.cross_validation.cross_validate import \ cross_validate
refactor: [cli] do not set log level at the top of the module. Do not set the log level at the top of the module anyconfig.cli, because it'll be set by anyconfig.cli.to_log_level later anyway.
@@ -25,7 +25,6 @@ _ENCODING = locale.getdefaultlocale()[1] or 'UTF-8' logging.basicConfig(format="%(levelname)s: %(message)s") LOGGER = logging.getLogger("anyconfig") LOGGER.addHandler(logging.StreamHandler()) -LOGGER.setLevel(logging.WARN) if anyconfig.compat.IS_PYTHON_3: import io
Adding guards around tracepath/tracepath6 commands. The scripts-library.sh file is calling commands without testing if they exist first. This change adds guards to prevent a non-existent binary from being called.
@@ -186,10 +186,14 @@ function get_instance_info { systemd-resolve --statistics && \ cat /etc/systemd/resolved.conf) > \ "/openstack/log/instance-info/host_dns_info_${TS}.log" || true + if [ "$(which tracepath)" ]; then { tracepath "8.8.8.8" -m 5 2>/dev/null || tracepath "8.8.8.8"; } > \ "/openstack/log/instance-info/host_tracepath_info_${TS}.log" || true + fi + if [ "$(which tracepath6)" ]; then { tracepath6 "2001:4860:4860::8888" -m 5 2>/dev/null || tracepath6 "2001:4860:4860::8888"; } >> \ "/openstack/log/instance-info/host_tracepath_info_${TS}.log" || true + fi if [ "$(which lxc-ls)" ]; then lxc-ls --fancy > \ "/openstack/log/instance-info/host_lxc_container_info_${TS}.log" || true
Rephrase RuntimeError description. This PR rephrases a confusing description of a RuntimeError to be clearer and more precise.
@@ -143,7 +143,8 @@ class RDBStorage(BaseStorage): :exc:`ValueError`: If the given `heartbeat_interval` or `grace_period` is not a positive integer. :exc:`RuntimeError`: - If the a process that was failed by heartbeat but was actually running. + When a process tries to finish a trial that has already + been set to FAILED by heartbeat. """ def __init__(
refactor: tests: keys: Add type annotations. This commit adds parameter and return type annotations or hints to the `test_keys.py` file, which contains tests for its counterpart `keys.py` from the `zulipterminal` module, to make mypy checks consistent and improve code readability.
-from typing import Dict +from typing import Any, Dict import pytest +from pytest_mock import MockerFixture from zulipterminal.config import keys @@ -11,33 +12,33 @@ USED_KEYS = {key for values in keys.KEY_BINDINGS.values() for key in values["key @pytest.fixture(params=keys.KEY_BINDINGS.keys()) -def valid_command(request): +def valid_command(request: Any) -> str: return request.param @pytest.fixture(params=["BLAH*10"]) -def invalid_command(request): +def invalid_command(request: Any) -> str: return request.param -def test_keys_for_command(valid_command): +def test_keys_for_command(valid_command: str) -> None: assert keys.KEY_BINDINGS[valid_command]["keys"] == keys.keys_for_command( valid_command ) -def test_primary_key_for_command(valid_command): +def test_primary_key_for_command(valid_command: str) -> None: assert keys.KEY_BINDINGS[valid_command]["keys"][0] == keys.primary_key_for_command( valid_command ) -def test_keys_for_command_invalid_command(invalid_command): +def test_keys_for_command_invalid_command(invalid_command: str) -> None: with pytest.raises(keys.InvalidCommand): keys.keys_for_command(invalid_command) -def test_keys_for_command_identity(valid_command): +def test_keys_for_command_identity(valid_command: str) -> None: """ Ensures that each call to keys_for_command returns the original keys in a new list which validates that the original keys don't get altered @@ -48,28 +49,28 @@ def test_keys_for_command_identity(valid_command): ) -def test_is_command_key_matching_keys(valid_command): +def test_is_command_key_matching_keys(valid_command: str) -> None: for key in keys.keys_for_command(valid_command): assert keys.is_command_key(valid_command, key) -def test_is_command_key_nonmatching_keys(valid_command): +def test_is_command_key_nonmatching_keys(valid_command: str) -> None: keys_to_test = USED_KEYS - set(keys.keys_for_command(valid_command)) for key in keys_to_test: assert not keys.is_command_key(valid_command, key) -def test_is_command_key_invalid_command(invalid_command): +def test_is_command_key_invalid_command(invalid_command: str) -> None: with pytest.raises(keys.InvalidCommand): keys.is_command_key(invalid_command, "esc") # key doesn't matter -def test_HELP_is_not_allowed_as_tip(): +def test_HELP_is_not_allowed_as_tip() -> None: assert keys.KEY_BINDINGS["HELP"]["excluded_from_random_tips"] is True assert keys.KEY_BINDINGS["HELP"] not in keys.commands_for_random_tips() -def test_commands_for_random_tips(mocker): +def test_commands_for_random_tips(mocker: MockerFixture) -> None: new_key_bindings: Dict[str, keys.KeyBinding] = { "ALPHA": { "keys": ["a"],
AnimationEditor : Refactor `__editablePlugAdded()` Remove bogus check for children - CurvePlugs always have a single output called "out". Avoid deprecated PathListingWidget methods. Support unusual case of a CurvePlug with multiple outputs. Block selection changed slot when updating selection, so we don't get unwanted recursion.
@@ -240,20 +240,14 @@ class AnimationEditor( GafferUI.NodeSetEditor ) : def __editablePlugAdded( self, standardSet, curvePlug ) : - curves = curvePlug.children() - if not curves : - return - - connected = curves[0].outputs() - - if not connected : - return - - previousSelection = self.__curveList.getSelectedPaths() - newPath = Gaffer.GraphComponentPath( self.scriptNode(), connected[0].relativeName( self.scriptNode() ).replace( '.', '/' ) ) - previousSelection.append( newPath ) + selection = self.__curveList.getSelection() + for output in curvePlug["out"].outputs() : + selection.addPath( + output.relativeName( self.scriptNode() ).replace( ".", "/" ) + ) - self.__curveList.setSelectedPaths( previousSelection ) + with Gaffer.BlockedConnection( self.__selectionChangedConnection ) : + self.__curveList.setSelection( selection ) def __sourceCurvePlug( self, plug ) :
fix: Decode content before calling json.loads(). This commit fixes an issue that happens when trying to open **Prepared Reports**: the content isn't decoded into a format readable by the `json.loads()` function. This was fixed by decoding the content to *utf-8*.
@@ -320,7 +320,7 @@ def get_prepared_report_result(report, filters, dn="", user=None): attached_file = frappe.get_doc("File", attached_file_name) compressed_content = attached_file.get_content() uncompressed_content = gzip_decompress(compressed_content) - data = json.loads(uncompressed_content) + data = json.loads(uncompressed_content.decode("utf-8")) if data: columns = json.loads(doc.columns) if doc.columns else data[0]
[bugfix] Mediawiki backend: Do not fail when no email is present. I included additional data in MediaWiki backend's get_user_data. The previous solution failed when no such data were available because MediaWiki didn't send it (because the set grants didn't permit it to do so).
@@ -173,13 +173,13 @@ class MediaWiki(BaseOAuth1): return { 'username': identity['username'], 'userID': identity['sub'], - 'email': identity['email'], - 'confirmed_email': identity['confirmed_email'], - 'editcount': identity['editcount'], - 'rights': identity['rights'], - 'groups': identity['groups'], - 'registered': identity['registered'], - 'blocked': identity['blocked'] + 'email': identity.get('email'), + 'confirmed_email': identity.get('confirmed_email'), + 'editcount': identity.get('editcount'), + 'rights': identity.get('rights'), + 'groups': identity.get('groups'), + 'registered': identity.get('registered'), + 'blocked': identity.get('blocked') } def get_user_id(self, details, response):
TST: added unit test for timezones Added a unit test for aware datetimes. Also added missing enumerate in a recently adjusted unit test.
@@ -556,6 +556,8 @@ class TestBasics(): # # ------------------------------------------------------------------------- def test_today_yesterday_and_tomorrow(self): + """ Test the correct instantiation of yesterday/today/tomorrow dates + """ self.ref_time = dt.datetime.utcnow() self.out = dt.datetime(self.ref_time.year, self.ref_time.month, self.ref_time.day) @@ -564,12 +566,26 @@ class TestBasics(): assert self.out + pds.DateOffset(days=1) == self.testInst.tomorrow() def test_filter_datetime(self): + """ Test the removal of time of day information using a filter + """ self.ref_time = dt.datetime.utcnow() self.out = dt.datetime(self.ref_time.year, self.ref_time.month, self.ref_time.day) assert self.out == self.testInst._filter_datetime_input(self.ref_time) + def test_filter_datetime_aware_to_naive(self): + """ Test the transformation of aware to naive UTC by datetime filter + """ + self.ref_time = dt.datetime(2010, 1, 1, tzinfo=dt.timezone.utc) + self.out = dt.datetime(self.ref_time.year, self.ref_time.month, + self.ref_time.day) + ftime = self.testInst._filter_datetime_input(self.ref_time) + assert self.out == ftime + assert ftime.tzinfo is None or ftime.utcoffset() is None + def test_filtered_date_attribute(self): + """ Test use of filter during date assignment + """ self.ref_time = dt.datetime.utcnow() self.out = dt.datetime(self.ref_time.year, self.ref_time.month, self.ref_time.day) @@ -1875,7 +1891,7 @@ class TestBasics(): out = pds.date_range(start_date, stop_date, freq='2D').tolist() # Convert filenames in list to a date - for item in self.testInst._iter_list: + for i, item in enumerate(self.testInst._iter_list): snip = item.split('.')[0] ref_snip = out[i].strftime('%Y-%m-%d') assert snip == ref_snip
Fix tick label text width caused by formatting. While formatting the code to conform to PEP8, I forgot a backslash, causing a misbehavior. This patch fixes that; otherwise the whole dictionary would have been assigned to the variable instead of only the one value it contains.
@@ -387,7 +387,7 @@ class Axes(object): tick_label_text_width = None tick_label_text_width_identifier = "%s tick label text width" % axes if tick_label_text_width_identifier in data['extra axis options']: - tick_label_text_width = data['extra axis options [base]'] + tick_label_text_width = data['extra axis options [base]'] \ [tick_label_text_width_identifier] del data['extra axis options'][tick_label_text_width_identifier]
[web] Import static.js in index.html. This is necessary after introducing the static mode; otherwise it will raise an "undefined" exception.
<link rel="stylesheet" href="/static/vendor.css"/> <link rel="stylesheet" href="/static/app.css"/> <link rel="icon" href="/static/images/favicon.ico" type="image/x-icon"/> + <script src="/static/static.js"></script> <script src="/static/vendor.js"></script> <script src="/static/app.js"></script> </head>
exposing net_transformer_fun before adding grad. Summary: Pull Request resolved: Need an interface to rewrite the graph after the net is built and after adding gradient ops.
@@ -44,6 +44,7 @@ def Parallelize( param_update_builder_fun=None, optimizer_builder_fun=None, post_sync_builder_fun=None, + pre_grad_net_transformer_fun=None, net_transformer_fun=None, devices=None, rendezvous=None, @@ -91,6 +92,11 @@ def Parallelize( Signature: net_transformer_fun( model, num_devices, device_prefix, device_type) + pre_grad_net_transformer_fun: + Optional function to transform the network similar to + net_transformer_fun, but happens before gradient ops + been add. + Signature: pre_grad_net_transformer_fun(model) post_sync_builder_fun: Function applied after initial parameter sync has been completed, such as keeping multi-precision parameters @@ -234,6 +240,9 @@ def Parallelize( model_helper_obj._computed_param_names =\ list(viewkeys(computed_params_grouped)) + if pre_grad_net_transformer_fun: + pre_grad_net_transformer_fun(model_helper_obj) + if has_parameter_updates: log.info("Adding gradient operators") _AddGradientOperators(devices, model_helper_obj, losses_by_gpu)
Remove max_rows parameter from DataFrame.to_markdown function Resolves I removed the `max_rows` parameter from the `DataFrame.to_markdown` function.
@@ -1764,13 +1764,12 @@ defaultdict(<class 'list'>, {'col..., 'col...})] kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args ) - def to_markdown(self, buf=None, mode=None, max_rows=None): + def to_markdown(self, buf=None, mode=None): """ Print DataFrame in Markdown-friendly format. .. note:: This method should only be used if the resulting Pandas object is expected - to be small, as all the data is loaded into the driver's memory. If the input - is large, set max_rows parameter. + to be small, as all the data is loaded into the driver's memory. Parameters ---------- @@ -1806,9 +1805,6 @@ defaultdict(<class 'list'>, {'col..., 'col...})] ) # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() - if max_rows is not None: - kdf = self.head(max_rows) - else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_markdown, pd.DataFrame.to_markdown, args
Also wrap code blocks at 80 chars (unless already wrapped) Follow-up to
@@ -196,11 +196,13 @@ class LspHoverCommand(LspTextCommand): else: value = item.get("value") language = item.get("language") + + if '\n' not in value: + value = "\n".join(textwrap.wrap(value, 80)) + if language: formatted.append("```{}\n{}\n```\n".format(language, value)) else: - if '\n' not in value: - value = "\n".join(textwrap.wrap(value, 80)) formatted.append(value) if formatted:
[Datasets] Skip flaky pipelining memory release test This pipelining memory release test is flaky; it was skipped in this Polars PR, which was then reverted.
@@ -81,6 +81,7 @@ class OnesSource(Datasource): return read_tasks [email protected](reason="Flaky, see https://github.com/ray-project/ray/issues/24757") @pytest.mark.parametrize("lazy_input", [True, False]) def test_memory_release_pipeline(shutdown_only, lazy_input): context = DatasetContext.get_current()
Add get_submatrix function See:
import numpy import scipy.sparse +from scipy.sparse import csc_matrix +from scipy.sparse.compressed import _process_slice, get_csr_submatrix def sparse_matrix(shape, integer=False): @@ -112,3 +114,37 @@ def smallest_int_type_for_range(minimum, maximum): return numpy.uint64 else: return numpy.int64 + + +def get_submatrix(matrix, rows=slice(None, None), cols=slice(None, None)): + """ + Return a submatrix sliced by the supplied rows and columns, with a special + fast path for Compressed Sparse Column matrices + + Workaroud for https://github.com/scipy/scipy/issues/11496 + """ + # Default slicing behaviour for types which don't need the fast path + if not isinstance(matrix, csc_matrix): + return matrix[rows, cols] + # This is based on the code found in the following file, but skips the + # redundant initialisation checks that would get run on the new matrix + # instance: + # scipy/sparse/compressed.py:_cs_matrix._get_submatrix + N, M = matrix.shape + i0, i1 = _process_slice(cols, M) + j0, j1 = _process_slice(rows, N) + if i0 == 0 and j0 == 0 and i1 == M and j1 == N: + return matrix + indptr, indices, data = get_csr_submatrix( + M, N, matrix.indptr, matrix.indices, matrix.data, i0, i1, j0, j1 + ) + shape = (j1 - j0, i1 - i0) + # Construct the new matrix instance by directly assigning its members, + # rather than using `__init__` which runs additional checks that we don't + # need + new_matrix = csc_matrix.__new__(csc_matrix) + new_matrix.data = data + new_matrix.indices = indices + new_matrix.indptr = indptr + new_matrix._shape = shape + return new_matrix
add optional H parameter to replace N. A user can set H for LampRays the same way they set H for an ObjectRays.
@@ -788,11 +788,15 @@ class ObjectRays(UniformRays): class LampRays(RandomUniformRays, Rays): def __init__(self, diameter, NA=1.0, N=100, random=False, z=0, rayColors=None, T=10, label=None): + def __init__(self, diameter, NA=1.0, N=100, T=10, H=None, random=False, z=0, rayColors=None, label=None): if random: RandomUniformRays.__init__(self, yMax=diameter/2, yMin=-diameter/2, thetaMax=NA, thetaMin=-NA, maxCount=N) else: self.yMin = -diameter/2 self.yMax = diameter/2 + + if H: + N = H self.maxCount = N*T rays = []
Update directories in running_pets.md. Correct the documentation so that all commands are run from the root of the git directory.
@@ -51,29 +51,35 @@ dataset for Oxford-IIIT Pets lives [here](http://www.robots.ox.ac.uk/~vgg/data/pets/). You will need to download both the image dataset [`images.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz) and the groundtruth data [`annotations.tar.gz`](http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz) -to the `tensorflow/models` directory. This may take some time. After downloading -the tarballs, your `object_detection` directory should appear as follows: +to the `tensorflow/models` directory and unzip them. This may take some time. + +``` bash +# From tensorflow/models/ +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz +wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz +tar -xvf images.tar.gz +tar -xvf annotations.tar.gz +``` + +After downloading the tarballs, your `tensorflow/models` directory should appear +as follows: ```lang-none -+ object_detection/ - + data/ - images.tar.gz - annotations.tar.gz - - create_pet_tf_record.py ++ images/ ++ annotations/ ++ object_detection/ ... other files and directories ``` The Tensorflow Object Detection API expects data to be in the TFRecord format, so we'll now run the `create_pet_tf_record` script to convert from the raw Oxford-IIIT Pet dataset into TFRecords. Run the following commands from the -`object_detection` directory: +`tensorflow/models` directory: ``` bash # From tensorflow/models/ -wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz -wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz -tar -xvf annotations.tar.gz -tar -xvf images.tar.gz python object_detection/create_pet_tf_record.py \ --label_map_path=object_detection/data/pet_label_map.pbtxt \ --data_dir=`pwd` \ @@ -84,7 +90,7 @@ Note: It is normal to see some warnings when running this script. You may ignore them. Two TFRecord files named `pet_train.record` and `pet_val.record` should be generated -in the `object_detection` directory. +in the `tensorflow/models` directory. Now that the data has been generated, we'll need to upload it to Google Cloud Storage so the data can be accessed by ML Engine. Run the following command to @@ -279,7 +285,7 @@ three files: * `model.ckpt-${CHECKPOINT_NUMBER}.meta` After you've identified a candidate checkpoint to export, run the following -command from `tensorflow/models/object_detection`: +command from `tensorflow/models`: ``` bash # From tensorflow/models
ENH: Changed the sequence of checking for local existence of a file and importing urllib modules in numpy._datasource.py to prevent the import in case the local file was found. See
@@ -547,6 +547,11 @@ def exists(self, path): is accessible if it exists in either location. """ + + # First test for local path + if os.path.exists(path): + return True + # We import this here because importing urllib2 is slow and # a significant fraction of numpy's total import time. if sys.version_info[0] >= 3: @@ -556,10 +561,6 @@ def exists(self, path): from urllib2 import urlopen from urllib2 import URLError - # Test local path - if os.path.exists(path): - return True - # Test cached url upath = self.abspath(path) if os.path.exists(upath):
Update Linux-Test-Project-Tests.sh. The git package does not exist on sles12sp4.
@@ -38,24 +38,24 @@ GetDistro update_repos LogMsg "Installing dependencies" -common_packages=(git m4 bison flex make gcc psmisc autoconf automake) +common_packages=(m4 bison flex make gcc psmisc autoconf automake) update_repos install_package "${common_packages[@]}" case $DISTRO in "suse"*) - suse_packages=(db48-utils libaio-devel libattr1 libcap-progs \ - libdb-4_8 perl-BerkeleyDB git-core) + suse_packages=(git-core db48-utils libaio-devel libattr1 \ + libcap-progs libdb-4_8 perl-BerkeleyDB) install_package "${suse_packages[@]}" ;; "ubuntu"* | "debian"*) - deb_packages=(db-util libaio-dev libattr1 libcap-dev keyutils \ - libdb4.8 libberkeleydb-perl expect dh-autoreconf \ - libnuma-dev quota genisoimage gdb unzip exfat-utils) + deb_packages=(git libaio-dev libattr1 libcap-dev keyutils \ + libdb4.8 libberkeleydb-perl expect dh-autoreconf gdb \ + libnuma-dev quota genisoimage db-util unzip exfat-utils) install_package "${deb_packages[@]}" ;; "redhat"* | "centos"* | "fedora"*) - rpm_packages=(db48-utils libaio-devel libattr libcap-devel libdb) + rpm_packages=(git db48-utils libaio-devel libattr libcap-devel libdb) install_package "${rpm_packages[@]}" ;; *)
[API docs] Fix Moment API docs. Improves docs rendering for `cirq.Moment`
@@ -357,7 +357,18 @@ class Moment: return self.__class__(op.transform_qubits(qubit_map) for op in self.operations) def expand_to(self, qubits: Iterable['cirq.Qid']) -> 'cirq.Moment': - """Returns self expanded to given superset of qubits by making identities explicit.""" + """Returns self expanded to given superset of qubits by making identities explicit. + + Args: + qubits: Iterable of `cirq.Qid`s to expand this moment to. + + Returns: + A new `cirq.Moment` with identity operations on the new qubits + not currently found in the moment. + + Raises: + ValueError: if this moments' qubits are not a subset of `qubits`. + """ if not self.qubits.issubset(qubits): raise ValueError(f'{qubits} is not a superset of {self.qubits}')
Fix for issue - the check of elements' fixed properties in restrictions now verifies None values.
@@ -360,6 +360,15 @@ class XsdElement(XsdComponent, ValidationMixin, ParticleMixin, ElementPathMixin) else: return 'none' + @property + def depth(self): + if self.ref is not None: + return 1 + elif self.type.parent is None: + return 1 + else: + return self.type.depth + 1 + # Global element's exclusive properties @property def abstract(self): @@ -925,8 +934,9 @@ class XsdElement(XsdComponent, ValidationMixin, ParticleMixin, ElementPathMixin) elif not self.is_consistent(other) and self.type.elem is not other.type.elem and \ not self.type.is_derived(other.type, 'restriction') and not other.type.abstract: return False - elif self.fixed != other.fixed and \ - self.type.normalize(self.fixed) != other.type.normalize(other.fixed): + elif other.fixed is not None and ( + self.fixed is None or + self.type.normalize(self.fixed) != other.type.normalize(other.fixed)): return False elif other.nillable is False and self.nillable: return False
enable consul ui on bootstrap server HG-- branch : feature/dcs
{% endif -%} "bootstrap": true, "server": true, + "ui": true, "check_update_interval": "0s", "node_name": "{{ consul_node_name }}", "datacenter": "{{ consul_datacenter }}",
Ensure linebreak after summary_prefix Added a newline after a non-empty summary_prefix to ensure there is a linebreak between the set summary_prefix and the hardcoded 'Aggregation resulted in the following data...' table header.
@@ -241,6 +241,11 @@ class Alerter(object): #Type independent prefix text = self.rule.get('summary_prefix', '') + # If a prefix is set, ensure there is a newline between it and the hardcoded + # 'Aggregation resulted in...' header below + if text != '': + text += "\n" + summary_table_fields = self.rule['summary_table_fields'] if not isinstance(summary_table_fields, list): summary_table_fields = [summary_table_fields] @@ -285,7 +290,7 @@ class Alerter(object): for key in keys: markdown_row += '| ' + str(key) + ' ' text += markdown_row + '| ' + str(count) + ' |\n' - text += '\n\n' + text += '\n' # Type independent suffix text += self.rule.get('summary_suffix', '')
Make scanning for meta encoding much quicker. Previously, this code tried to match everything with strings beginning with "<"; now we jump forward to each "<" and compare there. This also alters the jumpTo implementation to avoid computing a (perhaps long) slice, which made repeated calls O(n^2).
@@ -668,15 +668,11 @@ class EncodingBytes(bytes): def jumpTo(self, bytes): """Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match""" - newPosition = self[self.position:].find(bytes) - if newPosition > -1: - # XXX: This is ugly, but I can't see a nicer way to fix this. - if self._position == -1: - self._position = 0 - self._position += (newPosition + len(bytes) - 1) - return True - else: + try: + self._position = self.index(bytes, self.position) + len(bytes) - 1 + except ValueError: raise StopIteration + return True class EncodingParser(object): @@ -697,6 +693,10 @@ class EncodingParser(object): (b"<", self.handlePossibleStartTag)) for _ in self.data: keepParsing = True + try: + self.data.jumpTo(b"<") + except StopIteration: + break for key, method in methodDispatch: if self.data.matchBytes(key): try:
Added fix for bluray with no title crashing ARM. If the bluray is not identified and has no title, it will error out. This should stop the error and allow the rip to continue.
@@ -108,6 +108,10 @@ def identify_bluray(job): doc = xmltodict.parse(xml_file.read()) except OSError as e: logging.error("Disc is a bluray, but bdmt_eng.xml could not be found. Disc cannot be identified. Error number is: " + str(e.errno)) + # Fix for blurays with no label causing crashes + job.title = "not identified" + job.year = "" + db.session.commit() return False try:
Update test.py. Improved with progress reporting.
@@ -3,28 +3,42 @@ from numpy import * import matplotlib.pyplot as plt fobj = 5 +dObj = 5 + f2 = 200 -f3 = 200 +d2 = 100 + +f3 = 100 +d3 = 10 path = ImagingPath() path.append(Space(d=f3)) -path.append(Lens(f=f3, diameter=100)) +path.append(Lens(f=f3, diameter=d3)) path.append(Space(d=f3)) path.append(Space(d=f2)) -path.append(Lens(f=f2, diameter=100)) +path.append(Lens(f=f2, diameter=d2)) path.append(Space(d=f2)) path.append(Space(d=fobj)) -path.append(Lens(f=fobj, diameter=5)) +path.append(Lens(f=fobj, diameter=dObj)) path.append(Space(d=fobj)) rayHeights = [] -for ray in RandomLambertianRays(yMax=2.5,M=1000000): +nRays = 1000000 +i = 0 +progressLog = 100 + +allRays = RandomLambertianRays(yMax=2.5,M=nRays) + +for ray in allRays: lastRay = path.traceThrough(ray) if lastRay.isNotBlocked: rayHeights.append(lastRay.y) -_ = plt.hist(rayHeights, bins=40,density=True) + if i % progressLog == 0: + progressLog *= 10 + print("Progress {0}/{1} ({2}%) ".format(i, nRays,i/nRays*100)) + i += 1 + +plt.hist(rayHeights, bins=40,density=True) plt.title("Intensity profile") plt.show() - -#print(totalNumberRays)
[BUG] pinning click due to incompatibility with newest black. This PR follows the fix suggested in to restore `code-quality` CI functionality temporarily, until the incompatibility is fixed.
@@ -25,6 +25,7 @@ repos: hooks: - id: black language_version: python3 + additional_dependencies: [click==8.0.4] # args: [--line-length 79] - repo: https://github.com/pycqa/flake8
Add parse_table method to Huawei.VRP profile HG-- branch : feature/microservices
@@ -89,3 +89,24 @@ class Profile(BaseProfile): # Do not change these numbers. Used in get_switchport script v["version"] = "3.10" return v["version"] + + @staticmethod + def parse_table(e): + p = {"table": []} + is_table = False + is_next = False + header = [] + for l in e.splitlines(): + if not l: + continue + if "-"*10 in l: + is_table = True + header = prev_l + continue + if ":" in l and not is_table: + p.update(dict([l.split(":")])) + elif is_table: + l = l.split() + p["table"].append(l) + prev_l = l + return p
Fix DyDCNv2 RuntimeError. If the offset parameter is not set as continuous, it will trigger the runtime error: offset must be continuous.
@@ -44,7 +44,7 @@ class DyDCNv2(nn.Module): def forward(self, x, offset, mask): """Forward function.""" - x = self.conv(x.contiguous(), offset, mask) + x = self.conv(x.contiguous(), offset.contiguous(), mask) if self.with_norm: x = self.norm(x) return x
make deactivation safe even if apps were not loaded right
@@ -269,12 +269,17 @@ class PluginAppConfig(AppConfig): for plugin_path in settings.INTEGRATION_APPS_PATHS: models = [] # the modelrefs need to be collected as poping an item in a iter is not welcomed app_name = plugin_path.split('.')[-1] + try: + app_config = apps.get_app_config(app_name) # check all models - for model in apps.get_app_config(app_name).get_models(): + for model in app_config.get_models(): # remove model from admin site admin.site.unregister(model) models += [model._meta.model_name] + except LookupError: + # if an error occurs the app was never loaded right -> so nothing to do anymore + break # unregister the models (yes, models are just kept in multilevel dicts) for model in models:
commands/process: Fix ProcessContext initialization ordering. Ensure that ProcessContext is initialized before attempting to initialize any of the output processors.
@@ -80,6 +80,9 @@ class ProcessCommand(Command): pc = ProcessContext() for run_output in output_list: + pc.run_output = run_output + pc.target_info = run_output.target_info + if not args.recursive: self.logger.info('Installing output processors') else: @@ -108,8 +111,6 @@ class ProcessCommand(Command): pm.validate() pm.initialize(pc) - pc.run_output = run_output - pc.target_info = run_output.target_info for job_output in run_output.jobs: pc.job_output = job_output pm.enable_all()
Extract ECS task overrides This makes it easier to extend in the future and also prevents an additional storage call to look up the run's tags when we already have the run.
import warnings from collections import namedtuple from contextlib import suppress +from typing import Any, Dict import boto3 from botocore.exceptions import ClientError @@ -224,30 +225,29 @@ def launch_run(self, context: LaunchRunContext) -> None: # Set cpu or memory overrides # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-cpu-memory-error.html - cpu_and_memory_overrides = {} - tags = self._get_run_tags(run.run_id) - if tags.cpu: - cpu_and_memory_overrides["cpu"] = tags.cpu - if tags.memory: - cpu_and_memory_overrides["memory"] = tags.memory + cpu_and_memory_overrides = self.get_cpu_and_memory_overrides(run) - # Run a task using the same network configuration as this processes's - # task. - response = self.ecs.run_task( - taskDefinition=task_definition, - cluster=metadata.cluster, - overrides={ - "containerOverrides": [ + container_overrides = [ { "name": self.container_name, "command": command, # containerOverrides expects cpu/memory as integers **{k: int(v) for k, v in cpu_and_memory_overrides.items()}, } - ], + ] + + overrides: Dict[str, Any] = { + "containerOverrides": container_overrides, # taskOverrides expects cpu/memory as strings **cpu_and_memory_overrides, - }, + } + + # Run a task using the same network configuration as this processes's + # task. + response = self.ecs.run_task( + taskDefinition=task_definition, + cluster=metadata.cluster, + overrides=overrides, networkConfiguration={ "awsvpcConfiguration": { "subnets": metadata.subnets, @@ -286,6 +286,18 @@ def launch_run(self, context: LaunchRunContext) -> None: cls=self.__class__, ) + def get_cpu_and_memory_overrides(self, run: PipelineRun) -> Dict[str, str]: + overrides = {} + + cpu = run.tags.get("ecs/cpu") + memory = run.tags.get("ecs/memory") + + if cpu: + overrides["cpu"] = cpu + if memory: + overrides["memory"] = memory + return overrides + def terminate(self, run_id): tags = self._get_run_tags(run_id)
Update py3.8-all-free.yml: missing requirements!
@@ -5,9 +5,37 @@ dependencies: - python=3.8 - xarray - scipy - - netCDF4 + - netcdf4 - erddapy - fsspec - aiohttp - packaging - toolz + + - dask + - gsw + - pyarrow + - tqdm + - distributed + + - matplotlib + - cartopy + - seaborn + - ipython + - ipywidgets + - ipykernel + + - zarr + - bottleneck + - cftime + - cfgrib + - numpy + - pandas + + - pip + - pytest + - setuptools + - black + - flake8 + - pytest-cov + - pytest-env \ No newline at end of file
client: make logging_utils_test.py run on Python3. The regexp pattern needs to be bytes to match bytes text.
-#!/usr/bin/env vpython +#!/usr/bin/env vpython3 # Copyright 2015 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. @@ -46,7 +46,8 @@ class Test(unittest.TestCase): expected = _LOG_HEADER + ': DEBUG foo\n$' if sys.platform == 'win32': expected = expected.replace('\n', '\r\n') - self.assertTrue(re.match(expected, result), (expected, result)) + self.assertTrue( + re.match(expected.encode('utf-8'), result), (expected, result)) def test_prepare_logging(self): root = logging.RootLogger(logging.DEBUG) @@ -58,7 +59,8 @@ class Test(unittest.TestCase): # It'd be nice to figure out a way to ensure it's properly in UTC but it's # tricky to do reliably. expected = _LOG_HEADER + ' D: foo\n$' - self.assertTrue(re.match(expected, result), (expected, result)) + self.assertTrue( + re.match(expected.encode('utf-8'), result), (expected, result)) def test_rotating(self): # Create a rotating log. Create a subprocess then delete the file. Make sure @@ -80,7 +82,7 @@ class Test(unittest.TestCase): ] for e, l in zip(expected, lines): ex = _LOG_HEADER_PID + e + '$' - self.assertTrue(re.match(ex, l), (ex, l)) + self.assertTrue(re.match(ex.encode('utf-8'), l), (ex, l)) self.assertEqual(len(expected), len(lines))
Make register and nameless variable run through sympy before being set Closes
"""This is for context-related stuff.""" from vyxal.Canvas import Canvas +import sympy class Context: @@ -11,7 +12,7 @@ class Context: self.empty_input_is_zero = True self.default_arity = 1 self.dictionary_compression = True - self.ghost_variable = 0 + self.ghost_variable = sympy.nsimplify(0) self.function_stack = [] self.inputs = [[[], 0]] # [[[inputs], index], [[inputs], index]] # inputs[0] = [[inputs], index] @@ -25,7 +26,7 @@ class Context: self.printed = False self.range_start = 1 # Where do auto ranges start? self.range_end = 1 # How much to add to the end - self.register = 0 + self.register = sympy.nsimplify(0) self.repl_mode = False self.retain_popped = False self.reverse_flag = False
Base rocket and hab RPs off of breakdowns, allowing them to work for playoffs. This allows for rocket RP, hab RP, and unicorn match analysis on playoff matches.
@@ -188,8 +188,9 @@ class EventInsightsHelper(object): hatch_panel_points += alliance_breakdown['hatchPanelPoints'] cargo_points += alliance_breakdown['cargoPoints'] - alliance_rocket_rp_achieved = alliance_breakdown['completeRocketRankingPoint'] - alliance_climb_rp_achieved = alliance_breakdown['habDockingRankingPoint'] + completed_rocket = alliance_breakdown['completedRocketNear'] or alliance_breakdown['completedRocketFar'] + alliance_rocket_rp_achieved = alliance_breakdown['completeRocketRankingPoint'] or completed_rocket + alliance_climb_rp_achieved = alliance_breakdown['habDockingRankingPoint'] or (alliance_breakdown['habClimbPoints'] >= 15) rocket_rp_achieved += 1 if alliance_rocket_rp_achieved else 0 climb_rp_achieved += 1 if alliance_climb_rp_achieved else 0 alliance_win = alliance_color == match.winning_alliance