Columns: message (string, lengths 13–484) | diff (string, lengths 38–4.63k)
cabs -> abs. cabs is automatically emitted by loopy for a c99 target
@@ -501,7 +501,7 @@ def test_complex_support(ctx_factory, target): euler1_imag[i] = imag(euler1[i]) real_times_complex[i] = in1[i]*(in2[i]*1j) real_plus_complex[i] = in1[i] + (in2[i]*1j) - abs_complex[i] = cabs(real_plus_complex[i]) + abs_complex[i] = abs(real_plus_complex[i]) complex_div_complex[i] = (2jf + 7*in1[i])/(32jf + 37*in1[i]) complex_div_real[i] = (2jf + 7*in1[i])/in1[i] real_div_complex[i] = in1[i]/(2jf + 7*in1[i]) @@ -534,7 +534,7 @@ def test_complex_support(ctx_factory, target): np.testing.assert_allclose(out["euler1_imag"], 0, atol=1e-10) np.testing.assert_allclose(out["real_times_complex"], in1*(in2*1j)) np.testing.assert_allclose(out["real_plus_complex"], in1+(in2*1j)) - np.testing.assert_allclose(out["abs_complex"], np.sqrt(in1**2+in2**2)) + np.testing.assert_allclose(out["abs_complex"], np.abs(out["real_plus_complex"])) np.testing.assert_allclose(out["complex_div_complex"], (2j+7*in1)/(32j+37*in1)) np.testing.assert_allclose(out["complex_div_real"], (2j + 7*in1)/in1) np.testing.assert_allclose(out["real_div_complex"], in1/(2j + 7*in1))
Fixed TypeError: the `shifts` variable was defined as a `list`, which breaks line 490: `midpt_index = np.argmin(np.abs(shifts-midval))`. The simple solution implemented here is to change it to a `numpy.ndarray`.
@@ -472,9 +472,9 @@ class Specfit(interactive.Interactive): midpt = self.Spectrum.xarr[midpt_pixel].value elif midpt_location == 'fitted': try: - shifts = [self.Spectrum.specfit.parinfo[x].value + shifts = np.array([self.Spectrum.specfit.parinfo[x].value for x in self.Spectrum.specfit.parinfo.keys() - if 'SHIFT' in x] + if 'SHIFT' in x]) except AttributeError: raise AttributeError("Can only specify midpt_location=" "fitted if there is a SHIFT parameter"
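For context on the fix above, a minimal standalone sketch (the values here are illustrative, not from the pyspeckit code): plain Python lists do not support the element-wise arithmetic used on line 490, while a `numpy.ndarray` does.

```python
import numpy as np

shifts_as_list = [1.2, 3.4, 5.6]
midval = 3.0

# A plain list has no element-wise subtraction:
# shifts_as_list - midval  ->  TypeError: unsupported operand type(s) for -

# Converting to an ndarray makes the broadcasted expression work:
shifts = np.array(shifts_as_list)
midpt_index = np.argmin(np.abs(shifts - midval))  # 1, since 3.4 is closest to 3.0
```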
Update installation-osx.rst Remove extraneous backticks.
@@ -142,7 +142,7 @@ To install the Kivy virtualenv, you must: 3. In the GUI copy the Kivy.app to /Applications by dragging the folder icon to the right. 4. Optionally create a symlink by running the following command:: - ``ln -s /Applications/Kivy.app/Contents/Resources/script /usr/local/bin/kivy`` + ln -s /Applications/Kivy.app/Contents/Resources/script /usr/local/bin/kivy This creates the ``kivy`` binary that you can use instead of python to run scripts. I.e. instead of doing ``python my_script.py`` or ``python -m pip install <module name>``, write
Update apt_barium.txt. "If it is unable to communicate with the domain above, Speculoos will attempt to use a backup C2 at 119.28.139[.]20, also over TCP/443."
@@ -81,6 +81,10 @@ exchange.dumb1.com # Reference: https://unit42.paloaltonetworks.com/apt41-using-new-speculoos-backdoor-to-target-organizations-globally/ # Reference: https://otx.alienvault.com/pulse/5e95c0d3d12068d29f538338 +# Reference: https://www.virustotal.com/gui/ip-address/66.42.98.220/relations +http://66.42.98.220 +66.42.98.220:12345 +119.28.139.20:443 alibaba.zzux.com exchange.longmusic.com
Add check calls for configurable class Summary: A little seatbelt Test Plan: Unit Reviewers: prha, alangenfeld, dgibson
@@ -26,9 +26,9 @@ def _schedule_directory(base): def configurable_class_data(config_field): return ConfigurableClassData( - config_field["module"], - config_field["class"], - yaml.dump(config_field.get("config") or {}, default_flow_style=False), + check.str_elem(config_field, "module"), + check.str_elem(config_field, "class"), + yaml.dump(check.opt_dict_elem(config_field, "config"), default_flow_style=False), )
Fix Kubeflow ingress issues Allows setting a hostname during `microk8s.enable kubeflow`, in case a user wants something other than localhost, and also manually creates an Ingress vs using `juju expose`, due to issues with cluster restart.
@@ -19,7 +19,7 @@ def run(*args, die=True, debug=False): env["PATH"] += ":%s" % os.environ["SNAP"] if debug: - print("Running `%s`" % ' '.join(args)) + print("Running `%s`" % " ".join(args)) result = subprocess.run( args, @@ -51,7 +51,7 @@ def get_random_pass(): def juju(*args, **kwargs): - if strtobool(os.environ.get("KUBEFLOW_DEBUG") or 'false'): + if strtobool(os.environ.get("KUBEFLOW_DEBUG") or "false"): return run("microk8s-juju.wrapper", "--debug", *args, debug=True, **kwargs) else: return run("microk8s-juju.wrapper", *args, **kwargs) @@ -61,6 +61,7 @@ def main(): password = os.environ.get("KUBEFLOW_AUTH_PASSWORD") or get_random_pass() channel = os.environ.get("KUBEFLOW_CHANNEL") or "stable" no_proxy = os.environ.get("KUBEFLOW_NO_PROXY") or None + hostname = os.environ.get("KUBEFLOW_HOSTNAME") or "localhost" password_overlay = { "applications": { @@ -130,43 +131,50 @@ def main(): "--all", ) - juju("config", "ambassador", "juju-external-hostname=localhost") - juju("expose", "ambassador") - # Workaround for https://bugs.launchpad.net/juju/+bug/1849725. - # Wait for up to a minute for Juju to finish setting up the Ingress - # so that we can patch it, and fail if it takes too long. - patch = json.dumps({ - 'kind': 'Ingress', - 'apiVersion': 'extensions/v1beta1', - 'metadata': {'name': 'ambassador', 'namespace': 'kubeflow'}, - 'spec': {'tls': [{'hosts': ['localhost'], 'secretName': 'ambassador-tls'}]}, - }).encode('utf-8') + ingress = json.dumps( + { + "apiVersion": "extensions/v1beta1", + "kind": "Ingress", + "metadata": {"name": "ambassador-ingress", "namespace": "kubeflow"}, + "spec": { + "rules": [ + { + "host": hostname, + "http": { + "paths": [ + { + "backend": { + "serviceName": "ambassador", + "servicePort": 80, + }, + "path": "/", + } + ] + }, + } + ], + "tls": [{"hosts": [hostname], "secretName": "dummy-tls"}], + }, + } + ).encode("utf-8") env = os.environ.copy() env["PATH"] += ":%s" % os.environ["SNAP"] - for _ in range(12): - try: subprocess.run( - ['microk8s-kubectl.wrapper', 'apply', '-f', '-'], - input=patch, + ["microk8s-kubectl.wrapper", "apply", "-f", "-"], + input=ingress, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env, ).check_returncode() - break - except subprocess.CalledProcessError: - time.sleep(5) - else: - print("Couldn't set Ambassador up properly") - sys.exit(1) print( textwrap.dedent( """ Congratulations, Kubeflow is now available. - The dashboard is available at https://localhost/ + The dashboard is available at https://%s/ Username: admin Password: %s @@ -180,7 +188,7 @@ def main(): microk8s.disable kubeflow """ - % (password_overlay["applications"]["kubeflow-gatekeeper"]["options"]["password"]) + % (hostname, password) ) )
Don't show signature help if user has continued typing. When opened after server reply, the hover fights with the completion menu.
@@ -101,6 +101,7 @@ class SignatureHelpListener(sublime_plugin.ViewEventListener): self.view.hide_popup() def request_signature_help(self, point: int) -> None: + self.requested_position = point client = client_from_session(session_for_view(self.view, 'signatureHelpProvider', point)) if client: global_events.publish("view.on_purge_changes", self.view) @@ -111,6 +112,7 @@ class SignatureHelpListener(sublime_plugin.ViewEventListener): lambda response: self.handle_response(response, point)) def handle_response(self, response: 'Optional[Dict]', point: int) -> None: + if self.view.sel()[0].begin() == self.requested_position: self._help = create_signature_help(response) if self._help: content = self._help.build_popup_content(self._renderer)
Don't show token with decimals=None Closes
@@ -98,7 +98,7 @@ class BalanceService: :param exclude_spam: :return: ERC20 tokens filtered by spam or trusted """ - base_queryset = Token.objects.filter( + base_queryset = Token.objects.erc20().filter( address__in=erc20_addresses ).values_list( 'address', flat=True
Fix phishing playbook 1. Set sender properly into context 2. Use the sender from incident label instead of context, since context can contain many other emails as well
@@ -3,6 +3,7 @@ version: -1 system: true fromversion: 2.5.0 name: Phishing Playbook - Automated +releaseNotes: "-" description: |- This is an automated playbook to investigate suspected Phishing attempts. It picks up the required information from the incident metadata as created by the mail listener. @@ -86,7 +87,7 @@ tasks: htmlBody: "" noteEntryID: "" subject: 'Re: Phishing Investigation - ${incident.name}' - to: ${Account.Email} + to: ${incident.labels.Email/from} view: |- { "position": { @@ -208,7 +209,7 @@ tasks: scriptarguments: distance: "" domain: ${inputs.CompanyDomains} - sender: ${Account.Email} + sender: ${incident.labels.Email/from} results: - LevenshteinDistance view: |- @@ -387,7 +388,7 @@ tasks: htmlBody: "" noteEntryID: "" subject: 'Re: Phishing Investigation - ${incident.name}' - to: ${Account.Email} + to: ${incident.labels.Email/from} view: |- { "position": { @@ -446,7 +447,7 @@ tasks: htmlBody: "" noteEntryID: "" subject: 'Re: Phishing Investigation - ${incident.name}' - to: ${Account.Email} + to: ${incident.labels.Email/from} view: |- { "position": { @@ -632,8 +633,9 @@ tasks: '#none#': - "98" scriptarguments: - key: Account.Email + key: Account.Email.Address value: ${incident.labels.Email/from} + append: true view: |- { "position": {
Fix a typo in tutorials docs Fix a typo in tutorials docs (double parenthesis: "))" )
@@ -58,7 +58,7 @@ inversion problems. <http://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/03_fwi.ipynb>`_ * `04 - Distributed FWI with Dask <http://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/04_dask.ipynb>`_ -* `05 - FWI with total variation (TV)) minimisation +* `05 - FWI with total variation (TV) minimisation <http://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/05_skimage_tv.ipynb>`_ * `06 - Acoustic modeling (2D) on a staggerd grid with the first-order wave equation <http://nbviewer.jupyter.org/github/opesci/devito/blob/master/examples/seismic/tutorials/06_staggered_acoustic.ipynb>`_
auto_attr_custom: forward the documentation to generated class TN:
@@ -713,6 +713,7 @@ def auto_attr_custom(name, *partial_args, **partial_kwargs): '__init__': __init__, '__repr__': __repr__, 'sub_expressions': sub_expressions, + '__doc__': fn.__doc__, } ))
Fix documentation for repo cloning To clone the GitHub repository into /opt/peering-manager, the dot has to be removed. If you don't remove the dot, the repository will be cloned into /opt
@@ -21,13 +21,14 @@ Clone the Git repository from the base directory. This will create the `peering-manager` application directory and extract the repository into it. ```no-highlight -# git clone https://github.com/respawner/peering-manager.git . -Cloning into '.'... -remote: Counting objects: 431, done. -remote: Compressing objects: 100% (123/123), done. -remote: Total 431 (delta 100), reused 117 (delta 48), pack-reused 255 -Receiving objects: 100% (431/431), 1.18 MiB | 593.00 KiB/s, done. -Resolving deltas: 100% (217/217), done. +# git clone https://github.com/respawner/peering-manager.git +Cloning into 'peering-manager'... +remote: Enumerating objects: 126, done. +remote: Counting objects: 100% (126/126), done. +remote: Compressing objects: 100% (91/91), done. +remote: Total 8579 (delta 46), reused 71 (delta 30), pack-reused 8453 +Receiving objects: 100% (8579/8579), 12.07 MiB | 13.52 MiB/s, done. +Resolving deltas: 100% (5135/5135), done. ``` Verify the repository was extracted.
Correct definition to match output in document. Code was malformed, with uneven brackets, and the output did not match.
@@ -26,7 +26,7 @@ Add definitions to your spec using `definition <apispec.APISpec.definition>`. spec.definition('Gist', properties={ 'id': {'type': 'integer', 'format': 'int64'}, - 'content': 'type': 'string'}, + 'name': {'type': 'string'} })
STY: updated flake8 in unit tests Made flake8 suggested style changes in unit tests.
@@ -64,7 +64,6 @@ class TestTestingUtils(): """ assert testing.nan_equal(val1, val2) - @pytest.mark.parametrize("val1, val2", [(0.0, 1.0), (np.nan, np.inf), ('one', 'One'), (None, False), (True, 'true'), (False, 'F'),
Update gcloud_setup.rst rst -> html
@@ -77,7 +77,7 @@ In the config file (the one that you use with --config flag, or, if you use default, in the ``studio/default_config.yaml``), go to the ``cloud`` section. Change projectId to the project id of the google project that you enabled cloud computing under. You can also modify default instance -parameters (see `Cloud computing for studio <cloud.rst>`__ for +parameters (see `Cloud computing for studio <http://studioml.readthedocs.io/en/latest/cloud.html>`__ for limitations though). Test @@ -93,5 +93,5 @@ To test if things are set up correctly, go to Then run ``studio`` locally, and watch the new experiment. In a little while, it should change its status to "finished" and show the system information (number of cpus, amount of ram / hdd) of a default instance. -See `Cloud computing for studio <cloud.rst>`__ for more instructions on +See `Cloud computing for studio <http://studioml.readthedocs.io/en/latest/cloud.html>`__ for more instructions on using an instance with specific hardware parameters.
Update android_bankbot.txt Deleting some orphan strings
@@ -2040,13 +2040,11 @@ hir-san.tk ili-oori.tk internet-bankmellat-ir.tk internet-mellatbank-ir.tk -ir-idpax-tk +ir-idpax-iran.tk lnternet-bankmellat-ir.tk lsp-pey.cf mellatbank-iran-com.ga mylicense.cf -ns1.p-vps.tk -ns2.p-vps.tk og-req.tk op-seq.tk p-coin.tk
svtplay: don't download related videos with -A. Fixes:
@@ -205,7 +205,7 @@ class Svtplay(Service, MetadataThumbMixin): if tab == i["id"]: collections.append(i) else: - if i["id"] == "upcoming": + if i["id"] == "upcoming" or i["id"] == "related": continue elif self.config.get("include_clips") and "clips" in i["id"]: collections.append(i)
Fix string in header. Commit changed sha1 to sha256; the string needs to change correspondingly. Related-Bug:
@@ -185,7 +185,7 @@ def http_log_req(_logger, args, kwargs): v = value.encode('utf-8') h = hashlib.sha256(v) d = h.hexdigest() - value = "{SHA1}%s" % d + value = "{SHA256}%s" % d header = ' -H "%s: %s"' % (key, value) string_parts.append(header)
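A small sketch of the header-masking format touched above, with an assumed token value (the real code hashes whatever header value is being logged): the sensitive value is replaced by a digest tag, which after this commit correctly names the algorithm in use.

```python
import hashlib

value = "example-auth-token"          # assumed header value for illustration
digest = hashlib.sha256(value.encode("utf-8")).hexdigest()
masked = "{SHA256}%s" % digest        # previously mislabelled as "{SHA1}%s"
print(masked)
```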
CircularDmaBuffer: get uart_if from test fixture. Was not breaking tests before, but see
@@ -38,7 +38,6 @@ namespace { // Variables // ---------------------------------------------------------------------------- constexpr size_t BUFFER_SIZE_TEST = 100; -MockUartInterface uart_if; // Classes & structs // ---------------------------------------------------------------------------- @@ -57,11 +56,12 @@ protected: const uint16_t transmission_size_ = BUFFER_SIZE_TEST; const size_t buffer_size_ = BUFFER_SIZE_TEST; CircularDmaBuffer* buff_ = nullptr; + MockUartInterface uart_if; }; // Functions // ---------------------------------------------------------------------------- -TEST(CircularDmaBufferShould, InitializeMembersZeroWithDefaultConstructor) { +TEST_F(CircularDmaBufferTest, InitializeMembersZeroWithDefaultConstructor) { CircularDmaBuffer buff; EXPECT_EQ(buff.getUartHandle(), nullptr); @@ -73,7 +73,7 @@ TEST(CircularDmaBufferShould, InitializeMembersZeroWithDefaultConstructor) { EXPECT_EQ(buff.getBuffTail(), 0); } -TEST(CircularDmaBufferShould, ConstInitializeMembersWithParameterizedConstructor) { +TEST_F(CircularDmaBufferTest, ConstInitializeMembersWithParameterizedConstructor) { constexpr size_t BUFFER_SIZE = 20; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { }; @@ -92,7 +92,7 @@ TEST(CircularDmaBufferShould, ConstInitializeMembersWithParameterizedConstructor EXPECT_EQ(buff.getBuffTail(), 0); } -TEST(CircularDmaBufferShould, SucceedSelfCheck) { +TEST_F(CircularDmaBufferTest, SucceedSelfCheck) { constexpr size_t BUFFER_SIZE = 20; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { }; @@ -107,7 +107,7 @@ TEST(CircularDmaBufferShould, SucceedSelfCheck) { // TODO: define different test fixtures with variants of m_transmission_size vs. m_buffer_size comparison? -TEST(CircularDmaBufferShould, FailSelfCheck) { +TEST_F(CircularDmaBufferTest, FailSelfCheck) { constexpr size_t BUFFER_SIZE = 8; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { }; @@ -120,7 +120,7 @@ TEST(CircularDmaBufferShould, FailSelfCheck) { EXPECT_FALSE(buff.selfCheck()); } -TEST(CircularDmaBufferShould, Initiate) { +TEST_F(CircularDmaBufferTest, Initiate) { constexpr size_t BUFFER_SIZE = 20; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { }; @@ -137,7 +137,7 @@ TEST(CircularDmaBufferShould, Initiate) { EXPECT_TRUE(buff.selfCheck()); } -TEST(CircularDmaBufferShould, AbortReinitiateIfError) { +TEST_F(CircularDmaBufferTest, AbortReinitiateIfError) { constexpr size_t BUFFER_SIZE = 20; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { }; @@ -154,7 +154,7 @@ TEST(CircularDmaBufferShould, AbortReinitiateIfError) { buff.reinitiateIfError(); } -TEST(CircularDmaBufferShould, NotAbortReinitiateIfNoError) { +TEST_F(CircularDmaBufferTest, NotAbortReinitiateIfNoError) { constexpr size_t BUFFER_SIZE = 20; UART_HandleTypeDef huart; uint8_t raw_buff[BUFFER_SIZE] = { };
Fix inventory_dns Dnsmasq does not re-read its config files on SIGHUP ([1]). Since a config directive (host-record) is used, a service restart is required in order for the record to be available. [1] "Notes [...] SIGHUP does NOT re-read the configuration file."
mode: 0644 when: inventory_dns | bool == true become: yes -- name: "Sending dnsmasq HUP" - # Note(TheJulia): We need to actually to send a hup signal directly as - # Ansible's reloaded state does not pass through to the init script. - command: killall -HUP dnsmasq +- name: "Restarting dnsmasq" + service: + name: dnsmasq + state: restarted become: yes when: (inventory_dhcp | bool == true) or (inventory_dns | bool == true) - name: "Deploy to hardware - Using custom instance_info."
Fix logging statements. Went unnoticed because logging exceptions won't interrupt the main thread, so you actually have to view the logs to notice.
@@ -87,9 +87,9 @@ def test_signal_service(dcos_api_session): if enabled == 'false': pytest.skip('Telemetry disabled in /opt/mesosphere/etc/dcos-signal-config.json... skipping test') - logging.info("Version: ", dcos_version) - logging.info("Customer Key: ", customer_key) - logging.info("Cluster ID: ", cluster_id) + logging.info("Version: " + dcos_version) + logging.info("Customer Key: " + customer_key) + logging.info("Cluster ID: " + cluster_id) direct_report = dcos_api_session.get('/system/health/v1/report?cache=0') signal_results = subprocess.check_output(["/opt/mesosphere/bin/dcos-signal", "-test"], universal_newlines=True)
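A minimal sketch of the pitfall fixed above (illustrative version string, not the DC/OS values): stdlib `logging` treats extra positional arguments as %-format arguments, so a message without a placeholder fails at formatting time, and that failure is swallowed by logging's internal error handler instead of interrupting the test.

```python
import logging

logging.basicConfig(level=logging.INFO)
version = "1.13.0"

# Buggy: the second argument is used for %-formatting, but the message has no
# placeholder, so logging reports an internal "Logging error" and the value is lost.
logging.info("Version: ", version)

# The commit fixes it by concatenation; lazy %-formatting is the other common idiom:
logging.info("Version: " + version)
logging.info("Version: %s", version)
```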
Reduce code duplication in AddElementwise This patch decreases code duplication in the parameter parsing of augmenters.arithmetic.AddElementwise by using the parameter handling functions in parameters.py.
@@ -150,13 +150,15 @@ class AddElementwise(Augmenter): Parameters ---------- - value : int or iterable of two ints or StochasticParameter, optional(default=0) + value : int or tuple of two int or list of int or StochasticParameter, optional(default=0) Value to add to the pixels. * If an int, then that value will be used for all images. * If a tuple (a, b), then values from the discrete range [a .. b] will be sampled. + * If a list of integers, a random value will be sampled from the list + per image. * If a StochasticParameter, then values will be sampled per pixel (and possibly channel) from that parameter. @@ -202,24 +204,8 @@ class AddElementwise(Augmenter): def __init__(self, value=0, per_channel=False, name=None, deterministic=False, random_state=None): super(AddElementwise, self).__init__(name=name, deterministic=deterministic, random_state=random_state) - if ia.is_single_integer(value): - ia.do_assert(-255 <= value <= 255, "Expected value to have range [-255, 255], got value %d." % (value,)) - self.value = Deterministic(value) - elif ia.is_iterable(value): - ia.do_assert(len(value) == 2, "Expected tuple/list with 2 entries, got %d entries." % (len(value),)) - self.value = DiscreteUniform(value[0], value[1]) - elif isinstance(value, StochasticParameter): - self.value = value - else: - raise Exception("Expected float or int, tuple/list with 2 entries or StochasticParameter. Got %s." % (type(value),)) - - if per_channel in [True, False, 0, 1, 0.0, 1.0]: - self.per_channel = Deterministic(int(per_channel)) - elif ia.is_single_number(per_channel): - ia.do_assert(0 <= per_channel <= 1.0) - self.per_channel = Binomial(per_channel) - else: - raise Exception("Expected per_channel to be boolean or number or StochasticParameter") + self.value = iap.handle_discrete_param(value, "value", value_range=(-255, 255), tuple_to_uniform=True, list_to_choice=True, allow_floats=False) + self.per_channel = iap.handle_probability_param(per_channel, "per_channel") def _augment_images(self, images, random_state, parents, hooks): input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)
Bug fix for FrechetSort. Summary: noise should be sampled independently; default length is the last dim of scores.
@@ -74,7 +74,7 @@ class FrechetSort(Sampler): number of items and it can be difficult to enumerate them.""" assert scores.dim() == 2, "sample_action only accepts batches" log_scores = scores if self.log_scores else torch.log(scores) - perturbed = log_scores + self.gumbel_noise.sample((scores.shape[1],)) + perturbed = log_scores + self.gumbel_noise.sample(scores.shape) action = torch.argsort(perturbed.detach(), descending=True) if self.topk is not None: action = action[: self.topk] @@ -86,9 +86,9 @@ class FrechetSort(Sampler): list of permutations only considering the top `equiv_len` ranks?""" log_scores = scores if self.log_scores else torch.log(scores) s = self.select_indices(log_scores, action) - n = len(log_scores) + n = log_scores.shape[-1] p = self.upto if self.upto is not None else n return -sum( torch.log(torch.exp((s[k:] - s[k]) * self.shape).sum(dim=0)) - for k in range(p) # pyre-ignore + for k in range(p) )
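A rough illustration of the shape issue described above (a standalone sketch, with a plain `Gumbel` distribution standing in for the sampler's `gumbel_noise`): sampling with only the last dimension draws one noise vector that is broadcast across the whole batch, whereas sampling with the full shape draws independent noise per row.

```python
import torch

scores = torch.rand(4, 3)                    # batch of 4 score vectors, 3 items each
gumbel = torch.distributions.Gumbel(0.0, 1.0)

shared = gumbel.sample((scores.shape[1],))   # shape (3,): one draw reused for every row
independent = gumbel.sample(scores.shape)    # shape (4, 3): fresh noise per element

# With shared noise, rows with identical scores always receive identical
# perturbations and hence identical rankings; independent noise avoids that.
perturbed = torch.log(scores) + independent
ranking = torch.argsort(perturbed, descending=True)
```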
fix: Remove opening file object when validating S3 parquet source. * Remove opening the file object. Let pyarrow handle opening the path using the filesystem. * fix: linting error
@@ -160,9 +160,7 @@ class FileSource(DataSource): if filesystem is None: schema = ParquetDataset(path).schema.to_arrow_schema() else: - schema = ParquetDataset( - filesystem.open_input_file(path), filesystem=filesystem - ).schema + schema = ParquetDataset(path, filesystem=filesystem).schema return zip(schema.names, map(str, schema.types))
Fix proxy documentation Make user and password documentation identical
@@ -53,7 +53,7 @@ def managed(name, port, services=None, user=None, password=None, bypass_domains= The username to use for the proxy server if required password - The password to use if required by the server + The password to use for the proxy server if required bypass_domains An array of the domains that should bypass the proxy
Prepare `2.11.1rc3`. [ci skip-rust] [ci skip-build-wheels]
# 2.11.x Release Series +## 2.11.1rc3 (Jun 23, 2022) + +### Bug fixes + +* Fix `[python-infer].inits` and `[python-infer].conftests` to consider `resolve` field (Cherry-pick of #15787) ([#15794](https://github.com/pantsbuild/pants/pull/15794)) + +### Documentation + +* Fix broken links to `tailor` documentation ([#15844](https://github.com/pantsbuild/pants/pull/15844)) + +* Handle anchors in `doc_url()` correctly (cherrypick of #15812) ([#15822](https://github.com/pantsbuild/pants/pull/15822)) + ## 2.11.1rc2 (May 31, 2022) ### User API Changes * jvm: fix incorrect check for root-only `ClasspathEntryRequest` implementations (Cherry pick of #15494) ([#15497](https://github.com/pantsbuild/pants/pull/15497)) -* Fix incorrect digest for JVM multiple jvm_artifact(.., jar=..) entries (Cherry-pick of #15571) ([#15610](https://github.com/pantsbuild/pants/pull/15610)) +* Fix incorrect digest for JVM multiple `jvm_artifact(.., jar=..)` entries (Cherry-pick of #15571) ([#15610](https://github.com/pantsbuild/pants/pull/15610)) ## 2.11.1rc0 (May 13, 2022)
Fix inconsistent credential precedence for ebcli. The CLI was not setting the correct profile when multiple profiles were being used back and forth during multiple init calls. This was due to an update to botocore. The CLI now correctly sets the profile tag if used during init. SIM CR
@@ -168,6 +168,7 @@ def _get_botocore_session(): 'profile': (None, _profile_env_var, _profile, None), }) session.set_config_variable('region', _region_name) + session.set_config_variable('profile', _profile) session.register_component('data_loader', _get_data_loader()) _set_user_agent_for_session(session) _get_botocore_session.botocore_session = session
EditScopeAlgo: Improve TransformEdits UI. Make row names wider, to better accommodate long location names. Hide default row. It is never used because every location in the PathFilter has a dedicated row in the spreadsheet.
#include "GafferScene/Transform.h" #include "Gaffer/EditScope.h" +#include "Gaffer/Metadata.h" #include "Gaffer/PlugAlgo.h" #include "Gaffer/Spreadsheet.h" #include "Gaffer/StringPlug.h" @@ -165,7 +166,10 @@ SceneProcessorPtr transformProcessor() plug->setInput( spreadsheet->outPlug()->getChild<Plug>( name ) ); } - PlugAlgo::promoteWithName( spreadsheet->rowsPlug(), "edits" ); + auto rowsPlug = static_cast<Spreadsheet::RowsPlug *>( PlugAlgo::promoteWithName( spreadsheet->rowsPlug(), "edits" ) ); + Metadata::registerValue( rowsPlug, "spreadsheet:defaultRowVisible", new BoolData( false ) ); + Metadata::registerValue( rowsPlug->defaultRow(), "spreadsheet:rowNameWidth", new IntData( 300 ) ); + result->outPlug()->setInput( transform->outPlug() ); return result;
fix nginx ngstat access lists HG-- branch : feature/microservices
@@ -135,9 +135,14 @@ server { location /ng_stats { stub_status; -{% for ip in ansible_all_ipv4_addresses %} +{% for host in groups["svc-nginx"] %} + {% for ip in hostvars[host].ansible_all_ipv4_addresses %} allow {{ ip }}; {% endfor %} +{% endfor %} +{% if keepalived_nginx_virtual_ip %} + allow {{ keepalived_nginx_virtual_ip }}; +{% endif %} deny all; access_log off; }
docs: warn about darglint perf issues. darglint has a known performance issue with NumPy and Google styles. The best solution so far is to use it sparingly, manually and through CI.
@@ -73,6 +73,17 @@ following settings: Our `darglint.toml <https://github.com/wemake-services/wemake-python-styleguide/blob/master/styles/darglint.toml>`_ file is available with the core settings for ``isort``. +.. warning:: + + There is a `known issue <https://github.com/terrencepreilly/darglint/issues/186>`_ + with ``darglint``'s performance when using ``google`` or ``numpy`` + documentation style, if you face long running times during the linting + process you can use the ``sphinx`` style by setting + ``docstring-style = sphinx`` in the ``["setup.cfg".flake8]`` section in a + nitpick configuration file. Otherwise, you can run ``darglint`` manually and + through CIs only, disabling it in flake8 args with + ``--darglint-ignore-regex='.*'``. + .. rubric:: Ignoring violations We know that people might not agree with 100% of our rules.
workloads/rt_app: Remove timeout in file transfer. Remove the explicit timeout when pushing to the device. Allow the polling mechanism to monitor the transfer if required.
@@ -162,7 +162,7 @@ class RtApp(Workload): self.host_json_config = self._load_json_config(context) self.config_file_on_target = self.target.path.join(self.target_working_directory, os.path.basename(self.host_json_config)) - self.target.push(self.host_json_config, self.config_file_on_target, timeout=60) + self.target.push(self.host_json_config, self.config_file_on_target) self.command = '{} {}'.format(self.target_binary, self.config_file_on_target) time_buffer = 30 @@ -284,7 +284,7 @@ class RtApp(Workload): self.target.execute(tar_command, timeout=300) target_path = self.target.path.join(self.target_working_directory, TARBALL_FILENAME) host_path = os.path.join(context.output_directory, TARBALL_FILENAME) - self.target.pull(target_path, host_path, timeout=120) + self.target.pull(target_path, host_path) with tarfile.open(host_path, 'r:gz') as tf: tf.extractall(context.output_directory) os.remove(host_path)
[config-service] fix string formatting [email protected]
@@ -468,7 +468,7 @@ class ConfigApi(remote.Service): if not acl.can_reimport(request.config_set): raise endpoints.ForbiddenException( '%s is now allowed to reimport %r' % ( - auth.get_current_identity().to_bytes()), request.config_set) + auth.get_current_identity().to_bytes(), request.config_set)) # Assume it is Gitiles. try: gitiles_import.import_config_set(request.config_set)
Separate the geometry description into a new function
@@ -283,6 +283,7 @@ class FluidFlow: self.yp = 0 self.p_mat_analytical = np.zeros([self.nz, self.ntheta]) self.p_mat_numerical = np.zeros([self.nz, self.ntheta]) + self.geometry_description() self.analytical_pressure_matrix_available = False self.numerical_pressure_matrix_available = False self.calculate_pressure_matrix_numerical() @@ -379,23 +380,17 @@ class FluidFlow: return self.p_mat_analytical - def calculate_coefficients(self, direction=None): - """This function calculates the constants that form the Poisson equation - of the discrete pressure (central differences in the second - derivatives). It is executed when the class is instantiated. + def geometry_description(self): + """This function calculates the geometry description. + It is executed when the class is instantiated. Examples -------- >>> my_fluid_flow = fluid_flow_example() - >>> my_fluid_flow.calculate_coefficients()# doctest: +ELLIPSIS - (array([[... + >>> my_fluid_flow.geometry_description() """ - c1 = np.zeros([self.nz, self.ntheta]) - c2 = np.zeros([self.nz, self.ntheta]) - c0w = np.zeros([self.nz, self.ntheta]) for i in range(0, self.nz): zno = i * self.dz self.z_list[i] = zno - eccentricity_error = False for j in range(0, self.ntheta): # fmt: off self.gama[i][j] = j * self.dtheta + np.pi / 2 + self.attitude_angle @@ -407,6 +402,26 @@ class FluidFlow: self.re[i][j] = radius_external self.ri[i][j] = radius_internal + + def calculate_coefficients(self, direction=None): + """This function calculates the constants that form the Poisson equation + of the discrete pressure (central differences in the second + derivatives). + Examples + -------- + >>> my_fluid_flow = fluid_flow_example() + >>> my_fluid_flow.calculate_coefficients()# doctest: +ELLIPSIS + (array([[... + """ + c1 = np.zeros([self.nz, self.ntheta]) + c2 = np.zeros([self.nz, self.ntheta]) + c0w = np.zeros([self.nz, self.ntheta]) + + for i in range(0, self.nz): + eccentricity_error = False + for j in range(0, self.ntheta): + # fmt: off + w = self.omega * self.radius_rotor k = (self.re[i][j] ** 2 * (np.log(self.re[i][j]) - 1 / 2) - self.ri[i][j] ** 2 *
Fix regression from Forgot that the default setting for use-qsv-decoder-with-encoder was True Fixes
@@ -661,7 +661,7 @@ class MkvtoMp4: options['preopts'].extend(['-hwaccel', 'dxva2']) elif info.video.codec.lower() == "hevc" and self.hevc_qsv_decoder: options['preopts'].extend(['-vcodec', 'hevc_qsv']) - elif info.video.codec.lower() == "h264" and self.qsv_decoder and (info.video.video_level / 10) < 5: + elif vcodec == "h264qsv" and info.video.codec.lower() == "h264" and self.qsv_decoder and (info.video.video_level / 10) < 5: options['preopts'].extend(['-vcodec', 'h264_qsv']) # Add width option
Fix taking address of temporary array Error was: dtype_transfer.c:2979:28: error: taking address of temporary array 2979 | (char *[2]){main_src, main_dst}, &block_size, | ^~~~~~~~~~~~~~~~~~~~
@@ -2951,9 +2951,11 @@ _strided_to_strided_multistep_cast( if (castdata->from.func != NULL) { npy_intp out_stride = castdata->from.descriptors[1]->elsize; + char *const data[2] = {src, castdata->from_buffer}; + npy_intp strides[2] = {src_stride, out_stride}; if (castdata->from.func(&castdata->from.context, - (char *[2]){src, castdata->from_buffer}, &block_size, - (npy_intp [2]){src_stride, out_stride}, + data, &block_size, + strides, castdata->from.auxdata) != 0) { /* TODO: Internal buffer may require cleanup on error. */ return -1; @@ -2975,18 +2977,22 @@ _strided_to_strided_multistep_cast( main_dst_stride = dst_stride; } + char *const data[2] = {main_src, main_dst}; + npy_intp strides[2] = {main_src_stride, main_dst_stride}; if (castdata->main.func(&castdata->main.context, - (char *[2]){main_src, main_dst}, &block_size, - (npy_intp [2]){main_src_stride, main_dst_stride}, + data, &block_size, + strides, castdata->main.auxdata) != 0) { /* TODO: Internal buffer may require cleanup on error. */ return -1; } if (castdata->to.func != NULL) { + char *const data[2] = {main_dst, dst}; + npy_intp strides[2] = {main_dst_stride, dst_stride}; if (castdata->to.func(&castdata->to.context, - (char *[2]){main_dst, dst}, &block_size, - (npy_intp [2]){main_dst_stride, dst_stride}, + data, &block_size, + strides, castdata->to.auxdata) != 0) { return -1; }
Document location of frontend code on docker Mention the location of the frontend code in docker
@@ -23,7 +23,7 @@ Install Node.js and Yarn $ apt-get update && apt-get install nodejs yarn -Cd to timesketch repository root (folder that contains `package.json`) +Cd to timesketch repository root (folder that contains `package.json` - on docker it is: `/usr/local/src/timesketch/timesketch/frontend`) and install Node.js packages (this will create `node_modules/` folder in the current directory and install packages from `package.json` there)
Translated using Weblate (English) Currently translated at 100.0% (22 of 22 strings) Translate-URL: Translation: Couchers/Web app - Donations
"donations_info": "Your donation goes to <1>{{ legal_name }}</1>, a U.S. 501(c)(3) non-profit that operates the Couchers.org service and supports the project. Donations are tax exempt in the USA, our EIN is 87-1734577.", "benefactor_contact": "If you wish to contribute over $1000, please contact us at <1>{{email}}</1> for us to arrange a lower fee transfer.", "benefactor_email": "[email protected]", - "donations_recurrence_explainer": "A monthly donation helps us build our operations on a stable basis and plan ahead.", + "donations_recurrence_explainer": "A monthly donation helps us plan ahead and build our operations on a stable basis.", "donations_value": "{{val, currency}}" }
Update jax2tf_test.py Fix the type of np.zeros to be float32. Also, replaced `jnp.zeros` with `np.zeros` because technically the argument to `f_tf` should be a TF value, not a JAX value.
@@ -522,7 +522,7 @@ class Jax2TfTest(tf_test_util.JaxToTfTestCase): return jnp.sum(x) f_tf = jax2tf.convert(f_jax) self.assertAllClose( - f_tf(x=jnp.zeros(3)), # Call with kwargs. + f_tf(x=np.zeros(3, dtype=np.float32)), # Call with kwargs. np.zeros((), dtype=np.float32))
Fix LVM state documentation. Capitalise the right words. Add clarification to lv_absent on vgname.
@@ -42,7 +42,7 @@ def __virtual__(): def pv_present(name, **kwargs): ''' - Set a physical device to be used as an LVM physical volume + Set a Physical Device to be used as an LVM Physical Volume name The device name to initialize. @@ -106,13 +106,13 @@ def pv_absent(name): def vg_present(name, devices=None, **kwargs): ''' - Create an LVM volume group + Create an LVM Volume Group name - The volume group name to create + The Volume Group name to create devices - A list of devices that will be added to the volume group + A list of devices that will be added to the Volume Group kwargs Any supported options to vgcreate. See @@ -213,16 +213,16 @@ def lv_present(name, thinpool=False, **kwargs): ''' - Create a new logical volume + Create a new Logical Volume name - The name of the logical volume + The name of the Logical Volume vgname - The volume group name for this logical volume + The name of the Volume Group on which the Logical Volume resides size - The initial size of the logical volume + The initial size of the Logical Volume extents The number of logical extents to allocate @@ -231,7 +231,7 @@ def lv_present(name, The name of the snapshot pv - The physical volume to use + The Physical Volume to use kwargs Any supported options to lvcreate. See @@ -240,10 +240,10 @@ def lv_present(name, .. versionadded:: to_complete thinvolume - Logical volume is thinly provisioned + Logical Volume is thinly provisioned thinpool - Logical volume is a thin pool + Logical Volume is a thin pool ''' ret = {'changes': {}, 'comment': '', @@ -289,13 +289,13 @@ def lv_present(name, def lv_absent(name, vgname=None): ''' - Remove a given existing logical volume from a named existing volume group + Remove a given existing Logical Volume from a named existing volume group name - The logical volume to remove + The Logical Volume to remove vgname - The volume group name + The name of the Volume Group on which the Logical Volume resides ''' ret = {'changes': {}, 'comment': '',
Fix flaky test Summary: Set random seed Pull Request resolved:
import json import logging +import random import unittest from typing import Dict, List +import numpy as np import torch from ml.rl.test.gym.world_model.mdnrnn_gym import mdnrnn_gym @@ -18,6 +20,9 @@ MDNRNN_CARTPOLE_JSON = "ml/rl/test/configs/mdnrnn_cartpole_v0.json" class TestMDNRNNGym(unittest.TestCase): def setUp(self): logging.getLogger().setLevel(logging.DEBUG) + np.random.seed(0) + torch.manual_seed(0) + random.seed(0) @staticmethod def verify_result(result_dict: Dict[str, float], expected_top_features: List[str]):
Ryu's parser is too clever, trying to parse down to the application. Have it drop the packet if the Ryu parser crashes.
import ipaddress from ryu.lib import mac -from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, packet, vlan +from ryu.lib.packet import arp, ethernet, icmp, icmpv6, ipv4, ipv6, stream_parser, packet, vlan from ryu.ofproto import ether from ryu.ofproto import inet @@ -57,11 +57,16 @@ def parse_pkt(pkt): def parse_packet_in_pkt(msg): + pkt = None + vlan_vid = None + + try: pkt = packet.Packet(msg.data) + except stream_parser.StreamParser.TooSmallException: + return (pkt, vlan_vid) + eth_pkt = pkt.get_protocols(ethernet.ethernet)[0] eth_type = eth_pkt.ethertype - vlan_vid = None - # Packet ins, can only come when a VLAN header has already been pushed # (ie. when we have progressed past the VLAN table). This gaurantees # a VLAN header will always be present, so we know which VLAN the packet @@ -70,7 +75,7 @@ def parse_packet_in_pkt(msg): # tagged packet vlan_proto = pkt.get_protocols(vlan.vlan)[0] vlan_vid = vlan_proto.vid - return pkt, vlan_vid + return (pkt, vlan_vid) def build_pkt_header(eth_src, eth_dst, vid, dl_type):
bugfix: tests: core: Assign ThemeSpec instance to `controller` theme. This commit replaces the previous assignment of a `str` value to the theme attribute of the `controller` pytest fixture with a ThemeSpec instance generated by the `generate_theme` helper method.
@@ -5,6 +5,7 @@ from typing import Any import pytest +from zulipterminal.config.themes import generate_theme from zulipterminal.core import Controller from zulipterminal.version import ZT_VERSION @@ -37,7 +38,7 @@ class TestController: self.config_file = "path/to/zuliprc" self.theme_name = "zt_dark" - self.theme = "default" + self.theme = generate_theme("zt_dark", 256) self.in_explore_mode = False self.autohide = True # FIXME Add tests for no-autohide self.notify_enabled = False
stm32h7: correct PLL2DIVR field names Now agrees with RM0433
@@ -402,6 +402,16 @@ RCC: name: BDRST RTCSRC: name: RTCSEL + PLL2DIVR: + _modify: + DIVP1: + name: DIVP2 + DIVQ1: + name: DIVQ2 + DIVR1: + name: DIVR2 + DIVN1: + name: DIVN2 APB1LRSTR: _modify: USART7RST:
docs: Remove mentions of some ldap features being added in 2.0. 2.0 is old enough that explicitly mentioning when these features were implemented isn't particularly useful and adds clutter.
@@ -194,14 +194,14 @@ run of `manage.py sync_ldap_user_data`. #### Synchronizing avatars -Starting with Zulip 2.0, Zulip supports syncing LDAP / Active +Zulip supports syncing LDAP / Active Directory profile pictures (usually available in the `thumbnailPhoto` or `jpegPhoto` attribute in LDAP) by configuring the `avatar` key in `AUTH_LDAP_USER_ATTR_MAP`. #### Synchronizing custom profile fields -Starting with Zulip 2.0, Zulip supports syncing +Zulip supports syncing [custom profile fields][custom-profile-fields] from LDAP / Active Directory. To configure this, you first need to [configure some custom profile fields][custom-profile-fields] for your @@ -214,7 +214,7 @@ to the `AUTH_LDAP_USER_ATTR_MAP`. #### Automatically deactivating users with Active Directory -Starting with Zulip 2.0, Zulip supports synchronizing the +Zulip supports synchronizing the disabled/deactivated status of users from Active Directory. You can configure this by uncommenting the sample line `"userAccountControl": "userAccountControl",` in @@ -236,7 +236,7 @@ for details on the various `userAccountControl` flags. #### Deactivating non-matching users -Starting with Zulip 2.0, Zulip supports automatically deactivating +Zulip supports automatically deactivating users if they are not found by the `AUTH_LDAP_USER_SEARCH` query (either because the user is no longer in LDAP/Active Directory, or because the user no longer matches the query). This feature is
remove the reference to a bug as the bug has been fixed in the public client lib
"\n", "The AutoML Tables logs the errors in the `errors.csv` file.\n", "\n", - "**NOTE:** The client library has a bug. If the following cell returns a `TypeError: Could not convert Any to BatchPredictResult` error, ignore it. The batch prediction output file(s) will be updated to the GCS bucket that you set in the preceding cells." + "**NOTE:** The batch prediction output file(s) will be updated to the GCS bucket that you set in the preceding cells." ] }, {
fix wrong HTTP method; update should use PATCH method instead of POST
@@ -127,7 +127,7 @@ class ScanZoneAPI(SCEndpoint): ... ips=['127.0.0.1'], scanner_ids=[1]) ''' payload = self._constructor(**kw) - return self._api.post('zone', json=payload).json()['response'] + return self._api.patch('zone/{}'.format(id), json=payload).json()['response'] def list(self, fields=None):
Reinitializes LS gauge opt algo alongside minimize. LS gauge opt (on its own) runs in 0.136 seconds on my machine. Previously, it took 16.45 seconds.
@@ -208,8 +208,6 @@ def gaugeopt_to_target(gateset, targetGateset, itemWeights=None, found, gaugeMx is the gauge matrix used to transform the gateset, and gateset is the final gauge-transformed gateset. """ - - if CPpenalty == 0 and \ TPpenalty == 0 and \ validSpamPenalty == 0 and \ @@ -405,24 +403,28 @@ def gaugeopt_custom_least_squares(gateset, targetGateset, objective_fn, gauge_gr minSol = _opt.least_squares(call_objective_fn, x0, jac=jacobian, max_nfev=maxfev, ftol=tol) + ''' minSol2 = _opt.least_squares(call_objective_fn, x0, #jac=jacobian, max_nfev=maxfev, ftol=tol) gaugeGroupEl.from_vector(minSol2.x) - a = gateset.copy() - a.transform(gaugeGroupEl) + finiteDifsGateset = gateset.copy() + finiteDifsGateset.transform(gaugeGroupEl) + ''' gaugeGroupEl.from_vector(minSol.x) newGateset = gateset.copy() newGateset.transform(gaugeGroupEl) + ''' print('jacobian compared to finite differences') - print(a.frobeniusdist(newGateset)) + print(finiteDifsGateset.frobeniusdist(newGateset)) + ''' if returnAll: return minSol.fun, gaugeMat, newGateset else: - return newGateset + return newGateset #, finiteDifsGateset def gaugeopt_custom(gateset, objective_fn, gauge_group=None,
Update Redis Exporter to 1.15.0 PR Support for memory usage aggregation by key groups (thanks ) PR Bump prometheus/client_golang library from 1.8.0 to 1.9.0
@@ -58,7 +58,7 @@ packages: context: static: <<: *default_static_context - version: 1.14.0 + version: 1.15.0 license: MIT summary: Prometheus exporter for Redis server metrics. description: Prometheus Exporter for Redis Metrics. Supports Redis 2.x, 3.x, 4.x, 5.x and 6.x
Fix errors where we use an invalid metaclass name Be more lenient on the check.
@@ -90,7 +90,7 @@ class SanitizerService(Service): if isinstance(p, UML.ExtensionEnd): p, ext = ext, p st = ext.type - meta = p.type and getattr(UML, p.type.name) + meta = p.type and getattr(UML, p.type.name, None) self.perform_unlink_for_instances(st, meta) @event_handler(AssociationSetEvent) @@ -107,7 +107,7 @@ class SanitizerService(Service): if not p: return st = event.old_value - meta = getattr(UML, p.type.name) + meta = getattr(UML, p.type.name, None) self.perform_unlink_for_instances(st, meta) @event_handler(AssociationDeleteEvent)
Update visualize.py: rename prediction_statistics -> test_statistics to have the same parameter name within functions
@@ -1448,7 +1448,7 @@ def calibration_multiclass( def confusion_matrix( - prediction_statistics, + test_statistics, ground_truth_metadata, field, top_n_classes, @@ -1456,27 +1456,27 @@ def confusion_matrix( model_names=None, **kwargs ): - if len(prediction_statistics) < 1: - logging.error('No prediction_statistics provided') + if len(test_statistics) < 1: + logging.error('No test_statistics provided') return metadata = load_json(ground_truth_metadata) - prediction_statistics_per_model_name = [load_json(prediction_statistics_f) - for prediction_statistics_f in - prediction_statistics] + test_statistics_per_model_name = [load_json(test_statistics_f) + for test_statistics_f in + test_statistics] fields_set = set() - for ls in prediction_statistics_per_model_name: + for ls in test_statistics_per_model_name: for key in ls: fields_set.add(key) fields = [field] if field is not None and len(field) > 0 else fields_set - for i, prediction_statistics in enumerate( - prediction_statistics_per_model_name): + for i, test_statistics in enumerate( + test_statistics_per_model_name): for field in fields: - if 'confusion_matrix' in prediction_statistics[field]: + if 'confusion_matrix' in test_statistics[field]: confusion_matrix = np.array( - prediction_statistics[field]['confusion_matrix'] + test_statistics[field]['confusion_matrix'] ) model_name_name = model_names[i] if ( model_names is not None and i < len(model_names)
doc: add downloads badge PR-URL:
# `node-gyp` - Node.js native addon build tool [![Build Status](https://github.com/nodejs/node-gyp/workflows/Tests/badge.svg?branch=master)](https://github.com/nodejs/node-gyp/actions?query=workflow%3ATests+branch%3Amaster) +![npm](https://img.shields.io/npm/dm/node-gyp) `node-gyp` is a cross-platform command-line tool written in Node.js for compiling native addon modules for Node.js. It contains a vendored copy of the
Police shoot woman in the face | May 31st updated main link, longer video
@@ -18,7 +18,7 @@ It is clearly seen that the woman was shot in the face, and was bleeding profuse **Links** -* https://mobile.twitter.com/etpartipredsct1/status/1266935860865298432 +* https://mobile.twitter.com/MarajYikes/status/1267030131563827200 ## Long Beach
models: Remove redundant check for POLICY_EVERYONE. We check whether policy value is POLICY_EVERYONE in has_permission itself so there is no need to handle that in can_edit_topic_of_any_message.
@@ -1897,8 +1897,6 @@ class UserProfile(AbstractBaseUser, PermissionsMixin, UserBaseSettings): return self.has_permission("user_group_edit_policy") def can_edit_topic_of_any_message(self) -> bool: - if self.realm.edit_topic_policy == Realm.POLICY_EVERYONE: - return True return self.has_permission("edit_topic_policy") def can_add_custom_emoji(self) -> bool:
Fix nn threshold test Summary: Pull Request resolved:
@@ -1304,14 +1304,15 @@ _modules_containing_builtins = (torch, torch.nn.functional, torch._C._nn) # TODO: delete this list, _should_skip(), and remove torch.nn.functional from # builtins list once everything in it has been converted to weak script _builtin_blacklist = { - 'tanhshrink', - 'softsign', - 'pairwise_distance', - 'prelu', - 'hardshrink', 'adaptive_avg_pool2d', 'adaptive_avg_pool3d', 'ctc_loss', + 'hardshrink', + 'pairwise_distance', + 'prelu', + 'softsign', + 'tanhshrink', + 'threshold', # ops with inplace option 'relu',
Fix a link that wouldn't display properly. `https://rich.readthedocs.io/en/latest/style.html` was hyperlinked to `docs` and would not display on VSCode's Terminal or the MacOS Terminal. This expands it out.
@@ -47,7 +47,7 @@ def main(): "[red]The default colour is shown as input Statement.\nIf left empty default value will be assigned.[/red]" ) console.print( - "[magenta]Please follow the link for available styles.[/magenta][link=https://rich.readthedocs.io/en/latest/style.html]docs[/link]" + "[magenta] For a full list of styles, visit[/magenta] https://rich.readthedocs.io/en/latest/style.html" ) for key in default: temp = default[key]
update cea-dev-workflow minor change
@@ -60,7 +60,7 @@ Git push and remote pull request ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #. When ready to update team members with your changes, ensure latest changes are committed. -#. Select *Push changes*. +#. Select *Push origin*. - **git push**: pushes your local commits to the remote respoitory. #. Open the remote repository in your browser. #. Click the *pull requests* tab.
Removed early clamping. This caused leaf variable errors.
@@ -108,17 +108,16 @@ class OverTheAirFlickeringTorch(EvasionAttack): epoch_print_str = f"{num_epochs}:" delta = torch.nn.parameter.Parameter( - torch.zeros(x[0].shape[1], 3, 1, 1).normal_(mean=0.0, std=0.2).to(torch.device("cuda")), requires_grad=True + torch.zeros(x[0].shape[1], 3, 1, 1, requires_grad=True).normal_(mean=0.0, std=0.2).to(self.estimator.device),requires_grad=True ) # All values of delta needs to be within [V_min, V_max], so we get those # values here. v_min = torch.min(x).item() v_max = torch.max(x).item() - delta = torch.clamp(delta, v_min, v_max) # Learning rate from the paper. - optimizer = torch.optim.Adam([delta], lr=1e-3) + optimizer = torch.optim.AdamW([delta], lr=1e-3) # They did not specify a learning rate scheduler or patience. scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)
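A minimal standalone sketch of the failure mode behind the change above (illustrative shapes, not the ART attack code): clamping a `Parameter` before building the optimizer rebinds the name to a non-leaf tensor, which `torch.optim` rejects; the clamp has to happen later, e.g. in-place under `no_grad`.

```python
import torch

delta = torch.nn.Parameter(torch.zeros(3))

# Early clamping produces a non-leaf tensor (it has a grad_fn), so the next
# line would raise "ValueError: can't optimize a non-leaf Tensor":
# delta = torch.clamp(delta, -1.0, 1.0)
# optimizer = torch.optim.AdamW([delta], lr=1e-3)

# Keep the Parameter itself as the optimized leaf and clamp afterwards:
optimizer = torch.optim.AdamW([delta], lr=1e-3)
with torch.no_grad():
    delta.clamp_(-1.0, 1.0)
```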
chore: update dependency No functional changes, just dropping old node versions from engines, linting, and fixing CI.
"glob": "^7.1.4", "graceful-fs": "^4.2.6", "make-fetch-happen": "^10.0.3", - "nopt": "^5.0.0", + "nopt": "^6.0.0", "npmlog": "^6.0.0", "rimraf": "^3.0.2", "semver": "^7.3.5",
Update CODEOWNERS Updates Batch code owners
/src/azure-cli/azure/cli/command_modules/appconfig/ @shenmuxiaosen @avanigupta @bim-msft /src/azure-cli/azure/cli/command_modules/appservice/ @qwordy @Juliehzl /src/azure-cli/azure/cli/command_modules/backup/ @dragonfly91 @fengzhou-msft -/src/azure-cli/azure/cli/command_modules/batch/ @bgklein +/src/azure-cli/azure/cli/command_modules/batch/ @bgklein @gingi @dpwatrous @paterasMSFT /src/azure-cli/azure/cli/command_modules/batchai/ @AlexanderYukhanov /src/azure-cli/azure/cli/command_modules/cosmosdb/ @dmakwana /src/azure-cli/azure/cli/command_modules/cloud/ @jiasli @fengzhou-msft @Juliehzl
Add retries for container push ECR Public and GitHub Actions are a little flaky, so retry pushes.
@@ -106,11 +106,13 @@ jobs: aws-region: us-east-1 - name: Push to ECR run: | - aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web-base - make push_web_base - aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web-test-base - make push_web_test_base - aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web - make push_web - aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/http - make push_http + sudo apt-get update + sudo apt-get install -y retry + retry -t 5 -- /bin/bash -c 'aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web-base' + retry -t 5 -- make push_web_base + retry -t 5 -- /bin/bash -c 'aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web-test-base' + retry -t 5 -- make push_web_test_base + retry -t 5 -- /bin/bash -c 'aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/web' + retry -t 5 -- make push_web + retry -t 5 -- /bin/bash -c 'aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/diag-nijmegen/grand-challenge/http' + retry -t 5 -- make push_http
Update elf_mirai.txt. Added main names for `mirai` ([0] https://www.hindawi.com/journals/scn/2018/7178164/lst16/)
/sora.m68k /sora.arc /sora.sh4 + +# Reference: https://www.hindawi.com/journals/scn/2018/7178164/lst16/ + +/mirai.arm +/mirai.arm5n +/mirai.arm7 +/mirai.dbg +/mirai.m68k +/mirai.mips +/mirai.mipsl +/mirai.ppc +/mirai.sh4 +/mirai.spc +/mirai.x86
Logging method calls in ml2 driver. Logging for the post-commit methods is being done in a following patch.
# under the License. from oslo_config import cfg +from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron.db.models import securitygroup @@ -83,34 +84,42 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): context.current['id'], operation, data, ml2_context=context) + @log_helpers.log_method_call def create_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE) + @log_helpers.log_method_call def create_subnet_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) + @log_helpers.log_method_call def create_port_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_PORT, odl_const.ODL_CREATE) + @log_helpers.log_method_call def update_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE) + @log_helpers.log_method_call def update_subnet_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE) + @log_helpers.log_method_call def update_port_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_PORT, odl_const.ODL_UPDATE) + @log_helpers.log_method_call def delete_network_precommit(self, context): OpenDaylightMechanismDriver._record_in_journal( context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[]) + @log_helpers.log_method_call def delete_subnet_precommit(self, context): # Use the journal row's data field to store parent object # uuids. This information is required for validation checking @@ -120,6 +129,7 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE, data=new_context) + @log_helpers.log_method_call def delete_port_precommit(self, context): # Use the journal row's data field to store parent object # uuids. This information is required for validation checking @@ -183,6 +193,7 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): journal.record(context, odl_const.ODL_SG_RULE, rule['id'], odl_const.ODL_CREATE, res_rule) + @log_helpers.log_method_call def sync_from_callback_precommit(self, context, operation, res_type, res_id, resource_dict, **kwargs): object_type = res_type.singular @@ -270,6 +281,7 @@ class OpenDaylightMechanismDriver(api.MechanismDriver): delete_subnet_postcommit = _postcommit delete_port_postcommit = _postcommit + @log_helpers.log_method_call def bind_port(self, port_context): """Set binding for a valid segments
Get rid of skipped test Summary: Got a task about this one consistently skipping. Getting rid of `unittest.skip` for now.
# LICENSE file in the root directory of this source tree. import itertools -import unittest import torch from botorch.exceptions import UnsupportedError @@ -221,6 +220,6 @@ class TestLinearTruncatedFidelityKernel(BotorchTestCase, BaseKernelTestCase): self.assertTrue(isinstance(kernel2.covar_module_unbiased, RBFKernel)) self.assertTrue(isinstance(kernel2.covar_module_biased, RBFKernel)) - @unittest.skip("This kernel uses priors by default, which cause this test to fail") def test_kernel_pickle_unpickle(self): - ... + # This kernel uses priors by default, which cause this test to fail + pass
commands: Fix handling of keyword arguments in `query_ldap` command. This bug seems to have been introduced by me while doing the refactoring in `94649f58f2fe0ed78d84e597ad6676522cfef9be`. Fixes:
@@ -12,6 +12,7 @@ class Command(BaseCommand): help="email of user to query") def handle(self, *args: Any, **options: str) -> None: - values = query_ldap(**options) + email = options['email'] + values = query_ldap(email) for value in values: print(value)
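A rough sketch of the bug class fixed above (the extra option names are hypothetical, not the actual Django options dict): expanding the whole `options` mapping with `**` forwards every management-command option as a keyword argument, which fails as soon as the callee only accepts `email`.

```python
def query_ldap(email):
    return ["uid=%s" % email]

options = {"email": "user@example.com", "verbosity": 1, "settings": None}

# query_ldap(**options)
#   -> TypeError: query_ldap() got an unexpected keyword argument 'verbosity'

# The fix: pass only the argument the function expects.
values = query_ldap(options["email"])
```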
Update GUI.py remove old code from JS telemetry workflow to make status icon work
@@ -3273,24 +3273,22 @@ class MainApp(App): ##-------------------Signal Status Check-------------------## - if client_status.split(":")[0] == "CONNECTED": + #if client_status.split(":")[0] == "CONNECTED": we dont check client status anymore in the python lightstreamer script if sub_status == "Subscribed": #client connected and subscibed to ISS telemetry if float(aos) == 1.00: self.signal_acquired() #signal status 1 means acquired sasa_xmit = 1 - elif float(aos) == 0.00: self.signal_lost() #signal status 0 means loss of signal sasa_xmit = 0 - elif float(aos) == 2.00: self.signal_stale() #signal status 2 means data is not being updated from server sasa_xmit = 0 else: self.signal_unsubscribed() - else: - self.signal_unsubscribed() + #else: + # self.signal_unsubscribed() if mimicbutton: # and float(aos) == 1.00): serialWrite("PSARJ=" + psarj + " " + "SSARJ=" + ssarj + " " + "PTRRJ=" + ptrrj + " " + "STRRJ=" + strrj + " " + "B1B=" + beta1b + " " + "B1A=" + beta1a + " " + "B2B=" + beta2b + " " + "B2A=" + beta2a + " " + "B3B=" + beta3b + " " + "B3A=" + beta3a + " " + "B4B=" + beta4b + " " + "B4A=" + beta4a + " " + "AOS=" + aos + " " + "V1A=" + v1a + " " + "V2A=" + v2a + " " + "V3A=" + v3a + " " + "V4A=" + v4a + " " + "V1B=" + v1b + " " + "V2B=" + v2b + " " + "V3B=" + v3b + " " + "V4B=" + v4b + " " + "ISS=" + module + " " + "Sgnt_el=" + str(int(sgant_elevation)) + " " + "Sgnt_xel=" + str(int(sgant_xelevation)) + " " + "Sgnt_xmit=" + str(int(sgant_transmit)) + " " + "SASA_Xmit=" + str(int(sasa_xmit)) + " SASA_AZ=" + str(float(sasa_az)) + " SASA_EL=" + str(float(sasa_el)) + " ")
[utils.serializer] serialize to OrderedDict... ... for better performance
@@ -15,7 +15,7 @@ import traceback from typing import Dict, Union, Any, Sequence # external imports -from dropbox.stone_serializers import json_encode # type: ignore +from dropbox.stone_serializers import json_compat_obj_encode # type: ignore from dropbox.stone_validators import Struct # type: ignore @@ -26,12 +26,11 @@ ErrorType = Dict[str, Union[str, Sequence[str], None]] def dropbox_stone_to_dict(obj: Any) -> StoneType: """Converts the result of a Dropbox SDK call to a dictionary.""" - dictionary = dict(type=obj.__class__.__name__) + serialized = json_compat_obj_encode(Struct(obj.__class__), obj) + serialized['type'] = type(obj).__name__ + serialized.move_to_end('type', last=False) - obj_string = json_encode(Struct(obj.__class__), obj) - dictionary.update(json.loads(obj_string)) - - return dictionary + return serialized def error_to_dict(err: Exception) -> ErrorType:
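The reason `move_to_end` works here is that `json_compat_obj_encode` hands back a mapping with ordered keys, so the 'type' entry can be repositioned in place instead of rebuilding the dict and re-parsing JSON. A small standalone sketch of the `OrderedDict.move_to_end` call:

    from collections import OrderedDict

    serialized = OrderedDict([("name", "file.txt"), ("size", 1024)])
    serialized["type"] = "FileMetadata"          # added last by default
    serialized.move_to_end("type", last=False)   # move it to the front

    print(list(serialized))  # ['type', 'name', 'size']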
Remove command definition in Vim autoload script It seems that commands can't be defined in autoload scripts of Vim 8.
" map <C-P> :call yapf#YAPF()<cr> " imap <C-P> <c-o>:call yapf#YAPF()<cr> " -" Alternatively, you can call the command YAPF. If you omit the range, -" it will reformat the whole buffer. -" -" example: -" :YAPF " formats whole buffer -" :'<,'>YAPF " formats lines selected in visual mode -" function! yapf#YAPF() range " Determine range to format. let l:line_ranges = a:firstline . '-' . a:lastline @@ -54,5 +47,3 @@ function! yapf#YAPF() range " Reset cursor to first line of the formatted range. call cursor(a:firstline, 1) endfunction - -command! -range=% YAPF <line1>,<line2>call yapf#YAPF()
Add BG-MK and BG-TR Interchange Capacities * Add BG-MK and BG-TR Capacities source from * Update README.md
"rotation": 180 }, "BG->MK": { + "capacity": [ + -950, + 950 + ], "lonlat": [ 22.912615, 41.86784 "rotation": -90 }, "BG->TR": { + "capacity": [ + -2485, + 2485 + ], "lonlat": [ 26.89864, 42.002181
update aea.skills.base.py on skill loading Fix 'not declared in configuration file' warning for 'tac_controller_contract' agent
@@ -900,15 +900,19 @@ class _SkillComponentLoader: - the class must be a subclass of "SkillComponent"; - its __module__ attribute must not start with 'aea.' (we exclude classes provided by the framework) - its __module__ attribute starts with the expected dotted path of this skill. + In particular, it should not be imported from another skill. :param classes: a list of pairs (class name, class object) :return: a list of the same kind, but filtered with only skill component classes. """ filtered_classes = filter( lambda name_and_class: issubclass(name_and_class[1], SkillComponent) + # the following condition filters out classes imported from 'aea' and not str.startswith(name_and_class[1].__module__, "aea.") + # the following condition filters out classes imported + # from other skills and not str.startswith( - name_and_class[1].__module__, self.skill_dotted_path + name_and_class[1].__module__, self.skill_dotted_path + "." ), classes, ) @@ -1142,6 +1146,14 @@ class _SkillComponentLoader: set_of_unused_classes = set( filter(lambda x: x not in used_classes, set_of_classes) ) + # filter out classes that are from other packages + set_of_unused_classes = set( + filter( + lambda x: not str.startswith(x.__module__, "packages."), + set_of_unused_classes, + ) + ) + if len(set_of_unused_classes) == 0: # all classes in the module are used! continue
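Why the trailing '.' matters: a plain prefix test on the module path also matches a sibling skill whose dotted path merely starts with the same string, while adding the dot anchors the match at a package boundary. A quick sketch with made-up package names:

    skill_dotted_path = "packages.author.skills.tac"

    modules = [
        "packages.author.skills.tac.handlers",        # belongs to this skill
        "packages.author.skills.tac_extra.handlers",  # a different skill
    ]

    # Plain prefix test: both modules match, which is wrong.
    print([m for m in modules if m.startswith(skill_dotted_path)])

    # Prefix test anchored with the trailing dot: only real sub-modules match.
    print([m for m in modules if m.startswith(skill_dotted_path + ".")])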
Quick unit test using Flask's MethodView view creation. Make sure it is easy to use
@@ -29,6 +29,7 @@ from flask_security.forms import ( email_validator, valid_user_email, ) +from flask_security import auth_required from flask_security.utils import ( capture_reset_password_requests, encode_string, @@ -498,3 +499,29 @@ def test_json_error_response_typeerror(): error_msg = ("tuple",) with pytest.raises(TypeError): json_error_response(errors=error_msg) + + +def test_method_view(app, client): + # auth_required with flask method view + from flask.views import MethodView + from flask import render_template_string + + class MyView(MethodView): + decorators = [auth_required("token", "session")] + + def get(self): + return render_template_string("Hi view") + + myview = MyView.as_view("myview") + + app.add_url_rule("/myview", view_func=myview, methods=["GET"]) + + response = client.get("/myview", follow_redirects=False) + # should require login + assert response.status_code == 302 + assert "/login" in response.location + + authenticate(client) + response = client.get("/myview") + assert response.status_code == 200 + assert b"Hi view" in response.data
Update ug017_storm_ref_stats.rst Clarify operator definition. Fix typos. Clarify what happens if you try to embed stat() in a Storm query.
Storm Reference - Statistical Operator ====================================== -The statistical operator is used to generate data about data in Synapse. +The statistical operator is used to calculate statistics about data in Synapse. ``stat()`` is defined in common.py_ as opposed to storm.py_. @@ -9,9 +9,9 @@ The statistical operator is used to generate data about data in Synapse. * ``stat()`` operates directly on the Synapse storage layer using the row-level APIs (as opposed to the node (form) APIs used by other Storm operators). This is an optimization that allows ``stat()`` to answer questions across large data sets ("all of the IP addresses in Synapse") that would otherwise be too "heavy" (non-performant) to lift. -* Depending on the specific ``stat()`` handler used and the optimizations available in a particular Syanpse storage backing, the amount of time for a given ``stat()`` query to return may vary. For example, "count" operations will generally return much faster than "min" or "max" operations, even with the use of row-level APIs. +* Depending on the specific ``stat()`` handler used and the optimizations available in a particular Synapse storage backing, the amount of time for a given ``stat()`` query to return may vary. For example, "count" operations will generally return much faster than "min" or "max" operations, even with the use of row-level APIs. -* ``stat()`` is designed as a stand-alone operator; because it uses a different set of APIs, it cannot operate on the output of a previous Storm query and so cannot be "chained" as part of a larger query. +* ``stat()`` is designed as a stand-alone operator; because it uses a different set of APIs, it cannot operate on the output of a previous Storm query and so cannot be "chained" as part of a larger query. ``stat()`` operations embedded within a larger Storm query are simply dropped / ignored. * Because Storm expects to return node data as the result of a query, ``stat()`` generates an "ephemeral node" containing the results of the operation. That is, output is structured as a "node" with properties reflecting the query parameters and results. However, this ephemeral node does not have a node identifier (ID), and the node is not permanently stored in the Cortex.
Replace np.concatenate with np.union1d in statesp._remove_useless_states method This ensures that all elements in the array of state indices to be removed are unique.
@@ -202,7 +202,7 @@ class StateSpace(LTI): ax0_C = np.where(~self.C.any(axis=0))[1] useless_1 = np.intersect1d(ax1_A, ax1_B, assume_unique=True) useless_2 = np.intersect1d(ax0_A, ax0_C, assume_unique=True) - useless = np.concatenate((useless_1, useless_2)) + useless = np.union1d(useless_1, useless_2) # Remove the useless states. self.A = delete(self.A, useless, 0)
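The practical difference: `np.concatenate` keeps duplicates, so a state index flagged as both unreachable and unobservable would appear twice in the deletion list, while `np.union1d` returns each index once (sorted). A quick sketch:

    import numpy as np

    useless_1 = np.array([1, 3])  # e.g. states with no effect via A and B
    useless_2 = np.array([3, 4])  # e.g. states with no effect via A and C

    print(np.concatenate((useless_1, useless_2)))  # [1 3 3 4] -- index 3 repeated
    print(np.union1d(useless_1, useless_2))        # [1 3 4]   -- unique, sorted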
Fix Eltex get_version HG-- branch : Eltex.MES.New
@@ -42,7 +42,8 @@ class Script(BaseScript): "41": "MES-3224F", "42": "MES-1024", "43": "MES-2124", - "52": "MES-1124" + "52": "MES-1124", + "54": "MES-5248" } def execute(self):
Update test_instruments.py made pep8 changes to import order, removed some unused imports, added prelim f107 data to excludes
""" tests the pysat meta object and code """ -import pysat -import pandas as pds -from nose.tools import assert_raises, raises -import nose.tools +import importlib from functools import partial -import tempfile - - -import pysat.instruments.pysat_testing -# import pysat.instruments as instruments import numpy as np -# import os +import sys +import nose.tools +import pandas as pds +import tempfile -import sys -import importlib +import pysat +import pysat.instruments.pysat_testing # module in list below are excluded from download checks exclude_list = ['champ_star', 'superdarn_grdex', 'cosmic_gps', 'cosmic2013_gps', 'icon_euv', 'icon_ivm', 'icon_mighti', 'icon_fuv', 'sw_dst', 'sw_kp', 'demeter_iap', 'sport_ivm'] - # exclude testing download functionality for specific module name, tag, sat_id -exclude_tags = {'': {'tag': [''], 'sat_id': ['']}} +exclude_tags = {'sw_f107': {'tag': ['prelim'], 'sat_id': ['']}} # dict, keyed by pysat instrument, with a list of usernames and passwords user_download_dict = {'supermag_magnetometer':['rstoneback', None]}
2.6.9 Hotfixes * Fix for * Fixes launch crash loop Fixes launch crash loop caused by the new copy destination of abcde.conf
@@ -98,6 +98,9 @@ function install_arm_requirements() { libdvd-pkg lsdvd sudo dpkg-reconfigure libdvd-pkg + + # create folders required to run the ARM service + sudo -u arm mkdir -p /home/arm/logs } function remove_existing_arm() { @@ -182,6 +185,8 @@ function setup_config_files() { # abcde.conf is expected in /etc by the abcde installation cp --no-clobber "/opt/arm/setup/.abcde.conf" "/etc/.abcde.conf" chown arm:arm "/etc/.abcde.conf" + # link to the new install location so runui.py doesn't break + sudo -u arm ln -sf /etc/.abdce.conf /etc/arm/config/abcde.conf if [[ $port_flag ]]; then echo -e "${RED}Non-default port specified, updating arm config...${NC}"
Remove settings about SGX in config.cmake removed settings about SGX since SGX is removed from TVM core
@@ -87,17 +87,6 @@ set(USE_OPENGL OFF) # Whether enable MicroTVM runtime set(USE_MICRO OFF) -# Whether to enable SGX runtime -# -# Possible values for USE_SGX: -# - /path/to/sgxsdk: path to Intel SGX SDK -# - OFF: disable SGX -# -# SGX_MODE := HW|SIM -set(USE_SGX OFF) -set(SGX_MODE "SIM") -set(RUST_SGX_SDK "/path/to/rust-sgx-sdk") - # Whether enable RPC runtime set(USE_RPC ON)
Set pg_isready user and password We know these settings are going to be true because the environment variables five lines up say so.
@@ -71,7 +71,7 @@ services: expose: - "5432" healthcheck: - test: pg_isready + test: pg_isready -d postgresql://commcarehq:commcarehq@postgres interval: 10s retries: 10 volumes:
Add running python setup.py test to CI tests may catch additional problems, see
@@ -16,7 +16,9 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Install linter - run: python -m pip install flake8 + run: | + uname -a + python -m pip install flake8 - name: Check syntax and style run: flake8 . --exclude get-pip.py --max-complexity=13 --statistics @@ -57,16 +59,16 @@ jobs: run: | pip install wheel pip install -r requirements.txt - pip install -r extra_requirements.txt pip install . - - name: Run unit tests with extra packages as non-root user - run: | - python -m pyfakefs.tests.all_tests - name: Run unit tests without extra packages as non-root user run: | export TEST_REAL_FS=1 python -m pyfakefs.tests.all_tests_without_extra_packages shell: bash + - name: Run setup.py test (uses pytest) + run: | + python setup.py test + shell: bash - name: Run unit tests without extra packages as root run: | if [[ '${{ matrix.os }}' != 'windows-2016' ]]; then @@ -74,6 +76,12 @@ jobs: sudo env "PATH=$PATH" python -m pyfakefs.tests.all_tests_without_extra_packages fi shell: bash + - name: Install extra dependencies + run: | + pip install -r extra_requirements.txt + - name: Run unit tests with extra packages as non-root user + run: | + python -m pyfakefs.tests.all_tests - name: Run pytest tests run: | export PY_VERSION=${{ matrix.python-version }}
Filter availableFiles by what is renderable. Resolves
return undefined; }, availableFiles() { - return this.files.filter(file => !file.thumbnail && !file.supplementary && file.available); + return this.files.filter( + file => + !file.thumbnail && + !file.supplementary && + file.available && + this.Kolibri.canRenderContent(this.kind, file.extension) + ); }, defaultFile() { return this.availableFiles && this.availableFiles.length
Process replay: Fix subtest diff Fix subtest diff
@@ -71,7 +71,7 @@ def run_test_process(data): assert os.path.exists(cur_log_fn), f"Cannot find log to upload: {cur_log_fn}" upload_file(cur_log_fn, os.path.basename(cur_log_fn)) os.remove(cur_log_fn) - return (segment, cfg.proc_name, res) + return (segment, cfg.proc_name, cfg.subtest_name, res) def get_log_data(segment): @@ -212,9 +212,9 @@ if __name__ == "__main__": results: Any = defaultdict(dict) p2 = pool.map(run_test_process, pool_args) - for (segment, proc, result) in tqdm(p2, desc="Running Tests", total=len(pool_args)): + for (segment, proc, subtest_name, result) in tqdm(p2, desc="Running Tests", total=len(pool_args)): if isinstance(result, list): - results[segment][proc] = result + results[segment][proc + subtest_name] = result diff1, diff2, failed = format_diff(results, ref_commit) if not upload:
qt swap dialog: fix enabling OK button fixes
@@ -34,7 +34,7 @@ class SwapDialog(WindowModalDialog): self.lnworker = self.window.wallet.lnworker self.swap_manager = self.lnworker.swap_manager self.network = window.network - self.tx = None + self.tx = None # for the forward-swap only self.is_reverse = True vbox = QVBoxLayout(self) self.description_label = WWLabel(self.get_description()) @@ -113,7 +113,6 @@ class SwapDialog(WindowModalDialog): else: self._spend_max_forward_swap() else: - self.tx = None self.send_amount_e.setAmount(None) self.update_fee() @@ -156,7 +155,8 @@ class SwapDialog(WindowModalDialog): self.recv_amount_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) self.recv_amount_e.follows = False self.send_follows = False - self.ok_button.setEnabled((recv_amount is not None) and (self.tx is not None)) + self.ok_button.setEnabled((recv_amount is not None) + and (self.tx is not None or self.is_reverse)) def on_recv_edited(self): if self.recv_amount_e.follows: @@ -171,7 +171,8 @@ class SwapDialog(WindowModalDialog): self.send_amount_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) self.send_amount_e.follows = False self.send_follows = True - self.ok_button.setEnabled((send_amount is not None) and (self.tx is not None)) + self.ok_button.setEnabled((send_amount is not None) + and (self.tx is not None or self.is_reverse)) def update(self): sm = self.swap_manager @@ -188,6 +189,7 @@ class SwapDialog(WindowModalDialog): self.update_fee() def update_fee(self): + self.tx = None is_max = self.max_button.isChecked() if self.is_reverse: if is_max:
GDB helpers: fix pretty-printing of synthetic nodes TN:
@@ -142,7 +142,23 @@ class ASTNodePrinter(BasePrinter): def unit(self): return AnalysisUnit(tagged_field(self.value, 'unit')) + @property + def synthetic(self): + """ + Return whether this node is synthetic. + + :rtype: bool + """ + return int(tagged_field(self.value, 'token_start_index')) == 0 + def sloc(self, with_end=True): + """ + Return the source location for this node as a string. + + This must not be called if the node is synthetic. + + :rtype: str + """ filename = self.unit.filename if filename: filename = os.path.basename(filename) @@ -159,8 +175,19 @@ class ASTNodePrinter(BasePrinter): if with_end and end else '' ) + @property + def parent(self): + """ + Return the parent node, or None if it's the root one. + + :rtype: gdb.Value + """ + return tagged_field(self.value, 'parent') + def to_string(self): - return ('<{} {}>'.format(self.kind, self.sloc()) + loc = ('synthetic from {}'.format(self.parent) + if self.synthetic else self.sloc()) + return ('<{} {}>'.format(self.kind, loc) if self.value else 'null')
Fix two bugs in printing bytes instance Bug 1: When `value` is None, trying to call `len(None)` throws an exception. Bug 2: When len(`value`) <= 100, the code currently prints b'' rather than `value`.
@@ -100,7 +100,7 @@ class Bytes(AbstractType): @classmethod def repr(cls, value): - return repr(value[:100] + b'...' if len(value) > 100 else b'') + return repr(value[:100] + b'...' if value is not None and len(value) > 100 else value) class Boolean(AbstractType):
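Both bugs trace back to how Python groups the conditional expression: `A + B if cond else C` parses as `(A + B) if cond else C`, so the old else-branch returned `b''` for short values, and calling `len(value)` first blew up on None. A small sketch contrasting the old and new expressions:

    def old_repr(value):
        return repr(value[:100] + b'...' if len(value) > 100 else b'')

    def new_repr(value):
        return repr(value[:100] + b'...'
                    if value is not None and len(value) > 100 else value)

    print(old_repr(b'short'))  # "b''"       -- the short value is dropped
    print(new_repr(b'short'))  # "b'short'"
    print(new_repr(None))      # 'None'
    # old_repr(None) raises TypeError: object of type 'NoneType' has no len()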
Fixes terminal size not being set properly See the documentation of interact; I include it here for easy reference: | Note that if you change the window size of the parent the SIGWINCH | signal will not be passed through to the child.
@@ -3,6 +3,8 @@ import json import os import sys import distutils.spawn +import shutil +import signal import click import crayons @@ -420,11 +422,32 @@ def shell(): shell = os.environ['SHELL'] click.echo(crayons.yellow('Spawning environment shell ({0}).'.format(crayons.red(shell)))) - c = pexpect.spawn("{0} -c '. {1}; exec {0} -i'".format(shell, activate_virtualenv(source=False))) + # Grab current terminal dimensions to replace the hardcoded default + # dimensions of pexpect + terminal_dimensions = shutil.get_terminal_size() + + c = pexpect.spawn( + "{0} -c '. {1}; exec {0} -i'".format( + shell, + activate_virtualenv(source=False) + ), + dimensions=( + terminal_dimensions.lines, + terminal_dimensions.columns + ) + ) # Skip this step for bash. if 'bash' not in shell: c.send(activate_virtualenv() + '\n') + # Handler for terminal resizing events + # Must be defined here to have the shell process in its context, since we + # can't pass it as an argument + def sigwinch_passthrough(sig, data): + terminal_dimensions = shutil.get_terminal_size() + c.setwinsize(terminal_dimensions.lines, terminal_dimensions.columns) + signal.signal(signal.SIGWINCH, sigwinch_passthrough) + # Interact with the new shell. c.interact()
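The fix follows the usual pexpect pattern: size the child pty from the real terminal when spawning, then re-send the size from a SIGWINCH handler, because pexpect does not forward window-size changes on its own. A trimmed-down sketch, assuming a POSIX terminal and pexpect installed (the shell path is illustrative):

    import shutil
    import signal

    import pexpect

    dims = shutil.get_terminal_size()
    child = pexpect.spawn("/bin/bash", dimensions=(dims.lines, dims.columns))

    def sigwinch_passthrough(sig, frame):
        # Propagate the parent's new window size to the child's pty.
        new_dims = shutil.get_terminal_size()
        child.setwinsize(new_dims.lines, new_dims.columns)

    signal.signal(signal.SIGWINCH, sigwinch_passthrough)
    child.interact()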
Fix preserve third dimension for grayscale images after cv2.warpPerspective cv2.warpPerspective apparently does not preserve the third dimension when the image is grayscale (single channel). This code should fix the warped image afterwards.
@@ -991,6 +991,8 @@ class PerspectiveTransform(Augmenter): # cv2.warpPerspective only supports <=4 channels assert images[i].shape[2] <= 4, "PerspectiveTransform is currently limited to images with 4 or less channels." warped = cv2.warpPerspective(images[i], M, (max_width, max_height)) + if warped.ndim == 2 and images[i].ndim == 3: + warped = np.expand_dims(warped, 2) #print(np.min(warped), np.max(warped), warped.dtype) if self.keep_size: h, w = images[i].shape[0:2]
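What the two added lines rely on: OpenCV returns a 2-D array when a single-channel image is warped, so the channel axis has to be restored for code that expects HxWxC. A minimal sketch assuming NumPy and OpenCV are available:

    import numpy as np
    import cv2

    gray = np.zeros((64, 64, 1), dtype=np.uint8)  # grayscale image, HxWx1
    M = np.eye(3, dtype=np.float64)               # identity homography

    warped = cv2.warpPerspective(gray, M, (64, 64))
    print(warped.shape)                           # (64, 64) -- channel axis dropped

    if warped.ndim == 2 and gray.ndim == 3:
        warped = np.expand_dims(warped, 2)
    print(warped.shape)                           # (64, 64, 1)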
(from AES) Update irambassador.py Bump the default validation timeout to 60 seconds.
@@ -80,8 +80,13 @@ class IRAmbassador (IRResource): "rewrite": "/ambassador/v0/", } - # Set up the default Envoy validation timeout. - default_validation_timeout: ClassVar[int] = 10 + # Set up the default Envoy validation timeout. This is deliberately chosen to be very large + # because the consequences of this timeout tripping are very bad. Ambassador basically ceases + # to function. It is far better to slow down as our configurations grow and give users a + # leading indicator that there is a scaling issue that needs to be dealt with than to + # suddenly and mysteriously stop functioning the day their configuration happens to become + # large enough to exceed this threshold. + default_validation_timeout: ClassVar[int] = 60 def __init__(self, ir: 'IR', aconf: Config, rkey: str="ir.ambassador",
lexer: use langkit.compiled_types.render This makes "capi" available to templates. TN:
@@ -5,10 +5,10 @@ from itertools import count import re from langkit.compile_context import get_context +from langkit.compiled_types import render from langkit.diagnostics import (Context, check_source_language, extract_library_location) from langkit.names import Name -from langkit.template_utils import common_renderer class Matcher(object): @@ -573,7 +573,7 @@ class Lexer(object): :rtype: str """ - return common_renderer.render( + return render( "lexer/quex_lexer_spec", tokens=self.tokens, patterns=self.__patterns, @@ -831,7 +831,7 @@ class Case(RuleAssoc): self.default_alt = alts[-1] def render(self, lexer): - return common_renderer.render( + return render( "lexer/case_action", alts=self.alts, default_alt=self.default_alt,
Do not delete CW logs for endpoints. Keep the logs for failed tests only.
@@ -58,12 +58,17 @@ def timeout(seconds=0, minutes=0, hours=0): @contextmanager def timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session, seconds=0, minutes=35, hours=0): with timeout(seconds=seconds, minutes=minutes, hours=hours) as t: + no_errors = False try: yield [t] + no_errors = True finally: try: sagemaker_session.delete_endpoint(endpoint_name) LOGGER.info('deleted endpoint {}'.format(endpoint_name)) + + _show_endpoint_logs(endpoint_name, sagemaker_session) + if no_errors: _cleanup_endpoint_logs(endpoint_name, sagemaker_session) except ClientError as ce: if ce.response['Error']['Code'] == 'ValidationException': @@ -71,7 +76,7 @@ def timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session, second pass -def _cleanup_endpoint_logs(endpoint_name, sagemaker_session): +def _show_endpoint_logs(endpoint_name, sagemaker_session): log_group = '/aws/sagemaker/Endpoints/{}'.format(endpoint_name) try: # print out logs before deletion for debuggability @@ -79,7 +84,16 @@ def _cleanup_endpoint_logs(endpoint_name, sagemaker_session): logs = AWSLogs(log_group_name=log_group, log_stream_name='ALL', start='1d', aws_region=sagemaker_session.boto_session.region_name) logs.list_logs() + except Exception: + LOGGER.exception('Failure occurred while listing cloudwatch log group %s. ' + + 'Swallowing exception but printing stacktrace for debugging.', log_group) + +def _cleanup_endpoint_logs(endpoint_name, sagemaker_session): + log_group = '/aws/sagemaker/Endpoints/{}'.format(endpoint_name) + try: + # print out logs before deletion for debuggability + LOGGER.info('deleting cloudwatch log group {}:'.format(log_group)) cwl_client = sagemaker_session.boto_session.client('logs') cwl_client.delete_log_group(logGroupName=log_group) LOGGER.info('deleted cloudwatch log group: {}'.format(log_group))
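The added `no_errors` flag is the usual "clean up only on success" pattern: the flag is set as the last statement of the try block, so it is still False in the finally clause whenever the wrapped block raised. A generic sketch of the pattern, detached from SageMaker:

    from contextlib import contextmanager

    @contextmanager
    def conditional_cleanup():
        no_errors = False
        try:
            yield
            no_errors = True  # reached only if the with-block did not raise
        finally:
            print("always print diagnostics here")
            if no_errors:
                print("success -> delete the logs")
            else:
                print("failure -> keep the logs for debugging")

    with conditional_cleanup():
        pass  # raising here would keep the logs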
autoscale site * autoscale site * fix site deploy remove explicit namespace * bump * bump
-apiVersion: apps/v1beta2 +apiVersion: apps/v1 kind: Deployment metadata: name: site-deployment @@ -9,7 +9,7 @@ spec: selector: matchLabels: app: site - replicas: 1 + replicas: 2 template: metadata: labels: @@ -19,9 +19,26 @@ spec: {% if deploy %} priorityClassName: production {% endif %} + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - site + topologyKey: "kubernetes.io/hostname" containers: - name: site image: "{{ site_image.image }}" + resources: + requests: + memory: "250M" + cpu: "100m" + limits: + memory: "1G" + cpu: "1" ports: - containerPort: 80 livenessProbe: @@ -36,3 +53,20 @@ spec: port: 80 initialDelaySeconds: 5 periodSeconds: 5 +--- +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: site +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: site + minReplicas: 2 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 80
[bugfix] Bugfixes for the conversion script scripts/maintenance/compat2core.py: remove the complete version line; escape the dot
@@ -42,7 +42,7 @@ import pywikibot # be careful with replacement order! replacements = ( # doc strings - ('#\r?\n__version__', + ('#\r?\n__version__.*\r?\n', '#\n' '# Automatically ported from compat branch by compat2core.py script\n'), ('Pywikipedia bot team', 'Pywikibot team'), @@ -60,7 +60,7 @@ replacements = ( # site instance call (r'pywikibot\.getSite\s*\(\s*', 'pywikibot.Site('), # lang is different from code. We should use code in core - (r'([Ss])ite.lang(?:uage\(\))?', r'\1ite.code'), + (r'([Ss])ite\.lang(?:uage\(\))?', r'\1ite.code'), # change compat library classes to pywikibot intrinsic classes (r'catlib\.Category\s*\(\s*', 'pywikibot.Category('), (r'catlib\.change_category\s*\((\s*)(?P<article>.+?),\s*(?P<oldcat>.+?),',
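Why the dot needs escaping: unescaped, '.' matches any character, so the old pattern also rewrites look-alikes such as `site_language` buried inside other identifiers. A quick sketch of the two patterns from this diff:

    import re

    loose   = r'([Ss])ite.lang(?:uage\(\))?'
    escaped = r'([Ss])ite\.lang(?:uage\(\))?'

    text = "code = site.language()\nname = website_language"

    print(re.sub(loose, r'\1ite.code', text))
    # rewrites 'site.language()' but also mangles 'website_language'
    print(re.sub(escaped, r'\1ite.code', text))
    # rewrites only the real attribute access 'site.language()'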
Closes Fix adding session from PeeringDB If a session was already created for a PeeringDB record, trying to add the other one (attached to the same record) would fail.
@@ -1189,7 +1189,7 @@ class InternetExchangePeeringSession(BGPSession): # Try to get the session, in case it already exists try: - InternetExchangePeeringSession.objects.get( + session = InternetExchangePeeringSession.objects.get( autonomous_system=autonomous_system, internet_exchange=internet_exchange, ip_address=ip_address,
Update kmeans.py Change "memoizatiion" to "memoization"
@@ -101,7 +101,7 @@ class KMeans(object): Extracts centroids :param model: Local KMeans instance :param dfs: List of cudf.Dataframes to use - :param r: Stops memoizatiion caching + :param r: Stops memoization caching :return: The fit model """
One more fix for Summary: Pull Request resolved:
@@ -39,6 +39,7 @@ macro(custom_protobuf_find) set(CMAKE_POSITION_INDEPENDENT_CODE ON) if (MSVC) + if(MSVC_Z7_OVERRIDE) foreach(flag_var CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) @@ -46,6 +47,7 @@ macro(custom_protobuf_find) string(REGEX REPLACE "/Z[iI]" "/Z7" ${flag_var} "${${flag_var}}") endif(${flag_var} MATCHES "/Z[iI]") endforeach(flag_var) + endif(MSVC_Z7_OVERRIDE) endif(MSVC) add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/protobuf/cmake)
Add proxied websocket support Add websocket support through a proxied server.
@@ -14,7 +14,7 @@ import { NotebookPanel, INotebookModel } from '@jupyterlab/notebook'; - +import { PageConfig } from '@jupyterlab/coreutils'; /** * The plugin registration information. @@ -47,8 +47,13 @@ class VPythonExtension implements DocumentRegistry.IWidgetExtension<NotebookPane glowcommlab.comm = vp_comm vp_comm.onMsg = glowcommlab.onmessage - glowcommlab.setupWebsocket(commMsg) + // Get base URL of current notebook server + let baseUrl = PageConfig.getBaseUrl() + + // Construct URL of our proxied service + let serviceUrl = base_url + 'proxy/' + port; + glowcommlab.setupWebsocket(commMsg, serviceUrl) }); vp_comm.onClose = (msg) => {console.log("comm onClose");};
Add better error message for call_stats error mode * Add better error message for call_stats error mode fixes * fix
@@ -26,15 +26,22 @@ class CallStatsCombiner(val nAlleles: Int) extends Serializable { var alleleCount = new Array[Int](nAlleles) var homozygoteCount = new Array[Int](nAlleles) + @inline def increment(idx: Int): Unit = { + if (idx >= nAlleles) + fatal(s"call_stats: expected alleles with maximum index ${nAlleles - 1}, found allele with index $idx" + + s"\n This can happen with invalid input data, or failure to reconcile alleles and genotypes after 'split_multi'") + alleleCount(idx) += 1 + } + def merge(c: Call): CallStatsCombiner = { (Call.ploidy(c): @switch) match { case 0 => case 1 => - alleleCount(Call.alleleByIndex(c, 0)) += 1 + increment(Call.alleleByIndex(c, 0)) case 2 => val p = Call.allelePair(c) - alleleCount(p.j) += 1 - alleleCount(p.k) += 1 + increment(p.j) + increment(p.k) if (p.j == p.k) homozygoteCount(p.j) += 1 case _ => throw new UnsupportedOperationException @@ -43,6 +50,7 @@ class CallStatsCombiner(val nAlleles: Int) extends Serializable { } def merge(that: CallStatsCombiner): CallStatsCombiner = { + assert(nAlleles == that.nAlleles) alleleCount.indices.foreach { i => alleleCount(i) += that.alleleCount(i) } homozygoteCount.indices.foreach { i => homozygoteCount(i) += that.homozygoteCount(i) } this
add example multiprocess code Summary: fixes
@@ -27,15 +27,57 @@ a ``spawn`` or ``forkserver`` start methods. :mod:`python:multiprocessing` in Python 2 can only create subprocesses using ``fork``, and it's not supported by the CUDA runtime. -.. warning:: - - CUDA API requires that the allocation exported to other processes remains - valid as long as it's used by them. You should be careful and ensure that - CUDA tensors you shared don't go out of scope as long as it's necessary. - This shouldn't be a problem for sharing model parameters, but passing other - kinds of data should be done with care. Note that this restriction doesn't - apply to shared CPU memory. - +Unlike CPU tensors, the sending process is required to keep the original tensor +as long as the receiving process retains a copy of the tensor. +This shouldn't be a problem for sharing model parameters (which stay live +for the entire execution of the model), but passing other +kinds of data should be done with care. + +Here is an example program which handles these requirements correctly: + +:: + import torch + import torch.multiprocessing as mp + + torch.set_default_tensor_type(torch.cuda.FloatTensor) + + def sender(q, e): + for i in range(10): + s_sample = [torch.zeros(1), torch.ones(1)] + q.put(s_sample) + e.wait() + del s_sample + e.clear() + + if __name__ == "__main__": + ctx = mp.get_context("spawn") + q = ctx.Queue() + e = ctx.Event() + p = ctx.Process(target=sender, args=(q, e)) + p.start() + + for i in range(10): + print('=== ITER {} ===".format(i)) + r_sample = q.get() + del r_sample + e.set() + + p.join() + +In the example above, calling `e.wait()` +on sender side ensures tensor `s_sample` doesn't get deleted while +receiver is working on it. The receiver signals when it is done +with the tensor using `e.set()`, being careful to `del` its reference +to the received tensor first. It is INSUFFICIENT to promise never to call +`r_sample` again; while `r_sample` is live, it may be confused with +any subsequent tensors allocated by the source process at the same address. + +If a receiver wants to save the data of `r_sample` for future use while +letting the source process deallocate the original, it must +`clone()` it. + +This behavior is very confusing, and we are tracking a fix for it +at https://github.com/pytorch/pytorch/issues/16141 Sharing strategies ------------------
Tag bootstrap_javascript: use the include_jquery setting for now. Update docs about the `include_jquery` variable
@@ -32,7 +32,8 @@ The ``BOOTSTRAP4`` dict variable contains these settings and defaults: # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap4.html) 'javascript_in_head': False, - # Include jQuery with Bootstrap JavaScript (affects django-bootstrap4 template tags) + # Include jQuery with Bootstrap JavaScript False|falsy|slim|full (default=False) + # False - means tag bootstrap_javascript use default value - `falsy` and does not include jQuery) 'include_jquery': False, # Label class to use in horizontal forms
Correct coordinate data variable finding code Corrects code which finds coordinate data variables to include both NUG coordinate variables (variables with one dimension where the name of the dimension and variable are the same) and existing variables in the dataset which are referred to by other variables' "coordinates" attribute.
@@ -78,6 +78,23 @@ class TestCF1_6(BaseTestCase): self.addCleanup(nc.close) return nc + def test_coord_data_vars(self): + """Check that coordinate data variables are properly handled""" + ds = MockTimeSeries() + ds.createDimension('siglev', 20) + + temp = ds.createVariable("temp", np.float64, dimensions=("time",), + fill_value=np.float(99999999999999999999.)) + temp.coordinates = "sigma noexist" + ds.createVariable("sigma", np.float64, dimensions=('siglev',)) + self.cf.setup(ds) + # time is a NUG coordinate variable, sigma is not, but is referred to in + # variables, so both should show up in cf_coord_data_vars. + # noexist does not exist in the dataset's variables, so it is not + # present in coord_data_vars + self.assertEqual(self.cf.coord_data_vars, {'time', 'sigma'}) + + def load_dataset(self, nc_dataset): ''' Return a loaded NC Dataset for the given path
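The rule being tested: a coordinate data variable is either a NUG coordinate variable (one dimension, named after that dimension) or a variable that exists in the dataset and is named in some other variable's `coordinates` attribute. A rough sketch of that lookup over a netCDF4-style dataset (the helper name is made up):

    def find_coord_data_vars(ds):
        """Return coordinate data variable names for a netCDF4-style Dataset."""
        # NUG coordinate variables: one dimension whose name matches the variable.
        nug = {name for name, var in ds.variables.items()
               if var.dimensions == (name,)}

        # Names referenced by other variables' 'coordinates' attributes, kept
        # only if they actually exist in the dataset (this drops 'noexist').
        referenced = set()
        for var in ds.variables.values():
            referenced.update(getattr(var, "coordinates", "").split())

        return nug | (referenced & set(ds.variables))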
Fix `unit.test_spm` for Windows This only fixes the test... I don't think it fixes SPM on Windows
@@ -14,9 +14,12 @@ import shutil import msgpack import hashlib import logging +import sys +try: import pwd import grp -import sys +except ImportError: + pass # Import Salt libs import salt.client @@ -491,6 +494,16 @@ class SPMClient(object): # No defaults for this in config.py; default to the current running # user and group + import salt.utils + if salt.utils.is_windows(): + import salt.utils.win_functions + cur_user = salt.utils.win_functions.get_current_user() + cur_user_sid = salt.utils.win_functions.get_sid_from_name(cur_user) + uid = self.opts.get('spm_uid', cur_user_sid) + gid = self.opts.get('spm_gid', cur_user_sid) + uname = cur_user + gname = cur_user + else: uid = self.opts.get('spm_uid', os.getuid()) gid = self.opts.get('spm_gid', os.getgid()) uname = pwd.getpwuid(uid)[0] @@ -710,7 +723,7 @@ class SPMClient(object): raise SPMInvocationError('A path to a directory must be specified') if args[1] == '.': - repo_path = os.environ['PWD'] + repo_path = os.getcwd() else: repo_path = args[1]
Add _remove_invalid_and_duplicate_signatures to pylintrc's exclude-protected setting.
@@ -344,7 +344,7 @@ defining-attr-methods=__init__,__new__,setUp # List of member names, which should be excluded from the protected access # warning. -exclude-protected=_asdict, _fields, _replace, _source, _make, _generate_and_write_metadata, _delete_obsolete_metadata, _log_status_of_top_level_roles, _load_top_level_metadata, _strip_version_number, _delegated_roles +exclude-protected=_asdict, _fields, _replace, _source, _make, _generate_and_write_metadata, _delete_obsolete_metadata, _log_status_of_top_level_roles, _load_top_level_metadata, _strip_version_number, _delegated_roles, _remove_invalid_and_duplicate_signatures # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls