message: string (lengths 13 to 484)
diff: string (lengths 38 to 4.63k)
Add helpful error message when a program is terminated by a (POSIX) signal, such as a segfault Fixes
@@ -5,6 +5,7 @@ import logging import os import re import shutil +import signal import stat import subprocess # nosec import sys @@ -354,6 +355,16 @@ class JobBase(HasReqsHints, metaclass=ABCMeta): else: processStatus = "permanentFail" + if processStatus != "success": + if rcode < 0: + _logger.warning( + "[job %s] was terminated by signal: %s", + self.name, + signal.Signals(-rcode).name, + ) + else: + _logger.warning("[job %s] exited with status: %d", self.name, rcode) + if "listing" in self.generatefiles: if self.generatemapper: relink_initialworkdir(
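As background for the record above: in Python, a child process killed by a POSIX signal reports a negative return code, and signal.Signals can map that back to a readable name, which is the lookup this diff adds. A minimal standalone sketch (illustrative only, not part of the commit; the shell command is just one way to make a process die from SIGSEGV):

```python
import signal
import subprocess

# A process that sends SIGSEGV to itself; its returncode becomes -11 on POSIX.
proc = subprocess.run(["sh", "-c", "kill -SEGV $$"])
rcode = proc.returncode

if rcode < 0:
    # signal.Signals(11).name == "SIGSEGV"
    print("terminated by signal:", signal.Signals(-rcode).name)
else:
    print("exited with status:", rcode)
```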
fix tox python3 overrides It is necessary to add python3 as the base python for the genpolicy test.
@@ -132,6 +132,7 @@ description = Generates sample configuration file for monasca-api commands = oslo-config-generator --config-file=config-generator/api-config.conf [testenv:genpolicy] +basepython = python3 description = Generates sample policy.json file for monasca-api commands = oslopolicy-sample-generator --config-file=config-generator/policy.conf
dm/tools: update some sentences found inaccurate or unclear
@@ -56,7 +56,7 @@ For the binlog during incremental data import, DM uses the downstream database t - When DM-worker is restarted before or after synchronizing sharding DDL statements, it checks the checkpoint information and you can use the `start-task` command to recover the data synchronization task automatically. - - When DM-worker is restarted during the process of synchronizing sharding DDL statements, the issue might occur that the DM-worker owner has executed the DDL statement and successfully changed the downstream database table schema, while other DM-worker instances are restarted but fail to skip the DDL statement and update the checkpoints. + - When DM-worker is restarted during the process of synchronizing sharding DDL statements, the issue might occur that the owner (one of DM-worker instances) has executed the DDL statement and successfully changed the downstream database table schema, while other DM-worker instances are restarted but fail to skip the DDL statement and update the checkpoints. At this time, DM tries again to synchronize these DDL statements that are not skipped. However, the restarted DM-worker instances will be blocked at the position of the binlog event corresponding to the DDL binlog event, because the DM-worker instance that is not restarted has executed to the place after this DDL binlog event. @@ -355,7 +355,7 @@ Assuming that the `172.16.10.72` machine needs to be maintained or this machine 3. Edit the `inventory.ini` file and add the new DM-worker instance. - Edit the `inventory.ini` file, comment or delete the line where the `dm_worker1` instance `172.16.10.72` that you want to replace exists, and add the `172.16.10.75` information of the new `dm_worker1` instance. + Edit the `inventory.ini` file, comment or delete the line where the original `dm_worker1` instance (`172.16.10.72`) that you want to replace exists, and add the information for the new `dm_worker1` instance (`172.16.10.75`). ```ini [dm_worker_servers] @@ -395,7 +395,7 @@ This section describes how to switch between master and slave instances using dm ### Upstream master-slave switch behind the virtual IP -1. Use `query-status` to make sure that relay catches up with the master instance before the switch (`relayCatchUpMaster`). +1. Use `query-status` to make sure that the relay unit has caught up with the binlog status of the master instance before the switch (`relayCatchUpMaster`). 2. Use `pause-relay` to pause relay. 3. Use `pause-task` to pause all running tasks. 4. The upstream master and slave instances behind the virtual IP execute the switch operation. @@ -405,7 +405,7 @@ This section describes how to switch between master and slave instances using dm ### Master-slave switch after changing IP -1. Use `query-status` to make sure that relay catches up with the master instance before the switch (`relayCatchUpMaster`). +1. Use `query-status` to make sure that the relay unit has caught up with the binlog status of the master instance before the switch (`relayCatchUpMaster`). 2. Use `stop-task` to stop all running tasks. 3. Modify the DM-worker configuration, and use DM-Ansible to perform a rolling update on DM-worker. 4. Update the `task.yaml` and `mysql-instances / config` configurations.
Allow connecting to the Name-node via https This fixes a `ConnectionError: ('Connection aborted.', BadStatusLine('\x15\x03\x03\x00\x02\x02P'))` when the Name-node only uses HTTPS.
@@ -43,6 +43,7 @@ class WebHDFS(AbstractFileSystem): proxy_to=None, kerb_kwargs=None, data_proxy=None, + use_https=False, **kwargs ): """ @@ -74,12 +75,16 @@ class WebHDFS(AbstractFileSystem): maps host names `host->data_proxy[host]`; if a callable, full URLs are passed, and function must conform to `url->data_proxy(url)`. + use_https: bool + Whether to connect to the Name-node using HTTPS instead of HTTP kwargs """ if self._cached: return super().__init__(**kwargs) - self.url = "http://{host}:{port}/webhdfs/v1".format(host=host, port=port) + self.url = "{protocol}://{host}:{port}/webhdfs/v1".format( + protocol='https' if use_https else 'http', host=host, port=port + ) self.kerb = kerberos self.kerb_kwargs = kerb_kwargs or {} self.pars = {}
walreceiver: finish WAL segments on timeout If we received no new replication message for a long time, and some messages are still pending, flush a WAL segment.
@@ -210,6 +210,10 @@ class WALReceiver(PGHoardThread): with suppress(InterruptedError): if not any(select.select([self.c], [], [], max(0.0, timeout))): self.c.send_feedback() # timing out, send keepalive + # Don't leave unfinished segments waiting for more than the KEEPALIVE_INTERVAL + if self.buffer.tell() > 0: + self.switch_wal() + self.process_completed_segments() # When we stop, process sent wals to update last_flush lsn. self.process_completed_segments(block=True) finally:
Config spark_cluster_mode default value Changed the default
@@ -35,7 +35,7 @@ Follow the instructions below to configure this check for an Agent running on a # spark_url: http://<Mesos_master>:5050 # Mesos master web UI # spark_url: http://<YARN_ResourceManager_address>:8088 # YARN ResourceManager address - spark_cluster_mode: spark_standalone_mode # default + spark_cluster_mode: spark_yarn_mode # default # spark_cluster_mode: spark_mesos_mode # spark_cluster_mode: spark_yarn_mode # spark_cluster_mode: spark_driver_mode
Generalise sequence parsing Now the resource does not have to be named `"sequences"` but any resource whose path(s) live under `"data/sequences/"` is parsed as a sequence.
@@ -13,6 +13,7 @@ from functools import partial import collections.abc as cabc import logging import os +import re import pandas as pd import dill as pickle @@ -159,15 +160,25 @@ class EnergySystem: listify = lambda x: x if type(x) is list else repeat(x) resource = lambda r: package.get_resource(r) or empty - data['sequences'] = { + timeindex = None + def sequences(r): + """ Parses the resource `r` as a sequence. + """ + result = { name: [s[name] - for s in resource('sequences').read(keyed=True)] - for name in resource('sequences').headers} - timeindex=data['sequences']['timeindex'] - data['sequences'] = { - name: pd.Series(data['sequences'][name], index=timeindex) - for name in data['sequences'] + for s in resource(r).read(keyed=True)] + for name in resource(r).headers} + timeindex=result['timeindex'] + result = { + name: pd.Series(result[name], index=timeindex) + for name in result if name != 'timeindex'} + return result + + for r in package.resources: + if all(re.match(r'^data/sequences/.*$', p) + for p in listify(resource(r).descriptor['path'])): + data.update({r: sequences(r)}) data.update( {name: {r['name']: {key: r[key] for key in r}
igw: Add check for missing iqn. If the user is still using the older packages and does not set up the target IQN, you will just get a vague error message later on. This adds a check during the validate task, so it is clear to the user.
- not containerized_deployment | bool - not use_new_ceph_iscsi | bool +- name: make sure gateway_iqn is configured + fail: + msg: "you must set a iqn for the iSCSI target" + when: + - "gateway_iqn | default('') | length == 0" + - not containerized_deployment | bool + - not use_new_ceph_iscsi | bool + - name: fail if unsupported chap configuration fail: msg: "Mixing clients with CHAP enabled and disabled is not supported."
set pgdb for telegraf HG-- branch : feature/microservices
## connection with the server and doesn't restrict the databases we are trying ## to grab metrics for. ## - address = "host={{ ansible_host }} user={{noc_pg_user}} password={{noc_pg_password}} sslmode=disable" - - ## A list of databases to pull metrics about. If not specified, metrics for all - ## databases are gathered. - databases = ["{{noc_pg_db}}"] + address = "host={{ ansible_host }} user={{noc_pg_user}} password={{noc_pg_password}} sslmode=disable database={{noc_pg_db}}" \ No newline at end of file
[Logs] Remove source from user settings as we now automatically detect the source
@@ -61,10 +61,6 @@ def lambda_handler(event, context): aws_meta["function_version"] = context.function_version aws_meta["invoked_function_arn"] = context.invoked_function_arn aws_meta["memory_limit_in_mb"] = context.memory_limit_in_mb - try: - metadata["ddsource"] = os.environ['Source'] - except Exception: - pass try:
PGPKey.pubkey() should return self if it is already a public key This makes it easier to use PGPy to work with OpenPGP certificates where we don't have the secret part corresponding to some of the public keys (e.g. stripped subkeys, subkeys on smartcards, etc). Closes
@@ -1334,9 +1334,10 @@ class PGPKey(Armorable, ParentRef, PGPObject): @property def pubkey(self): """If the :py:obj:`PGPKey` object is a private key, this method returns a corresponding public key object with - all the trimmings. Otherwise, returns ``None`` + all the trimmings. If it is already a public key, just return it. """ - if not self.is_public: + if self.is_public: + return self if self._sibling is None or isinstance(self._sibling, weakref.ref): # create a new key shell pub = PGPKey() @@ -1367,7 +1368,6 @@ class PGPKey(Armorable, ParentRef, PGPObject): pub._parent = weakref.ref(self.parent) return self._sibling() - return None @pubkey.setter def pubkey(self, pubkey):
Fixed tmp_dir bug in kgx.py Now calling report method to print cardinality of edges and nodes
@@ -73,13 +73,15 @@ def _dump(input, output, input_type, output_type): for i in input: t.parse(i) + t.report() + output_transformer = _transformers.get(output_type) if output_transformer is None: raise Exception('Output does not have a recognized type: ' + _file_types) kwargs = { - 'app_dir' : click.get_app_dir(kgx.__name__), + 'tmp_dir' : click.get_app_dir(kgx.__name__), 'extention' : output_type }
Fix hot_spec.rst in Template Guide Add missing space and words in doc/source/template_guide/hot_spec.rst
@@ -296,10 +296,10 @@ for the ``heat_template_version`` key: up until the Pike release. This version adds the ``make_url`` function for assembling URLs, the ``list_concat`` function for combining multiple lists, the ``list_concat_unique`` function for combining multiple - lists without repeating items, the``string_replace_vstrict`` which - raises errors for missing and empty params, and the ``contains`` which - checks whether specific value is in a sequence. The complete list of - supported functions is:: + lists without repeating items, the ``string_replace_vstrict`` function + which raises errors for missing and empty params, and the ``contains`` + function which checks whether specific value is in a sequence. The + complete list of supported functions is:: digest filter
Check ZooKeeper status using curl `lsof` is not available on all systems. Use commands that are installed by DC/OS.
@@ -37,4 +37,4 @@ ExecStartPre=$PKG_PATH/bin/set_exhibitor_file_permissions.py # Start Exhibitor ExecStart=$PKG_PATH/usr/exhibitor/start_exhibitor.py # Wait for ZooKeeper to start listening -ExecStartPost=-/usr/bin/timeout 20 /bin/sh -c 'until lsof -i :2181; do sleep 1; done' +ExecStartPost=-/usr/bin/timeout 60 /bin/sh -c 'until echo ruok | /opt/mesosphere/bin/curl --connect-timeout 5 telnet://localhost:2181; do sleep 5; done'
Update version of package to 3.10.3 cr
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -__version__ = '3.10.2' +__version__ = '3.10.3'
Raising ValueError instead of using assert when checking inputs to Particles class. Check that either all particles have parents or none do, and raise a ValueError otherwise.
@@ -46,17 +46,20 @@ class Particles(Type): def __init__(self, state_vector=None, weight=None, parent=None, particle_list=None, *args, **kwargs): - if state_vector is not None: - assert particle_list is None,\ - "Particles object cannot use both state_vector and particle_list" + if (particle_list is not None) and (state_vector is not None or weight is not None): + raise ValueError("Use either a list of Particle objects or StateVectors and weights," + " but not both.") if particle_list and isinstance(particle_list, list): state_vector = StateVectors([particle.state_vector for particle in particle_list]) weight = np.array([Probability(particle.weight) for particle in particle_list]) - parent_list = [particle.parent for particle in particle_list - if particle.parent is not None] - if parent_list: + parent_list = [particle.parent for particle in particle_list] + + if parent_list.count(None) == 0: parent = Particles(particle_list=parent_list) + elif 0 < parent_list.count(None) < len(parent_list): + raise ValueError("Either all particles should have" + " parents or none of them should.") if parent: parent.parent = None
project: do not update local published/ refs in dryrun mode Tested-by: Mike Frysinger
@@ -1026,6 +1026,7 @@ class Project(object): if GitCommand(self, cmd, bare=True).Wait() != 0: raise UploadError('Upload failed') + if not dryrun: msg = "posted to %s for %s" % (branch.remote.review, dest_branch) self.bare_git.UpdateRef(R_PUB + branch.name, R_HEADS + branch.name,
display file data for a single selected case; clear files associated with a case when multiple cases are selected
@@ -142,16 +142,23 @@ class DialogCases(QtWidgets.QDialog): def count_selected_items(self): """ Update label with the count of selected rows. - Also clear the textedit if multiple rows are selected. """ + Also clear the textedit if multiple rows are selected. + :return + item_count """ indexes = self.ui.tableWidget.selectedIndexes() ix = [] for i in indexes: ix.append(i.row()) - i = set(ix) - self.ui.label_cases.setText(_("Cases: ") + str(len(i)) + "/" + str(len(self.cases))) - if len(i) > 1: + i = len(set(ix)) + if i > 1: self.ui.textBrowser.clear() + case_name = "" + if i == 1: + case_name = self.ui.tableWidget.item(indexes[0].row(), 0).text() + self.ui.label_cases.setText(_("Cases: ") + str(i) + "/" + str(len(self.cases)) + " " + case_name) + + return i def load_cases_and_attributes(self): """ Load case and attribute details from database. Display in tableWidget. @@ -425,12 +432,9 @@ class DialogCases(QtWidgets.QDialog): self.case_text = [] return self.selected_case = self.cases[x] + if self.count_selected_items() > 1: + return - '''# clear case text viewed if the caseid has changed - if self.caseTextViewed != [] and self.caseTextViewed[0]['caseid'] != self.selected_case['caseid']: - self.caseTextViewed = [] - self.case_text = [] - self.ui.textBrowser.clear()''' #logger.debug("Selected case: " + str(self.selected_case['id']) +" "+self.selected_case['name'])''' # get case_text for this file if self.selected_file is not None:
fix exception pickling see
@@ -39,7 +39,6 @@ class MaestralApiError(Exception): def __init__(self, title, message, dbx_path=None, dbx_path_dst=None, local_path=None, local_path_dst=None): - super().__init__(title) self.title = title self.message = message self.dbx_path = dbx_path
issue have Side._on_fork() empty _fork_refs This is mostly to avoid ugly debugging that depends on the state of GC. Discard sides from _fork_refs after they have been closed.
@@ -1415,7 +1415,9 @@ class Side(object): @classmethod def _on_fork(cls): - for side in list(cls._fork_refs.values()): + while cls._fork_refs: + _, side = cls._fork_refs.popitem() + _vv and IOLOG.debug('Side._on_fork() closing %r', side) side.close() def close(self):
1. Clarified that data documentation is configurable and the user has control. 2. Documented the run_id_filter and the stores configuration
@@ -52,7 +52,10 @@ Users can specify * where the HTML files should be written (filesystem or S3) * which renderer and view class should be used to render each section -Here is an example of a site configuration: +Data Documentation Site Configuration +************************************* + +Here is an example of a site configuration from great_expectations.yml: .. code-block:: bash @@ -67,7 +70,7 @@ Here is an example of a site configuration: type: filesystem base_directory: uncommitted/validations/ run_id_filter: - ne: profiling + ne: profiling # exclude validations with run id "profiling" - reserved for profiling results profiling_store: # where to look for profiling results (filesystem/S3) type: filesystem base_directory: uncommitted/validations/ @@ -105,12 +108,20 @@ Here is an example of a site configuration: module: great_expectations.render.view class: DefaultJinjaPageView +* ``validations_store`` and ``profiling_store`` in the example above specify the location of validation and profiling results that the site will include in the documentation. The store's ``type`` can be ``filesystem`` or ``s3``. S3 store is not currently implemented, but will be supported in a near future. ``base_directory`` must be specified for ``filesystem`` stores. The optional ``run_id_filter`` attribute allows to include (``eq`` for exact match) or exclude (``ne``) validation results with a particular run id. + + + +Adjusting Data Documentation For Your Project's Needs +***************************************************** By default, GE creates two data documentation sites for a new project: 1. "local_site" renders documentation for all the datasources in the project from GE artifacts in the local repo. The site includes expectation suites and profiling and validation results from `uncommitted` directory. Local site provides the convenience of visualizing all the entities stored in JSON files as HTML. 2. "team_site" is meant to support the "shared source of truth for a team" use case. By default only the expectations section is enabled. Users have to configure the profiling and the validations sections (and the corresponding validations_store and profiling_store attributes based on the team's decisions where these are stored (a local filesystem or S3). Reach out on `Slack <https://tinyurl.com/great-expectations-slack>`__ if you would like to discuss the best way to configure a team site. +Users have full control over configuring Data Documentation for their project - they can modify the two pre-configured sites (or remove them altogether) and add new sites with a configuration that meets the project's needs. The easiest way to add a new site to the configuration is to copy the "local_site" configuration block in great_expectations.yml, give the copy a new name and modify the details as needed. + How to build documentation ----------------------------
Update configuration.rst Change "you" to "your" in line 11.
@@ -8,7 +8,7 @@ settings module to customize its behavior. The debug toolbar ships with a default configuration that is considered sane for the vast majority of Django projects. Don't copy-paste blindly - the default values shown below into you settings module! It's useless and + the default values shown below into your settings module! It's useless and it'll prevent you from taking advantage of better defaults that may be introduced in future releases.
Add command to resend infraction embed Resolve
@@ -11,6 +11,7 @@ from discord.utils import escape_markdown from bot import constants from bot.bot import Bot from bot.converters import Expiry, Infraction, Snowflake, UserMention, allowed_strings, proxy_user +from bot.exts.moderation.infraction import _utils from bot.exts.moderation.infraction.infractions import Infractions from bot.exts.moderation.modlog import ModLog from bot.pagination import LinePaginator @@ -38,13 +39,22 @@ class ModManagement(commands.Cog): """Get currently loaded Infractions cog instance.""" return self.bot.get_cog("Infractions") - # region: Edit infraction commands - @commands.group(name='infraction', aliases=('infr', 'infractions', 'inf', 'i'), invoke_without_command=True) async def infraction_group(self, ctx: Context) -> None: - """Infraction manipulation commands.""" + """Infraction management commands.""" await ctx.send_help(ctx.command) + @infraction_group.command(name="resend", aliases=("send", "rs", "dm")) + async def infraction_resend(self, ctx: Context, infraction: Infraction) -> None: + """Resend a DM to a user about a given infraction of theirs.""" + id_ = infraction["id"] + if await _utils.notify_infraction(infraction): + await ctx.send(f":incoming_envelope: Resent DM for infraction `{id_}`.") + else: + await ctx.send(f"{constants.Emojis.failmail} Failed to resend DM for infraction `{id_}`.") + + # region: Edit infraction commands + @infraction_group.command(name="append", aliases=("amend", "add", "a")) async def infraction_append( self,
Rav4 TSS2 has two different steering racks Split tuning on eps fwVersion \x02 only. See for findings. Unify Rav4 & Rav4 Hybrid Average mass between ICE & Hybrid
@@ -168,36 +168,22 @@ class CarInterface(CarInterfaceBase): ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]] ret.lateralTuning.pid.kf = 0.00006 - elif candidate == CAR.RAV4_TSS2: + elif candidate in [CAR.RAV4_TSS2, CAR.RAV4H_TSS2]: stop_and_go = True ret.safetyParam = 73 ret.wheelbase = 2.68986 ret.steerRatio = 14.3 tire_stiffness_factor = 0.7933 - ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]] - ret.mass = 3370. * CV.LB_TO_KG + STD_CARGO_KG - ret.lateralTuning.pid.kf = 0.00004 - - for fw in car_fw: - if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00": + ret.mass = 3585. * CV.LB_TO_KG + STD_CARGO_KG # Average between ICE and Hybrid ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]] ret.lateralTuning.pid.kf = 0.00007818594 - break - elif candidate == CAR.RAV4H_TSS2: - stop_and_go = True - ret.safetyParam = 73 - ret.wheelbase = 2.68986 - ret.steerRatio = 14.3 - tire_stiffness_factor = 0.7933 + # 2019+ Rav4 TSS2 uses two different steering racks and specific tuning seems to be necessary. + # See https://github.com/commaai/openpilot/pull/21429#issuecomment-873652891 + for fw in car_fw: + if fw.ecu == "eps" and fw.fwVersion.startswith(b'\x02'): ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]] - ret.mass = 3800. * CV.LB_TO_KG + STD_CARGO_KG ret.lateralTuning.pid.kf = 0.00004 - - for fw in car_fw: - if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00": - ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]] - ret.lateralTuning.pid.kf = 0.00007818594 break elif candidate in [CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2]:
Dockerfile: Update with support for additional instruments Ensure support is present in the Docker image for instruments that require trace-cmd, monsoon or iio-capture.
@@ -46,8 +46,38 @@ ARG DEVLIB_REF=v1.2 ARG WA_REF=v3.2 ARG ANDROID_SDK_URL=https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip -RUN apt-get update -RUN apt-get install -y python3 python3-pip git wget zip openjdk-8-jre-headless vim emacs nano curl sshpass ssh usbutils locales +RUN apt-get update && apt-get install -y \ +apache2-utils \ +bison \ +cmake \ +curl \ +emacs \ +flex \ +git \ +libcdk5-dev \ +libiio-dev \ +libxml2 \ +libxml2-dev \ +locales \ +nano \ +openjdk-8-jre-headless \ +python3 \ +python3-pip \ +ssh \ +sshpass \ +sudo \ +trace-cmd \ +usbutils \ +vim \ +wget \ +zip + +# Clone and download iio-capture +RUN git clone -v https://github.com/BayLibre/iio-capture.git /tmp/iio-capture && \ + cd /tmp/iio-capture && \ + make && \ + make install + RUN pip3 install pandas # Ensure we're using utf-8 as our default encoding @@ -57,8 +87,16 @@ ENV LANGUAGE en_US:en ENV LC_ALL en_US.UTF-8 # Let's get the two repos we need, and install them -RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && git checkout $DEVLIB_REF && python3 setup.py install && pip3 install .[full] -RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && cd /tmp/wa && git checkout $WA_REF && python3 setup.py install && pip3 install .[all] +RUN git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && \ + cd /tmp/devlib && \ + git checkout $DEVLIB_REF && \ + python3 setup.py install && \ + pip3 install .[full] +RUN git clone -v https://github.com/ARM-software/workload-automation.git /tmp/wa && \ + cd /tmp/wa && \ + git checkout $WA_REF && \ + python3 setup.py install && \ + pip3 install .[all] # Clean-up RUN rm -R /tmp/devlib /tmp/wa @@ -72,7 +110,13 @@ RUN mkdir -p /home/wa/.android RUN mkdir -p /home/wa/AndroidSDK && cd /home/wa/AndroidSDK && wget $ANDROID_SDK_URL -O sdk.zip && unzip sdk.zip RUN cd /home/wa/AndroidSDK/tools/bin && yes | ./sdkmanager --licenses && ./sdkmanager platform-tools && ./sdkmanager 'build-tools;27.0.3' +# Download Monsoon +RUN mkdir -p /home/wa/monsoon +RUN curl https://android.googlesource.com/platform/cts/+/master/tools/utils/monsoon.py\?format\=TEXT | base64 --decode > /home/wa/monsoon/monsoon.py +RUN chmod +x /home/wa/monsoon/monsoon.py + # Update the path +RUN echo 'export PATH=/home/wa/monsoon:${PATH}' >> /home/wa/.bashrc RUN echo 'export PATH=/home/wa/AndroidSDK/platform-tools:${PATH}' >> /home/wa/.bashrc RUN echo 'export PATH=/home/wa/AndroidSDK/build-tools:${PATH}' >> /home/wa/.bashrc RUN echo 'export ANDROID_HOME=/home/wa/AndroidSDK' >> /home/wa/.bashrc
Kill some unnecessary function declarations. Summary: Pull Request resolved: Test Plan: Imported from OSS
TH_CPP_API void THTensor_(setStorage)(THTensor *self, THStorage *storage_, ptrdiff_t storageOffset_, at::IntArrayRef size_, at::IntArrayRef stride_); -/* strides.data() might be NULL */ -TH_CPP_API THTensor *THTensor_(newWithStorage)(THStorage *storage, ptrdiff_t storageOffset, - at::IntArrayRef sizes, at::IntArrayRef strides); TH_CPP_API void THTensor_(resize)(THTensor *self, at::IntArrayRef size, at::IntArrayRef stride); -TH_CPP_API THTensor *THTensor_(newWithSize)(at::IntArrayRef size, at::IntArrayRef stride); #endif
Update polytracker/tracing.py Add comments
@@ -1128,6 +1128,9 @@ class ProgramTrace(ABC): taints.add(self.file_offset(node)) else: parent1, parent2 = node.parent_one, node.parent_two + // a node will always have either zero or two parents. + // labels that are reused will reuse their associated nodes. + // all other nodes are unions. assert parent1 is not None and parent2 is not None if parent1 not in history: history.add(parent1)
Add Hostname HG-- branch : AddHostname
##---------------------------------------------------------------------- ## Failed Scripts Report ##---------------------------------------------------------------------- -## Copyright (C) 2007-2012 The NOC Project +## Copyright (C) 2007-2017 The NOC Project ## See LICENSE for details ##---------------------------------------------------------------------- """ @@ -16,6 +16,7 @@ from pymongo import ReadPreference from noc.main.models.pool import Pool from noc.sa.models.managedobject import ManagedObject from noc.sa.models.managedobjectprofile import ManagedObjectProfile +from noc.services.web.apps.sa.reportobjectdetail.views import ReportObjectsHostname from noc.sa.models.useraccess import UserAccess from noc.core.translation import ugettext as _ @@ -82,13 +83,15 @@ class ReportFilterApplication(SimpleReport): bad_cli_cred = get_db()["noc.joblog"].find({"problems.suggest_cli": "Failed to guess CLI credentials", "_id": {"$in": is_managed_ng_in}}, read_preference=ReadPreference.SECONDARY_PREFERRED) - + mos_id = list(is_managed.values_list("id", flat=True)) + mo_hostname = ReportObjectsHostname(mo_ids=mos_id, use_facts=True) for b in is_not_alived_c: mo = ManagedObject.get_by_id(b["object"]) data += [( mo.name, mo.address, mo.profile_name, + mo_hostname[mo.id], mo.auth_profile if mo.auth_profile else "", mo.auth_profile.user if mo.auth_profile else mo.user, mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro, @@ -102,6 +105,7 @@ class ReportFilterApplication(SimpleReport): mo.name, mo.address, mo.profile_name, + mo_hostname[mo.id], mo.auth_profile if mo.auth_profile else "", mo.auth_profile.user if mo.auth_profile else mo.user, mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro, @@ -114,6 +118,7 @@ class ReportFilterApplication(SimpleReport): mo.name, mo.address, mo.profile_name, + mo_hostname[mo.id], mo.auth_profile if mo.auth_profile else "", mo.auth_profile.user if mo.auth_profile else mo.user, mo.auth_profile.snmp_ro if mo.auth_profile else mo.snmp_ro, @@ -124,7 +129,7 @@ class ReportFilterApplication(SimpleReport): return self.from_dataset( title=self.title, columns=[ - _("Managed Object"), _("Address"), _("Profile"), + _("Managed Object"), _("Address"), _("Profile"), _("Hostname"), _("Auth Profile"), _("Username"), _("SNMP Community"), _("Avail"), _("Error") ],
Add missing tests to test_trusts.py Add: *test_delete_trust() *test_delete_trust_from_cluster()
@@ -98,3 +98,41 @@ class TestTrusts(base.SaharaTestCase): cluster_update.assert_called_with(ctx, fake_cluster, {"trust_id": "trust_id"}) + + @mock.patch('sahara.utils.openstack.keystone.client_from_auth') + @mock.patch('sahara.utils.openstack.keystone.auth_for_admin') + @mock.patch('sahara.service.trusts.create_trust') + def test_delete_trust(self, trust, auth_for_admin, + client_from_auth): + client = self._client() + client_from_auth.return_value = client + trust.return_value = 'test_id' + trustor_auth = mock.Mock() + trustee_auth = mock.Mock() + auth_for_admin.return_value = trustee_auth + trust_id = trusts.create_trust(trustor_auth, trustee_auth, + "role_names") + + trusts.delete_trust(trustee_auth, trust_id) + client.trusts.delete.assert_called_with(trust_id) + + @mock.patch('sahara.conductor.API.cluster_update') + @mock.patch('sahara.utils.openstack.keystone.auth_for_admin') + @mock.patch('sahara.service.trusts.delete_trust') + @mock.patch('sahara.conductor.API.cluster_get') + @mock.patch('sahara.context.current') + def test_delete_trust_from_cluster(self, context_current, cl_get, + delete_trust, auth_for_admin, + cluster_update): + fake_cluster = mock.Mock(trust_id='test_id') + cl_get.return_value = fake_cluster + trustor_auth = mock.Mock() + trustee_auth = mock.Mock() + auth_for_admin.return_value = trustee_auth + ctx = mock.Mock(roles="role_names", auth_plugin=trustor_auth) + context_current.return_value = ctx + trusts.delete_trust_from_cluster("cluster") + + delete_trust.assert_called_with(trustee_auth, 'test_id') + cluster_update.assert_called_with(ctx, fake_cluster, + {"trust_id": None})
Add drop_collection to tear_down for User and Role This solves the CI error about duplicate keys.
@@ -221,6 +221,8 @@ def mongoengine_setup(request, app, tmpdir, realdburl): def tear_down(): with app.app_context(): + User.drop_collection() + Role.drop_collection() db.connection.drop_database(db_name) request.addfinalizer(tear_down)
Correct argument order in OriginValidator Order was reversed from spec.
@@ -16,7 +16,7 @@ class OriginValidator: self.application = application self.allowed_origins = allowed_origins - async def __call__(self, scope, send, receive): + async def __call__(self, scope, receive, send): # Make sure the scope is of type websocket if scope["type"] != "websocket": raise ValueError( @@ -34,11 +34,11 @@ class OriginValidator: # Check to see if the origin header is valid if self.valid_origin(parsed_origin): # Pass control to the application - return await self.application(scope, send, receive) + return await self.application(scope, receive, send) else: # Deny the connection denier = WebsocketDenier() - return await denier(scope, send, receive) + return await denier(scope, receive, send) def valid_origin(self, parsed_origin): """
Disambiguate codepoint value. The usage of 127462 as a unicode start point isn't super clear for other devs coming across the code in future, so assigning it to a nicely named variable with an accompanying inline comment should help make things clearer.
@@ -271,7 +271,8 @@ class Utils(Cog): if len(options) > 20: raise BadArgument("I can only handle 20 options!") - options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=127462)} + codepoint_start = 127462 # represents "regional_indicator_a" unicode value + options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)} embed = Embed(title=title, description="\n".join(options.values())) message = await ctx.send(embed=embed) for reaction in options:
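To illustrate the constant documented above: code point 127462 is U+1F1E6, the first regional-indicator symbol, so enumerating options from that start value labels them 🇦, 🇧, 🇨, and so on. A small standalone sketch (the option strings are made up for illustration, not taken from the bot):

```python
codepoint_start = 127462  # U+1F1E6, REGIONAL INDICATOR SYMBOL LETTER A

options = ["red", "green", "blue"]  # hypothetical poll options
labels = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)}

for label in labels.values():
    print(label)  # "🇦 - red", "🇧 - green", "🇨 - blue"
```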
The format for providing host can be confusing at times Some provide it as some provide it as 127.0.0.1:5000 which are wrong. Hence the example in the span tag
<label for="hatch_rate">Hatch rate <span style="color:#8a8a8a;">(users spawned/second)</span></label> <input type="text" name="hatch_rate" id="hatch_rate" class="val" value="{{ hatch_rate or "" }}"/><br> <label for="host"> - Host + Host <span style="color:#8a8a8a;">(eg: http://127.0.0.1:8080)</span> {% if override_host_warning %} <span style="color:#8a8a8a; font-size:12px;">(setting this will override the host on all User classes)</span></label> {% endif %}
Py3 fixes for layer_model_helper.py Summary: Fixes `__getattr__` to adhere to its Python API contract, and wraps `range()` call in a list since it does not return one anymore in Python 3. Pull Request resolved:
@@ -529,7 +529,7 @@ class LayerModelHelper(model_helper.ModelHelper): return self.add_layer(new_layer) return wrapper else: - raise ValueError( + raise AttributeError( "Trying to create non-registered layer: {}".format(layer)) @property @@ -651,5 +651,5 @@ class LayerModelHelper(model_helper.ModelHelper): # and change the assertion accordingly assert isinstance(breakdown_map, dict) assert all(isinstance(k, six.string_types) for k in breakdown_map) - assert sorted(list(breakdown_map.values())) == range(len(breakdown_map)) + assert sorted(breakdown_map.values()) == list(range(len(breakdown_map))) self._breakdown_map = breakdown_map
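The Python 3 behavior behind the second hunk above: range() returns a lazy range object that never compares equal to a list, so the unwrapped comparison in the old assert can no longer hold. A quick standalone check (illustrative only, with a made-up breakdown_map):

```python
breakdown_map = {"a": 0, "b": 1, "c": 2}  # hypothetical example

# Python 3: a list never equals a range object, so the old assert always failed.
assert sorted(breakdown_map.values()) != range(len(breakdown_map))

# Wrapping range() in list() restores the intended element-wise comparison.
assert sorted(breakdown_map.values()) == list(range(len(breakdown_map)))
```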
Then: enhance error message for invalid input type TN:
@@ -483,7 +483,8 @@ class Then(AbstractExpression): # * any pointer, since it can be checked against "null"; # * any StructType, since structs are nullable. expr = construct(self.expr, - lambda cls: cls.is_ptr or cls.is_struct_type) + lambda cls: cls.is_ptr or cls.is_struct_type, + 'Invalid prefix type for .then: {expr_type}') self.var_expr.set_type(expr.type) # Create a then-expr specific scope to restrict the span of the "then"
Fix potential flakiness By making the process collection more thread-safe.
from __future__ import absolute_import +import copy from collections import namedtuple import multiprocessing import time @@ -118,7 +119,7 @@ def _start_polling(self): def _poll(self): with self._processes_lock: - processes = self._processes + processes = copy.copy(self._processes) self._processes = [] for process in processes: @@ -163,7 +164,9 @@ def _consume_process_queue(self, process): def join(self): '''Joins on all processes synchronously.''' - for process in self._processes: + with self._processes_lock: + processes = copy.copy(self._processes) + for process in processes: while process.process.is_alive(): process.process.join(0.1) gevent.sleep(0.1)
add '--logfile' option to 'aea' This allows for saving the logs of the execution of any 'aea' command to a specific file. The logs will still be printed to stdout.
import os import shutil +from logging import FileHandler from pathlib import Path from typing import cast @@ -48,9 +49,13 @@ DEFAULT_SKILL = "error" @click.version_option('0.1.0') @click.pass_context @click_log.simple_verbosity_option(logger, default="INFO") -def cli(ctx) -> None: [email protected]('-l', '--logfile', 'log_file', type=click.Path(), required=False, default=None, + help="Save logs into a log file.") +def cli(ctx, log_file: str) -> None: """Command-line tool for setting up an Autonomous Economic Agent.""" ctx.obj = Context(cwd=".") + if log_file: + logger.addHandler(FileHandler(log_file)) @cli.command()
Update jira.rst Adding a new method to get all the users who have browse permission to a project
@@ -147,6 +147,10 @@ Manage projects # Use 'expand' to get details (default is None) possible values are notificationSchemeEvents,user,group,projectRole,field,all jira.get_priority_scheme_of_project(project_key_or_id, expand=None) + # Returns a list of active users who have browse permission for a project that matches the search string for username. + # Using " " string (space) for username gives All the active users who have browse permission for a project + jira.get_users_with_browse_permission_to_a_project(self, username, issue_key=None, project_key=None, start=0, limit=100) + Manage issues -------------
Fix NodeUI documentation:url metadata. The file structure of the docs was re-organized a while back, but we forgot to update these links.
@@ -44,7 +44,7 @@ import GafferUI def __documentationURL( node ) : - fileName = "$GAFFER_ROOT/doc/gaffer/html/NodeReference/" + node.typeName().replace( "::", "/" ) + ".html" + fileName = "$GAFFER_ROOT/doc/gaffer/html/Reference/NodeReference/" + node.typeName().replace( "::", "/" ) + ".html" fileName = os.path.expandvars( fileName ) return "file://" + fileName if os.path.isfile( fileName ) else ""
UI: improved disengage on gas toggle description * Update settings.cc Should be depressed, as is standard in the automotive industry. Pressed implies a state change from down -> up; depressed means the state is down * Update selfdrive/ui/qt/offroad/settings.cc
@@ -68,7 +68,7 @@ TogglesPanel::TogglesPanel(SettingsWindow *parent) : ListWidget(parent) { { "DisengageOnAccelerator", "Disengage On Accelerator Pedal", - "When enabled, openpilot will disengage when the accelerator pedal is pressed.", + "When enabled, pressing the accelerator pedal will disengage openpilot.", "../assets/offroad/icon_disengage_on_accelerator.svg", }, #ifdef ENABLE_MAPS
Replace python validation_rule_enforcer with Rust The validation_rule_enforcer is used when every batch is added to a candidate block. This incurs a cost of transforming an increasing set of batches from rust to python with every batch added. By using the rust implementation, this transformation cost can be avoided.
@@ -27,6 +27,7 @@ use batch::Batch; use transaction::Transaction; use journal::chain_commit_state::TransactionCommitCache; +use journal::validation_rule_enforcer; use pylogger; @@ -172,7 +173,12 @@ impl CandidateBlock { let gil = cpython::Python::acquire_gil(); let py = gil.python(); self.block_store - .call_method(py, "has_batch", (txn.header_signature.as_str(),), None) + .call_method( + py, + "has_transaction", + (txn.header_signature.as_str(),), + None, + ) .expect("Blockstore has no method 'has_batch'") .extract::<bool>(py) .unwrap() @@ -264,37 +270,15 @@ impl CandidateBlock { batches_to_add.push(batch); { - let gil = cpython::Python::acquire_gil(); - let py = gil.python(); - let validation_enforcer = py.import( - "sawtooth_validator.journal.validation_rule_enforcer", - ).expect("Unable to import sawtooth_validator.journal.validation_rule_enforcer"); - let batches = cpython::PyList::new( - py, - &self.pending_batches + let batches_to_test = self.pending_batches .iter() - .map(|b| b.to_py_object(py)) - .chain(batches_to_add.iter().map(|b| b.to_py_object(py))) - .collect::<Vec<cpython::PyObject>>(), - ); - let signer_pub_key = self.identity_signer - .call_method(py, "get_public_key", cpython::NoArgs, None) - .expect("IdentitySigner has no method 'get_public_key'") - .call_method(py, "as_hex", cpython::NoArgs, None) - .expect("PublicKey has no method 'as_hex'"); - if !validation_enforcer - .call( - py, - "enforce_validation_rules", - (self.settings_view.clone_ref(py), signer_pub_key, batches), - None, - ) - .expect( - "Module validation_rule_enforcer has no function 'enforce_validation_rules'", - ) - .extract::<bool>(py) - .unwrap() - { + .chain(batches_to_add.iter()) + .collect::<Vec<_>>(); + if !validation_rule_enforcer::enforce_validation_rules( + &self.settings_view, + &self.get_signer_public_key_hex(), + &batches_to_test, + ) { return; } } @@ -316,6 +300,19 @@ impl CandidateBlock { } } + fn get_signer_public_key_hex(&self) -> String { + let gil = cpython::Python::acquire_gil(); + let py = gil.python(); + + self.identity_signer + .call_method(py, "get_public_key", cpython::NoArgs, None) + .expect("IdentitySigner has no method 'get_public_key'") + .call_method(py, "as_hex", cpython::NoArgs, None) + .expect("PublicKey has no method 'as_hex'") + .extract(py) + .expect("Unable to convert python string to rust") + } + pub fn sign_block(&self, block_builder: &cpython::PyObject) { let gil = cpython::Python::acquire_gil(); let py = gil.python();
Setup RBAC for hostpath This fixes
@@ -16,6 +16,7 @@ spec: labels: k8s-app: hostpath-provisioner spec: + serviceAccountName: microk8s-hostpath containers: - name: hostpath-provisioner image: cdkbot/hostpath-provisioner-$ARCH:latest @@ -43,3 +44,57 @@ metadata: annotations: storageclass.kubernetes.io/is-default-class: "true" provisioner: microk8s.io/hostpath +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: microk8s-hostpath + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: microk8s-hostpath +rules: +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: + - list + - get + - watch + - update +- apiGroups: [""] + resources: + - persistentvolumes + verbs: + - list + - get + - update + - watch + - create +- apiGroups: [""] + resources: + - events + verbs: + - create + - patch +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: microk8s-hostpath +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: microk8s-hostpath +subjects: + - kind: ServiceAccount + name: microk8s-hostpath + namespace: kube-system
Update quickstart.md [Minor fix] the section is on the left not the right.
@@ -76,7 +76,7 @@ for score, i, j in all_sentence_combinations[0:5]: print("{} \t {} \t {:.4f}".format(sentences[i], sentences[j], cos_sim[i][j])) ``` -See on the right the *Usage* sections for more examples how to use SentenceTransformers. +See on the left the *Usage* sections for more examples how to use SentenceTransformers. ## Pre-Trained Models Various pre-trained models exists optimized for many tasks exists. For a full list, see **[Pretrained Models](pretrained_models.md)**.
Code modification Modified with scipy beta function
@@ -2,7 +2,7 @@ import numpy as np from scipy.special import betaln, betainc from scipy.special import logsumexp from matplotlib import pyplot as plt - +from scipy.stats import beta def normalizeLogspace(x): L = logsumexp(x, 0) @@ -12,26 +12,16 @@ def normalizeLogspace(x): def evalpdf(thetas, postZ, alphaPost): p = np.zeros_like(thetas) - # print(p.shape) M = np.size(postZ) for k in range(M): a = alphaPost[k, 0] b = alphaPost[k, 1] - p += postZ[k] * np.exp(betaLogprob(a, b, thetas)) + # p += postZ[k] * np.exp(beta.logpdf(thetas, a, b)) # this also works + p += postZ[k] * beta.pdf(thetas, a, b) return p -def betaLogprob(a, b, X): - logkerna = (a - 1) * np.log(X) - logkerna[a == 1 and X == 0] = 0 - logkernb = (b - 1) * np.log(1 - X) - logkernb[b == 1 and X == 1] = 0 - logp = logkerna + logkernb - betaln(a, b) - - return logp - - dataSS = np.array([20, 10]) alphaPrior = np.array([[20, 20], [30, 10]]) M = 2
move header writing from write_log() to new _write_header() update Log.__init__()
@@ -131,21 +131,11 @@ def read_log(filename): return log, info def write_log(output, log, info): - _fmt = lambda x: '%s' % x if x is not None else '' - - output('# started: %s' % time.asctime()) - output('# groups: %d' % len(info)) - for ii, (xlabel, ylabel, yscale, names, plot_kwargs) \ - in six.iteritems(info): - output('# %d' % ii) - output('# xlabel: "%s", ylabel: "%s", yscales: "%s"' - % (_fmt(xlabel), _fmt(ylabel), yscale)) - output('# names: "%s"' % ', '.join(names)) - output('# plot_kwargs: "%s"' - % ', '.join('%s' % ip for ip in plot_kwargs)) + xlabels, ylabels, yscales, names, plot_kwargs = zip(*info.values()) + _write_header(output, xlabels, ylabels, yscales, names, plot_kwargs) for ii, (xlabel, ylabel, yscale, names, plot_kwargs) \ - in six.iteritems(info): + in ordered_iteritems(info): for ip, name in enumerate(names): xs, ys, vlines = log[name] @@ -155,6 +145,23 @@ def write_log(output, log, info): if x in vlines: output(name + ': -----') + output('# ended: %s' % time.asctime()) + +def _write_header(output, xlabels, ylabels, yscales, data_names, plot_kwargs): + _fmt = lambda x: '%s' % x if x is not None else '' + + output('# started: %s' % time.asctime()) + output('# groups: %d' % len(data_names)) + for ig, names in enumerate(data_names): + output('# %d' % ig) + output('# xlabel: "%s", ylabel: "%s", yscales: "%s"' + % (_fmt(xlabels[ig]), _fmt(ylabels[ig]), + yscales[ig])) + output('# names: "%s"' % ', '.join(names)) + output('# plot_kwargs: "%s"' + % ', '.join('%s' % ii + for ii in plot_kwargs[ig])) + def plot_log(axs, log, info, xticks=None, yticks=None, groups=None, show_legends=True, swap_axes=False): """ @@ -361,19 +368,9 @@ class Log(Struct): self.can_plot = (mpl is not None) and (Process is not None) if log_filename is not None: - _fmt = lambda x: '%s' % x if x is not None else '' self.output = Output('', filename=log_filename) - self.output('# started: %s' % time.asctime()) - self.output('# groups: %d' % n_gr) - for ig, names in enumerate(data_names): - self.output('# %d' % ig) - self.output('# xlabel: "%s", ylabel: "%s", yscales: "%s"' - % (_fmt(xlabels[ig]), _fmt(ylabels[ig]), - yscales[ig])) - self.output('# names: "%s"' % ', '.join(names)) - self.output('# plot_kwargs: "%s"' - % ', '.join('%s' % ii - for ii in self.plot_kwargs[ig])) + _write_header(self.output, xlabels, ylabels, yscales, data_names, + plot_kwargs) if self.is_plot and (not self.can_plot): output(_msg_no_live)
Fix the parallel optimization iterator Previously the iterator would be empty if n_trials was set to None. Thanks to for the code
@@ -280,8 +280,17 @@ class Study(BaseStudy): gc_after_trial, None) else: time_start = datetime.datetime.now() + + if n_trials is not None: + _iter = range(n_trials) + elif timeout is not None: + is_timeout = lambda: (datetime.datetime.now() - time_start).total_seconds() > timeout + _iter = iter(is_timeout, True) + else: + # The following expression makes an iterator that never ends. + _iter = iter(int, 1) + with Parallel(n_jobs=n_jobs, prefer="threads") as parallel: - _iter = range(n_trials) if n_trials is not None else iter(int, 0) parallel( delayed(self._optimize_sequential) (func, 1, timeout, catch, callbacks, gc_after_trial, time_start)
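For context on the iterator fix above: Python's two-argument iter(callable, sentinel) stops as soon as the callable's return value equals the sentinel, so iter(int, 0) is empty immediately (int() returns 0), while iter(int, 1) never terminates. A tiny standalone check (not part of the patch):

```python
from itertools import islice

# int() == 0 matches the sentinel right away, so nothing is yielded.
assert list(iter(int, 0)) == []

# With sentinel 1 the callable never matches, giving an endless iterator.
assert list(islice(iter(int, 1), 3)) == [0, 0, 0]
```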
reset timeout_seconds to 420 to align with private repo
@@ -87,8 +87,8 @@ nested_input_definitions = { "timeout_seconds": { "type": "int", "min": 1, - "max": 9999, - "default": 600, + "max": 420, + "default": 420, "description": "The number of seconds allowed before the optimization times out" }, "user_uuid": {
Add comment explaining buildx to workflow It's better to document these steps.
@@ -25,6 +25,12 @@ jobs: - name: Checkout code uses: actions/checkout@v2 + # The current version (v2) of Docker's build-push action uses + # buildx, which comes with BuildKit features that help us speed + # up our builds using additional cache features. Buildx also + # has a lot of other features that are not as relevant to us. + # + # See https://github.com/docker/build-push-action - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1
Update .travis.yml Moved pyglow install attempt
@@ -29,17 +29,17 @@ before_install: # Replace dep1 dep2 ... with your dependencies - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION atlas numpy scipy matplotlib nose pandas statsmodels coverage netCDF4 - + # install pyglow, space science models + - git clone https://github.com/timduly4/pyglow.git + - cd pyglow + - ./pyglow_install.sh + - cd .. # command to install dependencies install: # Coverage packages are on my binstar channel # - conda install --yes -c dan_blanchard python-coveralls nose-cov - source activate test-environment - pip install coveralls - - git clone https://github.com/timduly4/pyglow.git - - cd pyglow - - ./pyglow_install.sh - - cd .. - "python setup.py install" # command to run tests script:
fix hub doc format Summary: Pull Request resolved:
@@ -52,6 +52,7 @@ You can see the full script in handles ``pretrained``, alternatively you can put the following logic in the entrypoint definition. :: + if pretrained: # For checkpoint saved in local repo model.load_state_dict(<path_to_saved_checkpoint>)
Update search when contributors are added/removed. Update search when affiliated insts are added/removed. Remove properties and M2M fields from SEARCH_UPDATE_FIELDS.
@@ -221,18 +221,13 @@ class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixi 'title', 'category', 'description', - 'visible_contributor_ids', - 'tags', 'is_fork', - 'is_registration', 'retraction', 'embargo', 'is_public', 'is_deleted', 'wiki_pages_current', - 'is_retracted', 'node_license', - 'affiliated_institutions', 'preprint_file', } @@ -720,6 +715,7 @@ class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixi raise UserNotAffiliatedError('User is not affiliated with {}'.format(inst.name)) if not self.is_affiliated_with_institution(inst): self.affiliated_institutions.add(inst) + self.update_search() if log: NodeLog = apps.get_model('osf.NodeLog') @@ -752,6 +748,7 @@ class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixi ) if save: self.save() + self.update_search() return True return False @@ -1148,7 +1145,7 @@ class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixi project_signals.contributor_added.send(self, contributor=contributor, auth=auth, email_template=send_email) - + self.update_search() return contrib_to_add, True # Permissions must be overridden if changed when contributor is @@ -1363,7 +1360,7 @@ class AbstractNode(DirtyFieldsMixin, TypedModel, AddonModelMixin, IdentifierMixi ) self.save() - + self.update_search() # send signal to remove this user from project subscriptions project_signals.contributor_removed.send(self, user=contributor)
Update merge_arrays.py This fixes an issue of wrong memlet volumes after MergeArrays has been applied. To see the bug in action launch for instance jacobi2d in the polybench samples and look at the wrong volume of the memlet adjacent to the very first map entry.
@@ -112,8 +112,10 @@ class InMergeArrays(pattern_matching.Transformation): map.remove_out_connector('OUT_' + c) # Re-propagate memlets + edge_to_propagate = next(e for e in graph.out_edges(map) + if e.src_conn[4:] == result_connector) map_edge._data = propagate_memlet(dfg_state=graph, - memlet=map_edge.data, + memlet=edge_to_propagate.data, scope_node=map, union_inner_edges=True) @@ -221,7 +223,9 @@ class OutMergeArrays(pattern_matching.Transformation): map.remove_out_connector('OUT_' + c) # Re-propagate memlets + edge_to_propagate = next(e for e in graph.in_edges(map) + if e.dst_conn[3:] == result_connector) map_edge._data = propagate_memlet(dfg_state=graph, - memlet=map_edge.data, + memlet=edge_to_propagate.data, scope_node=map, union_inner_edges=True)
test_do_send_realm_reactivation_email: Deactivate the realm first. This doesn't make sense if the realm is active and will fail as soon as do_reactivate_realm is fixed in the next commit to be a noop and not create confused RealmAuditLog entries when the realm is active.
@@ -399,6 +399,8 @@ class RealmTest(ZulipTestCase): def test_do_send_realm_reactivation_email(self) -> None: realm = get_realm("zulip") + do_deactivate_realm(realm, acting_user=None) + self.assertEqual(realm.deactivated, True) iago = self.example_user("iago") do_send_realm_reactivation_email(realm, acting_user=iago) from django.core.mail import outbox
Log to stdout when PCLUSTER_LOG_TO_STDOUT env is set Facilitates development and debugging For example you can do: `PCLUSTER_LOG_TO_STDOUT=1 pcluster list-clusters`
import logging.config import os +import sys from pcluster.utils import get_cli_log_file @@ -40,11 +41,20 @@ def config_logger(): "maxBytes": 5 * 1024 * 1024, "backupCount": 3, }, + "console": { + "level": "DEBUG", + "formatter": "standard", + "class": "logging.StreamHandler", + "stream": sys.stdout, + }, }, "loggers": { "": {"handlers": ["default"], "level": "WARNING", "propagate": False}, # root logger "pcluster": {"handlers": ["default"], "level": "INFO", "propagate": False}, }, } + if os.environ.get("PCLUSTER_LOG_TO_STDOUT"): + for logger in logging_config["loggers"].values(): + logger["handlers"] = ["console"] os.makedirs(os.path.dirname(logfile), exist_ok=True) logging.config.dictConfig(logging_config)
Explicitly wrap embedding theta in tf.identity when tf.gather is used to prevent TPU issues.
@@ -2243,7 +2243,15 @@ class SimpleEmbeddingLayer(quant_utils.QuantizableLayer): pruning_utils.AddToPruningCollections(self.vars.wm, self.vars.mask, self.vars.threshold) else: - self.CreateVariable('wm', pc) + # If tf.gather is used, the gradient for the wm will be represented as + # IndexedSlices which is sparse. tf.tpu.cross_replica_sum turns + # IndexedSlices into a dense tensor with undefined first dimension. + # This may cause issues on TPU so instead we just wrap this with + # tf.identity which allows tf.tpu.cross_replica_sum to properly compute + # the first dim. + # NOTE: This may potentially lead to unintended copies on CPU/GPU. + theta_fn = tf.identity if self._fprop_mode == 'gather' else None + self.CreateVariable('wm', pc, theta_fn=theta_fn) def EmbLookupDefaultTheta(self, ids): """Lookups embedding vectors for ids."""
Adds commented-out example of how to use GateSetFunction class in reportables.py This example shows a more advanced usage of the class, in particular how the evaluate_nearby method might be used for finite-difference computation speedup in the future.
@@ -111,6 +111,32 @@ Rel_gatestring_eigenvalues = _gsf.gatesetfn_factory(rel_gatestring_eigenvalues) # init args == (gatesetA, gatesetB, gatestring) +#Example alternate implementation that utilizes evaluate_nearby... +#class Gatestring_gaugeinv_diamondnorm(_gsf.GateSetFunction): +# def __init__(self, gatesetA, gatesetB, gatestring): +# B = gatesetB.product(gatestring) +# self.evB = _np.linalg.eigvals(B) +# self.gatestring = gatestring +# _gsf.GateSetFunction.__init__(self, gatesetA, ["all"]) +# +# def evaluate(self, gateset): +# A = gateset.product(self.gatestring) +# evA, evecsA = _np.linalg.eig(A) +# self.A0, self.evA0, self.evecsA0, self.ievecsA0 = A, evA, evecsA, _np.linalg.inv(evecsA) #save for evaluate_nearby... +# wts, self.pairs = _tools.minweight_match(evA, self.evB, lambda x,y: abs(x-y), return_pairs=True) +# return _np.max(wts) +# +# def evaluate_nearby(self, nearby_gateset): +# #avoid calling minweight_match again +# A = nearby_gateset.product(self.gatestring) +# dA = A - self.A0 +# #evA = _np.linalg.eigvals(A) # = self.evA0 + U * (A-A0) * Udag +# evA = _np.array( [ self.evA0 + _np.dot(self.ievecsA0[k,:], _np.dot(dA, self.evecsA0[:,k])) for k in range(dA.shape[0])] ) +# return _np.max( [ abs(evA[i]-self.evB[j]) for i,j in self.pairs ] ) +# +# ref for eigenvalue derivatives: https://www.win.tue.nl/casa/meetings/seminar/previous/_abstract051019_files/Presentation.pdf + + def gatestring_gaugeinv_diamondnorm(gatesetA, gatesetB, gatestring): A = gatesetA.product(gatestring) # "gate" B = gatesetB.product(gatestring) # "target gate"
Update evolution.py removing extra copy and putting it on
@@ -311,7 +311,7 @@ def evolve(v0,t0,times,f,solver_name="dop853",real=False,stack_state=False,verbo v0 = v0.astype(_np.complex128,copy=False).view(_np.float64) except ValueError: # copy initial state v0 to make it contiguous - v0 = v0.astype(_np.complex128,copy=False).copy().view(_np.float64) + v0 = v0.astype(_np.complex128,copy=True).view(_np.float64) solver = ode(_cmplx_f) # y_f = f(t,y,*args) solver.set_f_params(f,f_params)
BUG: Fix reference count error of types when initializing multiarraymodule. PyDict_SetItemString internally increments the object's reference count, so we don't need to INCREF before adding types to the module dict anymore.
@@ -4666,23 +4666,15 @@ PyMODINIT_FUNC initmultiarray(void) { ADDCONST(MAY_SHARE_EXACT); #undef ADDCONST - Py_INCREF(&PyArray_Type); PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type); - Py_INCREF(&PyArrayIter_Type); PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type); - Py_INCREF(&PyArrayMultiIter_Type); PyDict_SetItemString(d, "nditer", (PyObject *)&NpyIter_Type); - Py_INCREF(&NpyIter_Type); PyDict_SetItemString(d, "broadcast", (PyObject *)&PyArrayMultiIter_Type); - Py_INCREF(&PyArrayDescr_Type); PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type); - - Py_INCREF(&PyArrayFlags_Type); PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type); /* Business day calendar object */ - Py_INCREF(&NpyBusDayCalendar_Type); PyDict_SetItemString(d, "busdaycalendar", (PyObject *)&NpyBusDayCalendar_Type); set_flaginfo(d);
Fixed bug in POST chunk caused by last checkin.
@@ -468,11 +468,6 @@ async def POST_Chunk(request): log.error(msg) raise HTTPBadRequest(reason=msg) - # get chunk from cache/s3. If not found init a new chunk if this is a write request - chunk_arr = await getChunk(app, chunk_id, dset_json, chunk_init=put_points) - - if put_points: - # writing point data # create a numpy array for incoming points input_bytes = await request_read(request) if len(input_bytes) != request.content_length: @@ -480,6 +475,14 @@ async def POST_Chunk(request): log.error(msg) raise HTTPInternalServerError() + # get chunk from cache/s3. If not found init a new chunk if this is a write request + chunk_arr = await getChunk(app, chunk_id, dset_json, chunk_init=put_points) + + + if put_points: + # writing point data + + # create a numpy array with the following type: # (coord1, coord2, ...) | dset_dtype if rank == 1:
Remove duplicate function left over from merge forward Also adds the distutils import for lint
from __future__ import absolute_import import copy import contextlib +import distutils import errno import fnmatch import glob @@ -16,7 +17,6 @@ import shutil import stat import subprocess import time -import warnings from datetime import datetime # Import salt libs @@ -109,48 +109,6 @@ PYGIT2_MINVER = '0.20.3' LIBGIT2_MINVER = '0.20.0' -def enforce_types(key, val): - ''' - Force params to be strings unless they should remain a different type - ''' - non_string_params = { - 'ssl_verify': bool, - 'insecure_auth': bool, - 'env_whitelist': 'stringlist', - 'env_blacklist': 'stringlist', - 'refspecs': 'stringlist', - } - - def _find_global(key): - for item in non_string_params: - try: - if key.endswith('_' + item): - ret = item - break - except TypeError: - if key.endswith('_' + str(item)): - ret = item - break - else: - ret = None - return ret - - if key not in non_string_params: - key = _find_global(key) - if key is None: - return six.text_type(val) - - expected = non_string_params[key] - if expected is bool: - return val - elif expected == 'stringlist': - if not isinstance(val, (six.string_types, list)): - val = six.text_type(val) - if isinstance(val, six.string_types): - return [x.strip() for x in val.split(',')] - return [six.text_type(x) for x in val] - - def enforce_types(key, val): ''' Force params to be strings unless they should remain a different type
AutoresponderEmailMixin: improvements to the autoresponder mechanism: 1. mixin settings don't clash with EmailSendMixin; 2. higher customizability; 3. supports TXT and HTML emails only.
@@ -266,36 +266,77 @@ class PrepopulationSupportMixin: class AutoresponderMixin: - """Automatically emails the sender.""" + """Automatically emails the form sender.""" @property - def email_subject(self): + def autoresponder_subject(self): + """Autoresponder email subject.""" raise NotImplementedError @property - def email_body_template(self): + def autoresponder_body_template_txt(self): + """Autoresponder email body template (TXT).""" raise NotImplementedError - def form_valid(self, form): - """Send email to form sender if the form is valid.""" + @property + def autoresponder_body_template_html(self): + """Autoresponder email body template (HTML).""" + raise NotImplementedError - retval = super().form_valid(form) + @property + def autoresponder_form_field(self): + """Form field's name that contains autoresponder recipient email.""" + return 'email' + + def autoresponder_email_context(self, form): + """Context for """ + # list of fields allowed to show to the user + whitelist = [] + form_data = [v for k, v in form.cleaned_data.items() if k in whitelist] + return dict(form_data=form_data) + + def autoresponder_kwargs(self, form): + """Arguments passed to EmailMultiAlternatives.""" + recipient = ( + form.cleaned_data.get(self.autoresponder_form_field, None) or "" + ) + return dict(to=[recipient]) - body_template = get_template(self.email_body_template) - email_body = body_template.render({}) - recipient = form.cleaned_data['email'] + def autoresponder_prepare_email(self, form): + """Prepare EmailMultiAlternatives object with message.""" + # get message subject + subject = self.autoresponder_subject - email = EmailMessage( - subject=self.email_subject, - body=email_body, - to=[recipient], - ) + # get message body templates + body_txt_tpl = get_template(self.autoresponder_body_template_txt) + body_html_tpl = get_template(self.autoresponder_body_template_html) + + # get message body (both in text and in HTML) + context = self.autoresponder_email_context(form) + body_txt = body_txt_tpl.render(context) + body_html = body_html_tpl.render(context) + + # additional arguments, including recipients + kwargs = self.autoresponder_kwargs(form) + + email = EmailMultiAlternatives(subject, body_txt, **kwargs) + email.attach_alternative(body_html, 'text/html') + return email + + def autoresponder(self, form, fail_silently=True): + """Get email from `self.autoresponder_prepare_email`, then send it.""" + email = self.autoresponder_prepare_email(form) try: email.send() except SMTPException as e: - pass # fail silently + if not fail_silently: + raise e + def form_valid(self, form): + """Send email to form sender if the form is valid.""" + retval = super().form_valid(form) + self.autoresponder(form, fail_silently=True) return retval
Update NNL documentation. Changed Bayesian dropout to MC dropout, and added a note about MC dropout.
@@ -4334,7 +4334,7 @@ Stochasticity: Note: Usually dropout only applied during training as below - (except `Bayesian dropout`_). + (except `MC dropout`_). If you want to use dropout as an MC dropout, remove 'if train:'. .. code-block:: python @@ -4342,7 +4342,7 @@ Stochasticity: if train: h = F.dropout(h, 0.5) - .. _Bayesian dropout: https://arxiv.org/abs/1506.02142 + .. _MC dropout: https://arxiv.org/abs/1506.02142 inputs: x: doc: N-D array
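For readers unfamiliar with the MC dropout idea referenced in the note above, here is a minimal NumPy sketch (illustrative only, not NNabla code): dropout stays active at inference time and several stochastic forward passes are averaged to get a predictive mean and a spread estimate.

```python
import numpy as np

def dropout_forward(x, w, rng, p=0.5):
    # Inverted dropout left active at inference time -- the "MC dropout" use.
    mask = rng.binomial(1, 1.0 - p, size=x.shape) / (1.0 - p)
    return float((x * mask) @ w)

rng = np.random.default_rng(0)
x, w = np.ones(16), np.full(16, 0.25)
samples = [dropout_forward(x, w, rng) for _ in range(200)]
pred_mean, pred_std = np.mean(samples), np.std(samples)  # prediction and uncertainty
```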
Update quickstart.rst Add the alternative option to download and execute the bootstrap salt minion script in just one line.
@@ -33,6 +33,13 @@ for any OS with a Bourne shell: curl -L https://bootstrap.saltstack.com | sudo sh - +.. note:: + + Alternatively, to download the bash script and run it immediately, use: + + .. code-block:: bash + + curl -L https://bootstrap.saltproject.io | sudo sh -s -- See the `salt-bootstrap`_ documentation for other one liners. When using `Vagrant`_ to test out salt, the `Vagrant salt provisioner`_ will provision the VM for you.
Correct FPB revision checks FPB revision was being stored in `fp_rev` but often tested from `fpb_rev`. Combine these two variables. Spotted while browsing code - by inspection, this appears to have made `FPB::can_support_address` pessimistic.
@@ -62,9 +62,9 @@ class FPB(BreakpointProvider, CoreSightComponent): def init(self): # setup FPB (breakpoint) fpcr = self.ap.read32(FPB.FP_CTRL) - self.fp_rev = 1 + ((fpcr & FPB.FP_CTRL_REV_MASK) >> FPB.FP_CTRL_REV_SHIFT) - if self.fp_rev not in (1, 2): - logging.warning("Unknown FPB version %d", self.fp_rev) + self.fpb_rev = 1 + ((fpcr & FPB.FP_CTRL_REV_MASK) >> FPB.FP_CTRL_REV_SHIFT) + if self.fpb_rev not in (1, 2): + logging.warning("Unknown FPB version %d", self.fpb_rev) self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF) self.nb_lit = (fpcr >> 7) & 0xf logging.info("%d hardware breakpoints, %d literal comparators", self.nb_code, self.nb_lit) @@ -118,12 +118,12 @@ class FPB(BreakpointProvider, CoreSightComponent): if not bp.enabled: bp.enabled = True comp = 0 - if self.fp_rev == 1: + if self.fpb_rev == 1: bp_match = (1 << 30) if addr & 0x2: bp_match = (2 << 30) comp = addr & 0x1ffffffc | bp_match | 1 - elif self.fp_rev == 2: + elif self.fpb_rev == 2: comp = (addr & 0xfffffffe) | 1 self.ap.write32(bp.comp_register_addr, comp) logging.debug("BP: wrote 0x%08x to comp @ 0x%08x", comp, bp.comp_register_addr)
message-feed: Remove visually unappealing top border. This removes an unnecessary and unappealing top border from the message headers while keeping all else the same.
@@ -716,7 +716,6 @@ td.pointer { .message_list .recipient_row { background: hsl(0, 0%, 94%); border-bottom: 1px solid hsl(0, 0%, 88%); - border-top: 1px solid hsl(0, 0%, 88%); margin-bottom: 10px; } @@ -726,7 +725,7 @@ td.pointer { .stream_label { display: inline-block; - padding: 3px 7px 2px 6px; + padding: 4px 7px 3px 6px; font-weight: normal; height: 17px; line-height: 17px; @@ -1004,6 +1003,7 @@ td.pointer { .private-message .messagebox, .message_header_private_message .message-header-contents { background-color: hsl(192, 19%, 95%); + box-shadow: inset 1px 1px 0px hsl(0, 0%, 88%); } .message-header-contents {
fix false negative isuniform of transposed const The `isuniform` test currently responds with a false negative when the argument contains a `Transpose`. This commit adds support for `Transpose` by peeling off `Transpose` instances of the argument in addition to `InsertAxis`, before testing whether the argument is the desired `Constant`.
@@ -2923,7 +2923,7 @@ def zeros_like(arr): return zeros(arr.shape, arr.dtype) def isuniform(arg, value): - while isinstance(arg, InsertAxis): + while isinstance(arg, (InsertAxis, Transpose)): arg = arg.func if isinstance(arg, Constant) and arg.ndim == 0: return arg.value[()] == value
Update version-archive.rst Fixed URL that had commas instead of periods
@@ -4,7 +4,7 @@ Version Archive Mattermost Enterprise Edition ------------------------------ -Mattermost Enterprise Edition v4.4.0 - `View Changelog <https://docs.mattermost.com/administration/changelog.html#release-v4-4-0>`_ - `Download <https://releases.mattermost.com/4.4,0/mattermost-4.4,0-linux-amd64.tar.gz>`_ +Mattermost Enterprise Edition v4.4.0 - `View Changelog <https://docs.mattermost.com/administration/changelog.html#release-v4-4-0>`_ - `Download <https://releases.mattermost.com/4.4.0/mattermost-4.4.0-linux-amd64.tar.gz>`_ - ``https://releases.mattermost.com/4.4.0/mattermost-4.4.0-linux-amd64.tar.gz`` - SHA-256 Checksum: ``7289896f03b8513b7447b7c884a9553db07c4e3205e4269a710196224ecb5297`` Mattermost Enterprise Edition v4.3.2 - `View Changelog <https://docs.mattermost.com/administration/changelog.html#release-v4-3-2>`_ - `Download <https://releases.mattermost.com/4.3.2/mattermost-4.3.2-linux-amd64.tar.gz>`_
btcpayserver: fix deletion of self-signed cert on first install fixes:
@@ -16,10 +16,10 @@ if [ ${#BTCPayServer} -eq 0 ]; then echo "BTCPayServer=off" >> /mnt/hdd/raspiblitz.conf fi -# stop service +# stop services echo "making sure services are not running" +sudo systemctl stop nbxplorer 2>/dev/null sudo systemctl stop btcpayserver 2>/dev/null -sudo systemctl disable btcpayserver 2>/dev/null # switch on if [ "$1" = "1" ] || [ "$1" = "on" ]; then @@ -39,36 +39,6 @@ if [ "$1" = "1" ] || [ "$1" = "on" ]; then sudo adduser --disabled-password --gecos "" btcpay 2>/dev/null cd /home/btcpay - # store BTCpay data on HDD - sudo mkdir /mnt/hdd/.btcpayserver 2>/dev/null - - sudo mv -f /home/admin/.btcpayserver /mnt/hdd/ 2>/dev/null - sudo rm -rf /home/admin/.btcpayserver - sudo mv -f /home/btcpay/.btcpayserver /mnt/hdd/ 2>/dev/null - - sudo chown -R btcpay:btcpay /mnt/hdd/.btcpayserver - sudo ln -s /mnt/hdd/.btcpayserver /home/btcpay/ 2>/dev/null - - # clean when installed as admin - sudo rm -f /home/admin/dotnet-sdk* - sudo rm -f /home/admin/dotnet-sdk* - sudo rm -f /home/admin/.nbxplorer/Main/settings.config - - # cleanup previous installs - sudo rm -f /home/btcpay/dotnet-sdk* - sudo rm -f /home/btcpay/aspnetcore* - sudo rm -rf /home/btcpay/dotnet - sudo rm -f /usr/local/bin/dotnet - - sudo systemctl stop nbxplorer 2>/dev/null - sudo systemctl disable nbxplorer 2>/dev/null - sudo rm -f /home/btcpay/.nbxplorer/Main/settings.config - sudo rm -f /etc/systemd/system/nbxplorer.service - - sudo rm -f /home/btcpay/.btcpayserver/Main/settings.config - sudo rm -f /etc/systemd/system/btcpayserver.service - sudo rm -f /etc/nginx/sites-available/btcpayserver - echo "" echo "***" echo "Installing .NET" @@ -242,6 +212,7 @@ BTC.lightning=type=lnd-rest;server=https://127.0.0.1:8080/;macaroonfilepath=/hom echo "BTCPay Server is already installed." # start service echo "start service" + sudo systemctl start nbxplorer 2>/dev/null sudo systemctl start btcpayserver 2>/dev/null fi
Update Texas.md Added an incident in Dallas on the Margaret Hunt Hill Bridge on June 2nd.
@@ -128,6 +128,14 @@ The video shows a certain individual trying to escape, what seems to be loud exp * https://twitter.com/xtranai/status/1266898175568338945 +### Police maneuver protestors onto bridge and fire tear gas and rubber bullets | June 2nd + +On June 2nd, protestors are routed onto Margaret Hunt Hill Bridge. They are met by a line of police officers and are then fired upon by rubber bullets, tear gas, and more. + +**Links** + +* https://www.reddit.com/r/PublicFreakout/comments/gx9a5n/these_protests_took_place_on_june_2nd_in_dallas/ + ## San Antonio ### Police shoot man filming them with what were allegedly rubber bullets | (believed to be) May 31st
Enforcing the version of Python that is required. This avoids build issues and debugging.
+#!/bin/bash + +PYTHON_VERSION=`python --version` + +if [ "$PYTHON_VERSION" == "Python 3.5.2" ]; then + echo Found correct python version +else + echo Incorrect version of python in path: $PYTHON_VERSION + exit 1 +fi + + IGNORE_MISSING_OPENMP=1 cxml="/usr/local/bin/castxml" if [ -f "$cxml" ]; then
Fix collectd image build During collectd image build, the following issue occurs. package glibc-devel-2.28-196.el8.x86_64 requires glibc = 2.28-196.el8, but none of the providers can be installed. This patch fixes the issue.
FROM quay.io/centos/centos:stream8 RUN dnf clean all && \ - dnf group install -y "Development Tools" && \ + dnf group install -y "Development Tools" --nobest && \ dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm && \ dnf install -y centos-release-opstools && \ dnf install -y collectd collectd-turbostat collectd-disk collectd-apache collectd-ceph \
Capitalize the first letter of a string: confirm the string is passed as a command-line argument, trim the trailing newline, convert to a vector, change the first letter to upper case, and convert back to a string for printing ({} prints the string without double quotes).
-// Accept a string from User and Capitalize the first letter of that string +// Accept a string in command line from User and Capitalize the first letter of that string // Rust is built to support not just ASCII, but Unicode by-default. // Do note, each character in string is multi-byte UTF-8 supported, which needs upto 3 bytes (to accommodate Japanese letters) // Best part of Rust compiler issues warning to remove unused variables, functions, ... fn main() { - // Use Std. Input Device - use std::io::{stdin}; - // Create a new String Object - let mut input_value = String::new(); - // Read a mutable input value from Std Input with newline at the end. If it fails, throw Error Message - stdin().read_line(&mut input_value).expect("Error: reading from keyboard failed"); - // Chomp or Remove the new line at the end of string + //confirm string is passed as commandline argument + let mut input_value = std::env::args().nth(1).expect("Kindly pass the string as Command line Argument"); + // Trim the trailing newline input_value = input_value.trim_end().to_string(); - // COnvert the String Object into Vectorr + // convert to vector let mut buff: Vec<char> = input_value.chars().collect(); - // Convert 1'st character to Capital letter, Throw Error in case of null string - buff[0] = buff[0].to_uppercase().nth(0).expect("Updating letter 1 to uppper case failed"); - // Convert vector to String + // Change the 1'st letter to capital case + buff[0] = buff[0].to_uppercase().nth(0).unwrap(); + // convert to string for printing let buff_hold: String = buff.into_iter().collect(); - // Print Updated Value - - println! ("String with letter1 capitalised is {:?}", buff_hold); + // {} will print string without double quotes {:?} will print string with double quotes + println!("{}", buff_hold); }
Working on fixes Weiqi suggested. Also started working on the issue where the viz resets custom values when the page changes, and on an enhancement where mousing over a bubble shows the entire word along with the count (for smaller bubbles that only display one letter of the word).
$(function () { + /** + * updateMaxWordsOpt + */ function updateMaxWordsOpt() { if ($('#vizmaxwords').is(':checked')) { console.log('hi') @@ -124,7 +127,7 @@ $(window).on('load', function () { tooltip.transition() .duration(200) .style('opacity', 1) - tooltip.html('<div class="tooltip-arrow"></div><div class="tooltip-inner">' + (d.value) + '</div>') + tooltip.html('<div class="tooltip-arrow"></div><div class="tooltip-inner">' + (d.text) + ': ' + (d.value) + '</div>') }) .on('mousemove', function (d) { return tooltip @@ -148,7 +151,7 @@ $(window).on('load', function () { tooltip.transition() .duration(200) .style('opacity', 1) - tooltip.html('<div class="tooltip-arrow"></div><div class="tooltip-inner">' + (d.value) + '</div>') + tooltip.html('<div class="tooltip-arrow"></div><div class="tooltip-inner">' + (d.text) + ': ' + (d.value) + '</div>') }) .on('mousemove', function (d) { return tooltip
Get slave_pos to choose the latest replica. The latest replica has the largest slave_pos, so we just need to use slave_pos to compare replicas.
@@ -57,10 +57,14 @@ class MariaDBApp(mysql_service.BaseMySqlApp): with mysql_util.SqlClient(self.get_engine()) as client: return client.execute('SELECT @@global.gtid_binlog_pos').first()[0] + def _get_gtid_slave_executed(self): + with mysql_util.SqlClient(self.get_engine()) as client: + return client.execute('SELECT @@global.gtid_slave_pos').first()[0] + def get_last_txn(self): master_UUID = self._get_master_UUID() last_txn_id = '0' - gtid_executed = self._get_gtid_executed() + gtid_executed = self._get_gtid_slave_executed() for gtid_set in gtid_executed.split(','): uuid_set = gtid_set.split('-') if str(uuid_set[1]) == str(master_UUID):
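A hedged sketch of the comparison logic this change relies on, using a made-up gtid_slave_pos value (MariaDB GTIDs have the form domain-server_id-sequence; the master server id here is invented for illustration):

```python
# Hypothetical value of SELECT @@global.gtid_slave_pos on a replica.
gtid_slave_pos = "0-1-272,1-2-157"
master_server_id = "2"          # stand-in for the master's id

last_txn_id = "0"
for gtid in gtid_slave_pos.split(","):
    domain, server_id, seq = gtid.split("-")
    if server_id == master_server_id:
        last_txn_id = seq
print(last_txn_id)  # 157 -- replicas can now be compared by this value
```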
Added clarification to the eigen docstring. It was not clear from 'low' or 'high' alone what the order of the eigenvalues would be.
@@ -280,8 +280,9 @@ eigs.__doc__ =\ Whether the eigenvectors should be returned as well. sort : {'low', 'high'}, optional Sort the output of the eigenvalues and -vectors ordered by the relevant - size of the real part of the eigenvalue. If not all of the eigenvalues - are requested, this influences which eigenvalues will be found. + size of the real part of the eigenvalue from 'low' to high or from + 'high' to low. If not all of the eigenvalues are requested, this + influences which eigenvalues will be found. eigvals : int, optional Number of eigenvalues and -vectors to return. If `0`, then returns all.
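To make the clarified ordering concrete, a small standalone NumPy illustration (not the library's code) of sorting eigenvalues by the real part:

```python
import numpy as np

evals = np.array([3.0 + 0.0j, -1.0 + 2.0j, 0.5 + 0.0j])
order = np.argsort(evals.real)
low_to_high = evals[order]        # sort='low'  -> smallest real part first: -1+2j, 0.5, 3
high_to_low = evals[order[::-1]]  # sort='high' -> largest real part first: 3, 0.5, -1+2j
```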
ceph-validate: Added functions to accept true and false. ceph-validate used to throw an error when flags were set to 'true' or 'false' instead of True and False. Now the user can set the 'dmcrypt' and 'osd_auto_discovery' flags to 'true' or 'false'. Will fix - Bug
@@ -155,6 +155,14 @@ def validate_monitor_options(value): assert any([monitor_address_given, monitor_address_block_given, monitor_interface_given]), msg +def validate_dmcrypt_bool_value(value): + assert value in ["true", True, "false", False], "dmcrypt can be set to true/True or false/False (default)" + + +def validate_osd_auto_discovery_bool_value(value): + assert value in ["true", True, "false", False], "osd_auto_discovery can be set to true/True or false/False (default)" + + def validate_osd_scenarios(value): assert value in ["collocated", "non-collocated", "lvm"], "osd_scenario must be set to 'collocated', 'non-collocated' or 'lvm'" @@ -222,8 +230,8 @@ rados_options = ( ) osd_options = ( - (optional("dmcrypt"), types.boolean), - (optional("osd_auto_discovery"), types.boolean), + (optional("dmcrypt"), validate_dmcrypt_bool_value), + (optional("osd_auto_discovery"), validate_osd_auto_discovery_bool_value), ("osd_scenario", validate_osd_scenarios), )
Fixed collect parameters: it now copies the array if the parameter returns such a value.
@@ -94,7 +94,10 @@ class CollectParameters: self._p = list() def __call__(self, **kwargs): - self._p.append(self._parameter.get_value(*self._idx)) + value = self._parameter.get_value(*self._idx) + if isinstance(value, np.ndarray): + value = np.array(value) + self._p.append(value) def get_values(self): return self._p
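A self-contained sketch (names invented for illustration) of the aliasing problem the copy above guards against: appending the same ndarray object on every call records only its final contents.

```python
import numpy as np

param = np.zeros(2)              # stands in for the array a parameter returns
by_reference, by_copy = [], []
for step in range(3):
    param[:] = step              # the parameter mutates its array in place
    by_reference.append(param)          # every entry aliases the same array
    by_copy.append(np.array(param))     # independent snapshot per step

print(by_reference)  # [array([2., 2.]), array([2., 2.]), array([2., 2.])]
print(by_copy)       # [array([0., 0.]), array([1., 1.]), array([2., 2.])]
```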
Update injectable_antibiotics.json. Unsure where it goes in the new measure definition structure. To do: we need to exclude `bnf_code NOT LIKE "0501070I0%"`; why it matters is [here on this google doc](https://docs.google.com/document/d/1DNJoMsNfCv4CIHR3Xo7p9AtQjnwknskPY1_glpqUsXg/edit)
{ - "name": "Injectable antibiotics", + "name": "Antibiotic stewardship:Injectable preparations for the treatment of infection", "title": [ - "Injectable antibiotics" + "Antibiotic stewardship:Injectable preparations for the treatment of infection" ], "description": [ - "TODO" + "Number of prescription items for all injectable medicines in BNF Chapter 5 (infections) excluding colistimethate sodium" ], "numerator_short": "Items", "denominator_short": "1000 Patients", "TODO" ], "tags": [ - "core" + "antimicrobial", + "core", + "infections" ], "url": null, "is_percentage": false, "is_cost_based": false, - "low_is_good": true, + "low_is_good": false, "numerator_type": "bnf_items", "numerator_bnf_codes_query": [ "SELECT DISTINCT(bnf_code)",
Fix HP.1910.get_chassis_id script HG-- branch : feature/microservices
# --------------------------------------------------------------------- # HP.1910.get_chassis_id # --------------------------------------------------------------------- -# Copyright (C) 2007-2013 The NOC Project +# Copyright (C) 2007-2017 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- @@ -25,10 +25,9 @@ class Script(BaseScript): if self.has_snmp(): try: macs = [] - for v in self.snmp.get_tables( - ["1.3.6.1.2.1.2.2.1.6"], bulk=True): + for v in self.snmp.get_tables(["1.3.6.1.2.1.2.2.1.6"]): + if v[1] != '\x00\x00\x00\x00\x00\x00': macs += [v[1]] - macs.remove('\x00\x00\x00\x00\x00\x00') return { "first_chassis_mac": min(macs), "last_chassis_mac": max(macs) @@ -38,7 +37,8 @@ class Script(BaseScript): pass # Fallback to CLI - match = self.rx_mac.search(self.cli("display device manuinfo", cached=True)) + v = self.cli("display device manuinfo", cached=True) + match = self.rx_mac.search(v) mac = match.group("mac") return { "first_chassis_mac": mac,
fixed typo in sensor_fusion_2d equation h/t JChunX
@@ -2,7 +2,11 @@ import numpy as np from scipy.linalg import block_diag from scipy.stats import norm import matplotlib.pyplot as plt -from pyprobml_utils import save_fig +import os +#from pyprobml_utils import save_fig + +figdir = "../figures"; +def save_fig(fname): plt.savefig(os.path.join(figdir, fname)) def gauss_plot2d(mu, sigma, plot_options): plt.scatter(mu[0], mu[1],marker="x", c=plot_options['color']) @@ -23,7 +27,10 @@ def gauss_soft_condition(pmu, py, A, y): post['sigma'] = np.linalg.inv(smu_inv + A.T.dot(sy_inv).dot(A)) # reshape is needed to assist in + broadcasting - post['mu'] = post['sigma'].dot(A.T.dot(sy_inv).dot(y.reshape(4,1) - py['mu'])) + smu_inv.dot(pmu['mu']).reshape(2,1) + ny = py['mu'].shape[0] # 4 + nm = pmu['mu'].shape[0] # 2 + post['mu'] = post['sigma'].dot(A.T.dot(sy_inv).dot(y.reshape(ny,1) - py['mu']) + + smu_inv.dot(pmu['mu']).reshape(nm,1)) # these values are unused model = norm(loc=A.dot(pmu['mu']) + py['mu'], scale=py['sigma'] + A.dot(pmu['sigma']).dot(A.T)) @@ -63,7 +70,7 @@ def helper(sigmas): py['mu'] = np.zeros((4,1)) py['sigma'] = block_diag(sigmas[0], sigmas[1]) - #fit model + post, log_evidence = gauss_soft_condition(prior, py, A, y) gauss_plot2d(y1, sigmas[0], {"color":"r"}) gauss_plot2d(y2, sigmas[1], {"color":"g"})
Make ErrorReturnCode "RAN:" copy-pastable. Using shlex.quote. Before: sh.ErrorReturnCode_1: RAN: /usr/bin/sh -c echo -rf build && ls / && exit 1 After: sh.ErrorReturnCode_1: RAN: /usr/bin/sh -c 'echo -rf build && ls / && exit 1'
@@ -87,6 +87,11 @@ else: from io import BytesIO as iocStringIO from Queue import Queue, Empty +try: + from shlex import quote as shlex_quote # here from 3.3 onward +except ImportError: + from pipes import quote as shlex_quote # undocumented before 2.7 + IS_OSX = platform.system() == "Darwin" THIS_DIR = os.path.dirname(os.path.realpath(__file__)) SH_LOGGER_NAME = __name__ @@ -692,7 +697,7 @@ class RunningCommand(object): # arguments are the encoding we pass into _encoding, which falls back to # the system's encoding enc = call_args["encoding"] - self.ran = " ".join([arg.decode(enc, "ignore") for arg in cmd]) + self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd]) self.call_args = call_args self.cmd = cmd
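A short, standalone illustration of the quoting behaviour this change relies on (not sh's internal code):

```python
import shlex

cmd = ["/usr/bin/sh", "-c", "echo -rf build && ls / && exit 1"]
print(" ".join(cmd))
# /usr/bin/sh -c echo -rf build && ls / && exit 1   <- reads as several shell commands
print(" ".join(shlex.quote(arg) for arg in cmd))
# /usr/bin/sh -c 'echo -rf build && ls / && exit 1' <- copy-pastable as one command
```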
feat(db): add new-site flag to use tcp/ip instead of unix socket ref frappe/bench#949
@@ -17,34 +17,43 @@ from six import text_type @click.option('--db-port', type=int, help='Database Port') @click.option('--mariadb-root-username', default='root', help='Root username for MariaDB') @click.option('--mariadb-root-password', help='Root password for MariaDB') [email protected]('--no-mariadb-socket', is_flag=True, default=False, help='Set MariaDB host to % and use TCP/IP Socket instead of using the UNIX Socket') @click.option('--admin-password', help='Administrator password for new site', default=None) @click.option('--verbose', is_flag=True, default=False, help='Verbose') @click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False) @click.option('--source_sql', help='Initiate database with a SQL file') @click.option('--install-app', multiple=True, help='Install app after installation') def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, - verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, - db_name=None, db_type=None, db_host=None, db_port=None): + verbose=False, install_apps=None, source_sql=None, force=None, no_mariadb_socket=False, + install_app=None, db_name=None, db_type=None, db_host=None, db_port=None): "Create a new site" frappe.init(site=site, new_site=True) _new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force, - db_type=db_type, db_host=db_host, db_port=db_port) + no_mariadb_socket=no_mariadb_socket, db_type=db_type, db_host=db_host, db_port=db_port) if len(frappe.utils.get_sites()) == 1: use(site) def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=False, - reinstall=False, db_type=None, db_host=None, db_port=None): + no_mariadb_socket=False, reinstall=False, db_type=None, db_host=None, db_port=None): """Install a new Frappe site""" if not force and os.path.exists(site): print('Site {0} already exists'.format(site)) sys.exit(1) + if no_mariadb_socket and not db_type == "mariadb": + print('--no-mariadb-socket requires db_type to be set to mariadb.') + sys.exit(1) + + if no_mariadb_socket: + print('Using % as Database Host.') + db_host = "%" + if not db_name: db_name = '_' + hashlib.sha1(site.encode()).hexdigest()[:16]
Change default emoji reaction styling. This makes the styling slightly more compact, adds more bottom padding before the edge of the message wall, and improves consistency.
.message_reactions .reaction_button { border-radius: 0.5em; display: none; - margin: 0.2em; - padding: 0.2em; + margin: 1px 0.1em; + padding: 2.5px; padding-left: 0.3em; padding-right: 0.3em; float: left; .reaction_button .message_reaction_count { font-size: 1.1em; color: #555; + margin-left: 3px; +} + +.reaction_button i { + font-size: 1em; + margin-right: 3px; } .message_reactions:hover .reaction_button { } .message_reactions { - margin-left: -0.2em; + margin: 5px 0px; padding-left: 46px; overflow: auto; } .message_reaction { float: left; - margin: 0.2em; - padding: 0.4em; - padding-left: 0.3em; - padding-right: 0.3em; + margin: 0.15em; + padding: 0.2em 0.3em; + padding-left: 0.2em; + padding-right: 0.2em; cursor: pointer; background-color: #eef7fa; - border: thin solid #add8e6; - border-radius: 0.5em; + border: 1px solid #c7dfe6; + border-radius: 4px; } .message_reaction .emoji { float: left; - zoom: 0.80; - -moz-transform: scale(0.80); + top: 4px; + zoom: 0.70; + -moz-transform: scale(0.70); -moz-transform-origin: 0 0; } .message_reaction_count { - font-weight: bold; + position: relative; + top: 1px; font-size: 0.8em; float: left; color: #0088CC; - margin-left: 0.1em; + margin-left: 3px; line-height: 1em; }
Add pypi deploy key for travis This is a test, might not work
@@ -28,3 +28,12 @@ script: after_success: - codecov + +deploy: + provider: pypi + user: "kyle_johnson" + password: "" + secure: "qBNTRm7RgcXFIqNIN2pEN2bm6mhuNJC9vXyZOKUpWdjPrcKoAYSCoCKMa/YuhKowMW9NHHjakbU7T3sJJb15GsAE0JkrQ7dfk5baqA9E9Wgsis++/qJ0LxgRvZAypNxHsl+Ofx9RDJnTi7LGFkVU4QqmYyYDuC7x1NE4jU+4h3M=" + on: + branch: master + # tags: true
Make sure from-filenames intersects with names-file. When both names-file and from-filenames are defined, we want to make sure the tests that from-filenames returns intersect with the tests in the names file.
@@ -193,7 +193,7 @@ class SaltTestingParser(optparse.OptionParser): '--name', dest='name', action='append', - default=None, + default=[], help=('Specific test name to run. A named test is the module path ' 'relative to the tests directory') ) @@ -449,26 +449,22 @@ class SaltTestingParser(optparse.OptionParser): def parse_args(self, args=None, values=None): self.options, self.args = optparse.OptionParser.parse_args(self, args, values) + file_names = [] if self.options.names_file: with open(self.options.names_file, 'rb') as fp_: # pylint: disable=resource-leakage - lines = [] for line in fp_.readlines(): if six.PY2: - lines.append(line.strip()) + file_names.append(line.strip()) else: - lines.append( + file_names.append( line.decode(__salt_system_encoding__).strip()) - if self.options.name: - self.options.name.extend(lines) - else: - self.options.name = lines + if self.args: - if not self.options.name: - self.options.name = [] for fpath in self.args: if os.path.isfile(fpath) and \ fpath.endswith('.py') and \ os.path.basename(fpath).startswith('test_'): + if fpath in file_names: self.options.name.append(fpath) continue self.exit(status=1, msg='\'{}\' is not a valid test module'.format(fpath)) @@ -483,11 +479,12 @@ class SaltTestingParser(optparse.OptionParser): 'filename_map.yml' ) - mapped_mods = self._map_files(self.options.from_filenames) - if mapped_mods: - if self.options.name is None: - self.options.name = [] - self.options.name.extend(mapped_mods) + self.options.name.extend(self._map_files(self.options.from_filenames)) + + if self.options.name and file_names: + self.options.name = list(set(self.options.name).intersection(file_names)) + elif file_names: + self.options.name = file_names print_header(u'', inline=True, width=self.options.output_columns) self.pre_execution_cleanup()
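A minimal sketch of the selection rule the parser now applies, using hypothetical test names:

```python
mapped_from_filenames = ["unit/test_files.py", "integration/modules/test_git.py"]
names_file_entries = ["unit/test_files.py", "unit/test_grains.py"]

# When both sources are supplied, only tests named in BOTH survive.
selected = sorted(set(mapped_from_filenames) & set(names_file_entries))
print(selected)  # ['unit/test_files.py']
```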
Skip ngram parser on MariaDB Not supported yet:
@@ -100,7 +100,53 @@ class Migration(migrations.Migration): field=models.TextField(default=''), preserve_default=False, ), - # We need to add these indexes manually because Django imposes an artificial limitation that forces to specify the max length of the TextFields that get referenced by the FULLTEXT index. If we do it manually, it works, because Django can't check that we are defining a new index. - migrations.RunSQL(sql='ALTER TABLE wagtailsearch_indexentry ADD FULLTEXT INDEX `fulltext_title_body` (`title`, `body`)', reverse_sql='ALTER TABLE wagtailsearch_indexentry DROP INDEX `fulltext_title_body`'), - migrations.RunSQL(sql='ALTER TABLE wagtailsearch_indexentry ADD FULLTEXT INDEX `fulltext_autocomplete` (`autocomplete`) WITH PARSER ngram', reverse_sql='ALTER TABLE wagtailsearch_indexentry DROP INDEX `fulltext_autocomplete`') # We use an ngram parser here, so that it matches partial search queries. The index on body and title doesn't match partial queries by default. ] + + # Create FULLTEXT indexes + # We need to add these indexes manually because Django imposes an artificial limitation + # that forces to specify the max length of the TextFields that get referenced by the + # FULLTEXT index. If we do it manually, it works, because Django can't check that we are + # defining a new index. + operations.append( + migrations.RunSQL( + sql=""" + ALTER TABLE wagtailsearch_indexentry + ADD FULLTEXT INDEX `fulltext_title_body` (`title`, `body`) + """, + reverse_sql=""" + ALTER TABLE wagtailsearch_indexentry + DROP INDEX `fulltext_title_body` + """ + ) + ) + + # We use an ngram parser for autocomplete, so that it matches partial search queries. + # The index on body and title doesn't match partial queries by default. + # Note that this is not supported on MariaDB. See https://jira.mariadb.org/browse/MDEV-10267 + if connection.mysql_is_mariadb: + operations.append( + migrations.RunSQL( + sql=""" + ALTER TABLE wagtailsearch_indexentry + ADD FULLTEXT INDEX `fulltext_autocomplete` (`autocomplete`) + """, + reverse_sql=""" + ALTER TABLE wagtailsearch_indexentry + DROP INDEX `fulltext_autocomplete` + """ + ) + ) + else: + operations.append( + migrations.RunSQL( + sql=""" + ALTER TABLE wagtailsearch_indexentry + ADD FULLTEXT INDEX `fulltext_autocomplete` (`autocomplete`) + WITH PARSER ngram + """, + reverse_sql=""" + ALTER TABLE wagtailsearch_indexentry + DROP INDEX `fulltext_autocomplete` + """ + ) + )
Update navbar on the ROSS website. The Tutorial tab had a missing link (error 404, page not found). This should point it to the correct path.
@@ -133,7 +133,7 @@ html_theme_options = { # Note the "1" or "True" value above as the third argument to indicate # an arbitrary url. "navbar_links": [ - ("Tutorial", "examples/tutorial"), + ("Tutorial", "tutorials"), ("Examples", "examples"), ("API", "api"), ],
feature(radare2): add r2pipe command to execute stateful radare2 cmds. This can be a useful command to quickly execute some radare2 operations at various points in the middle of a debugging session, without the need to shell out and temporarily transfer process control to radare2.
import argparse import subprocess +import pwndbg.color.message as message import pwndbg.commands +import pwndbg.radare2 parser = argparse.ArgumentParser(description='Launches radare2', epilog="Example: r2 -- -S -AA") @@ -41,3 +43,21 @@ def r2(arguments, no_seek=False, no_rebase=False): subprocess.call(cmd) except Exception: print("Could not run radare2. Please ensure it's installed and in $PATH.") + + +parser = argparse.ArgumentParser(description='Execute stateful radare2 commands through r2pipe', + epilog="Example: r2pipe pdf sym.main") +parser.add_argument('arguments', nargs='+', type=str, + help='Arguments to pass to r2pipe') + + [email protected](parser) [email protected] +def r2pipe(arguments): + try: + r2 = pwndbg.radare2.r2pipe() + print(r2.cmd(' '.join(arguments))) + except ImportError: + print(message.error("Could not import r2pipe python library")) + except Exception as e: + print(message.error(e))
Update tests/sources/tools/perception/object_detection_2d/detr/test_detr.py Add log with test name
@@ -47,6 +47,8 @@ def rmdir(_dir): class TestDetrLearner(unittest.TestCase): @classmethod def setUpClass(cls): + print("\n\n**********************************\nTEST Object Detection DETR Learner\n" + "**********************************") cls.temp_dir = os.path.join("tests", "sources", "tools", "perception", "object_detection_2d", "detr", "detr_temp")
Remove sprint channels from the configuration. Now that the core dev sprint has ended, we can safely remove them. They caused the wrong-channel message to be huge because of all the deleted channels.
@@ -126,23 +126,6 @@ class Channels(NamedTuple): hacktoberfest_2020 = 760857070781071431 voice_chat = 412357430186344448 - # Core Dev Sprint channels - sprint_announcements = 755958119963557958 - sprint_information = 753338352136224798 - sprint_organisers = 753340132639375420 - sprint_general = 753340631538991305 - sprint_social1_cheese_shop = 758779754789863514 - sprint_social2_pet_shop = 758780951978573824 - sprint_escape_room = 761031075942105109 - sprint_stdlib = 758553316732698634 - sprint_asyncio = 762904152438472714 - sprint_typing = 762904690341838888 - sprint_discussion_capi = 758553358587527218 - sprint_discussion_triage = 758553458365300746 - sprint_discussion_design = 758553492662255616 - sprint_discussion_mentor = 758553536623280159 - sprint_documentation = 761038271127093278 - class Client(NamedTuple): name = "Sir Lancebot" @@ -295,23 +278,6 @@ WHITELISTED_CHANNELS = ( Channels.off_topic_1, Channels.off_topic_2, Channels.voice_chat, - - # Core Dev Sprint Channels - Channels.sprint_announcements, - Channels.sprint_information, - Channels.sprint_organisers, - Channels.sprint_general, - Channels.sprint_social1_cheese_shop, - Channels.sprint_social2_pet_shop, - Channels.sprint_escape_room, - Channels.sprint_stdlib, - Channels.sprint_asyncio, - Channels.sprint_typing, - Channels.sprint_discussion_capi, - Channels.sprint_discussion_triage, - Channels.sprint_discussion_design, - Channels.sprint_discussion_mentor, - Channels.sprint_documentation, ) GIT_SHA = environ.get("GIT_SHA", "foobar")
Update to r1.12.0; removed comments.
@@ -14,11 +14,11 @@ class TensorFlowBaseTest(rfm.RunOnlyRegressionTest): self.tags = {'production'} self.num_tasks = 1 self.num_gpus_per_node = 1 - self.modules = ['TensorFlow/1.7.0-CrayGNU-18.08-cuda-9.1-python3'] + self.modules = ['TensorFlow/1.12.0-CrayGNU-19.03-cuda-10.0-python3'] # Checkout to the branch corresponding to the module version of # TensorFlow - self.pre_run = ['git checkout r1.7.0'] + self.pre_run = ['git checkout r1.12.0'] self.variables = {'PYTHONPATH': '$PYTHONPATH:.'} @@ -34,7 +34,7 @@ class TensorFlowMnistTest(TensorFlowBaseTest): str(train_epochs)] self.sanity_patterns = sn.all([ - sn.assert_found(r'INFO:tensorflow:Finished evaluation at', + sn.assert_found(r'Finished evaluation at', self.stderr), sn.assert_gt(sn.extractsingle( r"Evaluation results:\s+\{.*'accuracy':\s+(?P<accuracy>\S+)" @@ -48,21 +48,21 @@ class TensorFlowWidedeepTest(TensorFlowBaseTest): super().__init__('wide_deep') train_epochs = 10 - self.executable = 'python3 ./official/wide_deep/wide_deep.py' + self.executable = 'python3 ./official/wide_deep/census_main.py' self.executable_opts = [ '--data_dir', './official/wide_deep/', '--model_dir', './official/wide_deep/model_dir', '--train_epochs', str(train_epochs)] self.sanity_patterns = sn.all([ - sn.assert_found(r'INFO:tensorflow:Finished evaluation at', + sn.assert_found(r'Finished evaluation at', self.stderr), sn.assert_reference(sn.extractsingle( r"Results at epoch %s[\s\S]+accuracy:\s+(?P<accuracy>\S+)" % - train_epochs, self.stdout, 'accuracy', float, -1), + train_epochs, self.stderr, 'accuracy', float, -1), 0.85, -0.05, None) ]) self.pre_run += ['mkdir ./official/wide_deep/model_dir', - 'python3 ./official/wide_deep/data_download.py ' + 'python3 ./official/wide_deep/census_dataset.py ' '--data_dir ./official/wide_deep/']
workloads/dhrystone: Fix taskset Was invoking busybox in a hardcoded way, and was not using self.target.busybox. Updated to use the correct version.
@@ -82,7 +82,8 @@ class Dhrystone(Workload): else: execution_mode = '-r {}'.format(self.duration) if self.taskset_mask: - taskset_string = 'busybox taskset 0x{:x} '.format(self.taskset_mask) + taskset_string = '{} taskset 0x{:x} '.format(self.target.busybox, + self.taskset_mask) else: taskset_string = '' self.command = '{}{} {} -t {} -d {}'.format(taskset_string,
Bugfix: png rendering. Following the crystal frame merge, set_reciprocal_crystal_vectors was left undefined for the png output class - added a stub.
@@ -89,6 +89,10 @@ def set_reciprocal_lattice_vectors(self, *args, **kwargs): # we do not draw reciprocal lattice vectors at this time pass + def set_reciprocal_crystal_vectors(self, *args, **kwargs): + # we do not draw reciprocal crystak vectors at this time either + pass + def project_2d(self, n): d = self.points.dot(n.elems) p = d * flex.vec3_double(len(d), n.elems)
Comment out broken assertion. Part of
@@ -42,7 +42,12 @@ def datetime_naive_local_to_naive_utc(d): dateutil.tz.tzutc()).replace(tzinfo = None) def datetime_utc_to_naive_local(d): - assert d.tzinfo == dateutil.tz.tzutc() + # We would have liked to assert that: + # assert d.tzinfo == dateutil.tz.tzutc() + # but a bug in dateutil makes it actually use tzlocal even when there's + # an explicit 'Z' suffiz in the timestamp string, as is the case for the + # MW API. So this is actually a UTC date, with tzinfo = tzlocal. + # https://github.com/dateutil/dateutil/issues/349 return d.astimezone(dateutil.tz.tzlocal()).replace(tzinfo = None) def get_page_revisions(wiki, title, start):
tests: fix flaky systest Avoid sharing dataset ids between separate test cases.
@@ -422,7 +422,7 @@ class TestBigQuery(unittest.TestCase): self.assertEqual(table.clustering_fields, ["user_email", "store_code"]) def test_delete_dataset_with_string(self): - dataset_id = _make_dataset_id("delete_table_true") + dataset_id = _make_dataset_id("delete_table_true_with_string") project = Config.CLIENT.project dataset_ref = bigquery.DatasetReference(project, dataset_id) retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref)) @@ -431,7 +431,7 @@ class TestBigQuery(unittest.TestCase): self.assertFalse(_dataset_exists(dataset_ref)) def test_delete_dataset_delete_contents_true(self): - dataset_id = _make_dataset_id("delete_table_true") + dataset_id = _make_dataset_id("delete_table_true_with_content") project = Config.CLIENT.project dataset_ref = bigquery.DatasetReference(project, dataset_id) dataset = retry_403(Config.CLIENT.create_dataset)(Dataset(dataset_ref))
Fixes duplicate gauge optimization (bug) in do_long_sequence_gst. This bug was created recently, when moving to Results objects which contain Estimates.
@@ -531,8 +531,9 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, tNxt = _time.time() profiler.add_time('do_long_sequence_gst: gauge optimization',tRef); tRef=tNxt + #Perform extra analysis if a bad fit was obtained badFitThreshold = advancedOptions.get('badFitThreshold',20) - if ret.estimates[estlbl].misfit_sigma() > badFitThreshold: #HARDCODED arbitrary threshold + if ret.estimates[estlbl].misfit_sigma() > badFitThreshold: onBadFit = advancedOptions.get('onBadFit',"scale data") # 'do nothing' if onBadFit in ("scale data","scale data and reopt") \ and parameters['weights'] is None: @@ -598,19 +599,6 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, else: raise ValueError("Invalid onBadFit value: %s" % onBadFit) - - #Do final gauge optimization to *final* iteration result only - if gaugeOptParams != False: - gaugeOptParams = gaugeOptParams.copy() #so we don't modify the caller's dict - if "targetGateset" not in gaugeOptParams: - gaugeOptParams["targetGateset"] = gs_target - - go_gs_final = _alg.gaugeopt_to_target(gs_lsgst_list[-1],**gaugeOptParams) - ret.estimates[estlbl].add_gaugeoptimized(gaugeOptParams, go_gs_final) - - tNxt = _time.time() - profiler.add_time('do_long_sequence_gst: gauge optimization',tRef); tRef=tNxt - profiler.add_time('do_long_sequence_gst: results initialization',tRef) return ret @@ -718,7 +706,7 @@ def do_stdpractice_gst(dataFilenameOrSet,targetGateFilenameOrSet, printer-1) #Gauge optimize to a variety of spam weights - for vSpam in [0]: #,1 + for vSpam in [0]: #,1 DEBUG for spamWt in [1e-4]: #,1e-2,1e-1 ret.estimates[parameterization].add_gaugeoptimized( {'itemWeights': {'gates':1, 'spam':spamWt},
Yield control to other greenthreads while processing trusted ports. process_trusted_ports() appeared to be greenthread-unfriendly, so if there are many trusted ports on a node, the openvswitch agent may "hang" for a significant time. This patch adds an explicit yield. Closes-Bug:
@@ -17,6 +17,7 @@ import collections import contextlib import copy +import eventlet import netaddr from neutron_lib.callbacks import events as callbacks_events from neutron_lib.callbacks import registry as callbacks_registry @@ -665,6 +666,8 @@ class OVSFirewallDriver(firewall.FirewallDriver): """Pass packets from these ports directly to ingress pipeline.""" for port_id in port_ids: self._initialize_egress_no_port_security(port_id) + # yield to let other greenthreads proceed + eventlet.sleep(0) def remove_trusted_ports(self, port_ids): for port_id in port_ids:
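A hedged, self-contained sketch of the cooperative-yield pattern this patch introduces; the per-port work here is a stand-in, not the agent's real flow-setup code:

```python
import eventlet

def heartbeat():
    for _ in range(3):
        print("other greenthread still responsive")
        eventlet.sleep(0.1)

def process_many_ports(port_ids):
    for port_id in port_ids:
        sum(range(200_000))   # stand-in for expensive per-port work
        eventlet.sleep(0)     # yield to the hub so other greenthreads can run

pool = eventlet.GreenPool()
pool.spawn(heartbeat)
pool.spawn(process_many_ports, range(50))
pool.waitall()
```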
Add uncovered test cases. We want to know that the overlay is added regardless of which page number you are requesting when a PDF contains pages that go beyond the printable area.
@@ -1143,11 +1143,13 @@ def test_preview_letter_template_precompiled_s3_error( @pytest.mark.parametrize( - "filetype, post_url, message", + "filetype, post_url, message, requested_page", [ - ('png', 'precompiled-preview.png', ""), - ('png', 'precompiled/overlay.png?page_number=1', "content-outside-printable-area"), - ('pdf', 'precompiled/overlay.pdf', "content-outside-printable-area") + ('png', 'precompiled-preview.png', "", ""), + ('png', 'precompiled/overlay.png?page_number=1', "content-outside-printable-area", "1"), + ('png', 'precompiled/overlay.png?page_number=2', "content-outside-printable-area", "2"), + ('png', 'precompiled/overlay.png?page_number=3', "content-outside-printable-area", "3"), + ('pdf', 'precompiled/overlay.pdf', "content-outside-printable-area", "") ] ) def test_preview_letter_template_precompiled_png_file_type_or_pdf_with_overlay( @@ -1158,7 +1160,8 @@ def test_preview_letter_template_precompiled_png_file_type_or_pdf_with_overlay( mocker, filetype, post_url, - message + message, + requested_page, ): template = create_template(sample_service, @@ -1180,8 +1183,8 @@ def test_preview_letter_template_precompiled_png_file_type_or_pdf_with_overlay( metadata = { "message": message, - "invalid_pages": "[1]", - "page_count": "1" + "invalid_pages": "[1,3]", + "page_count": "4" } mock_get_letter_pdf = mocker.patch( @@ -1194,12 +1197,13 @@ def test_preview_letter_template_precompiled_png_file_type_or_pdf_with_overlay( mock_post = request_mock.post( 'http://localhost/notifications-template-preview/{}'.format(post_url), content=expected_returned_content, - headers={'X-pdf-page-count': '1'}, + headers={'X-pdf-page-count': '4'}, status_code=200 ) response = admin_request.get( 'template.preview_letter_template_by_notification_id', + page=requested_page, service_id=notification.service_id, notification_id=notification.id, file_type=filetype,
Update index.rst: add the missing `.. raw:: html` directive so the product flow diagram renders when the rst is compiled.
@@ -71,6 +71,8 @@ Recipes encode the directions for how to sparsify a model into a simple, easily **Full Deep Sparse product flow:** +.. raw:: html + <img src="https://docs.neuralmagic.com/docs/source/sparsification/flow-overview.svg" width="960px"> Resources and Learning More
Code simplification. Thanks for the suggestion
@@ -71,12 +71,8 @@ def request_from_dict(d, spider=None): def _find_method(obj, func): - if obj: - try: - func.__func__ - except AttributeError: # func is not a instance method. Not supported. - pass - else: + # Only instance methods contain ``__func__`` + if obj and hasattr(func, '__func__'): members = inspect.getmembers(obj, predicate=inspect.ismethod) for name, obj_func in members: # We need to use __func__ to access the original