maintain PyMca compatibility This should allow using a singleton print preview shared by silx and PyMca5 plot widgets.
@@ -40,7 +40,7 @@ __date__ = "11/07/2017" _logger = logging.getLogger(__name__) -_logger.setLevel(logging.DEBUG) +# _logger.setLevel(logging.DEBUG) # TODO: # - automatic picture centering @@ -364,8 +364,16 @@ class PrintPreviewDialog(qt.QDialog): if commentPosition is None: commentPosition = "CENTER" - vb = viewBox if viewBox is not None else item.viewBoxF() - svgItem = _GraphicsSvgRectItem(vb, self.page) + if viewBox is None: + if hasattr(item, "_viewBox"): + # PyMca compatibility + viewBox = item._viewBox + else: + # by default, we cannot set the viewbox, + # take the original one + viewBox = item.viewBoxF() + + svgItem = _GraphicsSvgRectItem(viewBox, self.page) svgItem.setSvgRenderer(item) svgItem.setCacheMode(qt.QGraphicsItem.NoCache)
Store monitors by the host they came from and tidy up after receiving a batch This allows us to prune monitors which don't exist on the remote host any more if they've been deleted or renamed. Fixes
@@ -24,7 +24,7 @@ class SimpleMonitor: self.still_failing = [] # type: List[str] self.skipped = [] # type: List[str] self.warning = [] # type: List[str] - self.remote_monitors = {} # type: Dict[str, Monitor] + self.remote_monitors = {} # type: Dict[str, Dict[str, Monitor]] self.loggers = {} # type: Dict[str, Logger] self.alerters = {} # type: Dict[str, Alerter] @@ -195,8 +195,9 @@ class SimpleMonitor: logger._groups, ) try: - for key in list(self.remote_monitors.keys()): - logger.save_result2(key, self.remote_monitors[key]) + for host_monitors in self.remote_monitors.values(): + for (name, monitor) in host_monitors.items(): + logger.save_result2(name, monitor) except Exception: # pragma: no cover module_logger.exception("exception while logging remote monitors") logger.end_batch() @@ -232,19 +233,19 @@ class SimpleMonitor: ) except Exception: # pragma: no cover module_logger.exception("exception caught while alerting for %s", key) - for key in list(self.remote_monitors.keys()): - this_monitor = self.remote_monitors[key] + for host_monitors in self.remote_monitors.values(): + for (name, monitor) in host_monitors.items(): try: - if this_monitor.remote_alerting: - alerter.send_alert(key, this_monitor) + if monitor.remote_alerting: + alerter.send_alert(name, monitor) else: module_logger.debug( "not alerting for monitor %s as it doesn't want remote alerts", - key, + name, ) except Exception: # pragma: no cover module_logger.exception( - "exception caught while alerting for remote monitor %s", key + "exception caught while alerting for remote monitor %s", name ) def count_monitors(self) -> int: @@ -324,6 +325,8 @@ class SimpleMonitor: self.log_result(self.loggers[key]) def update_remote_monitor(self, data: Any, hostname: str) -> None: + """Process a list of monitors received from a remote host.""" + seen_monitors = [] # type: List[str] for (name, state) in data.items(): module_logger.info("updating remote monitor %s", name) if isinstance(state, dict): @@ -332,6 +335,7 @@ class SimpleMonitor: state["data"] ) self.remote_monitors[name] = remote_monitor + seen_monitors.append(name) except KeyError: module_logger.exception( "Could not add remote monitor from host %s; " @@ -346,6 +350,7 @@ class SimpleMonitor: module_logger.critical("Could not unpickle monitor %s", name) else: self.remote_monitors[name] = remote_monitor + seen_monitors.append(name) else: module_logger.critical( "Could not deserialize state of monitor %s. " @@ -354,6 +359,17 @@ class SimpleMonitor: "in the [monitor] section.", name, ) + self._trim_remote_monitors(hostname, seen_monitors) + + def _trim_remote_monitors(self, hostname: str, seen_monitors: List[str]) -> None: + """Remove remote monitors for a host which aren't in the given list.""" + host_monitors = self.remote_monitors[hostname] + for name in host_monitors.keys(): + if name not in seen_monitors: + module_logger.info( + "forgetting remote monitor %s from host %s", name, hostname + ) + del host_monitors[name] def run_loop(self) -> None: """Run the complete monitor loop once."""
Fix icons incorrectly displaying on the command palette Fixes
@@ -144,6 +144,7 @@ const extension: JupyterFrontEndPlugin<void> = { palette.addItem({ command: commandIDs.createNewPython, + args: { isPalette: true }, category: 'Python Editor' }); }
fix: autosuggest for callable classes Will I ever get this right?
@@ -298,7 +298,12 @@ class TestAutoSuggest(AutoSuggest): obj = getattr(base, current) if inspect.isclass(obj): obj = obj.__init__ - elif callable(obj) and not inspect.ismethod(obj) and not inspect.isfunction(obj): + elif ( + callable(obj) + and not hasattr(obj, "_autosuggest") + and not inspect.ismethod(obj) + and not inspect.isfunction(obj) + ): # object is a callable class instance obj = obj.__call__
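For context, a toy illustration (class name hypothetical, not from the project) of why a callable class instance slips past the `ismethod`/`isfunction` checks and has to be resolved to its `__call__`:

```python
import inspect

class Greeter:
    """A callable class instance: callable, but neither a method nor a function."""
    def __call__(self, name):
        return f"hi {name}"

obj = Greeter()
print(callable(obj), inspect.ismethod(obj), inspect.isfunction(obj))  # True False False
# so the suggester has to fall through to the instance's __call__ for its signature
print(inspect.signature(obj.__call__))  # (name)
```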
Fix LB heat template parameter name loadbalancing_protocol --> protocol Closes-Bug:
@@ -27,7 +27,7 @@ resources: type: Magnum::Optional::Neutron::LBaaS::Listener properties: loadbalancer: {get_resource: loadbalancer} - protocol: {get_param: loadbalancing_protocol} + protocol: {get_param: protocol} protocol_port: {get_param: port} pool: @@ -35,7 +35,7 @@ resources: properties: lb_algorithm: ROUND_ROBIN listener: {get_resource: listener} - protocol: {get_param: loadbalancing_protocol} + protocol: {get_param: protocol} monitor: type: Magnum::Optional::Neutron::LBaaS::HealthMonitor
convert_options() handles sensitive_keys This is so that the sensitive_keys can be specified in django settings.
@@ -46,6 +46,7 @@ def convert_options(settings, defaults=None): options.setdefault('list_max_length', getopt('list_max_length')) options.setdefault('site', getopt('site')) options.setdefault('processors', getopt('processors')) + options.setdefault('sanitize_keys', getopt('sanitize_keys')) options.setdefault('dsn', getopt('dsn', os.environ.get('SENTRY_DSN'))) options.setdefault('context', getopt('context')) options.setdefault('tags', getopt('tags'))
Adding some prints when hdbscan assertion fails Authors: - Corey J. Nolet (https://github.com/cjnolet) Approvers: - Dante Gama Dessavre (https://github.com/dantegd) URL:
@@ -108,6 +108,12 @@ class HDBSCANTest : public ::testing::TestWithParam<HDBSCANInputs<T, IdxT>> { score = MLCommon::Metrics::compute_adjusted_rand_index( out.get_labels(), labels_ref.data(), params.n_row, handle.get_stream()); + + if (score < 0.85) { + std::cout << "Test failed. score=" << score << std::endl; + raft::print_device_vector("actual labels", out.get_labels(), params.n_row, std::cout); + raft::print_device_vector("expected labels", labels_ref.data(), params.n_row, std::cout); + } } void SetUp() override { basicTest(); }
Update lpherbal.json amended sql
"numerator_columns": [ "SUM(actual_cost) AS numerator, " ], - "numerator_from": "{hscic}.normalised_prescribing_standard p LEFT JOIN (SELECT DISTINCT bnf_code FROM {richard.herbal_list}) r ON p.bnf_code = r.bnf_code", + "numerator_from": "{hscic}.normalised_prescribing_standard p LEFT JOIN (SELECT DISTINCT bnf_code FROM {richard}.herbal_list) r ON p.bnf_code = r.bnf_code", "numerator_where": [ "1 = 1" ],
Don't use pipenv on GitHub Actions. It should install dependencies faster
@@ -37,25 +37,26 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - name: Install pipenv + - name: Create virtualenv run: | - python -m pip install --upgrade pipenv wheel + python -m venv venv - uses: actions/cache@v2 id: cache-pip with: - path: ~/.local/share/virtualenvs + path: ./venv key: ${{ matrix.python-version }}-${{ hashFiles('requirements-test.txt') }}-${{ hashFiles('requirements.txt') }} - name: Install dependencies if: steps.cache-pip.outputs.cache-hit != 'true' run: | - pipenv install -r requirements-test.txt - pipenv install coveralls + source venv/bin/activate + pip install -r requirements-test.txt coveralls wheel env: PIP_USE_MIRRORS: true - name: Run tests and coverage run: | - pipenv run python manage.py check - pipenv run coverage run --source=$SOURCE_FOLDER -m py.test -rxXs + source venv/bin/activate + python manage.py check + coverage run --source=$SOURCE_FOLDER -m py.test -rxXs env: SOURCE_FOLDER: safe_transaction_service DJANGO_SETTINGS_MODULE: config.settings.test @@ -66,20 +67,21 @@ jobs: CELERY_BROKER_URL: redis://localhost:6379/0 ETHEREUM_MAINNET_NODE: ${{ secrets.ETHEREUM_MAINNET_NODE }} COINMARKETCAP_API_TOKEN: ${{ secrets.COINMARKETCAP_API_TOKEN }} - PIPENV_DONT_LOAD_ENV: 1 # By default pipenv loads .env file - name: Send results to coveralls if: ${{ env.COVERALLS_REPO_TOKEN }} - run: pipenv run coveralls + run: | + source venv/bin/activate + coveralls env: COVERALLS_REPO_TOKEN: ${{ secrets.COVERALLS_REPO_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # Required for coveralls docker-deploy: runs-on: ubuntu-latest needs: test-app + if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/develop' || startsWith(github.ref, 'refs/tags/') steps: - uses: actions/checkout@v2 - name: Dockerhub login - if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/develop' || startsWith(github.ref, 'refs/tags/') uses: docker/login-action@v1 with: username: ${{ secrets.DOCKER_USER }}
feat: add new "pyannote.audio.pipeline.utils.load_pretrained_pipeline" related to
@@ -242,19 +242,9 @@ def _generic(name: str, elif kind == 'pipeline': - params_yml, = pretrained_subdir.glob('*/*/params.yml') - - config_yml = params_yml.parents[2] / 'config.yml' - with open(config_yml, 'r') as fp: - config = yaml.load(fp, Loader=yaml.SafeLoader) - - from pyannote.core.utils.helper import get_class_by_name - pipeline_name = config['pipeline']['name'] - Pipeline = get_class_by_name(pipeline_name, - default_module_name='pyannote.audio.pipeline') - pipeline = Pipeline(**config['pipeline'].get('params', {})) - - return pipeline.load_params(params_yml) + from pyannote.audio.pipeline.utils import load_pretrained_pipeline + params_yml, *_ = pretrained_subdir.glob('*/*/params.yml') + return load_pretrained_pipeline(params_yml.parent) sad_dihard = functools.partial(_generic, 'sad_dihard') scd_dihard = functools.partial(_generic, 'scd_dihard')
Added ffmpeg download when importing moviepy.editor When the `FFMPEG_BINARY` environment variable is not set, the ffmpeg binary will be downloaded if needed when `import moviepy.editor` is called.
@@ -17,8 +17,15 @@ clip.preview(). # Note that these imports could have been performed in the __init__.py # file, but this would make the loading of moviepy slower. -# Clips +import os + +# Downloads ffmpeg if it isn't already installed +import imageio +# Checks to see if the user has set a place for their own version of ffmpeg +if os.getenv('FFMPEG_BINARY', 'ffmpeg-imageio') == 'ffmpeg-imageio': + imageio.plugins.ffmpeg.download() +# Clips from .video.io.VideoFileClip import VideoFileClip from .video.io.ImageSequenceClip import ImageSequenceClip from .video.io.downloader import download_webfile
In apply, set from_argument from the input variable. It was hard-coded to "data", which clashes with some processes
@@ -617,7 +617,7 @@ class ImageCollectionClient(ImageCollection): process_id = 'apply' arguments[data_argument] = \ { - "from_argument": "data" + "from_argument": data_argument } args = { 'data': {'from_node': self.node_id},
MRG, FIX: Fix scalp coupling index test that intermittently failed * Remove scalp coupling index test that intermittently failed This test was intermittently failing see The test was assuming that random noise would be correlated less than 0.5, but sometimes by chance this happens. * FIX: seed
@@ -50,9 +50,9 @@ def test_scalp_coupling_index(fname, fmt, tmpdir): raw._data[4] = new_data raw._data[5] = new_data * -1.0 # Set next two channels to be uncorrelated - # TODO: this might be a bad idea as sometimes random noise might correlate + rng = np.random.RandomState(0) raw._data[6] = new_data - raw._data[7] = np.random.rand(raw._data[0].shape[0]) + raw._data[7] = rng.rand(raw._data[0].shape[0]) # Check values sci = scalp_coupling_index(raw) assert_allclose(sci[0:6], [1, 1, 1, 1, -1, -1], atol=0.01)
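A minimal sketch of why seeding removes the flakiness: the "random" channel becomes identical on every run, so its correlation with the signal no longer varies between CI jobs (array size is illustrative):

```python
import numpy as np

a = np.random.RandomState(0).rand(1000)
b = np.random.RandomState(0).rand(1000)
print(np.array_equal(a, b))  # True: the same seed reproduces the same noise every run
```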
Use '.../' to denote parent path locations in runs list Prior to this, parent paths were listed explicitly, which could result in a lot of noise in the runs list. This approach uses three dots to simply note that the runs are outside the current cwd location.
@@ -322,7 +322,8 @@ def _format_guildfile_op(opref): relpath = os.path.relpath(opref.pkg_name, config.cwd()) if relpath[0] != '.': relpath = os.path.join('.', relpath) - return "%s/%s:%s" % (relpath, opref.model_name, opref.op_name) + path_desc = re.sub(r"(\.\.[/])+", ".../", relpath) + return "%s/%s:%s" % (path_desc, opref.model_name, opref.op_name) def _format_package_op(opref): return "%s/%s:%s" % (opref.pkg_name, opref.model_name, opref.op_name)
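A quick, standalone illustration of the substitution used in the diff above (the sample paths are hypothetical):

```python
import re

for relpath in ["./models", "../pkg", "../../nested/pkg"]:
    # collapse each run of "../" segments into a single ".../"
    print(relpath, "->", re.sub(r"(\.\.[/])+", ".../", relpath))
# ./models -> ./models
# ../pkg -> .../pkg
# ../../nested/pkg -> .../nested/pkg
```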
Fix get_data_parallel_src_rank (necessary for sparse attention in 3D parallel)
@@ -221,8 +221,16 @@ def get_data_parallel_src_rank(): """Calculate the global rank corresponding to a local rank zero in the data parallel group.""" global_rank = torch.distributed.get_rank() - local_world_size = get_data_parallel_world_size() - return (global_rank // local_world_size) * local_world_size + topo = get_topology() + if topo is None: + # we are just using model parallel + return global_rank % get_model_parallel_world_size() + else: + # We are using pipeline parallel + d = topo.get_axis_comm_lists('data') + for l in d: + if global_rank in l: + return l[0] def get_data_parallel_world_size():
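As a rough sketch of the lookup the pipeline-parallel branch performs, assuming a hypothetical 2-way model x 2-way data layout where the topology reports the following data-parallel communicator lists:

```python
# Hypothetical data-parallel groups, as returned by topo.get_axis_comm_lists('data')
data_comm_lists = [[0, 2], [1, 3]]

def data_parallel_src_rank(global_rank):
    # the "source" is the first rank of whichever data-parallel group we belong to
    for group in data_comm_lists:
        if global_rank in group:
            return group[0]

print([data_parallel_src_rank(r) for r in range(4)])  # [0, 1, 0, 1]
```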
[Docker] Add dialout group by default on login This helps run commands like nrfjprog inside Docker without sudo.
@@ -54,6 +54,7 @@ getent passwd "${CI_BUILD_UID}" || adduser --force-badname --gid "${CI_BUILD_GID --gecos "${CI_BUILD_USER} (generated by with_the_same_user script)" \ --disabled-password --home "${CI_BUILD_HOME}" --quiet "${CI_BUILD_USER}" usermod -a -G sudo -G tvm-venv "${CI_BUILD_USER}" +usermod -a -G sudo -G dialout "${CI_BUILD_USER}" # Add user to video group for ROCm if [[ ! -z "${ROCM_ENABLED-}" ]]; then
Update worksites-takeover.yaml fix for matchers type
@@ -13,6 +13,6 @@ requests: - "{{BaseURL}}" matchers: - - type: word + - type: regex regex: - "(?:Company Not Found|you&rsquo;re looking for doesn&rsquo;t exist)"
Implement dispersion index The dispersion index is the sum of squared distances between the members of each cluster.
@@ -217,7 +217,7 @@ def _cluster_quality_crossvalidation(data, clusters, clustering): cv = var warnings.warn("Number of columns in data (" + str(n_cols) + ") is smaller " "than the number of cluster (" + str(len(clusters)) + ") plus 1. " - "Returnin the residual noise instead.") + "Returning the residual noise instead.") # cv = var * (n_cols - 1)**2 / len(clusters) return cv @@ -256,6 +256,25 @@ def _cluster_quality_gev(data, clusters, clustering, sd=None, n_microstates=4): return gev, gev_all +def _cluster_quality_dispersion(data, clusters, clustering, n_microstates=4): + """Sumsquares of the distances between samples within each clusters. + An error measure for a n_microstate cluster where the lower the better. + Can be used to compare and find the optimal number of clusters. + """ + + n_rows, n_cols = data.shape # n_sample, n_channel + dispersion_state = np.zeros(n_microstates) + for state in range(n_microstates): + idx = (clustering == state) + data_state = data[idx, :] + state_size = len(data_state) # number of samples in this cluster + # pair-wise distance between members of the same cluster + distance = scipy.spatial.distance.cdist(data_state, data_state) + # sumsquares of distances + dispersion_state[state] = 0.5 * np.sum(distance**2) / state_size + + dispersion = np.sum(dispersion_state) + return dispersion def _correlate_vectors(A, B, axis=0): """Compute pairwise correlation of multiple pairs of vectors.
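For reference, a self-contained sketch of the same quantity on toy data (the numbers and the two-cluster layout are illustrative only):

```python
import numpy as np
import scipy.spatial

data = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0],
                 [5.0, 5.0], [6.0, 5.0], [5.0, 6.0]])
clustering = np.array([0, 0, 0, 1, 1, 1])

dispersion = 0.0
for state in range(2):
    members = data[clustering == state]
    distance = scipy.spatial.distance.cdist(members, members)
    # 0.5 because every pair appears twice in the full distance matrix
    dispersion += 0.5 * np.sum(distance ** 2) / len(members)
print(dispersion)  # sum of per-cluster, size-normalised squared pairwise distances
```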
Fix `JitTest.ADFormulas` intermittent failures Summary: Clamp input tensor values to [-3, 3] to limit how small the `tanh` gradient can get Pull Request resolved: Test Plan: CI + `bin/test_jit --gtest_filter=JitTest.ADFormulas --gtest_repeat=60000 --gtest_break_on_failure`
@@ -28,8 +28,8 @@ using var_meta_list = std::vector<var_meta_type>; using test_fn_type = std::function<variable_list(const variable_list&)>; struct ADTestSpec { - ADTestSpec(const char* name, var_meta_list input_meta, test_fn_type test_fn) - : name(name), input_meta(input_meta), test_fn(test_fn) {} + ADTestSpec(const char* name, var_meta_list input_meta, test_fn_type test_fn, float clampMax = -1.0f) + : name(name), input_meta(input_meta), test_fn(test_fn), clampMax(clampMax) {} variable_list operator()(const variable_list& inputs) const { return test_fn(inputs); @@ -38,6 +38,10 @@ struct ADTestSpec { std::vector<Variable> make_vars() const { std::vector<Variable> out; for (const auto& m : input_meta) { + if (clampMax > 0.0f) { + out.push_back(torch::randn(m, at::requires_grad(true)).clamp(-clampMax, clampMax)); + continue; + } out.push_back(torch::randn(m, at::requires_grad(true))); } return out; @@ -46,6 +50,7 @@ struct ADTestSpec { const char* name; var_meta_list input_meta; test_fn_type test_fn; + float clampMax; }; variable_list get_grad_outputs(const variable_list& vars) { @@ -88,9 +93,11 @@ void testADFormulas() { {"sigmoid", unary_pointwise, [](const VL& v) -> VL { return {v[0].sigmoid()}; }}, + // Clamp tanh input tensor values to [-3, 3] + // to set a minimum on gradient absolute values {"tanh", unary_pointwise, - [](const VL& v) -> VL { return {v[0].tanh()}; }}, + [](const VL& v) -> VL { return {v[0].tanh()}; }, 3.0f}, {"t", unary_pointwise_2d, [](const VL& v) -> VL { return {v[0].t()}; }}, {"view", unary_pointwise_2d,
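The clamp works because the tanh derivative, 1 - tanh(x)^2, decays rapidly with |x|; a quick numerical check of the bound at the clamp edge (assuming PyTorch's Python API is available):

```python
import torch

x = torch.tensor(3.0, requires_grad=True)
torch.tanh(x).backward()
print(x.grad)  # ~0.0099, so clamping inputs to [-3, 3] keeps gradients above ~1e-2
```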
Install charmcraft 1.0.0 Install charmcraft 1.0.0 through pip for now until we move to pytest-operator tests.
@@ -40,11 +40,13 @@ jobs: - name: Install dependencies run: | set -eux + sudo apt update + sudo apt install -y python3-pip sudo snap install charm --classic sudo snap install juju --classic sudo snap install juju-helpers --classic sudo snap install juju-wait --classic - sudo snap install charmcraft --classic + sudo pip3 install charmcraft==1.0.0 - name: Build Docker images run: |
ceph-mon: change command to see if rbd exists The previous command was hanging, see this issue:
- include: set_osd_pool_default_pg_num.yml - name: test if rbd exists - command: ceph --cluster {{ cluster }} osd pool stats rbd + command: ceph --cluster {{ cluster }} osd pool get rbd size changed_when: false failed_when: false register: rbd_pool_exist
Test for removing core when the other one is mounted and core numeration is custom - each core ID starts with the same digit.
@@ -14,6 +14,7 @@ from test_utils.output import CmdException from test_utils.size import Size, Unit mount_point = "/mnt/cas" +cores_amount = 3 @pytest.mark.require_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) @@ -54,3 +55,44 @@ def test_remove_core_when_other_mounted_auto_numeration(): except CmdException as exc: TestRun.fail(f"Cannot remove the unmounted core.\n{exc}") + [email protected]_disk("cache", DiskTypeSet([DiskType.optane, DiskType.nand])) [email protected]_disk("core", DiskTypeLowerThan("cache")) +def test_remove_core_when_other_mounted_custom_numeration(): + """ + title: | + Test for removing one core from the cache when the other core is mounted. + Cores have custom numeration, starting with the same digit. + description: | + Test of the ability to remove the unmounted core from the cache when the other core + is mounted and its ID starts with the same digit. + pass_criteria: + - No system crash. + - Removing unmounted core finished with success. + """ + with TestRun.step("Prepare devices."): + cache_device = TestRun.disks['cache'] + cache_device.create_partitions([Size(50, Unit.MebiByte)]) + cache_part = cache_device.partitions[0] + core_device = TestRun.disks['core'] + core_device.create_partitions([Size(200, Unit.MebiByte)] * cores_amount) + + with TestRun.step("Start cache."): + cache = casadm.start_cache(cache_part, force=True) + + with TestRun.step("Add cores to cache and mount them except the first one."): + random_prefix = random.randint(1, 9) + random_interfix = random.randint(1, 9) + + free_core = cache.add_core(core_device.partitions[0], random_prefix) + mounted_cores = [] + for i, part in enumerate(core_device.partitions[1:]): + part.create_filesystem(Filesystem.xfs) + mounted_cores.append(cache.add_core(part, f"{random_prefix}{random_interfix}{i}")) + mounted_cores[i].mount(f"{mount_point}{cache.cache_id}-{mounted_cores[i].core_id}") + + with TestRun.step("Remove the unmounted core."): + try: + cache.remove_core(free_core.core_id) + except CmdException as exc: + TestRun.fail(f"Cannot remove the unmounted core.\n{exc}")
salt.utils.templates Add docstrings to some functions Some functions in salt/utils/templates.py were undocumented.
@@ -212,6 +212,19 @@ def generate_sls_context(tmplpath, sls): def wrap_tmpl_func(render_str): + """ + Each template processing function below, ``render_*_tmpl``, is wrapped by + ``render_tmpl`` before being inserted into the ``TEMPLATE_REGISTRY``. Some + actions are taken here that are common to all renderers. Perhaps a + standard decorator construct would have been more legible. + + :param function render_str: Template rendering function to be wrapped. + Each function is responsible for rendering the source data for its + repective template language. + + :returns function render_tmpl: The wrapper function + """ + def render_tmpl( tmplsrc, from_str=False, to_str=False, context=None, tmplpath=None, **kws ): @@ -378,6 +391,18 @@ def _get_jinja_error(trace, context=None): def render_jinja_tmpl(tmplstr, context, tmplpath=None): + """ + Render a Jinja template. + + :param str tmplstr: A string containing the source to be rendered. + + :param dict context: Any additional context data used by the renderer. + + :param str tmplpath: Base path from which ``tmplstr`` may load additional + template files. + + :returns str: The string rendered by the template. + """ opts = context["opts"] saltenv = context["saltenv"] loader = None @@ -557,6 +582,18 @@ def render_jinja_tmpl(tmplstr, context, tmplpath=None): # pylint: disable=3rd-party-module-not-gated def render_mako_tmpl(tmplstr, context, tmplpath=None): + """ + Render a Mako template. + + :param str tmplstr: A string containing the source to be rendered. + + :param dict context: Any additional context data used by the renderer. + + :param str tmplpath: Base path from which ``tmplstr`` may load additional + template files. + + :returns str: The string rendered by the template. + """ import mako.exceptions # pylint: disable=no-name-in-module from mako.template import Template # pylint: disable=no-name-in-module from salt.utils.mako import SaltMakoTemplateLookup @@ -585,6 +622,17 @@ def render_mako_tmpl(tmplstr, context, tmplpath=None): def render_wempy_tmpl(tmplstr, context, tmplpath=None): + """ + Render a Wempy template. + + :param str tmplstr: A string containing the source to be rendered. + + :param dict context: Any additional context data used by the renderer. + + :param str tmplpath: Unused. + + :returns str: The string rendered by the template. + """ from wemplate.wemplate import TemplateParser as Template return Template(tmplstr).render(**context)
Remove warning around SSL option in email Alerter Based on feedback from user
@@ -35,8 +35,6 @@ class EMailAlerter(Alerter): Optional[str], self.get_config_option("ssl", allowed_values=["starttls", "yes", None]), ) - if self.ssl == "yes": - self.alerter_logger.warning("ssl=yes for email alerter is untested") self.support_catchup = True
Update setup_relative_calculation.py fix unit check
@@ -352,7 +352,7 @@ def run_setup(setup_options, serialize_systems=True, build_samplers=True): forcefield_files = setup_options['forcefield_files'] if "timestep" in setup_options: - if isinstance(timestep, float): + if isinstance(setup_options['timestep'], float): timestep = setup_options['timestep'] * unit.femtoseconds else: timestep = setup_options['timestep']
Fix typo (I know, thank me later)
@@ -380,7 +380,7 @@ WELCOME_NOTIFICATION = app.packets.notification( OFFLINE_NOTIFICATION = app.packets.notification( "The server is currently running in offline mode; " - "some features will be unavailble.", + "some features will be unavailable.", ) DELTA_90_DAYS = timedelta(days=90)
validate: prevent from installing OSD on same disk as the OS This commit adds a validation task to prevent from installing an OSD on the same disk as the OS. Closes:
--- +- name: find device used for operating system + command: findmnt -v -n -T / -o SOURCE + changed_when: false + register: root_device + +- name: resolve root_device + command: "readlink -f {{ root_device.stdout }}" + changed_when: false + register: _root_device + +- name: set_fact root_device + set_fact: + root_device: "{{ _root_device.stdout }}" + +- name: lvm_volumes variable's tasks related + when: + - lvm_volumes is defined + - lvm_volumes | length > 0 + block: + - name: resolve devices in lvm_volumes + command: "readlink -f {{ item.data }}" + changed_when: false + register: _lvm_volumes_data_devices + with_items: "{{ lvm_volumes }}" + when: + - item.data_vg is undefined + + - name: set_fact lvm_volumes_data_devices + set_fact: + lvm_volumes_data_devices: "{{ lvm_volumes_data_devices | default([]) + [item.stdout] }}" + with_items: "{{ _lvm_volumes_data_devices.results }}" + when: + - item.skipped is undefined + +- name: devices variable's tasks related + when: + - devices is defined + - devices | length > 0 + block: + - name: resolve devices in devices + command: "readlink -f {{ item }}" + changed_when: false + register: devices_resolved + with_items: "{{ devices }}" + + - name: set_fact devices_resolved + set_fact: + _devices: "{{ _devices | default([]) + [item.stdout] }}" + with_items: "{{ devices_resolved.results }}" + +- name: fail if root_device is passed in lvm_volumes or devices + fail: + msg: "{{ root_device }} found in either lvm_volumes or devices variable" + when: root_device in lvm_volumes_data_devices | default([]) or root_device in _devices | default([]) + - name: check no gpt header is present when osd scenario is lvm/lvm-batch block: - name: read information about the devices
Add flag to xcodebuild to disable manifest sandbox We're seeing a collision in some sandbox rules. Since we're using our own sandbox in the source compat suite, we can disable the one coming from package support.
@@ -317,7 +317,8 @@ def dispatch(root_path, repo, action, swiftc, swift_version, action['action'] ) - initial_xcodebuild_flags = ['SWIFT_EXEC=%s' % swiftc] + initial_xcodebuild_flags = ['SWIFT_EXEC=%s' % swiftc, + '-IDEPackageSupportDisableManifestSandbox=YES'] if build_config == 'debug': initial_xcodebuild_flags += ['-configuration', 'Debug']
Fix Rally task [NovaServers.list_servers] failing with network problems When we get 'nics' information from the Rally task config 'contexts', the type of self.config["nics"] is 'tuple', but we need it to be of 'list' type. Closes-Bug:
@@ -91,7 +91,7 @@ class ServerGenerator(context.Context): if self.config.get("nics"): if isinstance(self.config["nics"][0], dict): # it is a format that Nova API expects - kwargs["nics"] = self.config["nics"] + kwargs["nics"] = list(self.config["nics"]) else: kwargs["nics"] = [{"net-id": nic} for nic in self.config["nics"]]
hotspots: Fix resizing of ``Got It!`` box. This dynamically resizes the box to fit the variable text size present with different languages. At the same time, it adds padding to the English version to make it look similar to previous versions. Fixes
position: absolute; bottom: 15px; right: 15px; - width: 80px; - height: 35px; + max-width: 125px; + max-height: 70px; border: none; color: hsl(0, 0%, 100%); background-color: hsl(164, 44%, 47%); border-radius: 4px; + white-space: normal; + padding: 7px 20px; } /* arrows */
Update Orbit_Screen.kv Updated code to use new yellow icon.
keep_ratio: True Image: id: OrbitISStiny - source: './imgs/orbit/ISSmimicLogoPartsGlowingISSblue.png' + source: './imgs/orbit/OrbitYellowISSicon.png' keep_ratio: False allow_stretch: True size_hint: 0.07,0.07
Reuse report_type ### Problem We extract coverage_subsystem.options.report into a local variable earlier in the function/rule. <img width="799" alt="Screen Shot 2020-06-05 at 1 28 23 PM" src="https://user-images.githubusercontent.com/1268088/83919892-7156bc80-a730-11ea-8aa2-84676ffe5cca.png"> ### Solution Reuse existing variable. ### Result Readable code/less code
@@ -376,9 +376,9 @@ async def generate_coverage_report( report_dir = PurePath(coverage_subsystem.options.report_output_path) report_file: Optional[PurePath] = None - if coverage_subsystem.options.report == ReportType.HTML: + if report_type == ReportType.HTML: report_file = report_dir / "htmlcov" / "index.html" - elif coverage_subsystem.options.report == ReportType.XML: + elif report_type == ReportType.XML: report_file = report_dir / "coverage.xml" return FilesystemCoverageReport(
Modify the class names in the error messages Remove the syntax to highlight the class names in the error messages.
@@ -262,15 +262,14 @@ class Flow(on.Edge): if self.investment and self.nonconvex: raise WrongOptionCombinationError( "Investment flows cannot be combined with " - + "nonconvex flows using the class " - + "<class 'solph.flows.Flow'>! Please consider using" - + " <class 'solph.flows.NonConvexInvestFlow'>" + + "nonconvex flows using the general Flow class! " + + "Please consider using the NonConvexInvestFlow class." ) else: warn( - "You are using the class <class 'solph.flows.Flow'> " - "with option `allow_nonconvex_investment`, please consider " - "using <class 'solph.flows.NonConvexInvestFlow'> instead.", + "You are using the general Flow class with the option " + "`allow_nonconvex_investment`, please consider using" + "NonConvexInvestFlow class instead.", FlowOptionWarning, )
Add missing Ambari versions With the new validation system, get_image_arguments() has to cover all available image versions in order to allow cluster creation and validation with them all. Story:
@@ -270,7 +270,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase): resource_roots=['plugins/ambari/resources/images']) def get_image_arguments(self, hadoop_version): - if hadoop_version != '2.4': + if hadoop_version not in self.get_versions(): return NotImplemented return self.validator.get_argument_list()
Update README.md Updated module list depending on psutil and netifaces
@@ -83,8 +83,8 @@ This will create a file called `debug.log` in the same directory as the executab Modules and commandline utilities are only required for modules, the core itself has no external dependencies at all. -* psutil (for the modules 'cpu', 'memory') -* netifaces (for the module 'nic') +* psutil (for the modules 'cpu', 'memory', 'traffic') +* netifaces (for the module 'nic', 'traffic') * requests (for the module 'weather') # Required commandline utilities
Set CI timeout to 20m to avoid failure Workaround for slow blender279 download that times out on CircleCI
@@ -14,6 +14,7 @@ jobs: - checkout - run: name: Install Blender + no_output_timeout: 20m command: | FILTERS=($TEST_FILTERS) FILTER=${FILTERS[$CIRCLE_NODE_INDEX]}
Small fix to map/data display Update to get the value of `aria-label` (the jurisdiction name) for use as heading
this.hoveredSlug = null; }, jurNameForSlug(slug) { - return document.getElementById(slug).ariaLabel; + return document.getElementById(slug).getAttribute('aria-label'); }, caseCount() { return (this.hoveredSlug ? this.jurData[this.hoveredSlug].case_count : this.total_cases).toLocaleString()
Update mnist_cnn_features_level_fgsm.py Edit the example description
# -*- coding: utf-8 -*- -"""Trains a convolutional neural network on the MNIST dataset, then attacks it with the FGSM attack.""" +"""Trains a convolutional neural network on the MNIST dataset, then attacks one of the inner layers with the FGSM attack.""" from __future__ import absolute_import, division, print_function, unicode_literals import sys
Add back scikit-image This is a no-op since this package is already part of the base image. However, we prefer to list it here explicitly. We also already have a smoke test for this package.
@@ -103,6 +103,7 @@ RUN apt-get install -y libfreetype6-dev && \ vader_lexicon verbnet webtext word2vec_sample wordnet wordnet_ic words ycoe && \ # Stop-words pip install stop-words && \ + pip install scikit-image && \ /tmp/clean-layer.sh RUN pip install ibis-framework && \
Change queue instances type from c4.xlarge to c5.large The reason for this change is that not all regions support c4.xlarge; C5 family support is broader. What does this change solve? It allows running the test in regions where C4 isn't present
@@ -20,14 +20,14 @@ compute_resource_settings = ondemand_i1,ondemand_i2 compute_resource_settings = ondemand_i3,ondemand_i4 [compute_resource ondemand_i1] -instance_type = c4.xlarge +instance_type = c5.large [compute_resource ondemand_i2] instance_type = {{ instance }} min_count = 1 [compute_resource ondemand_i3] -instance_type = c4.xlarge +instance_type = c5.large [compute_resource ondemand_i4] instance_type = {{ instance }}
Restrict PHP parsing to .php and .inc files This prevents situations where a directory-traversal path is forwarded from uwsgi to the PHP engine, which attempts to render the file. As an example, this could leak the contents of a flag.txt file located above `webroot/` in the deployed problem directory.
@@ -346,6 +346,6 @@ class PHPApp(WebService): """ web_root = join(self.directory, self.php_root) - self.start_cmd = "uwsgi --protocol=http --plugin php -p {1} --force-cwd {0} --php-allowed-docroot {2} --http-socket-modifier1 14 --php-index index.html --php-index index.php --check-static {0} --static-skip-ext php --logto /dev/null".format( + self.start_cmd = "uwsgi --protocol=http --plugin php -p {1} --force-cwd {0} --php-allowed-docroot {2} --http-socket-modifier1 14 --php-index index.html --php-index index.php --check-static {0} --static-skip-ext php --logto /dev/null --php-allowed-ext .php --php-allowed-ext .inc".format( web_root, self.num_workers, self.directory )
Easy Debug for API Send Request Added Debug Log for Non JSON Result
@@ -528,6 +528,9 @@ class API(object): "Error checking for `feedback_required`, " "response text is not JSON" ) + self.logger.error('Full Response JSON: {}'.format(str(response))) + try: self.logger.error('Response Text: {}'.format(str(response.text))) + except: pass if response.status_code == 429: sleep_minutes = 5
Update viewpoint-system-status.yaml updated exposures -> exposure
@@ -6,7 +6,7 @@ info: severity: low metadata: shodan-query: http.title:"ViewPoint System Status" - tags: status,exposures,viewpoint + tags: status,exposure,viewpoint requests: - method: GET
lint: Exclude `zerver/views/development/` from i18n lint rules. This is consistent with how we handle JsonableError and friends; it doesn't make sense for translators to spend time on strings only visible in a development environment.
@@ -328,8 +328,8 @@ python_rules = RuleList( }, { "pattern": r"""\Wjson_error\(['"].+[),]$""", - "exclude": {"zerver/tests"}, - "description": "Argument to json_error should a literal string enclosed by _()", + "exclude": {"zerver/tests", "zerver/views/development/"}, + "description": "Argument to json_error should be a literal string enclosed by _()", }, # To avoid JsonableError(_variable) and JsonableError(_(variable)) {
Handle the "spatial" attribute in onnx BatchNormalization op Summary: If we have this "spatial" attribute and its value equals 1, we can just remove this attribute and convert this op to caffe2 SpatialBN. Pull Request resolved:
@@ -952,17 +952,21 @@ Caffe2Ops Caffe2Backend::CreateSlice( Caffe2Ops Caffe2Backend::CreateBatchNormalization( OnnxNode* onnx_node, const ConversionContext& ctx) { - if (ctx.opset_version() < 6) { auto& attributes = onnx_node->attributes; + + if (ctx.opset_version() < 6) { attributes.remove("consumed_inputs"); } if (ctx.opset_version() >= 7) { - auto& attributes = onnx_node->attributes; auto* attr = attributes.AddRewrittenAttribute("is_test"); attr->set_i(1); } + if (attributes.HasAttribute("spatial") && attributes.get<int64_t>("spatial") == 1) { + attributes.remove("spatial"); + } + return CommonOnnxNodeToCaffe2Ops(onnx_node, ctx); }
Fix `loading_data_recipe` links Fixes
@@ -14,9 +14,9 @@ PyTorch offer built-in high-quality datasets for you to use in `torch.utils.data.Dataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset>`__. These datasets are currently available in: -* `torchvision <https://pytorch.org/docs/stable/torchvision/datasets.html>`__ -* `torchaudio <https://pytorch.org/audio/datasets.html>`__ -* `torchtext <https://pytorch.org/text/datasets.html>`__ +* `torchvision <https://pytorch.org/vision/stable/datasets.html>`__ +* `torchaudio <https://pytorch.org/audio/stable/datasets.html>`__ +* `torchtext <https://pytorch.org/text/stable/datasets.html>`__ with more to come. Using the Yesno dataset from ``torchaudio.datasets.YESNO``, we will
Fix date_time_format in simplereport HG-- branch : feature/microservices
@@ -331,7 +331,7 @@ class TableColumn(ReportNode): :param f: :return: """ - return DateFormat(f).format(config.date_format) + return DateFormat(f).format(config.date_time_formats.date_format) def f_time(self, f): """ @@ -339,7 +339,7 @@ class TableColumn(ReportNode): :param f: :return: """ - return DateFormat(f).format(config.time_format) + return DateFormat(f).format(config.date_time_formats.time_format) def f_datetime(self, f): """ @@ -347,7 +347,7 @@ class TableColumn(ReportNode): :param f: :return: """ - return DateFormat(f).format(config.datetime_format) + return DateFormat(f).format(config.date_time_formats.datetime_format) def f_size(self, f): """
[IMPR] Improvements for askForHints (3) decrease nested code
@@ -1170,12 +1170,14 @@ class Subject(interwiki_graph.Subject): if not self.workonme: # we don't work on it anyway return - if ((self.untranslated or self.conf.askhints) + if not ( + (self.untranslated or self.conf.askhints) and not self.hintsAsked and self.originPage and self.originPage.exists() and not self.originPage.isRedirectPage() and not self.originPage.isCategoryRedirect()): + return self.hintsAsked = True if not self.conf.untranslated:
Add check in configuration parser for expected scores This is useful because we can fail early rather than having to wait until after the model is trained.
@@ -35,6 +35,7 @@ from rsmtool.utils import (DEFAULTS, FIELD_NAME_MAPPING, is_skll_model) +from skll import Learner from skll.metrics import SCORERS if HAS_RSMEXTRA: @@ -816,8 +817,9 @@ class ConfigurationParser: "to specify the name of the column which " "contains candidate IDs.") - # 8. Check that if "skll_objective" is specified, it's one of the metrics that SKLL - # allows for AND that it is specified for a SKLL model and _not_ a built-in + # 8. Check that if "skll_objective" is specified, it's + # one of the metrics that SKLL allows for AND that it is + # specified for a SKLL model and _not_ a built-in # linear regression model if new_config['skll_objective']: if not is_skll_model(new_config['model']): @@ -828,13 +830,26 @@ class ConfigurationParser: raise ValueError("Invalid SKLL objective. Please refer to the SKLL " "documentation and choose a valid tuning objective.") - # 9. Check the fields that requires rsmextra + # 9. Check that if we are running rsmtool to ask for + # expected scores then the SKLL model type must actually + # support probabilistic classification. If it's not a SKLL + # model at all, we just treat it as a LinearRegression model + # which is basically what they all are in the end. + if context == 'rsmtool' and new_config['predict_expected_scores']: + model_name = new_config['model'] + dummy_learner = Learner(new_config['model']) if is_skll_model(model_name) else Learner('LinearRegression') + if not hasattr(dummy_learner.model_type, 'predict_proba'): + raise ValueError("{} does not support expected scores " + "since it is not a probablistic classifier.".format(model_name)) + del dummy_learner + + # 10. Check the fields that requires rsmextra if not HAS_RSMEXTRA: if new_config['special_sections']: raise ValueError("Special sections are only available to ETS" " users by installing the rsmextra package.") - # 10. Raise a warning if we are specifiying a feature file but also + # 11. Raise a warning if we are specifiying a feature file but also # telling the system to automatically select transformations if new_config['features'] and new_config['select_transformations']: logging.warning("You specified a feature file but also set " @@ -844,7 +859,7 @@ class ConfigurationParser: "the automatically selected transformations " "and signs.") - # 11. Clean up config dict to keep only context-specific fields + # 12. Clean up config dict to keep only context-specific fields context_relevant_fields = (CHECK_FIELDS[context]['optional'] + CHECK_FIELDS[context]['required'])
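The duck-typing check in the new rule boils down to something like the following (plain scikit-learn estimator classes used for illustration):

```python
from sklearn.linear_model import LinearRegression, LogisticRegression

# expected scores need a probabilistic classifier, i.e. a model type with predict_proba
print(hasattr(LinearRegression, "predict_proba"))    # False -> reject the config
print(hasattr(LogisticRegression, "predict_proba"))  # True  -> expected scores allowed
```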
added test for signal_scaling added test_first_level_models_with_no_signal_scaling()
@@ -382,3 +382,28 @@ def test_first_level_models_from_bids(): # can arise when variant or space is present and not specified assert_raises(ValueError, first_level_models_from_bids, bids_path, 'main', 'T1w') # variant not specified + +def test_first_level_models_with_no_signal_scaling(): + """ + test to ensure that the FirstLevelModel works correctly with a + signal_scaling==False. In particular, that derived theta are correct for a + constant design matrix with a single valued fmri image + """ + shapes, rk = [(3, 1, 1, 2)], 1 + fmri_data = [] + design_matrices = [] + design_matrices.append( pd.DataFrame(np.ones((shapes[0][-1], rk) ), + columns=list('abcdefghijklmnopqrstuvwxyz')[:rk])) + first_level_model = FirstLevelModel(mask=False, noise_model='ols', signal_scaling=False) + fmri_data.append(Nifti1Image( np.zeros((1,1,1,2)) + 6, np.eye(4))) + + first_level_model.fit(fmri_data, design_matrices=design_matrices) + # trivial test of signal_scaling value + assert_true(first_level_model.signal_scaling==False) + # assert that our design matrix has one constant + assert_true(first_level_model.design_matrices_[0].equals( + pd.DataFrame([1.0, 1.0], columns=['a']))) + # assert that we only have one theta as there is only on voxel in our image + assert_true(first_level_model.results_[0][0].theta.shape == (1,1)) + # assert that the theta is equal to the one voxel value + assert_almost_equal(first_level_model.results_[0][0].theta[0,0],6.0,2)
SupplierPart - Improve API The default DRF behaviour throws errors if the supplied query params do not conform to the limit_choices_to field. This is not optimal! We don't want to have to handle these cases. Do the filtering ourselves!
@@ -104,12 +104,41 @@ class SupplierPartList(generics.ListCreateAPIView): queryset = super().get_queryset() + return queryset + + def filter_queryset(self, queryset): + """ + Custom filtering for the queryset. + """ + + queryset = super().filter_queryset(queryset) + + params = self.request.query_params + + # Filter by manufacturer + manufacturer = params.get('manufacturer', None) + + if manufacturer is not None: + queryset = queryset.filter(manufacturer=manufacturer) + + # Filter by supplier + supplier = params.get('supplier', None) + + if supplier is not None: + queryset = queryset.filter(supplier=supplier) + # Filter by EITHER manufacturer or supplier - company = self.request.query_params.get('company', None) + company = params.get('company', None) if company is not None: queryset = queryset.filter(Q(manufacturer=company) | Q(supplier=company)) + # Filter by parent part? + part = params.get('part', None) + + if part is not None: + queryset = queryset.filter(part=part) + return queryset def get_serializer(self, *args, **kwargs): @@ -147,9 +176,6 @@ class SupplierPartList(generics.ListCreateAPIView): ] filter_fields = [ - 'part', - 'supplier', - 'manufacturer', ] search_fields = [
Docs fix Fixed error in Migration FAQ.
@@ -68,6 +68,9 @@ Also you can bind your own filters for using as keyword arguments: class MyFilter(BoundFilter): key = 'is_admin' + def __init__(self, is_admin): + pass + async def check(self, message: types.Message): member = await bot.get_chat_member(message.chat.id, message.from_user.id) return member.is_admin()
Increase connection timeout for object controller tests Intermittent test failures due to timeout of *mocked* connections have been reported, so increase the connection timeout in the test app. Closes-Bug:
@@ -169,8 +169,10 @@ class BaseObjectControllerMixin(object): self.logger = debug_logger('proxy-server') self.logger.thread_locals = ('txn1', '127.0.0.2') + # increase connection timeout to avoid intermittent failures + conf = {'conn_timeout': 1.0} self.app = PatchedObjControllerApp( - None, FakeMemcache(), account_ring=FakeRing(), + conf, FakeMemcache(), account_ring=FakeRing(), container_ring=FakeRing(), logger=self.logger) # you can over-ride the container_info just by setting it on the app
Remove the 'supplier_part' field when first creating a Part object As the Part does not yet exist, there are no matching SupplierPart objects
@@ -91,6 +91,15 @@ class PartCreate(AjaxCreateView): return context + def get_form(self): + form = super(AjaxCreateView, self).get_form() + + # Hide the default_supplier field (there are no matching supplier parts yet!) + #form.fields['default_supplier'].widget.attrs['hidden'] = True + del form.fields['default_supplier'] + + return form + # Pre-fill the category field if a valid category is provided def get_initial(self): """ Get initial data for the new Part object:
don't error when the mod is unknown fixes
@@ -117,7 +117,7 @@ class Infractions(BaseCog): infraction.delete_instance() await MessageUtils.send_to(ctx, "YES", "inf_delete_deleted", id=infraction.id) GearbotLogging.log_key(ctx.guild.id, 'inf_delete_log', id=infraction.id, target=Utils.clean_user(target), - target_id=target.id, mod=Utils.clean_user(mod), mod_id=mod.id, reason=reason, + target_id=target.id, mod=Utils.clean_user(mod), mod_id=mod.id if mod is not None else 0, reason=reason, user=Utils.clean_user(ctx.author), user_id=ctx.author.id) InfractionUtils.clear_cache(ctx.guild.id)
Update README.rst update dependency install instructions
@@ -18,9 +18,21 @@ and then run it:: This will pop up a GUI window. -If you have cloned the toga repository, navigate to the demo directory and run it like this:: +If you have cloned the toga repository, install the dependent packages in your virtualenv:: - $ pip install toga + $ cd toga + $ pip install -e src/core + $ pip install -e src/dummy + +Then install the platform specific code:: + + $ pip install -e src/cocoa # macOS + $ pip install -e src/gtk # Linux + $ pip install -e src/winforms # Windows + +Finally navigate to the demo directory and run the application:: + + $ cd demo $ python -m toga_demo Community
[cleanup] Cleanup MW version dependency in Site.notifications Also update documentation of EchoMixin
@@ -29,12 +29,11 @@ class EchoMixin: def notifications(self, **kwargs): """Yield Notification objects from the Echo extension. - :keyword format: If specified, notifications will be returned formatted - this way. Its value is either 'model', 'special' or None. Default - is 'special'. - :type format: str or None + :keyword Optional[str] format: If specified, notifications will + be returned formatted this way. Its value is either ``model``, + ``special`` or ``None``. Default is ``special``. - Refer API reference for other keywords. + .. seealso:: :api:`Notifications` for other keywords. """ params = { 'action': 'query', @@ -48,19 +47,16 @@ class EchoMixin: data = self.simple_request(**params).submit() notifications = data['query']['notifications']['list'] - # Support API before 1.27.0-wmf.22 - if hasattr(notifications, 'values'): - notifications = notifications.values() - return (Notification.fromJSON(self, notification) for notification in notifications) @need_extension('Echo') - def notifications_mark_read(self, **kwargs): + def notifications_mark_read(self, **kwargs) -> bool: """Mark selected notifications as read. + .. seealso:: :api:`echomarkread` + :return: whether the action was successful - :rtype: bool """ # TODO: ensure that the 'echomarkread' action # is supported by the site
quex_interface_body_c.mako: initialize the struct Lexer::prev_id field TN:
@@ -26,6 +26,7 @@ static void init_lexer(Lexer *lexer) { QUEX_NAME(token_p_set)(&lexer->quex_lexer, &lexer->buffer_tk); memset (&lexer->buffer_tk, 0, sizeof (lexer->buffer_tk)); + lexer->prev_id = 0; } Lexer*
Override comparisons on Expression so object comparison doesn't mess us up. Fixes
@@ -284,6 +284,18 @@ class Expression(object): plural('broadcast/join', len(self._joins), 'broadcasts/joins')) return s + def __lt__(self, other): + raise NotImplementedError("'<' comparison with expression of type {}".format(str(self._type))) + + def __le__(self, other): + raise NotImplementedError("'<=' comparison with expression of type {}".format(str(self._type))) + + def __gt__(self, other): + raise NotImplementedError("'>' comparison with expression of type {}".format(str(self._type))) + + def __ge__(self, other): + raise NotImplementedError("'>=' comparison with expression of type {}".format(str(self._type))) + def _init(self): pass
[ENH] get_time_index to access index of hierarchical data Adds a `get_time_index` utility to retrieve the `pandas` time series index from equally indexed panels and series in `datatypes._utilities`.
@@ -6,6 +6,57 @@ import numpy as np import pandas as pd +def _get_index(x): + if hasattr(x, "index"): + return x.index + else: + # select last dimension for time index + return pd.RangeIndex(x.shape[-1]) + + +def get_time_index(X): + """Get index of time series data, helper function. + + Parameters + ---------- + X : pd.DataFrame / pd.Series / np.ndarray + in one of the following sktime mtype specifications for Series, Panel, Hierarchical: + pd.DataFrame, pd.Series, np.ndarray, pd-multiindex, nested_univ, pd_multiindex_hier + assumes all time series have equal length and equal index set + will *not* work for numpy3D, list-of-df, pd-wide, pd-long + + Returns + ------- + time_index : pandas Index + Index of time series + """ + # assumes that all samples share the same the time index, only looks at + # first row + if isinstance(X, pd.DataFrame): + if isinstance(X.index, pd.MultiIndex): + return X.xs( + X.index.get_level_values("instances")[0], level="instances" + ).index + else: + return _get_index(X.iloc[0, 0]) + + elif isinstance(X, pd.Series): + if isinstance(X.index, pd.MultiIndex): + return X.xs( + X.index.get_level_values("instances")[0], level="instances" + ).index + else: + return _get_index(X.iloc[0]) + + elif isinstance(X, np.ndarray): + return _get_index(X) + + else: + raise ValueError( + f"X must be a pandas DataFrame or Series, but found: {type(X)}" + ) + + def get_index_for_series(obj, cutoff=0): """Get pandas index for a Series object.
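A small usage sketch for the MultiIndex branch of the lookup (column and level values are hypothetical):

```python
import pandas as pd

idx = pd.MultiIndex.from_product([["a", "b"], range(3)],
                                 names=["instances", "timepoints"])
X = pd.DataFrame({"value": range(6)}, index=idx)

# same lookup as in the diff: take the time index of the first instance
first = X.index.get_level_values("instances")[0]
time_index = X.xs(first, level="instances").index
print(list(time_index))  # [0, 1, 2]
```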
perform bottom up traversal of the call graph while inlining callees with gbarriers in them
@@ -2343,19 +2343,26 @@ def infer_arg_descr(program): def inline_kernels_with_gbarriers(program): from loopy.kernel.instruction import BarrierInstruction from loopy.transform.callable import inline_callable_kernel + from loopy.kernel.tools import get_call_graph + from pytools.graph import compute_topological_order def has_gbarrier(knl): return any((isinstance(insn, BarrierInstruction) and insn.synchronization_kind == "global") for insn in knl.instructions) - # FIXME: should traverse in call-graph's topological sort order - callees_to_inline = [name for name, knl_clbl in program.callables_table.items() - if (isinstance(knl_clbl, CallableKernel) - and has_gbarrier(knl_clbl.subkernel))] + call_graph = get_call_graph(program, only_kernel_callables=True) - for callee_to_inline in callees_to_inline: - program = inline_callable_kernel(program, callee_to_inline) + # traverse the kernel calls in a reverse topological sort so that barriers + # are rightly passed to the entrypoints. + toposort = compute_topological_order(call_graph, + # pass key to have deterministic codegen + key=lambda x: x + ) + + for name in toposort[::-1]: + if has_gbarrier(program[name]): + program = inline_callable_kernel(program, name) return program
clean up gemfile Remove opennebula, since we are using docker and ec2 Pin winrm to the package since our changes have been released by upstream
@@ -11,16 +11,11 @@ group :docker do gem 'kitchen-docker', :git => 'https://github.com/test-kitchen/kitchen-docker.git' end -group :opennebula do - gem 'kitchen-opennebula', '>=0.2.3' - gem 'xmlrpc' -end - group :windows do gem 'vagrant-wrapper' gem 'kitchen-vagrant' gem 'winrm', '~>2.0' - gem 'winrm-fs', :git => 'https://github.com/WinRb/winrm-fs.git' + gem 'winrm-fs', '~>1.2.1' end group :ec2 do
[batch] more reslience in pod throttler * [batch] more reslience in pod throttler Two big changes. Catch any errors and release the semaphore. Restart failed workers in the concurrent worker pool. * address comments * bump * stick manager in a separate thread
import asyncio import logging +import traceback log = logging.getLogger('batch.throttler') @@ -13,13 +14,28 @@ class PodThrottler: self.pending_pods = set() self.created_pods = set() - for _ in range(parallelism): - asyncio.ensure_future(self._create_pod()) + workers = [asyncio.ensure_future(self._create_pod()) + for _ in range(parallelism)] + + async def manager(workers): + while True: + failed, pending = asyncio.wait(workers, return_when=asyncio.FIRST_COMPLETED) + for fut in failed: + err = fut.exception() + assert err is not None + err_msg = '\n'.join( + traceback.format_exception(type(err), err, err.__traceback__)) + log.error(f'restarting failed worker: {err} {err_msg}') + pending.append(asyncio.ensure_future(self._create_pod())) + workers = pending + + asyncio.ensure_future(manager(workers)) async def _create_pod(self): while True: await self.semaphore.acquire() + try: job = await self.queue.get() pod_name = job._pod_name @@ -29,6 +45,10 @@ class PodThrottler: return await job._create_pod() + except: + self.semaphore.release() + raise + self.pending_pods.remove(pod_name) self.created_pods.add(pod_name) self.queue.task_done()
Use GitHub issue instead of spreadsheet Making a quick change now since the linked spreadsheet is not world-readable. Can discuss later whether GitHub issues or a spreadsheet make more sense longer term.
@@ -9,7 +9,7 @@ PRs will undergo lightweight review from the core team, primarily to ensure subm * The author's PR does not make extraneous changes to the manuscript. * That the author's PR is not a duplication of work already contained in the manuscript (with a more stringent notion of "duplication" for the text in the main body vs. the appendices). -For the purposes of tracking, if you plan to submit a PR, please add your name(s) and an active PR in this [spreadsheet](https://docs.google.com/spreadsheets/d/1cxnkU15uEzo8g0UDJuhSVUvBV3fER40A8D5WHVdUM4U/edit?usp=sharing&resourcekey=0-8eUFBst_aD419RAkjSBitQ) so that other authors can more easily check that they are 1) not duplicating work or 2) contact you to collaborate. Add this as soon as possible so that we can more effectively mitigate duplicate efforts. For this reason, please also open a draft PR with your edits as soon as possible, before they are complete, and fill out the relevant template fields to facilitate our tracking of your work. +For the purposes of tracking, if you plan to submit a PR, please open a GitHub issue briefly describing your intended change so that other authors can more easily check that they are 1) not duplicating work or 2) contact you to collaborate. Add this as soon as possible so that we can more effectively mitigate duplicate efforts. For this reason, please also open a draft PR with your edits as soon as possible, before they are complete, and fill out the relevant template fields to facilitate our tracking of your work. Below are proposed analyses that we have not done ourselves, but expect would be interesting to carry out and add to the draft. If you plan to perform one of these analyses, please let us know, and add your intent to the above spreadsheet so that we may connect you with other interested authors.
README feature update Updated with new features as well as notable features
@@ -41,28 +41,37 @@ If this is your first time making a PR or aren't sure of the standard practice o - [A great example from one of our own contributors](https://github.com/PokemonGoF/PokemonGo-Bot/pull/3912) ## Features +- [x] Based on Python for botting on any operating system - Windows, macOS and Linux +- [x] Allow custom hash service provider [NEW] - [x] GPS Location configuration -- [x] Search Pokestops -- [x] Catch Pokemon -- [x] Determine which pokeball to use (uses Razz Berry if the catch percentage is low!) -- [x] Exchange, evolve and catch Pokemon as per configuration +- [x] Search & spin Pokestops / Gyms +- [x] Diverse options for humanlike behavior from movement to overall game play +- [x] Ability to add multiple coordinates to select between your favorite botting locations +- [x] Support self defined path / route +- [x] Advanced catch, evolve and transfer confuration using our PokemonOptimizer settings +- [x] Determine which pokeball to use +- [x] Rules to determine the use of Razz and Pinap Berries +- [x] Exchange, evolve and catch Pokemon base on pre-configured rules - [x] Transfer Pokemon in bulk - [x] Auto switch mode (Inventory Checks - switches between catch/farming items) - [x] Limit the step to farm specific area for pokestops -- [x] Limit Pokestops/catch Pokemons per day +- [x] Limit Spin Pokestops/Catch Pokemons per day - [x] IV Functionality filter +- [x] Mass rename of Pokemon with comprehenive rules - [x] Adjust delay between Pokemon capture & Transfer as per configuration -- [x] Hatch eggs -- [x] Incubate eggs -- [x] Use candy -- [x] Set family ID as VIP and priorice bot to catch it! -- [x] Spin Gyms pokestops +- [x] Telegram integration - reporting of bot's events +- [x] Snipe Pokemon within a radius of 30Km, either through telegram command or local map (Example Rocket Map) +- [x] Issue command through telegream - Activate Lucky egg / Incense, Snipping +- [x] Support dropping of Lure Module +- [x] Incubate eggs & Buddy walk +- [x] Bot is able to identify pokemon in their family +- [x] Set family ID as VIP and prioritize bot to catch it! - [x] Transfer red slashed pokemons - [x] Set shiny pokemons as VIP -- [x] Deploy a pokemon in Gym if slot available +- [x] Deploy a pokemon in Gym if there are slots available - [x] Docker support - [x] Auto heal Pokemons -- [x] Information about PGoAPI bot version is rather Bossland endpoint, expiration key date and RPM used +- [x] Displaying of Hash expiration date and RPM information ## Credits
Remove username hack from strava Strava API now returns a username:
@@ -27,14 +27,13 @@ class StravaOAuth(BaseOAuth2): def get_user_details(self, response): """Return user details from Strava account""" - # because there is no usernames on strava - username = response['athlete']['id'] email = response['athlete'].get('email', '') + username = response['athlete'].get('username', '') fullname, first_name, last_name = self.get_user_names( first_name=response['athlete'].get('firstname', ''), last_name=response['athlete'].get('lastname', ''), ) - return {'username': str(username), + return {'username': username, 'fullname': fullname, 'first_name': first_name, 'last_name': last_name,
tools/dm: add description for task auto restore after dm-worker restart Via:
@@ -40,7 +40,7 @@ This sections describes the considerations that you need to know when you restar **In the process of full data loading:** -For the SQL files during full data import, DM uses the downstream database to record the checkpoint information. When DM-worker is restarted, it checks the checkpoint information and you can use the [`start-task` command](/tools/dm/practice.md#step-4-start-the-data-synchronization-task) to recover the data synchronization task automatically. +For the SQL files during full data import, DM uses the downstream database to record the checkpoint information, and DM-worker records the subtask information in the local meta file. When DM-worker is restarted, it checks the checkpoint information and the subtask information in the local record, and the running task before restarting recovers the data synchronization automatically. **In the process of incremental data synchronization:** @@ -48,13 +48,13 @@ For the binlog during incremental data import, DM uses the downstream database t + Sharding DDL statements synchronization is not enabled - If the sharding DDL statements synchronization is not enabled in the task running on DM-worker, when DM-worker is restarted, it checks the checkpoint information and you can use the `start-task` command to recover the data synchronization task automatically. + If the sharding DDL statements synchronization is not enabled in the task running on DM-worker, when DM-worker is restarted, it checks the checkpoint information and the subtask information in the local record, and the running task before restarting recovers the data synchronization automatically. + Sharding DDL statements synchronization is enabled - When DM is synchronizing the sharding DDL statements, if DM-worker successfully executes (or skips) the sharding DDL binlog event, then the checkpoints of all tables related to sharding DDL in the DM-worker are updated to the position after the binlog event corresponding to the DDL statement. - - When DM-worker is restarted before or after synchronizing sharding DDL statements, it checks the checkpoint information and you can use the `start-task` command to recover the data synchronization task automatically. + - When DM-worker is restarted before or after synchronizing sharding DDL statements, it recovers the data synchronization automatically according to the checkpoint information and the subtask information in the local record. - When DM-worker is restarted during the process of synchronizing sharding DDL statements, the issue might occur that the owner (one of DM-worker instances) has executed the DDL statement and successfully changed the downstream database table schema, while other DM-worker instances are restarted but fail to skip the DDL statement and update the checkpoints.
Change ValueError to assertion in clifford_optimize Fixes hunch was correct. The coefficient is initialized at 1, and can only be negated. Changed the error to an assertion accordingly. Update: Removed the assertion entirely since that assertion already exists in `PauliStringPhasor.__init__`
@@ -92,14 +92,6 @@ def clifford_optimized_circuit(circuit: circuits.Circuit, atol: float = 1e-8) -> qubit, pauli = next(iter(merge_op.pauli_string.items())) quarter_turns = round(merge_op.exponent_relative * 2) - if merge_op.pauli_string.coefficient not in [1, -1]: - # TODO: Add support for more general phases. - # Github issue: https://github.com/quantumlib/Cirq/issues/2962 - # Legacy coverage ignore, we need test code that hits this. - # coverage: ignore - raise NotImplementedError( - 'Only +1/-1 pauli string coefficients currently supported' - ) quarter_turns *= int(merge_op.pauli_string.coefficient.real) quarter_turns %= 4 part_cliff_gate = ops.SingleQubitCliffordGate.from_quarter_turns(pauli, quarter_turns)
DOC: Fixed minor typos in temp_elide.c [ci skip]
/* * Functions used to try to avoid/elide temporaries in python expressions - * of type a + b + b by translating some operations into inplace operations. + * of type a + b + b by translating some operations into in-place operations. * This example translates to this bytecode: * * 0 LOAD_FAST 0 (a) * instructions so they always have a reference count larger than 1. * The temporary of the first BINARY_ADD on the other hand only has a count of * 1. Only temporaries can have a count of 1 in python so we can use this to - * transform the second operation into an inplace operation and not affect the + * transform the second operation into an in-place operation and not affect the * output of the program. * CPython does the same thing to resize memory instead of copying when doing * string concatenation. * This is an expensive operation so temporaries are only avoided for rather * large arrays. * - * A possible future improvement would be to change cpython to give as access + * A possible future improvement would be to change cpython to give us access * to the top of the stack. Then we could just check that the objects involved * are on the cpython stack instead of checking the function callstack. * - * Elision can be applied to all operations that do have inplace variants and + * Elision can be applied to all operations that do have in-place variants and * do not change types (addition, subtraction, multiplication, float division, * logical and bitwise operations ...) * For commutative operations (addition, multiplication, ...) if eliding into - * the lefthand side fails it can succedd on the righthand side by swapping the + * the lefthand side fails it can succeed on the righthand side by swapping the * arguments. E.g. b * (a * 2) can be elided by changing it to (2 * a) * b. * - * TODO only supports systems with backtrace(), windows can probably be - * supported too by using the appropriate windows apis. + * TODO only supports systems with backtrace(), Windows can probably be + * supported too by using the appropriate Windows APIs. */ #if defined HAVE_BACKTRACE && defined HAVE_DLFCN_H && ! defined PYPY_VERSION #endif /* * Heuristic size of the array in bytes at which backtrace overhead generation - * becomes less than speed gained by inplace operations. Depends on stack depth + * becomes less than speed gained by in-place operations. Depends on stack depth * being checked. Measurements with 10 stacks show it getting worthwhile * around 100KiB but to be conservative put it higher around where the L2 cache * spills. #else /* * in debug mode always elide but skip scalars as these can convert to 0d array - * during in place operations + * during in-place operations */ #define NPY_MIN_ELIDE_BYTES (32) #endif @@ -272,7 +272,7 @@ check_callers(int * cannot) /* * check if in "alhs @op@ orhs" that alhs is a temporary (refcnt == 1) so we - * can do inplace operations instead of creating a new temporary + * can do in-place operations instead of creating a new temporary * "cannot" is set to true if it cannot be done even with swapped arguments */ static int
tv4play: better message if the video is not available fixes:
@@ -74,7 +74,7 @@ class Tv4play(Service, OpenGraphThumbMixin): url = "https://playback-api.b17g.net/media/{}?service=tv4&device=browser&protocol=hls%2Cdash&drm=widevine".format(vid) res = self.http.request("get", url, cookies=self.cookies) if res.status_code > 200: - yield ServiceError("Can't play this because the video is geoblocked.") + yield ServiceError("Can't play this because the video is geoblocked or not available.") return if res.json()["playbackItem"]["type"] == "hls": streams = hlsparse(self.config, self.http.request("get", res.json()["playbackItem"]["manifestUrl"]),
[swarming] trap IOError while waiting Sometimes there can be an exception. Have the bot loop instead, because this normally happens during system shutdown. This will remove noise in the bot events log.
@@ -1154,7 +1154,13 @@ def _poll_server(botobj, quit_bit, last_action): _call_hook_safe( True, botobj, 'on_bot_idle', max(0, time.time() - last_action)) _maybe_update_lkgbc(botobj) + try: + # Sometimes throw with "[Errno 4] Interrupted function call", especially + # on Windows upon system shutdown. quit_bit.wait(value) + except IOError: + # Act as it if were set as this likely mean a system shutdown. + quit_bit.set() return False if cmd == 'terminate':
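The pattern behind this fix, treating an interrupted wait on the quit event as a shutdown request, can be shown in isolation. Below is a minimal Python sketch assuming a `threading.Event`-style quit flag; the helper name `wait_for_quit` and the `poll_interval` parameter are illustrative, not taken from the swarming bot code.

```python
import threading

def wait_for_quit(quit_bit: threading.Event, poll_interval: float) -> None:
    """Wait on the quit event, treating an interrupted wait as a shutdown request.

    Mirrors the idea in the diff above: on some platforms the wait can raise
    "IOError: [Errno 4] Interrupted function call" during system shutdown,
    and the safest reaction is to behave as if the quit flag had been set.
    """
    try:
        quit_bit.wait(poll_interval)
    except IOError:
        # Act as if the event were set; an interrupted wait here most likely
        # means the system is shutting down.
        quit_bit.set()

# Illustrative usage: the loop owner checks the flag after each wait.
quit_bit = threading.Event()
wait_for_quit(quit_bit, poll_interval=0.1)
print("quit requested:", quit_bit.is_set())
```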
[batch] eliminate non-determinism in test stderr and stdout non-deterministically interleave, breaking the string match in `test_can_use_google_credentials`
@@ -608,7 +608,8 @@ location = "gs://{ bucket_name }/{ token }/{ attempt_token }/test_can_use_hailct hl.utils.range_table(10).write(location) hl.read_table(location).show() ''' - j = builder.create_job(os.environ['HAIL_HAIL_BASE_IMAGE'], ['python3', '-c', script]) + j = builder.create_job(os.environ['HAIL_HAIL_BASE_IMAGE'], + ['/bin/bash', '-c', f'python3 -c >out 2>err \'{script}\'; cat out err']) builder.submit() status = j.wait() assert status['state'] == 'Success', f'{j.log(), status}'
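The fix serializes the child's output by writing stdout and stderr to separate files and only printing them once the script has finished. A rough Python illustration of the same idea follows; the snippet and helper name are hypothetical, and it simply captures the two streams separately before concatenating them in a fixed order.

```python
import subprocess
import sys

def run_with_ordered_output(snippet: str) -> str:
    """Run a Python snippet and return its stdout followed by its stderr.

    Capturing the two streams separately and concatenating them afterwards
    avoids the non-deterministic interleaving that broke the string match
    described in the commit message above.
    """
    result = subprocess.run(
        [sys.executable, "-c", snippet],
        capture_output=True,
        text=True,
        check=False,
    )
    return result.stdout + result.stderr

combined = run_with_ordered_output(
    "import sys; print('to stdout'); print('to stderr', file=sys.stderr)"
)
assert combined == "to stdout\nto stderr\n"
print(combined, end="")
```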
chore: Get the next perfect square If the number of squares is not a perfect square, get the next highest perfect square
@@ -325,8 +325,10 @@ class AvatarModify(commands.Cog): if 1 <= squares <= MAX_SQUARES: raise commands.BadArgument(f"Squares must be a positive number less than or equal to {MAX_SQUARES:,}.") - if not math.sqrt(squares).is_integer(): - raise commands.BadArgument("The number of squares must be a perfect square.") + sqrt = math.sqrt(squares) + + if not sqrt.is_integer(): + squares = math.ceil(sqrt) ** 2 # Get the next perfect square file_name = file_safe_name("mosaic_avatar", ctx.author.display_name)
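The rounding rule introduced here, bumping a non-square count up to the next perfect square, is easy to show in isolation. A standalone sketch of the arithmetic (the function below is illustrative, not the bot's actual helper):

```python
import math

def next_perfect_square(squares: int) -> int:
    """Return `squares` if it is already a perfect square, otherwise the next
    perfect square above it, e.g. 10 -> 16, 16 -> 16, 17 -> 25."""
    sqrt = math.sqrt(squares)
    if sqrt.is_integer():
        return squares
    return math.ceil(sqrt) ** 2  # next integer root, squared

assert next_perfect_square(16) == 16
assert next_perfect_square(10) == 16
assert next_perfect_square(17) == 25
```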
Announce deprecation of Python 2 support in MLflow Announces deprecation of Python 2 support, to be dropped entirely from MLflow in a future release.
@@ -24,6 +24,8 @@ implement mutual exclusion manually. For a lower level API, see the :py:mod:`mlflow.tracking` module. """ +import sys + from mlflow.version import VERSION as __version__ from mlflow.utils.logging_utils import _configure_mlflow_loggers import mlflow.tracking._model_registry.fluent @@ -44,6 +46,14 @@ import mlflow.tracking as tracking # noqa _configure_mlflow_loggers(root_module_name=__name__) +if sys.version_info.major == 2: + warnings.warn("MLflow support for Python 2 is deprecated and will be dropped in a future " + "release. At that point, existing Python 2 workflows that use MLflow will " + "continue to work without modification, but Python 2 users will no longer " + "get access to the latest MLflow features and bugfixes. We recommend that " + "you upgrade to Python 3 - see https://docs.python.org/3/howto/pyporting.html " + "for a migration guide.", DeprecationWarning) + ActiveRun = mlflow.tracking.fluent.ActiveRun log_param = mlflow.tracking.fluent.log_param log_metric = mlflow.tracking.fluent.log_metric
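The check itself is a standard import-time pattern: inspect `sys.version_info` and emit a `DeprecationWarning` once. A self-contained sketch with a generic package name and a shortened message (not MLflow's exact wording), including the `warnings` import a standalone module would need:

```python
import sys
import warnings

def warn_if_python2(package_name: str) -> None:
    """Emit a DeprecationWarning when running under Python 2.

    Existing Python 2 workflows keep working, but users are nudged
    toward Python 3, matching the intent of the diff above.
    """
    if sys.version_info.major == 2:
        warnings.warn(
            "{} support for Python 2 is deprecated and will be dropped in a "
            "future release. Please upgrade to Python 3.".format(package_name),
            DeprecationWarning,
        )

# Typically invoked (or inlined) at module import time:
warn_if_python2("examplepkg")
```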
Windows: Clean up how "exec" works. * The documentation asks not to use "subprocess.call" because it may deadlock, so let's not do it, and use a process with proper "communicate" and "wait" method calls instead.
@@ -46,9 +46,12 @@ def callExec(args): del args[1] try: - sys.exit( - subprocess.call(args) + process = subprocess.Popen( + args = args, ) + process.communicate() + + sys.exit(process.wait()) except KeyboardInterrupt: # There was a more relevant stack trace already, so abort this # right here, pylint: disable=protected-access
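The replacement pattern, `Popen` plus `communicate()` and `wait()` instead of `subprocess.call()`, can be reduced to a few lines. A sketch under the assumption that the child's output flows directly to the parent's streams, as in the diff (no pipes attached), so `communicate()` simply blocks until the child exits:

```python
import subprocess
import sys

def call_exec(args):
    """Run `args` and exit the interpreter with the child's return code.

    Uses Popen with communicate()/wait() rather than subprocess.call(),
    following the commit message's point that the documentation discourages
    call() in situations where it may deadlock.
    """
    process = subprocess.Popen(args=args)
    process.communicate()
    sys.exit(process.wait())

# Example (this would terminate the current interpreter with code 0):
# call_exec([sys.executable, "-c", "print('hello from the child')"])
```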
[DOC] Update PULL_REQUEST_TEMPLATE.md so PRs should start with [ENH], [DOC] or [BUG] in title
@@ -42,6 +42,7 @@ Please go through the checklist below. Please feel free to remove points if they - [ ] I've added myself to the [list of contributors](https://github.com/alan-turing-institute/sktime/blob/main/.all-contributorsrc). - [ ] Optionally, I've updated sktime's [CODEOWNERS](https://github.com/alan-turing-institute/sktime/blob/main/CODEOWNERS) to receive notifications about future changes to these files. - [ ] I've added unit tests and made sure they pass locally. +- [ ] The PR title starts with either [ENH], [DOC] or [BUG] indicating wether the PR topic is related to enhancement, documentation or bug ##### For new estimators - [ ] I've added the estimator to the online documentation.
fixes file extension recognition for packed images resolves
@@ -157,10 +157,15 @@ def __get_image_data(sockets_or_slots, export_settings): source_channels_length = 1 file_name = os.path.splitext(result.shader_node.image.name)[0] + if result.shader_node.image.packed_file is None: + file_path = result.shader_node.image.filepath + else: + # empty path for packed textures, because they are converted to png anyway + file_path = "" image_data = gltf2_io_image_data.ImageData( file_name, - result.shader_node.image.filepath, + file_path, result.shader_node.image.size[0], result.shader_node.image.size[1], source_channel,
Update dynamic_domain.txt Update 2: ```changeip.com``` section.
@@ -380,6 +380,7 @@ zapto.org zenergycounsel.us # Reference: http://www.changeip.com/services/free-dynamic-dns/ +# Reference: https://gist.githubusercontent.com/neu5ron/8dd695d4cb26b6dcd997/raw/e1b1ed6fd0b0810b07c168fee028b668254ad486/dynamic-dns.txt (# changeip.com) 1dumb.com 25u.com @@ -391,6 +392,9 @@ zenergycounsel.us acmetoy.com almostmy.com americanunfinished.com +authorizedddns.net +authorizedddns.org +authorizedddns.us authorizeddns.net authorizeddns.org authorizeddns.us @@ -409,20 +413,20 @@ ddns.ms ddns.name ddns.us dhcp.biz +dns-dns.com +dns-stuff.com dns04.com dns05.com dns1.us dns2.us -dns-dns.com dnset.com dnsrd.com -dns-stuff.com dsmtp.com dumb1.com +dynamic-dns.net dynamicdns.biz dynamicdns.co.uk dynamicdns.me.uk -dynamic-dns.net dynamicdns.org.uk dyndns.pro dynssl.com @@ -438,6 +442,7 @@ freewww.biz freewww.info ftp1.biz ftpserver.biz +gettrails.com gettrials.com got-game.org gr8domain.biz @@ -514,6 +519,7 @@ serveuser.com serveusers.com sexidude.com sexxxy.biz +sexxy.biz sixth.biz squirly.info ssl443.org @@ -532,6 +538,7 @@ xxxy.biz xxxy.info ygto.com youdontcare.com +youndontcare.com yourtrap.com zyns.com zzux.com
Update settings.rst Fixes a tiny typo
@@ -204,7 +204,7 @@ then participants will be able to complete the experiment any number of times. Note that this option does not affect the behavior when a participant starts the experiment but the quits or refreshes the page. In those cases, they will -still be locked out, regardless of the setting of 0allow_repeats`. +still be locked out, regardless of the setting of `allow_repeats`. Database Parameters
Toyota: whitelist FW queries whitelist toyota
@@ -209,16 +209,19 @@ FW_QUERY_CONFIG = FwQueryConfig( Request( [StdQueries.SHORT_TESTER_PRESENT_REQUEST, TOYOTA_VERSION_REQUEST], [StdQueries.SHORT_TESTER_PRESENT_RESPONSE, TOYOTA_VERSION_RESPONSE], + whitelist_ecus=[Ecu.fwdCamera, Ecu.fwdRadar, Ecu.dsu, Ecu.abs, Ecu.eps], bus=0, ), Request( [StdQueries.SHORT_TESTER_PRESENT_REQUEST, StdQueries.OBD_VERSION_REQUEST], [StdQueries.SHORT_TESTER_PRESENT_RESPONSE, StdQueries.OBD_VERSION_RESPONSE], + whitelist_ecus=[Ecu.engine], bus=0, ), Request( [StdQueries.TESTER_PRESENT_REQUEST, StdQueries.DEFAULT_DIAGNOSTIC_REQUEST, StdQueries.EXTENDED_DIAGNOSTIC_REQUEST, StdQueries.UDS_VERSION_REQUEST], [StdQueries.TESTER_PRESENT_RESPONSE, StdQueries.DEFAULT_DIAGNOSTIC_RESPONSE, StdQueries.EXTENDED_DIAGNOSTIC_RESPONSE, StdQueries.UDS_VERSION_RESPONSE], + whitelist_ecus=[Ecu.engine, Ecu.fwdRadar, Ecu.fwdCamera, Ecu.abs, Ecu.eps], bus=0, ), ],
Minor typo in readme
# Quora Bookmarked Topics Downloader -This python scrpit will download your Quora Bookmarked post into a pdf file. Just enter your quora credientials and selenium will take care for the rest of the part. +This python script will download your Quora Bookmarked post into a pdf file. Just enter your quora credentials and selenium will take care of the rest of the part. # How to run? ```$ python app.py```
Hardcode 8 test threads in CI This is needed due to the removal of psutil
@@ -125,7 +125,7 @@ jobs: [flake8] %(code)s: %(text)s'" - name: Run tests and generate coverage report - run: pytest -n auto --cov --disable-warnings -q + run: pytest -n 8 --cov --disable-warnings -q # Prepare the Pull Request Payload artifact. If this fails, we # we fail silently using the `continue-on-error` option. It's
lightbox: Remove redundant conversion of `image` to jQuery object. `image` passed to lightbox.open() is already a jQuery object, so we don't need to convert it explicitly. Also, the parameter is renamed from `image` to `$image`.
@@ -85,7 +85,7 @@ function display_video(payload) { // the image param is optional, but required on the first preview of an image. // this will likely be passed in every time but just ignored if the result is already // stored in the `asset_map`. -exports.open = function (image, options) { +exports.open = function ($image, options) { if (!options) { options = { // default to showing standard images. @@ -93,8 +93,6 @@ exports.open = function (image, options) { }; } - const $image = $(image); - // if the asset_map already contains the metadata required to display the // asset, just recall that metadata. let $preview_src = $image.attr("src");
MIKE core needs to be at least 0.2 upgrades will otherwise fail
@@ -5,9 +5,9 @@ with open("README.md", "r", encoding="utf-8") as fh: setuptools.setup( name="mikeio", - version="0.12.0", + version="0.12.1", install_requires=[ - "mikecore", + "mikecore>=0.2.0", "numpy>=1.15.0.", # first version with numpy.quantile "pandas>1.0", "scipy>1.0",
Update core team & alumni list Add Tim Allen and Vince Salvino to the core team list Update Dawn's affiliation to Wharton Move Andy Babic and Bertrand Bordage to alumni list Add Michael van Tellingen to the alumni list
Core team ========= -* Andy Babic (Torchbox) * Andy Chosak (consumerfinance.gov) -* Bertrand Bordage (NoriPyt) * Codie Roelf (Praekelt) * Coen van der Kamp (Four Digits) * Cynthia Kiser (Caltech) * Dan Braghis (Torchbox) -* Dawn Wages +* Dawn Wages (The Wharton School) * Jacob Topp-Mugglestone (Torchbox) * Janneke Janssen (Lukkien) * Jonny Scholes (Neon Jungle) @@ -24,13 +22,18 @@ Core team * Scott Cranfill (JPL) * Storm Heg * Thibaud Colas (Torchbox) +* Tim Allen (The Wharton School) * Tom Dyson (Torchbox) +* Vince Salvino (CodeRed) Core team alumni ================ +* Andy Babic +* Bertrand Bordage * Emily Horsman * Josh Barr +* Michael van Tellingen * Mikalai Radchuk * Rob Moorman * Tim Heap
Changed error in keithley2600 Int cast instead of float in error property
@@ -54,7 +54,7 @@ class Keithley2600(Instrument): # if tab delimitated message is greater than one, grab first two as code, message # otherwise, assign code & message to returned error if len(err) > 1: - err = (float(err[0]), err[1]) + err = (int(err[0]), err[1]) code = err[0] message = err[1].replace('"', '') else:
Added API docs for ingest and tasks Fixes
@@ -90,6 +90,12 @@ Indices .. autoclass:: IndicesClient :members: +Ingest +------ + +.. autoclass:: IngestClient + :members: + Cluster ------- @@ -114,3 +120,8 @@ Snapshot .. autoclass:: SnapshotClient :members: +Tasks +----- + +.. autoclass:: TasksClient + :members:
To avoid I/O errors, deactivate the VG (using vgchange -an) and remove the device with dmsetup remove.
# Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume. - name: Clear GlusterFS storage device contents - shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" + shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; vgchange -an {{ fields[1] }}; dmsetup remove {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}" delegate_to: "{{ item.item }}" with_items: "{{ devices_info.results }}" register: clear_devices
utils/exec_control: Fix once decorator implementation Ensures that the once decorator does not affect classes in a parallel inheritance hierarchy.
-from inspect import getmro - # "environment" management: __environments = {} __active_environment = None @@ -96,10 +94,7 @@ def once(method): if __active_environment is None: activate_environment('default') - func_id = repr(method.func_name) - # Store the least derived class, which isn't object, to account - # for subclasses. - func_id += repr(getmro(args[0].__class__)[-2]) + func_id = repr(method.__code__) if func_id in __environments[__active_environment]: return
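The key change is what identifies a method as already having run: `repr(method.__code__)` ties the guard to the function object itself rather than to the least-derived class in the MRO, so classes in parallel inheritance hierarchies no longer interfere. A stripped-down sketch of a `once` decorator using that keying strategy (without the environment handling of the real module):

```python
import functools

_already_run = set()

def once(method):
    """Run `method` at most once per process.

    Keying on the function's code object, rather than on the defining
    class's MRO, means methods from parallel inheritance hierarchies do
    not accidentally suppress one another.
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        func_id = repr(method.__code__)
        if func_id in _already_run:
            return None
        _already_run.add(func_id)
        return method(*args, **kwargs)
    return wrapper

@once
def setup():
    print("setting up")

setup()  # prints "setting up"
setup()  # second call is a no-op
```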
$.Common: generate kind subtype for abstract nodes with no derivation This makes the generated Ada API more consistent (now *all* nodes have such a subtype) and will make it easier to write predicates that involve such nodes. TN:
@@ -83,12 +83,14 @@ package ${ada_lib_name}.Common is ## Output subranges to materialize abstract classes as sets of their ## concrete subclasses. % for cls in ctx.astnode_types: + subtype ${cls.ada_kind_range_name} is ${T.node_kind} % if cls.concrete_subclasses: - subtype ${cls.ada_kind_range_name} is - ${T.node_kind} range - ${'{} .. {}'.format(*cls.ada_kind_range_bounds)}; - --% no-document: True + range ${'{} .. {}'.format(*cls.ada_kind_range_bounds)}; + % else: + with Static_Predicate => False; + -- This abstract node has no concrete derivations % endif + --% no-document: True % endfor ## Output a subtype to materialize the set of kinds for synthetic nodes
emoji.js: Add `display_name` field. The idea is to use this field for storing the best matching alias to be displayed in search results. In subsequent commits I will replace the search and rendering logic to use this field instead of creating new objects on each search.
@@ -107,6 +107,7 @@ exports.build_emoji_data = function (realm_emojis) { _.each(realm_emojis, function (realm_emoji, realm_emoji_name) { emoji_dict = { name: realm_emoji_name, + display_name: realm_emoji_name, aliases: [realm_emoji_name], is_realm_emoji: true, url: realm_emoji.emoji_url, @@ -122,6 +123,7 @@ exports.build_emoji_data = function (realm_emojis) { if (!exports.emojis_by_name.hasOwnProperty(emoji_name)) { emoji_dict = { name: emoji_name, + display_name: emoji_name, aliases: emoji.default_emoji_aliases[codepoint], is_realm_emoji: false, emoji_code: codepoint,
Adding more interface types to convert_intf_name() Added the following interface types and abbreviation variations: 'Ten': 'TenGigabitEthernet', 'Tw': 'TwoGigabitEthernet', 'Two': 'TwoGigabitEthernet', 'For': 'FortyGigabitEthernet', 'Hun': 'HundredGigE',
@@ -205,6 +205,9 @@ class Common(): 'Gig': 'GigabitEthernet', 'GE': 'GigabitEthernet', 'Te': 'TenGigabitEthernet', + 'Ten': 'TenGigabitEthernet', + 'Tw': 'TwoGigabitEthernet', + 'Two': 'TwoGigabitEthernet', 'mgmt': 'mgmt', 'Vl': 'Vlan', 'Tu': 'Tunnel', @@ -215,7 +218,9 @@ class Common(): 'BD': 'BDI', 'Se': 'Serial', 'Fo': 'FortyGigabitEthernet', + 'For': 'FortyGigabitEthernet', 'Hu': 'HundredGigE', + 'Hun': 'HundredGigE', 'vl': 'vasileft', 'vr': 'vasiright', 'BE': 'Bundle-Ether'
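The table above maps device-specific abbreviations to canonical interface names. As a rough illustration of how such a table is applied (the helper below is a hypothetical stand-in, not the actual `convert_intf_name` implementation), the alphabetic prefix is extracted and looked up, and unknown prefixes pass through unchanged:

```python
import re

# A small subset of the abbreviation table from the diff above.
INTF_MAP = {
    'Gi': 'GigabitEthernet',
    'Te': 'TenGigabitEthernet',
    'Ten': 'TenGigabitEthernet',
    'Tw': 'TwoGigabitEthernet',
    'Two': 'TwoGigabitEthernet',
    'Fo': 'FortyGigabitEthernet',
    'For': 'FortyGigabitEthernet',
    'Hu': 'HundredGigE',
    'Hun': 'HundredGigE',
}

def convert_intf_name(intf: str) -> str:
    """Expand an abbreviated interface name, e.g. 'Ten1/0/1' -> 'TenGigabitEthernet1/0/1'.

    The alphabetic prefix is looked up in the table; names with unknown
    prefixes are returned unchanged.
    """
    match = re.match(r'([A-Za-z-]+)(.*)', intf)
    if not match:
        return intf
    prefix, rest = match.groups()
    return INTF_MAP.get(prefix, prefix) + rest

assert convert_intf_name('Ten1/0/1') == 'TenGigabitEthernet1/0/1'
assert convert_intf_name('Hun1/0/49') == 'HundredGigE1/0/49'
assert convert_intf_name('Loopback0') == 'Loopback0'
```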
Fix nightly steps on existing directory ...and also move root chain height calc in nightly build
@@ -108,11 +108,6 @@ jobs: keys: - pyquarkchain-dep-{{ checksum "requirements.txt" }} - - run: - name: Calculate root chain tip height - command: | - echo 'export R_HEIGHT=$(python quarkchain/tools/db_browser.py --cluster_config=`pwd`/mainnet/singularity/cluster_config_template.json root_print_tip 2> /dev/null | grep "height" | awk "{print \$2}" | sed "s/,$//")' >> $BASH_ENV - - restore_cache: keys: - snapshot @@ -127,8 +122,9 @@ jobs: - run: name: Check DB command: | - lo=$(( R_HEIGHT / CIRCLE_NODE_TOTAL * CIRCLE_NODE_INDEX )) - hi=$(( R_HEIGHT / CIRCLE_NODE_TOTAL * (CIRCLE_NODE_INDEX + 1) + 4 )) + height=$(python quarkchain/tools/db_browser.py --cluster_config=`pwd`/mainnet/singularity/cluster_config_template.json root_print_tip 2> /dev/null | grep "height" | awk "{print \$2}" | sed "s/,$//") + lo=$(( height / CIRCLE_NODE_TOTAL * CIRCLE_NODE_INDEX )) + hi=$(( height / CIRCLE_NODE_TOTAL * (CIRCLE_NODE_INDEX + 1) + 4 )) echo "checking: $hi -> $lo" QKC__QUARKCHAIN__DISABLE_POW_CHECK=True ./run_cluster.sh --check_db=True --check_db_rblock_batch=100 --check_db_rblock_from=$hi --check_db_rblock_to=$lo
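The shell arithmetic in the CI step splits the root chain heights evenly across the parallel nodes, with a four-block overlap at each boundary so nothing is skipped. The same arithmetic written as a quick Python sketch, purely to make the resulting ranges explicit:

```python
def node_range(height: int, node_index: int, node_total: int, overlap: int = 4):
    """Return the (low, high) block range a CI node should check.

    Mirrors the arithmetic in the CI step above: each node gets an equal
    slice of the root chain, plus a few extra blocks at the top so that
    adjacent slices overlap and no boundary block is missed.
    """
    lo = height // node_total * node_index
    hi = height // node_total * (node_index + 1) + overlap
    return lo, hi

# With 4 nodes and a tip height of 1000, the slices overlap slightly:
for i in range(4):
    print(i, node_range(1000, i, 4))
# 0 (0, 254)
# 1 (250, 504)
# 2 (500, 754)
# 3 (750, 1004)
```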
remove unneeded objectstorage class We use the default django-storages default_storage
@@ -346,89 +346,6 @@ class FileOnDiskStorage(FileSystemStorage): return super(FileOnDiskStorage, self)._save(name, content) -@deconstructible -class ObjectStorage(Storage): - """ - ObjectStorage stores our data in Minio, an object storage system that's Amazon S3 compatible. - - Minio runs on your local machine, allowing developers to emulate S3. In production, Minio acts as a proxy - to a production storage system, such as Google Cloud Storage, or the real Amazon S3. - """ - def __init__(self): - self.client = minio.Minio( - "localhost:9000", - access_key="development", - secret_key="development", - secure=False, - ) - self.bucket_name = settings.AWS_STORAGE_BUCKET_NAME - - - def _open(self, name, mode='rb'): - resp = self.client.fget_object(self.bucket_name, name, self.path(name)) - logging.debug('name is _open is '.format(name)) - file = DjangoFile(open(self.path(name)), 'wb+') - return file - - def _save(self, name, content): - - if name is None: - logging.warning("file name was saved without a filename!") - if content is None: - logging.warning("file name {} was saved without any content!".format(name)) - - if self.exists(name): - logging.warn('Content copy "%s" already exists!' % name) - return name - - logging.debug('content in _save is '.format(content)) - logging.debug('name in _save is '.format(name)) - - try: - self.client.put_object(self.bucket_name, name, content, content.size) - except ResponseError as e: - pass - return name - - def get_available_name(self, name): - return name - - def delete(self, name): - self.client.remove_objects(self.bucket_name, [name]) - - def exists(self, name): - # take out the leading slash, minio doesn't like it - name = os.path.relpath(name, "/") - - try: - self.client.stat_object(self.bucket_name, name) - return True - except NoSuchKey as err: - logging.debug("Stat'ing object {} returned a response error; assuming it doesn't exist.".format(name)) - logging.debug("Stat object error was {}".format(err)) - return False - - def listdir(self, path): - # directories, files = [], [] - # for entry in list(self.bucket.list_blobs(prefix=path, delimiter='/')): - # directories.append(entry.prefixes) - # files.append(entry) - # return directories, files - # TODO(aron): figure out how to do this using the Minio client - raise NotImplementedError - - def size(self, name): - resp = self.client.stat_object(self.bucket_name, name) - return resp.size - - def url(self, name): - - # take out the leading slash, minio doesn't like it - name = os.path.relpath(name, "/") - - # get a presigned URL that will expire in 7 days - return self.client.presigned_get_object(self.bucket_name, name) - class ChannelResourceSize(models.Model): tree_id = models.IntegerField()
Fix PipelineController start should not kill the process when done Fix PY3.5 compatibility
@@ -690,7 +690,6 @@ class PipelineController(object): step_task_completed_callback=step_task_completed_callback, wait=wait ) - leave_process(0) return True @@ -2793,7 +2792,7 @@ class PipelineDecorator(PipelineController): add_pipeline_tags=False, # type: bool target_project=None, # type: Optional[str] abort_on_failure=False, # type: bool - pipeline_execution_queue='services', # type: Optional[str] + pipeline_execution_queue='services' # type: Optional[str] ): # type: (...) -> Callable """
Fix a bug in the data loading of GraphWriter In the bucket sampler, the length array should be recovered after randomly picking samples.
@@ -227,6 +227,8 @@ class BucketSampler(torch.utils.data.Sampler): random.shuffle(datas) idxs = sum(datas, []) batch = [] + + lens = torch.Tensor([len(x) for x in self.data_source]) for idx in idxs: batch.append(idx) mlen = max([0]+[lens[x] for x in batch])
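The essential point of the fix is that the length array must be derived from the data source by original index at the moment batches are formed, so shuffling the buckets cannot desynchronize indices and lengths. A simplified, framework-free sketch of length-bucketed batching along those lines; the parameters and structure are illustrative and not the GraphWriter sampler itself:

```python
import random

def bucketed_batches(data_source, bucket_size=64, max_tokens=256):
    """Yield batches of indices grouped by (roughly) similar lengths.

    Lengths are always looked up from `data_source` by original index, so
    shuffling the buckets cannot mismatch indices and lengths, which is the
    bug the commit above fixes.
    """
    lens = [len(x) for x in data_source]
    order = sorted(range(len(data_source)), key=lambda i: lens[i])
    buckets = [order[i:i + bucket_size] for i in range(0, len(order), bucket_size)]
    random.shuffle(buckets)
    idxs = [i for bucket in buckets for i in bucket]

    batch, mlen = [], 0
    for idx in idxs:
        batch.append(idx)
        mlen = max(mlen, lens[idx])
        if mlen * len(batch) > max_tokens:
            yield batch
            batch, mlen = [], 0
    if batch:
        yield batch

data = [list(range(random.randint(1, 20))) for _ in range(100)]
for b in bucketed_batches(data):
    assert all(0 <= i < len(data) for i in b)
```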
[PY3] Fix timezone module unit tests When writing to a file, the string needs to be converted to bytes first.
@@ -19,6 +19,8 @@ ensure_in_syspath('../../') # Import Salt Libs from salt.modules import timezone +import salt.ext.six as six +import salt.utils # Globals timezone.__salt__ = {} @@ -76,6 +78,9 @@ class TimezoneTestCase(TestCase): def create_tempfile_with_contents(self, contents): temp = NamedTemporaryFile(delete=False) + if six.PY3: + temp.write(salt.utils.to_bytes(contents)) + else: temp.write(contents) temp.close() self.tempfiles.append(temp)
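The root cause is that NamedTemporaryFile opens its file in binary mode by default, so under Python 3 a str has to be converted to bytes before writing. A minimal sketch of the helper pattern using plain `str.encode` in place of the `six`/`salt.utils` shims used in the diff:

```python
from tempfile import NamedTemporaryFile

def create_tempfile_with_contents(contents: str) -> str:
    """Write `contents` to a temporary file and return its path.

    NamedTemporaryFile is opened in binary mode by default, so the text is
    encoded to bytes before writing, the same fix the diff above makes with
    salt.utils.to_bytes for the Python 3 case.
    """
    temp = NamedTemporaryFile(delete=False)
    temp.write(contents.encode("utf-8"))
    temp.close()
    return temp.name

path = create_tempfile_with_contents("Zone='US/Eastern'\n")
with open(path) as handle:
    assert "US/Eastern" in handle.read()
```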
PyTorch should always depend on `future` Summary: Because `past` is used in `caffe2.python.core` Pull Request resolved: Test Plan: CI
@@ -352,10 +352,10 @@ def build_deps(): ################################################################################ # the list of runtime dependencies required by this built package -install_requires = [] +install_requires = ['future'] if sys.version_info <= (2, 7): - install_requires += ['future', 'typing'] + install_requires += ['typing'] missing_pydep = ''' Missing build dependency: Unable to `import {importname}`.
pkg_analysis_body_ada.mako: minor reformatting TN: minor
@@ -1163,8 +1163,8 @@ package body ${ada_lib_name}.Analysis is procedure Reroot_Foreign_Nodes (Self : in out Lex_Env_Data_Type; Root_Scope : Lexical_Env) is - Els : ${root_node_type_name}_Vectors.Elements_Array - := Self.Contains.To_Array; + Els : ${root_node_type_name}_Vectors.Elements_Array := + Self.Contains.To_Array; Env : Lexical_Env; begin Self.Is_Contained_By.Clear;
Update README.md Add gitter link.
[![Build Status](https://travis-ci.com/WagnerGroup/pyqmc.svg?branch=master)](https://travis-ci.com/WagnerGroup/pyqmc) [![Documentation Status](https://readthedocs.org/projects/pyqmc/badge/?version=latest)](https://pyqmc.readthedocs.io/en/latest/?badge=latest) - +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im//pyqmc/community) ## PyQMC A python module that implements real-space quantum Monte Carlo techniques. It is primarily meant to interoperate with PySCF. Documentation is available at [readthedocs](https://pyqmc.readthedocs.io/en/latest/).