message | diff
---|---|
Add note to MultiIndex.is_monotonic*
`MultiIndex.is_monotonic_increasing` and `MultiIndex.is_monotonic_decreasing` won't work in Koalas < 1.7.0 with PySpark 3.1.1 without disabling `spark.sql.optimizer.nestedSchemaPruning.enabled`.
We should mention this in the docs. | @@ -767,6 +767,9 @@ class IndexOpsMixin(object, metaclass=ABCMeta):
which is potentially expensive. In case of multi-index, all data are
transferred to single node which can easily cause out-of-memory error currently.
+ .. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
+ for multi-index if you're using Koalas < 1.7.0 with PySpark 3.1.1.
+
Returns
-------
is_monotonic : bool
@@ -842,6 +845,9 @@ class IndexOpsMixin(object, metaclass=ABCMeta):
which is potentially expensive. In case of multi-index, all data are transferred
to single node which can easily cause out-of-memory error currently.
+ .. note:: Disable the Spark config `spark.sql.optimizer.nestedSchemaPruning.enabled`
+ for multi-index if you're using Koalas < 1.7.0 with PySpark 3.1.1.
+
Returns
-------
is_monotonic : bool
|
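For reference, a minimal sketch of how that Spark configuration can be disabled from a session, which is the workaround the new note recommends (the session setup here is illustrative, not part of the patch):

```python
from pyspark.sql import SparkSession

# Illustrative only: get (or create) the active Spark session.
spark = SparkSession.builder.getOrCreate()

# Disable nested schema pruning before calling MultiIndex.is_monotonic_increasing /
# is_monotonic_decreasing on Koalas < 1.7.0 with PySpark 3.1.1.
spark.conf.set("spark.sql.optimizer.nestedSchemaPruning.enabled", "false")
```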
doc/delay: fix warning.
Add space between a note admonition and the preceding paragraph. | @@ -121,6 +121,7 @@ class DelayInstrument(Instrument):
Temperature (in device-specific units) the
device must cool down to just before the actual workload
execution (after setup has been performed).
+
.. note:: This cannot be specified at the same time as
``fixed_between_jobs``
"""),
|
Try to lowercase virtualenv locations
Fixes | @@ -201,6 +201,11 @@ class Project(object):
return False
+ def _get_virtualenv_location(cls, name):
+ """Get the path to a virtualenv from its name"""
+ venv = delegator.run('{0} -m pipenv.pew dir "{1}"'.format(escape_grouped_arguments(sys.executable), name)).out
+ return venv.strip()
+
@property
def virtualenv_name(self):
# Replace dangerous characters into '_'. The length of the sanitized
@@ -219,6 +224,11 @@ class Project(object):
# Hash the full path of the pipfile
hash = hashlib.sha256(self.pipfile_location.encode()).digest()[:6]
encoded_hash = base64.urlsafe_b64encode(hash).decode()
+ if os.name == 'nt' and not PIPENV_VENV_IN_PROJECT and sanitized.lower() != sanitized:
+ venv = self._get_virtualenv_location(sanitized)
+ lower_venv = self._get_virtualenv_location(sanitized.lower())
+ if not venv.strip() and lower_venv.strip():
+ sanitized = sanitized.lower()
# If the pipfile was located at '/home/user/MY_PROJECT/Pipfile',
# the name of its virtualenv will be 'my-project-wyUfYPqE'
if PIPENV_PYTHON:
@@ -241,13 +251,7 @@ class Project(object):
# Default mode.
if not venv_in_project:
- c = delegator.run(
- '{0} -m pipenv.pew dir "{1}"'.format(
- escape_grouped_arguments(sys.executable),
- self.virtualenv_name,
- )
- )
- loc = c.out.strip()
+ loc = self._get_virtualenv_location(self.virtualenv_name)
# The user wants the virtualenv in the project.
else:
loc = os.sep.join(
|
Update .travis.yml
Added test coverage analysis | language: python
+env:
+ global:
+ - CC_TEST_REPORTER_ID=[6304c25454062b12a30c7471d682dc2a7e67a7e8830b625b278222a752d09db7]
+before_script:
+ - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter
+ - chmod +x ./cc-test-reporter
+ - ./cc-test-reporter before-build
python:
- "3.5"
- - "3.6" # current default Python on Travis CI
+ - "3.6"
- "3.7"
-# command to install dependencies
install:
- pip install -e .[all]
-# command to run tests
script:
- pytest
+after_script:
+ - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT
|
Make get_page_id behaviour more forgiving
If get_page_id is executed with a wrong id parameter, don't raise 'AttributeError: 'NoneType' object has no attribute 'get'' | @@ -58,7 +58,7 @@ class Confluence(AtlassianRestAPI):
:param title: title
:return:
"""
- return self.get_page_by_title(space, title).get('id')
+ return (self.get_page_by_title(space, title) or {}).get('id')
def get_page_space(self, page_id):
"""
|
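The `(... or {}).get(...)` guard used in the patch generalizes to any lookup that may return `None`; a self-contained sketch with a hypothetical `lookup` helper (not Confluence's API):

```python
def lookup(title):
    # Hypothetical helper: returns a dict for known titles, None otherwise.
    pages = {"Home": {"id": "123"}}
    return pages.get(title)

# Without the guard, lookup("Missing").get("id") raises
# "AttributeError: 'NoneType' object has no attribute 'get'".
# Falling back to an empty dict turns that failure into a plain None:
page_id = (lookup("Missing") or {}).get("id")
assert page_id is None
```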
(Failing) test for icalendar.new_event()
Discovered by following warnings about mismatched types | +import datetime as dt
import random
import textwrap
import icalendar
+from freezegun import freeze_time
-from khal.icalendar import split_ics
+from khal.icalendar import new_vevent, split_ics
-from .utils import LOCALE_BERLIN, _get_text, normalize_component
+from .utils import LOCALE_BERLIN, _get_text, _replace_uid, normalize_component
def _get_TZIDs(lines):
@@ -25,6 +27,29 @@ def test_normalize_component():
"""))
+def test_new_vevent():
+ with freeze_time('20220702T1400'):
+ vevent = _replace_uid(new_vevent(
+ LOCALE_BERLIN,
+ dt.date(2022, 7, 2),
+ dt.date(2022, 7, 3),
+ 'An Event',
+ allday=True,
+ repeat='weekly',
+ ))
+ assert vevent.to_ical().decode('utf-8') == '\r\n'.join([
+ 'BEGIN:VEVENT',
+ 'SUMMARY:An Event',
+ 'DTSTART;VALUE=DATE:20220702',
+ 'DTEND;VALUE=DATE:20220703',
+ 'DTSTAMP;VALUE=DATE-TIME:20220702T140000Z',
+ 'UID:E41JRQX2DB4P1AQZI86BAT7NHPBHPRIIHQKA',
+ 'RRULE:FREQ=WEEKLY',
+ 'END:VEVENT',
+ ''
+ ])
+
+
def test_split_ics():
cal = _get_text('cal_lots_of_timezones')
vevents = split_ics(cal)
|
added fuel use, energy produced to null_generator_results
test_custom_rates now passing | @@ -956,6 +956,9 @@ function add_null_generator_results(m, p, r::Dict)
r["GENERATORtoBatt"] = []
r["GENERATORtoGrid"] = []
r["GENERATORtoLoad"] = []
+ r["fuel_used_gal"] = 0
+ r["year_one_gen_energy_produced"] = 0.0
+ r["average_yearly_gen_energy_produced"] = 0.0
nothing
end
|
Fix test_WbMonolingualText_invalid_text
Apparently the error message has changed, update it. | @@ -168,8 +168,7 @@ class TestWikibaseSaveTest(WikibaseTestCase):
language='en')
self.assertRaisesRegex(
OtherPageSaveError,
- r'Edit to page \[\[(wikidata:test:)?Q68]] failed:\n'
- r'invalid-snak: Invalid snak data.',
+ r'Edit to page \[\[(wikidata:test:)?Q68]] failed:',
item.addClaim, claim)
def test_math_invalid_function(self):
|
DOC: correct kind for numericaltype code doc
Code documentation of numpy scalar types indicated the wrong type
(number) for the kind 'i'. This edit moves the indication to the
right type (signedinteger). | generic
+-> bool_ (kind=b)
- +-> number (kind=i)
+ +-> number
| integer
- | signedinteger (intxx)
+ | signedinteger (intxx) (kind=i)
| byte
| short
| intc
|
tests: rework lntransport test a bit
send multiple messages, and not only short ones | @@ -4,6 +4,8 @@ from electrum.ecc import ECPrivkey
from electrum.lnutil import LNPeerAddr
from electrum.lntransport import LNResponderTransport, LNTransport
+from aiorpcx import TaskGroup
+
from . import ElectrumTestCase
from .test_bitcoin import needs_test_with_all_chacha20_implementations
@@ -46,27 +48,53 @@ class TestLNTransport(ElectrumTestCase):
server_shaked = asyncio.Event()
responder_key = ECPrivkey.generate_random_key()
initiator_key = ECPrivkey.generate_random_key()
+ messages_sent_by_client = [
+ b'hello from client',
+ b'long data from client ' + bytes(range(256)) * 100 + b'... client done',
+ b'client is running out of things to say',
+ ]
+ messages_sent_by_server = [
+ b'hello from server',
+ b'hello2 from server',
+ b'long data from server ' + bytes(range(256)) * 100 + b'... server done',
+ ]
+ async def read_messages(transport, expected_messages):
+ ctr = 0
+ async for msg in transport.read_messages():
+ self.assertEqual(expected_messages[ctr], msg)
+ ctr += 1
+ if ctr == len(expected_messages):
+ return
+ async def write_messages(transport, expected_messages):
+ for msg in expected_messages:
+ transport.send_bytes(msg)
+ await asyncio.sleep(0.01)
+
async def cb(reader, writer):
t = LNResponderTransport(responder_key.get_secret_bytes(), reader, writer)
self.assertEqual(await t.handshake(), initiator_key.get_public_key_bytes())
- t.send_bytes(b'hello from server')
- self.assertEqual(await t.read_messages().__anext__(), b'hello from client')
+ async with TaskGroup() as group:
+ await group.spawn(read_messages(t, messages_sent_by_client))
+ await group.spawn(write_messages(t, messages_sent_by_server))
responder_shaked.set()
- server_future = asyncio.ensure_future(asyncio.start_server(cb, '127.0.0.1', 42898))
- loop.run_until_complete(server_future)
- server = server_future.result() # type: asyncio.Server
async def connect():
peer_addr = LNPeerAddr('127.0.0.1', 42898, responder_key.get_public_key_bytes())
t = LNTransport(initiator_key.get_secret_bytes(), peer_addr, proxy=None)
await t.handshake()
- t.send_bytes(b'hello from client')
- self.assertEqual(await t.read_messages().__anext__(), b'hello from server')
+ async with TaskGroup() as group:
+ await group.spawn(read_messages(t, messages_sent_by_server))
+ await group.spawn(write_messages(t, messages_sent_by_client))
server_shaked.set()
+ async def f():
+ server = await asyncio.start_server(cb, '127.0.0.1', 42898)
try:
- connect_future = asyncio.ensure_future(connect())
- loop.run_until_complete(responder_shaked.wait())
- loop.run_until_complete(server_shaked.wait())
+ async with TaskGroup() as group:
+ await group.spawn(connect())
+ await group.spawn(responder_shaked.wait())
+ await group.spawn(server_shaked.wait())
finally:
server.close()
- loop.run_until_complete(server.wait_closed())
+ await server.wait_closed()
+
+ loop.run_until_complete(f())
|
clean-venv-cache: Directly import functions from 'hash_reqs.py'.
Instead of running the 'hash_reqs.py' as a script, directly import
functions from it to calculate the hash. This will speed up the
script. | #!/usr/bin/env python3
import argparse
-import datetime
import os
-import subprocess
import sys
-import time
if False:
from typing import Set, Text
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(ZULIP_PATH)
+from scripts.lib.hash_reqs import expand_reqs, hash_deps
from scripts.lib.zulip_tools import GENERIC_CACHE_SCRIPT_PARSER, \
get_caches_to_be_purged, get_environment, get_recent_deployments, \
purge_unused_caches
@@ -31,14 +29,17 @@ def get_caches_in_use(threshold_days):
# type: (int) -> Set[Text]
setups_to_check = set([ZULIP_PATH, ])
caches_in_use = set()
+
if ENV == "prod":
setups_to_check |= get_recent_deployments(threshold_days)
- hash_reqs = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
+
for path in setups_to_check:
for filename in os.listdir(os.path.join(path, "requirements")):
requirements_file = os.path.join(path, "requirements", filename)
- hash_val = subprocess.check_output([hash_reqs, requirements_file]).strip()
- caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val.decode('utf-8')))
+ deps = expand_reqs(requirements_file)
+ hash_val = hash_deps(deps)
+ caches_in_use.add(os.path.join(VENV_CACHE_DIR, hash_val))
+
return caches_in_use
def main():
|
Update code for pycryptodome backwards incompatibility
from the 3.0 Jun 2014 release: | @@ -43,7 +43,7 @@ def b64_aes_encrypt(message):
"""
key = settings.SECRET_KEY if isinstance(settings.SECRET_KEY, bytes) else settings.SECRET_KEY.encode('ascii')
secret = pad(key, AES_BLOCK_SIZE)[:AES_KEY_MAX_LEN]
- aes = AES.new(secret)
+ aes = AES.new(secret, AES.MODE_ECB)
message_bytes = message if isinstance(message, bytes) else message.encode('utf8')
plaintext = pad(message_bytes, AES_BLOCK_SIZE)
@@ -65,7 +65,7 @@ def b64_aes_decrypt(message):
"""
key = settings.SECRET_KEY if isinstance(settings.SECRET_KEY, bytes) else settings.SECRET_KEY.encode('ascii')
secret = pad(key, AES_BLOCK_SIZE)[:AES_KEY_MAX_LEN]
- aes = AES.new(secret)
+ aes = AES.new(secret, AES.MODE_ECB)
ciphertext = b64decode(message)
plaintext = aes.decrypt(ciphertext)
|
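For context, pycryptodome made the mode argument to `AES.new` mandatory; a minimal, self-contained sketch of the ECB round-trip the patched helpers perform (the key and message below are placeholders):

```python
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

key = b"0123456789abcdef"            # placeholder 16-byte key
cipher = AES.new(key, AES.MODE_ECB)  # the mode must now be passed explicitly

ciphertext = cipher.encrypt(pad(b"secret message", AES.block_size))
plaintext = unpad(AES.new(key, AES.MODE_ECB).decrypt(ciphertext), AES.block_size)
assert plaintext == b"secret message"
```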
enter_summary_scope decorator
* enter_summary_scope decorator
It can be used to make all the summaries within a method of a class run in a new summary scope.
* Address review comments | @@ -337,3 +337,29 @@ class push_summary_writer(object):
def __exit__(self, type, value, traceback):
_summary_writer_stack.pop()
+
+
+def enter_summary_scope(method):
+ """A decorator to run the wrapped method in a new summary scope.
+
+ The class the method belongs to must have attribute '_name' and it
+ will be used as the name of the summary scope.
+
+ Instead of using ``with alf.summary.scope(self._name):`` inside a class method,
+ we can use ``@alf.summary.enter_summary_scope`` to decorate the method to
+ have the benefit of cleaner code.
+ """
+
+ @functools.wraps(method)
+ def wrapped(self, *args, **kwargs):
+ # The first argument to the method is going to be ``self``, i.e. the
+ # instance that the method belongs to.
+ assert hasattr(self,
+ '_name'), "self is expected to have attribute '_name'"
+ scope_name = _scope_stack[-1] + self._name + '/'
+ _scope_stack.append(scope_name)
+ ret = method(self, *args, **kwargs)
+ _scope_stack.pop()
+ return ret
+
+ return wrapped
|
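A stripped-down, standalone sketch of the same decorator pattern (a module-level scope stack plus `functools.wraps`), independent of the ALF internals used above:

```python
import functools

_scope_stack = ['']  # root scope

def enter_scope(method):
    """Run the wrapped method inside a scope named after ``self._name``."""
    @functools.wraps(method)
    def wrapped(self, *args, **kwargs):
        _scope_stack.append(_scope_stack[-1] + self._name + '/')
        try:
            return method(self, *args, **kwargs)
        finally:
            _scope_stack.pop()  # popped even if the method raises
    return wrapped

class Trainer:
    def __init__(self, name):
        self._name = name

    @enter_scope
    def step(self):
        return _scope_stack[-1]

assert Trainer('agent').step() == 'agent/'
```

Unlike the patch above, this sketch pops the stack in a `finally` block, so an exception inside the method does not leave a stale scope behind.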
output_processors/postgres: Fix incorrect parameter
When verifying the database schema, the connection should be passed
instead of a cursor. | @@ -521,7 +521,7 @@ class PostgresqlResultProcessor(OutputProcessor):
self.conn.reset()
def verify_schema_versions(self):
- local_schema_version, db_schema_version = get_schema_versions(self.cursor)
+ local_schema_version, db_schema_version = get_schema_versions(self.conn)
if local_schema_version != db_schema_version:
self.cursor.close()
self.cursor = None
|
Add some options to skip certain normalization steps in the RL workflow
Summary: Make box cox and quantiles optional in the normalization flow. | @@ -42,6 +42,8 @@ def identify_parameter(
max_unique_enum_values=DEFAULT_MAX_UNIQUE_ENUM,
quantile_size=DEFAULT_MAX_QUANTILE_SIZE,
quantile_k2_threshold=DEFAULT_QUANTILE_K2_THRESHOLD,
+ skip_box_cox=False,
+ skip_quantiles=False,
):
feature_type = identify_types.identify_type(values, max_unique_enum_values)
@@ -88,9 +90,10 @@ def identify_parameter(
not np.isclose(stddev, 0):
values = candidate_values
boxcox_lambda = float(lmbda)
- if boxcox_lambda is None:
+ if boxcox_lambda is None or skip_box_cox:
boxcox_shift = None
- if boxcox_lambda is None and k2_original > quantile_k2_threshold:
+ boxcox_lambda = None
+ if boxcox_lambda is None and k2_original > quantile_k2_threshold and (not skip_quantiles):
feature_type = identify_types.QUANTILE
quantiles = mquantiles(
values,
|
Fix reshape on Dense fortran arrays
The algorithm here is much slower than necessary, but is at least
correct, unlike the old version. I should revisit it and get it
properly up to speed. | @@ -50,6 +50,12 @@ cpdef CSR reshape_csr(CSR matrix, idxint n_rows_out, idxint n_cols_out):
return out
+# We have to use a signed integer type because the standard library doesn't
+# provide overloads for unsigned types.
+cdef inline idxint _reshape_dense_reindex(idxint idx, idxint size):
+ cdef div_t res = div(idx, size)
+ return res.quot + res.rem
+
cpdef Dense reshape_dense(Dense matrix, idxint n_rows_out, idxint n_cols_out):
_reshape_check_input(matrix, n_rows_out, n_cols_out)
cdef Dense out
@@ -57,14 +63,14 @@ cpdef Dense reshape_dense(Dense matrix, idxint n_rows_out, idxint n_cols_out):
out = matrix.copy()
out.shape = (n_rows_out, n_cols_out)
return out
- out = dense.zeros(matrix.shape[0], matrix.shape[1])
- cdef size_t idx_self=0, idx_out, idx_out_start, stride=n_cols_out
- for idx_out_start in range(stride):
- idx_out = idx_out_start
- for _ in range(n_rows_out):
- out.data[idx_out] = matrix.data[idx_self]
- idx_self += 1
- idx_out += stride
+ out = dense.zeros(n_rows_out, n_cols_out)
+ cdef size_t idx_in=0, idx_out=0
+ cdef size_t size = n_rows_out * n_cols_out
+ # TODO: improve the algorithm here.
+ cdef size_t stride = _reshape_dense_reindex(matrix.shape[1]*n_rows_out, size)
+ for idx_in in range(size):
+ out.data[idx_out] = matrix.data[idx_in]
+ idx_out = _reshape_dense_reindex(idx_out + stride, size)
return out
|
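To see why a naive buffer copy gets this wrong for column-major data, a small NumPy illustration (NumPy stands in for the Cython `Dense` type; it is not the code under patch):

```python
import numpy as np

a = np.asfortranarray(np.arange(6).reshape(2, 3))  # column-major storage

# Logical (row-major) element order, which reshape must preserve:
print(list(a.ravel(order='C')))   # [0, 1, 2, 3, 4, 5]

# Order of the raw Fortran buffer -- copying it straight into the output
# scrambles the result, which is the kind of mismatch being fixed here:
print(list(a.ravel(order='K')))   # [0, 3, 1, 4, 2, 5]
```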
[ceph_cmd_json_parsing] Fix commands in the docstring
The commands associated with the parser name were incorrect. | @@ -3,7 +3,7 @@ Ceph status commands
====================
This module provides processing for the output of the following ceph related
-commands with `-f json-pretty` parameter.
+commands with ``-f json-pretty`` parameter.
CephOsdDump - command ``ceph osd dump -f json-pretty``
------------------------------------------------------
@@ -14,17 +14,17 @@ CephOsdDf - command ``ceph osd df -f json-pretty``
CephS - command ``ceph -s -f json-pretty``
------------------------------------------
-CephDfDetail - command ``ceph osd erasure-code-profile get default -f json-pretty``
------------------------------------------------------------------------------------
+CephDfDetail - command ``ceph df detail -f json-pretty``
+--------------------------------------------------------
-CephHealthDetail - command ``ceph daemon {ceph_socket_files} config show``
+CephHealthDetail - command ``ceph health detail -f json-pretty``
--------------------------------------------------------------------------
-CephECProfileGet - command ``ceph health detail -f json-pretty``
-----------------------------------------------------------------
+CephECProfileGet - command ``ceph osd erasure-code-profile get default -f json-pretty``
+---------------------------------------------------------------------------------------
-CephCfgInfo - command ``ceph df detail -f json-pretty``
--------------------------------------------------------
+CephCfgInfo - command ``ceph daemon {ceph_socket_files} config show``
+---------------------------------------------------------------------
CephOsdTree - command ``ceph osd tree -f json-pretty``
------------------------------------------------------
@@ -35,7 +35,6 @@ CephReport - command ``ceph report``
All these parsers are based on a shared class which processes the JSON
information into a dictionary.
"""
-
import json
from insights import JSONParser, parser, CommandParser, LegacyItemAccess
from insights.parsers import ParseException, SkipException
@@ -144,7 +143,6 @@ class CephCfgInfo(CommandParser, JSONParser):
>>> ceph_cfg_info.max_open_files == '131072'
True
"""
-
@property
def max_open_files(self):
"""
@@ -179,7 +177,6 @@ class CephReport(CommandParser, LegacyItemAccess):
>>> ceph_report_content["version"] == '12.2.8-52.el7cp'
True
"""
-
def parse_content(self, content):
if not content:
raise SkipException("Empty output.")
|
Discuss the special case where metadata is marked obsolete and deleted
Also document the assumption that the metadata store is the latest and exists in _update_metadata() | @@ -1081,16 +1081,19 @@ class Updater(object):
# do we blindly trust the downloaded root metadata here?
self._update_root_metadata(root_metadata)
- # Ensure the role and key information of the top-level roles is updated.
- # We do this whether or not root needed to be updated, in order to ensure
- # that, e.g., the entries in roledb for top-level roles are populated with
- # expected keyid info so that roles can be validated. See Issue #736.
+ # Ensure that the role and key information of the top-level roles is the
+ # latest. We do this whether or not Root needed to be updated, in order to
+ # ensure that, e.g., the entries in roledb for top-level roles are
+ # populated with expected keyid info so that roles can be validated. In
+ # certain circumstances, top-level metadata might be missing because it was
+ # marked obsolete and deleted after a failed attempt. See Issue #736.
self._rebuild_key_and_role_db()
self.consistent_snapshot = self.metadata['current']['root']['consistent_snapshot']
# Use default but sane information for timestamp metadata, and do not
# require strict checks on its required length.
self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH)
+
# TODO: After fetching snapshot.json, we should either verify the root
# fileinfo referenced there matches what was fetched earlier in
# _update_root_metadata() or make another attempt to download root.json.
@@ -1670,10 +1673,12 @@ class Updater(object):
"""
<Purpose>
Non-public method that downloads, verifies, and 'installs' the metadata
- belonging to 'metadata_role'. Calling this method implies the metadata
- has been updated by the repository and thus needs to be re-downloaded.
- The current and previous metadata stores are updated if the newly
- downloaded metadata is successfully downloaded and verified.
+ belonging to 'metadata_role'. Calling this method implies that the
+ 'metadata_role' on the repository is newer than the client's, and thus
+ needs to be re-downloaded. The current and previous metadata stores are
+ updated if the newly downloaded metadata is successfully downloaded and
+ verified. This method also assumes that the store of top-level metadata
+ is the latest and exists.
<Arguments>
metadata_role:
|
Fix incorrect code comments
Closes-Bug: | @@ -118,8 +118,8 @@ def get_datastore(session, cluster, datastore_regex=None,
"get_object_property",
cluster,
"datastore")
- # If there are no hosts in the cluster then an empty string is
- # returned
+ # If there are no datastores in the cluster then an exception is
+ # raised
if not datastore_ret:
raise exception.DatastoreNotFound()
|
tests: run dev_setup and lvm_setup on secondary cluster for rgw_multisite
Otherwise, the deployment of the second cluster fails. | @@ -172,7 +172,8 @@ commands=
bash -c "cd {changedir}/secondary && vagrant up --no-provision {posargs:--provider=virtualbox}"
bash -c "cd {changedir}/secondary && bash {toxinidir}/tests/scripts/generate_ssh_config.sh {changedir}/secondary"
ansible-playbook --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/setup.yml
- ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook -vv -i "localhost," -c local {toxinidir}/tests/functional/dev_setup.yml --extra-vars "dev_setup={env:DEV_SETUP:False} change_dir={changedir}/secondary ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest}" --tags "vagrant_setup"
+ ansible-playbook --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -vv -i {changedir}/secondary/hosts {toxinidir}/tests/functional/lvm_setup.yml
ansible -i localhost, all -c local -b -m iptables -a 'chain=FORWARD protocol=tcp source=192.168.0.0/16 destination=192.168.0.0/16 jump=ACCEPT action=insert rule_num=1'
ansible-playbook --ssh-extra-args='-F {changedir}/secondary/vagrant_ssh_config' -vv -i {changedir}/secondary/hosts {toxinidir}/{env:PLAYBOOK:site.yml.sample} --extra-vars "\
ireallymeanit=yes \
|
Update enumerate-pe-sections.yml
Resolving comment | @@ -5,7 +5,7 @@ rule:
author: "@Ana06"
scope: function
mbc:
- - Data::Code::Enumerate PE Sections [C0062.001]
+ - Discovery::Code Discovery::Enumerate PE Sections [B0046.001]
references:
- https://0x00sec.org/t/reflective-dll-injection/3080
- https://www.ired.team/offensive-security/code-injection-process-injection/reflective-dll-injection
|
Update to preserve expected sid output
This allows it to be used in conjunction with load_problems.py | # instance. If using a custom APP_SETTINGS_FILE, ensure the appropriate
# environment variable is set prior to running this script. This script is best
# run from the pico-web role (ansible/roles/pico-web/tasks/main.yml)
+#
+# Script outputs the `sid` of the shell server to use in a call to load_problems.py
import sys
@@ -12,12 +14,13 @@ import api
def main(name, host, user, password, port, proto):
- # If a server by this name exists no action necessary
+ # If a server by this name exists short circuit no action necessary
servers = api.shell_servers.get_all_servers()
- if any([s["name"] == name for s in servers]):
- print("shell server already exists with name: {}".format(name))
+ for s in servers:
+ if s["name"] == name:
+ print(s["sid"], end="")
return
- else:
+ # server does not exist try to add
try:
sid = api.shell_servers.add_server(
name=name,
|
fix nonlinear expressions in reopt.jl
moved division of variables to after value.() is called to prevent Julia throwing a nonlinear expression error for our MILP formulation | @@ -1212,14 +1212,13 @@ function add_chp_results(m, p, r::Dict)
##Hot thermal energy storage results go here; need to populate expressions for first collection
if !isempty(p.HotTES)
- @expression(REopt, HotTESSizeMMBTU, sum(dvStorageCapEnergy[b] for b in HotTES))
+ @expression(REopt, HotTESSizeMMBTU, sum(dvStorageCapEnergy[b] for b in p.HotTES))
results["hot_tes_size_mmbtu"] = round(value(HotTESSizeMMBTU), digits=5)
- @expression(REopt, HotTESDischargeSeries[ts in p.TimeStep], dvDischargeFromStorage[b, ts]
- for b in HotTES)
+ @expression(REopt, HotTESDischargeSeries[ts in p.TimeStep], sum(dvDischargeFromStorage[b, ts]
+ for b in p.HotTES))
results["hot_tes_thermal_production_series"] = round.(value.(HotTESDischargeSeries), digits=5)
- @expression(REopt, HotTESsoc[ts in p.TimeStep], dvStorageSOC[b,ts] / HotTESSizeMMBTU
- for b in HotTES)
- results["hot_tes_pct_soc_series"] = round.(value.(HotTESsoc), digits=5)
+ @expression(REopt, HotTESsoc[ts in p.TimeStep], sum(dvStorageSOC[b,ts] for b in p.HotTES))
+ results["hot_tes_pct_soc_series"] = round.(value.(HotTESsoc) / value(HotTESSizeMMBTU), digits=5)
else
results["hot_tes_size_mmbtu"] = 0.0
results["hot_tes_thermal_production_series"] = []
@@ -1228,14 +1227,13 @@ function add_chp_results(m, p, r::Dict)
##Cold thermal energy storage results go here; need to populate expressions for first collection
if !isempty(p.ColdTES)
- @expression(REopt, ColdTESSizeKWHT, sum(dvStorageCapEnergy[b] for b in ColdTES))
+ @expression(REopt, ColdTESSizeKWHT, sum(dvStorageCapEnergy[b] for b in p.ColdTES))
results["cold_tes_size_kwht"] = round(value(ColdTESSizeKWHT), digits=5)
- @expression(REopt, ColdTESDischargeSeries[ts in p.TimeStep], dvDischargeFromStorage[b, ts]
- for b in ColdTES)
+ @expression(REopt, ColdTESDischargeSeries[ts in p.TimeStep], sum(dvDischargeFromStorage[b, ts]
+ for b in p.ColdTES))
results["cold_tes_thermal_production_series"] = round.(value.(ColdTESDischargeSeries), digits=5)
- @expression(REopt, ColdTESsoc[ts in p.TimeStep], dvStorageSOC[b,ts] / ColdTESSizeKWHT
- for b in ColdTES)
- results["cold_tes_pct_soc_series"] = round.(value.(ColdTESsoc), digits=5)
+ @expression(REopt, ColdTESsoc[ts in p.TimeStep], sum(dvStorageSOC[b,ts] for b in p.ColdTES))
+ results["cold_tes_pct_soc_series"] = round.(value.(ColdTESsoc) / value(ColdTESSizeKWHT), digits=5)
else
results["cold_tes_size_kwht"] = 0.0
results["cold_tes_thermal_production_series"] = []
|
Update cloud.rst
rtd -> docs.studio.ml | @@ -4,8 +4,8 @@ Cloud computing
Studio can be configured to submit jobs to the cloud. Right
now, only Google Cloud is supported (CPU only), as well as Amazon EC2
(CPU and GPU). Specifically, once configured (see
-`here <http://studioml.readthedocs.io/en/latest/gcloud_setup.html>`__ for configuration instructions for Google
-Cloud, and `here <http://studioml.readthedocs.io/en/latest/ec2_setup.html>`__ for EC2) the command
+`here <http://docs.studio.ml/en/latest/gcloud_setup.html>`__ for configuration instructions for Google
+Cloud, and `here <http://docs.studio.ml/en/latest/ec2_setup.html>`__ for EC2) the command
::
|
Clarify note in Zoom docs
The existing note isn't clear to me, updated slightly | @@ -41,7 +41,7 @@ Zoom Setup Guide
.. important::
To generate an **API Key** and **API Secret** requires a `Pro, Business, Education, or API Zoom plan <https://zoom.us/pricing>`__.
- Only one paid account is required to generate an **API Key** and **API Secret**. The free Zoom plan can be used for other user accounts.
+ Only one paid account is required to generate an **API Key** and **API Secret**. Other team members can use a free Zoom plan to create and join Zoom meetings in Mattermost.
.. image:: ../images/zoom_api_key.png
|
Correct small typo
`te` -> `the` | @@ -6,7 +6,7 @@ This example demonstrates explicit setting of heartbeat and blocked connection t
Starting with RabbitMQ 3.5.5, the broker's default hearbeat timeout decreased from 580 seconds to 60 seconds. As a result, applications that perform lengthy processing in the same thread that also runs their Pika connection may experience unexpected dropped connections due to heartbeat timeout. Here, we specify an explicit lower bound for heartbeat timeout.
-When RabbitMQ broker is running out of certain resources, such as memory and disk space, it may block connections that are performing resource-consuming operations, such as publishing messages. Once a connection is blocked, RabbiMQ stops reading from that connection's socket, so no commands from the client will get through to te broker on that connection until the broker unblocks it. A blocked connection may last for an indefinite period of time, stalling the connection and possibly resulting in a hang (e.g., in BlockingConnection) until the connection is unblocked. Blocked Connectin Timeout is intended to interrupt (i.e., drop) a connection that has been blocked longer than the given timeout value.
+When RabbitMQ broker is running out of certain resources, such as memory and disk space, it may block connections that are performing resource-consuming operations, such as publishing messages. Once a connection is blocked, RabbiMQ stops reading from that connection's socket, so no commands from the client will get through to the broker on that connection until the broker unblocks it. A blocked connection may last for an indefinite period of time, stalling the connection and possibly resulting in a hang (e.g., in BlockingConnection) until the connection is unblocked. Blocked Connectin Timeout is intended to interrupt (i.e., drop) a connection that has been blocked longer than the given timeout value.
Example of configuring heartbeat and blocked-connection timeouts::
|
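The example the paragraph describes boils down to passing both timeouts through `pika.ConnectionParameters`; a minimal sketch (host, queue name, and timeout values are placeholders):

```python
import pika

params = pika.ConnectionParameters(
    host='localhost',                 # placeholder broker address
    heartbeat=600,                    # explicit heartbeat timeout, in seconds
    blocked_connection_timeout=300,   # give up if blocked longer than this
)

connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='test')
channel.basic_publish(exchange='', routing_key='test', body='hello')
connection.close()
```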
Change the error type to match sklearn.
Change the error type when trying to predict before fitting SVM to match sklearn.
Fixes
Authors:
- Artem M. Chirkin (https://github.com/achirkin)
Approvers:
- Dante Gama Dessavre (https://github.com/dantegd)
URL: | @@ -326,9 +326,9 @@ class SVMBase(Base,
@cuml.internals.api_base_return_array_skipall
def coef_(self):
if self._c_kernel != LINEAR:
- raise RuntimeError("coef_ is only available for linear kernels")
+ raise AttributeError("coef_ is only available for linear kernels")
if self._model is None:
- raise RuntimeError("Call fit before prediction")
+ raise AttributeError("Call fit before prediction")
if self._internal_coef_ is None:
self._internal_coef_ = self._calc_coef()
# Call the base class to perform the output conversion
|
Update bulkresizer.py file
Implement valid_path function in bulkresizer.py file
With this function bulkresizer will check if the given path from the user leads to a directory | @@ -6,6 +6,9 @@ from plugin import plugin
from colorama import Fore
+def valid_path(path):
+ return True if os.path.isdir(path) else False
+
def bulk_resizer(input_path, output_path, desired_size=32,
color=[0, 0, 0], rename=True):
img_no = 0
|
Leave in opt_in on instance config until the branch cut
Summary: To prevent unneeded dagster.yaml breakage
Test Plan: BK, load dagit
Reviewers: alangenfeld | @@ -55,4 +55,5 @@ def dagster_instance_config_schema():
"run_coordinator": config_field_for_configurable_class(),
"run_launcher": config_field_for_configurable_class(),
"telemetry": Field({"enabled": Field(Bool, is_required=False)}),
+ "opt_in": Field({"local_servers": Field(Bool, is_required=False)}),
}
|
Only log a warning when pre-validation takes more than 10 seconds.
Pre-validation can take a while since it's validating many blocks at once, including clvm and VDFs. | @@ -909,7 +909,7 @@ class FullNode:
List[PreValidationResult]
] = await self.blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate, {}, wp_summaries=wp_summaries)
pre_validate_end = time.time()
- if pre_validate_end - pre_validate_start > 1:
+ if pre_validate_end - pre_validate_start > 10:
self.log.warning(f"Block pre-validation time: {pre_validate_end - pre_validate_start:0.2f} seconds")
else:
self.log.debug(f"Block pre-validation time: {pre_validate_end - pre_validate_start:0.2f} seconds")
|
Drop unused function & attribute
Made unused in
I meant to drop it then, but forgot | @@ -81,7 +81,6 @@ class ResourceManager(object):
def __init__(self, sm=None):
self.sm = sm or get_storage_manager()
- self.task_mapping = _create_task_mapping()
def list_executions(self, include=None, is_include_system_workflows=False,
filters=None, pagination=None, sort=None,
@@ -2664,19 +2663,6 @@ def get_resource_manager(sm=None):
ResourceManager())
-def _create_task_mapping():
- mapping = {
- 'create_snapshot': 'cloudify_system_workflows.snapshot.create',
- 'restore_snapshot': 'cloudify_system_workflows.snapshot.restore',
- 'uninstall_plugin': 'cloudify_system_workflows.plugins.uninstall',
- 'create_deployment_environment':
- 'cloudify_system_workflows.deployment_environment.create',
- 'delete_deployment_environment':
- 'cloudify_system_workflows.deployment_environment.delete'
- }
- return mapping
-
-
def create_secret(key, secret, tenant):
sm = get_storage_manager()
timestamp = utils.get_formatted_timestamp()
|
Support parameters in PowerShell
Update the PowerShell alias so it passes thefuck parameters (e.g. `-y` or `-r`). | @@ -6,7 +6,7 @@ class Powershell(Generic):
return 'function ' + alias_name + ' {\n' \
' $history = (Get-History -Count 1).CommandLine;\n' \
' if (-not [string]::IsNullOrWhiteSpace($history)) {\n' \
- ' $fuck = $(thefuck $history);\n' \
+ ' $fuck = $(thefuck $args $history);\n' \
' if (-not [string]::IsNullOrWhiteSpace($fuck)) {\n' \
' if ($fuck.StartsWith("echo")) { $fuck = $fuck.Substring(5); }\n' \
' else { iex "$fuck"; }\n' \
|
Update fonduer deps
With the latest release of matplotlib 3.0.0, we need to add some new
system dependencies. | @@ -15,11 +15,12 @@ For OS X using homebrew_::
$ brew install poppler
$ brew install postgresql
+ $ brew install libpng freetype pkg-config
On Debian-based distros::
$ sudo apt update
- $ sudo apt install libxml2-dev libxslt-dev python3-dev
+ $ sudo apt install libxml2-dev libxslt-dev python3-dev build-dep python-matplotlib
$ sudo apt install poppler-utils
$ sudo apt install postgresql
|
Fix Senlin actions not working
We can't run Senlin actions because of an error when
initializing the Senlin client. We need another way to initialize
the client so it works with both cron triggers and manual runs.
Closes-Bug: | @@ -18,6 +18,7 @@ from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
+from keystoneauth1.identity import v3 as ks_identity_v3
from keystoneauth1 import session as ks_session
from keystoneauth1.token_endpoint import Token
from keystoneclient import httpclient
@@ -846,9 +847,30 @@ class SenlinAction(base.OpenStackAction):
keystone_endpoint = keystone_utils.get_keystone_endpoint()
senlin_endpoint = self.get_service_endpoint()
+ if context.is_trust_scoped and keystone_utils.is_token_trust_scoped(
+ context.auth_token):
+ if context.trust_id is None:
+ raise Exception(
+ "'trust_id' must be provided in the admin context."
+ )
+
+ auth = ks_identity_v3.Password(
+ auth_url=keystone_endpoint.url,
+ trust_id=context.trust_id,
+ username=CONF.keystone_authtoken.username,
+ password=CONF.keystone_authtoken.password,
+ user_domain_name=CONF.keystone_authtoken.user_domain_name
+ )
+ else:
+ auth = ks_identity_v3.Token(
+ auth_url=keystone_endpoint.url,
+ token=context.auth_token,
+ project_id=context.project_id
+ )
+
return self._get_client_class()(
endpoint_url=senlin_endpoint.url,
- token=context.auth_token,
+ session=ks_session.Session(auth=auth),
tenant_id=context.project_id,
region_name=senlin_endpoint.region,
auth_url=keystone_endpoint.url,
|
Catch and log unhandled consensus driver exception
Log any unhandled exceptions from the python consensus sdk driver. | @@ -67,6 +67,7 @@ class ZmqDriver(Driver):
driver_thread.join()
def _driver_loop(self):
+ try:
while True:
if self._exit:
self._engine.stop()
@@ -80,6 +81,8 @@ class ZmqDriver(Driver):
result = self._process(message)
self._updates.put(result)
+ except Exception: # pylint: disable=broad-except
+ LOGGER.exception("Uncaught driver exception")
def stop(self):
self._exit = True
|
Updated ramdisk API docstrings
Added information about raised exceptions and conditions
under which they are raised. | @@ -93,7 +93,10 @@ class LookupController(rest.RestController):
:param node_uuid: UUID of a node.
:raises: NotFound if requested API version does not allow this
endpoint.
- :raises: NotFound if suitable node was not found.
+ :raises: NotFound if suitable node was not found or node's provision
+ state is not allowed for the lookup.
+ :raises: IncompleteLookup if neither node UUID nor any valid MAC
+ address was provided.
"""
if not api_utils.allow_ramdisk_endpoints():
raise exception.NotFound()
@@ -156,6 +159,11 @@ class HeartbeatController(rest.RestController):
:param node_ident: the UUID or logical name of a node.
:param callback_url: the URL to reach back to the ramdisk.
+ :raises: NodeNotFound if node with provided UUID or name was not found.
+ :raises: InvalidUuidOrName if node_ident is not valid name or UUID.
+ :raises: NoValidHost if RPC topic for node could not be retrieved.
+ :raises: NotFound if requested API version does not allow this
+ endpoint.
"""
if not api_utils.allow_ramdisk_endpoints():
raise exception.NotFound()
|
Update Sphinx dependency version and check RTD performance
Pinned Sphinx to a more recent version (==4.2.0), thanks to Joshua Newton's keen observations. | @@ -39,7 +39,7 @@ setup(
extras_require={
'docs': [ # pin sphinx to match what RTD uses:
# https://github.com/readthedocs/readthedocs.org/blob/ecac31de54bbb2c100f933e86eb22b0f4389ba84/requirements/pip.txt#L16
- 'sphinx<2',
+ 'sphinx==4.2.0',
'sphinx-rtd-theme<0.5',
],
'dev': ["pre-commit>=2.10.0"]
|
Update isolated_labels.py
fix iso label silhouette | import pandas as pd
from sklearn.metrics import f1_score
+from sklearn.metrics import silhouette_samples
from .clustering import cluster_optimal_resolution
from .silhouette import silhouette
@@ -227,7 +228,9 @@ def score_isolated_label(
else:
# AWS score between isolated label vs rest
adata.obs[iso_label_key] = adata.obs[label_key] == isolated_label
- score = silhouette(adata, iso_label_key, embed)
+ adata.obs['silhouette_temp'] = silhouette_samples(adata.obsm[embed], adata.obs[iso_label_key])
+
+ score = adata.obs[adata.obs[iso_label_key]].silhouette_temp.mean()
if verbose:
print(f"{isolated_label}: {score}")
|
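The per-sample scoring introduced by the patch can be reproduced on a toy array; a minimal sketch with synthetic data (plain NumPy arrays rather than the AnnData objects above):

```python
import numpy as np
from sklearn.metrics import silhouette_samples

rng = np.random.default_rng(0)
X = np.vstack([rng.normal(0, 1, (50, 8)), rng.normal(5, 1, (10, 8))])
is_isolated = np.array([False] * 50 + [True] * 10)  # "isolated label vs rest"

# Per-observation silhouette widths for the binary labelling...
widths = silhouette_samples(X, is_isolated)
# ...then average only over the isolated group, mirroring the patch.
score = widths[is_isolated].mean()
print(round(float(score), 3))
```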
Caps the length of the circuit string in dataset-loading functions.
When there are really big circuits it's annoying to have warnings
printed that are super long, so this truncates what is printed to
40 characters (adding an ellipsis when needed). | @@ -365,10 +365,14 @@ class StdInputParser(object):
self._fillDataCountDict(countDict, fillInfo, valueList)
if all([(abs(v) < 1e-9) for v in list(countDict.values())]):
if ignoreZeroCountLines:
- if not bBad: warnings.append("Dataline for circuit '%s' has zero counts and will be ignored" % circuitStr)
+ if not bBad:
+ s = circuitStr if len(circuitStr) < 40 else circuitStr[0:37] + "..."
+ warnings.append("Dataline for circuit '%s' has zero counts and will be ignored" % s)
continue # skip lines in dataset file with zero counts (no experiments done)
else:
- if not bBad: warnings.append("Dataline for circuit '%s' has zero counts." % circuitStr)
+ if not bBad:
+ s = circuitStr if len(circuitStr) < 40 else circuitStr[0:37] + "..."
+ warnings.append("Dataline for circuit '%s' has zero counts." % s)
if circuitLbls is None: circuitLbls = "auto" # if line labels weren't given just use defaults
circuit = _objs.Circuit(circuitTuple, stringrep=circuitStr,
@@ -584,10 +588,14 @@ class StdInputParser(object):
bSkip = False
if all([(abs(v) < 1e-9) for cDict in dsCountDicts.values() for v in cDict.values()]):
if ignoreZeroCountLines:
- if not bBad: warnings.append("Dataline for circuit '%s' has zero counts and will be ignored" % circuitStr)
+ if not bBad:
+ s = circuitStr if len(circuitStr) < 40 else circuitStr[0:37] + "..."
+ warnings.append("Dataline for circuit '%s' has zero counts and will be ignored" % s)
bSkip = True # skip lines in dataset file with zero counts (no experiments done)
else:
- if not bBad: warnings.append("Dataline for circuit '%s' has zero counts." % circuitStr)
+ if not bBad:
+ s = circuitStr if len(circuitStr) < 40 else circuitStr[0:37] + "..."
+ warnings.append("Dataline for circuit '%s' has zero counts." % s)
if not bSkip:
for dsLabel, countDict in dsCountDicts.items():
|
[AIR] Fix Categorizer.__repr__ attribute error
__repr__ fails because stats_ attribute is not assigned until _fit is called. | @@ -168,7 +168,8 @@ class Categorizer(Preprocessor):
return df
def __repr__(self):
- return f"<Categorizer columns={self.columns} stats={self.stats_}>"
+ stats = getattr(self, "stats_", None)
+ return f"<Categorizer columns={self.columns} stats={stats}>"
def _get_unique_value_indices(
|
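The fix is the usual pattern of guarding `__repr__` against attributes that only exist after `fit`; a tiny standalone sketch (a stand-in class, not Ray's actual preprocessor):

```python
class Categorizer:
    """Minimal stand-in for illustration only."""

    def __init__(self, columns):
        self.columns = columns

    def fit(self, stats):
        self.stats_ = stats   # trailing underscore: set only during fitting
        return self

    def __repr__(self):
        # getattr with a default keeps repr() safe before fit() is called.
        stats = getattr(self, "stats_", None)
        return f"<Categorizer columns={self.columns} stats={stats}>"

print(Categorizer(["a"]))          # <Categorizer columns=['a'] stats=None>
print(Categorizer(["a"]).fit({}))  # <Categorizer columns=['a'] stats={}>
```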
Update astar.py
* Update astar.py
Improved comments added punctuations.
* Update astar.py
* Update machine_learning/astar.py
* Update astar.py | """
-The A* algorithm combines features of uniform-cost search and pure
-heuristic search to efficiently compute optimal solutions.
-A* algorithm is a best-first search algorithm in which the cost
-associated with a node is f(n) = g(n) + h(n),
-where g(n) is the cost of the path from the initial state to node n and
-h(n) is the heuristic estimate or the cost or a path
-from node n to a goal.A* algorithm introduces a heuristic into a
-regular graph-searching algorithm,
-essentially planning ahead at each step so a more optimal decision
-is made.A* also known as the algorithm with brains
+The A* algorithm combines features of uniform-cost search and pure heuristic search to
+efficiently compute optimal solutions.
+
+The A* algorithm is a best-first search algorithm in which the cost associated with a
+node is f(n) = g(n) + h(n), where g(n) is the cost of the path from the initial state to
+node n and h(n) is the heuristic estimate or the cost or a path from node n to a goal.
+
+The A* algorithm introduces a heuristic into a regular graph-searching algorithm,
+essentially planning ahead at each step so a more optimal decision is made. For this
+reason, A* is known as an algorithm with brains.
+
+https://en.wikipedia.org/wiki/A*_search_algorithm
"""
import numpy as np
class Cell:
"""
- Class cell represents a cell in the world which have the property
- position : The position of the represented by tupleof x and y
- coordinates initially set to (0,0)
- parent : This contains the parent cell object which we visited
- before arrinving this cell
- g,h,f : The parameters for constructing the heuristic function
- which can be any function. for simplicity used line
- distance
+ Class cell represents a cell in the world which have the properties:
+ position: represented by tuple of x and y coordinates initially set to (0,0).
+ parent: Contains the parent cell object visited before we arrived at this cell.
+ g, h, f: Parameters used when calling our heuristic function.
"""
def __init__(self):
self.position = (0, 0)
self.parent = None
-
self.g = 0
self.h = 0
self.f = 0
"""
- overrides equals method because otherwise cell assign will give
- wrong results
+ Overrides equals method because otherwise cell assign will give
+ wrong results.
"""
def __eq__(self, cell):
@@ -48,8 +45,8 @@ class Cell:
class Gridworld:
"""
Gridworld class represents the external world here a grid M*M
- matrix
- world_size: create a numpy array with the given world_size default is 5
+ matrix.
+ world_size: create a numpy array with the given world_size default is 5.
"""
def __init__(self, world_size=(5, 5)):
@@ -90,10 +87,10 @@ class Gridworld:
def astar(world, start, goal):
"""
- Implementation of a start algorithm
- world : Object of the world object
- start : Object of the cell as start position
- stop : Object of the cell as goal position
+ Implementation of a start algorithm.
+ world : Object of the world object.
+ start : Object of the cell as start position.
+ stop : Object of the cell as goal position.
>>> p = Gridworld()
>>> start = Cell()
@@ -137,14 +134,14 @@ def astar(world, start, goal):
if __name__ == "__main__":
world = Gridworld()
- # stat position and Goal
+ # Start position and goal
start = Cell()
start.position = (0, 0)
goal = Cell()
goal.position = (4, 4)
print(f"path from {start.position} to {goal.position}")
s = astar(world, start, goal)
- # Just for visual reasons
+ # Just for visual reasons.
for i in s:
world.w[i] = 1
print(world.w)
|
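As a concrete reading of the docstring's cost function f(n) = g(n) + h(n), a short sketch scoring one grid cell (the Manhattan heuristic is an illustrative choice, not necessarily the one astar.py uses):

```python
def manhattan(cell, goal):
    """Illustrative admissible grid heuristic: h(n)."""
    return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1])

def f_score(cell, g_cost, goal):
    """f(n) = g(n) + h(n): cost so far plus estimated cost to the goal."""
    return g_cost + manhattan(cell, goal)

# Starting at (0, 0) with goal (4, 4): after one step to (1, 0),
# g = 1 and h = |1 - 4| + |0 - 4| = 7, so f = 8.
assert f_score((1, 0), 1, (4, 4)) == 8
```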
Event for lures
Added lure support | @@ -18,6 +18,7 @@ import urllib2
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
+from pgoapi import RpcApi
from pgoapi.utilities import f2i, get_cell_ids
from s2sphere import Cell, CellId, LatLng
@@ -431,6 +432,10 @@ class PokemonGoBot(object):
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event('enough_ultraballs')
+ self.event_manager.register_event('lure_success')
+ self.event_manager.register_event('lure_failed')
+ self.event_manager.register_event('lure_not_enough')
+ self.event_manager.register_event('lure_info')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
@@ -480,7 +485,9 @@ class PokemonGoBot(object):
)
)
self.event_manager.register_event('pokemon_not_in_range')
+ self.event_manager.register_event('pokemon_blocked_by_anticheat')
self.event_manager.register_event('pokemon_inventory_full')
+ self.event_manager.register_event('pokemon_encounter_error')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
@@ -1301,6 +1308,13 @@ class PokemonGoBot(object):
)
sleep(5) # Pause to allow user to see warning
+ '''
+ plat = RpcApi.request.platform_requests(type=5)
+
+ if plat:
+ print('Response dictionary: \n\r{}'.format(json.dumps(plat, indent=2)))
+ '''
+
self.logger.info('')
def _print_list_pokemon(self):
|
[cleanup] Avoid deeply nested control flow in imagerecat.py
Problem reported by codeclimate.com | @@ -80,8 +80,9 @@ def categorizeImages(generator, onlyFilter, onlyUncat):
"""
for page in generator:
- if page.exists() and (page.namespace() == 6) and \
- (not page.isRedirectPage()):
+ if not page.exists() or page.namespace() != 6 or page.isRedirectPage():
+ continue
+
imagepage = pywikibot.FilePage(page.site, page.title())
pywikibot.output('Working on ' + imagepage.title())
@@ -89,7 +90,8 @@ def categorizeImages(generator, onlyFilter, onlyUncat):
imagepage.site, 'Template:Uncategorized')
in imagepage.templates()):
pywikibot.output('No Uncategorized template found')
- else:
+ continue
+
currentCats = getCurrentCats(imagepage)
if onlyFilter:
commonshelperCats = []
@@ -100,11 +102,10 @@ def categorizeImages(generator, onlyFilter, onlyUncat):
galleries) = getCommonshelperCats(imagepage)
newcats = applyAllFilters(commonshelperCats + currentCats)
- if len(newcats) > 0 and not(set(currentCats) == set(newcats)):
+ if newcats and set(currentCats) != set(newcats):
for cat in newcats:
pywikibot.output(' Found new cat: ' + cat)
- saveImagePage(imagepage, newcats, usage, galleries,
- onlyFilter)
+ saveImagePage(imagepage, newcats, usage, galleries, onlyFilter)
def getCurrentCats(imagepage):
|
Fix FastRL
Summary: Fast RL model manager names need to be updated after our refactor | @@ -97,9 +97,10 @@ class DenseNormalization:
for k in self.keys:
value, presence = data[k]
- data[k] = self._preprocessor(
- value.to(self.device), presence.to(self.device)
- )
+ value, presence = value.to(self.device), presence.to(self.device)
+ presence[torch.isnan(value)] = 0
+ value[torch.isnan(value)] = 0
+ data[k] = self._preprocessor(value, presence)
return data
|
Update edit.py
Tweak user.paste | @@ -4,7 +4,7 @@ ctx = Context()
mod = Module()
-def get_selected_text(default=None):
+def get_selected_text():
try:
with clip.capture() as s:
actions.edit.copy()
@@ -24,6 +24,9 @@ class edit_actions:
class Actions:
def paste(text: str):
"""Pastes text and preserves clipboard"""
+
with clip.revert():
clip.set(text)
+ # sleep(0.1)
actions.edit.paste()
+ actions.sleep("100ms")
|
Update Makefile
Summary: After need to install Airflow manually or clean installs won't work
Test Plan: caught in buildkite image builds
Reviewers: #ft, prha | @@ -22,7 +22,10 @@ install_dev_python_modules:
# On machines with less memory, pyspark install will fail... see:
# https://stackoverflow.com/a/31526029/11295366
- pip --no-cache-dir install pyspark==2.4.0 $(QUIET)
+ pip --no-cache-dir install pyspark==2.4.4 $(QUIET)
+
+# Need to manually install Airflow because we no longer explicitly depend on it
+ pip install apache-airflow $(QUIET)
# dagster-pandas must come before dasgtermill because of dependency
# See https://github.com/dagster-io/dagster/issues/1485
|
Update CONTRIBUTING.md
punctuation | @@ -197,7 +197,7 @@ Finally, if your contribution is accepted, the Rasa team member will merge it to
#### 9. Share your contributions with the world!
-Contributing to open source can take a lot of time and effort so you should be proud of the great work you have done!
+Contributing to open source can take a lot of time and effort, so you should be proud of the great work you have done!
Let the world know that you have become a contributor to the Rasa open source project by posting about it on your social media (make sure to tag @RasaHQ as well), mention the contribution on your CV and get ready to get some really cool [Rasa contributor swag](https://blog.rasa.com/announcing-the-rasa-contributor-program/)!
#### 10. Non-code contributions
|
Update phishtank.py
black formatted | @@ -15,7 +15,8 @@ class PhishTank(Feed):
default_values = {
"frequency": timedelta(hours=4),
"name": "PhishTank",
- "source": "http://data.phishtank.com/data/%s/online-valid.csv" % yeti_config.get("phishtank", "key"),
+ "source": "http://data.phishtank.com/data/%s/online-valid.csv"
+ % yeti_config.get("phishtank", "key"),
"description": "PhishTank community feed. Contains a list of possible Phishing URLs.",
}
|
Removing unnecessary lines in HybridAlluvium component
These float conversions are superseded by code just below this block
that tests for different possible input types (e.g. field name as a
string). | @@ -225,14 +225,10 @@ class HybridAlluvium(Component):
#store other constants
self.m_sp = float(m_sp)
self.n_sp = float(n_sp)
- self.K_sed = float(K_sed)
- self.K_br = float(K_br)
self.F_f = float(F_f)
self.phi = float(phi)
self.H_star = float(H_star)
self.v_s = float(v_s)
- self.sp_crit_sed = float(sp_crit_sed)
- self.sp_crit_br = float(sp_crit_br)
#K's and critical values can be floats, grid fields, or arrays
if type(K_sed) is str:
|
Remove duplicate
`Transport for India` is already included in the `Open Government, India` link in the Government section | @@ -1444,7 +1444,6 @@ API | Description | Auth | HTTPS | CORS |
| [Transport for Grenoble, France](https://www.metromobilite.fr/pages/opendata/OpenDataApi.html) | Grenoble public transport | No | No | No |
| [Transport for Hessen, Germany](https://opendata.rmv.de/site/start.html) | RMV API (Public Transport in Hessen) | No | Yes | Unknown |
| [Transport for Honolulu, US](http://hea.thebus.org/api_info.asp) | Honolulu Transportation Information | `apiKey` | No | Unknown |
-| [Transport for India](https://data.gov.in/sector/transport) | India Public Transport API | `apiKey` | Yes | Unknown |
| [Transport for Lisbon, Portugal](https://emel.city-platform.com/opendata/) | Data about buses routes, parking and traffic | `apiKey` | Yes | Unknown |
| [Transport for London, England](https://api.tfl.gov.uk) | TfL API | `apiKey` | Yes | Unknown |
| [Transport for Manchester, England](https://developer.tfgm.com/) | TfGM transport network data | `apiKey` | Yes | No |
|
Update CovidDatasets.py
Fail anytime a file is missing | @@ -303,6 +303,11 @@ class JHUDataset(Dataset):
_logger.info('Received a 404 for date {}. Ending iteration.'.format(snapshot_date))
break
raise
+ except FileNotFoundError:
+ # assuming we're pointing to a locally cached repository
+ _logger.info('File not found for date {}. Ending iteration.'.format(snapshot_date))
+ break
+ raise
df = df.rename(columns=self._fieldname_map)
snapshot_date_as_datetime = datetime.datetime.combine(snapshot_date, datetime.datetime.min.time())
|
Fixed bug of Masked Arrays and bootstrapping code
This bug was causing a fatal error when bootstrapping. Changed numpy.ma.append to numpy.append because the append was done on a non-masked array and the masked array is created later with the fill value. | @@ -455,7 +455,7 @@ def get_resampled_arrs(dt_arr, values_arr, year_to_eliminate, year_to_duplicate)
# we add slices to duplicate in the end
dt_arr_result = numpy.append(dt_arr_subsetted, dt_arr_year_to_duplicate)
- values_arr_result = numpy.ma.append(values_arr_subsetted, values_arr_year_to_duplicate, axis=0)
+ values_arr_result = numpy.append(values_arr_subsetted, values_arr_year_to_duplicate, axis=0)
return (dt_arr_result, values_arr_result)
|
Github Actions CD
Github Actions readiness for CD | @@ -3,7 +3,6 @@ on:
release:
types: [created]
-
jobs:
release:
runs-on: ubuntu-latest
@@ -38,11 +37,13 @@ jobs:
- name: Build
run: |
- python setup.py sdist bdist_wheel
- pip install dist/terraform_compliance-${{ steps.strip-tag.outputs.tag }}-*.whl
+ python setup.py sdist bdist_wheel && \
+ ls -al dist/* && \
+ pip install --force-reinstall dist/terraform_compliance-${{ steps.strip-tag.outputs.tag }}-*.whl
- - name: Run integration tests
- run: python tests/functional/run_functional_tests.py
+ - name: Integration Tests
+ run: |
+ python tests/functional/run_functional_tests.py
- name: Publish to PyPI
env:
@@ -56,13 +57,17 @@ jobs:
DOCKER_HUB_USER: ${{ secrets.DOCKER_HUB_USER }}
DOCKER_HUB_PASSWORD: ${{ secrets.DOCKER_HUB_PASSWORD }}
run : |
- - echo "Getting the latest terraform version from Hashicorp"
- - echo "export LATEST_TERRAFORM_VERSION=$(curl https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r .current_version)" > terraform_version.sh
- - source terraform_version.sh
- - if [ -z "$LATEST_TERRAFORM_VERSION" ]; then echo "Can not identify latest terraform version!"; travis_terminate 1; fi
- - docker build --compress --no-cache -t "$IMAGE_NAME" --build-arg VERSION=$RELEASE_VERSION --build-arg LATEST_TERRAFORM_VERSION=$LATEST_TERRAFORM_VERSION --build-arg HASHICORP_PGP_KEY="$(cat hashicorp-pgp-key.pub)" . || travis_terminate 1
- - docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD"
- - docker tag "$IMAGE_NAME" $IMAGE_NAME:latest
- - docker tag "$IMAGE_NAME" "$IMAGE_NAME":"$RELEASE_VERSION"
- - docker push "$IMAGE_NAME":latest
- - docker push "$IMAGE_NAME":"$RELEASE_VERSION"
+ sleep 15
+ echo "Getting the latest terraform version from Hashicorp"
+ echo "export LATEST_TERRAFORM_VERSION=$(curl https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r .current_version)" > terraform_version.sh
+ source terraform_version.sh
+ if [ -z "$LATEST_TERRAFORM_VERSION" ]; then echo "Can not identify latest terraform version!"; travis_terminate 1; fi
+ docker build --compress --no-cache -t "$IMAGE_NAME" \
+ --build-arg VERSION=${{ steps.strip-tag.outputs.tag }} \
+ --build-arg LATEST_TERRAFORM_VERSION=$LATEST_TERRAFORM_VERSION \
+ --build-arg HASHICORP_PGP_KEY="$(cat hashicorp-pgp-key.pub)" .
+ docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD"
+ docker tag "$IMAGE_NAME" $IMAGE_NAME:latest
+ docker tag "$IMAGE_NAME" "$IMAGE_NAME":"${{ steps.strip-tag.outputs.tag }}"
+ docker push "$IMAGE_NAME":latest
+ docker push "$IMAGE_NAME":"${{ steps.strip-tag.outputs.tag }}"
|
Re-apply patch to application.py
It did not make it in the merge. Original commit says:
Do not initialize gi in gaphor main
We want to do so for as long as possible without requiring toolkit logic.
import inspect
import importlib_metadata
-from gi.repository import Gio, Gtk
-
from gaphor.event import ServiceInitializedEvent, ServiceShutdownEvent
from gaphor.abc import Service
@@ -106,6 +104,12 @@ class _Application:
The file_manager service is used here to load a Gaphor model if one was
specified on the command line."""
+ import gi
+
+ gi.require_version("Gtk", "3.0")
+
+ from gi.repository import Gio, Gtk
+
app = Gtk.Application(
application_id="org.gaphor.Gaphor", flags=Gio.ApplicationFlags.FLAGS_NONE
)
|
Populate rename text
[#OSF-7151] | @@ -1701,12 +1701,14 @@ var FGInput = {
var placeholder = args.placeholder || '';
var id = args.id || '';
var helpTextId = args.helpTextId || '';
+ var oninput = args.oninput || noop;
var onkeypress = args.onkeypress || noop;
var value = args.value ? '[value="' + args.value + '"]' : '';
return m('span', [
m('input' + value, {
'id' : id,
className: 'pull-right form-control' + extraCSS,
+ oninput: oninput,
onkeypress: onkeypress,
'data-toggle': tooltipText ? 'tooltip' : '',
'title': tooltipText,
@@ -1922,6 +1924,8 @@ var FGToolbar = {
self.createFolder = function(event){
_createFolder.call(self.tb, event, self.dismissToolbar, self.helpText);
};
+ self.renameId = m.prop('');
+ self.renameData = m.prop('');
},
view : function(ctrl) {
var templates = {};
@@ -1971,15 +1975,23 @@ var FGToolbar = {
)
)
];
+
+ if(typeof item !== 'undefined' && item.id !== ctrl.renameId()){
+ ctrl.renameData(item.data.name);
+ ctrl.renameId(item.id);
+ }
+
templates[toolbarModes.RENAME] = [
m('.col-xs-9',
m.component(FGInput, {
+ oninput: m.withAttr('value', ctrl.renameData),
onkeypress: function (event) {
if (ctrl.tb.pressedKey === ENTER_KEY) {
_renameEvent.call(ctrl.tb);
}
},
id: 'renameInput',
+ value: ctrl.renameData(),
helpTextId: 'renameHelpText',
placeholder: 'Enter name',
}, ctrl.helpText())
|
[modules/pacman] Update url filtering
Arch mirrors can also use the rsync protocol
count = len(repos)*[0]
for line in result.splitlines():
- if line.startswith("http"):
+ if line.startswith(("http", "rsync")):
for i in range(len(repos)-1):
if "/" + repos[i] + "/" in line:
count[i] += 1
|
[stream-refactor] mark py24 as allow-fail
This needs a day or two's worth of soaking to fix all the remaining nits | @@ -29,6 +29,11 @@ script:
# newest->oldest in various configuartions.
matrix:
+ allow_failures:
+ # Python 2.4 tests are still unreliable
+ - language: c
+ env: MODE=mitogen_py24 DISTRO=centos5
+
include:
# Mitogen tests.
# 2.4 -> 2.4
|
Render node : Protect `hash()` and `execute()` methods
These have been protected on the base class for some time - TaskPlug is responsible for providing the public interface. | @@ -81,9 +81,6 @@ class GAFFERSCENE_API Render : public GafferDispatch::TaskNode
ScenePlug *outPlug();
const ScenePlug *outPlug() const;
- IECore::MurmurHash hash( const Gaffer::Context *context ) const override;
- void execute() const override;
-
protected :
// Constructor for derived classes which wish to hardcode the renderer type. Perhaps
@@ -92,6 +89,9 @@ class GAFFERSCENE_API Render : public GafferDispatch::TaskNode
// loading of the module which registers the required renderer type.
Render( const IECore::InternedString &rendererType, const std::string &name );
+ IECore::MurmurHash hash( const Gaffer::Context *context ) const override;
+ void execute() const override;
+
private :
ScenePlug *adaptedInPlug();
@@ -99,6 +99,9 @@ class GAFFERSCENE_API Render : public GafferDispatch::TaskNode
static size_t g_firstPlugIndex;
+ // Friendship for the bindings
+ friend struct GafferDispatchBindings::Detail::TaskNodeAccessor;
+
};
IE_CORE_DECLAREPTR( Render );
|
Remove redundant method parameter.
``ignore_cert_errors`` is passed to ``Chrome`` via ``Browser`` via
``BrowserPool`` here:
it is not doing anything in ``Browser.browse_page``.
return self.websock_url is not None
def browse_page(
- self, page_url, ignore_cert_errors=False, extra_headers=None,
+ self, page_url, extra_headers=None,
user_agent=None, behavior_parameters=None,
on_request=None, on_response=None, on_screenshot=None,
username=None, password=None, hashtags=None,
|
Fixed a few problems with pathlib2 stub
It was using a type alias with a forward reference and a bunch of unused imports.
-import sys
-from _typeshed import OpenBinaryMode, OpenBinaryModeReading, OpenBinaryModeUpdating, OpenBinaryModeWriting, OpenTextMode
-from io import BufferedRandom, BufferedReader, BufferedWriter, FileIO, TextIOWrapper
from types import TracebackType
-from typing import IO, Any, BinaryIO, Generator, List, Optional, Sequence, Text, TextIO, Tuple, Type, TypeVar, Union, overload
-from typing_extensions import Literal
+from typing import IO, Any, Generator, List, Optional, Sequence, Text, Tuple, Type, TypeVar, Union
_P = TypeVar("_P", bound=PurePath)
_PurePathBase = object
-_PathLike = PurePath
class PurePath(_PurePathBase):
parts: Tuple[str, ...]
@@ -20,14 +15,14 @@ class PurePath(_PurePathBase):
suffix: str
suffixes: List[str]
stem: str
- def __new__(cls: Type[_P], *args: Union[str, _PathLike]) -> _P: ...
+ def __new__(cls: Type[_P], *args: Union[str, PurePath]) -> _P: ...
def __hash__(self) -> int: ...
def __lt__(self, other: PurePath) -> bool: ...
def __le__(self, other: PurePath) -> bool: ...
def __gt__(self, other: PurePath) -> bool: ...
def __ge__(self, other: PurePath) -> bool: ...
- def __truediv__(self: _P, key: Union[str, _PathLike]) -> _P: ...
- def __rtruediv__(self: _P, key: Union[str, _PathLike]) -> _P: ...
+ def __truediv__(self: _P, key: Union[str, PurePath]) -> _P: ...
+ def __rtruediv__(self: _P, key: Union[str, PurePath]) -> _P: ...
def __div__(self: _P, key: Union[str, PurePath]) -> _P: ...
def __bytes__(self) -> bytes: ...
def as_posix(self) -> str: ...
@@ -35,10 +30,10 @@ class PurePath(_PurePathBase):
def is_absolute(self) -> bool: ...
def is_reserved(self) -> bool: ...
def match(self, path_pattern: str) -> bool: ...
- def relative_to(self: _P, *other: Union[str, _PathLike]) -> _P: ...
+ def relative_to(self: _P, *other: Union[str, PurePath]) -> _P: ...
def with_name(self: _P, name: str) -> _P: ...
def with_suffix(self: _P, suffix: str) -> _P: ...
- def joinpath(self: _P, *other: Union[str, _PathLike]) -> _P: ...
+ def joinpath(self: _P, *other: Union[str, PurePath]) -> _P: ...
@property
def parents(self: _P) -> Sequence[_P]: ...
@property
@@ -48,7 +43,7 @@ class PurePosixPath(PurePath): ...
class PureWindowsPath(PurePath): ...
class Path(PurePath):
- def __new__(cls: Type[_P], *args: Union[str, _PathLike], **kwargs: Any) -> _P: ...
+ def __new__(cls: Type[_P], *args: Union[str, PurePath], **kwargs: Any) -> _P: ...
def __enter__(self) -> Path: ...
def __exit__(
self, exc_type: Optional[Type[BaseException]], exc_value: Optional[BaseException], traceback: Optional[TracebackType]
|
Update custom-server.rst
Relax the item about public IP address?
Currently the install does **not** report "Done!" | @@ -36,14 +36,15 @@ Step 1: Installing The Littlest JupyterHub
.. code-block:: bash
export http_proxy=<your_proxy>
- export https_proxy=<your_proxy>
-#. Some requests will fail if your certs are self-signed:
+#. Some requests will fail if your certs are self-signed. Copy the text below and paste it
+ into the terminal after replacing ``</directory/with/your/ssl/certificates>``
+ with the **path of the directory containing your ssl certificates** (don't include the brackets!).:
.. code::
- export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates
- sudo npm config set strict-ssl false
+ export REQUESTS_CA_BUNDLE=</directory/with/your/ssl/certificates>
+ sudo npm config set cafile </directory/with/your/ssl/certificates>
#. Make sure you have ``Python3``, ``curl`` and ``git`` installed. On latest Ubuntu you can get all of these with:
|
update VariableTimeStepper for current AdaptiveTimeSteppingSolver
allow setting current step in .set_step()
update .advance(), .iter_from_current()
new .iter_from() | @@ -164,14 +164,16 @@ class VariableTimeStepper(TimeStepper):
if step is None:
step = 0
- if step > 0:
- raise ValueError('cannot set step > 0 in VariableTimeStepper!')
+ if (step > 0) and (step != self.step):
+ msg = 'cannot set step != self.step or 0 in VariableTimeStepper!'
+ raise ValueError(msg)
+ if step == 0:
self.step = 0
self.time = self.t0
self.nt = 0.0
- self.dts = []
- self.times = []
+ self.dts = [self.dt]
+ self.times = [self.time]
self.n_step = 1
def get_default_time_step(self):
@@ -190,17 +192,22 @@ class VariableTimeStepper(TimeStepper):
self.time += self.dt
self.normalize_time()
+ self.times.append(self.time)
+ self.dts.append(self.dt)
+
self.n_step = self.step + 1
+ def iter_from(self, step):
+ self.set_step(step=step)
+
+ return self.iter_from_current()
+
def iter_from_current(self):
"""
ts.step, ts.time is consistent with step, time returned here
ts.nt is normalized time in [0, 1].
"""
while 1:
- self.times.append(self.time)
- self.dts.append(self.dt)
-
yield self.step, self.time
if self.nt >= 1.0:
|
Fix variable name
and also use 10 for the buffer (same as it was originally) | @@ -142,9 +142,9 @@ plt.show()
# Note that by default ``dense_lucaskanade`` uses a 5-pixel buffer.
# with buffer
-buffer = 5
+buffer = 10
fd_kwargs2 = {"buffer_mask" : buffer}
-xy, uv = LK_optflow(R, dense=False, fd_kwargs=fd_kwargs2)
+xy, uv = dense_lucaskanade(R, dense=False, fd_kwargs=fd_kwargs2)
plt.imshow(ref_dbr, cmap=plt.get_cmap("Greys"))
plt.imshow(mask, cmap=colors.ListedColormap(["black"]), alpha=0.5)
plt.quiver(
@@ -175,8 +175,8 @@ plt.show()
# interpolation routine. This will produce a smoother motion field.
interp_kwargs = {"epsilon" : 5} # use a small shape parameter for interpolation
-UV1 = LK_optflow(R, dense=True, fd_kwargs=fd_kwargs1, interp_kwargs=interp_kwargs)
-UV2 = LK_optflow(R, dense=True, fd_kwargs=fd_kwargs2, interp_kwargs=interp_kwargs)
+UV1 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs1, interp_kwargs=interp_kwargs)
+UV2 = dense_lucaskanade(R, dense=True, fd_kwargs=fd_kwargs2, interp_kwargs=interp_kwargs)
V1 = np.sqrt(UV1[0] ** 2 + UV1[1] ** 2)
V2 = np.sqrt(UV2[0] ** 2 + UV2[1] ** 2)
|
Add a link to the gitlab repo
Using the official icon. | @@ -96,7 +96,14 @@ html_theme_options = {
"external_links": [
{"name": "Source code", "url": "https://gitlab.com/pgjones/quart"},
{"name": "Issues", "url": "https://gitlab.com/pgjones/quart/issues"},
- ]
+ ],
+ "icon_links": [
+ {
+ "name": "GitLab",
+ "url": "https://gitlab.com/pgjones/quart",
+ "icon": "fab fa-gitlab",
+ },
+ ],
}
# html_sidebars = {}
|
fix mismatched destination path variable name
data_dir -> dest_dir | @@ -78,7 +78,7 @@ def __extract_rar(rar_path, dest_dir):
logging.info("Extraction failed.")
exit(1)
else:
- logging.info("Skipping extracting. Data already there {0}.".format(data_dir))
+ logging.info("Skipping extracting. Data already there {0}.".format(dest_dir))
def __convert_waves(wavedir, converted_wavedir, wavename, sr):
|
models: Move UserProfile.property_types to UserBaseSettings.
Since all the display settings are defined in UserBaseSettings,
we should shift the property_types dict to UserBaseSettings. | @@ -1246,6 +1246,22 @@ class UserBaseSettings(models.Model):
realm_name_in_notifications: bool = models.BooleanField(default=False)
presence_enabled: bool = models.BooleanField(default=True)
+ # Define the types of the various automatically managed properties
+ property_types = dict(
+ color_scheme=int,
+ default_language=str,
+ default_view=str,
+ demote_inactive_streams=int,
+ dense_mode=bool,
+ emojiset=str,
+ fluid_layout_width=bool,
+ high_contrast_mode=bool,
+ left_side_userlist=bool,
+ starred_message_counts=bool,
+ translate_emoticons=bool,
+ twenty_four_hour_time=bool,
+ )
+
class Meta:
abstract = True
@@ -1458,22 +1474,6 @@ class UserProfile(AbstractBaseUser, PermissionsMixin, UserBaseSettings):
objects: UserManager = UserManager()
- # Define the types of the various automatically managed properties
- property_types = dict(
- color_scheme=int,
- default_language=str,
- default_view=str,
- demote_inactive_streams=int,
- dense_mode=bool,
- emojiset=str,
- fluid_layout_width=bool,
- high_contrast_mode=bool,
- left_side_userlist=bool,
- starred_message_counts=bool,
- translate_emoticons=bool,
- twenty_four_hour_time=bool,
- )
-
notification_setting_types = dict(
enable_desktop_notifications=bool,
enable_digest_emails=bool,
|
FIX TelegramObject.__setitem__
* FIX TelegramObject.__setitem__
Removed 'raise KeyError(key)'
* Add warning and log in TelegramObject.__setitem__
When Telegram adds a new field -> Aiogram will warn about this.
* Removed warnings.warn
* Set logger to 'aiogram'
* Removed 'f' before string | from __future__ import annotations
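As an illustration only (hypothetical class, not aiogram's real TelegramObject), the pattern described above stores unknown keys and logs a warning instead of raising KeyError, so newly added Telegram fields do not break parsing:

import logging

log = logging.getLogger('aiogram')

class TolerantObject:
    known_fields = {'message_id', 'text'}  # hypothetical known properties

    def __init__(self):
        self.values = {}

    def __setitem__(self, key, value):
        if key not in self.known_fields:
            # warn instead of raising when the server sends a new field
            log.warning("Field '%s' doesn't exist in %s", key, self.__class__)
        self.values[key] = value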
import io
+import logging
import typing
from typing import TypeVar
@@ -26,6 +27,9 @@ Float = TypeVar('Float', bound=float)
Boolean = TypeVar('Boolean', bound=bool)
T = TypeVar('T')
+# Main aiogram logger
+log = logging.getLogger('aiogram')
+
class MetaTelegramObject(type):
"""
@@ -225,7 +229,9 @@ class TelegramObject(ContextInstanceMixin, metaclass=MetaTelegramObject):
if key in self.props:
return self.props[key].set_value(self, value, self.conf.get('parent', None))
self.values[key] = value
- raise KeyError(key)
+
+ # Log warning when Telegram silently adds new Fields
+ log.warning("Field '%s' doesn't exist in %s", key, self.__class__)
def __contains__(self, item: str) -> bool:
"""
|
Flip default value of jax_unique_mhlo_module_names to False.
This should help avoid unnecessary cache misses. | @@ -876,7 +876,7 @@ config.define_bool_state(
config.define_bool_state(
name='jax_unique_mhlo_module_names',
- default=True,
+ default=False,
help='Enables the generation of unique MHLO module names. This is useful '
'to clients that expect modules to have unique names (e.g, trace data).')
|
Consolidating disabled styles.
Fixing odd line height for smaller description text.
Fixing styles when radiobuttons are inlined.
Moving custom radio buttons back on top of native inputs. | <template>
<!-- HTML makes clicking label apply to input by default -->
- <label class="k-radio-button">
+ <label :class="['k-radio-button', {disabled}]">
<!-- v-model listens for @input event by default -->
<!-- @input has compatibility issues for input of type radio -->
<!-- Here, manually listen for @change (no compatibility issues) -->
:class="['unchecked', {disabled, active}]"
/>
- <span :class="['text', {disabled}]">
+ <span class="text">
{{ label }}
<span
v-if="description"
:class="['description', {disabled}]"
>
- <br>
{{ description }}
</span>
</span>
required: true,
},
/**
- * Description for Label
+ * Description for label
*/
description: {
type: String,
$radio-height = 24px
.k-radio-button
- // give conditional classes higher priority
&.disabled
- cursor: default
- position: relative
+ color: $core-text-disabled
+ &:not(.disabled)
cursor: pointer
+ position: relative
display:block
margin-top: 8px
margin-bottom: 8px
- line-height: $radio-height
+
+ .input, .text
+ // consistent look in inline and block displays
+ vertical-align: top
.input
// use opacity, not appearance:none because ie compatibility
opacity: 0
- // bring the invible HTML element on top of our custom radio-button
- position: absolute
width: $radio-height
height: $radio-height
.checked, .unchecked
- vertical-align: top
&.active
// setting opacity to 0 hides input's default outline
outline: $core-outline
&.disabled
fill: $core-grey-300
+ // lay our custom radio buttons on top of the actual element
+ width: $radio-height
+ height: $radio-height
+ position: absolute
+ left: 0
+ top:0
.checked
fill: $core-action-normal
.unchecked
fill: $core-text-annotation
-
.text, .description
- &.disabled
- color: $core-text-disabled
- .text
display: inline-block
+ .text
padding-left: 8px
+ line-height: $radio-height
max-width: 'calc(100% - %s)' % $radio-height // stylus specific
.description
+ &:not(.disabled)
color: $core-text-annotation
+ width:100%
+ line-height: normal
font-size: 12px
</style>
|
Fix test_create_ops_arg_constant
Fixed indentation.
Added new parameter to the create_ops_arg call. | @@ -197,7 +197,7 @@ class TestOPSExpression(object):
def test_create_ops_arg_constant(self):
a = Constant(name='*a')
- res = create_ops_arg(a, {}, {})
+ res = create_ops_arg(a, {}, {}, {})
assert type(res) == namespace['ops_arg_gbl']
assert str(res.args[0]) == str(Byref(Constant(name='a')))
|
Update crawler.py
Fixes issues with many sources when searching.
e.g.:
With the string slice you get:
Without: | @@ -141,7 +141,7 @@ class Crawler:
elif url.find('//') >= 0:
return url
elif url.startswith('/'):
- return self.home_url + url[1:]
+ return self.home_url + url
elif page_url:
return page_url.strip('/') + '/' + url
else:
|
Make 'tags' an error in runtests.py
We've now got rid of all legacy uses of "tags". Therefore make it an error instead of a warning to avoid new uses sneaking in. | @@ -537,8 +537,7 @@ def parse_tags(filepath):
if tag in ('coding', 'encoding'):
continue
if tag == 'tags':
- tag = 'tag'
- print("WARNING: test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
+ raise RuntimeError("test tags use the 'tag' directive, not 'tags' (%s)" % filepath)
if tag not in ('mode', 'tag', 'ticket', 'cython', 'distutils', 'preparse'):
print("WARNING: unknown test directive '%s' found (%s)" % (tag, filepath))
values = values.split(',')
|
[IMPR] Remove '.py' before matching the string
Improve string matching by removing '.py' ending.
See T217195 for further information.
Also ignore __init__.py scripts
use a dict to map the script names to their script paths
(the path is used later to give the user the choice to start the script)
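A standalone sketch of that matching idea (hypothetical script table, not pywikibot's real data): strip the '.py' ending before fuzzy matching, and keep a name-to-path mapping so the chosen script can be started later.

from difflib import get_close_matches

scripts = {  # hypothetical name -> path mapping
    'archivebot': 'scripts/archivebot.py',
    'category': 'scripts/category.py',
}

requested = 'catgory.py'
name = requested[:-3] if requested.endswith('.py') else requested
matches = get_close_matches(name, scripts, n=10, cutoff=0.7)
if matches:
    print('The most similar script{}:'.format(' is' if len(matches) == 1 else 's are'))
    print('\t' + '.py\n\t'.join(matches) + '.py')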
print('ERROR: {} not found! Misspelling?'.format(filename),
file=sys.stderr)
- scripts = []
+ scripts = {}
for file_package in script_paths:
path = file_package.split('.')
for script_name in os.listdir(os.path.join(*path)):
- if script_name.endswith('.py'):
- scripts.append(script_name)
+ if (script_name.endswith('.py')
+ and not script_name.startswith('__')):
+ # remove .py for better matching
+ scripts[script_name[:-3]] = os.path.join(
+ *(path + [script_name]))
- similar_scripts = get_close_matches(filename, scripts,
+ similar_scripts = get_close_matches(filename[:-3], scripts,
n=10, cutoff=0.7)
-
if similar_scripts:
- print(
- '\nThe most similar {}:'
- .format('script is' if len(similar_scripts) == 1
- else 'scripts are'))
- print('\t' + '\n\t'.join(similar_scripts))
+ print('\nThe most similar script{}:'
+ .format(' is' if len(similar_scripts) == 1
+ else 's are'))
+ print('\t' + '.py\n\t'.join(similar_scripts) + '.py')
return True
# When both pwb.py and the filename to run are within the current
|
Fix a small README error.
In [google-cloud-python](https://github.com/googleapis/google-cloud-python), we do not use the `google-cloud-` prefix in the directory for each individual API. | @@ -27,7 +27,7 @@ cd google-cloud-python/
Navigate to the destination directory to generate the library.
```
-cd google-cloud-tasks/
+cd tasks/
```
### Running `synthtool`
@@ -59,7 +59,7 @@ Find examples below in different programming languages (Cloud Tasks API used as
```
- Navigate to the destination directory to generate the library:
```
- cd google-cloud-tasks/
+ cd tasks/
```
- Run `synthtool` to generate using the existing [`synth.py`][python_tasks_synth_py]
file for the [Python Client for Cloud Tasks API][python_tasks_library]:
|
[ROCm] Remove installation of ca-certificates and apt-transport-https in test.sh
Summary:
These packages are now part of the base docker image.
Pull Request resolved: | @@ -37,7 +37,6 @@ fi
if [[ "$BUILD_ENVIRONMENT" == *rocm* ]]; then
# TODO: Move this to Docker
- sudo apt-get -qq install --no-install-recommends apt-transport-https ca-certificates
sudo apt-get -qq update
sudo apt-get -qq install --no-install-recommends libsndfile1
fi
|
Fix - switched from raise to six.reraise
Small formatting changes | @@ -115,7 +115,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# clean destination
self.log.critical("Error when registering", exc_info=True)
self.handle_destination_files(self.integrated_file_sizes, 'remove')
- raise
+ six.reraise(*sys.exc_info())
def register(self, instance):
# Required environment variables
@@ -509,7 +509,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
format(self.integrated_file_sizes))
# get 'files' info for representation and all attached resources
- self.log.debug("Preparing files information ..")
+ self.log.debug("Preparing files information ...")
representation["files"] = self.get_files_info(
instance,
self.integrated_file_sizes)
@@ -573,7 +573,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
hardlinks = instance.data.get("hardlinks", list())
for src, dest in hardlinks:
dest = self.get_dest_temp_url(dest)
- self.log.debug("Hardlinking file .. {} -> {}".format(src, dest))
+ self.log.debug("Hardlinking file ... {} -> {}".format(src, dest))
if not os.path.exists(dest):
self.hardlink_file(src, dest)
@@ -593,7 +593,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"""
src = os.path.normpath(src)
dst = os.path.normpath(dst)
- self.log.debug("Copying file .. {} -> {}".format(src, dst))
+ self.log.debug("Copying file ... {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
try:
os.makedirs(dirname)
@@ -602,7 +602,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
pass
else:
self.log.critical("An unexpected error occurred.")
- raise
+ six.reraise(*sys.exc_info())
# copy file with speedcopy and check if size of files are simetrical
while True:
@@ -625,7 +625,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
pass
else:
self.log.critical("An unexpected error occurred.")
- raise
+ six.reraise(*sys.exc_info())
filelink.create(src, dst, filelink.HARDLINK)
@@ -638,7 +638,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
})
if subset is None:
- self.log.info("Subset '%s' not found, creating.." % subset_name)
+ self.log.info("Subset '%s' not found, creating ..." % subset_name)
self.log.debug("families. %s" % instance.data.get('families'))
self.log.debug(
"families. %s" % type(instance.data.get('families')))
@@ -948,4 +948,4 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
except OSError:
self.log.error("Cannot {} file {}".format(mode, file_url)
, exc_info=True)
- raise
+ six.reraise(*sys.exc_info())
|
Kia Ceed: fix eps ECU type
should be eps | @@ -845,7 +845,7 @@ FW_VERSIONS = {
},
CAR.KIA_CEED: {
(Ecu.fwdRadar, 0x7D0, None): [b'\xf1\000CD__ SCC F-CUP 1.00 1.02 99110-J7000 ', ],
- (Ecu.esp, 0x7D4, None): [b'\xf1\000CD MDPS C 1.00 1.06 56310-XX000 4CDEC106', ],
+ (Ecu.eps, 0x7D4, None): [b'\xf1\000CD MDPS C 1.00 1.06 56310-XX000 4CDEC106', ],
(Ecu.fwdCamera, 0x7C4, None): [b'\xf1\000CD LKAS AT EUR LHD 1.00 1.01 99211-J7000 B40', ],
(Ecu.engine, 0x7E0, None): [b'\001TCD-JECU4F202H0K', ],
(Ecu.transmission, 0x7E1, None): [
|
Fix non regression.
blender_object is None when not needed | @@ -411,8 +411,8 @@ def extract_primitives(glTF, blender_mesh, library, blender_object, blender_vert
# Skin must be ignored if the object is parented to a bone of the armature
# (This creates an infinite recursive error)
- # SO ignoring skin in that case
- if blender_object.parent_type == "BONE" and blender_object.parent.name == armature.name:
+ # So ignoring skin in that case
+ if blender_object and blender_object.parent_type == "BONE" and blender_object.parent.name == armature.name:
bone_max = 0 # joints & weights will be ignored in following code
else:
# Manage joints & weights
|
Transfers: don't sleep in preparer if fully loaded
Otherwise the queue can grow while we don't do much work. | @@ -35,7 +35,7 @@ from rucio.daemons.conveyor.common import HeartbeatHandler
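As a rough sketch of the scheduling idea (hypothetical helper, not Rucio's API): sleep only when the last pass handled fewer items than the bulk limit, so a growing backlog keeps being drained.

import time

def daemon_loop(run_once, bulk=100, sleep_time=60):
    # run_once(limit) is assumed to return how many requests it updated
    while True:
        count = run_once(limit=bulk)
        if count < bulk:
            # fewer than a full batch: the queue is likely drained, safe to pause
            time.sleep(sleep_time)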
from rucio.db.sqla.constants import RequestState
if TYPE_CHECKING:
- from typing import Optional
+ from typing import Optional, Tuple
from sqlalchemy.orm import Session
graceful_stop = threading.Event()
@@ -104,8 +104,9 @@ def preparer(once, sleep_time, bulk, partition_wait_time=10):
prefix = 'conveyor-preparer[%s/%s] ' % (worker_number, total_workers)
daemon_logger = formatted_logger(logging.log, prefix + '%s')
+ count = 0
try:
- updated_msg = run_once(total_workers=total_workers, worker_number=worker_number, limit=bulk, logger=logger)
+ count, updated_msg = run_once(total_workers=total_workers, worker_number=worker_number, limit=bulk, logger=logger)
except RucioException:
daemon_logger(logging.ERROR, 'errored with a RucioException, retrying later', exc_info=True)
updated_msg = 'errored'
@@ -116,10 +117,11 @@ def preparer(once, sleep_time, bulk, partition_wait_time=10):
end_time = time()
time_diff = end_time - start_time
logger(logging.INFO, '%s, taking %.3f seconds' % (updated_msg, time_diff))
+ if count < bulk:
daemon_sleep(start_time=start_time, sleep_time=sleep_time, graceful_stop=graceful_stop, logger=logger)
-def run_once(total_workers: int = 0, worker_number: int = 0, limit: "Optional[int]" = None, logger=logging.log, session: "Optional[Session]" = None) -> str:
+def run_once(total_workers: int = 0, worker_number: int = 0, limit: "Optional[int]" = None, logger=logging.log, session: "Optional[Session]" = None) -> "Tuple[int, str]":
req_sources = __list_transfer_requests_and_source_replicas(
total_workers=total_workers,
worker_number=worker_number,
@@ -128,9 +130,9 @@ def run_once(total_workers: int = 0, worker_number: int = 0, limit: "Optional[in
session=session
)
if not req_sources:
- return 'had nothing to do'
+ return 0, 'had nothing to do'
transfertool_filter = get_transfertool_filter(lambda rse_id: get_supported_transfertools(rse_id=rse_id, session=session))
requests = reduce_requests(req_sources, [rse_lookup_filter, sort_requests_minimum_distance, transfertool_filter], logger=logger)
count = preparer_update_requests(requests, session=session)
- return f'updated {count}/{limit} requests'
+ return count, f'updated {count}/{limit} requests'
|
Fixes
Remove default serviceaccount creation (not needed, breaks on 4.4)
Remove --for=pull since this was used by the default account.
Made the code in-line since there was only one call now. | @@ -52,19 +52,6 @@ class TestCouchbaseWorkload(E2ETest):
cb_worker = OCS()
cb_examples = OCS()
- def add_serviceaccount_secret(self, acct_name, dockerstr):
- """
- Add secret for serviceaccount
-
- Args:
- acct_name (str): Name of the service account
- dockerstr (str): Docker secret
-
- """
- self.secretsadder.exec_oc_cmd(
- f"secrets add serviceaccount/{acct_name} secrets/{dockerstr} --for=pull"
- )
-
def is_up_and_running(self, pod_name, ocp_value):
"""
Test if the pod specified is up and running.
@@ -159,8 +146,9 @@ class TestCouchbaseWorkload(E2ETest):
newdockerstr = dockercfgs[startloc:]
endloc = newdockerstr.find(' ')
dockerstr = newdockerstr[:endloc]
- self.add_serviceaccount_secret("couchbase-operator", dockerstr)
- self.add_serviceaccount_secret("default", dockerstr)
+ self.secretsadder.exec_oc_cmd(
+ f"secrets add serviceaccount/couchbase-operator secrets/{dockerstr}"
+ )
self.rolebinding = OCP(namespace=self.COUCHBASE_OPERATOR)
rolebind_cmd = "".join([
"create rolebinding couchbase-operator-rolebinding ",
|
Work in progress on scenewidget...
... including debugging console print statements. | @@ -111,6 +111,7 @@ class SelectionWidget(Widget):
def event(self, window_pos=None, space_pos=None, event_type=None):
elements = self.elements
+ print(event_type)
if event_type == "hover_start":
self.cursor = wx.CURSOR_SIZING
self.scene.context.gui.SetCursor(wx.Cursor(self.cursor))
@@ -123,47 +124,53 @@ class SelectionWidget(Widget):
matrix = self.parent.matrix
xin = space_pos[0] - self.left
yin = space_pos[1] - self.top
+ # TODO Handle distance should be constant regardless of zoom factor. May need to scale by screen DPI.
xmin = 5 / matrix.value_scale_x()
- ymin = 5 / matrix.value_scale_x()
+ ymin = 5 / matrix.value_scale_y()
xmax = self.width - xmin
ymax = self.height - ymin
- self.tool = self.tool_translate
+ print(xmin,ymin,xmax,ymax,xin,yin)
cursor = self.cursor
- self.cursor = wx.CURSOR_SIZING
- first = elements.first_element(emphasized=True)
+ for e in elements.elems(emphasized=True):
try:
- if first.lock:
+ if e.lock:
+ print("lock")
+ self.cursor = wx.CURSOR_SIZING
+ self.tool = self.tool_translate
if self.cursor != cursor:
self.scene.context.gui.SetCursor(wx.Cursor(self.cursor))
- self.scene.context.gui.SetCursor(wx.Cursor(self.cursor))
return RESPONSE_CHAIN
except (ValueError, AttributeError):
pass
- if xin <= xmin:
- self.cursor = wx.CURSOR_SIZEWE
- self.tool = self.tool_scalex_w
- if yin <= ymin:
- self.cursor = wx.CURSOR_SIZENS
- self.tool = self.tool_scaley_n
- if xin >= xmax:
- self.cursor = wx.CURSOR_SIZEWE
- self.tool = self.tool_scalex_e
- if yin >= ymax:
- self.cursor = wx.CURSOR_SIZENS
- self.tool = self.tool_scaley_s
if xin >= xmax and yin >= ymax:
self.cursor = wx.CURSOR_SIZENWSE
self.tool = self.tool_scalexy_se
- if xin <= xmin and yin <= ymin:
+ elif xin <= xmin and yin <= ymin:
self.cursor = wx.CURSOR_SIZENWSE
self.tool = self.tool_scalexy_nw
- if xin >= xmax and yin <= ymin:
+ elif xin >= xmax and yin <= ymin:
self.cursor = wx.CURSOR_SIZENESW
self.tool = self.tool_scalexy_ne
- if xin <= xmin and yin >= ymax:
+ elif xin <= xmin and yin >= ymax:
self.cursor = wx.CURSOR_SIZENESW
self.tool = self.tool_scalexy_sw
+ elif xin <= xmin:
+ self.cursor = wx.CURSOR_SIZEWE
+ self.tool = self.tool_scalex_w
+ elif yin <= ymin:
+ self.cursor = wx.CURSOR_SIZENS
+ self.tool = self.tool_scaley_n
+ elif xin >= xmax:
+ self.cursor = wx.CURSOR_SIZEWE
+ self.tool = self.tool_scalex_e
+ elif yin >= ymax:
+ self.cursor = wx.CURSOR_SIZENS
+ self.tool = self.tool_scaley_s
+ else:
+ self.cursor = wx.CURSOR_SIZING
+ self.tool = self.tool_translate
if self.cursor != cursor:
+ print("change_cursor")
self.scene.context.gui.SetCursor(wx.Cursor(self.cursor))
return RESPONSE_CHAIN
dx = space_pos[4]
|
Standalone: Do not change PATH before running depends.exe
* This can cause finding the wrong DLLs for some users and is too
  aggressive. We already add the package directory and its children
for packages, that ought to be sufficient. | @@ -695,28 +695,6 @@ def _detectBinaryPathDLLsMacOS(original_dir, binary_filename):
return result
-def _makeBinaryPathPathDLLSearchEnv(package_name):
- # Put the PYTHONPATH into the system "PATH", DLLs frequently live in
- # the package directories.
- env = os.environ.copy()
- path = env.get("PATH","").split(os.pathsep)
-
- # Put the "Python.exe" first. At least for WinPython, they put the DLLs
- # there.
- path = [sys.prefix] + sys.path + path
-
- if package_name is not None:
- for element in sys.path:
- candidate = os.path.join(element, package_name)
-
- if os.path.isdir(candidate):
- path.append(candidate)
-
-
- env["PATH"] = os.pathsep.join(path)
-
- return env
-
def _getCacheFilename(is_main_executable, source_dir, original_dir, binary_filename):
original_filename = os.path.join(
original_dir,
@@ -753,8 +731,7 @@ def _getCacheFilename(is_main_executable, source_dir, original_dir, binary_filen
)
-def _detectBinaryPathDLLsWindows(is_main_executable, source_dir, original_dir, binary_filename,
- package_name):
+def _detectBinaryPathDLLsWindows(is_main_executable, source_dir, original_dir, binary_filename):
# This is complex, as it also includes the caching mechanism
# pylint: disable=too-many-branches
@@ -810,8 +787,7 @@ SxS
"-pa1",
"-ps1",
binary_filename
- ),
- env = _makeBinaryPathPathDLLSearchEnv(package_name),
+ )
)
inside = False
@@ -922,7 +898,7 @@ SxS
def detectBinaryDLLs(is_main_executable, source_dir, original_filename,
- binary_filename, package_name):
+ binary_filename):
""" Detect the DLLs used by a binary.
Using "ldd" (Linux), "depends.exe" (Windows), or "otool" (MacOS) the list
@@ -940,8 +916,7 @@ def detectBinaryDLLs(is_main_executable, source_dir, original_filename,
is_main_executable = is_main_executable,
source_dir = source_dir,
original_dir = os.path.dirname(original_filename),
- binary_filename = binary_filename,
- package_name = package_name
+ binary_filename = binary_filename
)
elif Utils.getOS() == "Darwin":
return _detectBinaryPathDLLsMacOS(
@@ -956,13 +931,12 @@ def detectBinaryDLLs(is_main_executable, source_dir, original_filename,
def detectUsedDLLs(source_dir, standalone_entry_points):
result = OrderedDict()
- for count, (original_filename, binary_filename, package_name) in enumerate(standalone_entry_points):
+ for count, (original_filename, binary_filename, _package_name) in enumerate(standalone_entry_points):
used_dlls = detectBinaryDLLs(
is_main_executable = count == 0,
source_dir = source_dir,
original_filename = original_filename,
- binary_filename = binary_filename,
- package_name = package_name,
+ binary_filename = binary_filename
)
for dll_filename in used_dlls:
|
Fix issue when remote-deps generated twice
If `make remote-deps` is run twice in a row without running `make clean`, `/node/config/config/...` is created, possibly preventing some changes from being applied. | @@ -150,6 +150,7 @@ remote-deps: mod-download
# Recreate the directory so that we are sure to clean up any old files.
rm -rf filesystem/etc/calico/confd
mkdir -p filesystem/etc/calico/confd
+ rm -rf config
rm -rf bin/bpf
mkdir -p bin/bpf
rm -rf filesystem/usr/lib/calico/bpf/
|
test: Use osrelease instead of lsb_distrib_release
Replace `lsb_distrib_release` by `osrelease` in the tests to avoid
needing to set the lsb_* values. | @@ -622,7 +622,7 @@ class Repo:
"""
if (
self.grains["osfullname"] == "Ubuntu"
- and self.grains["lsb_distrib_release"] == "22.04"
+ and self.grains["osrelease"] == "22.04"
):
return True
return False
@@ -659,7 +659,7 @@ class Repo:
)
repo_content = "deb {opts} https://repo.saltproject.io/py3/{}/{}/{arch}/latest {} main".format(
self.fullname,
- self.grains["lsb_distrib_release"],
+ self.grains["osrelease"],
self.grains["oscodename"],
arch=self.grains["osarch"],
opts=opts,
@@ -669,7 +669,7 @@ class Repo:
@key_url.default
def _default_key_url(self):
key_url = "https://repo.saltproject.io/py3/{}/{}/{}/latest/salt-archive-keyring.gpg".format(
- self.fullname, self.grains["lsb_distrib_release"], self.grains["osarch"]
+ self.fullname, self.grains["osrelease"], self.grains["osarch"]
)
if self.alt_repo:
|
Pin celery to <4.2 for now.
Refs
Closes | @@ -95,7 +95,7 @@ boto3==1.7.67 \
--hash=sha256:e225d9fa3f313049547bf510a54d5f0af82f70d53fe0c0f891a0a9f8d4474681
celery==4.1.1 \
--hash=sha256:6fc4678d1692af97e137b2a9f1c04efd8e7e2fb7134c5c5ad60738cdd927762f \
- --hash=sha256:d1f2a3359bdbdfb344edce98b8e891f5fe64f8a11c5a45538ec20ac237c971f5
+ --hash=sha256:d1f2a3359bdbdfb344edce98b8e891f5fe64f8a11c5a45538ec20ac237c971f5 # pyup: <4.2
botocore==1.10.68 \
--hash=sha256:669844066de8e945890a7842cd445b83b5abf3449425526ce6037276f73e5aa4 \
--hash=sha256:f27e82ae95ef3887bf9fa6362aada83f3b56d315ec72e2c4870d322c22e83369
|
img_tools.pxi: Fix incorrect rowlen for alignment
Fixes the k % rowlen == 0 test | @@ -57,6 +57,7 @@ cdef inline convert_to_gl_format(data, fmt, width, height):
if rowlen * height < datasize:
# FIXME: warn/fail if pitch * height != datasize:
pitchalign = pitch - rowlen
+ rowlen -= 1 # to match 0-based k below
# note, this is the fastest copying method. copying element by element
# from a memoryview is slower then copying the whole buffer and then
|
Remove mention that this is impossible in mpl
Maybe I am misunderstanding, but why does it not count that you can do it via ax.spine? | "Removing axes spines\n",
"--------------------\n",
"\n",
- "Both the ``white`` and ``ticks`` styles can benefit from removing the top and right axes spines, which are not needed. It's impossible to do this through the matplotlib parameters, but you can call the seaborn function :func:`despine` to remove them:"
+ "Both the ``white`` and ``ticks`` styles can benefit from removing the top and right axes spines, which are not needed. The seaborn function :func:`despine` can be called to remove them:"
]
},
{
|
Added some default values for configuration items which had none before
mix/videocaps:
video/x-raw,format=I420,width=1920,height=1080,framerate=25/1,pixel-aspect-ratio=1/1
mix/audiocaps:
audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000
previews/videocaps: video/x-raw,width=1024,height=576,framerate=25/1
previews/deinterlace: false
previews/enabled: false | @@ -107,34 +107,34 @@ class VocConfigParser(SafeConfigParser):
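The defaults listed above are supplied through configparser-style fallbacks; a minimal standalone sketch of that pattern (hypothetical file name):

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read('voctocore.ini')  # hypothetical config file

# fallback= is returned when the section or option is missing
videocaps = cfg.get(
    'mix', 'videocaps',
    fallback='video/x-raw,format=I420,width=1920,height=1080,'
             'framerate=25/1,pixel-aspect-ratio=1/1')
previews_enabled = cfg.getboolean('previews', 'enabled', fallback=False)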
self.add_section_if_missing('audio')
self.set('audio', 'volumecontrol', "true" if show else "false")
- def getVideoCaps(self, section='mix'):
- return self.get(section, 'videocaps')
+ def getVideoCaps(self):
+ return self.get('mix', 'videocaps', fallback="video/x-raw,format=I420,width=1920,height=1080,framerate=25/1,pixel-aspect-ratio=1/1")
def getAudioCaps(self, section='mix'):
- return self.get(section, 'audiocaps')
+ return self.get(section, 'audiocaps', fallback="audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000")
def getNumAudioStreams(self):
return self.getint('mix', 'audiostreams', fallback=1)
- def getVideoSize(self, section='mix'):
+ def getVideoSize(self):
caps = Gst.Caps.from_string(
- self.getVideoCaps(section)).get_structure(0)
+ self.getVideoCaps()).get_structure(0)
_, width = caps.get_int('width')
_, height = caps.get_int('height')
return (width, height)
- def getVideoRatio(self, section='mix'):
- width, height = self.getVideoSize(section)
+ def getVideoRatio(self):
+ width, height = self.getVideoSize()
return float(width)/float(height)
- def getFramerate(self, section='mix'):
+ def getFramerate(self):
caps = Gst.Caps.from_string(
- self.getVideoCaps(section)).get_structure(0)
+ self.getVideoCaps()).get_structure(0)
(_, numerator, denominator) = caps.get_fraction('framerate')
return (numerator, denominator)
- def getFramesPerSecond(self, section='mix'):
- num, denom = self.getFramerate(section)
+ def getFramesPerSecond(self):
+ num, denom = self.getFramerate()
return float(num) / float(denom)
def getVideoSystem(self):
@@ -184,7 +184,7 @@ class VocConfigParser(SafeConfigParser):
def getPreviewCaps(self):
if self.has_option('previews', 'videocaps'):
- return self.getVideoCaps('previews')
+ return self.get('previews', 'videocaps', fallback='video/x-raw,width=1024,height=576,framerate=25/1')
else:
return self.getVideoCaps()
@@ -196,10 +196,10 @@ class VocConfigParser(SafeConfigParser):
return(width, height)
def getDeinterlacePreviews(self):
- return self.getboolean('previews', 'deinterlace')
+ return self.getboolean('previews', 'deinterlace', fallback=False)
def getPreviewsEnabled(self):
- return self.getboolean('previews', 'enabled')
+ return self.getboolean('previews', 'enabled', fallback=False)
def getLivePreviewEnabled(self):
return self.getboolean('previews', 'live', fallback=False)
|
fix readme formatting
asdf and asdf-standard use different file types (sigh) | @@ -345,8 +345,8 @@ More information on the ASDF Standard itself can be found
There are two mailing lists for ASDF:
-* [asdf-users](https://groups.google.com/forum/#!forum/asdf-users)
-* [asdf-developers](https://groups.google.com/forum/#!forum/asdf-developers)
+* `asdf-users <https://groups.google.com/forum/#!forum/asdf-users>`
+* `asdf-developers <https://groups.google.com/forum/#!forum/asdf-developers>`
If you are looking for the **A**\ daptable **S**\ eismic **D**\ ata
**F**\ ormat, information can be found
|
scene/infile argument in snap.util.geocode
The argument for the scene's path is named differently in the geocode functions of snap and gamma
from pyroSAR.snap import geocode
scene = 'S1A_IW_GRDH_1SDV_20180101T170648_20180101T170713_019964_021FFD_DA78.zip'
- geocode(scene=scene,
+ geocode(infile=scene,
outdir='outdir',
allow_RES_OSV=True)
|
Test that endpoints can all be made into clients
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import os
+import json
from nose.tools import assert_equal
from botocore.session import get_session
@@ -32,6 +34,32 @@ SERVICE_RENAMES = {
'lex-runtime': 'runtime.lex',
}
+BLACKLIST = [
+ 'mobileanalytics',
+]
+
+
+def test_endpoint_matches_service():
+ backwards_renames = dict((v, k) for k, v in SERVICE_RENAMES.items())
+ session = get_session()
+ loader = session.get_component('data_loader')
+ expected_services = set(loader.list_available_services('service-2'))
+
+ pdir = os.path.dirname
+ endpoints_path = os.path.join(pdir(pdir(pdir(__file__))),
+ 'botocore', 'data', 'endpoints.json')
+ with open(endpoints_path, 'r') as f:
+ data = json.loads(f.read())
+ for partition in data['partitions']:
+ for service in partition['services'].keys():
+ service = backwards_renames.get(service, service)
+ if service not in BLACKLIST:
+ yield _assert_endpoint_is_service, service, expected_services
+
+
+def _assert_endpoint_is_service(service, expected_services):
+ assert service in expected_services
+
def test_service_name_matches_endpoint_prefix():
# Generates tests for each service to verify that the endpoint prefix
|
Use GuildChannel abc for CategoryChannel edit
I noticed nothing happened when I did
`ch.edit(overwrites=oh.overwrites)`
`http.edit_channel` doesn't do anything with the `overwrites` keyword,
it's processed as `permission_overwrites` instead which `self._edit`
takes care of.
I feel this was an oversight at some point. | @@ -791,17 +791,7 @@ class CategoryChannel(discord.abc.GuildChannel, Hashable):
Editing the category failed.
"""
- try:
- position = options.pop('position')
- except KeyError:
- pass
- else:
- await self._move(position, reason=reason)
- self.position = position
-
- if options:
- data = await self._state.http.edit_channel(self.id, reason=reason, **options)
- self._update(self.guild, data)
+ await self._edit(options=options, reason=reason)
@property
def channels(self):
|
Detect openSUSE and SLES
+ For openSUSE Leap and SLES >= 15 Python 3 is used and the distro
is identified as "opensuse" and "sles" while in Python 2 both
were lumped together as "suse"
+ Closes | @@ -69,7 +69,7 @@ def get_osutil(distro_name=DISTRO_NAME,
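A simplified sketch of the selection logic (not the real waagent factory): accept the Python 3 identifiers "opensuse" and "sles" wherever the old combined "suse" name was accepted.

def uses_suse_osutil(distro_name):
    # Python 2 reported both distros as "suse"; Python 3's detection
    # reports "opensuse" and "sles" separately, so accept all three.
    return distro_name in ("suse", "sles", "opensuse")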
if distro_name == "coreos" or distro_code_name == "coreos":
return CoreOSUtil()
- if distro_name == "suse":
+ if distro_name in ("suse", "sles", "opensuse"):
if distro_full_name == 'SUSE Linux Enterprise Server' \
and Version(distro_version) < Version('12') \
or distro_full_name == 'openSUSE' and Version(distro_version) < Version('13.2'):
|
Handle case where flag is provided that isn't defined by model
This is a perfectly valid case and needed to support broken model defs
or simply to pass through options to the command module/script. | @@ -181,10 +181,13 @@ def _flag_cmd_arg_vals(opdef):
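In sketch form (hypothetical names, not Guild's internals): flags that have a definition go through their normal handling, while unknown flags are passed through unchanged so the script still receives them.

def collect_flag_vals(flag_values, flagdefs):
    vals = {}
    for name, val in flag_values.items():
        flagdef = flagdefs.get(name)
        if flagdef is not None:
            vals[name] = flagdef.format(val)  # defined flag: normal handling (hypothetical method)
        else:
            vals[name] = val  # unknown flag: pass through as-is
    return vals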
vals = {}
for name, flag_val in opdef.flag_values().items():
flagdef = opdef.get_flagdef(name)
+ if flagdef:
if flagdef.options:
_apply_option_args(flagdef, flag_val, vals)
else:
_apply_flag_arg(flagdef, flag_val, vals)
+ else:
+ vals[name] = flag_val
return vals
def _apply_option_args(flagdef, val, target):
|
added test
changed getrecentlist to getlist | @@ -120,6 +120,11 @@ script:
demisto.incidents(incidents)
demisto.setLastRun({'time': now})
+ def test():
+ now = datetime.datetime.utcnow()
+ getRecentList(now)
+ demisto.results('ok')
+
def getRecentList(time):
result = []
@@ -212,9 +217,9 @@ script:
body = """<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
- <IncidentGetRecentList xmlns="https://www.monitoredsecurity.com/">
+ <IncidentGetList xmlns="https://www.monitoredsecurity.com/">
<StartTimeStampGMT>%s</StartTimeStampGMT>
- </IncidentGetRecentList>
+ </IncidentGetList>
</soap12:Body>
</soap12:Envelope>""" % (time)
headers = {
@@ -257,6 +262,10 @@ script:
fetchIncidents()
exit(0)
+ if demisto.command() == 'test-module':
+ test()
+ exit(0)
+
if demisto.command() == 'smss-update-incident-workflow':
updateIncident()
exit(0)
|
fix: change subctl--linux-amd64 with subctl
This commit replaces subctl--linux-amd64 with
subctl because that is the new name
for this binary.
# Backup the binary files
- cp ~/submariner-operator/bin/subctl--linux-amd64 ~/subctl
+ cp ~/submariner-operator/bin/subctl ~/subctl
tar -cvf /tmp/submariner_binaries.tar ~/subctl
args:
executable: /bin/bash
|
[fix] fix `getting started` link in readme
The current getting started link is broken. I replaced it by the correct one. Closes | @@ -26,7 +26,7 @@ It is a Python library built on [JAX](https://github.com/google/jax).
## Installation and Usage
Netket supports MacOS and Linux. We reccomend to install NetKet using `pip`
-For instructions on how to install the latest stable/beta release of NetKet see the [Getting Started](https://www.netket.org/website/get_started.html) section of our website.
+For instructions on how to install the latest stable/beta release of NetKet see the [Getting Started](https://www.netket.org/getting_started.html) section of our website.
If you wish to install the current development version of NetKet, which is the master branch of this GitHub repository, together with the additional
dependencies, you can run the following command:
|
Update IronDefense.yml
Reviewed and updated | @@ -21,7 +21,7 @@ configuration:
name: requestTimeout
required: false
type: 0
-description: The IronDefense Integration for Demisto allows users to interact with
+description: The IronDefense Integration allows users to interact with
IronDefense alerts within Demisto. The Integration provides the ability to rate
alerts, update alert statuses, add comments to alerts, and to report observed bad
activity.
@@ -31,14 +31,14 @@ script:
commands:
- arguments:
- default: false
- description: The ID of the IronDefense alert
+ description: The ID of the IronDefense alert.
isArray: false
name: alert_id
required: true
secret: false
- auto: PREDEFINED
default: false
- description: The severity rating of this alert
+ description: 'The severity rating of the alert. Can be: "Undecided", "Benign", "Suspicious", "Malicious".'
isArray: false
name: severity
predefined:
@@ -50,7 +50,7 @@ script:
secret: false
- auto: PREDEFINED
default: false
- description: Was this rating expected? Use "Unknown" if the rating is undecided
+ description: 'Determines whether the rating was expected. Can be: "Unknown", "Expected", "Unexpected". Use "Unknown" if the rating is undecided.'
isArray: false
name: expectation
predefined:
@@ -60,14 +60,14 @@ script:
required: true
secret: false
- default: false
- description: Explain the rating of this alert
+ description: Explains the rating of the alert.
isArray: false
name: comments
required: true
secret: false
- auto: PREDEFINED
default: false
- description: Shares the provided comment with IronDome
+ description: Whether to share the comment with IronDome.
isArray: false
name: share_comment_with_irondome
predefined:
@@ -81,20 +81,20 @@ script:
name: irondefense-rate-alert
- arguments:
- default: false
- description: The ID of the IronDefense alert
+ description: The ID of the IronDefense alert.
isArray: false
name: alert_id
required: true
secret: false
- default: false
- description: Text of comment
+ description: Explains the rating of the alert.
isArray: false
name: comment
required: true
secret: false
- auto: PREDEFINED
default: false
- description: Shares the provided comment with IronDome
+ description: Whether to share the comment with IronDome.
isArray: false
name: share_comment_with_irondome
predefined:
@@ -103,19 +103,19 @@ script:
required: true
secret: false
deprecated: false
- description: Adds a comment to an IronDefense alert
+ description: Adds a comment to an IronDefense alert.
execution: false
name: irondefense-comment-alert
- arguments:
- default: false
- description: The ID of the IronDefense alert
+ description: The ID of the IronDefense alert.
isArray: false
name: alert_id
required: true
secret: false
- auto: PREDEFINED
default: false
- description: The alert status to set
+ description: 'The alert status to set. Can be: "Awaiting Review", "Under Review", "Closed".'
isArray: false
name: status
predefined:
@@ -125,14 +125,14 @@ script:
required: true
secret: false
- default: false
- description: Explain the status of this alert
+ description: Explains the status of the alert.
isArray: false
name: comments
required: true
secret: false
- auto: PREDEFINED
default: false
- description: Shares the provided comment with IronDome
+ description: Whether to share the comment with IronDome.
isArray: false
name: share_comment_with_irondome
predefined:
@@ -141,7 +141,7 @@ script:
required: true
secret: false
deprecated: false
- description: Sets the status of an IronDefense alert
+ description: Sets the status of an IronDefense alert.
execution: false
name: irondefense-set-alert-status
- arguments:
|
Fix DepolarizingChannel documentation
In commit the formula for `DepolarizingChannel` got changed, and I'm pretty sure it should be ` p / (4**n - 1) \sum _i P_i \rho P_i` instead, given that's what the ` AsymmetricDepolarizingChannel` does. | @@ -263,7 +263,7 @@ class DepolarizingChannel(gate_features.SupportsOnEachGate, raw_types.Gate):
This channel evolves a density matrix via
$$
- \rho \rightarrow (1 - p) \rho + 1 / (4**n - 1) \sum _i P_i X P_i
+ \rho \rightarrow (1 - p) \rho + p / (4**n - 1) \sum _i P_i \rho P_i
$$
where $P_i$ are the $4^n - 1$ Pauli gates (excluding the identity).
@@ -372,7 +372,7 @@ def depolarize(p: float, n_qubits: int = 1) -> DepolarizingChannel:
This channel evolves a density matrix via
$$
- \rho \rightarrow (1 - p) \rho + 1 / (4**n - 1) \sum _i P_i X P_i
+ \rho \rightarrow (1 - p) \rho + p / (4**n - 1) \sum _i P_i \rho P_i
$$
where $P_i$ are the $4^n - 1$ Pauli gates (excluding the identity).
|
Fixed more pycodestyle.
Fixed too broad except clause. | @@ -564,7 +564,7 @@ class LocalGeometryFinder:
self.valences = [int(site.specie.oxi_state) for site in self.structure]
else:
self.valences = valences
- except:
+ except AttributeError:
self.valences = valences
else:
self.valences = valences
|