message (string lengths 13 to 484) | diff (string lengths 38 to 4.63k)
Add .summary() method to rotor_assembly.py * This method is used to call the class SummaryResults in results.py * It creates a summary of the main parameters and attributes from the rotor model. The data is presented in a table format.
@@ -38,6 +38,7 @@ from ross.results import ( ForcedResponseResults, ModeShapeResults, StaticResults, + SummaryResults, ConvergenceResults, TimeResponseResults, ) @@ -239,6 +240,8 @@ class Rotor(object): "L", "node_pos", "node_pos_r", + "beam_cg", + "axial_cg_pos", "y_pos", "i_d", "o_d", @@ -271,10 +274,13 @@ class Rotor(object): nodes_pos_l = np.zeros(len(df_shaft.n_l)) nodes_pos_r = np.zeros(len(df_shaft.n_l)) + axial_cg_pos = np.zeros(len(df_shaft.n_l)) - for i in range(len(df_shaft)): + for i, sh in enumerate(self.shaft_elements): if i == 0: nodes_pos_r[i] = nodes_pos_r[i] + df_shaft.loc[i, "L"] + axial_cg_pos[i] = sh.beam_cg + nodes_pos_l[i] + sh.axial_cg_pos = axial_cg_pos[i] continue if df_shaft.loc[i, "n_l"] == df_shaft.loc[i - 1, "n_l"]: nodes_pos_l[i] = nodes_pos_l[i - 1] @@ -282,9 +288,12 @@ class Rotor(object): else: nodes_pos_l[i] = nodes_pos_r[i - 1] nodes_pos_r[i] = nodes_pos_l[i] + df_shaft.loc[i, "L"] + axial_cg_pos[i] = sh.beam_cg + nodes_pos_l[i] + sh.axial_cg_pos = axial_cg_pos[i] df_shaft["nodes_pos_l"] = nodes_pos_l df_shaft["nodes_pos_r"] = nodes_pos_r + df_shaft["axial_cg_pos"] = axial_cg_pos df = pd.concat( [df_shaft, df_disks, df_bearings, df_point_mass, df_seals], sort=True @@ -2500,6 +2509,30 @@ class Rotor(object): return results + def summary(self): + """Rotor summary. + + This creates a summary of the main parameters and attributes from the + rotor model. The data is presented in a table format. + + Parameters + ---------- + + Returns + ------- + results : class instance + An instance of SumarryResults class to build the summary table + + Examples + -------- + >>> rotor = rotor_example() + >>> table = rotor.summary().plot() + >>> # to display the plot use the command: + >>> # show(table) + """ + results = SummaryResults(self.df_shaft) + return results + @classmethod def from_section( cls,
Update instructions in Making the Grade, Task 1 Update instructions in Making the Grade, Task 1 to stress that the resulting score order is not important.
@@ -14,6 +14,7 @@ There shouldn't be any scores that have more than two places after the decimal p

 Create the function `round_scores()` that takes a `list` of `student_scores`. This function should _consume_ the input `list` and `return` a new list with all the scores converted to `int`s.
+The order of the scores in the resulting list is not important.

 ```python
 >>> student_scores = [90.33, 40.5, 55.44, 70.05, 30.55, 25.45, 80.45, 95.3, 38.7, 40.3]
add input_format parameter to _run_tool helper because the 21.01 format is not supported by older galaxy versions
@@ -58,7 +58,7 @@ class TestGalaxyJobs(GalaxyTestBase.GalaxyTestBase):
     @test_util.skip_unless_galaxy('release_21.01')
     @test_util.skip_unless_tool("random_lines1")
     def test_run_and_rerun_random_lines(self):
-        original_output = self._run_tool()
+        original_output = self._run_tool(input_format='21.01')
         original_job_id = original_output['jobs'][0]['id']
         rerun_output = self.gi.jobs.rerun_job(original_job_id)
@@ -180,22 +180,27 @@ class TestGalaxyJobs(GalaxyTestBase.GalaxyTestBase):
         status = self.gi.jobs.update_job_lock(active=False)
         self.assertFalse(status)

-    def _run_tool(self, tool_id='random_lines1') -> dict:
+    def _run_tool(self, tool_id: str = 'random_lines1', input_format: str = 'legacy') -> dict:
         tool_inputs = {
             'num_lines': '1',
             'input': {
                 'src': 'hda',
                 'id': self.dataset_id
             },
+            # include both input formats so that either can be specified to run_tool()
+            # 21.01 format
             'seed_source': {
                 'seed_source_selector': 'set_seed',
                 'seed': 'asdf'
-            }
+            },
+            # legacy format
+            'seed_source|seed_source_selector': 'set_seed',
+            'seed_source|seed': 'asdf'
         }
         return self.gi.tools.run_tool(
             history_id=self.history_id,
             tool_id=tool_id,
             tool_inputs=tool_inputs,
-            input_format='21.01'
+            input_format=input_format
         )
fix: set link field to `undefined` instead of empty string This breaks other code where undefined values are removed, technically invalid value for link field => unset link field so it should be undefined
@@ -471,7 +471,7 @@ frappe.ui.form.ControlLink = class ControlLink extends frappe.ui.form.ControlDat
 			docname: value,
 			fields: columns_to_fetch,
 		}).then((response) => {
-			if (!response || !response.name) return "";
+			if (!response || !response.name) return undefined;
 			if (!docname || !columns_to_fetch.length) return response.name;
 			for (const [target_field, source_field] of Object.entries(fetch_map)) {
remove credentials check and set AWS credentials in boto3 only if they are configured This enables retrieval of credentials from the environment as described at:
@@ -278,14 +278,13 @@ def _do_aws_request(request):
     :rtype: dict
     :raises: AwsDownloadFailedException, ValueError
     """
-    if not SHConfig().aws_access_key_id or not SHConfig().aws_secret_access_key:
-        raise ValueError('The requested data is in Requester Pays AWS bucket. In order to download the data please set '
-                         'your access key in command line:\n'
-                         '$ sentinelhub.config --aws_access_key_id <your AWS key> --aws_secret_access_key '
-                         '<your AWS secret key>')
-    aws_service, _, bucket_name, url_key = request.url.split('/', 3)
-    s3_client = boto3.client(aws_service.strip(':'), aws_access_key_id=SHConfig().aws_access_key_id,
+    if SHConfig().aws_access_key_id and SHConfig().aws_secret_access_key:
+        key_args = dict(aws_access_key_id=SHConfig().aws_access_key_id,
                         aws_secret_access_key=SHConfig().aws_secret_access_key)
+    else:
+        key_args = {}
+    aws_service, _, bucket_name, url_key = request.url.split('/', 3)
+    s3_client = boto3.client(aws_service.strip(':'), **key_args)
     try:
         return s3_client.get_object(Bucket=bucket_name, Key=url_key, RequestPayer='requester')
     except s3_client.exceptions.NoSuchKey:
Move newrelic initialization to the very start of wsgi initialization Makes sure we track all backend errors.
@@ -7,12 +7,9 @@ For more information on this file, see
 https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
 """
+import logging
 import os

-from django.core.wsgi import get_wsgi_application
-
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
-
 # Attach newrelic APM
 try:
     import newrelic.agent
@@ -21,4 +18,9 @@ try:
 except ImportError:
     pass

+try:
+    from django.core.wsgi import get_wsgi_application
+    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contentcuration.settings")
 application = get_wsgi_application()
+except ImportError:
+    logging.warn("Django's WSGI wasn't successfully imported!")
Update README.md * Update README.md No submodule any more * Update README.md remove beta
 [![Build Status](https://api.travis-ci.org/HorizonRobotics/alf.svg?branch=master)](https://travis-ci.org/HorizonRobotics/alf)

-Agent Learning Framework (ALF) is a reinforcement learning framework emphasizing on the flexibility and easiness of implementing complex algorithms involving many different components. ALF is built on [Tensorflow 2.0](https://www.tensorflow.org/beta/).
+Agent Learning Framework (ALF) is a reinforcement learning framework emphasizing on the flexibility and easiness of implementing complex algorithms involving many different components. ALF is built on [Tensorflow 2.1](https://www.tensorflow.org/).

 ## Algorithms
@@ -25,10 +25,6 @@ You can run the following commands to install ALF
 ```
 git clone https://github.com/HorizonRobotics/alf
 cd alf
-git submodule update --init --recursive
-cd tf_agents
-pip install -e .
-cd ..
 pip install -e .
 ```
WIP: Start of support guide questions for LDAP and TLS * Start of support guide questions for LDAP and TLS * Update support.md Fixed some typos
@@ -111,6 +111,36 @@ Typically found in `/etc/nginx/nginx.conf` and `/etc/nginx/sites-available/matte
 #### Can you send us a snippet of your Nginx error logs around the time of the incident?
 Typically found in `/var/log/nginx/error.log`

+
+## LDAP Issues
+
+#### Attach your LDAP settings from config.json
+The username/password should be removed.
+
+#### What AD/LDAP server and version are you using?
+For example Active Directory on Windows Server 2016
+
+#### Are there any LDAP errors in the logs?
+
+#### Is there a limit on the number of users the LDAP server can return in a single query? If so, have you set the Maximum Page Size in Mattermost?
+Many LDAP servers have an upper limit on the number of users returned. So they might be hitting that limit. An error will appear in the logs usually informing of this case, but it's good to try anyway.
+
+#### Can you send an example user from you system? ldapsearch command format is preferred.
+
+#### Is the server properly licensed? Check the licensing page in the system console.
+
+## TLS/SSL Issues
+
+#### Are you using a proxy or the built in TLS support?
+
+#### Are there any errors in the Mattermost logs?
+
+#### Send your config.json
+
+#### Send your proxy configuration if you are using one.
+
+#### Have you followed one of the setup guides?
+
 ## GitLab Issues

 #### General Questions
popovers: Add newline between checkboxes in `Move topic` menu. This is cleaner UI and avoids the spacing looking weird if both fit on a line at a given zoom level. Fixes
             <span></span>
             {{t "Send notification to new topic" }}
         </label>
+        <br/>
         <label class="checkbox">
             <input class="send_notification_to_old_thread" name="send_notification_to_old_thread" type="checkbox" {{#if notify_old_thread}}checked="checked"{{/if}} />
             <span></span>
Delete unused method from deployops.py SIM: cr
@@ -65,22 +65,3 @@ def deploy(app_name, env_name, version, label, message, group_name=None,
         timeout_in_minutes=timeout,
         can_abort=True,
         env_name=env_name)
-
-
-def deploy_no_events(app_name, env_name, version, label, message, process=False, staged=False):
-    region_name = aws.get_region_name()
-
-    io.log_info('Deploying code to ' + env_name + ' in region ' + region_name)
-
-    if version:
-        app_version_label = version
-    else:
-        # Create app version
-        app_version_label = commonops.create_app_version(
-            app_name, process=process, label=label, message=message, staged=staged)
-
-    # swap env to new app version
-    request_id = elasticbeanstalk.update_env_application_version(
-        env_name, app_version_label)
-
-    return request_id
Set Telegram variables to None when unset Fixes AttributeError when they're not present in config.
@@ -22,7 +22,7 @@ for variable_name in ('PB_API_KEY', 'PB_CHANNEL', 'TWITTER_CONSUMER_KEY',
                       'NOTIFY', 'NAME_FONT', 'IV_FONT', 'MOVE_FONT',
                       'TWEET_IMAGES', 'NOTIFY_IDS', 'NEVER_NOTIFY_IDS',
                       'RARITY_OVERRIDE', 'IGNORE_IVS', 'IGNORE_RARITY',
-                      'WEBHOOKS'):
+                      'WEBHOOKS', 'TELEGRAM_BOT_TOKEN', 'TELEGRAM_CHAT_ID'):
     if not hasattr(config, variable_name):
         setattr(config, variable_name, None)
Remove use of astropy units in docstring Fix improper indentation
@@ -311,9 +311,9 @@ def farnocchia(k, r0, v0, tof):
     ----------
     k : float
         Standar Gravitational parameter
-    r0 : ~astropy.units.Quantity
+    r0 : ~np.array
         Initial position vector wrt attractor center.
-    v0 : ~astropy.units.Quantity
+    v0 : ~anp.array
         Initial velocity vector.
     tof : float
         Time of flight (s).
added additional criteria to strong validator added lowercase criteria added using a specific set of special characters
@@ -674,8 +674,10 @@ class strong(BaseValidation): validations, length=8, uppercase=2, + lowercase=2, numbers=2, special=2, + special_chars=None, breach=False, messages={}, raises={}, @@ -683,11 +685,14 @@ class strong(BaseValidation): super().__init__(validations, messages=messages, raises=raises) self.length = length self.uppercase = uppercase + self.lowercase = lowercase self.numbers = numbers self.special = special + self.special_chars = special_chars self.breach = breach self.length_check = True self.uppercase_check = True + self.lowercase_check = True self.numbers_check = True self.special_check = True self.breach_check = True @@ -709,6 +714,16 @@ class strong(BaseValidation): self.uppercase_check = False all_clear = False + if self.lowercase != 0: + lowercase = 0 + for letter in attribute: + if letter.islower(): + lowercase += 1 + + if lowercase < self.lowercase: + self.lowercase_check = False + all_clear = False + if self.numbers != 0: numbers = 0 for letter in attribute: @@ -733,7 +748,10 @@ class strong(BaseValidation): all_clear = False if self.special != 0: - if len(re.findall("[^A-Za-z0-9]", attribute)) < self.special: + special_chars = "[^A-Za-z0-9]" + if self.special_chars: + special_chars = self.special_chars + if len(re.findall(special_chars, attribute)) < self.special: self.special_check = False all_clear = False @@ -755,10 +773,18 @@ class strong(BaseValidation): ) ) + if not self.lowercase_check: + message.append( + "The {} field must have {} lowercase letters".format( + attribute, self.lowercase + ) + ) + if not self.special_check: + valid_chars = self.special_chars or "!@#$%^&*()_+" message.append( - "The {} field must have {} special characters".format( - attribute, self.special + "The {} field must contain {} of these characters: {}".format( + attribute, self.special, valid_chars ) ) @@ -863,7 +889,7 @@ class json(BaseValidation): class phone(BaseValidation): def __init__(self, *rules, pattern="123-456-7890", messages={}, raises={}): - super().__init__(rules, messages={}, raises={}) + super().__init__(rules, messages=messages, raises=raises) # 123-456-7890 # (123)456-7890 self.pattern = pattern
STY: logger style in template Updated the logger style in the template, explaining the two implementation options.
@@ -44,11 +44,15 @@ Author name and institution
 """

 import datetime as dt
-import logging

 import pysat

-logger = logging.getLogger(__name__)
+# Assign the pysat logger to the local log commands, as these functions will
+# all be executed within pysat. If this is the only instance pysat is used,
+# consider omitting the pysat import and logger assignment and replacing it
+# with:
+# from pysat import logger
+logger = pysat.logger

 # ----------------------------------------------------------------------------
 # Instrument attributes:
added check for dash.page_container refactored interpolate_index
@@ -2200,6 +2200,8 @@ class Dash: ] + [self.layout] ) + if _ID_CONTENT not in self.validation_layout: + raise Exception("`dash.page_container` not found in the layout") # Update the page title on page navigation self.clientside_callback( @@ -2237,7 +2239,7 @@ class Dash: ) return dedent( - """ + f""" <!DOCTYPE html> <html> <head> @@ -2255,31 +2257,20 @@ class Dash: <meta property="og:type" content="website" /> <meta property="og:description" content="{description}" /> <meta property="og:image" content="{image}"> - {metas} - {favicon} - {css} + {kwargs["metas"]} + {kwargs["favicon"]} + {kwargs["css"]} </head> <body> - {app_entry} + {kwargs["app_entry"]} <footer> - {config} - {scripts} - {renderer} + {kwargs["config"]} + {kwargs["scripts"]} + {kwargs["renderer"]} </footer> </body> </html> """ - ).format( - metas=kwargs["metas"], - description=description, - title=title, - image=image, - favicon=kwargs["favicon"], - css=kwargs["css"], - app_entry=kwargs["app_entry"], - config=kwargs["config"], - scripts=kwargs["scripts"], - renderer=kwargs["renderer"], ) self.interpolate_index = interpolate_index
Bump FastHttpUser/geventhttpclient dependency to 2.0.2. Not really a strict requirement, but older versions have a couple of issues I dont want people to have to encounter.
@@ -41,7 +41,7 @@ install_requires =
     requests >=2.23.0
     msgpack >=0.6.2
     pyzmq >=22.2.1, !=23.0.0
-    geventhttpclient >=1.5.1
+    geventhttpclient >=2.0.2
     ConfigArgParse >=1.0
     psutil >=5.6.7
     Flask-BasicAuth >=0.2.0
Fix comment buttons not displaying correctly in high contrast mode fixes
@@ -533,6 +533,11 @@ li.inline:first-child {
       width: 30px;
       height: 30px;
       color: $color-teal;
+
+      @media (forced-colors: $media-forced-colours) {
+        color: ButtonText;
+        border: 1px solid;
+      }
     }
   }
 }
fix(tags/print-return): use the raw GitHub URL for the GIF As mentioned in the previous commit, using the raw GitHub URL would be more reliable than a Discord CDN URL.
 embed:
     title: Print and Return
     image:
-        url: https://cdn.discordapp.com/attachments/267659945086812160/998198889154879558/print-return.gif
+        url: https://raw.githubusercontent.com/python-discord/bot/main/bot/resources/media/print-return.gif
 ---
 Here's a handy animation demonstrating how `print` and `return` differ in behavior.
Use system locale in collect_env.py Summary: Fixes Pull Request resolved:
 # This script outputs relevant system environment info
 # Run it with `python collect_env.py`.
 from __future__ import absolute_import, division, print_function, unicode_literals
+import locale
 import re
 import subprocess
 import sys
@@ -42,8 +43,9 @@ def run(command):
     output, err = p.communicate()
     rc = p.returncode
     if PY3:
-        output = output.decode("utf-8")
-        err = err.decode("utf-8")
+        enc = locale.getpreferredencoding()
+        output = output.decode(enc)
+        err = err.decode(enc)
     return rc, output.strip(), err.strip()
[client] uninstall in reverse order So that nested caches work.
@@ -1313,7 +1313,7 @@ def main(args):
     #
     # If the Swarming bot cannot clean up the cache, it will handle it like
     # any other bot file that could not be removed.
-    for path, name in caches:
+    for path, name in reversed(caches):
       try:
         # uninstall() doesn't trim but does call save() implicitly. Trimming
         # *must* be done manually via periodic 'run_isolated.py --clean'.
Fix typo in doc/source/api/octaviaapi.rst Fix typo in doc/source/api/octaviaapi.rst
@@ -1803,7 +1803,7 @@ Layer 7 Rules
 Layer 7 rules are individual statements of logic which match parts of an HTTP
 request, session, or other protocol-specific data for any given client
 request. All the layer 7 rules associated with a given layer 7 policy
-are logically ANDed together to see wether the policy matches a given client
+are logically ANDed together to see whether the policy matches a given client
 request. If logical OR behavior is desired instead, the user should instead
 create multiple layer 7 policies with rules which match each of the
 components of the logical OR statement.
Fix: documentation - remove old syntax Removed 'my_ship' from 'destination' in html example template.
@@ -731,7 +731,7 @@ way:
     Or a better way to get my ship model: {{my_ship.model}} <br>
     <h2>Loops</h2>
     {% for ship in groups['all'] %}
-        Ship {{ship}} is in the shipyard, and has destination {{hostvars[ship]['my_ship']['destination']}}. <br>
+        Ship {{ship}} is in the shipyard, and has destination {{hostvars[ship]['destination']}}. <br>
     {% endfor %}
     </body>
 </html>
Fix Estimator role expansion Instead of manually constructing the role ARN, use the IAM boto client to do it. This properly expands service-roles and regular roles.
@@ -522,8 +522,8 @@ class Session(object):
     def expand_role(self, role):
         """Expand an IAM role name into an ARN.

-        If the role is already in the form of an ARN, then the role is simply returned. Otherwise, the role
-        is formatted as an ARN, using the current account as the IAM role's AWS account.
+        If the role is already in the form of an ARN, then the role is simply returned. Otherwise we retrieve the full
+        ARN and return it.

         Args:
             role (str): An AWS IAM role (either name or full ARN).
@@ -534,8 +534,7 @@ class Session(object):
         if '/' in role:
             return role
         else:
-            account = self.boto_session.client('sts').get_caller_identity()['Account']
-            return 'arn:aws:iam::{}:role/{}'.format(account, role)
+            return boto3.resource("iam").Role(role).arn

     def get_caller_identity_arn(self):
         """Returns the ARN user or role whose credentials are used to call the API.
Remove explicitly enabled neutron This patch removes the explicitly enabled neutron services from local.conf, as devstack now uses neutron by default
@@ -24,13 +24,6 @@ MULTI_HOST=1
 # This is the controller node, so disable nova-compute
 disable_service n-cpu

-# Disable nova-network and use neutron instead
-disable_service n-net
-ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3,neutron
-
-# Enable remote console access
-enable_service n-cauth
-
 # Enable the Watcher Dashboard plugin
 enable_plugin watcher-dashboard git://git.openstack.org/openstack/watcher-dashboard
@@ -42,6 +35,7 @@ enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
 # This is the controller node, so disable the ceilometer compute agent
 disable_service ceilometer-acompute
+
 # Enable the ceilometer api explicitly(bug:1667678)
 enable_service ceilometer-api
Fix moto_server handling of unsigned requests Certain AWS requests are unsigned. Moto in standalone server mode implements a heuristic to deduce the endpoint and region based on the X-Amz-Target HTTP header. This commit extends this concept to add additional endpoints that use unsigned requests at times.
@@ -21,6 +21,16 @@ from moto.core.utils import convert_flask_to_httpretty_response HTTP_METHODS = ["GET", "POST", "PUT", "DELETE", "HEAD", "PATCH"] +DEFAULT_SERVICE_REGION = ('s3', 'us-east-1') + +# Map of unsigned calls to service-region as per AWS API docs +# https://docs.aws.amazon.com/cognito/latest/developerguide/resource-permissions.html#amazon-cognito-signed-versus-unsigned-apis +UNSIGNED_REQUESTS = { + 'AWSCognitoIdentityService': ('cognito-identity', 'us-east-1'), + 'AWSCognitoIdentityProviderService': ('cognito-idp', 'us-east-1'), +} + + class DomainDispatcherApplication(object): """ Dispatch requests to different applications based on the "Host:" header @@ -50,6 +60,32 @@ class DomainDispatcherApplication(object): raise RuntimeError('Invalid host: "%s"' % host) + def infer_service_region(self, environ): + auth = environ.get('HTTP_AUTHORIZATION') + if auth: + # Signed request + # Parse auth header to find service assuming a SigV4 request + # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html + # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] + try: + credential_scope = auth.split(",")[0].split()[1] + _, _, region, service, _ = credential_scope.split("/") + return service, region + except ValueError: + # Signature format does not match, this is exceptional and we can't + # infer a service-region. A reduced set of services still use + # the deprecated SigV2, ergo prefer S3 as most likely default. + # https://docs.aws.amazon.com/general/latest/gr/signature-version-2.html + return DEFAULT_SERVICE_REGION + else: + # Unsigned request + target = environ.get('HTTP_X_AMZ_TARGET') + if target: + service, _ = target.split('.', 1) + return UNSIGNED_REQUESTS.get(service, DEFAULT_SERVICE_REGION) + # S3 is the last resort when the target is also unknown + return DEFAULT_SERVICE_REGION + def get_application(self, environ): path_info = environ.get('PATH_INFO', '') @@ -66,19 +102,7 @@ class DomainDispatcherApplication(object): else: host = environ['HTTP_HOST'].split(':')[0] if host in {'localhost', 'motoserver'} or host.startswith("192.168."): - # Fall back to parsing auth header to find service - # ['Credential=sdffdsa', '20170220', 'us-east-1', 'sns', 'aws4_request'] - try: - _, _, region, service, _ = environ['HTTP_AUTHORIZATION'].split(",")[0].split()[ - 1].split("/") - except (KeyError, ValueError): - # Some cognito-idp endpoints (e.g. change password) do not receive an auth header. - if environ.get('HTTP_X_AMZ_TARGET', '').startswith('AWSCognitoIdentityProviderService'): - service = 'cognito-idp' - else: - service = 's3' - - region = 'us-east-1' + service, region = self.infer_service_region(environ) if service == 'dynamodb': if environ['HTTP_X_AMZ_TARGET'].startswith('DynamoDBStreams'): host = 'dynamodbstreams'
added entry Police tackle and arrest protester, indiscriminately beat others with batons and shoot them with less lethals
@@ -75,6 +75,14 @@ Multiple police officers begin striking protestors with their batons. The video

 * https://streamable.com/ja2fw6 (with audio)
 * https://www.reddit.com/r/PublicFreakout/comments/gv8vaw/lapd_beating_and_shooting_peaceful_protesters_for/

+### Police tackle and arrest protester, indiscriminately beat others with batons and shoot them with less lethals | May 31st
+
+LAPD single out, tackle, and arrest an unarmed protester, then indiscriminately beat nearby protesters with batons and fire less lethals into the crowd.
+
+**Links**
+
+* https://twitter.com/greg_doucette/status/1267095100166987778
+
 ## Oakland

 ### Officer runs down protesters with police cruiser | Believed to be May 30th
warning in `_phi` actually why do we not put `_phi_divide` in `_phi`?
@@ -10,11 +10,10 @@ from .complexity_embedding import complexity_embedding # Phi # ============================================================================= - def _phi( signal, delay=1, dimension=2, tolerance="default", distance="chebyshev", approximate=True, fuzzy=False ): - """Common internal for `entropy_approximate` and `entropy_sample`. + """Common internal for `entropy_approximate`, `entropy_sample` and `entropy_range`. Adapted from `EntroPy <https://github.com/raphaelvallat/entropy>`_, check it out! @@ -31,19 +30,41 @@ def _phi( ) if approximate is True: + # Warning for undefined + if any(count1 == 0) or any(count2 == 0): + r = _get_tolerance(signal, tolerance=tolerance, dimension=dimension, show=False) + warn( + "Undefined conditional probabilities for entropy were detected. " + + f"Try manually increasing tolerance levels (current tolerance={r}).", + category=NeuroKitWarning, + ) + phi = np.inf + else: phi[0] = np.mean(np.log(count1 / embedded1.shape[0])) phi[1] = np.mean(np.log(count2 / embedded2.shape[0])) + else: phi[0] = np.mean((count1 - 1) / (embedded1.shape[0] - 1)) phi[1] = np.mean((count2 - 1) / (embedded2.shape[0] - 1)) + return phi def _phi_divide(phi): if phi[0] == 0: + # warn( + # "Undefined conditional probabilities for entropy were detected. " + + # f"Try manually increasing tolerance levels (current tolerance={r}).", + # category=NeuroKitWarning, + # ) return -np.inf division = np.divide(phi[1], phi[0]) if division == 0: + # warn( + # "Undefined conditional probabilities for entropy were detected. " + + # f"Try manually increasing tolerance levels (current tolerance={r}).", + # category=NeuroKitWarning, + # ) return np.inf return -np.log(division)
Slight change to getting the keys of G.nodes May throw KeyError: 'Key 0 not found'
@@ -82,7 +82,7 @@ def from_networkx(G):
     edge_index = torch.tensor(list(G.edges)).t().contiguous()

     keys = []
-    keys += list(G.nodes(data=True)[0].keys())
+    keys += list(list(G.nodes(data=True))[0][1].keys())
     keys += list(list(G.edges(data=True))[0][2].keys())
     data = {key: [] for key in keys}
Refactor stream handling This places the control of stream creation and deletion in the base class and should ensure that streams are deleted.
@@ -57,6 +57,8 @@ class Server(asyncio.Protocol): class HTTPProtocol: + stream_class = Stream + def __init__( self, app: 'Quart', @@ -80,6 +82,7 @@ class HTTPProtocol: path: str, headers: CIMultiDict, ) -> None: + self.streams[stream_id] = self.stream_class(self.loop) headers['Remote-Addr'] = self.transport.get_extra_info('peername')[0] request = self.app.request_class(method, path, headers, self.streams[stream_id].future) # It is important that the app handles the request in a unique @@ -90,6 +93,9 @@ class HTTPProtocol: def handle_response(self, stream_id: int, future: asyncio.Future) -> None: raise NotImplemented() + def end_response(self, stream_id: int, response: Response) -> None: + del self.streams[stream_id] + def response_headers(self) -> List[Tuple[str, str]]: return [('server', self.server_header)] @@ -125,7 +131,6 @@ class H11Server(HTTPProtocol): headers = CIMultiDict() for name, value in event.headers: headers.add(name.decode().title(), value.decode()) - self.streams[0] = Stream(self.loop) self.handle_request( 0, event.method.decode().upper(), event.target.decode(), headers, ) @@ -152,6 +157,7 @@ class H11Server(HTTPProtocol): self._send(h11.Data(data=data)) self._send(h11.EndOfMessage()) self._handle_events() + self.end_response(stream_id, response) def _handle_error(self) -> None: self._send(h11.Response(status_code=400, headers=tuple())) @@ -182,6 +188,8 @@ class H2Stream(Stream): class H2Server(HTTPProtocol): + stream_class = H2Stream + def __init__( self, app: 'Quart', @@ -210,7 +218,6 @@ class H2Server(HTTPProtocol): headers = CIMultiDict() for name, value in event.headers: headers.add(name.title(), value) - self.streams[event.stream_id] = H2Stream(self.loop) self.handle_request( event.stream_id, headers[':method'].upper(), headers[':path'], headers, ) @@ -239,7 +246,7 @@ class H2Server(HTTPProtocol): await self._send_data(stream_id, data) self.connection.end_stream(stream_id) self.transport.write(self.connection.data_to_send()) # type: ignore - del self.streams[stream_id] + self.end_response(stream_id, response) async def _send_data(self, stream_id: int, data: bytes) -> None: while True:
Not enough values to unpack (expected 4, got 2) When contract test invoke is not successful, it raised the error ValueError: not enough values to unpack (expected 4, got 2)
@@ -234,12 +234,12 @@ def test_invoke(script, wallet, outputs):
             # tx.Gas = Fixed8.One()
             # tx.Attributes = []
             # return tx, []
-            return None,[]
+            return None,None, None, None

     except Exception as e:
         print("COULD NOT EXECUTE %s " % e)

-    return None,[]
+    return None,None, None, None
Fix missing import Why did I start editing code in a web browser?!
@@ -4,11 +4,10 @@ from __future__ import division
 from __future__ import absolute_import
 from __future__ import print_function
 from __future__ import unicode_literals
-
 from mock import patch
-
 from builtins import chr
 import unittest
+import sys
 from mock.mock import MagicMock
 from asciimatics.event import KeyboardEvent, MouseEvent
 from asciimatics.exceptions import NextScene, StopApplication, InvalidFields
I think I found the problem, finally. I'll still need to refine how I handle it, though. Right now I'm copying the strides and shape of each py::array to new arrays to be used in the CPU methods' structs
namespace py = pybind11; struct c_array py2c(py::array input) { - char format[8]; - strcpy(format, input.request().format.c_str()); + py::buffer_info info = input.request(); + + if (info.ndim > 15) { + throw std::invalid_argument("Array cannot exceed 15 dimensions"); + } + + char format[15]; + strcpy(format, info.format.c_str()); + ssize_t shape[15]; + std::copy(info.shape.begin(), info.shape.end(), shape); + ssize_t strides[15]; + std::copy(info.strides.begin(), info.strides.end(), strides); + struct c_array out = { - input.request().ptr, - input.request().itemsize, - input.request().size, + info.ptr, + info.itemsize, + info.size, format, - input.request().ndim, - &input.request().shape[0], - &input.request().strides[0] + info.ndim, + &shape[0], + &strides[0] }; return out; }
Limit problem types by cell type Remove automatic options for the problem type in the create_assignments labextension when the notebook cell type is not code.
@@ -253,6 +253,10 @@ class CellWidget extends Panel {
       ['tests', 'Autograded task'],
       ['readonly', 'Read-only']
     ]);
+    if (this.cell.model.type !== 'code') {
+      options.delete('solution');
+      options.delete('tests');
+    }
     const fragment = document.createDocumentFragment();
     for (const optionEntry of options.entries()) {
       const option = document.createElement('option');
Cleaner fix for python 2/3 in adb_device.py * Cleaner fix for python 2/3 in adb_device.py Cleaner fix for python 2/3 compatibility in adb_device.py * Update adb_device.py
@@ -30,9 +30,6 @@ function, but rather a USB function - listing devices with a specific interface class, subclass, and protocol. """ -try: - import cStringIO as io -except ImportError: import io import logging import os.path @@ -164,15 +161,14 @@ class AdbDevice(object): Returns: The file data if dest_file is not set, None otherwise. """ + should_return_data = dest_file is None if isinstance(dest_file, str): dest_file = open(dest_file, 'w') - elif not dest_file: + elif dest_file is None: dest_file = io.StringIO() self.filesync_service.recv(device_filename, dest_file, timeouts.PolledTimeout.from_millis(timeout_ms)) - # An empty call to cStringIO.StringIO returns an instance of - # cStringIO.OutputType. - if isinstance(dest_file, io.OutputType): + if should_return_data: return dest_file.getvalue() def list(self, device_path, timeout_ms=None):
Don't use `to_qubo()` Avoid performance hit from converting from 'SPIN' to 'BINARY'
@@ -176,8 +176,11 @@ class EmbeddingComposite(dimod.Sampler, dimod.Composite):
         # apply the embedding to the given problem to map it to the child sampler
         __, target_edgelist, target_adjacency = child.structure

+        # add self-loops to edgelist to handle singleton variables
+        source_edgelist = bqm.quadratic.keys() + [(v, v) for v in bqm.linear]
+
         # get the embedding
-        embedding = minorminer.find_embedding(bqm.to_qubo()[0], target_edgelist)
+        embedding = minorminer.find_embedding(source_edgelist, target_edgelist)

         if bqm and not embedding:
             raise ValueError("no embedding found")
Check the return code of the compiler This PR is about avoiding the cancellation of the build when the compiler raises a warning. We now raise a RuntimeError only if the compiler command returns a non-zero exit status. This closes
@@ -189,6 +189,7 @@ def create_shared_library(codegen,
         print(out)
     if len(err)>0:
         print(err)
+    if p.returncode != 0:
         raise RuntimeError("Failed to build module")

     sharedlib_folder += 'build/lib*/'
Add AUTO mode to HVAC According to KNX specification v2.1 3/7/2 Datapoint Types, section 4.3, DPT 20.102 value 0 is Auto
@@ -7,6 +7,7 @@ from .dpt import DPTBase
 class HVACOperationMode(Enum):
     """Enum for the different KNX HVAC operation modes."""

+    AUTO = "Auto"
     COMFORT = "Comfort"
     STANDBY = "Standby"
     NIGHT = "Night"
@@ -32,11 +33,15 @@ class DPTHVACMode(DPTBase):
             return HVACOperationMode.STANDBY
         elif raw[0] == 0x01:
             return HVACOperationMode.COMFORT
+        elif raw[0] == 0x00:
+            return HVACOperationMode.AUTO

     @classmethod
     def to_knx(cls, value):
         """Serialize to KNX/IP raw data."""
-        if value == HVACOperationMode.COMFORT:
+        if value == HVACOperationMode.AUTO:
+            return (0,)
+        elif value == HVACOperationMode.COMFORT:
             return (1,)
         elif value == HVACOperationMode.STANDBY:
             return (2,)
Update v_generate_tbl_ddl.sql Added clearer error for when the diststyle is unknown "<<Error - UNKNOWN DISTSTYLE>>"
@@ -189,7 +189,7 @@ FROM pg_namespace AS n
          WHEN c.reldiststyle = 1 THEN 'DISTSTYLE KEY'
          WHEN c.reldiststyle = 8 THEN 'DISTSTYLE ALL'
          WHEN c.reldiststyle = 9 THEN 'DISTSTYLE AUTO'
-         ELSE 'UNKNOWN'
+         ELSE '<<Error - UNKNOWN DISTSTYLE>>'
         END AS ddl
 FROM pg_namespace AS n
 INNER JOIN pg_class AS c ON n.oid = c.relnamespace
Update pysat/tests/test_utils.py kwarg dict in parametrize
@@ -512,10 +512,10 @@ class TestGenerateInstList(object):
 class TestDeprecation(object):
     """Unit test for deprecation warnings."""

-    @pytest.mark.parametrize("fnames,f_fmt,msg_inds",
-                             [(None, None, [0, 1]),
-                              ('no_file', None, [0, 2])])
-    def test_load_netcdf4(self, fnames, f_fmt, msg_inds):
+    @pytest.mark.parametrize("kwargs,msg_inds",
+                             [({'fnames': None}, [0, 1]),
+                              ({'fnames': 'no_file', 'file_format': None}, [0, 2])])
+    def test_load_netcdf4(self, kwargs, msg_inds):
         """Test deprecation warnings from load_netcdf4."""
         with warnings.catch_warnings(record=True) as war:
             try:
Update setup.py Not sure if this is the best approach, but as it stands the binary the current hubble.service file is looking for (via /opt/hubble/hubble binary) doesn't exist. It was suggested to use a symlink in a prior PR, but the binary itself is non-existent in this case.
@@ -15,7 +15,7 @@ if distro == 'redhat' or distro == 'centos':
         data_files = [('/etc/init.d', ['pkg/hubble']),
                       ('/etc/hubble', ['conf/hubble']), ]
     elif version.startswith('7'):
-        data_files = [('/usr/lib/systemd/system', ['pkg/hubble.service']),
+        data_files = [('/usr/lib/systemd/system', ['pkg/centos7/hubble.service']),
                       ('/etc/hubble', ['conf/hubble']), ]
 elif distro == 'Amazon Linux AMI':
     data_files = [('/etc/init.d', ['pkg/hubble']),
Run CentOS tests on CentOS Stream CentOS 8 went EOL on 2022-01-31. Switch to CentOS Stream 8.
@@ -7,9 +7,14 @@ OS=${OS:="centos"}
 OS_VERSION=${OS_VERSION:="8"}
 PYTHON_VERSION=${PYTHON_VERSION:="3.8"}
 ACTION=${ACTION:="test"}
-IMAGE="$OS:$OS_VERSION"
 CONTAINER_NAME="atomic-reactor-$OS-$OS_VERSION-py$PYTHON_VERSION"

+if [[ "$OS" == centos ]]; then
+  IMAGE="quay.io/centos/centos:stream$OS_VERSION"
+else
+  IMAGE="$OS:$OS_VERSION"
+fi
+
 # Use arrays to prevent globbing and word splitting
 engine_mounts=(-v "$PWD":"$PWD":z)
 for dir in ${EXTRA_MOUNT:-}; do
Reorder parameters for the FPCA The constructor for `FPCA` receives parameters in a different order. This change fixes this.
@@ -107,8 +107,8 @@ class MahalanobisDistance(BaseEstimator):
             self.n_components,
             self.centering,
             self.regularization,
-            self.weights,
             self.components_basis,
+            self.weights,
         )
         fpca.fit(X)
         self.eigenvalues_ = fpca.explained_variance_
[definitions] Make ResourceDefinition variables private Test Plan: Unit Reviewers: #ft, max
@@ -22,11 +22,23 @@ class ResourceDefinition(object):
     '''

     def __init__(self, resource_fn, config_field=None, description=None):
-        self.resource_fn = check.callable_param(resource_fn, 'resource_fn')
-        self.config_field = check_user_facing_opt_field_param(
+        self._resource_fn = check.callable_param(resource_fn, 'resource_fn')
+        self._config_field = check_user_facing_opt_field_param(
             config_field, 'config_field', 'of a ResourceDefinition or @resource'
         )
-        self.description = check.opt_str_param(description, 'description')
+        self._description = check.opt_str_param(description, 'description')
+
+    @property
+    def resource_fn(self):
+        return self._resource_fn
+
+    @property
+    def config_field(self):
+        return self._config_field
+
+    @property
+    def description(self):
+        return self._description

     @staticmethod
     def none_resource(description=None):
[Datasets] Add AWS CLI info into S3 credential error message As a follow-up of (comment), we want to add AWS CLI command information into the S3 credential error message, so users have a better idea of how to further debug the read issue.
@@ -346,7 +346,10 @@ def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str:
         (
             f"Failing to read AWS S3 file(s): {paths}. "
             "Please check that file exists and has properly configured access. "
-            "See https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage "  # noqa
+            "You can also run AWS CLI command to get more detailed error message "
+            "(e.g., aws s3 ls <file-name>). "
+            "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html "  # noqa
+            "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage "  # noqa
             "for more information."
         )
     )
don't log gearbot failed mass pings usually these are just from the modlogs
@@ -71,6 +71,8 @@ class ModLog(BaseCog):
         if Configuration.get_var(message.guild.id, "MESSAGE_LOGS", "ENABLED") and (
                 message.content != "" or len(message.attachments) > 0) and message.author.id != self.bot.user.id:
             await MessageUtils.insert_message(self.bot, message)
+        else:
+            return

         failed_mass_ping = 0
         if "@everyone" in message.content and message.mention_everyone is False:
Reorganize block store docs This change organizes the block store docs by topic rather than letting autodoc organize methods by the order they appear in the _proxy.py file.
@@ -12,5 +12,32 @@ The block_store high-level interface is available through the ``block_store``
 member of a :class:`~openstack.connection.Connection` object. The
 ``block_store`` member will only be added if the service is detected.

+Volume Operations
+^^^^^^^^^^^^^^^^^
+
+.. autoclass:: openstack.block_store.v2._proxy.Proxy
+
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.create_volume
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.delete_volume
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.get_volume
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.volumes
+
+Type Operations
+^^^^^^^^^^^^^^^
+
 .. autoclass:: openstack.block_store.v2._proxy.Proxy
-   :members:
+
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.create_type
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.delete_type
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.get_type
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.types
+
+Snapshot Operations
+^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: openstack.block_store.v2._proxy.Proxy
+
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.create_snapshot
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.delete_snapshot
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.get_snapshot
+   .. automethod:: openstack.block_store.v2._proxy.Proxy.snapshots
Fix an error in getting award km I noticed a crash when a buddy was already set, the start KM would be zero, crashing this logic. Simple fix; check if present and set to 0 when not found. Seems to do the trick.
@@ -264,6 +264,9 @@ class PokemonOptimizer(BaseTask):

             if distance_walked >= distance_needed:
                 self.get_buddy_walked(pokemon)
+                # self.buddy["start_km_walked"] can be empty here
+                if 'start_km_walked' not in self.buddy:
+                    self.buddy["start_km_walked"] = 0
                 self.buddy["last_km_awarded"] = self.buddy["start_km_walked"] + distance_needed * int(distance_walked / distance_needed)
                 self.lock_buddy = False
             else:
Update CLI.md * Update CLI.md Update the suggested command template for "trust keys", to make it consistent with other examples. * Update docs/CLI.md Remove the + to avoid confusion.
@@ -153,7 +153,7 @@ itself.
 The --trust command-line option, in conjunction with --pubkeys and --role, can
 be used to indicate the trusted keys of a role.

 ```Bash
-$ repo.py --trust --pubkeys --role
+$ repo.py --trust --pubkeys </path/to/foo_key.pub> --role <rolename>
 ```

 For example:
dsl_unparse: fix unparsing of foo._.at(n) It used to be unparsed as `foo??(n)` (incorrect) and now unparsed as `foo?(n)`. Found when unparsing LAL. TN:
@@ -533,7 +533,7 @@ def var_name(var_expr, default="_"): return unparsed_name(ret) -def is_a(expr, *names): +def expr_is_a(expr, *names): return any(expr.__class__.__name__ == n for n in names) @@ -543,7 +543,7 @@ def needs_parens(expr): return not ( isinstance(expr, (FieldAccess, Literal, AbstractVariable, BigIntLiteral, EnvGet, Map, Quantifier)) - or is_a(expr, "as_entity", "as_bare_entity", "children", + or expr_is_a(expr, "as_entity", "as_bare_entity", "children", "env_parent", "rebindings_parent", "parents", "parent", "root", "append_rebinding", "concat_rebindings", "env_node", "rebindings_new_env", "rebindings_old_env", "get_value", @@ -858,7 +858,11 @@ def emit_expr(expr, **ctx): # expression in the new syntax, so we don't want to use the ? # syntax on it. if not isinstance(expr.then_expr, Match): - return "{}?{}".format( + # If the "then" expression also implies a "?", do not emit it + # twice. + fmt = '{}{}' if expr_is_a(expr.then_expr, 'at') else '{}?{}' + + return fmt.format( ee(expr.expr), ee(expr.then_expr, then_underscore_var=expr.var_expr) )
lightbox: Don't blow up for messages not in the message store. This should somewhat reduce the gravity of the failure mode for cases where the message the user clicked cannot be found (which would be a significant bug on its own merit in any case).
@@ -138,9 +138,10 @@ exports.open = function (image, options) {
         const message = message_store.get(zid);
         if (message === undefined) {
             blueslip.error("Lightbox for unknown message " + $message.attr("zid"));
-        }
+        } else {
             sender_full_name = message.sender_full_name;
         }
+    }

     payload = {
         user: sender_full_name,
         title: $parent.attr("title"),
minor fix I have fixed the function so that len(candidate_offsets) can't be zero.
@@ -5,7 +5,8 @@ import pandas as pd
 import scipy.signal

 from ..epochs import epochs_create, epochs_to_df
-from ..signal import signal_findpeaks, signal_formatpeaks, signal_resample, signal_smooth, signal_zerocrossings
+from ..signal import (signal_findpeaks, signal_formatpeaks, signal_resample,
+                      signal_smooth, signal_zerocrossings)
 from ..stats import standardize
 from .ecg_peaks import ecg_peaks
 from .ecg_segment import ecg_segment
@@ -371,14 +372,12 @@ def _dwt_delineate_tp_onsets_offsets(
             offsets.append(np.nan)
             continue
         epsilon_offset = -offset_weight * dwt_local[offset_slope_peaks[0]]
-        if not (-dwt_local[onset_slope_peaks[0] :] < epsilon_offset).any():
+        if not (-dwt_local[offset_slope_peaks[0] :] < epsilon_offset).any():
             offsets.append(np.nan)
             continue
         candidate_offsets = np.where(-dwt_local[offset_slope_peaks[0] :] < epsilon_offset)[0] + offset_slope_peaks[0]
-        if(len(candidate_offsets) > 0):
             offsets.append(candidate_offsets[0] + srch_idx_start)
-        else:
-            offsets.append(np.nan)
+
         # # only for debugging
         # events_plot([candidate_offsets, offset_slope_peaks], dwt_local)
add labels of PointsOfInterest missing dot patch (todo)
@@ -120,7 +120,10 @@ class FigureManager: if graphic.label.isRenderedOn(self.figure): labels.append(graphic.label) - labels.extend(self.labels) + for label in self.labels: + if label.isRenderedOn(self.figure): + labels.append(label) + return labels def fixLabelOverlaps(self, maxIteration: int = 5): @@ -252,7 +255,8 @@ class FigureManager: if self.path.showPointsOfInterest: self.add(*self.labelsOfStops()) - # self.add(*self.labelsOfPointsOfInterest()) + self.add(*self.labelsOfPointsOfInterest()) + # todo: change POI labels into a dot Graphic ? or add optional dot patch in label class z = 0 for element in self.path.elements: @@ -374,7 +378,39 @@ class FigureManager: return labels def labelsOfPointsOfInterest(self) -> List[Label]: + """ Labels of general points of interest are drawn below the axis, at 25% of the largest diameter. """ + + labelStrings = {} # Gather labels at same z + + zElement = 0 + # For the group as a whole, then each element + for pointOfInterest in self.path.pointsOfInterest(z=zElement): + zStr = "{0:3.3f}".format(pointOfInterest['z']) + labelString = pointOfInterest['label'] + if zStr in labelStrings: + labelStrings[zStr] = labelStrings[zStr] + ", " + labelString + else: + labelStrings[zStr] = labelString + + # Points of interest for each element + for element in self.path.elements: + pointsOfInterest = element.pointsOfInterest(zElement) + + for pointOfInterest in pointsOfInterest: + zStr = "{0:3.3f}".format(pointOfInterest['z']) + labelString = pointOfInterest['label'] + if zStr in labelStrings: + labelStrings[zStr] = labelStrings[zStr] + ", " + labelString + else: + labelStrings[zStr] = labelString + zElement += element.L + labels = [] + halfHeight = self.path.largestDiameter / 2 + for zStr, labelString in labelStrings.items(): + z = float(zStr) + labels.append(Label(labelString, x=z, y=-halfHeight*0.5, fontsize=12)) + return labels def maxRayHeight(self):
[tests] Give a more informative AssertionError Give a more informative AssertionError if assert does not match
@@ -336,7 +336,11 @@ class APISite(BaseSite):
             if login_manager.login(retry=True, autocreate=autocreate):
                 self._username = login_manager.username
                 del self.userinfo  # force reloading
-                assert self.userinfo['name'] == self.username()  # load userinfo
+
+                # load userinfo
+                assert self.userinfo['name'] == self.username(), \
+                    '{} != {}'.format(self.userinfo['name'], self.username())
+
                 self._loginstatus = _LoginStatus.AS_USER
             else:
                 self._loginstatus = _LoginStatus.NOT_LOGGED_IN  # failure
[Fix] Fix license badge * Add serialization * fix broken badge * Revert "Add serialization" This reverts commit * fix
 # Deep Graph Library (DGL)

 [![Build Status](http://ci.dgl.ai:80/buildStatus/icon?job=DGL/master)](http://ci.dgl.ai:80/job/DGL/job/master/)
-[![GitHub license](https://dmlc.github.io/img/apache2.svg)](./LICENSE)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](./LICENSE)

 [Documentation](https://docs.dgl.ai) | [DGL at a glance](https://docs.dgl.ai/tutorials/basics/1_first.html#sphx-glr-tutorials-basics-1-first-py) | [Model Tutorials](https://docs.dgl.ai/tutorials/models/index.html) | [Discussion Forum](https://discuss.dgl.ai)
Update avcodecs.py use correct vaapi keys
@@ -845,11 +845,11 @@ class H264VAAPI(H264Codec):
         optlist = super(H264VAAPI, self)._codec_specific_produce_ffmpeg_list(safe, stream)
         optlist.extend(['-vaapi_device', '/dev/dri/renderD128'])
         if 'vaapi_wscale' in safe and 'vaapi_hscale' in safe:
-            optlist.extend(['-vf', 'hwupload,%s=%s:%s:format=nv12' % (self.scale_filter, safe['wscale'], safe['hscale'])])
-        elif 'wscale' in safe:
-            optlist.extend(['-vf', 'hwupload,%s=%s:trunc(ow/a/2)*2:format=nv12' % (self.scale_filter, safe['wscale'])])
-        elif 'hscale' in safe:
-            optlist.extend(['-vf', 'hwupload,%s=trunc((oh*a)/2)*2:%s:format=nv12' % (self.scale_filter, safe['hscale'])])
+            optlist.extend(['-vf', 'hwupload,%s=%s:%s:format=nv12' % (self.scale_filter, safe['vaapi_wscale'], safe['vaapi_hscale'])])
+        elif 'vaapi_wscale' in safe:
+            optlist.extend(['-vf', 'hwupload,%s=%s:trunc(ow/a/2)*2:format=nv12' % (self.scale_filter, safe['vaapi_wscale'])])
+        elif 'vaapi_hscale' in safe:
+            optlist.extend(['-vf', 'hwupload,%s=trunc((oh*a)/2)*2:%s:format=nv12' % (self.scale_filter, safe['vaapi_hscale'])])
         else:
             optlist.extend(['-vf', "hwupload,format=nv12"])
Fix issue Disable textures after painting is finished.
@@ -152,6 +152,8 @@ class GLScatterPlotItem(GLGraphicsItem):
         glDisableClientState(GL_VERTEX_ARRAY)
         glDisableClientState(GL_COLOR_ARRAY)
         #posVBO.unbind()
+        ##fixes #145
+        glDisable( GL_TEXTURE_2D )

         #for i in range(len(self.pos)):
             #pos = self.pos[i]
Ensure CompImageHeader can be sliced and copied. With the __new__ method, we ensure the old behaviour of returning a Header instance is kept.
@@ -85,6 +85,9 @@ class CompImageHeader(Header): This essentially wraps the image header, so that all values are read from and written to the image header. However, updates to the image header will also update the table header where appropriate. + + Note that if no image header is passed in, the code will instantiate a + regular `~astropy.io.fits.Header`. """ # TODO: The difficulty of implementing this screams a need to rewrite this @@ -105,9 +108,19 @@ class CompImageHeader(Header): # the schema system, but it's not quite ready for that yet. Also it still # makes more sense to change CompImageHDU to subclass ImageHDU :/ - def __init__(self, table_header, image_header=None): + def __new__(cls, table_header, image_header=None): + # 2019-09-14 (MHvK): No point wrapping anything if no image_header is + # given. This happens if __getitem__ and copy are called - our super + # class will aim to initialize a new, possibly partially filled + # header, but we cannot usefully deal with that. + # TODO: the above suggests strongly we should *not* subclass from + # Header. See also comment above about the need for reorganization. if image_header is None: - image_header = Header() + return Header(table_header) + else: + return super().__new__(cls) + + def __init__(self, table_header, image_header): self._cards = image_header._cards self._keyword_indices = image_header._keyword_indices self._rvkc_indices = image_header._rvkc_indices
Mention hiredis installation option. See
@@ -32,6 +32,14 @@ but you can override this with the ``hosts`` key in its config::
         },
     }

+Consider `hiredis`_ library installation to improve layer performance::
+
+    pip install hiredis
+
+It will be used automatically if it's installed.
+
+.. _hiredis: https://github.com/redis/hiredis-py
+
 Sharding
 ~~~~~~~~
Update whitelist.txt News site, not malware-distribution one.
@@ -2922,3 +2922,7 @@ nvpn.so
 # Note: rupor.info is legit news site, which was compromised in 2009 due to MDL database. On 2020-06-03 rupor.info is clean.

 rupor.info
+
+# Reference: https://www.virustotal.com/gui/domain/rusvesna.su/detection
+
+rusvesna.su
Fix crash with automod due to silent Discord breaking change Fix
@@ -419,7 +419,7 @@ class AutoModAction:
         The matched keyword from the triggering message.
     matched_content: Optional[:class:`str`]
         The matched content from the triggering message.
-        Requires the :attr:`Intents.message_content` or it will always return an empty string.
+        Requires the :attr:`Intents.message_content` or it will always return ``None``.
     """

     __slots__ = (
@@ -447,9 +447,9 @@ class AutoModAction:
         self.channel_id: Optional[int] = utils._get_as_snowflake(data, 'channel_id')
         self.user_id: int = int(data['user_id'])
         self.alert_system_message_id: Optional[int] = utils._get_as_snowflake(data, 'alert_system_message_id')
-        self.content: str = data['content']
+        self.content: str = data.get('content', '')
         self.matched_keyword: Optional[str] = data['matched_keyword']
-        self.matched_content: Optional[str] = data['matched_content']
+        self.matched_content: Optional[str] = data.get('matched_content')

     def __repr__(self) -> str:
         return f'<AutoModRuleExecution rule_id={self.rule_id} action={self.action!r}>'
Bump minimum Requests version 2.20.1 has issues, see
@@ -60,7 +60,7 @@ setup(
     install_requires=[
         "feedparser >= 5.1.0",
         "pytz",
-        "requests",
+        "requests >= 2.21.0",
         "pathlib",
         "bibtexparser",
     ],
unicode message Fixes COMMCAREHQ-3J8
@@ -292,7 +292,7 @@ def handle_pillow_error(pillow, change, exception):
         error_id = error.id

     pillow_logging.exception(
-        "[%s] Error on change: %s, %s. Logged as: %s" % (
+        u"[%s] Error on change: %s, %s. Logged as: %s" % (
             pillow.get_name(),
             change['id'],
             exception,
create_snapshot: fail the execution when snapshot fails If snapshot creation fails, reraise so that we fail the execution. As opposed to returning "Snapshot creation failed. Execution succeeded".
@@ -75,6 +75,7 @@ class SnapshotCreate(object):
         except BaseException as e:
             self._update_snapshot_status(self._config.failed_status, str(e))
             ctx.logger.error('Snapshot creation failed: {0}'.format(str(e)))
+            raise
         finally:
             ctx.logger.debug('Removing temp dir: {0}'.format(self._tempdir))
             shutil.rmtree(self._tempdir)
ENH: Check input tlist in correlation_spectrum_fft * faster rcm * update norms * Remove rcm bucky test the use of int_argsort makes the exact perm array platform dependent. * Check for equally spaced tlist in correlation_fft
@@ -545,7 +545,7 @@ def spectrum(H, wlist, c_ops, a_op, b_op, solver="es", use_pinv=False):
                         "%s (use es or pi)." % solver)


-def spectrum_correlation_fft(taulist, y):
+def spectrum_correlation_fft(tlist, y):
     """
     Calculate the power spectrum corresponding to a two-time correlation
     function using FFT.
@@ -567,12 +567,13 @@ def spectrum_correlation_fft(taulist, y):
     if debug:
         print(inspect.stack()[0][3])
-
-    N = len(taulist)
-    dt = taulist[1] - taulist[0]
+    tlist = np.asarray(tlist)
+    N = tlist.shape[0]
+    dt = tlist[1] - tlist[0]
+    if not np.allclose(np.diff(tlist), dt*np.ones(N-1,dtype=float)):
+        raise Exception('tlist must be equally spaced for FFT.')

     F = scipy.fftpack.fft(y)
-    # calculate the frequencies for the components in F
     f = scipy.fftpack.fftfreq(N, dt)
actions: Add allow_deactivated option when fetching recipients. Preparatory commit to allow viewing group PM with deactivated users.
@@ -1866,7 +1866,8 @@ def get_recipient_from_user_ids(recipient_profile_ids: Set[int], return get_personal_recipient(list(recipient_profile_ids)[0]) def validate_recipient_user_profiles(user_profiles: List[UserProfile], - sender: UserProfile) -> Set[int]: + sender: UserProfile, + allow_deactivated: bool=False) -> Set[int]: recipient_profile_ids = set() # We exempt cross-realm bots from the check that all the recipients @@ -1876,8 +1877,8 @@ def validate_recipient_user_profiles(user_profiles: List[UserProfile], realms.add(sender.realm_id) for user_profile in user_profiles: - if (not user_profile.is_active and not user_profile.is_mirror_dummy) or \ - user_profile.realm.deactivated: + if (not user_profile.is_active and not user_profile.is_mirror_dummy and + not allow_deactivated) or user_profile.realm.deactivated: raise ValidationError(_("'%s' is no longer using Zulip.") % (user_profile.email,)) recipient_profile_ids.add(user_profile.id) if not is_cross_realm_bot_email(user_profile.email): @@ -1928,9 +1929,10 @@ def recipient_for_user_ids(user_ids: Iterable[int], sender: UserProfile) -> Reci def recipient_for_user_profiles(user_profiles: List[UserProfile], forwarded_mirror_message: bool, forwarder_user_profile: Optional[UserProfile], - sender: UserProfile) -> Recipient: + sender: UserProfile, allow_deactivated: bool=False) -> Recipient: - recipient_profile_ids = validate_recipient_user_profiles(user_profiles, sender) + recipient_profile_ids = validate_recipient_user_profiles(user_profiles, sender, + allow_deactivated=allow_deactivated) return get_recipient_from_user_ids(recipient_profile_ids, forwarded_mirror_message, forwarder_user_profile, sender)
[swarming] enable dead bot cron job I'll commit only once has been deployed everywhere.
@@ -14,12 +14,10 @@ cron:
   schedule: every 1 minutes
   target: backend

-# TODO(maruel): https://crbug.com/826421 Enable once the default version is
-# enabled.
-#- description: Update BotInfo.composite for dead bots.
-#  url: /internal/cron/update_bot_info
-#  schedule: every 1 minutes
-#  target: backend
+- description: Update BotInfo.composite for dead bots.
+  url: /internal/cron/update_bot_info
+  schedule: every 1 minutes
+  target: backend

 - description: Clean up stale BotTaskDimensions and TaskDimensions so no
     composite indexes are needed.
Added Deloitte to the list of users Planning to write/present about our use of Luigi in the near future, and move it up into the other list.
@@ -156,6 +156,7 @@ Some more companies are using Luigi but haven't had a chance yet to write about * `OAO <https://adops.com/>`_ * `Grovo <https://grovo.com/>`_ * `Weebly <https://www.weebly.com/>`_ +* `Deloitte <https://www.Deloitte.co.uk/>`_ We're more than happy to have your company added here. Just send a PR on GitHub.
Add __init__ to MappingView and its subclasses While these implementations don't matter for the 'typing' module itself, these are also imported to serve as the implementations for the 'collection.abc' module. Fixes
@@ -339,10 +339,12 @@ class MutableSet(AbstractSet[_T], Generic[_T]): def __ixor__(self, s: AbstractSet[_S]) -> MutableSet[Union[_T, _S]]: ... def __isub__(self, s: AbstractSet[Any]) -> MutableSet[_T]: ... -class MappingView: +class MappingView(Sized): + def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented def __len__(self) -> int: ... class ItemsView(MappingView, AbstractSet[Tuple[_KT_co, _VT_co]], Generic[_KT_co, _VT_co]): + def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented def __and__(self, o: Iterable[Any]) -> Set[Tuple[_KT_co, _VT_co]]: ... def __rand__(self, o: Iterable[_T]) -> Set[_T]: ... def __contains__(self, o: object) -> bool: ... @@ -357,6 +359,7 @@ class ItemsView(MappingView, AbstractSet[Tuple[_KT_co, _VT_co]], Generic[_KT_co, def __rxor__(self, o: Iterable[_T]) -> Set[Union[Tuple[_KT_co, _VT_co], _T]]: ... class KeysView(MappingView, AbstractSet[_KT_co], Generic[_KT_co]): + def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented def __and__(self, o: Iterable[Any]) -> Set[_KT_co]: ... def __rand__(self, o: Iterable[_T]) -> Set[_T]: ... def __contains__(self, o: object) -> bool: ... @@ -371,6 +374,7 @@ class KeysView(MappingView, AbstractSet[_KT_co], Generic[_KT_co]): def __rxor__(self, o: Iterable[_T]) -> Set[Union[_KT_co, _T]]: ... class ValuesView(MappingView, Iterable[_VT_co], Generic[_VT_co]): + def __init__(self, mapping: Mapping[_KT_co, _VT_co]) -> None: ... # undocumented def __contains__(self, o: object) -> bool: ... def __iter__(self) -> Iterator[_VT_co]: ... if sys.version_info >= (3, 8):
don't restart service on failure this could cause an endless loop of restarts
@@ -8,8 +8,6 @@ NotifyAccess = exec ExecStart = {start_cmd} -c %i ExecStop = {stop_cmd} -c %i ExecStopPost=/usr/bin/env bash -c "if [ $SERVICE_RESULT!=success ]; then notify-send Maestral 'Daemon failed'; fi" -RestartSec = 1 -Restart = on-failure WatchdogSec = 30s [Install]
Update ml.ipynb Fixed a typo in the one hot encoder example
"metadata": {}, "outputs": [], "source": [ - "encoder = df.ml_one_hot_encoder([df.col.class_])\n", + "encoder = df.ml.one_hot_encoder([df.col.class_])\n", "df_encoded = encoder.transform(df)" ] },
Update send_notification utility to use dbus_next Previous version relied on gi.repository.Notify and checked for ImportError. Now that dbus_next is a requirement, we can use that to submit notifications instead.
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. +import asyncio import functools import glob import importlib @@ -40,16 +41,6 @@ except ImportError: from libqtile.log_utils import logger -_can_notify = False -try: - import gi - gi.require_version("Notify", "0.7") # type: ignore - from gi.repository import Notify # type: ignore - Notify.init("Qtile") - _can_notify = True -except ImportError as e: - logger.warning("Failed to import dependencies for notifications: %s" % e) - class QtileError(Exception): pass @@ -247,18 +238,47 @@ def send_notification(title, message, urgent=False, timeout=10000, id=None): passed when calling this function again to replace that notification. See: https://developer.gnome.org/notification-spec/ """ - if _can_notify and Notify.get_server_info()[0]: - notifier = Notify.Notification.new(title, message) - if urgent: - notifier.set_urgency(Notify.Urgency.CRITICAL) - notifier.set_timeout(timeout) - if id is None: - id = randint(10, 1000) - notifier.set_property('id', id) - notifier.show() + id = randint(10, 1000) if id is None else id + urgency = 2 if urgent else 1 + + try: + loop = asyncio.get_running_loop() + except RuntimeError: + logger.warning("Eventloop has not started. Cannot send notification.") + else: + loop.create_task(_notify(title, message, urgency, timeout, id)) + return id +async def _notify(title, message, urgency, timeout, id): + notification = ["qtile", # Application name + id, # id + "", # icon + title, # summary + message, # body + [""], # actions + {"urgency": Variant("y", urgency)}, # hints + timeout] # timeout + + bus, msg = await _send_dbus_message(True, + MessageType.METHOD_CALL, + "org.freedesktop.Notifications", + "org.freedesktop.Notifications", + "/org/freedesktop/Notifications", + "Notify", + "susssasa{sv}i", + notification) + + if msg.message_type == MessageType.ERROR: + logger.warning("Unable to send notification. " + "Is a notification server running?") + + # a new bus connection is made each time a notification is sent so + # we disconnect when the notification is done + bus.disconnect() + + def guess_terminal(preference=None): """Try to guess terminal.""" test_terminals = []
Fix ledger sign message. There was around a ~1/128 chance of creating an invalid signature when signing a message with a Ledger.
@@ -334,7 +334,12 @@ class Ledger_KeyStore(Hardware_KeyStore): if sLength == 33: s = s[1:] # And convert it - return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s + + # Pad r and s points with 0x00 bytes when the point is small to get valid signature. + r_padded = bytes([0x00]) * (32 - len(r)) + r + s_padded = bytes([0x00]) * (32 - len(s)) + s + + return bytes([27 + 4 + (signature[0] & 0x01)]) + r_padded + s_padded @runs_in_hwd_thread @test_pin_unlocked
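A minimal sketch of the padding logic above (the helper name pad_to_32 is hypothetical, not part of the plugin). A DER-encoded integer drops leading zero bytes, so r or s can come back shorter than 32 bytes; each component is short with probability ~1/256, which gives the ~1/128 failure rate mentioned in the message.

def pad_to_32(point: bytes) -> bytes:
    # Left-pad with 0x00 bytes so the component always occupies 32 bytes.
    return bytes(32 - len(point)) + point

assert pad_to_32(b"\x01" * 31) == b"\x00" + b"\x01" * 31   # short point gets padded
assert pad_to_32(b"\x02" * 32) == b"\x02" * 32             # full-width point unchanged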
Improve comment. Co-authored by Simon Cross (@hodgestar)
@@ -316,7 +316,10 @@ cdef class QobjEvo: t = self._prepare(t, None) if self.isconstant: - # When the QobjEvo is constant, it is usually made of only one Qobj + # For constant QobjEvo's, we sum the contained Qobjs directly in + # order to retain the cached values of attributes like .isherm when + # possible, rather than calling _call(t) which may lose this cached + # information. return sum(element.qobj(t) for element in self.elements) cdef _BaseElement part = self.elements[0]
Add type annotation to get_project Now usage information for methods will actually show. project = client.get_project(PROJECT_ID) export_url = project.export_labels()
@@ -404,7 +404,7 @@ class Client: else: return db_object_type(self, res) - def get_project(self, project_id): + def get_project(self, project_id) -> Project: """ Gets a single Project with the given ID. >>> project = client.get_project("<project_id>")
mysql: fix deprecated storage_engine variable storage_engine variable has been deprecated in MySQL 5.5. default_storage_engine is a substitution fixes compatibility with MySQL 5.7
@@ -121,7 +121,7 @@ MYSQL_OPTIONS = { 'sql_mode': 'TRADITIONAL', 'charset': 'utf8', 'init_command': """ - SET storage_engine=INNODB; + SET default_storage_engine=INNODB; SET character_set_connection=utf8,collation_connection=utf8_unicode_ci; SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED; """
Use bytes for hmac key For Python 3
@@ -26,7 +26,7 @@ class NetworkLogger(Logger): self.host = config_options["host"] self.port = int(config_options["port"]) self.hostname = socket.gethostname() - self.key = config_options["key"] + self.key = bytearray(config_options["key"], 'utf-8') except Exception: raise RuntimeError("missing config options for network monitor") @@ -56,7 +56,7 @@ class NetworkLogger(Logger): s.connect((self.host, self.port)) p = pickle.dumps(self.batch_data) mac = hmac.new(self.key, p) - # print "My MAC is %s" % mac.hexdigest() + print("My MAC is %s" % mac.hexdigest()) s.send("%s\n%s" % (mac.hexdigest(), p)) except Exception as e: print("Failed to send data: %s" % e)
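A minimal sketch, independent of the monitor code, of why the key must be bytes-like on Python 3 (the key and payload values are placeholders); digestmod is passed explicitly here because recent Python versions require it.

import hashlib
import hmac

key = bytearray("secret-key", "utf-8")   # a str key raises TypeError on Python 3
payload = b"pickled batch data"
print(hmac.new(key, payload, hashlib.md5).hexdigest())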
[core/theme] Dynamically create theme accessors Dynamically generate the accessors for a theme's attributes. NB: This might be even nicer coming from a JSON rather than inside the code.
@@ -22,6 +22,14 @@ class Theme(object): core.event.register('start', self.__start) core.event.register('next-widget', self.__next_widget) + for attr, default in [ + ('fg', None), ('bg', None), + ('default-separators', True), + ('separator-block-width', 0), + ('separator', None) + ]: + setattr(self, attr.replace('-', '_'), lambda widget=None, default=default, attr=attr: self.__get(widget, attr, default)) + def load(self, name): for path in PATHS: theme_file = os.path.join(path, '{}.json'.format(name)) @@ -30,18 +38,6 @@ class Theme(object): return json.load(data) raise RuntimeError('unable to find theme {}'.format(name)) - def fg(self, widget=None): - return self.__get(widget, 'fg') - - def bg(self, widget=None): - return self.__get(widget, 'bg') - - def default_separators(self, widget=None): - return self.__get(widget, 'default-separators', True) - - def separator_block_width(self, widget=None): - return self.__get(widget, 'separator-block-width', 0) - def __start(self): self.__widget_count = 0
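A standalone sketch of the technique (the Config class is illustrative, not part of the module): the lambda binds attr and default as default arguments so each generated accessor keeps its own values instead of all closing over the last loop iteration.

class Config:
    def __init__(self, values):
        self._values = values
        for attr, default in [("fg", None), ("bg", None), ("separator-block-width", 0)]:
            setattr(self, attr.replace("-", "_"),
                    lambda widget=None, default=default, attr=attr: self._values.get(attr, default))

cfg = Config({"fg": "#ffffff"})
print(cfg.fg())                       # -> '#ffffff'
print(cfg.separator_block_width())    # -> 0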
TST: added volume unit tests Added unit tests for volumetric unit scaling.
@@ -83,6 +83,7 @@ class TestScaleUnits(object): self.dist_units = ["m", "km", "cm"] self.vel_units = ["m/s", "cm/s", "km/s", 'm s$^{-1}$', 'cm s$^{-1}$', 'km s$^{-1}$', 'm s-1', 'cm s-1', 'km s-1'] + self.vol_units = ["m-3", "cm-3", "/cc", 'n/cc', 'm$^{-3}$', 'cm$^{-3}$'] self.scale = 0.0 return @@ -90,6 +91,7 @@ class TestScaleUnits(object): """Clean up the test environment.""" del self.deg_units, self.dist_units, self.vel_units, self.scale + del self.vol_units return def eval_unit_scale(self, out_unit, scale_type): @@ -123,6 +125,11 @@ class TestScaleUnits(object): assert self.scale == 1.0 elif out_unit.find("km") == 0: assert self.scale == 0.001 + elif scale_type.lower() == 'volume': + if out_unit.find("m") == 0: + assert self.scale == 1.0 + else: + assert self.scale == 1000000.0 return def test_scale_units_same(self): @@ -156,6 +163,14 @@ class TestScaleUnits(object): self.eval_unit_scale(out_unit, 'velocity') return + def test_scale_units_vol(self): + """Test scale_units for volumes.""" + + for out_unit in self.vol_units: + self.scale = utils.scale_units(out_unit, "m-3") + self.eval_unit_scale(out_unit, 'volume') + return + @pytest.mark.parametrize("in_args,err_msg", [ (['happy', 'm'], 'output unit'), (['m', 'happy'], 'input unit'), (['m', 'm/s'], 'Cannot scale m and m/s'),
doc: fix visual studio links Fixes: PR-URL:
@@ -54,22 +54,11 @@ Install all the required tools and configurations using Microsoft's [windows-bui #### Option 2 Install tools and configuration manually: - * Visual C++ Build Environment: - * Option 1: Install [Visual C++ Build Tools](http://landinghub.visualstudio.com/visual-cpp-build-tools) using the **Default Install** option. - - * Option 2: Install [Visual Studio 2015](https://www.visualstudio.com/products/visual-studio-community-vs) and select *Common Tools for Visual C++* during setup. This also works with the free Community and Express for Desktop editions. - - * Option 3: if you already have Visual Studio 2015 installed and did not install the - *Common Tools for Visual C++* during setup, you can `File -> New -> Project`, pick - any of the options under `Templates -> Other Languages -> Visual C++` then `Ok` - and Visual Studio will offer to install the *Common Tools for Visual C++* with a - "Install Missing Features" / "You need the Universal Windows App Development Tools - to develop Windows app projects." dialog. - - > :bulb: [Windows Vista / 7 only] requires [.NET Framework 4.5.1](http://www.microsoft.com/en-us/download/details.aspx?id=40773) - + * Install Visual C++ Build Environment: [Visual Studio Build Tools](https://visualstudio.microsoft.com/thank-you-downloading-visual-studio/?sku=BuildTools) + (using "Visual C++ build tools" workload) or [Visual Studio 2017 Community](https://visualstudio.microsoft.com/pl/thank-you-downloading-visual-studio/?sku=Community) + (using the "Desktop development with C++" workload) * Install [Python 2.7](https://www.python.org/downloads/) (`v3.x.x` is not supported), and run `npm config set python python2.7` (or see below for further instructions on specifying the proper Python version and path.) - * Launch cmd, `npm config set msvs_version 2015` + * Launch cmd, `npm config set msvs_version 2017` If the above steps didn't work for you, please visit [Microsoft's Node.js Guidelines for Windows](https://github.com/Microsoft/nodejs-guidelines/blob/master/windows-environment.md#compiling-native-addon-modules) for additional tips.
Handle exception when an interface doesn't support multicast [fixes Improved logic Additional DEBUG logging
@@ -93,22 +93,31 @@ def discover( MCAST_GRP = "239.255.255.250" MCAST_PORT = 1900 - _sockets = [] - # Use the specified interface, if any - if interface_addr is not None: + if interface_addr is not None: # Use the specified interface, if any try: - address = socket.inet_aton(interface_addr) + _ = socket.inet_aton(interface_addr) + addresses = {interface_addr} + _LOG.debug( + "Sending discovery packets on specified interface %s", interface_addr + ) except OSError as e: raise ValueError( "{} is not a valid IP address string".format(interface_addr) ) from e - _sockets.append(create_socket(interface_addr)) - _LOG.debug("Sending discovery packets on specified interface") - else: - # Use all relevant network interfaces - for address in _find_ipv4_addresses(): + else: # Use all qualified, discovered network interfaces + addresses = _find_ipv4_addresses() + if len(addresses) == 0: + _LOG.debug("No interfaces available for discovery") + return None + _LOG.debug("Sending discovery packets on discovered interface(s) %s", addresses) + + # Create sockets + _sockets = [] + for address in addresses: try: - _sockets.append(create_socket(address)) + _sock = create_socket(address) + _sockets.append(_sock) + _LOG.debug("Created socket %s for %s", _sock, address) except OSError as e: _LOG.warning( "Can't make a discovery socket for %s: %s: %s", @@ -116,12 +125,20 @@ def discover( e.__class__.__name__, e, ) - _LOG.debug("Sending discovery packets on %s", _sockets) - for _ in range(0, 3): # Send a few times to each socket. UDP is unreliable - for _sock in _sockets: + for _ in range(0, 3): + for _sock in _sockets[:]: # Copy the list, because items may be removed + _LOG.debug("Sending discovery packet on %s", _sock) + try: _sock.sendto(really_utf8(PLAYER_SEARCH), (MCAST_GRP, MCAST_PORT)) + except OSError as e: + _LOG.debug("Sending failed %s: removing %s from sockets list", e, _sock) + _sockets.remove(_sock) + + if len(_sockets) == 0: + _LOG.debug("Sending failed on all interfaces") + return None t0 = time.time() while True:
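A tiny standalone sketch of the list-copy idiom used above (_sockets[:]): iterating over a shallow copy lets the loop remove failed sockets from the original list without skipping the next element; the string items stand in for sockets.

items = ["sock-a", "sock-b", "sock-c"]
for item in items[:]:          # items[:] is a shallow copy
    if item == "sock-b":       # stands in for a socket whose send failed
        items.remove(item)
print(items)                   # -> ['sock-a', 'sock-c']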
C API: add exceptions wrapping in struct primitives TN:
@@ -33,7 +33,11 @@ procedure ${dec_ref} (R : ${c_type_name}_Ptr) % if cls.is_refcounted(): procedure ${dec_ref} (R : ${c_type_name}_Ptr) is begin + Clear_Last_Exception; Dec_Ref (R.all); +exception + when Exc : others => + Set_Last_Exception (Exc); end ${dec_ref}; % endif
Fix Linksys.SPS2xx.get_mac_address_table script HG-- branch : feature/microservices
@@ -66,7 +66,7 @@ class Script(BaseScript): r.append({ "interfaces": [iface], "mac": chassis, - "type": {"3": "D", "2": "S", "1": "S"}[v[3]], + "type": {"3": "D", "2": "S", "1": "S"}[str(v[3])], "vlan_id": vlan_id, }) return r
secscan: update https proxy scheme Update the https proxy scheme from "https" to "http". The scheme was ignored prior to urllib3 1.26, which is why it was working.
@@ -281,7 +281,7 @@ class ImplementedSecurityScannerAPI(SecurityScannerAPIInterface): timeout=timeout, verify=MITM_CERT_PATH, headers=DEFAULT_HTTP_HEADERS, - proxies={"https": "https://" + signer_proxy_url, "http": "http://" + signer_proxy_url}, + proxies={"https": "http://" + signer_proxy_url, "http": "http://" + signer_proxy_url}, ) if resp.status_code // 100 != 2: raise Non200ResponseException(resp)
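A hedged configuration sketch (the proxy host and target URL are hypothetical): with urllib3 1.26+ the proxy URL's scheme is honoured, so an HTTP CONNECT proxy is written with an http:// scheme even when it carries https traffic.

import requests

signer_proxy_url = "signer-proxy.internal:3128"   # hypothetical host:port
proxies = {
    "https": "http://" + signer_proxy_url,
    "http": "http://" + signer_proxy_url,
}
# requests.get("https://clair.example/analyses", proxies=proxies, timeout=10)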
Update __init__.py add back accidentally deleted package
@@ -95,9 +95,9 @@ del get_versions __all__ = ['fatal_error', 'Params', 'Outputs', 'Spectral_data', 'deprecation_warning', 'print_image', 'plot_image', 'color_palette', 'apply_mask', 'gaussian_blur', 'transform', 'hyperspectral', 'readimage', 'readbayer', - 'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'erode', - 'image_add', 'image_fusion', 'image_subtract', 'dilate', 'watershed', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab', - 'rgb2gray', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor', + 'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'erode', 'image_add', + 'image_fusion', 'image_subtract', 'dilate', 'watershed', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab', + 'rgb2gray_cmyk', 'rgb2gray', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor', 'find_objects', 'roi_objects', 'object_composition', 'analyze_object', 'morphology', 'analyze_bound_horizontal', 'analyze_bound_vertical', 'analyze_color', 'analyze_nir_intensity', 'print_results', 'flip', 'crop_position_mask', 'get_nir', 'report_size_marker_area',
Move setting of project root to dbcon fixture. Previously the root was only set for the launched app; it should also be available for unit tests.
@@ -160,7 +160,7 @@ class ModuleUnitTest(BaseTest): db_handler.teardown(self.TEST_OPENPYPE_NAME) @pytest.fixture(scope="module") - def dbcon(self, db_setup): + def dbcon(self, db_setup, output_folder_url): """Provide test database connection. Database prepared from dumps with 'db_setup' fixture. @@ -170,6 +170,17 @@ class ModuleUnitTest(BaseTest): dbcon.Session["AVALON_PROJECT"] = self.PROJECT dbcon.Session["AVALON_ASSET"] = self.ASSET dbcon.Session["AVALON_TASK"] = self.TASK + + # set project root to temp folder + platform_str = platform.system().lower() + root_key = "config.roots.work.{}".format(platform_str) + dbcon.update_one( + {"type": "project"}, + {"$set": + { + root_key: output_folder_url + }} + ) yield dbcon @pytest.fixture(scope="module") @@ -277,17 +288,6 @@ class PublishTest(ModuleUnitTest): def launched_app(self, dbcon, download_test_data, last_workfile_path, startup_scripts, app_args, app_name, output_folder_url): """Launch host app""" - # set publishing folders - platform_str = platform.system().lower() - root_key = "config.roots.work.{}".format(platform_str) - dbcon.update_one( - {"type": "project"}, - {"$set": - { - root_key: output_folder_url - }} - ) - # set schema - for integrate_new from openpype import PACKAGE_DIR # Path to OpenPype's schema
argparse: fix for latest py39 and resulted in the issue being fixed upstream.
@@ -290,7 +290,6 @@ if sys.version_info >= (3, 9): self, option_strings: Sequence[str], dest: str, - const: None = ..., # unused in implementation default: Union[_T, str, None] = ..., type: Optional[Union[Callable[[Text], _T], Callable[[str], _T], FileType]] = ..., choices: Optional[Iterable[_T]] = ...,
update troubleshooting added exchange rate troubleshooting
@@ -159,3 +159,22 @@ AttributeError: 'NoneType' object has no attribute 'actions' ``` In this case, ZRX is not yet added to the list. See [this page](/utilities/paper-trade/#account-balance) on how to add balances. + +#### Cross-Exchange Market Making error in logs + +Errors will appear if the token value is unable to convert `{from_currency}` to `{to_currency}` are not listed on the exchange rate class. + +``` +2019-09-30 05:42:42,000 - hummingbot.core.clock - ERROR - Unexpected error running clock tick. +Traceback (most recent call last): + File "clock.pyx", line 119, in hummingbot.core.clock.Clock.run_til + File "cross_exchange_market_making.pyx", line 302, in hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_making.CrossExchangeMarketMakingStrategy.c_tick + File "cross_exchange_market_making.pyx", line 387, in hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_making.CrossExchangeMarketMakingStrategy.c_process_market_pair + File "cross_exchange_market_making.pyx", line 1088, in hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_making.CrossExchangeMarketMakingStrategy.c_check_and_create_new_orders + File "cross_exchange_market_making.pyx", line 781, in hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_making.CrossExchangeMarketMakingStrategy.c_get_market_making_price + File "/hummingbot/core/utils/exchange_rate_conversion.py", line 190, in convert_token_value + raise ValueError(f"Unable to convert '{from_currency}' to '{to_currency}'. Aborting.") +ValueError: Unable to convert 'BTC' to 'BTC'. Aborting. +``` + +In this case, BTC is not yet added to the list of exchange rate class. See [this page](/utilities/exchange-rates/#exchange-rate-class) the correct format on adding exchange rate.
small bug fix valid at super low mass flows under convergence tolerance
@@ -269,6 +269,9 @@ def calc_mass_flow_edges(edge_node_df, mass_flow_substation_df, all_nodes_df, pi if loops: # print('Fundamental loops in the network:', loops) #returns nodes that define loop, useful for visiual verification in testing phase, + sum_delta_m_num = np.zeros((1, len(loops)))[0] + sum_delta_m_den = np.zeros((1, len(loops)))[0] + # if loops exist: # 1. calculate initial guess solution of matrix A # delete first plant on an edge of matrix and solution space b as these are redundant @@ -296,6 +299,7 @@ def calc_mass_flow_edges(edge_node_df, mass_flow_substation_df, all_nodes_df, pi sum_delta_m_num = np.zeros((1,len(loops)))[0] sum_delta_m_den = np.zeros((1,len(loops)))[0] + for i in range(len(loops)): # calculate the mass flow correction for each loop # iterate over loops @@ -370,6 +374,7 @@ def calc_mass_flow_edges(edge_node_df, mass_flow_substation_df, all_nodes_df, pi print('Error in the defined mass flows, deviation of ', max(abs(b_original - b_verification)), ' from node demands.') if loops: + if abs(sum_delta_m_num).any() > 10: # 10 Pa is sufficiently small print('Error in the defined mass flows, deviation of ', sum_delta_m_num, ' from 0 pressure in loop.')
db commits at the right places when sending payments. fixing vulnerabilities introduced in
@@ -44,6 +44,7 @@ def create_invoice( extra=extra, ) + g.db.commit() return invoice.payment_hash, payment_request @@ -97,6 +98,8 @@ def pay_invoice( if wallet.balance_msat < 0: g.db.rollback() raise PermissionError("Insufficient balance.") + else: + g.db.commit() if internal: # mark the invoice from the other side as not pending anymore @@ -112,6 +115,7 @@ def pay_invoice( else: raise Exception(error_message or "Failed to pay_invoice on backend.") + g.db.commit() return invoice.payment_hash
Use msgpack instead of msgpack-python The package msgpack-python has been deprecated.
@@ -53,7 +53,7 @@ REQUIREMENTS = [ "sortedcontainers>=1.4.4", "psutil>=2.0.0", "pymacaroons-pynacl>=0.9.3", - "msgpack-python>=0.4.2", + "msgpack>=0.5.0", "phonenumbers>=8.2.0", "six>=1.10", # prometheus_client 0.4.0 changed the format of counter metrics
Move parsing into lower layer in todo list. Parse the arguments after determining the command. Add keywords for priority in addition to numerical values. The supported keywords are 'critical', 'high' and 'normal'.
@@ -27,45 +27,63 @@ def sort(data): def todoHandler(data): global todoList - words = data.split() - s = words[1] if len(words) > 1 else "0" - arg = words[2] if len(words) > 2 else 0 if "add" in data: + data = data.replace("add", "", 1) if "comment" in data: - index = int(arg) + data = data.replace("comment", "", 1) + words = data.split() + index = int(words[0]) if(index<0 or index>=len(todoList['items'])): print("No such todo") return - todoList['items'][index]['comment'] = " ".join(words[3:]) + todoList['items'][index]['comment'] = " ".join(words[1:]) else: + data = " ".join(data.split()) newItem = {'complete':0} if " - " in data: parts = data.split(" - ") - newItem['name'] = parts[0].replace("add ", "", 1) + newItem['name'] = parts[0] newItem['comment'] = parts[1] else: - newItem['name'] = data.replace("add ", "", 1) + newItem['name'] = data todoList['items'].append(newItem) elif "remove" in data: - index = int(s) + data = data.replace("remove", "", 1) + index = int(data.split()[0]) if(index<0 or index>=len(todoList['items'])): print("No such todo") return todoList['items'].remove(todoList['items'][index]) elif "priority" in data: - index = int(s) + data = data.replace("priority", "", 1) + if "critical" in data: + data = data.replace("critical", "", 1) + priority = 100 + elif "high" in data: + data = data.replace("high", "", 1) + priority = 50 + elif "normal" in data: + data = data.replace("normal", "", 1) + priority = 0 + else: + words = data.split() + priority = int(words[1]) + words = data.split() + index = int(words[0]) if(index<0 or index>=len(todoList['items'])): print("No such todo") return - todoList['items'][index]['priority'] = int(arg) + todoList['items'][index]['priority'] = priority elif "complete" in data: - index = int(s) + data = data.replace("complete", "", 1) + words = data.split() + index = int(words[0]) if(index<0 or index>=len(todoList['items'])): print("No such todo") return complete = 100 - if arg: - complete = int(arg) + if words[1]: + complete = int(words[1]) todoList['items'][index]['complete'] = complete elif "help" in data: print(Fore.GREEN + "Commands: {add <todo description>, remove <index>, complete <index> [<completion>], priority <index> [<level>]}" + Fore.RESET)
Do not log a block launch if a block was not launched Prior to this PR, a block launched message was logged even if the launch failed and a ScalingFailed exception was raised. This was confusing.
@@ -176,8 +176,9 @@ class BlockProviderExecutor(ParslExecutor): def _launch_block(self, block_id: str) -> Any: launch_cmd = self._get_launch_command(block_id) job_id = self.provider.submit(launch_cmd, 1) + if job_id: logger.debug("Launched block {}->{}".format(block_id, job_id)) - if not job_id: + else: raise ScalingFailed(self, "Attempt to provision nodes did not return a job ID") return job_id
Remove PytestWarnings about collecting test classes. The classes under tests.util are not intended to hold test cases.
@@ -456,6 +456,8 @@ class FakeFrontend(FrontendModule): class TestBackend(BackendModule): + __test__ = False + def __init__(self, auth_callback_func, internal_attributes, config, base_url, name): super().__init__(auth_callback_func, internal_attributes, base_url, name) @@ -474,6 +476,8 @@ class TestBackend(BackendModule): class TestFrontend(FrontendModule): + __test__ = False + def __init__(self, auth_req_callback_func, internal_attributes, config, base_url, name): super().__init__(auth_req_callback_func, internal_attributes, base_url, name) @@ -492,6 +496,8 @@ class TestFrontend(FrontendModule): class TestRequestMicroservice(RequestMicroService): + __test__ = False + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) @@ -503,6 +509,8 @@ class TestRequestMicroservice(RequestMicroService): class TestResponseMicroservice(ResponseMicroService): + __test__ = False + def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs)
Simplify PPO Summary: Pull Request resolved: We shouldn't need to yield the placeholder loss.
@@ -163,16 +163,16 @@ class PPOTrainer(ReAgentLightningModule): return opts[0], opts[1] return None, opts[0] - def placeholder_loss(self): - """PPO Trainer performs manual updates. Return placeholder losses to Pytorch Lightning.""" - return [None] * len(self.optimizers()) + # pyre-fixme[14]: `training_step` overrides method defined in + # `ReAgentLightningModule` inconsistently. + def training_step(self, training_batch: rlt.PolicyGradientInput, batch_idx: int): + if isinstance(training_batch, dict): + training_batch = rlt.PolicyGradientInput.from_dict(training_batch) - def train_step_gen(self, training_batch: rlt.PolicyGradientInput, batch_idx: int): self.traj_buffer.append(training_batch) self.step += 1 if self.step % self.update_freq == 0: self.update_model() - yield from self.placeholder_loss() def update_model(self): assert len(self.traj_buffer) == self.update_freq
change travis config for ray installation this is for distributed computing
@@ -39,10 +39,10 @@ install: - conda create --yes -n test python=$TRAVIS_PYTHON_VERSION - source activate test - conda install --yes numpy scipy matplotlib pip nose - - conda install --yes -c bioconda ray - pip install setuptools - python setup.py install - pip install coveralls + - pip install ray script: coverage run test.py tests/*
Fixed broken code I should have tested it.
@@ -43,7 +43,7 @@ class YTDLSource(discord.PCMVolumeTransformer): @classmethod async def from_url(cls, url, *, loop=None): loop = loop or asyncio.get_event_loop() - data = await loop.run_in_executor(ytdl.extract_info, url) + data = await loop.run_in_executor(None, ytdl.extract_info, url) if 'entries' in data: # take first item from a playlist @@ -97,7 +97,7 @@ class Music: if ctx.voice_client.is_playing(): ctx.voice_client.stop() - player = await YoutubeSource.from_url(query, loop=self.bot.loop) + player = await YTDLSource.from_url(url, loop=self.bot.loop) ctx.voice_client.play(player, after=lambda e: print('Player error: %s' % e) if e else None) await ctx.send('Now playing: {}'.format(player.title))
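A minimal runnable sketch of the call signature fixed above: run_in_executor's first argument selects the executor, and passing None uses the loop's default thread pool (the extract function is a stand-in for ytdl.extract_info).

import asyncio

def extract(url):
    return {"title": "demo", "url": url}   # stand-in for the blocking ytdl call

async def main():
    loop = asyncio.get_running_loop()
    data = await loop.run_in_executor(None, extract, "https://example.com/video")
    print(data["title"])   # -> demo

asyncio.run(main())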
Fix caption being None This would later be an empty string with some modifications that were removed upon upgrading to layer 75, which changed where the captions are used and their naming.
@@ -1200,7 +1200,7 @@ class TelegramClient(TelegramBareClient): # region Uploading files - def send_file(self, entity, file, caption=None, + def send_file(self, entity, file, caption='', force_document=False, progress_callback=None, reply_to=None, attributes=None, @@ -1420,7 +1420,7 @@ class TelegramClient(TelegramBareClient): kwargs['is_voice_note'] = True return self.send_file(*args, **kwargs) - def _send_album(self, entity, files, caption=None, + def _send_album(self, entity, files, caption='', progress_callback=None, reply_to=None, parse_mode='md'): """Specialized version of .send_file for albums"""
run compute_integrals after the initial run of the dynamic run to ensure that the logz errors are correct. That's needed only if no batches will be added.
@@ -938,6 +938,13 @@ class DynamicSampler: bounditer=results.bounditer, eff=self.eff, delta_logz=results.delta_logz) + new_vals = {} + (new_vals['logwt'], new_vals['logz'], new_vals['logzvar'], + new_vals['h']) = compute_integrals(logl=self.saved_run.D['logl'], + logvol=self.saved_run.D['logvol']) + for curk in ['logwt', 'logz', 'logzvar', 'h']: + self.saved_run.D[curk] = new_vals[curk].tolist() + self.base_run.D[curk] = new_vals[curk].tolist() self.base = True # baseline run complete self.saved_run.D['batch'] = np.zeros(len(self.saved_run.D['id']), @@ -1683,7 +1690,6 @@ class DynamicSampler: dlogz=dlogz_init, logl_max=logl_max_init) - # Add points in batches. for n in range(self.batch, maxbatch): # Update stopping criteria. res = self.results
Un-normalize os_family in pkgrepo state The __grains__['os'].lower() code was added in but I don't know why. I hope this "fix" doesn't break anything.
@@ -319,7 +319,6 @@ def managed(name, ppa=None, **kwargs): enabled = True repo = name - os_family = __grains__['os_family'].lower() if __grains__['os'] in ('Ubuntu', 'Mint'): if ppa is not None: # overload the name/repo value for PPAs cleanly @@ -333,7 +332,7 @@ def managed(name, ppa=None, **kwargs): if enabled is not None \ else salt.utils.is_true(disabled) - elif os_family in ('redhat', 'suse'): + elif __grains__['os_family'] in ('RedHat', 'Suse'): if 'humanname' in kwargs: kwargs['name'] = kwargs.pop('humanname') if 'name' not in kwargs: @@ -344,7 +343,7 @@ def managed(name, ppa=None, **kwargs): if disabled is not None \ else salt.utils.is_true(enabled) - elif os_family == 'nilinuxrt': + elif __grains__['os_family'] in ('NILinuxRT',): # opkg is the pkg virtual kwargs['enabled'] = not salt.utils.is_true(disabled) \ if disabled is not None \ @@ -373,7 +372,7 @@ def managed(name, ppa=None, **kwargs): else: sanitizedkwargs = kwargs - if os_family == 'debian': + if __grains__['os_family'] == 'Debian': repo = salt.utils.pkg.deb.strip_uri(repo) if pre: @@ -387,7 +386,7 @@ def managed(name, ppa=None, **kwargs): # not explicitly set, so we don't need to update the repo # if it's desired to be enabled and the 'enabled' key is # missing from the repo definition - if os_family == 'redhat': + if __grains__['os_family'] == 'RedHat': if not salt.utils.is_true(sanitizedkwargs[kwarg]): break else: @@ -397,7 +396,7 @@ def managed(name, ppa=None, **kwargs): elif kwarg == 'comps': if sorted(sanitizedkwargs[kwarg]) != sorted(pre[kwarg]): break - elif kwarg == 'line' and os_family == 'debian': + elif kwarg == 'line' and __grains__['os_family'] == 'Debian': # split the line and sort everything after the URL sanitizedsplit = sanitizedkwargs[kwarg].split() sanitizedsplit[3:] = sorted(sanitizedsplit[3:]) @@ -412,14 +411,14 @@ def managed(name, ppa=None, **kwargs): salt.utils.pkg.deb.combine_comments(kwargs['comments']) if pre_comments != post_comments: break - elif kwarg == 'comments' and os_family == 'redhat': + elif kwarg == 'comments' and __grains__['os_family'] == 'RedHat': precomments = salt.utils.pkg.rpm.combine_comments(pre[kwarg]) kwargcomments = salt.utils.pkg.rpm.combine_comments( sanitizedkwargs[kwarg]) if precomments != kwargcomments: break else: - if os_family in ('redhat', 'suse') \ + if __grains__['os_family'] in ('RedHat', 'Suse') \ and any(isinstance(x, bool) for x in (sanitizedkwargs[kwarg], pre[kwarg])): # This check disambiguates 1/0 from True/False @@ -450,7 +449,7 @@ def managed(name, ppa=None, **kwargs): pass try: - if os_family == 'debian': + if __grains__['os_family'] == 'Debian': __salt__['pkg.mod_repo'](repo, saltenv=__env__, **kwargs) else: __salt__['pkg.mod_repo'](repo, **kwargs)
Don't copy `storage.driver` into the Product defn on ingest Closes
@@ -49,7 +49,7 @@ def morph_dataset_type(source_type, config, index, storage_format): output_type.definition['managed'] = True output_type.definition['description'] = config['description'] output_type.definition['storage'] = {k: v for (k, v) in config['storage'].items() - if k in ('crs', 'driver', 'tile_size', 'resolution', 'origin')} + if k in ('crs', 'tile_size', 'resolution', 'origin')} output_type.metadata_doc['format'] = {'name': storage_format}