Columns: message (string, 13-484 chars), diff (string, 38-4.63k chars)
With no exploration, action selection is now based on visit counts instead of value
@@ -19,9 +19,12 @@ class Node(object): self.count = 0 self.value = 0 - def select_action(self, temperature): + def select_action(self, temperature=10): if self.children: + if temperature > 0: return max(self.children.keys(), key=(lambda key: self.children[key].selection_strategy(temperature))) + else: + return max(self.children.keys(), key=(lambda key: self.children[key].count)) else: return None
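A minimal sketch of the selection logic above, using a hypothetical stand-in for the Node class (assumed to expose `children`, `count` and `selection_strategy` as in the diff): greedy on visit counts when the temperature is zero, otherwise the exploration-aware selection strategy.

```python
# Sketch only; Node is a hypothetical stand-in matching the diff above.
def select_action(node, temperature=10):
    if not node.children:
        return None
    if temperature > 0:
        # exploration: rank children by their selection strategy
        return max(node.children,
                   key=lambda a: node.children[a].selection_strategy(temperature))
    # no exploration: pick the most-visited child
    return max(node.children, key=lambda a: node.children[a].count)
```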
feat: exported several useful libraries. Added DataLayerProvenance, Storage, ThreadedQueue and EmptyVolumeException so they can be imported via `from cloudvolume import *`.
-from .cloudvolume import CloudVolume +from .cloudvolume import CloudVolume, EmptyVolumeException +from .provenance import DataLayerProvenance from .storage import Storage +from .threaded_queue import ThreadedQueue \ No newline at end of file
Kill _th_fill binding, which isn't used anymore. Summary: Pull Request resolved: We still keep the function in TH, since it's called from within TH. Test Plan: Imported from OSS
- IntArrayRefSize size - IntArrayRef stride ]] -[[ - name: _th_fill_ - return: self - cname: fill - variants: function - cpu_half: True - cpu_bool: True - cuda_bool: True - cpu_bfloat16: True - options: - - arguments: - - THTensor* self - - real value - - zero_dim_tensor_only: True - arguments: - - THTensor* self - - THTensor* value -]] [[ name: _th_masked_fill_ cpu_bool: True
fix(email): handle case where cstr returns text_type of str. chardet requires input to be bytes or bytearray, but sometimes frappe.cstr() returns text_type of str without encoding it to utf-8.
@@ -480,7 +480,7 @@ class Email: """Detect chartset.""" charset = part.get_content_charset() if not charset: - charset = chardet.detect(cstr(part))['encoding'] + charset = chardet.detect(safe_encode(cstr(part)))['encoding'] return charset
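For context, a small sketch of why the wrapper is needed: chardet's detect() only accepts bytes or bytearray and raises TypeError for a plain str (safe_encode and cstr are frappe helpers and are not shown here).

```python
import chardet

text = 'héllo'
try:
    chardet.detect(text)                       # plain str is rejected
except TypeError as exc:
    print('str rejected:', exc)
result = chardet.detect(text.encode('utf-8'))  # bytes are accepted
print(result)                                  # e.g. {'encoding': 'utf-8', ...}
```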
quota: Fix calculating org size. Fix the database call so it matches on manifest rather than repository to correctly calculate org size.
@@ -244,10 +244,10 @@ def cache_namespace_repository_sizes(namespace_name): now_ms = get_epoch_timestamp_ms() subquery = ( - Tag.select(Tag.repository_id) + Tag.select(Tag.manifest) .where(Tag.hidden == False) .where((Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms > now_ms)) - .group_by(Tag.repository_id) + .group_by(Tag.manifest) .having(fn.Count(Tag.name) > 0) ) @@ -258,7 +258,7 @@ def cache_namespace_repository_sizes(namespace_name): fn.sum(Manifest.layers_compressed_size).alias("repository_size"), ) .join(Repository) - .join(subquery, on=(subquery.c.repository_id == Repository.id)) + .join(subquery, on=(subquery.c.manifest_id == Manifest.id)) .where(Repository.namespace_user == namespace.id) .group_by(Repository.id) ) @@ -280,17 +280,17 @@ def get_namespace_size(namespace_name): now_ms = get_epoch_timestamp_ms() subquery = ( - Tag.select(Tag.repository_id) + Tag.select(Tag.manifest) .where(Tag.hidden == False) .where((Tag.lifetime_end_ms >> None) | (Tag.lifetime_end_ms > now_ms)) - .group_by(Tag.repository_id) + .group_by(Tag.manifest) .having(fn.Count(Tag.name) > 0) ) namespace_size = ( Manifest.select(fn.sum(Manifest.layers_compressed_size)) .join(Repository) - .join(subquery, on=(subquery.c.repository_id == Repository.id)) + .join(subquery, on=(subquery.c.manifest_id == Manifest.id)) .where(Repository.namespace_user == namespace.id) ).scalar()
Update manual.py: print more version info for debugging
@@ -302,7 +302,8 @@ def main(): silent = args['auto'] tag = True - safePrint("%sbit Python." % (struct.calcsize("P") * 8)) + safePrint("Python %s-bit %s." % (struct.calcsize("P") * 8, sys.version)) + safePrint("Guessit version: %s." % guessit.__version__) # Settings overrides if(args['config']):
bugfix - notifier changes the vertex as it notifies. This causes subsequent notifiers to use the changed value, so worker processes that receive their data via these notifications get a changed value. Specifically, alarms contain a redundant 'resource' field.
@@ -18,6 +18,7 @@ from vitrage.common.constants import EntityCategory from vitrage.common.constants import NotifierEventTypes from vitrage.common.constants import VertexProperties as VProps from vitrage.evaluator.actions import evaluator_event_transformer as evaluator +from vitrage.graph.driver.networkx_graph import vertex_copy from vitrage.messaging import get_transport @@ -75,25 +76,27 @@ class GraphNotifier(object): vitrage_is_deleted property set to True :param graph: The graph """ - notification_types = _get_notification_type(before, current, is_vertex) + curr = current + notification_types = _get_notification_type(before, curr, is_vertex) if not notification_types: return # in case the vertex point to some resource add the resource to the # notification (useful for deduce alarm notifications) - if current.get(VProps.VITRAGE_RESOURCE_ID): - current.properties[VProps.RESOURCE] = graph.get_vertex( - current.get(VProps.VITRAGE_RESOURCE_ID)) + if curr.get(VProps.VITRAGE_RESOURCE_ID): + curr = vertex_copy(curr.vertex_id, curr.properties) + curr.properties[VProps.RESOURCE] = graph.get_vertex( + curr.get(VProps.VITRAGE_RESOURCE_ID)) LOG.info('notification_types : %s', str(notification_types)) - LOG.info('notification properties : %s', current.properties) + LOG.info('notification properties : %s', curr.properties) for notification_type in notification_types: try: self.oslo_notifier.info( {}, notification_type, - current.properties) + curr.properties) except Exception as e: LOG.exception('Cannot notify - %s - %s', notification_type, e)
Use nodejs v8. Use the current version of nodejs, since v7 is not maintained anymore.
yum: name=epel-release - name: Install nodejs - shell: curl -sL https://rpm.nodesource.com/setup_7.x | bash - && yum install -y nodejs + shell: curl -sL https://rpm.nodesource.com/setup_8.x | bash - && yum install -y nodejs - name: Check node version command: node -v
Another minor fix to get a system test to work. A missed "frequency" column was still being used in the dataset tests.
@@ -104,11 +104,11 @@ Gx^4 20 80 self.assertEqual(ds[('Gx','Gy')][('1',)], 60) dataset_txt2 = \ -"""## Columns = 0 frequency, count total +"""## Columns = 0 count, 1 count {} 0 100 -Gx 0.1 100 -GxGy 0.4 100 -Gx^4 0.2 100 +Gx 10 90 +GxGy 40 60 +Gx^4 20 80 """ with open(temp_files + "/TinyDataset2.txt","w") as output: output.write(dataset_txt2) @@ -161,22 +161,22 @@ Gx^4 0.2 100 def test_multi_dataset(self): multi_dataset_txt = \ -"""## Columns = DS0 0 count, DS0 1 count, DS1 0 frequency, DS1 count total +"""## Columns = DS0 0 count, DS0 1 count, DS1 0 count, DS1 1 count {} 0 100 0 100 -Gx 10 90 0.1 100 -GxGy 40 60 0.4 100 -Gx^4 20 80 0.2 100 +Gx 10 90 10 90 +GxGy 40 60 40 60 +Gx^4 20 80 20 80 """ with open(temp_files + "/TinyMultiDataset.txt","w") as output: output.write(multi_dataset_txt) multiDS = pygsti.io.load_multidataset(temp_files + "/TinyMultiDataset.txt", cache=True) bad_multi_dataset_txt = \ -"""## Columns = DS0 0 count, DS0 1 count, DS1 0 frequency, DS1 count total +"""## Columns = DS0 0 count, DS0 1 count, DS1 0 count, DS1 1 count {} 0 100 0 100 -FooBar 10 90 0.1 100 -GxGy 40 60 0.4 100 -Gx^4 20 80 0.2 100 +FooBar 10 90 10 90 +GxGy 40 60 40 60 +Gx^4 20 80 20 80 """ with open(temp_files + "/BadTinyMultiDataset.txt","w") as output: output.write(bad_multi_dataset_txt)
ansible: remove seemingly unused raw_params Traced git log all the way back to beginning of time, and checked Ansible versions starting Jan 2016. Zero clue where this came from, but the convention suggests it came from Ansible at some point.
@@ -93,16 +93,13 @@ class Runner(object): Subclasses may override `_run`()` and extend `setup()` and `revert()`. """ def __init__(self, module, service_context, emulate_tty=None, - raw_params=None, args=None, env=None): + args=None, env=None): if args is None: args = {} - if raw_params is not None: - args['_raw_params'] = raw_params self.module = utf8(module) self.service_context = service_context self.emulate_tty = emulate_tty - self.raw_params = raw_params self.args = args self.env = env
Fixed data path problem in visualization
@@ -18,6 +18,7 @@ import gym import numpy as np import os import sys +import time import ray try: @@ -100,7 +101,10 @@ def visualizer_rllib(args): sys.exit(1) sim_params.restart_instance = False - sim_params.emission_path = './test_time_rollout/' + dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_path = '{0}/test_time_rollout/'.format(dir_path) + + sim_params.emission_path = emission_path # pick your rendering mode if args.render_mode == 'sumo_web3d': @@ -264,8 +268,10 @@ def visualizer_rllib(args): # if prompted, convert the emission file into a csv file if args.emission_to_csv: + time.sleep(0.1) + dir_path = os.path.dirname(os.path.realpath(__file__)) - emission_filename = '{0}-emission.xml'.format(scenario.name) + emission_filename = '{0}-emission.xml'.format(env.scenario.name) emission_path = \ '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename)
Updated changelog [formerly f3dbb5315dcd0bce2e37a1706f22b109682f5276] [formerly ed77e400e03cc6da83430580fc202d962eef6bf2] [formerly b70d6dca8cfc0f974bca21019fb37bebe547f139]
@@ -16,11 +16,20 @@ These are taken from the GitHub Issues tab. ## 4.1 #### Features * Vigenere is now enabled, due to massive performance gains from the C++ core +* Pytest can now be run over the entire program, from main to the output. This means we can crate tests that test the whole of Ciphey, not just small unit tests. +* Better input handling. Ciphey now supports pipes `echo 'hello' | ciphey` and text with no arguments `ciphey 'hello'.` #### Bug Fixes * Chi-squared calcuations are now done _correctly_ +* Fixed bug where __main__ didn't return the output. +* Multiple debug statements were printed when not in debug mode. #### Maintenance * Offloaded lots of stuff onto C++ core to get much speed * Disabled addition of language checkers, as no-one could explain why it would make sense +* Bases.py is refactored so users can manually call decryptions. The name has also changed from base64 to bases. +* LanguageChecker now follows PEP8. +* Main and MathsHelper now follow PEP8. +* Now uses Nox to Pytest against multple Python versions. +* Code coverage is now calculated and used. ## 3.1 #### Features * Adding a logging library (Loguru) and implemented full logging of the @@ -53,4 +62,4 @@ and what's being worked on first. * Fixed program not installing via Pip * Fixed basicEncryption module from crashing the program #### Maintenance -* Added program to Pip \ No newline at end of file +* [Added](Added) program to Pip
Update active_directory_password_spraying.yml description update
@@ -4,19 +4,24 @@ version: 1 date: '2021-04-07' author: Mauricio Velazco, Splunk type: batch -description: Monitor for activities and techniques associated with Password Spraying attacks against Active Directory environments. +description: Monitor for activities and techniques associated with Password Spraying attacks within Active Directory environments. narrative: Adversaries may use a single or small list of commonly used passwords against many different accounts to attempt to acquire valid account credentials. Password spraying uses one password (e.g. 'Password01'), or a small list of commonly used passwords, that may match the complexity policy of the domain. Logins are attempted with that password against many different accounts on a network to avoid account lockouts that would normally occur when brute forcing a single account with many passwords. This technique allows an adversary to remain undetected. \ - Password Spraying can be leverages by adversaries to obtain initial access in a network or to escalate privileges in an environment where access has been already obtained. + Password Spraying can be leveraged by adversaries to obtain initial access in a network or to escalate privileges in an environment where access has been already obtained. - This Analytic Story is focused on detecting potential Password Spraying attacks against Active Directory environments. It presents + This Analytic Story is focused on detecting potential Password Spraying attacks against Active Directory environments leveraging Windows Event Logs in the + 'Account Logon' and 'Logon/Logoff' Advanced Audit Policy categories. It presents 9 different detection analytics which aid defenders in identifyng instances where one source + user, source host or source process fails to authenticate against a target or targets using multiple users. A user, host or process failing to authenticate with multiple + users is not a common behavior for legitimate systems and should be monitored by blue teams. Possible false positive scenarios include but are not limited to + vulnerability scanners, remote administration tools and missconfigured systems. These can be quickly spotted and addded to an allow list. references: - https://attack.mitre.org/techniques/T1110/003/ - https://www.hub.trimarcsecurity.com/post/trimarc-research-detecting-password-spraying-with-security-event-auditing - https://www.ired.team/offensive-security-experiments/active-directory-kerberos-abuse/active-directory-password-spraying +- https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-r2-and-2012/dn452415(v=ws.11) tags: analytic_story: - Active Directory Password Spraying
Re-add test_two_families to run_tests Add back test_two_families to the validator set of tests.
@@ -192,6 +192,8 @@ test_battleship() { test_validator() { run_docker_test ./validator/tests/unit_validator.yaml copy_coverage .coverage.validator + run_docker_test test_two_families + copy_coverage .coverage.test_two_families run_docker_test test_events_and_receipts copy_coverage .coverage.test_events_and_receipts run_docker_test test_namespace_restriction
Very basic cyclic prng log to track exactly when the cycles restart Closes
from threading import Lock import random import sympy +from datetime import datetime mutex = Lock() +LOGFILE = 'prng.log' + def modexp(b, e, m): bits = [(e >> bit) & 1 for bit in range(0, e.bit_length())] s = b @@ -16,6 +19,10 @@ def modexp(b, e, m): s %= m return v +def log(message): + with open(LOGFILE, 'a') as f: + f.write('%s - %s\n' % (str(datetime.now()), message)) + class CyclicPRNG: N = 0 Modulus = 0 @@ -34,6 +41,8 @@ class CyclicPRNG: if N < 1: raise Exception("Random Number Generator must be given a positive non-zero integer") + log('PRNG Starting Up') + def getN(self): return self.N @@ -83,6 +92,7 @@ class CyclicPRNG: while self.current > self.N: self.current = (self.current * self.G) % self.Modulus if value == self.end: + log('Cycle Restarted') self.initGenerator() self.initPermutation() mutex.release()
Add monitor failure description to Discord notification Include more information about the failures, so it will be easier to understand what happened to a specific job just looking at the Discord notification message content.
@@ -76,18 +76,26 @@ class CustomSendDiscordMessage(SendDiscordMessage): stats = self.data.stats n_scraped_items = stats.get("item_scraped_count", 0) + failures_report = [] + for result in self.result.monitor_results: + if result.status != "FAIL": + continue + failures_report.append(f"{result.monitor.name}: {result.reason}") + failures = len(self.result.failures) emoji = "\U0001F525" if failures > 0 else "\U0001F60E" - message = "\n".join( - [ + msg_lines = [ f"*{self.data.spider.name}* {stats['finish_reason']}", f"- Finish time: *{stats['finish_time']}*", f"- Gazettes scraped: *{n_scraped_items}*", f"- {emoji} {failures} failures {emoji}", ] - ) - return message + if failures_report: + msg_lines.append("===== FAILURES =====") + msg_lines.extend(failures_report) + + return "\n".join(msg_lines) class SpiderCloseMonitorSuite(MonitorSuite):
Quick add some explanation about the resampling in slowfast. * Quick add some explanation about the resampling in slowfast. Fix. * Fix docstring. * Fix typo.
@@ -371,9 +371,11 @@ class ResNet3dSlowFast(nn.Module): Args: pretrained (str): The file path to a pretrained model. resample_rate (int): A large temporal stride ``resample_rate`` - on input frames, corresponding to the :math:`\\tau` in the paper. - i.e., it processes only one out of ``resample_rate`` frames. - Default: 16. + on input frames. The actual resample rate is calculated by + multipling the ``interval`` in ``SampleFrames`` in the + pipeline with ``resample_rate``, equivalent to the :math:`\\tau` + in the paper, i.e. it processes only one out of + ``resample_rate * interval`` frames. Default: 8. speed_ratio (int): Speed ratio indicating the ratio between time dimension of the fast and slow pathway, corresponding to the :math:`\\alpha` in the paper. Default: 8.
Fix env bugs
@@ -9,7 +9,7 @@ RUN apt-get update && \ rm go.tgz ENV GOROOT=/usr/local/go GOPATH=/root/gopath -ENV PATH=${PATH}:${GOROOT}/bin +ENV PATH=\${PATH}:\${GOROOT}/bin CMD ["sh", "-c", "cd /root/gopath/src/github.com/PaddlePaddle/cloud/go/cmd/pfsserver && go get ./... && go build"] EOF
fix typo in config: it was a `,`, not a `:`, so 'options' was a set rather than a dictionary.
@@ -267,7 +267,12 @@ class Config(object): 'check-templated-letter-state': { 'task': 'check-templated-letter-state', 'schedule': crontab(day_of_week='mon-fri', hour=9, minute=0), - 'options': {'queue', QueueNames.PERIODIC} + 'options': {'queue': QueueNames.PERIODIC} + }, + 'check-precompiled-letter-state': { + 'task': 'check-precompiled-letter-state', + 'schedule': crontab(day_of_week='mon-fri', hour='9,15', minute=0), + 'options': {'queue': QueueNames.PERIODIC} }, 'raise-alert-if-letter-notifications-still-sending': { 'task': 'raise-alert-if-letter-notifications-still-sending', @@ -286,11 +291,6 @@ class Config(object): 'schedule': crontab(hour=23, minute=00), 'options': {'queue': QueueNames.PERIODIC} }, - 'check-precompiled-letter-state': { - 'task': 'check-precompiled-letter-state', - 'schedule': crontab(day_of_week='mon-fri', hour='9,15', minute=0), - 'options': {'queue', QueueNames.PERIODIC} - }, } CELERY_QUEUES = []
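A quick illustration of the typo being fixed (the queue name here is a placeholder for QueueNames.PERIODIC): with a comma Python builds a set literal, with a colon it builds a dict.

```python
# A comma between the items creates a set; a colon creates a dict.
broken = {'queue', 'periodic'}   # set -- what the typo produced
fixed = {'queue': 'periodic'}    # dict -- what the schedule's 'options' should be
print(type(broken))   # <class 'set'>
print(type(fixed))    # <class 'dict'>
```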
help: Replace perfect-scrollbar with simplebar in help pages. The perfect-scrollbar library created one major problem, which is that `ctrl-F` didn't work, and several smaller problems. Fixes and fixes
/* eslint indent: "off" */ -import PerfectScrollbar from 'perfect-scrollbar'; +import SimpleBar from 'simplebar'; function registerCodeSection($codeSection) { const $li = $codeSection.find("ul.nav li"); @@ -78,10 +78,12 @@ function render_code_sections() { function scrollToHash(container) { var hash = window.location.hash; + var simplebar = new SimpleBar(container).getScrollElement(); if (hash !== '') { - container.scrollTop = $(hash).position().top - $('.markdown .content').position().top; + var position = $(hash).position().top - $(container).position().top; + simplebar.scrollTop = position; } else { - container.scrollTop = 0; + simplebar.scrollTop = 0; } } @@ -91,12 +93,7 @@ function scrollToHash(container) { name: null, }; - var markdownPS = new PerfectScrollbar($(".markdown")[0], { - suppressScrollX: true, - useKeyboard: false, - wheelSpeed: 0.68, - scrollingThreshold: 50, - }); + var markdownSB = new SimpleBar($(".markdown")[0]); var fetch_page = function (path, callback) { $.get(path, function (res) { @@ -111,7 +108,7 @@ function scrollToHash(container) { if (html_map[path]) { $(".markdown .content").html(html_map[path]); render_code_sections(); - markdownPS.update(); + markdownSB.recalculate(); scrollToHash(container); } else { loading.name = path; @@ -119,18 +116,13 @@ function scrollToHash(container) { html_map[path] = res; $(".markdown .content").html(html_map[path]); loading.name = null; - markdownPS.update(); + markdownSB.recalculate(); scrollToHash(container); }); } }; - new PerfectScrollbar($(".sidebar")[0], { - suppressScrollX: true, - useKeyboard: false, - wheelSpeed: 0.68, - scrollingThreshold: 50, - }); + new SimpleBar($(".sidebar")[0]); $(".sidebar.slide h2").click(function (e) { var $next = $(e.target).next(); @@ -140,7 +132,7 @@ function scrollToHash(container) { $('.sidebar ul').not($next).hide(); // Toggle the heading $next.slideToggle("fast", "swing", function () { - markdownPS.update(); + markdownSB.recalculate(); }); } }); @@ -199,7 +191,7 @@ function scrollToHash(container) { scrollToHash(container); window.onresize = function () { - markdownPS.update(); + markdownSB.recalculate(); }; window.addEventListener("popstate", function () {
Update run_server documentation. Fixes
@@ -34,7 +34,7 @@ python butler.py run_server ``` It may take a few seconds to start. Once you see an output line like -`INFO <timestamp> admin_server.py:<num>] Starting admin server`, you can see the web interface by navigating to [http://localhost:9000](http://localhost:9000). +`[INFO] Listening at: http://0.0.0.0:9000`, you can see the web interface by navigating to [http://localhost:9000](http://localhost:9000). **Note:** The local instance may use ports [other than 9000](https://github.com/google/clusterfuzz/blob/master/src/local/butler/constants.py), such as 9008, for things like uploading files. Your local
Fix no-else-return pylint error Addresses pylint errors related to no-else-return on pymatgen.io.vasp.outputs
@@ -995,7 +995,6 @@ class Vasprun(MSONable): [vbm_spins[0], vbm_spins[1]], [vbm_spins_kpoints[0] == cbm_spins_kpoints[0], vbm_spins_kpoints[1] == cbm_spins_kpoints[1]], ) - else: return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint def calculate_efermi(self): @@ -5352,7 +5351,6 @@ class Eigenval: [vbm_spins[0], vbm_spins[1]], [vbm_spins_kpoints[0] == cbm_spins_kpoints[0], vbm_spins_kpoints[1] == cbm_spins_kpoints[1]], ) - else: return max(cbm - vbm, 0), cbm, vbm, vbm_kpoint == cbm_kpoint
[Android][RPC] Fix Vulkan runtime support. Update Android RPC app to reflect the new Vulkan source code tree structure.
#endif #ifdef TVM_VULKAN_RUNTIME -#include "../src/runtime/vulkan/vulkan.cc" +#include "../src/runtime/vulkan/vulkan_buffer.cc" +#include "../src/runtime/vulkan/vulkan_common.cc" +#include "../src/runtime/vulkan/vulkan_device.cc" +#include "../src/runtime/vulkan/vulkan_device_api.cc" +#include "../src/runtime/vulkan/vulkan_instance.cc" +#include "../src/runtime/vulkan/vulkan_module.cc" +#include "../src/runtime/vulkan/vulkan_stream.cc" +#include "../src/runtime/vulkan/vulkan_wrapped_func.cc" #endif #ifdef USE_SORT
Missing schedule documentation. Added a guide for schedules supported by the library but not directly available via the `python manage.py create_jobs <app name>` command.
@@ -33,16 +33,20 @@ Create a job ------------ A job is a Python script with a mandatory ``BaseJob`` class which extends from -``HourlyJob``, ``DailyJob``, ``WeeklyJob``, ``MonthlyJob`` or ``Yearly``. +``MinutelyJob``, ``QuarterHourlyJob``, ``HourlyJob``, ``DailyJob``, ``WeeklyJob``, ``MonthlyJob`` or ``Yearly``. It has one method that must be implemented called ``execute``, which is called when the job is run. The directories ``hourly``, ``daily``, ``monthly``, ``weekly`` and ``yearly`` are used only to for organisation purpose. +Note: If you want to use ``QuarterHourlyJob`` or ``Minutely`` job, create python package with name ``quarter_hourly`` or ``minutely`` respectively (similar to ``hourly`` or ``daily`` package). + To create your first job you can start copying ``sample.py``. -Remember to replace ``BaseJob`` with ``HourlyJob``, ``DailyJob``, ``WeeklyJob``, ``MonthlyJob`` or ``Yearly``. +Remember to replace ``BaseJob`` with ``MinutelyJob``, ``QuarterHourlyJob``, ``HourlyJob``, ``DailyJob``, ``WeeklyJob``, ``MonthlyJob`` or ``Yearly``. Some simple examples are provided by the `django_extensions.jobs package <https://github.com/django-extensions/django-extensions/tree/master/django_extensions/jobs>`_. +Note that each job should be in a new python script (within respective directory) and the class implementing the cron should be named ``Job``. Also, ``__init__.py`` file is not used for identifying jobs. + Run a job ---------
Update elf_mirai.txt Duplication
@@ -26414,64 +26414,6 @@ scan.aykashi.xyz # Reference: https://twitter.com/0xrb/status/1293852159000211458 -/OneDrive.arc -/OneDrive.arm -/OneDrive.arm4 -/OneDrive.arm4l -/OneDrive.arm4t -/OneDrive.arm4tl -/OneDrive.arm4tll -/OneDrive.arm5 -/OneDrive.arm5l -/OneDrive.arm5n -/OneDrive.arm6 -/OneDrive.arm64 -/OneDrive.arm6l -/OneDrive.arm7 -/OneDrive.arm7l -/OneDrive.arm8 -/OneDrive.armv4 -/OneDrive.armv4l -/OneDrive.armv5l -/OneDrive.armv6 -/OneDrive.armv61 -/OneDrive.armv6l -/OneDrive.armv7l -/OneDrive.dbg -/OneDrive.exploit -/OneDrive.i4 -/OneDrive.i486 -/OneDrive.i586 -/OneDrive.i6 -/OneDrive.i686 -/OneDrive.kill -/OneDrive.m68 -/OneDrive.m68k -/OneDrive.mips -/OneDrive.mips64 -/OneDrive.mipseb -/OneDrive.mipsel -/OneDrive.mpsl -/OneDrive.pcc -/OneDrive.powerpc -/OneDrive.powerpc-440fp -/OneDrive.powerppc -/OneDrive.ppc -/OneDrive.ppc2 -/OneDrive.ppc440 -/OneDrive.ppc440fp -/OneDrive.root -/OneDrive.root32 -/OneDrive.sh -/OneDrive.sh4 -/OneDrive.sparc -/OneDrive.spc -/OneDrive.ssh4 -/OneDrive.x32 -/OneDrive.x64 -/OneDrive.x86 -/OneDrive.x86_32 -/OneDrive.x86_64 /ADfafg.arc /ADfafg.arm /ADfafg.arm4
ENH: Remove recurring check The 'itemsize != 0' condition was already verified.
@@ -420,7 +420,7 @@ PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride, /* contiguous dst */ if (itemsize != 0 && dst_stride == itemsize) { /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { + if (src_stride == itemsize) { return &_contig_to_contig; } /* general src */ @@ -592,7 +592,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp * /* contiguous dst */ if (itemsize != 0 && dst_stride == itemsize) { /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { + if (src_stride == itemsize) { switch (itemsize) { /**begin repeat1 * #elsize = 2, 4, 8, 16#
ParameterisedHolderTest : Update for signal message changes The exceptions are reported as messages as of
@@ -1003,7 +1003,14 @@ class ParameterisedHolderTest( GafferTest.TestCase ) : ph = GafferCortex.ParameterisedHolderNode() ph.setParameterised( c ) - self.assertRaises( RuntimeError, ph["parameters"]["driver"].setValue, 10 ) + # capture the message that will be emitted by the signal handler + with IECore.CapturingMessageHandler() as mh : + ph["parameters"]["driver"].setValue( 10 ) + + self.assertEqual( len( mh.messages ), 1 ) + self.assertTrue( "Ooops!" in mh.messages[0].message ) + # the value was still set + self.assertEqual( ph["parameters"]["driver"].getValue(), 10 ) def testParameterInvalidWhenParameterChangedRaises( self ) : @@ -1045,11 +1052,13 @@ class ParameterisedHolderTest( GafferTest.TestCase ) : ph.setParameterised( c ) with IECore.CapturingMessageHandler() as mh : - # We want the original exception to be the visible one. - six.assertRaisesRegex( self, RuntimeError, "Ooops!", ph["parameters"]["driver"].setValue, 1 ) - # And we want the secondary exception to be reported as a message. - self.assertEqual( len( mh.messages ), 1 ) + ph["parameters"]["driver"].setValue( 1 ) + + self.assertEqual( len( mh.messages ), 2 ) + # we want the exception to be reported as a message. self.assertTrue( "Value is not an instance of \"IntData\"" in mh.messages[0].message ) + # and we want the parameterChanged exception as a message + self.assertTrue( "Ooops!" in mh.messages[1].message ) def testTypeNamePrefixes( self ) :
used PlaceholderLineEdit. Changed the lambda to a separate method, as lambdas are reported to have issues during Qt destroy.
@@ -20,6 +20,7 @@ from openpype.tools.utils.models import ( ProjectModel, ProjectSortFilterProxy ) +from openpype.tools.utils import PlaceholderLineEdit class StandaloneOverlayWidget(QtWidgets.QFrame): @@ -61,7 +62,7 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): btns_layout.addWidget(cancel_btn, 0) btns_layout.addWidget(confirm_btn, 0) - txt_filter = QtWidgets.QLineEdit() + txt_filter = PlaceholderLineEdit(content_widget) txt_filter.setPlaceholderText("Quick filter projects..") txt_filter.setClearButtonEnabled(True) txt_filter.addAction(qtawesome.icon("fa.filter", color="gray"), @@ -88,12 +89,11 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): projects_view.doubleClicked.connect(self._on_double_click) confirm_btn.clicked.connect(self._on_confirm_click) cancel_btn.clicked.connect(self._on_cancel_click) - txt_filter.textChanged.connect( - lambda: projects_proxy.setFilterRegularExpression( - txt_filter.text())) + txt_filter.textChanged.connect(self._on_text_changed) self._projects_view = projects_view self._projects_model = projects_model + self._projects_proxy = projects_proxy self._cancel_btn = cancel_btn self._confirm_btn = confirm_btn self._txt_filter = txt_filter @@ -115,6 +115,10 @@ class StandaloneOverlayWidget(QtWidgets.QFrame): def _on_cancel_click(self): self._set_project(self._project_name) + def _on_text_changed(self): + self._projects_proxy.setFilterRegularExpression( + self._txt_filter.text()) + def set_selected_project(self): index = self._projects_view.currentIndex()
Change comments in utils.py. Removed completed functions-in-use TODO.
# See the License for the specific language governing permissions and # limitations under the License. """Supporting methods for the classification pipeline.""" -# TODO(Sahana): Verify if all the methods are being used by the pipeline. import argparse from typing import Any, Dict, List
BLD: compare platform.architecture() correctly The function returns a tuple of values, of which we need to check the first. Fixes
@@ -290,7 +290,7 @@ def add_system_root(library_root): vcpkg = shutil.which('vcpkg') if vcpkg: vcpkg_dir = os.path.dirname(vcpkg) - if platform.architecture() == '32bit': + if platform.architecture()[0] == '32bit': specifier = 'x86' else: specifier = 'x64'
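For reference, platform.architecture() returns a (bits, linkage) tuple, so comparing the whole tuple to '32bit' is always False; only the first element should be checked:

```python
import platform

bits, linkage = platform.architecture()    # e.g. ('64bit', 'WindowsPE')
print(platform.architecture() == '32bit')  # always False: tuple vs str
specifier = 'x86' if bits == '32bit' else 'x64'
print(bits, specifier)
```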
Reduce min_pending_latency within allowable values [clowntown]
@@ -10,7 +10,7 @@ automatic_scaling: target_cpu_utilization: 0.95 target_throughput_utilization: 0.95 max_concurrent_requests: 20 - min_pending_latency: 30000ms + min_pending_latency: 15000ms max_pending_latency: automatic builtins:
[deflakey] test_error_handling.py in workflow ## Why are these changes needed? This test timeout. Move it to large. ``` WARNING: //python/ray/workflow:tests/test_error_handling: Test execution time (288.7s excluding execution overhead) outside of range for MODERATE tests. Consider setting timeout="long" or size="large". ```
@@ -7,7 +7,13 @@ load("//bazel:python.bzl", "py_test_module_list") SRCS = glob(["**/conftest.py"]) -LARGE_TESTS = ["tests/test_recovery.py", "tests/test_basic_workflows_2.py", "tests/test_metadata.py", "tests/test_events.py"] +LARGE_TESTS = [ + "tests/test_error_handling.py", + "tests/test_recovery.py", + "tests/test_basic_workflows_2.py", + "tests/test_metadata.py", + "tests/test_events.py" +] py_test_module_list( files = glob(["tests/test_*.py", "examples/**/*.py"], exclude=LARGE_TESTS),
Make doc test as python3 only Summary: I was cleaning up some virtual environments and ran into this Test Plan: Run unit tests in python 2 Reviewers: max, alangenfeld
@@ -38,6 +38,7 @@ def _path_starts_with(path, starts_with): # (probably hard since tests are collected before fixtures are executed -- but maybe we can lever # the checked-in snapshots for this) or collect the test failures and display all of them. @pytest.mark.docs [email protected](sys.version_info < (3, 6), reason="We don't support building docs in python 2") def test_build_all_docs(snapshot): pwd = os.getcwd() try:
[IMPR] set -ignore option to CANCEL.MATCH by default. Set the -ignore option to CANCEL.MATCH by default to ignore ISBN errors; update the module doc string; simplify arg parsing.
@@ -16,7 +16,12 @@ The following parameters are supported: inserted. -ignore: Ignores if an error occurred and either skips the page or - only that method. It can be set to 'page' or 'method'. + only that method. It can be set to: + all - dos not ignore errors + match - ignores ISBN related errors (default) + method - ignores fixing method errors + page - ignores page related errors + The following generators and filters are supported: @@ -59,7 +64,7 @@ class CosmeticChangesBot(ExistingPageBot, NoRedirectPageBot): self.available_options.update({ 'async': False, 'summary': 'Robot: Cosmetic changes', - 'ignore': CANCEL.ALL, + 'ignore': CANCEL.MATCH, }) super().__init__(**kwargs) @@ -91,23 +96,17 @@ def main(*args: Tuple[str, ...]) -> None: gen_factory = pagegenerators.GeneratorFactory() for arg in local_args: - if arg.startswith('-summary:'): - options['summary'] = arg[len('-summary:'):] - elif arg == '-always': - options['always'] = True - elif arg == '-async': - options['async'] = True - elif arg.startswith('-ignore:'): - ignore_mode = arg[len('-ignore:'):].lower() - if ignore_mode == 'method': - options['ignore'] = CANCEL.METHOD - elif ignore_mode == 'page': - options['ignore'] = CANCEL.PAGE - elif ignore_mode == 'match': - options['ignore'] = CANCEL.MATCH - else: - raise ValueError( - 'Unknown ignore mode "{}"!'.format(ignore_mode)) + opt, _, value = arg.partition(':') + if opt == '-summary': + options['summary'] = value + elif opt in ('-always', '-async'): + options[opt[1:]] = True + elif opt == '-ignore': + value = value.upper() + try: + options['ignore'] = getattr(CANCEL, value) + except AttributeError: + raise ValueError('Unknown ignore mode {!r}!'.format(value)) else: gen_factory.handle_arg(arg)
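A minimal, self-contained sketch of the simplified parsing above, using a hypothetical CANCEL enum in place of pywikibot's real one: str.partition() splits the option from its value, and getattr() replaces the chained elif branches.

```python
from enum import Enum

class CANCEL(Enum):          # hypothetical stand-in for pywikibot's CANCEL
    ALL = 'all'
    MATCH = 'match'
    METHOD = 'method'
    PAGE = 'page'

arg = '-ignore:match'
opt, _, value = arg.partition(':')
if opt == '-ignore':
    try:
        ignore = getattr(CANCEL, value.upper())
    except AttributeError:
        raise ValueError('Unknown ignore mode {!r}!'.format(value))
print(ignore)                # CANCEL.MATCH
```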
Move common tools to one function So they can be applied to all tools.
@@ -28,10 +28,7 @@ def apply_default_tool_set(view, modeling_language, event_manager, rubberband_st ) view.add_controller(*text_edit_tools(view, event_manager)) view.add_controller(rubberband_tool(view, rubberband_state)) - view.add_controller(*scroll_tools(view)) - view.add_controller(zoom_tool(view)) - view.add_controller(view_focus_tool(view)) - view.add_controller(shortcut_tool(view, modeling_language, event_manager)) + add_basic_tools(view, modeling_language, event_manager) def apply_magnet_tool_set(view, modeling_language, event_manager): @@ -40,11 +37,7 @@ def apply_magnet_tool_set(view, modeling_language, event_manager): view.add_controller( *transactional_tool(magnet_tool(view), event_manager=event_manager) ) - view.add_controller(*text_edit_tools(view, event_manager)) - view.add_controller(*scroll_tools(view)) - view.add_controller(zoom_tool(view)) - view.add_controller(view_focus_tool(view)) - view.add_controller(shortcut_tool(view, modeling_language, event_manager)) + add_basic_tools(view, modeling_language, event_manager) def apply_placement_tool_set( @@ -59,6 +52,11 @@ def apply_placement_tool_set( ) ) view.add_controller(drop_zone_tool(view, item_factory.item_class)) + add_basic_tools(view, modeling_language, event_manager) + + +def add_basic_tools(view, modeling_language, event_manager): view.add_controller(*scroll_tools(view)) view.add_controller(zoom_tool(view)) + view.add_controller(view_focus_tool(view)) view.add_controller(shortcut_tool(view, modeling_language, event_manager))
Add exercise conventions closes
Exercism exercises in Python + ## Contributing Guide Please see the [contributing guide](https://github.com/exercism/x-common/blob/master/CONTRIBUTING.md) + ## Working on the Exercises We welcome both improvements to the existing exercises and new exercises. -A pool of exercise ideas can be found in the [x-common repo](https://github.com/exercism/x-common). +A list of missing exercise can be found here: http://exercism.io/languages/python/todo + + +### Conventions + +- We use minimalistic stub files for all exercises (#272). +- We use `unittest` (Python Standard Library) and no 3rd-party-framework. +- We use the parameter order `self.assertEqual(actual, expected)` (#440). + + +### Testing All exercises must be compatible with Python versions 2.7 and 3.3 upwards. -Therefore please test your changes with these versions. -Test a single exercise with Python 2.7: +To test a single exercise (e.g., with Python 2.7): ``` python2.7 test/check-exercises.py [exercise-name] ``` -Test a single exercise with Python 3.3: +To test all exercises (e.g., with Python 3): ``` -python3.3 test/check-exercises.py [exercise-name] +python3 test/check-exercises.py ``` -Test all exercises: -``` -python test/check-exercises.py -``` -## Code Style +### Code Style + +The Python code in this repo is meant to follow the [PEP8 style guide](https://www.python.org/dev/peps/pep-0008/) (a stylized version http://pep8.org). + +This repo uses [flake8](http://flake8.readthedocs.org/en/latest/) with default settings to enforce the coding standard. + + +### CI build + +This repo uses `travis-ci` in the following configuration: [travis.yml](https://github.com/exercism/xpython/blob/master/.travis.yml) -The Python code in this repo is meant to follow the [PEP8 style guide](https://www.python.org/dev/peps/pep-0008/). +It will check automatically the code style, the problem configuration and runns the unittests with all supported Python versions. -This repo uses [flake8](http://flake8.readthedocs.org/en/latest/) with default settings to enforce the coding standard. When you submit a PR, it needs to pass the flake8 tool with no warnings, or it won't be accepted. ## Pull Requests
fix(background jobs): Show method name on Background Jobs page. After background jobs page doesn't provide any information.
@@ -28,6 +28,7 @@ def get_info(show_failed=False): if j.kwargs.get('site')==frappe.local.site: jobs.append({ 'job_name': j.kwargs.get('kwargs', {}).get('playbook_method') \ + or j.kwargs.get('kwargs', {}).get('job_type') \ or str(j.kwargs.get('job_name')), 'status': j.get_status(), 'queue': name, 'creation': format_datetime(convert_utc_to_user_timezone(j.created_at)),
rolling_update: add any_errors_fatal If a failure occurs in ceph-validate, the upgrade playbook keeps running where we expect it to fail.
- "{{ iscsi_gw_group_name|default('iscsigws') }}" - "{{ grafana_server_group_name|default('grafana-server') }}" + any_errors_fatal: True become: True gather_facts: False vars:
fix: check whether the cluster is healthy. This commit unblocks the CI and only checks that the cluster is healthy.
changed_when: "cmd_res.rc == 0" retries: 60 delay: 60 + until: "'Healthy' in cmd_res.stdout" # We should have all the master and worker nodes started - until: cmd_res.stdout_lines | list | count == ( groups['all_control_plane_nodes'] | count + groups['all_compute_nodes'] | count ) + # TODO:FIXME: Count and compare with + # "Healthy with x known peers" where x is: + # ( groups['all_control_plane_nodes'] | count + groups['all_compute_nodes'] | count ) - name: "Deploy CDK" ansible.builtin.shell: |
Update helm-chart readme to reflect current image. The default image tag was incorrectly stated; this commit corrects it.
@@ -46,7 +46,7 @@ The following tables lists the configurable parameters of the Ambassador chart a | Parameter | Description | Default | | ------------------------------- | ------------------------------------------ | ---------------------------------------------------------- | | `image.repository` | Image | `quay.io/datawire/ambassador` -| `image.tag` | Image tag | `0.35.0` +| `image.tag` | Image tag | `0.40.2` | `image.pullPolicy` | Image pull policy | `IfNotPresent` | `image.imagePullSecrets` | Image pull secrets | None | `daemonSet` | If `true `, Create a daemonSet. By default Deployment controller will be created | `false`
Skip cephadm playbook when there is no mon or nfs group. This change just makes it possible to skip the cephadm playbook when no mon or nfs nodes are defined in the inventory.
@@ -579,10 +579,6 @@ outputs: ms_client_mode: secure - {get_attr: [DefaultCephConfigOverrides, value, vars]} cephadm_extra_vars: {get_attr: [CephAdmVars, value, vars]} - - name: Prepare cephadm user and keys - include_role: - name: tripleo_run_cephadm - tasks_from: enable_ceph_admin_user.yml # This is supposed to run a playbook which is responsible to # deploy Ceph using cephadm. # The storage network is supposed to be available since we are @@ -590,6 +586,14 @@ outputs: # TODO: (fpantano) Remove this section when --network-ports is # available and Ceph deployment can be moved **before** # the overcloud. + - name: Prepare cephadm user and keys + include_role: + name: tripleo_run_cephadm + tasks_from: enable_ceph_admin_user.yml + when: groups['ceph_mon'] | default([]) | length > 0 or + groups['ceph_nfs'] | default([]) | length > 0 - name: Deploy the ceph cluster using cephadm include_role: name: tripleo_run_cephadm + when: groups['ceph_mon'] | default([]) | length > 0 or + groups['ceph_nfs'] | default([]) | length > 0
fix order time indication
@@ -353,6 +353,11 @@ class Mt5Gateway(BaseGateway): self.local_sys_map[local_id] = sys_id self.sys_local_map[sys_id] = local_id + + order = self.orders.get(local_id, None) + if local_id and order: + order.datetime = generate_datetime(data["order_time_setup"]) + # Update order data elif trans_type in {TRADE_TRANSACTION_ORDER_UPDATE, TRADE_TRANSACTION_ORDER_DELETE}: sysid = str(data["order"])
Fix command line parameters (remove -P in front of URI) I removed the `-P` flags in front of `examples/hyperparam` because it gave me the error "Please specify URI" on WSL2 with Ubuntu 20.04.
@@ -51,15 +51,15 @@ Runs the Keras deep learning training with default parameters and log it in expe .. code-block:: bash - mlflow run -e random --experiment-id <hyperparam_experiment_id> -P examples/hyperparam + mlflow run -e random --experiment-id <hyperparam_experiment_id> examples/hyperparam .. code-block:: bash - mlflow run -e gpyopt --experiment-id <hyperparam_experiment_id> -P examples/hyperparam + mlflow run -e gpyopt --experiment-id <hyperparam_experiment_id> examples/hyperparam .. code-block:: bash - mlflow run -e hyperopt --experiment-id <hyperparam_experiment_id> -P examples/hyperparam + mlflow run -e hyperopt --experiment-id <hyperparam_experiment_id> examples/hyperparam Runs the hyperparameter tuning with either random search or GpyOpt or Hyperopt and log the results under ``hyperparam_experiment_id``.
Skip this test if pywin32 is not present. Skip this test if pywin32 is not present because it's not part of Spyder's listed dependencies.
# Local imports from spyder.py3compat import PY3 from spyder.widgets import pathmanager as pathmanager_mod +from spyder.utils.programs import is_module_installed @pytest.fixture @@ -62,8 +63,9 @@ def test_check_uncheck_path(qtbot): assert pathmanager.not_active_pathlist == [] [email protected](os.name != 'nt', - reason="This feature is not applicable for Unix systems") [email protected](os.name != 'nt' or not is_module_installed('win32con'), + reason=("This feature is not applicable for Unix " + "systems and pywin32 is needed")) def test_synchronize_with_PYTHONPATH(qtbot, mocker): pathmanager = setup_pathmanager(qtbot, None, pathlist=['path1', 'path2', 'path3'],
Handle admin_ips login error While this could provide information leakage, I think the benefit of knowing the issue to new RTB admins (particularly since it defaults to localhost) outweighs the risk.
@@ -77,6 +77,14 @@ class LoginHandler(BaseHandler): and not user.is_admin() ): self.redirect("/user/missions/firstlogin") + elif user.is_admin() and not self.allowed_ip(): + self.render( + "public/login.html", + info=[ + "Succesfull credentials, but administration is restriceted via IP. See 'admin_ips' in configuration." + ], + errors=None, + ) else: self.redirect("/user") else: @@ -92,6 +100,11 @@ class LoginHandler(BaseHandler): PBKDF2.crypt(password_attempt, "BurnTheHashTime") self.failed_login() + def allowed_ip(self): + return ( + len(options.admin_ips) == 0 or self.request.remote_ip in options.admin_ips + ) + def successful_login(self, user): """ Called when a user successfully logs in """ logging.info(
Allow users to order by value column Tks for the solution Tks for the bug report
@@ -248,6 +248,7 @@ class ReimbursementModelAdmin(SimpleHistoryAdmin): return 'R$ {:.2f}'.format(obj.total_net_value).replace('.', ',') value.short_description = 'valor' + value.admin_order_field = 'total_net_value' def still_available(self, obj): return obj.available_in_latest_dataset
Fix some major issues with the LGPO module: an issue with the movement of the registry object to salt.utils; issues with dict values in the debug output; fix __virtual__.
@@ -35,7 +35,7 @@ Current known limitations - lxml - uuid - struct - - salt.modules.reg + - salt.utils.win_reg ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function @@ -98,7 +98,7 @@ try: import lxml import struct from lxml import etree - from salt.modules.reg import Registry as Registry + from salt.utils.win_reg import Registry HAS_WINDOWS_MODULES = True TRUE_VALUE_XPATH = etree.XPath('.//*[local-name() = "trueValue"]') FALSE_VALUE_XPATH = etree.XPath('.//*[local-name() = "falseValue"]') @@ -2672,9 +2672,12 @@ def __virtual__(): ''' Only works on Windows systems ''' - if salt.utils.platform.is_windows() and HAS_WINDOWS_MODULES: + if not salt.utils.platform.is_windows(): + return False, 'win_lgpo: Not a Windows System' + if not HAS_WINDOWS_MODULES: + return False, 'win_lgpo: Required modules failed to load' + log.debug('win_lgpo: LGPO module loaded successfully') return __virtualname__ - return False def _updateNamespace(item, new_namespace): @@ -5372,7 +5375,7 @@ def set_(computer_policy=None, user_policy=None, else: raise SaltInvocationError(msg) if policy_namespace and policy_name in _admTemplateData[policy_namespace] and the_policy is not None: - log.debug('setting == %s', _admTemplateData[policy_namespace][policy_name].lower()) + log.debug('setting == %s', six.text_type(_admTemplateData[policy_namespace][policy_name]).lower()) log.debug(six.text_type(_admTemplateData[policy_namespace][policy_name]).lower()) if six.text_type(_admTemplateData[policy_namespace][policy_name]).lower() != 'disabled' \ and six.text_type(_admTemplateData[policy_namespace][policy_name]).lower() != 'not configured':
POSIX mode for shlex.split doesn't handle spaces This shows up on Python 3.7 - this change may introduce a regression on other Python envs on Windows. May need to reinstate.
@@ -935,19 +935,10 @@ def user(): return os.getenv("USER") or "" def shlex_split(s): - s = s or "" - return shlex.split(s) - - # TODO: this causes problems! Do we need it? - posix = PLATFORM != "Windows" # If s is None, this call will block (see # https://bugs.python.org/issue27775) s = s or "" - parts = shlex.split(s, posix=posix) - if not posix: - # Workaround issue where '' in Windows is split as "''" - parts = ["" if part == "''" else part for part in parts] - return parts + return shlex.split(s) def format_bytes(n): units = [None, "K", "M", "G", "T", "P", "E", "Z"]
move cutadapt params.opts to the end of the call to allow overriding presets by the user
@@ -19,7 +19,7 @@ if paired: threads: 8 conda: CONDA_SHARED_ENV shell: """ - cutadapt {params.opts} -j {threads} -e 0.1 -q 16 -O 3 --trim-n --minimum-length 25 -a AGATCGGAAGAGC -A AGATCGGAAGAGC \ + cutadapt -j {threads} -e 0.1 -q 16 -O 3 --trim-n --minimum-length 25 -a AGATCGGAAGAGC -A AGATCGGAAGAGC {params.opts} \ -o {output.r1} -p {output.r2} {input.r1} {input.r2} > {log.out} 2> {log.err} """ else: @@ -38,7 +38,7 @@ else: threads: 8 conda: CONDA_SHARED_ENV shell: """ - cutadapt {params.opts} -j {threads} -e 0.1 -q 16 -O 3 --trim-n --minimum-length 25 -a AGATCGGAAGAGC \ + cutadapt -j {threads} -e 0.1 -q 16 -O 3 --trim-n --minimum-length 25 -a AGATCGGAAGAGC {params.opts} \ -o {output} {input.r1} > {log.out} 2> {log.err} """
Detect Spyder. If running in the Spyder IDE, then use no_notebook.
+import os from ._version import get_versions from .gs_version import glowscript_version __version__ = get_versions()['version'] @@ -10,6 +11,8 @@ del glowscript_version def __checkisnotebook(): # returns True if running in Jupyter notebook try: + if any('SPYDER' in name for name in os.environ): + return False # Spyder detected so return False shell = get_ipython().__class__.__name__ if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole? return True
Updates gate optimization in do_long_sequence_gst for propagation. The gauge optimization performed as a part of do_long_sequence_gst now includes 'gateset' and '_gaugeGroupEl' in the gauge-opt params dictionary so that it can be used properly with the 'gauge_propagate_confidence_region_factory' method of an Estimate object.
@@ -570,7 +570,10 @@ def do_long_sequence_gst_base(dataFilenameOrSet, targetGateFilenameOrSet, if "comm" not in gaugeOptParams: gaugeOptParams["comm"] = comm - go_gs_final = _alg.gaugeopt_to_target(gs_lsgst_list[-1],**gaugeOptParams) + gaugeOptParams['returnAll'] = True # so we get gaugeEl to save + gaugeOptParams['gateset'] = gs_lsgst_list[-1] #starting gate set + _, gaugeEl, go_gs_final = _alg.gaugeopt_to_target(**gaugeOptParams) + gaugeOptParams['_gaugeGroupEl'] = gaugeEl #store gaugeopt el ret.estimates[estlbl].add_gaugeoptimized(gaugeOptParams, go_gs_final, None, printer)
Add mention to dont_merge_cookies in CookiesMiddlewares docs
@@ -237,6 +237,17 @@ Default: ``True`` Whether to enable the cookies middleware. If disabled, no cookies will be sent to web servers. +Notice that if the :class:`~scrapy.http.Request` +has ``meta['dont_merge_cookies']`` evaluated to ``True``. +despite the value of :setting:`COOKIES_ENABLED` the cookies will **not** be +sent to web servers and received cookies in +:class:`~scrapy.http.Response` will **not** be merged with the existing +cookies. + +For more detailed information see the ``cookies`` parameter in +:class:`~scrapy.http.Request` + + .. setting:: COOKIES_DEBUG COOKIES_DEBUG
Update readme.md Add case for compiling ops if tensorflow was compiled from source using gcc >= 5.0
@@ -26,6 +26,13 @@ TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())') g++ -std=c++11 -shared word2vec_ops.cc word2vec_kernels.cc -o word2vec_ops.so -fPIC -I $TF_INC -O2 -D_GLIBCXX_USE_CXX11_ABI=0 ``` +If tensorflow was compiled from source using gcc >= 5.0, you don't need to append D_GLIBCXX_USE_CXX11_ABI=0. + +```shell +TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())') +g++ -std=c++11 -shared word2vec_ops.cc word2vec_kernels.cc -o word2vec_ops.so -fPIC -I $TF_INC -O2 +``` + On Mac, add `-undefined dynamic_lookup` to the g++ command. (For an explanation of what this is doing, see the tutorial on [Adding a New Op to TensorFlow](https://www.tensorflow.org/how_tos/adding_an_op/#building_the_op_library). The flag `-D_GLIBCXX_USE_CXX11_ABI=0` is included to support newer versions of g++.)
wait_for event param is now positional only Closes Closes BOT-33N
@@ -404,7 +404,7 @@ class Incidents(Cog): def check(payload: discord.RawReactionActionEvent) -> bool: return payload.message_id == incident.id - coroutine = self.bot.wait_for(event="raw_message_delete", check=check, timeout=timeout) + coroutine = self.bot.wait_for("raw_message_delete", check=check, timeout=timeout) return scheduling.create_task(coroutine, event_loop=self.bot.loop) async def process_event(self, reaction: str, incident: discord.Message, member: discord.Member) -> None:
Changed the names of some parameters This should fix Not sure if it will...
@@ -119,13 +119,13 @@ class CompleteTutorial(BaseTask): # at the first choices in general, so fully # random on the whole avatar space is not the way to go either avatar['skin']=random.randint(0,3) - avatar['hair']=random.randint(0,3) - avatar['shirt']=random.randint(0,3) - avatar['pants']=random.randint(0,3) - avatar['hat']=random.randint(0,3) - avatar['shoes']=random.randint(0,3) - avatar['eyes']=random.randint(0,3) - avatar['backpack']=random.randint(0,3) + avatar['avartar_hair']=random.randint(0,3) + avatar['avartar_shirt']=random.randint(0,3) + avatar['avartar_pants']=random.randint(0,3) + avatar['avartar_hat']=random.randint(0,3) + avatar['avartar_shoes']=random.randint(0,3) + avatar['avartar_eyes']=random.randint(0,3) + avatar['avartar_backpack']=random.randint(0,3) return avatar def _set_avatar(self):
Update bot/exts/holidays/halloween/candy_collection.py From
@@ -201,7 +201,7 @@ class CandyCollection(commands.Cog): inline=False ) e.add_field( - name=f'{user.name}' + "'s Candy Score", + name="Your Candy Score", value=get_user_candy_score(), inline=False )
use SPD solver for diagonally dominant matrices This patch adds a test for diagonal dominance and switches Pardiso to the SPD matrix type to benefit from this property.
@@ -264,10 +264,17 @@ class MKLMatrix(Matrix): upper = numpy.zeros(len(self.data), dtype=bool) rowptr = numpy.empty_like(self.rowptr) rowptr[0] = 1 + diagdom = True for irow, (n, m) in enumerate(numeric.overlapping(self.rowptr-1), start=1): d = n + self.colidx[n:m].searchsorted(irow) upper[d:m] = True rowptr[irow] = rowptr[irow-1] + (m-d) - return Pardiso(mtype=dict(f=-2, c=6)[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args) + diagdom = diagdom and d < m and self.colidx[d] == irow and abs(self.data[n:m]).sum() < 2 * abs(self.data[d]) + if diagdom: + log.debug('matrix is diagonally dominant, solving as SPD') + mtype = dict(f=2, c=4) + else: + mtype = dict(f=-2, c=6) + return Pardiso(mtype=mtype[self.dtype.kind], a=self.data[upper], ia=rowptr, ja=self.colidx[upper], **args) # vim:sw=2:sts=2:et
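As background for the dominance test in the diff, a small dense-matrix sketch: a symmetric matrix with positive diagonal that is strictly diagonally dominant (each |diagonal| larger than the sum of the other absolute entries in its row, i.e. 2*|diag| > full absolute row sum) is positive definite, which is what justifies switching Pardiso to the SPD matrix type.

```python
import numpy as np

def is_strictly_diagonally_dominant(A):
    # 2*|a_ii| > sum_j |a_ij| for every row i
    return bool(np.all(2 * np.abs(np.diag(A)) > np.abs(A).sum(axis=1)))

A = np.array([[4., 1., 0.],
              [1., 5., 2.],
              [0., 2., 6.]])
print(is_strictly_diagonally_dominant(A))  # True -> SPD solver applies
```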
add 1D data as curves, not scatter Even if x values are not monotonic, it can still be a curve (mesh, parametric data...)
""" __authors__ = ["P. Knobel"] __license__ = "MIT" -__date__ = "27/06/2017" +__date__ = "23/10/2017" import numpy @@ -175,18 +175,11 @@ class ArrayCurvePlot(qt.QWidget): xerror=self.__axis_errors, yerror=y_errors) - # x monotonically increasing or decreasiing: curve - elif numpy.all(numpy.diff(x) > 0) or numpy.all(numpy.diff(x) < 0): + else: self._plot.addCurve(x, y, legend=legend, xerror=self.__axis_errors, yerror=y_errors) - # scatter - else: - self._plot.addScatter(x, y, value=numpy.ones_like(y), - legend=legend, - xerror=self.__axis_errors, - yerror=y_errors) self._plot.resetZoom() self._plot.getXAxis().setLabel(self.__axis_name) self._plot.getYAxis().setLabel(self.__signal_name)
fix: changed aws to github url for fashion mnist. Changed the aws url to the github url for the fashion mnist data, as the aws url is blocked for me.
@@ -155,7 +155,7 @@ def set_hw_parser(parser=None): default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.index.yml'))), help='the yaml path of the index flow') gp.add_argument('--index-data-url', type=str, - default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', + default='https://github.com/zalandoresearch/fashion-mnist/raw/master/data/fashion/train-images-idx3-ubyte.gz', help='the url of index data (should be in idx3-ubyte.gz format)') gp.add_argument('--index-batch-size', type=int, default=1024, @@ -165,7 +165,7 @@ def set_hw_parser(parser=None): default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.query.yml'))), help='the yaml path of the query flow') gp.add_argument('--query-data-url', type=str, - default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', + default='https://github.com/zalandoresearch/fashion-mnist/raw/master/data/fashion/t10k-images-idx3-ubyte.gz', help='the url of query data (should be in idx3-ubyte.gz format)') gp.add_argument('--query-batch-size', type=int, default=32,
ocs_ci/ocs/pillowfight.py - Docstring fix
@@ -68,8 +68,8 @@ class PillowFight(object): Args: replicas (int): Number of pod replicas - num_items (str): Number of items to be loaded to the cluster - num_threads (str): Number of threads + num_items (int): Number of items to be loaded to the cluster + num_threads (int): Number of threads """ ocp_local = OCP(namespace=self.namespace)
middlewares: session: fix session id generation Previously the session id would always be 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
@@ -23,7 +23,7 @@ def generate_random_string(length): for i in range(length): characters.append(random.choice(choices)) - return ''.join(choices) + return ''.join(characters) def generate_session_key(connection):
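The effect of the one-character fix, shown in a stand-alone reimplementation of the helper (the exact choices string in the middleware is assumed): joining `choices` always yields the full alphabet, while joining `characters` yields the random selection.

```python
import random
import string

def generate_random_string(length):
    choices = string.ascii_letters + string.digits
    characters = [random.choice(choices) for _ in range(length)]
    return ''.join(characters)   # joining `choices` would return the whole alphabet

print(generate_random_string(8))  # e.g. 'kP3zQx9L'
```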
Factor log_exceptions into a configuration parameter This simplifies away unnecessary propagation, cleans up argument lists. As far as I can tell, log_exceptions is True in all execution paths. It seems that removed the last occasion of it being (spuriously) set to False.
@@ -116,6 +116,7 @@ class RemoteScheduler(object): self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3) self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30) + self._log_exceptions = config.getboolean('core', 'log-exceptions', True) if HAS_REQUESTS: self._fetcher = RequestsFetcher(requests.Session()) @@ -126,7 +127,7 @@ class RemoteScheduler(object): logger.info("Wait for %d seconds" % self._rpc_retry_wait) time.sleep(self._rpc_retry_wait) - def _fetch(self, url_suffix, body, log_exceptions=True): + def _fetch(self, url_suffix, body): full_url = _urljoin(self._url, url_suffix) last_exception = None attempt = 0 @@ -140,7 +141,7 @@ class RemoteScheduler(object): break except self._fetcher.raises as e: last_exception = e - if log_exceptions: + if self._log_exceptions: logger.warning("Failed connecting to remote scheduler %r", self._url, exc_info=True) continue @@ -152,11 +153,11 @@ class RemoteScheduler(object): ) return response - def _request(self, url, data, log_exceptions=True, attempts=3, allow_null=True): + def _request(self, url, data, attempts=3, allow_null=True): body = {'data': json.dumps(data)} for _ in range(attempts): - page = self._fetch(url, body, log_exceptions) + page = self._fetch(url, body) response = json.loads(page)["response"] if allow_null or response is not None: return response
Suppressing hypothesis health check for qnnpack_add Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -6,6 +6,7 @@ import torch.jit import torch.nn.functional as F from torch.nn.modules.utils import _pair +from hypothesis import settings, HealthCheck from hypothesis import assume, given from hypothesis import strategies as st import hypothesis_utils as hu @@ -1520,6 +1521,7 @@ class TestQNNPackOps(TestCase): self.assertEqual(qY, qY_hat) """Tests the correctness of the quantized::add (qnnpack) op.""" + @settings(suppress_health_check=(HealthCheck.filter_too_much,)) @given(A=hu.tensor(shapes=hu.array_shapes(1, 5, 1, 5), qparams=hu.qparams(dtypes=torch.quint8)), zero_point=st.sampled_from([0, 2, 5, 15, 127]),
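For reference, the same suppression pattern in a minimal, self-contained Hypothesis test (the test body is hypothetical): assume() discards generated examples, and discarding too many is what normally trips the filter_too_much health check.

from hypothesis import HealthCheck, assume, given, settings
from hypothesis import strategies as st

@settings(suppress_health_check=(HealthCheck.filter_too_much,))
@given(x=st.integers())
def test_positive_only(x):
    # Heavy filtering via assume() would otherwise raise FailedHealthCheck.
    assume(x > 0)
    assert x > 0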
Move sfp_mnemonic to Passive DNS category Move sfp_mnemonic from the `Search Engines` category to `Passive DNS`.
@@ -18,7 +18,7 @@ import socket from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent class sfp_mnemonic(SpiderFootPlugin): - """Mnemonic PassiveDNS:Footprint,Investigate,Passive:Search Engines::Obtain Passive DNS information from PassiveDNS.mnemonic.no.""" + """Mnemonic PassiveDNS:Footprint,Investigate,Passive:Passive DNS::Obtain Passive DNS information from PassiveDNS.mnemonic.no.""" # Default options opts = {
purge: ceph-crash purge fixes This fixes the service file removal and makes the playbook call `systemctl reset-failed` on the service, because in Ceph Nautilus ceph-crash doesn't handle the `SIGTERM` signal. Closes:
enabled: no failed_when: false + - name: systemctl reset-failed ceph-crash@{{ 'ceph-crash@' + ansible_facts['hostname'] }} # noqa 303 + command: "systemctl reset-failed ceph-crash@{{ 'ceph-crash@' + ansible_facts['hostname'] }}" + changed_when: false + failed_when: false + when: containerized_deployment | bool + - name: remove service file file: - name: "/etc/systemd/system/ceph-crash.service" + name: "/etc/systemd/system/ceph-crash{{ '@' if containerized_deployment | bool else '' }}.service" state: absent failed_when: false
Added enable_timeout Added enable_timeout to see if that fixes the system's inability to find it in the config.
@@ -20,10 +20,13 @@ autofire_coils: ac_test: coil: c_test switch: s_test + enable_timeout: False ac_test_inverted: coil: c_test2 switch: s_test_nc + enable_timout: False ac_test_inverted2: coil: c_test2 switch: s_test reverse_switch: True + enable_timeout: False
ENH: added io to utils Added Input/Output methods to the utilities module.
# -*- coding: utf-8 -*- """Utilities supporting pysat classes, packages, and the testing environment. -pysat.utils contains a number of functions used -throughout the pysat package. This includes conversion -of formats, loading of files, and user-supplied info +pysat.utils contains a number of functions used throughout the pysat package. +This includes conversion of formats, loading of files, and user-supplied info for the pysat data directory structure. """ @@ -16,6 +15,7 @@ from pysat.utils._core import NetworkLock from pysat.utils._core import scale_units from pysat.utils import coords from pysat.utils import files +from pysat.utils import io from pysat.utils import registry from pysat.utils import testing from pysat.utils import time
fix bug when using a pretrained model (made IL -> RL fail); log the mean return better
@@ -169,9 +169,9 @@ utils.configure_logging(model_name) # Define obss preprocessor if 'emb' in args.arch: - obss_preprocessor = utils.IntObssPreprocessor(model_name, envs[0].observation_space) + obss_preprocessor = utils.IntObssPreprocessor(args.pretrained_model or model_name, envs[0].observation_space) else: - obss_preprocessor = utils.ObssPreprocessor(model_name, envs[0].observation_space) + obss_preprocessor = utils.ObssPreprocessor(args.pretrained_model or model_name, envs[0].observation_space) # Define actor-critic model @@ -355,6 +355,6 @@ while status['num_frames'] < args.frames: if mean_return > best_mean_return: best_mean_return = mean_return utils.save_model(acmodel, model_name) - logger.info("Best model is saved.") + logger.info("Return {: .2f}; best model is saved".format(mean_return)) else: - logger.info("Return {}; not the best model; not saved".format(mean_return)) + logger.info("Return {: .2f}; not the best model; not saved".format(mean_return))
Fix issue causing Windows executable to be missing Python modules Due to importing some modules indirectly via six, the Windows executable does not properly include some required Python libraries. The fix is to manually include them when performing the py2exe freezing.
@@ -231,6 +231,10 @@ setup( options={ "py2exe": { # TODO(windows): Auto-generate this list based on contents of the monitors directory. + # TODO(czerwin): Add in check to guard against new six.move + # dependencies. py2exe does not properly follow dependencies + # imported via six.move since they are proxied, so we must + # manually add the dependencies here. "includes": "scalyr_agent.builtin_monitors.windows_system_metrics," "scalyr_agent.builtin_monitors.windows_process_metrics," "scalyr_agent.builtin_monitors.apache_monitor," @@ -241,7 +245,9 @@ setup( "scalyr_agent.builtin_monitors.syslog_monitor," "scalyr_agent.builtin_monitors.test_monitor," "scalyr_agent.builtin_monitors.url_monitor," - "scalyr_agent.builtin_monitors.windows_event_log_monitor", + "scalyr_agent.builtin_monitors.windows_event_log_monitor," + "urlparse,urllib,urllib2,SocketServer,_winreg,cookielib," + "httplib,thread,cPickle,itertools", "dll_excludes": ["IPHLPAPI.DLL", "NSI.dll", "WINNSI.DLL", "WTSAPI32.dll"], } },
Remove kwargs validation for identity project updates Keystone supports setting custom properties when updating projects [1]. This change removes the check in openstacksdk cloud that prevents users from passing in custom kwargs when calling update_project. [1] Story: Task: 39157
@@ -98,7 +98,6 @@ class IdentityCloudMixin(_normalize.Normalizer): return _utils._get_entity(self, 'project', name_or_id, filters, domain_id=domain_id) - @_utils.valid_kwargs('description') def update_project(self, name_or_id, enabled=None, domain_id=None, **kwargs): with _utils.shade_exceptions(
Resolving "https://pencilcode.net/lib/pencilcodeembed.js - Failed to load resource: net::ERR_CERT_DATE_INVALID" error This error occurred when the pencilcode SSL cert expired. Since this is outside our control, we ignore the error.
@@ -51,6 +51,12 @@ var CONSOLE_ERRORS_TO_IGNORE = [ _.escapeRegExp( 'http://localhost:9099/www.googleapis.com/identitytoolkit/v3/' + 'relyingparty/verifyPassword?key=fake-api-key'), + // This error covers the case when the PencilCode site uses an + // invalid SSL certificate (which can happen when it expires). + // In such cases, we ignore the error since it is out of our control. + _.escapeRegExp( + 'https://pencilcode.net/lib/pencilcodeembed.js - Failed to ' + + 'load resource: net::ERR_CERT_DATE_INVALID'), ]; var checkForConsoleErrors = async function(errorsToIgnore) {
parent: Trim whitespace & e variable in first stage SSH command size: 439 (-4 bytes) Preamble size: 8962 (no change)
@@ -336,8 +336,8 @@ class Stream(mitogen.core.Stream): os.close(r) os.close(W) os.close(w) - os.environ['ARGV0']=e=sys.executable - os.execv(e,['mitogen:CONTEXT_NAME']) + os.environ['ARGV0']=sys.executable + os.execv(sys.executable,['mitogen:CONTEXT_NAME']) os.write(1,'EC0\n') C=_(sys.stdin.read(PREAMBLE_COMPRESSED_LEN),'zip') os.fdopen(W,'w',0).write(C)
added peers command to console; node versions are now stored from the VE p2p response
@@ -21,7 +21,7 @@ import fork log, consensus = logger.getLogger(__name__) -cmd_list = ['balance', 'mining', 'seed', 'hexseed', 'recoverfromhexseed', 'recoverfromwords', 'stakenextepoch', 'stake', 'address', 'wallet', 'send', 'mempool', 'getnewaddress', 'quit', 'exit', 'search' ,'json_search', 'help', 'savenewaddress', 'listaddresses','getinfo','blockheight', 'json_block', 'reboot'] +cmd_list = ['balance', 'mining', 'seed', 'hexseed', 'recoverfromhexseed', 'recoverfromwords', 'stakenextepoch', 'stake', 'address', 'wallet', 'send', 'mempool', 'getnewaddress', 'quit', 'exit', 'search' ,'json_search', 'help', 'savenewaddress', 'listaddresses','getinfo','blockheight', 'json_block', 'reboot', 'peers'] api_list = ['block_data','stats', 'ip_geotag','exp_win','txhash', 'address', 'empty', 'last_tx', 'stake_reveal_ones', 'last_block', 'richlist', 'ping', 'stake_commits', 'stake_reveals', 'stake_list', 'stakers', 'next_stakers', 'latency'] term = Terminal(); @@ -1361,7 +1361,7 @@ class WalletProtocol(Protocol): self.transport.write('>>> Number of transactions in memory pool: '+ str(len(chain.transaction_pool))+'\r\n') elif data[0] == 'help': - self.transport.write('>>> QRL ledger help: try quit, wallet, send, getnewaddress, search, recoverfromhexseed, recoverfromwords, stake, stakenextepoch, mempool, json_block, json_search, seed, hexseed, getinfo, or blockheight'+'\r\n') + self.transport.write('>>> QRL ledger help: try quit, wallet, send, getnewaddress, search, recoverfromhexseed, recoverfromwords, stake, stakenextepoch, mempool, json_block, json_search, seed, hexseed, getinfo, peers, or blockheight'+'\r\n') #removed 'hrs, hrs_check,' elif data[0] == 'quit' or data[0] == 'exit': self.transport.loseConnection() @@ -1388,6 +1388,11 @@ class WalletProtocol(Protocol): elif data[0] == 'blockheight': self.transport.write('>>> Blockheight: '+str(chain.m_blockheight())+'\r\n') + elif data[0] == 'peers': + self.transport.write('>>> Connected Peers:\r\n') + for peer in f.peers: + self.transport.write('>>> ' + peer.identity + " [" + peer.version + "] blockheight: " + str(peer.blockheight) + '\r\n') + elif data[0] == 'reboot': if len(args)<1: self.transport.write('>>> reboot <password>\r\n') @@ -1594,6 +1599,7 @@ class p2pProtocol(Protocol): self.messages = [] self.identity = None self.blockheight = None + self.version = '' self.blocknumber_headerhash = {} pass @@ -1847,6 +1853,7 @@ class p2pProtocol(Protocol): if not data: self.transport.write(self.wrap_message('VE',chain.version_number)) else: + self.version = str(data) printL(( self.transport.getPeer().host, 'version: ', data)) return
fix(rename_doc): Use sbool instead of cint cint("false") returns True, and "false" is what the frappe dialog sends. This may need to be fixed in the client alone, but this change makes the API more "robust", as it has been working this particular way for far too long now :')
@@ -9,7 +9,7 @@ from frappe.model.dynamic_links import get_dynamic_link_map from frappe.model.naming import validate_name from frappe.model.utils.user_settings import sync_user_settings, update_user_settings_data from frappe.query_builder import Field -from frappe.utils import cint +from frappe.utils.data import sbool from frappe.utils.password import rename_password from frappe.utils.scheduler import is_scheduler_inactive @@ -50,8 +50,8 @@ def update_document_title( frappe.throw(f"{obj=} must be of type str or None") # handle bad API usages - merge = cint(merge) - enqueue = cint(enqueue) + merge = sbool(merge) + enqueue = sbool(enqueue) doc = frappe.get_doc(doctype, docname) doc.check_permission(permtype="write") @@ -136,8 +136,8 @@ def rename_doc( old = old or doc.name doctype = doctype or doc.doctype - force = cint(force) - merge = cint(merge) + force = sbool(force) + merge = sbool(merge) meta = frappe.get_meta(doctype) if validate:
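A rough sketch of the pitfall described above, in plain Python rather than the actual frappe helpers: the dialog sends the string "false", which naive truthiness happily treats as enabled, while a string-aware boolean parser does not. The sbool_like helper is an assumed stand-in for illustration, not frappe's sbool.

def sbool_like(value):
    # Assumed behaviour: map common string spellings onto real booleans.
    if isinstance(value, str):
        return value.strip().lower() not in ("false", "0", "", "no", "off")
    return bool(value)

print(bool("false"))        # True  -- any non-empty string is truthy
print(sbool_like("false"))  # False -- what the whitelisted method actually needs
print(sbool_like("true"))   # True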
fix: Comment out caching code to fix test Remove debug flag
@@ -64,8 +64,10 @@ def create_review_points_log(user, points, reason=None): @frappe.whitelist() def get_energy_points(user): - points = frappe.cache().hget('energy_points', user, - lambda: get_user_energy_and_review_points(user)) + # points = frappe.cache().hget('energy_points', user, + # lambda: get_user_energy_and_review_points(user)) + # TODO: cache properly + points = get_user_energy_and_review_points(user) return frappe._dict(points.get(user, {})) @frappe.whitelist() @@ -84,7 +86,7 @@ def get_user_energy_and_review_points(user=None): FROM `tabEnergy Point Log` {where_user} GROUP BY `user` - """.format(where_user=where_user), values=[user] if user else (), debug=1, as_dict=1) + """.format(where_user=where_user), values=user or (), as_dict=1) dict_to_return = frappe._dict() for d in points_list:
apple: Rewrite comment in tests in generate_access_url_payload. The original comment is worded rather unclearly; we should explain these details better.
@@ -2537,8 +2537,9 @@ class AppleIdAuthBackendTest(AppleAuthMixin, SocialAuthBase): ) def generate_access_url_payload(self, account_data_dict: Dict[str, str]) -> str: - # The ACCESS_TOKEN_URL endpoint works a bit different in standard Oauth2, - # where the token_data_dict contains some essential data. we add that data here. + # The ACCESS_TOKEN_URL endpoint works a bit different than in standard Oauth2, + # and here, similarly to OIDC, id_token is also returned in the response. + # In Apple auth, all the user information is carried in the id_token. return json.dumps( { "access_token": "foobar",
Cycles Renderer : Remove `m_pause` member It is not being used for anything.
@@ -2906,7 +2906,6 @@ class CyclesRenderer final : public IECoreScenePreview::Renderer m_sceneChanged( true ), m_sessionReset( false ), m_outputsChanged( true ), - m_pause( false ), m_cryptomatteAccurate( true ), m_cryptomatteDepth( 0 ), m_seed( 0 ), @@ -3623,8 +3622,7 @@ class CyclesRenderer final : public IECoreScenePreview::Renderer if( m_renderState == RENDERSTATE_RENDERING ) { - m_pause = false; - m_session->set_pause( m_pause ); + m_session->set_pause( false ); return; } @@ -3649,8 +3647,7 @@ class CyclesRenderer final : public IECoreScenePreview::Renderer if( m_renderState == RENDERSTATE_RENDERING ) { - m_pause = true; - m_session->set_pause( m_pause ); + m_session->set_pause( true ); } } @@ -4303,7 +4300,6 @@ class CyclesRenderer final : public IECoreScenePreview::Renderer bool m_sceneChanged; bool m_sessionReset; bool m_outputsChanged; - bool m_pause; bool m_cryptomatteAccurate; int m_cryptomatteDepth; int m_seed;
Update facebook.py Reflect the feedback provided!
@@ -58,7 +58,6 @@ class FacebookOAuth2(BaseOAuth2): ) return {'username': response.get('username', response.get('name')), 'email': response.get('email', ''), - 'profile_picture': response.get('profile_picture', ''), 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
[fix] filelist login; changed set to list for entries modified: flexget/components/sites/sites/filelist.py
@@ -141,12 +141,25 @@ class SearchFileList: url = BASE_URL + 'takelogin.php' try: + # get validator token + response = requests.get(BASE_URL + 'login.php') + soup = get_soup(response.content) + + login_validator = soup.find("input", {"name": "validator"}) + + if not login_validator: + raise plugin.PluginError( + 'FileList.ro could not get login validator' + ) + logger.debug('Login Validator: {}'.format(login_validator.get('value'))) logger.debug('Attempting to retrieve FileList.ro cookie') + response = requests.post( url, data={ 'username': username, 'password': password, + 'validator': login_validator.get('value'), 'login': 'Log in', 'unlock': '1', }, @@ -179,7 +192,7 @@ class SearchFileList: """ Search for entries on FileList.ro """ - entries = set() + entries = list() params = { 'cat': CATEGORIES[config['category']], @@ -257,7 +270,7 @@ class SearchFileList: if genres: e['torrent_genres'] = genres - entries.add(e) + entries.append(e) return entries
Remove warning about default hash type This is no longer needed in the 2017.7 branch as the default has changed.
@@ -246,9 +246,6 @@ def _fingerprint(public_key, fingerprint_hash_type=None): if fingerprint_hash_type: hash_type = fingerprint_hash_type.lower() else: - # Set fingerprint_hash_type to md5 as default - log.warning('Public Key hashing currently defaults to "md5". This will ' - 'change to "sha256" in the 2017.7.0 release.') hash_type = 'sha256' try:
Refactor test_roles_client This patch refactors test_roles_client to include more reusable fixtures. This will ease the development of tests for some new library methods in a follow-up patch.
@@ -18,32 +18,40 @@ from tempest.tests.lib.services import base class TestRolesClient(base.BaseServiceTest): + + FAKE_ROLE_ID = "1" + FAKE_ROLE_NAME = "test" + FAKE_DOMAIN_ID = "1" + + FAKE_ROLE_ID_2 = "2" + FAKE_ROLE_NAME_2 = "test2" + FAKE_ROLE_INFO = { "role": { - "domain_id": "1", - "id": "1", - "name": "test", - "links": "example.com" + "domain_id": FAKE_DOMAIN_ID, + "id": FAKE_ROLE_ID, + "name": FAKE_ROLE_NAME, + "links": { + "self": "http://example.com/identity/v3/roles/%s" % ( + FAKE_ROLE_ID) + } } } - FAKE_LIST_ROLES = { - "roles": [ - { - "domain_id": "1", - "id": "1", - "name": "test", - "links": "example.com" - }, - { - "domain_id": "2", - "id": "2", - "name": "test2", - "links": "example.com" + FAKE_ROLE_INFO_2 = { + "role": { + "domain_id": FAKE_DOMAIN_ID, + "id": FAKE_ROLE_ID_2, + "name": FAKE_ROLE_NAME_2, + "links": { + "self": "http://example.com/identity/v3/roles/%s" % ( + FAKE_ROLE_ID_2) + } } - ] } + FAKE_LIST_ROLES = {"roles": [FAKE_ROLE_INFO, FAKE_ROLE_INFO_2]} + def setUp(self): super(TestRolesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() @@ -56,8 +64,8 @@ class TestRolesClient(base.BaseServiceTest): 'tempest.lib.common.rest_client.RestClient.post', self.FAKE_ROLE_INFO, bytes_body, - domain_id="1", - name="test", + domain_id=self.FAKE_DOMAIN_ID, + name=self.FAKE_ROLE_NAME, status=201) def _test_show_role(self, bytes_body=False): @@ -66,7 +74,7 @@ class TestRolesClient(base.BaseServiceTest): 'tempest.lib.common.rest_client.RestClient.get', self.FAKE_ROLE_INFO, bytes_body, - role_id="1") + role_id=self.FAKE_ROLE_ID) def _test_list_roles(self, bytes_body=False): self.check_service_client_function( @@ -81,8 +89,8 @@ class TestRolesClient(base.BaseServiceTest): 'tempest.lib.common.rest_client.RestClient.patch', self.FAKE_ROLE_INFO, bytes_body, - role_id="1", - name="test") + role_id=self.FAKE_ROLE_ID, + name=self.FAKE_ROLE_NAME) def _test_create_user_role_on_project(self, bytes_body=False): self.check_service_client_function( @@ -193,7 +201,7 @@ class TestRolesClient(base.BaseServiceTest): self.client.delete_role, 'tempest.lib.common.rest_client.RestClient.delete', {}, - role_id="1", + role_id=self.FAKE_ROLE_ID, status=204) def test_create_user_role_on_project_with_str_body(self):
Expose KeyPressEvent in key_binding/__init__.py This is often used in type annotations.
@@ -5,13 +5,16 @@ from .key_bindings import ( KeyBindingsBase, merge_key_bindings, ) -from .key_processor import KeyPress +from .key_processor import KeyPress, KeyPressEvent __all__ = [ + # key_bindings. "ConditionalKeyBindings", "DynamicKeyBindings", "KeyBindings", "KeyBindingsBase", "merge_key_bindings", + # key_processor "KeyPress", + "KeyPressEvent", ]
Remove redundant word 'strategy' Most strategies have the word 'strategy' in their display name.
@@ -245,7 +245,7 @@ class BaseStrategy(loadable.Loadable): should perform. """ - LOG.info("Initializing " + self.get_display_name() + " Strategy") + LOG.info("Initializing " + self.get_display_name()) if not self.compute_model: raise exception.ClusterStateNotDefined()
Fix browbeat_network conditional During the browbeat workload installation process we only check if browbeat_network is defined, but we also need to make sure it is not none.
- name: Check browbeat_network fail: msg="browbeat_network needs to be set" - when: browbeat_network is not defined + when: browbeat_network is not defined or browbeat_network is none - name: Copy userdata files template:
Ignore error when piping output to a nonexistent program * Ignore the `BrokenPipeError` which is caused when the `logging.Handler` tries to `emit` to a closed stream.
@@ -101,6 +101,24 @@ def set_handler_level(hdlr, level): logging.Handler.setLevel = set_handler_level +# Here we monkeypatch the `handleError` method of `logging.Handler` in +# order to ignore `BrokenPipeError` exceptions while keeping the default +# behavior for all the other types of exceptions. + +def handleError(func): + def ignore_brokenpipe(hdlr, l): + exc_type, *_ = sys.exc_info() + if exc_type == BrokenPipeError: + pass + else: + func(hdlr, l) + + return ignore_brokenpipe + + +logging.Handler.handleError = handleError(logging.Handler.handleError) + + class MultiFileHandler(logging.FileHandler): '''A file handler that allows writing on different log files based on information from the log record.
Fix missing `@property` in _CombinedRegistry.bindings. This method was never called, so not really a bug. Found thanks to the new mypy release.
@@ -1326,6 +1326,7 @@ class _CombinedRegistry(KeyBindingsBase): KeyBindings object.""" raise NotImplementedError + @property def bindings(self) -> List[Binding]: """Not needed - this object is not going to be wrapped in another KeyBindings object."""
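A minimal illustration, using throwaway classes, of what the missing decorator changes: without @property the attribute access hands back the bound method itself rather than the list the interface promises.

from typing import List

class WithoutProperty:
    def bindings(self) -> List[str]:
        return []

class WithProperty:
    @property
    def bindings(self) -> List[str]:
        return []

print(WithoutProperty().bindings)  # <bound method ...> -- a callable, not a list
print(WithProperty().bindings)     # []                 -- matches the declared interface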
Minor implementation improvements Use `contextlib.suppress` instead of `try/except/pass`. Check explicitly for `None` so as to allow empty strings.
@@ -13,6 +13,7 @@ __all__ = [ ] +import contextlib import functools import glob import inspect @@ -802,21 +803,15 @@ class RegressionTest(RegressionMixin, jsonext.JSONSerializable): if name is not None: self.name = name - try: - if not self.descr: - self.descr = self.name - - except AttributeError: # Pass if descr is a required variable. - pass - - try: - if not self.executable: - self.executable = os.path.join('.', self.name) + with contextlib.suppress(AttributeError): + if self.descr is None: + self.descr = self.name - except AttributeError: # Pass if the executable is a required variable. - pass + with contextlib.suppress(AttributeError): + if self.executable is None: + self.executable = os.path.join('.', self.name) self._perfvalues = {}
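A side-by-side sketch of the two changes, on a throwaway object rather than the real RegressionTest: contextlib.suppress replaces try/except/pass, and the explicit `is None` check stops a deliberately empty string from being overwritten.

import contextlib

class Demo:
    descr = ""   # deliberately-set empty string: falsy, but not missing

demo = Demo()

# Old style -- `if not demo.descr:` would have clobbered the empty string:
# try:
#     if not demo.descr:
#         demo.descr = "fallback"
# except AttributeError:
#     pass

# New style -- suppress() replaces try/except/pass, and `is None` allows "":
with contextlib.suppress(AttributeError):
    if demo.descr is None:
        demo.descr = "fallback"

print(repr(demo.descr))  # '' -- the empty string survives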
docker: fix and improve build_locally.sh Containers are now tagged with the correct version. Plain logging is now used, and there is the option to pass parameters to the buildx command.
@@ -9,6 +9,6 @@ KEYLIME_DIR=${2:-"../../"} ./generate-files.sh ${VERSION} for part in base registrar verifier tenant; do - docker buildx build -t keylime_${part} -f ${part}/Dockerfile $KEYLIME_DIR + docker buildx build -t keylime_${part}:${VERSION} -f ${part}/Dockerfile $KEYLIME_DIR --progress plain ${@:3} rm -f ${part}/Dockerfile done
Change git URL in README to https Like in , this URL won't work for people who do not have a GitHub SSH key, whereas an https URL should work for anyone.
@@ -57,7 +57,7 @@ or a Git URI:: mlflow run examples/sklearn_elasticnet_wine -P alpha=0.4 - mlflow run [email protected]:mlflow/mlflow-example.git -P alpha=0.4 + mlflow run https://github.com/mlflow/mlflow-example.git -P alpha=0.4 See ``examples/sklearn_elasticnet_wine`` for a sample project with an MLproject file.
(mod/docs) adding `import asyncio` might make the docs a bit clearer
@@ -82,6 +82,9 @@ if that's something you would want. Let's add this view in our `views.py` file: .. code-block:: python from api.commands import inittasks as tasklist + import asyncio + + ... class TaskView(APIView): def get(self, request):
Added new option `skip` to dependencies which can be used to suppress installation of folders under unpackaged/pre or unpackaged/post
@@ -565,6 +565,10 @@ class UpdateDependencies(BaseSalesforceMetadataApiTask): ) ) + skip = dependency.get('skip') + if not isinstance(skip, list): + skip = [skip,] + # Initialize github3.py API against repo gh = self.project_config.get_github_api() repo_owner, repo_name = dependency['github'].split('/')[3:5] @@ -587,6 +591,8 @@ class UpdateDependencies(BaseSalesforceMetadataApiTask): contents = repo.contents('unpackaged/pre') if contents: for dirname in contents.keys(): + if 'unpackaged/pre/{}'.format(dirname) in skip: + continue subfolder = "{}-{}/unpackaged/pre/{}".format(repo.name, repo.default_branch, dirname) zip_url = "{}/archive/{}.zip".format(repo.html_url, repo.default_branch) @@ -625,6 +631,8 @@ class UpdateDependencies(BaseSalesforceMetadataApiTask): contents = repo.contents('unpackaged/post') if contents: for dirname in contents.keys(): + if 'unpackaged/post/{}'.format(dirname) in skip: + continue zip_url = "{}/archive/{}.zip".format(repo.html_url, repo.default_branch) subfolder = "{}-{}/unpackaged/post/{}".format(repo.name, repo.default_branch, dirname)
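Based on the lookup above, a dependency entry using the new option might look like the following; the repository URL and folder names are hypothetical, and the normalisation mirrors what the task itself does.

# Hypothetical dependency entry as the task would receive it:
dependency = {
    "github": "https://github.com/example-org/example-repo",
    "skip": ["unpackaged/pre/sample_record_types"],
}

skip = dependency.get("skip")
if not isinstance(skip, list):
    skip = [skip]

# Folders listed in `skip` are passed over during installation:
for dirname in ("sample_record_types", "other_config"):
    if "unpackaged/pre/{}".format(dirname) in skip:
        continue
    print("would install unpackaged/pre/" + dirname)   # only "other_config" prints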
core: more descriptive graceful shutdown timeout error Accounts for timers too. Tidy up a wordy comment further down the file.
@@ -3333,10 +3333,10 @@ class Broker(object): self._loop_once(max(0, deadline - time.time())) if self.keep_alive(): - LOG.error('%r: some streams did not close gracefully. ' - 'The most likely cause for this is one or ' - 'more child processes still connected to ' - 'our stdout/stderr pipes.', self) + LOG.error('%r: pending work still existed %d seconds after ' + 'shutdown began. This may be due to a timer that is yet ' + 'to expire, or a child connection that did not fully ' + 'shut down.', self, self.shutdown_timeout) def _do_broker_main(self): """ @@ -3511,11 +3511,11 @@ class ExternalContext(object): if not self.config['profiling']: os.kill(os.getpid(), signal.SIGTERM) - #: On Python >3.4, the global importer lock has been sharded into a - #: per-module lock, meaning there is no guarantee the import statement in - #: service_stub_main will be truly complete before a second thread - #: attempting the same import will see a partially initialized module. - #: Sigh. Therefore serialize execution of the stub itself. + #: On Python >3.4, the global importer lock has split into per-module + #: locks, so there is no guarantee the import statement in + #: service_stub_main will complete before a second thread attempting the + #: same import will see a partially initialized module. Therefore serialize + #: the stub explicitly. service_stub_lock = threading.Lock() def _service_stub_main(self, msg):
framework/instruments: add ManagedCallback __repr__ Add a __repr__ for ManagedCallback to provide a useful representation in logging.
@@ -280,6 +280,12 @@ class ManagedCallback(object): else: raise + def __repr__(self): + text = 'ManagedCallback({}, {})' + return text.format(self.instrument.name, self.callback.im_func.func_name) + + __str__ = __repr__ + # Need this to keep track of callbacks, because the dispatcher only keeps # weak references, so if the callbacks aren't referenced elsewhere, they will
use keyword arguments to pass arguments to writeGlyphToString; the order was wrong...
@@ -290,12 +290,19 @@ class RGlyph(RBaseObject, BaseGlyph): def _loadFromGLIF(self, glifData): try: - readGlyphFromString(glifData, glyphObject=self.naked(), - pointPen=self.getPointPen()) + readGlyphFromString( + aString=glifData, + glyphObject=self.naked(), + pointPen=self.getPointPen() + ) except GlifLibError: raise FontPartsError("Not valid glif data") def _dumpToGLIF(self, glyphFormatVersion): glyph = self.naked() - return writeGlyphToString(glyph.name, glyph, - glyph.drawPoints, glyphFormatVersion) + return writeGlyphToString( + glyphName=glyph.name, + glyphObject=glyph, + drawPointsFunc=glyph.drawPoints, + formatVersion=glyphFormatVersion + )
[microTVM][Zephyr] Disable test_armv7m_intrinsic since it's broken; add xfail
@@ -104,12 +104,12 @@ def _apply_desired_layout_no_simd(relay_mod): @tvm.testing.requires_micro @pytest.mark.skip_boards(["mps2_an521"]) [email protected](reason="due https://github.com/apache/tvm/issues/12619") def test_armv7m_intrinsic(workspace_dir, board, west_cmd, microtvm_debug): """Testing a ARM v7m SIMD extension.""" - if board not in [ "mps2_an521", - "stm32f746xx_disco", + "stm32f746g_disco", "nucleo_f746zg", "nucleo_l4r5zi", "nrf5340dk_nrf5340_cpuapp",
Reword some anti-crossing docstrings Based on review from
@@ -19,12 +19,12 @@ __all__ = ['anti_crossing_clique', 'anti_crossing_loops'] def anti_crossing_clique(num_variables: int) -> BinaryQuadraticModel: - """Generate an anti crossing problem with a single clique. + """Generate an anti-crossing problem with a single clique. - Let ``N = num_variables // 2``. This function returns a binary - quadratic model with variables `[0, N)` forming a ferromagnetic clique. - Each variable `v` in `[N, 2*N)` ferromagnetically interacts with - variable `v-N`. + Let ``N = num_variables // 2``. This function returns a binary quadratic + model where half the variables, `[0, N)`, form a ferromagnetic clique, with + each variable, `v`, also ferromagnetically interacting with one variable, + `v+N`, of the remaining half of the variables, `[N, 2*N)`. All of the variables in the clique except variable `1` have a linear bias of `+1`, and all of the variables attached to the clique have a linear bias @@ -63,9 +63,9 @@ def anti_crossing_clique(num_variables: int) -> BinaryQuadraticModel: def anti_crossing_loops(num_variables: int) -> BinaryQuadraticModel: - """Generate an anti crossing problem with two loops. + """Generate an anti-crossing problem with two loops. - These instances are copies of the instance studied in [DJA]_. + This is the problem studied in [DJA]_. Note that for small values of ``num_variables``, the loops can be as small as a single edge.
Increase the size limit for GROUP_CONCAT. We were hitting this for zh_hans and zh_hant.
@@ -164,6 +164,7 @@ def install_scratch_db(): # generate a sql query that will atomically swap tables in # 'citationhunt' and 'scratch'. Modified from: # http://blog.shlomoid.com/2010/02/emulating-missing-rename-database.html + cursor.execute('''SET group_concat_max_len = 2048;''') cursor.execute(''' SELECT CONCAT('RENAME TABLE ', GROUP_CONCAT('%s.', table_name,
Increase test runs and reduce test run success count threshold. Tested-by: Ellis Breen Tested-by: Build Bot
@@ -45,7 +45,7 @@ class TouchTest(ConnectionTestCase): self.assertFalse(rv.success) self.assertTrue(E.NotFoundError._can_derive(rv.rc)) - @flaky(5,2) + @flaky(20,1) def test_trivial_multi_touch(self): kv = self.gen_kv_dict(prefix="trivial_multi_touch") self.cb.upsert_multi(kv, ttl=1)
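For context, flaky's two positional arguments are max_runs and min_passes, so the change allows up to 20 attempts and accepts a single passing run; a sketch of the equivalent keyword form with a hypothetical test body:

from flaky import flaky

@flaky(max_runs=20, min_passes=1)
def test_trivial_multi_touch():
    # Hypothetical body: a test that occasionally fails for environmental
    # reasons is re-run up to 20 times, and one passing run is enough.
    assert True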
tests: operation: archive: Test for preserving directory structure Fixes:
+import os +import pathlib +import tempfile from unittest.mock import patch, mock_open from dffml import run @@ -91,3 +94,56 @@ class TestTarOperations(AsyncTestCase): ), patch("tarfile.TarInfo.fromtarfile", m_open): async for _, _ in run(dataflow): m_open.assert_any_call("test/path/to/tar_file.tar", "rb") + + +class TestArchiveCreation(AsyncTestCase): + async def preseve_directory_structure(self, extension, make, extract): + # Temporary directory to work in + with tempfile.TemporaryDirectory() as tempdir: + # Variables for inputs and outputs + output_file_path = pathlib.Path( + tempdir, f"output_file.{extension}" + ) + output_directory_path = pathlib.Path(tempdir, "output_directory") + input_directory_path = pathlib.Path(tempdir, "input_directory") + input_directory_path.joinpath( + "top_level_dir", "child_dir_1" + ).mkdir(parents=True) + input_directory_path.joinpath( + "top_level_dir", "child_dir_1", "file1" + ).write_text("") + # Create our directory tree + await make( + input_directory_path, output_file_path, + ) + # Create our directory tree + await extract( + output_file_path, output_directory_path, + ) + # Test that the directory structure in the created tar file are the same + # as the input directory. + self.assertEqual( + sorted( + [ + "top_level_dir", + os.path.join("top_level_dir", "child_dir_1"), + os.path.join("top_level_dir", "child_dir_1", "file1"), + ] + ), + sorted( + [ + str(path.relative_to(output_directory_path)) + for path in output_directory_path.rglob("*") + ] + ), + ) + + async def test_preseve_directory_structure_tar(self): + await self.preseve_directory_structure( + "tar", make_tar_archive, extract_tar_archive + ) + + async def test_preseve_directory_structure_zip(self): + await self.preseve_directory_structure( + "zip", make_zip_archive, extract_zip_archive + )
[luhn] bump to 1.7.0 * [luhn] bump to 1.7.0 Bump `luhn` to the latest version of the canonical data. * bump CI
@@ -2,7 +2,7 @@ import unittest from luhn import Luhn -# Tests adapted from `problem-specifications//canonical-data.json` @ v1.6.1 +# Tests adapted from `problem-specifications//canonical-data.json` @ v1.7.0 class LuhnTest(unittest.TestCase): @@ -27,6 +27,9 @@ class LuhnTest(unittest.TestCase): def test_invalid_credit_card(self): self.assertIs(Luhn("8273 1232 7352 0569").valid(), False) + def test_invalid_long_number_with_an_even_remainder(self): + self.assertIs(Luhn("1 2345 6789 1234 5678 9012").valid(), False) + def test_valid_number_with_an_even_number_of_digits(self): self.assertIs(Luhn("095 245 88").valid(), True)