Make `is_authenticated` a property access rather than a function call. Django changed this to an attribute; calling it kept working for compatibility reasons until recently, but it should ultimately be accessed as an attribute.
@@ -12,7 +12,7 @@ class AddToBU(MiddlewareMixin):
     def process_view(self, request, view_func, view_args, view_kwargs):
         if hasattr(settings, 'ADD_TO_ALL_BUSINESS_UNITS'):
-            if request.user.is_authenticated():
+            if request.user.is_authenticated:
                 if settings.ADD_TO_ALL_BUSINESS_UNITS \
                         and request.user.userprofile.level != 'GA':
                     for business_unit in BusinessUnit.objects.all():
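For context on the change above, a minimal sketch of the attribute-style check on current Django versions (the view function is hypothetical, not taken from this repository):

from django.http import HttpResponse, HttpResponseForbidden

def my_view(request):
    # On modern Django, `is_authenticated` is an attribute; calling it like a
    # method is no longer supported.
    if request.user.is_authenticated:
        return HttpResponse("welcome")
    return HttpResponseForbidden()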
Render stamp for tiles.

PURPOSE
After merging, the following error occurs on non-Windows OS:

File "/Users/user/Library/Application Support/Blender/2.80/scripts/addons/rprblender/utils/render_stamp.py", line 47, in <module>
    class BitmapInfoHeader(ctypes.Structure):
NameError: name 'ctypes' is not defined

EFFECT OF CHANGE
Fix running the plugin on non-Windows OS (the failure was due to the render stamp module).
@@ -6,13 +6,14 @@ This version uses Windows API so it's compatible only with Windows operation sys
 
 import platform
 
+from . import IS_WIN
 from . import logging
 log = logging.Log(tag="render_stamp")
 
 # WinAPI text rendering doesn't work on Ubuntu and MacOS, use empty placeholder
-if platform.system() == 'Windows':
+if IS_WIN:
     # Windows specific imports and constants
     import numpy as np
 
@@ -124,12 +125,6 @@ class Win32GdiBitmap:
             ctypes.windll.gdi32.DeleteDC(self.bitmap)
 
 
-if platform.system() != 'Windows':
-    def render(text, image_width, image_height):
-        """ Unable to render """
-        log.debug(f"Render stamp for operation system '{platform.system()}' is not implemented yet")
-        raise NotImplementedError()
-else:
     def render(text, image_width, image_height):
         """ Render stamp text as bitmap using Windows GDI32 API, return pixels and actual stamp image size
 
@@ -181,4 +176,8 @@ else:
 
         return black_white_rect, width, height
 
-
+else:
+    def render(text, image_width, image_height):
+        """ Unable to render """
+        log.debug(f"Render stamp for operation system '{platform.system()}' is not implemented yet")
+        raise NotImplementedError()
Update protocols_passing_authentication_in_cleartext.yml Added a few more changes to make it more clear. Thank you for updating!
@@ -6,10 +6,10 @@ author: Rico Valdez, Splunk
 type: TTP
 datamodel:
 - Network_Traffic
-description: This search looks for cleartext protocols at risk of leaking credentials.
-  Currently, this consists of legacy protocols such as telnet, POP3, IMAP, and non-anonymous
-  FTP sessions. While some of these protocols can be used over SSL, they typically
-  run on different assigned ports in those cases.
+description: The following analytic identifies cleartext protocols at risk of leaking sensitive information.
+  Currently, this consists of legacy protocols such as telnet (port 23), POP3 (port 110), IMAP (port 143), and non-anonymous
+  FTP (port 21) sessions. While some of these protocols may be used over SSL, they typically
+  are found on different assigned ports in those instances.
 search: '| tstats `security_content_summariesonly` count min(_time) as firstTime max(_time)
   as lastTime from datamodel=Network_Traffic where All_Traffic.action!=blocked AND All_Traffic.transport="tcp"
   AND (All_Traffic.dest_port="23" OR All_Traffic.dest_port="143" OR All_Traffic.dest_port="110"
@@ -23,7 +23,9 @@ how_to_implement: This search requires you to be ingesting your network traffic,
 known_false_positives: Some networks may use kerberized FTP or telnet servers, however,
   this is rare.
-references: []
+references:
+ - https://www.rackaid.com/blog/secure-your-email-and-file-transfers/
+ - https://www.infosecmatter.com/capture-passwords-using-wireshark/
 tags:
   analytic_story:
   - Use of Cleartext Protocols
@@ -50,4 +52,5 @@ tags:
   - All_Traffic.user
   - All_Traffic.src
   - All_Traffic.dest
+  - All_Traffic.action
 security_domain: network
update test_linker.py to handle a temp file using try/finally This is based on the recommendation from
@@ -46,10 +46,11 @@ class LinkerTest(unittest.TestCase):
         manifest = _mock_manifest('ABC')
         (fd, fname) = tempfile.mkstemp()
+        os.close(fd)
+
+        try:
             self.linker.write_graph(fname, manifest)
-        new_linker = linker.from_file(fname)
-        os.close(fd)
+        finally:
             os.unlink(fname)
 
         actual_nodes = new_linker.nodes()
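For reference, a self-contained sketch of the mkstemp-plus-try/finally pattern this test adopts (generic file contents, not the project's own linker code):

import os
import tempfile

fd, fname = tempfile.mkstemp()
os.close(fd)                      # release the OS-level handle right away
try:
    with open(fname, "w") as f:   # reuse the path however the test needs
        f.write("graph data")
finally:
    os.unlink(fname)              # cleanup runs even if the body raises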
Update Dockerfile Performing Browser Update that's erroring. `npx browserslist@latest --update-db`
@@ -12,6 +12,7 @@ FROM build-stage as core-ui
 COPY frontend/package.json .
 COPY frontend/package-lock.json .
 RUN npm install
+RUN npx browserslist@latest --update-db
 RUN npm install @vue/cli
 COPY frontend/ .
 RUN npm run build
Fix forceBGZ behavior fixes
@@ -491,13 +491,13 @@ class HailContext private(val sc: SparkContext,
     if (forceBGZ)
       hadoopConf.set("io.compression.codecs",
         codecs.replaceAllLiterally("org.apache.hadoop.io.compress.GzipCodec", "is.hail.io.compress.BGzipCodecGZ"))
-
+    try {
       val reader = new HtsjdkRecordReader(callFields)
-      val vkds = LoadVCF(this, reader, headerFile, inputs, nPartitions, dropSamples, gr, contigRecoding.getOrElse(Map.empty[String, String]))
-
+      LoadVCF(this, reader, headerFile, inputs, nPartitions, dropSamples, gr,
+        contigRecoding.getOrElse(Map.empty[String, String]))
+    } finally {
       hadoopConf.set("io.compression.codecs", codecs)
-
-    vkds
+    }
   }
 
   def importMatrix(file: String,
Fix use_gpu in experiment API Also removes use_tf, since it is now deprecated. Fixes
@@ -221,8 +221,7 @@ def run_experiment(method_call=None, dry=False, env=None, variant=None, - use_tf=False, - use_gpu=False, + force_cpu=False, pre_commands=None, **kwargs): """Serialize the method call and run the experiment using the @@ -240,11 +239,8 @@ def run_experiment(method_call=None, commands without executing them. env (dict): Extra environment variables. variant (dict): If provided, should be a dictionary of parameters. - use_tf (bool): Used along with the Theano and GPU configuration - when using TensorFlow - use_gpu (bool): Whether the launched task is running on GPU. - This triggers a few configuration changes including certain - environment flags. + force_cpu (bool): Whether to set all GPU devices invisible + to force use CPU. pre_commands (str): Pre commands to run the experiment. """ @@ -272,11 +268,8 @@ def run_experiment(method_call=None, global exp_count - if use_tf: - if not use_gpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '' - else: - os.unsetenv('CUDA_VISIBLE_DEVICES') + if force_cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' for task in batch_tasks: call = task.pop('method_call') @@ -304,8 +297,7 @@ def run_experiment(method_call=None, elif 'variant' in task: del task['variant'] task['env'] = task.get('env', dict()) or dict() - task['env']['GARAGE_USE_GPU'] = str(use_gpu) - task['env']['GARAGE_USE_TF'] = str(use_tf) + task['env']['GARAGE_FORCE_CPU'] = str(force_cpu) for task in batch_tasks: env = task.pop('env', None)
Minor fix to fbprophet Use .loc as suggested in the warning
@@ -187,7 +187,7 @@ def _merge_X(df, X):
         X.columns = X.columns.astype(str)
         if "ds" in X.columns:
             raise ValueError("Column name 'ds' is reserved in fbprophet")
-        X["ds"] = X.index
+        X.loc[:, "ds"] = X.index
         # df = df.merge(X, how="inner", on="ds", copy=False)
         df = df.merge(X, how="inner", on="ds")
     return df, X.drop(columns="ds")
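A minimal, self-contained illustration of the `.loc`-based column assignment the change switches to (toy data rather than fbprophet's inputs):

import pandas as pd

X = pd.DataFrame({"y": [1.0, 2.0, 3.0]},
                 index=pd.date_range("2021-01-01", periods=3))
X.loc[:, "ds"] = X.index   # explicit .loc avoids the SettingWithCopyWarning that X["ds"] = ... can raise on a slice
print(X)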
Split ignored_tags in stats.py As a follow-up to this patch, split ignored_tags into two lists, ignored_pool_tags and ignored_spec_tags, as the two tag lists are different.
@@ -68,11 +68,15 @@ class PciDeviceStats(object): # the PCI alias, but they are matched by the placement # allocation_candidates query, so we can ignore them during pool creation # and during filtering here - ignored_tags = ['resource_class', 'traits'] - # these are metadata keys in the pool and in the request that are matched + ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits'] + # this is a metadata key in the spec that is matched # specially in _filter_pools_based_on_placement_allocation. So we can # ignore them in the general matching logic. - ignored_tags += ['rp_uuid', 'rp_uuids'] + ignored_spec_tags += ['rp_uuids'] + # this is a metadata key in the pool that is matched + # specially in _filter_pools_based_on_placement_allocation. So we can + # ignore them in the general matching logic. + ignored_pool_tags += ['rp_uuid'] def __init__( self, @@ -146,7 +150,11 @@ class PciDeviceStats(object): if tags: pool.update( - {k: v for k, v in tags.items() if k not in self.ignored_tags} + { + k: v + for k, v in tags.items() + if k not in self.ignored_pool_tags + } ) # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a # single RP and the scheduler allocates from a specific RP we need @@ -358,7 +366,9 @@ class PciDeviceStats(object): def ignore_keys(spec): return { - k: v for k, v in spec.items() if k not in self.ignored_tags + k: v + for k, v in spec.items() + if k not in self.ignored_spec_tags } request_specs = [ignore_keys(spec) for spec in request.spec]
Disabling voltha-net encryption because it was causing latency issues. This needs to be better characterized to ensure that encryption latencies are acceptable for the HA requirements.
 voltha_base_dir="/cord/incubator/voltha"
 hostName=`hostname`
-docker network create --driver overlay --subnet=172.29.19.0/24 --opt encrypted=true voltha_net
+docker network create --driver overlay --subnet=172.29.19.0/24 voltha_net
+#docker network create --driver overlay --subnet=172.29.19.0/24 --opt encrypted=true voltha_net
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-kafka-cluster.yml kafka
 docker stack deploy -c ${voltha_base_dir}/compose/docker-compose-consul-cluster.yml consul
 echo "Waiting for consul to start"
test(changelog): fixes logic issue made evident by latest fix(git) commit Exception is raised by git error: "fatal: your current branch 'master' does not have any commits yet". This response is more informative than the original "No commits found" exception and the "fail fast" logic is a bit easier to follow.
@@ -6,6 +6,7 @@ from commitizen import cli, git
 from commitizen.commands.changelog import Changelog
 from commitizen.exceptions import (
     DryRunExit,
+    GitCommandError,
     NoCommitsFoundError,
     NoRevisionError,
     NotAGitProjectError,
@@ -19,10 +20,12 @@ def test_changelog_on_empty_project(mocker):
     testargs = ["cz", "changelog", "--dry-run"]
     mocker.patch.object(sys, "argv", testargs)
 
-    with pytest.raises(NoCommitsFoundError) as excinfo:
+    with pytest.raises(GitCommandError):
         cli.main()
 
-    assert "No commits found" in str(excinfo)
+    # git will error out with something like:
+    # "your current branch 'XYZ' does not have any commits yet"
+    # is it wise to assert the exception contains a message like this?
 
 
 @pytest.mark.usefixtures("tmp_commitizen_project")
run rst2pseudoxml.py with shell=true This makes rst2pseudoxml.py work properly on Windows by executing it via a shell instead of failing.
@@ -34,7 +34,8 @@ def test_rst_file_syntax(filename):
     p = subprocess.Popen(
         ['rst2pseudoxml.py', '--report=1', '--exit-status=1', filename],
         stderr=subprocess.PIPE,
-        stdout=subprocess.PIPE
+        stdout=subprocess.PIPE,
+        shell=True
     )
     err = p.communicate()[1]
     assert p.returncode == 0, err.decode('utf8')
Add warning message for links In case a bidirectional link is defined with an efficiency value lower than 1, the logger emits a warning hinting the user at possibly unintended behavior. If there are any links with such a specification, the respective links are listed.
@@ -995,6 +995,14 @@ def define_nodal_balances(network,snapshots):
 
     efficiency = get_switchable_as_dense(network, 'Link', 'efficiency', snapshots)
 
+    filter = (
+        (get_switchable_as_dense(network, 'Link', 'p_min_pu', snapshots) < 0) &
+        (efficiency < 1)
+    )
+    links = filter[filter].dropna(how='all', axis=1)
+    if links.size > 0:
+        logger.warning("Some bidirectional links have efficiency values lower than 1: '" + "', '".join(links) + "'.")
+
     for cb in network.links.index:
         bus0 = network.links.at[cb,"bus0"]
         bus1 = network.links.at[cb,"bus1"]
fix disconnected lso deployment create (redhat-operators) catalog source before configuring LSO
@@ -449,6 +449,9 @@ class Deployment(object):
             logger.info("Deployment of OCS via OCS operator")
             self.label_and_taint_nodes()
 
+        if not live_deployment:
+            create_catalog_source(image)
+
         if config.DEPLOYMENT.get("local_storage"):
             setup_local_storage(storageclass=self.DEFAULT_STORAGECLASS_LSO)
 
@@ -476,8 +479,6 @@ class Deployment(object):
                 ibmcloud.add_deployment_dependencies()
                 if not live_deployment:
                     create_ocs_secret(self.namespace)
-        if not live_deployment:
-            create_catalog_source(image)
         self.subscribe_ocs()
         operator_selector = get_selector_for_ocs_operator()
         subscription_plan_approval = config.DEPLOYMENT.get("subscription_plan_approval")
[bugfix, tests] Fix AppVeyor Python version you were right, I don't know why there is 3.5.0 missing
@@ -21,8 +21,8 @@ environment:
       PYTHON_VERSION: "2.7.4"
       PYTHON_ARCH: "64"
 
-    - PYTHON: "C:\\Python350-x64"
-      PYTHON_VERSION: "3.5.0"
+    - PYTHON: "C:\\Python351-x64"
+      PYTHON_VERSION: "3.5.1"
       PYTHON_ARCH: "64"
 
 # Appveyor pre-installs these versions onto build machines
Enable stale issue github action After several dry run outputs, it's performing the expected actions so we can enable this workflow.
@@ -44,4 +44,4 @@ jobs:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           loglevel: DEBUG
           # Set dry-run to true to not perform label or close actions.
-          dry-run: true
+          dry-run: false
Update thehive.py Do not add empty observables.
@@ -35,6 +35,7 @@ class HiveAlerter(Alerter):
         for mapping in self.rule.get('hive_observable_data_mapping', []):
             for observable_type, mapping_key in mapping.items():
                 data = self.lookup_field(match, mapping_key, '')
+                if len(data) != 0:
                     artifact = {'tlp': 2,
                                 'tags': [],
                                 'message': None,
Fixed CortexM_CC3220SF usage of dp attribute. CortexM objects no longer have a 'dp' attribute.
@@ -147,6 +147,6 @@ class CortexM_CC3220SF(CortexM):
         try:
             self.writeMemory(CortexM.NVIC_AIRCR, CortexM.NVIC_AIRCR_VECTKEY | CortexM.NVIC_AIRCR_VECTRESET)
             # Without a flush a transfer error can occur
-            self.dp.flush()
+            self.flush()
         except exceptions.TransferError:
-            self.dp.flush()
+            self.flush()
Update inputoutput.py Fix long lines
@@ -755,11 +755,11 @@ def block_to_graphviz_string(block=None, namer=_graphviz_default_namer, split_st
         function that can subsequently be passed to 'output_to_graphviz' or
         'block_to_graphviz_string'. ::
 
-            node_fanout = {n: "Fanout: %d" % my_fanout_func(n) for n in pyrtl.working_block().logic}
-            wire_delay = {w: "Delay: %.2f" % my_delay_func(w) for w in pyrtl.working_block().wirevector_set}
+            node_fanout = {n: "Fanout: %d" % my_fanout_func(n) for n in working_block().logic}
+            wire_delay = {w: "Delay: %.2f" % my_delay_func(w) for w in working_block().wirevector_set}
 
             with open("out.gv", "w") as f:
-                pyrtl.output_to_graphviz(f, namer=pyrtl.graphviz_detailed_namer(node_fanout, wire_delay))
+                output_to_graphviz(f, namer=graphviz_detailed_namer(node_fanout, wire_delay))
     """
     graph = net_graph(block, split_state)
     node_index_map = {}  # map node -> index
NOC.inv.map.Maintainance use remote search HG-- branch : feature/microservices
@@ -175,43 +175,25 @@ Ext.define('NOC.inv.map.Maintainance', { loadData: function() { var me = this, - filter = function(element) { - if(this.filter.length === 0 && this.isCompleted) { - return !element.is_completed; - } - if(this.filter.length === 0 && !this.isCompleted) { - return true; - } - if(this.filter.length > 0 && this.isCompleted && element.subject.indexOf(this.filter) > -1) { - return !element.is_completed; - } - return this.filter.length > 0 && !this.isCompleted && element.subject.indexOf(this.filter) > -1; - - }, filterValue = me.dockMaintainance.down('#filterField') ? me.dockMaintainance.down('#filterField').getValue() : '', isCompletedValue = me.dockMaintainance.down('#isCompleted') ? me.dockMaintainance.down('#isCompleted').getValue() : true; - // ToDo uncomment after create backend request - // var params = {}; - // if(filterValue.length > 0) { - // params = Ext.apply(params, {query: filterValue}); - // } - // if(isCompletedValue) { - // params = Ext.apply(params, {completed: 1}) - // } + var params = {}; + if(filterValue.length > 0) { + params = Ext.apply(params, {__query: filterValue}); + } + if(isCompletedValue) { + params = Ext.apply(params, {is_completed: 1}) + } Ext.Ajax.request({ url: me.rest_url, method: "GET", - // params: params, + params: params, success: function(response) { var data = Ext.decode(response.responseText); - var filtred = data.filter(filter, { - filter: filterValue, - isCompleted: isCompletedValue - }); - me.store.loadData(filtred); - me.dockMaintainance.down('#status').update({count: filtred.length}); + me.store.loadData(data); + me.dockMaintainance.down('#status').update({count: data.length}); }, failure: function() { NOC.msg.failed(__("Failed to load data"))
Fix tense problems Some of the first paragraph was in the past tense and some was in the present. Now everything is in the present.
 **Why do we need asynchronous programming?**
 
-Imagine that you're coding a Discord bot and every time somebody uses a command, you need to get some information from a database. But there's a catch: the database servers are acting up today and take a whole 10 seconds to respond. If you did **not** use asynchronous methods, your whole bot will stop running until it gets a response from the database. How do you fix this? Asynchronous programming.
+Imagine that you're coding a Discord bot and every time somebody uses a command, you need to get some information from a database. But there's a catch: the database servers are acting up today and take a whole 10 seconds to respond. If you do **not** use asynchronous methods, your whole bot will stop running until it gets a response from the database. How do you fix this? Asynchronous programming.
 
 **What is asynchronous programming?**
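A small runnable sketch of the idea described in that paragraph; the 10-second database call is simulated with a sleep, and all names are illustrative:

import asyncio

async def fetch_from_database() -> str:
    await asyncio.sleep(10)             # stand-in for the slow database round trip
    return "user record"

async def handle_command() -> None:
    data = await fetch_from_database()  # while this awaits, the event loop can keep serving other commands
    print(data)

asyncio.run(handle_command())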
Update executor.py fixed spelling of registered
@@ -48,7 +48,7 @@ def GetExecutorParams(model_name, cluster_params, model_registry):
   """Get the params needed to instantiate the Executor.
 
   Args:
-    model_name: A model name regsitered in the ModelRegistry.
+    model_name: A model name registered in the ModelRegistry.
     cluster_params: A cluster hyperparams object.
     model_registry: A ModelRegistry object.
Update _mpca.py isort all (imports)
@@ -14,7 +14,7 @@ Reference:
 
 import numpy as np
 
 # import tensorly as tl
-from tensorly.base import unfold, fold
+from tensorly.base import fold, unfold
 from tensorly.tenalg import multi_mode_dot
 from sklearn.base import BaseEstimator, TransformerMixin
Fix yt music artist matching also run black formatting
@@ -218,10 +218,11 @@ def order_results(
         #     (slug_result_name if result["type"] != "song" else slug_result_artists))
         artist_match_number += (
             1
-            if slugify(artist).replace("-", "")
-            in (
-                slug_result_name if result["type"] != "song" else
-                slug_result_artists).replace("-", "")
+            if slugify(artist)
+            in [
+                slug_result_name,
+                slug_result_artists,
+            ]
             else 0
         )
switch to sa.profile.LookupField HG-- branch : feature/moversion
@@ -10,7 +10,7 @@ Ext.define('NOC.core.filter.Filter', {
     requires: [
         'Ext.ux.form.SearchField',
-        'NOC.main.ref.profile.LookupField',
+        'NOC.sa.profile.LookupField',
         'NOC.main.pool.LookupField',
         'NOC.sa.vendor.LookupField',
         'NOC.sa.platform.LookupField',
@@ -57,7 +57,7 @@ Ext.define('NOC.core.filter.Filter', {
                 }
             }, {
-                xtype: 'main.ref.profile.LookupField',
+                xtype: 'sa.profile.LookupField',
                 itemId: 'profile_name', // name of http request query param
                 fieldLabel: __('By SA Profile:'),
                 uiStyle: undefined,
Add GnocchiStorageS3BucketPrefix into deployment Closes-Bug: Depends-On:
@@ -93,6 +93,10 @@ parameters:
     description: S3 storage access key secret.
     type: string
     hidden: true
+  GnocchiStorageS3BucketPrefix:
+    default: ''
+    description: S3 storage bucket prefix.
+    type: string
   GnocchiFileBasePath:
     default: '/var/lib/gnocchi'
     description: Path to use when file driver is used. This could be NFS or a
@@ -160,6 +164,7 @@ outputs:
             gnocchi::storage::s3::s3_region_name: {get_param: GnocchiStorageS3RegionName}
             gnocchi::storage::s3::s3_access_key_id: {get_param: GnocchiStorageS3AccessKeyId}
             gnocchi::storage::s3::s3_secret_access_key: {get_param: GnocchiStorageS3AccessSecret}
+            gnocchi::storage::s3::s3_bucket_prefix: {get_param: GnocchiStorageS3BucketPrefix}
             #Gnocchi statsd
             gnocchi::statsd::resource_id: '0a8b55df-f90f-491c-8cb9-7cdecec6fc26'
             gnocchi::statsd::user_id: '27c0d3f8-e7ee-42f0-8317-72237d1c5ae3'
Set GUI binary name to chia-blockchain in the Fedora rpm set gui binary name to chia-blockchain (rpm)
@@ -83,9 +83,11 @@ if [ "$REDHAT_PLATFORM" = "arm64" ]; then
 fi
 PRODUCT_NAME="chia"
 echo electron-builder build --linux rpm "${OPT_ARCH}" \
+  --config.extraMetadata.name=chia-blockchain \
   --config.productName="${PRODUCT_NAME}" --config.linux.desktop.Name="Chia Blockchain" \
   --config.rpm.packageName="chia-blockchain"
 electron-builder build --linux rpm "${OPT_ARCH}" \
+  --config.extraMetadata.name=chia-blockchain \
   --config.productName="${PRODUCT_NAME}" --config.linux.desktop.Name="Chia Blockchain" \
   --config.rpm.packageName="chia-blockchain"
 LAST_EXIT_CODE=$?
ci: add comment per job execution On every force-push we lose the CI information from previous executions; this commit adds a comment once each job finishes.
@@ -142,8 +142,8 @@ def main(cluster_type, job_type): else: state = "failure" - desc = ("Ended with %s in %s minutes" % (state, - round((time.time() - start_time) / 60, 2))) + dur_mins = str(round((time.time() - start_time) / 60, 2)) + desc = ("Ended with %s in %s minutes" % (state, dur_mins)) dest_url = 'https://storage.googleapis.com/kubeinit-ci/jobs/' + str(job_name) + "-" + str(output) + '/index.html' print("'launch_e2e.py' ==> The destination URL is: " + dest_url) @@ -158,6 +158,16 @@ def main(cluster_type, job_type): hypervisors, services, launch_from)) + pr.create_issue_comment(body="%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % (distro, + driver, + masters, + workers, + hypervisors, + services, + launch_from, + state, + str(start_time), + dur_mins)) if output == 0: exit() @@ -217,6 +227,7 @@ def main(cluster_type, job_type): branch_name = 'main' pr_number = 'latest' timestamp = 'weekly' + start_time = time.time() job_name = (distro + "-" + driver + "-" + masters + "-" + @@ -247,6 +258,23 @@ def main(cluster_type, job_type): launch_from, job_name) + if output == 0: + state = "success" + else: + state = "failure" + dur_mins = str(round((time.time() - start_time) / 60, 2)) + issue = repo.issue(489) + issue.create_comment(body="%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % (distro, + driver, + masters, + workers, + hypervisors, + services, + launch_from, + state, + str(start_time), + dur_mins)) + # # KubeInit's submariner PR check # @@ -328,8 +356,9 @@ def main(cluster_type, job_type): else: state = "failure" - desc = ("Ended with %s in %s minutes" % (state, - round((time.time() - start_time) / 60, 2))) + dur_mins = str(round((time.time() - start_time) / 60, 2)) + desc = ("Ended with %s in %s minutes" % (state, dur_mins)) + dest_url = 'https://storage.googleapis.com/kubeinit-ci/jobs/' + str(job_name) + "-" + str(output) + '/index.html' print("'launch_e2e.py' ==> The destination URL is: " + dest_url) @@ -344,6 +373,16 @@ def main(cluster_type, job_type): hypervisors, services, launch_from)) + pr.create_issue_comment(body="%s-%s-%s-%s-%s-%s-%s-%s-%s-%s" % (distro, + driver, + masters, + workers, + hypervisors, + services, + launch_from, + state, + str(start_time), + dur_mins)) if output == 0: exit() else:
feat(DQN): DQN now inherits from IncrementalAgent See
@@ -2,14 +2,14 @@ from abc import ABC, abstractmethod from gym import spaces import logging -from rlberry.agents import Agent +from rlberry.agents import IncrementalAgent from rlberry.agents.dqn.exploration import exploration_factory from rlberry.agents.utils.memories import ReplayMemory, Transition logger = logging.getLogger(__name__) -class AbstractDQNAgent(Agent, ABC): +class AbstractDQNAgent(IncrementalAgent, ABC): def __init__(self, env, horizon, @@ -21,7 +21,7 @@ class AbstractDQNAgent(Agent, ABC): double=True, **kwargs): ABC.__init__(self) - Agent.__init__(self, env, **kwargs) + IncrementalAgent.__init__(self, env, **kwargs) self.horizon = horizon self.exploration_kwargs = exploration_kwargs or {} self.memory_kwargs = memory_kwargs or {} @@ -42,32 +42,37 @@ class AbstractDQNAgent(Agent, ABC): self.writer = None def fit(self, **kwargs): + return self.partial_fit(fraction=1, **kwargs) + + def partial_fit(self, fraction, **kwargs): episode_rewards = [] - for episode in range(self.n_episodes): + for episode in range(int(fraction * self.n_episodes)): logger.debug(f"episode {episode+1}/{self.n_episodes}") - done, total_reward = False, 0 - state = self.env.reset() - ep_time = 0 - while not done and ep_time < self.horizon: - self.exploration_policy.step_time() - action = self.policy(state) - next_state, reward, done, info = self.env.step(action) - self.record(state, action, reward, next_state, done, info) - state = next_state - total_reward += reward - # - ep_time += 1 + total_reward = self._run_episode() if self.writer: self.writer.add_scalar("fit/total_reward", total_reward, episode) episode_rewards.append(total_reward) - return { - "n_episodes": self.n_episodes, + "n_episodes": int(fraction * self.n_episodes), "episode_rewards": episode_rewards } + def _run_episode(self): + total_reward = 0 + state = self.env.reset() + for _ in range(self.horizon): + self.exploration_policy.step_time() + action = self.policy(state) + next_state, reward, done, info = self.env.step(action) + self.record(state, action, reward, next_state, done, info) + state = next_state + total_reward += reward + if done: + break + return total_reward + def record(self, state, action, reward, next_state, done, info): """ Record a transition by performing a Deep Q-Network iteration
Relax external update revision checking. Allow for a revision update from the same revision as the last tested.
@@ -55,7 +55,7 @@ def handle_update(testcase, revision, stacktrace, error):
   last_tested_revision = (
       testcase.get_metadata('last_tested_revision') or
       testcase.crash_revision)
-  if revision <= last_tested_revision:
+  if revision < last_tested_revision:
     logs.log_warn(f'Revision {revision} less than previously tested '
                   f'revision {last_tested_revision}.')
     return
Clean-up StudioML dependencies and remove version locks where possible.
-pip==20.0.2
+pip
 setuptools_scm
 setuptools_scm_git_archive
@@ -6,42 +6,36 @@ setuptools_scm_git_archive
 configparser
 numpy
-h5py
-pillow
 flask
-jinja2
 cma
 apscheduler
 pycryptodome
-PyNaCl==1.3.0
+PyNaCl
 requests
 requests_toolbelt
 python_jwt
 sseclient
-timeout_decorator
 terminaltables
-PyYAML==5.3.1
+PyYAML
 google-api-python-client
 google-cloud-storage <= 1.5.0
 google-cloud-pubsub <= 0.28.3
 google-gax <= 0.15.14
 google-auth-httplib2
-
 oauth2client==3.0.0
-boto3==1.12.37
-rsa==3.4.2
+
+boto3
+rsa
 filelock
 pygtail
-pika == 1.1.0
-
-cachetools == 2.0.1
+pika
 
-tensorflow==1.15.2
+tensorflow
regen migration: use Panda safety parameters no magic numbers
@@ -15,6 +15,8 @@ from cereal.visionipc import VisionIpcServer, VisionStreamType
 from common.params import Params
 from common.realtime import Ratekeeper, DT_MDL, DT_DMON, sec_since_boot
 from common.transformations.camera import eon_f_frame_size, eon_d_frame_size, tici_f_frame_size, tici_d_frame_size
+from panda.python import Panda
+from selfdrive.car.toyota.values import EPS_SCALE
 from selfdrive.manager.process import ensure_running
 from selfdrive.manager.process_config import managed_processes
 from selfdrive.test.process_replay.process_replay import FAKEDATA, setup_env, check_enabled
@@ -30,8 +32,8 @@ def replay_panda_states(s, msgs):
 
   # TODO: new safety params from flags, remove after getting new routes for Toyota
   safety_param_migration = {
-    "TOYOTA PRIUS 2017": 578,
-    "TOYOTA RAV4 2017": 329
+    "TOYOTA PRIUS 2017": EPS_SCALE["TOYOTA PRIUS 2017"] | Panda.FLAG_TOYOTA_STOCK_LONGITUDINAL,
+    "TOYOTA RAV4 2017": EPS_SCALE["TOYOTA RAV4 2017"] | Panda.FLAG_TOYOTA_ALT_BRAKE,
   }
 
   # Migrate safety param base on carState
Simplify code by removing exec() Supercedes
@@ -245,18 +245,7 @@ class IOLoop(object):
         self._poller.poll()
 
 
-# Define a base class for deriving abstract base classes for compatibility
-# between python 2 and 3 (metaclass syntax changed in Python 3). Ideally, would
-# use `@six.add_metaclass` or `six.with_metaclass`, but pika traditionally has
-# resisted external dependencies in its core code.
-if pika.compat.PY2:
-    class _AbstractBase(object):  # pylint: disable=R0903
-        """PY2 Abstract base for _PollerBase class"""
-        __metaclass__ = abc.ABCMeta
-else:
-    # NOTE: Wrapping in exec, because
-    # `class _AbstractBase(metaclass=abc.ABCMeta)` fails to load on python 2.
-    exec('class _AbstractBase(metaclass=abc.ABCMeta): pass')  # pylint: disable=W0122
+_AbstractBase = abc.ABCMeta('_AbstractBase', (object,), {})
 
 
 class _PollerBase(_AbstractBase):  # pylint: disable=R0902
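For context, a short sketch of the metaclass-call technique the patch uses in place of exec(); the Poller class here is illustrative, not pika's:

import abc

# Calling the metaclass directly builds the base class without the
# Python-2-vs-3 `metaclass=` syntax difference.
_AbstractBase = abc.ABCMeta('_AbstractBase', (object,), {})

class Poller(_AbstractBase):
    @abc.abstractmethod
    def poll(self):
        ...

# Poller() raises TypeError because poll() is still abstract.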
One-line change to readme: made the venv segment separate from the Windows installation section.
@@ -77,6 +77,8 @@ Not sure. Installing the Python package plyvel seems to require C++ compiler sup
 
 -------------------
 
+### Virtual Environment
+
 Now navigate into the project, make a Python 3 virtual environment and activate it via
Update fhmm_exact.py Trying to solve
@@ -491,7 +491,7 @@ class FHMM(Disaggregator):
 
                 # Copy mains data to disag output
                 output_datastore.append(key=mains_data_location,
-                                        value=pd.DataFrame(chunk, columns=cols))
+                                        value=pd.DataFrame(chunk, columns=cols, dtype='float32'))
 
             if data_is_available:
                 self._save_metadata_for_disaggregation(
ceph-iscsi: add ceph-iscsi stable repositories This commit adds support for the ceph-iscsi stable repository when ceph_repository is set to 'community', instead of always using the devel repositories. We're still using the devel repositories for rtslib and tcmu-runner in both cases (dev and community).
- ceph-iscsi-config when: not use_new_ceph_iscsi | bool - - name: set_fact ceph_iscsi_repos - set_fact: - ceph_iscsi_repos: "{{ common_repos + iscsi_base }}" - - - name: set_fact ceph_iscsi_pkgs - set_fact: - ceph_iscsi_pkgs: "{{ common_pkgs + iscsi_base }}" - - name: when ceph_iscsi_config_dev is true when: - ceph_origin == 'repository' - - ceph_repository == 'dev' + - ceph_repository in ['dev', 'community'] - ceph_iscsi_config_dev | bool block: - - name: fetch ceph-iscsi development repository - uri: - url: https://shaman.ceph.com/api/repos/{{ item }}/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo - return_content: yes - register: ceph_iscsi_config_dev_yum_repo - with_items: "{{ ceph_iscsi_repos }}" + - name: ceph-iscsi dependency repositories + get_url: + url: 'https://shaman.ceph.com/api/repos/{{ item }}/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo' + dest: '/etc/yum.repos.d/{{ item }}-dev.repo' + force: true + with_items: "{{ common_repos }}" + + - name: ceph-iscsi development repository + get_url: + url: 'https://shaman.ceph.com/api/repos/{{ item }}/{{ ceph_dev_branch }}/{{ ceph_dev_sha1 }}/{{ ansible_distribution | lower }}/{{ ansible_distribution_major_version }}/repo' + dest: '/etc/yum.repos.d/{{ item }}-dev.repo' + force: true + with_items: '{{ iscsi_base }}' + when: ceph_repository == 'dev' - - name: configure ceph-iscsi-config development repository - copy: - content: "{{ item.0.content }}" - dest: "/etc/yum.repos.d/{{ item.1 }}-dev.repo" - owner: root - group: root - backup: yes - with_together: - - "{{ ceph_iscsi_config_dev_yum_repo.results }}" - - "{{ ceph_iscsi_repos }}" + - name: ceph-iscsi stable repository + get_url: + url: 'https://download.ceph.com/ceph-iscsi/{{ "3" if use_new_ceph_iscsi | bool else "2" }}/rpm/el{{ ansible_distribution_major_version }}/ceph-iscsi.repo' + dest: /etc/yum.repos.d/ceph-iscsi.repo + force: true + when: ceph_repository == 'community' - name: install ceph iscsi package package: - name: "{{ item }}" + name: "{{ common_pkgs + iscsi_base }}" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" register: result until: result is succeeded - with_items: "{{ ceph_iscsi_pkgs }}" - name: check the status of the target.service override stat:
Fees should always be in XCH mojos units Fees are always XCH mojos
@@ -219,7 +219,7 @@ async def send(args: dict, wallet_client: WalletRpcClient, fingerprint: int) ->
             print(f"Wallet id: {wallet_id} not found.")
             return
 
-    final_fee = uint64(int(fee * mojo_per_unit))
+    final_fee: uint64 = uint64(int(fee * units["chia"]))  # fees are always in XCH mojos
     final_amount: uint64 = uint64(int(amount * mojo_per_unit))
     final_min_coin_amount: uint64 = uint64(int(min_coin_amount * mojo_per_unit))
     final_max_coin_amount: uint64 = uint64(int(max_coin_amount * mojo_per_unit))
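As a worked example of the unit handling, assuming the standard conversion of 1 XCH = 10**12 mojos:

MOJO_PER_XCH = 10 ** 12          # assumed conversion factor
fee_xch = 0.00005
final_fee = int(fee_xch * MOJO_PER_XCH)
print(final_fee)                 # 50000000 mojos, independent of the wallet's own unit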
Rename helper function to clarify purpose modified: pypeit/spectrographs/ldt_deveny.py
@@ -61,8 +61,8 @@ class LDTDeVenySpectrograph(spectrograph.Spectrograph):
         binning = self.get_meta_value(self.get_headarr(hdu), 'binning')
         gain = np.atleast_1d(hdu[0].header['GAIN'])
         ronoise = np.atleast_1d(hdu[0].header['RDNOISE'])
-        datasec = self.swap_section(hdu[0].header['TRIMSEC'])
-        oscansec = self.swap_section(hdu[0].header['BIASSEC'])
+        datasec = self.swap_row_col(hdu[0].header['TRIMSEC'])
+        oscansec = self.swap_row_col(hdu[0].header['BIASSEC'])
 
         # Detector
         detector_dict = dict(
@@ -374,7 +374,7 @@ class LDTDeVenySpectrograph(spectrograph.Spectrograph):
 
         return par
 
-    def swap_section(self, section_string):
+    def swap_row_col(self, section_string):
         """
         Swap the FITS header keywords TRIMSEC / BIASSEC into the order and
         numpy type needed for PypeIt.  The LDT/DeVeny FITS header lists the
fix subclass test on py36 Summary: Not sure why i pushed my luck and picked a weird class for this test case
 import sys
 import tempfile
 from functools import update_wrapper
-from typing import List
 
 import pytest
 
@@ -116,8 +115,8 @@ def test_is_subclass():
     assert not is_subclass(ListType, str)
 
     # type that aren't classes can be passed into is_subclass
-    assert not inspect.isclass(List[str])
-    assert not is_subclass(List[str], DagsterType)
+    assert not inspect.isclass(2)
+    assert not is_subclass(2, DagsterType)
 
 
 @pytest.mark.skipif(
Disable tripleo-ci-fedora-28-standalone job tripleo-ci-fedora-28-standalone is deprecated and should be disabled in all tripleo repos. Task:
         dependencies: *deps_unit_lint
     - tripleo-ci-centos-7-standalone:
         dependencies: *deps_unit_lint
-    - tripleo-ci-fedora-28-standalone:
-        dependencies: *deps_unit_lint
     - tripleo-ci-centos-7-standalone-upgrade-stein:
         dependencies: *deps_unit_lint
     - tripleo-ci-centos-7-scenario000-multinode-oooq-container-upgrades:
set sym_offset if using XNU symbols This is to fix a bug with the hv raw binary patch that caused XNU symbols to stop working.
@@ -1541,7 +1541,7 @@ class HV(Reloadable):
 
         self.p.hv_set_time_stealing(False)
 
-    def load_raw(self, image, entryoffset=0x800):
+    def load_raw(self, image, entryoffset=0x800, use_xnu_symbols=False, vmin=0):
         sepfw_start, sepfw_length = self.u.adt["chosen"]["memory-map"].SEPFW
         tc_start, tc_size = self.u.adt["chosen"]["memory-map"].TrustCache
         if hasattr(self.u.adt["chosen"]["memory-map"], "preoslog"):
@@ -1613,6 +1613,9 @@ class HV(Reloadable):
         self.tba.devtree = self.adt_base - phys_base + self.tba.virt_base
         self.tba.top_of_kernel_data = guest_base + image_size
 
+        if use_xnu_symbols == True:
+            self.sym_offset = vmin - guest_base + self.tba.phys_base - self.tba.virt_base
+
         self.iface.writemem(guest_base + self.bootargs_off, BootArgs.build(self.tba))
 
         print("Setting secondary CPU RVBARs...")
@@ -1668,7 +1671,7 @@ class HV(Reloadable):
         #image = macho.prepare_image(load_hook)
         image = macho.prepare_image()
-        self.load_raw(image, entryoffset=(macho.entry - macho.vmin))
+        self.load_raw(image, entryoffset=(macho.entry - macho.vmin), use_xnu_symbols=self.xnu_mode, vmin=macho.vmin)
 
     def update_pac_mask(self):
Modify lightbox to only display valid images and YT Videos. This modifies the lightbox to only display images inside the ".message_inline_image" class, rather than all images inside the message body, which currently includes things like the bot icon.
@@ -10,7 +10,7 @@ var asset_map = {
 
 function render_lightbox_list_images(preview_source) {
     if (!is_open) {
-        var images = Array.prototype.slice.call($(".focused_table .messagebox-content img"));
+        var images = Array.prototype.slice.call($(".focused_table .message_inline_image img"));
         var $image_list = $("#lightbox_overlay .image-list").html("");
 
         images.forEach(function (img) {
Fix formatting Summary: See title Test Plan: BK
@@ -38,12 +38,7 @@ def _do_setup(name='dagster-aws'):
         ],
         packages=find_packages(exclude=['test']),
         include_package_data=True,
-        install_requires=[
-            'boto3>=1.9',
-            'dagster',
-            'psycopg2-binary',
-            'requests',
-        ],
+        install_requires=['boto3>=1.9', 'dagster', 'psycopg2-binary', 'requests',],
         extras_require={'pyspark': ['dagster-pyspark']},
         zip_safe=False,
     )
RawConfigParser dict_type argument should be a Type Fixes
 import sys
 from typing import (AbstractSet, MutableMapping, Mapping, Dict, Sequence, List,
                     Union, Iterable, Iterator, Callable, Any, IO, overload,
-                    Optional, Pattern, TypeVar)
+                    Optional, Pattern, Type, TypeVar)
 
 # Types only used in type comments only
 from typing import Optional, Tuple  # noqa
 
@@ -57,7 +57,7 @@ class LegacyInterpolation(Interpolation): ...
 
 class RawConfigParser(_parser):
     def __init__(self,
                  defaults: Optional[_section] = ...,
-                 dict_type: Mapping[str, str] = ...,
+                 dict_type: Type[Mapping[str, str]] = ...,
                  allow_no_value: bool = ...,
                  *,
                  delimiters: Sequence[str] = ...,
Make test_server.py more elegant and simple Use only one line for mocking network resources like ports and networks in test_server.py. Depends-On:
@@ -413,24 +413,18 @@ class TestServerCreate(TestServer):
         network_client = self.app.client_manager.network
         network_client.find_network = find_network
         network_client.find_port = find_port
-        network_resource = mock.Mock()
-        network_resource.id = 'net1_uuid'
-        port1_resource = mock.Mock()
-        port1_resource.id = 'port1_uuid'
-        port2_resource = mock.Mock()
-        port2_resource.id = 'port2_uuid'
+        network_resource = mock.Mock(id='net1_uuid')
+        port1_resource = mock.Mock(id='port1_uuid')
+        port2_resource = mock.Mock(id='port2_uuid')
         find_network.return_value = network_resource
         find_port.side_effect = (lambda port_id, ignore_missing:
                                  {"port1": port1_resource,
                                   "port2": port2_resource}[port_id])
 
         # Mock sdk APIs.
-        _network = mock.Mock()
-        _network.id = 'net1_uuid'
-        _port1 = mock.Mock()
-        _port1.id = 'port1_uuid'
-        _port2 = mock.Mock()
-        _port2.id = 'port2_uuid'
+        _network = mock.Mock(id='net1_uuid')
+        _port1 = mock.Mock(id='port1_uuid')
+        _port2 = mock.Mock(id='port2_uuid')
         find_network = mock.Mock()
         find_port = mock.Mock()
         find_network.return_value = _network
@@ -609,8 +603,7 @@ class TestServerCreate(TestServer):
         find_port = mock.Mock()
         network_client = self.app.client_manager.network
         network_client.find_port = find_port
-        port_resource = mock.Mock()
-        port_resource.id = 'port1_uuid'
+        port_resource = mock.Mock(id='port1_uuid')
         find_port.return_value = port_resource
 
         self.assertRaises(exceptions.CommandError,
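The one-line form relies on Mock accepting arbitrary attribute values in its constructor; a tiny sketch with an illustrative attribute name:

from unittest import mock

# Equivalent to: port = mock.Mock(); port.id = 'port1_uuid'
port = mock.Mock(id='port1_uuid')
assert port.id == 'port1_uuid'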
Check edit permissions on dataverse file target instead of node [#PLAT-578]
@@ -40,7 +40,7 @@ class DataverseFile(DataverseFileNode, File):
             version.identifier = revision
 
         user = user or _get_current_user()
-        if not user or not self.node.can_edit(user=user):
+        if not user or not self.target.has_permission(user, 'write'):
             try:
                 # Users without edit permission can only see published files
                 if not data['extra']['hasPublishedVersion']:
Unparsers: handle Skip parsers as Or alternatives TN:
@@ -590,7 +590,11 @@ class NodeUnparser(Unparser):
             # this field.
             field_unparser.always_absent = False
             for subparser in parser.parsers:
-                if not isinstance(subparser, Defer):
+                # Named parsing rules always create nodes, so we don't need to
+                # check Defer parsers. Skip parsers also create nodes, but most
+                # importantly they trigger a parsing error, so unparsers can
+                # ignore them.
+                if not isinstance(subparser, (Defer, Skip)):
                     NodeUnparser.from_parser(subparser.get_type(), subparser)
 
         elif isinstance(parser, _Extract):
Winpanda App: Fix Support For DC/OS Package Mutual Deps For The 'Start' Command 1) fix creating package map being passed to sorting procedure in the 'Start' command JIRA: - Winpanda App: Fix Support For DC/OS Package Mutual Deps For The 'Start' Command
@@ -305,7 +305,9 @@ class CmdStart(Command):
         pkg_manifests = (
             self.config.inst_storage.get_pkgactive(PackageManifest.load)
         )
-        packages_bulk = [Package(manifest=m) for m in pkg_manifests]
+        packages_bulk = {
+            m.pkg_id.pkg_name: Package(manifest=m) for m in pkg_manifests
+        }
 
         for package in cr_utl.pkg_sort_by_deps(packages_bulk):
             pkg_id = package.manifest.pkg_id
Update LogRhythmRest.md Updated RN description.
-- Added 2 new commands:
+Added 5 new commands.
+
  - lr-get-hosts
  - lr-get-alarm-data
  - lr-get-alarm-events
-- Added the lr-get-hosts, lr-get-alarm-data, lr-get-alarm-events, lr-get-networks, and lr-get-persons commands.
\ No newline at end of file
+ - lr-get-networks
+ - lr-get-persons
Increase storage perf case timeout The NFS test cases have a high setup time since they copy a 1TB file over NFS.
@@ -55,7 +55,7 @@ def _make_raid(node: Node, disk_list: List[str]) -> None:
     """,
 )
 class StoragePerformance(TestSuite):  # noqa
-    TIME_OUT = 6000
+    TIME_OUT = 12000
 
     @TestCaseMetadata(
         description="""
Decode passwords on profile updates as well Resolves
@@ -339,7 +339,7 @@ class User(db.Model):
         user.firstname = self.firstname if self.firstname else user.firstname
         user.lastname = self.lastname if self.lastname else user.lastname
         user.email = self.email if self.email else user.email
-        user.password = self.get_hashed_password(self.plain_text_password) if self.plain_text_password else user.password
+        user.password = self.get_hashed_password(self.plain_text_password).decode("utf-8") if self.plain_text_password else user.password
         user.avatar = self.avatar if self.avatar else user.avatar
 
         if enable_otp is not None:
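A minimal sketch of why the decode is needed, assuming a bcrypt-style hasher that returns bytes (which is what this project's get_hashed_password appears to do):

import bcrypt

hashed = bcrypt.hashpw(b"s3cret", bcrypt.gensalt())  # bytes, e.g. b'$2b$12$...'
stored = hashed.decode("utf-8")                      # store text, not bytes, in the password column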
release pre-commit: test translations we should be able to run on the stripped dir now
@@ -61,7 +61,7 @@ jobs:
           cp .pylintrc $STRIPPED_DIR
           cp mypy.ini $STRIPPED_DIR
           cd $STRIPPED_DIR
-          ${{ env.RUN }} "SKIP=test_translations pre-commit run --all"
+          ${{ env.RUN }} "pre-commit run --all"
 
   build_all:
     name: build all
UI tweak to --help-op formatting Move dependency list above flags Show "(required)" for required flags
@@ -324,14 +324,24 @@ def _print_op_help(opdef): out.write_text(opdef.description.replace("\n", "\n\n")) out.write_paragraph() out.write_text("Use 'guild run --help' for a list of options.") - flags = _format_op_flags_dl(opdef) - if flags: - _write_dl_section("Flags", flags, out) deps = _format_op_deps_dl(opdef) if deps: _write_dl_section("Dependencies", deps, out) + flags = _format_op_flags_dl(opdef) + if flags: + _write_dl_section("Flags", flags, out) click.echo(out.getvalue(), nl=False) +def _format_op_deps_dl(opdef): + model_resources = { + res.name: res.description or "" + for res in opdef.modeldef.resources + } + return [ + (dep.spec, model_resources.get(dep.spec) or "Help not available") + for dep in opdef.dependencies + ] + def _format_op_flags_dl(opdef): dl = [] seen = set() @@ -340,18 +350,15 @@ def _format_op_flags_dl(opdef): if flag.name in seen: continue seen.add(flag.name) - dl.append((flag.name, flag.description)) + dl.append((flag.name, _format_flag_desc(flag))) return dl -def _format_op_deps_dl(opdef): - model_resources = { - res.name: res.description or "" - for res in opdef.modeldef.resources - } - return [ - (dep.spec, model_resources.get(dep.spec, "Help not available")) - for dep in opdef.dependencies - ] +def _format_flag_desc(flag): + if flag.required: + lines = flag.description.split("\n") + return "\n".join(["{} (required)".format(lines[0])] + lines[1:]) + else: + return flag.description def _print_cmd(op): formatted = " ".join([_maybe_quote_arg(arg) for arg in op.cmd_args])
Update README-documentation.md Table of contents
 This document describes where the documentation is, how to write or update documentation for the Raytracing module and how to publish updated documentation through ReadTheDocs.
 
+[TOC]
+
 ## Reading documentation
 
 If you are a user of the raytracing package, you probably want to read the documentation online at https://raytracing.readthedocs.io/
Add AC Hardwire option to PowerPortTypeChoices Resolves FR
@@ -341,6 +341,8 @@ class PowerPortTypeChoices(ChoiceSet):
     TYPE_DC = 'dc-terminal'
     # Proprietary
     TYPE_SAF_D_GRID = 'saf-d-grid'
+    # Other
+    TYPE_OTHER = 'other'
 
     CHOICES = (
         ('IEC 60320', (
@@ -447,6 +449,9 @@ class PowerPortTypeChoices(ChoiceSet):
         ('Proprietary', (
             (TYPE_SAF_D_GRID, 'Saf-D-Grid'),
         )),
+        ('Other', (
+            (TYPE_OTHER, 'Other'),
+        )),
     )
Add missing quote in circuits.ipynb Adds a missing quote in the Circuits notebook that causes incorrect display of the doc: ![TempCirqOperations](https://user-images.githubusercontent.com/83899250/125640450-7fbcc35c-c8cb-4c03-af46-37f2ab235c11.png)
"source": [ "The above is not the only way one can construct moments, nor even the typical method, but illustrates that a `Moment` is just a collection of operations on disjoint sets of qubits.\n", "\n", - "Finally, at the top level a `Circuit` is an ordered series of `Moment` objects. The first `Moment` in this series contains the first `Operations that will be applied. Here, for example, is a simple circuit made up of two moments:" + "Finally, at the top level a `Circuit` is an ordered series of `Moment` objects. The first `Moment` in this series contains the first `Operations` that will be applied. Here, for example, is a simple circuit made up of two moments:" ] }, {
add `_aSrc` add VTEMwaveform
@@ -57,6 +57,42 @@ class StepOffWaveform(BaseWaveform): else: return 0. +class RampOffWaveform(BaseWaveform): + + eps = 1e-9 + offTime = 0. + + def __init__(self, offTime=0.): + BaseWaveform.__init__(self, offTime=offTime, hasInitialFields=True) + + def eval(self, time): + if abs(time-0.) < self.eps: + return 1. + elif time < self.offTime: + return -1. / self.offTime * (time - self.offTime) + else: + return 0. + +class VTEMWaveform(BaseWaveform): + + eps = 1e-9 + offTime = 4.2e-3 + peakTime = 2.73e-3 + a = 3. + + def __init__(self, offTime=4.2e-3, peakTime=2.73e-3, a=3.): + BaseWaveform.__init__( + self, offTime=offTime, peakTime=peakTime, a=a, hasInitialFields=False + ) + + def eval(self, time): + if time < self.peakTime: + return (1. - np.exp(-self.a*time/self.peakTime)) / (1.-np.exp(-self.a)) + elif (time < self.offTime) and (time > self.peakTime): + return -1. / (self.offTime-self.peakTime) * (time - self.offTime) + else: + return 0. + class RawWaveform(BaseWaveform): @@ -206,18 +242,16 @@ class MagDipole(BaseTDEMSrc): self.loc, obsLoc, component, mu=self.mu, moment=self.moment ) - def _bSrc(self, prob): + def _aSrc(self, prob): if prob._formulation == 'EB': gridX = prob.mesh.gridEx gridY = prob.mesh.gridEy gridZ = prob.mesh.gridEz - C = prob.mesh.edgeCurl elif prob._formulation == 'HJ': gridX = prob.mesh.gridFx gridY = prob.mesh.gridFy gridZ = prob.mesh.gridFz - C = prob.mesh.edgeCurl.T if prob.mesh._meshType is 'CYL': if not prob.mesh.isSymmetric: @@ -232,7 +266,16 @@ class MagDipole(BaseTDEMSrc): az = self._srcFct(gridZ, 'z') a = np.concatenate((ax, ay, az)) - return C*a + return a + + def _bSrc(self, prob): + if prob._formulation == 'EB': + C = prob.mesh.edgeCurl + + elif prob._formulation == 'HJ': + C = prob.mesh.edgeCurl.T + + return C*self._aSrc(prob) def bInitial(self, prob):
add link in urls.py to live update's multi-page app example using a function as the layout
# -*- coding: utf-8 -*- import dash_core_components as dcc +import dash_html_components as html from tutorial import styles layout = [dcc.Markdown(''' @@ -171,11 +172,15 @@ function. A few notes: - Each page can have interactive elements even though those elements may not be in the initial view. Dash handles these "dynamically generated" components gracefully: as they are rendered, they will trigger the -callbacks with their initial values. -- Since we're adding callbacks to elements that don't exist in the app.layout, +callbacks with their initial values.'''), + html.Ul(html.Li(['''Since we're adding callbacks to elements that don't exist in the app.layout, Dash will raise an exception to warn us that we might be doing something wrong. In this case, we're adding the elements through a callback, so we can -ignore the exception by setting `app.config.suppress_callback_exceptions = True` +ignore the exception by setting `app.config.suppress_callback_exceptions = True`. +It is also possible to do this without suppressing callback exceptions. See the example +"Dynamically Create a Layout for Multi-Page App Validation" in ''', + dcc.Link('Live Updates', href='/live-updates'), 'for more details.'])), + dcc.Markdown(''' - You can modify this example to import the different page's `layout`s in different files. - This Dash Userguide that you're looking at is itself a multi-page Dash app, using rendered with these same principles.
Changed order_by parameter name from camelCase to snake_case Fix: the OpenApiParameter for order_by was missing.
@@ -59,8 +59,8 @@ class FaceListView(ListViewSet):
         ):
             inferred = True
         conditional_filter = Q(person_label_is_inferred=inferred)
-        if self.request.query_params.get("orderby"):
-            if self.request.query_params.get("orderby").lower() == "date":
+        if self.request.query_params.get("order_by"):
+            if self.request.query_params.get("order_by").lower() == "date":
                 order_by = ["photo__exif_timestamp", "-person_label_probability", "id"]
         return (
             Face.objects.filter(
@@ -76,6 +76,7 @@ class FaceListView(ListViewSet):
         parameters=[
             OpenApiParameter("person", OpenApiTypes.STR),
             OpenApiParameter("inferred", OpenApiTypes.BOOL),
+            OpenApiParameter("order_by", OpenApiTypes.STR),
         ],
     )
     def list(self, *args, **kwargs):
Adding CrossBrowserTesting remote server example This illustrates how to run selenium base tests on CrossBrowserTesting remote devices.
@@ -69,6 +69,11 @@ Here's how to connect to a TestingBot Selenium Grid server for running tests:
 ```bash
 pytest my_first_test.py --server=USERNAME:[email protected] --port=80
 ```
+Here's how to connect to a CrossBrowserTesting Selenium Grid server for running tests:
+
+```bash
+pytest my_first_test.py --server=USERNAME:[email protected] --port=80
+```
 
 Or you can create your own Selenium Grid for test distribution.
 ([See this ReadMe for details](https://github.com/seleniumbase/SeleniumBase/blob/master/seleniumbase/utilities/selenium_grid/ReadMe.md))
Let: track the original source name for bindings TN:
@@ -1009,7 +1009,8 @@ class Let(AbstractExpression):
         # Create the variables this Let expression binds and expand the result
         # expression using them.
         self.vars = [
-            AbstractVariable(names.Name.from_lower(arg), create_local=True)
+            AbstractVariable(names.Name.from_lower(arg), create_local=True,
+                             source_name=names.Name.from_lower(arg))
             for arg in self.var_names
         ]
         self.expr = self.lambda_fn(*self.vars)
ceph-config: when using local_action set become: false There should be no need to use sudo when writing or using these files. It creates an issue when the user running ansible-playbook does not have sudo privs.
 - name: template ceph_conf_overrides
   local_action: copy content="{{ ceph_conf_overrides }}" dest="{{ fetch_directory }}/ceph_conf_overrides_temp"
+  become: false
   run_once: true
 
 - name: get rendered ceph_conf_overrides
   local_action: set_fact ceph_conf_overrides_rendered="{{ lookup('template', '{{ fetch_directory }}/ceph_conf_overrides_temp') | from_yaml }}"
+  become: false
   run_once: true
 
 - name: remove tmp template file for ceph_conf_overrides
   local_action: file path="{{ fetch_directory }}/ceph_conf_overrides_temp" state=absent
+  become: false
   run_once: true
 
 - name: "generate ceph configuration file: {{ cluster }}.conf"
docs: Tweak API docs to include RequestsFetcher This only documents the configurable attributes and not the inherited methods.
 Fetcher
 ============
 
-.. automodule:: tuf.ngclient.fetcher
+.. autoclass:: tuf.ngclient.FetcherInterface
    :undoc-members:
    :private-members: _fetch
+
+.. autoclass:: tuf.ngclient.RequestsFetcher
+   :no-inherited-members:
Allow Alt+Backspace to be handled by default handler This commit causes Backspace key presses with the Alt-modifier to be handled by Qt's default event handler. Previously only the Ctrl- and Shift-modifiers were considered. Needed to pass test_builtin_undo_redo in spyder/plugins/editor/widgets/tests/test_shortcuts.py
@@ -4316,6 +4316,7 @@ def keyPressEvent(self, event):
         key = event.key()
         text = to_text_string(event.text())
         has_selection = self.has_selected_text()
+        alt = event.modifiers() & Qt.AltModifier
         ctrl = event.modifiers() & Qt.ControlModifier
         shift = event.modifiers() & Qt.ShiftModifier
 
@@ -4404,7 +4405,7 @@ def keyPressEvent(self, event):
                 self.textCursor().endEditBlock()
         elif key == Qt.Key_Insert and not shift and not ctrl:
             self.setOverwriteMode(not self.overwriteMode())
-        elif key == Qt.Key_Backspace and not shift and not ctrl:
+        elif key == Qt.Key_Backspace and not shift and not ctrl and not alt:
             if has_selection or not self.intelligent_backspace:
                 # See spyder-ide/spyder#12663 for why redefining this
                 # action is necessary. Also see
remove liveness probes They cause unnecessary restarts.
@@ -47,15 +47,6 @@ spec:
         env: {{ include "studio.sharedEnvs" . | nindent 8 }}
         - name: SEND_USER_ACTIVATION_NOTIFICATION_EMAIL
           value: "true"
-        # liveness probes are checks for when a pod should be restarted.
-        livenessProbe:
-          httpGet:
-            path: /healthz
-            port: {{ .Values.studioApp.appPort }}
-          initialDelaySeconds: 120
-          periodSeconds: 5
-          # fail 10 times before we restart the pod
-          failureThreshold: 10
         ports:
         - containerPort: {{ .Values.studioApp.appPort }}
         # readiness probes are checks for when the pod is ready to serve traffic.
fix: leaderboard not work when lang is not English fix issue 11907
@@ -141,7 +141,7 @@ class Leaderboard {
 	}
 
 	create_date_range_field() {
-		let timespan_field = $(this.parent).find(`.frappe-control[data-original-title='Timespan']`);
+		let timespan_field = $(this.parent).find(`.frappe-control[data-original-title=${__('Timespan')}]`);
 		this.date_range_field = $(`<div class="from-date-field"></div>`).insertAfter(timespan_field).hide();
 
 		let date_field = frappe.ui.form.make_control({
sql: Add warning about get_lock and SELECT FOR UPDATE Add a warning about get_lock and SELECT FOR UPDATE to the optimistic locking section.
@@ -97,6 +97,8 @@ TiDB implements the asynchronous schema changes algorithm in F1. The Data Manipu
 
 TiDB implements an optimistic transaction model. Unlike MySQL, which uses row-level locking to avoid write conflict, in TiDB, the write conflict is checked only in the `commit` process during the execution of the statements like `Update`, `Insert`, `Delete`, and so on.
 
+Similarly, functions such as `GET_LOCK()` and `RELEASE_LOCK()` and statements such as `SELECT .. FOR UPDATE` do not work in the same way as in MySQL.
+
 **Note:** On the business side, remember to check the returned results of `commit` because even there is no error in the execution, there might be errors in the `commit` process.
 
 ### Large transactions
Replace npm with yarn Replacing npm with yarn allows dramatically speedup nodejs dependencies installation and therefore reduce gulp container image build time.
 - name: Install nodejs
   shell: curl -sL https://rpm.nodesource.com/setup_8.x | bash - && yum install -y nodejs
 
+- name: Install yarn repository
+  get_url:
+    url: https://dl.yarnpkg.com/rpm/yarn.repo
+    dest: /etc/yum.repos.d/yarn.repo
+
+- name: Install yarn
+  yum: name=yarn
+
 - name: Check node version
   command: node -v
 
-- name: Check npm version
-  command: npm -v
-
 - name: Create gulp group
   group:
     name: gulp
     mode: 0775
 
 - name: Install node packages
-  npm: path=/galaxy state=latest
+  command: yarn global add gulp
 
-- name: Install gulp globally
-  npm: name=gulp global=yes
-
-- name: Install less globally
-  npm: name=less global=yes
+- name: Install dependencies
+  command: yarn install
+  args:
+    chdir: /galaxy
 
 - name: Build static assets
   command: gulp build
move out quote() tests on deprecated escapes (handle warnings at runtime) close
 # test_lang.py
 
+import sys
+import warnings
+
 import pytest
 
 from graphviz.lang import quote, attr_list, nohtml
 
 
+@pytest.mark.parametrize('char', ['G', 'E', 'T', 'H', 'L', 'l'])
+def test_deprecated_escape(recwarn, char):
+    warnings.simplefilter('always')
+
+    escape = eval(r'"\%s"' % char)
+
+    if sys.version_info < (3, 6):
+        assert not recwarn
+    else:
+        assert len(recwarn) == 1
+        w = recwarn.pop(DeprecationWarning)
+        assert str(w.message).startswith('invalid escape sequence')
+
+    assert escape == '\\%s' % char
+    assert quote(escape) == '"\\%s"' % char
+
+
 @pytest.mark.parametrize('identifier, expected', [
     ('"spam"', r'"\"spam\""'),
     ('node', '"node"'),
     ('EDGE', '"EDGE"'),
     ('Graph', '"Graph"'),
     ('\\G \\N \\E \\T \\H \\L', '"\\G \\N \\E \\T \\H \\L"'),
-    ('\G \E \T \H \L', '"\\G \\E \\T \\H \\L"'),  # noqa: W605
     ('\\n \\l \\r', '"\\n \\l \\r"'),
-    ('\l', '"\\l"'),  # noqa: W605
    ('\r\n', '"\r\n"'),
     ('\\\\n', r'"\\n"'),
     (u'\u0665.\u0660', u'"\u0665.\u0660"'),
Re-raise KeyError exceptions. Chose to do this instead of returning a string.
@@ -360,7 +360,7 @@ class QuantumProgram(object): try: return self.__quantum_registers[name] except KeyError: - return "No quantum register of name " + name + raise KeyError('No quantum register "{0}"'.format(name)) def get_classical_register(self, name): """Return a Classical Register by name. @@ -373,7 +373,7 @@ class QuantumProgram(object): try: return self.__classical_registers[name] except KeyError: - return "No classical register of name" + name + raise KeyError('No classical register "{0}"'.format(name)) def get_quantum_register_names(self): """Return all the names of the quantum Registers.""" @@ -393,7 +393,7 @@ class QuantumProgram(object): try: return self.__quantum_program[name] except KeyError: - return "No quantum circuit of this name" + name + raise KeyError('No quantum circuit "{0}"'.format(name)) def get_circuit_names(self): """Return all the names of the quantum circuits.""" @@ -1034,7 +1034,7 @@ class QuantumProgram(object): ... } eg. {0: [2], 1: [2], 3: [2]} - intial_layout (dict): A mapping of qubit to qubit + initial_layout (dict): A mapping of qubit to qubit { ("q", strart(int)): ("q", final(int)), ...
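For illustration, a minimal self-contained sketch of the catch-and-re-raise pattern applied above; the Registry class here is hypothetical, not the QuantumProgram API, and only shows why raising beats returning an error string the caller could mistake for a valid result.

class Registry:
    def __init__(self):
        self._registers = {}

    def get(self, name):
        try:
            return self._registers[name]
        except KeyError:
            # Re-raise with a descriptive message instead of returning a string.
            raise KeyError('No register "{0}"'.format(name))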
ENH: option for slim OLS results summary Add slim parameter to summary function. slim=True generates a minimal results table; the default (slim=False) generates the original output.
@@ -2618,7 +2618,7 @@ class RegressionResults(base.LikelihoodModelResults): self, exog=exog, transform=transform, weights=weights, row_labels=row_labels, **kwargs) - def summary(self, yname=None, xname=None, title=None, alpha=.05): + def summary(self, yname=None, xname=None, title=None, alpha=.05, slim=False): """ Summarize the Regression Results. @@ -2696,6 +2696,15 @@ class RegressionResults(base.LikelihoodModelResults): ('BIC:', ["%#8.4g" % self.bic]) ] + if slim: + slimlist = ['Dep. Variable:', 'Model:', 'No. Observations:', + 'Covariance Type:', 'R-squared:', 'Adj. R-squared:', + 'F-statistic:', 'Prob (F-statistic):'] + diagn_left = [] + diagn_right = [] + top_left = [elem for elem in top_left if elem[0] in slimlist] + top_right = [elem for elem in top_right if elem[0] in slimlist] + else: diagn_left = [('Omnibus:', ["%#6.3f" % omni]), ('Prob(Omnibus):', ["%#6.3f" % omnipv]), ('Skew:', ["%#6.3f" % skew]), @@ -2720,7 +2729,7 @@ class RegressionResults(base.LikelihoodModelResults): yname=yname, xname=xname, title=title) smry.add_table_params(self, yname=yname, xname=xname, alpha=alpha, use_t=self.use_t) - + if not slim: smry.add_table_2cols(self, gleft=diagn_left, gright=diagn_right, yname=yname, xname=xname, title="")
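A hedged usage sketch, assuming a statsmodels build that already includes this slim parameter; the data is synthetic and only illustrates the two output modes.

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(100, 2)))
y = X @ np.array([1.0, 2.0, -1.0]) + rng.normal(size=100)

res = sm.OLS(y, X).fit()
print(res.summary(slim=True))  # condensed table: model info, R-squared, F-statistic
print(res.summary())           # full output, including the diagnostics block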
Update remcos.txt IP:port shouldn't be missed.
@@ -1973,6 +1973,7 @@ u864246.tk # Reference: https://app.any.run/tasks/e9a9e116-924d-4411-a454-9a841c51c39d/ +185.244.30.123:5149 kirtasiye.myq-see.com # Reference: https://twitter.com/James_inthe_box/status/1245714128695521280 @@ -2005,4 +2006,5 @@ owensmith.linkpc.net # Reference: https://app.any.run/tasks/0618ea81-3606-4992-be9d-d296c03d679c/ +79.134.225.72:3800 vision2020success.ddns.net
TST: tighten up the timing a bit We don't need to wait a full second for the motor to finish moving.
@@ -574,7 +574,7 @@ def test_cleanup_after_pause(RE, unpause_func, hw): def test_sigint_three_hits(RE, hw): import time motor = hw.motor - motor.delay = 1 + motor.delay = .5 pid = os.getpid() @@ -595,10 +595,13 @@ def test_sigint_three_hits(RE, hw): RE(finalize_wrapper(self_sig_int_plan(), abs_set(motor, 0, wait=True))) end_time = ttime.time() - assert end_time - start_time < 0.2 # not enough time for motor to cleanup + # not enough time for motor to cleanup, but long enough to start + assert 0.05 < end_time - start_time < 0.2 RE.abort() # now cleanup + done_cleanup_time = ttime.time() - assert done_cleanup_time - end_time > 0.3 + # this should be 0.5 (the motor.delay) above, leave sloppy for CI + assert 0.6 > done_cleanup_time - end_time > 0.3 @pytest.mark.skipif(sys.version_info < (3, 5),
Pin fastai. fastai 2.1.x uses torch 1.7. Unfortunately, we can't upgrade to torch 1.7 yet. We need to wait until all the libraries depending on torch support 1.7.
@@ -374,7 +374,8 @@ RUN pip install bcolz && \ pip install widgetsnbextension && \ pip install pyarrow && \ pip install feather-format && \ - pip install fastai && \ + # b/172491515: Unpin after upgrading to torch 1.7 + pip install fastai==2.0.19 && \ pip install allennlp && \ python -m spacy download en && python -m spacy download en_core_web_lg && \ apt-get install -y ffmpeg && \
update message content of the persistent view The messages were intended for a button that would automatically assign all available roles to the interactor. This changes them, since that is not the intended behavior.
@@ -161,7 +161,7 @@ class ShowAllSelfAssignableRolesButton(discord.ui.Button): def __init__(self, assignable_roles: list[AssignableRole]): super().__init__( style=discord.ButtonStyle.success, - label="Show available roles", + label="Show all self assignable roles", custom_id=self.CUSTOM_ID, row=1 ) @@ -181,7 +181,7 @@ class ShowAllSelfAssignableRolesButton(discord.ui.Button): class Subscribe(commands.Cog): """Cog to allow user to self-assign & remove the roles present in ASSIGNABLE_ROLES.""" - SELF_ASSIGNABLE_ROLES_MESSAGE = "Click on this button to earn all the currently available server roles" + SELF_ASSIGNABLE_ROLES_MESSAGE = "Click on this button to show all self assignable roles" def __init__(self, bot: Bot): self.bot = bot
CopyPrimitiveVariablesTest : Don't use `six` We don't need it now we are Python-3-only.
@@ -320,8 +320,7 @@ class CopyPrimitiveVariablesTest( GafferSceneTest.SceneTestCase ) : copy["filter"].setInput( sphereFilter["out"] ) copy["primitiveVariables"].setValue( "*" ) - with six.assertRaisesRegex( - self, + with self.assertRaisesRegex( RuntimeError, 'Cannot copy .* from "/cube" to "/sphere" because source and destination primitives have different topology. Turn on `ignoreIncompatible` to disable this error and ignore invalid primitive variables.' ) :
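For context, a standalone unittest sketch of the Python-3-only form used above; the six wrapper was only needed because Python 2's TestCase provided assertRaisesRegexp rather than assertRaisesRegex. The test body here is illustrative.

import unittest

class ExampleTest(unittest.TestCase):
    def test_raises(self):
        # Python 3: assertRaisesRegex is a TestCase method, no six wrapper required.
        with self.assertRaisesRegex(ValueError, "invalid literal"):
            int("not a number")

if __name__ == "__main__":
    unittest.main()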
project_file.mako: refactor computation of compilation switches As a nice side effect, this repairs automatic loading of GDB helpers (using "-g" for C units). TN:
@@ -80,6 +80,13 @@ library project ${lib_name} is package Compiler is + ---------------------- + -- Common_Ada_Cargs -- + ---------------------- + + -- Compilation switches to use for Ada that do not depend on the build + -- mode. + -- If asked to, enable all warnings and treat them as errors, except: -- * conditional expressions used in tests that are known to be True -- or False at compile time (C), as this is very common in generated @@ -108,27 +115,50 @@ library project ${lib_name} is when others => null; end case; + --------------- + -- Mode_Args -- + --------------- + + -- Compilation switches for all languages that depend on the build mode + + Mode_Args := (); case Build_Mode is when "dev" => - - for Default_Switches ("Ada") use - Common_Ada_Cargs & ("-g", "-O0", "-gnatwe", "-gnata"); + Mode_Args := ("-g", "-O0"); when "prod" => -- Debug information is useful even with optimization for -- profiling, for instance. - -- + Mode_Args := ("-g", "-Ofast"); + end case; + + ------------------- + -- Ada_Mode_Args -- + ------------------- + + -- Compilation switches for Ada that depend on the build mode + + Ada_Mode_Args := (); + case Build_Mode is + when "dev" => + Ada_Mode_Args := ("-gnatwe", "-gnata"); + + when "prod" => -- -fnon-call-exceptions: Make it possible to catch exception due -- to invalid memory accesses even though -gnatp is present. + Ada_Mode_Args := ("-gnatp", "-gnatn2", "-fnon-call-exceptions"); + end case; + for Default_Switches ("Ada") use - Common_Ada_Cargs & ("-g", "-Ofast", "-gnatp", "-gnatn2", - "-fnon-call-exceptions"); + Mode_Args & Ada_Mode_Args & Common_Ada_Cargs; + for Default_Switches ("C") use Mode_Args; + case Build_Mode is + when "prod" => ## TODO: This extension point is added to change the flags of ## Libadalang specific extension files. It is a temporary ## workaround, waiting for QC05-038 to be fixed. ${exts.include_extension(ctx.ext("prod_additional_flags"))} - end case; end Compiler;
this should go back to running my tests with errors? Let's try it
@@ -53,7 +53,6 @@ install: - export AWKWARD_DEPLOYMENT=base - pip install --upgrade pyOpenSSL # for deployment - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then pip install pybind11 ; fi - - ln -s ../awkward-cpp/awkward/cpp awkward/cpp - python setup.py install - cd awkward-cpp - if [[ $TRAVIS_PYTHON_VERSION != pypy* ]] ; then python setup.py install ; fi
deprecate decorator but continue to support it for now
@@ -3,6 +3,7 @@ import json from datetime import datetime from uuid import uuid4 +import warnings from django.core.serializers.json import DjangoJSONEncoder from django.utils.translation import ugettext_lazy as _ @@ -157,14 +158,20 @@ class RegisterGenerator(object): self.is_default = is_default def __call__(self, generator_class): - collection = self.get_collection(self.repeater_cls) - collection.add_new_format(self.format_name, self.format_label, generator_class, - is_default=self.is_default) - return generator_class + warnings.warn( + "Usage of @RegisterGenerator as a decorator is deprecated. " + "Please put your payload generator classes in a tuple on your repeater class " + "called payload_generator_classes instead.", + DeprecationWarning) + return self.register_generator(generator_class, self.repeater_cls, self.format_name, + self.format_label, is_default=self.is_default) @classmethod - def register_generator(cls, generator_class, repeater_class, format_name, format_label, is_default): - return cls(repeater_class, format_name, format_label, is_default)(generator_class) + def register_generator(cls, generator_class, repeater_class, format_name, format_label, + is_default): + collection = cls.get_collection(repeater_class) + collection.add_new_format(format_name, format_label, generator_class, is_default) + return generator_class @classmethod def get_collection(cls, repeater_class):
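A minimal, self-contained sketch of the same deprecation pattern: the decorator form keeps working but emits a DeprecationWarning and delegates to an explicit classmethod. The names are illustrative stand-ins, not the HQ classes.

import warnings

class register_generator:
    _registry = {}

    def __init__(self, format_name):
        self.format_name = format_name

    def __call__(self, generator_class):
        # Old decorator usage still works, but warns and delegates.
        warnings.warn(
            "Usage as a decorator is deprecated; call register() directly.",
            DeprecationWarning,
        )
        return self.register(generator_class, self.format_name)

    @classmethod
    def register(cls, generator_class, format_name):
        cls._registry[format_name] = generator_class
        return generator_class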
Transfers: fix log message otherwise it prints: submitjob: {'transfers': [<rucio.core.transfer.DirectTransferDefinition object at 0x7f55fb224f60>], 'job_params': {...}}
@@ -194,7 +194,7 @@ def submitter(once=False, rses=None, partition_wait_time=10, logger(logging.INFO, 'Starting to submit transfers for %s (%s)', activity, transfertool_obj) for job in grouped_jobs: - logger(logging.DEBUG, 'submitjob: %s' % job) + logger(logging.DEBUG, 'submitjob: transfers=%s, job_params=%s' % ([str(t) for t in job['transfers']], job['job_params'])) submit_transfer(transfertool_obj=transfertool_obj, transfers=job['transfers'], job_params=job['job_params'], submitter='transfer_submitter', timeout=timeout, logger=logger)
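A tiny illustration of why the message format changed: '%s' on a list falls back to the repr() of each element, so mapping str() over the transfers is what produces readable output. Transfer here is a stand-in class, not the rucio one.

class Transfer:
    def __str__(self):
        return "transfer(scope:name src_rse->dst_rse)"

transfers = [Transfer()]
print('submitjob: %s' % transfers)                               # list repr: [<__main__.Transfer object at 0x...>]
print('submitjob: transfers=%s' % [str(t) for t in transfers])   # ['transfer(scope:name src_rse->dst_rse)']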
Fix typo in quickstart section The function should be between quotes like a string. cli-name = mypkg.mymodule:some_func => cli-name = "mypkg.mymodule:some_func"
@@ -222,7 +222,7 @@ The following configuration examples show how to accomplish this: .. code-block:: toml [project.scripts] - cli-name = mypkg.mymodule:some_func + cli-name = "mypkg.mymodule:some_func" When this project is installed, a ``cli-name`` executable will be created. ``cli-name`` will invoke the function ``some_func`` in the
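A quick check of why the quotes matter, using the standard-library tomllib (Python 3.11+): without quotes the value is not valid TOML at all, so the entry-point string must be quoted.

import tomllib

good = '[project.scripts]\ncli-name = "mypkg.mymodule:some_func"\n'
bad = '[project.scripts]\ncli-name = mypkg.mymodule:some_func\n'

print(tomllib.loads(good))
try:
    tomllib.loads(bad)
except tomllib.TOMLDecodeError as exc:
    print("invalid without quotes:", exc)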
Fixing the missing migration and wrong method name `torch.Tensor` has no `power` method; the correct method is `pow`.
@@ -80,7 +80,6 @@ class OnlinePreprocessor(torch.nn.Module): self._stft_args = {'center': True, 'pad_mode': 'reflect', 'normalized': False, 'onesided': True} # stft_args: same default values as torchaudio.transforms.Spectrogram & librosa.core.spectrum._spectrogram self._stft = partial(torch.stft, **self._win_args, **self._stft_args) - # self._magphase = partial(torchaudio.functional.magphase, power=2) ## [Deprecated] Removed since torchaudio 0.11.0, replaced below self._melscale = MelScale(sample_rate=sample_rate, n_mels=n_mels) self._mfcc_trans = MFCC(sample_rate=sample_rate, n_mfcc=n_mfcc, log_mels=True, melkwargs=self._win_args) self._istft = partial(torch.istft, **self._win_args, **self._stft_args) @@ -93,17 +92,8 @@ class OnlinePreprocessor(torch.nn.Module): # Replace the deprecated private member function self._magphase # Ref. https://github.com/pytorch/audio/issues/1337 # original: `self._magphase = partial(torchaudio.functional.magphase, power=2)` - - # Official API example - # ```python - # # Original - # from torchaudio.functional import magphase - # magnitude, phase = magphase(spectrogram, n) - # - # # Migrated - # magnitude, phase = spectrogram.abs().power(n), spectrogram.angle() - # ``` - return spectrogram.abs().power(2), spectrogram.angle() + spectrogram = torch.view_as_complex(spectrogram) + return spectrogram.abs().pow(exponent=2), spectrogram.angle() def _check_list(self, feat_list): if feat_list is None:
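A short sketch of the migrated magphase computation, assuming the (..., 2) real/imaginary layout that the older torch.stft output used; the tensor shape is made up for illustration. Note that torch.Tensor exposes pow(), not power().

import torch

spec_ri = torch.randn(1, 257, 100, 2)   # (batch, freq, frames, real/imag)
spec = torch.view_as_complex(spec_ri)   # complex-valued spectrogram
magnitude = spec.abs().pow(2)           # power spectrogram
phase = spec.angle()
print(magnitude.shape, phase.shape)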
lzip: add source mirror Suggested by
sources: "1.21": - url: "http://download.savannah.gnu.org/releases/lzip/lzip-1.21.tar.gz" + url: [ + "http://download.savannah.gnu.org/releases/lzip/lzip-1.21.tar.gz", + "https://download-mirror.savannah.gnu.org/releases/lzip/lzip-1.21.tar.gz", + ] sha256: "e48b5039d3164d670791f9c5dbaa832bf2df080cb1fbb4f33aa7b3300b670d8b" patches: "1.21":
Deprecate saml2 frontend sign_alg and digest_alg configuration options sign_alg and digest_alg are deprecated; instead, use signing_algorithm and digest_algorithm configurations under the service/idp configuration path (not under policy/default)
@@ -377,18 +377,18 @@ class SAMLFrontend(FrontendModule, SAMLBaseModule): # Construct arguments for method create_authn_response # on IdP Server instance args = { + # Add the SP details + **resp_args, + # AuthnResponse data 'identity': ava, 'name_id': name_id, 'authn': auth_info, 'sign_response': sign_response, 'sign_assertion': sign_assertion, 'encrypt_assertion': encrypt_assertion, - 'encrypted_advice_attributes': encrypted_advice_attributes + 'encrypted_advice_attributes': encrypted_advice_attributes, } - # Add the SP details - args.update(**resp_args) - try: args['sign_alg'] = getattr(xmldsig, sign_alg) except AttributeError as e: @@ -413,6 +413,16 @@ class SAMLFrontend(FrontendModule, SAMLBaseModule): logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg) logger.debug(logline) + if 'sign_alg' in args or 'digest_alg' in args: + msg = ( + "sign_alg and digest_alg are deprecated; " + "instead, use signing_algorithm and digest_algorithm " + "under the service/idp configuration path " + "(not under policy/default)." + ) + logline = lu.LOG_FMT.format(id=lu.get_session_id(context.state), message=msg) + logger.warning(msg) + resp = idp.create_authn_response(**args) http_args = idp.apply_binding( resp_args["binding"], str(resp), resp_args["destination"],
tpm_main: Only access agent configuration if needed Do not access the agent configuration from APIs called by other components.
@@ -52,6 +52,7 @@ class tpm(tpm_abstract.AbstractTPM): self.__get_tpm2_tools() + if self.need_hw_tpm: # We don't know which algs the TPM supports yet self.supported["encrypt"] = set() self.supported["hash"] = set() @@ -64,7 +65,6 @@ class tpm(tpm_abstract.AbstractTPM): ek_handle = config.get("agent", "ek_handle") - if self.need_hw_tpm: if ek_handle == "generate": # Start up the TPM self.__startup_tpm() @@ -85,9 +85,6 @@ class tpm(tpm_abstract.AbstractTPM): enabled_pcrs = self.__get_pcrs() if not enabled_pcrs.get(str(defaultHash)): raise Exception(f"No PCR banks enabled for hash algorithm specified: {defaultHash}") - else: - # Assume their defaults are sane? - pass self.defaults["hash"] = algorithms.Hash(defaultHash) self.defaults["encrypt"] = defaultEncrypt
GDB helpers: fix token string fetching for Python3 TN:
@@ -131,9 +131,7 @@ class Token: (first - src_buffer['P_BOUNDS']['LB0'])) char = gdb.lookup_type('character').pointer() - return (text_addr.cast(char) - .string('latin-1', length=4 * length) - .decode('utf32')) + return text_addr.cast(char).string('utf32', length=4 * length) def __repr__(self) -> str: return '<Token {} {}/{} at {} {}>'.format(
Fix incorrect alias on mm_override.cfg The correct alias is `game_overrides` now due to updates.
-alias game_override +alias game_overrides blink_duration .2 cl_blobbyshadows 0 @@ -124,6 +124,8 @@ tf_clientsideeye_lookats 1 tracer_extra 1 violence_hgibs 1 violence_hblood 1 +violence_agibs 1 +violence_ablood 1 plugin_unload 0 echo "Competitive matchmaking settings applied"
Delete note about deprecated /etc/default config files The old-style /etc/default config files should not be mentioned in the 1.0 Sawtooth docs.
@@ -13,13 +13,6 @@ directory (``config_dir``). By default, configuration files are stored in ``/etc/sawtooth``; see :doc:`configuring_sawtooth/path_configuration_file` for more information on the config directory location. -.. Note:: - - Sawtooth also includes a deprecated set of configuration files in - ``/etc/default``. These files have the same name as the component they - configure and contain the command-line arguments passed when the service - is started. - In addition, the Sawtooth log output can be configured with a log config file in `TOML <https://github.com/toml-lang/toml>`_ or `YAML <http://yaml.org>`_ format. By default, Sawtooth stores error and debug log messages
Update location of `usage.rst` to fix manpage compilation `usage.rst` has been moved from `doc/en` to `doc/en/how-to`, so the `man_pages` configuration value needs to be updated to the new location so that we don't get this warning: writing... WARNING: "man_pages" config value references unknown document usage
@@ -320,7 +320,7 @@ latex_domain_indices = False # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [("usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)] +man_pages = [("how-to/usage", "pytest", "pytest usage", ["holger krekel at merlinux eu"], 1)] # -- Options for Epub output ---------------------------------------------------
Ignore RequestsDependencyWarning for cryptography during tests The minimum recommended cryptography_version is hardcoded in requests/__init__.py.[1] [1]:
@@ -23,6 +23,12 @@ except ImportError: # Python 2.7 from collections import Mapping from types import ModuleType +try: + from cryptography import __version__ as cryptography_version + cryptography_version = list(map(int, cryptography_version.split('.'))) +except ImportError: + cryptography_version = None + import pywikibot from pywikibot.comms import threadedhttp from pywikibot import config @@ -641,6 +647,8 @@ def execute(command, data_in=None, timeout=0, error=None): command.insert(1, '-W ignore:{0}:DeprecationWarning'.format( 'Pywikibot will soon drop support for Python 2.7.2 and 2.7.3, ' 'please update your Python.')) + if cryptography_version and cryptography_version < [1, 3, 4]: + command.insert(1, '-W ignore:Old version of cryptography:Warning') # Any environment variables added on Windows must be of type # str() on Python 2. if OSWIN32 and PY2:
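A standalone sketch of how such a -W filter behaves: the message part matches the start of the emitted warning text and the "Warning" category matches any warning subclass. The warning text below is made up to mimic the requests message.

import subprocess
import sys

cmd = [
    sys.executable,
    "-W", "ignore:Old version of cryptography:Warning",
    "-c", "import warnings; warnings.warn('Old version of cryptography (1.2.3) may cause slowdown.')",
]
completed = subprocess.run(cmd, capture_output=True, text=True)
print(completed.stderr or "(warning suppressed)")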
ignore_missing_imports reduces 65 out of 77 errors Before: Found 77 errors in 37 files (checked 67 source files) After: Found 12 errors in 4 files (checked 67 source files)
@@ -13,6 +13,9 @@ ignore = E501 # line too long. Long-line code is reformated by black; remaining long lines in docstrings are OK W503 # line break before binary operator. W503 is incompatible with PEP 8, don't use it +[mypy] +ignore_missing_imports = True # Suppress all missing import errors for all libraries + [build_sphinx] all-files = 1 source-dir = docs/source
Match load_rc_for_rally logic to load_rc_hook This makes the rally extras loading logic compatible with devstack-gate and unblocks the rally job. Closes-Bug:
@@ -44,7 +44,12 @@ function load_conf_hook { # Tweak gate configuration for our rally scenarios function load_rc_for_rally { for file in $(ls $RALLY_EXTRA_DIR/*.setup); do - $DSCONF merge_lc $LOCAL_CONF $file + tmpfile=$(tempfile) + config=$(cat $file) + echo "[[local|localrc]]" > $tmpfile + $DSCONF setlc_raw $tmpfile "$config" + $DSCONF merge_lc $LOCAL_CONF $tmpfile + rm -f $tmpfile done }
Reset cases of names in duration.rst to upper Names were accidentally lowercased in the previous commit.
@@ -140,13 +140,13 @@ Here are some of the other testing procedures implemented by survdiff: .. ipython:: python :okwarning: - # fleming-Harrington with p=1, i.e. weight by pooled survival time + # Fleming-Harrington with p=1, i.e. weight by pooled survival time stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='fh', fh_p=1) - # gehan-Breslow, weight by number at risk + # Gehan-Breslow, weight by number at risk stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='gb') - # tarone-Ware, weight by the square root of the number at risk + # Tarone-Ware, weight by the square root of the number at risk stat, pv = sm.duration.survdiff(data.futime, data.death, data.sex, weight_type='tw')
Change stiffness to make major axis predictable With equal stiffness the orbit is circular and the major axis position can change due to calculation precision in different systems.
@@ -1444,6 +1444,15 @@ def test_unbalance(rotor7): def test_deflected_shape(rotor7): + # change to asymmetric stiffness to it is easier to get the major axis at the same place + bearing0 = BearingElement(0, kxx=1e6, kyy=2e6, cxx=1e3, cyy=1e3) + bearing1 = BearingElement(6, kxx=1e6, kyy=2e6, cxx=1e3, cyy=1e3) + rotor7 = Rotor( + shaft_elements=rotor7.shaft_elements, + disk_elements=rotor7.disk_elements, + bearing_elements=[bearing0, bearing1], + ) + forced = rotor7.run_unbalance_response( node=0, unbalance_magnitude=1, unbalance_phase=0, frequency=[50] ) @@ -1452,26 +1461,26 @@ def test_deflected_shape(rotor7): expected_x = np.array([0.0, 0.0625, 0.125, 0.1875, 0.25, 0.25, 0.3125, 0.375]) expected_y = np.array( [ - 0.00270725, - 0.00266953, - 0.00262725, - 0.002577, - 0.00252056, - 0.00252056, - 0.00246307, - 0.00239883, + 0.00274183, + 0.00269077, + 0.00263888, + 0.0025852, + 0.00252877, + 0.00252877, + 0.0024688, + 0.00240457, ] ) expected_z = np.array( [ - 0.00042256, - 0.00032067, - 0.00022188, - 0.0001715, - 0.00016696, - 0.00016696, - 0.00011911, - 0.0001152, + 5.72720829e-05, + 5.59724347e-05, + 5.46564072e-05, + 5.33052546e-05, + 5.19002311e-05, + 5.19002311e-05, + 5.04258251e-05, + 4.88678837e-05, ] ) assert_allclose(fig.data[-3]["x"][:8], expected_x, rtol=1e-4)
[EventPoster] 1.1.0 Edit event message when an event has ended. Fix up docstrings.
@@ -12,7 +12,7 @@ from .event_obj import Event, ValidImage class EventPoster(commands.Cog): """Create admin approved events/announcements""" - __version__ = "1.0.0" + __version__ = "1.1.0" __author__ = "TrustyJAID" def __init__(self, bot): @@ -94,6 +94,8 @@ class EventPoster(commands.Cog): embed=em, ) async with self.config.guild(ctx.guild).events() as events: + event = await Event.from_json(events[str(ctx.author.id)]) + await event.message.edit(content="This event has ended.") del events[str(ctx.author.id)] await ctx.tick() @@ -165,7 +167,7 @@ class EventPoster(commands.Cog): @event_settings.command(name="channel") @commands.guild_only() async def set_channel(self, ctx: commands.Context, channel: discord.TextChannel = None): - """Set the admin approval channel""" + """Set the Announcement channel for events""" if not channel.permissions_for(ctx.me).embed_links: return await ctx.send("I require `Embed Links` permission to use that channel.") save_channel = None
Include distinguished names in pillar_ldap results Includes the distinguished names (DNs) of LDAP entries in the results provided by the pillar_ldap module in map mode if 'dn' or 'distinguishedName' is included in the 'attrs' configuration option. Currently that is not possible because the DN is thrown away before the pillar data is formed.
@@ -201,6 +201,9 @@ def _result_to_dict(data, result, conf, source): data[source] = [] for record in result: ret = {} + if 'dn' in attrs or 'distinguishedName' in attrs: + log.debug('dn: %s', record[0]) + ret['dn'] = record[0] record = record[1] log.debug('record: %s', record) for key in record:
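For context, a small illustrative sketch (not the actual salt module) of the shape of python-ldap search results — a list of (dn, attrs) tuples — and how the DN is kept when 'dn' or 'distinguishedName' is requested; the entry data is invented.

# Each search result entry is a (dn, attrs) tuple, e.g. as returned by ldap search_s().
result = [
    ("cn=alice,ou=people,dc=example,dc=com",
     {"cn": [b"alice"], "mail": [b"alice@example.com"]}),
]
attrs = ["dn", "mail"]

for dn, record in result:
    entry = {}
    if "dn" in attrs or "distinguishedName" in attrs:
        entry["dn"] = dn                      # keep the DN instead of discarding it
    for key, values in record.items():
        if key in attrs:
            entry[key] = [v.decode() for v in values]
    print(entry)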
Update helpers.py Fix ValueError: too many values to unpack (expected 2)
@@ -81,6 +81,6 @@ def render_expression(expr: Expression_T, *args, if escape_args: return expr.format( *[escape(s) if isinstance(s, str) else s for s in args], - **{k: escape(v) if isinstance(v, str) else v for k, v in kwargs} + **{k: escape(v) if isinstance(v, str) else v for k, v in kwargs.items()} ) return expr.format(*args, **kwargs)
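The underlying pitfall in a few lines: iterating a dict directly yields only its keys, so two-name unpacking needs .items(). The example values are arbitrary.

kwargs = {"name": "Alice", "count": 3}

# for k, v in kwargs: ...  would try to unpack each key string into (k, v)
# and raise "ValueError: too many values to unpack (expected 2)".
escaped = {k: v for k, v in kwargs.items()}   # .items() yields (key, value) pairs
print(escaped)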
[deploy] fix sed for updating HAIL_VERSION I could not get the `\d` to work for me on an Ubuntu machine. The `[0-9]` range seems to work with extended regexps.
@@ -138,7 +138,7 @@ Image URL: \`us.gcr.io/broad-dsp-gcr-public/terra-jupyter-hail:$terra_jupyter_ha EOF mv $temp_changelog terra-jupyter-hail/CHANGELOG.md -sed -i "/ENV HAIL_VERSION/s/\d\+\.\d\+\.\d\+/$HAIL_PIP_VERSION/" terra-jupyter-hail/Dockerfile +sed -Ei "/ENV HAIL_VERSION/s/[0-9]+\.[0-9]+\.[0-9]+/${HAIL_PIP_VERSION}/" terra-jupyter-hail/Dockerfile git commit -m "Update hail to version $HAIL_PIP_VERSION" -- config/conf.json terra-jupyter-hail git push -f origin HEAD curl -XPOST -H @$GITHUB_OAUTH_HEADER_FILE https://api.github.com/repos/DataBiosphere/terra-docker/pulls -d "{
Cancel PoET block on BlockNotReady Instead of retrying indefinitely, cancel the block if a BlockNotReady status is returned. Retrying indefinitely can cause the validator to hang because, if there are no valid batches with which to create a new block, the PoET engine will never consider any new blocks.
import logging import queue -import time import json @@ -113,24 +112,17 @@ class PoetEngine(Engine): return None def _finalize_block(self): - summary = None - while summary is None: summary = self._summarize_block() if summary is None: LOGGER.debug('Block not ready to be summarized') - time.sleep(1) - continue - else: - LOGGER.info('Block summary: %s', summary.hex()) - break + return None consensus = self._oracle.finalize_block(summary) if consensus is None: return None - while True: try: block_id = self._service.finalize_block(consensus) LOGGER.info( @@ -140,8 +132,7 @@ class PoetEngine(Engine): return block_id except exceptions.BlockNotReady: LOGGER.debug('Block not ready to be finalized') - time.sleep(1) - continue + return None except exceptions.InvalidState: LOGGER.warning('block cannot be finalized') return None
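A condensed sketch of the resulting control flow (names are placeholders, not the real sawtooth-poet APIs): every "not ready" condition now returns None so the engine's main loop can keep making progress instead of blocking in a sleep/retry loop.

class BlockNotReady(Exception):
    """Placeholder for the validator's 'block not ready' signal."""

def finalize_block(service, oracle, summarize_block, logger):
    summary = summarize_block()
    if summary is None:
        logger.debug("Block not ready to be summarized")
        return None                      # caller retries on its next loop iteration
    consensus = oracle.finalize_block(summary)
    if consensus is None:
        return None
    try:
        return service.finalize_block(consensus)
    except BlockNotReady:
        logger.debug("Block not ready to be finalized")
        return None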
Update README.md Add paragraph space
@@ -115,8 +115,9 @@ To typecheck, run: `mypy -p git` To test, run: `pytest` -Configuration for flake8 is in the root/.flake8 file. -Configurations for mypy, pytest and coverage.py are in root/pyproject.toml. +Configuration for flake8 is in the ./.flake8 file. + +Configurations for mypy, pytest and coverage.py are in ./pyproject.toml. The same linting and testing will also be performed against different supported python versions upon submitting a pull request (or on each push if you have a fork with a "main" branch and actions enabled).