message | diff
---|---|
Update CVE-2021-39327.yaml
updated exposures -> exposure | @@ -14,7 +14,7 @@ info:
cvss-score: 5.3
cve-id: CVE-2021-39327
cwe-id: CWE-200
- tags: exposures,packetstorm,cve,cve2021,wordpress
+ tags: exposure,packetstorm,cve,cve2021,wordpress
requests:
- method: GET
|
Update assembly/__init__.py: fix flake8 errors
'.basis.Basis' imported but unused
'.basis.InteriorBasis' imported but unused
'.basis.FacetBasis' imported but unused
'.dofs.Dofs' imported but unused
'.form.BilinearForm' imported but unused
'.form.LinearForm' imported but unused
'.form.Functional' imported but unused
'.form.bilinear_form' imported but unused
'.form.linear_form' imported but unused
'.form.functional' imported but unused | @@ -62,3 +62,16 @@ def asm(form: Form,
"""
return form.assemble(*args, **kwargs)
+
+
+__all__ = [
+ "Basis",
+ "InteriorBasis",
+ "FacetBasis",
+ "Dofs",
+ "BilinearForm",
+ "LinearForm",
+ "Functional",
+ "bilinear_form",
+ "linear_form",
+ "functional"]
|
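Flake8's F401 ("imported but unused") fires because nothing in `__init__.py` references these imports; listing them in `__all__` marks them as intentional re-exports and also defines what a star-import of the package exposes. A self-contained sketch of that mechanism, with an invented package name and contents:
```python
# Self-contained sketch (package and class names are invented) of why __all__
# silences pyflakes' F401: names listed there count as re-exports, and they are
# exactly what a star-import exposes.
import os
import sys
import tempfile
import textwrap

pkg = os.path.join(tempfile.mkdtemp(), "mypkg")
os.makedirs(pkg)
with open(os.path.join(pkg, "basis.py"), "w") as f:
    f.write("class Basis:\n    pass\n")
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    f.write(textwrap.dedent("""\
        from .basis import Basis  # re-exported via __all__, so F401 no longer applies

        __all__ = ["Basis"]
    """))

sys.path.insert(0, os.path.dirname(pkg))
ns = {}
exec("from mypkg import *", ns)
assert "Basis" in ns  # only the names listed in __all__ are exported
```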
fix: force n_jobs to 1 in validation mode
There is a bug in pyannote.metrics that makes metrics behave strangely when multiprocessing is used. Using just one job until this is fixed. | @@ -376,6 +376,9 @@ def main():
raise ValueError(msg)
params['metric'] = metric
+ # FIXME: parallel is broken in pyannote.metrics
+ params['n_jobs'] = 1
+
app.validate(protocol, **params)
if arg['apply']:
|
Update transmon_pocket.py
The only change is about the sketch of the qubit. | @@ -51,9 +51,9 @@ class TransmonPocket(BaseQubit):
Below is a sketch of the qubit
::
- -1
+ +1 +1
________________________________
- -1 |______ ____ __________| Y
+ -1 |______ ____ __________| +1 Y
| |____| |____| | ^
| __________________ | |
| | island | | |-----> X
@@ -65,8 +65,9 @@ class TransmonPocket(BaseQubit):
| |__________________| |
| ______ |
|_______|______| |
- |________________________________| +1
- +1
+ -1 |________________________________| +1
+
+ -1 -1
.. image::
transmon_pocket.png
|
ShaderUI : Add `hideShaders()` function
This is useful for hiding shaders that are deprecated but must be kept around for backwards compatibility. | @@ -223,6 +223,15 @@ def appendShaders( menuDefinition, prefix, searchPaths, extensions, nodeCreator,
menuDefinition.append( prefix, { "subMenu" : functools.partial( __shaderSubMenu, searchPaths, extensions, nodeCreator, matchExpression, searchTextPrefix ) } )
+__hiddenShadersPathMatcher = IECore.PathMatcher()
+## Hides shaders from the menu created by `appendShaders()`.
+# The `pathMatcher` is an `IECore.PathMatcher()` that will be used
+# to match searchpath-relative shader filenames.
+def hideShaders( pathMatcher ) :
+
+ global __hiddenShadersPathMatcher
+ __hiddenShadersPathMatcher.addPaths( pathMatcher )
+
def __nodeName( shaderName ) :
nodeName = os.path.split( shaderName )[-1]
@@ -247,6 +256,8 @@ def __loadFromFile( menu, extensions, nodeCreator ) :
def __shaderSubMenu( searchPaths, extensions, nodeCreator, matchExpression, searchTextPrefix ) :
+ global __hiddenShadersPathMatcher
+
if isinstance( matchExpression, str ) :
matchExpression = re.compile( fnmatch.translate( matchExpression ) )
@@ -261,6 +272,8 @@ def __shaderSubMenu( searchPaths, extensions, nodeCreator, matchExpression, sear
for file in files :
if os.path.splitext( file )[1][1:] in extensions :
shaderPath = os.path.join( root, file ).partition( path )[-1].lstrip( "/" )
+ if __hiddenShadersPathMatcher.match( shaderPath ) & IECore.PathMatcher.Result.ExactMatch :
+ continue
if shaderPath not in shaders and matchExpression.match( shaderPath ) :
shaders.add( os.path.splitext( shaderPath )[0] )
|
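For context, a hedged usage sketch of the new function; the module path (`GafferSceneUI.ShaderUI`) and the shader filenames below are assumptions for illustration, not taken from the diff:
```python
# Hypothetical startup config; the module path and shader filenames are assumptions.
import IECore
import GafferSceneUI

# Hide two deprecated shaders from the menus built by appendShaders().
# Paths are matched against searchpath-relative shader filenames.
GafferSceneUI.ShaderUI.hideShaders(
	IECore.PathMatcher( [ "deprecated/oldSurface.osl", "deprecated/oldDisplacement.osl" ] )
)
```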
First take on realloc instrumentation
This instrumentation works for most cases. There is a bug where
a read out of bounds would happen if the shadow memory is not available
for as large a region as new_size. To correct this, allocation sizes need
to be tracked. | #include "polytracker/taint_sources.h"
+#include <vector>
EXT_C_FUNC void *__dfsw_malloc(size_t size, dfsan_label size_label,
dfsan_label *ret_label) {
@@ -11,8 +12,27 @@ EXT_C_FUNC void *__dfsw_realloc(void *ptr, size_t new_size,
dfsan_label ptr_label, dfsan_label size_label,
dfsan_label *ret_label) {
+ // TODO (hbrodin): This is incorrect. There is not new_size bytes available (typically)
+ // but for now, lets just hope that the user of returned memory clears it (no undefined read).
+ // This might actually cause a read oob if at the end of shadow memory... Need to track all
+ // allocation sizes to only copy the amount of memory avail in the old allocation.
+ //
+ // Make a copy of shadow memory/labels, in case we do actually move it
+ // This could be a lot faster if we used the shadow_for function and did a straight copy...
+ std::vector<dfsan_label> shadow;
+ auto oldptr = reinterpret_cast<char*>(ptr);
+ if (oldptr != nullptr && new_size > 0) {
+ shadow.reserve(new_size);
+ std::transform(oldptr, oldptr + new_size, std::back_inserter(shadow), [](char& v) {
+ return dfsan_read_label(&v, sizeof(v));
+ });
+ }
+
void *new_mem = realloc(ptr, new_size);
- if (new_mem != NULL && new_mem != ptr) {
+ if (new_mem != oldptr) {
+ for (size_t i=0;i<shadow.size();i++) {
+ dfsan_set_label(shadow[i], reinterpret_cast<char*>(new_mem) + i, sizeof(char));
+ }
}
*ret_label = 0;
return new_mem;
|
Fix manhattan plot downsampling
* Fix manhattan plot downsampling
I broke this in the last few days. | @@ -324,6 +324,8 @@ def manhattan(pvals, locus=None, title=None, size=4, hover_fields=None, collect_
hover_fields['locus'] = hail.str(locus)
+ pvals = -hail.log10(pvals)
+
if collect_all:
res = hail.tuple([locus.global_position(), pvals, hail.struct(**hover_fields)]).collect()
hf_struct = [point[2] for point in res]
@@ -340,8 +342,8 @@ def manhattan(pvals, locus=None, title=None, size=4, hover_fields=None, collect_
x = [point[0] for point in res]
y = [point[1] for point in res]
- y_inv_log = [-log10(p) for p in y]
- hover_fields['p_value'] = y
+ y_linear = [10 ** (-p) for p in y]
+ hover_fields['p_value'] = y_linear
ref = locus.dtype.reference_genome
@@ -373,7 +375,7 @@ def manhattan(pvals, locus=None, title=None, size=4, hover_fields=None, collect_
del labels[i - num_deleted]
num_deleted += 1
- p = scatter(x, y_inv_log, label=label, title=title, xlabel='Chromosome', ylabel='P-value (-log10 scale)',
+ p = scatter(x, y, label=label, title=title, xlabel='Chromosome', ylabel='P-value (-log10 scale)',
size=size, legend=False, source_fields=hover_fields)
p.xaxis.ticker = mid_points
|
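A quick standalone check of the round trip the fix relies on: p-values are converted to -log10 up front for plotting, and the hover fields recover the linear p-values with `10 ** (-y)`.
```python
# Plain-Python check of the round trip (no Hail required).
from math import isclose, log10

pvals = [0.05, 1e-8]
y = [-log10(p) for p in pvals]      # what the fixed code plots on the y axis
p_back = [10 ** (-v) for v in y]    # what the hover field 'p_value' now shows

assert all(isclose(a, b) for a, b in zip(pvals, p_back))
print(y)  # approximately [1.30103, 8.0]
```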
Don't declare any documentationFormat
We have no way to display paragraph-like content in completions | @@ -244,8 +244,7 @@ def start_client(window: sublime.Window, config: ClientConfig):
},
"completion": {
"completionItem": {
- "snippetSupport": True,
- "documentationFormat": ["plaintext"]
+ "snippetSupport": True
},
"completionItemKind": {
"valueSet": [
|
Fix a typo in the assert
Summary: Pull Request resolved: | @@ -131,7 +131,7 @@ cv::Mat resizeImage(cv::Mat& img) {
int scaled_height = int(round(img.rows * im_scale));
assert((scaled_width <= max_size) && (scaled_height <= max_size));
if ((scaled_width < min_size) || (scaled_height < min_size)) {
- assert((scaled_width == max_size) || (scaled_width == max_size));
+ assert((scaled_width == max_size) || (scaled_height == max_size));
} else {
assert((scaled_width == min_size) || (scaled_height == min_size));
}
|
swarming: fix hilarious log entry
duration is now in ms, not seconds, but it's easier to still log in seconds. The
logging regression was introduced in
Review-Url: | @@ -135,7 +135,7 @@ def monitor_call(func):
if flat_dims:
hooks_durations.add(
duration, fields={'hookname': name, 'pool': flat_dims})
- logging.info('%s(): %gs', name, round(duration, 3))
+ logging.info('%s(): %gs', name, round(duration/1000., 3))
return hook
|
Remove flags set for all v4 TPUs. Topology flags will now be set in libTPU.
Remove deprecated fields `TPU_MESH_CONTROLLER_ADDRESS` and `TPU_MESH_CONTROLLER_PORT`. | import os
+
def cloud_tpu_init():
"""Automatically sets Cloud TPU topology and other env vars.
@@ -63,16 +64,18 @@ def cloud_tpu_init():
# pylint: enable=import-outside-toplevel
# Based on https://github.com/tensorflow/tensorflow/pull/40317
- gce_metadata_endpoint = 'http://' + os.environ.get('GCE_METADATA_IP',
- 'metadata.google.internal')
+ gce_metadata_endpoint = 'http://' + os.environ.get(
+ 'GCE_METADATA_IP', 'metadata.google.internal')
+
def get_metadata(key):
return requests.get(
f'{gce_metadata_endpoint}/computeMetadata/v1/instance/attributes/{key}',
- headers={'Metadata-Flavor': 'Google'}).text
+ headers={
+ 'Metadata-Flavor': 'Google'
+ }).text
worker_id = get_metadata('agent-worker-number')
accelerator_type = get_metadata('accelerator-type')
- worker_network_endpoints = get_metadata('worker-network-endpoints')
accelerator_type_to_host_bounds = {
'v2-8': '1,1,1',
@@ -88,27 +91,12 @@ def cloud_tpu_init():
'v3-512': '8,8,1',
'v3-1024': '8,16,1',
'v3-2048': '16,16,1',
- 'v4-8': '1,1,1',
- 'v4-16': '1,1,2',
- 'v4-32': '1,1,4',
- 'v4-64': '1,2,4',
- 'v4-128': '2,2,4',
- 'v4-256': '2,2,8',
- 'v4-512': '2,4,8',
- 'v4-1024': '4,4,8',
- 'v4-2048': '4,4,16',
- 'v4-4096': '4,8,16',
}
os.environ['CLOUD_TPU_TASK_ID'] = worker_id
+
+ # If v4 TPU don't set any topology related flags, libtpu will set these values.
+ if not accelerator_type.startswith('v4-'):
os.environ['TPU_CHIPS_PER_HOST_BOUNDS'] = '2,2,1'
os.environ['TPU_HOST_BOUNDS'] = accelerator_type_to_host_bounds[
accelerator_type]
- os.environ['TPU_MESH_CONTROLLER_ADDRESS'] = worker_network_endpoints.split(
- ',')[0].split(':')[2] + ':8476'
- os.environ['TPU_MESH_CONTROLLER_PORT'] = '8476'
-
- if (not os.environ.get('TPU_TOPOLOGY_WRAP', None)
- and 'v4' in accelerator_type
- and accelerator_type not in ['v4-8', 'v4-16', 'v4-32', 'v4-64']):
- os.environ['TPU_TOPOLOGY_WRAP'] = 'true,true,true'
|
fix splitter_window.py copy/paste glitch
Was setting SashPosition with `sash_gravity_p.get()` as an argument. Caused an exception when showing Design Window in wxGlade. | @@ -89,7 +89,7 @@ class EditSplitterWindow(ManagedBase, EditStylesMixin):
sash_gravity_p = self.properties['sash_gravity']
if sash_gravity_p.is_active():
- self.widget.SetSashPosition(sash_gravity_p.get())
+ self.widget.SetSashGravity(sash_gravity_p.get())
min_pane_size_p = self.properties['min_pane_size']
if min_pane_size_p.is_active():
|
Remove flake8
This package is causing a downgrade of the `importlib-metadata` package which is causing a cascading number of reinstalls:
Flake8 is a style checker for Python, but we are not using it AFAIK. We don't have Jupyter plugins for it either. | @@ -179,7 +179,6 @@ RUN apt-get install -y libfreetype6-dev && \
pip install wordcloud && \
pip install xgboost && \
pip install pydot && \
- pip install flake8 && \
# Pinned because it breaks theano test with the latest version (b/178107003).
pip install theano-pymc==1.0.11 && \
pip install python-Levenshtein && \
|
Fixes `AttributeError` when trying to copy a file
The full error was: "AttributeError: module 'posixpath' has no attribute
'makedirs'". That function is just in the `os` module, not `os.path`. | @@ -253,7 +253,7 @@ class SavedFile:
new_file = os.path.join(self.directory, filename)
new_file_dir = os.path.dirname(new_file)
if not os.path.isdir(new_file_dir):
- os.path.makedirs(new_file_dir)
+ os.makedirs(new_file_dir)
shutil.copyfile(orig_path, new_file)
if 'filename' not in kwargs:
self.save()
|
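The corrected pattern in isolation, as a runnable sketch: `makedirs` lives in the `os` module (there is no `os.path.makedirs`), and it must run before `shutil.copyfile` so the destination directory exists.
```python
# Runnable sketch of the corrected pattern: ensure the target directory exists
# with os.makedirs (not os.path.makedirs, which doesn't exist), then copy.
import os
import shutil
import tempfile

src = os.path.join(tempfile.mkdtemp(), "original.txt")
with open(src, "w") as f:
    f.write("hello")

dst = os.path.join(tempfile.mkdtemp(), "nested", "dir", "copy.txt")
new_file_dir = os.path.dirname(dst)
if not os.path.isdir(new_file_dir):
    os.makedirs(new_file_dir)
shutil.copyfile(src, dst)

assert os.path.isfile(dst)
```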
Update main.yml
Use specific environment for secrets passing. | @@ -16,6 +16,9 @@ on:
jobs:
build:
runs-on: ubuntu-latest
+ # Environment contains secrets for this build
+ environment:
+ name: Authomatic Tox
strategy:
matrix:
python-version: [3.6, 3.7, 3.8, 3.9]
@@ -23,7 +26,7 @@ jobs:
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- uses: actions/checkout@v2
- - name: Sign release
+ - name: Decrypt Secret Config
# To encrypt, replace -d with -e and reverse -in and -out
run: |
openssl aes-256-cbc -pass env:PRIVATE_KEY -d -md sha512 -pbkdf2 -iter 100000 -salt -in tests/functional_tests/config_secret.py.enc -out tests/functional_tests/config_secret.py
|
swf: WorkflowTask: add the possibility to use a custom task list
This is meant to be used by defining a function:
```python
def get_task_list(cls, task_list, *args, **kwargs):
return task_list
```
in the class implementing the Workflow class, and then allows using
something like:
```python
chain.append(
MyWorkflow,
param1=toto,
param2=tata,
task_list="the_task_list",
)
``` | @@ -168,6 +168,10 @@ class WorkflowTask(task.WorkflowTask, SwfTask):
@property
def task_list(self):
+ get_task_list = getattr(self.workflow, 'get_task_list', None)
+ if get_task_list:
+ return get_task_list(self.workflow, *self.args, **self.kwargs)
+
return getattr(self.workflow, 'task_list', None)
def schedule(self, domain, task_list=None, executor=None, **kwargs):
|
Adding link to deployment guide
Seems important to have a link to deployment and upgrade guides from changelog, since these will be the most logical next steps for people reviewing this page. | # Mattermost self-hosted changelog
-[Mattermost](https://mattermost.com) is an open source platform for secure collaboration across the entire software development lifecycle. This changelog summarizes updates for the latest self-hosted versions of Mattermost.
+[Mattermost](https://mattermost.com) is an open source platform for secure collaboration across the entire software development lifecycle. This changelog summarizes updates for the latest self-hosted versions of Mattermost to be [deployed and upgraded on infrastructure you control](https://docs.mattermost.com/guides/deployment.html).
See the [changelog in progress](https://bit.ly/2nK3cVf) for the upcoming release. See the [Legacy Self-Hosted Mattermost Changelog](legacy-self-hosted-changelog) for details on all Mattermost self-hosted releases prior to v5.37.
|
Update nested_inputs.py
correct description of CHP and Boiler fuel emissions variables from gallons to MMBtus | @@ -1906,19 +1906,19 @@ nested_input_definitions = {
},
"emissions_factor_lb_CO2_per_mmbtu": {
"type": "float",
- "description": "Pounds of carbon dioxide emitted per gallon of fuel burned"
+ "description": "Pounds of carbon dioxide emitted per MMBtu of fuel burned"
},
"emissions_factor_lb_NOx_per_mmbtu": {
"type": "float",
- "description": "Pounds of NOx emitted per gallon of fuel burned"
+ "description": "Pounds of NOx emitted per MMBtu of fuel burned"
},
"emissions_factor_lb_SO2_per_mmbtu": {
"type": "float",
- "description": "Pounds of SO2 emitted per gallon of fuel burned"
+ "description": "Pounds of SO2 emitted per MMBtu of fuel burned"
},
"emissions_factor_lb_PM_per_mmbtu": {
"type": "float",
- "description": "Pounds of PM2.5 emitted per gallon of fuel burned"
+ "description": "Pounds of PM2.5 emitted per MMBtu of fuel burned"
}
},
|
Fix empty window name in template replacement
For example, when the device screen is off, the focused window name is empty. | @@ -1332,7 +1332,8 @@ class AdbClient:
def substituteDeviceTemplate(self, template):
serialno = self.serialno.replace('.', '_').replace(':', '-')
- focusedWindowName = self.getFocusedWindowName().replace('/', '-').replace('.', '_')
+ window_name = self.getFocusedWindowName() or 'no_name'
+ focusedWindowName = window_name.replace('/', '-').replace('.', '_')
timestamp = datetime.datetime.now().isoformat()
osName = platform.system()
if osName.startswith('Windows'): # ':' not supported in filenames
|
Settings updates
Use the wagtail version to add the correctly named wagtail apps to INSTALLED_APPS and middleware classes to MIDDLEWARE_CLASSES
Remove 'django.contrib.auth.middleware.SessionAuthenticationMiddleware' from MIDDLEWARE_CLASSES
Ensure the MIDDLEWARE setting is set for django >= 2 | import os
+from django import VERSION as DJANGO_VERSION
from django.conf.global_settings import * # NOQA
+from wagtail import VERSION as WAGTAIL_VERSION
DEBUG = True
SITE_ID = 1
@@ -12,19 +14,6 @@ LANGUAGE_CODE = 'en'
INSTALLED_APPS = (
- 'wagtail.contrib.settings',
- 'wagtail.wagtailforms',
- 'wagtail.wagtailsearch',
- 'wagtail.wagtailembeds',
- 'wagtail.wagtailimages',
- 'wagtail.wagtailsites',
- 'wagtail.wagtailusers',
- 'wagtail.wagtailsnippets',
- 'wagtail.wagtaildocs',
- 'wagtail.wagtailredirects',
- 'wagtail.wagtailadmin',
- 'wagtail.wagtailcore',
- 'wagtail.contrib.modeladmin',
'wagtailmenus',
'taggit',
@@ -36,7 +25,35 @@ INSTALLED_APPS = (
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
- 'django.contrib.sites'
+ 'django.contrib.sites',
+ 'wagtail.contrib.settings',
+ 'wagtail.contrib.modeladmin',
+)
+if WAGTAIL_VERSION >= (2, 0):
+ INSTALLED_APPS += (
+ 'wagtail.search',
+ 'wagtail.embeds',
+ 'wagtail.images',
+ 'wagtail.sites',
+ 'wagtail.users',
+ 'wagtail.snippets',
+ 'wagtail.documents',
+ 'wagtail.contrib.redirects',
+ 'wagtail.admin',
+ 'wagtail.core',
+ )
+else:
+ INSTALLED_APPS += (
+ 'wagtail.wagtailsearch',
+ 'wagtail.wagtailembeds',
+ 'wagtail.wagtailimages',
+ 'wagtail.wagtailsites',
+ 'wagtail.wagtailusers',
+ 'wagtail.wagtailsnippets',
+ 'wagtail.wagtaildocs',
+ 'wagtail.wagtailredirects',
+ 'wagtail.wagtailadmin',
+ 'wagtail.wagtailcore',
)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
@@ -86,12 +103,21 @@ MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
+)
+if WAGTAIL_VERSION >= (2, 0):
+ MIDDLEWARE_CLASSES += (
+ 'wagtail.core.middleware.SiteMiddleware',
+ 'wagtail.contrib.redirects.middleware.RedirectMiddleware',
+ )
+else:
+ MIDDLEWARE_CLASSES += (
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
+if DJANGO_VERSION >= (2, 0):
+ MIDDLEWARE = MIDDLEWARE_CLASSES
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
|
[docs] environment_dict -> run_config
Test Plan: eyes
Reviewers: yuhan, sashank, bob | @@ -68,9 +68,9 @@ Pipeline configuration
.. _config_schema:
-Environment Dict Schema
+Run Config Schema
^^^^^^^^^^^^^^^^^^^^^^^
- The ``environment_dict`` used by :py:func:`execute_pipeline` and
+ The ``run_config`` used by :py:func:`execute_pipeline` and
:py:func:`execute_pipeline_iterator` has the following schema:
::
|
acronym: update tests to 1.7.0
Add underscore emphasis test. | @@ -3,7 +3,7 @@ import unittest
from acronym import abbreviate
-# Tests adapted from `problem-specifications//canonical-data.json` @ v1.6.0
+# Tests adapted from `problem-specifications//canonical-data.json` @ v1.7.0
class AcronymTest(unittest.TestCase):
def test_basic(self):
@@ -34,6 +34,9 @@ class AcronymTest(unittest.TestCase):
def test_apostrophes(self):
self.assertEqual(abbreviate("Halley's Comet"), 'HC')
+ def test_underscore_emphasis(self):
+ self.assertEqual(abbreviate("The Road _Not_ Taken"), 'TRNT')
+
if __name__ == '__main__':
unittest.main()
|
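For reference, a minimal `abbreviate()` sketch (not the exercism reference solution) that satisfies the new underscore-emphasis case alongside the existing apostrophe case:
```python
# Minimal sketch of an abbreviate() that passes the new test case: treat any
# run of characters that isn't a letter or apostrophe as a word separator.
import re


def abbreviate(phrase):
    words = re.split(r"[^A-Za-z']+", phrase)
    return "".join(word[0].upper() for word in words if word)


assert abbreviate("The Road _Not_ Taken") == "TRNT"
assert abbreviate("Halley's Comet") == "HC"
```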
[IMPR] upload.py: Allow to use a file for description
Creation of the -descriptionfile: argument | @@ -33,6 +33,7 @@ Arguments:
-recursive When the filename is a directory it also uploads the files from
the subdirectories.
-summary: Pick a custom edit summary for the bot.
+ -descfile: Specify a filename where the description is stored
It is possible to combine -abortonwarn and -ignorewarn so that if the specific
warning is given it won't apply the general one but more specific one. So if it
@@ -57,6 +58,7 @@ parameter, and for a description.
#
from __future__ import absolute_import, unicode_literals
+import codecs
import math
import os
import re
@@ -114,6 +116,7 @@ def main(*args):
chunk_size_regex = re.compile(
r'^-chunked(?::(\d+(?:\.\d+)?)[ \t]*(k|ki|m|mi)?b?)?$', re.I)
recursive = False
+ description_file = None
# process all global bot args
# returns a list of non-global args, i.e. args for upload.py
@@ -149,13 +152,25 @@ def main(*args):
elif arg == '-chunked':
match = chunk_size_regex.match(option)
chunk_size = get_chunk_size(match)
+ elif arg == '-descfile':
+ description_file = value
elif arg and not value:
if not url:
url = arg
else:
description.append(arg)
- description = u' '.join(description)
+ description = ' '.join(description)
+
+ if description_file:
+ if description:
+ pywikibot.error('Both a description and a -descfile were '
+ 'provided. Please specify only one of those.')
+ return False
+ with codecs.open(description_file,
+ encoding=pywikibot.config.textfile_encoding) as f:
+ description = f.read().replace('\r\n', '\n')
+
while not ("://" in url or os.path.exists(url)):
if not url:
error = 'No input filename given.'
|
Added some tests for tick2second() and second2tick().
These could be improved but a couple of test cases is better than nothing. | -from .units import tempo2bpm, bpm2tempo
+from .units import tempo2bpm, bpm2tempo, tick2second, second2tick
def test_tempo2bpm_bpm2tempo():
for bpm, tempo in [
@@ -9,3 +9,17 @@ def test_tempo2bpm_bpm2tempo():
]:
assert bpm == tempo2bpm(tempo)
assert tempo == bpm2tempo(bpm)
+
+
+# Todo: these tests could be improved with better test values such as
+# edge cases.
+
+def test_tick2second():
+ assert tick2second(1, ticks_per_beat=100, tempo=500000) == 0.005
+ assert tick2second(2, ticks_per_beat=100, tempo=100000) == 0.002
+
+
+def test_second2tick():
+ assert second2tick(0.005, ticks_per_beat=100, tempo=500000) == 1
+ assert second2tick(0.002, ticks_per_beat=100, tempo=100000) == 2
+
|
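The expected values in these tests follow from the standard MIDI timing relation, where `tempo` is microseconds per beat. A standalone sketch of the same arithmetic (not mido's actual implementation):
```python
# Standalone sketch of the conversion the tests exercise; tempo is microseconds
# per quarter note, so one tick lasts tempo / (ticks_per_beat * 1e6) seconds.
def tick2second(tick, ticks_per_beat, tempo):
    return tick * tempo / (ticks_per_beat * 1_000_000)


def second2tick(second, ticks_per_beat, tempo):
    return second * ticks_per_beat * 1_000_000 / tempo


assert tick2second(1, ticks_per_beat=100, tempo=500000) == 0.005
assert second2tick(0.002, ticks_per_beat=100, tempo=100000) == 2
```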
Update segmentation.py
Fixing the `ImportError` which is caused by having misspelled `reading_order`. | @@ -48,7 +48,7 @@ from kraken.binarization import nlbin
logger = logging.getLogger('kraken')
-def neading_order(lines: Sequence, text_direction: str = 'lr') -> List:
+def reading_order(lines: Sequence, text_direction: str = 'lr') -> List:
"""Given the list of lines (a list of 2D slices), computes
the partial reading order. The output is a binary 2D array
such that order[i,j] is true if line i comes before line j
|
github-actions: Restrict main CI run to macos-10.15
Block macos-11 until
is fixed | @@ -22,7 +22,7 @@ jobs:
matrix:
python-version: [3.7, 3.8, 3.9]
python-architecture: ['x64']
- os: [ubuntu-latest, macos-latest, windows-latest]
+ os: [ubuntu-latest, macos-10.15, windows-latest]
include:
# add 32-bit build on windows
- python-version: 3.8
|
[modules/sensors] Linting
Un-import os, fix indentation | +# pylint: disable=C0111,R0903
+
"""Displays sensor temperature
Requires the following executable:
* sensors
Parameters:
- * sensors.match: What line in the output of `sensors -u` should be matched against (default: temp1_input)
+ * sensors.match: Line to match against output of 'sensors -u' (default: temp1_input)
* sensors.match_number: which of the matches you want (default -1: last match).
"""
-import os
import re
import bumblebee.input
@@ -18,8 +19,7 @@ import bumblebee.engine
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(engine, config,
- bumblebee.output.Widget(full_text=self.temperature)
- )
+ bumblebee.output.Widget(full_text=self.temperature))
self._temperature = "unknown"
pattern = self.parameter("match", "temp1_input")
pattern_string = r"^\s*{}:\s*([\d.]+)$".format(pattern)
@@ -36,7 +36,7 @@ class Module(bumblebee.engine.Module):
return temperature
- def temperature(self, widget):
+ def temperature(self, _):
return self._temperature
def update(self, widgets):
|
Fix a typo in swift/common/swob.py
This patch just fixes a typo. | @@ -1263,7 +1263,7 @@ class Response(object):
# is 0, then that means we got a suffix-byte-range request
# (e.g. "bytes=-512"). This is asking for *up to* the last N
# bytes of the file. If we had any bytes to send at all,
- # we'd return a 206 with an apropriate Content-Range header,
+ # we'd return a 206 with an appropriate Content-Range header,
# but we can't construct a Content-Range header because we
# have no byte indices because we have no bytes.
#
|
Add back PhaseDiagramError
For backwards compatibility | @@ -1506,6 +1506,13 @@ class ReactionDiagram:
return cpd
+class PhaseDiagramError(Exception):
+ """
+ An exception class for Phase Diagram generation.
+ """
+ pass
+
+
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
|
Update test_utils.py
Corrected test case for new behaviour. | @@ -70,8 +70,8 @@ import pytest
("/", "./a", "./a"),
("/fr/blog/2015/11/a/","/fr/blog/2015/11/a/a.jpg","../../../../../fr/blog/2015/11/a/a.jpg"),
("/fr/blog/2015/11/a/","/fr/blog/","../../../../../fr/blog/"),
- ("/fr/blog/2015/12/a/","/fr/b.php","../../../../fr/b.php"),
- ("/fr/blog/2016/1/a/","/images/b.svg","../../../../../images/b.svg"),
+ ("/fr/blog/2015/11/a.php","/fr/blog/","../../../../fr/blog/"),
+ ("/fr/blog/2015/11/a/","/images/b.svg","../../../../../images/b.svg"),
])
def test_make_relative_url(base, target, expected):
from lektor.utils import make_relative_url
|
Change redis memory policy to allkeys-lru
Was volatile-lru | @@ -20,7 +20,7 @@ spec:
containers:
- name: {{ .Chart.Name }}-redis
image: redis:6.0.8-alpine
- args: ["sh", "-c", "redis-server --requirepass $$REDIS_PASSWORD --save 900 1 --save 300 10 --save 60 10000 --appendonly yes --maxmemory 2048mb --maxmemory-policy volatile-lru"]
+ args: ["sh", "-c", "redis-server --requirepass $$REDIS_PASSWORD --save 900 1 --save 300 10 --save 60 10000 --appendonly yes --maxmemory 2048mb --maxmemory-policy allkeys-lru"]
# volumeMounts:
# - name: {{ .Chart.Name }}-redis-data-volume
# mountPath: /data
|
update world -- add more docs and helper functions for dealing with
objects | @@ -270,6 +270,16 @@ class CostarWorld(AbstractWorld):
"objs" should be a list of all possible objects that we might want to
aggregate. They'll be saved in the "objs" dictionary. It's the role of the
various options/policies to choose and use them intelligently.
+
+ Parameters:
+ -----------
+ n/a
+
+ Returns:
+ --------
+ n/a
+
+ Access via the self.observation member.
'''
self.observation = {}
if self.tf_listener is None:
@@ -284,23 +294,47 @@ class CostarWorld(AbstractWorld):
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
pass
+ def addObjects(self, objects):
+ '''
+ Wrapper for addObject that will read through the whole dictionary to
+ get different objects and add them with the addObject function.
+
+ Parameters:
+ -----------
+ objs: dictionary of object lists by class
+
+ Returns:
+ --------
+ n/a
+
+ '''
+ for obj_class, objs in objects.items():
+ for obj_name in objs:
+ self.addObject(obj_name, obj_class)
+
def addObject(self, obj_name, obj_class, *args):
'''
Wraps add actor function for objects. Make sure they have the right
policy and are added so we can easily look them up later on.
TODO(cpaxton): update this when we have a more unified way of thinking
about objects.
+
+ Parameters:
+ -----------
+ obj_class: type associated with a particular object
+ obj_name: name (unique identifier) of a particular object associated with TF
+
+ Returns:
+ --------
+ n/a
'''
- #self.class_by_object[obj_id] = obj_class
if obj_class not in self.object_by_class:
self.object_by_class[obj_class] = [obj_name]
else:
self.object_by_class[obj_class].append(obj_name)
self.objects_to_track.append(obj_name)
- return -1
-
def getObjects(self, obj_class):
'''
Return information about specific objects in the world. This should tell us
|
Replacing assignation by typing for `websocket_handshake`
* Replacing assignation by typing for `websocket_handshake`
Related to
* Fix some type hinting issues
* Cleanup websocket handchake response concat
* Optimize concat encoding | -from typing import TYPE_CHECKING, Optional, Sequence
+from typing import TYPE_CHECKING, Optional, Sequence, cast
from websockets.connection import CLOSED, CLOSING, OPEN
from websockets.server import ServerConnection
+from websockets.typing import Subprotocol
from sanic.exceptions import ServerError
from sanic.log import error_logger
@@ -15,13 +16,6 @@ if TYPE_CHECKING:
class WebSocketProtocol(HttpProtocol):
-
- websocket: Optional[WebsocketImplProtocol]
- websocket_timeout: float
- websocket_max_size = Optional[int]
- websocket_ping_interval = Optional[float]
- websocket_ping_timeout = Optional[float]
-
def __init__(
self,
*args,
@@ -35,7 +29,7 @@ class WebSocketProtocol(HttpProtocol):
**kwargs,
):
super().__init__(*args, **kwargs)
- self.websocket = None
+ self.websocket: Optional[WebsocketImplProtocol] = None
self.websocket_timeout = websocket_timeout
self.websocket_max_size = websocket_max_size
if websocket_max_queue is not None and websocket_max_queue > 0:
@@ -109,14 +103,22 @@ class WebSocketProtocol(HttpProtocol):
return super().close_if_idle()
async def websocket_handshake(
- self, request, subprotocols=Optional[Sequence[str]]
+ self, request, subprotocols: Optional[Sequence[str]] = None
):
# let the websockets package do the handshake with the client
try:
if subprotocols is not None:
# subprotocols can be a set or frozenset,
# but ServerConnection needs a list
- subprotocols = list(subprotocols)
+ subprotocols = cast(
+ Optional[Sequence[Subprotocol]],
+ list(
+ [
+ Subprotocol(subprotocol)
+ for subprotocol in subprotocols
+ ]
+ ),
+ )
ws_conn = ServerConnection(
max_size=self.websocket_max_size,
subprotocols=subprotocols,
@@ -131,21 +133,18 @@ class WebSocketProtocol(HttpProtocol):
)
raise ServerError(msg, status_code=500)
if 100 <= resp.status_code <= 299:
- rbody = "".join(
- [
- "HTTP/1.1 ",
- str(resp.status_code),
- " ",
- resp.reason_phrase,
- "\r\n",
- ]
- )
- rbody += "".join(f"{k}: {v}\r\n" for k, v in resp.headers.items())
+ first_line = (
+ f"HTTP/1.1 {resp.status_code} {resp.reason_phrase}\r\n"
+ ).encode()
+ rbody = bytearray(first_line)
+ rbody += (
+ "".join(f"{k}: {v}\r\n" for k, v in resp.headers.items())
+ ).encode()
+ rbody += b"\r\n"
if resp.body is not None:
- rbody += f"\r\n{resp.body}\r\n\r\n"
- else:
- rbody += "\r\n"
- await super().send(rbody.encode())
+ rbody += resp.body
+ rbody += b"\r\n\r\n"
+ await super().send(rbody)
else:
raise ServerError(resp.body, resp.status_code)
self.websocket = WebsocketImplProtocol(
|
use new api
from | @@ -6,8 +6,6 @@ from ..common import *
import json
import hashlib
import time
-import uuid
-import urllib.parse, urllib.request
def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
html = get_content(url)
@@ -28,24 +26,19 @@ def douyutv_download(url, output_dir = '.', merge = True, info_only = False, **k
if show_status is not "1":
raise ValueError("The live stream is not online! (Errno:%s)" % server_status)
- tt = int(time.time() / 60)
- did = uuid.uuid4().hex.upper()
- sign_content = '{room_id}{did}A12Svb&%1UUmf@hC{tt}'.format(room_id = room_id, did = did, tt = tt)
- sign = hashlib.md5(sign_content.encode('utf-8')).hexdigest()
+ tt = int(time.time())
+ sign_content = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%s9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt)
+ sign = hashlib.md5(sign_content.encode('ascii')).hexdigest()
- json_request_url = "http://www.douyu.com/lapi/live/getPlay/%s" % room_id
- payload = {'cdn': 'ws', 'rate': '0', 'tt': tt, 'did': did, 'sign': sign}
- postdata = urllib.parse.urlencode(payload)
- req = urllib.request.Request(json_request_url, postdata.encode('utf-8'))
- with urllib.request.urlopen(req) as response:
- content = response.read()
-
- data = json.loads(content.decode('utf-8'))['data']
+ json_request_url = "http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/%s?rate=0" % room_id
+ headers = {'auth': sign, 'time': str(tt), 'aid': 'pcclient'}
+ content = get_content(json_request_url, headers = headers)
+ data = json.loads(content)['data']
server_status = data.get('error',0)
if server_status is not 0:
raise ValueError("Server returned error:%s" % server_status)
- real_url = data.get('rtmp_url')+'/'+data.get('rtmp_live')
+ real_url = data.get('live_url')
print_info(site_info, title, 'flv', float('inf'))
if not info_only:
|
Upgrade tensorflow to version 2.9.3
Dependabot alerts flag vulnerabilities in tensorflow==2.9.1 | @@ -250,7 +250,7 @@ tensorboard-data-server==0.6.1
# via tensorboard
tensorboard-plugin-wit==1.8.1
# via tensorboard
-tensorflow==2.9.1
+tensorflow==2.9.3
# via
# -r tools/model_tools/requirements-tensorflow.in
# openvino-dev
|
osbuild: require output_directory
Make the output_directory argument in Pipeline.assemble
and Assembler.run required. The qemu assembler assumes
it is passed in args and will crash without it. Making
it mandatory prevents this. | @@ -3,7 +3,6 @@ import hashlib
import json
import os
import tempfile
-from typing import Optional
from .api import API
from . import buildroot
@@ -119,7 +118,7 @@ class Assembler:
description["id"] = self.id
return description
- def run(self, tree, runner, build_tree, monitor, libdir, output_dir=None, var="/var/tmp"):
+ def run(self, tree, runner, build_tree, monitor, libdir, output_dir, var="/var/tmp"):
with buildroot.BuildRoot(build_tree, runner, libdir, var=var) as build_root:
args = {
@@ -131,7 +130,7 @@ class Assembler:
}
binds = []
- if output_dir:
+
output_dir = os.fspath(output_dir)
os.makedirs(output_dir, exist_ok=True)
binds.append(f"{output_dir}:/run/osbuild/output")
@@ -282,7 +281,7 @@ class Pipeline:
return results, build_tree, tree
- def assemble(self, object_store, build_tree, tree, monitor, libdir, output_directory: Optional[str]):
+ def assemble(self, object_store, build_tree, tree, monitor, libdir, output_directory):
results = {"success": True}
if not self.assembler:
@@ -301,7 +300,7 @@ class Pipeline:
build_dir,
monitor,
libdir,
- output_dir=output_dir,
+ output_dir,
var=object_store.store)
monitor.result(r)
@@ -320,7 +319,7 @@ class Pipeline:
return results
- def run(self, store, monitor, libdir, output_directory=None):
+ def run(self, store, monitor, libdir, output_directory):
os.makedirs("/run/osbuild", exist_ok=True)
results = {}
|
update to white_balance.md
changed errors | @@ -8,7 +8,7 @@ Corrects the exposure of an image. A color standard can be specified.
- **Parameters:**
- device - device number. Used to count steps in the pipeline
- - img - A gray scale image on which to perform the correction
+ - img - image on which to perform the correction
- mode - either 'hist' or 'max', if 'hist' method is used a histogram for the whole image or the specified ROI is calculated, and the
bin with the most pixels is used as a reference point to shift image values. If 'max' is used as a method, then the pixel with the maximum
value in the whole image or the specified ROI is used as a reference point to shift image values.
@@ -27,7 +27,7 @@ Corrects the exposure of an image. A color standard can be specified.
import plantcv as pcv
# Corrects image based on color standard and stores output as corrected_img
-device, corrected_img = pcv.white_balance(device,img,'hist', debug="print",(5, 5, 80, 80))
+device, corrected_img = pcv.white_balance(device,img,mode='hist', debug="print",roi=(5, 5, 80, 80))
```
|
Increase ModuleTest.run_function timeout
Reduce flakiness with a greater timeout | @@ -781,7 +781,7 @@ class ModuleCase(TestCase, SaltClientTestCaseMixin):
'''
return self.run_function(_function, args, **kw)
- def run_function(self, function, arg=(), minion_tgt='minion', timeout=90, **kwargs):
+ def run_function(self, function, arg=(), minion_tgt='minion', timeout=120, **kwargs):
'''
Run a single salt function and condition the return down to match the
behavior of the raw function call
|
Move query inside the transaction block.
The QuerySet should be lazily evaluated and the select for update should
be run when accessed in the for loop, but this is much clearer. | @@ -313,10 +313,10 @@ def process_reporting_metadata_staging():
from corehq.pillows.synclog import mark_last_synclog
from pillowtop.processors.form import mark_latest_submission
+ with transaction.atomic():
records = (
UserReportingMetadataStaging.objects.select_for_update(skip_locked=True).order_by('pk')
)[:100]
- with transaction.atomic():
for record in records:
user = CouchUser.get_by_user_id(record.user_id, record.domain)
if not user or user.is_deleted():
|
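The constraint behind the move: Django only lets a `select_for_update()` queryset be evaluated inside a transaction, and the row locks it takes last only as long as that transaction. A hedged sketch of the resulting pattern, with placeholder model and helper names:
```python
# Hedged Django sketch; the model class and handle_record() are placeholders
# passed in as arguments, not names from the codebase above.
from django.db import transaction


def process_staging_batch(StagingRecord, handle_record):
    with transaction.atomic():
        # The SELECT ... FOR UPDATE SKIP LOCKED query runs when the queryset is
        # iterated, i.e. inside the same transaction that holds the row locks.
        records = (
            StagingRecord.objects.select_for_update(skip_locked=True).order_by("pk")
        )[:100]
        for record in records:
            handle_record(record)
```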
Replace tf.minimum with znp.minimum.
Leave out module zfit.core.sample due to it being highly dependent on tensorflow. | @@ -6,6 +6,7 @@ import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
+import zfit.z.numpy as znp
from .. import z
from ..core.interfaces import ZfitData, ZfitSpace
from ..settings import ztypes
@@ -51,7 +52,7 @@ def _bandwidth_silverman_KDEV1(data, *_, **__):
@z.function(wraps='tensor')
def min_std_or_iqr(x):
- return tf.minimum(tf.math.reduce_std(x), (tfp.stats.percentile(x, 75) - tfp.stats.percentile(x, 25)))
+ return znp.minimum(tf.math.reduce_std(x), (tfp.stats.percentile(x, 75) - tfp.stats.percentile(x, 25)))
class GaussianKDE1DimV1(WrapDistribution):
|
Improve `imp._FileLike.__exit__`
The signature of this method currently doesn't work quite as intended: see
Classes will [still be accepted](https://mypy-play.net/?mypy=latest&python=3.10&gist=77efe095d01edeb1a4614166f0c9cf68) as conforming to this protocol if they have more permissive signatures such as `def __exit__(self, *args: object) -> None: ...` | import types
from _typeshed import StrPath
from os import PathLike
+from types import TracebackType
from typing import IO, Any, Protocol
from _imp import (
@@ -45,7 +46,7 @@ class _FileLike(Protocol):
def read(self) -> str | bytes: ...
def close(self) -> Any: ...
def __enter__(self) -> Any: ...
- def __exit__(self, *args: Any) -> Any: ...
+ def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> Any: ...
# PathLike doesn't work for the pathname argument here
def load_source(name: str, pathname: str, file: _FileLike | None = ...) -> types.ModuleType: ...
|
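A self-contained illustration of the point in the commit message: even with the precise `__exit__` signature on the protocol, a class that declares the more permissive `def __exit__(self, *args: object) -> None` still conforms, because its parameters accept everything the protocol requires.
```python
# Self-contained sketch: a structural type with the precise __exit__ signature
# still accepts classes that declare a looser __exit__.
from __future__ import annotations

from types import TracebackType
from typing import Any, Protocol


class _FileLike(Protocol):
    def read(self) -> str | bytes: ...
    def close(self) -> Any: ...
    def __enter__(self) -> Any: ...
    def __exit__(
        self,
        typ: type[BaseException] | None,
        exc: BaseException | None,
        tb: TracebackType | None,
    ) -> Any: ...


class FakeFile:
    def read(self) -> bytes:
        return b""

    def close(self) -> None:
        return None

    def __enter__(self) -> FakeFile:
        return self

    def __exit__(self, *args: object) -> None:  # looser, but still conforms
        return None


f: _FileLike = FakeFile()  # accepted by type checkers despite the looser __exit__
```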
Pin alembic to <1.5.0
Tests are passing on 1.4.3 but failing on 1.5.0:
Passing:
Failing: | @@ -59,7 +59,7 @@ def get_version():
"coloredlogs>=6.1, <=14.0",
"PyYAML",
# core (not explicitly expressed atm)
- "alembic>=1.2.1",
+ "alembic>=1.2.1, <1.5.0",
"croniter>=0.3.34",
"grpcio>=1.32.0", # ensure version we require is >= that with which we generated the grpc code (set in dev-requirements)
"grpcio-health-checking>=1.32.0",
|
Change function name
To be consistent with our naming conventions, functions should be
lowercase. But if this is lowercase, it will collide with the `exec`
statement. So I have re-named it. | @@ -87,10 +87,11 @@ def App(apptype, executor, walltime=60, cache=False, sites='all'):
"""
from parsl import APP_FACTORY_FACTORY
- def Exec(f):
- return APP_FACTORY_FACTORY.make(apptype, executor, f,
+ def wrapper(f):
+ return APP_FACTORY_FACTORY.make(apptype, f,
+ executor=executor,
sites=sites,
cache=cache,
walltime=walltime)
- return Exec
+ return wrapper
|
Use RequestsHttpConnection as a workaround to use HTTPS
* Because of an old elasticsearch-py version, the urllib3 transport
is called with a hardcoded "http" scheme which doesn't translate
to HTTPS/SSL calls to our ES cluster, causing an SSL error | @@ -68,12 +68,15 @@ def patch():
"""Monkey patch elasticutils get_es to add use_ssl and http_auth settings."""
from django.conf import settings
+ from elasticsearch import RequestsHttpConnection
+
defaults = {
'urls': settings.ES_URLS,
'timeout': getattr(settings, 'ES_TIMEOUT', 5),
'use_ssl': getattr(settings, 'ES_USE_SSL', False),
'http_auth': getattr(settings, 'ES_HTTP_AUTH', None),
'verify_certs': getattr(settings, 'ES_VERIFY_CERTS', True),
+ 'connection_class': RequestsHttpConnection
}
defaults.update(overrides)
|
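The same workaround outside the monkey patch, as a hedged sketch for an old elasticsearch-py version; the host and credentials are placeholders:
```python
# Hedged sketch for an old elasticsearch-py client; host and credentials are placeholders.
from elasticsearch import Elasticsearch, RequestsHttpConnection

es = Elasticsearch(
    ["es.example.com:9243"],
    use_ssl=True,
    verify_certs=True,
    http_auth=("user", "secret"),
    # Forces the requests-based transport so the https scheme is actually honoured.
    connection_class=RequestsHttpConnection,
)
```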
auth: Factor out some uses of EXTERNAL_HOST with ROOT_DOMAIN_URI.
Apart from being less verbose, this makes it more manifest (on e.g. grep)
that we aren't using EXTERNAL_HOST here to construct subdomains. | @@ -240,6 +240,10 @@ def google_oauth2_csrf(request, value):
token = _unsalt_cipher_token(get_token(request))
return hmac.new(token.encode('utf-8'), value.encode("utf-8"), hashlib.sha256).hexdigest()
+def reverse_on_root(viewname, args=None, kwargs=None):
+ # type: (str, List[str], Dict[str, str]) -> str
+ return settings.ROOT_DOMAIN_URI + reverse(viewname, args=args, kwargs=kwargs)
+
def start_google_oauth2(request):
# type: (HttpRequest) -> HttpResponse
url = reverse('zerver.views.auth.send_oauth_request_to_google')
@@ -252,11 +256,7 @@ def start_google_oauth2(request):
def redirect_to_main_site(request, url, is_signup=False):
# type: (HttpRequest, Text, bool) -> HttpResponse
- main_site_uri = ''.join((
- settings.EXTERNAL_URI_SCHEME,
- settings.EXTERNAL_HOST,
- url,
- ))
+ main_site_uri = settings.ROOT_DOMAIN_URI + url
params = {
'subdomain': get_subdomain(request),
'is_signup': '1' if is_signup else '0',
@@ -305,11 +305,7 @@ def send_oauth_request_to_google(request):
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
- 'redirect_uri': ''.join((
- settings.EXTERNAL_URI_SCHEME,
- settings.EXTERNAL_HOST,
- reverse('zerver.views.auth.finish_google_oauth2'),
- )),
+ 'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'scope': 'profile email',
'state': csrf_state,
}
@@ -345,11 +341,7 @@ def finish_google_oauth2(request):
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
- 'redirect_uri': ''.join((
- settings.EXTERNAL_URI_SCHEME,
- settings.EXTERNAL_HOST,
- reverse('zerver.views.auth.finish_google_oauth2'),
- )),
+ 'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'grant_type': 'authorization_code',
},
)
|
emoji: Add 6 more emoticon conversions.
While there are many more, these are well established. | @@ -42,6 +42,12 @@ EMOTICON_CONVERSIONS = {
'<3': ':heart:',
':|': ':expressionless:',
':/': ':confused:',
+ ';)': ':wink:',
+ ':D': ':grinning:',
+ ':o': ':open_mouth:',
+ ':O': ':open_mouth:',
+ ':p': ':stuck_out_tongue:',
+ ':P': ':stuck_out_tongue:',
}
def emoji_names_for_picker(emoji_name_maps: Dict[str, Dict[str, Any]]) -> List[str]:
|
PathFilterUI : Account for `roots` when dragging and dropping paths
We want to add paths that are relative to the roots. | @@ -152,6 +152,33 @@ def __dropMode( nodeGadget, event ) :
else :
return __DropMode.Replace
+def __dropPaths( paths, pathsPlug ) :
+
+ if pathsPlug is None :
+ return paths
+
+ if not isinstance( pathsPlug.node(), GafferScene.PathFilter ) :
+ return paths
+
+ pathFilter = pathsPlug.node()
+ if not pathFilter["roots"].getInput() :
+ return paths
+
+ with pathsPlug.ancestor( Gaffer.ScriptNode ).context() :
+ rootPaths = IECore.PathMatcher()
+ for node in GafferScene.SceneAlgo.filteredNodes( pathFilter ) :
+ scene = node["in"][0] if isinstance( node["in"], Gaffer.ArrayPlug ) else node["in"]
+ GafferScene.matchingPaths( pathFilter["roots"], scene, rootPaths )
+
+ paths = IECore.PathMatcher( paths )
+ relativePaths = IECore.PathMatcher()
+ for rootPath in rootPaths.paths() :
+ relativePaths.addPaths(
+ paths.subTree( rootPath )
+ )
+
+ return relativePaths.paths()
+
def __dragEnter( nodeGadget, event ) :
if not isinstance( event.data, IECore.StringVectorData ) :
@@ -200,16 +227,18 @@ def __drop( nodeGadget, event ) :
if pathsPlug is None :
pathsPlug = __pathsPlug( nodeGadget.node()["filter"].source().node() )
+ dropPaths = __dropPaths( event.data, pathsPlug )
+
dropMode = __dropMode( nodeGadget, event )
if dropMode == __DropMode.Replace :
- paths = sorted( event.data )
+ paths = sorted( dropPaths )
elif dropMode == __DropMode.Add :
paths = set( pathsPlug.getValue() )
- paths.update( event.data )
+ paths.update( dropPaths )
paths = sorted( paths )
else :
paths = set( pathsPlug.getValue() )
- paths.difference_update( event.data )
+ paths.difference_update( dropPaths )
paths = sorted( paths )
with Gaffer.UndoScope( nodeGadget.node().ancestor( Gaffer.ScriptNode ) ) :
|
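The path-rebasing step of `__dropPaths()` in isolation, as a hedged sketch with invented scene paths; `subTree()` strips the root prefix so the stored paths stay relative to the filter's `roots`:
```python
# Hedged sketch of the rebasing step; the scene paths are invented, and building
# a PathMatcher from a plain list of strings is assumed to behave like the
# StringVectorData case in the diff.
import IECore

roots = IECore.PathMatcher( [ "/world/assets" ] )
dropped = IECore.PathMatcher( [ "/world/assets/chairA", "/world/assets/tableB", "/world/lights/key" ] )

relativePaths = IECore.PathMatcher()
for rootPath in roots.paths() :
	relativePaths.addPaths( dropped.subTree( rootPath ) )

print( relativePaths.paths() )  # expected: root-relative paths, e.g. "/chairA", "/tableB"
```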
Commented out test_squeeze() "4D split tensor, along the axis"
until final implementation of Allgatherv is available | @@ -168,18 +168,17 @@ class TestManipulations(unittest.TestCase):
self.assertTrue((result._DNDarray__array == data._DNDarray__array.squeeze()).all())
# 4D split tensor, along the axis
- # TODO: reinstate test of uneven dimensions distribution
+ # TODO: reinstate this test of uneven dimensions distribution
# after update to Allgatherv implementation
# data = ht.array(ht.random.randn(1, 4, 5, 1), split=1)
- data = ht.array(ht.random.randn(1, 12, 5, 1), split=1) # even distribution for up to 4 ranks
- result = ht.squeeze(data, axis=-1)
- self.assertIsInstance(result, ht.DNDarray)
- # TODO: the following works locally but not when distributed,
- #self.assertEqual(result.dtype, ht.float32)
- #self.assertEqual(result._DNDarray__array.dtype, torch.float32)
- self.assertEqual(result.shape, (1, 12, 5))
- self.assertEqual(result.lshape, (1, 12, 5))
- self.assertEqual(result.split, 1)
+ # result = ht.squeeze(data, axis=-1)
+ # self.assertIsInstance(result, ht.DNDarray)
+ # # TODO: the following works locally but not when distributed,
+ # #self.assertEqual(result.dtype, ht.float32)
+ # #self.assertEqual(result._DNDarray__array.dtype, torch.float32)
+ # self.assertEqual(result.shape, (1, 12, 5))
+ # self.assertEqual(result.lshape, (1, 12, 5))
+ # self.assertEqual(result.split, 1)
# 3D split tensor, across the axis
size = ht.MPI_WORLD.size * 2
|
Pin alembic
Summary: See comment on
Test Plan: Unit
Reviewers: dgibson, alangenfeld, prha | @@ -64,7 +64,7 @@ def get_version() -> str:
"Jinja2<3.0",
"PyYAML>=5.1",
# core (not explicitly expressed atm)
- "alembic>=1.2.1",
+ "alembic>=1.2.1,<1.6.3",
"croniter>=0.3.34",
"grpcio>=1.32.0", # ensure version we require is >= that with which we generated the grpc code (set in dev-requirements)
"grpcio-health-checking>=1.32.0",
|
[aten] remove variable set but never used warning
Summary:
Pull Request resolved:
Remove warning
```
caffe2/aten/src/ATen/native/cuda/BatchLinearAlgebra.cu(1400): warning: variable "info" was set but never used
```
Test Plan: CI | @@ -1397,7 +1397,6 @@ AT_ERROR("lu_solve: MAGMA library not found in "
int info_tmp = 0;
if (b.dim() == 2) {
- magma_int_t info = 0;
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
|
Travis: Improve testing organisation
Split the tests into their own jobs so it is easier to see which test is
failing; also, only run the pep8 and pylint tests with python 3.6, as it is
unnecessary to run them with both python versions. | language: python
python:
- - "2.7"
- "3.6"
+ - "2.7"
install:
- pip install nose
- pip install nose2
- pip install flake8
- pip install pylint
-
-script:
- git clone -v https://github.com/ARM-software/devlib.git /tmp/devlib && cd /tmp/devlib && python setup.py install
- cd $TRAVIS_BUILD_DIR && python setup.py install
- - nose2 -s $TRAVIS_BUILD_DIR/tests
- - cd /tmp && wa run $TRAVIS_BUILD_DIR/tests/travis/idle_agenda.yaml -v -d idle_workload
- - cd $TRAVIS_BUILD_DIR && ./dev_scripts/pylint wa
- - cd $TRAVIS_BUILD_DIR && ./dev_scripts/pep8 wa
+env:
+ global:
+ - PYLINT="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pylint wa"
+ - PEP8="cd $TRAVIS_BUILD_DIR && ./dev_scripts/pep8 wa"
+ - NOSETESTS="nose2 -s $TRAVIS_BUILD_DIR/tests"
+ - WORKLOAD="cd /tmp && wa run $TRAVIS_BUILD_DIR/tests/travis/idle_agenda.yaml -v -d idle_workload"
+ matrix:
+ - TEST=$PYLINT
+ - TEST=$PEP8
+ - TEST=$NOSETESTS
+ - TEST=$WORKLOAD
+script:
+ - echo $TEST && eval $TEST
+
+matrix:
+ exclude:
+ - python: "2.7"
+ env: TEST=$PYLINT
+ - python: "2.7"
+ env: TEST=$PEP8
|
Update specialtylenses.py
Corrected the comment with fa->f | @@ -207,10 +207,10 @@ class AchromatDoubletLens(MatrixGroup):
class SingletLens(MatrixGroup):
"""
- General singlet lens with an effective focal length of fa, back focal
- length of fb. The values fa and fb are used to validate the final focal lengths
+ General singlet lens with an effective focal length of f, back focal
+ length of fb. The values f and fb are used to validate the final focal lengths
and back focal lengths that are obtained from the combination of elements.
- Most manufacturer's specifiy 1% tolerance, so if fa is more than 1% different
+ Most manufacturer's specifiy 1% tolerance, so if f is more than 1% different
from the final focal length, a warning is raised.
Nomenclature from Thorlabs:
|
Added boiler_fuel ingest function for EIA923.
Ran pytests and boiler_fuel ingest function works. | @@ -921,6 +921,54 @@ def ingest_boiler_fuel_eia923(pudl_engine, eia923_dfs):
"""Ingest data on fuel consumption by boiler from EIA Form 923."""
pass
+ # This needs to be a copy of what we're passed in so we can edit it.
+ bf_df = eia923_dfs['boiler_fuel'].copy()
+
+ # Drop fields we're not inserting into the generation_fuel_eia923 table.
+ cols_to_drop = ['combined_heat_and_power_plant',
+ 'plant_name',
+ 'operator_name',
+ 'operator_id',
+ 'plant_state',
+ 'census_region',
+ 'nerc_region',
+ 'naics_code',
+ 'sector_number',
+ 'sector_name',
+ 'physical_unit_label',
+ 'total_fuel_consumption_quantity']
+ bf_df.drop(cols_to_drop, axis=1, inplace=True)
+
+ # Convert the EIA923 DataFrame from yearly to monthly records.
+ bf_df = yearly_to_monthly_eia923(bf_df, month_dict_2015_eia923)
+ # Replace the EIA923 NA value ('.') with a real NA value.
+ gf_df.replace(to_replace='^\.$', value=np.nan, regex=True, inplace=True)
+ # Remove "State fuel-level increment" records... which don't pertain to
+ # any particular plant (they have plant_id == operator_id == 99999)
+ # These don't occur in boiler_fuel tab, so should be able to leave this out
+ # gf_df = gf_df[gf_df.plant_id != 99999]
+
+ # Rename them to be consistent with the PUDL DB fields, if need be.
+ gf_df.rename(columns={
+ # EIA 923 PUDL DB field name
+ 'reported_prime_mover': 'prime_mover',
+ 'reported_fuel_type_code': 'fuel_type',
+ 'quantity_of_fuel_consumed': 'fuel_consumed_total',
+ 'mmbtuper_unit': 'fuel_mmbtu_per_unit'},
+ inplace=True)
+
+ gf_df.to_sql(name='boiler_fuel_eia923',
+ con=pudl_engine, index=False, if_exists='append',
+ dtype={'plant_id': Integer,
+ 'boiler_id': Integer,
+ 'prime_mover': String,
+ 'fuel_type': String,
+ 'fuel_consumed_total': Float,
+ 'fuel_mmbtu_per_unit': Float,
+ 'sulfur_content': Float,
+ 'ash_content': Float},
+ chunksize=1000)
+
def ingest_generator_eia923(pudl_engine, eia923_dfs):
"""Ingest data on electricity production by generator from EIA Form 923."""
|
Tiny cleanup in tutorial.rst
Apologies, this is just cleaning up a comment that was my own mess from my previous MR: | @@ -239,7 +239,6 @@ to return HTML code.
class CountryAutocomplete(autocomplete.Select2QuerySetView):
def get_result_label(self, item):
- # TODO TODO TODO PUT ESCAPING IN THIS WIDGET TO STOP XSS
return format_html('<img src="flags/{}.png"> {}', item.name, item.name)
|
Be more consistent with word choice.
Changed "method" and "way" to "solution" | @@ -9,15 +9,15 @@ for color in colors:
output += color + separator
print(output) # Prints 'red, green, blue, yellow, '
```
-However, this solution is flawed. The separator is still added to the last color, and it is slow.
+However, this solution is flawed. The separator is still added to the last element, and it is slow.
-The better way is to use `str.join`.
+The better solution is to use `str.join`.
```py
colors = ['red', 'green', 'blue', 'yellow']
separator = ", "
print(separator.join(colors)) # Prints 'red, green, blue, yellow'
```
-This method is much simpler, faster, and solves the problem of the extra separator. An important thing to note is that you can only `str.join` strings. For a list of ints,
+This solution is much simpler, faster, and solves the problem of the extra separator. An important thing to note is that you can only `str.join` strings. For a list of ints,
you must convert each element to a string before joining.
```py
integers = [1, 3, 6, 10, 15]
|
remove: fix help output
per
Fix | @@ -74,9 +74,6 @@ def add_parser(subparsers, parent_parser):
help="Force purge.",
)
remove_parser.add_argument(
- "targets",
- nargs="+",
- help="DVC-files to remove. Optional. "
- "(Finds all DVC-files in the workspace by default.)",
+ "targets", nargs="+", help="DVC-files to remove."
)
remove_parser.set_defaults(func=CmdRemove)
|
Fix mistake in docs error return values
Fixes | @@ -652,7 +652,7 @@ through defined error return values. For functions that return a Python object
``NULL`` pointer, so any function returning a Python object has a well-defined
error return value.
-While this is always the case for C functions, functions
+While this is always the case for Python functions, functions
defined as C functions or ``cpdef``/``@ccall`` functions can return arbitrary C types,
which do not have such a well-defined error return value. Thus, if an
exception is detected in such a function, a warning message is printed,
|
stage: add missing flush before project prompt
Tested-by: Mike Frysinger | @@ -75,6 +75,7 @@ The '%prog' command stages files to prepare the next commit.
out.nl()
out.prompt('project> ')
+ out.flush()
try:
a = sys.stdin.readline()
except KeyboardInterrupt:
|
Remove terminal size warning
In most cases the UI scales pretty well for things like demos (where the text
could potentially be bigger) | @@ -358,16 +358,6 @@ def main():
app.loop.run_forever()
else:
- if EventLoop.rows() < 43 or EventLoop.columns() < 132:
- print("")
- utils.warning(
- "conjure-up is best viewed with a terminal geometry "
- "of at least 132x43. Please increase the size of your "
- "terminal before starting conjure-up.")
- print("")
- acknowledge = input("Do you wish to continue? [Y/n] ")
- if 'N' in acknowledge or 'n' in acknowledge:
- sys.exit(1)
app.ui = ConjureUI()
EventLoop.build_loop(app.ui, STYLES,
|
[ENH] more informative error message from `mtype` if no mtype can be identified
This PR improves the error message for the utility function `mtype` in case no mtype can be identified.
The error message now reports which mtypes were checked for conformance, and why conformance with that specific mtype fails. | @@ -298,21 +298,41 @@ def mtype(
if as_scitype is not None:
m_plus_scitypes = [(x[0], x[1]) for x in m_plus_scitypes if x[1] in as_scitype]
- res = [
- m_plus_scitype[0]
- for m_plus_scitype in m_plus_scitypes
- if check_is_mtype(obj, mtype=m_plus_scitype[0], scitype=m_plus_scitype[1])
- ]
+ # collects mtypes that are tested as valid for obj
+ mtypes_positive = []
+
+ # collects error messages from mtypes that are tested as invalid for obj
+ mtypes_negative = dict()
+
+ for m_plus_scitype in m_plus_scitypes:
+ valid, msg, _ = check_is_mtype(
+ obj,
+ mtype=m_plus_scitype[0],
+ scitype=m_plus_scitype[1],
+ return_metadata=True,
+ )
+ if valid:
+ mtypes_positive += [m_plus_scitype[0]]
+ else:
+ mtypes_negative[m_plus_scitype[0]] = msg
- if len(res) > 1:
+ if len(mtypes_positive) > 1:
raise TypeError(
- f"Error in check_is_mtype, more than one mtype identified: {res}"
+ f"Error in check_is_mtype, more than one mtype identified:"
+ f" {mtypes_positive}"
)
- if len(res) < 1:
- raise TypeError("No valid mtype could be identified")
+ if len(mtypes_positive) < 1:
+ msg = ""
+ for mtype, error in mtypes_negative.items():
+ msg += f"{mtype}: {error}\r\n"
+ msg = (
+ f"No valid mtype could be identified for object of type {type(obj)}. "
+ f"Errors returned are as follows, in format [mtype]: [error message] \r\n"
+ ) + msg
+ raise TypeError(msg)
- return res[0]
+ return mtypes_positive[0]
def check_is_scitype(
|
Adds a default max-iterations of 100 in Estimate.add_gaugeoptimized.
In the GST overview tutorial on some machines, and perhaps elsewhere,
stage 2 of the CPTP gauge optimization just hangs, making decent sized steps
with little/no progress. Maybe there's an optimization stopping
condition that should be looked at in the future. | @@ -199,6 +199,9 @@ class Estimate(object):
gop["targetModel"] = self.models['target']
else: raise ValueError("Must supply 'targetModel' in 'goparams' argument")
+ if "maxiter" not in gop:
+ gop["maxiter"] = 100
+
gop['returnAll'] = True
_, gaugeGroupEl, last_gs = _gaugeopt_to_target(**gop)
gop['_gaugeGroupEl'] = gaugeGroupEl # an output stored here for convenience
|
Branding: gate sync via helper function
Sync may also be invoked with a command; avoid logic duplication. | @@ -240,6 +240,23 @@ class Branding(commands.Cog):
# Notify guild of new event ~ this reads the information that we cached above!
await self.send_info_embed(Channels.change_log)
+ async def synchronise(self) -> None:
+ """
+ Fetch the current event and delegate to `enter_event`.
+
+ This is a convenience wrapper to force synchronisation either via a command, or when the daemon starts
+ with an empty cache. It is generally only used in a recovery scenario. In the usual case, the daemon
+ already has an `Event` instance and can pass it to `enter_event` directly.
+ """
+ log.debug("Synchronise: fetching current event")
+
+ event = await self.repository.get_current_event()
+
+ if event is None:
+ log.error("Failed to fetch event ~ cannot synchronise!")
+ else:
+ await self.enter_event(event)
+
# endregion
# region: Daemon
@@ -323,14 +340,8 @@ class Branding(commands.Cog):
current_event = await self.cache_information.get("event_path")
if current_event is None: # Maiden case ~ first start or cache loss
- log.debug("Applying event immediately as cache is empty (indicating maiden case)")
-
- event = await self.repository.get_current_event()
-
- if event is None:
- log.warning("Failed to fetch event ~ cache will remain empty!")
- else:
- await self.enter_event(event)
+ log.debug("Event cache is empty (indicating maiden case), invoking synchronisation")
+ await self.synchronise()
now = datetime.utcnow()
|
FIX: fix aspect ratio + extents + limits from LiveGrid out of BEC
Closes
Closes | @@ -4,7 +4,7 @@ from bluesky.callbacks.scientific import PeakStats
from cycler import cycler
from io import StringIO
import itertools
-from itertools import chain
+import numpy as np
import matplotlib.pyplot as plt
import re
import sys
@@ -242,11 +242,29 @@ class BestEffortCallback(CallbackBase):
warn("Need both 'shape' and 'extents' in plan metadata to "
"create LiveGrid.")
else:
+ data_range = np.array([float(np.diff(e)) for e in extents])
+ y_step, x_step = data_range / [s - 1 for s in shape]
+ adjusted_extent = [extents[1][0] - x_step / 2,
+ extents[1][1] + x_step / 2,
+ extents[0][0] - y_step / 2,
+ extents[0][1] + y_step / 2]
for I_key, ax in zip(columns, axes):
+ # MAGIC NUMBERS based on what tacaswell thinks looks OK
+ data_aspect_ratio = np.abs(data_range[1]/data_range[0])
+ MAR = 2
+ if (1/MAR < data_aspect_ratio < MAR):
+ aspect = 'equal'
+ ax.set_aspect(aspect, adjustable='box-forced')
+ else:
+ aspect = 'auto'
+ ax.set_aspect(aspect, adjustable='datalim')
+
live_grid = LiveGrid(shape, I_key,
xlabel=x_key, ylabel=y_key,
- extent=list(chain(*extents[::-1])),
+ extent=adjusted_extent,
+ aspect=aspect,
ax=ax)
+
live_grid('start', self._start_doc)
live_grid('descriptor', doc)
self._live_grids[doc['uid']][I_key] = live_grid
|
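A minimal worked example of the extent adjustment introduced above: LiveGrid draws each grid point as one pixel, so the imshow extent is padded by half a step on every side to keep the outermost points from being clipped. The shape and extents values below are illustrative, not taken from the original report.

import numpy as np

# Illustrative scan: 5 rows (slow axis) by 11 columns (fast axis)
shape = (5, 11)
extents = [(0.0, 4.0), (-1.0, 1.0)]   # [slow-axis range, fast-axis range]

data_range = np.array([float(np.diff(e)) for e in extents])
y_step, x_step = data_range / [s - 1 for s in shape]

# Pad by half a step per side: [x_min, x_max, y_min, y_max]
adjusted_extent = [extents[1][0] - x_step / 2, extents[1][1] + x_step / 2,
                   extents[0][0] - y_step / 2, extents[0][1] + y_step / 2]
print(adjusted_extent)                # [-1.1, 1.1, -0.5, 4.5]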
Update ava_utils.py
fix a bug | @@ -42,6 +42,9 @@ def det2csv(info, dataset_len, results, custom_classes):
result = results[idx]
for label, _ in enumerate(result):
for bbox in result[label]:
+ if type(bbox) == paddle.Tensor:
+ bbox = bbox.numpy()
+
bbox_ = tuple(bbox.tolist())
if custom_classes is not None:
actual_label = custom_classes[label + 1]
|
llvm, composition: Do not memoize binary functions.
There already is a cache for that. | @@ -1178,17 +1178,11 @@ class Composition(Composition_Base, metaclass=ComponentsMeta):
# Compiled resources
self.__generated_node_wrappers = {}
- self.__compiled_node_wrappers = {}
self.__generated_execution = None
- self.__compiled_execution = None
- self.__compiled_run = None
-
+ self.__generated_run = None
self.__generated_sim_node_wrappers = {}
- self.__compiled_sim_node_wrappers = {}
self.__generated_simulation = None
self.__generated_sim_run = None
- self.__compiled_simulation = None
- self.__compiled_sim_run = None
self._compilation_data = self._CompilationData(owner=self)
@@ -4400,13 +4394,8 @@ class Composition(Composition_Base, metaclass=ComponentsMeta):
return self.__generated_node_wrappers[node]
def _get_bin_node(self, node):
- if node not in self.__compiled_node_wrappers:
wrapper = self._get_node_wrapper(node)
- bin_f = pnlvm.LLVMBinaryFunction.get(wrapper._llvm_function.name)
- self.__compiled_node_wrappers[node] = bin_f
- return bin_f
-
- return self.__compiled_node_wrappers[node]
+ return pnlvm.LLVMBinaryFunction.get(wrapper._llvm_function.name)
def _get_sim_node_wrapper(self, node):
if node not in self.__generated_sim_node_wrappers:
@@ -4421,13 +4410,8 @@ class Composition(Composition_Base, metaclass=ComponentsMeta):
return self.__generated_sim_node_wrappers[node]
def _get_sim_bin_node(self, node):
- if node not in self.__compiled_sim_node_wrappers:
wrapper = self._get_sim_node_wrapper(node)
- bin_f = pnlvm.LLVMBinaryFunction.get(wrapper._llvm_function.name)
- self.__compiled_sim_node_wrappers[node] = bin_f
- return bin_f
-
- return self.__compiled_sim_node_wrappers[node]
+ return pnlvm.LLVMBinaryFunction.get(wrapper._llvm_function.name)
@property
def _llvm_function(self):
@@ -4437,6 +4421,14 @@ class Composition(Composition_Base, metaclass=ComponentsMeta):
return self.__generated_execution
+ @property
+ def _llvm_run(self):
+ if self.__generated_run is None:
+ with pnlvm.LLVMBuilderContext() as ctx:
+ self.__generated_run = ctx.gen_composition_run(self)
+
+ return self.__generated_run
+
@property
def _llvm_simulation(self):
if self.__generated_simulation is None:
@@ -4453,23 +4445,11 @@ class Composition(Composition_Base, metaclass=ComponentsMeta):
return self.__generated_sim_run
-
def _get_bin_execution(self):
- if self.__compiled_execution is None:
- wrapper = self._llvm_function
- bin_f = pnlvm.LLVMBinaryFunction.get(wrapper.name)
- self.__compiled_execution = bin_f
-
- return self.__compiled_execution
+ return pnlvm.LLVMBinaryFunction.get(self._llvm_function.name)
def _get_bin_run(self):
- if self.__compiled_run is None:
- with pnlvm.LLVMBuilderContext() as ctx:
- wrapper = ctx.gen_composition_run(self)
- bin_f = pnlvm.LLVMBinaryFunction.get(wrapper.name)
- self.__compiled_run = bin_f
-
- return self.__compiled_run
+ return pnlvm.LLVMBinaryFunction.get(self._llvm_run.name)
def reinitialize(self, execution_context=NotImplemented):
if execution_context is NotImplemented:
|
Replace tf.maximum with znp.maximum.
Leave out module zfit.core.sample due to it being highly dependent on tensorflow. | @@ -30,7 +30,7 @@ def crystalball_func(x, mu, sigma, alpha, n):
lambda t: _powerlaw(b - t, a, -n),
lambda t: znp.exp(-0.5 * tf.square(t)),
values=t, value_safer=lambda t: tf.ones_like(t) * (b - 2))
- func = tf.maximum(func, tf.zeros_like(func))
+ func = znp.maximum(func, tf.zeros_like(func))
return func
|
Add trigger that ensures row in template_redacted is created
when adding new template through a migration | @@ -21,6 +21,32 @@ def upgrade():
;
""")
+ op.execute("""
+ create or replace function insert_redacted()
+ returns trigger AS
+ $$
+ BEGIN
+ INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
+ SELECT templates.id, FALSE, now(), templates.created_by_id
+ FROM templates
+ WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id);
+ RETURN NEW;
+ END;
+ $$ LANGUAGE plpgsql;
+ """)
+
+ op.execute("""
+ CREATE TRIGGER insert_template_redacted AFTER INSERT ON templates
+ FOR EACH ROW
+ EXECUTE PROCEDURE insert_redacted();
+ """)
+
def downgrade():
- pass
+ op.execute("""
+ drop trigger insert_template_redacted ON templates
+ """)
+
+ op.execute("""
+ drop function insert_redacted();
+ """)
|
Resolve conflict for merge
Typo correction | }
});
- // init validation reqirement at first time page load
+ // init validation requirement at first time page load
{% if SETTING.get('ldap_enabled') %}
$('#ldap_uri').prop('required', true);
$('#ldap_base_dn').prop('required', true);
|
[exceptions] set args[0] for Maestral exceptions
This is to conform with typical expectations that args[0] contains an error message string. | @@ -40,6 +40,7 @@ class MaestralApiError(Exception):
local_path: str | None = None,
local_path_from: str | None = None,
) -> None:
+ super().__init__(f"{title}. {message}")
self.title = title
self.message = message
self.dbx_path = dbx_path
@@ -48,7 +49,7 @@ class MaestralApiError(Exception):
self.local_path_from = local_path_from
def __str__(self) -> str:
- return ". ".join([self.title, self.message])
+ return f"{self.title}. {self.message}"
# ==== regular sync errors =============================================================
|
Adding Conv2DLSTM
Mostly ported from Haiku implementation. | @@ -31,6 +31,8 @@ from . import base
from . import initializers
from . import linear
+from jax import nn as jnn
+from jax import numpy as jnp
from jax import random
@@ -193,3 +195,75 @@ class GRUCell(RNNCellBase):
mem_shape = batch_dims + (size,)
return init_fn(rng, mem_shape)
+
+class Conv2DLSTM(RNNCellBase):
+ r"""A 2-D convolutional LSTM cell.
+
+ The implementation is based on xingjian2015convolutional.
+ Given x_t and the previous state (h_{t-1}, c_{t-1})
+ the core computes
+
+ .. math::
+
+ \begin{array}{ll}
+ i_t = \sigma(W_{ii} * x_t + W_{hi} * h_{t-1} + b_i) \\
+ f_t = \sigma(W_{if} * x_t + W_{hf} * h_{t-1} + b_f) \\
+ g_t = \tanh(W_{ig} * x_t + W_{hg} * h_{t-1} + b_g) \\
+ o_t = \sigma(W_{io} * x_t + W_{ho} * h_{t-1} + b_o) \\
+ c_t = f_t c_{t-1} + i_t g_t \\
+ h_t = o_t \tanh(c_t)
+ \end{array}
+
+ where * denotes the convolution operator;
+ i_t, f_t, o_t are input, forget and output gate activations,
+ and g_t is a vector of cell updates.
+
+ Notes:
+ Forget gate initialization:
+ Following jozefowicz2015empirical we add 1.0 to b_f
+ after initialization in order to reduce the scale of forgetting in
+ the beginning of the training.
+ """
+
+ def apply(self, carry, inputs, features, kernel_size):
+ """Constructs a convolutional LSTM.
+
+ Args:
+ carry: the hidden state of the Conv2DLSTM cell,
+ initialized using `Conv2DLSTM.initialize_carry`.
+ inputs: input data with dimensions (batch, spatial_dims..., features).
+ features: number of convolution filters.
+ kernel_size: shape of the convolutional kernel.
+ Returns:
+ A tuple with the new carry and the output.
+ """
+ c, h = carry
+ input_to_hidden = linear.Conv.partial(
+ features=4*features, kernel_size=kernel_size)
+
+ hidden_to_hidden = linear.Conv.partial(
+ features=4*features, kernel_size=kernel_size)
+
+ gates = input_to_hidden(inputs) + hidden_to_hidden(h)
+ i, g, f, o = jnp.split(gates, indices_or_sections=4, axis=-1)
+
+ f = jnn.sigmoid(f + 1)
+ new_c = f * c + jnn.sigmoid(i) * jnp.tanh(g)
+ new_h = jnn.sigmoid(o) * jnp.tanh(new_c)
+ return (new_c, new_h), new_h
+
+ @staticmethod
+ def initialize_carry(rng, batch_dims, size, init_fn=initializers.zeros):
+ """initialize the RNN cell carry.
+
+ Args:
+ rng: random number generator passed to the init_fn.
+ batch_dims: a tuple providing the shape of the batch dimensions.
+ size: the input_shape + (features,).
+ init_fn: initializer function for the carry.
+ Returns:
+ An initialized carry for the given RNN cell.
+ """
+ key1, key2 = random.split(rng)
+ mem_shape = batch_dims + size
+ return init_fn(key1, mem_shape), init_fn(key2, mem_shape)
|
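As a hedged companion to the Conv2DLSTM entry above, the snippet below restates only the gate arithmetic from the docstring in plain jax.numpy, independent of the (pre-Linen) flax Module API used in the diff; the function name and the assumption that the two convolutions have already been summed into `gates` are illustrative.

import jax.numpy as jnp
from jax import nn as jnn

def conv2dlstm_gates(carry, gates):
    # `gates` holds W_i * x_t + W_h * h_{t-1} with 4 * features channels
    # stacked along the last axis in (i, g, f, o) order, as in the diff.
    c, h = carry
    i, g, f, o = jnp.split(gates, indices_or_sections=4, axis=-1)
    f = jnn.sigmoid(f + 1)                       # forget-gate bias of +1
    new_c = f * c + jnn.sigmoid(i) * jnp.tanh(g)
    new_h = jnn.sigmoid(o) * jnp.tanh(new_c)
    return (new_c, new_h), new_h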
Update pyproject.toml
The build-system definition was out of date so updated this
according to
Also added some extra metadata to the package. | [build-system]
-requires = ["poetry>=0.12"]
-build-backend = "poetry.masonry.api"
+requires = ["poetry_core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 79
@@ -19,10 +19,15 @@ tabindex_no_positive = true
[tool.poetry]
name = "grand-challenge.org"
version = "0.1.0"
-description = "A framework to objectively evaluate the performance of machine learning algorithms in biomedical imaging"
+description = "A platform for end-to-end development of machine learning solutions in biomedical imaging"
+license = "Apache-2.0"
authors = ["James Meakin <[email protected]>"]
+readme = "README.rst"
+homepage = "https://grand-challenge.org/"
+repository = "https://github.com/comic/grand-challenge.org/"
+documentation = "https://comic.github.io/grand-challenge.org/"
packages = [
- { include = "app" },
+ { include = "grandchallenge", from = "app" },
]
[tool.poetry.dependencies]
|
postprocess: use a different function to detect ffmpeg.exe
the previous function said it was a file but there was none | @@ -28,7 +28,7 @@ class postprocess:
break
if self.detect is None and platform.system() == "Windows":
path = pathlib.Path(sys.executable).parent / "ffmpeg.exe"
- if path.is_file:
+ if os.path.isfile(path):
self.detect = path
def merge(self):
|
[commands] Fix unsupported discord converters in hybrid commands
These are things that are supported in regular commands but not in
application commands, such as discord.Colour, discord.Game, or
discord.Emoji. | @@ -44,7 +44,7 @@ from discord import app_commands
from discord.utils import MISSING, maybe_coroutine, async_all
from .core import Command, Group
from .errors import BadArgument, CommandRegistrationError, CommandError, HybridCommandError, ConversionError
-from .converter import Converter, Range, Greedy, run_converters
+from .converter import Converter, Range, Greedy, run_converters, CONVERTER_MAPPING
from .parameters import Parameter
from .flags import is_flag, FlagConverter
from .cog import Cog
@@ -117,6 +117,14 @@ def required_pos_arguments(func: Callable[..., Any]) -> int:
def make_converter_transformer(converter: Any) -> Type[app_commands.Transformer]:
+ try:
+ module = converter.__module__
+ except AttributeError:
+ pass
+ else:
+ if module is not None and (module.startswith('discord.') and not module.endswith('converter')):
+ converter = CONVERTER_MAPPING.get(converter, converter)
+
async def transform(cls, interaction: discord.Interaction, value: str) -> Any:
try:
if inspect.isclass(converter) and issubclass(converter, Converter):
@@ -219,7 +227,7 @@ def replace_parameter(
if renames:
app_commands.rename(**renames)(callback)
- elif is_converter(converter):
+ elif is_converter(converter) or converter in CONVERTER_MAPPING:
param = param.replace(annotation=make_converter_transformer(converter))
elif origin is Union:
if len(args) == 2 and args[-1] is _NoneType:
|
retention: Remove two redundant comments.
These two identical comments don't contribute anything useful and seem
just out of place at this point. | @@ -189,7 +189,6 @@ def move_expired_messages_to_archive_by_recipient(
) -> int:
assert message_retention_days != -1
- # This function will archive appropriate messages and their related objects.
query = SQL(
"""
INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
@@ -222,7 +221,6 @@ def move_expired_personal_and_huddle_messages_to_archive(
assert message_retention_days != -1
check_date = timezone_now() - timedelta(days=message_retention_days)
- # This function will archive appropriate messages and their related objects.
recipient_types = (Recipient.PERSONAL, Recipient.HUDDLE)
# Archive expired personal and huddle Messages in the realm, including cross-realm messages.
|
ENH: Alignments.information_plot produces a drawable displaying information
and gaps per alignment position, plus smoothing of these. | @@ -2492,6 +2492,69 @@ class AlignmentI(object):
result = calculator.get_pairwise_distances()
return result
+ def information_plot(self, width=None, height=None, window=None,
+ stat='median', include_gap=True):
+ """plot information per position
+
+ Parameters
+ ----------
+ width : int
+ figure width in pixels
+ height : int
+ figure height in pixels
+ window : int or None
+ used for smoothing line, defaults to sqrt(length)
+ stat : str
+ 'mean' or 'median, used as the summary statistic for each window
+ include_gap
+ whether to include gap counts, shown on right y-axis
+ """
+ from plotly import graph_objs as go
+ from cogent3.draw.drawable import Drawable
+
+ window = window if window else numpy.sqrt(len(self))
+ window = int(window)
+ y = self.entropy_per_pos()
+ y = y.max() - y # convert to information
+ stats = {'mean': numpy.mean, 'median': numpy.median}
+ if stat not in stats:
+ raise ValueError('stat must be either "mean" or "median"')
+ calc_stat = stats[stat]
+ num = len(y) - window
+ v = [calc_stat(y[i: i + window]) for i in range(0, num)]
+ x = numpy.arange(num)
+ x += window // 2 # shift x-coordinates to middle of window
+ trace_line = go.Scatter(x=x, y=v, mode='lines',
+ name='smoothed',
+ line=dict(shape='spline', smoothing=1.3))
+ trace_marks = go.Scatter(x=numpy.arange(y.shape[0]),
+ y=y,
+ mode='markers',
+ opacity=0.5,
+ name='per position')
+ layout = dict(title='Information per position',
+ xaxis=dict(title='Position'),
+ width=width,
+ height=height,
+ yaxis=dict(title=f'{stat} Information (window={window})'),
+ showlegend=True,
+ yaxis2=dict(title='Count', side='right',
+ overlaying='y'))
+ if include_gap:
+ gap_counts = self.count_gaps_per_pos()
+ y = [calc_stat(gap_counts[i: i + window]) for i in range(0, num)]
+ trace_g = go.Scatter(x=x, y=y, yaxis='y2', name='Gaps', mode='lines',
+ line=dict(shape='spline', smoothing=1.3))
+ traces = [trace_marks, trace_line, trace_g]
+ else:
+ layout.pop('yaxis2')
+ traces = [trace_marks, trace_line]
+
+ draw = Drawable(title='Information per position')
+ draw.traces.extend(traces)
+ draw.layout.update(layout)
+ return draw
+
def _one_length(seqs):
"""raises ValueError if seqs not all same length"""
|
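The windowed smoothing performed by `information_plot` above can be reproduced in isolation. The sketch below assumes the per-position entropies are already available as a NumPy array (random numbers stand in for real alignment data) and mirrors the information conversion, the sqrt-length window default and the half-window x-shift from the diff.

import numpy as np

def smoothed_information(entropy, window=None, stat="median"):
    entropy = np.asarray(entropy, dtype=float)
    window = int(window or np.sqrt(len(entropy)))
    info = entropy.max() - entropy                # entropy -> information
    calc_stat = {"mean": np.mean, "median": np.median}[stat]
    num = len(info) - window
    smoothed = [calc_stat(info[i: i + window]) for i in range(num)]
    x = np.arange(num) + window // 2              # centre of each window
    return x, np.array(smoothed)

x, y = smoothed_information(np.random.uniform(0.0, 2.0, size=300), window=20)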
Fixed patch
Disable hardcoded cxx standard, conan will provide the standard
Link against all requirements
Install target so it can be packaged | ---- CMakeLists.txt 2019-12-25 22:05:35.580261859 +0100
-+++ CMakeLists.txt.old 2019-12-25 21:49:22.536528035 +0100
-@@ -34,6 +34,7 @@
+diff --git a/CMakeLists.txt.old b/CMakeLists.txt
+index 119d390..ce2ac92 100644
+--- a/CMakeLists.txt.old
++++ b/CMakeLists.txt
+@@ -34,7 +34,6 @@ endif()
# set CXX standard
set(CMAKE_CXX_STANDARD_REQUIRED True)
-+set(CMAKE_CXX_STANDARD 11)
+-set(CMAKE_CXX_STANDARD 11)
if (${COMPILER_IS_NVCC})
# GNU CXX extensions are not supported by nvcc
set(CMAKE_CXX_EXTENSIONS OFF)
-@@ -73,7 +74,6 @@
+@@ -74,6 +73,7 @@ endif()
add_library(backward ${libtype} backward.cpp)
target_compile_definitions(backward PUBLIC ${BACKWARD_DEFINITIONS})
target_include_directories(backward PUBLIC ${BACKWARD_INCLUDE_DIRS})
--conan_target_link_libraries(backward)
++conan_target_link_libraries(backward)
###############################################################################
# TESTS
+@@ -125,3 +125,9 @@ install(
+ FILES "BackwardConfig.cmake"
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/backward
+ )
++install(
++ TARGETS backward
++ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
++ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
++ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
++)
|
Improve README.md
add API to features
make Entity Relationship generation command more generic | @@ -17,6 +17,7 @@ A PowerDNS web interface with advanced features.
- Dashboard and pdns service statistics
- DynDNS 2 protocol support
- Edit IPv6 PTRs using IPv6 addresses directly (no more editing of literal addresses!)
+- limited API for manipulating zones and records
### Running PowerDNS-Admin
There are several ways to run PowerDNS-Admin. Following is a simple way to start PowerDNS-Admin with docker in development environment which has PowerDNS-Admin, PowerDNS server and MySQL Back-End Database.
@@ -172,5 +173,9 @@ docker-compose up -d
```
```
-eralchemy -i 'mysql://powerdns_admin:changeme@'$(docker inspect powerdns-admin-mysql|jq -jr '.[0].NetworkSettings.Networks.powerdnsadmin_default.IPAddress')':3306/powerdns_admin' -o /tmp/output.pdf
+source .env
+```
+
+```
+eralchemy -i 'mysql://${PDA_DB_USER}:${PDA_DB_PASSWORD}@'$(docker inspect powerdns-admin-mysql|jq -jr '.[0].NetworkSettings.Networks.powerdnsadmin_default.IPAddress')':3306/powerdns_admin' -o /tmp/output.pdf
```
|
Fixed issue of complete LTP results not being downloaded
Copied the LTP result to the "TestExecution.log" file, which is downloaded by default by the LISAv2 framework. | @@ -134,6 +134,11 @@ if grep FAIL "$LTP_OUTPUT" ; then
echo "Failed Tests:" >> ~/summary.log
grep FAIL "$LTP_OUTPUT" | cut -d':' -f 2- >> ~/summary.log
fi
-
+echo "-----------LTP RESULTS----------------"
+cat $LTP_RESULTS >> ~/TestExecution.log
+echo "--------------------------------------"
+echo "-----------LTP OUTPUT----------------"
+cat $LTP_OUTPUT >> ~/TestExecution.log
+echo "--------------------------------------"
SetTestStateCompleted
exit 0
|
remove PropertyStorage from VideoPlayer, onComplete breaks zero-volume,
and it's generally up to the user to save volume in local storage | @@ -22,8 +22,6 @@ Item {
property real buffered; ///< how much content to buffer in seconds
property real startPosition; ///< second at which the video should start playback
- PropertyStorage { id: volumeStorage; name: "volume"; defaultValue: 1.0; }
-
///@private
constructor: {
this.impl = null
@@ -124,7 +122,6 @@ Item {
else if (this.volume < 0.0)
this.volume = 0.0;
- volumeStorage.value = this.volume
var player = this._getPlayer()
if (player)
player.setVolume(this.volume)
@@ -281,8 +278,6 @@ Item {
///@private
onCompleted: {
- this.volume = +(volumeStorage.value)
-
var player = this._getPlayer()
if (player)
player.setBackgroundColor(this.backgroundColor)
|
Fix documentation for mysql processlist
Fix code block and indentation | @@ -1965,6 +1965,9 @@ def processlist(**connection_args):
"SHOW FULL PROCESSLIST".
Returns: a list of dicts, with each dict representing a process:
+
+ .. code-block:: python
+
{'Command': 'Query',
'Host': 'localhost',
'Id': 39,
|
fix parsing error
test removing dependencies | @@ -22,20 +22,15 @@ jobs:
- uses: actions/checkout@v2
with:
clean: false
- - name: Set up Python
- uses: actions/setup-python@v2
- with:
- python-version: 3.8
- name: Install dependencies
run: |
sudo dpkg --add-architecture i386
sudo apt-get update
echo steam steam/question select "I AGREE" | sudo debconf-set-selections
echo steam steam/license note '' | sudo debconf-set-selections
- sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install steamcmd dos2unix lib32z1 libncurses5:i386 libbz2-1.0:i386 lib32gcc1 lib32stdc++6 libtinfo5:i386 libcurl3-gnutls:i386
+ sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install steamcmd dos2unix
/usr/games/steamcmd +login anonymous +force_install_dir ./tf2 +download_depot 232250 232256 +quit
- export VPK_LINUX=$(find "${HOME}" -type f -iname "vpk_linux32" -print0 | head -zn 1)
- echo $VPK_LINUX
+ export VPK_LINUX=$(find "${HOME}" -type f -iname "vpk_linux32" -print | head -n 1)
export VALVE_LIB_DIR=$(dirname "${VPK_LINUX}")
sudo bash -c 'cat <<\EOF >> /usr/local/bin/vpk
#!/bin/bash
|
Use and support returned acls from REST APIs
Cache the acls in cached_acl so we don't have to ask the REST API twice,
once for authN and once for authZ.
We might also have to check the REST API when rest eauth is used
with eauth_acl_module.
This fixes | @@ -31,6 +31,8 @@ log = logging.getLogger(__name__)
__virtualname__ = "rest"
+cached_acl = {}
+
def __virtual__():
return __virtualname__
@@ -61,8 +63,39 @@ def auth(username, password):
if result["status"] == 200:
log.debug("eauth REST call returned 200: %s", result)
if result["dict"] is not None:
- return result["dict"]
+ cached_acl[username] = result["dict"]
return True
else:
log.debug("eauth REST call failed: %s", result)
return False
+
+
+def acl(username, **kwargs):
+ """
+ REST authorization
+ """
+ salt_eauth_acl = __opts__["external_auth"]["rest"].get(username, [])
+ log.debug("acl from salt for user %s: %s", username, salt_eauth_acl)
+
+ eauth_rest_acl = cached_acl.get(username, [])
+ log.debug("acl from cached rest for user %s: %s", username, eauth_rest_acl)
+ # This might be an ACL only call with no auth before, so check the rest api
+ # again
+ if not eauth_rest_acl:
+ # Update cached_acl from REST API
+ result = auth(username, kwargs["password"])
+ log.debug("acl rest result: %s", result)
+ if result:
+ eauth_rest_acl = cached_acl.get(username, [])
+ log.debug("acl from rest for user %s: %s", username, eauth_rest_acl)
+
+ merged_acl = salt_eauth_acl + eauth_rest_acl
+
+ log.debug("acl from salt and rest merged for user %s: %s", username, merged_acl)
+ # We have to make the .get's above return [] since we can't merge a
+ # possible list and None. So if merged_acl is falsey we return None so
+ # other eauth's can return an acl.
+ if not merged_acl:
+ return None
+ else:
+ return merged_acl
|
update dead link
Link to playwright list of devices was dead, I've replaced it with the correct one (I already replaced it in a previous PR, but the link was appearing twice; here I'm updating the second dead link) | @@ -39,7 +39,7 @@ class Devices(LibraryComponent):
"""Get a single device descriptor with name exactly matching name.
``name`` Given name of the requested device. See Playwright's
- [https://github.com/Microsoft/playwright/blob/master/src/server/deviceDescriptors.ts | deviceDescriptors.ts]
+ [https://github.com/microsoft/playwright/blob/master/packages/playwright-core/src/server/deviceDescriptorsSource.json|deviceDescriptorsSource.json]
for a formatted list.
Allows a concise syntax to set website testing values to exact matches of specific
|
fixing import
missed last import in copy/paste from master. | @@ -7,7 +7,7 @@ from plexapi.base import Playable, PlexPartialObject
from plexapi.exceptions import BadRequest
from plexapi.mixins import AdvancedSettingsMixin, ArtUrlMixin, ArtMixin, BannerMixin, PosterUrlMixin, PosterMixin
from plexapi.mixins import SplitMergeMixin, UnmatchMatchMixin
-from plexapi.mixins import CollectionMixin, CountryMixin, DirectorMixin, GenreMixin, LabelMixin, ProducerMixin
+from plexapi.mixins import CollectionMixin, CountryMixin, DirectorMixin, GenreMixin, LabelMixin, ProducerMixin, WriterMixin
class Video(PlexPartialObject):
|
test_domain_fact_from_file() test was too fragile and dependent
on some implicit state which might change in the future. | @@ -32,9 +32,10 @@ class TestDomain(unittest.TestCase):
@patch('jnpr.junos.Device.execute')
def test_domain_fact_from_file(self, mock_execute):
+ self.dev.facts._cache['hostname'] = 'r0'
mock_execute.side_effect = self._mock_manager_domain_file
self.assertEqual(self.dev.facts['domain'],'juniper.net')
- self.assertEqual(self.dev.facts['fqdn'],None)
+ self.assertEqual(self.dev.facts['fqdn'],'r0.juniper.net')
def _read_file(self, fname):
from ncclient.xml_ import NCElement
|
Use update instead of patch for update user
This is required in order for update user
phones/organizations/addresses/... clear to work | @@ -7048,7 +7048,7 @@ def doUpdateUser(users, i):
body[u'emails'] = [{u'type': u'custom', u'customType': u'former_employee', u'primary': False, u'address': user_primary}]
sys.stdout.write(u'updating user %s...\n' % user)
if body:
- callGAPI(cd.users(), u'patch', userKey=user, body=body)
+ callGAPI(cd.users(), u'update', userKey=user, body=body)
if admin_body:
callGAPI(cd.users(), u'makeAdmin', userKey=user, body=admin_body)
|
Update nextflow.config
added workflow parameter and removed personal information | params{
- DCM4CHEBin = "/home/kmit/pavan/dcm4che-5.22.5/bin"
- SrcAet = "[email protected]:4243"
- QueryAet = "niffler:10244"
- DestAet = "niffler"
+ workflow = 1
+
+ DCM4CHEBin = "/path/to/DCM4CHE/bin"
+ SrcAet = "SOURCEAET@IP:PORT"
+ QueryAet = "QUERYAET:PORT"
+ DestAet = "DESTAET"
NightlyOnly = false
StartHour = 19
EndHour = 7
@@ -10,7 +12,7 @@ params{
MaxNifflerProcesses = 1
FilePath ="{00100020}/{0020000D}/{0020000E}/{00080018}.dcm"
- CsvFile ="/home/kmit/pavan/Niffler/modules/cold-extraction/unit_test_read_csv.csv"
+ CsvFile ="path/to/csvfile"
NumberOfQueryAttributes = 2
FirstAttr = "PatientID"
FirstIndex = 0
@@ -22,7 +24,7 @@ params{
SendEmail =true
YourEmail ="[email protected]"
- OutputDirectory= "/home/kmit/pavan"
+ OutputDirectory= "path/to/outputDirectory"
SplitIntoChunks= 1
PrintImages=true
CommonHeadersOnly=false
@@ -33,5 +35,5 @@ params{
SpecificHeadersOnly=false
Featureset_File_for_png_extraction=""
- Featureset_File_for_suvpar="/home/kmit/pavan/Niffler/modules/suvpar/featureset.txt"
+ Featureset_File_for_suvpar="/path/to/featureset"
}
|
Fix checked column name
The attribute `self.steady_u` was previously set if `"fit_steady_s"` was
a column in `adata.var`. The same is true the other way around. | @@ -778,9 +778,9 @@ class BaseDynamics:
self.t_ = adata.var["fit_t_"][idx]
self.steady_state_ratio = self.gamma / self.beta
- if "fit_steady_s" in adata.var.keys():
- self.steady_u = adata.var["fit_steady_u"][idx]
if "fit_steady_u" in adata.var.keys():
+ self.steady_u = adata.var["fit_steady_u"][idx]
+ if "fit_steady_s" in adata.var.keys():
self.steady_s = adata.var["fit_steady_s"][idx]
if "fit_pval_steady" in adata.var.keys():
self.pval_steady = adata.var["fit_pval_steady"][idx]
|
Update config.py
removed connectors_config.accessed_keys and made it more generic to find correct type | @@ -324,9 +324,8 @@ class ConfigLoader(object):
options = {}
connectors_config = self.get_directory_connector_configs()
- if ( 'okta' in connectors_config.value) and (connector_name == 'ldap'):
- if connectors_config.accessed_keys is not None:
- raise AssertionException("Failed to match request : for Okta Connector type receiving LDAP as input type from commnand line")
+ if connector_name != 'csv' and connector_name not in connectors_config.value:
+ raise AssertionException("Config file must be specified for connector type :: '{}'".format(connector_name))
if connectors_config is not None:
connector_item = connectors_config.get_list(connector_name, True)
|
fix(check): filter out comment message when checking
fix | @@ -81,15 +81,23 @@ class Check:
# Get commit message from file (--commit-msg-file)
if self.commit_msg_file:
with open(self.commit_msg_file, "r", encoding="utf-8") as commit_file:
- commit_title = commit_file.readline()
- commit_body = commit_file.read()
+ msg = commit_file.read()
+ msg = self._filter_comments(msg)
+ msg = msg.lstrip("\n")
+ commit_title = msg.split("\n")[0]
+ commit_body = "\n".join(msg.split("\n")[1:])
return [git.GitCommit(rev="", title=commit_title, body=commit_body)]
elif self.commit_msg:
+ self.commit_msg = self._filter_comments(self.commit_msg)
return [git.GitCommit(rev="", title="", body=self.commit_msg)]
# Get commit messages from git log (--rev-range)
return git.get_commits(end=self.rev_range)
+ def _filter_comments(self, msg: str) -> str:
+ lines = [line for line in msg.split('\n') if not line.startswith('#')]
+ return "\n".join(lines)
+
@staticmethod
def validate_commit_message(commit_msg: str, pattern: str) -> bool:
if commit_msg.startswith("Merge") or commit_msg.startswith("Revert"):
|
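A self-contained sketch (not the commitizen code itself) of the comment-filtering behaviour added above: lines beginning with '#', as written into the message file by git, are dropped before the title/body split.

def filter_comments(msg: str) -> str:
    # Drop every line that starts with '#' (git's default comment character).
    lines = [line for line in msg.split("\n") if not line.startswith("#")]
    return "\n".join(lines)

raw = (
    "fix(check): handle comment lines\n"
    "# Please enter the commit message for your changes.\n"
    "# Lines starting with '#' will be ignored.\n"
    "\n"
    "Longer description of the change.\n"
)
msg = filter_comments(raw).lstrip("\n")
title, body = msg.split("\n")[0], "\n".join(msg.split("\n")[1:])
print(title)   # -> fix(check): handle comment lines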
Remove typo in plot_pacf example
example contained "sm.graphics.tsa.plot_acf", should be 'sm.graphics.tsa.plot_pacf'. (The corresponding 'plots/graphics_tsa_plot_pacf.py' already looks fine.) | @@ -331,7 +331,7 @@ def plot_pacf(
>>> dta = sm.datasets.sunspots.load_pandas().data
>>> dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
>>> del dta["YEAR"]
- >>> sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40)
+ >>> sm.graphics.tsa.plot_pacf(dta.values.squeeze(), lags=40)
>>> plt.show()
.. plot:: plots/graphics_tsa_plot_pacf.py
|
typo: updating type check on rc
Closes: | - name: fail on unregistered red hat rhcs linux
fail:
msg: "You must register your machine with subscription-manager"
- when: subscription.rc != '0'
+ when: subscription.rc != 0
- name: fail on unsupported distribution for ubuntu cloud archive
fail:
|
Update ncbi-covid-19.yaml
* Update ncbi-covid-19.yaml
Updated tags to improve search for NCBI datasets, other minor updates
* Update ncbi-covid-19.yaml
* Add NCBI acronym
* Add file types | Name: COVID-19 Genome Sequence Dataset
-Description: A centralized sequence repository for all strains of novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information. Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for anaylsis.
+Description: A centralized sequence repository for all strains of novel corona virus (SARS-CoV-2) submitted to the National Center for Biotechnology Information (NCBI). Included are both the original sequences submitted by the principal investigator as well as SRA-processed sequences that require the SRA Toolkit for anaylsis.
Documentation: https://www.ncbi.nlm.nih.gov/sra/docs/sra-aws-download/
Contact: https://support.nlm.nih.gov/support/create-case/
ManagedBy: "[National Library of Medicine (NLM)](http://nlm.nih.gov/)"
-UpdateFrequency: Weekly
+UpdateFrequency: TBD
Tags:
- aws-pds
- bioinformatics
- biology
- coronavirus
- COVID-19
+ - fastq
+ - bam
+ - cram
- genomic
- genetic
- health
@@ -19,7 +22,7 @@ Tags:
- virus
License: "[NIH Genomic Data Sharing Policy](https://osp.od.nih.gov/scientific-sharing/genomic-data-sharing/)"
Resources:
- - Description: "Genomic sequence reads of COVID-19 and related coronaviridae, organized by NCBI accession. Files in the `sra-src` folder are in FASTQ, BAM, or CRAM format (original submission); files in the `run` folder are in .sra format and require the [SRA Toolkit](https://github.com/ncbi/sra-tools/wiki/01.-Downloading-SRA-Toolkit)"
+ - Description: "Genomic sequence reads of SARS-CoV-2 and related coronaviridae, organized by NCBI accession. Files in the `sra-src` folder are in FASTQ, BAM, or CRAM format (original submission); files in the `run` folder are in .sra format and require the [SRA Toolkit](https://github.com/ncbi/sra-tools/wiki/01.-Downloading-SRA-Toolkit)"
ARN: arn:aws:s3:::sra-pub-sars-cov2
Region: us-east-1
Type: S3 Bucket
|
tests: set MTU to 1400 on test node interfaces
In the environment we were testing on, MTU was set to 1500 which causes
download failures of our yum repos. There might be a better way to set
this instead of doing it here in ansible. | group: root
when: not is_atomic
+ - name: set MTU on eth0
+ command: "ifconfig eth0 mtu 1400 up"
+
+ - name: set MTU on eth1
+ command: "ifconfig eth1 mtu 1400 up"
+
- hosts: mons
gather_facts: false
become: yes
when:
- not is_atomic
+ - name: set MTU on eth2
+ command: "ifconfig eth2 mtu 1400 up"
+
- hosts: mdss:rgws:clients
gather_facts: false
become: yes
|
Channel Stats Event
Added event stats for channel actions | @@ -257,9 +257,10 @@ class Channel(models.Model):
if self.pk and Channel.objects.filter(pk=self.pk).exists():
original_node = Channel.objects.get(pk=self.pk)
- if original_node is None:
newrelic.agent.record_custom_metric('Custom/ChannelStats/NumCreatedChannels', 1)
+ record_channel_stats(original_node is None)
+
super(Channel, self).save(*args, **kwargs)
# Check if original thumbnail is no longer referenced
@@ -299,6 +300,17 @@ class Channel(models.Model):
verbose_name = _("Channel")
verbose_name_plural = _("Channels")
+
+def record_channel_stats(is_create):
+ """
+ :param is_create: Whether action is channel creation.
+ """
+ if is_create:
+ newrelic.agent.record_custom_event("ChannelStats", {"action": "Create"})
+ # else:
+ # Called three times for create, need to find out why
+ # newrelic.agent.record_custom_event("ChannelStats", {"action": "Update"})
+
class ContentTag(models.Model):
id = UUIDField(primary_key=True, default=uuid.uuid4)
tag_name = models.CharField(max_length=30)
|
Update _assemble
src is set to default dst; components kw arg is not used anymore. | @@ -509,8 +509,6 @@ class Batch:
"""
to_act = bool(p is None or np.random.binomial(1, p))
- dst = src if dst is None else dst
-
if isinstance(src, list) and len(src) > 1 and isinstance(dst, (list, tuple)) and len(src) == len(dst):
return tuple([self._apply_transform(ix, func, to_act, *args, src=src_component,
dst=dst_component, use_self=use_self, **kwargs)
@@ -771,7 +769,12 @@ class Batch:
traceback.print_tb(all_errors[0].__traceback__)
raise RuntimeError("Could not assemble the batch")
if dst is None:
- dst = kwargs.get('components', self.components)
+ dst_default = kwargs.get('dst_default', 'src')
+ if dst_default == 'src':
+ dst = kwargs.get('src')
+ elif dst_default == 'components':
+ dst = self.components
+
if not isinstance(dst, (list, tuple, np.ndarray)):
dst = [dst]
|
Python API: add a ".text" property on analysis units
TN: | @@ -642,6 +642,11 @@ class AnalysisUnit(object):
_unit_last_token(self._c_value, ctypes.byref(result))
return result._wrap()
+ @property
+ def text(self):
+ ${py_doc('langkit.unit_text', 8)}
+ return Token.text_range(self.first_token, self.last_token)
+
@property
def token_count(self):
${py_doc('langkit.unit_token_count', 8)}
|
Prepare 2.7.2rc0.
[ci skip-rust]
[ci skip-build-wheels] | # 2.7.x Stable Releases
+## 2.7.2rc0 (Oct 28, 2021)
+
+### Performance
+
+* Use `--layout=packed` for all monolithic resolves. (cherrypick of #13400) ([#13402](https://github.com/pantsbuild/pants/pull/13402))
+
## 2.7.1 (Oct 27, 2021)
The second stable release of the `2.7.x` series, with no changes since the previous release candidate!
|
Code review: improve tests
`test_index_exists`: remove unnecessary cleanup boilerplate (index is
cleaned up by `tearDown()` method)
`test_indices_refresh`: remove unnecessary try/finally block (index is
deleted after test)
`test_index_put_alias_flips_existing`: replace boilerplate with new
contextmanager | @@ -161,11 +161,8 @@ class TestElasticManageAdapter(AdapterWithIndexTestCase):
def test_index_exists(self):
self.assertFalse(self.adapter.index_exists(self.index))
self.adapter.index_create(self.index)
- try:
self.adapter.indices_refresh([self.index])
self.assertTrue(self.adapter.index_exists(self.index))
- finally:
- self.adapter.index_delete(self.index)
def test_cluster_health(self):
self.assertIn("status", self.adapter.cluster_health())
@@ -361,26 +358,20 @@ class TestElasticManageAdapter(AdapterWithIndexTestCase):
def test_indices_refresh(self):
doc_adapter = TestDocumentAdapter()
- def set_refresh_interval(value):
- self.adapter._index_put_settings(
- doc_adapter.index_name,
- {"index.refresh_interval": value}
- )
-
def get_search_hits():
return doc_adapter.search({})["hits"]["hits"]
with temporary_index(doc_adapter.index_name, doc_adapter.type, doc_adapter.mapping):
# Disable auto-refresh to ensure the index doesn't refresh between our
# upsert and search (which would cause this test to fail).
- set_refresh_interval("-1")
- try:
+ self.adapter._index_put_settings(
+ doc_adapter.index_name,
+ {"index.refresh_interval": "-1"}
+ )
doc_adapter.upsert(TestDoc("1", "test"))
self.assertEqual([], get_search_hits())
self.adapter.indices_refresh([doc_adapter.index_name])
docs = [h["_source"] for h in get_search_hits()]
- finally:
- set_refresh_interval("5s") # set it back to previous value
self.assertEqual([{"_id": "1", "entropy": 3, "value": "test"}], docs)
def test_indices_refresh_requires_list_similar(self):
@@ -420,14 +411,11 @@ class TestElasticManageAdapter(AdapterWithIndexTestCase):
alias = "test_alias"
flip_to_index = f"{self.index}_alt"
self.adapter.index_create(self.index)
- self.adapter.index_create(flip_to_index)
- try:
+ with temporary_index(flip_to_index):
self.adapter.index_put_alias(self.index, alias)
self._assert_alias_on_single_index(alias, self.index)
self.adapter.index_put_alias(flip_to_index, alias)
self._assert_alias_on_single_index(alias, flip_to_index)
- finally:
- self.adapter.index_delete(flip_to_index)
def _assert_alias_on_single_index(self, alias, index):
aliases = self.adapter.get_aliases()
|
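The `temporary_index` helper leaned on above is used but not defined in the diff; one plausible shape for such a helper, written with an explicit adapter argument so the sketch stays self-contained, is:

from contextlib import contextmanager

@contextmanager
def temporary_index(adapter, index, type_=None, mapping=None):
    # Create the index on entry and always delete it on exit, so individual
    # tests no longer need their own try/finally clean-up.
    adapter.index_create(index)   # applying type_/mapping here is an assumption
    try:
        yield index
    finally:
        adapter.index_delete(index)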
Remove forgotten intrinsic limit from recentchanges generator
Apparently, this was forgotten in | @@ -811,8 +811,8 @@ class GeneratorFactory(object):
ts_time = self.site.server_time()
rcstart = ts_time + timedelta(minutes=-(offset + duration))
rcend = ts_time + timedelta(minutes=-offset)
- else:
- total = int(params[0]) if params else 60
+ elif len(params) == 1:
+ total = int(params[0])
return RecentChangesPageGenerator(
namespaces=self.namespaces, total=total, start=rcstart, end=rcend,
site=self.site, reverse=True, tag=rctag,
|