message (string, length 13-484)
diff (string, length 38-4.63k)
Minor improvements in torch approximator: using call instead of forward for the network; removed useless requires_grad_(False). Closes
@@ -98,7 +98,7 @@ class TorchApproximator(Serializable): if not self._use_cuda: torch_args = [torch.from_numpy(x) if isinstance(x, np.ndarray) else x for x in args] - val = self.network.forward(*torch_args, **kwargs) + val = self.network(*torch_args, **kwargs) if output_tensor: return val @@ -109,8 +109,7 @@ class TorchApproximator(Serializable): else: torch_args = [torch.from_numpy(x).cuda() if isinstance(x, np.ndarray) else x.cuda() for x in args] - val = self.network.forward(*torch_args, - **kwargs) + val = self.network(*torch_args, **kwargs) if output_tensor: return val @@ -259,7 +258,7 @@ class TorchApproximator(Serializable): else: output_type = y_hat.dtype - y = [y_i.clone().detach().requires_grad_(False).type(output_type) for y_i + y = [y_i.clone().detach().type(output_type) for y_i in torch_args[-self._n_fit_targets:]] if self._use_cuda:
Add SCRIPTS_ROOT to configuration.example.py Fixes by adding the new variable to the example configuration.
@@ -154,6 +154,10 @@ PREFER_IPV4 = False # this setting is derived from the installed location. # REPORTS_ROOT = '/opt/netbox/netbox/reports' +# The file path where custom scripts will be stored. A trailing slash is not needed. Note that the default value of +# this setting is derived from the installed location. +# SCRIPTS_ROOT = '/opt/netbox/netbox/scripts' + # By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use # local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only # database access.) Note that the user as which NetBox runs must have read and write permissions to this path.
DOC: updated Constellation documentation Updated the Constellation documentation to reflect the updated string output.
@@ -44,4 +44,6 @@ to create a Constellation of real-time solar wind data. This last command will show that four :py:class:`~pysat._instrument.Instrument` objects match the desired :py:attr:`platform` and :py:attr:`tag` criteria. The :py:attr:`~pysat._instrument.Instrument.name` values are: :py:data:`epam`, -:py:data:`mag`, :py:data:`sis`, and :py:data:`swepam`. +:py:data:`mag`, :py:data:`sis`, and :py:data:`swepam`. The only +:py:class:`~pysat._instrument.Instrument` defining attributes that are not +unique are the :py:attr:`~pysat._instrument.Instrument.name` values.
TypeRepo.env_assoc: use T.defer_root_node instead of T.root_node TN:
@@ -2880,12 +2880,10 @@ class TypeRepo(object): EnvAssoc type, used to add associations of key and value to the lexical environments, via the add_to_env primitive. """ - assert T.root_node - class EnvAssoc(StructType): _fields = [ ('key', UserField(type=Symbol)), - ('val', UserField(type=T.root_node)), + ('val', UserField(type=self.defer_root_node)), ] return EnvAssoc
Pulling out 3.2.2 version of Robot Framework from Github Actions Latest builds are failing in somewhat unusual places. I want to see what the consistency of runs is for versions 4.x, 5.x, and 6.x.
@@ -9,7 +9,7 @@ jobs: strategy: matrix: python-version: [3.7, 3.9, pypy-3.7] - rf-version: [3.2.2, 4.1.3, 5.0.1] + rf-version: [4.1.3, 5.0.1, 6.0.1] steps: - uses: actions/checkout@v3
Refactor "delete part category" dialog Translations Simplification
{% load i18n %} {% block pre_form_content %} -{% trans 'Are you sure you want to delete category' %} <strong>{{ category.name }}</strong>? + +<div class='alert alert-block alert-danger'> + {% trans "Are you sure you want to delete this part category?" %} +</div> {% if category.children.all|length > 0 %} -<p>{% blocktrans with count=category.children.all|length%}This category contains {{count}} child categories{% endblocktrans %}.<br> -{% trans 'If this category is deleted, these child categories will be moved to the' %} +<div class='alert alert-block alert-warning'> + {% blocktrans with n=category.children.all|length %}This category contains {{ n }} child categories{% endblocktrans %}.<br> {% if category.parent %} - <strong>{{ category.parent.name }}</strong> {% trans 'category' %}. + {% blocktrans with category=category.parent.name %}If this category is deleted, these child categories will be moved to {{ category }}{% endblocktrans %}. {% else %} - {% trans 'top level Parts category' %}. + {% trans "If this category is deleted, these child categories will be moved to the top level part category" %}. {% endif %} -</p> - -<ul class='list-group'> -{% for cat in category.children.all %} - <li class='list-group-item'><strong>{{ cat.name }}</strong> - <em>{{ cat.description }}</em></li> -{% endfor %} -</ul> +</div> {% endif %} {% if category.parts.all|length > 0 %} -<p>{% blocktrans with count=category.parts.all|length %}This category contains {{count}} parts{% endblocktrans %}.<br> +<div class='alert alert-block alert-warning'> + {% blocktrans with n=category.parts.all|length %}This category contains {{ n }} parts{% endblocktrans %}.<br> {% if category.parent %} - {% blocktrans with path=category.parent.pathstring %}If this category is deleted, these parts will be moved to the parent category {{path}}{% endblocktrans %} + {% blocktrans with category=category.parent.name %}If this category is deleted, these parts will be moved to {{ category }}{% endblocktrans %}. {% else %} - {% trans 'If this category is deleted, these parts will be moved to the top-level category Teile' %} + {% trans "If this category is deleted, these parts will be moved to the top level part category" %}. {% endif %} -</p> -<ul class='list-group'> -{% for part in category.parts.all %} - <li class='list-group-item'><strong>{{ part.full_name }}</strong> - <em>{{ part.description }}</em></li> -{% endfor %} -</ul> +</div> {% endif %} {% endblock %} \ No newline at end of file
fix pipeline definition doc to reference solid Summary: pipeline def was referencing `apply_op_three` instead of `apply_op` Test Plan: none Reviewers: max, sashank
@@ -114,8 +114,8 @@ def adder_resource(init_context): pipeline_def = PipelineDefinition( name='basic', - solid_defs=[return_one, apply_op_three], - dependencies={'apply_op_three': {'num': DependencyDefinition('return_one')}}, + solid_defs=[return_one, apply_op], + dependencies={'apply_op': {'num': DependencyDefinition('return_one')}}, mode_defs=[add_mode], preset_defs=[add_three_preset], )
X-API-Key and Authorization: Bearer headers. * MS teams sends Authorization: Bearer header. To use X-API-Key header with teams (webhook) we need to look for X-API-Key if Authorization header doesn't have Key. (this was briefly discussed in alerta/alerta-contrib#280) * use startswith('Key ') instead of find('Key ').
@@ -24,7 +24,7 @@ def permission(scope=None): def wrapped(*args, **kwargs): # API Key (Authorization: Key <key>) - if 'Authorization' in request.headers: + if 'Authorization' in request.headers and request.headers['Authorization'].startswith('Key '): auth_header = request.headers['Authorization'] m = re.match(r'Key (\S+)', auth_header) key = m.group(1) if m else None
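To make the fall-through behaviour concrete, here is a minimal, hypothetical sketch of the resulting dispatch order (the `extract_api_key` helper is illustrative and not alerta's actual code; only the `startswith('Key ')` check comes from the patch): a `Bearer` token no longer captures the request, so the `X-API-Key` header still gets a chance.

```python
from flask import request

def extract_api_key():
    # Authorization: Key <key> wins only when it really is a Key header.
    auth_header = request.headers.get('Authorization', '')
    if auth_header.startswith('Key '):
        return auth_header[len('Key '):]
    # Otherwise (e.g. MS Teams sending Authorization: Bearer), fall back to X-API-Key.
    if 'X-API-Key' in request.headers:
        return request.headers['X-API-Key']
    return None
```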
Ctypes: Fix missing adaptations for C target type conversion in list operations.
@@ -23,7 +23,8 @@ Right now only the creation is done here. But more should be added later on. from .CodeHelpers import ( decideConversionCheckNeeded, generateChildExpressionsCode, - generateExpressionCode + generateExpressionCode, + withObjectCodeTemporaryAssignment ) from .ErrorCodes import getErrorExitBoolCode, getErrorExitCode from .PythonAPICodes import generateCAPIObjectCode @@ -31,11 +32,13 @@ from .PythonAPICodes import generateCAPIObjectCode def generateListCreationCode(to_name, expression, emit, context): elements = expression.getElements() - assert elements element_name = context.allocateTempName("list_element") + with withObjectCodeTemporaryAssignment(to_name, "list_extend_result", expression, emit, context) \ + as result_name: + for count, element in enumerate(elements): generateExpressionCode( to_name = element_name, @@ -48,12 +51,12 @@ def generateListCreationCode(to_name, expression, emit, context): if count == 0: emit( "%s = PyList_New( %d );" % ( - to_name, + result_name, len(elements) ) ) - context.addCleanupTempName(to_name) + context.addCleanupTempName(result_name) if not context.needsCleanup(element_name): emit("Py_INCREF( %s );" % element_name) @@ -62,7 +65,7 @@ def generateListCreationCode(to_name, expression, emit, context): emit( "PyList_SET_ITEM( %s, %d, %s );" % ( - to_name, + result_name, count, element_name ) @@ -115,22 +118,25 @@ def generateListOperationExtendCode(to_name, expression, emit, context): ) emit("assert( PyList_Check( %s ) );" % list_arg_name) + + with withObjectCodeTemporaryAssignment(to_name, "list_extend_result", expression, emit, context) \ + as result_name: emit( "%s = _PyList_Extend( (PyListObject *)%s, %s );" % ( - to_name, + result_name, list_arg_name, value_arg_name ) ) getErrorExitCode( - check_name = to_name, + check_name = result_name, release_names = (list_arg_name, value_arg_name), emit = emit, context = context ) - context.addCleanupTempName(to_name) + context.addCleanupTempName(result_name) def generateListOperationPopCode(to_name, expression, emit, context): @@ -140,23 +146,28 @@ def generateListOperationPopCode(to_name, expression, emit, context): context = context ) - # TODO: Have a dedicated helper instead, this could be more efficient. + emit("assert( PyList_Check( %s ) );" % list_arg_name) + + with withObjectCodeTemporaryAssignment(to_name, "list_extend_result", expression, emit, context) \ + as result_name: + + # TODO: Have a dedicated helper instead, this could be more efficient. emit( '%s = PyObject_CallMethod( %s, (char *)"pop", NULL );' % ( - to_name, + result_name, list_arg_name ) ) getErrorExitCode( - check_name = to_name, + check_name = result_name, release_name = list_arg_name, emit = emit, context = context ) - context.addCleanupTempName(to_name) + context.addCleanupTempName(result_name) def generateBuiltinListCode(to_name, expression, emit, context):
Change the default source for the workflow fixture When passing source=None, the workflow object will create a DummySource in a newly created system tmpdir. Use the source_dir fixture instead, to make sure unit tests reuse the pytest tmpdir rather than creating new ones in /tmp.
@@ -14,6 +14,7 @@ import requests import requests.exceptions from atomic_reactor.constants import DOCKERFILE_FILENAME from atomic_reactor.dirs import RootBuildDir +from atomic_reactor.source import DummySource from tests.constants import LOCALHOST_REGISTRY_HTTP, DOCKER0_REGISTRY_HTTP, TEST_IMAGE from tests.util import uuid_value @@ -66,8 +67,11 @@ def user_params(monkeypatch): @pytest.fixture -def workflow(build_dir, user_params): - return DockerBuildWorkflow(RootBuildDir(build_dir), source=None) +def workflow(build_dir, source_dir, user_params): + return DockerBuildWorkflow( + build_dir=RootBuildDir(build_dir), + source=DummySource(None, None, workdir=source_dir), + ) @pytest.mark.optionalhook
Fix GPT2 impl partially TODO: still need to add `until` everywhere
@@ -17,19 +17,22 @@ class GPT2LM(LM): return cls(device=args.get("device", "cpu")) def generate(self, context, max_gen_length, truncate=True): - context_tensor = torch.tensor([self.tokenizer.encode(context.strip())], dtype=torch.long).to(self.device) + # when too long to fit in context, truncate from the left + context_tensor = torch.tensor([self.tokenizer.encode(context.strip())[max_gen_length - 1024:]], dtype=torch.long).to(self.device) res = self.gpt2.generate( context_tensor, + # TODO: change to have until rather than using eos_token_id eos_token_id=self.tokenizer.eos_token_id, do_sample=False, max_length=self.num_tokens(context) + max_gen_length, ) # chop off the prompt and the final eos token - return self.tokenizer.decode(res[0][len(context[0]):-1]).strip() + return self.tokenizer.decode(res[0][min(1024 - max_gen_length, len(context_tensor[0])):-1]).strip() def loglikelihood(self, context, continuation, truncate=True): - inp = torch.tensor([self.tokenizer.encode(context + continuation)], dtype=torch.long).to(self.device) + # when too long to fit in context, truncate from the left + inp = torch.tensor([self.tokenizer.encode(context + continuation)[-1024:]], dtype=torch.long).to(self.device) ctxlen = len(self.tokenizer.encode(context.strip())) cont_toks = inp[:, ctxlen:] # [batch, seq]
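The slice `[max_gen_length - 1024:]` is easy to misread; a small sketch of the arithmetic (1024 being GPT-2's context window, the concrete numbers below are made up for illustration):

```python
max_gen_length = 256
context_tokens = list(range(2000))             # stand-in for a long encoded prompt
kept = context_tokens[max_gen_length - 1024:]  # i.e. [-768:], keep the rightmost tokens
assert len(kept) == 1024 - max_gen_length      # leaves room for 256 generated tokens
```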
added pull request link to changelog added entry to contributors list
### Features - Added warning to nodes selector if nothing was matched ([#2115](https://github.com/fishtown-analytics/dbt/issues/2115), [#2343](https://github.com/fishtown-analytics/dbt/pull/2343)) - Suport column descriptions for BigQuery models ([#2335](https://github.com/fishtown-analytics/dbt/issues/2335), [#2402](https://github.com/fishtown-analytics/dbt/pull/2402)) -- Added BigQuery option maximum_bytes_billed to set an upper limit for query costs ([#2346](https://github.com/fishtown-analytics/dbt/issues/2346)) +- Added BigQuery option maximum_bytes_billed to set an upper limit for query costs ([#2346](https://github.com/fishtown-analytics/dbt/issues/2346), [#2427](https://github.com/fishtown-analytics/dbt/pull/2427)) ### Fixes - When tracking is disabled due to errors, do not reset the invocation ID ([#2398](https://github.com/fishtown-analytics/dbt/issues/2398), [#2400](https://github.com/fishtown-analytics/dbt/pull/2400)) @@ -20,6 +20,7 @@ Contributors: - [@azhard](https://github.com/azhard) [#2413](https://github.com/fishtown-analytics/dbt/pull/2413) - [@mikaelene](https://github.com/mikaelene) [#2414](https://github.com/fishtown-analytics/dbt/pull/2414) - [@raalsky](https://github.com/Raalsky) ([#2343](https://github.com/fishtown-analytics/dbt/pull/2343)) + - [@haukeduden](https://github.com/haukeduden) ([#2427](https://github.com/fishtown-analytics/dbt/pull/2427)) ## dbt 0.17.0b1 (May 5, 2020)
Create a mapping: kind constant -> ASTNodeType subclass TN:
@@ -406,6 +406,13 @@ class CompileCtx(object): :type: dict[langkit.compiled_types.ASTNodeType, int] """ + self.kind_constant_to_node = {} + """ + Reverse mapping for `node_kind_constants`. + + :type: dict[int, langkit.compiled_types.ASTNodeType] + """ + self._struct_types = [] """ List of all plain struct types. @@ -1710,6 +1717,7 @@ class CompileCtx(object): start=1 ): self.node_kind_constants[astnode] = i + self.kind_constant_to_node[i] = astnode def expose_public_api_types(self, astnode): """
Quality: Avoid temporary file from isorted * This causes flicker to git and can be left over if interrupted and it's not in the ignore list for git.
@@ -335,6 +335,7 @@ def _cleanupImportSortOrder(filename): isort_call + [ "-q", # quiet, but stdout is still garbage + "--overwrite-in-place", # avoid using another temp file, this is already on one. "-ot", # Order imports by type in addition to alphabetically "-m3", # "vert-hanging" "-tc", # Trailing commas
Update decisiontree.cu remove printf from stateless
@@ -187,7 +187,6 @@ void decisionTreeClassifierPredict(const ML::cumlHandle &handle, bool verbose) { std::shared_ptr<DecisionTreeClassifier<float>> dt_classifier = std::make_shared<DecisionTreeClassifier<float>>(); - dt_classifier->print(tree->root); dt_classifier->predict(handle, tree, rows, n_rows, n_cols, predictions, verbose); }
Update requirements.txt after testing. pandas.read_excel requires xlrd>=0.9.0
-pytest==3.4.2 -pandas==0.23.1 -numpy==1.14.1 -setuptools==38.5.2 +pytest>=3.4.2 +pandas>=0.23.1 +numpy>=1.14.1 +setuptools>=38.5.2 sphinxcontrib-fulltoc==1.2.0 scikit-learn==0.19.1 +xlrd>=0.9.0 \ No newline at end of file
Fix shaft elements plot and add slenderness ratio parameter This piece of code was missing in the first commit. It's a list with the elements' lengths, built just like "nodes_o_d" and "nodes_i_d", and has the same objective: to provide data for the rotor plot.
@@ -259,6 +259,10 @@ class Rotor(object): nodes_o_d.append(df_shaft["o_d"].iloc[-1]) self.nodes_o_d = nodes_o_d + nodes_le = list(df_shaft.groupby("n_l")["L"].min()) + nodes_le.append(df_shaft["L"].iloc[-1]) + self.nodes_le = nodes_le + self.nodes = list(range(len(self.nodes_pos))) self.elements_length = [sh_el.L for sh_el in self.shaft_elements] self.L = nodes_pos[-1]
UserField: document the "is_public" constructor argument TN:
@@ -1041,6 +1041,9 @@ class UserField(AbstractField): :type type: CompiledType :type doc: str + + :param bool is_public: Whether this field is public in the generated + APIs. """ super(UserField, self).__init__(repr, doc, type) self._is_public = is_public
Update ctc.py Setting the --ctc_conf ignore_nan_grad=true as suggested at
@@ -23,7 +23,7 @@ class CTC(torch.nn.Module): dropout_rate: float = 0.0, ctc_type: str = "builtin", reduce: bool = True, - ignore_nan_grad: bool = False, + ignore_nan_grad: bool = True, ): assert check_argument_types() super().__init__()
help text for container_format, disk_format Updated the container_format and disk_format in v1 help text. Closes-Bug:
@@ -28,8 +28,9 @@ from glanceclient.common import utils from glanceclient import exc import glanceclient.v1.images -CONTAINER_FORMATS = 'Acceptable formats: ami, ari, aki, bare, and ovf.' -DISK_FORMATS = ('Acceptable formats: ami, ari, aki, vhd, vmdk, raw, ' +CONTAINER_FORMATS = ('Acceptable formats: ami, ari, aki, bare, ovf, ova,' + 'docker.') +DISK_FORMATS = ('Acceptable formats: ami, ari, aki, vhd, vdhx, vmdk, raw, ' 'qcow2, vdi, iso, and ploop.') DATA_FIELDS = ('location', 'copy_from', 'file')
Update contrib.py update concat docs
@@ -243,7 +243,7 @@ def concat(arr, dim=0): Example:: jt.concat([jt.array([[1],[2]]), jt.array([[2],[2]])], dim=1) - # return [[1],[2],[2],[2]] + # return jt.Var([[1,2],[2,2]],dtype=int32) ''' if not isinstance(arr, Sequence): raise TypeError("concat arr needs to be a tuple or list")
validate: support obs repository Otherwise, installation on SUSE fails. Fixes:
- name: validate ceph_repository fail: - msg: "ceph_repository must be either 'community', 'rhcs', 'dev', 'custom' or 'uca'" + msg: "ceph_repository must be either 'community', 'rhcs', 'obs', 'dev', 'custom' or 'uca'" when: - ceph_origin == 'repository' - - ceph_repository not in ['community', 'rhcs', 'dev', 'custom', 'uca'] + - ceph_repository not in ['community', 'rhcs', 'obs', 'dev', 'custom', 'uca'] - name: validate ceph_repository_community fail:
Update southern-california-earthquakes.yaml Adding more tutorials
Name: Southern California Earthquake Data Description: This dataset contains ground motion velocity and acceleration seismic waveforms recorded by the Southern California Seismic Network (SCSN) - and archived at the Southern California Earthquake Data Center (SCEDC). + and archived at the Southern California Earthquake Data Center (SCEDC). A + Distributed Acousting Sensing (DAS) dataset is included. Documentation: https://scedc.caltech.edu/data/cloud.html Contact: [email protected] ManagedBy: "[Southern California Earthquake Data Center](https://scedc.caltech.edu)" @@ -49,7 +50,17 @@ DataAtWork: - Title: 'SeisNoise.jl GPU Computing Tutorial - Another example of accessing data s3://scedc-pds for ambient noise cross-correlation' URL: https://nextjournal.com/thclements/seisnoisejl-gpu-computing-tutorial AuthorName: Tim Clements - AuthorURL: https://tclements.github.io/ + - Title: 'Using Amazon API Gateway and Lambda to window and decimate waveforms' + URL: https://github.com/SCEDC/cloud/tree/master/pds-lambda-apis + AuthorName: Shang-Lin Chen + AuthorURL: https://scedc.caltech.edu + Services: + - Lambda + - Title: 'Jupyter Notebook tutorial on Ridgecrest DAS dataset' + URL: https://github.com/SCEDC/cloud/blob/master/pds_ridgecrest_das.ipynb + AuthorName: Jiuxun Yin + + Publications: - Title: Southern California Earthquake Data Now Available in the AWS Cloud URL: https://pubs.geoscienceworld.org/ssa/srl/article-abstract/doi/10.1785/0220210039/600883/Southern-California-Earthquake-Data-Now-Available
Update agilent34410A.py fixed spelling typo
@@ -45,5 +45,5 @@ class Agilent34410A(Instrument): def __init__(self, adapter, delay=0.02, **kwargs): super(Agilent34410A, self).__init__( - adapter, "HP/Agilent/Keysight 34410A Multimiter", **kwargs + adapter, "HP/Agilent/Keysight 34410A Multimeter", **kwargs )
Overload Concat.__repr__ TN:
@@ -643,3 +643,6 @@ class Concat(AbstractExpression): return CallExpr('Concat_Result', 'Concat', array_1.type, [array_1, array_2], abstract_expr=self) + + def __repr__(self): + return '<Concat>'
[bugfix] use page.site.data_repository when creating a _WbDataPage use page.site.data_repository() instead of default Site().data_repository() when creating a _WbDataPage.
@@ -943,7 +943,7 @@ class _WbDataPage(_WbRepresentation): :param page: page containing the data :param site: The Wikibase site """ - site = site or Site().data_repository() + site = site or page.site.data_repository() specifics = type(self)._get_type_specifics(site) _WbDataPage._validate(page, specifics['data_site'], specifics['ending'], specifics['label'])
[commands] Handle nick mentions in HelpFormatter Modifies the help formatter to handle nicknamed bot users for mentions in clean_prefix
@@ -181,12 +181,12 @@ class HelpFormatter: @property def clean_prefix(self): """The cleaned up invoke prefix. i.e. mentions are ``@name`` instead of ``<@id>``.""" - user = self.context.bot.user + user = self.context.guild.me if self.context.guild else self.context.bot.user # this breaks if the prefix mention is not the bot itself but I # consider this to be an *incredibly* strange use case. I'd rather go # for this common use case rather than waste performance for the # odd one. - return self.context.prefix.replace(user.mention, '@' + user.name) + return self.context.prefix.replace(user.mention, '@' + user.display_name) def get_command_signature(self): """Retrieves the signature portion of the help page."""
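Illustration of what the change buys (the ID and nickname below are made up): with a nicknamed bot, `guild.me.display_name` is the nickname, so the cleaned prefix now matches what users actually see.

```python
prefix = "<@158063750655639552> help"               # raw mention-style prefix
mention = "<@158063750655639552>"
display_name = "HelperBot"                          # bot's per-guild nickname
print(prefix.replace(mention, "@" + display_name))  # "@HelperBot help"
```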
Fix, need to reset return release mode when generating outline codes. * This could lead to crashes when this kind of function was executed in a return handler of a tried block.
@@ -779,10 +779,10 @@ def generateFunctionOutlineCode(to_name, expression, emit, context): context = PythonFunctionOutlineContext(parent=context, outline=expression) # Need to set return target, to assign to_name from. - old_return_release_mode = context.getReturnReleaseMode() return_target = context.allocateLabel("outline_result") old_return_target = context.setReturnTarget(return_target) + old_return_release_mode = context.setReturnReleaseMode(False) # TODO: Put the return value name as that to_name.c_type too.
Allow iterable params to column Add type checking in QueryBuilder column methods. After applying this patch, lists and tuples are allowed as column arguments (fixed
@@ -719,6 +719,9 @@ class QueryBuilder(Selectable, Term): if self._insert_table is None: raise AttributeError("'Query' object has no attribute '%s'" % "insert") + if isinstance(terms[0], (list, tuple)): + terms = terms[0] + for term in terms: if isinstance(term, str): term = Field(term, table=self._insert_table)
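A hedged usage sketch of the new behaviour, assuming pypika's public `Query`/`Table` API and the patched `QueryBuilder`: passing a single list now behaves like passing the names individually.

```python
from pypika import Query, Table

customers = Table('customers')
q1 = Query.into(customers).columns('id', 'name').insert(1, 'Jane')
q2 = Query.into(customers).columns(['id', 'name']).insert(1, 'Jane')
assert str(q1) == str(q2)   # both render the same INSERT statement after the patch
```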
tools: Include `test_server_url.py` to be checked by mypy. This commit adds `test_server_url.py` to the `type_consistent_testfiles` list to check for type consistency with mypy.
@@ -78,7 +78,8 @@ repo_python_files['tests'] = [] # Added incrementally as newer test files are type-annotated. type_consistent_testfiles = [ - "test_run.py", "test_core.py", "test_emoji_data.py", "test_helper.py" + "test_run.py", "test_core.py", "test_emoji_data.py", "test_helper.py", + "test_server_url.py" ] for file_path in python_files:
Add group permission check to Get BI dashboard HG-- branch : feature/microservices
@@ -172,6 +172,7 @@ class BIAPI(API): :return: """ user = self.handler.current_user + groups = user.groups.values_list("id", flat=True) d = Dashboard.objects.filter(id=id).first() if not d: return None @@ -181,8 +182,10 @@ class BIAPI(API): for i in d.access: if i.user == user and i.level >= access_level: return d + elif i.group and i.group.id in groups and i.level >= access_level: + return d # No access - return None + raise APIError("User have no permission to access dashboard") @executor("query") @api
Fix, properly construct the path of extension modules. * This was at least a problem on MacOS, it should be a fix for Linux too.
@@ -632,7 +632,8 @@ static PyObject *loadModule(PyObject *module_name, struct Nuitka_MetaPathBasedLo char filename[MAXPATHLEN + 1]; strcpy(filename, getBinaryDirectoryHostEncoded()); - filename[strlen(filename)] = SEP; + char const sep_str[2] = {SEP, 0}; + strcat(filename, sep_str); copyModulenameAsPath(filename + strlen(filename), entry->name); strcat(filename, ".so");
Tests: Fixups for the distutils test runner * Need to be able to execute both "runner" and "runner.exe", which occurs for CPython too. * Was not actually printing the output of Nuitka when it said so. * More fixups
@@ -87,10 +87,21 @@ for filename in sorted(os.listdir(".")): 'pip install "%s"' % (os.path.join(dist_dir, os.listdir(dist_dir)[0])) ) + runner_binary = os.path.join( + venv.getVirtualenvDir(), + "bin" if os.name != "nt" else "scripts", + "runner", + ) + + if os.path.exists(runner_binary): # Need to call CPython binary for Windows. process = subprocess.Popen( args=[ - sys.executable, + os.path.join( + venv.getVirtualenvDir(), + "bin" if os.name != "nt" else "scripts", + "python", + ), os.path.join( venv.getVirtualenvDir(), "bin" if os.name != "nt" else "scripts", @@ -100,6 +111,14 @@ for filename in sorted(os.listdir(".")): stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) + else: + assert os.path.exists(runner_binary + ".exe") + + process = subprocess.Popen( + args=[runner_binary + ".exe"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) stdout_cpython, stderr_cpython = process.communicate() exit_cpython = process.returncode @@ -136,26 +155,41 @@ for filename in sorted(os.listdir(".")): 'pip install "%s"' % (os.path.join(dist_dir, os.listdir(dist_dir)[0])) ) + runner_binary = os.path.join( + venv.getVirtualenvDir(), + "bin" if os.name != "nt" else "scripts", + "runner", + ) + + if os.path.exists(runner_binary): process = subprocess.Popen( args=[ - sys.executable, os.path.join( venv.getVirtualenvDir(), "bin" if os.name != "nt" else "scripts", - "runner", + "python", ), + runner_binary, ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) + else: + assert os.path.exists(runner_binary + ".exe") + + process = subprocess.Popen( + args=[runner_binary + ".exe"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) stdout_nuitka, stderr_nuitka = process.communicate() exit_nuitka = process.returncode print("STDOUT Nuitka:") - print(stdout_cpython) + print(stdout_nuitka) print("STDERR Nuitka:") - print(stderr_cpython) + print(stderr_nuitka) assert exit_nuitka == 0, exit_nuitka print("EXIT was OK.")
Fix date filters. Handle lists of operations. Filter[date][eq]=day becomes [filter[date][gt]=day-1, filter[date][lt]=day+1].
@@ -541,10 +541,12 @@ class ListFilterMixin(FilterMixin): if filters: for key, field_names in filters.iteritems(): for field_name, data in field_names.iteritems(): + operations = data if isinstance(data, list) else [data] + for operation in operations: if isinstance(queryset, list): - queryset = self.get_filtered_queryset(field_name, data, queryset) + queryset = self.get_filtered_queryset(field_name, operation, queryset) else: - queryset = self.filter_by_field(queryset, field_name=field_name, operation=data) + queryset = self.filter_by_field(queryset, field_name, operation) return queryset def filter_by_field(self, queryset, field_name, operation): @@ -581,7 +583,7 @@ class ListFilterMixin(FilterMixin): if operation['value'] not in (list(), tuple()): operation['source_field_name'] = '_contributors__guids___id' operation['op'] = 'iexact' - if operation['source_field_name'] == 'kind': + if field_name == 'kind': operation['source_field_name'] = 'is_file' # The value should be boolean operation['value'] = operation['value'] == 'file'
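A hypothetical helper illustrating the expansion described in the message (the function name and dict shape are illustrative only, not the actual OSF filter internals):

```python
from datetime import date, timedelta

def expand_date_eq(day):
    # 'eq' on a date becomes an open interval, i.e. a list of two operations
    # that the new loop above then applies one after the other.
    return [
        {'op': 'gt', 'value': day - timedelta(days=1)},
        {'op': 'lt', 'value': day + timedelta(days=1)},
    ]

print(expand_date_eq(date(2017, 3, 15)))
# [{'op': 'gt', 'value': datetime.date(2017, 3, 14)},
#  {'op': 'lt', 'value': datetime.date(2017, 3, 16)}]
```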
Squashed commit of the following: commit Author: William Jones Date: Mon Jul 8 14:34:18 2019 +0100 fix order of function parameters commit Author: William Jones Date: Mon Jul 8 14:31:37 2019 +0100 add boolean for atomic writes
@@ -115,7 +115,7 @@ def create_dir(path): def save_df_as_csv(df, path, filename, - comment="", prepend_info=True,**kwargs, atomic=True): + comment="", prepend_info=True, atomic=True, **kwargs): """ Save dataframe to a CSV file. Parameters
Update README.md Clean up quick start
@@ -22,18 +22,20 @@ See more on the [hypergan youtube](https://www.youtube.com/channel/UCU33XvBbMnS8 * [Quick start](#quick-start) * [Requirements](#requirements) * [Install](#install) - * [Troubleshooting](#troubleshooting) * [Train](#train) - * [Development Mode](#development-mode) * [The pip package hypergan](#the-pip-package-hypergan) * [Training](#training) * [Sampling](#sampling) + * [Additional Arguments](#additional-arguments) * [Running on CPU](#running-on-cpu) + * [Troubleshooting](#troubleshooting) + * [Development Mode](#development-mode) * [API](#api) * [Using a trained hypergan model](#using-a-trained-hypergan-model) * [Training a gan](#training-a-gan) * [Examples](#examples) * [Tutorials](#tutorials) + * [Datasets](#datasets) * [Creating a Dataset](#creating-a-dataset) * [Downloadable Datasets](#downloadable-datasets) @@ -84,12 +86,6 @@ GPU: Nvidia, GTX 1080+ recommended 3. Join the community * Once you've made something cool, be sure to share it on the Discord \([https://discord.gg/t4WWBPF](https://discord.gg/t4WWBPF)\). -### Troubleshooting - -Make sure that your cuda, nvidia drivers, pillow, pytorch, and pytorch vision are the latest version. - -Check the discord for help. - ### Create a new model ```bash @@ -134,7 +130,7 @@ See all configuration templates with `--list-templates` or `-l`. By default hypergan will not save training samples to disk. To change this, use `--save_samples`. -### Arguments +### Additional Arguments To see a detailed list, run @@ -142,6 +138,12 @@ To see a detailed list, run hypergan -h ``` +### Troubleshooting + +Make sure that your cuda, nvidia drivers, pillow, pytorch, and pytorch vision are the latest version. + +Check the discord for help. + ### Running on CPU You can switch the backend with: @@ -152,7 +154,6 @@ You can switch the backend with: Don't train on CPU! It's too slow. - ## API ```python
install ceph-mds packages on SUSE/openSUSE install packages on SUSE/openSUSE distributions, using the same logic as on RedHat-based distributions Fixes
register: result until: result is succeeded -- name: install redhat ceph-mds package +- name: install ceph-mds package on redhat or suse package: name: "ceph-mds" state: "{{ (upgrade_ceph_packages|bool) | ternary('latest','present') }}" until: result is succeeded when: - mds_group_name in group_names - - ansible_os_family == 'RedHat' + - ansible_os_family in ['Suse', 'RedHat'] - name: create mds keyring command: ceph --cluster {{ cluster }} --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
Infraction Tests: Small fixes Remove unnecessary space from placeholder Rename `has_active_infraction` to `get_active_infraction`
@@ -17,11 +17,11 @@ class TruncationTests(unittest.IsolatedAsyncioTestCase): self.guild = MockGuild(id=4567) self.ctx = MockContext(bot=self.bot, author=self.user, guild=self.guild) - @patch("bot.cogs.moderation.utils.has_active_infraction") + @patch("bot.cogs.moderation.utils.get_active_infraction") @patch("bot.cogs.moderation.utils.post_infraction") - async def test_apply_ban_reason_truncation(self, post_infraction_mock, has_active_mock): + async def test_apply_ban_reason_truncation(self, post_infraction_mock, get_active_mock): """Should truncate reason for `ctx.guild.ban`.""" - has_active_mock.return_value = False + get_active_mock.return_value = 'foo' post_infraction_mock.return_value = {"foo": "bar"} self.cog.apply_infraction = AsyncMock()
Make test_leaky_relu_inplace_with_neg_slope device-generic and skipIfRocm. Summary: Pull Request resolved: Fixes Test Plan: Imported from OSS
@@ -4108,18 +4108,6 @@ for shape in [(1,), ()]: with self.assertRaisesRegex(RuntimeError, "must implement the backward"): BadBw.apply(inp).sum().backward() - def test_leaky_relu_inplace_with_neg_slope(self): - for device in torch.testing.get_all_device_types(): - a = torch.tensor([-1., 1.], device=device, requires_grad=True) - b = torch.nn.functional.leaky_relu_(a.clone(), -2) - with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): - b.backward(torch.ones(2, device=device)) - - a = torch.tensor([-1., 1.], device=device, requires_grad=True) - b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0) - with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): - b.backward(torch.ones(2, device=device)) - def test_custom_function_local_inplace(self): class MyFn(torch.autograd.Function): @staticmethod @@ -5704,6 +5692,18 @@ class TestAutogradDeviceType(TestCase): grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out) self.assertEqual(grad_cudnn, grad_native, prec=1e-4) + @skipCUDAIfRocm + def test_leaky_relu_inplace_with_neg_slope(self, device): + a = torch.tensor([-1., 1.], device=device, requires_grad=True) + b = torch.nn.functional.leaky_relu_(a.clone(), -2) + with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): + b.backward(torch.ones(2, device=device)) + + a = torch.tensor([-1., 1.], device=device, requires_grad=True) + b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0) + with self.assertRaisesRegex(RuntimeError, "call out-of-place version"): + b.backward(torch.ones(2, device=device)) + @onlyCUDA def test_free_unneeded_tensor(self, device): x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
Simplify column and info copying for table. In particular, since parent_table and indices are never copied internally, there is no need to save and reset them. Also, as it was, the code set indices to an empty list even when not present initially, thus changing a column being copied.
@@ -74,26 +74,15 @@ def col_copy(col, copy_indices=True): if isinstance(col, BaseColumn): return col.copy() - # The new column should have None for the parent_table ref. If the - # original parent_table weakref there at the point of copying then it - # generates an infinite recursion. Instead temporarily remove the weakref - # on the original column and restore after the copy in an exception-safe - # manner. - - parent_table = col.info.parent_table - indices = col.info.indices - col.info.parent_table = None - col.info.indices = [] - - try: newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col) newcol.info = col.info - newcol.info.indices = deepcopy(indices or []) if copy_indices else [] + if copy_indices and col.info.indices: + newcol.info.indices = deepcopy(col.info.indices) for index in newcol.info.indices: index.replace_col(col, newcol) - finally: - col.info.parent_table = parent_table - col.info.indices = indices + else: + # TODO: bit strange that this is needed. Why not the default? + newcol.info.indices = [] return newcol
allow runway tests to exit 0 without signaling failure This will check the exit code of tests so any that have called sys.exit(0) (e.g. yamllint) won't trigger Runway to count it as failed.
@@ -78,6 +78,8 @@ class Test(BaseCommand): # pylint: disable=too-few-public-methods # tool it is wrapping. if not isinstance(err, SystemExit): traceback.print_exc() + elif err.code == 0: + continue # Tests calling sys.exit(0) don't indicate failure LOGGER.error('Test failed: %s', test.name) if test.required: LOGGER.error('Failed test was required, the remaining '
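A minimal sketch of the mechanism the patch relies on: `sys.exit(0)` raises `SystemExit` whose `code` attribute is 0, which should not count as a failed test.

```python
try:
    raise SystemExit(0)          # what sys.exit(0) does under the hood
except BaseException as err:
    if isinstance(err, SystemExit) and err.code == 0:
        print("clean exit, not a failure")
    else:
        print("real failure")
```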
Fix test_delete_create_pvc_same_name Fixed teardown mechanism using teardown_factory
@@ -11,28 +11,6 @@ from tests import helpers logger = logging.getLogger(__name__) [email protected]() -def resources(request): - """ - Delete the pvc resources and validate pv deletion created during the test - - Returns: - list: empty list of pvcs - """ - pvcs = [] - - def finalizer(): - for instance in pvcs: - instance.delete() - instance.ocp.wait_for_delete( - instance.name - ) - assert helpers.validate_pv_delete(instance.backed_pv) - - request.addfinalizer(finalizer) - return pvcs - - @tier2 class TestDeleteCreatePVCSameName(ManageTest): """ @@ -56,28 +34,23 @@ class TestDeleteCreatePVCSameName(ManageTest): ] ) def test_delete_create_pvc_same_name( - self, interface, pvc_factory, resources + self, interface, pvc_factory, teardown_factory ): """ Delete PVC and create a new PVC with same name """ # Create a PVC - pvcs = resources pvc_obj1 = pvc_factory( - interface=interface, status=constants.STATUS_BOUND - ) - pvcs.append(pvc_obj1) - - # Check PV is Bound - pv_obj1 = pvc_obj1.backed_pv_obj - helpers.wait_for_resource_state( - resource=pv_obj1, state=constants.STATUS_BOUND + interface=interface, + access_mode=constants.ACCESS_MODE_RWO, + status=constants.STATUS_BOUND ) # Delete the PVC logger.info(f"Deleting PVC {pvc_obj1.name}") pvc_obj1.delete() pvc_obj1.ocp.wait_for_delete(pvc_obj1.name) + logger.info(f"Deleted PVC {pvc_obj1.name}") # Create a new PVC with same name logger.info(f"Creating new PVC with same name {pvc_obj1.name}") @@ -87,8 +60,8 @@ class TestDeleteCreatePVCSameName(ManageTest): namespace=pvc_obj1.project.namespace, do_reload=False ) - assert pvc_obj2, "Failed to create PVC" - pvcs.append(pvc_obj2) + + teardown_factory(pvc_obj2) # Check the new PVC and PV are Bound helpers.wait_for_resource_state(
qt: Link dbus-1 not dbus-1d in Debug builds on Windows Neither the Conan nor the vanilla CMake-built version of the dbus package uses a d-suffix for the debug version of the library on Windows. Fixes
@@ -420,6 +420,10 @@ class QtConan(ConanFile): " if (enable_precompiled_headers) {\n if (is_win) {", " if (enable_precompiled_headers) {\n if (false) {" ) + tools.replace_in_file(os.path.join(self.source_folder, "qt5", "qtbase", "configure.json"), + "-ldbus-1d", + "-ldbus-1" + ) def _make_program(self): if self._is_msvc:
Fixup some documentation references for new grain.equals function versionadded code-block notation
@@ -763,12 +763,15 @@ def equals(key, value): ''' Used to make sure the minion's grain key/value matches. - Returns ``True`` if matches otherwise ``False`` + Returns ``True`` if matches otherwise ``False``. + + .. versionadded:: Nitrogen CLI Example: - salt '*' grains.equals fqdn <expected_fqdn> + .. code-block:: bash + salt '*' grains.equals fqdn <expected_fqdn> salt '*' grains.equals systemd:version 219 ''' return str(value) == str(get(key))
sql: Improve description of i_s.engines It was improved to no longer show dummy data:
@@ -80,7 +80,7 @@ NULL. ### ENGINES table -The ENGINES table provides information about storage engines. But it contains dummy data only. In the production environment, use the TiKV engine for TiDB. +The ENGINES table provides information about storage engines. For compatibility, TiDB will always describe InnoDB as the only supported engine. ### EVENTS table
reference/tools: add a brief explanation for a default value in br doc * sql: fix sequence docs * Revert "sql: fix sequence docs" This reverts commit * reference/tools: add a brief explanation for a default value in br doc
@@ -154,7 +154,7 @@ Each of the above three sub-commands might still include the following three sub To back up the cluster data, use the `br backup` command. You can add the `full` or `table` sub-command to specify the scope of your backup operation: the whole cluster or a single table. -If the backup time might exceed the [`tikv_gc_life_time`](/reference/garbage-collection/configuration.md#tikv_gc_life_time) configuration which is `10m0s` by default, increase the value of this configuration. +If the backup time might exceed the [`tikv_gc_life_time`](/reference/garbage-collection/configuration.md#tikv_gc_life_time) configuration which is `10m0s` by default (`10m0s` means 10 minutes), increase the value of this configuration. For example, set `tikv_gc_life_time` to `720h`:
Only apply clip_values if available Also removed some unnecessary `.detach()` calls.
@@ -134,7 +134,8 @@ class FeatureAdversariesPyTorch(EvasionAttack): if self.random_start: # Starting at a uniformly random point adv = adv + torch.empty_like(adv).uniform_(-self.delta, self.delta) - adv = torch.clamp(adv, *self.estimator.clip_values).detach() + if self.estimator.clip_values is not None: + adv = torch.clamp(adv, *self.estimator.clip_values) if self.optimizer is None: # run a plain-vanilla PGD @@ -145,13 +146,12 @@ class FeatureAdversariesPyTorch(EvasionAttack): loss.backward() # pgd step - perturbation = -adv.grad.data.sign() * self.step_size - adv = adv + perturbation - perturbation = torch.clamp(adv.data - x.data, -self.delta, self.delta) - adv = torch.autograd.Variable( - torch.clamp(x.data + perturbation, self.estimator.clip_values[0], self.estimator.clip_values[1]), - requires_grad=True, - ) + adv.data = adv - adv.grad.detach().sign() * self.step_size + perturbation = torch.clamp(adv.detach() - x.detach(), -self.delta, self.delta) + adv.data = x.detach() + perturbation + if self.estimator.clip_values is not None: + adv.data = torch.clamp(adv.detach(), *self.estimator.clip_values) + adv.grad.zero_() else: # optimize soft constraint problem with chosen optimizer opt = self.optimizer(params=[adv], **self._optimizer_kwargs) # type: ignore @@ -159,7 +159,7 @@ class FeatureAdversariesPyTorch(EvasionAttack): for _ in trange(self.max_iter, desc="Feature Adversaries PyTorch", disable=not self.verbose): adv.requires_grad = True # clip adversarial within L-Inf delta-ball - adv.data = x.detach() + torch.clamp(adv.detach() - x.detach(), -self.delta, self.delta).detach() + adv.data = x.detach() + torch.clamp(adv.detach() - x.detach(), -self.delta, self.delta) def closure(): if torch.is_grad_enabled(): @@ -172,7 +172,7 @@ class FeatureAdversariesPyTorch(EvasionAttack): opt.step(closure) if self.estimator.clip_values is not None: - adv.data = torch.clamp(adv.detach(), *self.estimator.clip_values).detach() + adv.data = torch.clamp(adv.detach(), *self.estimator.clip_values) return adv.detach().cpu() def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
[IMPR] Reduce code complexity of pwb.main for further improvements Introduce a new function find_filename to look for the given filename and return its path. The code was moved from main(). This change will be used for further improvements.
@@ -168,26 +168,22 @@ except RuntimeError: sys.exit(1) -def main(): - """Command line entry point.""" - global filename - if not filename: - return False +def find_filename(filename): + """Search for the filename in the given script paths.""" + from pywikibot import config - file_package = None - argvu = pwb.argvu[1:] - if not os.path.exists(filename): script_paths = ['scripts', 'scripts.maintenance', 'scripts.userscripts'] - from pywikibot import config + if config.user_script_paths: if isinstance(config.user_script_paths, (tuple, list)): script_paths = config.user_script_paths + script_paths else: warn("'user_script_paths' must be a list or tuple,\n" 'found: {0}. Ignoring this setting.' - ''.format(type(config.user_script_paths))) + .format(type(config.user_script_paths))) + for file_package in script_paths: paths = file_package.split('.') + [filename] testpath = os.path.join(_pwb_dir, *paths) @@ -215,6 +211,22 @@ def main(): .format(' is' if len(similar_scripts) == 1 else 's are')) print('\t' + '.py\n\t'.join(similar_scripts) + '.py') + return None + return filename + + +def main(): + """Command line entry point.""" + global filename + if not filename: + return False + + file_package = None + argvu = pwb.argvu[1:] + + if not os.path.exists(filename): + filename = find_filename(filename) + if filename is None: return True # When both pwb.py and the filename to run are within the current
remove burn valid check Since this has its own disposition options, there is no need to apply it twice.
@@ -1063,7 +1063,7 @@ class MediaProcessor: def burnSubtitleFilter(self, inputfile, subtitle_streams, swl, valid_external_subs=None): if self.settings.burn_subtitles: - filtered_subtitle_streams = [x for x in subtitle_streams if self.validLanguage(x.metadata.get('language'), swl) and self.validDisposition(x.metadata.get('language'), x.dispostr, self.settings.ignored_subtitle_dispositions)] + filtered_subtitle_streams = [x for x in subtitle_streams if self.validLanguage(x.metadata.get('language'), swl)] filtered_subtitle_streams = sorted(filtered_subtitle_streams, key=lambda x: swl.index(x.metadata.get('language')) if x.metadata.get('language') in swl else 999) sub_candidates = [] if len(filtered_subtitle_streams) > 0:
Clean up change_parent matrix calculation Causes fewer updates.
@@ -66,15 +66,16 @@ class Presentation(Matrices, Element, Generic[S]): """Change the parent and update the item's matrix so the item visualy remains in the same place.""" old_parent = self.parent - self.parent = new_parent + if new_parent is old_parent: + return + self.parent = new_parent + m = self.matrix if old_parent: - m = old_parent.matrix_i2c - self.matrix.set(*self.matrix.multiply(m)) - + m = m * old_parent.matrix_i2c if new_parent: - m = new_parent.matrix_i2c.inverse() - self.matrix.set(*self.matrix.multiply(m)) + m = m * new_parent.matrix_i2c.inverse() + self.matrix.set(*m) def load(self, name, value): if name == "matrix": @@ -120,7 +121,7 @@ class Presentation(Matrices, Element, Generic[S]): if new_parent: new_parent.matrix_i2c.add_handler(self._on_matrix_changed) - self._on_matrix_changed(self.matrix, self.matrix.tuple()) + self._on_matrix_changed(None, ()) def _on_matrix_changed(self, matrix, old_value): if self.parent:
[BYOC][MergeComposite] if root->args[i] isn't a CallNode, then Downcast<Call> will fail its check. We needn't execute L131 "call_map->Set(arg, new_arg)", because when arg is a CallNode and root->args[i] is not a CallNode, new_arg will be a null pointer. There is no point in caching a null pointer.
@@ -121,7 +121,7 @@ class MergeCompositeWrapper : public ExprMutator { Array<Expr> new_args; for (const auto& arg : pattern->args) { Expr new_arg; - if (arg->IsInstance<CallNode>()) { + if (arg->IsInstance<CallNode>() && root->args[i]->IsInstance<CallNode>()) { new_arg = ExtractPattern(Downcast<Call>(arg), Downcast<Call>(root->args[i]), var_map, call_map); // if we've already processed this call node, return the previous result
Fix typo in contribution guide If you're only exposure to using... -> If your only exposure to using...
@@ -81,7 +81,7 @@ Following that we'll tell you about how you can test your changes locally and th # If you're using shells other than bash you'll need to use pip install -e ".[test,examples,doc]" ``` - * If you're only exposure to using pip is `pip install package_name` then this might be a bit confusing. + * If your only exposure to using pip is `pip install package_name` then this might be a bit confusing. * If we type `pip install -e .` (notice the 'dot'), this tells `pip` to install a package located here, in this directory, `.`. The `-e` flag indicates that it should be editable, meaning you will not have to run `pip install .` every time you make a change and want to try it. * Finally the `[test,examples,doc]` tells `pip` that there's some extra optional dependencies that we want to install.
Prevent file called `1` from being created during build * Fix issue with shell redirection A file called `1` is no longer created * Remove trailing whitespace
@@ -83,12 +83,12 @@ endif # Fall back to "-march=native" if the compiler doesn't support either of those. ifeq ($(filter -march=%,$(CXXFLAGS)),) - FAIL_A :=$(shell cp /dev/null a.cpp; $(CXX) -march=sandybridge -c a.cpp 2>1 || echo FAIL; rm -f a.cpp a.o) + FAIL_A :=$(shell cp /dev/null a.cpp; $(CXX) -march=sandybridge -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) ifeq ($(FAIL_A),) CXXFLAGS += -march=sandybridge else # g++-4.8.x accepts "-march=corei7-avx" but not "-march=sandybridge " - FAIL_B :=$(shell cp /dev/null a.cpp; $(CXX) -march=corei7-avx -c a.cpp 2>1 || echo FAIL; rm -f a.cpp a.o) + FAIL_B :=$(shell cp /dev/null a.cpp; $(CXX) -march=corei7-avx -c a.cpp 2>&1 || echo FAIL; rm -f a.cpp a.o) ifeq ($(FAIL_B),) CXXFLAGS += -march=corei7-avx else
Add one more `# noqa` to unblock Missed one.
import sys from types import MappingProxyType -from typing import ( # noqa: Y027 +from typing import ( # noqa: Y027,Y038 AbstractSet as Set, AsyncGenerator as AsyncGenerator, AsyncIterable as AsyncIterable,
latex can't recognize the path separator \\ on Windows Modify the path before passing it to the system command
import os import hashlib +from pathlib import Path + from manimlib.constants import TEX_TEXT_TO_REPLACE from manimlib.constants import TEX_USE_CTEX import manimlib.constants as consts @@ -39,12 +41,15 @@ def generate_tex_file(expression, template_tex_file_body): def tex_to_dvi(tex_file): result = tex_file.replace(".tex", ".dvi" if not TEX_USE_CTEX else ".xdv") + result = Path(result).as_posix() + tex_file = Path(tex_file).as_posix() + tex_dir = Path(consts.TEX_DIR).as_posix() if not os.path.exists(result): commands = [ "latex", "-interaction=batchmode", "-halt-on-error", - "-output-directory=\"{}\"".format(consts.TEX_DIR), + "-output-directory=\"{}\"".format(tex_dir), "\"{}\"".format(tex_file), ">", os.devnull @@ -53,7 +58,7 @@ def tex_to_dvi(tex_file): "-no-pdf", "-interaction=batchmode", "-halt-on-error", - "-output-directory=\"{}\"".format(consts.TEX_DIR), + "-output-directory=\"{}\"".format(tex_dir), "\"{}\"".format(tex_file), ">", os.devnull @@ -76,6 +81,8 @@ def dvi_to_svg(dvi_file, regen_if_exists=False): where in the dvi """ result = dvi_file.replace(".dvi" if not TEX_USE_CTEX else ".xdv", ".svg") + result = Path(result).as_posix() + dvi_file = Path(dvi_file).as_posix() if not os.path.exists(result): commands = [ "dvisvgm",
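Why `Path(...).as_posix()` helps, shown in isolation (the paths are made up): backslashes in Windows paths become forward slashes, which latex/dvisvgm accept, while POSIX paths pass through unchanged.

```python
from pathlib import PureWindowsPath, PurePosixPath

print(PureWindowsPath(r"C:\manim\tex\hash.tex").as_posix())  # C:/manim/tex/hash.tex
print(PurePosixPath("/tmp/manim/tex/hash.tex").as_posix())   # /tmp/manim/tex/hash.tex
```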
changelog! [skip-ci]
@@ -7,7 +7,12 @@ Changelog v0.4.2 ====== -Released: |today| +Released: August 4, 2017 + +New Features +------------ + + * Packets with partial body lengths can now be parsed. For now, these packets are converted to have definite lengths instead. (#95) (#208) Bugs Fixed ---------- @@ -15,6 +20,9 @@ Bugs Fixed * PGPKey.decrypt was mistakenly using message.issuers instead of message.encrypters when determining whether or not the key was eligible to attempt decrypting the message (#183) * Fixed an issue with parsing some cleartext messages (#184) + * Fixed signing already-encrypted messages (encrypt-then-sign) (#186) (#191) + * PGP*.from_blob now correctly raises an exception if given zero-length input (#199) (#200) + * Fixed an issue where PGPKey.decrypt would fail with an arcane traceback if the key is passphrase-protected and not unlocked. (#204) v0.4.1 ======
Fix MapR dependency on mysql on RHEL MapR is missing mysql-connector-java and that makes it necessary to have a subscription enabled on RHEL7 Story:
@@ -75,10 +75,14 @@ validators: - libtirpc - libvisual - libxslt + - mariadb + - mariadb-server + - mariadb-libs - mesa-dri-drivers - mesa-libGL - mesa-libGLU - mesa-private-llvm + - mysql-connector-java - nmap-ncat - numactl - openjpeg-libs
qt send tab: when clicking "Max", show tooltip explaining max amt follow
@@ -41,7 +41,7 @@ from typing import Optional, TYPE_CHECKING, Sequence, List, Union import eth_abi from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont -from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal +from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, @@ -1478,6 +1478,18 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): amount_after_all_fees = amount - x_fee_amount self.amount_e.setAmount(amount_after_all_fees) + # show tooltip explaining max amount + mining_fee = tx.get_fee() + mining_fee_str = self.format_amount_and_units(mining_fee) + msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str) + if x_fee_amount: + twofactor_fee_str = self.format_amount_and_units(x_fee_amount) + msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str) + frozen_bal = self.get_frozen_balance_str() + if frozen_bal: + msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal) + QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg) + def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key @@ -1677,6 +1689,21 @@ class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): """ return self.utxo_list.get_spend_list() + def get_text_not_enough_funds_mentioning_frozen(self) -> str: + text = _("Not enough funds") + frozen_str = self.get_frozen_balance_str() + if frozen_str: + text += " ({} {})".format( + frozen_str, _("are frozen") + ) + return text + + def get_frozen_balance_str(self) -> Optional[str]: + frozen_bal = sum(self.wallet.get_frozen_balance()) + if not frozen_bal: + return None + return self.format_amount_and_units(frozen_bal) + def pay_onchain_dialog( self, inputs: Sequence[PartialTxInput], outputs: List[PartialTxOutput], *,
fix: Now PyScript can import modules from the script's path.
@@ -23,10 +23,11 @@ class PyScript(Task): def execute_action(self, **params): root = str(Path(self.path).parent) - task_func = self.get_task_func() - sys.path.append(root) + + task_func = self.get_task_func() output = task_func(**params) + sys.path.remove(root) return output
Add tox environ to test without external utilities available. Break PATH to simulate not having imagemagick, git, etc. installed.
@@ -10,6 +10,9 @@ setenv = # Use per-testenv coverage files to prevent contention when parallel # tests (using `tox -p`) COVERAGE_FILE=.coverage.{envname} + # To test in environment without external utitilities like imagemagick and git installed, + # break PATH in noutils environment(s). + noutils: PATH=/dev/null deps = pytest>=6 pytest-click
CMSIS-DAPv2 USB match class catches UnicodeDecodeError. This error is raised by certain versions of STLinkV2 that are known to have a corrupted interface name.
@@ -268,8 +268,15 @@ class HasCmsisDapv2Interface(object): try: def match_cmsis_dap_interface_name(desc): + try: interface_name = usb.util.get_string(desc.device, desc.iInterface) return (interface_name is not None) and ("CMSIS-DAP" in interface_name) + except UnicodeDecodeError: + # This exception can be raised if the device has a corrupted interface name. + # Certain versions of STLinkV2 are known to have this problem. If we can't + # read the interface name, there's no way to tell if it's a CMSIS-DAPv2 + # interface. + return False config = dev.get_active_configuration() cmsis_dap_interface = usb.util.find_descriptor(config, custom_match=match_cmsis_dap_interface_name)
Fix small typo in setup.py Fixed small typo in setup.py
@@ -279,7 +279,7 @@ class build_deps(Command): # This is not perfect solution as build does not depend on any of # the auto-generated code and auto-generated files will not be # included in this copy. If we want to use auto-generated files, - # we need to find a batter way to do this. + # we need to find a better way to do this. # More information can be found in conversation thread of PR #5772 self.copy_tree('torch/csrc', 'torch/lib/include/torch/csrc/')
Fix / update data path assertion Forgot to push this final change before I merged the last commit.
@@ -696,11 +696,11 @@ class NeoXArgs(*BASE_CLASSES): "in args " # assert that if one of train/test/valid_data_path are provided, all should be - assert_error_mess = "One of train/valid/test data_path is not provided\n" - assert_error_mess += "\n".join( - [f"{name}_data_path:{data_path}," for name, data_path in [self.train_data_path, - self.valid_data_path, - self.test_data_path]]) + assert_error_mess = "One or more of train/valid/test data_path are not provided:\n\t" + assert_error_mess += "\n\t".join( + [f"{name}_data_path: {data_path}," for name, data_path in [['train', self.train_data_path], + ['valid', self.valid_data_path], + ['test', self.test_data_path]]] assert any(has_separate_path) == all(has_separate_path), assert_error_mess return True
Fixed gdbserver response to 'vCont?' command. Search-replace error introduced during PEP8 rename.
@@ -760,7 +760,7 @@ class GDBServer(threading.Thread): # v_cont capabilities query. elif b'Cont?' == cmd: - return self.create_rsp_packet(b"v_cont;c;C;s;S;t") + return self.create_rsp_packet(b"vCont;c;C;s;S;t") # v_cont, thread action command. elif cmd.startswith(b'Cont'):
communication: handle attachment case for both filename and fileid Some incoming files could have either a filename or a fileid for whatever reason. This handles both those cases (at least for now).
@@ -279,12 +279,16 @@ def prepare_to_notify(doc, print_html=None, print_format=None, attachments=None) if isinstance(a, string_types): # is it a filename? try: - # keep this for error handling - _file = frappe.get_doc("File", a) + # check for both filename and file id + file_id = frappe.db.get_list('File', or_filters={'file_name': a, 'name': a}, limit=1) + if not file_id: + frappe.throw(_("Unable to find attachment {0}").format(a)) + file_id = file_id[0]['name'] + _file = frappe.get_doc("File", file_id) _file.get_content() # these attachments will be attached on-demand # and won't be stored in the message - doc.attachments.append({"fid": a}) + doc.attachments.append({"fid": file_id}) except IOError: frappe.throw(_("Unable to find attachment {0}").format(a)) else:
Bugfix htmlsafe function. Somehow the characters intended as unicode escapes were in fact the ASCII versions, which negated the point of htmlsafe and left the output unsafe. This wasn't directly noticeable (as was the intention), so to help avoid this in the future I've used the explicit forms.
@@ -52,10 +52,13 @@ def loads(object_: str, app: Optional["Quart"] = None, **kwargs: Any) -> Any: def htmlsafe_dumps(object_: Any, **kwargs: Any) -> str: - # Note in the below the ascii characters are replaced with a - # unicode similar version. - result = dumps(object_, **kwargs).replace("<", "<").replace(">", ">") - return result.replace("&", "&").replace("'", "'") + return ( + dumps(object_, **kwargs) + .replace("<", "\\u003c") + .replace(">", "\\u003e") + .replace("&", "\\u0026") + .replace("'", "\\u0027") + ) def tojson_filter(object_: Any, **kwargs: Any) -> Markup:
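For context, a minimal standalone sketch of the same escaping idea (using the stdlib `json` module rather than Quart's own `dumps`) shows that the `\uXXXX` forms keep markup out of the output while still round-tripping through a JSON parser:

```python
import json

def htmlsafe_dumps(obj):
    # Same replacements as the patch: escape the characters that could break
    # out of an HTML <script> context with their \uXXXX escape sequences.
    return (
        json.dumps(obj)
        .replace("<", "\\u003c")
        .replace(">", "\\u003e")
        .replace("&", "\\u0026")
        .replace("'", "\\u0027")
    )

payload = {"html": "</script><script>alert(1)</script>"}
safe = htmlsafe_dumps(payload)
assert "<" not in safe and ">" not in safe  # nothing left to close a tag
assert json.loads(safe) == payload          # escapes decode back to the originals
```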
travis.yml: fix pytorch version compatibility bug. `conda install torchvision` installs the latest version, which updates pytorch to 0.3.0 and has some compatibility bugs. Pin the torchvision version to 0.1.8 for now.
@@ -20,7 +20,7 @@ before_install: - pip install -r requirements.txt # dependencies for im2text - pip install Pillow - - conda install torchvision + - conda install torchvision==0.1.8 # dependencies for speech2text - sudo apt-get install -y sox libsox-dev libsox-fmt-all - pip install librosa git+https://github.com/pytorch/audio
Fix checkFormat fail for null input Patched in the function checkFormat itself, instead of the code which calls it
@@ -344,6 +344,7 @@ def formatSubclassOf(fmt, cls, ontology, visited): def checkFormat(actualFile, inputFormats, ontology): # type: (Union[Dict[Text, Any], List, Text], Union[List[Text], Text], Graph) -> None for af in aslist(actualFile): + if not af: continue if "format" not in af: raise validate.ValidationException(u"Missing required 'format' for File %s" % af) for inpf in aslist(inputFormats): @@ -551,7 +552,7 @@ class Process(object): if self.formatgraph: for i in self.tool["inputs"]: d = shortname(i["id"]) - if d in builder.job and i.get("format") and builder.job[d]: + if d in builder.job and i.get("format"): checkFormat(builder.job[d], builder.do_eval(i["format"]), self.formatgraph) builder.bindings.extend(builder.bind_input(self.inputs_record_schema, builder.job))
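A stripped-down sketch of the guard (hypothetical helpers mirroring cwltool's `aslist`/`checkFormat`, not the actual implementation) shows why skipping falsy entries avoids the crash on null File inputs:

```python
def aslist(value):
    # cwltool-style helper: wrap a single value into a list.
    return value if isinstance(value, list) else [value]

def check_format(actual_file, input_formats):
    for af in aslist(actual_file):
        if not af:      # null/empty optional File input: nothing to validate
            continue
        if "format" not in af:
            raise ValueError("Missing required 'format' for File %s" % af)
        # ... format/ontology comparison would follow here ...

check_format(None, ["http://edamontology.org/format_2330"])            # no longer raises
check_format([{"format": "http://edamontology.org/format_2330"}], [])  # still validated
```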
linked-list: Implement extra-credit tests add optional tests for users to implement __len__() and __iter__()
@@ -44,6 +44,24 @@ class LinkedListTests(unittest.TestCase): self.assertEqual(50, self.list.pop()) self.assertEqual(30, self.list.shift()) + @unittest.skip("extra-credit") + def test_length(self): + self.list.push(10) + self.list.push(20) + self.assertEqual(2, len(self.list)) + self.list.shift() + self.assertEqual(1, len(self.list)) + self.list.pop() + self.assertEqual(0, len(self.list)) + + @unittest.skip("extra-credit") + def test_iterator(self): + self.list.push(10) + self.list.push(20) + iterator = iter(self.list) + self.assertEqual(10, next(iterator)) + self.assertEqual(20, next(iterator)) + if __name__ == '__main__': unittest.main()
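For illustration only, a minimal list-backed class that would satisfy the new extra-credit tests might look like the sketch below; a real exercism solution would use linked nodes rather than a Python list:

```python
class LinkedList:
    def __init__(self):
        self._values = []

    def push(self, value):   # add to the tail
        self._values.append(value)

    def pop(self):           # remove from the tail
        return self._values.pop()

    def shift(self):         # remove from the head
        return self._values.pop(0)

    def __len__(self):       # enables len(self.list) in test_length
        return len(self._values)

    def __iter__(self):      # enables iter(self.list) / next(...) in test_iterator
        return iter(self._values)
```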
Swap order of coverage actions in source It would be nice to get the html report even if the coverage percent has dropped below the failing threshold.
@@ -42,8 +42,8 @@ coverage: python3 -m coverage --version python3 -m coverage run --timid --branch -m pytest ./tests/whitebox/integration python3 -m coverage run --timid --branch -a -m pytest ./tests/whitebox/monkey_patching/test_keyboard_interrupt.py - python3 -m coverage report -m --fail-under=86 --show-missing --include="./src/*" python3 -m coverage html --include="./src/*" + python3 -m coverage report -m --fail-under=86 --show-missing --include="./src/*" keyboard-interrupt-test: py.test-3 ${PYTEST_OPTS} ./tests/whitebox/monkey_patching/test_keyboard_interrupt.py
Clip values and replace transposes with permute. Without clipping, any value larger than 255 is replaced with int(v mod 256) by byte(), which results in high-frequency noise in the image.
@@ -90,6 +90,6 @@ def save_image(tensor, filename, nrow=8, padding=2, tensor = tensor.cpu() grid = make_grid(tensor, nrow=nrow, padding=padding, normalize=normalize, range=range, scale_each=scale_each) - ndarr = grid.mul(255).byte().transpose(0, 2).transpose(0, 1).numpy() + ndarr = grid.mul(255).clip(0, 255).byte().permute(1, 2, 0).numpy() im = Image.fromarray(ndarr) im.save(filename)
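A tiny repro of the wrap-around (values are illustrative; exact out-of-range casting behaviour can vary across torch versions and backends) shows why clamping before the `byte()` cast matters:

```python
import torch

grid = torch.tensor([[0.9, 1.2]])      # values > 1.0 can occur when normalize=False
scaled = grid.mul(255)                 # tensor([[229.5, 306.0]])

wrapped = scaled.byte()                # 306 wraps to roughly 306 % 256 == 50 -> dark speckle
clamped = scaled.clamp(0, 255).byte()  # 306 saturates to 255 instead
print(wrapped, clamped)
```

On recent torch releases `Tensor.clip` is an alias for `clamp`, which is the call the patch uses.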
Have CaseBlock leave date_opened unset unless explicitly passed in Letting the default behavior kick in where date_opened automatically is set to date_modified
@@ -35,8 +35,7 @@ class CaseBlock(object): self.update = copy.copy(update) if update else {} now = datetime.utcnow() self.date_modified = date_modified or now - self.date_opened = (now if create and date_opened is CaseBlock.undefined - else date_opened) + self.date_opened = date_opened self.case_type = "" if create and case_type is CaseBlock.undefined else case_type self.case_name = "" if create and case_name is CaseBlock.undefined else case_name
Action is now recorded as event:PutEvents (fully qualified) We no longer need to amend how the Action is reported in the describe_event_bus policy
@@ -235,7 +235,7 @@ class EventsBackend(BaseBackend): 'Sid': statement_id, 'Effect': 'Allow', 'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])}, - 'Action': 'events:{0}'.format(data['action']), + 'Action': data['action'], 'Resource': arn }) return {
fix: return 0 if no users are found. If no users are present in the system, get_total_users returns None instead of 0, causing comparison issues.
@@ -841,11 +841,11 @@ def user_query(doctype, txt, searchfield, start, page_len, filters): def get_total_users(): """Returns total no. of system users""" - return frappe.db.sql('''SELECT SUM(`simultaneous_sessions`) + return cint(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`) FROM `tabUser` WHERE `enabled` = 1 AND `user_type` = 'System User' - AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0] + AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]) def get_system_users(exclude_users=None, limit=None): if not exclude_users:
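A minimal sketch of the failure mode and the fix, with a hypothetical stand-in for `frappe.utils.cint` (the real helper does more coercion):

```python
def cint(value, default=0):
    # Hypothetical simplified frappe.utils.cint: coerce None/invalid input to int.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

total = None             # what SUM(`simultaneous_sessions`) yields with zero matching rows
# total > 10             # TypeError on Python 3: '>' not supported between NoneType and int
assert cint(total) == 0  # wrapped value compares normally: 0 > 10 is simply False
```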
fix: Make infer dummy entity join key idempotent fix: Infer dummy entity join key once
@@ -156,7 +156,11 @@ def update_feature_views_with_inferred_features_and_entities( ) # Infer a dummy entity column for entityless feature views. - if len(fv.entities) == 1 and fv.entities[0] == DUMMY_ENTITY_NAME: + if ( + len(fv.entities) == 1 + and fv.entities[0] == DUMMY_ENTITY_NAME + and not fv.entity_columns + ): fv.entity_columns.append(Field(name=DUMMY_ENTITY_ID, dtype=String)) # Run inference for entity columns if there are fewer entity fields than expected.
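Conceptually the added `not fv.entity_columns` check is just an idempotency guard; a toy version of the pattern (names are illustrative, not Feast's real constants):

```python
def infer_dummy_entity(entity_columns):
    # Only append the dummy entity column if inference hasn't added it already.
    if not entity_columns:
        entity_columns.append("__dummy_id")
    return entity_columns

cols = []
infer_dummy_entity(cols)
infer_dummy_entity(cols)       # second inference pass is a no-op
assert cols == ["__dummy_id"]  # no duplicated dummy column
```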
Remove comment that is in conflict with the code From git blame, this hasn't been the case for at least 2 years
@@ -43,8 +43,6 @@ domain_specific = [ url(r'^logo.png', logo, name='logo'), url(r'^apps/', include('corehq.apps.app_manager.urls')), url(r'^api/', include('corehq.apps.api.urls')), - # the receiver needs to accept posts at an endpoint that might - # not have a slash, so don't include it at the root urlconf url(r'^receiver/', include('corehq.apps.receiverwrapper.urls')), url(r'^settings/', include(settings_domain_specific)), url(r'^enterprise/', include(accounting_domain_specific)),
Modify 'Click to expand' detail margin Decrease the margin-top where a <detail> panel follows a paragraph, and increase the margin-bottom.
@@ -313,6 +313,10 @@ h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend { line-height: 1.5; } +.wy-body-for-nav p + details { + margin: -10px 0 15px; +} + .wy-body-for-nav blockquote { margin: 1em 0; padding-left: 1em;
Convert strs to bytes before decoding in tests Fixes pypy3 test failure Resolves
import base64 +import six from globus_sdk.authorizers import BasicAuthorizer from tests.framework import CapturedIOTestCase @@ -24,7 +25,7 @@ class BasicAuthorizerTests(CapturedIOTestCase): # confirm value self.assertEqual(header_dict["Authorization"][:6], "Basic ") decoded = base64.b64decode( - header_dict["Authorization"][6:]).decode('utf-8') + six.b(header_dict["Authorization"][6:])).decode('utf-8') self.assertEqual(decoded, "{0}:{1}".format(self.username, self.password)) @@ -38,7 +39,7 @@ class BasicAuthorizerTests(CapturedIOTestCase): # confirm values self.assertEqual(header_dict["Authorization"][:6], "Basic ") decoded = base64.b64decode( - header_dict["Authorization"][6:]).decode('utf-8') + six.b(header_dict["Authorization"][6:])).decode('utf-8') self.assertEqual(decoded, "{0}:{1}".format(self.username, self.password)) self.assertEqual(header_dict["Header"], "value")
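A standalone sketch of the round-trip (assuming `six` is available; on Python 3 `six.b` simply encodes the header slice to bytes, which keeps PyPy3's `b64decode` happy):

```python
import base64
import six

username, password = "user", "pa55w0rd"
header = "Basic " + base64.b64encode(
    "{0}:{1}".format(username, password).encode("utf-8")).decode("utf-8")

token = header[6:]   # the part after "Basic ", a text string on both py2 and py3
decoded = base64.b64decode(six.b(token)).decode("utf-8")
assert decoded == "{0}:{1}".format(username, password)
```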
fix bug where gallery is None in generated envs. The gallery is created in __post_init__, but that method isn't called for a generated environment, so update the code to create a _gallery cache and keep it in sync with gallery_raw. Also a small fix that removes the vhd secret, as it may block troubleshooting of the vhd path.
@@ -2,7 +2,7 @@ import json import logging import os import re -from dataclasses import dataclass, field +from dataclasses import InitVar, dataclass, field from datetime import datetime from functools import lru_cache from pathlib import Path @@ -151,30 +151,32 @@ class AzureNodeSchema: # for gallery image, which need to accept terms purchase_plan: Optional[AzureVmPurchasePlanSchema] = None - def __post_init__(self, *args: Any, **kwargs: Any) -> None: - add_secret(self.vhd) + _gallery: InitVar[Optional[AzureVmGallerySchema]] = None + @property + def gallery(self) -> Optional[AzureVmGallerySchema]: + # this is a safe guard and prevent mypy error on typing + if not hasattr(self, "_gallery"): + self._gallery: Optional[AzureVmGallerySchema] = None + gallery: Optional[AzureVmGallerySchema] = self._gallery + if not gallery: if isinstance(self.gallery_raw, dict): - self.gallery: Optional[ - AzureVmGallerySchema - ] = AzureVmGallerySchema.schema().load( # type: ignore + gallery = AzureVmGallerySchema.schema().load( # type: ignore self.gallery_raw ) # this step makes gallery_raw is validated, and filter out any unwanted # content. - self.gallery_raw = self.gallery.to_dict() # type: ignore + self.gallery_raw = gallery.to_dict() # type: ignore elif self.gallery_raw: assert isinstance( self.gallery_raw, str ), f"actual: {type(self.gallery_raw)}" - gallery = re.split(r"[:\s]+", self.gallery_raw.strip()) + gallery_strings = re.split(r"[:\s]+", self.gallery_raw.strip()) - if len(gallery) == 4: - self.gallery = AzureVmGallerySchema( - gallery[0], gallery[1], gallery[2], gallery[3] - ) + if len(gallery_strings) == 4: + gallery = AzureVmGallerySchema(*gallery_strings) # gallery_raw is used - self.gallery_raw = self.gallery.to_dict() # type: ignore + self.gallery_raw = gallery.to_dict() # type: ignore else: raise LisaException( f"Invalid value for the provided gallery " @@ -183,12 +185,22 @@ class AzureNodeSchema: f"'<Publisher> <Offer> <Sku> <Version>' " f"or '<Publisher>:<Offer>:<Sku>:<Version>'" ) + self._gallery = gallery + return gallery + + @gallery.setter + def gallery(self, value: Optional[AzureVmGallerySchema]) -> None: + self._status = value + if value is None: + self.gallery_raw = None + else: + self.gallery_raw = value.to_dict() # type: ignore def get_image_name(self) -> str: result = "" if self.vhd: result = self.vhd - elif self.gallery_raw: + elif self.gallery: assert isinstance( self.gallery_raw, dict ), f"actual type: {type(self.gallery_raw)}"
bypassConvert adjustments. Logging no longer shows options that are never created or used when bypass convert is triggered; options-only mode also reflects bypassConvert.
@@ -86,10 +86,17 @@ class MediaProcessor: options = None preopts = None postopts = None + outputfile = None + ripped_subs = [] + downloaded_subs = [] info = info or self.isValidSource(inputfile) if info: + if self.canBypassConvert(inputfile, info): + outputfile = inputfile + self.log.info("Bypassing conversion and setting outputfile to inputfile.") + else: try: options, preopts, postopts, ripsubopts, downloaded_subs = self.generateOptions(inputfile, info=info, original=original) except: @@ -114,13 +121,6 @@ class MediaProcessor: self.log.exception("Unable to log options.") ripped_subs = self.ripSubs(inputfile, ripsubopts) - - if self.canBypassConvert(inputfile, info, options): - outputfile = inputfile - for sub in downloaded_subs: - self.log.debug("Removing downloaded sub %s for media that is not being converted." % (sub)) - self.removeFile(sub) - else: try: outputfile, inputfile = self.convert(options, preopts, postopts, reportProgress) except: @@ -257,7 +257,11 @@ class MediaProcessor: # Generate a JSON formatter dataset with the input and output information and ffmpeg command for a theoretical conversion def jsonDump(self, inputfile, original=None): dump = {} - dump["input"] = self.generateSourceDict(inputfile) + dump["input"], info = self.generateSourceDict(inputfile) + if self.canBypassConvert(inputfile, info): + dump["output"] = dump["input"] + dump["output"]["bypassConvert"] = True + else: dump["output"], dump["preopts"], dump["postopts"], dump["ripsubopts"], dump["downloadedsubs"] = self.generateOptions(inputfile, original) parsed = self.converter.parse_options(dump["output"]) input_dir, filename, input_extension = self.parseFile(inputfile) @@ -287,7 +291,7 @@ class MediaProcessor: output.update(probe.toJson) else: output['error'] = "Invalid input, unable to read" - return output + return output, probe # Pass over audio and subtitle streams to ensure the language properties are safe, return any adjustments made to SWL/AWL if relax is enabled def safeLanguage(self, info): @@ -1101,7 +1105,7 @@ class MediaProcessor: return True return False - def canBypassConvert(self, inputfile, info, options): + def canBypassConvert(self, inputfile, info): # Process same extensions if self.settings.output_extension == self.parseFile(inputfile)[2]: if not self.settings.force_convert and not self.settings.process_same_extensions: @@ -1110,13 +1114,6 @@ class MediaProcessor: elif info.format.metadata.get('encoder', '').startswith('sma') and not self.settings.force_convert: self.log.info("Input and output extensions match and the file appears to have already been processed by SMA, enable force-convert to override [force-convert: %s]." % self.settings.force_convert) return True - ''' - elif not self.settings.force_convert and len([x for x in [options['video']] + [x for x in options['audio']] + [x for x in options['subtitle']] if x['codec'] != 'copy']) == 0: - self.log.info("Input and output extensions match and every codec is copy, this file probably doesn't need conversion, returning [force-convert: %s]." % self.settings.force_convert) - return True - elif self.settings.force_convert: - self.log.info("Input and output extensions match and every codec is copy, this file probably doesn't need conversion, but conversion being forced [force-convert: %s]." % self.settings.force_convert) - ''' return False # Encode a new file based on selected options, built in naming conflict resolution
handle existing phenotype terms; log better
@@ -26,13 +26,16 @@ def update_variants(adapter, case_obj, old_variants): for variant in old_variants: new_variant = Variant.objects(variant_id=variant['variant_id']).first() if new_variant is None: - logger.warning("missing variant: %s", variant['variant_id']) + logger.warn("missing variant: %s", variant['variant_id']) continue if variant.get('manual_rank'): + logger.info("updating manual rank: %s", variant['manual_rank']) new_variant.manual_rank = variant['manual_rank'] if variant.get('sanger_ordered') is True: + logger.info("marking sanger order") new_variant.sanger_ordered = True if new_variant not in case_obj.suspects: + logger.info("adding suspect: %s", variant['variant_id']) new_variant.suspects.append(new_variant) yield new_variant @@ -118,19 +121,19 @@ def update_case(adapter, case_obj, exported_data): if phenotype_terms: logger.info("Updating phenotype terms") existing_terms = case_obj.phenotype_terms - if len(existing_terms) == 0: - for term in phenotype_terms: - hpo_obj = adapter.hpo_term(term) + existing_ids = set(term.phenotype_id for term in existing_terms) + for hpo_id in phenotype_terms: + if hpo_id in existing_ids: + logger.info("term already added: %s", hpo_id) + else: + hpo_obj = adapter.hpo_term(hpo_id) if hpo_obj: - new_term = PhenotypeTerm(phenotype_id=term, + new_term = PhenotypeTerm(phenotype_id=hpo_id, feature=hpo_obj.description) - logger.info("Adding term %s", term) - existing_terms.append(new_term) - else: - logger.info("Could not find term %s", term) - case_obj.phenotype_terms = existing_terms + logger.info("Adding term %s", hpo_id) + case_obj.phenotype_terms.append(new_term) else: - logger.info("Case was already phenotyped") + logger.info("Could not find term %s", hpo_id) # Update the phenotype groups # If the case has a phenotype group, skip to add the groups @@ -189,6 +192,7 @@ def update_cases(context, exported_cases): new_events = update_events(adapter, case_obj, exported_info['events']) for new_event in new_events: + logger.info("adding new event: %s", new_event.verb) new_event.save() case_obj.is_migrated = True case_obj.save()
Update README.md Simple, minor typo in the Readme.
<img height="120px" align="right" src="/static/mythril.png"/> -Mythril is a security analysis tool for Ethereum smart contracts. I uses concolic analysis, taint analysis and control flow checking to detect a variety of security vulnerabilities. The analysis is based on [laser-ethereum](https://github.com/b-mueller/laser-ethereum), a symbolic execution library for EVM bytecode. +Mythril is a security analysis tool for Ethereum smart contracts. It uses concolic analysis, taint analysis and control flow checking to detect a variety of security vulnerabilities. The analysis is based on [laser-ethereum](https://github.com/b-mueller/laser-ethereum), a symbolic execution library for EVM bytecode. ## Installation and setup
docs: tutorials: models: slr: Document that hyperparameters live in model config Related:
@@ -62,6 +62,9 @@ Anything that a user might want to tweak about a models behavior should go in the ``Config`` class for the model. The naming convention is ``TheName`` + ``Model`` + ``Config``. +Hyperparameters for a model should live inside the model's config. Ideally at +the top level and not nested within another structure. + Our model has three configurable properties. - ``features``. A list of :py:class:`Feature <dffml.feature.Feature>` objects
Updated acknowledgements file to change old username. I recently moved from @Jammy4312 to @JamesMCo and this commit fixes that in the acknowledgements file.
@@ -57,7 +57,7 @@ Documenters - Matt Caldwell (@mattcaldwell) - berdario (@berdario) - Cory Taylor (@coryandrewtaylor) -- James C. (@Jammy4312) +- James C. (@JamesMCo) - Ally Weir (@allyjweir) - Steven Loria (@sloria) - Patrick Abeya (@wombat2k)
Menu : Fix search widget focus styling on Mac By default, Qt draws an additional focus box that we don't want.
@@ -201,6 +201,7 @@ class Menu( GafferUI.Widget ) : self.__searchMenu = _Menu( self._qtWidget(), "" ) self.__searchMenu.aboutToShow.connect( Gaffer.WeakMethod( self.__searchMenuShow ) ) self.__searchLine = QtWidgets.QLineEdit() + self.__searchLine.setAttribute( QtCore.Qt.WA_MacShowFocusRect, False ) self.__searchLine.textEdited.connect( Gaffer.WeakMethod( self.__updateSearchMenu ) ) self.__searchLine.returnPressed.connect( Gaffer.WeakMethod( self.__searchReturnPressed ) ) self.__searchLine.setObjectName( "search" )
Move start of consensus items after Genesis Move the start of the consensus network items to after genesis, as the validator is not ready to satisfy any requests on that interconnect.
@@ -404,14 +404,14 @@ class Validator: def start(self): self._component_dispatcher.start() self._component_service.start() - self._consensus_dispatcher.start() - self._consensus_service.start() if self._genesis_controller.requires_genesis(): self._genesis_controller.start(self._start) else: self._start() def _start(self): + self._consensus_dispatcher.start() + self._consensus_service.start() self._network_dispatcher.start() self._network_service.start()
adding the Privacy, Security and OSINT Show adding the Privacy, Security and OSINT Show
- [Talos Takes](https://talosintelligence.com/podcasts/shows/talos_takes) - [Open Source Security Podcast](https://opensourcesecurity.io/category/podcast/) - [Crypto-Gram Security Podcast](http://crypto-gram.libsyn.com/) +- [The Privacy, Security and OSINT Show](https://inteltechniques.com/podcast.html)
Update elf_coinminer.txt Detection update due to PA Unit 42 information.
# Reference: https://isc.sans.edu/forums/diary/Crypto+Mining+Is+More+Popular+Than+Ever/24050 # Reference: https://www.alibabacloud.com/blog/jbossminer-mining-malware-analysis_593804 # Reference: https://blog.talosintelligence.com/2018/08/rocke-champion-of-monero-miners.html +# Reference: https://unit42.paloaltonetworks.com/malware-used-by-rocke-group-evolves-to-evade-detection-by-cloud-security-products/ 3g2upl4pq6kufc4m.tk +a.ssvs.space +aybc.so blockbitcoin.com d3goboxon32grk2l.tk d20blzxlz9ydha.cloudfront.net dazqc4f140wtl.cloudfront.net +dwn.rundll32.ml enjoytopic.tk realtimenews.tk sydwzl.cn
fix typos fix some typos
@@ -21,7 +21,8 @@ TAG=0.2.1 kubectl apply -f ./install/$TAG/kfserving.yaml ``` By default, you can create InferenceService instances in any namespace which has no label with `control-plane` as key. -You can aslo configure KFServing to make InferenceService instances only work in the namespace which has label pair `serving.kubeflow.org/inferenceservice: enabled`. To enable this mode, you need add `env` field as below to `manager` container of statefulset `kfserving-controller-manager`. +You can also configure KFServing to make InferenceService instances only work in the namespace which has label pair `serving.kubeflow.org/inferenceservice: enabled`. To enable this mode, you need to add `env` field as stated below to `manager` container of statefulset `kfserving-controller-manager`. + ``` env: - name: ENABLE_WEBHOOK_NAMESPACE_SELECTOR
operations.format: don't regen all fetchables (and chksums) for fetch ops If they're provided to the call. This fixes `pmaint digest` complaining about missing digests when trying to fetch/digest distfiles for new ebuilds.
@@ -135,8 +135,9 @@ class operations(_operations_mod.base): if not isinstance(fetchables, (tuple, list)): fetchables = [fetchables] ret = [] + fetcher = self._fetch_kls(self.domain, self.pkg, fetchables, self._find_fetcher()) for fetchable in fetchables: - ret.append(self._fetch_op.fetch_one(fetchable, self._get_observer(observer))) + ret.append(fetcher.fetch_one(fetchable, self._get_observer(observer))) return all(ret) return self._fetch_op.fetch_all(self._get_observer(observer))
devstack: enable flow based tunnels for sfc ODL SFC requires flow based tunnels to operate correctly. This is enabled through a specific property set as an external id in the OVS database.
@@ -336,6 +336,9 @@ function bind_opendaylight_controller { other_config:provider_mappings=$ODL_PROVIDER_MAPPINGS fi sudo ovs-vsctl set Open_vSwitch $ovstbl other_config:local_ip=$ODL_LOCAL_IP + if [[ ",$ODL_NETVIRT_KARAF_FEATURE," =~ ",odl-netvirt-sfc," ]]; then + sudo ovs-vsctl set Open_vSwitch $ovstbl external_ids:of-tunnel=true + fi # for pseudo agent port binding if [ "$ODL_PORT_BINDING_CONTROLLER" == "pseudo-agentdb-binding" ]; then ODL_OVS_HOSTCONFIGS_OPTIONS=${ODL_OVS_HOSTCONFIGS_OPTIONS:---debug --noovs_dpdk}
Bugfix in PC_Miner.py. Fix a missing ) that makes the app crash.
@@ -222,7 +222,7 @@ class Client: return (NODE_ADDRESS, NODE_PORT) elif "message" in response: - pretty_print(f"Warning: {response['message']}" + pretty_print(f"Warning: {response['message']}") + f", retrying in {retry_count*2}s", "warning", "net0")
Fix formulation of eigenvectors Add the missing imaginary unit in the section 'Create modulated structure'
@@ -1141,7 +1141,7 @@ eigenvectors with amplitudes and phase factors as ```{math} \frac{A} { \sqrt{N_\mathrm{a}m_j} } \operatorname{Re} \left[ \exp(i\phi) -\mathbf{e}_j \exp( \mathbf{q} \cdot \mathbf{r}_{jl} ) \right], +\mathbf{e}_j \exp( i \mathbf{q} \cdot \mathbf{r}_{jl} ) \right], ``` where {math}`A` is the amplitude, {math}`\phi` is the phase,
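For reference, the corrected displacement expression is:

```math
\frac{A}{\sqrt{N_\mathrm{a} m_j}}\,
\operatorname{Re}\left[ \exp(i\phi)\,
\mathbf{e}_j \exp( i\,\mathbf{q} \cdot \mathbf{r}_{jl} ) \right]
```

Without the imaginary unit, exp(q·r) would act as a real, position-dependent amplitude factor rather than the intended phase factor.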
Update `AsyncHTTPClient` to latest version `1.11.1`. Version `1.11.1` of `AsyncHTTPClient` only supports Swift 5.4+.
"maintainer": "[email protected]", "compatibility": [ { - "version": "5.0", - "commit": "a72c5adce3986ff6b5092ae0464a8f2675087860" + "version": "5.4", + "commit": "794dc9d42720af97cedd395e8cd2add9173ffd9a" } ], "platforms": [
update download location for BLAST 2.2.26 for Travis CI Travis builds were failing because NCBI have removed the 2.2.26 obsolete executables from their FTP site. The Travis config file now points at VBI at Virginia Tech's mirror
@@ -19,7 +19,7 @@ script: # application dependencies: BLAST+, legacy BLAST, MUMMER before_install: - cd $HOME - - wget ftp://ftp.ncbi.nlm.nih.gov/blast/executables/legacy.NOTSUPPORTED/2.2.26/blast-2.2.26-x64-linux.tar.gz + - wget http://mirrors.vbi.vt.edu/mirrors/ftp.ncbi.nih.gov/blast/executables/blast+/2.2.26/blast-2.2.26-x64-linux.tar.gz - tar -zxvf blast-2.2.26-x64-linux.tar.gz - export PATH=$HOME/blast-2.2.26/bin:$PATH - cd $TRAVIS_BUILD_DIR
[drivers.SAFE.quicklook] generalized to allow unpacked + tar.gz scenes. Until now this method worked on zipped archives only.
@@ -1290,15 +1290,14 @@ class SAFE(ID): if format != 'kmz': raise RuntimeError('currently only kmz is supported as format') kml_name = self.findfiles('map-overlay.kml')[0] - kml_membername = kml_name.replace(self.scene, '').strip(r'\/') - png = self.findfiles('quick-look.png')[0] - png_membername = png.replace(self.scene, '').strip(r'\/') - with zf.ZipFile(self.scene, 'r') as archive: + png_name = self.findfiles('quick-look.png')[0] with zf.ZipFile(outname, 'w') as out: - kml = archive.open(kml_membername).read().decode('utf-8') + with self.getFileObj(kml_name) as kml_in: + kml = kml_in.getvalue().decode('utf-8') kml = kml.replace('Sentinel-1 Map Overlay', self.outname_base()) out.writestr('doc.kml', data=kml) - out.writestr('quick-look.png', data=archive.open(png_membername).read()) + with self.getFileObj(png_name) as png_in: + out.writestr('quick-look.png', data=png_in.getvalue()) def scanMetadata(self): with self.getFileObj(self.findfiles('manifest.safe')[0]) as input:
Adapted code for q0 off-sweetspot; the changes concern only the functions handling the noise.
@@ -684,7 +684,7 @@ def distort_amplitude(fitted_stepresponse_ty,amp,tlist_new,sim_step_new): def shift_due_to_fluxbias_q0(fluxlutman,amp_final,fluxbias_q0): if not fluxlutman.czd_double_sided(): - omega_0 = fluxlutman.calc_amp_to_freq(0,'01') + omega_0 = compute_sweetspot_frequency(fluxlutman.q_polycoeffs_freq_01_det(),fluxlutman.q_freq_01()) f_pulse = fluxlutman.calc_amp_to_freq(amp_final,'01') f_pulse = np.clip(f_pulse,a_min=None,a_max=omega_0) # necessary otherwise the sqrt below gives nan @@ -701,7 +701,7 @@ def shift_due_to_fluxbias_q0(fluxlutman,amp_final,fluxbias_q0): amp_B = amp_final[half_length:] - omega_0 = fluxlutman.calc_amp_to_freq(0,'01') + omega_0 = compute_sweetspot_frequency(fluxlutman.q_polycoeffs_freq_01_det(),fluxlutman.q_freq_01()) f_pulse_A = fluxlutman.calc_amp_to_freq(amp_A,'01') f_pulse_A = np.clip(f_pulse_A,a_min=None,a_max=omega_0) @@ -748,8 +748,8 @@ def return_jump_operators(noise_parameters_CZ, f_pulse_final, fluxlutman): # time-dependent jump operators on q0 if T2_q0_amplitude_dependent[0] != -1: - f_pulse_final = np.clip(f_pulse_final,a_min=None,a_max=fluxlutman.q_freq_01()) - sensitivity = calc_sensitivity(f_pulse_final,fluxlutman.q_freq_01()) + f_pulse_final = np.clip(f_pulse_final,a_min=None,a_max=compute_sweetspot_frequency(fluxlutman.q_polycoeffs_freq_01_det(),fluxlutman.q_freq_01())) + sensitivity = calc_sensitivity(f_pulse_final,compute_sweetspot_frequency(fluxlutman.q_polycoeffs_freq_01_det(),fluxlutman.q_freq_01())) for i in range(len(sensitivity)): if sensitivity[i] < 1e-1: sensitivity[i] = 1e-1 @@ -1258,6 +1258,8 @@ def sensitivity_to_fluxoffsets(U_final_vec,input_to_parallelize,t_final,w_q0,w_q print('infid_vec =',infid_vec.tolist()) +def compute_sweetspot_frequency(polycoeff,freq_at_0_amp): + return polycoeff[1]**2/2/polycoeff[0]-polycoeff[2]+freq_at_0_amp def repeated_CZs_decay_curves(U_superop_average,t_final,w_q0,w_q1,alpha_q0):
Add float type to load_items to support configuration parameters of float type. Handle the missing float type when loading configuration group parameters so that parameters of type float can be attached to the trove instance successfully. Story: Task: 42508
@@ -166,6 +166,8 @@ class Configuration(object): item.configuration_value = bool(int(item.configuration_value)) elif rule.data_type == 'integer': item.configuration_value = int(item.configuration_value) + elif rule.data_type == 'float': + item.configuration_value = float(item.configuration_value) else: item.configuration_value = str(item.configuration_value) return config_items
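A self-contained sketch of the type dispatch this patch extends (hypothetical function name; in Trove the logic lives on the Configuration model and takes `data_type` from the validation rule):

```python
def cast_configuration_value(raw_value, data_type):
    # Route each configuration parameter to the matching Python type
    # before it is applied to the instance.
    if data_type == 'boolean':
        return bool(int(raw_value))
    elif data_type == 'integer':
        return int(raw_value)
    elif data_type == 'float':   # the newly supported branch
        return float(raw_value)
    return str(raw_value)

assert cast_configuration_value("0.75", "float") == 0.75
assert cast_configuration_value("1", "boolean") is True
assert cast_configuration_value("42", "integer") == 42
```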
Update README.md Add new install steps for conjure-up
@@ -22,10 +22,7 @@ This is a minimal Kubernetes cluster comprised of the following components and f Installation has been automated via [conjure-up](http://conjure-up.io/): - sudo apt-add-repository ppa:juju/stable - sudo apt-add-repository ppa:conjure-up/next - sudo apt update - sudo apt install conjure-up + sudo snap install conjure-up --classic conjure-up canonical-kubernetes Conjure will prompt you for deployment options (AWS, GCE, Azure, etc.) and credentials.
Update knowledge_graph.py. The "l" in "bool" is missing from the docstring.
@@ -27,7 +27,7 @@ class KnowledgeGraphDataset(DGLBuiltinDataset): ----------- name: str Name can be 'FB15k-237', 'FB15k' or 'wn18'. - reverse: boo + reverse: bool Whether add reverse edges. Default: True. raw_dir : str Raw file directory to download/contains the input data directory.