message (string, lengths 13-484)
diff (string, lengths 38-4.63k)
Added a duplicates() method to the IPAddress and Prefix model managers. Refactored the condition in the IPAddress and Prefix clean() methods to use the new manager method.
@@ -262,6 +262,12 @@ class PrefixQuerySet(NullsFirstQuerySet): return queryset return filter(lambda p: p.depth <= limit, queryset) + def duplicates(self, prefix): + return self.filter( + vrf=prefix.vrf, + prefix=str(prefix) + ).exclude(pk=prefix.pk) + class Prefix(CreatedUpdatedModel, CustomFieldModel): """ @@ -299,7 +305,6 @@ class Prefix(CreatedUpdatedModel, CustomFieldModel): return reverse('ipam:prefix', args=[self.pk]) def clean(self): - # Disallow host masks if self.prefix: if self.prefix.version == 4 and self.prefix.prefixlen == 32: @@ -311,6 +316,16 @@ class Prefix(CreatedUpdatedModel, CustomFieldModel): 'prefix': "Cannot create host addresses (/128) as prefixes. Create an IPv6 address instead." }) + if ((not self.vrf and settings.ENFORCE_GLOBAL_UNIQUE) or (self.vrf and self.vrf.enforce_unique)): + dupes = Prefix.objects.duplicates(self) + if dupes: + raise ValidationError({ + 'prefix': "Duplicate prefix found in {}: {}".format( + "VRF {}".format(self.vrf) if self.vrf else "global table", + dupes.first(), + ) + }) + def save(self, *args, **kwargs): if self.prefix: # Clear host bits from prefix @@ -361,6 +376,12 @@ class IPAddressManager(models.Manager): qs = super(IPAddressManager, self).get_queryset() return qs.annotate(host=RawSQL('INET(HOST(ipam_ipaddress.address))', [])).order_by('family', 'host') + def duplicates(self, ip_obj): + return self.filter( + vrf=ip_obj.vrf, + address__net_host=str(ip_obj.address.ip) + ).exclude(pk=ip_obj.pk) + class IPAddress(CreatedUpdatedModel, CustomFieldModel): """ @@ -401,21 +422,22 @@ class IPAddress(CreatedUpdatedModel, CustomFieldModel): return reverse('ipam:ipaddress', args=[self.pk]) def clean(self): - - # Enforce unique IP space if applicable - if self.vrf and self.vrf.enforce_unique: - duplicate_ips = IPAddress.objects.filter(vrf=self.vrf, address__net_host=str(self.address.ip))\ - .exclude(pk=self.pk) - if duplicate_ips: + if ((not self.vrf and settings.ENFORCE_GLOBAL_UNIQUE) or (self.vrf and self.vrf.enforce_unique)): + dupes = IPAddress.objects.duplicates(self) + if dupes: raise ValidationError({ - 'address': "Duplicate IP address found in VRF {}: {}".format(self.vrf, duplicate_ips.first()) + 'address': "Duplicate IP address found in global table: {}".format(dupes.first()) }) - elif not self.vrf and settings.ENFORCE_GLOBAL_UNIQUE: - duplicate_ips = IPAddress.objects.filter(vrf=None, address__net_host=str(self.address.ip))\ - .exclude(pk=self.pk) - if duplicate_ips: + + # Enforce unique IP space if applicable + if ((not self.vrf and settings.ENFORCE_GLOBAL_UNIQUE) or (self.vrf and self.vrf.enforce_unique)): + dupes = IPAddress.objects.duplicates(self) + if dupes: raise ValidationError({ - 'address': "Duplicate IP address found in global table: {}".format(duplicate_ips.first()) + 'address': "Duplicate IP address found in {}: {}".format( + "VRF {}".format(self.vrf) if self.vrf else "global table", + dupes.first(), + ) }) def save(self, *args, **kwargs):
Update Florida.md geos
@@ -12,7 +12,7 @@ tags: protester, shove id: fl-fortlauderdale-1 -geolocation: +geolocation: 26.120612,-80.1446222 **Links** @@ -31,7 +31,7 @@ tags: less-lethal, protester, rubber-bullet, shoot id: fl-fortlauderdale-2 -geolocation: +geolocation: 26.120612,-80.1446222 **Links** @@ -48,7 +48,7 @@ tags: abuse-of-power, body-cam, incitement, less-lethal, rubber-bullet, shoot, t id: fl-fortlauderdale-3 -geolocation: +geolocation: 26.120612,-80.1446222 **Links** @@ -68,7 +68,7 @@ tags: arrest, gun, less-lethal, protester, shove, tackle, tear-gas, throw id: fl-jacksonville-1 -geolocation: +geolocation: 30.3252674,-81.6556836 **Links** @@ -87,7 +87,7 @@ tags: arrest, protester, tackle id: fl-miami-1 -geolocation: +geolocation: 25.7796975,-80.1887776 **Links** @@ -106,7 +106,7 @@ tags: less-lethal, protester, tear-gas id: fl-miami-2 -geolocation: +geolocation: 25.7854266,-80.1923798 **Links** @@ -118,13 +118,13 @@ geolocation: Footage shows police arresting a woman. A man who walked nearby with his hands up is thrown to the ground by two officers and arrested. -Additional footage shows an officer grabbing the breast of the woman arrested. It should be noted that police use an "open hand" frisking technique to check along the breasts during an arrest. However the officer does not appear to be using an open hand and appears to be grabbing the woman, rather than frisking her. While having a male officer frisk a female protester without warning is another subject of controversy, this incident appears to even be outside the bounds of "proper conduct" according to police guidelines. +Additional footage shows an officer grabbing the breast of the woman arrested. It should be noted that police use an "open hand" frisking technique to check along the breasts during an arrest. However the officer does not appear to be using an open hand and appears to be grabbing the woman, rather than frisking her. While having a male officer frisk a female protester without warning is a point of controversy, this incident appears to be outside police guidelines even discounting that fact. -tags: abuse-of-power, arrest, inhumane-treatment, protester, shove +tags: abuse-of-power, arrest, inhumane-treatment, protester, shove, sexual-assault id: fl-miami-3 -geolocation: +geolocation: 25.7772583,-80.1879073 **Links** @@ -143,7 +143,7 @@ tags: abuse-of-power, arrest, protester, shove id: fl-miami-4 -geolocation: +geolocation: 25.774139,-80.1872472 **Links** @@ -162,7 +162,7 @@ tags: less-lethal, protester, stun-grenade, tear-gas id: fl-orlando-7 -geolocation: +geolocation: 25.774139,-80.1872472 **Links**
Fix interrupt handling. Remove shield, switch gather to wait; queue restoration now happens in a try/except in _get_query_results.
@@ -233,13 +233,12 @@ class SqlValidator(Validator): async def shutdown(self, signal, loop): logger.info("\n\n" + "Please wait, asking Looker to cancel any running queries") - logger.debug("Cleaning up Spectacles async tasks.") + logger.debug("Cleaning up async tasks.") tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()] for task in tasks: task.cancel() - await asyncio.gather(*tasks) - loop.stop() - logger.debug("Spectacles async tasks terminated.") + await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED) + # Nothing executes beyond this point because of CancelledErrors async def _query(self, mode: str = "batch") -> List[SqlError]: session = aiohttp.ClientSession( @@ -341,7 +340,7 @@ class SqlValidator(Validator): self, session: aiohttp.ClientSession ) -> List[SqlError]: logger.debug("%d queries running", self.running_query_tasks.qsize()) - + try: # Empty the queue (up to 250) to get all running query tasks query_task_ids: List[str] = [] while not self.running_query_tasks.empty() and len(query_task_ids) <= 250: @@ -361,8 +360,10 @@ class SqlValidator(Validator): pending_task_ids.append(query_task_id) # Put the running query tasks back in the queue await self.running_query_tasks.put(query_task_id) + query_task_ids.remove(query_task_id) continue elif query_status in ("complete", "error"): + query_task_ids.remove(query_task_id) # We can release a query slot for each completed query self.query_slots.release() lookml_object = self.query_tasks[query_task_id] @@ -389,6 +390,15 @@ class SqlValidator(Validator): f'Unexpected query result status "{query_status}" ' "returned by the Looker API" ) + except asyncio.CancelledError: + logger.debug( + "Cancelled result fetching, putting " + f"{self.running_query_tasks.qsize()} query task IDs back in the queue" + ) + for query_task_id in query_task_ids: + await self.running_query_tasks.put(query_task_id) + logger.debug("Restored query task IDs to queue") + raise return errors @@ -401,7 +411,7 @@ class SqlValidator(Validator): or not self.running_query_tasks.empty() ): if not self.running_query_tasks.empty(): - result = await asyncio.shield(self._get_query_results(session)) + result = await self._get_query_results(session) results.extend(result) await asyncio.sleep(0.5)
Add libmkl_vml_def.so * Add libmkl_vml_def.so Needed by pytorch for CPU model execution * Review edits
@@ -559,6 +559,7 @@ COPY --from=tritonserver_pytorch /opt/conda/lib/libmkl_intel_lp64.so /opt/triton COPY --from=tritonserver_pytorch /opt/conda/lib/libmkl_intel_thread.so /opt/tritonserver/backends/pytorch/ COPY --from=tritonserver_pytorch /opt/conda/lib/libmkl_def.so /opt/tritonserver/backends/pytorch/ COPY --from=tritonserver_pytorch /opt/conda/lib/libmkl_avx2.so /opt/tritonserver/backends/pytorch/ +COPY --from=tritonserver_pytorch /opt/conda/lib/libmkl_vml_def.so /opt/tritonserver/backends/pytorch/ COPY --from=tritonserver_pytorch /opt/conda/lib/libiomp5.so /opt/tritonserver/backends/pytorch/ COPY --from=tritonserver_pytorch /opt/conda/lib/python3.8/site-packages/torch/include \ /opt/tritonserver/include/torch
Add smoke test for preprocessing. The utility functions that this is using are all tested separately, so a stronger test would be kind of redundant.
@@ -5,7 +5,7 @@ import networkx as nx import pytest from spektral.layers import CensNetConv -from core import batch_size, F, S +from core import batch_size, F, S, A NODE_CHANNELS = 8 @@ -183,3 +183,11 @@ def test_get_config_round_trip(): # The new layer should be the same. assert new_layer.node_channels == layer.node_channels assert new_layer.edge_channels == layer.edge_channels + + +def test_preprocess_smoke(): + """ + Tests that the preprocessing functionality does not crash. + """ + # Act. + node_laplacian, edge_laplacian, incidence = CensNetConv.preprocess(A)
Enhancement: better formatting of the initial text
@@ -155,7 +155,7 @@ class GsCommitInitializeViewCommand(TextCommand, GitCommand): if has_prepare_commit_msg_hook and os.path.exists(commit_editmsg_path): with util.file.safe_open(commit_editmsg_path, "r") as f: - initial_text = f.read() + help_text + initial_text = "\n" + f.read().rstrip() + help_text elif option_amend: last_commit_message = self.git("log", "-1", "--pretty=%B").strip() initial_text = last_commit_message + help_text
share_decoder_embeddings conflicts with copy_generator and throws an error: CopyGenerator does not support indexing
@@ -565,11 +565,12 @@ def make_base_model(opt, model_opt, fields, cuda, checkpoint=None): generator = nn.Sequential( nn.Linear(model_opt.rnn_size, len(fields["tgt"].vocab)), nn.LogSoftmax()) + if model_opt.share_decoder_embeddings: + generator[0].weight = decoder.embeddings.word_lut.weight else: generator = onmt.modules.CopyGenerator(model_opt, fields["src"].vocab, fields["tgt"].vocab) - if model_opt.share_decoder_embeddings: - generator[0].weight = decoder.embeddings.word_lut.weight + if checkpoint is not None: print('Loading model')
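The conflict above is classic weight tying: a plain nn.Sequential generator exposes its Linear layer by index, while a copy generator does not. Below is a minimal sketch of the tying step, assuming only PyTorch; the names are illustrative stand-ins, not OpenNMT-py's actual model code.

```python
# Minimal weight-tying sketch (assumes PyTorch; names are illustrative, not
# OpenNMT-py's actual classes).
import torch
import torch.nn as nn

vocab_size, hidden_size = 100, 32
embeddings = nn.Embedding(vocab_size, hidden_size)
generator = nn.Sequential(
    nn.Linear(hidden_size, vocab_size),
    nn.LogSoftmax(dim=-1),
)

# Tying only makes sense when the generator exposes an indexable Linear layer;
# generator[0] on a non-Sequential module (like a copy generator) fails, which
# is why the diff moves the tie inside the nn.Sequential branch.
generator[0].weight = embeddings.weight

tokens = torch.tensor([1, 2, 3])
log_probs = generator(embeddings(tokens))
print(log_probs.shape)  # torch.Size([3, 100])
```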
Made the change more elegant, as suggested in review.
@@ -101,9 +101,7 @@ def process_options(options): # if there is a config variable with VERISIGN_PRODUCT_<upper(authority.name)> take the value as Cert product-type # else default to "Server", to be compatoible with former versions authority = options.get("authority").name.upper() - product_type = current_app.config.get("VERISIGN_PRODUCT_{0}".format(authority)) - if product_type is None: - product_type = "Server" + product_type = current_app.config.get("VERISIGN_PRODUCT_{0}".format(authority), "Server") data = { "challenge": get_psuedo_random_string(), "serverType": "Apache",
generate_family_file.py: Add __future__ import line to generated family file Also fix other minor stylistic issues detected by flake8.
@@ -141,19 +141,22 @@ class FamilyFileGenerator(object): f.write(""" # -*- coding: utf-8 -*- \"\"\" -This family file was auto-generated by $Id$ +This family file was auto-generated by generate_family_file.py script. + Configuration parameters: url = %(url)s name = %(name)s Please do not commit this to the Git repository! \"\"\" +from __future__ import absolute_import, unicode_literals from pywikibot import family from pywikibot.tools import deprecated -class Family(family.Family): +class Family(family.Family): # noqa: D101 + name = '%(name)s' langs = { """.lstrip() % {'url': self.base_url, 'name': self.name})
Fix issue #366: set the right texture for rendering, otherwise a previously set texture is overwritten.
@@ -450,6 +450,7 @@ class GLViewWidget(QtOpenGL.QGLWidget): glfbo.glFramebufferTexture2D(glfbo.GL_FRAMEBUFFER, glfbo.GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0) self.paintGL(region=(x, h-y-h2, w2, h2), viewport=(0, 0, w2, h2)) # only render sub-region + glBindTexture(GL_TEXTURE_2D, tex) # fixes issue #366 ## read texture back to array data = glGetTexImage(GL_TEXTURE_2D, 0, format, type)
TST: Add a test for monotonic basin-hopping (T=0) (MBH always rejects steps that increase energy.)
@@ -301,6 +301,13 @@ class TestBasinHopping(object): niter=10, callback=callback2, seed=10) assert_equal(np.array(f_1), np.array(f_2)) + def test_monotonic_basin_hopping(self): + # test 1d minimizations with gradient and T=0 + i = 0 + res = basinhopping(func1d, self.x0[i], minimizer_kwargs=self.kwargs, + niter=self.niter, disp=self.disp, T=0) + assert_almost_equal(res.x, self.sol[i], self.tol) + class Test_Storage(object): def setup_method(self):
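A standalone check of the behaviour this test covers: basinhopping accepts T=0 (the monotonic variant, which rejects every step that increases the energy). Assumes SciPy; the 1-D function below is the example from the SciPy docs, used here only as a stand-in for the suite's func1d.

```python
# Monotonic basin-hopping sketch: T=0 makes the Metropolis test reject every
# uphill step. Assumes SciPy is available.
import numpy as np
from scipy.optimize import basinhopping

func = lambda x: np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]
x0 = [1.0]

res = basinhopping(func, x0, minimizer_kwargs={"method": "L-BFGS-B"},
                   niter=50, T=0)
print(res.x, res.fun)  # expected near x ~ -0.195, f ~ -1.001
```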
[query/compiler] Update dependencies These upgrades are simple and do not represent major version upgrades.
@@ -161,7 +161,7 @@ dependencies { exclude group: 'com.fasterxml.jackson.core' } - bundled 'org.lz4:lz4-java:1.4.0' + bundled 'org.lz4:lz4-java:1.8.0' // Breeze 1.0 has a known bug (https://github.com/scalanlp/breeze/issues/772) unbundled 'org.scalanlp:breeze-natives_' + scalaMajorVersion + ':' + breezeVersion() @@ -170,7 +170,7 @@ dependencies { } bundled 'com.github.fommil.netlib:all:1.1.2' - bundled('com.github.samtools:htsjdk:2.21.0') { + bundled('com.github.samtools:htsjdk:2.24.1') { transitive = false } @@ -200,8 +200,8 @@ dependencies { bundled 'com.google.cloud:google-cloud-storage:1.106.0' - bundled 'org.apache.httpcomponents:httpcore:4.4.13' - bundled('org.apache.httpcomponents:httpclient:4.5.12') { + bundled 'org.apache.httpcomponents:httpcore:4.4.14' + bundled('org.apache.httpcomponents:httpclient:4.5.13') { transitive = false } @@ -225,8 +225,8 @@ dependencies { } bundled 'com.indeed:lsmtree-core:1.0.7' - bundled 'com.indeed:util-serialization:1.0.31' - bundled 'com.indeed:util-mmap:1.0.31' + bundled 'com.indeed:util-serialization:1.0.36' + bundled 'com.indeed:util-mmap:1.0.36' bundled group: 'org.freemarker', name: 'freemarker', version: '2.3.14' bundled 'com.kohlschutter.junixsocket:junixsocket-core:2.3.2'
Add missing engine f/w for CAR.RAV4H_TSS2 Togo#7636 [also fixed some alpha-sorting]
@@ -993,8 +993,9 @@ FW_VERSIONS = { (Ecu.engine, 0x700, None): [ b'\x01896634A15000\x00\x00\x00\x00', b'\x018966342M5000\x00\x00\x00\x00', - b'\x018966342X6000\x00\x00\x00\x00', b'\x018966342W8000\x00\x00\x00\x00', + b'\0018966342X5000\x00\x00\x00\x00', + b'\x018966342X6000\x00\x00\x00\x00', b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00', b'\x02896634A14001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00', b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
tests: Test for PMs in test_export_realm_with_member_consent. Sending a PM from hamlet (consented) to othello is a case of sending a message from a consented user to a non-consented user. This results in the generation of more than one message file during realm export. To handle this case, _export_realm is updated.
@@ -239,14 +239,21 @@ class ImportExportTest(ZulipTestCase): exportable_user_ids=exportable_user_ids, consent_message_id=consent_message_id, ) - # TODO: Process the second partial file, which can be created - # for certain edge cases. export_usermessages_batch( input_path=os.path.join(output_dir, 'messages-000001.json.partial'), output_path=os.path.join(output_dir, 'messages-000001.json'), consent_message_id=consent_message_id, ) + try: + export_usermessages_batch( + input_path=os.path.join(output_dir, 'messages-000002.json.partial'), + output_path=os.path.join(output_dir, 'messages-000002.json'), + consent_message_id=consent_message_id, + ) + except FileNotFoundError: + pass + def read_file(fn: str) -> Any: full_fn = os.path.join(output_dir, fn) with open(full_fn) as f: @@ -256,6 +263,12 @@ class ImportExportTest(ZulipTestCase): result['realm'] = read_file('realm.json') result['attachment'] = read_file('attachment.json') result['message'] = read_file('messages-000001.json') + try: + message = read_file('messages-000002.json') + result["message"]["zerver_usermessage"].extend(message["zerver_usermessage"]) + result["message"]["zerver_message"].extend(message["zerver_message"]) + except FileNotFoundError: + pass result['uploads_dir'] = os.path.join(output_dir, 'uploads') result['uploads_dir_records'] = read_file(os.path.join('uploads', 'records.json')) result['emoji_dir'] = os.path.join(output_dir, 'emoji') @@ -483,6 +496,12 @@ class ImportExportTest(ZulipTestCase): self.example_email("ZOE"), self.example_email("othello")]) + # Create PMs + pm_a_msg_id = self.send_personal_message(self.example_email("AARON"), self.example_email("othello")) + pm_b_msg_id = self.send_personal_message(self.example_email("cordelia"), self.example_email("iago")) + pm_c_msg_id = self.send_personal_message(self.example_email("hamlet"), self.example_email("othello")) + pm_d_msg_id = self.send_personal_message(self.example_email("iago"), self.example_email("hamlet")) + # Send message advertising export and make users react self.send_stream_message(self.example_email("othello"), "Verona", topic_name="Export", @@ -563,6 +582,11 @@ class ImportExportTest(ZulipTestCase): self.assertNotIn(stream_c_message_id, exported_msg_ids) self.assertNotIn(huddle_c_message_id, exported_msg_ids) + self.assertNotIn(pm_a_msg_id, exported_msg_ids) + self.assertIn(pm_b_msg_id, exported_msg_ids) + self.assertIn(pm_c_msg_id, exported_msg_ids) + self.assertIn(pm_d_msg_id, exported_msg_ids) + def test_export_single_user(self) -> None: output_dir = self._make_output_dir() cordelia = self.example_user('cordelia')
Fix docstring typo `presizing` --> `presize` to conform to the actual function signature
@@ -45,7 +45,7 @@ def aug_tfms( size: The final size of the image. If an `int` is given, the maximum size of the image is rescaled, maintaing aspect ratio. If a `tuple` is given, the image is rescaled to have that exact size (width, height). - presizing: Rescale the image before applying other transfroms. If `None` this + presize: Rescale the image before applying other transfroms. If `None` this transform is not applied. First introduced by fastai,this technique is explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb) chapter (tip: search for "Presizing").
Changed time format; fixed update response.
@@ -101,17 +101,20 @@ script: return incident + def isoformat(date): + return date.isoformat()[:-3] + 'Z' + # FUNCTIONS def fetchIncidents(): incidents = [] t = datetime.datetime.utcnow() - now = str(t) + now = isoformat(t) lastRun = demisto.getLastRun() and demisto.getLastRun()['time'] if len(lastRun) == 0: t = t - timedelta(minutes=10) - lastRun = t + lastRun = isoformat(t) events = getRecentList(lastRun) for event in events: @@ -175,7 +178,10 @@ script: assignToPerson = dictWorkflowQuery['AssignedPerson'] res = updateIncidentRequest(num, status, resolution, ref, severity, assignToOrg, assignToPerson, comments) - if res.text == "true": + resString = xml.etree.ElementTree.tostring(res) + dictRes = xmltodict.parse(resString) + res = dictRes['ns0:Envelope']['ns0:Body']['ns1:UpdateIncidentWorkflowResponse']['ns1:UpdateIncidentWorkflowResult'] + if res == "true": msg = "Incident updated successfully" else: msg = "Incident update failed"
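A standard-library sketch of the timestamp helper introduced above. It assumes the datetime carries microseconds, so isoformat() ends in six fractional digits and trimming the last three leaves millisecond precision before the trailing 'Z'.

```python
# Minimal illustration of the ISO-8601 "millisecond + Z" format used above.
import datetime
from datetime import timedelta

def isoformat(date):
    # Assumes date.microsecond != 0 so isoformat() includes 6 fractional digits.
    return date.isoformat()[:-3] + 'Z'

t = datetime.datetime.utcnow()
now = isoformat(t)
last_run = isoformat(t - timedelta(minutes=10))
print(now)       # e.g. 2021-01-01T12:00:00.123Z
print(last_run)
```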
Update README.md: fixed path naming from `python train_with_wav2vec.py hparams/train_with_wav2vec.yaml` to `python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml`.
@@ -34,7 +34,7 @@ The recipe can also be used using a pretrained Wav2Vec as an encoder ``` cd direct -python train_with_wav2vec.py hparams/train_with_wav2vec.yaml +python train_with_wav2vec2.py hparams/train_with_wav2vec2.yaml ``` # Performance summary
Update apt_domestickitten.txt Minor update for Reference section
# See the file 'LICENSE' for copying permission # Reference: https://research.checkpoint.com/domestic-kitten-an-iranian-surveillance-operation/ +# Reference: https://twitter.com/malwrhunterteam/status/1340344596698677250 +# Reference: https://www.virustotal.com/gui/file/bd7779e6100e07b3eae67bfcdc53f1f08468651240229e284cca60e2b953496b/detection http://162.248.247.172 http://190.2.144.140
sql: update memory control config Via:
@@ -18,7 +18,13 @@ oom-action = "log" ## Configure the memory quota of a query -You can control the memory quota of a query using the following session variables. Generally, you only need to configure `tidb_mem_quota_query`. Other variables are used for advanced configuration which most users do not need to care about. +In the configuration file, you can set the default Memory Quota for each Query. The following example sets it to 32GB: + +``` +mem-quota-query = 34359738368 +``` + +In addition, you can control the memory quota of a query using the following session variables. Generally, you only need to configure `tidb_mem_quota_query`. Other variables are used for advanced configuration which most users do not need to care about. | Variable Name | Description | Unit | Default Value | |-----------------------------------|---------------------------------------------------|-------|-----------|
Update changelog.rst fix
@@ -60,9 +60,7 @@ Mobjects, Scenes, and Animations #. Add customizable left and right bracket for :code:`Matrix` mobject and :code:`set_row_colors` method for matrix mobject #. Add :code:`AddTeXLetterByLetter` animation #. Enhanced GraphScene -```suggestion -Mobjects, Scenes, and Animations -``` + #. You can now add arrow tips to axes #. extend axes a bit at the start and/or end #. have invisible axes
DOC: fixed docstring warnings in registry. Fixed the docstring warnings and improved some of the block comment formatting.
@@ -26,6 +26,7 @@ registered in pysat_dir/user_modules.txt and is also listed in :: pysat.user_modules + which is stored as a dict of dicts keyed by platform and name. Once registered, subsequent calls to Instrument may use the platform @@ -187,23 +188,24 @@ def register(module_names, overwrite=False): # get platform and name identifiers from imported module platform = inst_module.platform name = inst_module.name - # only register module if not already present - # multiple names allowed for a single platform + + # Only register module if not already present. Multiple names are + # allowed for a single platform if platform not in pysat.user_modules: # setup `of dict` part of dict of dicts pysat.user_modules[platform] = {} - # only register name if not present under platform + + # Only register name if it is not present under platform if name not in pysat.user_modules[platform]: + # Add to current user modules structure and store it to disk logger.info('Registering user module {}'.format(module_name)) - # add to current user modules structure pysat.user_modules[platform][name] = module_name - # store user modules to disk store() else: - # platform/name combination already registered - # check if this is a new package or just a redundant assignment + # Platform/name combination already registered. Check to see if + # this is a new package or just a redundant assignment if module_name != pysat.user_modules[platform][name]: - # new assignment, check for overwrite flag + # New assignment, check for overwrite flag if not overwrite: estr = ' '.join(('An instrument has already been ', 'registered for platform:', platform, @@ -213,9 +215,8 @@ def register(module_names, overwrite=False): 'must be enabled.')) raise ValueError(estr) else: - # overwrite with new module information + # Overwrite the old file with new module information pysat.user_modules[platform][name] = module_name - # store store() return @@ -224,9 +225,9 @@ def register(module_names, overwrite=False): def register_by_module(module): """Register all sub-modules attached to input module - Enables instantiation of a third-party Instrument - module using + Enables instantiation of a third-party Instrument module using :: + inst = pysat.Instrument(platform, name) Parameters @@ -242,8 +243,8 @@ def register_by_module(module): Note ---- - Gets a list of sub-modules by using the __all__ attribute, - defined in the module's __init__.py + Gets a list of sub-modules by using the `__all__` attribute, + defined in the module's `__init__.py` Examples -------- @@ -253,16 +254,15 @@ def register_by_module(module): import pysatModels pysat.utils.registry.register_by_module(pysatModels.models) - import pysatSpaceWeather - pysat.utils.registry.register_by_module(pysatSpaceWeather.instruments) - """ - # get a list of all user specified modules attached to module + # Get a list of all user specified modules attached to module module_names = module.__all__ - # add in package preamble + + # Add in package preamble module_names = [module.__name__ + '.' + mod for mod in module_names] - # register all of them + + # Register all of the sub-modules register(module_names) return
Adding central packages to release All Python packages, even nested ones, must be explicitly listed to be part of the package.
@@ -15,7 +15,14 @@ with open("README.md", "r") as handle: setup( name="heat", - packages=["heat", "heat.core"], # , "heat.core.cluster", "heat.core.regression"], + packages=[ + "heat", + "heat.core", + "heat.core.cluster", + "heat.core.regression", + "heat.core.regression.lasso", + "heat.utils", + ], data_files=["README.md", "LICENSE"], version=version.__version__, description="A framework for high performance data analytics and machine learning.",
Normalize step + FFT in `entropy_spectral`; FFT seems to be the default in most papers implementing SpEn.
@@ -9,6 +9,8 @@ def entropy_spectral(signal, sampling_rate=1000, **kwargs): Spectral entropy (SE or SpEn) treats the signal's normalized power distribution in the frequency domain as a probability distribution, and calculates the Shannon entropy of it. + A power spectrum with one or two dominant frequencies possesses a relatively low spectral entropy, + and a broadbanded spectrum has a higher value. Parameters ---------- @@ -39,9 +41,12 @@ def entropy_spectral(signal, sampling_rate=1000, **kwargs): >>> # Spectral Entropy >>> SpEn, info = nk.entropy_spectral(signal, sampling_rate=200) >>> SpEn #doctest: +SKIP - >>> - >>> SpEn, info = nk.entropy_spectral(signal, sampling_rate=200, method='fft') - >>> SpEn #doctest: +SKIP + + References + ---------- + - Crepeau, J. C., & Isaacson, L. K. (1991). Spectral Entropy Measurements of Coherent Structures in an + Evolving Shear Layer. Journal of Non-Equilibrium Thermodynamics, 16(2). doi:10.1515/jnet.1991.16.2.137 + """ # Sanity checks @@ -51,7 +56,8 @@ def entropy_spectral(signal, sampling_rate=1000, **kwargs): ) # Power-spectrum density (PSD) - psd = signal_psd(signal, sampling_rate=sampling_rate, **kwargs) + psd = nk.signal_psd(signal, sampling_rate=sampling_rate, method='fft') + psd["Power"] /= np.sum(psd["Power"]) # area under normalized spectrum should sum to 1 (np.sum(psd["Power"])) psd = psd[psd["Power"] > 0] # Compute Shannon entropy
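A NumPy-only sketch of the normalization step being added: the FFT power spectrum is scaled to sum to 1 so it can be treated as a probability distribution before taking its Shannon entropy. This stands in for NeuroKit's signal_psd(), which the actual code uses; the two-tone signal is made up.

```python
# Spectral entropy of a normalized FFT power spectrum (NumPy only).
import numpy as np

sampling_rate = 200
t = np.arange(0, 2, 1 / sampling_rate)
signal = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 20 * t)

power = np.abs(np.fft.rfft(signal)) ** 2
power = power / np.sum(power)        # area under the normalized spectrum is 1
power = power[power > 0]             # drop zero bins before taking the log
spectral_entropy = -np.sum(power * np.log2(power))
print(spectral_entropy)  # low for narrowband signals, higher for broadband noise
```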
Add kwargs to the CHP Tech class super() call. This wasn't needed until the new Tech class attributes for export/curtail were introduced; this fixes the assignment of the can_[export/curtail] inputs for CHP.
@@ -331,7 +331,7 @@ class CHP(Tech): def __init__(self, dfm, run_uuid, existing_boiler_production_type_steam_or_hw, oa_temp_degF, site_elevation_ft, outage_start_time_step=None, outage_end_time_step=None, time_steps_per_hour=1, year=None, **kwargs): - super(CHP, self).__init__() + super(CHP, self).__init__(**kwargs) self.prime_mover = kwargs.get('prime_mover') self.size_class = kwargs.get('size_class')
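The reason the one-line change matters is generic Python behaviour: if a subclass swallows **kwargs without forwarding them, base-class attributes derived from those kwargs silently keep their defaults. A tiny illustration with made-up stand-in classes, not the actual REopt Tech/CHP implementations:

```python
# Stand-in classes showing why **kwargs must reach super().__init__().
class Tech:
    def __init__(self, can_export=False, can_curtail=False, **kwargs):
        self.can_export = can_export
        self.can_curtail = can_curtail


class CHP(Tech):
    def __init__(self, prime_mover=None, **kwargs):
        super().__init__(**kwargs)   # dropping **kwargs here loses can_export
        self.prime_mover = prime_mover


chp = CHP(prime_mover="recip_engine", can_export=True)
print(chp.can_export)   # True, but only because **kwargs was forwarded
```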
Fix typo in exec-scheduler log path. It is supposed to be `-`, not `_`, e.g. `execution-scheduler` rather than `execution_scheduler`.
@@ -21,7 +21,7 @@ DEFAULT_INTERVAL = 60 SCHEDULER_LOCK_BASE = 10000 # so we won't conflict with usage collector, which uses lock numbers 1 and 2 -DEFAULT_LOG_PATH = '/var/log/cloudify/execution_scheduler/scheduler.log' +DEFAULT_LOG_PATH = '/var/log/cloudify/execution-scheduler/scheduler.log' CONFIG_PATH = '/opt/manager/cloudify-rest.conf' REST_SECURITY_PATH = '/opt/manager/rest-security.conf'
ExpressionUI : Add popup context menu and associated signal. This will allow different engines to modify the context menu.
@@ -51,7 +51,7 @@ Gaffer.Metadata.registerNode( scripted expressions. """, - "layout:customWidget:Expression:widgetType", "GafferUI.ExpressionUI._ExpressionWidget", + "layout:customWidget:Expression:widgetType", "GafferUI.ExpressionUI.ExpressionWidget", plugs = { @@ -140,10 +140,10 @@ def __popupMenu( menuDefinition, plugValueWidget ) : __popupMenuConnection = GafferUI.PlugValueWidget.popupMenuSignal().connect( __popupMenu ) -# _ExpressionPlugValueWidget +# ExpressionWidget ########################################################################## -class _ExpressionWidget( GafferUI.Widget ) : +class ExpressionWidget( GafferUI.Widget ) : def __init__( self, node, **kw ) : @@ -166,6 +166,7 @@ class _ExpressionWidget( GafferUI.Widget ) : self.__activatedConnection = self.__textWidget.activatedSignal().connect( Gaffer.WeakMethod( self.__activated ) ) self.__editingFinishedConnection = self.__textWidget.editingFinishedSignal().connect( Gaffer.WeakMethod( self.__editingFinished ) ) self.__dropTextConnection = self.__textWidget.dropTextSignal().connect( Gaffer.WeakMethod( self.__dropText ) ) + self.__contextMenuConnection = self.__textWidget.contextMenuSignal().connect( Gaffer.WeakMethod( self.__expressionContextMenu ) ) self.__messageWidget = GafferUI.MessageWidget() @@ -174,6 +175,48 @@ class _ExpressionWidget( GafferUI.Widget ) : self.__update() + def node( self ) : + + return self.__node + + def textWidget( self ) : + + return self.__textWidget + + __expressionContextMenuSignal = Gaffer.Signal2() + ## This signal is emitted whenever a popup menu + # for an ExpressionWidget is about to be shown. + # This provides an opportunity to customise the + # menu from external code. The signature for + # slots is ( menuDefinition, widget ), and slots + # should just modify the menu definition in place. + @classmethod + def expressionContextMenuSignal( cls ) : + + return cls.__expressionContextMenuSignal + + def __expressionContextMenuDefinition( self ) : + + menuDefinition = IECore.MenuDefinition() + + self.expressionContextMenuSignal()( menuDefinition, self ) + + return menuDefinition + + def __expressionContextMenu( self, *unused ) : + + menuDefinition = self.__expressionContextMenuDefinition() + if not len( menuDefinition.items() ) : + return False + + title = self.__node.relativeName( self.__node.scriptNode() ) + title = ".".join( [ IECore.CamelCase.join( IECore.CamelCase.split( x ) ) for x in title.split( "." ) ] ) + + self.____expressionContextMenu = GafferUI.Menu( menuDefinition, title = title ) + self.____expressionContextMenu.popup() + + return True + def __update( self ) : expression = self.__node.getExpression()
Increase hardcoded encapsulate follow-on resources This should fix and
@@ -1571,7 +1571,8 @@ class EncapsulatedJob(Job): self.encapsulatedJob = job Job.addChild(self, job) # Use small resource requirements for dummy Job instance. - self.encapsulatedFollowOn = Job(disk='1M', memory='32M', cores=0.1) + # But not too small, or the job won't have enough resources to safely start up Toil. + self.encapsulatedFollowOn = Job(disk='100M', memory='512M', cores=0.1) Job.addFollowOn(self, self.encapsulatedFollowOn) def addChild(self, childJob):
opts-separator: include an option to define the separator for situations where a specific character may be needed.
@@ -115,6 +115,7 @@ class ReadSettings: 'post-process': False, 'wait-post-process': False, 'detailed-progress': False, + 'opts-separator': ',', 'preopts': '', 'postopts': '', }, @@ -619,8 +620,9 @@ class ReadSettings: self.postprocess = config.getboolean(section, 'post-process') self.waitpostprocess = config.getboolean(section, 'wait-post-process') self.detailedprogress = config.getboolean(section, 'detailed-progress') - self.preopts = config.getlist(section, "preopts") - self.postopts = config.getlist(section, "postopts") + self.opts_sep = config.get(section, "opts-separator") + self.preopts = config.getlist(section, "preopts", separator=self.opts_sep) + self.postopts = config.getlist(section, "postopts", separator=self.opts_sep) if self.force_convert: self.process_same_extensions = True
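A hedged, standard-library-only sketch of what a configurable list separator buys you; the project's own config.getlist() helper may be implemented differently, this just shows the idea of splitting an option on a separator that is itself read from the config.

```python
# Reading a list option with a configurable separator (configparser only).
import configparser

raw = """
[Converter]
opts-separator = |
preopts = -analyzeduration|20000000
"""

config = configparser.ConfigParser()
config.read_string(raw)

sep = config.get("Converter", "opts-separator")
preopts = [part.strip()
           for part in config.get("Converter", "preopts").split(sep)
           if part.strip()]
print(preopts)   # ['-analyzeduration', '20000000']
```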
Put back bq_helper in editable mode and install cleverhans from pypi. Partial rollback of because
@@ -304,7 +304,7 @@ RUN pip install fancyimpute && \ pip install nibabel && \ pip install mlens && \ pip install scikit-multilearn && \ - pip install git+http://github.com/tensorflow/cleverhans.git#egg=cleverhans && \ + pip install cleverhans && \ pip install leven && \ pip install catboost && \ #cd /usr/local/src && git clone --depth=1 https://github.com/AxeldeRomblay/MLBox && cd MLBox/python-package && python setup.py install && \ @@ -457,7 +457,7 @@ RUN pip install flashtext && \ pip install pympler && \ pip install s3fs && \ pip install featuretools && \ - pip install git+https://github.com/SohierDane/BigQuery_Helper#egg=bq_helper && \ + pip install -e git+https://github.com/SohierDane/BigQuery_Helper#egg=bq_helper && \ pip install hpsklearn && \ pip install keras-tqdm && \ pip install git+https://github.com/Kaggle/learntools && \
Fixed broken link in developer center. When the developer center is processed by grunt it adds a new level, breaking these images.
@@ -147,6 +147,6 @@ from cartoframes import Layer cc.map(layers=Layer('brooklyn_poverty_w_rates', color='poverty_per_pop')) ``` -![](../img/guides/01-brooklyn_poverty.png) +![](../../img/guides/01-brooklyn_poverty.png) Note: Legends are not yet implemented for stable releases of cartoframes. See [this pull request](https://github.com/CartoDB/cartoframes/pull/184) for more information.
Update domain.txt Not as ```generic.txt``` or ```phishing.txt```, just as an actor for ```scarcruft```.
@@ -4962,3 +4962,8 @@ iok.la # Reference: https://www.ptsecurity.com/ru-ru/research/analytics/operation-taskmasters-2019/ (Russian) 5wya.com + +# Reference: https://securelist.com/scarcruft-continues-to-evolve-introduces-bluetooth-harvester/90729/ +# Reference: https://www.virustotal.com/gui/domain/nitesbr1.org/details + +nitesbr1.org
Add empty codeActionKind entry RLS now requires a codeActionKind entry. An empty valueSet will satisfy the requirement for now.
@@ -136,7 +136,11 @@ def get_initialize_params(project_path: str, config: ClientConfig): "rangeFormatting": {}, "definition": {}, "codeAction": { - "codeActionLiteralSupport": {} + "codeActionLiteralSupport": { + "codeActionKind": { + "valueSet": [] + } + } }, "rename": {} },
Update Oregon.md // Oregon.md Line: 3996, Third word, "sate", is corrected to "said", as it was misspelled.
@@ -3993,7 +3993,7 @@ id: or-portland-256 ### Bystander shot in the head | August 10th -A man sate he was walking home shows injuries to his head and arms, allegedly received from police. He states that he was struck with batons and shot with rubber bullets. Protest medics administer first aid to the man. +A man said he was walking home shows injuries to his head and arms, allegedly received from police. He states that he was struck with batons and shot with rubber bullets. Protest medics administer first aid to the man. tags: bystander, shoot, rubber-bullet, baton
Ticket 247: Fixing Number and layout

* The card_face object doesn't support the same fields as the card object, so I adjusted the code to pull the fields from the card object where they are not available on the card_face
* tox run
@@ -426,15 +426,17 @@ def build_mtgjson_tokens( "text": face_data.get("oracle_text"), "power": face_data.get("power"), "colors": face_data.get("colors"), - "colorIdentity": face_data.get("color_identity"), + "colorIdentity": sf_token.get("color_identity"), "toughness": face_data.get("toughness"), "loyalty": face_data.get("loyalty"), - "watermark": face_data.get("watermark"), + "watermark": sf_token.get("watermark"), "scryfallId": sf_token["id"], + "layout": "double_faced_token", + "side": chr(97 + sf_card_face), "borderColor": face_data.get("border_color"), "artist": face_data.get("artist"), - "isOnlineOnly": face_data.get("digital"), - "number": face_data.get("collector_number"), + "isOnlineOnly": sf_token.get("digital"), + "number": sf_token.get("collector_number"), } else: token_card = { @@ -446,6 +448,7 @@ def build_mtgjson_tokens( "colorIdentity": sf_token.get("color_identity"), "toughness": sf_token.get("toughness"), "loyalty": sf_token.get("loyalty"), + "layout": "normal", "watermark": sf_token.get("watermark"), "scryfallId": sf_token["id"], "borderColor": sf_token.get("border_color"),
Fix(icsi-recipe): Downloaded zip file wasn't found + needed to adapt extract commands to new dir structure
@@ -226,11 +226,13 @@ def download_icsi( ) # Unzip annotations zip file - with zipfile.ZipFile("ICSI_original_transcripts.zip") as z: - z.extractall() # unzips to a dir called 'transcripts' + with zipfile.ZipFile(target_dir / "ICSI_original_transcripts.zip") as z: + # Unzips transcripts to <target_dir>/'transcripts' + # zip file also contains some documentation which will be unzipped to <target_dir> + z.extractall(target_dir) # If custom dir is passed, rename 'transcripts' dir accordingly if transcripts_dir: - Path("transcripts").rename(transcripts_dir) + Path(target_dir / "transcripts").rename(transcripts_dir) def parse_icsi_annotations(
Skip tests that depend on docker on windows Test Plan: unit Reviewers: max
import sys +import pytest + +from dagster import seven from dagster.api.list_repositories import ( sync_list_repositories, sync_list_repositories_ephemeral_grpc, @@ -274,7 +277,7 @@ def test_sync_list_python_module_attribute_grpc(): repository_code_pointer_dict['hello_world_repository'].fn_name == 'hello_world_repository' ) - [email protected](seven.IS_WINDOWS, reason='Depends on Docker, so skip running in Windows') def test_sync_list_container_grpc(docker_grpc_client): response = sync_list_repositories_grpc(docker_grpc_client)
Fix typo "properlyh" Summary: Pull Request resolved:
@@ -119,7 +119,7 @@ def optimize_inference_for_dag(net, input_blobs, namescope=""): ops = list(net.Proto().op) op_indices = [index for index, op in enumerate(net.Proto().op)] - # Sanity check: check that all external inputs are properlyh accounted + # Sanity check: check that all external inputs are properly accounted # and that no gradient ops are included in 'net' for op in ops: for b in op.input:
Update pythonpublish.yml Goes from test to prod
# This workflows will upload a Python Package using Twine when a release is created # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries -name: publish-test +name: publish on: release: @@ -24,8 +24,8 @@ jobs: pip install setuptools wheel twine - name: Build and publish env: - TWINE_USERNAME: ${{ secrets.PYPITEST_USERNAME }} - TWINE_PASSWORD: ${{ secrets.PYPITEST_PASSWORD }} + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | python setup.py sdist bdist_wheel bdist_egg - twine upload --repository-url https://test.pypi.org/legacy/ dist/* + twine upload dist/*
Move memory_limit into ETermBase.get_paths(), update .make_function(); when using a path, the memory limit is ignored.
@@ -405,16 +405,21 @@ class ETermBase(Struct): output('build expression: {} s'.format(timer.stop())) def get_paths(self, expressions, operands): + memory_limit = self.backend_kwargs.get('memory_limit') + if ('numpy' in self.backend) or self.backend.startswith('dask'): + optimize = (self.optimize if memory_limit is None + else (self.optimize, memory_limit)) paths, path_infos = zip(*[nm.einsum_path( expressions[ia], *operands[ia], - optimize=self.optimize, + optimize=optimize, ) for ia in range(len(operands))]) elif 'opt_einsum' in self.backend: paths, path_infos = zip(*[oe.contract_path( expressions[ia], *operands[ia], optimize=self.optimize, + memory_limit=memory_limit, ) for ia in range(len(operands))]) else: @@ -522,7 +527,6 @@ class ETermBase(Struct): da_operands = [] c_chunk_size = self.backend_kwargs.get('c_chunk_size') - memory_limit = self.backend_kwargs.get('memory_limit') for ia in range(self.ebuilder.n_add): da_ops = [] for name, ii, op in zip(self.ebuilder.operand_names[ia], @@ -545,13 +549,11 @@ class ETermBase(Struct): def eval_einsum(out, eshape): _out = oe.contract(self.parsed_expressions[0], *da_operands[0], optimize=self.paths[0], - memory_limit=memory_limit, backend='dask') for ia in range(1, n_add): aux = oe.contract(self.parsed_expressions[ia], *da_operands[ia], optimize=self.paths[ia], - memory_limit=memory_limit, backend='dask') _out += aux
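For reference, a minimal example of the contraction-path computation that get_paths() caches, using plain NumPy einsum_path; memory-limit handling differs between backends, so it is omitted here.

```python
# Precompute an einsum contraction path, then reuse it for the contraction.
import numpy as np

a = np.random.rand(10, 20)
b = np.random.rand(20, 30)
c = np.random.rand(30, 5)

path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
print(path)                  # e.g. ['einsum_path', (1, 2), (0, 1)]

result = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)
print(result.shape)          # (10, 5)
```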
ENH: optimize.minimize: correct bounds warnings
@@ -552,20 +552,18 @@ def minimize(fun, x0, args=(), method=None, jac=None, hess=None, warn('Method %s does not use Hessian information (hess).' % method, RuntimeWarning) # - hessp - if meth not in ('newton-cg', 'dogleg', 'trust-ncg', 'trust-constr', + if meth not in ('newton-cg', 'trust-ncg', 'trust-constr', 'trust-krylov', '_custom') \ and hessp is not None: warn('Method %s does not use Hessian-vector product ' 'information (hessp).' % method, RuntimeWarning) # - constraints or bounds - if (meth in ('cg', 'bfgs', 'newton-cg', 'dogleg', 'trust-ncg') - and (bounds is not None or np.any(constraints))): - warn('Method %s cannot handle constraints nor bounds.' % method, - RuntimeWarning) - if meth in ('nelder-mead', 'l-bfgs-b', 'tnc', 'powell') and np.any(constraints): + if (meth not in ('cobyla', 'slsqp', 'trust-constr', '_custom') and + np.any(constraints)): warn('Method %s cannot handle constraints.' % method, RuntimeWarning) - if meth == 'cobyla' and bounds is not None: + if meth not in ('nelder-mead', 'powell', 'l-bfgs-b', 'tnc', 'slsqp', + 'trust-constr', '_custom') and bounds is not None: warn('Method %s cannot handle bounds.' % method, RuntimeWarning) # - return_all
Fix failing test. In py3 bytes are actual bytes and you can't join them with strings, so you have to make sure everything's the same type; I used `.encode()`.
@@ -358,10 +358,10 @@ class CMDMODTestCase(TestCase, LoaderModuleMockMixin): # stdout with the non-decodable bits replaced with the unicode # replacement character U+FFFD. stdout_unicode = '\ufffd\x1b\ufffd\ufffd' + os.linesep - stderr_bytes = os.linesep.join([ + stderr_bytes = os.linesep.encode().join([ b'1+0 records in', b'1+0 records out', - b'4 bytes copied, 9.1522e-05 s, 43.7 kB/s']) + os.linesep + b'4 bytes copied, 9.1522e-05 s, 43.7 kB/s']) + os.linesep.encode() stderr_unicode = stderr_bytes.decode() proc = MagicMock(
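A quick demonstration of the Python 3 behaviour behind the fix: str.join() refuses bytes items, so the separator has to be encoded to bytes as well.

```python
# Joining bytes requires a bytes separator in Python 3.
import os

lines = [b'1+0 records in', b'1+0 records out']

stderr_bytes = os.linesep.encode().join(lines) + os.linesep.encode()
print(stderr_bytes)

try:
    os.linesep.join(lines)   # str separator, bytes items
except TypeError as exc:
    print("mixing str and bytes fails:", exc)
```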
Update conf.py Boards redirect.
@@ -322,6 +322,8 @@ redirects = { # Boards redirects "boards/navigating-mattermost-boards.html": "https://docs.mattermost.com/welcome/what-changed-in-v60.html", +"/boards/data-and-archives.html": + "https://docs.mattermost.com/boards/migrate-to-boards.html". # Channels redirects "channels/sign-in.html":
pm changes: made the cone search smaller, applied magnitude cuts, applied a small border of 20 pixels to ignore.
@@ -519,7 +519,7 @@ class data_products(find_sources): pos = [header['CRVAL1'], header['CRVAL2']] data = find_sources.tic_by_contamination(find_sources(), pos, r, contam) ra, dec = data['ra'], data['dec'] - return WCS(header).all_world2pix(ra, dec, 1), data['ID'] + return WCS(header).all_world2pix(ra, dec, 1), data['ID'], data['Tmag'] def find_isolated(x, y): @@ -613,15 +613,21 @@ class data_products(find_sources): fns, time = self.sort_by_date(camera, chip) mast, header = fits.getdata(self.ffi_dir+fns[0], header=True) - xy, id = pixel_cone(header, 6*np.sqrt(2), [0.0, 5e-6]) + print("Grabbing good sources for the pointing model") + xy, ids, tmag = pixel_cone(header, 6*np.sqrt(1.2), [0.0, 5e-6]) # Makes sure sources are in the file - bord = 80. + bord = 20. inds = np.where( (xy[1]>=44.+bord) & (xy[1]<len(mast)-45.-bord) & (xy[0]>=bord) & (xy[0]<len(mast[0])-41.-bord))[0] - x, y = xy[0][inds], xy[1][inds] + x, y, ids, tmag = xy[0][inds], xy[1][inds], ids[inds], tmag[inds] + print("Finding the most isolated sources") isolated = find_isolated(x, y) - x, y = x[isolated], y[isolated] + x, y, ids, tmag = x[isolated], y[isolated], ids[isolated], tmag[isolated] + + # Applying Tmag cuts + tmag_inds = np.where(tmag <= 14.0)[0] + x, y, ids, tmag = x[tmag_inds], y[tmag_inds], ids[tmag_inds], tmag[tmag_inds] calc_shift() return
fix: hide/unhide workspace CSS not working in Firefox; reverting the CSS part for now.
@@ -1027,18 +1027,6 @@ body { } } - &[item-is-hidden="0"] { - .drop-icon { - display: none; - } - - &:has(.sidebar-item-container[item-is-hidden="0"]) { - .drop-icon { - display: inline-block; - } - } - } - .sidebar-item-container { margin-left: 10px; @@ -1071,14 +1059,6 @@ body { } } - .standard-sidebar-section { - display: none; - - &:has(> [item-is-hidden="0"]) { - display: block; - } - } - .standard-sidebar-section.show-control { .desk-sidebar-item.standard-sidebar-item {
Fix dispatcher Fix libgl issue.
@@ -19,13 +19,14 @@ FROM gcr.io/fuzzbench/base-image ENV WORK /work WORKDIR $WORK -# Install runtime dependencies for benchmarks, easy json parsing. +# Install runtime dependencies for benchmarks, easy json parsing, and fuzzbench. RUN apt-get update -y && apt-get install -y \ jq \ libglib2.0-0 \ libxml2 \ libarchive13 \ - libgss3 + libgss3 \ + libgl1 # Install docker cli. RUN DOCKER_VERSION=18.09.7 && \
Update Puppet Facter link The old link is now giving a 404
# Sal [![CircleCI](https://circleci.com/gh/salopensource/sal.svg?style=svg)](https://circleci.com/gh/salopensource/sal) -Sal is a multi-tenanted reporting dashboard for [Munki](https://github.com/munki/munki/) with the ability to display information from [Facter](http://puppetlabs.com/facter). It has a plugin system allowing you to easily build widgets to display your custom information from Facter, Grains, Munki's [conditional items](https://github.com/munki/munki/wiki/Conditional-Items) etc. +Sal is a multi-tenanted reporting dashboard for [Munki](https://github.com/munki/munki/) with the ability to display information from [Facter](https://puppet.com/docs/puppet/6.10/facter.html). It has a plugin system allowing you to easily build widgets to display your custom information from Facter, Grains, Munki's [conditional items](https://github.com/munki/munki/wiki/Conditional-Items) etc. With Sal, you are able to allow access to reports on certain sets of machines to certain people - for example, giving a manager access to the reports on the machines in their department.
Make KFold do split Based on
@@ -230,7 +230,8 @@ class Learned(Model): results = mapper(self._cross_score, ((i, [values_labels[i] for i in train_i], [values_labels[i] for i in test_i]) - for i, (train_i, test_i) in enumerate(folds_i))) + for i, (train_i, test_i) in enumerate( + folds_i.split(values_labels)))) agg_score_labels = [] for score_labels in results: agg_score_labels.extend(score_labels)
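A minimal reminder of the scikit-learn API the fix targets (assuming scikit-learn is available): a KFold object is not itself a sequence of folds; the (train, test) index pairs come from its split() method.

```python
# Iterating over KFold folds via .split() rather than the splitter itself.
import numpy as np
from sklearn.model_selection import KFold

values_labels = np.arange(20).reshape(10, 2)   # made-up stand-in data
folds = KFold(n_splits=5)

for i, (train_i, test_i) in enumerate(folds.split(values_labels)):
    print(i, train_i, test_i)
```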
Assert that we're getting a sensible limit in get_items_not_in_filter.
@@ -672,6 +672,8 @@ class MempoolManager: counter = 0 broke_from_inner_loop = False + assert limit > 0 + # Send 100 with the highest fee per cost for dic in reversed(self.mempool.sorted_spends.values()): if broke_from_inner_loop:
Make stylistic changes cr
@@ -136,7 +136,17 @@ class InitController(AbstractBaseController): configure_keyname(platform, keyname, keyname_of_existing_application, interactive, force_non_interactive) fileoperations.write_config_setting('global', 'include_git_submodules', True) - def initialize_multiple_directories(self, modules, region, interactive, force_non_interactive, keyname, profile, noverify, platform): + def initialize_multiple_directories( + self, + modules, + region, + interactive, + force_non_interactive, + keyname, + profile, + noverify, + platform + ): application_created = False app_name = None cwd = os.getcwd()
Update doctest with more lenient ellipsis Python 3.7 seems to output "re.Match object; span=(0, 4), match='abcd'"
@@ -9,7 +9,7 @@ with Java regular expression code. >>> r2 = re.compile('bc') >>> r3 = re.compile('abc') >>> fullmatch(r1, string) # doctest: +ELLIPSIS -<...SRE_Match object...> +<...Match object...> >>> fullmatch(r2, string) >>> fullmatch(r3, string) >>> r = re.compile(r'\\d{8}|\\d{10,11}')
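The same behaviour can be checked directly with the standard library's re.fullmatch (Python 3.4+), which also shows why the match-object repr needs a lenient ellipsis: older versions print "_sre.SRE_Match object" while newer ones print "re.Match object".

```python
# Full matches succeed only when the pattern covers the entire string.
import re

string = 'abcd'
print(re.fullmatch(re.compile('abcd'), string))   # a match object
print(re.fullmatch(re.compile('bc'), string))     # None
print(re.fullmatch(re.compile('abc'), string))    # None
```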
Add comments; use the standard definition of the condition number.
@@ -1263,17 +1263,20 @@ def randsphere(n, rstate=None): return xhat -def improve_covar_mat(covar0, ntries=100, min_condition_number=1e-12): +def improve_covar_mat(covar0, ntries=100, max_condition_number=1e14): """ Given the covariance matrix improve it, if it is not invertable - or eigen values are negative or condition number is below the limit + or eigen values are negative or condition number that is above the limit Returns: updated matrix and its inverse """ ndim = covar0.shape[0] covar = np.array(covar0) + coeffmin = 1e-10 + # this will a starting point for the modification + # of the form (1-coeff)*M + (coeff)*E - for trials in range(ntries): + for trial in range(ntries): failed = False try: # Check if matrix is invertible. @@ -1284,7 +1287,7 @@ def improve_covar_mat(covar0, ntries=100, min_condition_number=1e-12): lalg.cholesky(covar, lower=True) # Check if everything worked. - if np.all((l > 0) & np.isfinite(l)) and l.min()>l.max() * min_condition_number: + if np.all((l > 0) & np.isfinite(l)) and l.max() < l.min() * max_condition_number: break else: failed = True @@ -1293,7 +1296,9 @@ def improve_covar_mat(covar0, ntries=100, min_condition_number=1e-12): # suppress the off-diagonal elements failed = True if failed: - coeff = 0.5**(ntries - 1 - trials) + coeff = coeffmin * (1./coeffmin)**(trial*1./(ntries-1)) + # this starts at coeffmin when trial=0 and ends at 1 + # when trial == ntries-1 covar = (1. - coeff) * covar + coeff * np.eye(ndim) if failed: warnings.warn("Failed to guarantee the ellipsoid axes will be " @@ -1366,9 +1371,14 @@ def bounding_ellipsoid(points, pointvol=0.): # Due to round-off errors, we actually scale the ellipsoid so the # outermost point obeys `(x-v)^T A (x-v) < 1 - (a bit) < 1`. + + ROUND_DELTA = 1e-3 # numerical experiments show that round off errors can reach large - # values if the matrix eigen values are very low + # values if the matrix is poorly conditioned + # Note that likely the delta here must be related to maximum + # condition number parameter in improve_covar_mat() + # one_minus_a_bit = 1. - ROUND_DELTA if fmax > one_minus_a_bit: covar *= fmax / one_minus_a_bit @@ -1378,7 +1388,8 @@ def bounding_ellipsoid(points, pointvol=0.): covar, am = improve_covar_mat(covar) # this is a final check - # if this fails the ellipsoid is broken already + # if this fails the ellipsoid is still broken + # in the sense that it does not include the points fmax1 = np.einsum('...i, ...i', np.tensordot(delta, am, axes=1), delta).max() if fmax1 >=1: raise RuntimeError("Failed to initialize the ellipsoid to contain all the points")
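A NumPy-only sketch of the two ideas in this change: the standard condition number of a symmetric matrix (largest over smallest eigenvalue) and the (1 - coeff) * C + coeff * I blend that pulls a badly conditioned covariance back toward the identity. The numbers are illustrative, not dynesty's actual thresholds.

```python
# Condition number and identity-blend regularization of a covariance matrix.
import numpy as np

C = np.array([[1.0, 0.999999],
              [0.999999, 1.0]])            # nearly singular covariance
eigvals = np.linalg.eigvalsh(C)
print("condition number:", eigvals.max() / eigvals.min())   # ~2e6

coeff = 1e-3
C_reg = (1.0 - coeff) * C + coeff * np.eye(2)
eigvals_reg = np.linalg.eigvalsh(C_reg)
print("after blending:", eigvals_reg.max() / eigvals_reg.min())  # ~2e3
```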
Support -s option in runs list when showing comments `-s` is used for tests - does not impact users.
@@ -1802,18 +1802,12 @@ def _comment(args, ctx): def _list_comments(args, ctx): - _list_runs_comments(runs_op_selected(args, ctx, LATEST_RUN_ARG)) + _list_runs_comments(runs_op_selected(args, ctx, LATEST_RUN_ARG), args) -def _list_runs_comments(runs, comment_index_format=True): +def _list_runs_comments(runs, args, comment_index_format=True): formatted_runs = format_runs(runs) - cols = [ - _col1_for_comments_header(comment_index_format), - "op_desc", - "started", - "status_with_remote", - "label", - ] + cols = _cols_for_list_with_comments(comment_index_format, args) cli.table( formatted_runs, cols, @@ -1824,10 +1818,16 @@ def _list_runs_comments(runs, comment_index_format=True): ) -def _col1_for_comments_header(comment_index_format): - if comment_index_format: - return "short_id" - return "index" +def _cols_for_list_with_comments(comment_index_format, args): + if not comment_index_format: + return _cols_for_list(args) + return [ + "short_id", + "op_desc", + "started", + "status_with_remote", + "label" + ] def _fg_for_comments_header(comment_index_format):
iterable split comment added [ci skip]
@@ -1418,6 +1418,12 @@ def join_dicts(dicts: List[dict]) -> dict: def iterable_split(func: Callable, iterable: Iterable, params: dict, *args, **kwargs) -> np.ndarray: + """ + # TODO: a faster version using buffer-provider objects via the uppercase communication method + A faster version of iterable/tiles_split is possible when the return values from each process is of the same size + and will be addressed in future. In this case a buffer-provider object can be sent between processes using the + uppercase communication (like Gather instead of gather) methods which can be significantly faster. + """ if params[C.PARALLEL]: ret_combined = {} rets = Parallel(
Fix comment on Cost params_bytes. Summary: Pull Request resolved: As discussed with Alexander Sidorov, params_bytes refers to the number of bytes we're reading for parameters, not the size of the parameters. They only differ in sparse operators.
@@ -190,7 +190,7 @@ class CAFFE2_API OpSchema { uint64_t flops{0}; // Floating point operations. uint64_t bytes_read{0}; // Total memory read. uint64_t bytes_written{0}; // Total memory written. - uint64_t params_bytes{0}; // Memory footprint of parameters + uint64_t params_bytes{0}; // Memory read for parameters. }; /** * @brief Registers a function that takes in an OperatorDef
Update mkvtomp4.py: restore source sorting so that the first-language option respects it; update the sortStreams function to handle either incoming source data or options that have been generated; wrap sorting in try/catches so errors don't break things.
@@ -349,7 +349,15 @@ class MkvtoMp4: audio_settings = [] blocked_audio_languages = [] iOS = (self.settings.iOS is not False) - for a in info.audio: + + # Sort incoming streams so that things like first language preferences respect these options + audio_streams = info.audio + try: + self.sortStreams(audio_streams, awl) + except: + self.log.exception("Error sorting source audio streams [sort-streams].") + + for a in audio_streams: self.log.info("Audio detected for stream %s - %s %s %d channel." % (a.index, a.codec, a.metadata['language'], a.audio_channels)) if self.settings.output_extension in valid_tagging_extensions and a.codec.lower() == 'truehd' and self.settings.ignore_truehd: @@ -589,8 +597,11 @@ class MkvtoMp4: video_settings['filter'] = vfilter # Sort Options + try: self.sortStreams(audio_settings, awl) self.sortStreams(subtitle_settings, swl) + except: + self.log.exception("Error sorting output stream options [sort-streams].") # Attachments attachments = [] @@ -728,9 +739,15 @@ class MkvtoMp4: def sortStreams(self, streams, languages): if self.settings.sort_streams: self.log.debug("Reordering streams to be in accordance with approved languages and channels [sort-streams, prefer-more-channels].") + if len(streams) > 0: + if isinstance(streams[0], dict): streams.sort(key=lambda x: x.get('channels', 999), reverse=self.settings.prefer_more_channels) if languages: streams.sort(key=lambda x: languages.index(x['language']) if x['language'] in languages else 999) + else: + streams.sort(key=lambda x: x.audio_channels, reverse=self.settings.prefer_more_channels) + if languages: + streams.sort(key=lambda x: languages.index(x.metadata['language']) if x.metadata['language'] in languages else 999) def burnSubtitleFilter(self, inputfile, subtitle_streams, swl, valid_external_subs=None): if self.settings.burn_subtitles:
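A plain-Python illustration of the two-pass sort that sortStreams performs: a stable sort by channel count followed by a stable sort on the index of each stream's language in the approved list, with unmatched languages pushed to the end via a large sentinel key. The sample streams are made up.

```python
# Two-pass stable sort: channels first, then approved-language order.
streams = [
    {"language": "fre", "channels": 6},
    {"language": "eng", "channels": 2},
    {"language": "eng", "channels": 6},
    {"language": "jpn", "channels": 8},
]
languages = ["eng", "fre"]

streams.sort(key=lambda x: x.get("channels", 999), reverse=True)
streams.sort(key=lambda x: languages.index(x["language"])
             if x["language"] in languages else 999)

print([(s["language"], s["channels"]) for s in streams])
# [('eng', 6), ('eng', 2), ('fre', 6), ('jpn', 8)]
```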
Fixed indexing in the returned enr state.
@@ -927,7 +927,7 @@ def enr_fock(dims, excitations, state): raise ValueError("The state tuple %s is not in the restricted " "state space" % str(tuple(state))) - return Qobj(data, dims=[dims, 1]) + return Qobj(data, dims=[dims, [1]*len(dims)]) def enr_thermal_dm(dims, excitations, n):
Remove community nux for now ### Summary & Motivation We're going to rethink our strategy here
@@ -15,7 +15,6 @@ import ReactDOM from 'react-dom'; import {Link} from 'react-router-dom'; import styled from 'styled-components/macro'; -import {CommunityNux} from './NUX/CommunityNux'; import {extractInitializationData} from './extractInitializationData'; import {telemetryLink} from './telemetryLink'; @@ -71,7 +70,6 @@ ReactDOM.render( </AppTopNav> <App> <ContentRoot /> - <CommunityNux /> </App> </AppProvider>, document.getElementById('root'),
Removed all substitution logic from the datasource. Variable substitution is a service that the data context provides. Datasources should not see unsubstituted variables in their config.
@@ -82,8 +82,6 @@ class SqlAlchemyDatasource(Datasource): generators=generators, **configuration_with_defaults) -######## - ### FIXME: EUGENE UPDATE WITH ENVIRONMENT LOGIC if credentials is not None: self._datasource_config.update({ "credentials": credentials @@ -116,10 +114,8 @@ class SqlAlchemyDatasource(Datasource): self._build_generators() def _get_sqlalchemy_connection_options(self, **kwargs): - # NOTE: Eugene: 2019-09-10: reimplement, following the environments config logic! - datasource_config_with_vars_replaced = self.data_context.get_config_with_variables_replaced(config=self._datasource_config) - if "credentials" in datasource_config_with_vars_replaced: - credentials = datasource_config_with_vars_replaced["credentials"] + if "credentials" in self._datasource_config: + credentials = self._datasource_config["credentials"] else: credentials = {}
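A minimal sketch of the division of responsibility described above, assuming a simple `${NAME}` placeholder scheme; the class and function names are illustrative toys, not the project's real API.

```python
import re

def substitute_config_variables(config, variables):
    """Replace ${NAME} placeholders before the datasource ever sees them."""
    if isinstance(config, dict):
        return {k: substitute_config_variables(v, variables) for k, v in config.items()}
    if isinstance(config, str):
        return re.sub(r"\$\{(\w+)\}",
                      lambda m: str(variables.get(m.group(1), m.group(0))),
                      config)
    return config

class ToyDataContext:
    def __init__(self, variables):
        self._variables = variables

    def build_datasource(self, raw_config):
        # Substitution happens here, in the context ...
        resolved = substitute_config_variables(raw_config, self._variables)
        # ... so the datasource only receives concrete values.
        return ToyDatasource(resolved)

class ToyDatasource:
    def __init__(self, config):
        self.credentials = config.get("credentials", {})

context = ToyDataContext({"DB_PASSWORD": "s3cret"})
ds = context.build_datasource({"credentials": {"password": "${DB_PASSWORD}"}})
print(ds.credentials)  # {'password': 's3cret'}
```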
cli: dataflow: diagram: Fix for new inputflow syntax We had forgotten to fix the diagram command when we added the new inputflow syntax, the one where we can specify which definitions from an origin an input could come from. Fixes:
@@ -426,6 +426,39 @@ class Diagram(CMD): print(f"{seed_input_node} --> {input_node}") else: print(f"{seed_input_node} --> {node}") + elif isinstance(source, dict) and isinstance( + list(source.values())[0], list + ): + # Handle the case where the key is the origin (so + # something like "seed") and the value is a list of + # acceptable definitions from that origin. + origin, definition_names = list(source.items())[0] + for definition_name in definition_names: + origin_definition_name = ( + origin + "." + definition_name + ) + seed_input_node = hashlib.md5( + origin_definition_name.encode() + ).hexdigest() + print( + f"{seed_input_node}({origin_definition_name})" + ) + if len(self.stages) == 1: + print( + f"style {seed_input_node} fill:#f6dbf9,stroke:#a178ca" + ) + if not self.simple: + input_node = hashlib.md5( + ( + "input." + + instance_name + + "." + + input_name + ).encode() + ).hexdigest() + print(f"{seed_input_node} --> {input_node}") + else: + print(f"{seed_input_node} --> {node}") else: if not self.simple: source_output_node = hashlib.md5(
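A simplified sketch of the dispatch the diagram command now performs: a source can be a plain string, or a dict mapping an origin (such as "seed") to a list of acceptable definition names. The node-naming mirrors the md5 hashing used above, but the data and helper names are made up.

```python
import hashlib

def node_id(name):
    return hashlib.md5(name.encode()).hexdigest()

def emit_edges(input_name, source):
    """Yield (label, mermaid edge) pairs for one input's source specification."""
    target = node_id("input." + input_name)
    if isinstance(source, str):
        yield source, f"{node_id(source)} --> {target}"
    elif isinstance(source, dict) and isinstance(list(source.values())[0], list):
        # New syntax: {"seed": ["definition_a", "definition_b"]}
        origin, definition_names = list(source.items())[0]
        for definition_name in definition_names:
            full = origin + "." + definition_name
            yield full, f"{node_id(full)} --> {target}"

for label, edge in emit_edges("data", {"seed": ["training_data", "eval_data"]}):
    print(label, "=>", edge)
```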
AbstractExpression.prepare: expand and traverse TypeRepo.Defer instances TN:
@@ -520,8 +520,10 @@ class AbstractExpression(Frozable): elif isinstance(obj, (dict)): for v in obj.items(): explore(v, fn) - elif (not isinstance(obj, (PropertyDef, TypeRepo.Defer)) and - hasattr(obj, 'prepare')): + elif isinstance(obj, TypeRepo.Defer): + obj = obj.get() + return explore(obj, fn) or obj + elif not isinstance(obj, PropertyDef) and hasattr(obj, 'prepare'): explore(obj.prepare(), fn) ret = self
Update ESP32_Code.ino Removing a line that is not used.
@@ -42,7 +42,6 @@ volatile int wifiStatus = 0; volatile int wifiPrev = WL_CONNECTED; void WiFireconnect( void * pvParameters ) { - bool ESP_reset = true; //wifiStatus = WiFi.status(); //wifiPrev = wifiStatus; delay(1000);
fix(pg): Transform falsy values to None ref:
@@ -113,7 +113,7 @@ class Database: raise NotImplementedError def _transform_query(self, query: Query, values: QueryValues): - return query, values or () + return query, values or None def sql( self,
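A small sketch of the behaviour change, assuming a driver that distinguishes "no parameters" (`None`) from "an empty parameter sequence"; the `fake_execute` function is a fabricated stand-in, not the project's database layer.

```python
def transform_query(query, values):
    # Before: `values or ()` turned every falsy value ({}, [], None) into an
    # empty tuple. After: falsy values become None, i.e. "no parameters".
    return query, values or None

def fake_execute(query, params):
    if params is None:
        return f"execute({query!r}) with no parameter binding"
    return f"execute({query!r}) binding {params!r}"

for values in (None, (), {}, ("open",)):
    q, p = transform_query("SELECT * FROM doors WHERE state = %s", values)
    print(values, "->", fake_execute(q, p))
```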
RampPlugValueWidget: Don't allow edits to readOnly plugs (partially simplified version of )
@@ -79,7 +79,6 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : self.__splineWidget._qtWidget().setMinimumHeight( 50 ) self.__slider = GafferUI.Slider() - self.__slider.setSizeEditable( True ) self.__slider.setMinimumSize( 2 ) self.__positionsChangedConnection = self.__slider.positionChangedSignal().connect( Gaffer.WeakMethod( self.__positionsChanged ), scoped = False ) self.__slider.indexRemovedSignal().connect( Gaffer.WeakMethod( self.__indexRemoved ), scoped = False ) @@ -129,6 +128,9 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : def _updateFromPlug( self ) : plug = self.getPlug() + self.__slider.setSizeEditable( not ( plug.getInput() or + plug.direction() == Gaffer.Plug.Direction.Out or Gaffer.MetadataAlgo.readOnly( plug ) + ) ) with self.getContext() : self.__splineWidget.setSpline( plug.getValue().spline() ) @@ -146,7 +148,6 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : self.__positionsMergeGroupId += 1 self.__lastPositionChangedReason = reason - rejected = False plug = self.getPlug() with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ), @@ -154,6 +155,7 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : ) : if len( slider.getPositions() ) == plug.numPoints() : + rejected = False # the user has moved an existing point on the slider for index, position in enumerate( slider.getPositions() ) : if plug.pointXPlug( index ).getValue() != position : @@ -162,6 +164,9 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : curPlug.setValue( position ) else: rejected = True + + if rejected: + self._updateFromPlug() else : # a new position was added on the end by the user clicking # on an empty area of the slider. @@ -169,33 +174,14 @@ class RampPlugValueWidget( GafferUI.PlugValueWidget ) : assert( len( slider.getPositions() ) == numPoints + 1 ) spline = plug.getValue().spline() position = slider.getPositions()[numPoints] - if not ( plug.getInput() or plug.direction() == Gaffer.Plug.Direction.Out - or Gaffer.MetadataAlgo.readOnly( plug ) - ): plug.addPoint() plug.pointXPlug( numPoints ).setValue( position ) plug.pointYPlug( numPoints ).setValue( spline( position ) ) - else: - rejected = True - - if rejected: - self._updateFromPlug() def __indexRemoved( self, slider, index ) : - plug = self.getPlug() - rejected = False - with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) : - if not ( plug.getInput() or plug.direction() == Gaffer.Plug.Direction.Out - or Gaffer.MetadataAlgo.readOnly( plug ) - ): + with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) : self.getPlug().removePoint( index ) - else: - rejected = True - - if rejected: - self._updateFromPlug() - def __selectedIndexChanged( self, slider ) :
Fix: generate_table would otherwise fail with a more general index Reference:
@@ -17,7 +17,7 @@ def generate_table(dataframe, max_rows=10): # Body [html.Tr([ - html.Td(dataframe.loc[i][col]) for col in dataframe.columns + html.Td(dataframe.iloc[i][col]) for col in dataframe.columns ]) for i in range(min(len(dataframe), max_rows))] )
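A short pandas example of why positional indexing is needed once the DataFrame carries a non-default index; the data is made up.

```python
import pandas as pd

df = pd.DataFrame({"city": ["Montreal", "Toronto"], "temp": [-3, 1]},
                  index=[10, 20])  # a "more general" (non 0..n-1) index

print(df.iloc[0]["city"])   # 'Montreal': position 0 always exists

try:
    df.loc[0]["city"]       # label 0 does not exist in this index
except KeyError as exc:
    print("loc failed with KeyError:", exc)
```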
Update test_structure.py Changed quotation marks to satisfy the 'black' linter.
@@ -551,14 +551,14 @@ Direct def test_get_symmetric_neighbor_list(self): # tetragonal group with all bonds related by symmetry - s = Structure.from_spacegroup(100, [[1, 0, 0], [0, 1, 0], [0, 0, 2]], ['Fe'], [[0., 0., 0.]]) + s = Structure.from_spacegroup(100, [[1, 0, 0], [0, 1, 0], [0, 0, 2]], ["Fe"], [[0.0, 0.0, 0.0]]) c_indices, p_indices, offsets, distances, s_indices, symops = s.get_symmetric_neighbor_list(0.8, sg=100) self.assertTrue(len(np.unique(s_indices)) == 1) self.assertTrue(s_indices[0] == 0) self.assertTrue((~np.isnan(s_indices)).all()) self.assertTrue((symops[0].affine_matrix == np.eye(4)).all()) # now more complicated example with bonds of same length but with different symmetry - s2 = Structure.from_spacegroup(198, [[8.908, 0, 0], [0, 8.908, 0], [0, 0, 8.908]], ['Cu'], [[0., 0., 0.]]) + s2 = Structure.from_spacegroup(198, [[8.908, 0, 0], [0, 8.908, 0], [0, 0, 8.908]], ["Cu"], [[0.0, 0.0, 0.0]]) c_indices2, p_indices2, offsets2, distances2, s_indices2, symops2 = s2.get_symmetric_neighbor_list(7, sg=198) self.assertTrue(len(np.unique(s_indices2)) == 2) self.assertTrue(len(s_indices2) == 48)
Fix DDP error checking for unused parameters Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -322,26 +322,34 @@ void Reducer::mark_variable_ready(VariableIndex index) { // Something is wrong if all variables contained in this bucket replica have // already been marked as ready. if (replica.pending == 0) { - // Receiving a call to `mark_variable_ready` twice for the same variable - // is only possible if the variable was initially deemed unused, and was - // marked ready from the `prepare_for_backward` function, only to become - // part of the autograd graph at a later point in time. - TORCH_INTERNAL_ASSERT(has_marked_unused_parameters_); - TORCH_CHECK( - false, + const auto common_error = c10::str( "Expected to mark a variable ready only once. ", "", - "This error is caused by use of a module parameter outside the ", - "`forward` function. The return value of the `forward` function ", - "is inspected by the distributed data parallel wrapper to figure ", - "out if any of the module's parameters went unused. If this is the ", - "case, it knows they won't receive gradients in a backward pass. ", - "If any of those parameters are then used outside `forward`, this ", - "error condition is triggered. ", - "", - "You can disable unused parameter detection by passing the keyword " - "argument `find_unused_parameters=False` to ", + "This error is caused by one of the following reasons: ", + "1) Use of a module parameter outside the `forward` function. ", + "Please make sure model parameters are not shared across multiple ", + "concurrent forward-backward passes", + "2) Reused parameters in multiple reentrant backward passes. For ", + "example, if you use multiple `checkpoint` functions to wrap the ", + "same part of your model, it would result in the same set of ", + "parameters been used by different reentrant backward passes ", + "multiple times, and hence marking a variable ready multiple times. ", + "DDP does not support such use cases yet."); + TORCH_CHECK( + has_marked_unused_parameters_, + common_error, + "3) Incorrect unused parameter detection. The return value of the ", + "`forward` function is inspected by the distributed data parallel ", + "wrapper to figure out if any of the module's parameters went ", + "unused. For unused parameters, DDP would not expect gradients from ", + "then. However, if an unused parameter becomes part of the autograd ", + "graph at a later point in time (e.g., in a reentrant backward when ", + "using `checkpoint`), the gradient will show up unexpectedly. If all ", + "parameters in the model participate in the backward pass, you can ", + "disable unused parameter detection by passing the keyword argument ", + "`find_unused_parameters=False` to ", "`torch.nn.parallel.DistributedDataParallel`."); + TORCH_CHECK(!has_marked_unused_parameters_, common_error); } if (bucket.expect_sparse_gradient) {
I have too many MAGs for the command line. I'm not sure these log files are useful if no error happens
@@ -42,7 +42,6 @@ def predict_genes_genomes(input_dir,out_dir,log,threads): pool.starmap(predict_genes, zip(genome_names,genomes_fastas, itertools.repeat(out_dir),log_names)) - shell("cat {log_names} > {log}".format(log_names=' '.join(log_names), log=log)) shell("rm -r {temp_log_dir}".format(temp_log_dir=temp_log_dir)) if __name__ == "__main__":
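The underlying problem is the shell's argument-length limit being exceeded when thousands of per-genome log paths are passed to a single `cat`. If the merged log is ever wanted again, one hedged alternative is to concatenate in Python instead of shelling out; the paths below are illustrative only.

```python
import shutil
from pathlib import Path

def merge_logs(log_paths, combined_log):
    """Concatenate many log files without building a huge shell command line."""
    with open(combined_log, "wb") as out:
        for path in log_paths:
            with open(path, "rb") as src:
                shutil.copyfileobj(src, out)

# Example with hypothetical per-genome logs:
log_dir = Path("logs")
log_dir.mkdir(exist_ok=True)
for i in range(3):
    (log_dir / f"genome_{i}.log").write_text(f"predicted genes for genome {i}\n")

merge_logs(sorted(log_dir.glob("genome_*.log")), "predict_genes.log")
print(Path("predict_genes.log").read_text())
```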
TST: Make test_partial_iteration_cleanup robust but require leak checker This makes sure the test is not flaky, but the test now requires a leak checker (either valgrind- or reference-count-based should work, in CPython at least). Closes
@@ -3169,7 +3169,6 @@ def test_warn_noclose(): @pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32", reason="Errors with Python 3.9 on Windows") [email protected](not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases ("i,O", "O,O"), # structured partially only copying O @@ -3177,9 +3176,14 @@ def test_warn_noclose(): ]) @pytest.mark.parametrize("steps", [1, 2, 3]) def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): - value = 123 # relies on python cache (leak-check will still find it) + """ + Checks for reference counting leaks during cleanup. Using explicit + reference counts lead to occasional false positives (at least in parallel + test setups). This test now should still test leaks correctly when + run e.g. with pytest-valgrind or pytest-leaks + """ + value = 2**30 + 1 # just a random value that Python won't intern arr = np.full(int(np.BUFSIZE * 2.5), value).astype(in_dtype) - count = sys.getrefcount(value) it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)], flags=["buffered", "external_loop", "refs_ok"], casting="unsafe") @@ -3187,11 +3191,7 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): # The iteration finishes in 3 steps, the first two are partial next(it) - # Note that resetting does not free references - del it - break_cycles() - break_cycles() - assert count == sys.getrefcount(value) + del it # not necessary, but we test the cleanup # Repeat the test with `iternext` it = np.nditer(arr, op_dtypes=[np.dtype(buf_dtype)], @@ -3199,11 +3199,7 @@ def test_partial_iteration_cleanup(in_dtype, buf_dtype, steps): for step in range(steps): it.iternext() - del it # should ensure cleanup - break_cycles() - break_cycles() - assert count == sys.getrefcount(value) - + del it # not necessary, but we test the cleanup @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") @pytest.mark.parametrize(["in_dtype", "buf_dtype"],
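A small illustration of why counting references to a cached value is fragile: small integers are shared across the interpreter, so their reference counts can drift for reasons unrelated to the code under test, while a fresh large integer is referenced only where it is used. Exact numbers vary by CPython version (recent versions even make small ints immortal), so the sketch only prints them rather than asserting.

```python
import sys

shared = 123           # drawn from CPython's small-int cache, shared globally
unique = 2**30 + 1     # large enough that it will not be interned

print("refcount of 123:       ", sys.getrefcount(shared))
print("refcount of 2**30 + 1: ", sys.getrefcount(unique))

# Unrelated code can change the shared value's count, so asserting
# `count_before == count_after` around the code under test is flaky:
unrelated = [123] * 4
print("refcount of 123 after unrelated use:", sys.getrefcount(shared))
```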
Fix groupby bug Add cast to groupby
@@ -1124,7 +1124,7 @@ class BaseColumns(ABC): :param agg: List of tuples with the form [("agg", "col")] :return: """ - df = self.root.data + df = self.root compact = {} agg_names = None @@ -1137,15 +1137,20 @@ class BaseColumns(ABC): for col_agg in agg: if is_dict(col_agg): - col_agg = list(col_agg.items())[::-1] + col_agg = list(col_agg.items())[0][::-1] _agg, _col = col_agg compact.setdefault(_col, []).append(_agg) - df = df.groupby(by=by).agg(compact).reset_index() + # TODO cast to float on certain aggregations + df = df.cols.to_float(list(compact.keys())) + + dfd = df.data + + dfd = dfd.groupby(by=by).agg(compact).reset_index() agg_names = agg_names or [a[0] + "_" + a[1] for a in agg] - df.columns = (val_to_list(by) + agg_names) - df.columns = [str(c) for c in df.columns] - return self.root.new(df) + dfd.columns = (val_to_list(by) + agg_names) + dfd.columns = [str(c) for c in dfd.columns] + return self.root.new(dfd) def move(self, column, position, ref_col=None) -> 'DataFrameType': """
Omit relay state in HTTP-POST response when empty Do not send an HTTP-POST SAML response with a non-conforming relay state. This can happen when the incoming <AuthnRequest> does not include relay state information.
@@ -94,6 +94,8 @@ def http_form_post_message(message, location, relay_state="", val=html.escape(_msg), type='hidden') + relay_state_input = "" + if relay_state: relay_state_input = HTML_INPUT_ELEMENT_SPEC.format( name='RelayState', val=html.escape(relay_state),
Document the CLI 'cache clear' command Resolves:
@@ -491,3 +491,19 @@ The `cache list` command lists Poetry's available caches. ```bash poetry cache list ``` + +### cache clear + +The `cache clear` command removes packages from a cached repository. + +For example, to clear the whole cache of packages from the `pypi` repository, run: + +```bash +poetry cache clear pypi --all +``` + +To only remove a specific package from a cache, you have to specify the cache entry in the following form `cache:package:version`: + +```bash +poetry cache clear pypi:requests:2.24.0 +```
Update tests/unit/recommenders/evaluation/test_python_evaluation.py Add comments
@@ -219,6 +219,7 @@ def test_get_top_k_items(rating_true): assert(top_3_items_df[DEFAULT_ITEM_COL][5] in [5, 6]) assert(set(top_3_items_df[DEFAULT_ITEM_COL][6:]) == set([2, 5, 6])) + # Tests when k is larger than the number of available items top_6_items_df = get_top_k_items( dataframe=rating_true, col_user=DEFAULT_USER_COL,
Update pymeasure/instruments/eurotest/eurotestHPP120256.py Remove module level variable for regex
@@ -93,8 +93,7 @@ class EurotestHPP120256(Instrument): COMMAND_DELAY = 0.2 # s response_encoding = "iso-8859-2" - f_numbers_regex_pattern = r'([+-]?([\d]*\.)?[\d]+)' - regex = re.compile(f_numbers_regex_pattern) + regex = re.compile(r'([+-]?([\d]*\.)?[\d]+)') def __init__(self, adapter,
Fix the HTML AST doc installation procedure TN:
@@ -770,7 +770,7 @@ class ManageScript(object): # Install the remaining miscellaneous files for fpath in [ os.path.join('include', lib_name + '.h'), - os.path.join('share', lib_name, 'ast-types.txt'), + os.path.join('share', lib_name, 'ast-types.html'), os.path.join('python', lib_name + '.py'), ]: build_path = self.dirs.build_dir(fpath)
azure: improve error message Load lowest error details. The error may be an object; sometimes the message is a serialized dict, so try to deserialize and parse it.
@@ -6,6 +6,7 @@ from dataclasses import dataclass, field from datetime import datetime from functools import lru_cache from pathlib import Path +from types import SimpleNamespace from typing import Any, Dict, List, Optional, Type, Union from azure.core.exceptions import HttpResponseError @@ -701,20 +702,14 @@ class AzurePlatform(Platform): # validate_operation returned, it means deployments created # successfuly. so check errors from deployments by name. deployment = deployments.get(resource_group_name, AZURE_DEPLOYMENT_NAME) - # log more details for troubleshooting if deployment.properties.provisioning_state == "Failed": - if deployment.properties.error.details: - error_messages = [ - f"{x.code}, {x.message}" - for x in deployment.properties.error.details - ] + error_messages = self._parse_detail_errors( + deployment.properties.error + ) elif isinstance(identifier, HttpResponseError) and identifier.error: # no validate_operation returned, the message may include # some errors, so check details - if identifier.error.details: - error_messages = [ - f"{x.code}, {x.message}" for x in identifier.error.details - ] + error_messages = self._parse_detail_errors(identifier.error) raise LisaException("\n".join(error_messages)) @@ -735,12 +730,27 @@ class AzurePlatform(Platform): raise LisaException(f"deploy failed: {result}") except HttpResponseError as identifier: assert identifier.error - error_messages = [ - f"{x.code}, {x.message}" for x in identifier.error.details - ] - # original message may not be friendly, refine it. + error_messages = self._parse_detail_errors(identifier.error) raise LisaException("\n".join(error_messages)) + def _parse_detail_errors(self, error: Any) -> List[str]: + # original message may be a summary, get lowest level details. + if hasattr(error, "details") and error.details: + errors: List[str] = [] + for detail in error.details: + errors.extend(self._parse_detail_errors(detail)) + else: + try: + # it returns json string in message sometime + parsed_error = json.loads( + error.message, object_hook=lambda x: SimpleNamespace(**x) + ) + errors = self._parse_detail_errors(parsed_error.error) + except Exception: + # load failed, it should be a real error message string + errors = [f"{error.code}: {error.message}"] + return errors + def _initialize_nodes(self, environment: Environment) -> None: node_context_map: Dict[str, Node] = dict()
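A standalone sketch of the recursive drill-down pattern used above: walk `details` when present, otherwise try to parse a JSON-serialized message into an object before giving up and formatting the leaf error. The nested error payload below is fabricated for illustration.

```python
import json
from types import SimpleNamespace

def parse_detail_errors(error):
    """Return the lowest-level error messages reachable from `error`."""
    if getattr(error, "details", None):
        errors = []
        for detail in error.details:
            errors.extend(parse_detail_errors(detail))
        return errors
    try:
        # The message itself is sometimes a JSON document wrapping more detail.
        parsed = json.loads(error.message, object_hook=lambda d: SimpleNamespace(**d))
        return parse_detail_errors(parsed.error)
    except Exception:
        return [f"{error.code}: {error.message}"]

nested = SimpleNamespace(
    code="DeploymentFailed",
    message="summary only",
    details=[SimpleNamespace(
        code="Conflict",
        details=None,
        message=json.dumps({"error": {"code": "SkuNotAvailable",
                                      "details": None,
                                      "message": "Size not available in region"}}),
    )],
)
print("\n".join(parse_detail_errors(nested)))
```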
Smooth out Sienna tuning Current Sienna tuning leads to pretty significant bouncing and oversteering; this change fixes it. Thanks to for finding some great values and checking with a Toyota mechanic on the real steer ratio for Siennas!
@@ -195,10 +195,10 @@ class CarInterface(CarInterfaceBase): stop_and_go = True ret.safetyParam = 73 ret.wheelbase = 3.03 - ret.steerRatio = 16.0 + ret.steerRatio = 15.5 tire_stiffness_factor = 0.444 ret.mass = 4590. * CV.LB_TO_KG + STD_CARGO_KG - ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]] + ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.19], [0.02]] ret.lateralTuning.pid.kf = 0.00007818594 elif candidate == CAR.LEXUS_IS:
Fixes problem when updating dynamic gene list There was an unbound local variable error when no hgnc genes were found
@@ -87,6 +87,7 @@ class CaseHandler(object): updated_case(dict) """ dynamic_gene_list = [] + res = [] if hgnc_ids: logger.info("Fetching genes by hgnc id") res = self.hgnc_collection.find(
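A minimal reproduction of the bug class being fixed: a name bound only inside a conditional branch raises UnboundLocalError when the branch is skipped, so the fix is to initialize it up front. The names and data here are illustrative, not the scout codebase.

```python
def update_dynamic_gene_list(hgnc_ids=None):
    res = []                      # the fix: always bound, even with no hgnc_ids
    if hgnc_ids:
        res = [f"gene:{hid}" for hid in hgnc_ids]
    return list(res)              # safe to iterate either way

def buggy_version(hgnc_ids=None):
    if hgnc_ids:
        res = [f"gene:{hid}" for hid in hgnc_ids]
    return list(res)              # fails when hgnc_ids is empty

print(update_dynamic_gene_list())          # []
try:
    buggy_version()
except UnboundLocalError as exc:
    print("buggy version:", exc)
```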
Better debug output for test_no_fd_leak failure Print exactly which file descriptor appeared. Also re-enable on OS X. I cannot get it to fail there anymore. Refs
@@ -133,7 +133,6 @@ def requires_progs(*progs): requires_posix = skipUnless(os.name == "posix", "Requires POSIX") requires_utf8 = skipUnless(sh.DEFAULT_ENCODING == "UTF-8", "System encoding must be UTF-8") -not_macos = skipUnless(not IS_MACOS, "Doesn't work on MacOS") requires_py3 = skipUnless(IS_PY3, "Test only works on Python 3") requires_py35 = skipUnless(IS_PY3 and MINOR_VER >= 5, "Test only works on Python 3.5 or higher") requires_py36 = skipUnless(IS_PY3 and MINOR_VER >= 6, "Test only works on Python 3.6 or higher") @@ -2814,10 +2813,6 @@ print("cool") python(py.name, "%%") python(py.name, "%%%") - # TODO - # for some reason, i can't get a good stable baseline measured in this test - # on osx. so skip it for now if osx - @not_macos @requires_progs("lsof") def test_no_fd_leak(self): import sh @@ -2849,7 +2844,7 @@ print("cool") test_pid = os.getpid() - def get_num_fds(): + def get_fds(): lines = sh.lsof(p=test_pid).strip().split("\n") def test(line): @@ -2857,7 +2852,7 @@ print("cool") return "CHR" in line or "PIPE" in line lines = [line for line in lines if test(line)] - return len(lines) - 1 + return set(lines) py = create_tmp_test("") @@ -2866,17 +2861,17 @@ print("cool") # make sure our baseline is stable.. we can remove this test_command() - baseline = get_num_fds() + baseline = get_fds() for i in xrange(10): test_command() - num_fds = get_num_fds() - self.assertEqual(baseline, num_fds) + fds = get_fds() + self.assertEqual(len(baseline), len(fds), fds - baseline) for opts in get_opts(kwargs): for i in xrange(2): test_command(**opts) - num_fds = get_num_fds() - self.assertEqual(baseline, num_fds, (baseline, num_fds, opts)) + fds = get_fds() + self.assertEqual(len(baseline), len(fds), (fds - baseline, opts)) def test_pushd_thread_safety(self): import threading
Docstring added. Docstrings added for the functions do_systeminfo(self, s) and help_systeminfo(self).
@@ -722,9 +722,11 @@ class CmdInterpreter(Cmd): print_say("Shutdown the system.", self) def do_systeminfo(self, s): + """Display system information with distribution logo""" system("screenfetch") def help_systeminfo(self): + """"Print help about systeminfo command""" print_say("Display system information with distribution logo", self) def do_tell(self, s):
Readme: deprecate I also removed the CI status as it's not valid anymore. This repository is archived. Thank you for all the contributions.
+## Deprecation note! + +**Please note: This repository is deprecated and it is no longer actively maintained**. + ## yotta: Build Software with Reusable Components -[![Build Status](https://travis-ci.org/ARMmbed/yotta.svg)](https://travis-ci.org/ARMmbed/yotta) -[![Build Status](https://circleci.com/gh/ARMmbed/yotta.svg?style=shield)](https://circleci.com/gh/ARMmbed/yotta) yotta is a tool from [ARM mbed](https://mbed.org), to make it easier to build better software with C++ and C by re-using modules. Publish your own modules to
Pin docker-boshrelease version. Need to have deeper changes to handle latest upstream version. See
@@ -219,7 +219,7 @@ class Config(dict): } ] if requires_docker_bosh: - version = None + version = '29.0.0' version_param = '?v=' + version if version else '' self['releases'] += [{ 'name': 'docker-boshrelease',
fix: Use `sort_by` field instead of non-existent `sort_field` To get proper form navigation
@@ -1102,13 +1102,13 @@ frappe.ui.form.Form = class FrappeForm { let list_view = frappe.get_list_view(this.doctype); if (list_view) { filters = list_view.get_filters_for_args(); - sort_field = list_view.sort_field; + sort_field = list_view.sort_by; sort_order = list_view.sort_order; } else { let list_settings = frappe.get_user_settings(this.doctype)['List']; if (list_settings) { filters = list_settings.filters; - sort_field = list_settings.sort_field; + sort_field = list_settings.sort_by; sort_order = list_settings.sort_order; } }
add energy_source_code_num to the interim output of the net gen allocation This column indicates the importance of the energy source or fuel type. It indicates whether the energy source/fuel type is a primary, secondary, etc. fuel.
@@ -217,7 +217,8 @@ def allocate_gen_fuel_by_gen_pm_fuel(gf, gen, gens, drop_interim_cols=True): if drop_interim_cols: gen_pm_fuel_frac = gen_pm_fuel_frac[ IDX_PM_FUEL + - ['generator_id', 'net_generation_mwh', 'fuel_consumed_mmbtu']] + ['generator_id', 'energy_source_code_num', 'net_generation_mwh', + 'fuel_consumed_mmbtu']] return gen_pm_fuel_frac @@ -729,7 +730,9 @@ def calc_allocation_fraction(gen_pm_fuel, drop_interim_cols=True): if drop_interim_cols: gen_pm_fuel_ratio = gen_pm_fuel_ratio[ IDX_PM_FUEL + - ['generator_id', 'frac', 'net_generation_mwh_gf_tbl', 'net_generation_mwh_g_tbl', 'capacity_mw', 'fuel_consumed_mmbtu']] + ['generator_id', 'energy_source_code_num', 'frac', + 'net_generation_mwh_gf_tbl', 'net_generation_mwh_g_tbl', + 'capacity_mw', 'fuel_consumed_mmbtu']] return gen_pm_fuel_ratio
Fix Type Hint * Fix Type Hint Union is not necessary here? My IDE pops an error for it. * Rerun workflows * empty commit
@@ -101,7 +101,7 @@ class BaseHTTPResponse: async def send( self, - data: Optional[Union[AnyStr]] = None, + data: Optional[AnyStr] = None, end_stream: Optional[bool] = None, ) -> None: """
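A quick check of why the wrapper is redundant: `typing.Union` of a single type collapses to that type, so `Optional[Union[AnyStr]]` and `Optional[AnyStr]` describe the same annotation (even if some IDEs trip over the indirection). Shown with a concrete type for clarity; the same collapse applies to the `AnyStr` type variable.

```python
from typing import Optional, Union

# A union of a single type collapses to that type ...
print(Union[bytes])                     # <class 'bytes'>
print(Union[bytes] is bytes)            # True

# ... so wrapping it changes nothing about the annotation's meaning.
print(Optional[Union[bytes]] == Optional[bytes])   # True
```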
added tests for xzy, layer and time selection in dataset
@@ -1366,3 +1366,44 @@ def test_incompatible_data_not_allowed(): mikeio.Dataset([da1, da2]) assert "time" in str(excinfo.value).lower() + + +def test_xzy_selection(): + # select in space via x,y,z coordinates test + filename = "tests/testdata/oresund_sigma_z.dfsu" + ds = mikeio.read(filename) + + dss_xzy = ds.sel(x=340000, y=15.75, z=0) + + # check for point geometry after selection + assert type(dss_xzy.geometry) == mikeio.spatial.geometry.GeometryPoint3D + + +def test_layer_selection(): + # select layer test + filename = "tests/testdata/oresund_sigma_z.dfsu" + ds = mikeio.read(filename) + + dss_layer = ds.sel(layer=0) + # should not be layered after selection + assert type(dss_layer.geometry) == mikeio.spatial.FM_geometry.GeometryFM + + +def test_time_selection(): + # select time test + nt = 100 + data = [] + d = np.random.rand(nt) + data.append(d) + time = pd.date_range("2000-1-2", freq="H", periods=nt) + items = [ItemInfo("Foo")] + ds = mikeio.Dataset(data, time, items) + + # check for string input + dss_t = ds.sel(time="2000-01-05") + # and index based + dss_tix = ds.sel(time=80) + + assert dss_t.shape == (24,) + assert len(dss_tix) == 1 + \ No newline at end of file
Update bower.json angular-sanitize is pulling in an incompatible version of Angular, knocking out the webUI by breaking chart.js.
"angular-underscore": "^0.5.0", "angular-translate": "^2.9.0", "angular-ui-switch": "~0.1.0", - "angular-sanitize": "^1.5.0", + "angular-sanitize": "~1.5.0", "angular-file-saver": "~1.0.1", "angular-ui-select": "~0.17.1", "d3": "^3.5.17"
Added more forum ignores More forum ignores for Vanilla forum URL patterns
"/post/\\d+/quote/\\d+", "/tortoise\\.pl", "/ad\\.pl", - "/reportpost\\?link=" + "/reportpost\\?link=", + "/discussion/comment/\\d+/(smile|smiley|wink|blush|neutral|relaxed|grin|joy|sweat_smile|lol|innocent|naughty|yum|relieved|love|sunglasses|smirk|expressionless|unamused|sweat|pensive|confused|confounded|kissing|kissing_heart|kissing_smiling_eyes|kissing_closed_eyes|tongue|disappointed|worried|angry|rage)\\.png", + "/discussion/\\d+/(smile|smiley|wink|blush|neutral|relaxed|grin|joy|sweat_smile|lol|innocent|naughty|yum|relieved|love|sunglasses|smirk|expressionless|unamused|sweat|pensive|confused|confounded|kissing|kissing_heart|kissing_smiling_eyes|kissing_closed_eyes|tongue|disappointed|worried|angry|rage)\\.png", + "/discussion/comment/\\d+/(cry|persevere|triumph|frowning|anguished|fearful|weary|sleepy|tired_face|grimace|bawling|open_mouth|hushed|cold_sweat|scream|astonished|flushed|sleeping|dizzy|no_mouth|mask|star|cookie|warning|mrgreen|heart|heartbreak|kiss|\\+1|-1|grey_question|trollface|grey_question)\\.png", + "/discussion/\\d+/(cry|persevere|triumph|frowning|anguished|fearful|weary|sleepy|tired_face|grimace|bawling|open_mouth|hushed|cold_sweat|scream|astonished|flushed|sleeping|dizzy|no_mouth|mask|star|cookie|warning|mrgreen|heart|heartbreak|kiss|\\+1|-1|grey_question|trollface|grey_question)\\.png", + "/discussion/votecomment/", + "/discussion/flag/" ], "type": "ignore_patterns" }
Updated handbrake.py to handle the case of no main feature Added logging and error functionality if HandBrake could not find the main feature. Copied from #338(https://github.com/automatic-ripping-machine/automatic-ripping-machine/pull/338/commits/699781fd2cfcb0bc1b7a0c03b863cdb6aa70dc31)
@@ -29,12 +29,17 @@ def handbrake_mainfeature(srcpath, basepath, logfile, job): filename = os.path.join(basepath, job.title + "." + job.config.DEST_EXT) filepathname = os.path.join(basepath, filename) + logging.info("Ripping title Mainfeature to " + shlex.quote(filepathname)) get_track_info(srcpath, job) track = job.tracks.filter_by(main_feature=True).first() - logging.info("Ripping title Mainfeature to " + shlex.quote(filepathname)) + + if track is None: + msg = "No main feature found by Handbrake. Turn MAINFEATURE to false in arm.yml and try again." + logging.error(msg) + raise RuntimeError(msg) track.filename = track.orig_filename = filename db.session.commit()
Killswitch bug fix Move check for killswitch config to after the config import.
@@ -11,17 +11,17 @@ from array import array from queue import Empty from aiohttp import ClientSession, ProxyConnectionError +from .db import SIGHTING_CACHE, MYSTERY_CACHE, Bounds +from .utils import random_sleep, round_coords, load_pickle, load_accounts, get_device_info, get_spawn_id, get_distance, get_start_coords +from .shared import DatabaseProcessor +from . import config + if config.FORCED_KILL: try: import _thread except ImportError as e: raise OSError('Your platform does not support _thread so FORCED_KILL will not work.') from e -from .db import SIGHTING_CACHE, MYSTERY_CACHE, Bounds -from .utils import random_sleep, round_coords, load_pickle, load_accounts, get_device_info, get_spawn_id, get_distance, get_start_coords -from .shared import DatabaseProcessor -from . import config - if config.NOTIFY: from .notification import Notifier
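A tiny reproduction of the bug class: module-level code that consults `config` above the line that binds it raises NameError, which is why the killswitch check had to move below `from . import config`. The sketch uses `exec` on a stand-in module body rather than the real package.

```python
# Module-level code that checks config.FORCED_KILL before config is bound,
# mirroring the original worker module layout.
broken_module = (
    "if config.FORCED_KILL:\n"
    "    pass\n"
    "config = __import__('types').SimpleNamespace(FORCED_KILL=False)\n"
)
fixed_module = (
    "config = __import__('types').SimpleNamespace(FORCED_KILL=False)\n"
    "if config.FORCED_KILL:\n"
    "    raise SystemExit('forced kill requested')\n"
)

try:
    exec(compile(broken_module, "<worker>", "exec"), {})
except NameError as exc:
    print("broken layout:", exc)       # name 'config' is not defined

exec(compile(fixed_module, "<worker>", "exec"), {})
print("fixed layout: the check runs only after config is bound")
```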
Removing the use of current-passed-ci.yml files We can no longer pull from current-passed-ci.yml as they point to a puddle that no longer exists and the CI had already moved away from using them a couple of months back.
@@ -54,15 +54,9 @@ if [ ! -z ${current_build+x} ] export RELEASE="$RELEASE-rhel" export VARS="$VARS --extra-vars current_build=$hash" fi - -#If we are not in the pipeline downstream builds need to use current-passed-ci -elif [[ $RELEASE == *rhos-* ]] - then - export RELEASE="perfci/$RELEASE-current-passed-ci" fi - #used to ensure concurrent jobs on the same executor work socketdir=$(mktemp -d /tmp/sockXXXXXX) export ANSIBLE_SSH_CONTROL_PATH=$socketdir/%%h-%%r
Undo part of the windows context manager change Popen is not a context manager on PY27; lots of tests failed as a result.
@@ -296,9 +296,11 @@ def find_vc_pdir_vswhere(msvc_version): vswhere_cmd = [vswhere_path, '-products', '*', '-version', msvc_version, '-property', 'installationPath'] if os.path.exists(vswhere_path): - with subprocess.Popen(vswhere_cmd, + #TODO PY27 cannot use Popen as context manager + # try putting it back to the old way for now + sp = subprocess.Popen(vswhere_cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) as sp: + stderr=subprocess.PIPE) vsdir, err = sp.communicate() if vsdir: vsdir = vsdir.decode("mbcs").splitlines()
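A hedged sketch of a pattern that works on both Python 2.7 and 3: `subprocess.Popen` only gained context-manager support in Python 3.2, so fall back to an explicit call to `communicate()` inside try/finally instead of a `with` block. The command run here is illustrative, not the real vswhere invocation.

```python
import subprocess
import sys

def run_capture(cmd):
    """Run a command and return (stdout, stderr) without using Popen as a
    context manager, so the same code also works on Python 2.7."""
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    try:
        out, err = proc.communicate()   # waits for the process to finish
    finally:
        # communicate() already read the pipes; closing again is harmless.
        for stream in (proc.stdout, proc.stderr):
            if stream is not None:
                stream.close()
    return out, err

out, err = run_capture([sys.executable, "-c", "print('installationPath')"])
print(out.decode().strip())
```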
Added Santa Monica entry Not sure if this should be under LA or as Santa Monica.
@@ -119,3 +119,13 @@ The individual recording the video is trying his best to save his fellow protest * https://twitter.com/YourAnonCentral/status/1266991237355069442 +## Santas Monica + +### Police fire pepper bullets into apartment | June 1st + +Live on TV, police fire multiple pepper bullets at an apartment window. + +**Links** + +* https://twitter.com/GIFsZP/status/1267241803750813703 +
Download blueprint from its tenant folder, not the request one Since a blueprint can be global, it may be downloaded by any user, from any tenant, so its local path should be calculated from the tenant it was created with (during upload), instead of the current request tenant.
@@ -55,38 +55,33 @@ class BlueprintsIdArchive(SecuredResource): """ Download blueprint's archive """ - # Verify blueprint exists. - get_storage_manager().get( - models.Blueprint, - blueprint_id, - include=['id'] - ) + blueprint = get_storage_manager().get(models.Blueprint, blueprint_id) for arc_type in SUPPORTED_ARCHIVE_TYPES: # attempting to find the archive file on the file system local_path = os.path.join( config.instance.file_server_root, FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER, - current_app.config[CURRENT_TENANT_CONFIG].name, - blueprint_id, - '{0}.{1}'.format(blueprint_id, arc_type)) + blueprint.tenant.name, + blueprint.id, + '{0}.{1}'.format(blueprint.id, arc_type)) if os.path.isfile(local_path): archive_type = arc_type break else: raise RuntimeError("Could not find blueprint's archive; " - "Blueprint ID: {0}".format(blueprint_id)) + "Blueprint ID: {0}".format(blueprint.id)) blueprint_path = '{0}/{1}/{2}/{3}/{3}.{4}'.format( FILE_SERVER_RESOURCES_FOLDER, FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER, - current_app.config[CURRENT_TENANT_CONFIG].name, - blueprint_id, + blueprint.tenant.name, + blueprint.id, archive_type) return make_streaming_response( - blueprint_id, + blueprint.id, blueprint_path, os.path.getsize(local_path), archive_type
[FIX] Fix cublas batch matmul * Update batch_matmul.py * fix
@@ -138,7 +138,7 @@ def schedule_batch_matmul(cfg, outs): return s -def batch_matmul_cublas(x, y): +def batch_matmul_cublas(x, y, out_shape=None): """Computes batch matrix multiplication of `x` and `y` when `x` and `y` are data in batch. @@ -150,6 +150,9 @@ def batch_matmul_cublas(x, y): y : tvm.te.Tensor 3-D with shape [batch, N, K] + out_shape : None + The output shape + Returns ------- output : tvm.te.Tensor
new ReciprocalMassMatrixSolver (ls.rmm) (WIP) new .__init__(), .init_rmm(), .__call__()
@@ -1253,3 +1253,82 @@ class MultiProblem(ScipyDirect): res.append(resi) return res[-1] + + +class ReciprocalMassMatrixSolver(LinearSolver): + """ + Solver for explicit transient elastodynamics that uses lumped and/or + reciprocal mass matrix algorithms. + + Limitations: + - Assumes that the density is constant in time. + - Uses the direct EBC application, i.e., no EBC projection matrix. + """ + name = 'ls.rmm' + + _parameters = [ + ('rmm_term', 'str', None, True, + 'The RMM term definition.'), + ('debug', 'bool', False, False, + 'If True, run in debug mode.'), + ] + + def __init__(self, conf, context=None, **kwargs): + LinearSolver.__init__(self, conf, context=context, mtx_im=None, + **kwargs) + + def init_rmm(self): + from sfepy.discrete.evaluate import eval_equations, apply_ebc_to_matrix + + problem = self.context + equations, variables = problem.create_evaluable( + self.conf.rmm_term, preserve_caches=True, + copy_materials=False, mode='weak', + active_only=problem.active_only, + ) + vu = next(variables.iter_state()) + + mtx_a = eval_equations(equations, variables, preserve_caches=True, + mode='weak', dw_mode='matrix', term_mode='DPM', + active_only=problem.active_only) + apply_ebc_to_matrix(mtx_a, vu.eq_map.eq_ebc, + (vu.eq_map.master, vu.eq_map.slave)) + mtx_a.eliminate_zeros() + mtx_ia = mtx_a.copy() + mtx_ia.setdiag(1.0 / mtx_a.diagonal()) + + mtx_c = eval_equations(equations, variables, preserve_caches=True, + mode='weak', dw_mode='matrix', term_mode='RMM', + active_only=problem.active_only) + apply_ebc_to_matrix(mtx_c, vu.eq_map.eq_ebc, + (vu.eq_map.master, vu.eq_map.slave)) + mtx_c.eliminate_zeros() + + mtx_im = mtx_ia @ (mtx_c @ mtx_ia) + apply_ebc_to_matrix(mtx_im, vu.eq_map.eq_ebc, + (vu.eq_map.master, vu.eq_map.slave)) + + if self.conf.debug: + mtx_m = eval_equations( + equations, variables, preserve_caches=True, + mode='weak', dw_mode='matrix', term_mode=None, + active_only=problem.active_only, + ) + + mtx_r = vu.eq_map.get_operator() + mtx_imr = mtx_r.T @ mtx_im @ mtx_r + + dim = problem.domain.shape.dim + output('total mass check: AMM:', mtx_m.sum()/ dim, + 'RMM:', nm.linalg.inv(mtx_imr.toarray()).sum() / dim) + + return mtx_im + + @standard_call + def __call__(self, rhs, x0=None, conf=None, eps_a=None, eps_r=None, + i_max=None, mtx=None, status=None, **kwargs): + + if self.mtx_im is None: + self.mtx_im = self.init_rmm() + + return self.mtx_im @ rhs
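A small standalone sketch of the reciprocal-mass-matrix idea the solver implements: given an assembled consistent-like matrix C and a lumped diagonal matrix A, the action of an approximate inverse mass matrix is taken as A⁻¹ C A⁻¹ applied to the right-hand side, so the "solve" is just sparse matrix-vector products. The 3x3 matrices are made up, and boundary conditions and the actual FE assembly are ignored.

```python
import numpy as np
import scipy.sparse as sp

# Hypothetical assembled matrices on 3 DOFs (stand-ins for the term evaluations).
mtx_a = sp.diags([2.0, 3.0, 1.0]).tocsr()          # lumped, diagonal ("DPM")
mtx_c = sp.csr_matrix(np.array([[2.0, 0.5, 0.0],   # consistent-like ("RMM")
                                [0.5, 3.0, 0.5],
                                [0.0, 0.5, 1.0]]))

# Reciprocal mass matrix: inverting the diagonal is cheap, so
# M^{-1} ~ A^{-1} C A^{-1} stays sparse and explicit.
mtx_ia = mtx_a.copy()
mtx_ia.setdiag(1.0 / mtx_a.diagonal())
mtx_im = mtx_ia @ (mtx_c @ mtx_ia)

rhs = np.array([1.0, 0.0, -1.0])
accel = mtx_im @ rhs          # applying the solver is a sparse mat-vec
print(accel)
```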
[subset] Update default feature tags Corresponding to HarfBuzz commits:
@@ -2614,6 +2614,9 @@ class Options(object):
             'vertical': ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
             'ltr': ['ltra', 'ltrm'],
             'rtl': ['rtla', 'rtlm'],
+            'rand': ['rand'],
+            'justify': ['jalt'],
+            'private': ['Harf', 'HARF', 'Buzz', 'BUZZ'],
             # Complex shapers
             'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2',
                        'fin3', 'cswh', 'mset', 'stch'],
Added IT-CSO Renewable Capacities Added biomass, hydro, solar, and wind for the region from the listed source: (no source update required) (hydro is not split into normal and storage, as this source doesn't have that data) Regions included in IT-CSO are: "Campania, Lazio, Abruzzo"
] }, "IT-CSO": { + "bounding_box": [ + [ + 10.951496463496312, + 39.49396986000008 + ], + [ + 16.29054702002901, + 43.40156077856554 + ] + ], + "capacity": { + "biomass": 440.7, + "geothermal": 0, + "hydro": 2770.7, + "nuclear": 0, + "solar": 2960.8, + "wind": 2061.1 + }, "contributors": [ - "https://github.com/corradio" + "https://github.com/corradio", + "https://github.com/nessie2013" ], "parsers": { "consumption": "ENTSOE.fetch_consumption", "price": "ENTSOE.fetch_price", "production": "ENTSOE.fetch_production" }, - "timezone": "Europe/Rome", - "bounding_box": [ - [ - 10.951496463496312, - 39.49396986000008 - ], - [ - 16.29054702002901, - 43.40156077856554 - ] - ] + "timezone": "Europe/Rome" }, "IT-NO": { "contributors": [
/hello/: Make gradients extend further on mobile. This makes the gradients extend further on mobile so that the white text does not display on an almost-white background.
@@ -3208,6 +3208,22 @@ nav ul li.active::after { .footer { width: 100vw; } + + /* the gradients leave the bottom of the text and the button white so we + want to have the gradients stay darker for longer. + */ + .gradients .gradient.green { + background: linear-gradient(-25deg, transparent 10%, #3fb082 80%); + } + + .gradients .gradient.blue { + background: linear-gradient(25deg, transparent 10%, #5298b1 80%); + } + + .gradients .gradient.sunburst { + background: linear-gradient(5deg, transparent 20%, #e8d275 80%); + } + } @media (max-width: 375px) {
Remove todo as mistral tag filtering is fixed Fixed tag filtering in mistral so it is safe to remove the workaround to delete workflows Depends-On:
@@ -113,16 +113,9 @@ if [ "$(hiera mistral_api_enabled)" = "true" ]; then if openstack cron trigger show publish-ui-logs-hourly >/dev/null 2>&1; then openstack cron trigger delete publish-ui-logs-hourly fi - #TODO In the future we should be able to run something like - # openstack workflow list --filter=tag=tripleo-common-managed - # but right now this is broken in Mistral, so we'll fix later. - for workflow in $(openstack workflow list -c Name -c Tags | grep tripleo-common-managed); do - NAME=$(echo ${workflow} | awk '{print $2}') - TAG=$(echo ${workflow} | awk '{print $4}') - if echo $TAG | grep -q tripleo-common-managed; then - openstack workflow delete $NAME - fi - done + + openstack workflow delete $(openstack workflow list -c Name -f value --filter tags=tripleo-common-managed) || true; + for workbook in $(ls /usr/share/openstack-tripleo-common/workbooks/*); do openstack workbook create $workbook done
Fixing KeyErrors on messages without text This may occur if messages consist only of media elements, e.g. a message that consists of just a gif.
@@ -305,7 +305,7 @@ class ConnectorTelegram(Connector): if message.get("reply_to_message"): event = Reply( - text=emoji.demojize(message["text"]), + text=emoji.demojize(message.get("text", "")), user=user, user_id=user_id, event_id=message["message_id"], @@ -317,7 +317,7 @@ class ConnectorTelegram(Connector): if message.get("text"): event = Message( - text=emoji.demojize(message["text"]), + text=emoji.demojize(message.get("text", "")), user=user, user_id=user_id, target=message["chat"]["id"],
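A minimal illustration of the failure mode: a Telegram update for a GIF- or sticker-only message carries no "text" key, so indexing raises KeyError while `.get("text", "")` degrades gracefully. The payloads below are trimmed fakes.

```python
text_message = {"message_id": 1, "chat": {"id": 42}, "text": "hello"}
gif_only_message = {"message_id": 2, "chat": {"id": 42},
                    "animation": {"file_id": "abc123"}}

for message in (text_message, gif_only_message):
    try:
        print("indexing:", message["text"])
    except KeyError:
        print("indexing: KeyError for message", message["message_id"])
    print("get():    ", repr(message.get("text", "")))
```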