Columns: message (string, lengths 13–484) | diff (string, lengths 38–4.63k)
[internal] Add context manager to handle temporary unfreezing of `frozen_after_init` objects This adds a safer way to temporarily unfreeze `frozen_after_init` objects, for example in tests. The API is not exposed to static typing (just like the existing `_unfreeze_instance`), but it will re-freeze once finished.
# Licensed under the Apache License, Version 2.0 (see LICENSE). from abc import ABC, abstractmethod +from contextlib import contextmanager from dataclasses import FrozenInstanceError as FrozenInstanceError from functools import wraps -from typing import Any, Callable, Optional, Type, TypeVar, Union +from typing import Any, Callable, Iterator, Optional, Type, TypeVar, Union T = TypeVar("T") C = TypeVar("C", bound=Type) @@ -125,6 +126,15 @@ def frozen_after_init(cls: C) -> C: def unfreeze_instance(self) -> None: self._is_frozen = False + @contextmanager + def unfrozen(self) -> Iterator: + old_is_frozen = self._is_frozen + try: + self._is_frozen = False + yield + finally: + self._is_frozen = old_is_frozen + @wraps(prev_init) def new_init(self, *args: Any, **kwargs: Any) -> None: prev_init(self, *args, **kwargs) @@ -140,6 +150,7 @@ def frozen_after_init(cls: C) -> C: cls._freeze_instance = freeze_instance cls._unfreeze_instance = unfreeze_instance + cls._unfrozen = unfrozen cls.__init__ = new_init cls.__setattr__ = new_setattr # type: ignore[assignment]
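A minimal usage sketch of the new context manager (the class, field, and import path below are illustrative assumptions, not part of the patch): the instance becomes writable inside the block and is re-frozen on exit.

    # Hypothetical usage; `pants.util.meta` is the assumed home of `frozen_after_init`.
    from pants.util.meta import frozen_after_init

    @frozen_after_init
    class MyConfig:
        def __init__(self, value: int) -> None:
            self.value = value

    cfg = MyConfig(value=1)
    with cfg._unfrozen():   # writable inside the block
        cfg.value = 2       # would raise FrozenInstanceError outside it
    assert cfg.value == 2   # instance is re-frozen once the block exits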
Make svg the default graph type Before the previous commit, the graph type default was actually svg, despite the d3 checkbox being checked by default. In order to keep more consistent behavior, we should set the default graph type to svg.
@@ -18,7 +18,7 @@ function visualiserApp(luigi) { DISABLED: 'minus-circle', UPSTREAM_DISABLED: 'warning' }; - var VISTYPE_DEFAULT = 'd3'; + var VISTYPE_DEFAULT = 'svg'; /* * Updates view of the Visualization type.
Clarify that bytes or str can be sent/received by websockets This clarifies some confusion in the documentation.
@@ -89,3 +89,13 @@ example, await test_websocket.send(data) except WebsocketResponse as error: assert error.response.status_code == 401 + +Sending and receiving Bytes or String +------------------------------------- + +The WebSocket protocol allows for either bytes or strings to be sent +with a frame marker indicating which. The +:meth:`~quart.wrappers.request.Websocket.receive` method will return +either ``bytes`` or ``str`` depending on what the client sent, i.e. if +the client sent a string it will be returned from the method. Equally +you can send bytes or strings.
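A minimal echo-handler sketch of the behaviour described above, assuming the standard Quart `websocket` API; the route and handler names are made up.

    from quart import Quart, websocket

    app = Quart(__name__)

    @app.websocket("/echo")
    async def echo():
        while True:
            data = await websocket.receive()         # bytes or str, whichever the client sent
            if isinstance(data, bytes):
                await websocket.send(data)           # echoed back as a binary frame
            else:
                await websocket.send(data.upper())   # echoed back as a text frame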
Handle submodels in a more intuitive way in the GUI This allows the user to choose a submodel type first, and then the specific submodel to add/instantiate. Also, the description shown for each entry is more detailed.
@@ -1523,16 +1523,34 @@ class AddSubmodel(Operator): bl_label = "Add submodel" bl_options = {'REGISTER', 'UNDO'} - def submodellist(self, context): - """Returns a list of submodels in the blender scene for use as enum""" + def submodelnames(self, context): + """Returns a list of submodels of the chosen type for use as enum""" submodellist = [a.name for a in bpy.data.groups + if 'submodeltype' in a + and a['submodeltype'] == self.submodeltype] + return [(a, + a.split(':')[1], + a.split(':')[1].split('/')[0] + + ' version ' + a.split(':')[1].split('/')[1] + ' submodel') + for a in submodellist] + + def submodeltypes(self, context): + """Returns a list of submodel types in the scene for use as enum""" + submodellist = [a['submodeltype'] for a in bpy.data.groups if 'submodeltype' in a] - return [(a, a, a + ' submodel') for a in submodellist] + submodellist = set(submodellist) + return [(a, a, a + ' submodel type') for a in submodellist] + + submodeltype = EnumProperty( + name="Submodel type", + description="Type of the submodel", + items=submodeltypes + ) submodelname = EnumProperty( name="Submodel name", description="Name of the submodel", - items=submodellist + items=submodelnames ) instancename = StringProperty( @@ -1541,24 +1559,30 @@ class AddSubmodel(Operator): ) def check(self, context): - """Make sure invoke is updated when the enum is switched""" return True + def draw(self, context): + layout = self.layout + layout.prop(self, 'submodeltype') + layout.prop(self, 'submodelname') + layout.prop(self, 'instancename') + def invoke(self, context, event): """ Start off the instance numbering based on Blender objects and show a property dialog """ - i = 1 - while self.submodelname + '_{0:03d}'.format(i) in bpy.data.objects: - i += 1 - self.instancename = self.submodelname + '_{0:03d}'.format(i) + self.instancename = self.submodelname.split(':')[1].split('/')[0] wm = context.window_manager return wm.invoke_props_dialog(self) def execute(self, context): """create an instance of the submodel""" - eUtils.instantiateSubmodel(self.submodelname, self.instancename) + i = 1 + while self.instancename + '_{0:03d}'.format(i) in bpy.data.objects: + i += 1 + objectname = self.instancename + '_{0:03d}'.format(i) + eUtils.instantiateSubmodel(self.submodelname, objectname) return {'FINISHED'} @@ -1705,7 +1729,7 @@ class ToggleInterfaces(Operator): class ConnectInterfacesOperator(Operator): - """Connects assemblies at interfaces""" + """Connects submodels at interfaces""" bl_idname = "phobos.connect_interfaces" bl_label = "Connect Interfaces" bl_options = {'REGISTER', 'UNDO'}
ebuild.repo_objs: RepoConfig: add profile_arches attr Relating to the set of all arches with profiles defined in the repo.
@@ -462,6 +462,11 @@ class RepoConfig(syncable.tree): raise return frozenset() + @klass.jit_attr + def profile_arches(self): + """All arches with profiles defined in the repo.""" + return frozenset(self.profiles.arch_profiles.iterkeys()) + @klass.jit_attr def stable_arches(self): """All arches with stable profiles defined in the repo."""
Update show_lag.py Fixed typo in parser description
@@ -392,7 +392,7 @@ class ShowPortChannelDatabaseSchema(MetaParser): # parser for show port-channel database # ===================================== class ShowPortChannelDatabase(ShowPortChannelDatabaseSchema): - """parser show post-channel database""" + """parser show port-channel database""" cli_command = 'show port-channel database' def cli(self, output=None):
Check for ResourceWarnings while testing SSL env. Ref:
@@ -406,6 +406,7 @@ def test_tls_client_auth( ), ) def test_ssl_env( + recwarn, mocker, tls_http_server, adapter_type, ca, tls_verify_mode, tls_certificate, @@ -487,6 +488,23 @@ def test_ssl_env( }: assert key in env + # builtin ssl environment generation may use a loopback socket + # ensure no ResourceWarning was raised during the test + # NOTE: python 2.7 does not emit ResourceWarning for ssl sockets + for warn in recwarn: + if not issubclass(warn.category, ResourceWarning): + continue + + # the tests can sporadically generate resource warnings due to timing issues + # all of these sporadic warnings appear to be about socket.socket + # and have been observed to come from requests connection pool + msg = str(warn.message) + if 'socket.socket' in msg: + pytest.xfail( + 'Sometimes this test fails due to a socket.socket ResourceWarning:\n' + msg, + ) + assert False, msg + @pytest.mark.parametrize( 'ip_addr',
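For reference, a stand-alone sketch of how the pytest `recwarn` fixture used above records warnings for later inspection; the test body is made up.

    import warnings

    def test_inspect_resource_warnings(recwarn):
        warnings.warn("unclosed <socket.socket ...>", ResourceWarning)
        for warn in recwarn:                         # recwarn captures every warning raised in the test
            if issubclass(warn.category, ResourceWarning):
                assert "socket.socket" in str(warn.message)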
Fix downloading of Mbed 2 when zip is too big The Mbed 2 zip was being downloaded into memory. This downloads the zip in 1 MB chunks then writes them to disk to avoid running out of memory.
@@ -389,10 +389,13 @@ class Bld(object): try: if not os.path.exists(rev_file): action("Downloading library build \"%s\" (might take a while)" % rev) - outfd = open(rev_file, 'wb') inurl = urlopen(url) - outfd.write(inurl.read()) - outfd.close() + with open(rev_file, 'wb') as outfd: + data = None + while data != '': + # Download and write the data in 1 MB chunks + data = inurl.read(1024 * 1024) + outfd.write(data) except: if os.path.isfile(rev_file): os.remove(rev_file)
emoji: Add padding around the gif on GIF emoji upload. Replaced ImageOps.fit by ImageOps.pad, in zerver/lib/upload.py, which returns a sized and padded version of the image, expanded to fill the requested aspect ratio and size. Fixes part of
@@ -159,7 +159,7 @@ def resize_gif(im: GifImageFile, size: int=DEFAULT_EMOJI_SIZE) -> bytes: im.seek(frame_num) new_frame = Image.new("RGBA", im.size) new_frame.paste(im, (0, 0), im.convert("RGBA")) - new_frame = ImageOps.fit(new_frame, (size, size), Image.ANTIALIAS) + new_frame = ImageOps.pad(new_frame, (size, size), Image.ANTIALIAS) frames.append(new_frame) duration_info.append(im.info['duration']) out = io.BytesIO()
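A tiny sketch of the difference between the two calls, assuming a Pillow version that provides ImageOps.pad: fit crops the frame to fill the target box, while pad keeps the whole frame and fills the remainder.

    from PIL import Image, ImageOps

    frame = Image.new("RGBA", (100, 40))        # wide, non-square frame
    cropped = ImageOps.fit(frame, (64, 64))     # crops the sides to fill the square
    padded = ImageOps.pad(frame, (64, 64))      # keeps the whole frame, pads top/bottom
    assert cropped.size == padded.size == (64, 64)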
Update README.md * Update README.md added username and password fields to the annotations instance_config section, since we advise creating those in our instructions, and clients will run into authorization errors if those are not included. * Update rabbitmq/README.md
@@ -106,7 +106,7 @@ For containerized environments, see the [Autodiscovery Integration Templates][9] | -------------------- | -------------------------------------------- | | `<INTEGRATION_NAME>` | `rabbitmq` | | `<INIT_CONFIG>` | blank or `{}` | -| `<INSTANCE_CONFIG>` | `{"rabbitmq_api_url":"%%host%%:15672/api/"}` | +| `<INSTANCE_CONFIG>` | `{"rabbitmq_api_url":"%%host%%:15672/api/","username": <USERNAME>, "password": <PASSWORD>}` | ##### Log collection
GraphBookmarksUI : Use editor.scriptNode() more consistently Also seems like a positive change, though for naughty reasons
@@ -130,7 +130,7 @@ def appendNodeSetMenuDefinitions( editor, menuDefinition ) : n = editor.getNodeSet() - script = editor.ancestor( GafferUI.ScriptWindow ).scriptNode() + script = editor.scriptNode() menuDefinition.append( "/NumericBookmarkDivider", { "divider" : True, "label" : "Follow Numeric Bookmark" } )
docs: cli: edit: Add edit command example Fixes:
@@ -82,6 +82,64 @@ Output } ] +Edit +---- + +The edit command drops you into the Python debugger to edit a +:py:class:`Record <dffml.record.Record>` in any source. + +.. note:: + + Be sure to check the :doc:`/plugins/dffml_source` plugin page to see if the + source you're trying to edit is read-only by default, and requires you to add + another flag such as ``readwrite`` to enable editing. + +.. code-block:: console + + $ cat > image.csv << EOF + > key,image + > four,image1.mnistpng + > five,image2.mnistpng + > three,image3.mnistpng + > two,image4.mnistpng + > EOF + $ dffml edit -sources f=csv -source-filename image.csv -source-readwrite -keys three + > /home/user/Documents/python/dffml/dffml/cli/cli.py(45)run() + -> await sctx.update(record) + (Pdb) record.data.features["image"] += "FEEDFACE" + (Pdb) c + $ dffml list records -sources f=csv -source-filename image.csv -source-readwrite + [ + { + "extra": {}, + "features": { + "image": "image1.mnistpng" + }, + "key": "four" + }, + { + "extra": {}, + "features": { + "image": "image2.mnistpng" + }, + "key": "five" + }, + { + "extra": {}, + "features": { + "image": "image3.mnistpngFEEDFACE" + }, + "key": "three" + }, + { + "extra": {}, + "features": { + "image": "image4.mnistpng" + }, + "key": "two" + } + ] + DataFlow --------
api_docs: Add "StreamPostPolicy" component. To facilitate re-use of the same parameters in other paths, this commit store the content of the parameter "stream_post_policy" in components.
@@ -1917,21 +1917,7 @@ paths: type: boolean default: None example: false - - name: stream_post_policy - in: query - description: | - Policy for which users can post messages to the stream. - - * 1 => Any user can post. - * 2 => Only administrators can post. - * 3 => Only new members can post. - - **Changes**: New in Zulip 2.2 (feature level 1), replacing - the previous is_announcement_only boolean. - schema: - type: integer - default: 1 - example: 1 + - $ref: '#/components/parameters/StreamPostPolicy' - name: announce in: query description: | @@ -3159,21 +3145,7 @@ paths: type: boolean example: true required: false - - name: stream_post_policy - in: query - description: | - Policy for which users can post messages to the stream. - - * 1 => Any user can post. - * 2 => Only administrators can post. - * 3 => Only new members can post. - - **Changes**: New in Zulip 2.2 (feature level 1), replacing - the previous `is_announcement_only` boolean. - schema: - type: integer - example: 2 - required: false + - $ref: '#/components/parameters/StreamPostPolicy' - name: history_public_to_subscribers in: query description: | @@ -3871,3 +3843,20 @@ components: type: integer example: 11 required: true + StreamPostPolicy: + name: stream_post_policy + in: query + description: | + Policy for which users can post messages to the stream. + + * 1 => Any user can post. + * 2 => Only administrators can post. + * 3 => Only new members can post. + + **Changes**: New in Zulip 2.2, replacing the previous is_announcement_only + boolean. + schema: + type: integer + default: 1 + example: 2 + required: false
[cleanup] remove deprecated version.get_module_version() function This function gives no meaningful result because our modules do not have any __version__ information.
@@ -25,7 +25,6 @@ from pywikibot import config from pywikibot.backports import cache from pywikibot.comms.http import fetch from pywikibot.exceptions import VersionParseError -from pywikibot.tools import deprecated _logger = 'version' @@ -342,21 +341,6 @@ def getversion_onlinerepo(path='branches/master'): raise VersionParseError('{!r} while parsing {!r}'.format(e, buf)) -@deprecated('pywikibot.__version__', since='20201003') -def get_module_version(module) -> Optional[str]: # pragma: no cover - """ - Retrieve __version__ variable from an imported module. - - :param module: The module instance. - :type module: module - :return: The version hash without the surrounding text. If not present - return None. - """ - if hasattr(module, '__version__'): - return module.__version__ - return None - - def get_module_filename(module) -> Optional[str]: """ Retrieve filename from an imported pywikibot module.
Fix /etc/hosts not being modified when hostname is changed Fixes
@@ -2027,19 +2027,12 @@ def build_network_settings(**settings): # Write settings _write_file_network(network, _DEB_NETWORKING_FILE, True) - # Write hostname to /etc/hostname + # Get hostname and domain from opts sline = opts['hostname'].split('.', 1) opts['hostname'] = sline[0] - hostname = '{0}\n' . format(opts['hostname']) current_domainname = current_network_settings['domainname'] current_searchdomain = current_network_settings['searchdomain'] - # Only write the hostname if it has changed - if not opts['hostname'] == current_network_settings['hostname']: - if not ('test' in settings and settings['test']): - # TODO replace wiht a call to network.mod_hostname instead - _write_file_network(hostname, _DEB_HOSTNAME_FILE) - new_domain = False if len(sline) > 1: new_domainname = sline[1]
max_depth: include bit depth check in BaseCodec similar to BaseDecoder; rename max_color to max_depth; add max_depth to h264_nvenc
@@ -37,6 +37,10 @@ class BaseCodec(object): codec_name = None ffmpeg_codec_name = None ffprobe_codec_name = None + max_depth = 9999 + + def supportsBitDepth(self, depth): + return depth <= self.max_depth def parse_options(self, opt): if 'codec' not in opt or opt['codec'] != self.codec_name: @@ -80,10 +84,10 @@ class BaseDecoder(object): Base decoder class. """ decoder_name = None - max_color = 9999 + max_depth = 9999 def supportsBitDepth(self, depth): - return depth <= self.max_color + return depth <= self.max_depth class AudioCodec(BaseCodec): @@ -1086,6 +1090,7 @@ class NVEncH264Codec(H264Codec): codec_name = 'h264_nvenc' ffmpeg_codec_name = 'h264_nvenc' scale_filter = 'scale_npp' + max_depth = 8 encoder_options = H264Codec.encoder_options.copy() encoder_options.update({ 'decode_device': str, @@ -1247,7 +1252,7 @@ class H264V4l2m2mCodec(H264Codec): class H264V4l2m2mDecoder(BaseDecoder): decoder_name = "h264_v4l2m2m" - max_color = 8 + max_depth = 8 class H265Codec(VideoCodec): @@ -1529,7 +1534,7 @@ class H265V4l2m2mCodec(H265Codec): class H265V4l2m2mDecoder(BaseDecoder): decoder_name = "hevc_v4l2m2m" - max_color = 10 + max_depth = 10 class NVEncH265Codec(H265Codec): @@ -1653,12 +1658,12 @@ class NVEncH265CodecPatched(NVEncH265Codec): class H264CuvidDecoder(BaseDecoder): decoder_name = "h264_cuvid" - max_color = 8 + max_depth = 8 class H265CuvidDecoder(BaseDecoder): decoder_name = "hevc_cuvid" - max_color = 10 + max_depth = 10 class VideotoolboxEncH265(H265Codec):
Run full validation only for specific frameworks If shared code is edited, we validate on test instead, to reduce workload.
@@ -10,6 +10,8 @@ jobs: runs-on: ubuntu-latest outputs: frameworks: ${{ steps.find-required-tests.outputs.frameworks }} + tasks: ${{ steps.find-required-tests.outputs.tasks }} + benchmark: ${{ steps.find-required-tests.outputs.benchmark }} skip_baseline: ${{ steps.find-required-tests.outputs.skip_baseline }} skip_evaluation: ${{ steps.find-required-tests.outputs.skip_evaluation }} steps: @@ -48,7 +50,11 @@ jobs: if [[ $is_common -eq 0 ]]; then FRAMEWORKS='["autogluon", "autosklearn", "gama", "h2oautoml", "mlplanweka", "tpot", "constantpredictor", "randomforest", "tunedrandomforest"]' + TASKS='["iris", "kc2", "cholesterol"]' + BENCHMARK='["test"]' else + TASKS='["APSFailure", "bioresponse", "dresses-sales", "eucalyptus", "internet-advertisements", "kc1", "micro-mass"]' + BENCHMARK='["validation"]' changed_frameworks=$(git diff --name-only HEAD..$GITHUB_BASE_REF | grep -o -i -P 'frameworks/(?!shared).*/' | uniq | sed -e 's/frameworks//' -e 's/\///g') if [ ! -z $changed_frameworks ]; then @@ -65,6 +71,8 @@ jobs: echo Building matrix for frameworks: $FRAMEWORKS echo "::set-output name=frameworks::$FRAMEWORKS" + echo "::set-output name=tasks::TASKS" + echo "::set-output name=benchmark::$BENCHMARK" echo "::set-output name=skip_baseline::$is_common" echo "::set-output name=skip_evaluations::$skip_evaluation" @@ -104,7 +112,8 @@ jobs: matrix: python-version: [3.8] framework: ${{ fromJson(needs.detect_changes.outputs.frameworks) }} - task: [APSFailure, bioresponse, dresses-sales, eucalyptus, internet-advertisements, kc1, micro-mass] + task: ${{ fromJson(needs.detect_changes.outputs.tasks) }} + benchmark: ${{ fromJson(needs.detect_changes.outputs.benchmark) }} fail-fast: true # not sure about this one, but considering the big workload it might be nicer steps: - uses: actions/checkout@v2 @@ -119,7 +128,6 @@ jobs: with: python-version: ${{ matrix.python-version }} - name: Run ${{ matrix.framework }} on ${{ matrix.task }} - uses: ./.github/actions/runbenchmark - with: - framework: ${{ matrix.framework }} - task: ${{ matrix.task }} + run: | + python runbenchmark.py ${{ matrix.framework }} validation test -f 0 -t ${{ matrix.task }} -e + echo "Exit with status: $?"
improve feed.py this would replace PR
@@ -10,6 +10,7 @@ from lxml import etree from mongoengine import DoesNotExist from mongoengine import StringField +from core.errors import GenericYetiError from core.config.celeryctl import celery_app from core.config.config import yeti_config from core.scheduling import ScheduleEntry @@ -105,6 +106,35 @@ class Feed(ScheduleEntry): # Helper functions + def _make_request(self, headers={}, auth=None, params={}): + + """Helper function. Performs an HTTP request on ``source`` and returns request object. + + Args: + headers: Optional headers to be added to the HTTP request. + auth: Username / password tuple to be sent along with the HTTP request. + params: Optional param to be added to the HTTP request. + + Returns: + requests object. + """ + + if auth: + r = requests.get( + self.source, + headers=headers, + auth=auth, + proxies=yeti_config.proxy, + params=params) + else: + r = requests.get( + self.source, headers=headers, proxies=yeti_config.proxy) + + if r.status_code != 200: + raise GenericYetiError("{} returns code: {}".format(self.source, r.status_code)) + + return r + def update_xml(self, main_node, children, headers={}, auth=None): """Helper function. Performs an HTTP request on ``source`` and treats the response as an XML object, yielding a ``dict`` for each parsed @@ -130,16 +160,7 @@ class Feed(ScheduleEntry): """ assert self.source is not None - if auth: - r = requests.get( - self.source, - headers=headers, - auth=auth, - proxies=yeti_config.proxy) - else: - r = requests.get( - self.source, headers=headers, proxies=yeti_config.proxy) - + r = self._make_request(headers, auth) return self.parse_xml(r.content, main_node, children) def parse_xml(self, data, main_node, children): @@ -170,16 +191,7 @@ class Feed(ScheduleEntry): """ assert self.source is not None - if auth: - r = requests.get( - self.source, - headers=headers, - auth=auth, - proxies=yeti_config.proxy) - else: - r = requests.get( - self.source, headers=headers, proxies=yeti_config.proxy) - + r = self._make_request(headers, auth) feed = r.text.split('\n') for line in feed: @@ -204,16 +216,7 @@ class Feed(ScheduleEntry): """ assert self.source is not None - if auth: - r = requests.get( - self.source, - headers=headers, - auth=auth, - proxies=yeti_config.proxy) - else: - r = requests.get( - self.source, headers=headers, proxies=yeti_config.proxy) - + r = self._make_request(headers, auth) feed = r.text.split('\n') reader = csv.reader( self.utf_8_encoder(feed), delimiter=delimiter, quotechar=quotechar) @@ -234,17 +237,7 @@ class Feed(ScheduleEntry): Python ``dict`` object representing the response JSON. """ - if auth: - r = requests.get( - self.source, - headers=headers, - auth=auth, - proxies=yeti_config.proxy, params=params) - else: - r = requests.get( - self.source, headers=headers, proxies=yeti_config.proxy, - params=params) - + r = self._make_request(headers, auth, params) return r.json() def info(self):
Update mpl_plotting.py fixing a typo I noticed in LiveGrid
@@ -363,7 +363,7 @@ class LiveGrid(CallbackBase): # make sure the 'positive direction' of the axes matches what is defined in #axes_positive - xmin, xmax = self.ax.get_ylim() + xmin, xmax = self.ax.get_xlim() if ((xmin > xmax and self.x_positive == 'right') or (xmax > xmin and self.x_positive == 'left')): self.ax.set_xlim(xmax, xmin)
Update currency codes: VEF -> VES The Venezuelan bolívar fuerte (VEF) was redenominated to the bolívar soberano (VES) at 100,000 to 1 and removed from ISO 4217 in August 2018.
"RUB", "RWF", "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STN", "SYP", "SZL", "THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS", "UAH", "UGX", "USD", "USN", "USS", - "UYI", "UYU", "UZS", "VEF", "VND", "VUV", "WST", "XAF", "XAG", "XAU", + "UYI", "UYU", "UZS", "VES", "VND", "VUV", "WST", "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XFU", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", "YER", "ZAR", "ZMW", "CNH" ]
Update datasets.py remove unnecessary if
@@ -70,12 +70,6 @@ class CIFAR100_truncated(data.Dataset): self.root, self.train, self.transform, self.target_transform, self.download ) - if self.train: - # print("train member of the class: {}".format(self.train)) - # data = cifar_dataobj.train_data - data = cifar_dataobj.data - target = np.array(cifar_dataobj.targets) - else: data = cifar_dataobj.data target = np.array(cifar_dataobj.targets)
Update PROJECTS.rst add starterTree project, request accepted (https://github.com/prompt-toolkit/python-prompt-toolkit/issues/1444)
@@ -37,6 +37,7 @@ Shells: - `athenacli <https://github.com/dbcli/athenacli>`_: A CLI for AWS Athena. - `vulcano <https://github.com/dgarana/vulcano>`_: A framework for creating command-line applications that also runs in REPL mode. - `kafka-shell <https://github.com/devshawn/kafka-shell>`_: A supercharged shell for Apache Kafka. +- `starterTree <https://github.com/thomas10-10/starterTree>`_: A command launcher organised in a tree structure with fuzzy autocompletion Full screen applications:
commenting out unit tests Sorry, I was not able to figure out where exactly this is called, so I just commented out the code.
@@ -2,6 +2,7 @@ import unittest class TestCorrectionFactorForHeatingAndCoolingSetpoints(unittest.TestCase): + """ def test_calc_delta_theta_int_inc_cooling_raises_ValueError(self): from cea.demand.space_emission_systems import calc_delta_theta_int_inc_cooling self.assertRaises(ValueError, calc_delta_theta_int_inc_cooling, cooling_system='T1', control_system=None) @@ -21,3 +22,4 @@ class TestCorrectionFactorForHeatingAndCoolingSetpoints(unittest.TestCase): def test_calc_delta_theta_int_inc_heating_T0(self): from cea.demand.space_emission_systems import calc_delta_theta_int_inc_heating self.assertEqual(calc_delta_theta_int_inc_heating('T0', 'T1'), 0.0) + """ \ No newline at end of file
Use AVFMT_FLAG_CUSTOM_IO to stop ffmpeg closing it avformat_open_input would otherwise call avio_closep when there is an error opening a custom IO file.
@@ -244,6 +244,7 @@ cdef class Container(object): if io_open is not None: self.ptr.io_open = pyav_io_open self.ptr.io_close = pyav_io_close + self.ptr.flags |= lib.AVFMT_FLAG_CUSTOM_IO cdef lib.AVInputFormat *ifmt cdef _Dictionary c_options
Update salt-cloud azurearm to work with latest sdk allows compatibility with azure-cli
@@ -79,7 +79,6 @@ HAS_LIBS = False try: import salt.utils.msazure from salt.utils.msazure import object_to_dict - import azure.storage from azure.common.credentials import ( UserPassCredentials, ServicePrincipalCredentials, @@ -115,6 +114,7 @@ try: from azure.mgmt.storage import StorageManagementClient from azure.mgmt.web import WebSiteManagementClient from msrestazure.azure_exceptions import CloudError + from azure.multiapi.storage.v2016_05_31 import CloudStorageAccount HAS_LIBS = True except ImportError: pass @@ -1728,7 +1728,7 @@ def list_containers(call=None, kwargs=None): # pylint: disable=unused-argument if not storconn: storconn = get_conn(StorageManagementClient) - storageaccount = azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False @@ -1769,7 +1769,7 @@ def list_blobs(call=None, kwargs=None): # pylint: disable=unused-argument 'A container must be specified' ) - storageaccount = azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False @@ -1809,7 +1809,7 @@ def delete_blob(call=None, kwargs=None): # pylint: disable=unused-argument 'A blob must be specified' ) - storageaccount = azure.storage.CloudStorageAccount( + storageaccount = CloudStorageAccount( config.get_cloud_config_value( 'storage_account', get_configured_provider(), __opts__, search_global=False
fix assert_string_list docstring value=None raises TypeError DistutilsSetupError: 2 must be a list of strings (got None)
@@ -213,7 +213,7 @@ def check_importable(dist, attr, value): def assert_string_list(dist, attr, value): - """Verify that value is a string list or None""" + """Verify that value is a string list""" try: assert ''.join(value) != value except (TypeError, ValueError, AttributeError, AssertionError):
Update evaluate.py Now evaluate.py will be able to accept kitti dataset and correctly call the functions in ../preprocessing/kitti.py
@@ -28,6 +28,7 @@ if __name__ == "__main__" and __package__ is None: from .. import models from ..preprocessing.csv_generator import CSVGenerator from ..preprocessing.pascal_voc import PascalVocGenerator +from ..preprocessing.kitti import KittiGenerator from ..utils.anchors import make_shapes_callback from ..utils.config import read_config_file, parse_anchor_parameters, parse_pyramid_levels from ..utils.eval import evaluate @@ -72,6 +73,13 @@ def create_generator(args, preprocess_image): shuffle_groups=False, **common_args ) + elif args.dataset_type == 'kitti': + validation_generator = KittiGenerator( + args.kitti_path, + 'val', + shuffle_groups=False, + **common_args + ) else: raise ValueError('Invalid data type received: {}'.format(args.dataset_type)) @@ -96,6 +104,9 @@ def parse_args(args): csv_parser.add_argument('annotations', help='Path to CSV file containing annotations for evaluation.') csv_parser.add_argument('classes', help='Path to a CSV file containing class label mapping.') + kitti_parser=subparsers.add_parser('kitti') + kitti_parser.add_argument('--kitti_path', help='Path to dataset directory') + parser.add_argument('model', help='Path to RetinaNet model.') parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. the input is a training model).', action='store_true') parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50')
Fix broken link. I'm not sure if this is a satisfactory new link, but I submit it for consideration.
@@ -130,6 +130,6 @@ Before adding a new feature, please write a specification using the style for `Django Enhancement Proposals`_. More information about how to send a Pull Request can be found on GitHub: -http://help.github.com/send-pull-requests/ +https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request .. _Django Enhancement Proposals: https://github.com/django/deps/blob/master/final/0001-dep-process.rst
avoid entering subtypes in evaluable.replace This patch limits object traversal of evaluable.replace by ignoring subclasses of tuple, list, dict, set and frozenset, so as to prevent inadvertently entering objects that can not be reinstantiated.
@@ -229,7 +229,7 @@ def replace(func=None, depthfirst=False, recursive=False, lru=4): cache[obj] = rstack[-1] if rstack[-1] is not obj else identity continue - if isinstance(obj, (tuple, list, dict, set, frozenset)): + if obj.__class__ in (tuple, list, dict, set, frozenset): if not obj: rstack.append(obj) # shortcut to avoid recreation of empty container else:
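A small illustration of why the exact-type check matters here: a namedtuple is a tuple subclass, so the old isinstance check would have recursed into it, even though rebuilding it as a plain container would lose its type.

    from collections import namedtuple

    Point = namedtuple("Point", ["x", "y"])
    p = Point(1, 2)

    print(isinstance(p, tuple))                                 # True  -> old check recurses into it
    print(p.__class__ in (tuple, list, dict, set, frozenset))   # False -> new check leaves it alone
    print(tuple(p))                                             # (1, 2) -> naive rebuild drops the Point type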
Dep-env: use ctx.tenant_name `ctx.deployment.tenant_name` does not exist. This one does.
@@ -170,7 +170,7 @@ def create(ctx, labels=None, inputs=None, skip_plugins_validation=False, ext_client, client_config, ext_deployment_id = \ _get_external_clients(nodes, manager_ips) - local_tenant_name = ctx.deployment.tenant_name if ext_client else None + local_tenant_name = ctx.tenant_name if ext_client else None local_idds, external_idds = _create_inter_deployment_dependencies( [manager.private_ip for manager in client.manager.get_managers()], client_config, new_dependencies, ctx.deployment.id,
Fix Ubuntu 18.04 installation steps The installation steps missed the python3-dev dependency and `make install` failed with a "missing Python.h" error.
@@ -94,7 +94,7 @@ Runtime: First make sure you install [Python 3.6 or greater](https://askubuntu.com/a/865569). Then use this command line to install additional requirements and compile DeepState: ```shell -sudo apt update && sudo apt-get install build-essential gcc-multilib g++-multilib cmake python3-setuptools libffi-dev z3 +sudo apt update && sudo apt-get install build-essential gcc-multilib g++-multilib cmake python3-setuptools python3-dev libffi-dev z3 git clone https://github.com/trailofbits/deepstate deepstate mkdir deepstate/build && cd deepstate/build cmake ../
MAINT: Use single backticks when link needed. [ci skip]
@@ -2452,7 +2452,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): The comparison of `a` and `b` uses standard broadcasting, which means that `a` and `b` need not have the same shape in order for ``allclose(a, b)`` to evaluate to True. The same is true for - ``equal`` but not ``array_equal``. + `equal` but not `array_equal`. Examples --------
updates to thread calculations: use multiple return statements instead of updating a local variable; clean up comments; increase max thread count to 150
@@ -15,27 +15,26 @@ except NotImplementedError: def calculate_thread_pool(): """ - Returns the default value for CherryPY thread_pool - It is calculated based on the best values obtained in - several partners installations. - The value must be between 10 (default CherryPy value) and 200. - Servers with more memory can deal with more threads. - Calculations are done for servers with more than 2 Gb of RAM + Returns the default value for CherryPY thread_pool: + - calculated based on the best values obtained in several partners installations + - value must be between 10 (default CherryPy value) and 200 + - servers with more memory can deal with more threads + - calculations are done for servers with more than 2 Gb of RAM """ MIN_POOL = 50 - MAX_POOL = 100 - pool = MIN_POOL + MAX_POOL = 150 if psutil: MIN_MEM = 2 MAX_MEM = 4 total_memory = psutil.virtual_memory().total / pow(2, 30) # in Gb + # if it's in the range, scale thread count linearly with available memory if MIN_MEM < total_memory < MAX_MEM: - pool = MIN_POOL + int((MAX_POOL - MIN_POOL) * float(total_memory - MIN_MEM) / (MAX_MEM - MIN_MEM)) - elif total_memory >= MAX_MEM: - pool = MAX_POOL + return MIN_POOL + int((MAX_POOL - MIN_POOL) * float(total_memory - MIN_MEM) / (MAX_MEM - MIN_MEM)) + # otherwise return either the min or max amount + return MAX_POOL if total_memory >= MAX_MEM else MIN_POOL elif sys.platform.startswith("darwin"): # Considering MacOS has at least 4 Gb of RAM - pool = MAX_POOL - return pool + return MAX_POOL + return MIN_POOL option_spec = {
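A worked example of the linear scaling in the new code; the 3 GB memory figure is arbitrary, chosen only because it sits halfway inside the interpolation range.

    MIN_POOL, MAX_POOL = 50, 150
    MIN_MEM, MAX_MEM = 2, 4

    total_memory = 3.0   # Gb, halfway between MIN_MEM and MAX_MEM
    pool = MIN_POOL + int((MAX_POOL - MIN_POOL) * float(total_memory - MIN_MEM) / (MAX_MEM - MIN_MEM))
    print(pool)          # 100 -> halfway between the minimum and maximum pool sizes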
[modules/traffic] Use boolean util methods see
Parameters: * traffic.exclude: Comma-separated list of interface prefixes to exclude (defaults to "lo,virbr,docker,vboxnet,veth") * traffic.states: Comma-separated list of states to show (prefix with "^" to invert - i.e. ^down -> show all devices that are not in state down) - * traffic.showname: set as False to hide network interface name + * traffic.showname: If set to False, hide network interface name (defaults to True) """ import re @@ -24,7 +24,7 @@ class Module(bumblebee.engine.Module): self._exclude = tuple(filter(len, self.parameter("exclude", "lo,virbr,docker,vboxnet,veth").split(","))) self._status = "" - self._showname = self.parameter("showname", "True") + self._showname = bumblebee.util.asbool(self.parameter("showname", True)) self._prev = {} self._states = {} self._states["include"] = [] @@ -88,7 +88,7 @@ class Module(bumblebee.engine.Module): name = "traffic-{}".format(interface) - if self._showname != "False": + if self._showname: self.create_widget(widgets, name, interface) for direction in ["rx", "tx"]:
Fix the TLS certificate file error in docker_client The TLS certificate file of docker client should be CONF.docker.cert_file
@@ -33,7 +33,7 @@ def docker_client(): if not CONF.docker.api_insecure: client_kwargs['ca_cert'] = CONF.docker.ca_file client_kwargs['client_key'] = CONF.docker.key_file - client_kwargs['client_cert'] = CONF.docker.key_file + client_kwargs['client_cert'] = CONF.docker.cert_file try: yield DockerHTTPClient(
Update dataloader.py Update class BilmDataloader(Dataloader) and class PrefixlmDataloader(Dataloader)
@@ -204,10 +204,16 @@ class BilmDataloader(Dataloader): seg = [] for ins in instances: - src.append(ins[0]) - tgt_forward.append(ins[1]) - tgt_backward.append(ins[2]) - seg.append(ins[3]) + src_single, pad_num = ins[0] + tgt_forward_single, tgt_backward_single = ins[1], ins[2] + for _ in range(pad_num): + src_single.append(self.vocab.get(PAD_TOKEN)) + tgt_forward_single.append(self.vocab.get(PAD_TOKEN)) + tgt_backward_single.append(self.vocab.get(PAD_TOKEN)) + src.append(src_single) + tgt_forward.append(tgt_forward_single) + tgt_backward.append(tgt_backward_single) + seg.append([1] * ins[3][0] + [0] * (len(src_single) - ins[3][0])) yield torch.LongTensor(src), \ torch.LongTensor(tgt_forward), \ @@ -444,9 +450,14 @@ class PrefixlmDataloader(Dataloader): seg = [] for ins in instances: - src.append(ins[0]) - tgt.append(ins[1]) - seg.append([1] * ins[2][0] + [2] * (ins[2][1] - ins[2][0]) + [0] * (len(ins[0]) - ins[2][1])) + src_single, pad_num = ins[0] + tgt_single = ins[1] + for _ in range(pad_num): + src_single.append(self.vocab.get(PAD_TOKEN)) + tgt_single.append(self.vocab.get(PAD_TOKEN)) + src.append(src_single) + tgt.append(tgt_single) + seg.append([1] * ins[2][0] + [2] * (ins[2][1] - ins[2][0]) + [0] * (len(src_single) - ins[2][1])) yield torch.LongTensor(src), \ torch.LongTensor(tgt), \
ketos extract default text order switch Do not reorder extracted text to display order anymore. The train subcommand does this automatically now.
@@ -351,7 +351,7 @@ def train(ctx, pad, output, spec, append, load, savefreq, report, quit, epochs, help='Normalize ground truth') @click.option('-s', '--normalize-whitespace/--no-normalize-whitespace', show_default=True, default=True, help='Normalizes unicode whitespace') [email protected]('-n', '--reorder/--no-reorder', default=True, show_default=True, [email protected]('-n', '--reorder/--no-reorder', default=False, show_default=True, help='Reorder transcribed lines to display order') @click.option('-r', '--rotate/--no-rotate', default=True, show_default=True, help='Skip rotation of vertical lines')
Update training.rst Adding channels for new staff members to join
@@ -5,6 +5,7 @@ Onboarding This document is intended for new hires to summarize norms for working at Mattermost, Inc. including: - `Getting Started Checklist`_ - Getting ready to work here +- `Channels`_ - Where we discuss work-related topics - `Meetings`_ - When we get together and why - `Mindsets`_ - Shared toolsets we have for solving problems - `Terminology`_- Shared vocabulary to convey ideas quickly @@ -63,6 +64,28 @@ First Week - (People Ops) US FTE - Email regarding 401K account set up. +--------------------------------------------------------- +Channels +--------------------------------------------------------- + +Whenever possible, we share key updates and have discussions in Mattermost. Some of the channels used are: + +- [Announcements](https://community.mattermost.com/private-core/channels/announcements) - Mattermost-wide announcements +- [Ask Anything](https://community.mattermost.com/core/channels/ask-anything) - General questions about Mattermost +- [Ask R&D](https://community.mattermost.com/core/channels/ask-r-and-d) - Technical questions to the Mattermost R&D team +- [Customer Feedback](https://community.mattermost.com/private-core/channels/customer-feedback) - Discussion of customer feedback +- [Customer Success](https://community.mattermost.com/private-core/channels/customer-success) - Updates from and general discussion with the Mattermost customer success team +- [Customer Support](https://community.mattermost.com/private-core/channels/community) - Discussion of customer support ticket escalations +- [Developers](https://pre-release.mattermost.com/core/channels/developers) - General development help +- [Marketing](https://community.mattermost.com/private-core/channels/marketing) - Campaign ideas and general marketing team discussions +- [Marketing Website](https://community.mattermost.com/private-core/channels/marketing-website-priv) - Website bugs, release notes, and web discussions +- [Product Management](https://pre-release.mattermost.com/core/channels/product-management) - Discussion with and questions for Mattermost product managers +- [Roadmap](https://pre-release.mattermost.com/private-core/channels/roadmap) - Questions about and discussion of the product roadmap, or to view public roadmap in the header +- [Spec Reviews](https://pre-release.mattermost.com/core/channels/spec-reviews) - In-progress plans for new features +- [Sustained Engineering](https://pre-release.mattermost.com/core/channels/sustained-engineering) - Discussion with Mattermost's [Sustained Engineering Team (SET)](https://developers.mattermost.com/internal/sustained-engineering/) +- [UX Design](https://pre-release.mattermost.com/core/channels/ux-design) - Questions and discussion about product design +- [Welcome](https://community.mattermost.com/private-core/channels/welcome) - Where new staff members are introduced on their first day + --------------------------------------------------------- Meetings ---------------------------------------------------------
[subset] Implement basic HVAR/VVAR support Needs more work. Part of
@@ -1735,6 +1735,40 @@ def subset_glyphs(self, s): self.glyphCount = len(self.variations) return bool(self.variations) +@_add_method(ttLib.getTableClass('HVAR')) +def subset_glyphs(self, s): + table = self.table + + if table.AdvWidthMap: + table.AdvWidthMap.mapping = _dict_subset(table.AdvWidthMap.mapping, s.glyphs) + if table.LsbMap: + table.LsbMap.mapping = _dict_subset(table.LsbMap.mapping, s.glyphs) + if table.RsbMap: + table.RsbMap.mapping = _dict_subset(table.RsbMap.mapping, s.glyphs) + + # TODO Handle direct mapping + # TODO Prune VarStore + + return True + +@_add_method(ttLib.getTableClass('VVAR')) +def subset_glyphs(self, s): + table = self.table + + if table.AdvHeightMap: + table.AdvHeightMap.mapping = _dict_subset(table.AdvHeightMap.mapping, s.glyphs) + if table.TsbMap: + table.TsbMap.mapping = _dict_subset(table.TsbMap.mapping, s.glyphs) + if table.BsbMap: + table.BsbMap.mapping = _dict_subset(table.BsbMap.mapping, s.glyphs) + if table.VOrgMap: + table.VOrgMap.mapping = _dict_subset(table.VOrgMap.mapping, s.glyphs) + + # TODO Handle direct mapping + # TODO Prune VarStore + + return True + @_add_method(ttLib.getTableClass('VORG')) def subset_glyphs(self, s): self.VOriginRecords = {g:v for g,v in self.VOriginRecords.items()
Add prefetch_renditions method on Image queryset manager Update logic when creating and looking for a rendition
@@ -59,7 +59,24 @@ class SourceImageIOError(IOError): class ImageQuerySet(SearchableQuerySetMixin, models.QuerySet): - pass + def prefetch_renditions(self, *filters): + """ + Prefetches generated renditions for the given filters. + """ + # Get a list of filter spec strings. The given value could contain Filter objects + filter_specs = [ + filter.spec if isinstance(filter, Filter) else filter for filter in filters + ] + + rendition_model = self.model.get_rendition_model() + + return self.prefetch_related( + models.Prefetch( + "renditions", + queryset=rendition_model.objects.filter(filter_spec__in=filter_specs), + to_attr="prefetched_renditions", + ) + ) def get_upload_to(instance, filename): @@ -354,6 +371,8 @@ class AbstractImage(ImageFileMixin, CollectionMember, index.Indexed, models.Mode self._prefetched_objects_cache["renditions"]._result_cache.append( rendition ) + elif hasattr(self, "prefetched_renditions"): + self.prefetched_renditions.append(rendition) try: cache = caches["renditions"] @@ -384,7 +403,12 @@ class AbstractImage(ImageFileMixin, CollectionMember, index.Indexed, models.Mode # Interrogate prefetched values first (if available) if "renditions" in getattr(self, "_prefetched_objects_cache", {}): - for rendition in self.renditions.all(): + prefetched_renditions = self.renditions.all() + else: + prefetched_renditions = getattr(self, "prefetched_renditions", None) + + if prefetched_renditions is not None: + for rendition in prefetched_renditions: if ( rendition.filter_spec == filter.spec and rendition.focal_point_key == cache_key
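A hypothetical usage sketch of the new queryset method (the filter specs and view code are illustrative, not taken from the patch): renditions for the listed filters are fetched in one extra query, so get_rendition() reads from prefetched_renditions instead of hitting the database per image.

    from wagtail.images import get_image_model

    Image = get_image_model()

    images = Image.objects.prefetch_renditions("fill-100x100", "width-400")
    for image in images:
        thumb = image.get_rendition("fill-100x100")   # served from the prefetched cache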
StructMetaclass: switch struct-cannot-have-env-spec to an assertion TN:
@@ -1425,15 +1425,9 @@ class StructMetaclass(CompiledTypeMetaclass): dct.pop(f_n, None) env_spec = dct.get('env_spec', None) - if is_astnode: + assert env_spec is None or is_astnode dct['is_env_spec_inherited'] = env_spec is None dct['env_spec'] = env_spec - else: - with diag_ctx: - check_source_language( - env_spec is None, - 'Structs cannot define lexical environment specifications' - ) dct['should_emit_array_type'] = ( dct.get('should_emit_array_type', True) and
Add comments for pipenv support pipenv no longer installs to the running virtual environment, and instead must be separately activated. The added commented lines run yapf inside of the pre-generated virtual environment generated by pipenv.
@@ -27,12 +27,23 @@ if [ ! "$PYTHON_FILES" ]; then exit 0 fi +########## PIP VERSION ############# # Verify that yapf is installed; if not, warn and exit. if [ -z $(which yapf) ]; then echo 'yapf not on path; can not format. Please install yapf:' echo ' pip install yapf' exit 2 fi +######### END PIP VERSION ########## + +########## PIPENV VERSION ########## +# if [ -z $(pipenv run which yapf) ]; then +# echo 'yapf not on path; can not format. Please install yapf:' +# echo ' pipenv install yapf' +# exit 2 +# fi +###### END PIPENV VERSION ########## + # Check for unstaged changes to files in the index. CHANGED_FILES=(`git diff --name-only ${PYTHON_FILES[@]}`) @@ -47,10 +58,20 @@ if [ "$CHANGED_FILES" ]; then done exit 1 fi + # Format all staged files, then exit with an error code if any have uncommitted # changes. echo 'Formatting staged Python files . . .' + +########## PIP VERSION ############# yapf -i -r ${PYTHON_FILES[@]} +######### END PIP VERSION ########## + +########## PIPENV VERSION ########## +# pipenv run yapf -i -r ${PYTHON_FILES[@]} +###### END PIPENV VERSION ########## + + CHANGED_FILES=(`git diff --name-only ${PYTHON_FILES[@]}`) if [ "$CHANGED_FILES" ]; then echo 'Reformatted staged files. Please review and stage the changes.'
CompileCtx: turn annotate_fields_types into a parameter TN:
@@ -515,11 +515,6 @@ class CompileCtx(object): :type: langkit.compiled_types.Struct """ - self.annotate_fields_types = False - """ - Whether to run the 2to3 field annotation pass. - """ - self.template_lookup_extra_dirs = template_lookup_extra_dirs or [] self.additional_source_files = [] @@ -808,17 +803,17 @@ class CompileCtx(object): if path.isfile(filepath) and not filename.startswith("."): self.additional_source_files.append(filepath) - self.annotate_fields_types = annotate_fields_types - self.compile(compile_only=compile_only) + self.compile(compile_only=compile_only, + annotate_fields_types=annotate_fields_types) if compile_only: return with global_context(self): self._emit(file_root, generate_lexer, main_source_dirs, main_programs) - def compile(self, compile_only=False): + def compile(self, compile_only=False, annotate_fields_types=False): with global_context(self): - self._compile(compile_only) + self._compile(compile_only, annotate_fields_types) def write_ada_module(self, out_dir, template_base_name, qual_name, has_body=True): @@ -888,7 +883,7 @@ class CompileCtx(object): return self._struct_types - def _compile(self, compile_only=False): + def _compile(self, compile_only=False, annotate_fields_types=False): """ Compile the language specification: perform legality checks and type inference. @@ -955,7 +950,7 @@ class CompileCtx(object): with names.camel_with_underscores: pass_manager.run(self) - if self.annotate_fields_types: + if annotate_fields_types: # Only import lib2to3 if the users needs it import lib2to3.main
Fixed bug so that the edge case of a single-pixel reflection predicted slightly outside the image doesn't crash things.
@@ -210,8 +210,11 @@ namespace dials { namespace algorithms { int yb = y0 >= 0 ? 0 : std::abs(y0); int xe = x1 <= xi ? xs : xs-(x1-(int)xi); int ye = y1 <= yi ? ys : ys-(y1-(int)yi); - DIALS_ASSERT(ye > yb && yb >= 0 && ye <= ys); - DIALS_ASSERT(xe > xb && xb >= 0 && xe <= xs); + if (yb >= ye || xb >= xe) { + continue; + } + DIALS_ASSERT(yb >= 0 && ye <= ys); + DIALS_ASSERT(xb >= 0 && xe <= xs); DIALS_ASSERT(yb + y0 >= 0 && ye + y0 <= yi); DIALS_ASSERT(xb + x0 >= 0 && xe + x0 <= xi); DIALS_ASSERT(sbox.is_consistent());
Updated setup.py Changes include: Removed support for versions of setuptools prior to 18.0 (dating to early 2015) - This removed some extra logic related to conditional dependencies and simplified the imports Added a python_requires statement to require Python 3.4 or newer - I believe this requires setuptools >= 34.4
""" Setuptools setup file, used to install or test 'cmd2' """ -import sys - -import setuptools from setuptools import setup VERSION = '0.9.0' @@ -72,18 +69,7 @@ EXTRAS_REQUIRE = { ":python_version<'3.5'": ['contextlib2', 'typing'], } -if int(setuptools.__version__.split('.')[0]) < 18: - EXTRAS_REQUIRE = {} - if sys.platform.startswith('win'): - INSTALL_REQUIRES.append('pyreadline') - else: - INSTALL_REQUIRES.append('wcwidth') - if sys.version_info < (3, 5): - INSTALL_REQUIRES.append('contextlib2') - INSTALL_REQUIRES.append('typing') - TESTS_REQUIRE = ['pytest', 'pytest-xdist'] -DOCS_REQUIRE = ['sphinx', 'sphinx_rtd_theme', 'pyperclip', 'wcwidth'] setup( name="cmd2", @@ -98,6 +84,7 @@ setup( platforms=['any'], packages=['cmd2'], keywords='command prompt console cmd', + python_requires='>=3.4', install_requires=INSTALL_REQUIRES, extras_require=EXTRAS_REQUIRE, tests_require=TESTS_REQUIRE,
Fix Request Reference Points the flask.Request references to the appropriate place in the documentation.
@@ -538,16 +538,16 @@ The Request Object `````````````````` The request object is documented in the API section and we will not cover -it here in detail (see :class:`~flask.request`). Here is a broad overview of +it here in detail (see :class:`~flask.Request`). Here is a broad overview of some of the most common operations. First of all you have to import it from the ``flask`` module:: from flask import request The current request method is available by using the -:attr:`~flask.request.method` attribute. To access form data (data +:attr:`~flask.Request.method` attribute. To access form data (data transmitted in a ``POST`` or ``PUT`` request) you can use the -:attr:`~flask.request.form` attribute. Here is a full example of the two +:attr:`~flask.Request.form` attribute. Here is a full example of the two attributes mentioned above:: @app.route('/login', methods=['POST', 'GET']) @@ -570,7 +570,7 @@ error page is shown instead. So for many situations you don't have to deal with that problem. To access parameters submitted in the URL (``?key=value``) you can use the -:attr:`~flask.request.args` attribute:: +:attr:`~flask.Request.args` attribute:: searchword = request.args.get('key', '') @@ -579,7 +579,7 @@ We recommend accessing URL parameters with `get` or by catching the bad request page in that case is not user friendly. For a full list of methods and attributes of the request object, head over -to the :class:`~flask.request` documentation. +to the :class:`~flask.Request` documentation. File Uploads
tests: Verify info logs logging in test_fix_unreads. This commit verifies info logging in test_fix_unreads using assertLogs so that the logging does not spam ./tools/test-backend output.
@@ -373,9 +373,21 @@ class FixUnreadTests(ZulipTestCase): assert_unread(um_unsubscribed_id) # fix unsubscribed - with connection.cursor() as cursor: + with connection.cursor() as cursor, \ + self.assertLogs('zulip.fix_unreads', 'INFO') as info_logs: fix_unsubscribed(cursor, user) + self.assertEqual(info_logs.output[0], 'INFO:zulip.fix_unreads:get recipients') + self.assertTrue('INFO:zulip.fix_unreads:[' in info_logs.output[1]) + self.assertTrue('INFO:zulip.fix_unreads:elapsed time:' in info_logs.output[2]) + self.assertEqual(info_logs.output[3], + 'INFO:zulip.fix_unreads:finding unread messages for non-active streams') + self.assertEqual(info_logs.output[4], 'INFO:zulip.fix_unreads:rows found: 1') + self.assertTrue('INFO:zulip.fix_unreads:elapsed time:' in info_logs.output[5]) + self.assertEqual(info_logs.output[6], + 'INFO:zulip.fix_unreads:fixing unread messages for non-active streams') + self.assertTrue('INFO:zulip.fix_unreads:elapsed time:' in info_logs.output[7]) + # Muted messages don't change. assert_unread(um_muted_topic_id) assert_unread(um_muted_stream_id) @@ -384,9 +396,19 @@ class FixUnreadTests(ZulipTestCase): # The unsubscribed entry should change. assert_read(um_unsubscribed_id) + with self.assertLogs('zulip.fix_unreads', 'INFO') as info_logs: # test idempotency fix(user) + self.assertEqual(info_logs.output[0], f'INFO:zulip.fix_unreads:\n---\nFixing {user.id}:') + self.assertEqual(info_logs.output[1], 'INFO:zulip.fix_unreads:get recipients') + self.assertTrue('INFO:zulip.fix_unreads:[' in info_logs.output[2]) + self.assertTrue('INFO:zulip.fix_unreads:elapsed time:' in info_logs.output[3]) + self.assertEqual(info_logs.output[4], + 'INFO:zulip.fix_unreads:finding unread messages for non-active streams') + self.assertEqual(info_logs.output[5], 'INFO:zulip.fix_unreads:rows found: 0') + self.assertTrue('INFO:zulip.fix_unreads:elapsed time:' in info_logs.output[6]) + assert_unread(um_normal_id) assert_unread(um_muted_topic_id) assert_unread(um_muted_stream_id)
Fix CDNA transformation bug and speed up its implementation. Fix CDNA transformation bug where transformed channels of color and masks were combined incorrectly. Remove for loop over batch size in implementation of CDNA transformation. This speeds up the building of the graph.
@@ -261,6 +261,8 @@ def cdna_transformation(prev_image, cdna_input, num_masks, color_channels): List of images transformed by the predicted CDNA kernels. """ batch_size = int(cdna_input.get_shape()[0]) + height = int(prev_image.get_shape()[1]) + width = int(prev_image.get_shape()[2]) # Predict kernels using linear function of last hidden layer. cdna_kerns = slim.layers.fully_connected( @@ -276,20 +278,22 @@ def cdna_transformation(prev_image, cdna_input, num_masks, color_channels): norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) cdna_kerns /= norm_factor - cdna_kerns = tf.tile(cdna_kerns, [1, 1, 1, color_channels, 1]) - cdna_kerns = tf.split(axis=0, num_or_size_splits=batch_size, value=cdna_kerns) - prev_images = tf.split(axis=0, num_or_size_splits=batch_size, value=prev_image) + # Treat the color channel dimension as the batch dimension since the same + # transformation is applied to each color channel. + # Treat the batch dimension as the channel dimension so that + # depthwise_conv2d can apply a different transformation to each sample. + cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3]) + cdna_kerns = tf.reshape(cdna_kerns, [DNA_KERN_SIZE, DNA_KERN_SIZE, batch_size, num_masks]) + # Swap the batch and channel dimensions. + prev_image = tf.transpose(prev_image, [3, 1, 2, 0]) # Transform image. - transformed = [] - for kernel, preimg in zip(cdna_kerns, prev_images): - kernel = tf.squeeze(kernel) - if len(kernel.get_shape()) == 3: - kernel = tf.expand_dims(kernel, -1) - transformed.append( - tf.nn.depthwise_conv2d(preimg, kernel, [1, 1, 1, 1], 'SAME')) - transformed = tf.concat(axis=0, values=transformed) - transformed = tf.split(axis=3, num_or_size_splits=num_masks, value=transformed) + transformed = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME') + + # Transpose the dimensions to where they belong. + transformed = tf.reshape(transformed, [color_channels, height, width, batch_size, num_masks]) + transformed = tf.transpose(transformed, [3, 1, 2, 0, 4]) + transformed = tf.unstack(transformed, axis=-1) return transformed
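A shape-only sketch of the trick described above (color channels moved to the batch axis, samples to the channel axis, masks as the depthwise channel multiplier); all sizes are made up.

    import tensorflow as tf

    batch, height, width, color_channels, num_masks, k = 8, 64, 64, 3, 10, 5

    prev_image = tf.random.normal([color_channels, height, width, batch])   # batch <-> channel swapped
    cdna_kerns = tf.random.normal([k, k, batch, num_masks])                 # a different kernel set per sample

    out = tf.nn.depthwise_conv2d(prev_image, cdna_kerns, [1, 1, 1, 1], 'SAME')
    print(out.shape)   # (3, 64, 64, 80) == (color, H, W, batch * num_masks)

    out = tf.reshape(out, [color_channels, height, width, batch, num_masks])
    out = tf.transpose(out, [3, 1, 2, 0, 4])   # back to (batch, H, W, color, masks)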
panels: Adjust opacity value for the exit widget. This makes the icon more visible. Once the user hovers over the "x" it will become brighter to notify the user.
@@ -771,6 +771,11 @@ on a dark background, and don't change the dark labels dark either. */ .close { color: inherit; + opacity: 0.8; + } + + .close:hover { + opacity: 1; } }
[bugfix] Read correct object in normalizeData Obviously, we need to read the JSON because its structure is enforced by _extract_JSON.
@@ -3921,7 +3921,7 @@ class SiteLinkCollection(MutableMapping): raise ValueError( "Couldn't determine the site and title of the value: " '{!r}'.format(json)) - db_name = obj['site'] + db_name = json['site'] norm_data[db_name] = json return norm_data
Fix parse rotamers Parsing final rotamers file instead of conformers file
@@ -140,7 +140,7 @@ class FakeGaussOutput(MSONable): class CRESTOutput(MSONable): - def __init__(self, path, output_filename): + def __init__(self, output_filename, path='.'): """ Currently assumes runtype is iMTD-GC [default] Args: @@ -201,13 +201,14 @@ class CRESTOutput(MSONable): if self.properly_terminated: conformer_pattern = re.compile( - r"\s+\d+\s+(?P<Erel>\d*\.\d*)\s+(?P<Etot>-*\d+\.\d+)\s+(?P<weight>-*\d+\.\d+)\s+(?P<conformer>-*\d+\.\d+)\s+(?P<set>\d+)\s+(?P<degen>\d+)") + r"\s+\d+\s+(?P<Erel>\d*\.\d*)\s+(?P<Etot>-*\d+\.\d+)\s+(?P<weight>-*\d+\.\d+)\s+" + r"(?P<conformer>-*\d+\.\d+)\s+(?P<set>\d+)\s+(?P<degen>\d+)\s+(?P<origin>\w+)\n") rotamer_pattern = re.compile( - r"\s+\d+\s+(?P<Erel>\d*\.\d*)\s+(?P<Etot>-*\d+\.\d+)\s+(?P<weight>-*\d+\.\d+)\s+\w+\n") + r"\s+\d+\s+(?P<Erel>\d*\.\d*)\s+(?P<Etot>-*\d+\.\d+)\s+(?P<weight>-*\d+\.\d+)\s+" + r"(?P<origin>\w+)\n") conformer_degeneracies = [] energies = [] with open(output_filepath, 'r') as xtbout_file: - # noinspection PyTypeChecker for line in xtbout_file: conformer_match = conformer_pattern.match(line) rotamer_match = rotamer_pattern.match(line) @@ -216,9 +217,15 @@ class CRESTOutput(MSONable): energies.append(conformer_match['Etot']) elif rotamer_match: energies.append(rotamer_match['Etot']) + n_rot_files = [] + for f in os.listdir(self.path): + if 'crest_rotamers' in f: + n_rot_file = int(os.path.splitext(f)[0].split('_')[2]) + n_rot_files.append(n_rot_file) + final_rotamer_filename = 'crest_rotamers_{}.xyz'.format(max(n_rot_files)) crestbest_path = os.path.join(self.path, 'crest_best.xyz') - rotamers_path = os.path.join(self.path, 'crest_conformers.xyz') + rotamers_path = os.path.join(self.path, final_rotamer_filename) try: self.lowest_energy_structure = Molecule.from_file(crestbest_path) @@ -229,15 +236,13 @@ class CRESTOutput(MSONable): start = 0 for n, d in enumerate(conformer_degeneracies): self.sorted_structures_energies.append([]) - # noinspection PyArgumentList i = 0 - # noinspection PyArgumentList for i in range(start, start + d): self.sorted_structures_energies[n].append([rotamer_structures[i], energies[i]]) start = i except FileNotFoundError: - print('{} not found'.format('crest_conformers.xyz')) + print('{} not found'.format(rotamers_path)) else: crestbest_path = os.path.join(self.path, 'crest_best.xyz')
llvm, mechanisms/optimizationcontrolmechanism: Fix indices in input initialization. The first index of GEP is added to the base pointer.
@@ -1014,7 +1014,7 @@ class OptimizationControlMechanism(ControlMechanism): for i in range(num_features): src = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(i + 1)]) # destination is a struct of 2d arrays - dst = builder.gep(comp_input, [ctx.int32_ty(i), ctx.int32_ty(0), ctx.int32_ty(0)]) + dst = builder.gep(comp_input, [ctx.int32_ty(0), ctx.int32_ty(i), ctx.int32_ty(0)]) builder.store(builder.load(src), dst)
wallet_db upgrades: (trivial) make upgrades more standalone and robust to code changes
@@ -33,7 +33,7 @@ import binascii from . import util, bitcoin from .util import profiler, WalletFileException, multisig_type, TxMinedInfo, bfh -from .invoices import PR_TYPE_ONCHAIN, Invoice +from .invoices import Invoice from .keystore import bip44_derivation from .transaction import Transaction, TxOutpoint, tx_from_any, PartialTransaction, PartialTxOutput from .logging import Logger @@ -557,6 +557,7 @@ class WalletDB(JsonDB): if not self._is_upgrade_method_needed(24, 24): return # add 'type' field to onchain requests + PR_TYPE_ONCHAIN = 0 requests = self.data.get('payment_requests', {}) for k, r in list(requests.items()): if r.get('address') == k: @@ -624,6 +625,7 @@ class WalletDB(JsonDB): def _convert_version_29(self): if not self._is_upgrade_method_needed(28, 28): return + PR_TYPE_ONCHAIN = 0 requests = self.data.get('payment_requests', {}) invoices = self.data.get('invoices', {}) for d in [invoices, requests]: @@ -659,8 +661,8 @@ class WalletDB(JsonDB): def _convert_version_30(self): if not self._is_upgrade_method_needed(29, 29): return - - from .invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN + PR_TYPE_ONCHAIN = 0 + PR_TYPE_LN = 2 requests = self.data.get('payment_requests', {}) invoices = self.data.get('invoices', {}) for d in [invoices, requests]: @@ -682,8 +684,7 @@ class WalletDB(JsonDB): def _convert_version_31(self): if not self._is_upgrade_method_needed(30, 30): return - - from .invoices import PR_TYPE_ONCHAIN + PR_TYPE_ONCHAIN = 0 requests = self.data.get('payment_requests', {}) invoices = self.data.get('invoices', {}) for d in [invoices, requests]:
Fix arrow parsing elements No linked issue, just a broken test
@@ -3741,7 +3741,7 @@ def parse_direction_arrow_to_integer(lhs, ctx): "v": 3, }.get(lhs, -1) else: - return vectorise(parse_direction_arrow_to_integer, lhs, ctx=ctx)() + return vectorise(parse_direction_arrow_to_integer, list(lhs), ctx=ctx)() def parse_direction_arrow_to_vector(lhs, ctx): @@ -3751,13 +3751,13 @@ def parse_direction_arrow_to_vector(lhs, ctx): ts = vy_type(lhs) if ts is str and len(lhs) == 1: return { - ">": [+1, 0], - "^": [0, +1], - "<": [-1, 0], - "v": [0, -1], - }.get(lhs, [0, 0]) + ">": [sympy.nsimplify(+1), sympy.nsimplify(0)], + "^": [sympy.nsimplify(0), sympy.nsimplify(+1)], + "<": [sympy.nsimplify(-1), sympy.nsimplify(0)], + "v": [sympy.nsimplify(0), sympy.nsimplify(-1)], + }.get(lhs, [sympy.nsimplify(0), sympy.nsimplify(0)]) else: - return vectorise(parse_direction_arrow_to_vector, lhs, ctx=ctx)() + return vectorise(parse_direction_arrow_to_vector, list(lhs), ctx=ctx)() def permutations(lhs, ctx):
BUG: fixed variable name. Use `default_kwargs` instead of the undefined `default_keywords`.
@@ -333,9 +333,9 @@ class Instrument(object): self.kwargs[fkey] = {gkey: kwargs[gkey] for gkey in good_kwargs} # Add in defaults if not already present - for dkey in default_keywords.keys(): + for dkey in default_kwargs.keys(): if dkey not in good_kwargs: - self.kwargs[fkey][dkey] = default_keywords[dkey] + self.kwargs[fkey][dkey] = default_kwargs[dkey] # Determine the number of kwargs in this function fkwargs = [gkey for gkey in self.kwargs[fkey].keys()]
DOC: updated use of custom Updated the custom examples in the Independence tutorial.
@@ -45,12 +45,13 @@ to non-DMSP data sets. .mean(skipna=True)) return mean_val - # instantiate pysat.Instrument object to get access to data + # Instantiate pysat.Instrument object to get access to data vefi = pysat.Instrument(platform='cnofs', name='vefi', tag='dc_b') - # define custom filtering method + # Define a custom filtering method def filter_inst(inst, data_label, data_gate): - # select data within +/- data gate + """ Select data within +/- data gate + """ min_gate = -np.abs(data_gate) max_gate = np.abs(data_gate) idx, = np.where((inst[data_label] < max_gate) & @@ -58,13 +59,13 @@ to non-DMSP data sets. inst.data = inst[idx] return - # attach filter to vefi object, function is run upon every load - vefi.custom.add(filter_inst, 'modify', 'latitude', 5.) + # Attach filter to vefi object, function is run upon every load + vefi.custom_attach(filter_inst, args=['latitude', 5.0]) - # make a plot of daily mean of 'db_mer' + # Make a plot of daily mean of 'db_mer' mean_dB = daily_mean(vefi, start, stop, 'dB_mer') - # plot the result using pandas functionality + # Plot the result using pandas functionality mean_dB.plot(title='Absolute Daily Mean of ' + vefi.meta['dB_mer'].long_name) plt.ylabel('Absolute Daily Mean (' + vefi.meta['dB_mer'].units + ')') @@ -85,12 +86,13 @@ instrument is supplied may be modified in arbitrary ways by the nano-kernel. cosmic = pysat.Instrument('cosmic', 'gps', tag='ionprf', clean_level='clean', altitude_bin=3) - # attach filter method - cosmic.custom.add(filter_inst, 'modify', 'edmaxlat', 15.) - # perform average + # Attach the filter method + cosmic.custom_attach(filter_inst, args=['edmaxlat', 15.0]) + + # Perform the averaging mean_max_dens = daily_mean(cosmic, start, stop, 'edmax') - # plot the result using pandas functionality + # Plot the result using pandas functionality long_name = cosmic.meta[data_label, cosmic.name_label] units = cosmic.meta[data_label, cosmic.units_label] mean_max_dens.plot(title='Absolute Daily Mean of ' + long_name) @@ -113,11 +115,13 @@ more than 1D datasets. def daily_mean(inst, start, stop, data_label): - # create empty series to hold result + # Create empty series to hold result mean_val = pandas.Series() - # get list of dates between start and stop + + # Get list of dates between start and stop date_array = pysat.utils.time.create_date_range(start, stop) - # iterate over season, calculate the mean + + # Iterate over season, calculate the mean for date in date_array: inst.load(date=date) if not inst.empty:
Disable nose-timer rev2 Comment out the nose-timer related environment variables and temporarily disable linting.
@@ -16,7 +16,7 @@ on: env: SKIP_DEAP: 1 NOSE_VERBOSE: 2 - NOSE_WITH_TIMER: 0 + #NOSE_WITH_TIMER: 1 NOSE_WITH_ID: 1 NOSE_REDNOSE: 1 NOSE_WITH_COVERAGE: 1 @@ -55,12 +55,12 @@ jobs: python -m pip install flake8 python -m pip install .[testing] # python -m pip freeze # this isn't relevant anymore since pip install builds a wheel separately - - name: Lint with flake8 - run: | +# - name: Lint with flake8 +# run: | # Critical errors, exit on failure - flake8 . --count --show-source --statistics --config=.flake8-critical +# flake8 . --count --show-source --statistics --config=.flake8-critical # Standard PEP8, allowed to fail since exit-zero treats all errors as warnings - flake8 . --exit-zero --statistics +# flake8 . --exit-zero --statistics - name: Run unit tests ubuntu if: ${{matrix.os == 'ubuntu-18.04'}} run: |
Quote node IDs. If a ":" is present in the name, it would otherwise be interpreted as name:port.
@@ -192,15 +192,13 @@ def diagram_as_pydot(diagram: Diagram, splines: str) -> pydot.Dot: @as_pydot.register def _(presentation: ElementPresentation): - if not any( - c for c in presentation.children if not isinstance(c, AttachedPresentation) - ): + if all(isinstance(c, AttachedPresentation) for c in presentation.children): for attached in presentation.children: if isinstance(attached, AttachedPresentation): yield as_pydot(attached) yield pydot.Node( - presentation.id, + f'"{presentation.id}"', id=presentation.id, label="", shape="rect",
WL: logger call is using wrong string format This will raise an exception when called.
@@ -340,4 +340,4 @@ def configure_device(device: InputDevice, configs: dict[str, InputConfig]) -> No elif device.device_type == input_device.InputDeviceType.KEYBOARD: _configure_keyboard(device, conf) else: - logger.warning("Device not configured. Type '{}' not recognised.", device.device_type) + logger.warning("Device not configured. Type '%s' not recognised.", device.device_type)
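For background, assuming the logger here is (or wraps) a standard `logging.Logger`: the standard library interpolates %-style placeholders lazily from the extra arguments, so passing a str.format-style "{}" template with positional arguments fails at emit time. A minimal sketch:

```python
import logging

logger = logging.getLogger(__name__)
device_type = "TOUCHPAD"

# Correct: %-style placeholder, filled in lazily by the logging machinery.
logger.warning("Device not configured. Type '%s' not recognised.", device_type)

# Broken: "{}" is handed to the %-formatter together with an extra argument,
# so formatting fails when the record is emitted ("--- Logging error ---").
logger.warning("Device not configured. Type '{}' not recognised.", device_type)
```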
Fix couch_reindex_schedule default logic Was causing 500s everywhere if default was relied on
@@ -12,8 +12,10 @@ from django.conf import settings # Run every 10 minutes, or as specified in settings.COUCH_REINDEX_SCHEDULE -couch_reindex_schedule = deserialize_run_every_setting( - getattr(settings, 'COUCH_REINDEX_SCHEDULE', timedelta(minutes=10))) +if hasattr(settings, 'COUCH_REINDEX_SCHEDULE'): + couch_reindex_schedule = deserialize_run_every_setting(settings.COUCH_REINDEX_SCHEDULE) +else: + couch_reindex_schedule = timedelta(minutes=10) @periodic_task(serializer='pickle', run_every=couch_reindex_schedule, queue=settings.CELERY_PERIODIC_QUEUE)
Update Alabama.md. Add geolocation coordinates for the Alabama incident entries.
@@ -10,7 +10,7 @@ tags: arrest, journalist, zip-tie id: al-birmingham-1 -geolocation: +geolocation: 33.520453,-86.8109093 **Links** @@ -29,7 +29,7 @@ tags: arrest, journalist, racial-profiling, zip-tie id: al-birmingham-2 -geolocation: +geolocation: 33.520453,-86.8109093 **Links** @@ -49,7 +49,7 @@ tags: arrest, shove, throw, push, knee, protester id: al-hoover-1 -geolocation: +geolocation: 33.385529,-86.805699 **Links** @@ -68,7 +68,7 @@ tags: baton, kick, beat, push, shove, protester id: al-huntsville-1 -geolocation: +geolocation: 34.7298523,-86.5854804 **Links** @@ -83,7 +83,7 @@ tags: threaten, pepper-spray, spray, protester id: al-huntsville-2 -geolocation: +geolocation: 34.7302616,-86.5858014 **Links** @@ -98,7 +98,7 @@ tags: tear-gas, tear-gas-canister, journalist, protester id: al-huntsville-3 -geolocation: +geolocation: 34.7302616,-86.5858014 **Links** @@ -115,7 +115,7 @@ tags: tear-gas, tear-gas-canister, rubber-bullet, shoot, protester id: al-huntsville-4 -geolocation: +geolocation: 34.7302616,-86.5858014 **Links** @@ -136,7 +136,7 @@ tags: rubber-bullet, threaten, protester id: al-huntsville-5 -geolocation: +geolocation: 34.7302616,-86.5858014 **Links**
Update jobs archived flag before setting the default value Running an update before setting the column default value reduces the time the table is locked (since most rows don't have a NULL value anymore), but the migration takes slightly longer to run overall.
@@ -15,7 +15,9 @@ down_revision = '0244_another_letter_org' def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.add_column('jobs', sa.Column('archived', sa.Boolean(), nullable=False, server_default=sa.false())) + op.add_column('jobs', sa.Column('archived', sa.Boolean(), nullable=True)) + op.execute('update jobs set archived = false') + op.alter_column('jobs', 'archived', nullable=False, server_default=sa.false()) # ### end Alembic commands ###
ceph-validate: do not resolve devices This is already done in the ceph-facts role.
when: - item.skipped is undefined -- name: devices variable's tasks related - when: - - devices is defined - - devices | length > 0 - block: - - name: resolve devices in devices - command: "readlink -f {{ item }}" - changed_when: false - register: devices_resolved - with_items: "{{ devices }}" - - - name: set_fact devices_resolved - set_fact: - _devices: "{{ _devices | default([]) + [item.stdout] }}" - with_items: "{{ devices_resolved.results }}" - - name: fail if root_device is passed in lvm_volumes or devices fail: msg: "{{ root_device }} found in either lvm_volumes or devices variable" - when: root_device in lvm_volumes_data_devices | default([]) or root_device in _devices | default([]) + when: root_device in lvm_volumes_data_devices | default([]) or root_device in devices | default([]) - name: check devices are block devices block:
fix: Comment editbox UX Option to dismiss changes
@@ -247,6 +247,19 @@ frappe.ui.form.NewTimeline = class { let edit_box = this.make_editable(edit_wrapper); let content_wrapper = comment_wrapper.find('.content'); + let delete_button = $(` + <button class="btn btn-link action-btn icon-btn"> + ${frappe.utils.icon('close', 'sm', 'close')} + </button> + `).click(() => this.delete_comment(doc.name)); + + let dismiss_button = $(` + <button class="btn btn-link action-btn icon-btn"> + ${__('Dismiss')} + </button> + `).click(() => edit_button.toggle_edit_mode()); + dismiss_button.hide(); + edit_box.set_value(doc.content); edit_box.on_submit = (value) => { @@ -273,17 +286,14 @@ frappe.ui.form.NewTimeline = class { edit_button.toggle_edit_mode = () => { edit_button.edit_mode = !edit_button.edit_mode; edit_button.text(edit_button.edit_mode ? __('Save') : __('Edit')); + delete_button.toggle(!edit_button.edit_mode); + dismiss_button.toggle(edit_button.edit_mode); edit_wrapper.toggle(edit_button.edit_mode); content_wrapper.toggle(!edit_button.edit_mode); }; - let delete_button = $(` - <button class="btn btn-link action-btn icon-btn"> - ${frappe.utils.icon('close', 'sm', 'close')} - </button> - `).click(() => this.delete_comment(doc.name)); - comment_wrapper.find('.actions').append(edit_button); + comment_wrapper.find('.actions').append(dismiss_button); comment_wrapper.find('.actions').append(delete_button); }
speed up jax backend by passing expressions as tuples. A list of strings caused a recompile on each call; update ExpressionBuilder.get_expressions() to return a tuple.
@@ -370,7 +370,7 @@ class ExpressionBuilder(Struct): expressions = [self.join_subscripts(subscripts[ia], self.out_subscripts[ia]) for ia in range(self.n_add)] - return expressions + return tuple(expressions) def get_sizes(self, ia, operands): return get_sizes(self.subscripts[ia], operands[ia])
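The recompilation mentioned in the message is most plausibly a cache-keying issue: a list is unhashable and therefore cannot be reused as a cache key, while an equivalent tuple can. A standalone illustration using `functools.lru_cache` as a stand-in (an assumption, not the actual caching mechanism used by the jax backend):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def compile_expressions(expressions):
    print("compiling", expressions)  # stands in for an expensive jit/einsum build
    return len(expressions)

compile_expressions(("ik,kj->ij", "ij,j->i"))  # compiled
compile_expressions(("ik,kj->ij", "ij,j->i"))  # cache hit, no recompilation
# compile_expressions(["ik,kj->ij"])           # TypeError: unhashable type: 'list'
```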
Add a failing test case (runner.quit blocked by a slow on_stop). Rename a test case.
@@ -4,6 +4,7 @@ import unittest import gevent from gevent import sleep from gevent.queue import Queue +import greenlet import locust from locust import runners, between, constant, LoadTestShape @@ -360,7 +361,7 @@ class TestLocustRunner(LocustTestCase): self.assertEqual(env, runner.environment) self.assertEqual(runner, env.runner) - def test_users_can_call_runner_quit(self): + def test_users_can_call_runner_quit_without_deadlocking(self): class BaseUser(User): wait_time = constant(0) @@ -379,6 +380,28 @@ class TestLocustRunner(LocustTestCase): finally: timeout.cancel() + def test_runner_quit_does_not_get_blocked_by_slow_on_stop(self): + class BaseUser(User): + wait_time = constant(0) + + @task + def trigger(self): + pass + + def on_stop(self): + gevent.sleep(0.2) + + runner = Environment(user_classes=[BaseUser]).create_local_runner() + runner.spawn_users(10, 10, wait=False) + timeout = gevent.Timeout(0.4) + timeout.start() + try: + runner.quit() + except gevent.Timeout: + self.fail("Got Timeout exception, runner must have hung somehow.") + finally: + timeout.cancel() + def test_stop_users_with_spawn_rate(self): class MyUser(User): wait_time = constant(1)
Use raw string for regex in tokenization_t5_fast.py Suppress deprecation warning
@@ -237,7 +237,7 @@ class T5TokenizerFast(PreTrainedTokenizerFast): def get_sentinel_tokens(self): return list( - set(filter(lambda x: bool(re.search("<extra_id_\d+>", x)) is not None, self.additional_special_tokens)) + set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens)) ) def get_sentinel_token_ids(self):
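The deprecation warning in question is Python's "invalid escape sequence" warning: `\d` is not a valid escape in an ordinary string literal, so recent interpreters warn, whereas a raw string passes the backslash straight through to the regex engine. For example:

```python
import re

pattern = "<extra_id_\d+>"   # DeprecationWarning/SyntaxWarning on newer Pythons
pattern = r"<extra_id_\d+>"  # raw string: same regex, no warning

assert re.search(pattern, "<extra_id_42>") is not None
```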
Amend the PlacementFixture We recently merged something adding a version argument for the get method of the SchedulerReportClient. We should add that feature into the PlacementFixture. Also adding a comment explaining why we need to mock up the report client.
@@ -1175,6 +1175,9 @@ class PlacementFixture(fixtures.Fixture): self.addCleanup(self.service.stop) self._client = ks.Session(auth=None) + # NOTE(sbauza): We need to mock the scheduler report client because + # we need to fake Keystone by directly calling the endpoint instead + # of looking up the service catalog, like we did for the OSAPIFixture. self.useFixture(fixtures.MonkeyPatch( 'nova.scheduler.client.report.SchedulerReportClient.get', self._fake_get)) @@ -1188,15 +1191,22 @@ class PlacementFixture(fixtures.Fixture): 'nova.scheduler.client.report.SchedulerReportClient.delete', self._fake_delete)) - def _fake_get(self, *args): + def _fake_get(self, *args, **kwargs): (url,) = args[1:] + version = kwargs.get("version") # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 # in case a token is not provided. We should change that by creating # a fake token so we could remove adding the header below. + headers = {'x-auth-token': self.token} + if version is not None: + # TODO(mriedem): Perform some version discovery at some point. + headers.update({ + 'OpenStack-API-Version': 'placement %s' % version + }) return self._client.get( url, endpoint_override="http://127.0.0.1:%s" % self.service.port, - headers={'x-auth-token': self.token}, + headers=headers, raise_exc=False) def _fake_post(self, *args):
feat(stock_zh_a_gdhs_detail_em): add stock_zh_a_gdhs_detail_em interface add stock_zh_a_gdhs_detail_em interface
@@ -182,13 +182,13 @@ def stock_zh_a_tick_163_now(code: str = "000001") -> pd.DataFrame: if __name__ == "__main__": - stock_zh_a_tick_163_df = stock_zh_a_tick_163(code="sz000001", trade_date="20211021") + stock_zh_a_tick_163_df = stock_zh_a_tick_163(code="sz000001", trade_date="20211104") print(stock_zh_a_tick_163_df) stock_zh_a_tick_tx_js_df = stock_zh_a_tick_tx_js(code="sz000001") print(stock_zh_a_tick_tx_js_df) - stock_zh_a_tick_tx_df = stock_zh_a_tick_tx(code="sh600848", trade_date="20211021") + stock_zh_a_tick_tx_df = stock_zh_a_tick_tx(code="sh600848", trade_date="20211104") print(stock_zh_a_tick_tx_df) date_list = pd.date_range(start="20210601", end="20210613").tolist()
user_profile_modal: Fix label alignment for non-English languages. This fixes the issue in which the lengthy labels would either overflow or affect the alignment of the profile fields.
@@ -238,8 +238,13 @@ ul { .name { color: hsl(0, 0%, 20%); display: inline-block; - min-width: 120px; + width: 120px; font-weight: 600; + margin-right: 10px; + } + + .value { + vertical-align: top; } #exit-sign {
Wrong part name. Part name should be "header" instead of "location".
@@ -114,7 +114,7 @@ requests: matchers-condition: and matchers: - type: regex - part: location + part: header regex: - '(?m)^(?:Location\s*?:\s*?)(?:https?:\/\/|\/\/|\/\\\\|\/\\)?(?:[a-zA-Z0-9\-_\.@]*)evil\.com\/?(\/|[^.].*)?$' # https://regex101.com/r/ZDYhFh/1
Update compiler.py fix-bug: explicitly set log_v when querying cuda to avoid wrong output of jittor_utils.
@@ -987,7 +987,7 @@ if nvcc_path: nvcc_version = list(map(int,v.split('.'))) cu += v try: - r, s = sp.getstatusoutput(f"{sys.executable} -m jittor_utils.query_cuda_cc") + r, s = sp.getstatusoutput(f"log_v=0 {sys.executable} -m jittor_utils.query_cuda_cc") if r==0: s = sorted(list(set(s.strip().split()))) cu += "_sm_" + "_".join(s)
use WAL for sqlite event log Summary: WAL mode should allow for concurrent reads at least Test Plan: existing tests Reviewers: #ft, max
@@ -107,7 +107,7 @@ def wipe(self): CREATE TABLE IF NOT EXISTS event_logs ( row_id INTEGER PRIMARY KEY AUTOINCREMENT, event TEXT -) +); ''' FETCH_EVENTS_SQL = ''' @@ -147,6 +147,7 @@ def store_event(self, event): if not run_id in self._known_run_ids: with self._connect(run_id) as conn: conn.cursor().execute(CREATE_EVENT_LOG_SQL) + conn.cursor().execute('PRAGMA journal_mode=WAL;') self._known_run_ids.add(run_id) with self._connect(run_id) as conn: conn.cursor().execute(INSERT_EVENT_SQL, (serialize_dagster_namedtuple(event),))
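For reference, WAL (write-ahead logging) is a persistent SQLite journal mode in which writers append to a separate log, so readers are not blocked by a concurrent writer. A minimal sketch of enabling and checking it with the standard `sqlite3` module (file name and table are illustrative only):

```python
import sqlite3

conn = sqlite3.connect("/tmp/example_event_log.db")
# journal_mode=WAL is persistent: it is recorded in the database file itself.
mode = conn.execute("PRAGMA journal_mode=WAL;").fetchone()[0]
assert mode == "wal"

conn.execute(
    "CREATE TABLE IF NOT EXISTS event_logs "
    "(row_id INTEGER PRIMARY KEY AUTOINCREMENT, event TEXT);"
)
conn.commit()
```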
Update editor information area Add challenge title Add challenge hints and link to solution Add static programming hints area (no content)
<div class="col-12 col-md-4 programming__info-area"> <div class="programming__info-area-content"> + {% block page_heading %} + <h1> + {{ programming_challenge.name }} + </h1> {% if not programming_challenge.translation_available %} {% with model=programming_challenge parent=topic %} {% include 'topics/not-available-warning.html' %} <strong>{% trans "Challenge Level:" %}</strong> {{ programming_challenge.difficulty.name }} </p> {% endif %} + {% endblock page_heading %} {% if lessons %} <div class="alert alert-info" role="alert"> - <p>{% trans "This programming challenge is linked to the following lessons:" %}</p> + <p>{% trans "This programming challenge is linked to the following unplugged lessons:" %}</p> <ul class="mb-0"> {% for lesson in lessons %} </div> {% endif %} + + {% render_html_field programming_challenge.content %} + + {% comment %} Learning Outcomes {% endcomment %} {% if learning_outcomes %} <details class="panel-learning-outcomes"> <summary> </details> {% endif %} - {% render_html_field programming_challenge.content %} + {% comment %} Challenge Hints {% endcomment %} + {% for implementation in implementations %} + {% if implementation.language.name == "Python" and implementation.hints %} + <details> + <summary> + <strong> + {% trans "Hints" %} + </strong> + </summary> + + <div class="boxed-text-content"> + {% render_html_field implementation.hints %} + <p class="text-center"> + {% comment %} Link to old solution - in topics section {% endcomment %} + <a href="{% url 'topics:programming_challenge_solution' topic.slug programming_challenge.slug implementation.language.slug %}"> + {% blocktrans trimmed %} + Show {{ implementation.language.name }} solution + {% endblocktrans %} + </a> + </p> + </div> + </details> + {% endif %} + {% endfor %} + + {% comment %} Programming syntax reminders {% endcomment %} + <details> + <summary> + <strong>{% trans "Programming Reminders" %}</strong> + </summary> + <div class="boxed-text-content"> + <p>[Some static content here]</p> + </div> + </details> + </div> </div> <div class="col-12 col-md-4 programming__editor">
change denoising setup_task so that it can read from multiple shards Summary: Follow Roberta data handling to support | based data separation
@@ -147,7 +147,9 @@ class DenoisingTask(LegacyFairseqTask): @classmethod def setup_task(cls, args, **kwargs): """Setup the task.""" - dictionary = Dictionary.load(os.path.join(args.data, "dict.txt")) + paths = utils.split_paths(args.data) + assert len(paths) > 0 + dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt")) logger.info("dictionary: {} types".format(len(dictionary))) if not hasattr(args, "shuffle_instance"): args.shuffle_instance = False @@ -196,6 +198,7 @@ class DenoisingTask(LegacyFairseqTask): break_mode=self.args.sample_break_mode, document_sep_len=0, ) + logger.info("loaded {} blocks from: {}".format(len(dataset), split_path)) # prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT) dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
replaced target farm with remote. Target farm is being used for rendering; this should better differentiate it.
@@ -73,8 +73,8 @@ def install(): "save/open/new callback installation..")) # Register default "local" target - print("Registering pyblish target: farm") - pyblish.api.register_target("farm") + print("Registering pyblish target: remote") + pyblish.api.register_target("remote") return print("Registering pyblish target: local")
use single quote in readme.rst Since we use single quotes in the sanic package, we should probably use single quotes in the readme as well.
@@ -21,12 +21,12 @@ Hello World Example app = Sanic() - @app.route("/") + @app.route('/') async def test(request): - return json({"hello": "world"}) + return json({'hello': 'world'}) - if __name__ == "__main__": - app.run(host="0.0.0.0", port=8000) + if __name__ == '__main__': + app.run(host='0.0.0.0', port=8000) Installation ------------
parse incoming date [ICDS-CAS-JRA](https://sentry.io/organizations/dimagi/issues/1449152577)
@@ -2,6 +2,7 @@ from datetime import datetime, timedelta from celery.schedules import crontab from django.conf import settings +from iso8601 import parse_date from corehq.blobs import CODES, get_blob_db from corehq.blobs.models import BlobMeta @@ -14,6 +15,9 @@ from custom.icds.tasks.hosted_ccz import setup_ccz_file_for_hosting # noqa impo @periodic_task_on_envs(settings.ICDS_ENVS, run_every=crontab(minute=0, hour='22')) def delete_old_images(cutoff=None): + if cutoff and isinstance(cutoff, str): + cutoff = parse_date(cutoff) + cutoff = cutoff or datetime.utcnow() max_age = cutoff - timedelta(days=90) db = get_blob_db()
feat: add basic support for "preprocessors" section in configuration file preprocessors: audio: /path/to/corresponding/db.yml video: /path/to/corresponding/db.yml
@@ -86,9 +86,6 @@ class Application(object): super(Application, self).__init__() self.db_yml = db_yml - - self.preprocessors_ = {'audio': FileFinder(self.db_yml)} - self.experiment_dir = experiment_dir # load configuration @@ -96,6 +93,14 @@ class Application(object): with open(config_yml, 'r') as fp: self.config_ = yaml.load(fp) + # preprocessors + preprocessors = {} + PREPROCESSORS_DEFAULT = {'audio': db_yml} + for key, db_yml in self.config_.get('preprocessors', + PREPROCESSORS_DEFAULT): + preprocessors[key] = FileFinder(db_yml) + self.preprocessors_ = preprocessors + # scheduler SCHEDULER_DEFAULT = {'name': 'DavisKingScheduler', 'params': {'learning_rate': 'auto'}}
allow verbose shape information output for tf.layers * Improve the shape information logged when using `tf.layers`, similar to the layers provided by tensorpack (like Conv2D, etc.). I don't know if this is the perfect solution. * try to identify tower0
@@ -6,6 +6,8 @@ from collections import defaultdict import copy from functools import wraps from inspect import isfunction, getmembers +from ..utils import logger +import tensorflow as tf __all__ = ['argscope', 'get_arg_scope', 'enable_argscope_for_module'] @@ -64,7 +66,7 @@ def get_arg_scope(): return defaultdict(dict) -def argscope_mapper(func): +def argscope_mapper(func, log_shape=True): """Decorator for function to support argscope """ @wraps(func) @@ -72,13 +74,26 @@ def argscope_mapper(func): actual_args = copy.copy(get_arg_scope()[func.__name__]) actual_args.update(kwargs) out_tensor = func(*args, **actual_args) + + scope_name = tf.get_variable_scope().name + is_tower_scope = 'tower' in scope_name + + in_tensor = args[0] + name = '<unkown>' if 'name' not in kwargs else kwargs['name'] + if log_shape: + if is_tower_scope: + if 'tower0' in scope_name: + logger.info('%20s: %20s -> %20s' % (name, in_tensor.shape.as_list(), out_tensor.shape.as_list())) + else: + logger.info('%20s: %20s -> %20s' % (name, in_tensor.shape.as_list(), out_tensor.shape.as_list())) + return out_tensor # argscope requires this property wrapped_func.symbolic_function = None return wrapped_func -def enable_argscope_for_module(module): +def enable_argscope_for_module(module, log_shape=True): """ Overwrite all functions of a given module to support argscope. Note that this function monkey-patches the module and therefore could have unexpected consequences. @@ -86,4 +101,4 @@ def enable_argscope_for_module(module): """ for name, obj in getmembers(module): if isfunction(obj): - setattr(module, name, argscope_mapper(obj)) + setattr(module, name, argscope_mapper(obj, log_shape=log_shape))
Update test_mumbai.py Refactor: fix multiline silliness, yet make black happy
@@ -72,26 +72,14 @@ def _get_wallets(ocean): bob_private_key = os.getenv("REMOTE_TEST_PRIVATE_KEY2") instrs = "You must set it. It must hold Mumbai MATIC." - assert ( - alice_private_key is not None - ), f"Need envvar REMOTE_TEST_PRIVATE_KEY1. {instrs}" - assert ( - bob_private_key is not None - ), f"Need envvar REMOTE_TEST_PRIVATE_KEY2. {instrs}" + assert alice_private_key, f"Need envvar REMOTE_TEST_PRIVATE_KEY1. {instrs}" + assert bob_private_key, f"Need envvar REMOTE_TEST_PRIVATE_KEY2. {instrs}" # wallets - alice_wallet = Wallet( - web3, - alice_private_key, - config["BLOCK_CONFIRMATIONS"], - config["TRANSACTION_TIMEOUT"], - ) - bob_wallet = Wallet( - web3, - bob_private_key, - config["BLOCK_CONFIRMATIONS"], - config["TRANSACTION_TIMEOUT"], - ) + n_confirm, timeout = config["BLOCK_CONFIRMATIONS"], config["TRANSACTION_TIMEOUT"] + alice_wallet = Wallet(web3, alice_private_key, n_confirm, timeout) + alice_wallet = Wallet(web3, bob_private_key, n_confirm, timeout) + print(f"alice_wallet.address = '{alice_wallet.address}'") print(f"bob_wallet.address = '{bob_wallet.address}'")
Get rid of the post actions list Replace it with a "handle_children" flag action that will delimit the pre and the post actions.
@@ -76,6 +76,13 @@ def add_to_env(mappings, dest_env=None, metadata=None, resolver=None): return AddToEnv(mappings, dest_env, metadata, resolver) +def handle_children(): + """ + Handle the node's children lexical environments. + """ + return HandleChildren() + + class EnvSpec(object): """ Class defining a lexical environment specification for an ASTNode subclass. @@ -84,8 +91,7 @@ class EnvSpec(object): PROPERTY_COUNT = count(0) def __init__(self, - pre_actions=[], - post_actions=[], + actions=[], initial_env=None, env_hook_arg=None, call_parents=True): @@ -125,8 +131,15 @@ class EnvSpec(object): self._unresolved_initial_env = initial_env ":type: AbstractExpression" - self.pre_actions = list(pre_actions) - self.post_actions = list(post_actions) + pre, post = split_by( + lambda a: not isinstance(a, HandleChildren), actions + ) + + # Get rid of the HandleChildren delimiter action + post = post and post[1:] + + self.pre_actions = pre + self.post_actions = post self.actions = self.pre_actions + self.post_actions self._unresolved_env_hook_arg = env_hook_arg @@ -410,3 +423,10 @@ class RefEnvs(EnvAction): 'Referenced environment resolver must have no dynamically bound' ' variable' ) + + +class HandleChildren(EnvAction): + """ + Stub class to delimit pre and post env actions. + """ + pass
[skip ci] Fix onnx/models URLs These are broken in CI: The upstream changed their default branch from `master` -> `main` which broke the links used in these tests. This pins to a specific commit (the latest one at the time of filing this PR).
@@ -145,7 +145,7 @@ def pytorch_mobilenetv2_quantized(tmpdir_factory): @pytest.fixture(scope="session") def onnx_resnet50(): - base_url = "https://github.com/onnx/models/raw/master/vision/classification/resnet/model" + base_url = "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model" file_to_download = "resnet50-v2-7.onnx" model_file = download_testdata( "{}/{}".format(base_url, file_to_download), file_to_download, module=["tvmc"] @@ -168,7 +168,7 @@ def paddle_resnet50(tmpdir_factory): @pytest.fixture(scope="session") def onnx_mnist(): - base_url = "https://github.com/onnx/models/raw/master/vision/classification/mnist/model" + base_url = "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/mnist/model" file_to_download = "mnist-1.onnx" model_file = download_testdata( "{}/{}".format(base_url, file_to_download), file_to_download, module=["tvmc"]
Fix jumping navbar Closes
{% load random_encode %} {% load bleach %} -<li class="nav-item dropdown pr-1 my-1"> +<li class="nav-item dropdown my-auto"> <a href="#" class="nav-link dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" </div> </li> {% else %} - <li class="pr-2 my-1"><a + <li class="pr-2 my-auto"><a class="btn btn-outline-primary" href="{% url 'account_login' %}?next={{ request.build_absolute_uri }}#signin">Sign In</a></li> - <li class="my-1"><a + <li class="my-auto"><a class="btn btn-outline-primary" href="{% url 'account_signup' %}?next={{ request.build_absolute_uri }}#register">Register</a></li> {% endif %}
Mention fixed (by d473b5ce2d) documentation issue Closes sympy/sympy#14387
@@ -63,3 +63,4 @@ These Sympy issues also were addressed: * :sympyissue:`23223`: Wrong integration results of trigonometric functions * :sympyissue:`23224`: Python code printer not respecting tuple with one element * :sympyissue:`23231`: Sympy giving the wrong solution +* :sympyissue:`14387`: Tutorial on limits creates impression that they are two-sided by default
Kill a couple of "for foo in range(len(bar))" Usually these aren't needed; the ones in tex.py definitely weren't. Also one bit of code reformatting.
@@ -429,18 +429,23 @@ def InternalLaTeXAuxAction(XXXLaTeXAction, target = None, source= None, env=None return result # Now decide if latex will need to be run again due to newglossary command. - for ig in range(len(newglossary_suffix)): - if check_MD5(suffix_nodes[newglossary_suffix[ig][2]],newglossary_suffix[ig][2]) or (count == 1): + for ng in newglossary_suffix: + if check_MD5(suffix_nodes[ng[2]], ng[2]) or (count == 1): # We must run makeindex if Verbose: print("Need to run makeindex for newglossary") - newglfile = suffix_nodes[newglossary_suffix[ig][2]] - MakeNewGlossaryAction = SCons.Action.Action("$MAKENEWGLOSSARYCOM ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" % (newglossary_suffix[ig][2],newglossary_suffix[ig][0],newglossary_suffix[ig][1]), "$MAKENEWGLOSSARYCOMSTR") + newglfile = suffix_nodes[ng[2]] + MakeNewGlossaryAction = SCons.Action.Action( + "$MAKENEWGLOSSARYCOM ${SOURCE.filebase}%s -s ${SOURCE.filebase}.ist -t ${SOURCE.filebase}%s -o ${SOURCE.filebase}%s" + % (ng[2], ng[0], ng[1]), + "$MAKENEWGLOSSARYCOMSTR", + ) result = MakeNewGlossaryAction(newglfile, newglfile, env) if result != 0: - check_file_error_message('%s (newglossary)' % env['MAKENEWGLOSSARY'], - newglossary_suffix[ig][0]) + check_file_error_message( + '%s (newglossary)' % env['MAKENEWGLOSSARY'], ng[0] + ) return result # Now decide if latex needs to be run yet again to resolve warnings. @@ -786,8 +791,8 @@ def tex_emitter_core(target, source, env, graphics_extensions): file_basename = os.path.join(targetdir, 'bu*.aux') file_list = glob.glob(file_basename) # remove the suffix '.aux' - for i in range(len(file_list)): - file_list.append(SCons.Util.splitext(file_list[i])[0]) + for fl in file_list: + file_list.append(SCons.Util.splitext(fl)[0]) # for multibib we need a list of files if suffix_list[-1] == 'multibib': for multibibmatch in multibib_re.finditer(content): @@ -797,8 +802,8 @@ def tex_emitter_core(target, source, env, graphics_extensions): baselist = multibibmatch.group(1).split(',') if Verbose: print("multibib list ", baselist) - for i in range(len(baselist)): - file_list.append(os.path.join(targetdir, baselist[i])) + for bl in baselist: + file_list.append(os.path.join(targetdir, bl)) # now define the side effects for file_name in file_list: for suffix in suffix_list[:-1]:
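The general rewrite pattern being applied above — iterate over the sequence directly, and reach for enumerate() only when the index is genuinely needed — in a generic sketch:

```python
items = ["a", "b", "c"]

for i in range(len(items)):        # index-based form being removed
    print(items[i])

for item in items:                 # direct iteration
    print(item)

for i, item in enumerate(items):   # when the index is actually needed
    print(i, item)
```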
Update train_and_evaluate_using_ray.md "Volume" and "Close" start with a capital letter
@@ -61,7 +61,7 @@ yf_ticker = yfinance.Ticker(ticker=TICKER) df_training = yf_ticker.history(start=TRAIN_START_DATE, end=TRAIN_END_DATE, interval='60m') df_training.drop(['Dividends', 'Stock Splits'], axis=1, inplace=True) -df_training["volume"] = df_training["volume"].astype(int) +df_training["Volume"] = df_training["Volume"].astype(int) df_training.ta.log_return(append=True, length=16) df_training.ta.rsi(append=True, length=14) df_training.ta.macd(append=True, fast=12, slow=26) @@ -69,7 +69,7 @@ df_training.to_csv('training.csv', index=False) df_evaluation = yf_ticker.history(start=EVAL_START_DATE, end=EVAL_END_DATE, interval='60m') df_evaluation.drop(['Dividends', 'Stock Splits'], axis=1, inplace=True) -df_evaluation["volume"] = df_evaluation["volume"].astype(int) +df_evaluation["Volume"] = df_evaluation["Volume"].astype(int) df_evaluation.ta.log_return(append=True, length=16) df_evaluation.ta.rsi(append=True, length=14) df_evaluation.ta.macd(append=True, fast=12, slow=26) @@ -100,7 +100,7 @@ import tensortrade.env.default as default def create_env(config): dataset = pd.read_csv(filepath_or_buffer=config["csv_filename"], parse_dates=['Datetime']).fillna(method='backfill').fillna(method='ffill') ttse_commission = 0.0035 # TODO: adjust according to your commission percentage, if present - price = Stream.source(list(dataset["close"]), dtype="float").rename("USD-TTRD") + price = Stream.source(list(dataset["Close"]), dtype="float").rename("USD-TTRD") ttse_options = ExchangeOptions(commission=ttse_commission) ttse_exchange = Exchange("TTSE", service=execute_order, options=ttse_options)(price)
models/user.py: properly guard plain_text_password property Resolves the following issue, which occurs with force_otp enabled and OAuth authentication sources: File "/srv/powerdnsadmin/powerdnsadmin/models/user.py", line 481, in update_profile "utf-8") if self.plain_text_password else user.password AttributeError: 'User' object has no attribute 'plain_text_password'
@@ -107,7 +107,7 @@ class User(db.Model): def check_password(self, hashed_password): # Check hashed password. Using bcrypt, the salt is saved into the hash itself - if (self.plain_text_password): + if hasattr(self, "plain_text_password"): return bcrypt.checkpw(self.plain_text_password.encode('utf-8'), hashed_password.encode('utf-8')) return False @@ -423,7 +423,7 @@ class User(db.Model): name='Administrator').first().id self.password = self.get_hashed_password( - self.plain_text_password) if self.plain_text_password else '*' + self.plain_text_password) if hasattr(self, "plain_text_password") else '*' if self.password and self.password != '*': self.password = self.password.decode("utf-8") @@ -459,7 +459,7 @@ class User(db.Model): user.email = self.email # store new password hash (only if changed) - if self.plain_text_password: + if hasattr(self, "plain_text_password"): user.password = self.get_hashed_password( self.plain_text_password).decode("utf-8") @@ -478,7 +478,7 @@ class User(db.Model): user.lastname = self.lastname if self.lastname else user.lastname user.password = self.get_hashed_password( self.plain_text_password).decode( - "utf-8") if self.plain_text_password else user.password + "utf-8") if hasattr(self, "plain_text_password") else user.password if self.email: # Can not update to a new email that
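The AttributeError in the traceback arises because plain_text_password is only ever set as a plain instance attribute; even reading it in a truth test requires it to exist, which is exactly what the hasattr() guard sidesteps. A minimal reproduction (simplified model, not the actual PowerDNS-Admin class):

```python
class User:
    def __init__(self, plain_text_password=None):
        if plain_text_password:
            self.plain_text_password = plain_text_password

u = User()                              # e.g. an OAuth user with no password
# u.plain_text_password                 # AttributeError: attribute never assigned
if hasattr(u, "plain_text_password"):   # safe guard; False for this user
    print("would hash:", u.plain_text_password)
```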
tools/tree-diff: Use hash for content diffs We need to know exactly how modified files differ between the two trees. Writing the whole file contents into the diff could make it huge; therefore, only their hashes are recorded.
#!/usr/bin/env python3 import argparse +import hashlib import json import os +def hash_file(fd): + BLOCK_SIZE = 4096 + hasher = hashlib.sha256() + buf = os.read(fd, BLOCK_SIZE) + while len(buf) > 0: + hasher.update(buf) + buf = os.read(fd, BLOCK_SIZE) + + return f"sha256:{hasher.hexdigest()}" + + def stat_diff(stat1, stat2, path, differences): if stat1.st_mode != stat2.st_mode: props = differences.setdefault(path, {}) @@ -32,11 +44,7 @@ def selinux_diff(path1, path2, path, differences): return True -def content_diff(name, dir_fd1, dir_fd2, size1, size2, path, differences): - if size1 != size2: - props = differences.setdefault(path, {}) - props["content"] = "different" - return +def content_diff(name, dir_fd1, dir_fd2, path, differences): try: fd1 = os.open(name, flags=os.O_RDONLY, dir_fd=dir_fd1) except OSError: @@ -47,12 +55,12 @@ def content_diff(name, dir_fd1, dir_fd2, size1, size2, path, differences): os.close(fd1) return try: - for (byte_block1, byte_block2) in zip(iter(lambda f=fd1: os.read(f, 4096), b""), - iter(lambda f=fd2: os.read(f, 4096), b"")): - if byte_block1 != byte_block2: + hash1 = hash_file(fd1) + hash2 = hash_file(fd2) + + if hash1 != hash2: props = differences.setdefault(path, {}) - props["content"] = "different" - break + props["content"] = [hash1, hash2] finally: os.close(fd1) os.close(fd2) @@ -106,8 +114,6 @@ def diff_aux(dir_fd1, dir_fd2, path, report): content_diff(dirent.name, dir_fd1, dir_fd2, - stat1.st_size, - stat2.st_size, os.path.join(path, dirent.name), report["differences"]) elif dirent.is_dir(follow_symlinks=False):
Update src/acquisition/covidcast/csv_importer.py Fix mixed quotes
@@ -19,7 +19,7 @@ from delphi.epidata.acquisition.covidcast.database import CovidcastRow from delphi.epidata.acquisition.covidcast.logger import get_structured_logger DFRow = NamedTuple('DFRow', [('geo_id', str), ('value', float), ('stderr', float), ('sample_size', float), ('missing_value', int), ('missing_stderr', int), ('missing_sample_size', int)]) -PathDetails = NamedTuple('PathDetails', [('source', str), ("signal", str), ('time_type', str), ('geo_type', str), ('time_value', int), ('issue', int), ('lag', int)]) +PathDetails = NamedTuple('PathDetails', [('source', str), ('signal', str), ('time_type', str), ('geo_type', str), ('time_value', int), ('issue', int), ('lag', int)]) @dataclass
for - fix to stills process test uses new phil params
@@ -112,6 +112,11 @@ def test_sacla_h5(dials_regression, run_in_tmpdir, use_mpi, in_memory=False): detector.fix_list = Dist,Tau1 } } + profile { + gaussian_rs { + centroid_definition = com + } + } """ % geometry_path )
Disable evaluate-mnist-intro-example Example needs to be updated to use the latest TF API.
--- -doctest: -PY3 +PY36 +PY37 # 2022-04-11 these tests fail on github actions because TF 1.14 fails to install. We need to update to a more current tensorflow version that has wheels available. +doctest: +FIXME # Example needs to be updated --- # Evaluate MNIST intro example
fixed tutorial bug that would throw an error The comments (lines 84-85) were hashtags instead of double forward slashes, so the compiler (or whatever checks the program) would throw an error even though the user had done nothing wrong.
@@ -81,8 +81,8 @@ int main() for(int i = 0; i < 5; i++) { - # your code goes here. - # use a if else block to classify the person as Child / Adult / Retired + // your code goes here. + // use a if else block to classify the person as Child / Adult / Retired } return 0; }
[doc] document additional config file parameters see
@@ -110,6 +110,10 @@ An example: Configuration files ------------------- +Using a configuration file, it is possible to define a list of modules +that will be loaded if no modules are specified on the CLI, as well as +defining a default theme to use. + Any parameter that can be specified using ``-p <name>=<value>`` on the commandline, can alternatively be specified in one of the following configuration files: - ~/.bumblebee-status.conf - @@ -122,6 +126,10 @@ Configuration files have the following format: :: + [core] + modules = <comma-separated list of modules to load> + theme = <theme to use by default> + [module-parameters] <key> = <value> @@ -131,3 +139,5 @@ For example: [module-parameters] github.token=abcdefabcdef12345 + +
Always default `self.verify` to False for CloudManInstance Allow configuring both `use_ssl` and `verify` in the config, but overridable in `CloudManInstance.__init__()` kwargs
@@ -364,17 +364,10 @@ class CloudManInstance(GenericVMInstance): super().__init__(kwargs['launcher'], kwargs['launch_result']) else: super().__init__(None, None) - self.config = kwargs.pop('cloudman_config', None) - self.use_ssl = False - if not self.config: - self.password = password - self.verify = kwargs.get("verify",None) - self.use_ssl = kwargs.get("use_ssl",kwargs.get("verify",None) is not None) - else: - self.password = self.config.password - if self.config.kwargs.get('use_ssl',False): - self.verify = kwargs.get("verify",False) - self.use_ssl = True + self.config = kwargs.pop('cloudman_config', CloudManConfig()) + self.password = password or self.config.password + self.use_ssl = kwargs.get("use_ssl", self.config.kwargs.get("use_ssl", False)) + self.verify = kwargs.get("verify", self.config.kwargs.get("verify", False)) self.authuser = kwargs.get("authuser", "") self._set_url(url) @@ -735,10 +728,13 @@ class CloudManInstance(GenericVMInstance): if parameters is None: parameters = {} req_url = '/'.join((self.cloudman_url, 'root', url)) - extragetargs = {} - if self.verify is not None: - extragetargs = {'verify': self.verify} - r = requests.get(req_url, params=parameters, auth=(self.authuser, self.password), timeout=timeout, **extragetargs) + r = requests.get( + req_url, + params=parameters, + auth=(self.authuser, self.password), + timeout=timeout, + verify=self.verify, + ) try: json = r.json() return json
Update generic.txt Updating info + detection.
@@ -2674,8 +2674,15 @@ officecrack.gi2.cc untorsnot.in # Reference: https://twitter.com/0x13fdb33f/status/1122544651628576768 +# Reference: https://www.kernelmode.info/forum/viewtopic.php?p=32871 +# Reference: https://otx.alienvault.com/pulse/5cc6ca1e69cc6cfee80974a7 +fusu.icu keke.icu +luru.icu +qoqo.icu +susu.icu +zqfgy.app # Reference: https://twitter.com/dvk01uk/status/1122803607269773312
Special case `Edge`s in flow groupings Now `Edges` have their own code path, ensuring that they are handled correctly. Fixes
@@ -17,6 +17,8 @@ except ImportError: MutableMapping as MuMa) from itertools import chain, filterfalse +from oemof.network import Edge + class Grouping: """ @@ -247,7 +249,11 @@ class Flows(Nodes): return set(flows) def __call__(self, n, d): - flows = set(chain(n.outputs.values(), n.inputs.values())) + flows = ( + {n} + if isinstance(n, Edge) + else set(chain(n.outputs.values(), n.inputs.values())) + ) super().__call__(flows, d) @@ -267,9 +273,16 @@ class FlowsWithNodes(Nodes): return set(tuples) def __call__(self, n, d): - tuples = set(chain( + tuples = ( + {(n.input, n.output, n)} + if isinstance(n, Edge) + else set( + chain( ((n, t, f) for (t, f) in n.outputs.items()), - ((s, n, f) for (s, f) in n.inputs.items()))) + ((s, n, f) for (s, f) in n.inputs.items()), + ) + ) + ) super().__call__(tuples, d)
[IMPR] Simplify movepages.py Simplify iteration of pairsfile.
@@ -38,6 +38,7 @@ Furthermore, the following command line parameters are supported: # Distributed under the terms of the MIT license. # import re +from itertools import zip_longest import pywikibot from pywikibot import i18n, pagegenerators @@ -203,16 +204,14 @@ def main(*args: str) -> None: if opt == 'pairsfile': filename = value or pywikibot.input( 'Enter the name of the file containing pairs:') - old_name1 = None - for page in pagegenerators.TextIOPageGenerator(filename): - if old_name1: - from_to_pairs.append([old_name1, page.title()]) - old_name1 = None - else: - old_name1 = page.title() - if old_name1: + page_gen = [pagegenerators.TextIOPageGenerator(filename)] * 2 + for old_page, new_page in zip_longest(*page_gen, fillvalue=None): + if new_page is None: pywikibot.warning( - 'file {} contains odd number of links'.format(filename)) + 'file {} contains odd number ' + 'of links'.format(filename)) + else: + from_to_pairs.append([old_page.title(), new_page.title()]) elif opt in ('always', 'noredirect', 'skipredirects'): options[opt] = True elif opt in ('notalkpage', 'nosubpages'): @@ -240,10 +239,9 @@ def main(*args: str) -> None: if not site.logged_in(): site.login() - for pair in from_to_pairs: - page = pywikibot.Page(site, pair[0]) bot = MovePagesBot(**options) - bot.move_one(page, pair[1]) + for old_title, new_title in from_to_pairs: + bot.move_one(pywikibot.Page(site, old_title), new_title) gen = gen_factory.getCombinedGenerator(preload=True) if gen:
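The zip_longest call above relies on a standard pairing idiom: the same iterator object is repeated, so zipping it with itself consumes items alternately and yields consecutive pairs, with fillvalue padding an odd trailing element. A standalone illustration:

```python
from itertools import zip_longest

titles = iter(["Old_A", "New_A", "Old_B", "New_B", "Orphan"])
pairs = list(zip_longest(*[titles] * 2, fillvalue=None))
# -> [('Old_A', 'New_A'), ('Old_B', 'New_B'), ('Orphan', None)]
```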
Update jax2tf.py mypy fix
@@ -203,7 +203,7 @@ def convert(fun: Callable, *, # Name input tensors args = tuple( - tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"), a) + tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"), a) # type: ignore for i, a in enumerate(args)) # This function may take pytrees of TfVals. We can only set
Change `backup_id` to `fsx_backup_id` The name of the parameter was changed in
@@ -1753,7 +1753,11 @@ def test_instances_architecture_compatibility_validator( "When restoring an FSx Lustre file system from backup, 'imported_file_chunk_size' cannot be specified.", ), ( - {"backup_id": "backup-0ff8da96d57f3b4e3", "fsx_kms_key_id": "somekey", "deployment_type": "PERSISTENT_1"}, + { + "fsx_backup_id": "backup-0ff8da96d57f3b4e3", + "fsx_kms_key_id": "somekey", + "deployment_type": "PERSISTENT_1", + }, None, 0, "When restoring an FSx Lustre file system from backup, 'fsx_kms_key_id' cannot be specified.",